Repository: nervosnetwork/muta Branch: master Commit: 28f2a60c574d Files: 348 Total size: 1.8 MB Directory structure: gitextract_lg42y7z7/ ├── .dockerignore ├── .github/ │ ├── CODEOWNERS │ ├── ISSUE_TEMPLATE/ │ │ ├── bug-report.md │ │ ├── feature.md │ │ └── help.md │ ├── PULL_REQUEST_TEMPLATE.md │ └── semantic.yml ├── .gitignore ├── .helmignore ├── CHANGELOG/ │ ├── CHANGELOG-0.1.md │ ├── CHANGELOG-0.2.md │ └── README.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── Makefile ├── OWNERS ├── OWNERS_ALIASES ├── README.md ├── README_CN.md ├── SECURITY.md ├── benchmark/ │ ├── bench_executor.rs │ ├── bench_mempool.rs │ ├── benchmark_genesis.toml │ ├── governance/ │ │ ├── mod.rs │ │ └── types.rs │ └── mod.rs ├── binding-macro/ │ ├── Cargo.toml │ ├── src/ │ │ ├── common.rs │ │ ├── cycles.rs │ │ ├── hooks.rs │ │ ├── lib.rs │ │ ├── read_write.rs │ │ └── service.rs │ └── tests/ │ └── mod.rs ├── built-in-services/ │ ├── asset/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ ├── tests/ │ │ │ └── mod.rs │ │ └── types.rs │ ├── authorization/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── metadata/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ └── tests/ │ │ └── mod.rs │ ├── multi-signature/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ ├── tests/ │ │ │ ├── curd_test.rs │ │ │ ├── mod.rs │ │ │ └── recursion_test.rs │ │ └── types.rs │ └── util/ │ ├── Cargo.toml │ └── src/ │ ├── lib.rs │ ├── tests/ │ │ └── mod.rs │ └── types.rs ├── byzantine/ │ ├── Cargo.toml │ ├── README.md │ ├── generators.toml │ ├── src/ │ │ ├── behaviors.rs │ │ ├── commander.rs │ │ ├── config.rs │ │ ├── default_start.rs │ │ ├── invalid_types.rs │ │ ├── lib.rs │ │ ├── message.rs │ │ ├── strategy.rs │ │ ├── utils.rs │ │ └── worker.rs │ └── tests/ │ ├── byz.test.ts │ ├── jest.config.js │ └── package.json ├── charts/ │ ├── deploy-chaos/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates/ │ │ │ ├── _helpers.tpl │ │ │ ├── muta-benchmark.yaml │ │ │ └── muta-chaos-crd.yaml │ │ └── values.yaml 
│ ├── muta/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── Makefile │ │ ├── README.md │ │ ├── templates/ │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── canary.yaml │ │ │ ├── deployment.yaml │ │ │ ├── hpa.yaml │ │ │ ├── ingress.yaml │ │ │ ├── ksvc.yaml │ │ │ └── service.yaml │ │ └── values.yaml │ └── preview/ │ ├── Chart.yaml │ ├── Makefile │ ├── requirements.yaml │ └── values.yaml ├── clippy.toml ├── common/ │ ├── apm/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src/ │ │ ├── lib.rs │ │ ├── metrics/ │ │ │ ├── api.rs │ │ │ ├── consensus.rs │ │ │ ├── mempool.rs │ │ │ ├── network.rs │ │ │ └── storage.rs │ │ └── metrics.rs │ ├── channel/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── config-parser/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ └── types.rs │ ├── crypto/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── logger/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── log.yml │ │ └── src/ │ │ ├── date_fixed_roller.rs │ │ └── lib.rs │ ├── merkle/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ └── pubsub/ │ ├── Cargo.toml │ └── src/ │ └── lib.rs ├── core/ │ ├── api/ │ │ ├── Cargo.toml │ │ ├── source/ │ │ │ └── graphiql.html │ │ └── src/ │ │ ├── adapter/ │ │ │ └── mod.rs │ │ ├── config.rs │ │ ├── lib.rs │ │ └── schema/ │ │ ├── block.rs │ │ ├── mod.rs │ │ ├── receipt.rs │ │ └── transaction.rs │ ├── cli/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── error.rs │ │ ├── lib.rs │ │ └── tests/ │ │ ├── config.toml │ │ ├── genesis.toml │ │ ├── mod.rs │ │ └── service_mapping.rs │ ├── consensus/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── adapter.rs │ │ ├── consensus.rs │ │ ├── engine.rs │ │ ├── fixed_types.rs │ │ ├── lib.rs │ │ ├── message.rs │ │ ├── status.rs │ │ ├── synchronization.rs │ │ ├── tests/ │ │ │ ├── engine.rs │ │ │ ├── mod.rs │ │ │ ├── status.rs │ │ │ └── synchronization.rs │ │ ├── util.rs │ │ ├── wal.rs │ │ └── wal_proto.rs │ ├── mempool/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── adapter/ │ │ │ ├── message.rs │ │ │ └── mod.rs │ │ ├── context.rs │ │ ├── 
lib.rs │ │ ├── map.rs │ │ ├── tests/ │ │ │ ├── mempool.rs │ │ │ └── mod.rs │ │ └── tx_cache.rs │ ├── network/ │ │ ├── Cargo.toml │ │ ├── examples/ │ │ │ └── buycopy.rs │ │ ├── src/ │ │ │ ├── common.rs │ │ │ ├── compression/ │ │ │ │ ├── mod.rs │ │ │ │ └── snappy.rs │ │ │ ├── config.rs │ │ │ ├── connection/ │ │ │ │ ├── control.rs │ │ │ │ ├── keeper.rs │ │ │ │ └── mod.rs │ │ │ ├── endpoint.rs │ │ │ ├── error.rs │ │ │ ├── event.rs │ │ │ ├── lib.rs │ │ │ ├── message/ │ │ │ │ ├── mod.rs │ │ │ │ ├── serde.rs │ │ │ │ └── serde_multi.rs │ │ │ ├── metrics.rs │ │ │ ├── outbound/ │ │ │ │ ├── gossip.rs │ │ │ │ ├── mod.rs │ │ │ │ └── rpc.rs │ │ │ ├── peer_manager/ │ │ │ │ ├── addr_set.rs │ │ │ │ ├── diagnostic.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── peer.rs │ │ │ │ ├── retry.rs │ │ │ │ ├── save_restore.rs │ │ │ │ ├── session_book.rs │ │ │ │ ├── shared.rs │ │ │ │ ├── tags.rs │ │ │ │ ├── test_manager.rs │ │ │ │ ├── time.rs │ │ │ │ └── trust_metric.rs │ │ │ ├── protocols/ │ │ │ │ ├── core.rs │ │ │ │ ├── discovery/ │ │ │ │ │ ├── addr.rs │ │ │ │ │ ├── behaviour.rs │ │ │ │ │ ├── message.rs │ │ │ │ │ ├── protocol.rs │ │ │ │ │ └── substream.rs │ │ │ │ ├── discovery.rs │ │ │ │ ├── identify/ │ │ │ │ │ ├── behaviour.rs │ │ │ │ │ ├── common.rs │ │ │ │ │ ├── identification.rs │ │ │ │ │ ├── message.rs │ │ │ │ │ ├── protocol.rs │ │ │ │ │ └── tests.rs │ │ │ │ ├── identify.rs │ │ │ │ ├── macro.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── ping/ │ │ │ │ │ ├── behaviour.rs │ │ │ │ │ ├── message.rs │ │ │ │ │ └── protocol.rs │ │ │ │ ├── ping.rs │ │ │ │ ├── transmitter/ │ │ │ │ │ ├── behaviour.rs │ │ │ │ │ ├── message.rs │ │ │ │ │ └── protocol.rs │ │ │ │ └── transmitter.rs │ │ │ ├── reactor/ │ │ │ │ ├── mod.rs │ │ │ │ ├── router.rs │ │ │ │ └── rpc_map.rs │ │ │ ├── rpc.rs │ │ │ ├── selfcheck.rs │ │ │ ├── service.rs │ │ │ ├── test/ │ │ │ │ └── mock.rs │ │ │ ├── test.rs │ │ │ └── traits.rs │ │ └── tests/ │ │ ├── common.rs │ │ └── gossip_test.rs │ ├── run/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ └── storage/ │ 
├── Cargo.toml │ ├── examples/ │ │ └── bench.rs │ └── src/ │ ├── adapter/ │ │ ├── memory.rs │ │ ├── mod.rs │ │ └── rocks.rs │ ├── lib.rs │ └── tests/ │ ├── adapter.rs │ ├── mod.rs │ └── storage.rs ├── devtools/ │ ├── chain/ │ │ ├── README.md │ │ ├── config.toml │ │ └── genesis.toml │ ├── docker-build/ │ │ ├── Dockerfile │ │ ├── Dockerfile.build-env │ │ └── Dockerfile.e2e-env │ ├── keypair/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── keypair.yml │ │ └── main.rs │ └── kube/ │ └── deploy-chaos-crd-template.yml ├── docs/ │ ├── _config.yml │ ├── build/ │ │ └── gql_api.sh │ ├── graphql_api.md │ ├── how_to_deploy_a_core_crate.md │ ├── layout.md │ └── resources.md ├── examples/ │ ├── byzantine_node.rs │ ├── config-1.toml │ ├── config-2.toml │ ├── config-3.toml │ ├── config-4.toml │ ├── genesis.toml │ └── muta-chain.rs ├── framework/ │ ├── Cargo.toml │ └── src/ │ ├── binding/ │ │ ├── mod.rs │ │ ├── sdk/ │ │ │ ├── chain_querier.rs │ │ │ └── mod.rs │ │ ├── state/ │ │ │ ├── mod.rs │ │ │ ├── trie.rs │ │ │ └── trie_db.rs │ │ ├── store/ │ │ │ ├── array.rs │ │ │ ├── map.rs │ │ │ ├── mod.rs │ │ │ └── primitive.rs │ │ └── tests/ │ │ ├── mod.rs │ │ ├── sdk.rs │ │ ├── state.rs │ │ └── store.rs │ ├── executor/ │ │ ├── error.rs │ │ ├── factory.rs │ │ ├── mod.rs │ │ └── tests/ │ │ ├── framework.rs │ │ ├── framework_genesis_services.toml │ │ ├── genesis_services.toml │ │ ├── mod.rs │ │ └── test_service.rs │ └── lib.rs ├── jenkins-x-chaos.yml ├── jenkins-x-e2e.yml ├── jenkins-x-lint.yml ├── jenkins-x-unit.yml ├── jenkins-x.yml ├── protocol/ │ ├── Cargo.toml │ └── src/ │ ├── codec/ │ │ ├── block.rs │ │ ├── macro.rs │ │ ├── mod.rs │ │ ├── primitive.rs │ │ ├── receipt.rs │ │ ├── tests/ │ │ │ └── mod.rs │ │ └── transaction.rs │ ├── fixed_codec/ │ │ ├── mod.rs │ │ ├── primitive.rs │ │ ├── receipt.rs │ │ ├── tests/ │ │ │ ├── fixed_codec.rs │ │ │ └── mod.rs │ │ └── transaction.rs │ ├── lib.rs │ ├── traits/ │ │ ├── api.rs │ │ ├── binding.rs │ │ ├── consensus.rs │ │ ├── executor.rs │ │ ├── mempool.rs │ 
│ ├── mod.rs │ │ ├── network.rs │ │ └── storage.rs │ └── types/ │ ├── block.rs │ ├── genesis.rs │ ├── mod.rs │ ├── primitive.rs │ ├── receipt.rs │ ├── service_context.rs │ └── transaction.rs ├── rust-toolchain ├── rustfmt.toml ├── src/ │ └── lib.rs └── tests/ ├── common/ │ ├── mod.rs │ ├── node/ │ │ ├── config.rs │ │ ├── consts.rs │ │ ├── diagnostic.rs │ │ ├── full_node/ │ │ │ ├── builder.rs │ │ │ ├── default_start.rs │ │ │ ├── error.rs │ │ │ └── memory_db.rs │ │ ├── full_node.rs │ │ └── sync.rs │ └── node.rs ├── e2e/ │ ├── jest.config.js │ ├── package.json │ ├── sdk.test.ts │ ├── tsconfig.json │ ├── utils.ts │ └── wait-for-it.sh ├── trust_metric.rs ├── trust_metric_all/ │ ├── client_node.rs │ ├── common.rs │ ├── consensus.rs │ ├── logger.rs │ ├── mempool.rs │ └── mod.rs └── verify_chain_id.rs ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ devtools/chain/data devtools/dex .github docs tests/e2e/node_modules ================================================ FILE: .github/CODEOWNERS ================================================ / @nervosnetwork/muta-dev-team ================================================ FILE: .github/ISSUE_TEMPLATE/bug-report.md ================================================ --- name: Bug Report about: Report a bug labels: t:bug --- **What happened**: **What you expected to happen**: **How to reproduce it (as minimally and precisely as possible)**: **Anything else we need to know?**: **Environment**: - MutaChain version or commit hash (`MutaChain -V`): - OS (e.g: `cat /etc/os-release`): - Kernel (e.g. 
`uname -a`): - Others: ================================================ FILE: .github/ISSUE_TEMPLATE/feature.md ================================================ --- name: Feature Request about: Suggest a feature to the Muta-Chain project labels: t:feature --- **What would you like to be added**: **Why is this needed**: ================================================ FILE: .github/ISSUE_TEMPLATE/help.md ================================================ --- name: Help me about: What kind of help do you want? labels: t:help --- ================================================ FILE: .github/PULL_REQUEST_TEMPLATE.md ================================================ **What this PR does / why we need it**: **Which issue(s) this PR fixes**: Fixes # **Which docs this PR relates to**: Ref # **Which toolchain this PR adapts to**: No Breaking Change **Special notes for your reviewer**: ================================================ FILE: .github/semantic.yml ================================================ # By default, the types specified in commitizen/conventional-commit-types are used.
# See: https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json # You can override the valid types # Angular types: - build # Changes that affect the build system or external dependencies (example scopes: gulp, broccoli, npm) - ci # Changes to our CI configuration files and scripts (example scopes: Travis, Circle, BrowserStack, SauceLabs) - docs # Documentation only changes - feat # A new feature - fix # A bug fix - perf # A code change that improves performance - refactor # A code change that neither fixes a bug nor adds a feature - style # Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) - test # Adding missing tests or correcting existing tests ================================================ FILE: .gitignore ================================================ # Generated by Cargo # will have compiled files and executables /target/ # These are backup files generated by rustfmt **/*.rs.bk # Added by cargo # # already existing elements are commented out /target #**/*.rs.bk # OS .DS_Store # IDE .idea/ .vscode/ # dev devtools/chain/data tests/e2e/node_modules tests/e2e/yarn-error.log # rocksdb **/rocksdb/ logs/ # cargo.lock Cargo.lock # free space, you can store anything you want here free-space byzantine/tests/node_modules ================================================ FILE: .helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj *.png # known compile time folders target/ node_modules/ vendor/ ================================================ FILE: CHANGELOG/CHANGELOG-0.1.md ================================================ ## [0.1.2-beta](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta2...v0.1.2-beta) (2020-06-04) ## [0.1.2-beta2](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta1...v0.1.2-beta2) (2020-06-03) ### Features * supported storage metrics ([#307](https://github.com/nervosnetwork/muta/issues/307)) ([2531b8d](https://github.com/nervosnetwork/muta/commit/2531b8da8e8f2a839484adef62dd93f1deff12dd)) ## [0.1.2-beta1](https://github.com/nervosnetwork/muta/compare/v0.1.0-rc.2-huobi...v0.1.2-beta1) (2020-06-01) ### Bug Fixes * **ci:** Increase timeout in ci ([#262](https://github.com/nervosnetwork/muta/issues/262)) ([a12124a](https://github.com/nervosnetwork/muta/commit/a12124a115512196894a7ca88fc42555db927666)) * **mempool:** check exsit before insert a transaction ([#257](https://github.com/nervosnetwork/muta/issues/257)) ([be3c139](https://github.com/nervosnetwork/muta/commit/be3c13929d2a59f21655b040aa6738c3d43db611)) * **network:** broken users_cast ([#261](https://github.com/nervosnetwork/muta/issues/261)) ([f36eabd](https://github.com/nervosnetwork/muta/commit/f36eabdc5040bc5cbf0d2011c942867150534a41)) * **network:** reconnection fialure ([#273](https://github.com/nervosnetwork/muta/issues/273)) ([9f594b8](https://github.com/nervosnetwork/muta/commit/9f594b8af12e1810bd0cbf23f20ca718d96f6e3a)) * reboot when the diff between height and exec_height more than one ([#267](https://github.com/nervosnetwork/muta/issues/267)) ([e8f8595](https://github.com/nervosnetwork/muta/commit/e8f85958d85e3363fccbfde3971684ebf2fceb4d)) * **sync:** Avoid requesting redundant transactions 
([#259](https://github.com/nervosnetwork/muta/issues/259)) ([8ece029](https://github.com/nervosnetwork/muta/commit/8ece0299fe185667ac23fed92d8c2f156c0e2c5b)) * binding store type should return Option None instead of panic when get none ([#238](https://github.com/nervosnetwork/muta/issues/238)) ([54bdbb9](https://github.com/nervosnetwork/muta/commit/54bdbb93df1a1a85a83814dcb29461acf3645d10)) * **config:** use serde(default) for rocksdb conf ([#229](https://github.com/nervosnetwork/muta/issues/229)) ([2a03e73](https://github.com/nervosnetwork/muta/commit/2a03e73c77807e80020c50bb287adf4d428632e5)) * **storage:** fix rocksdb too many open files error ([#228](https://github.com/nervosnetwork/muta/issues/228)) ([96c32cd](https://github.com/nervosnetwork/muta/commit/96c32cd7956220beddca33b22d4663a675573ba9)) * **sync:** set crypto info when synchronization ([#235](https://github.com/nervosnetwork/muta/issues/235)) ([84ccfc1](https://github.com/nervosnetwork/muta/commit/84ccfc1d8422265028ad7a0b460b4e297d161fe3)) * docker compose configs ([#210](https://github.com/nervosnetwork/muta/issues/210)) ([acc5265](https://github.com/nervosnetwork/muta/commit/acc52653d304ac5cd25a9d643b263a2f462f7d43)) * hang when kill it ([#225](https://github.com/nervosnetwork/muta/issues/225)) ([dc51240](https://github.com/nervosnetwork/muta/commit/dc512405f32854f165f3145c01d022bca4fff93b)) * panic when start ([#214](https://github.com/nervosnetwork/muta/issues/214)) ([d2da69b](https://github.com/nervosnetwork/muta/commit/d2da69b5941a88376b64453f7d3c10eca3f67d81)) * **muta:** hangs up on one cpu core ([#203](https://github.com/nervosnetwork/muta/issues/203)) ([555dd9e](https://github.com/nervosnetwork/muta/commit/555dd9e694fda043be01f90c91396efd7fe0ace5)) ### Features * split monitor network url ([#300](https://github.com/nervosnetwork/muta/issues/300)) ([1237354](https://github.com/nervosnetwork/muta/commit/12373544598d0dae852321cbe3b4e8dab5c70e54)) * supported mempool monitor 
([#298](https://github.com/nervosnetwork/muta/issues/298)) ([cc7fdfa](https://github.com/nervosnetwork/muta/commit/cc7fdfa7a7c99466d76d4fe9c1a3537ab8754837)) * supported new metrics ([#294](https://github.com/nervosnetwork/muta/issues/294)) ([e59364a](https://github.com/nervosnetwork/muta/commit/e59364a7759960d8a3279dc78844965f54f4bf62)) * **apm:** add api get_block metrics ([#276](https://github.com/nervosnetwork/muta/issues/276)) ([6ea21e3](https://github.com/nervosnetwork/muta/commit/6ea21e3e0fe08898264f13938cf849c197531afa)) * **apm:** Add opentracing ([#270](https://github.com/nervosnetwork/muta/issues/270)) ([cece21d](https://github.com/nervosnetwork/muta/commit/cece21d8e865223c8679e54d0253ced70dab4c0a)) * **apm:** tracing height and round in OverlordMsg ([#287](https://github.com/nervosnetwork/muta/issues/287)) ([a8c09ff](https://github.com/nervosnetwork/muta/commit/a8c09ff363e8caac9c0977db2fc6cffb782961d7)) * **ci:** add e2e ([#236](https://github.com/nervosnetwork/muta/issues/236)) ([3058722](https://github.com/nervosnetwork/muta/commit/3058722081084b7cb8f423c26eba9e88707fca18)) * **consensus:** add proof check logic for sync and consensus ([#224](https://github.com/nervosnetwork/muta/issues/224)) ([b19502f](https://github.com/nervosnetwork/muta/commit/b19502f48e6d314717a8a2286ada58f6097c6f31)) * **consensus:** change validator list ([#211](https://github.com/nervosnetwork/muta/issues/211)) ([bb04d2c](https://github.com/nervosnetwork/muta/commit/bb04d2c961110276d38cf0e07239d5e72e8125a8)) * **consensus:** integrate trust metric to consensus ([#244](https://github.com/nervosnetwork/muta/issues/244)) ([3dd6bc1](https://github.com/nervosnetwork/muta/commit/3dd6bc1796ca3e6c76cb99beefd5911d35a5e8ee)) * **mempool:** integrate trust metric ([#245](https://github.com/nervosnetwork/muta/issues/245)) ([49474fd](https://github.com/nervosnetwork/muta/commit/49474fddde3ffc45d564544bb5887bb09a37da1d)) * **metric:** introduce metric using prometheus 
([#271](https://github.com/nervosnetwork/muta/issues/271)) ([3d1dc4f](https://github.com/nervosnetwork/muta/commit/3d1dc4fcf196b8616f41dc4cd2a5ba0c0a5ab422)) * **metrics:** mempool, consensus and sync ([#275](https://github.com/nervosnetwork/muta/issues/275)) ([12e4918](https://github.com/nervosnetwork/muta/commit/12e4918d9925868407f854af29410d8ecafe4d48)) * **network:** add metrics ([#274](https://github.com/nervosnetwork/muta/issues/274)) ([56a9b62](https://github.com/nervosnetwork/muta/commit/56a9b62251106d44df33c43d4590575df25df61a)) * **network:** add trace header to network msg ([#281](https://github.com/nervosnetwork/muta/issues/281)) ([6509cbe](https://github.com/nervosnetwork/muta/commit/6509cbec2f700238b2259943212e0968b58404ce)) * **network:** peer trust metric ([#231](https://github.com/nervosnetwork/muta/issues/231)) ([5abefeb](https://github.com/nervosnetwork/muta/commit/5abefebddacfb58415f2a319098bb164ceaa8c81)) * add tx hook in framework ([#218](https://github.com/nervosnetwork/muta/issues/218)) ([cdeb9fd](https://github.com/nervosnetwork/muta/commit/cdeb9fd1e18e198636fa59d91aead85d65cf9852)) * re-execute blocks to recover current status ([#222](https://github.com/nervosnetwork/muta/issues/222)) ([1cd7cb6](https://github.com/nervosnetwork/muta/commit/1cd7cb6d4fbc599bac65bd2c36b507088a3fa041)) * **network:** rpc remote server error response ([#205](https://github.com/nervosnetwork/muta/issues/205)) ([bb993ac](https://github.com/nervosnetwork/muta/commit/bb993ac1f5fe44a2f6a72c8718572accacb27dc3)) * **sync:** Split a transaction in a block into multiple requests ([#221](https://github.com/nervosnetwork/muta/issues/221)) ([0bbf43c](https://github.com/nervosnetwork/muta/commit/0bbf43c49d2df49d70b4bc816ac24c3bc3603a1a)) * add actix payload size limit config ([#204](https://github.com/nervosnetwork/muta/issues/204)) ([97319d6](https://github.com/nervosnetwork/muta/commit/97319d6d22c8143ba35c3fe42d56f2cfbc131e37)) ### BREAKING CHANGES * **network:** change 
rpc response * change(network): bump transmitter protocol version # [0.1.0-rc.2-huobi](https://github.com/nervosnetwork/muta/compare/v0.0.1-rc1-huobi...v0.1.0-rc.2-huobi) (2020-02-24) ### Bug Fixes * **mempool:** fix repeat txs, add flush_incumbent_queue ([#189](https://github.com/nervosnetwork/muta/issues/189)) ([e0db745](https://github.com/nervosnetwork/muta/commit/e0db745419c5ada3d6e9dc4416945a0775a8f18b)) * **muta:** hangs up running on single core environment ([#201](https://github.com/nervosnetwork/muta/issues/201)) ([09f5b4e](https://github.com/nervosnetwork/muta/commit/09f5b4ed70a519155933f7fd4c2015ff512dfdb1)) * block hash from bytes ([#192](https://github.com/nervosnetwork/muta/issues/192)) ([7ca0af4](https://github.com/nervosnetwork/muta/commit/7ca0af46edbd00e4ba43e8646e77fa41aba781cf)) ### Features * check size and cycle limit when insert tx into mempool ([#195](https://github.com/nervosnetwork/muta/issues/195)) ([92bdf2d](https://github.com/nervosnetwork/muta/commit/92bdf2d5147502e1d250fdae47b8ae2c2cfce23f)) * remove redundant wal transactions when commit ([#197](https://github.com/nervosnetwork/muta/issues/197)) ([3aff1db](https://github.com/nervosnetwork/muta/commit/3aff1dbb2dcdabaaf9cbecb9c3e9757a2c737354)) * Supports actix in tokio ([#200](https://github.com/nervosnetwork/muta/issues/200)) ([266c1cb](https://github.com/nervosnetwork/muta/commit/266c1cb2cf6223759eba4ca9771ee21b244db3a4)) * **api:** Supports configuring the max number of connections. 
([#194](https://github.com/nervosnetwork/muta/issues/194)) ([6cbdd26](https://github.com/nervosnetwork/muta/commit/6cbdd267b7ff56eefbe23bffc8e4dc589272111d)) * **service:** upgrade asset service ([#150](https://github.com/nervosnetwork/muta/issues/150)) ([8925390](https://github.com/nervosnetwork/muta/commit/8925390b59353d853dd1266cdcfe6db1258a8296)) ### Reverts * Revert "fix(muta): hangs up running on single core environment (#201)" (#202) ([28e685a](https://github.com/nervosnetwork/muta/commit/28e685a62b82c1a91699b4495d430b0757e5438d)), closes [#201](https://github.com/nervosnetwork/muta/issues/201) [#202](https://github.com/nervosnetwork/muta/issues/202) ## [0.0.1-rc1-huobi](https://github.com/nervosnetwork/muta/compare/v0.0.1-rc.1-huobi...v0.0.1-rc1-huobi) (2020-02-15) ### Bug Fixes * **ci:** fail to install sccache after new rust-toolchain ([#68](https://github.com/nervosnetwork/muta/issues/68)) ([f961415](https://github.com/nervosnetwork/muta/commit/f961415803ae6d38b70e97a810f33a1b60639d43)) * **consensus:** check logs bloom when check block ([#168](https://github.com/nervosnetwork/muta/issues/168)) ([0984989](https://github.com/nervosnetwork/muta/commit/09849893270cc0908e2ee965e7e8b7c46ada0f16)) * **consensus:** empty block receipts root ([#61](https://github.com/nervosnetwork/muta/issues/61)) ([89ed4d2](https://github.com/nervosnetwork/muta/commit/89ed4d2c4a708f278e7cd777c562f1f1fb5a9755)) * **consensus:** encode overlord message and verify signature ([#39](https://github.com/nervosnetwork/muta/issues/39)) ([b11e69e](https://github.com/nervosnetwork/muta/commit/b11e69e49ed195d0d23f22b6abf1387f4a4c0c94)) * **consensus:** fix check state roots ([#107](https://github.com/nervosnetwork/muta/issues/107)) ([cf45c3b](https://github.com/nervosnetwork/muta/commit/cf45c3ba39eb65bdb012165e232352a9187a6f0d)) * **consensus:** Get authority list returns none. 
([#4](https://github.com/nervosnetwork/muta/issues/4)) ([2a7eb3c](https://github.com/nervosnetwork/muta/commit/2a7eb3c26fade5a065ec2435b4ba46b6c16f223a)) * **consensus:** state root can not be clear ([#140](https://github.com/nervosnetwork/muta/issues/140)) ([4ea1df4](https://github.com/nervosnetwork/muta/commit/4ea1df425620482f36daf61b4b50edb83807efdd)) * **consensus:** sync txs context no session id ([#167](https://github.com/nervosnetwork/muta/issues/167)) ([53136c3](https://github.com/nervosnetwork/muta/commit/53136c3dfdf0e7b29762cd72f51eeb35d52804c2)) * **doc:** fix graphql_api doc link and doc-api build sh ([#161](https://github.com/nervosnetwork/muta/issues/161)) ([e67e2b2](https://github.com/nervosnetwork/muta/commit/e67e2b24bf0609c263f59381a83fcf04d2227583)) * **executor:** wrong hook logic ([#127](https://github.com/nervosnetwork/muta/issues/127)) ([8c6a246](https://github.com/nervosnetwork/muta/commit/8c6a246a1b64a197371305856148b034320f1fa0)) * **framework/executor:** Catch any errors in the call. ([#92](https://github.com/nervosnetwork/muta/issues/92)) ([739a126](https://github.com/nervosnetwork/muta/commit/739a126c86643b28e1c47aef87d8bd803b9fe8d9)) * **keypair:** Use hex encoding common_ref. ([#79](https://github.com/nervosnetwork/muta/issues/79)) ([abbce4c](https://github.com/nervosnetwork/muta/commit/abbce4c15919f45f824bd4967ea64f8234548765)) * **makefile:** Docker push to the correct image ([#146](https://github.com/nervosnetwork/muta/issues/146)) ([05f6396](https://github.com/nervosnetwork/muta/commit/05f6396f1786b46b4cf9c41e3f700b37ebaddb68)) * **mempool:** Always get the latest epoch id when `package`. 
([#30](https://github.com/nervosnetwork/muta/issues/30)) ([9a77ebf](https://github.com/nervosnetwork/muta/commit/9a77ebf9ecba6323cc81cd094774e32fd28b946e)) * **mempool:** broadcast new transactions ([#32](https://github.com/nervosnetwork/muta/issues/32)) ([086ec7e](https://github.com/nervosnetwork/muta/commit/086ec7eb6ca2c8f6afc14767d51efdb91533f932)) * **mempool:** Fix concurrent insert bug of mempool ([#19](https://github.com/nervosnetwork/muta/issues/19)) ([515eec2](https://github.com/nervosnetwork/muta/commit/515eec2ab65a2d57a5ca742c774daeb9cef99354)) * **mempool:** Resize the queue to ensure correct switching. ([#18](https://github.com/nervosnetwork/muta/issues/18)) ([ebf1ae3](https://github.com/nervosnetwork/muta/commit/ebf1ae34861fc48297813cdc465e4d9c99e059d4)) * **mempool:** sync proposal txs doesn't insert txs at all ([#179](https://github.com/nervosnetwork/muta/issues/179)) ([33f39c5](https://github.com/nervosnetwork/muta/commit/33f39c5bac0235a8261c53327c558864a6149c8a)) * **network:** dead lock in peer manager ([#24](https://github.com/nervosnetwork/muta/issues/24)) ([a74017a](https://github.com/nervosnetwork/muta/commit/a74017aa9d84b6b862683860e63c000b4048e459)) * **network:** default rpc timeout to 4 seconds ([#115](https://github.com/nervosnetwork/muta/issues/115)) ([666049c](https://github.com/nervosnetwork/muta/commit/666049c54c8eee8291cc173230caccb35de137ca)) * **network:** fail to bootstrap if bootstrap isn't start already ([#46](https://github.com/nervosnetwork/muta/issues/46)) ([9dd515a](https://github.com/nervosnetwork/muta/commit/9dd515a3e09f1c158dff6536ed38eb5116f4317f)) * **network:** give up retry ([#152](https://github.com/nervosnetwork/muta/issues/152)) ([34d052a](https://github.com/nervosnetwork/muta/commit/34d052aaba1684333fdd49f86e54c103064fa2f6)) * **network:** never reconnect bootstrap again after failure ([#22](https://github.com/nervosnetwork/muta/issues/22)) 
([79d66bd](https://github.com/nervosnetwork/muta/commit/79d66bd06e61ff6ef41c12ada91cf6485482aa43)) * **network:** NoSessionId Error ([#33](https://github.com/nervosnetwork/muta/issues/33)) ([4761d79](https://github.com/nervosnetwork/muta/commit/4761d797dded9534e0c0b5e43c6e519055542c2c)) * **network:** rpc memory leak if rpc call future is dropped ([#166](https://github.com/nervosnetwork/muta/issues/166)) ([8476a4b](https://github.com/nervosnetwork/muta/commit/8476a4b85bf3cf923adcd7555cef04ae73a225f1)) * **sync:** Check the height again after get the lock ([#171](https://github.com/nervosnetwork/muta/issues/171)) ([68164f3](https://github.com/nervosnetwork/muta/commit/68164f3f75d83b9507ee68a099fb712492339edb)) * **sync:** Flush the memory pool when the storage success ([#165](https://github.com/nervosnetwork/muta/issues/165)) ([3b9cbd5](https://github.com/nervosnetwork/muta/commit/3b9cbd55310993c783b0a5794237df75accf118e)) * fix overlord not found error ([#95](https://github.com/nervosnetwork/muta/issues/95)) ([0754c64](https://github.com/nervosnetwork/muta/commit/0754c64973f7fca92e49080c3a03a869b43a4c46)) * Ignore bootstraps when empty. 
([#41](https://github.com/nervosnetwork/muta/issues/41)) ([2b3566b](https://github.com/nervosnetwork/muta/commit/2b3566b4acb91f6086b9cca2b1ea4d2883a75be9)) ### Features * **config:** move bls_pub_key config to genesis.toml ([#162](https://github.com/nervosnetwork/muta/issues/162)) ([337b01f](https://github.com/nervosnetwork/muta/commit/337b01fda21fc33f4d4817d93a27d86af9e2b164)) * **network:** interval report pending data size ([#160](https://github.com/nervosnetwork/muta/issues/160)) ([3c46aca](https://github.com/nervosnetwork/muta/commit/3c46aca4873abf9b8afd01d5f464df57bb1b9b9a)) * **sync:** Trigger sync after waiting for consensus interval ([#169](https://github.com/nervosnetwork/muta/issues/169)) ([fe355f1](https://github.com/nervosnetwork/muta/commit/fe355f1d7d6359dfa97809f1bc603cb99975ba46)) * add api schema ([#90](https://github.com/nervosnetwork/muta/issues/90)) ([3f8adfa](https://github.com/nervosnetwork/muta/commit/3f8adfa0a717b055a4455fd102de68003f835bf2)) * add common_ref argument for keypair tool ([#154](https://github.com/nervosnetwork/muta/issues/154)) ([2651346](https://github.com/nervosnetwork/muta/commit/26513469206aa8a4480c5fffad9d134d5d0e8ded)) * add panic hook to logger ([#156](https://github.com/nervosnetwork/muta/issues/156)) ([93b65fe](https://github.com/nervosnetwork/muta/commit/93b65feb89502b7d7836d7f4c423db37fbd1ef4f)) * Extract muta as crate. 
([1b62fe7](https://github.com/nervosnetwork/muta/commit/1b62fe786fbd576b67ea28df3d304d235ae3e94e)) * Metadata service ([#133](https://github.com/nervosnetwork/muta/issues/133)) ([a588b12](https://github.com/nervosnetwork/muta/commit/a588b12de4f3c0de666b66e2a5dea65d71977f5f)) * spawn sync txs in check epoch ([6dca1dd](https://github.com/nervosnetwork/muta/commit/6dca1ddcd9256a3061f132a5abc5d784d466c168)) * support specify module log level via config ([#105](https://github.com/nervosnetwork/muta/issues/105)) ([c06061b](https://github.com/nervosnetwork/muta/commit/c06061b4ccd755177385dfee000783e2b11b0dcd)) * Update juniper, supports async ([#149](https://github.com/nervosnetwork/muta/issues/149)) ([cbabf50](https://github.com/nervosnetwork/muta/commit/cbabf507c25ee8feb8a57de408bc97efc8a4a4ab)) * update overlord with brake engine ([#159](https://github.com/nervosnetwork/muta/issues/159)) ([8cd886a](https://github.com/nervosnetwork/muta/commit/8cd886a79fec934a53d409a27de941f16166c176)), closes [#156](https://github.com/nervosnetwork/muta/issues/156) [#158](https://github.com/nervosnetwork/muta/issues/158) * **api:** Add the exec_height field to the block ([#138](https://github.com/nervosnetwork/muta/issues/138)) ([417153c](https://github.com/nervosnetwork/muta/commit/417153c632793c7ac4e7bc3ffa5b2832dd2dbe66)) * **binding-macro:** service method supports none payload and none response ([#103](https://github.com/nervosnetwork/muta/issues/103)) ([3a5783e](https://github.com/nervosnetwork/muta/commit/3a5783eadd1090cf739d4fdbe94f049115eb65f0)) * **consensus:** develop aggregate crypto with overlord ([#60](https://github.com/nervosnetwork/muta/issues/60)) ([2bc0869](https://github.com/nervosnetwork/muta/commit/2bc0869e928b35c674b4cafdf48540298752b5b5)) * **core/binding:** Implementation of service state. 
([#48](https://github.com/nervosnetwork/muta/issues/48)) ([301be6f](https://github.com/nervosnetwork/muta/commit/301be6f39379bd3826b5f605c999ce107f7404e4)) * **core/binding-macro:** Add `read` and `write` proc-macro. ([#49](https://github.com/nervosnetwork/muta/issues/49)) ([687b6e1](https://github.com/nervosnetwork/muta/commit/687b6e1e1a960f679394843c42b861981828d8aa)) * **core/binding-macro:** Add cycles proc-marco. ([#52](https://github.com/nervosnetwork/muta/issues/52)) ([e2289a2](https://github.com/nervosnetwork/muta/commit/e2289a2481510b59c18e37d0fc8bedd9f5d4537e)) * **core/binding-macro:** Support for returning a struct. ([#70](https://github.com/nervosnetwork/muta/issues/70)) ([e13b1ff](https://github.com/nervosnetwork/muta/commit/e13b1ff7834279de9c2df5a0df6967035b7fb8b3)) * **framework:** add ExecutorParams into hook method ([#116](https://github.com/nervosnetwork/muta/issues/116)) ([8036bd6](https://github.com/nervosnetwork/muta/commit/8036bd6f9be1f49eedbc40bbc260ad82952c2e71)) * **framework:** add extra: Option to ServiceContext ([#118](https://github.com/nervosnetwork/muta/issues/118)) ([694c4a3](https://github.com/nervosnetwork/muta/commit/694c4a34f32dc1ba4940db19e304de7a927e1531)) * **framework:** add tx_hash, nonce to ServiceContext ([#111](https://github.com/nervosnetwork/muta/issues/111)) ([352f71f](https://github.com/nervosnetwork/muta/commit/352f71fb3b8b024d533d26c7a344fad801b7a91c)) * **framework/executor:** create service genesis from config ([#104](https://github.com/nervosnetwork/muta/issues/104)) ([8988ccb](https://github.com/nervosnetwork/muta/commit/8988ccb3e5cb2a25bfeabe93c5a63ac1600290a2)) * **graphql:** Modify the API to fit the framework data structure. 
([#74](https://github.com/nervosnetwork/muta/issues/74)) ([a1ca2b0](https://github.com/nervosnetwork/muta/commit/a1ca2b0d68e32e335d8d388b70bca83137519f5a)) * **muta:** flush metadata while commit ([#137](https://github.com/nervosnetwork/muta/issues/137)) ([383a481](https://github.com/nervosnetwork/muta/commit/383a481c348efdf73fd690b42b2430fca6d9a0db)) * **muta:** link up metadata service with muta ([#136](https://github.com/nervosnetwork/muta/issues/136)) ([ba65b80](https://github.com/nervosnetwork/muta/commit/ba65b80dffd128f12336b44d4e80ed40cced8e75)) * **protocol/traits:** Add traits of binding. ([#47](https://github.com/nervosnetwork/muta/issues/47)) ([c6b85ee](https://github.com/nervosnetwork/muta/commit/c6b85ee7bee5b14c5da1676ff44d743c031a0fa6)) * **protocol/types:** Add cycles_price for raw_transaction. ([#46](https://github.com/nervosnetwork/muta/issues/46)) ([55f64a4](https://github.com/nervosnetwork/muta/commit/55f64a49634061ca05c75cbf5923f183fc83936d)) * **sync:** Wait for the execution queue. ([#132](https://github.com/nervosnetwork/muta/issues/132)) ([a8d2013](https://github.com/nervosnetwork/muta/commit/a8d2013991cc6b5b579429954c8411c7954b1da4)) * add end to end test ([#42](https://github.com/nervosnetwork/muta/issues/42)) ([e84756d](https://github.com/nervosnetwork/muta/commit/e84756d1734ad58943309c3c2299393f5a2022e4)) * Extract muta as crate. 
([#75](https://github.com/nervosnetwork/muta/issues/75)) ([fc576ea](https://github.com/nervosnetwork/muta/commit/fc576eaa67a3b4b4fa459b0ab970251d63b06b4f)), closes [#46](https://github.com/nervosnetwork/muta/issues/46) [#47](https://github.com/nervosnetwork/muta/issues/47) [#48](https://github.com/nervosnetwork/muta/issues/48) [#49](https://github.com/nervosnetwork/muta/issues/49) [#52](https://github.com/nervosnetwork/muta/issues/52) [#51](https://github.com/nervosnetwork/muta/issues/51) [#55](https://github.com/nervosnetwork/muta/issues/55) [#58](https://github.com/nervosnetwork/muta/issues/58) [#56](https://github.com/nervosnetwork/muta/issues/56) [#64](https://github.com/nervosnetwork/muta/issues/64) [#65](https://github.com/nervosnetwork/muta/issues/65) [#70](https://github.com/nervosnetwork/muta/issues/70) [#71](https://github.com/nervosnetwork/muta/issues/71) [#72](https://github.com/nervosnetwork/muta/issues/72) [#73](https://github.com/nervosnetwork/muta/issues/73) [#43](https://github.com/nervosnetwork/muta/issues/43) [#54](https://github.com/nervosnetwork/muta/issues/54) [#53](https://github.com/nervosnetwork/muta/issues/53) [#57](https://github.com/nervosnetwork/muta/issues/57) [#45](https://github.com/nervosnetwork/muta/issues/45) [#62](https://github.com/nervosnetwork/muta/issues/62) [#63](https://github.com/nervosnetwork/muta/issues/63) [#66](https://github.com/nervosnetwork/muta/issues/66) [#61](https://github.com/nervosnetwork/muta/issues/61) [#67](https://github.com/nervosnetwork/muta/issues/67) [#68](https://github.com/nervosnetwork/muta/issues/68) [#60](https://github.com/nervosnetwork/muta/issues/60) [#46](https://github.com/nervosnetwork/muta/issues/46) [#47](https://github.com/nervosnetwork/muta/issues/47) [#48](https://github.com/nervosnetwork/muta/issues/48) [#49](https://github.com/nervosnetwork/muta/issues/49) [#52](https://github.com/nervosnetwork/muta/issues/52) [#51](https://github.com/nervosnetwork/muta/issues/51) 
[#55](https://github.com/nervosnetwork/muta/issues/55) [#58](https://github.com/nervosnetwork/muta/issues/58) [#56](https://github.com/nervosnetwork/muta/issues/56) [#64](https://github.com/nervosnetwork/muta/issues/64) [#65](https://github.com/nervosnetwork/muta/issues/65) [#70](https://github.com/nervosnetwork/muta/issues/70) [#72](https://github.com/nervosnetwork/muta/issues/72) [#74](https://github.com/nervosnetwork/muta/issues/74) * metrics logger ([#43](https://github.com/nervosnetwork/muta/issues/43)) ([d633309](https://github.com/nervosnetwork/muta/commit/d6333091959da6ab0a12630282f6ea783d509319)) * support consensus tracing ([#53](https://github.com/nervosnetwork/muta/issues/53)) ([03942f0](https://github.com/nervosnetwork/muta/commit/03942f08cfdcc573d7feef3a1111e59f63d077f1)) * **api:** make API more user-friendly ([#38](https://github.com/nervosnetwork/muta/issues/38)) ([ba33467](https://github.com/nervosnetwork/muta/commit/ba33467e52c114576b82850e11662d168ede293a)) * **mempool:** implement cached batch txs broadcast ([#20](https://github.com/nervosnetwork/muta/issues/20)) ([d2af811](https://github.com/nervosnetwork/muta/commit/d2af811bb99becc9600d784ce19e021fec11627d)) * **sync:** synchronization epoch ([#9](https://github.com/nervosnetwork/muta/issues/9)) ([fb4bf0d](https://github.com/nervosnetwork/muta/commit/fb4bf0d7c4bde7c86d1b09f469037ff1219f15fa)), closes [#17](https://github.com/nervosnetwork/muta/issues/17) [#18](https://github.com/nervosnetwork/muta/issues/18) * add compile and run in README ([#11](https://github.com/nervosnetwork/muta/issues/11)) ([1058322](https://github.com/nervosnetwork/muta/commit/10583224053ab91c32dbec815cd0a5af6b0dbeb3)) * add docker ([#31](https://github.com/nervosnetwork/muta/issues/31)) ([8a4386a](https://github.com/nervosnetwork/muta/commit/8a4386ad4c1f66783cada885db9851609b6f5f8d)) * change rlp in executor to fixed-codec ([#29](https://github.com/nervosnetwork/muta/issues/29)) 
([7f737cd](https://github.com/nervosnetwork/muta/commit/7f737cdfc9353148b945ad52dd5ab3fd46e2c4db)) * Get balance. ([#28](https://github.com/nervosnetwork/muta/issues/28)) ([8c4a3f9](https://github.com/nervosnetwork/muta/commit/8c4a3f9af8b9e1e8f19cc50b280b66b5d8e270bb)) * **codec:** Add codec tests and benchmarks ([#22](https://github.com/nervosnetwork/muta/issues/22)) ([dcbe522](https://github.com/nervosnetwork/muta/commit/dcbe522be22596059280f6ef845a6d6f4e798551)) * **consensus:** develop consensus interfaces ([#21](https://github.com/nervosnetwork/muta/issues/21)) ([62e3c06](https://github.com/nervosnetwork/muta/commit/62e3c063cd4f82efda43ca5c87c042db5adb9abb)) * **consensus:** develop consensus provider and engine ([#28](https://github.com/nervosnetwork/muta/issues/28)) ([b2ccf9c](https://github.com/nervosnetwork/muta/commit/b2ccf9c84502a6dd476b1737aa9cbb2a283ced32)) * **consensus:** Execute the transactions on commit. ([#7](https://github.com/nervosnetwork/muta/issues/7)) ([b54e7d2](https://github.com/nervosnetwork/muta/commit/b54e7d2bbd5d0ac45ef0d4c728e398b87a1f5450)) * **consensus:** joint overlord and chain ([#32](https://github.com/nervosnetwork/muta/issues/32)) ([72cec41](https://github.com/nervosnetwork/muta/commit/72cec41c86824455ad35cfb1da8a246c50731568)) * **consensus:** mutex lock and timer config ([#45](https://github.com/nervosnetwork/muta/issues/45)) ([cf09687](https://github.com/nervosnetwork/muta/commit/cf09687299b5be39a9c40f13d4b88a496ec7c943)) * **consensus:** Support transaction executor. ([#6](https://github.com/nervosnetwork/muta/issues/6)) ([e1188f9](https://github.com/nervosnetwork/muta/commit/e1188f9296b3947f833d6bc9a9beff22ebbbf4e7)) * **executor:** Create genesis. 
([#1](https://github.com/nervosnetwork/muta/issues/1)) ([a1111d8](https://github.com/nervosnetwork/muta/commit/a1111d8db709c62d119edf3238a22dd656e8035f)) * **graphql:** Support transfer and contract deployment ([#44](https://github.com/nervosnetwork/muta/issues/44)) ([bfcb520](https://github.com/nervosnetwork/muta/commit/bfcb5203fe245e364922d5d8966197a8a8f8d91c)) * **mempool:** fix fixed_codec ([#25](https://github.com/nervosnetwork/muta/issues/25)) ([c1ac607](https://github.com/nervosnetwork/muta/commit/c1ac607ac9b61f4867c17f69c50dad9797dc1c2b)) * **mempool:** Remove cycle_limit ([#23](https://github.com/nervosnetwork/muta/issues/23)) ([8a19ae8](https://github.com/nervosnetwork/muta/commit/8a19ae867fd5b82c4fd56a1f8b59a83e24ca5bc0)) * **native-contract:** Support for asset creation and transfer. ([#37](https://github.com/nervosnetwork/muta/issues/37)) ([1c505fb](https://github.com/nervosnetwork/muta/commit/1c505fbdd57fcb2ef3df3e8b19c65599d77c9bf1)) * **network:** log connected peer ips ([#23](https://github.com/nervosnetwork/muta/issues/23)) ([1691bfa](https://github.com/nervosnetwork/muta/commit/1691bfa47ac561a2f27243e21b1b2fad2fb64be9)) * develop merkle root ([#17](https://github.com/nervosnetwork/muta/issues/17)) ([03cec31](https://github.com/nervosnetwork/muta/commit/03cec318645ee49158f09ec59e356210a80f8bbf)) * Fill in the main function ([#36](https://github.com/nervosnetwork/muta/issues/36)) ([d783f3b](https://github.com/nervosnetwork/muta/commit/d783f3b2d36507a695abd47b303b6c0108e2030b)) * **mempool:** Develop mempool's tests and benches ([#9](https://github.com/nervosnetwork/muta/issues/9)) ([5ddd5f4](https://github.com/nervosnetwork/muta/commit/5ddd5f4d0c1fa9630971ade538dcf954b6aa8f54)) * **mempool:** Implement MemPool interfaces ([#8](https://github.com/nervosnetwork/muta/issues/8)) ([934ce58](https://github.com/nervosnetwork/muta/commit/934ce58b7a7a6b89b65ff931ce5487e553dd927d)) * **native_contract:** Add an adapter that provides access to the world 
state. ([#27](https://github.com/nervosnetwork/muta/issues/27)) ([3281bea](https://github.com/nervosnetwork/muta/commit/3281beab2d054470b5edf330515df933cc713bb8)) * **protocol:** Add the mempool traits ([#7](https://github.com/nervosnetwork/muta/issues/7)) ([9f6c19b](https://github.com/nervosnetwork/muta/commit/9f6c19bbfbff6c8f82bb732c3503d757833f837e)) * **protocol:** Add the underlying data structure. ([#5](https://github.com/nervosnetwork/muta/issues/5)) ([5dae189](https://github.com/nervosnetwork/muta/commit/5dae189104c986348adddd43fbaa47af01781828)) * **protocol:** Protobuf serialize ([#6](https://github.com/nervosnetwork/muta/issues/6)) ([ff00595](https://github.com/nervosnetwork/muta/commit/ff00595d100e44148b1cc243437798db8233ca2b)) * **storage:** add storage test ([#18](https://github.com/nervosnetwork/muta/issues/18)) ([f78df5b](https://github.com/nervosnetwork/muta/commit/f78df5b0357eade7855152eee9c79070866477ac)) * **storage:** Implement memory adapter API ([#11](https://github.com/nervosnetwork/muta/issues/11)) ([b0a8090](https://github.com/nervosnetwork/muta/commit/b0a80901229f85e8cf89bd806dcb32c95ae059b8)) * **storage:** Implement storage ([#17](https://github.com/nervosnetwork/muta/issues/17)) ([7728b5b](https://github.com/nervosnetwork/muta/commit/7728b5b0307bd58b11671f123f37e3e365b14b97)) * **types:** Add account structure. 
([#24](https://github.com/nervosnetwork/muta/issues/24)) ([f6b93f0](https://github.com/nervosnetwork/muta/commit/f6b93f0f08b03a20761aef47f08343eb5d8e6a85)) ### Performance Improvements * **storage:** cache latest epoch ([#128](https://github.com/nervosnetwork/muta/issues/128)) ([da4d7a9](https://github.com/nervosnetwork/muta/commit/da4d7a92363596b7339518e24c64ab49648749dd)) ### Reverts * Revert "[ᚬdebug-muta] feat(service): Upgrade asset (#181)" (#182) ([dad3f99](https://github.com/nervosnetwork/muta/commit/dad3f99f7c694eea57b546c6b2169950c5692ea1)), closes [#181](https://github.com/nervosnetwork/muta/issues/181) [#182](https://github.com/nervosnetwork/muta/issues/182) * Revert "feat: Extract muta as crate. (#75)" (#77) ([3baacc5](https://github.com/nervosnetwork/muta/commit/3baacc5c781615377e9a6ba50cfc7b17dcb0ec6e)), closes [#75](https://github.com/nervosnetwork/muta/issues/75) [#77](https://github.com/nervosnetwork/muta/issues/77) # [0.1.0](https://github.com/nervosnetwork/muta/compare/733ee8e6be7649c9aa2d772bb1dc661bd0879917...v0.1.0) (2019-09-22) ### Bug Fixes * **ci:** build on push and pull request ([d28aa55](https://github.com/nervosnetwork/muta/commit/d28aa55f5df240277e2b75e87aa948cdcf11ea7f)) * **ci:** temporarily amend code to pass lint ([9441236](https://github.com/nervosnetwork/muta/commit/9441236a5107e0042753915ed943b487cd02d6a5)) * **consensus:** Clear cache of last proposal. ([#199](https://github.com/nervosnetwork/muta/issues/199)) ([f548653](https://github.com/nervosnetwork/muta/commit/f5486531f43fa720171941ad4be5ec7646a269c2)) * **consensus:** fix lock free too early problem and add state root check ([#277](https://github.com/nervosnetwork/muta/issues/277)) ([7238c5b](https://github.com/nervosnetwork/muta/commit/7238c5bc057bd6c6f31773fa4bd3e06aaea72255)) * **consensus:** Makes sure that proposer is this node. 
([#281](https://github.com/nervosnetwork/muta/issues/281)) ([d7f4e50](https://github.com/nervosnetwork/muta/commit/d7f4e5081f00a04aee934d0ce700cd107f4f345f)) * **core-network:** CallbackItemNotFound ([#243](https://github.com/nervosnetwork/muta/issues/243)) ([47365fa](https://github.com/nervosnetwork/muta/commit/47365faf5fa7171dde8951661fa095a6c43bcb1f)) * **core-network:** false bootstrapped connections ([#275](https://github.com/nervosnetwork/muta/issues/275)) ([26e76f0](https://github.com/nervosnetwork/muta/commit/26e76f0a2879aed3da745529f64ba3828a1cc30e)) * **core-types:** compilation failure ([#269](https://github.com/nervosnetwork/muta/issues/269)) ([56d8649](https://github.com/nervosnetwork/muta/commit/56d86491f69ab16fd2c76b66b28ad76df78c6ca7)) * **core/crypto:** pubkey_to_address() consistent with cita ([acb5e63](https://github.com/nervosnetwork/muta/commit/acb5e63ea577429bc94c16a3430035ea139aaf15)) * **executor:** Save the full node data. ([b57a1c5](https://github.com/nervosnetwork/muta/commit/b57a1c5fa775479b85d1531f7d2dced817de4729)) * **jsonrpc:** give default value for newFilter ([#289](https://github.com/nervosnetwork/muta/issues/289)) ([17069b4](https://github.com/nervosnetwork/muta/commit/17069b49067dd7335f243d248e3c8d633e455a73)) * **jsonrpc:** logic error in getTransactionCount ([#290](https://github.com/nervosnetwork/muta/issues/290)) ([464bfdf](https://github.com/nervosnetwork/muta/commit/464bfdf08a9954206bb595b3861c52208fc9630d)) * **jsonrpc:** make the response compatible with jsonrpc 2.0 spec ([1db5190](https://github.com/nervosnetwork/muta/commit/1db5190bc91d431bacce6bb44a1185b19520c1a2)) * **jsonrpc:** prefix with 0x by API getTransactionProof ([#295](https://github.com/nervosnetwork/muta/issues/295)) ([b1c0160](https://github.com/nervosnetwork/muta/commit/b1c0160b65fc91e8a2bcfd908943fb238d1101c1)) * **jsonrpc:** raise error when key not found in state ([#294](https://github.com/nervosnetwork/muta/issues/294)) 
([7a7c294](https://github.com/nervosnetwork/muta/commit/7a7c294df5ae75f50ec0fe3620634c7280f837e7)) * **jsonrpc:** returns the correct block hash ([#280](https://github.com/nervosnetwork/muta/issues/280)) ([f6a58d0](https://github.com/nervosnetwork/muta/commit/f6a58d0cfc743d1fa84fe5de99798157ba5f25a6)) * Call header.hash ([#94](https://github.com/nervosnetwork/muta/issues/94)) ([636aa54](https://github.com/nervosnetwork/muta/commit/636aa549c21a04611b6f4575dfc7e78fa47d768e)) * change the blocking thread from rayon to std::thread ([5b80476](https://github.com/nervosnetwork/muta/commit/5b804765d0a76055e6e730560a6d7ecd576703be)) * return err if tx not found in get_batch to avoid forking ([#279](https://github.com/nervosnetwork/muta/issues/279)) ([6aed2fe](https://github.com/nervosnetwork/muta/commit/6aed2fe5ffcd0eb6a699cff00d92e9dd3ab7d7b3)) * **sync:** proof and proposal_hash hash not match. ([#239](https://github.com/nervosnetwork/muta/issues/239)) ([51f332e](https://github.com/nervosnetwork/muta/commit/51f332ee8c4a10b88844a272bc51a116b4d25dd2)) * tokio::spawn panic. ([#238](https://github.com/nervosnetwork/muta/issues/238)) ([12d8d01](https://github.com/nervosnetwork/muta/commit/12d8d01ed42f9cc5d9cc341edfd76a6076aa37e1)) * **common/logger:** cargo fmt ([e3a7f5a](https://github.com/nervosnetwork/muta/commit/e3a7f5a2217956b86191881caeb3ca6cea7ec2fc)) * **components/transaction-pool:** Use the latest crypto API. ([#86](https://github.com/nervosnetwork/muta/issues/86)) ([f6c94d3](https://github.com/nervosnetwork/muta/commit/f6c94d307d6e89afba75ed8b83b99088fc7ca9de)) * **components/transaction-pool:** Check if the transaction is repeated in histories block. 
([dba25fe](https://github.com/nervosnetwork/muta/commit/dba25fe09d8e82f0e396415055ce08efbf1fe159)) * **core-p2p:** transmission example: a clippy warning ([6d2f42a](https://github.com/nervosnetwork/muta/commit/6d2f42ae97194333a823581406fc75d2c47536b2)) * **core-p2p:** transmission example: remove unreachable match branch ([0082bd6](https://github.com/nervosnetwork/muta/commit/0082bd6a3fb956f9ee17a9eba6ada77fc91f3dfe)) * **core-p2p:** transmission: future task starvation ([ba14db0](https://github.com/nervosnetwork/muta/commit/ba14db035413220ed7eba5e5543b8a6496267641)) * **devchain:** correct addresses matched with privkey ([#114](https://github.com/nervosnetwork/muta/issues/114)) ([f56744e](https://github.com/nervosnetwork/muta/commit/f56744e7809b39da79434a3fbcf3deb127fded27)) * **network:** RepeatedConnection and ConnectSelf errors ([#196](https://github.com/nervosnetwork/muta/issues/196)) ([2e5e888](https://github.com/nervosnetwork/muta/commit/2e5e888cdb0869e7622639919b12e62eca06f137)) * **p2p:** Make sure the "poll" is triggered. ([#182](https://github.com/nervosnetwork/muta/issues/182)) ([88daed1](https://github.com/nervosnetwork/muta/commit/88daed1e3e175c21e7923ddd5f1b4eb4ef4d6286)) * **p2p-identify:** empty local listen addresses ([#198](https://github.com/nervosnetwork/muta/issues/198)) ([c40ad8a](https://github.com/nervosnetwork/muta/commit/c40ad8a8dedd999efd17a88b9c30b198d4a0035a)) * **synchronizer:** add a pull_txs_sync method to sync txs from block ([#207](https://github.com/nervosnetwork/muta/issues/207)) ([317fca8](https://github.com/nervosnetwork/muta/commit/317fca8b8d2f270e5d140a94bb1a9227c4b7271b)) * **transaction-pool:** duplicate insertion transactions from network ([#191](https://github.com/nervosnetwork/muta/issues/191)) ([2c095bb](https://github.com/nervosnetwork/muta/commit/2c095bbe5649454abf2663df7355c0a56f54a71f)) * **tx-pool:** "get_count" returns the repeat transaction. 
([f5612d0](https://github.com/nervosnetwork/muta/commit/f5612d09d02e9183b702f0233aecc14c31779945)) * **tx-pool:** `ensure` method always pull all txs from remote peer ([#194](https://github.com/nervosnetwork/muta/issues/194)) ([9ff300e](https://github.com/nervosnetwork/muta/commit/9ff300e191aa39b6301e481f8f287287b645ba39)) * **tx-pool:** Ensure the number of transactions meets expectations ([dcbf0dd](https://github.com/nervosnetwork/muta/commit/dcbf0dd8cf548ddfe3afb3226d7596637ae615dd)) * **tx-pool:** replace chashmap ([#211](https://github.com/nervosnetwork/muta/issues/211)) ([717f55e](https://github.com/nervosnetwork/muta/commit/717f55e4772c5818ab17e2b1c320b0b98f174122)) * Avoid drop ([4d0f986](https://github.com/nervosnetwork/muta/commit/4d0f986741c392489893f036989db7218db54743)) * build failure ([18ce8e4](https://github.com/nervosnetwork/muta/commit/18ce8e4642d8d27892fee53b9695e4ced7921055)) * jsonrpc call return value ([#104](https://github.com/nervosnetwork/muta/issues/104)) ([1fe41eb](https://github.com/nervosnetwork/muta/commit/1fe41eb491a16588019218144985eec143613c65)) * logic error of bloom filter ([#176](https://github.com/nervosnetwork/muta/issues/176)) ([70269cb](https://github.com/nervosnetwork/muta/commit/70269cb5cefd82f1a14eb5e85df419c1587d19c8)) * merkle typo ([4f63585](https://github.com/nervosnetwork/muta/commit/4f6358565ee8d486be18ac8ff6069b95b597ea4d)) * rlp encode ([b852ac1](https://github.com/nervosnetwork/muta/commit/b852ac147db818cf289b972f054028d293218a19)) * rlp hash ([837055a](https://github.com/nervosnetwork/muta/commit/837055a4eb78ba941004dbc0466955895de8bcab)) * Set quota limit for the genesis. 
([#106](https://github.com/nervosnetwork/muta/issues/106)) ([931fe40](https://github.com/nervosnetwork/muta/commit/931fe404453a6f936cbd27bf37d0e326a03e4484)) * write lock ([de80439](https://github.com/nervosnetwork/muta/commit/de80439cb4e7889c1220fc7821604f9ef792422e)) ### Features * add business model support for executor ([#308](https://github.com/nervosnetwork/muta/issues/308)) ([e03396b](https://github.com/nervosnetwork/muta/commit/e03396bb6b964a0c93f43c2684a0e76a55db5540)) * add Deserialize for Hash and Address ([#259](https://github.com/nervosnetwork/muta/issues/259)) ([fef188c](https://github.com/nervosnetwork/muta/commit/fef188c5950fb7f64a92312894efdb4955201a93)) * add docker config for dev ([#197](https://github.com/nervosnetwork/muta/issues/197)) ([6e74aec](https://github.com/nervosnetwork/muta/commit/6e74aec0b51c2bf80c1d1b893130ea74f4a1a8f0)) * add fabric devops scripts ([fcdc25c](https://github.com/nervosnetwork/muta/commit/fcdc25c05b5c30ba38bf6af57885c2f45233d3fc)) * add height to the end of proposal msg ([#255](https://github.com/nervosnetwork/muta/issues/255)) ([c5cbc5e](https://github.com/nervosnetwork/muta/commit/c5cbc5ec70f1dc0fb46ef0bb87c3b994596b4571)) * add more info to version ([#298](https://github.com/nervosnetwork/muta/issues/298)) ([fd02a17](https://github.com/nervosnetwork/muta/commit/fd02a17a68bb6ef59bbd4cded13d69da221237ee)) * peerCount RPC API ([#257](https://github.com/nervosnetwork/muta/issues/257)) ([736ae8c](https://github.com/nervosnetwork/muta/commit/736ae8c7f537a56b01d648cf066f220e47108820)) * **components/cita-jsonrpc:** impl executor related apis ([#80](https://github.com/nervosnetwork/muta/issues/80)) ([bc8f340](https://github.com/nervosnetwork/muta/commit/bc8f34015617e1a01fb2fbb30d9709cdd806daea)) * **components/cita-jsonrpc:** impl get_code and finish some todo ([#87](https://github.com/nervosnetwork/muta/issues/87)) ([e1b0b9d](https://github.com/nervosnetwork/muta/commit/e1b0b9dc8c39965366c5b572905e63cacecdc958)) * 
**components/database:** Implement RocksDB ([#72](https://github.com/nervosnetwork/muta/issues/72)) ([3516fbc](https://github.com/nervosnetwork/muta/commit/3516fbc41338a2f423e0ba56eb96c7fa697a6c77)) * **components/executor:** Add trie db for executor. ([#85](https://github.com/nervosnetwork/muta/issues/85)) ([fd7dc1d](https://github.com/nervosnetwork/muta/commit/fd7dc1da97a4b7dafb1ecbc2813c9506423689a5)) * **components/executor:** Implement EVM executor. ([#68](https://github.com/nervosnetwork/muta/issues/68)) ([021893d](https://github.com/nervosnetwork/muta/commit/021893db432f1ddadc89da9c9251bdb6fb79d925)) * **components/jsonrpc:** implement getStateProof ([#178](https://github.com/nervosnetwork/muta/issues/178)) ([69499fb](https://github.com/nervosnetwork/muta/commit/69499fbb98cbe7f23d426c15ebe67de552dd5d2b)) * **components/jsonrpc:** implement getTransactionProof ([0db8785](https://github.com/nervosnetwork/muta/commit/0db8785475e9d9c098fa123b9c23b4f0eab286dc)) * **components/jsonrpc:** running on microscope ([#200](https://github.com/nervosnetwork/muta/issues/200)) ([1c63a0e](https://github.com/nervosnetwork/muta/commit/1c63a0e3db751b7b7be6f053bed2b66245b105cd)) * **components/jsonrpc:** Try to convert tx to cita::tx ([#221](https://github.com/nervosnetwork/muta/issues/221)) ([b8ab16b](https://github.com/nervosnetwork/muta/commit/b8ab16b05ad01a0c6ef5a7b8d7ad76961e7749ff)) * **core-network:** expose send_buffer_size and recv_buffer_size ([#248](https://github.com/nervosnetwork/muta/issues/248)) ([e5120ad](https://github.com/nervosnetwork/muta/commit/e5120ad646c9d206b43b0d50911303507bdfe381)) * **core-network:** implement peer count feature ([#256](https://github.com/nervosnetwork/muta/issues/256)) ([8f7e7eb](https://github.com/nervosnetwork/muta/commit/8f7e7eb51cdeebfb9c679d88626ac2ec3fa651a4)) * add performance test lua script ([#244](https://github.com/nervosnetwork/muta/issues/244)) 
([c727b73](https://github.com/nervosnetwork/muta/commit/c727b733340029f72d9280a57e07522f635eff44)) * **core-network:** implement concurrent reactor and real chained reactor ([#175](https://github.com/nervosnetwork/muta/issues/175)) ([dc9f897](https://github.com/nervosnetwork/muta/commit/dc9f897f08801d7b8a418750ed516a8acac057ca)) * **core-p2p:** implement datagram transport protocol ([fee2d45](https://github.com/nervosnetwork/muta/commit/fee2d4546552bd6c46376309eb399126219c55fb)) * **core-p2p:** transmission: use `poll` func to do broadcast ([b376cbe](https://github.com/nervosnetwork/muta/commit/b376cbef9211e55f809f16bb9bab1360dd4b3523)) * **core/consensus:** Implement solo mode for consensus ([e071b15](https://github.com/nervosnetwork/muta/commit/e071b1533b1107f65eb0f97563f011f644d73be6)) * **core/crypto:** Add secp256k1 ([8349eaa](https://github.com/nervosnetwork/muta/commit/8349eaa2817ee8c27e9e8367c89f3469e52b6f8a)) * **core/crypto:** Modify the return type to result. ([9f2424c](https://github.com/nervosnetwork/muta/commit/9f2424ca11fa300f7269f7a32195ec8bbde096e0)) * **core/network:** Support broadcast message ([#185](https://github.com/nervosnetwork/muta/issues/185)) ([992c55f](https://github.com/nervosnetwork/muta/commit/992c55f87458a38629944fb78ee69982d8329b2b)) * **core/types:** Add hash function for the header and receipts ([c982a52](https://github.com/nervosnetwork/muta/commit/c982a52ce29da7f0e783b2a7a52f1d541c15ea10)) * **executor:** Add flush for trie db. ([#240](https://github.com/nervosnetwork/muta/issues/240)) ([23fd538](https://github.com/nervosnetwork/muta/commit/23fd53849ac626cdeaabb165c0534bb90651aa90)) * **jsonrpc:** Implement filter APIs ([#190](https://github.com/nervosnetwork/muta/issues/190)) ([c97ed22](https://github.com/nervosnetwork/muta/commit/c97ed2273b6ddb2385d6d0285f2d5b4d267b130b)) * **tx-pool:** Batch broadcast transactions. 
([#234](https://github.com/nervosnetwork/muta/issues/234)) ([d297b1a](https://github.com/nervosnetwork/muta/commit/d297b1a4d655fdfac25f7f5630253f7e8f6f70ea)) * add synchronizer ([#167](https://github.com/nervosnetwork/muta/issues/167)) ([38db7aa](https://github.com/nervosnetwork/muta/commit/38db7aa3f83e4a35417440e4787c5249b9eace63)) * Implement many JSONRPC APIs ([#166](https://github.com/nervosnetwork/muta/issues/166)) ([807b6a7](https://github.com/nervosnetwork/muta/commit/807b6a73cb098087179d9b086fa0070b6ced74d0)) * Implement RPC getTransactionCount ([#169](https://github.com/nervosnetwork/muta/issues/169)) ([dbf0c51](https://github.com/nervosnetwork/muta/commit/dbf0c51a17f3e285e1146eee3b5e9def08d16d50)) * rewrite network component ([#230](https://github.com/nervosnetwork/muta/issues/230)) ([585dabb](https://github.com/nervosnetwork/muta/commit/585dabb2d52dd70de7ebc26eee59345596301c1a)) * **components/jsonrpc:** Implements sendRawTransaction ([#159](https://github.com/nervosnetwork/muta/issues/159)) ([112d345](https://github.com/nervosnetwork/muta/commit/112d34582c00bea3c05d1663cf07d79aefbfa6a9)) * **core-context:** add `CommonValue` trait and `p2p_session_id` method ([#165](https://github.com/nervosnetwork/muta/issues/165)) ([216b743](https://github.com/nervosnetwork/muta/commit/216b74381c00b15ba61444cf462528ee170fcc41)) * **core/consensus:** Implements BFT ([#158](https://github.com/nervosnetwork/muta/issues/158)) ([e7a3bfd](https://github.com/nervosnetwork/muta/commit/e7a3bfd2f667c9bb8d6b9deb29a57c837ae296b9)) * **core/notify:** add notify as message-bus between components ([b53c50d](https://github.com/nervosnetwork/muta/commit/b53c50dc04090b6b0d5b6725b5c32697446aa5f8)) * **core/serialization:** Add proto file ([0bf7c59](https://github.com/nervosnetwork/muta/commit/0bf7c59200ad4a4cc7994efecaec5d8c683f175a)) * **core/storage:** Add the storage trait ([ffc8776](https://github.com/nervosnetwork/muta/commit/ffc8776b02bc0a4cf785c7c5c47a88266f186b49)) * 
**core/types:** Add the transactions hash calculation function. ([67d8170](https://github.com/nervosnetwork/muta/commit/67d817072c4c03b2fc2eaae5d1dc99d2d41240e0)) * **core/types:** Define serialization and deserialization methods ([f28c63d](https://github.com/nervosnetwork/muta/commit/f28c63d2b4c7b77dbe24e2b50e70cf649a6c714c)) * **database:** Add memory db ([d21a5a2](https://github.com/nervosnetwork/muta/commit/d21a5a29bd20e02f3ddd29f77c3df2963f8f3b4b)) * **jsonrpc:** support batch ([0a0c680](https://github.com/nervosnetwork/muta/commit/0a0c680993ff9be231f1ae8e583171e1f304f79b)) * **main:** add init command for genesis ([#96](https://github.com/nervosnetwork/muta/issues/96)) ([ec752b0](https://github.com/nervosnetwork/muta/commit/ec752b0602800055990fbfcc54bd2c2ab0b2cb60)) * **p2p:** Update to tentacle0.2.0-alpha.5 ([#177](https://github.com/nervosnetwork/muta/issues/177)) ([f6f83b6](https://github.com/nervosnetwork/muta/commit/f6f83b6b263579d66160cfab29b83bd5a709eeb4)) * **pubsub:** Implement pubsub components ([#143](https://github.com/nervosnetwork/muta/issues/143)) ([a079770](https://github.com/nervosnetwork/muta/commit/a079770b0e66e22552bd8cf504a9e1ba0c520d0e)) * **runtime:** add `Context` struct ([#155](https://github.com/nervosnetwork/muta/issues/155)) ([27e5aa7](https://github.com/nervosnetwork/muta/commit/27e5aa7f01f3559d2a9dd17346595c9161a9c0f6)) * Add project framework ([#24](https://github.com/nervosnetwork/muta/issues/24)) ([733ee8e](https://github.com/nervosnetwork/muta/commit/733ee8e6be7649c9aa2d772bb1dc661bd0879917)) * Add transaction pool component. 
([360c935](https://github.com/nervosnetwork/muta/commit/360c93540ea77dc51551a3739e17682600d2b1b7)) * Fill main.rs ([#102](https://github.com/nervosnetwork/muta/issues/102)) ([b5b4c72](https://github.com/nervosnetwork/muta/commit/b5b4c7233efcd1c35e92248b7726ca20644800e9)) * impl cita-jsonrpc ([49e2a2d](https://github.com/nervosnetwork/muta/commit/49e2a2d22d094b2b6a2f71bc5201ccfe28308797)) * update db interface and storage interface ([#137](https://github.com/nervosnetwork/muta/issues/137)) ([36b3d07](https://github.com/nervosnetwork/muta/commit/36b3d07f23e2c7ada870cb699bf138cdd66c2860)) ### Reverts * Revert "chore: Update bft-rs (#203)" (#204) ([cc15ba9](https://github.com/nervosnetwork/muta/commit/cc15ba9ed302ab1389838a4a6c745675106179e9)), closes [#203](https://github.com/nervosnetwork/muta/issues/203) [#204](https://github.com/nervosnetwork/muta/issues/204) # [](https://github.com/nervosnetwork/muta/compare/v0.2.0-alpha.1...v) (2020-08-03) ### Bug Fixes * **consensus:** return an error when committing an outdated block ([#371](https://github.com/nervosnetwork/muta/issues/371)) ([b3d518b](https://github.com/nervosnetwork/muta/commit/b3d518b52658b40746ef708fa8cde5c96a39a539)) * **mempool:** Ensure that there are no duplicate transactions in the order transaction ([#379](https://github.com/nervosnetwork/muta/issues/379)) ([97708ac](https://github.com/nervosnetwork/muta/commit/97708ac385be2243344d700a0d7c928f18fd51b3)) * **storage:** test batch receipts get panic ([#373](https://github.com/nervosnetwork/muta/issues/373)) ([300a3c6](https://github.com/nervosnetwork/muta/commit/300a3c65cf0399c2ba37a3bd655e06719b660330)) ### Features * **network:** tag consensus peer ([#364](https://github.com/nervosnetwork/muta/issues/364)) ([9b27df1](https://github.com/nervosnetwork/muta/commit/9b27df1015a25792cc210c5aa0dd473a45ae885d)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) [#2](https://github.com/nervosnetwork/muta/issues/2) 
[#3](https://github.com/nervosnetwork/muta/issues/3) [#4](https://github.com/nervosnetwork/muta/issues/4) [#5](https://github.com/nervosnetwork/muta/issues/5) [#6](https://github.com/nervosnetwork/muta/issues/6) [#7](https://github.com/nervosnetwork/muta/issues/7) * Add global panic hook ([#376](https://github.com/nervosnetwork/muta/issues/376)) ([7382279](https://github.com/nervosnetwork/muta/commit/738227962771a6a66b85f2fd199df2e699b43adc)) ### Performance Improvements * **executor:** use inner call instead of service dispatcher ([#365](https://github.com/nervosnetwork/muta/issues/365)) ([7b1d2a3](https://github.com/nervosnetwork/muta/commit/7b1d2a32d5c20306af3868e5265bd2530dd9493b)) ### BREAKING CHANGES * **network:** - replace Validator address bytes with pubkey bytes * change(consensus): log validator address instead of its public key Block proposer is address instead public key * fix: compilation failed * **network:** - change users_cast to multicast, take peer_ids bytes instead of Address - network bootstrap configuration now takes peer id instead of pubkey hex * refactor(network): PeerId api # [0.2.0-alpha.1](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta...v0.2.0-alpha.1) (2020-07-22) ### Bug Fixes * **executor:** The logic to deal with tx_hook and tx_body ([#367](https://github.com/nervosnetwork/muta/issues/367)) ([749d558](https://github.com/nervosnetwork/muta/commit/749d558b8b58a1943bfa2842dcedcc45218c0f78)) * **executor:** tx events aren't cleared on execution error ([#313](https://github.com/nervosnetwork/muta/issues/313)) ([1605cf5](https://github.com/nervosnetwork/muta/commit/1605cf59b558b97889bb431da7f81fd424b90a89)) * **proof:** Verify aggregated signature in checking proof ([#308](https://github.com/nervosnetwork/muta/issues/308)) ([d2a98b0](https://github.com/nervosnetwork/muta/commit/d2a98b06e44449ca756f135c1b235ff0d80eaf67)) * **trust_metric_test:** unreliable full node exit check 
([#327](https://github.com/nervosnetwork/muta/issues/327)) ([a4ab4a6](https://github.com/nervosnetwork/muta/commit/a4ab4a6209e0978148983e88447ac2d9178fa42a)) * **WAL:** Ignore path already exist ([#304](https://github.com/nervosnetwork/muta/issues/304)) ([02df937](https://github.com/nervosnetwork/muta/commit/02df937fb6449c9b3b0b50e790e0ecf6bfc1ee3d)) ### Performance Improvements * **mempool:** parallel verifying signatures in mempool ([#359](https://github.com/nervosnetwork/muta/issues/359)) ([2ccdf1a](https://github.com/nervosnetwork/muta/commit/2ccdf1a67a40cd483749a98a1a68c37bcf1d473c)) ### Reverts * Revert "refactor(consensus)!: replace Validator address bytes with pubkey bytes (#354)" (#361) ([4dabfa2](https://github.com/nervosnetwork/muta/commit/4dabfa231961d1ec8be1ba42bf05781f55395aed)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) [#361](https://github.com/nervosnetwork/muta/issues/361) * refactor(consensus)!: replace Validator address bytes with pubkey bytes (#354) ([e4433d7](https://github.com/nervosnetwork/muta/commit/e4433d793e8a63788ec682880afc93474e0d2414)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) ### Features * **executor:** allow cancel execution units through context ([#317](https://github.com/nervosnetwork/muta/issues/317)) ([eafb489](https://github.com/nervosnetwork/muta/commit/eafb489f78f7521487c6b2d25dd9912e43f76500)) * **executor:** independent tx hook states commit ([#316](https://github.com/nervosnetwork/muta/issues/316)) ([fde6450](https://github.com/nervosnetwork/muta/commit/fde645010363a4664033370e4109e4d1f08b13bc)) * **protocol:** Remove the logs bloom from block header ([#312](https://github.com/nervosnetwork/muta/issues/312)) ([ff1e0df](https://github.com/nervosnetwork/muta/commit/ff1e0df1e8a65cc480825a49eed9495cc31ecee0)) ================================================ FILE: CHANGELOG/CHANGELOG-0.2.md ================================================ # 
[](https://github.com/nervosnetwork/muta/compare/v0.2.0-rc.2.1...v) (2020-09-15) ### Bug Fixes * **cli:** expose version, author and app_name to be customized ([#456](https://github.com/nervosnetwork/muta/issues/456)) ([93c551e](https://github.com/nervosnetwork/muta/commit/93c551e09ae0d79e5d1e3a03f3882c3ddc883da0)) * **logger:** add structured api ([#450](https://github.com/nervosnetwork/muta/issues/450)) ([4ef3d93](https://github.com/nervosnetwork/muta/commit/4ef3d93f2ff466d69dd22805c91812a8b74605b6)) * **metric:** network broadcast all data size ([#452](https://github.com/nervosnetwork/muta/issues/452)) ([5a8999a](https://github.com/nervosnetwork/muta/commit/5a8999ade29ad54e72caf85115c424361caaf379)) * **network:** wrong connected consensus peer count ([#451](https://github.com/nervosnetwork/muta/issues/451)) ([43357fa](https://github.com/nervosnetwork/muta/commit/43357fa29339d4540b5d86ed51f42277fe657a7d)) * **state:** If value is an empty byte it needs to return none ([#448](https://github.com/nervosnetwork/muta/issues/448)) ([5e1e4b6](https://github.com/nervosnetwork/muta/commit/5e1e4b631d692b2673d5fb039925cafafb8fcd06)) ### Features * **logger:** add a json macro to generate json object ([#455](https://github.com/nervosnetwork/muta/issues/455)) ([ffb1b45](https://github.com/nervosnetwork/muta/commit/ffb1b45159bad2d444f81b44ab57fae0dca16550)) * cli for maintenance ([#436](https://github.com/nervosnetwork/muta/issues/436)) ([aebd85f](https://github.com/nervosnetwork/muta/commit/aebd85fd99424ddb50afcf434045bd0b78bcd53e)) * **api:** dump profile data through http request ([#446](https://github.com/nervosnetwork/muta/issues/446)) ([31d66ab](https://github.com/nervosnetwork/muta/commit/31d66ab5928f046af46630609c82e91eb916afc5)) * **metric:** add accumulated network message size count ([#449](https://github.com/nervosnetwork/muta/issues/449)) ([eda8f75](https://github.com/nervosnetwork/muta/commit/eda8f756a5de72601d6dc2bc1ac0abdae065467c)) # 
[0.2.0-rc.2.1](https://github.com/nervosnetwork/muta/compare/v0.2.0-rc...v0.2.0-rc.2.1) (2020-09-04) ### Bug Fixes * update example configs, fix send transaction in byzantine ([#442](https://github.com/nervosnetwork/muta/issues/442)) ([d6a1a85](https://github.com/nervosnetwork/muta/commit/d6a1a8513e9fdf9166839f5c6aaccd0b5dc9cee3)) * **consensus:** recover and insert tx to mempool to avoid inactivation ([#414](https://github.com/nervosnetwork/muta/issues/414)) ([fd9716e](https://github.com/nervosnetwork/muta/commit/fd9716e078289453b70dd0e378a4a94a6531d9b7)) * **network:** identify protocol: possible dead lock in identification ([#439](https://github.com/nervosnetwork/muta/issues/439)) ([b676c4c](https://github.com/nervosnetwork/muta/commit/b676c4ca3deb98d76cb5c2f6d771e69174cef632)) * fix framework to deal with state while tx runs fail ([#440](https://github.com/nervosnetwork/muta/issues/440)) ([d186505](https://github.com/nervosnetwork/muta/commit/d186505da89afe62840d406052125244bee357c7)) * **network:** cannot process message after reactor exit ([#412](https://github.com/nervosnetwork/muta/issues/412)) ([36af704](https://github.com/nervosnetwork/muta/commit/36af7047544628dd098d6cb34cbe2b5d3c0b1770)) * **network:** double decrease connecting gauge ([#424](https://github.com/nervosnetwork/muta/issues/424)) ([0a1cfcf](https://github.com/nervosnetwork/muta/commit/0a1cfcfa7ddedcc236243f9dc3e317610742ca5c)) * **network:** give up a peer without log a reason ([#423](https://github.com/nervosnetwork/muta/issues/423)) ([7151cd4](https://github.com/nervosnetwork/muta/commit/7151cd435e6bec2a961eac67cb779708c0ab0fd0)) * **network:** give up peer because of handshake timeout ([#418](https://github.com/nervosnetwork/muta/issues/418)) ([2627c00](https://github.com/nervosnetwork/muta/commit/2627c005485466373d632a60fb41d897db63fedc)) * **network:** give up peer due to secio io error ([#425](https://github.com/nervosnetwork/muta/issues/425)) 
([27a8e8b](https://github.com/nervosnetwork/muta/commit/27a8e8ba5ce644f316d1cedb48230cab398a31da)) * **network:** negative connecting metric number ([#430](https://github.com/nervosnetwork/muta/issues/430)) ([dae62ae](https://github.com/nervosnetwork/muta/commit/dae62aeb760c3acb18b14be7f03da032dd495e9b)) * update to latest overlord ([#421](https://github.com/nervosnetwork/muta/issues/421)) ([c8f018c](https://github.com/nervosnetwork/muta/commit/c8f018c89eb9b7bf64c5525768c66f8d5f5038da)) ### Features * **logger:** add structured log api ([#434](https://github.com/nervosnetwork/muta/issues/434)) ([2e4de12](https://github.com/nervosnetwork/muta/commit/2e4de12f1d386af90f2fbb19d57d3832cd5d2e2a)) * **logger:** split log file by size ([#435](https://github.com/nervosnetwork/muta/issues/435)) ([5c4f075](https://github.com/nervosnetwork/muta/commit/5c4f075da31231a92100e8ba85438bde4e5c65b6)) * add byzantine test script ([#433](https://github.com/nervosnetwork/muta/issues/433)) ([b7ceda0](https://github.com/nervosnetwork/muta/commit/b7ceda00a65ebe87b500e5b0c489e5325e22747a)) * log the overlord view change reason ([#432](https://github.com/nervosnetwork/muta/issues/432)) ([8b25191](https://github.com/nervosnetwork/muta/commit/8b251917f28bc0762fa91e15127f659fe8f4685b)) * **apm:** add executing block num to apm ([#429](https://github.com/nervosnetwork/muta/issues/429)) ([b27ac99](https://github.com/nervosnetwork/muta/commit/b27ac99486f376075fb393fa0f80db6ecfb7b955)) * **network:** add more metrics ([#416](https://github.com/nervosnetwork/muta/issues/416)) ([d03ddde](https://github.com/nervosnetwork/muta/commit/d03ddde2763b43e77cced2ff8552910c5fcff1eb)) * **network:** add tentacle_metrics feature ([#417](https://github.com/nervosnetwork/muta/issues/417)) ([5181562](https://github.com/nervosnetwork/muta/commit/5181562c947a34d3c344e766171b60ba161dff29)) # [0.2.0-rc](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.4...v0.2.0-rc) (2020-08-12) ### Features * **network:** 
split transmitter data ([#380](https://github.com/nervosnetwork/muta/issues/380)) ([0322cd6](https://github.com/nervosnetwork/muta/commit/0322cd690cb118f56153e424e9a6bf4b2a11d8b4)) * **network:** verify chain id during protocol handshake ([#406](https://github.com/nervosnetwork/muta/issues/406)) ([e678e92](https://github.com/nervosnetwork/muta/commit/e678e92bf01bc4bc914e74b6fed22c8b55b3cdc7)) # [0.2.0-beta.4](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.3...v0.2.0-beta.4) (2020-08-10) ### Bug Fixes * load hrp before deserializing genesis payload to take hrp effect ([#405](https://github.com/nervosnetwork/muta/issues/405)) ([828e6d5](https://github.com/nervosnetwork/muta/commit/828e6d539cf4da9cf042c450418e75a944315014)) ### Features * **api:** Support enabled TLS ([#402](https://github.com/nervosnetwork/muta/issues/402)) ([c2908a3](https://github.com/nervosnetwork/muta/commit/c2908a3ba6a5ab1219ddc9b14ff6d7320cf70228)) ### Performance Improvements * **state:** add state cache for trieDB ([#404](https://github.com/nervosnetwork/muta/issues/404)) ([2a08c14](https://github.com/nervosnetwork/muta/commit/2a08c147571707507b72882788fd51f7a799f3ec)) # [0.2.0-beta.3](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.2...v0.2.0-beta.3) (2020-08-07) ### Bug Fixes * **apm:** Return the correct time ([#400](https://github.com/nervosnetwork/muta/issues/400)) ([fd6549a](https://github.com/nervosnetwork/muta/commit/fd6549a6352633cee7b5b747448129df7a0532ca)) ### Features * **network:** limit connections from same ip ([#388](https://github.com/nervosnetwork/muta/issues/388)) ([dc78c13](https://github.com/nervosnetwork/muta/commit/dc78c13b8aa25f3e4535e588149042f6345e4d25)) * **network:** limit inbound and outbound connections ([#393](https://github.com/nervosnetwork/muta/issues/393)) ([3a3111e](https://github.com/nervosnetwork/muta/commit/3a3111e1e332529bc8636c54526920c292c04f8a)) * **sync:** Limit the maximum height of once sync 
([#390](https://github.com/nervosnetwork/muta/issues/390)) ([f951a95](https://github.com/nervosnetwork/muta/commit/f951a953daf307ffc98b4df0fe1a77a6a810ac71)) # [0.2.0-beta.2](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.1...v0.2.0-beta.2) (2020-08-04) ### Bug Fixes * **consensus:** Add timestamp checking ([#377](https://github.com/nervosnetwork/muta/issues/377)) ([382ede9](https://github.com/nervosnetwork/muta/commit/382ede9367b910a06b59f3562ecd28ab8100d39e)) ### Features * **benchmark:** add a perf benchmark macro ([#391](https://github.com/nervosnetwork/muta/issues/391)) ([eb24311](https://github.com/nervosnetwork/muta/commit/eb2431149b6865a82d0e4286536f65319a5e1d1f)) * **Cargo:** add random leader feature for muta ([#385](https://github.com/nervosnetwork/muta/issues/385)) ([43da977](https://github.com/nervosnetwork/muta/commit/43da9772b22b97ab4797b80ce5161f1a49827543)) ### Performance Improvements * **metrics:** Add metrics of state ([#397](https://github.com/nervosnetwork/muta/issues/397)) ([5822764](https://github.com/nervosnetwork/muta/commit/5822764240f8b4e8cfeca4bccf7d399a0bf71897)) # [0.2.0-beta.1](https://github.com/nervosnetwork/muta/compare/v0.2.0-alpha.1...v0.2.0-beta.1) (2020-08-03) ### Bug Fixes * **consensus:** return an error when committing an outdated block ([#371](https://github.com/nervosnetwork/muta/issues/371)) ([b3d518b](https://github.com/nervosnetwork/muta/commit/b3d518b52658b40746ef708fa8cde5c96a39a539)) * **mempool:** Ensure that there are no duplicate transactions in the order transaction ([#379](https://github.com/nervosnetwork/muta/issues/379)) ([97708ac](https://github.com/nervosnetwork/muta/commit/97708ac385be2243344d700a0d7c928f18fd51b3)) * **storage:** test batch receipts get panic ([#373](https://github.com/nervosnetwork/muta/issues/373)) ([300a3c6](https://github.com/nervosnetwork/muta/commit/300a3c65cf0399c2ba37a3bd655e06719b660330)) ### Features * **network:** tag consensus peer 
([#364](https://github.com/nervosnetwork/muta/issues/364)) ([9b27df1](https://github.com/nervosnetwork/muta/commit/9b27df1015a25792cc210c5aa0dd473a45ae885d)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) [#2](https://github.com/nervosnetwork/muta/issues/2) [#3](https://github.com/nervosnetwork/muta/issues/3) [#4](https://github.com/nervosnetwork/muta/issues/4) [#5](https://github.com/nervosnetwork/muta/issues/5) [#6](https://github.com/nervosnetwork/muta/issues/6) [#7](https://github.com/nervosnetwork/muta/issues/7) * Add global panic hook ([#376](https://github.com/nervosnetwork/muta/issues/376)) ([7382279](https://github.com/nervosnetwork/muta/commit/738227962771a6a66b85f2fd199df2e699b43adc)) ### Performance Improvements * **executor:** use inner call instead of service dispatcher ([#365](https://github.com/nervosnetwork/muta/issues/365)) ([7b1d2a3](https://github.com/nervosnetwork/muta/commit/7b1d2a32d5c20306af3868e5265bd2530dd9493b)) ### BREAKING CHANGES * **network:** - replace Validator address bytes with pubkey bytes * change(consensus): log validator address instead of its public key Block proposer is address instead public key * fix: compilation failed * **network:** - change users_cast to multicast, take peer_ids bytes instead of Address - network bootstrap configuration now takes peer id instead of pubkey hex * refactor(network): PeerId api # [0.2.0-alpha.1](https://github.com/nervosnetwork/muta/compare/v0.2.0-dev.2...v0.2.0-alpha.1) (2020-07-22) ### Bug Fixes * **executor:** The logic to deal with tx_hook and tx_body ([#367](https://github.com/nervosnetwork/muta/issues/367)) ([749d558](https://github.com/nervosnetwork/muta/commit/749d558b8b58a1943bfa2842dcedcc45218c0f78)) ### Performance Improvements * **mempool:** parallel verifying signatures in mempool ([#359](https://github.com/nervosnetwork/muta/issues/359)) ([2ccdf1a](https://github.com/nervosnetwork/muta/commit/2ccdf1a67a40cd483749a98a1a68c37bcf1d473c)) ### Reverts * Revert 
"refactor(consensus)!: replace Validator address bytes with pubkey bytes (#354)" (#361) ([4dabfa2](https://github.com/nervosnetwork/muta/commit/4dabfa231961d1ec8be1ba42bf05781f55395aed)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) [#361](https://github.com/nervosnetwork/muta/issues/361) * refactor(consensus)!: replace Validator address bytes with pubkey bytes (#354) ([e4433d7](https://github.com/nervosnetwork/muta/commit/e4433d793e8a63788ec682880afc93474e0d2414)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) ### BREAKING CHANGES * - replace Validator address bytes with pubkey bytes * change(consensus): log validator address instead of its public key Block proposer is address instead public key * fix: compilation failed # [0.2.0-dev.2](https://github.com/nervosnetwork/muta/compare/v0.2.0-dev.1...v0.2.0-dev.2) (2020-07-14) # [0.2.0-dev.1](https://github.com/nervosnetwork/muta/compare/v0.2.0-dev.0...v0.2.0-dev.1) (2020-07-09) ### Bug Fixes * **trust_metric_test:** unreliable full node exit check ([#327](https://github.com/nervosnetwork/muta/issues/327)) ([a4ab4a6](https://github.com/nervosnetwork/muta/commit/a4ab4a6209e0978148983e88447ac2d9178fa42a)) # [0.2.0-dev.0](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta...v0.2.0-dev.0) (2020-07-01) ### Bug Fixes * **executor:** tx events aren't cleared on execution error ([#313](https://github.com/nervosnetwork/muta/issues/313)) ([1605cf5](https://github.com/nervosnetwork/muta/commit/1605cf59b558b97889bb431da7f81fd424b90a89)) * **proof:** Verify aggregated signature in checking proof ([#308](https://github.com/nervosnetwork/muta/issues/308)) ([d2a98b0](https://github.com/nervosnetwork/muta/commit/d2a98b06e44449ca756f135c1b235ff0d80eaf67)) * **WAL:** Ignore path already exist ([#304](https://github.com/nervosnetwork/muta/issues/304)) ([02df937](https://github.com/nervosnetwork/muta/commit/02df937fb6449c9b3b0b50e790e0ecf6bfc1ee3d)) ### Features * **executor:** allow cancel 
execution units through context ([#317](https://github.com/nervosnetwork/muta/issues/317)) ([eafb489](https://github.com/nervosnetwork/muta/commit/eafb489f78f7521487c6b2d25dd9912e43f76500)) * **executor:** independent tx hook states commit ([#316](https://github.com/nervosnetwork/muta/issues/316)) ([fde6450](https://github.com/nervosnetwork/muta/commit/fde645010363a4664033370e4109e4d1f08b13bc)) * **protocol:** Remove the logs bloom from block header ([#312](https://github.com/nervosnetwork/muta/issues/312)) ([ff1e0df](https://github.com/nervosnetwork/muta/commit/ff1e0df1e8a65cc480825a49eed9495cc31ecee0)) ## [0.1.2-beta](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta2...v0.1.2-beta) (2020-06-04) ## [0.1.2-beta2](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta1...v0.1.2-beta2) (2020-06-03) ### Features * supported storage metrics ([#307](https://github.com/nervosnetwork/muta/issues/307)) ([2531b8d](https://github.com/nervosnetwork/muta/commit/2531b8da8e8f2a839484adef62dd93f1deff12dd)) ## [0.1.2-beta1](https://github.com/nervosnetwork/muta/compare/v0.1.0-rc.2-huobi...v0.1.2-beta1) (2020-06-01) ### Bug Fixes * **ci:** Increase timeout in ci ([#262](https://github.com/nervosnetwork/muta/issues/262)) ([a12124a](https://github.com/nervosnetwork/muta/commit/a12124a115512196894a7ca88fc42555db927666)) * **mempool:** check exist before insert a transaction ([#257](https://github.com/nervosnetwork/muta/issues/257)) ([be3c139](https://github.com/nervosnetwork/muta/commit/be3c13929d2a59f21655b040aa6738c3d43db611)) * **network:** broken users_cast ([#261](https://github.com/nervosnetwork/muta/issues/261)) ([f36eabd](https://github.com/nervosnetwork/muta/commit/f36eabdc5040bc5cbf0d2011c942867150534a41)) * **network:** reconnection failure ([#273](https://github.com/nervosnetwork/muta/issues/273)) ([9f594b8](https://github.com/nervosnetwork/muta/commit/9f594b8af12e1810bd0cbf23f20ca718d96f6e3a)) * reboot when the diff between height and exec_height more than 
one ([#267](https://github.com/nervosnetwork/muta/issues/267)) ([e8f8595](https://github.com/nervosnetwork/muta/commit/e8f85958d85e3363fccbfde3971684ebf2fceb4d)) * **sync:** Avoid requesting redundant transactions ([#259](https://github.com/nervosnetwork/muta/issues/259)) ([8ece029](https://github.com/nervosnetwork/muta/commit/8ece0299fe185667ac23fed92d8c2f156c0e2c5b)) * binding store type should return Option None instead of panic when get none ([#238](https://github.com/nervosnetwork/muta/issues/238)) ([54bdbb9](https://github.com/nervosnetwork/muta/commit/54bdbb93df1a1a85a83814dcb29461acf3645d10)) * **config:** use serde(default) for rocksdb conf ([#229](https://github.com/nervosnetwork/muta/issues/229)) ([2a03e73](https://github.com/nervosnetwork/muta/commit/2a03e73c77807e80020c50bb287adf4d428632e5)) * **storage:** fix rocksdb too many open files error ([#228](https://github.com/nervosnetwork/muta/issues/228)) ([96c32cd](https://github.com/nervosnetwork/muta/commit/96c32cd7956220beddca33b22d4663a675573ba9)) * **sync:** set crypto info when synchronization ([#235](https://github.com/nervosnetwork/muta/issues/235)) ([84ccfc1](https://github.com/nervosnetwork/muta/commit/84ccfc1d8422265028ad7a0b460b4e297d161fe3)) * docker compose configs ([#210](https://github.com/nervosnetwork/muta/issues/210)) ([acc5265](https://github.com/nervosnetwork/muta/commit/acc52653d304ac5cd25a9d643b263a2f462f7d43)) * hang when kill it ([#225](https://github.com/nervosnetwork/muta/issues/225)) ([dc51240](https://github.com/nervosnetwork/muta/commit/dc512405f32854f165f3145c01d022bca4fff93b)) * panic when start ([#214](https://github.com/nervosnetwork/muta/issues/214)) ([d2da69b](https://github.com/nervosnetwork/muta/commit/d2da69b5941a88376b64453f7d3c10eca3f67d81)) * **muta:** hangs up on one cpu core ([#203](https://github.com/nervosnetwork/muta/issues/203)) ([555dd9e](https://github.com/nervosnetwork/muta/commit/555dd9e694fda043be01f90c91396efd7fe0ace5)) ### Features * split monitor 
network url ([#300](https://github.com/nervosnetwork/muta/issues/300)) ([1237354](https://github.com/nervosnetwork/muta/commit/12373544598d0dae852321cbe3b4e8dab5c70e54)) * supported mempool monitor ([#298](https://github.com/nervosnetwork/muta/issues/298)) ([cc7fdfa](https://github.com/nervosnetwork/muta/commit/cc7fdfa7a7c99466d76d4fe9c1a3537ab8754837)) * supported new metrics ([#294](https://github.com/nervosnetwork/muta/issues/294)) ([e59364a](https://github.com/nervosnetwork/muta/commit/e59364a7759960d8a3279dc78844965f54f4bf62)) * **apm:** add api get_block metrics ([#276](https://github.com/nervosnetwork/muta/issues/276)) ([6ea21e3](https://github.com/nervosnetwork/muta/commit/6ea21e3e0fe08898264f13938cf849c197531afa)) * **apm:** Add opentracing ([#270](https://github.com/nervosnetwork/muta/issues/270)) ([cece21d](https://github.com/nervosnetwork/muta/commit/cece21d8e865223c8679e54d0253ced70dab4c0a)) * **apm:** tracing height and round in OverlordMsg ([#287](https://github.com/nervosnetwork/muta/issues/287)) ([a8c09ff](https://github.com/nervosnetwork/muta/commit/a8c09ff363e8caac9c0977db2fc6cffb782961d7)) * **ci:** add e2e ([#236](https://github.com/nervosnetwork/muta/issues/236)) ([3058722](https://github.com/nervosnetwork/muta/commit/3058722081084b7cb8f423c26eba9e88707fca18)) * **consensus:** add proof check logic for sync and consensus ([#224](https://github.com/nervosnetwork/muta/issues/224)) ([b19502f](https://github.com/nervosnetwork/muta/commit/b19502f48e6d314717a8a2286ada58f6097c6f31)) * **consensus:** change validator list ([#211](https://github.com/nervosnetwork/muta/issues/211)) ([bb04d2c](https://github.com/nervosnetwork/muta/commit/bb04d2c961110276d38cf0e07239d5e72e8125a8)) * **consensus:** integrate trust metric to consensus ([#244](https://github.com/nervosnetwork/muta/issues/244)) ([3dd6bc1](https://github.com/nervosnetwork/muta/commit/3dd6bc1796ca3e6c76cb99beefd5911d35a5e8ee)) * **mempool:** integrate trust metric 
([#245](https://github.com/nervosnetwork/muta/issues/245)) ([49474fd](https://github.com/nervosnetwork/muta/commit/49474fddde3ffc45d564544bb5887bb09a37da1d)) * **metric:** introduce metric using prometheus ([#271](https://github.com/nervosnetwork/muta/issues/271)) ([3d1dc4f](https://github.com/nervosnetwork/muta/commit/3d1dc4fcf196b8616f41dc4cd2a5ba0c0a5ab422)) * **metrics:** mempool, consensus and sync ([#275](https://github.com/nervosnetwork/muta/issues/275)) ([12e4918](https://github.com/nervosnetwork/muta/commit/12e4918d9925868407f854af29410d8ecafe4d48)) * **network:** add metrics ([#274](https://github.com/nervosnetwork/muta/issues/274)) ([56a9b62](https://github.com/nervosnetwork/muta/commit/56a9b62251106d44df33c43d4590575df25df61a)) * **network:** add trace header to network msg ([#281](https://github.com/nervosnetwork/muta/issues/281)) ([6509cbe](https://github.com/nervosnetwork/muta/commit/6509cbec2f700238b2259943212e0968b58404ce)) * **network:** peer trust metric ([#231](https://github.com/nervosnetwork/muta/issues/231)) ([5abefeb](https://github.com/nervosnetwork/muta/commit/5abefebddacfb58415f2a319098bb164ceaa8c81)) * add tx hook in framework ([#218](https://github.com/nervosnetwork/muta/issues/218)) ([cdeb9fd](https://github.com/nervosnetwork/muta/commit/cdeb9fd1e18e198636fa59d91aead85d65cf9852)) * re-execute blocks to recover current status ([#222](https://github.com/nervosnetwork/muta/issues/222)) ([1cd7cb6](https://github.com/nervosnetwork/muta/commit/1cd7cb6d4fbc599bac65bd2c36b507088a3fa041)) * **network:** rpc remote server error response ([#205](https://github.com/nervosnetwork/muta/issues/205)) ([bb993ac](https://github.com/nervosnetwork/muta/commit/bb993ac1f5fe44a2f6a72c8718572accacb27dc3)) * **sync:** Split a transaction in a block into multiple requests ([#221](https://github.com/nervosnetwork/muta/issues/221)) ([0bbf43c](https://github.com/nervosnetwork/muta/commit/0bbf43c49d2df49d70b4bc816ac24c3bc3603a1a)) * add actix payload size limit 
config ([#204](https://github.com/nervosnetwork/muta/issues/204)) ([97319d6](https://github.com/nervosnetwork/muta/commit/97319d6d22c8143ba35c3fe42d56f2cfbc131e37)) ### BREAKING CHANGES * **network:** change rpc response * change(network): bump transmitter protocol version # [0.1.0-rc.2-huobi](https://github.com/nervosnetwork/muta/compare/v0.0.1-rc1-huobi...v0.1.0-rc.2-huobi) (2020-02-24) ### Bug Fixes * **mempool:** fix repeat txs, add flush_incumbent_queue ([#189](https://github.com/nervosnetwork/muta/issues/189)) ([e0db745](https://github.com/nervosnetwork/muta/commit/e0db745419c5ada3d6e9dc4416945a0775a8f18b)) * **muta:** hangs up running on single core environment ([#201](https://github.com/nervosnetwork/muta/issues/201)) ([09f5b4e](https://github.com/nervosnetwork/muta/commit/09f5b4ed70a519155933f7fd4c2015ff512dfdb1)) * block hash from bytes ([#192](https://github.com/nervosnetwork/muta/issues/192)) ([7ca0af4](https://github.com/nervosnetwork/muta/commit/7ca0af46edbd00e4ba43e8646e77fa41aba781cf)) ### Features * check size and cycle limit when insert tx into mempool ([#195](https://github.com/nervosnetwork/muta/issues/195)) ([92bdf2d](https://github.com/nervosnetwork/muta/commit/92bdf2d5147502e1d250fdae47b8ae2c2cfce23f)) * remove redundant wal transactions when commit ([#197](https://github.com/nervosnetwork/muta/issues/197)) ([3aff1db](https://github.com/nervosnetwork/muta/commit/3aff1dbb2dcdabaaf9cbecb9c3e9757a2c737354)) * Supports actix in tokio ([#200](https://github.com/nervosnetwork/muta/issues/200)) ([266c1cb](https://github.com/nervosnetwork/muta/commit/266c1cb2cf6223759eba4ca9771ee21b244db3a4)) * **api:** Supports configuring the max number of connections. 
([#194](https://github.com/nervosnetwork/muta/issues/194)) ([6cbdd26](https://github.com/nervosnetwork/muta/commit/6cbdd267b7ff56eefbe23bffc8e4dc589272111d)) * **service:** upgrade asset service ([#150](https://github.com/nervosnetwork/muta/issues/150)) ([8925390](https://github.com/nervosnetwork/muta/commit/8925390b59353d853dd1266cdcfe6db1258a8296)) ### Reverts * Revert "fix(muta): hangs up running on single core environment (#201)" (#202) ([28e685a](https://github.com/nervosnetwork/muta/commit/28e685a62b82c1a91699b4495d430b0757e5438d)), closes [#201](https://github.com/nervosnetwork/muta/issues/201) [#202](https://github.com/nervosnetwork/muta/issues/202) ## [0.0.1-rc1-huobi](https://github.com/nervosnetwork/muta/compare/v0.0.1-rc.1-huobi...v0.0.1-rc1-huobi) (2020-02-15) ### Bug Fixes * **ci:** fail to install sccache after new rust-toolchain ([#68](https://github.com/nervosnetwork/muta/issues/68)) ([f961415](https://github.com/nervosnetwork/muta/commit/f961415803ae6d38b70e97a810f33a1b60639d43)) * **consensus:** check logs bloom when check block ([#168](https://github.com/nervosnetwork/muta/issues/168)) ([0984989](https://github.com/nervosnetwork/muta/commit/09849893270cc0908e2ee965e7e8b7c46ada0f16)) * **consensus:** empty block receipts root ([#61](https://github.com/nervosnetwork/muta/issues/61)) ([89ed4d2](https://github.com/nervosnetwork/muta/commit/89ed4d2c4a708f278e7cd777c562f1f1fb5a9755)) * **consensus:** encode overlord message and verify signature ([#39](https://github.com/nervosnetwork/muta/issues/39)) ([b11e69e](https://github.com/nervosnetwork/muta/commit/b11e69e49ed195d0d23f22b6abf1387f4a4c0c94)) * **consensus:** fix check state roots ([#107](https://github.com/nervosnetwork/muta/issues/107)) ([cf45c3b](https://github.com/nervosnetwork/muta/commit/cf45c3ba39eb65bdb012165e232352a9187a6f0d)) * **consensus:** Get authority list returns none. 
([#4](https://github.com/nervosnetwork/muta/issues/4)) ([2a7eb3c](https://github.com/nervosnetwork/muta/commit/2a7eb3c26fade5a065ec2435b4ba46b6c16f223a)) * **consensus:** state root can not be clear ([#140](https://github.com/nervosnetwork/muta/issues/140)) ([4ea1df4](https://github.com/nervosnetwork/muta/commit/4ea1df425620482f36daf61b4b50edb83807efdd)) * **consensus:** sync txs context no session id ([#167](https://github.com/nervosnetwork/muta/issues/167)) ([53136c3](https://github.com/nervosnetwork/muta/commit/53136c3dfdf0e7b29762cd72f51eeb35d52804c2)) * **doc:** fix graphql_api doc link and doc-api build sh ([#161](https://github.com/nervosnetwork/muta/issues/161)) ([e67e2b2](https://github.com/nervosnetwork/muta/commit/e67e2b24bf0609c263f59381a83fcf04d2227583)) * **executor:** wrong hook logic ([#127](https://github.com/nervosnetwork/muta/issues/127)) ([8c6a246](https://github.com/nervosnetwork/muta/commit/8c6a246a1b64a197371305856148b034320f1fa0)) * **framework/executor:** Catch any errors in the call. ([#92](https://github.com/nervosnetwork/muta/issues/92)) ([739a126](https://github.com/nervosnetwork/muta/commit/739a126c86643b28e1c47aef87d8bd803b9fe8d9)) * **keypair:** Use hex encoding common_ref. ([#79](https://github.com/nervosnetwork/muta/issues/79)) ([abbce4c](https://github.com/nervosnetwork/muta/commit/abbce4c15919f45f824bd4967ea64f8234548765)) * **makefile:** Docker push to the correct image ([#146](https://github.com/nervosnetwork/muta/issues/146)) ([05f6396](https://github.com/nervosnetwork/muta/commit/05f6396f1786b46b4cf9c41e3f700b37ebaddb68)) * **mempool:** Always get the latest epoch id when `package`. 
([#30](https://github.com/nervosnetwork/muta/issues/30)) ([9a77ebf](https://github.com/nervosnetwork/muta/commit/9a77ebf9ecba6323cc81cd094774e32fd28b946e)) * **mempool:** broadcast new transactions ([#32](https://github.com/nervosnetwork/muta/issues/32)) ([086ec7e](https://github.com/nervosnetwork/muta/commit/086ec7eb6ca2c8f6afc14767d51efdb91533f932)) * **mempool:** Fix concurrent insert bug of mempool ([#19](https://github.com/nervosnetwork/muta/issues/19)) ([515eec2](https://github.com/nervosnetwork/muta/commit/515eec2ab65a2d57a5ca742c774daeb9cef99354)) * **mempool:** Resize the queue to ensure correct switching. ([#18](https://github.com/nervosnetwork/muta/issues/18)) ([ebf1ae3](https://github.com/nervosnetwork/muta/commit/ebf1ae34861fc48297813cdc465e4d9c99e059d4)) * **mempool:** sync proposal txs doesn't insert txs at all ([#179](https://github.com/nervosnetwork/muta/issues/179)) ([33f39c5](https://github.com/nervosnetwork/muta/commit/33f39c5bac0235a8261c53327c558864a6149c8a)) * **network:** dead lock in peer manager ([#24](https://github.com/nervosnetwork/muta/issues/24)) ([a74017a](https://github.com/nervosnetwork/muta/commit/a74017aa9d84b6b862683860e63c000b4048e459)) * **network:** default rpc timeout to 4 seconds ([#115](https://github.com/nervosnetwork/muta/issues/115)) ([666049c](https://github.com/nervosnetwork/muta/commit/666049c54c8eee8291cc173230caccb35de137ca)) * **network:** fail to bootstrap if bootstrap isn't start already ([#46](https://github.com/nervosnetwork/muta/issues/46)) ([9dd515a](https://github.com/nervosnetwork/muta/commit/9dd515a3e09f1c158dff6536ed38eb5116f4317f)) * **network:** give up retry ([#152](https://github.com/nervosnetwork/muta/issues/152)) ([34d052a](https://github.com/nervosnetwork/muta/commit/34d052aaba1684333fdd49f86e54c103064fa2f6)) * **network:** never reconnect bootstrap again after failure ([#22](https://github.com/nervosnetwork/muta/issues/22)) 
([79d66bd](https://github.com/nervosnetwork/muta/commit/79d66bd06e61ff6ef41c12ada91cf6485482aa43)) * **network:** NoSessionId Error ([#33](https://github.com/nervosnetwork/muta/issues/33)) ([4761d79](https://github.com/nervosnetwork/muta/commit/4761d797dded9534e0c0b5e43c6e519055542c2c)) * **network:** rpc memory leak if rpc call future is dropped ([#166](https://github.com/nervosnetwork/muta/issues/166)) ([8476a4b](https://github.com/nervosnetwork/muta/commit/8476a4b85bf3cf923adcd7555cef04ae73a225f1)) * **sync:** Check the height again after get the lock ([#171](https://github.com/nervosnetwork/muta/issues/171)) ([68164f3](https://github.com/nervosnetwork/muta/commit/68164f3f75d83b9507ee68a099fb712492339edb)) * **sync:** Flush the memory pool when the storage success ([#165](https://github.com/nervosnetwork/muta/issues/165)) ([3b9cbd5](https://github.com/nervosnetwork/muta/commit/3b9cbd55310993c783b0a5794237df75accf118e)) * fix overlord not found error ([#95](https://github.com/nervosnetwork/muta/issues/95)) ([0754c64](https://github.com/nervosnetwork/muta/commit/0754c64973f7fca92e49080c3a03a869b43a4c46)) * Ignore bootstraps when empty. 
([#41](https://github.com/nervosnetwork/muta/issues/41)) ([2b3566b](https://github.com/nervosnetwork/muta/commit/2b3566b4acb91f6086b9cca2b1ea4d2883a75be9)) ### Features * **config:** move bls_pub_key config to genesis.toml ([#162](https://github.com/nervosnetwork/muta/issues/162)) ([337b01f](https://github.com/nervosnetwork/muta/commit/337b01fda21fc33f4d4817d93a27d86af9e2b164)) * **network:** interval report pending data size ([#160](https://github.com/nervosnetwork/muta/issues/160)) ([3c46aca](https://github.com/nervosnetwork/muta/commit/3c46aca4873abf9b8afd01d5f464df57bb1b9b9a)) * **sync:** Trigger sync after waiting for consensus interval ([#169](https://github.com/nervosnetwork/muta/issues/169)) ([fe355f1](https://github.com/nervosnetwork/muta/commit/fe355f1d7d6359dfa97809f1bc603cb99975ba46)) * add api schema ([#90](https://github.com/nervosnetwork/muta/issues/90)) ([3f8adfa](https://github.com/nervosnetwork/muta/commit/3f8adfa0a717b055a4455fd102de68003f835bf2)) * add common_ref argument for keypair tool ([#154](https://github.com/nervosnetwork/muta/issues/154)) ([2651346](https://github.com/nervosnetwork/muta/commit/26513469206aa8a4480c5fffad9d134d5d0e8ded)) * add panic hook to logger ([#156](https://github.com/nervosnetwork/muta/issues/156)) ([93b65fe](https://github.com/nervosnetwork/muta/commit/93b65feb89502b7d7836d7f4c423db37fbd1ef4f)) * Extract muta as crate. 
([1b62fe7](https://github.com/nervosnetwork/muta/commit/1b62fe786fbd576b67ea28df3d304d235ae3e94e)) * Metadata service ([#133](https://github.com/nervosnetwork/muta/issues/133)) ([a588b12](https://github.com/nervosnetwork/muta/commit/a588b12de4f3c0de666b66e2a5dea65d71977f5f)) * spawn sync txs in check epoch ([6dca1dd](https://github.com/nervosnetwork/muta/commit/6dca1ddcd9256a3061f132a5abc5d784d466c168)) * support specify module log level via config ([#105](https://github.com/nervosnetwork/muta/issues/105)) ([c06061b](https://github.com/nervosnetwork/muta/commit/c06061b4ccd755177385dfee000783e2b11b0dcd)) * Update juniper, supports async ([#149](https://github.com/nervosnetwork/muta/issues/149)) ([cbabf50](https://github.com/nervosnetwork/muta/commit/cbabf507c25ee8feb8a57de408bc97efc8a4a4ab)) * update overlord with brake engine ([#159](https://github.com/nervosnetwork/muta/issues/159)) ([8cd886a](https://github.com/nervosnetwork/muta/commit/8cd886a79fec934a53d409a27de941f16166c176)), closes [#156](https://github.com/nervosnetwork/muta/issues/156) [#158](https://github.com/nervosnetwork/muta/issues/158) * **api:** Add the exec_height field to the block ([#138](https://github.com/nervosnetwork/muta/issues/138)) ([417153c](https://github.com/nervosnetwork/muta/commit/417153c632793c7ac4e7bc3ffa5b2832dd2dbe66)) * **binding-macro:** service method supports none payload and none response ([#103](https://github.com/nervosnetwork/muta/issues/103)) ([3a5783e](https://github.com/nervosnetwork/muta/commit/3a5783eadd1090cf739d4fdbe94f049115eb65f0)) * **consensus:** develop aggregate crypto with overlord ([#60](https://github.com/nervosnetwork/muta/issues/60)) ([2bc0869](https://github.com/nervosnetwork/muta/commit/2bc0869e928b35c674b4cafdf48540298752b5b5)) * **core/binding:** Implementation of service state. 
([#48](https://github.com/nervosnetwork/muta/issues/48)) ([301be6f](https://github.com/nervosnetwork/muta/commit/301be6f39379bd3826b5f605c999ce107f7404e4)) * **core/binding-macro:** Add `read` and `write` proc-macro. ([#49](https://github.com/nervosnetwork/muta/issues/49)) ([687b6e1](https://github.com/nervosnetwork/muta/commit/687b6e1e1a960f679394843c42b861981828d8aa)) * **core/binding-macro:** Add cycles proc-marco. ([#52](https://github.com/nervosnetwork/muta/issues/52)) ([e2289a2](https://github.com/nervosnetwork/muta/commit/e2289a2481510b59c18e37d0fc8bedd9f5d4537e)) * **core/binding-macro:** Support for returning a struct. ([#70](https://github.com/nervosnetwork/muta/issues/70)) ([e13b1ff](https://github.com/nervosnetwork/muta/commit/e13b1ff7834279de9c2df5a0df6967035b7fb8b3)) * **framework:** add ExecutorParams into hook method ([#116](https://github.com/nervosnetwork/muta/issues/116)) ([8036bd6](https://github.com/nervosnetwork/muta/commit/8036bd6f9be1f49eedbc40bbc260ad82952c2e71)) * **framework:** add extra: Option to ServiceContext ([#118](https://github.com/nervosnetwork/muta/issues/118)) ([694c4a3](https://github.com/nervosnetwork/muta/commit/694c4a34f32dc1ba4940db19e304de7a927e1531)) * **framework:** add tx_hash, nonce to ServiceContext ([#111](https://github.com/nervosnetwork/muta/issues/111)) ([352f71f](https://github.com/nervosnetwork/muta/commit/352f71fb3b8b024d533d26c7a344fad801b7a91c)) * **framework/executor:** create service genesis from config ([#104](https://github.com/nervosnetwork/muta/issues/104)) ([8988ccb](https://github.com/nervosnetwork/muta/commit/8988ccb3e5cb2a25bfeabe93c5a63ac1600290a2)) * **graphql:** Modify the API to fit the framework data structure. 
([#74](https://github.com/nervosnetwork/muta/issues/74)) ([a1ca2b0](https://github.com/nervosnetwork/muta/commit/a1ca2b0d68e32e335d8d388b70bca83137519f5a)) * **muta:** flush metadata while commit ([#137](https://github.com/nervosnetwork/muta/issues/137)) ([383a481](https://github.com/nervosnetwork/muta/commit/383a481c348efdf73fd690b42b2430fca6d9a0db)) * **muta:** link up metadata service with muta ([#136](https://github.com/nervosnetwork/muta/issues/136)) ([ba65b80](https://github.com/nervosnetwork/muta/commit/ba65b80dffd128f12336b44d4e80ed40cced8e75)) * **protocol/traits:** Add traits of binding. ([#47](https://github.com/nervosnetwork/muta/issues/47)) ([c6b85ee](https://github.com/nervosnetwork/muta/commit/c6b85ee7bee5b14c5da1676ff44d743c031a0fa6)) * **protocol/types:** Add cycles_price for raw_transaction. ([#46](https://github.com/nervosnetwork/muta/issues/46)) ([55f64a4](https://github.com/nervosnetwork/muta/commit/55f64a49634061ca05c75cbf5923f183fc83936d)) * **sync:** Wait for the execution queue. ([#132](https://github.com/nervosnetwork/muta/issues/132)) ([a8d2013](https://github.com/nervosnetwork/muta/commit/a8d2013991cc6b5b579429954c8411c7954b1da4)) * add end to end test ([#42](https://github.com/nervosnetwork/muta/issues/42)) ([e84756d](https://github.com/nervosnetwork/muta/commit/e84756d1734ad58943309c3c2299393f5a2022e4)) * Extract muta as crate. 
([#75](https://github.com/nervosnetwork/muta/issues/75)) ([fc576ea](https://github.com/nervosnetwork/muta/commit/fc576eaa67a3b4b4fa459b0ab970251d63b06b4f)), closes [#46](https://github.com/nervosnetwork/muta/issues/46) [#47](https://github.com/nervosnetwork/muta/issues/47) [#48](https://github.com/nervosnetwork/muta/issues/48) [#49](https://github.com/nervosnetwork/muta/issues/49) [#52](https://github.com/nervosnetwork/muta/issues/52) [#51](https://github.com/nervosnetwork/muta/issues/51) [#55](https://github.com/nervosnetwork/muta/issues/55) [#58](https://github.com/nervosnetwork/muta/issues/58) [#56](https://github.com/nervosnetwork/muta/issues/56) [#64](https://github.com/nervosnetwork/muta/issues/64) [#65](https://github.com/nervosnetwork/muta/issues/65) [#70](https://github.com/nervosnetwork/muta/issues/70) [#71](https://github.com/nervosnetwork/muta/issues/71) [#72](https://github.com/nervosnetwork/muta/issues/72) [#73](https://github.com/nervosnetwork/muta/issues/73) [#43](https://github.com/nervosnetwork/muta/issues/43) [#54](https://github.com/nervosnetwork/muta/issues/54) [#53](https://github.com/nervosnetwork/muta/issues/53) [#57](https://github.com/nervosnetwork/muta/issues/57) [#45](https://github.com/nervosnetwork/muta/issues/45) [#62](https://github.com/nervosnetwork/muta/issues/62) [#63](https://github.com/nervosnetwork/muta/issues/63) [#66](https://github.com/nervosnetwork/muta/issues/66) [#61](https://github.com/nervosnetwork/muta/issues/61) [#67](https://github.com/nervosnetwork/muta/issues/67) [#68](https://github.com/nervosnetwork/muta/issues/68) [#60](https://github.com/nervosnetwork/muta/issues/60) [#46](https://github.com/nervosnetwork/muta/issues/46) [#47](https://github.com/nervosnetwork/muta/issues/47) [#48](https://github.com/nervosnetwork/muta/issues/48) [#49](https://github.com/nervosnetwork/muta/issues/49) [#52](https://github.com/nervosnetwork/muta/issues/52) [#51](https://github.com/nervosnetwork/muta/issues/51) 
[#55](https://github.com/nervosnetwork/muta/issues/55) [#58](https://github.com/nervosnetwork/muta/issues/58) [#56](https://github.com/nervosnetwork/muta/issues/56) [#64](https://github.com/nervosnetwork/muta/issues/64) [#65](https://github.com/nervosnetwork/muta/issues/65) [#70](https://github.com/nervosnetwork/muta/issues/70) [#72](https://github.com/nervosnetwork/muta/issues/72) [#74](https://github.com/nervosnetwork/muta/issues/74) * metrics logger ([#43](https://github.com/nervosnetwork/muta/issues/43)) ([d633309](https://github.com/nervosnetwork/muta/commit/d6333091959da6ab0a12630282f6ea783d509319)) * support consensus tracing ([#53](https://github.com/nervosnetwork/muta/issues/53)) ([03942f0](https://github.com/nervosnetwork/muta/commit/03942f08cfdcc573d7feef3a1111e59f63d077f1)) * **api:** make API more user-friendly ([#38](https://github.com/nervosnetwork/muta/issues/38)) ([ba33467](https://github.com/nervosnetwork/muta/commit/ba33467e52c114576b82850e11662d168ede293a)) * **mempool:** implement cached batch txs broadcast ([#20](https://github.com/nervosnetwork/muta/issues/20)) ([d2af811](https://github.com/nervosnetwork/muta/commit/d2af811bb99becc9600d784ce19e021fec11627d)) * **sync:** synchronization epoch ([#9](https://github.com/nervosnetwork/muta/issues/9)) ([fb4bf0d](https://github.com/nervosnetwork/muta/commit/fb4bf0d7c4bde7c86d1b09f469037ff1219f15fa)), closes [#17](https://github.com/nervosnetwork/muta/issues/17) [#18](https://github.com/nervosnetwork/muta/issues/18) * add compile and run in README ([#11](https://github.com/nervosnetwork/muta/issues/11)) ([1058322](https://github.com/nervosnetwork/muta/commit/10583224053ab91c32dbec815cd0a5af6b0dbeb3)) * add docker ([#31](https://github.com/nervosnetwork/muta/issues/31)) ([8a4386a](https://github.com/nervosnetwork/muta/commit/8a4386ad4c1f66783cada885db9851609b6f5f8d)) * change rlp in executor to fixed-codec ([#29](https://github.com/nervosnetwork/muta/issues/29)) 
([7f737cd](https://github.com/nervosnetwork/muta/commit/7f737cdfc9353148b945ad52dd5ab3fd46e2c4db)) * Get balance. ([#28](https://github.com/nervosnetwork/muta/issues/28)) ([8c4a3f9](https://github.com/nervosnetwork/muta/commit/8c4a3f9af8b9e1e8f19cc50b280b66b5d8e270bb)) * **codec:** Add codec tests and benchmarks ([#22](https://github.com/nervosnetwork/muta/issues/22)) ([dcbe522](https://github.com/nervosnetwork/muta/commit/dcbe522be22596059280f6ef845a6d6f4e798551)) * **consensus:** develop consensus interfaces ([#21](https://github.com/nervosnetwork/muta/issues/21)) ([62e3c06](https://github.com/nervosnetwork/muta/commit/62e3c063cd4f82efda43ca5c87c042db5adb9abb)) * **consensus:** develop consensus provider and engine ([#28](https://github.com/nervosnetwork/muta/issues/28)) ([b2ccf9c](https://github.com/nervosnetwork/muta/commit/b2ccf9c84502a6dd476b1737aa9cbb2a283ced32)) * **consensus:** Execute the transactions on commit. ([#7](https://github.com/nervosnetwork/muta/issues/7)) ([b54e7d2](https://github.com/nervosnetwork/muta/commit/b54e7d2bbd5d0ac45ef0d4c728e398b87a1f5450)) * **consensus:** joint overlord and chain ([#32](https://github.com/nervosnetwork/muta/issues/32)) ([72cec41](https://github.com/nervosnetwork/muta/commit/72cec41c86824455ad35cfb1da8a246c50731568)) * **consensus:** mutex lock and timer config ([#45](https://github.com/nervosnetwork/muta/issues/45)) ([cf09687](https://github.com/nervosnetwork/muta/commit/cf09687299b5be39a9c40f13d4b88a496ec7c943)) * **consensus:** Support transaction executor. ([#6](https://github.com/nervosnetwork/muta/issues/6)) ([e1188f9](https://github.com/nervosnetwork/muta/commit/e1188f9296b3947f833d6bc9a9beff22ebbbf4e7)) * **executor:** Create genesis.
([#1](https://github.com/nervosnetwork/muta/issues/1)) ([a1111d8](https://github.com/nervosnetwork/muta/commit/a1111d8db709c62d119edf3238a22dd656e8035f)) * **graphql:** Support transfer and contract deployment ([#44](https://github.com/nervosnetwork/muta/issues/44)) ([bfcb520](https://github.com/nervosnetwork/muta/commit/bfcb5203fe245e364922d5d8966197a8a8f8d91c)) * **mempool:** fix fixed_codec ([#25](https://github.com/nervosnetwork/muta/issues/25)) ([c1ac607](https://github.com/nervosnetwork/muta/commit/c1ac607ac9b61f4867c17f69c50dad9797dc1c2b)) * **mempool:** Remove cycle_limit ([#23](https://github.com/nervosnetwork/muta/issues/23)) ([8a19ae8](https://github.com/nervosnetwork/muta/commit/8a19ae867fd5b82c4fd56a1f8b59a83e24ca5bc0)) * **native-contract:** Support for asset creation and transfer. ([#37](https://github.com/nervosnetwork/muta/issues/37)) ([1c505fb](https://github.com/nervosnetwork/muta/commit/1c505fbdd57fcb2ef3df3e8b19c65599d77c9bf1)) * **network:** log connected peer ips ([#23](https://github.com/nervosnetwork/muta/issues/23)) ([1691bfa](https://github.com/nervosnetwork/muta/commit/1691bfa47ac561a2f27243e21b1b2fad2fb64be9)) * develop merkle root ([#17](https://github.com/nervosnetwork/muta/issues/17)) ([03cec31](https://github.com/nervosnetwork/muta/commit/03cec318645ee49158f09ec59e356210a80f8bbf)) * Fill in the main function ([#36](https://github.com/nervosnetwork/muta/issues/36)) ([d783f3b](https://github.com/nervosnetwork/muta/commit/d783f3b2d36507a695abd47b303b6c0108e2030b)) * **mempool:** Develop mempool's tests and benches ([#9](https://github.com/nervosnetwork/muta/issues/9)) ([5ddd5f4](https://github.com/nervosnetwork/muta/commit/5ddd5f4d0c1fa9630971ade538dcf954b6aa8f54)) * **mempool:** Implement MemPool interfaces ([#8](https://github.com/nervosnetwork/muta/issues/8)) ([934ce58](https://github.com/nervosnetwork/muta/commit/934ce58b7a7a6b89b65ff931ce5487e553dd927d)) * **native_contract:** Add an adapter that provides access to the world 
state. ([#27](https://github.com/nervosnetwork/muta/issues/27)) ([3281bea](https://github.com/nervosnetwork/muta/commit/3281beab2d054470b5edf330515df933cc713bb8)) * **protocol:** Add the mempool traits ([#7](https://github.com/nervosnetwork/muta/issues/7)) ([9f6c19b](https://github.com/nervosnetwork/muta/commit/9f6c19bbfbff6c8f82bb732c3503d757833f837e)) * **protocol:** Add the underlying data structure. ([#5](https://github.com/nervosnetwork/muta/issues/5)) ([5dae189](https://github.com/nervosnetwork/muta/commit/5dae189104c986348adddd43fbaa47af01781828)) * **protocol:** Protobuf serialize ([#6](https://github.com/nervosnetwork/muta/issues/6)) ([ff00595](https://github.com/nervosnetwork/muta/commit/ff00595d100e44148b1cc243437798db8233ca2b)) * **storage:** add storage test ([#18](https://github.com/nervosnetwork/muta/issues/18)) ([f78df5b](https://github.com/nervosnetwork/muta/commit/f78df5b0357eade7855152eee9c79070866477ac)) * **storage:** Implement memory adapter API ([#11](https://github.com/nervosnetwork/muta/issues/11)) ([b0a8090](https://github.com/nervosnetwork/muta/commit/b0a80901229f85e8cf89bd806dcb32c95ae059b8)) * **storage:** Implement storage ([#17](https://github.com/nervosnetwork/muta/issues/17)) ([7728b5b](https://github.com/nervosnetwork/muta/commit/7728b5b0307bd58b11671f123f37e3e365b14b97)) * **types:** Add account structure. 
([#24](https://github.com/nervosnetwork/muta/issues/24)) ([f6b93f0](https://github.com/nervosnetwork/muta/commit/f6b93f0f08b03a20761aef47f08343eb5d8e6a85)) ### Performance Improvements * **storage:** cache latest epoch ([#128](https://github.com/nervosnetwork/muta/issues/128)) ([da4d7a9](https://github.com/nervosnetwork/muta/commit/da4d7a92363596b7339518e24c64ab49648749dd)) ### Reverts * Revert "[ᚬdebug-muta] feat(service): Upgrade asset (#181)" (#182) ([dad3f99](https://github.com/nervosnetwork/muta/commit/dad3f99f7c694eea57b546c6b2169950c5692ea1)), closes [#181](https://github.com/nervosnetwork/muta/issues/181) [#182](https://github.com/nervosnetwork/muta/issues/182) * Revert "feat: Extract muta as crate. (#75)" (#77) ([3baacc5](https://github.com/nervosnetwork/muta/commit/3baacc5c781615377e9a6ba50cfc7b17dcb0ec6e)), closes [#75](https://github.com/nervosnetwork/muta/issues/75) [#77](https://github.com/nervosnetwork/muta/issues/77) # [0.1.0](https://github.com/nervosnetwork/muta/compare/733ee8e6be7649c9aa2d772bb1dc661bd0879917...v0.1.0) (2019-09-22) ### Bug Fixes * **ci:** build on push and pull request ([d28aa55](https://github.com/nervosnetwork/muta/commit/d28aa55f5df240277e2b75e87aa948cdcf11ea7f)) * **ci:** temporarily amend code to pass lint ([9441236](https://github.com/nervosnetwork/muta/commit/9441236a5107e0042753915ed943b487cd02d6a5)) * **consensus:** Clear cache of last proposal. ([#199](https://github.com/nervosnetwork/muta/issues/199)) ([f548653](https://github.com/nervosnetwork/muta/commit/f5486531f43fa720171941ad4be5ec7646a269c2)) * **consensus:** fix lock free too early problem and add state root check ([#277](https://github.com/nervosnetwork/muta/issues/277)) ([7238c5b](https://github.com/nervosnetwork/muta/commit/7238c5bc057bd6c6f31773fa4bd3e06aaea72255)) * **consensus:** Makes sure that proposer is this node. 
([#281](https://github.com/nervosnetwork/muta/issues/281)) ([d7f4e50](https://github.com/nervosnetwork/muta/commit/d7f4e5081f00a04aee934d0ce700cd107f4f345f)) * **core-network:** CallbackItemNotFound ([#243](https://github.com/nervosnetwork/muta/issues/243)) ([47365fa](https://github.com/nervosnetwork/muta/commit/47365faf5fa7171dde8951661fa095a6c43bcb1f)) * **core-network:** false bootstrapped connections ([#275](https://github.com/nervosnetwork/muta/issues/275)) ([26e76f0](https://github.com/nervosnetwork/muta/commit/26e76f0a2879aed3da745529f64ba3828a1cc30e)) * **core-types:** compilation failure ([#269](https://github.com/nervosnetwork/muta/issues/269)) ([56d8649](https://github.com/nervosnetwork/muta/commit/56d86491f69ab16fd2c76b66b28ad76df78c6ca7)) * **core/crypto:** pubkey_to_address() consistent with cita ([acb5e63](https://github.com/nervosnetwork/muta/commit/acb5e63ea577429bc94c16a3430035ea139aaf15)) * **executor:** Save the full node data. ([b57a1c5](https://github.com/nervosnetwork/muta/commit/b57a1c5fa775479b85d1531f7d2dced817de4729)) * **jsonrpc:** give default value for newFilter ([#289](https://github.com/nervosnetwork/muta/issues/289)) ([17069b4](https://github.com/nervosnetwork/muta/commit/17069b49067dd7335f243d248e3c8d633e455a73)) * **jsonrpc:** logic error in getTransactionCount ([#290](https://github.com/nervosnetwork/muta/issues/290)) ([464bfdf](https://github.com/nervosnetwork/muta/commit/464bfdf08a9954206bb595b3861c52208fc9630d)) * **jsonrpc:** make the response compatible with jsonrpc 2.0 spec ([1db5190](https://github.com/nervosnetwork/muta/commit/1db5190bc91d431bacce6bb44a1185b19520c1a2)) * **jsonrpc:** prefix with 0x by API getTransactionProof ([#295](https://github.com/nervosnetwork/muta/issues/295)) ([b1c0160](https://github.com/nervosnetwork/muta/commit/b1c0160b65fc91e8a2bcfd908943fb238d1101c1)) * **jsonrpc:** raise error when key not found in state ([#294](https://github.com/nervosnetwork/muta/issues/294)) 
([7a7c294](https://github.com/nervosnetwork/muta/commit/7a7c294df5ae75f50ec0fe3620634c7280f837e7)) * **jsonrpc:** returns the correct block hash ([#280](https://github.com/nervosnetwork/muta/issues/280)) ([f6a58d0](https://github.com/nervosnetwork/muta/commit/f6a58d0cfc743d1fa84fe5de99798157ba5f25a6)) * Call header.hash ([#94](https://github.com/nervosnetwork/muta/issues/94)) ([636aa54](https://github.com/nervosnetwork/muta/commit/636aa549c21a04611b6f4575dfc7e78fa47d768e)) * change the blocking thread from rayon to std::thread ([5b80476](https://github.com/nervosnetwork/muta/commit/5b804765d0a76055e6e730560a6d7ecd576703be)) * return err if tx not found in get_batch to avoid forking ([#279](https://github.com/nervosnetwork/muta/issues/279)) ([6aed2fe](https://github.com/nervosnetwork/muta/commit/6aed2fe5ffcd0eb6a699cff00d92e9dd3ab7d7b3)) * **sync:** proof and proposal_hash hash not match. ([#239](https://github.com/nervosnetwork/muta/issues/239)) ([51f332e](https://github.com/nervosnetwork/muta/commit/51f332ee8c4a10b88844a272bc51a116b4d25dd2)) * tokio::spawn panic. ([#238](https://github.com/nervosnetwork/muta/issues/238)) ([12d8d01](https://github.com/nervosnetwork/muta/commit/12d8d01ed42f9cc5d9cc341edfd76a6076aa37e1)) * **common/logger:** cargo fmt ([e3a7f5a](https://github.com/nervosnetwork/muta/commit/e3a7f5a2217956b86191881caeb3ca6cea7ec2fc)) * **components/transaction-pool:** Use the latest crypto API. ([#86](https://github.com/nervosnetwork/muta/issues/86)) ([f6c94d3](https://github.com/nervosnetwork/muta/commit/f6c94d307d6e89afba75ed8b83b99088fc7ca9de)) * **components/transaction-pool:** Check if the transaction is repeated in history blocks.
([dba25fe](https://github.com/nervosnetwork/muta/commit/dba25fe09d8e82f0e396415055ce08efbf1fe159)) * **core-p2p:** transmission example: a clippy warning ([6d2f42a](https://github.com/nervosnetwork/muta/commit/6d2f42ae97194333a823581406fc75d2c47536b2)) * **core-p2p:** transmission example: remove unreachable match branch ([0082bd6](https://github.com/nervosnetwork/muta/commit/0082bd6a3fb956f9ee17a9eba6ada77fc91f3dfe)) * **core-p2p:** transmission: future task starvation ([ba14db0](https://github.com/nervosnetwork/muta/commit/ba14db035413220ed7eba5e5543b8a6496267641)) * **devchain:** correct addresses matched with privkey ([#114](https://github.com/nervosnetwork/muta/issues/114)) ([f56744e](https://github.com/nervosnetwork/muta/commit/f56744e7809b39da79434a3fbcf3deb127fded27)) * **network:** RepeatedConnection and ConnectSelf errors ([#196](https://github.com/nervosnetwork/muta/issues/196)) ([2e5e888](https://github.com/nervosnetwork/muta/commit/2e5e888cdb0869e7622639919b12e62eca06f137)) * **p2p:** Make sure the "poll" is triggered. ([#182](https://github.com/nervosnetwork/muta/issues/182)) ([88daed1](https://github.com/nervosnetwork/muta/commit/88daed1e3e175c21e7923ddd5f1b4eb4ef4d6286)) * **p2p-identify:** empty local listen addresses ([#198](https://github.com/nervosnetwork/muta/issues/198)) ([c40ad8a](https://github.com/nervosnetwork/muta/commit/c40ad8a8dedd999efd17a88b9c30b198d4a0035a)) * **synchronizer:** add a pull_txs_sync method to sync txs from block ([#207](https://github.com/nervosnetwork/muta/issues/207)) ([317fca8](https://github.com/nervosnetwork/muta/commit/317fca8b8d2f270e5d140a94bb1a9227c4b7271b)) * **transaction-pool:** duplicate insertion transactions from network ([#191](https://github.com/nervosnetwork/muta/issues/191)) ([2c095bb](https://github.com/nervosnetwork/muta/commit/2c095bbe5649454abf2663df7355c0a56f54a71f)) * **tx-pool:** "get_count" returns the repeat transaction. 
([f5612d0](https://github.com/nervosnetwork/muta/commit/f5612d09d02e9183b702f0233aecc14c31779945)) * **tx-pool:** `ensure` method always pull all txs from remote peer ([#194](https://github.com/nervosnetwork/muta/issues/194)) ([9ff300e](https://github.com/nervosnetwork/muta/commit/9ff300e191aa39b6301e481f8f287287b645ba39)) * **tx-pool:** Ensure the number of transactions meets expectations ([dcbf0dd](https://github.com/nervosnetwork/muta/commit/dcbf0dd8cf548ddfe3afb3226d7596637ae615dd)) * **tx-pool:** replace chashmap ([#211](https://github.com/nervosnetwork/muta/issues/211)) ([717f55e](https://github.com/nervosnetwork/muta/commit/717f55e4772c5818ab17e2b1c320b0b98f174122)) * Avoid drop ([4d0f986](https://github.com/nervosnetwork/muta/commit/4d0f986741c392489893f036989db7218db54743)) * build failure ([18ce8e4](https://github.com/nervosnetwork/muta/commit/18ce8e4642d8d27892fee53b9695e4ced7921055)) * jsonrpc call return value ([#104](https://github.com/nervosnetwork/muta/issues/104)) ([1fe41eb](https://github.com/nervosnetwork/muta/commit/1fe41eb491a16588019218144985eec143613c65)) * logic error of bloom filter ([#176](https://github.com/nervosnetwork/muta/issues/176)) ([70269cb](https://github.com/nervosnetwork/muta/commit/70269cb5cefd82f1a14eb5e85df419c1587d19c8)) * merkle typo ([4f63585](https://github.com/nervosnetwork/muta/commit/4f6358565ee8d486be18ac8ff6069b95b597ea4d)) * rlp encode ([b852ac1](https://github.com/nervosnetwork/muta/commit/b852ac147db818cf289b972f054028d293218a19)) * rlp hash ([837055a](https://github.com/nervosnetwork/muta/commit/837055a4eb78ba941004dbc0466955895de8bcab)) * Set quota limit for the genesis.
([#106](https://github.com/nervosnetwork/muta/issues/106)) ([931fe40](https://github.com/nervosnetwork/muta/commit/931fe404453a6f936cbd27bf37d0e326a03e4484)) * write lock ([de80439](https://github.com/nervosnetwork/muta/commit/de80439cb4e7889c1220fc7821604f9ef792422e)) ### Features * add business model support for executor ([#308](https://github.com/nervosnetwork/muta/issues/308)) ([e03396b](https://github.com/nervosnetwork/muta/commit/e03396bb6b964a0c93f43c2684a0e76a55db5540)) * add Deserialize for Hash and Address ([#259](https://github.com/nervosnetwork/muta/issues/259)) ([fef188c](https://github.com/nervosnetwork/muta/commit/fef188c5950fb7f64a92312894efdb4955201a93)) * add docker config for dev ([#197](https://github.com/nervosnetwork/muta/issues/197)) ([6e74aec](https://github.com/nervosnetwork/muta/commit/6e74aec0b51c2bf80c1d1b893130ea74f4a1a8f0)) * add fabric devops scripts ([fcdc25c](https://github.com/nervosnetwork/muta/commit/fcdc25c05b5c30ba38bf6af57885c2f45233d3fc)) * add height to the end of proposal msg ([#255](https://github.com/nervosnetwork/muta/issues/255)) ([c5cbc5e](https://github.com/nervosnetwork/muta/commit/c5cbc5ec70f1dc0fb46ef0bb87c3b994596b4571)) * add more info to version ([#298](https://github.com/nervosnetwork/muta/issues/298)) ([fd02a17](https://github.com/nervosnetwork/muta/commit/fd02a17a68bb6ef59bbd4cded13d69da221237ee)) * peerCount RPC API ([#257](https://github.com/nervosnetwork/muta/issues/257)) ([736ae8c](https://github.com/nervosnetwork/muta/commit/736ae8c7f537a56b01d648cf066f220e47108820)) * **components/cita-jsonrpc:** impl executor related apis ([#80](https://github.com/nervosnetwork/muta/issues/80)) ([bc8f340](https://github.com/nervosnetwork/muta/commit/bc8f34015617e1a01fb2fbb30d9709cdd806daea)) * **components/cita-jsonrpc:** impl get_code and finish some todo ([#87](https://github.com/nervosnetwork/muta/issues/87)) ([e1b0b9d](https://github.com/nervosnetwork/muta/commit/e1b0b9dc8c39965366c5b572905e63cacecdc958)) * 
**components/database:** Implement RocksDB ([#72](https://github.com/nervosnetwork/muta/issues/72)) ([3516fbc](https://github.com/nervosnetwork/muta/commit/3516fbc41338a2f423e0ba56eb96c7fa697a6c77)) * **components/executor:** Add trie db for executor. ([#85](https://github.com/nervosnetwork/muta/issues/85)) ([fd7dc1d](https://github.com/nervosnetwork/muta/commit/fd7dc1da97a4b7dafb1ecbc2813c9506423689a5)) * **components/executor:** Implement EVM executor. ([#68](https://github.com/nervosnetwork/muta/issues/68)) ([021893d](https://github.com/nervosnetwork/muta/commit/021893db432f1ddadc89da9c9251bdb6fb79d925)) * **components/jsonrpc:** implement getStateProof ([#178](https://github.com/nervosnetwork/muta/issues/178)) ([69499fb](https://github.com/nervosnetwork/muta/commit/69499fbb98cbe7f23d426c15ebe67de552dd5d2b)) * **components/jsonrpc:** implement getTransactionProof ([0db8785](https://github.com/nervosnetwork/muta/commit/0db8785475e9d9c098fa123b9c23b4f0eab286dc)) * **components/jsonrpc:** running on microscope ([#200](https://github.com/nervosnetwork/muta/issues/200)) ([1c63a0e](https://github.com/nervosnetwork/muta/commit/1c63a0e3db751b7b7be6f053bed2b66245b105cd)) * **components/jsonrpc:** Try to convert tx to cita::tx ([#221](https://github.com/nervosnetwork/muta/issues/221)) ([b8ab16b](https://github.com/nervosnetwork/muta/commit/b8ab16b05ad01a0c6ef5a7b8d7ad76961e7749ff)) * **core-network:** expose send_buffer_size and recv_buffer_size ([#248](https://github.com/nervosnetwork/muta/issues/248)) ([e5120ad](https://github.com/nervosnetwork/muta/commit/e5120ad646c9d206b43b0d50911303507bdfe381)) * **core-network:** implement peer count feature ([#256](https://github.com/nervosnetwork/muta/issues/256)) ([8f7e7eb](https://github.com/nervosnetwork/muta/commit/8f7e7eb51cdeebfb9c679d88626ac2ec3fa651a4)) * add performance test lua script ([#244](https://github.com/nervosnetwork/muta/issues/244))
([c727b73](https://github.com/nervosnetwork/muta/commit/c727b733340029f72d9280a57e07522f635eff44)) * **core-network:** implement concurrent reactor and real chained reactor ([#175](https://github.com/nervosnetwork/muta/issues/175)) ([dc9f897](https://github.com/nervosnetwork/muta/commit/dc9f897f08801d7b8a418750ed516a8acac057ca)) * **core-p2p:** implement datagram transport protocol ([fee2d45](https://github.com/nervosnetwork/muta/commit/fee2d4546552bd6c46376309eb399126219c55fb)) * **core-p2p:** transmission: use `poll` func to do broadcast ([b376cbe](https://github.com/nervosnetwork/muta/commit/b376cbef9211e55f809f16bb9bab1360dd4b3523)) * **core/consensus:** Implement solo mode for consensus ([e071b15](https://github.com/nervosnetwork/muta/commit/e071b1533b1107f65eb0f97563f011f644d73be6)) * **core/crypto:** Add secp256k1 ([8349eaa](https://github.com/nervosnetwork/muta/commit/8349eaa2817ee8c27e9e8367c89f3469e52b6f8a)) * **core/crypto:** Modify the return type to result. ([9f2424c](https://github.com/nervosnetwork/muta/commit/9f2424ca11fa300f7269f7a32195ec8bbde096e0)) * **core/network:** Support broadcast message ([#185](https://github.com/nervosnetwork/muta/issues/185)) ([992c55f](https://github.com/nervosnetwork/muta/commit/992c55f87458a38629944fb78ee69982d8329b2b)) * **core/types:** Add hash function for the header and receipts ([c982a52](https://github.com/nervosnetwork/muta/commit/c982a52ce29da7f0e783b2a7a52f1d541c15ea10)) * **executor:** Add flush for trie db. ([#240](https://github.com/nervosnetwork/muta/issues/240)) ([23fd538](https://github.com/nervosnetwork/muta/commit/23fd53849ac626cdeaabb165c0534bb90651aa90)) * **jsonrpc:** Implement filter APIs ([#190](https://github.com/nervosnetwork/muta/issues/190)) ([c97ed22](https://github.com/nervosnetwork/muta/commit/c97ed2273b6ddb2385d6d0285f2d5b4d267b130b)) * **tx-pool:** Batch broadcast transactions. 
([#234](https://github.com/nervosnetwork/muta/issues/234)) ([d297b1a](https://github.com/nervosnetwork/muta/commit/d297b1a4d655fdfac25f7f5630253f7e8f6f70ea)) * add synchronizer ([#167](https://github.com/nervosnetwork/muta/issues/167)) ([38db7aa](https://github.com/nervosnetwork/muta/commit/38db7aa3f83e4a35417440e4787c5249b9eace63)) * Implement many JSONRPC APIs ([#166](https://github.com/nervosnetwork/muta/issues/166)) ([807b6a7](https://github.com/nervosnetwork/muta/commit/807b6a73cb098087179d9b086fa0070b6ced74d0)) * Implement RPC getTransactionCount ([#169](https://github.com/nervosnetwork/muta/issues/169)) ([dbf0c51](https://github.com/nervosnetwork/muta/commit/dbf0c51a17f3e285e1146eee3b5e9def08d16d50)) * rewrite network component ([#230](https://github.com/nervosnetwork/muta/issues/230)) ([585dabb](https://github.com/nervosnetwork/muta/commit/585dabb2d52dd70de7ebc26eee59345596301c1a)) * **components/jsonrpc:** Implements sendRawTransaction ([#159](https://github.com/nervosnetwork/muta/issues/159)) ([112d345](https://github.com/nervosnetwork/muta/commit/112d34582c00bea3c05d1663cf07d79aefbfa6a9)) * **core-context:** add `CommonValue` trait and `p2p_session_id` method ([#165](https://github.com/nervosnetwork/muta/issues/165)) ([216b743](https://github.com/nervosnetwork/muta/commit/216b74381c00b15ba61444cf462528ee170fcc41)) * **core/consensus:** Implements BFT ([#158](https://github.com/nervosnetwork/muta/issues/158)) ([e7a3bfd](https://github.com/nervosnetwork/muta/commit/e7a3bfd2f667c9bb8d6b9deb29a57c837ae296b9)) * **core/notify:** add notify as message-bus between components ([b53c50d](https://github.com/nervosnetwork/muta/commit/b53c50dc04090b6b0d5b6725b5c32697446aa5f8)) * **core/serialization:** Add proto file ([0bf7c59](https://github.com/nervosnetwork/muta/commit/0bf7c59200ad4a4cc7994efecaec5d8c683f175a)) * **core/storage:** Add the storage trait ([ffc8776](https://github.com/nervosnetwork/muta/commit/ffc8776b02bc0a4cf785c7c5c47a88266f186b49)) * 
**core/types:** Add the transactions hash calculation function. ([67d8170](https://github.com/nervosnetwork/muta/commit/67d817072c4c03b2fc2eaae5d1dc99d2d41240e0)) * **core/types:** Define serialization and deserialization methods ([f28c63d](https://github.com/nervosnetwork/muta/commit/f28c63d2b4c7b77dbe24e2b50e70cf649a6c714c)) * **database:** Add memory db ([d21a5a2](https://github.com/nervosnetwork/muta/commit/d21a5a29bd20e02f3ddd29f77c3df2963f8f3b4b)) * **jsonrpc:** support batch ([0a0c680](https://github.com/nervosnetwork/muta/commit/0a0c680993ff9be231f1ae8e583171e1f304f79b)) * **main:** add init command for genesis ([#96](https://github.com/nervosnetwork/muta/issues/96)) ([ec752b0](https://github.com/nervosnetwork/muta/commit/ec752b0602800055990fbfcc54bd2c2ab0b2cb60)) * **p2p:** Update to tentacle0.2.0-alpha.5 ([#177](https://github.com/nervosnetwork/muta/issues/177)) ([f6f83b6](https://github.com/nervosnetwork/muta/commit/f6f83b6b263579d66160cfab29b83bd5a709eeb4)) * **pubsub:** Implement pubsub components ([#143](https://github.com/nervosnetwork/muta/issues/143)) ([a079770](https://github.com/nervosnetwork/muta/commit/a079770b0e66e22552bd8cf504a9e1ba0c520d0e)) * **runtime:** add `Context` struct ([#155](https://github.com/nervosnetwork/muta/issues/155)) ([27e5aa7](https://github.com/nervosnetwork/muta/commit/27e5aa7f01f3559d2a9dd17346595c9161a9c0f6)) * Add project framework ([#24](https://github.com/nervosnetwork/muta/issues/24)) ([733ee8e](https://github.com/nervosnetwork/muta/commit/733ee8e6be7649c9aa2d772bb1dc661bd0879917)) * Add transaction pool component. 
([360c935](https://github.com/nervosnetwork/muta/commit/360c93540ea77dc51551a3739e17682600d2b1b7)) * Fill main.rs ([#102](https://github.com/nervosnetwork/muta/issues/102)) ([b5b4c72](https://github.com/nervosnetwork/muta/commit/b5b4c7233efcd1c35e92248b7726ca20644800e9)) * impl cita-jsonrpc ([49e2a2d](https://github.com/nervosnetwork/muta/commit/49e2a2d22d094b2b6a2f71bc5201ccfe28308797)) * update db interface and storage interface ([#137](https://github.com/nervosnetwork/muta/issues/137)) ([36b3d07](https://github.com/nervosnetwork/muta/commit/36b3d07f23e2c7ada870cb699bf138cdd66c2860)) ### Reverts * Revert "chore: Update bft-rs (#203)" (#204) ([cc15ba9](https://github.com/nervosnetwork/muta/commit/cc15ba9ed302ab1389838a4a6c745675106179e9)), closes [#203](https://github.com/nervosnetwork/muta/issues/203) [#204](https://github.com/nervosnetwork/muta/issues/204) # [](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.4...v) (2020-08-12) ### Features * **network:** split transmitter data ([#380](https://github.com/nervosnetwork/muta/issues/380)) ([0322cd6](https://github.com/nervosnetwork/muta/commit/0322cd690cb118f56153e424e9a6bf4b2a11d8b4)) * **network:** verify chain id during protocol handshake ([#406](https://github.com/nervosnetwork/muta/issues/406)) ([e678e92](https://github.com/nervosnetwork/muta/commit/e678e92bf01bc4bc914e74b6fed22c8b55b3cdc7)) # [0.2.0-beta.4](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.3...v0.2.0-beta.4) (2020-08-10) ### Bug Fixes * load hrp before deserializing genesis payload to take hrp effect ([#405](https://github.com/nervosnetwork/muta/issues/405)) ([828e6d5](https://github.com/nervosnetwork/muta/commit/828e6d539cf4da9cf042c450418e75a944315014)) ### Features * **api:** Support enabled TLS ([#402](https://github.com/nervosnetwork/muta/issues/402)) ([c2908a3](https://github.com/nervosnetwork/muta/commit/c2908a3ba6a5ab1219ddc9b14ff6d7320cf70228)) ### Performance Improvements * **state:** add state cache for trieDB 
([#404](https://github.com/nervosnetwork/muta/issues/404)) ([2a08c14](https://github.com/nervosnetwork/muta/commit/2a08c147571707507b72882788fd51f7a799f3ec)) # [0.2.0-beta.3](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.2...v0.2.0-beta.3) (2020-08-07) ### Bug Fixes * **apm:** Return the correct time ([#400](https://github.com/nervosnetwork/muta/issues/400)) ([fd6549a](https://github.com/nervosnetwork/muta/commit/fd6549a6352633cee7b5b747448129df7a0532ca)) ### Features * **network:** limit connections from same ip ([#388](https://github.com/nervosnetwork/muta/issues/388)) ([dc78c13](https://github.com/nervosnetwork/muta/commit/dc78c13b8aa25f3e4535e588149042f6345e4d25)) * **network:** limit inbound and outbound connections ([#393](https://github.com/nervosnetwork/muta/issues/393)) ([3a3111e](https://github.com/nervosnetwork/muta/commit/3a3111e1e332529bc8636c54526920c292c04f8a)) * **sync:** Limit the maximum height of once sync ([#390](https://github.com/nervosnetwork/muta/issues/390)) ([f951a95](https://github.com/nervosnetwork/muta/commit/f951a953daf307ffc98b4df0fe1a77a6a810ac71)) # [0.2.0-beta.2](https://github.com/nervosnetwork/muta/compare/v0.2.0-beta.1...v0.2.0-beta.2) (2020-08-04) ### Bug Fixes * **consensus:** Add timestamp checking ([#377](https://github.com/nervosnetwork/muta/issues/377)) ([382ede9](https://github.com/nervosnetwork/muta/commit/382ede9367b910a06b59f3562ecd28ab8100d39e)) ### Features * **benchmark:** add a perf benchmark macro ([#391](https://github.com/nervosnetwork/muta/issues/391)) ([eb24311](https://github.com/nervosnetwork/muta/commit/eb2431149b6865a82d0e4286536f65319a5e1d1f)) * **Cargo:** add random leader feature for muta ([#385](https://github.com/nervosnetwork/muta/issues/385)) ([43da977](https://github.com/nervosnetwork/muta/commit/43da9772b22b97ab4797b80ce5161f1a49827543)) ### Performance Improvements * **metrics:** Add metrics of state ([#397](https://github.com/nervosnetwork/muta/issues/397)) 
([5822764](https://github.com/nervosnetwork/muta/commit/5822764240f8b4e8cfeca4bccf7d399a0bf71897)) # [0.2.0-beta.1](https://github.com/nervosnetwork/muta/compare/v0.2.0-alpha.1...v0.2.0-beta.1) (2020-08-03) ### Bug Fixes * **consensus:** return an error when committing an outdated block ([#371](https://github.com/nervosnetwork/muta/issues/371)) ([b3d518b](https://github.com/nervosnetwork/muta/commit/b3d518b52658b40746ef708fa8cde5c96a39a539)) * **mempool:** Ensure that there are no duplicate transactions in the order transaction ([#379](https://github.com/nervosnetwork/muta/issues/379)) ([97708ac](https://github.com/nervosnetwork/muta/commit/97708ac385be2243344d700a0d7c928f18fd51b3)) * **storage:** test batch receipts get panic ([#373](https://github.com/nervosnetwork/muta/issues/373)) ([300a3c6](https://github.com/nervosnetwork/muta/commit/300a3c65cf0399c2ba37a3bd655e06719b660330)) ### Features * **network:** tag consensus peer ([#364](https://github.com/nervosnetwork/muta/issues/364)) ([9b27df1](https://github.com/nervosnetwork/muta/commit/9b27df1015a25792cc210c5aa0dd473a45ae885d)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) [#2](https://github.com/nervosnetwork/muta/issues/2) [#3](https://github.com/nervosnetwork/muta/issues/3) [#4](https://github.com/nervosnetwork/muta/issues/4) [#5](https://github.com/nervosnetwork/muta/issues/5) [#6](https://github.com/nervosnetwork/muta/issues/6) [#7](https://github.com/nervosnetwork/muta/issues/7) * Add global panic hook ([#376](https://github.com/nervosnetwork/muta/issues/376)) ([7382279](https://github.com/nervosnetwork/muta/commit/738227962771a6a66b85f2fd199df2e699b43adc)) ### Performance Improvements * **executor:** use inner call instead of service dispatcher ([#365](https://github.com/nervosnetwork/muta/issues/365)) ([7b1d2a3](https://github.com/nervosnetwork/muta/commit/7b1d2a32d5c20306af3868e5265bd2530dd9493b)) ### BREAKING CHANGES * **network:** - replace Validator address bytes with pubkey 
bytes * change(consensus): log validator address instead of its public key Block proposer is address instead public key * fix: compilation failed * **network:** - change users_cast to multicast, take peer_ids bytes instead of Address - network bootstrap configuration now takes peer id instead of pubkey hex * refactor(network): PeerId api # [0.2.0-alpha.1](https://github.com/nervosnetwork/muta/compare/v0.1.2-beta...v0.2.0-alpha.1) (2020-07-22) ### Bug Fixes * **executor:** The logic to deal with tx_hook and tx_body ([#367](https://github.com/nervosnetwork/muta/issues/367)) ([749d558](https://github.com/nervosnetwork/muta/commit/749d558b8b58a1943bfa2842dcedcc45218c0f78)) * **executor:** tx events aren't cleared on execution error ([#313](https://github.com/nervosnetwork/muta/issues/313)) ([1605cf5](https://github.com/nervosnetwork/muta/commit/1605cf59b558b97889bb431da7f81fd424b90a89)) * **proof:** Verify aggregated signature in checking proof ([#308](https://github.com/nervosnetwork/muta/issues/308)) ([d2a98b0](https://github.com/nervosnetwork/muta/commit/d2a98b06e44449ca756f135c1b235ff0d80eaf67)) * **trust_metric_test:** unreliable full node exit check ([#327](https://github.com/nervosnetwork/muta/issues/327)) ([a4ab4a6](https://github.com/nervosnetwork/muta/commit/a4ab4a6209e0978148983e88447ac2d9178fa42a)) * **WAL:** Ignore path already exist ([#304](https://github.com/nervosnetwork/muta/issues/304)) ([02df937](https://github.com/nervosnetwork/muta/commit/02df937fb6449c9b3b0b50e790e0ecf6bfc1ee3d)) ### Performance Improvements * **mempool:** parallel verifying signatures in mempool ([#359](https://github.com/nervosnetwork/muta/issues/359)) ([2ccdf1a](https://github.com/nervosnetwork/muta/commit/2ccdf1a67a40cd483749a98a1a68c37bcf1d473c)) ### Reverts * Revert "refactor(consensus)!: replace Validator address bytes with pubkey bytes (#354)" (#361) ([4dabfa2](https://github.com/nervosnetwork/muta/commit/4dabfa231961d1ec8be1ba42bf05781f55395aed)), closes 
[#354](https://github.com/nervosnetwork/muta/issues/354) [#361](https://github.com/nervosnetwork/muta/issues/361) * refactor(consensus)!: replace Validator address bytes with pubkey bytes (#354) ([e4433d7](https://github.com/nervosnetwork/muta/commit/e4433d793e8a63788ec682880afc93474e0d2414)), closes [#354](https://github.com/nervosnetwork/muta/issues/354) ### Features * **executor:** allow cancel execution units through context ([#317](https://github.com/nervosnetwork/muta/issues/317)) ([eafb489](https://github.com/nervosnetwork/muta/commit/eafb489f78f7521487c6b2d25dd9912e43f76500)) * **executor:** indenpendent tx hook states commit ([#316](https://github.com/nervosnetwork/muta/issues/316)) ([fde6450](https://github.com/nervosnetwork/muta/commit/fde645010363a4664033370e4109e4d1f08b13bc)) * **protocol:** Remove the logs bloom from block header ([#312](https://github.com/nervosnetwork/muta/issues/312)) ([ff1e0df](https://github.com/nervosnetwork/muta/commit/ff1e0df1e8a65cc480825a49eed9495cc31ecee0)) ### BREAKING CHANGES * - replace Validator address bytes with pubkey bytes * change(consensus): log validator address instead of its public key Block proposer is address instead public key * fix: compilation failed ================================================ FILE: CHANGELOG/README.md ================================================ # CHANGELOGs > use: conventional-changelog > > example command: conventional-changelog -p angular -i CHANGELOG-0.2.md -s -r 0.2 - [CHANGELOG-0.1.md](./CHANGELOG-0.1.md) - [CHANGELOG-0.2.md](./CHANGELOG-0.2.md) ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing Our goal is to make contributing to the `muta` project easy and transparent. When contributing to this repository, please first discuss the change you wish to make via issue, or any other method with the community before making a change. 
### Report Issue * Read known issues to see whether the issue is already addressed there. * Search existing issues to see whether others had already posted a similar issue. * **Do not open up a GitHub issue to report security vulnerabilities**. Instead, refer to the [security policy](SECURITY.md). * When creating a new issue, be sure to include a title and clear description. It is appreciated that if you can also attach as much relevant information as possible, such as version, environment, reproducing steps, samples. ### Send PR * See [Code Standards]() for code guidelines. * See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 1. Fork the `muta` repo and create your branch from master. 2. If you have added code that should be tested, add unit tests. 3. Verify and ensure that the test suite passes. 4. Run `make ci` to lint and test the code before commit. 5. Make sure your code passes CI. 6. Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable. 7. Submit your pull request. ## Code of Conduct ### Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
### Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ### Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ### Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ### Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at hello@nervos.org. 
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ### Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq ================================================ FILE: Cargo.toml ================================================ [package] name = "muta" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" [dependencies] cli = { path = "./core/cli"} byzantine = { path = "./byzantine" } common-apm = { path = "./common/apm" } common-config-parser = { path = "./common/config-parser" } common-crypto = { path = "./common/crypto" } common-logger = { path = "./common/logger" } protocol = { path = "./protocol", package = "muta-protocol" } core-api = { path = "./core/api" } core-storage = { path = "./core/storage" } core-mempool = { path = "./core/mempool" } core-network = { path = "./core/network" } core-consensus = { path = "./core/consensus" } binding-macro = { path = "./binding-macro" } framework = { path = "./framework" } backtrace = "0.3" actix-rt = "1.0" derive_more = "0.99" futures = "0.3" parking_lot = "0.11" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" log = "0.4" clap = "2.33" bytes = "0.5" hex = "0.4" rlp = "0.4" toml = "0.5" tokio = { version = "0.2", features = ["macros", 
"sync", "rt-core", "rt-util", "signal", "time"] } muta-apm = "0.1.0-alpha.7" futures-timer="3.0" cita_trie = "2.0" fs_extra = "1.2.0" [dev-dependencies] cita_trie = "2.0" async-trait = "0.1" toml = "0.5" lazy_static = "1.4" muta-codec-derive = "0.2" asset = { path = "built-in-services/asset" } multi-signature = { path = "built-in-services/multi-signature" } authorization = { path = "built-in-services/authorization" } metadata = { path = "built-in-services/metadata"} util = { path = "built-in-services/util"} rand = "0.7" core-network = { path = "./core/network", features = ["diagnostic"] } tokio = { version = "0.2", features = ["full"] } [workspace] members = [ "devtools/keypair", "common/channel", "common/config-parser", "common/crypto", "common/logger", "common/merkle", "common/pubsub", "core/api", "core/consensus", "core/mempool", "core/network", "core/storage", "core/cli", "core/run", "binding-macro", "framework", "built-in-services/asset", "built-in-services/metadata", "built-in-services/multi-signature", "built-in-services/authorization", "protocol", "byzantine", ] [features] default = [] random_leader = ["core-consensus/random_leader"] tentacle_metrics = ["core-network/tentacle_metrics"] [[example]] name = "muta-chain" crate-type = ["bin"] [[test]] name = "trust_metric" path = "tests/trust_metric.rs" required-features = [ "core-network/diagnostic" ] [[test]] name = "verify_chain_id" path = "tests/verify_chain_id.rs" required-features = [ "core-network/diagnostic" ] [[bench]] name = "bench_execute" path = "benchmark/mod.rs" ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2019 Nervos Foundation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: Makefile ================================================ VERBOSE := $(if ${CI},--verbose,) COMMIT := $(shell git rev-parse --short HEAD) ifneq ("$(wildcard /usr/lib/librocksdb.so)","") SYS_LIB_DIR := /usr/lib else ifneq ("$(wildcard /usr/lib64/librocksdb.so)","") SYS_LIB_DIR := /usr/lib64 else USE_SYS_ROCKSDB := endif USE_SYS_ROCKSDB := SYS_ROCKSDB := $(if ${USE_SYS_ROCKSDB},ROCKSDB_LIB_DIR=${SYS_LIB_DIR},) CARGO := env ${SYS_ROCKSDB} cargo test: ${CARGO} test ${VERBOSE} --all -- --skip trust_metric --nocapture doc: cargo doc --all --no-deps doc-deps: cargo doc --all # generate GraphQL API documentation doc-api: bash docs/build/gql_api.sh check: ${CARGO} check ${VERBOSE} --all build: ${CARGO} build ${VERBOSE} --release prod-muta-chain: ${CARGO} build ${VERBOSE} --release --example muta-chain fmt: cargo fmt ${VERBOSE} --all -- --check clippy: ${CARGO} clippy ${VERBOSE} --all --all-targets --all-features -- \ -D warnings -D clippy::clone_on_ref_ptr -D clippy::enum_glob_use ci: fmt clippy test info: date pwd env e2e-test: cargo build --example muta-chain rm -rf ./devtools/chain/data ./target/debug/examples/muta-chain -c ./devtools/chain/config.toml -g ./devtools/chain/genesis.toml > 
/tmp/log 2>&1 & cd tests/e2e && yarn && ./wait-for-it.sh -t 300 localhost:8000 -- yarn run test pkill -2 muta-chain byz-test: cargo build --example muta-chain cargo build --example byzantine_node rm -rf ./devtools/chain/data CONFIG=./examples/config-1.toml GENESIS=./examples/genesis.toml ./target/debug/examples/muta-chain > /tmp/log 2>&1 & CONFIG=./examples/config-2.toml GENESIS=./examples/genesis.toml ./target/debug/examples/muta-chain > /tmp/log 2>&1 & CONFIG=./examples/config-3.toml GENESIS=./examples/genesis.toml ./target/debug/examples/muta-chain > /tmp/log 2>&1 & CONFIG=./examples/config-4.toml GENESIS=./examples/genesis.toml ./target/debug/examples/byzantine_node > /tmp/log 2>&1 & cd byzantine/tests && yarn && ../../tests/e2e/wait-for-it.sh -t 300 localhost:8000 -- yarn run test pkill -2 muta-chain byzantine_node e2e-test-via-docker: docker-compose -f tests/e2e/docker-compose-e2e-test.yaml up --exit-code-from e2e-test --force-recreate # For counting lines of code stats: @cargo count --version || cargo +nightly install --git https://github.com/kbknapp/cargo-count @cargo count --separator , --unsafe-statistics # Use cargo-audit to audit Cargo.lock for crates with security vulnerabilities # expecting to see "Success No vulnerable packages found" security-audit: @cargo audit --version || cargo install cargo-audit @cargo audit .PHONY: build prod prod-test .PHONY: fmt test clippy doc doc-deps doc-api check stats .PHONY: ci info security-audit ================================================ FILE: OWNERS ================================================ # See the OWNERS docs at https://go.k8s.io/owners approvers: - yejiayu - zeroqn - KaoImin - LycrusHamster - rev-chaos - homura - zhouyun-zoe reviewers: - yejiayu - zeroqn - KaoImin - LycrusHamster - rev-chaos - homura - zhouyun-zoe ================================================ FILE: OWNERS_ALIASES ================================================ aliases: - yejiayu - zeroqn - KaoImin - LycrusHamster - rev-chaos - 
homura best-approvers: - yejiayu - zeroqn - KaoImin - LycrusHamster - rev-chaos - homura best-reviewers: - yejiayu - zeroqn - KaoImin - LycrusHamster - rev-chaos - homura ================================================ FILE: README.md ================================================

Build your own blockchain, today

chat on Discord

Developed by Nervos

English | [简体中文](./README_CN.md) ## What is Muta? Muta is a highly customizable high-performance blockchain framework. It has a built-in BFT-like consensus algorithm "Overlord" with high throughput and low latency, and it can also support different virtual machines, including CKB-VM, EVM, and WASM. Muta has interoperability across VMs. Different virtual machines can be used in a Muta-based blockchain at the same time. Developed by the Nervos team, Muta is designed to allow anyone in the world to build their own blockchain while enjoying the security and finality brought by Nervos CKB. Developers can customize PoA, PoS or DPoS chains based on Muta, and use different economic models and governance models. Developers can also develop different application chains (such as DEX chains) based on Muta to implement a specific business logic. Muta's core design philosophy is to make the development of a blockchain state transition as flexible and simple as possible, which means that while reducing the obstacles to build high-performance blockchains, it still maximizes its flexibility to facilitate developers to customize their business logic. Therefore, as a highly customizable high-performance blockchain framework, Muta provides a basic core component that a blockchain system needs, and developers can customize the functional parts of the chain freely. ## Getting Started! 
[Muta Documentation](https://nervosnetwork.github.io/muta-docs/) Quickly build a simple chain and try some simple interaction, please refer to [Quick Start](https://nervosnetwork.github.io/muta-docs/#/en-us/getting_started.md)。 ## The basic core component Muta provided Muta provided all the core components needed to build a blockchain: * [Transaction Pool](https://nervosnetwork.github.io/muta-docs/#/en-us/transaction_pool.md) * [P2P Network](https://nervosnetwork.github.io/muta-docs/#/en-us/network.md) * [Consensus](https://nervosnetwork.github.io/muta-docs/#/en-us/overlord.md) * [Storage](https://nervosnetwork.github.io/muta-docs/#/en-us/storage.md) ## Customizable Part Developers can customize the functional parts of the chain by developing Services. Service is an abstraction layer for extension in Muta framework. Users can define block management, add VMs, etc. based on Service. Each Service, as a relatively independent logical component, can implement its specific function, and at the same time, different services can directly interact with each other, so that more complex functional logic can be constructed. More flexible is that services from different chains can also be reused, which makes it easier for developers to build their own functional modules. We provide detailed service development guides and some service examples. * [Service Development Guide](https://nervosnetwork.github.io/muta-docs/#/en-us/service_dev.md) * [Service Examples](https://nervosnetwork.github.io/muta-docs/#/en-us/service_eg.md) * [Develop a DEX Chain](https://nervosnetwork.github.io/muta-docs/#/en-us/dex.md) ## Developer Resources Developer resources can be found [here](./docs/resources.md) ## Who is using Muta? Muta powers some open source projects.

Is your project using Muta? Edit this page with a Pull Request to add your logo.:tada: ## How to Contribute The contribution workflow is described in [CONTRIBUTING.md](CONTRIBUTING.md), and security policy is described in [SECURITY.md](SECURITY.md). ================================================ FILE: README_CN.md ================================================

让世界上任何一个人都可以搭建属于他们自己的区块链

chat on Discord

由 Nervos 团队开发

[English](./README.md) | 简体中文 ## 什么是 Muta? Muta 是一个高度可定制的高性能区块链框架。它内置了具有高吞吐量和低延迟特性的类 BFT 共识算法「Overlord」,并且可以支持不同的虚拟机,包括 CKB-VM、EVM 和 WASM。Muta 具有跨 VM 的互操作性,不同的虚拟机可以同时在一条基于 Muta 搭建的区块链中使用。Muta 由 Nervos 团队开发,旨在让世界上任何一个人都可以搭建属于他们自己的区块链,同时享受 Nervos CKB 所带来的安全性和最终性。 开发者可以基于 Muta 定制开发 PoA、PoS 或者 DPoS 链,并且可以使用不同的经济模型和治理模型进行部署。开发者也可以基于 Muta 来开发不同的应用链(例如 DEX 链),以实现某种特定的业务逻辑。 Muta 的核心理念是使一个区块链状态转换的开发尽可能的灵活和简便,也就是说在降低开发者搭建高性能区块链障碍的同时,仍然最大限度地保证其灵活性以方便开发者可以自由定制他们的协议。因此,作为一个高度可定制的高性能区块链框架,Muta 提供了一个区块链系统需要有的基础核心组件,开发者可以自由定制链的功能部分。 ## 快速开始! [Muta 文档网站](https://nervosnetwork.github.io/muta-docs/) 快速搭建一条简单的链并尝试简单的交互,请参考[快速开始](https://nervosnetwork.github.io/muta-docs/#/getting_started.md)。 ## Muta 提供哪些基础核心组件? Muta 框架提供了搭建一个分布式区块链网络所需的全部核心组件: * [交易池](https://nervosnetwork.github.io/muta-docs/#/transaction_pool.md) * [P2P 网络](https://nervosnetwork.github.io/muta-docs/#/network.md) * [共识](https://nervosnetwork.github.io/muta-docs/#/overlord.md) * [存储](https://nervosnetwork.github.io/muta-docs/#/storage.md) ## 开发者需要自己实现哪些部分? 开发者可以通过开发 Service 来定制链的功能部分。 Service 是 Muta 框架中用于扩展的抽象层,用户可以基于 Service 定义区块治理、添加 VM 等等。每一个 Service 作为一个相对独立的逻辑化组件,可以实现其特定的功能,同时,不同的 Service 之间又可以直接进行交互,从而可以构建更为复杂的功能逻辑。更为灵活的是,不同链的 Service 还可以复用,这使得开发者们可以更为轻松的搭建自己的功能模块。 我们提供了详细的 Service 开发指南,以及一些 Service 示例。 * [Service 开发指南](https://nervosnetwork.github.io/muta-docs/#service_dev.md) * [Service 示例](https://nervosnetwork.github.io/muta-docs/#service_eg.md) ## 开发资源 可以在[这边](./docs/resources.md)找到相关的开发资源 ## 谁在使用 Muta?

您的项目使用的是 Muta 吗?欢迎在这里添加您项目的 logo 和链接,请点击顶部的 `Edit Document` ,修改本文档的相关内容,并提交 Pull Request 即可:tada: ## 贡献 ![PRs](https://img.shields.io/badge/PRs-welcome-brightgreen.svg) 如何贡献请参考 [CONTRIBUTING.md](CONTRIBUTING.md),Security Policy 请参考 [SECURITY.md](SECURITY.md)。 ================================================ FILE: SECURITY.md ================================================ # Security Policy This project is still under development, the primary goal at this stage is to implement features but we also take security very seriously. This document defines the policy on how to report vulnerabilities and receive updates when patches to those are released. ## Reporting a vulnerability All security bugs should be reported by sending email to [Nervos Security Team](mailto:security@nervos.org). This will deliver a message to Nervos Security Team who handle security issues. Your report will be acknowledged within 24 hours, and you'll receive a more detailed response to your email within 72 hours indicating the next steps in handling your report. After the initial reply to your report the security team will endeavor to keep you informed of the progress being made towards a fix and full announcement. ## Disclosure process 1. Security report received and is assigned a primary handler. This person will coordinate the fix and release process. Problem is confirmed and all affected versions are determined. Code is audited to find any potential similar problems. 2. Fixes are prepared for all supported releases. These fixes are not committed to the public repository but rather held locally pending the announcement. 3. A suggested embargo date for this vulnerability is chosen. This notification will include patches for all supported versions. 4. On the embargo date, the [Nervos security mailing list](#TBD) is sent a copy of the announcement. The changes are pushed to the public repository. 
At least 6 hours after the mailing list is notified, a copy of the advisory will be published on Nervos community channels. This process can take some time, especially when coordination is required with maintainers of other projects. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the release process above to ensure that the disclosure is handled in a consistent manner. ## Receiving disclosures If you require prior notification of vulnerabilities please subscribe to the [Nervos Security mailing list](#TBD). The mailing list is very low traffic, and it receives the public notifications the moment the embargo is lifted. If you have any suggestions to improve this policy, please send an email to [Nervos Security Team](security@nervos.org). ================================================ FILE: benchmark/bench_executor.rs ================================================ #![allow(clippy::needless_collect)] use asset::types::TransferPayload; use super::*; #[rustfmt::skip] /// Bench in Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz (8 x 2200) /// 100 txs bench_execute ... bench: 11,299,912 ns/iter (+/- 3,402,276) /// 1000 txs bench::bench_execute ... bench: 101,187,934 ns/iter (+/- 26,000,469) #[bench] fn bench_execute(b: &mut Bencher) { let mut bench_adapter = BenchmarkAdapter::new(); let payload = TransferPayload { asset_id: NATIVE_ASSET_ID.clone(), to: FEE_INLET_ACCOUNT.clone(), value: 1u64, }; let req = (0..1000).map(|_| TransactionRequest { service_name: "asset".to_string(), method: "transfer".to_string(), payload: serde_json::to_string(&payload).unwrap(), }).collect::>(); perf_exec!(bench_adapter, req, b); } #[rustfmt::skip] /// 10 assets bench::perf_execute ... bench: 109,202,563 ns/iter (+/- 6,378,009) /// 100 assets bench::perf_execute ... bench: 108,859,512 ns/iter (+/- 2,977,622) /// 1000 assets bench::bench_execute ... 
bench: 108,037,404 ns/iter (+/- 4,539,634) /// 10000 assets test bench::perf_execute ... bench: 100,244,123 ns/iter (+/- 18,935,087) #[bench] fn bench_execute_with_assets(b: &mut Bencher) { let mut bench_adapter = BenchmarkAdapter::new(); create_assets(&mut bench_adapter, 10000); let payload = TransferPayload { asset_id: NATIVE_ASSET_ID.clone(), to: FEE_INLET_ACCOUNT.clone(), value: 1u64, }; let req = (0..1000).map(|_| TransactionRequest { service_name: "asset".to_string(), method: "transfer".to_string(), payload: serde_json::to_string(&payload).unwrap(), }).collect::>(); perf_exec!(bench_adapter, req, b); } fn create_assets(bench_adapter: &mut BenchmarkAdapter, num: u64) { let create_assets = (0..num) .map(|n| { let payload = asset::types::CreateAssetPayload { name: "muta_".to_string() + n.to_string().as_str(), symbol: "muta_".to_string() + n.to_string().as_str(), supply: 100_000, }; TransactionRequest { service_name: "asset".to_string(), method: "create_asset".to_string(), payload: serde_json::to_string(&payload).unwrap(), } }) .collect::>(); exec!(bench_adapter, create_assets); } ================================================ FILE: benchmark/bench_mempool.rs ================================================ ================================================ FILE: benchmark/benchmark_genesis.toml ================================================ timestamp = 0 prevhash = "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" [[services]] name = "asset" payload = ''' { "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" } ''' # private key of this admin: # 5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a [[services]] name = "governance" payload = ''' { "info": { "admin": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705", "tx_failure_fee": 10, "tx_floor_fee": 20, "profit_deduct_rate_per_million": 3, 
"tx_fee_discount": [ { "threshold": 1000, "discount_percent": 90 }, { "threshold": 10000, "discount_percent": 70 }, { "threshold": 100000, "discount_percent": 50 } ], "miner_benefit": 10 }, "tx_fee_inlet_address": "muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p", "miner_profit_outlet_address": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705", "miner_charge_map": [] } ''' ================================================ FILE: benchmark/governance/mod.rs ================================================ mod types; use std::cell::RefCell; use std::convert::From; use std::rc::Rc; use bytes::Bytes; use derive_more::{Display, From}; use binding_macro::{genesis, hook_after, service, tx_hook_after, tx_hook_before}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK, StoreMap}; use protocol::try_service_response; use protocol::types::{Address, Hash, ServiceContext, ServiceContextParams}; use asset::types::TransferPayload; use asset::Assets; use types::{GovernanceInfo, InitGenesisPayload}; const INFO_KEY: &str = "admin"; const TX_FEE_INLET_KEY: &str = "fee_address"; const MINER_PROFIT_OUTLET_KEY: &str = "miner_address"; static ADMISSION_TOKEN: Bytes = Bytes::from_static(b"governance"); lazy_static::lazy_static! 
{ pub static ref NATIVE_ASSET_ID: Hash = Hash::from_hex("0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c").unwrap(); } pub struct GovernanceService { sdk: SDK, profits: Box>, miners: Box>, asset: A, } #[service] impl GovernanceService { pub fn new(mut sdk: SDK, asset: A) -> Self { let profits: Box> = sdk.alloc_or_recover_map("profit"); let miners: Box> = sdk.alloc_or_recover_map("miner_address"); Self { sdk, profits, miners, asset, } } #[genesis] fn init_genesis(&mut self, payload: InitGenesisPayload) { assert!(self.profits.is_empty()); let mut info = payload.info; info.tx_fee_discount.sort(); self.sdk.set_value(INFO_KEY.to_string(), info); self.sdk .set_value(TX_FEE_INLET_KEY.to_string(), payload.tx_fee_inlet_address); self.sdk.set_value( MINER_PROFIT_OUTLET_KEY.to_string(), payload.miner_profit_outlet_address, ); for miner in payload.miner_charge_map.into_iter() { self.miners .insert(miner.address, miner.miner_charge_address); } } #[tx_hook_before] fn pledge_fee(&mut self, ctx: ServiceContext) -> ServiceResponse { let info = self .sdk .get_value::<_, GovernanceInfo>(&INFO_KEY.to_owned()); let tx_fee_inlet_address = self .sdk .get_value::<_, Address>(&TX_FEE_INLET_KEY.to_owned()); if info.is_none() || tx_fee_inlet_address.is_none() { return ServiceError::MissingInfo.into(); } let info = info.unwrap(); let tx_fee_inlet_address = tx_fee_inlet_address.unwrap(); let payload = TransferPayload { asset_id: NATIVE_ASSET_ID.clone(), to: tx_fee_inlet_address, value: info.tx_failure_fee, }; // Pledge the tx failure fee before executed the transaction. 
let res = self.asset.transfer_(&ctx, payload); try_service_response!(res); ServiceResponse::from_succeed(String::new()) } #[tx_hook_after] fn deduct_fee(&mut self, ctx: ServiceContext) -> ServiceResponse { let tx_fee_inlet_address = self .sdk .get_value::<_, Address>(&TX_FEE_INLET_KEY.to_owned()); if tx_fee_inlet_address.is_none() { return ServiceError::MissingInfo.into(); } let tx_fee_inlet_address = tx_fee_inlet_address.unwrap(); let payload = TransferPayload { asset_id: NATIVE_ASSET_ID.clone(), to: tx_fee_inlet_address, value: 1, }; let res = self.asset.transfer_(&ctx, payload); try_service_response!(res); ServiceResponse::from_succeed(String::new()) } #[hook_after] fn handle_miner_profit(&mut self, params: &ExecutorParams) { let info = self .sdk .get_value::<_, GovernanceInfo>(&INFO_KEY.to_owned()); let sender_address = self .sdk .get_value::<_, Address>(&MINER_PROFIT_OUTLET_KEY.to_owned()); if info.is_none() || sender_address.is_none() { return; } let info = info.unwrap(); let sender_address = sender_address.unwrap(); let ctx_params = ServiceContextParams { tx_hash: None, nonce: None, cycles_limit: params.cycles_limit, cycles_price: 1, cycles_used: Rc::new(RefCell::new(0)), caller: sender_address, height: params.height, service_name: String::new(), service_method: String::new(), service_payload: String::new(), extra: Some(ADMISSION_TOKEN.clone()), timestamp: params.timestamp, events: Rc::new(RefCell::new(vec![])), }; let recipient_addr = if let Some(addr) = self.miners.get(¶ms.proposer) { addr } else { params.proposer.clone() }; let payload = TransferPayload { asset_id: NATIVE_ASSET_ID.clone(), to: recipient_addr, value: info.miner_benefit, }; let _ = self .asset .transfer_(&ServiceContext::new(ctx_params), payload); } } #[derive(Debug, Display, From)] pub enum ServiceError { NonAuthorized, #[display(fmt = "Can not get governance info")] MissingInfo, #[display(fmt = "calc overflow")] Overflow, #[display(fmt = "query balance failed")] QueryBalance, 
#[display(fmt = "Parsing payload to json failed {:?}", _0)] JsonParse(serde_json::Error), } impl ServiceError { fn code(&self) -> u64 { match self { ServiceError::NonAuthorized => 101, ServiceError::JsonParse(_) => 102, ServiceError::MissingInfo => 103, ServiceError::Overflow => 104, ServiceError::QueryBalance => 105, } } } impl From for ServiceResponse { fn from(err: ServiceError) -> ServiceResponse { ServiceResponse::from_error(err.code(), err.to_string()) } } ================================================ FILE: benchmark/governance/types.rs ================================================ use std::cmp::Ordering; use muta_codec_derive::RlpFixedCodec; use serde::{Deserialize, Serialize}; use protocol::fixed_codec::{FixedCodec, FixedCodecError}; use protocol::types::{Address, Bytes}; use protocol::ProtocolResult; #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct InitGenesisPayload { pub info: GovernanceInfo, pub tx_fee_inlet_address: Address, pub miner_profit_outlet_address: Address, pub miner_charge_map: Vec, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct MinerChargeConfig { pub address: Address, pub miner_charge_address: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default)] pub struct GovernanceInfo { pub admin: Address, pub tx_failure_fee: u64, pub tx_floor_fee: u64, pub profit_deduct_rate_per_million: u64, pub tx_fee_discount: Vec, pub miner_benefit: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default, PartialEq, Eq)] pub struct DiscountLevel { pub threshold: u64, pub discount_percent: u64, } impl PartialOrd for DiscountLevel { fn partial_cmp(&self, other: &DiscountLevel) -> Option { self.threshold.partial_cmp(&other.threshold) } } impl Ord for DiscountLevel { fn cmp(&self, other: &DiscountLevel) -> Ordering { self.threshold.cmp(&other.threshold) } } #[derive(Deserialize, Serialize, Clone, Debug)] pub struct RecordProfitEvent { pub owner: Address, pub 
amount: u64, } #[derive(Deserialize, Serialize, Clone, Debug)] pub struct AccumulateProfitPayload { pub address: Address, pub accumulated_profit: u64, } #[derive(Deserialize, Serialize, Clone, Debug)] pub struct HookTransferFromPayload { pub sender: Address, pub recipient: Address, pub value: u64, pub memo: String, } ================================================ FILE: benchmark/mod.rs ================================================ #![allow(clippy::needless_collect)] #![feature(test)] extern crate test; use std::str::FromStr; use std::sync::Arc; use common_crypto::{Crypto, Secp256k1, Signature}; use core_mempool::DefaultMemPoolAdapter; use core_network::{NetworkConfig, NetworkService, NetworkServiceHandle}; use core_storage::{adapter::rocks::RocksAdapter, ImplStorage}; use framework::binding::state::RocksTrieDB; use framework::executor::{ServiceExecutor, ServiceExecutorFactory}; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ CommonStorage, Context, Executor, ExecutorParams, SDKFactory, Service, ServiceMapping, ServiceSDK, Storage, }; use protocol::types::{ Address, Block, BlockHeader, Bytes, Genesis, Hash, Hex, MerkleRoot, Proof, RawTransaction, SignedTransaction, TransactionRequest, }; use protocol::ProtocolResult; use test::Bencher; use asset::AssetService; use governance::GovernanceService; use multi_signature::MultiSignatureService; const TRIE_PATH: &str = "./free-space/state"; const STORAGE_PATH: &str = "./free-space/block"; lazy_static::lazy_static! 
{ pub static ref FEE_ACCOUNT: Address = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); pub static ref FEE_INLET_ACCOUNT: Address = Address::from_str("muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p").unwrap(); pub static ref PROPOSER_ACCOUNT: Address = Address::from_str("muta1h99h6f54vytatam3ckftrmvcdpn4jlmnwm6hl0").unwrap(); pub static ref NATIVE_ASSET_ID: Hash = Hash::from_hex("0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c").unwrap(); pub static ref PRIV_KEY: Bytes = Hex::from_string("0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a".to_string()).unwrap().decode(); pub static ref PUB_KEY: Bytes = Hex::from_string( "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60".to_string(), ) .unwrap() .decode(); } macro_rules! exec { ($adapter: expr, $payloads: expr) => {{ let stxs = $payloads.into_iter().map(construct_stx).collect::>(); let mut executor = $adapter.create_executor(); let params = $adapter.create_params(); executor.exec(Context::new(), ¶ms, &stxs).unwrap(); $adapter.next_height(); }}; } macro_rules! perf_exec { ($adapter: expr, $payloads: expr, $bencher: expr) => {{ let stxs = $payloads.into_iter().map(construct_stx).collect::>(); let mut executor = $adapter.create_executor(); let params = $adapter.create_params(); $bencher.iter(|| { let txs = stxs.clone(); executor.exec(Context::new(), ¶ms, &txs).unwrap(); }); }}; } mod bench_executor; mod bench_mempool; // This is a test service that provides transaction hooks. 
mod governance; pub struct BenchmarkAdapter { trie_db: Arc, storage: Arc>, height: u64, timestamp: u64, state_root: MerkleRoot, } impl Default for BenchmarkAdapter { fn default() -> Self { BenchmarkAdapter::new() } } impl BenchmarkAdapter { pub fn new() -> Self { let mut rt = tokio::runtime::Builder::new() .core_threads(4) .build() .unwrap(); let rocks_adapter = Arc::new(RocksAdapter::new(STORAGE_PATH, 1024).unwrap()); let toml_str = include_str!("./benchmark_genesis.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let mut ret = BenchmarkAdapter { trie_db: Arc::new(RocksTrieDB::new(TRIE_PATH, false, 1024, 2000).unwrap()), storage: Arc::new(ImplStorage::new(Arc::clone(&rocks_adapter))), height: 1, timestamp: 1, state_root: Hash::default(), }; let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&ret.trie_db), Arc::clone(&ret.storage), Arc::new(MockServiceMapping {}), ) .unwrap(); let genesis_block = BenchmarkAdapter::create_genesis_block(root.clone()); rt.block_on(async { ret.storage .update_latest_proof(Context::new(), genesis_block.header.proof.clone()) .await .expect("save proof"); ret.storage .insert_block(Context::new(), genesis_block) .await .expect("save genesis"); }); ret.state_root = root; ret } pub fn create_executor( &mut self, ) -> ServiceExecutor, RocksTrieDB, MockServiceMapping> { ServiceExecutor::with_root( self.state_root.clone(), Arc::clone(&self.trie_db), Arc::clone(&self.storage), Arc::new(MockServiceMapping {}), ) .unwrap() } pub fn create_params(&mut self) -> ExecutorParams { ExecutorParams { state_root: self.state_root.clone(), height: self.height, timestamp: self.timestamp, cycles_limit: u64::max_value(), proposer: PROPOSER_ACCOUNT.clone(), } } pub fn create_mempool_adapter( &mut self, ) -> DefaultMemPoolAdapter< ServiceExecutorFactory, Secp256k1, NetworkServiceHandle, ImplStorage, RocksTrieDB, MockServiceMapping, > { DefaultMemPoolAdapter::new( NetworkService::new(NetworkConfig::new()).handle(), 
Arc::clone(&self.storage), Arc::clone(&self.trie_db), Arc::new(MockServiceMapping {}), 3000, 100, ) } pub fn next_height(&mut self) { self.height += 1; self.timestamp += 2; } fn create_genesis_block(state_root: MerkleRoot) -> Block { let genesis_block_header = BlockHeader { chain_id: Hash::default(), height: 0, exec_height: 0, prev_hash: Hash::from_empty(), timestamp: 0, order_root: Hash::from_empty(), order_signed_transactions_hash: Hash::from_empty(), confirm_root: vec![], state_root, receipt_root: vec![], cycles_used: vec![], proposer: PROPOSER_ACCOUNT.clone(), proof: Proof { height: 0, round: 0, block_hash: Hash::from_empty(), signature: Bytes::new(), bitmap: Bytes::new(), }, validator_version: 0, validators: vec![], }; Block { header: genesis_block_header, ordered_tx_hashes: vec![], } } } pub fn construct_stx(req: TransactionRequest) -> SignedTransaction { let raw_tx = RawTransaction { chain_id: Hash::default(), nonce: Hash::from_empty(), timeout: 300, cycles_price: 1, cycles_limit: u64::max_value(), request: req, sender: FEE_ACCOUNT.clone(), }; let hash = Hash::digest(raw_tx.encode_fixed().unwrap()); let sig = Secp256k1::sign_message(&hash.as_bytes(), &PRIV_KEY).unwrap(); SignedTransaction { raw: raw_tx, tx_hash: hash, pubkey: Bytes::from(rlp::encode_list::, _>(&[PUB_KEY.clone().to_vec()])), signature: Bytes::from(rlp::encode_list::, _>(&[sig.to_bytes().to_vec()])), } } pub struct MockServiceMapping; impl ServiceMapping for MockServiceMapping { fn get_service>( &self, name: &str, factory: &Factory, ) -> ProtocolResult> { let asset_sdk = factory.get_sdk("asset")?; let governance_sdk = factory.get_sdk("governance")?; let multi_sig_sdk = factory.get_sdk("multi_signature")?; let service = match name { "asset" => Box::new(AssetService::new(asset_sdk)) as Box, "governance" => Box::new(GovernanceService::new( governance_sdk, AssetService::new(asset_sdk), )) as Box, "multi_signature" => { Box::new(MultiSignatureService::new(multi_sig_sdk)) as Box } _ => panic!("not 
found service"), }; Ok(service) } fn list_service_name(&self) -> Vec { vec![ "asset".to_owned(), "governance".to_owned(), "multi_signature".to_owned(), ] } } ================================================ FILE: binding-macro/Cargo.toml ================================================ [package] name = "binding-macro" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [lib] proc-macro = true doctest = false [dependencies] protocol = { path = "../protocol", package = "muta-protocol" } syn = { version = "1.0", features = ["full"] } proc-macro2 = "1.0" quote = "1.0" derive_more = "0.15" serde_json = "1.0" [dev-dependencies] framework = { path = "../framework" } bytes = "0.5" serde = { version = "1.0", features = ["derive"] } ================================================ FILE: binding-macro/src/common.rs ================================================ use syn::{FnArg, Pat, Path, Type}; pub fn get_request_context_pat(bound_name: &str, fn_arg: &FnArg) -> Option { if let FnArg::Typed(pat_type) = &*fn_arg { if let Type::Path(type_path) = &*pat_type.ty { if path_is_request_context(&type_path.path, &bound_name) { return Some(*pat_type.pat.clone()); } } } None } fn path_is_request_context(path: &Path, bound_name: &str) -> bool { // :::: if path.leading_colon.is_some() { return false; } // RequestContext path.segments.len() == 1 && path.segments[0].ident == bound_name } pub fn assert_type(ty: &Type, ty_str: &str) { match ty { Type::Path(ty_path) => { let path = &ty_path.path; assert_eq!(path.leading_colon.is_none(), true); assert_eq!(path.segments.len(), 1); assert_eq!(path.segments[0].ident, ty_str) } _ => panic!("asset type failed"), } } pub fn assert_reference_type(ty: &Type, ty_str: &str) { match ty { Type::Reference(ref_ty) => { let ty_ref = &ref_ty.elem.as_ref(); assert_type(ty_ref, ty_str) } _ => panic!("asset 
reference type failed"), } } // expect &mut self pub fn arg_is_mutable_receiver(fn_arg: &FnArg) -> bool { match fn_arg { FnArg::Receiver(receiver) => receiver.reference.is_some() && receiver.mutability.is_some(), _ => false, } } // expect &self pub fn arg_is_immutable_receiver(fn_arg: &FnArg) -> bool { match fn_arg { FnArg::Receiver(receiver) => receiver.reference.is_some() && receiver.mutability.is_none(), _ => false, } } ================================================ FILE: binding-macro/src/cycles.rs ================================================ use proc_macro::TokenStream; use quote::quote; use syn::parse::{Parse, ParseStream, Result}; use syn::punctuated::Punctuated; use syn::{ parse_macro_input, Block, FnArg, Generics, Ident, ImplItemMethod, ItemFn, LitInt, Pat, ReturnType, Token, Visibility, }; use crate::common::get_request_context_pat; #[derive(Debug)] struct Cycles { value: u64, } impl Parse for Cycles { fn parse(input: ParseStream) -> Result { let lit: LitInt = input.parse()?; let value = lit.base10_parse::()?; Ok(Self { value }) } } struct CyclesFnItem { pub func_name: Ident, pub func_vis: Visibility, pub inputs: Punctuated, pub ret: ReturnType, pub body: Block, pub generics: Generics, } impl Parse for CyclesFnItem { fn parse(input: ParseStream) -> Result { match input.parse::() { Ok(method_item) => Ok(CyclesFnItem { func_name: method_item.sig.ident.clone(), func_vis: method_item.vis.clone(), inputs: method_item.sig.inputs.clone(), ret: method_item.sig.output.clone(), body: method_item.block.clone(), generics: method_item.sig.generics, }), Err(_) => { let item = input.parse::()?; Ok(CyclesFnItem { func_name: item.sig.ident.clone(), func_vis: item.vis.clone(), inputs: item.sig.inputs.clone(), ret: item.sig.output.clone(), body: *item.block.clone(), generics: item.sig.generics, }) } } } } pub fn gen_cycles_code(attr: TokenStream, item: TokenStream) -> TokenStream { let cycles = parse_macro_input!(attr as Cycles); let fn_item = parse_macro_input!(item 
as CyclesFnItem); let func_name = &fn_item.func_name; let func_vis = &fn_item.func_vis; let inputs = &fn_item.inputs; let ret = &fn_item.ret; let body = &fn_item.body; let generics = &fn_item.generics; let request_pat = find_request_ident("ServiceContext", inputs) .expect("The first parameter to read/write must be ServiceContext"); // Extract the variable name of the RequestContext. let request_ident = match request_pat { Pat::Ident(pat_ident) => pat_ident.ident, _ => panic!("Make sure the RequestContext declaration is ctx: ServiceContext."), }; let cycles_value = cycles.value; TokenStream::from(quote! { #func_vis fn #func_name#generics(#inputs) #ret { if !#request_ident.sub_cycles(#cycles_value) { return ServiceResponse::<_>::from_error(3, "cycles macro consume cycles fialed: out of cycles".to_owned()); } #body } }) } fn find_request_ident(bound_name: &str, inputs: &Punctuated) -> Option { for fn_arg in inputs { let opt_request_pat = get_request_context_pat(bound_name, &fn_arg); if opt_request_pat.is_some() { return opt_request_pat; } } None } ================================================ FILE: binding-macro/src/hooks.rs ================================================ use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, FnArg, ImplItemMethod}; use crate::common::{arg_is_mutable_receiver, assert_reference_type}; pub fn verify_hook(item: TokenStream) -> TokenStream { let method_item = parse_macro_input!(item as ImplItemMethod); let inputs = &method_item.sig.inputs; assert_eq!(inputs.len(), 2); assert!(arg_is_mutable_receiver(&inputs[0])); match &inputs[1] { FnArg::Typed(pt) => { let ty = pt.ty.as_ref(); assert_reference_type(ty, "ExecutorParams") } _ => panic!("The second parameter type should be `&ExecutorParams`."), } TokenStream::from(quote! 
{#method_item}) } ================================================ FILE: binding-macro/src/lib.rs ================================================ extern crate proc_macro; mod common; mod cycles; mod hooks; mod read_write; mod service; use proc_macro::TokenStream; use crate::cycles::gen_cycles_code; use crate::hooks::verify_hook; use crate::read_write::verify_read_or_write; use crate::service::gen_service_code; #[rustfmt::skip] /// `#[genesis]` marks a service method to generate genesis states when fire up the chain /// /// Method input params should be `(&mut self)` or `(&mut self, payload: PayloadType)` /// /// # Example: /// /// ```rust /// struct Service; /// #[service] /// impl Service { /// #[genesis] /// fn init_genesis( /// &mut self, /// ) { /// do_work(); /// } /// } /// ``` /// /// Or /// /// ```rust /// struct Service; /// #[service] /// impl Service { /// #[genesis] /// fn init_genesis( /// &mut self, /// payload: PayloadType, /// ) { /// do_work(payload); /// } /// } /// ``` #[proc_macro_attribute] pub fn genesis(_: TokenStream, item: TokenStream) -> TokenStream { item } #[proc_macro_attribute] pub fn tx_hook_before(_: TokenStream, item: TokenStream) -> TokenStream { item } #[proc_macro_attribute] pub fn tx_hook_after(_: TokenStream, item: TokenStream) -> TokenStream { item } #[rustfmt::skip] /// `#[read]` marks a service method as readable. /// /// Methods marked with this macro will have: /// Methods with this macro allow access (readable) from outside (RPC or other services). /// /// - Verification /// 1. Is it a struct method marked with #[service]? /// 2. Is visibility private? /// 3. Parameter signature contains `&self and ctx: ServiceContext`? /// 4. Is the return value `ServiceResponse`? 
/// /// # Example: /// /// ```rust /// struct Service; /// #[service] /// impl Service { /// #[read] /// fn test_read_fn( /// &self, /// _ctx: ServiceContext, /// ) -> ServiceResponse { /// ServiceResponse::::from_succeed("ok".to_owned()) /// } /// } /// ``` #[proc_macro_attribute] pub fn read(_: TokenStream, item: TokenStream) -> TokenStream { verify_read_or_write(item, false) } #[rustfmt::skip] /// `#[write]` marks a service method as writable. /// /// Methods marked with this macro will have: /// - Accessibility /// Methods with this macro allow access (writeable) from outside (RPC or other services). /// /// - Verification /// 1. Is it a struct method marked with #[service]? /// 2. Is visibility private? /// 3. Parameter signature contains `&self and ctx: ServiceContext`? /// 4. Is the return value `ServiceResponse`? /// /// # Example: /// /// ```rust /// struct Service; /// #[service] /// impl Service { /// #[write] /// fn test_write_fn( /// &mut self, /// _ctx: ServiceContext, /// ) -> ServiceResponse { /// ServiceResponse::::from_succeed("ok".to_owned()) /// } /// } /// ``` #[proc_macro_attribute] pub fn write(_: TokenStream, item: TokenStream) -> TokenStream { verify_read_or_write(item, true) } #[rustfmt::skip] /// `# [cycles]` mark an `ImplFn` or `fn`, it will automatically generate code /// to complete the cycle deduction, /// /// ```rust /// // Source Code /// impl Tests { /// #[cycles(100)] /// fn test_cycles(&self, ctx: ServiceContext) -> ServiceResponse<()> { /// ServiceResponse::<()>::from_succeed(()) /// } /// } /// /// // Generated code. /// impl Tests { /// fn test_cycles(&self, ctx: ServiceContext) -> ServiceResponse<()> { /// ctx.sub_cycles(100); /// ServiceResponse::<()>::from_succeed(()) /// } /// } /// ``` #[proc_macro_attribute] pub fn cycles(attr: TokenStream, item: TokenStream) -> TokenStream { gen_cycles_code(attr, item) } /// Marks a method so that it executes after the entire block executes. 
// TODO(@yejiayu): Verify the function signature. #[proc_macro_attribute] pub fn hook_after(_: TokenStream, item: TokenStream) -> TokenStream { verify_hook(item) } /// Marks a method so that it executes before the entire block executes. // TODO(@yejiayu): Verify the function signature. #[proc_macro_attribute] pub fn hook_before(_: TokenStream, item: TokenStream) -> TokenStream { verify_hook(item) } #[rustfmt::skip] /// Marking a ImplItem for service, it will automatically trait /// `protocol::traits::Service`. /// /// # Example /// /// use serde::{Deserialize, Serialize}; /// use protocol::traits::ServiceSDK; /// use protocol::types::ServiceContext; /// use protocol::ProtocolResult; /// /// ```rust /// // Source code /// /// // serde::Deserialize and serde::Serialize are required. /// #[derive(Serialize, Deserialize)] /// struct CreateKittyPayload { /// // fields /// } /// /// // serde::Deserialize and serde::Serialize are required. /// #[derive(Serialize, Deserialize)] /// struct GetKittyPayload { /// // fields /// } /// /// #[service] /// impl KittyService { /// #[hook_before] /// fn custom_hook_before(&mut self) { /// // Do something /// } /// /// #[hook_after] /// fn custom_hook_after(&mut self) { /// // Do something /// } /// /// #[read] /// fn get_kitty( /// &self, /// ctx: ServiceContext, /// payload: GetKittyPayload, /// ) -> ServiceResponse { /// // Do something /// } /// /// #[write] /// fn create_kitty( /// &mut self, /// ctx: ServiceContext, /// payload: CreateKittyPayload, /// ) -> ServiceResponse { /// // Do something /// } /// } /// /// // Generated code. 
/// impl Service for KittyService { /// fn hook_before_(&mut self) { /// self.custom_hook_before() /// } /// /// fn hook_after(&mut self) { /// self.custom_hook_after() /// } /// /// fn write(&mut self, ctx: ServiceContext) -> ServiceResponse { /// let method = ctx.get_service_method(); /// /// match ctx.get_service_method() { /// "create_kitty" => { /// let payload_res: Result = serde_json::from_str(ctx.get_payload()); /// if payload_res.is_error() { /// return ServiceResponse::::from_error(1, "service macro decode payload failed".to_owned()); /// }; /// let payload = payload_res.unwrap(); /// let res = self.#list_read_ident(ctx, payload); /// if !res.is_error() { /// let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("service macro encode payload failed: {:?}", e)); /// if data_json == "null" { /// data_json = "".to_owned(); /// } /// ServiceResponse::::from_succeed(data_json) /// } else { /// ServiceResponse::::from_error(res.code, res.error_message.clone()) /// } /// _ => panic!("service macro not found method:{:?} of service:{:?}", method, service), /// } /// } /// /// fn read(&self, ctx: ServiceContext) -> ProtocolResult<&str> { /// let method = ctx.get_service_method(); /// /// match ctx.get_service_method() { /// "get_kitty" => { /// let payload_res: Result = serde_json::from_str(ctx.get_payload()); /// if payload_res.is_error() { /// return ServiceResponse::::from_error(1, "service macro decode payload failed".to_owned()); /// }; /// let payload = payload_res.unwrap(); /// let res = self.#list_read_ident(ctx, payload); /// if !res.is_error() { /// let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("service macro encode payload failed: {:?}", e)); /// if data_json == "null" { /// data_json = "".to_owned(); /// } /// ServiceResponse::::from_succeed(data_json) /// } else { /// ServiceResponse::::from_error(res.code, res.error_message.clone()) /// } /// _ => panic!("service macro not found 
method:{:?} of service:{:?}", method, service), /// } /// } /// } /// ``` #[proc_macro_attribute] pub fn service(attr: TokenStream, item: TokenStream) -> TokenStream { gen_service_code(attr, item) } ================================================ FILE: binding-macro/src/read_write.rs ================================================ use proc_macro::TokenStream; use quote::quote; use syn::punctuated::Punctuated; use syn::{parse_macro_input, FnArg, ImplItemMethod, ReturnType, Token, Visibility}; use crate::common::{arg_is_immutable_receiver, arg_is_mutable_receiver, assert_type}; pub fn verify_read_or_write(item: TokenStream, mutable: bool) -> TokenStream { let method_item = parse_macro_input!(item as ImplItemMethod); let visibility = &method_item.vis; let inputs = &method_item.sig.inputs; let ret_type = &method_item.sig.output; verify_visibiity(visibility); verify_inputs(inputs, mutable); verify_ret_type(ret_type); TokenStream::from(quote! {#method_item}) } fn verify_visibiity(visibility: &Visibility) { match visibility { Visibility::Inherited => {} _ => panic!("The visibility of read/write method must be private"), }; } fn verify_inputs(inputs: &Punctuated, mutable: bool) { if inputs.len() < 2 || inputs.len() > 3 { panic!("The input parameters should be `(&self/&mut self, ctx: ServiceContext)` or `(&self/&mut self, ctx: ServiceContext, payload: PayloadType)`") } if mutable { if !arg_is_mutable_receiver(&inputs[0]) { panic!("The receiver must be `&mut self`.") } } else if !arg_is_immutable_receiver(&inputs[0]) { panic!("The receiver must be `&self`.") } match &inputs[1] { FnArg::Typed(pt) => { let ty = pt.ty.as_ref(); assert_type(ty, "ServiceContext") } _ => panic!("The second parameter type should be `ServiceContext`."), } } fn verify_ret_type(ret_type: &ReturnType) { let real_ret_type = match ret_type { ReturnType::Type(_, t) => t.as_ref(), _ => panic!("The return type of read/write method must be protocol::ProtocolResult"), }; assert_type(&real_ret_type, 
"ServiceResponse"); } ================================================ FILE: binding-macro/src/service.rs ================================================ use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, FnArg, Ident, ImplItem, ImplItemMethod, ItemImpl, Type}; const READ_ATTRIBUTE: &str = "read"; const WRITE_ATTRIBUTE: &str = "write"; const GENESIS_ATTRIBUTE: &str = "genesis"; const HOOK_BEFORE_ATTRIBUTE: &str = "hook_before"; const HOOK_AFTER_ATTRIBUTE: &str = "hook_after"; const TX_HOOK_BEFORE_ATTRIBUTE: &str = "tx_hook_before"; const TX_HOOK_AFTER_ATTRIBUTE: &str = "tx_hook_after"; enum ServiceMethod { Read(ImplItemMethod), Write(ImplItemMethod), } struct Hooks { before: Option, after: Option, tx_before: Option, tx_after: Option, } struct MethodMeta { method_ident: Ident, payload_ident: Option, readonly: bool, } pub fn gen_service_code(_: TokenStream, item: TokenStream) -> TokenStream { let impl_item = parse_macro_input!(item as ItemImpl); let service_ident = get_service_ident(&impl_item); let items = &impl_item.items; let (impl_generics, ty_generics, where_clause) = impl_item.generics.split_for_impl(); let mut methods: Vec = vec![]; for item in items { if let ImplItem::Method(method) = item { if let Some(service_method) = find_service_method(method) { methods.push(service_method) } } } let genesis_method = find_genesis(items); let genesis_body = match genesis_method { Some(genesis_method) => get_genesis_body(&genesis_method), None => quote! {()}, }; let hooks = extract_hooks(items); let hook_before = &hooks.before; let hook_before_body = match hook_before { Some(hook_before) => quote! { self.#hook_before(_params) }, None => quote! {()}, }; let hook_after = &hooks.after; let hook_after_body = match hook_after { Some(hook_after) => quote! { self.#hook_after(_params) }, None => quote! {()}, }; let tx_hook_before = &hooks.tx_before; let tx_hook_before_body = match tx_hook_before { Some(tx_hook_before) => quote! 
{ let res = self.#tx_hook_before(_ctx); if !res.is_error() { let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("encode succeed_data of ServiceResponse failed: {:?}", e)); if data_json == "null" { data_json = "".to_owned(); } ServiceResponse::::from_succeed(data_json) } else { ServiceResponse::::from_error(res.code, res.error_message.clone()) } }, None => quote! {ServiceResponse::::from_succeed("".to_owned())}, }; let tx_hook_after = &hooks.tx_after; let tx_hook_after_body = match tx_hook_after { Some(tx_hook_after) => { quote! { let res = self.#tx_hook_after(_ctx); if !res.is_error() { let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("encode succeed_data of ServiceResponse failed: {:?}", e)); if data_json == "null" { data_json = "".to_owned(); } ServiceResponse::::from_succeed(data_json) } else { ServiceResponse::::from_error(res.code, res.error_message.clone()) } } } None => quote! {ServiceResponse::::from_succeed("".to_owned())}, }; let list_method_meta: Vec = methods.into_iter().map(extract_method_meta).collect(); let (list_read_name, list_read_ident, list_read_payload) = split_list_for_metadata(&list_method_meta, true); let (list_write_name, list_write_ident, list_write_payload) = split_list_for_metadata(&list_method_meta, false); let (list_read_name_nonepayload, list_read_ident_nonepayload) = split_list_for_metadata_nonepayload(&list_method_meta, true); let (list_write_name_nonepayload, list_write_ident_nonepayload) = split_list_for_metadata_nonepayload(&list_method_meta, false); TokenStream::from(quote! 
{ impl #impl_generics protocol::traits::Service for #service_ident #ty_generics #where_clause { fn genesis_(&mut self, _payload: String) { #genesis_body } fn hook_before_(&mut self, _params: &ExecutorParams) { #hook_before_body } fn hook_after_(&mut self, _params: &ExecutorParams) { #hook_after_body } fn tx_hook_before_(&mut self, _ctx: ServiceContext) -> ServiceResponse { #tx_hook_before_body } fn tx_hook_after_(&mut self, _ctx: ServiceContext) -> ServiceResponse { #tx_hook_after_body } fn read_(&self, ctx: protocol::types::ServiceContext) -> ServiceResponse { let service = ctx.get_service_name(); let method = ctx.get_service_method(); match method { #(#list_read_name => { let payload_res: Result<#list_read_payload, _> = serde_json::from_str(ctx.get_payload()); if payload_res.is_err() { return ServiceResponse::::from_error(1, "decode service payload failed".to_owned()); }; let payload = payload_res.unwrap(); let res = self.#list_read_ident(ctx, payload); if !res.is_error() { let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("encode succeed_data of ServiceResponse failed: {:?}", e)); if data_json == "null" { data_json = "".to_owned(); } ServiceResponse::::from_succeed(data_json) } else { ServiceResponse::::from_error(res.code, res.error_message.clone()) } },)* #(#list_read_name_nonepayload => { let res = self.#list_read_ident_nonepayload(ctx); if !res.is_error() { let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("encode succeed_data of ServiceResponse failed: {:?}", e)); if data_json == "null" { data_json = "".to_owned(); } ServiceResponse::::from_succeed(data_json) } else { ServiceResponse::::from_error(res.code, res.error_message.clone()) } },)* _ => ServiceResponse::::from_error(2, format!("not found method:{:?} of service:{:?}", method, service)) } } fn write_(&mut self, ctx: protocol::types::ServiceContext) -> ServiceResponse { let service = ctx.get_service_name(); let method = 
ctx.get_service_method(); match method { #(#list_write_name => { let payload_res: Result<#list_write_payload, _> = serde_json::from_str(ctx.get_payload()); if payload_res.is_err() { return ServiceResponse::::from_error(1, "decode service payload failed".to_owned()); }; let payload = payload_res.unwrap(); let res = self.#list_write_ident(ctx, payload); if !res.is_error() { let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("encode succeed_data of ServiceResponse failed: {:?}", e)); if data_json == "null" { data_json = "".to_owned(); } ServiceResponse::::from_succeed(data_json) } else { ServiceResponse::::from_error(res.code, res.error_message.clone()) } },)* #(#list_write_name_nonepayload => { let res = self.#list_write_ident_nonepayload(ctx); if !res.is_error() { let mut data_json = serde_json::to_string(&res.succeed_data).unwrap_or_else(|e| panic!("encode succeed_data of ServiceResponse failed: {:?}", e)); if data_json == "null" { data_json = "".to_owned(); } ServiceResponse::::from_succeed(data_json) } else { ServiceResponse::::from_error(res.code, res.error_message.clone()) } },)* _ => ServiceResponse::::from_error(2, format!("not found method:{:?} of service:{:?}", method, service)) } } } #impl_item }) } fn split_list_for_metadata( list: &[MethodMeta], readonly: bool, ) -> (Vec, Vec, Vec) { let mut methods = vec![]; let mut method_idents = vec![]; let mut payload_idents = vec![]; list.iter() .filter(|meta| meta.readonly == readonly && meta.payload_ident.is_some()) .for_each(|meta| { methods.push(meta.method_ident.to_string()); method_idents.push(meta.method_ident.clone()); payload_idents.push( meta.payload_ident .as_ref() .expect("MethodMeta should have payload ident") .clone(), ); }); (methods, method_idents, payload_idents) } fn split_list_for_metadata_nonepayload( list: &[MethodMeta], readonly: bool, ) -> (Vec, Vec) { let mut methods = vec![]; let mut method_idents = vec![]; list.iter() .filter(|meta| meta.readonly == 
readonly && meta.payload_ident.is_none()) .for_each(|meta| { methods.push(meta.method_ident.to_string()); method_idents.push(meta.method_ident.clone()); }); (methods, method_idents) } fn get_service_ident(impl_item: &ItemImpl) -> Ident { match &*impl_item.self_ty { Type::Path(type_path) => type_path.path.segments[0].ident.clone(), _ => panic!("The identity of the service was not found."), } } fn find_service_method(method: &ImplItemMethod) -> Option { let attrs = &method.attrs; for attr in attrs { for segment in &attr.path.segments { if segment.ident == READ_ATTRIBUTE { return Some(ServiceMethod::Read(method.clone())); } else if segment.ident == WRITE_ATTRIBUTE { return Some(ServiceMethod::Write(method.clone())); } } } None } fn find_genesis(items: &[ImplItem]) -> Option { let methods: Vec = find_list_for_item_method(items); let mut count = 0; let mut genesis: Option = None; for method in methods { for attr in &method.attrs { for segment in &attr.path.segments { if segment.ident == GENESIS_ATTRIBUTE { if count == 0 { genesis = Some(method.clone()); count = 1; } else { panic!("The genesis method can only have one") } } } } } genesis } fn get_genesis_body(item: &ImplItemMethod) -> proc_macro2::TokenStream { let method_name = item.sig.ident.clone(); match item.sig.inputs.len() { 1 => quote!{ self.#method_name()}, 2 => { let payload_arg = &item.sig.inputs[1]; let pat_type = match payload_arg { FnArg::Typed(pat_type) => pat_type, _ => unreachable!(), }; let payload_ident = if let Type::Path(path) = &*pat_type.ty { Some(path.path.get_ident().expect("No payload type found.").clone()) } else { panic!("No payload type found.") }; quote!{ let payload: #payload_ident = serde_json::from_str(&_payload) .unwrap_or_else(|e| panic!("decode genesis payload failed: {:?}", e)); self.#method_name(payload) } }, _ => panic!("genesis method input params should be `(&mut self)` or `(&mut self, payload: PayloadType)`") } } fn extract_hooks(items: &[ImplItem]) -> Hooks { let methods: Vec = 
find_list_for_item_method(items); let mut hooks = Hooks { before: None, after: None, tx_before: None, tx_after: None, }; let mut before_count = 0; let mut after_count = 0; let mut tx_before_count = 0; let mut tx_after_count = 0; for method in methods { for attr in &method.attrs { for segment in &attr.path.segments { if segment.ident == HOOK_BEFORE_ATTRIBUTE { if before_count == 0 { hooks.before = Some(method.sig.ident.clone()); before_count = 1; } else { panic!("The before hook can only have one") } } else if segment.ident == HOOK_AFTER_ATTRIBUTE { if after_count == 0 { hooks.after = Some(method.sig.ident.clone()); after_count = 1; } else { panic!("The after hook can only have one") } } else if segment.ident == TX_HOOK_BEFORE_ATTRIBUTE { if tx_before_count == 0 { hooks.tx_before = Some(method.sig.ident.clone()); tx_before_count = 1; } else { panic!("The tx before hook can only have one") } } else if segment.ident == TX_HOOK_AFTER_ATTRIBUTE { if tx_after_count == 0 { hooks.tx_after = Some(method.sig.ident.clone()); tx_after_count = 1; } else { panic!("The tx after hook can only have one") } } } } } hooks } fn find_list_for_item_method(items: &[ImplItem]) -> Vec { items .iter() .filter(|item| matches!(item, ImplItem::Method(_))) .map(|item| { if let ImplItem::Method(method) = item { method.clone() } else { unreachable!() } }) .collect() } fn extract_method_meta(method: ServiceMethod) -> MethodMeta { let (impl_method, readonly) = match method { ServiceMethod::Read(impl_method) => (impl_method, true), ServiceMethod::Write(impl_method) => (impl_method, false), }; match &impl_method.sig.inputs.len() { // Method input params: `(&self/&mut self, ctx: ServiceContext)` 2 => { MethodMeta { method_ident: impl_method.sig.ident, payload_ident: None, readonly, } }, // Method input params: `(&self/&mut self, ctx: ServiceContext, payload: PayloadType)` 3 => { let payload_arg = &impl_method.sig.inputs[2]; let pat_type = match payload_arg { FnArg::Typed(pat_type) => pat_type, _ => 
unreachable!(), }; let payload_ident = if let Type::Path(path) = &*pat_type.ty { Some(path.path.get_ident().expect("No payload type found.").clone()) } else { panic!("No payload type found.") }; MethodMeta { method_ident: impl_method.sig.ident, payload_ident, readonly, } }, _ => panic!("Method input params should be `(&self/&mut self, ctx: ServiceContext)` or `(&self/&mut self, ctx: ServiceContext, payload: PayloadType)`") } } ================================================ FILE: binding-macro/tests/mod.rs ================================================ #![allow(clippy::unit_cmp)] #[macro_use] extern crate binding_macro; use std::cell::RefCell; use std::panic::{self, AssertUnwindSafe}; use std::rc::Rc; use serde::{Deserialize, Serialize}; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ ExecutorParams, Service, ServiceResponse, ServiceSDK, StoreArray, StoreBool, StoreMap, StoreString, StoreUint64, }; use protocol::types::{ Address, Block, Hash, Receipt, ServiceContext, ServiceContextParams, SignedTransaction, }; #[test] fn test_read_and_write() { struct Tests; #[service] impl Tests { #[read] fn test_read_fn(&self, _ctx: ServiceContext) -> ServiceResponse { ServiceResponse::::from_succeed("read".to_owned()) } #[write] fn test_write_fn(&mut self, _ctx: ServiceContext) -> ServiceResponse { ServiceResponse::::from_succeed("write".to_owned()) } } let context = get_context(1000, "", "", ""); let mut t = Tests {}; assert_eq!( t.test_read_fn(context.clone()).succeed_data, "read".to_owned() ); assert_eq!(t.test_write_fn(context).succeed_data, "write".to_owned()); } #[test] fn test_hooks() { struct Tests { pub height: u64, }; #[service] impl Tests { #[hook_after] fn hook_after(&mut self, params: &ExecutorParams) { self.height = params.height; } #[hook_before] fn hook_before(&mut self, params: &ExecutorParams) { self.height = params.height; } } let mut t = Tests { height: 0 }; t.hook_after(&mock_executor_params()); assert_eq!(t.height, 9); 
t.hook_before(&mock_executor_params()); assert_eq!(t.height, 9); } #[test] fn test_tx_hooks() { struct Tests { pub height: u64, }; #[service] impl Tests { #[tx_hook_after] fn tx_hook_after(&mut self, _ctx: ServiceContext) -> ServiceResponse<()> { self.height = 9; ServiceResponse::from_succeed(()) } #[tx_hook_before] fn tx_hook_before(&mut self, _ctx: ServiceContext) -> ServiceResponse<()> { self.height = 10; ServiceResponse::from_succeed(()) } } let mut t = Tests { height: 0 }; let context = get_context(1000, "", "", ""); t.tx_hook_after(context.clone()); assert_eq!(t.height, 9); t.tx_hook_before(context); assert_eq!(t.height, 10); } #[test] fn test_read_and_write_with_noneparams() { struct Tests; #[service] impl Tests { #[read] fn test_read_fn(&self, _ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } #[write] fn test_write_fn(&mut self, _ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } } let context = get_context(1000, "", "", ""); let mut t = Tests {}; assert_eq!(t.test_read_fn(context.clone()).succeed_data, ()); assert_eq!(t.test_write_fn(context).succeed_data, ()); } #[test] fn test_cycles() { struct Tests; #[service] impl Tests { #[cycles(100)] fn test_cycles(&self, ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } #[cycles(500)] fn test_cycles2(&self, ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } } #[cycles(200)] fn test_sub_cycles_fn1(ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } #[cycles(200)] fn test_sub_cycles_fn2(_foo: u64, ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } let t = Tests {}; let context = get_context(1000, "", "", ""); t.test_cycles(context.clone()); assert_eq!(context.get_cycles_used(), 100); t.test_cycles2(context.clone()); assert_eq!(context.get_cycles_used(), 600); test_sub_cycles_fn1(context.clone()); 
assert_eq!(context.get_cycles_used(), 800); test_sub_cycles_fn2(1, context.clone()); assert_eq!(context.get_cycles_used(), 1000); } #[test] fn test_service() { #[derive(Serialize, Deserialize, Debug)] struct TestServicePayload { name: String, age: u64, sex: bool, } #[derive(Serialize, Deserialize, Debug, Default)] struct TestServiceResponse { pub message: String, } struct Tests { _sdk: SDK, genesis_data: String, hook_before: bool, hook_after: bool, } #[service] impl Tests { #[genesis] fn init_genesis(&mut self) { self.genesis_data = "genesis".to_owned(); } #[hook_before] fn custom_hook_before(&mut self, _params: &ExecutorParams) { self.hook_before = true; } #[hook_after] fn custom_hook_after(&mut self, _params: &ExecutorParams) { self.hook_after = true; } #[read] fn test_read( &self, _ctx: ServiceContext, _payload: TestServicePayload, ) -> ServiceResponse { let res = TestServiceResponse { message: "read ok".to_owned(), }; ServiceResponse::::from_succeed(res) } #[write] fn test_write( &mut self, _ctx: ServiceContext, _payload: TestServicePayload, ) -> ServiceResponse { let res = TestServiceResponse { message: "write ok".to_owned(), }; ServiceResponse::::from_succeed(res) } } let payload = TestServicePayload { name: "test".to_owned(), age: 10, sex: false, }; let payload_str = serde_json::to_string(&payload).unwrap(); let sdk = MockServiceSDK {}; let mut test_service = Tests { _sdk: sdk, genesis_data: "".to_owned(), hook_after: false, hook_before: false, }; test_service.genesis_("".to_owned()); assert_eq!(test_service.genesis_data, "genesis"); let context = get_context(1024 * 1024, "", "test_write", &payload_str); let write_res = test_service.write_(context).succeed_data; assert_eq!(write_res, r#"{"message":"write ok"}"#); let context = get_context(1024 * 1024, "", "test_read", &payload_str); let read_res = test_service.read_(context).succeed_data; assert_eq!(read_res, r#"{"message":"read ok"}"#); let context = get_context(1024 * 1024, "", "test_notfound", 
&payload_str); let read_res = panic::catch_unwind(AssertUnwindSafe(|| test_service.read_(context.clone()))); assert_eq!(read_res.unwrap().is_error(), true); let write_res = panic::catch_unwind(AssertUnwindSafe(|| test_service.write_(context))); assert_eq!(write_res.unwrap().is_error(), true); test_service.hook_before_(&mock_executor_params()); assert_eq!(test_service.hook_before, true); test_service.hook_after_(&mock_executor_params()); assert_eq!(test_service.hook_after, true); } #[test] fn test_service_none_payload() { #[derive(Serialize, Deserialize, Debug, Default)] struct TestServiceResponse { pub message: String, } struct Tests { _sdk: SDK, genesis_data: String, hook_before: bool, hook_after: bool, } #[service] impl Tests { #[genesis] fn init_genesis(&mut self) { self.genesis_data = "genesis".to_owned(); } #[hook_before] fn custom_hook_before(&mut self, _params: &ExecutorParams) { self.hook_before = true; } #[hook_after] fn custom_hook_after(&mut self, _params: &ExecutorParams) { self.hook_after = true; } #[read] fn test_read(&self, _ctx: ServiceContext) -> ServiceResponse { let res = TestServiceResponse { message: "read ok".to_owned(), }; ServiceResponse::::from_succeed(res) } #[write] fn test_write(&mut self, _ctx: ServiceContext) -> ServiceResponse { let res = TestServiceResponse { message: "write ok".to_owned(), }; ServiceResponse::::from_succeed(res) } } let sdk = MockServiceSDK {}; let mut test_service = Tests { _sdk: sdk, genesis_data: "".to_owned(), hook_after: false, hook_before: false, }; test_service.genesis_("".to_owned()); assert_eq!(test_service.genesis_data, "genesis"); let context = get_context(1024 * 1024, "", "test_write", ""); let write_res = test_service.write_(context).succeed_data; assert_eq!(write_res, r#"{"message":"write ok"}"#); let context = get_context(1024 * 1024, "", "test_read", ""); let read_res = test_service.read_(context).succeed_data; assert_eq!(read_res, r#"{"message":"read ok"}"#); let context = get_context(1024 * 1024, 
"", "test_notfound", ""); let read_res = panic::catch_unwind(AssertUnwindSafe(|| test_service.read_(context.clone()))); assert_eq!(read_res.unwrap().is_error(), true); let write_res = panic::catch_unwind(AssertUnwindSafe(|| test_service.write_(context))); assert_eq!(write_res.unwrap().is_error(), true); test_service.hook_before_(&mock_executor_params()); assert_eq!(test_service.hook_before, true); test_service.hook_after_(&mock_executor_params()); assert_eq!(test_service.hook_after, true); } #[test] fn test_service_none_response() { struct Tests { _sdk: SDK, genesis_data: String, hook_before: bool, hook_after: bool, } #[service] impl Tests { #[genesis] fn init_genesis(&mut self) { self.genesis_data = "genesis".to_owned(); } #[hook_before] fn custom_hook_before(&mut self, _params: &ExecutorParams) { self.hook_before = true; } #[hook_after] fn custom_hook_after(&mut self, _params: &ExecutorParams) { self.hook_after = true; } #[read] fn test_read(&self, _ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } #[write] fn test_write(&mut self, _ctx: ServiceContext) -> ServiceResponse<()> { ServiceResponse::<()>::from_succeed(()) } } let sdk = MockServiceSDK {}; let mut test_service = Tests { _sdk: sdk, genesis_data: "".to_owned(), hook_after: false, hook_before: false, }; test_service.genesis_("".to_owned()); assert_eq!(test_service.genesis_data, "genesis"); let context = get_context(1024 * 1024, "", "test_write", ""); let write_res = test_service.write_(context).succeed_data; assert_eq!(write_res, ""); let context = get_context(1024 * 1024, "", "test_read", ""); let read_res = test_service.read_(context).succeed_data; assert_eq!(read_res, ""); let context = get_context(1024 * 1024, "", "test_notfound", ""); let read_res = panic::catch_unwind(AssertUnwindSafe(|| test_service.read_(context.clone()))); assert_eq!(read_res.unwrap().is_error(), true); let write_res = panic::catch_unwind(AssertUnwindSafe(|| test_service.write_(context))); 
assert_eq!(write_res.unwrap().is_error(), true); test_service.hook_before_(&mock_executor_params()); assert_eq!(test_service.hook_before, true); test_service.hook_after_(&mock_executor_params()); assert_eq!(test_service.hook_after, true); } fn get_context(cycles_limit: u64, service: &str, method: &str, payload: &str) -> ServiceContext { let params = ServiceContextParams { tx_hash: None, nonce: None, cycles_limit, cycles_price: 1, cycles_used: Rc::new(RefCell::new(0)), caller: Address::from_hash(Hash::from_empty()).unwrap(), height: 1, timestamp: 0, service_name: service.to_owned(), service_method: method.to_owned(), service_payload: payload.to_owned(), extra: None, events: Rc::new(RefCell::new(vec![])), }; ServiceContext::new(params) } fn mock_executor_params() -> ExecutorParams { ExecutorParams { state_root: Hash::default(), height: 9, timestamp: 99, cycles_limit: 99999, proposer: Address::from_hash(Hash::from_empty()).unwrap(), } } struct MockServiceSDK; impl ServiceSDK for MockServiceSDK { // Alloc or recover a `Map` by` var_name` fn alloc_or_recover_map( &mut self, _var_name: &str, ) -> Box> { unimplemented!() } // Alloc or recover a `Array` by` var_name` fn alloc_or_recover_array( &mut self, _var_name: &str, ) -> Box> { unimplemented!() } // Alloc or recover a `Uint64` by` var_name` fn alloc_or_recover_uint64(&mut self, _var_name: &str) -> Box { unimplemented!() } // Alloc or recover a `String` by` var_name` fn alloc_or_recover_string(&mut self, _var_name: &str) -> Box { unimplemented!() } // Alloc or recover a `Bool` by` var_name` fn alloc_or_recover_bool(&mut self, _var_name: &str) -> Box { unimplemented!() } // Get a value from the service state by key fn get_value(&self, _key: &Key) -> Option { unimplemented!() } // Set a value to the service state by key fn set_value(&mut self, _key: Key, _val: Val) { unimplemented!() } // Get a value from the specified address by key fn get_account_value( &self, _address: &Address, _key: &Key, ) -> Option { 
unimplemented!() } // Insert a pair of key / value to the specified address fn set_account_value( &mut self, _address: &Address, _key: Key, _val: Val, ) { unimplemented!() } // Get a signed transaction by `tx_hash` // if not found on the chain, return None fn get_transaction_by_hash(&self, _tx_hash: &Hash) -> Option { unimplemented!() } // Get a block by `height` // if not found on the chain, return None // When the parameter `height` is None, get the latest (executing)` block` fn get_block_by_height(&self, _height: Option) -> Option { unimplemented!() } // Get a receipt by `tx_hash` // if not found on the chain, return None fn get_receipt_by_hash(&self, _tx_hash: &Hash) -> Option { unimplemented!() } } ================================================ FILE: built-in-services/asset/Cargo.toml ================================================ [package] name = "asset" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] binding-macro = { path = "../../binding-macro" } protocol = { path = "../../protocol", package = "muta-protocol" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" rlp = "0.4" bytes = "0.5" derive_more = "0.99" byteorder = "1.3" muta-codec-derive = "0.2" [dev-dependencies] cita_trie = "2.0" async-trait = "0.1" framework = { path = "../../framework" } ================================================ FILE: built-in-services/asset/src/lib.rs ================================================ #![allow(clippy::mutable_key_type)] #[cfg(test)] mod tests; pub mod types; use std::collections::BTreeMap; use binding_macro::{cycles, genesis, service}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK, StoreMap}; use protocol::try_service_response; use protocol::types::{Address, Bytes, Hash, ServiceContext}; use crate::types::{ ApproveEvent, ApprovePayload, 
Asset, AssetBalance, CreateAssetPayload, GetAllowancePayload, GetAllowanceResponse, GetAssetPayload, GetBalancePayload, GetBalanceResponse, InitGenesisPayload, TransferEvent, TransferFromEvent, TransferFromPayload, TransferPayload, }; pub const ASSET_SERVICE_NAME: &str = "asset"; pub trait Assets { fn create_(&mut self, ctx: &ServiceContext, payload: CreateAssetPayload) -> ServiceResponse<()>; fn balance_( &self, ctx: &ServiceContext, payload: GetBalancePayload, ) -> ServiceResponse; fn transfer_(&mut self, ctx: &ServiceContext, payload: TransferPayload) -> ServiceResponse<()>; fn transfer_from_( &mut self, ctx: &ServiceContext, payload: TransferFromPayload, ) -> ServiceResponse<()>; fn allowance_( &self, ctx: &ServiceContext, payload: GetAllowancePayload, ) -> ServiceResponse; } pub struct AssetService { sdk: SDK, assets: Box>, } impl Assets for AssetService { fn create_( &mut self, ctx: &ServiceContext, payload: CreateAssetPayload, ) -> ServiceResponse<()> { let res = self.create_asset(ctx.clone(), payload); try_service_response!(res); ServiceResponse::from_succeed(()) } fn balance_( &self, ctx: &ServiceContext, payload: GetBalancePayload, ) -> ServiceResponse { self.get_balance(ctx.clone(), payload) } fn transfer_(&mut self, ctx: &ServiceContext, payload: TransferPayload) -> ServiceResponse<()> { self.transfer(ctx.clone(), payload) } fn transfer_from_( &mut self, ctx: &ServiceContext, payload: TransferFromPayload, ) -> ServiceResponse<()> { self.transfer_from(ctx.clone(), payload) } fn allowance_( &self, ctx: &ServiceContext, payload: GetAllowancePayload, ) -> ServiceResponse { self.get_allowance(ctx.clone(), payload) } } #[service] impl AssetService { pub fn new(mut sdk: SDK) -> Self { let assets: Box> = sdk.alloc_or_recover_map("assets"); Self { sdk, assets } } #[genesis] fn init_genesis(&mut self, payload: InitGenesisPayload) { let asset = Asset { id: payload.id, name: payload.name, symbol: payload.symbol, supply: payload.supply, issuer: 
payload.issuer.clone(), }; self.assets.insert(asset.id.clone(), asset.clone()); let asset_balance = AssetBalance { value: payload.supply, allowance: BTreeMap::new(), }; self.sdk .set_account_value(&asset.issuer, asset.id, asset_balance) } #[cycles(10_000)] #[read] fn get_asset(&self, ctx: ServiceContext, payload: GetAssetPayload) -> ServiceResponse { if let Some(asset) = self.assets.get(&payload.id) { ServiceResponse::::from_succeed(asset) } else { ServiceResponse::::from_error(101, "asset id not existed".to_owned()) } } #[cycles(10_000)] #[read] fn get_balance( &self, ctx: ServiceContext, payload: GetBalancePayload, ) -> ServiceResponse { if !self.assets.contains(&payload.asset_id) { return ServiceResponse::::from_error( 101, "asset id not existed".to_owned(), ); } let asset_balance = self .sdk .get_account_value(&payload.user, &payload.asset_id) .unwrap_or(AssetBalance { value: 0, allowance: BTreeMap::new(), }); let res = GetBalanceResponse { asset_id: payload.asset_id, user: payload.user, balance: asset_balance.value, }; ServiceResponse::::from_succeed(res) } #[cycles(10_000)] #[read] fn get_allowance( &self, ctx: ServiceContext, payload: GetAllowancePayload, ) -> ServiceResponse { if !self.assets.contains(&payload.asset_id) { return ServiceResponse::::from_error( 101, "asset id not existed".to_owned(), ); } let opt_asset_balance: Option = self .sdk .get_account_value(&payload.grantor, &payload.asset_id); if let Some(v) = opt_asset_balance { let allowance = v.allowance.get(&payload.grantee).unwrap_or(&0); let res = GetAllowanceResponse { asset_id: payload.asset_id, grantor: payload.grantor, grantee: payload.grantee, value: *allowance, }; ServiceResponse::::from_succeed(res) } else { let res = GetAllowanceResponse { asset_id: payload.asset_id, grantor: payload.grantor, grantee: payload.grantee, value: 0, }; ServiceResponse::::from_succeed(res) } } #[cycles(21_000)] #[write] fn create_asset( &mut self, ctx: ServiceContext, payload: CreateAssetPayload, ) -> 
ServiceResponse { let caller = ctx.get_caller(); let payload_res = serde_json::to_string(&payload); if let Err(e) = payload_res { return ServiceResponse::::from_error(103, format!("{:?}", e)); } let payload_str = payload_res.unwrap(); let id = Hash::digest(Bytes::from(payload_str + &caller.to_string())); if self.assets.contains(&id) { return ServiceResponse::::from_error(102, "asset id existed".to_owned()); } let asset = Asset { id: id.clone(), name: payload.name, symbol: payload.symbol, supply: payload.supply, issuer: caller, }; self.assets.insert(id, asset.clone()); let asset_balance = AssetBalance { value: payload.supply, allowance: BTreeMap::new(), }; self.sdk .set_account_value(&asset.issuer, asset.id.clone(), asset_balance); let event_res = serde_json::to_string(&asset); if let Err(e) = event_res { return ServiceResponse::::from_error(103, format!("{:?}", e)); } let event_str = event_res.unwrap(); ctx.emit_event( ASSET_SERVICE_NAME.to_owned(), "CreateAsset".to_owned(), event_str, ); ServiceResponse::::from_succeed(asset) } #[cycles(21_000)] #[write] pub fn transfer( &mut self, ctx: ServiceContext, payload: TransferPayload, ) -> ServiceResponse<()> { let caller = ctx.get_caller(); let asset_id = payload.asset_id.clone(); let value = payload.value; let to = payload.to; if !self.assets.contains(&payload.asset_id) { return ServiceResponse::<()>::from_error(101, "asset id not existed".to_owned()); } if let Err(e) = self._transfer(caller.clone(), to.clone(), asset_id.clone(), value) { return ServiceResponse::<()>::from_error(106, format!("{:?}", e)); }; let event = TransferEvent { asset_id, from: caller, to, value, }; let event_res = serde_json::to_string(&event); if let Err(e) = event_res { return ServiceResponse::<()>::from_error(103, format!("{:?}", e)); }; let event_str = event_res.unwrap(); ctx.emit_event( ASSET_SERVICE_NAME.to_owned(), "TransferAsset".to_owned(), event_str, ); ServiceResponse::<()>::from_succeed(()) } #[cycles(21_000)] #[write] fn 
approve(&mut self, ctx: ServiceContext, payload: ApprovePayload) -> ServiceResponse<()> {
        let caller = ctx.get_caller();
        let asset_id = payload.asset_id.clone();
        let value = payload.value;
        let to = payload.to;

        // Granting an allowance to oneself is meaningless; reject it early.
        if caller == to {
            return ServiceResponse::<()>::from_error(104, "cann't approve to yourself".to_owned());
        }
        if !self.assets.contains(&payload.asset_id) {
            return ServiceResponse::<()>::from_error(101, "asset id not existed".to_owned());
        }

        // Load the grantor's record; accounts that never held this asset get
        // an empty default so the allowance can still be recorded.
        let mut caller_asset_balance: AssetBalance = self
            .sdk
            .get_account_value(&caller, &asset_id)
            .unwrap_or(AssetBalance {
                value: 0,
                allowance: BTreeMap::new(),
            });

        // `approve` overwrites any previous grant unconditionally, so a plain
        // insert is equivalent to (and simpler than) entry().and_modify().
        caller_asset_balance.allowance.insert(to.clone(), value);

        self.sdk
            .set_account_value(&caller, asset_id.clone(), caller_asset_balance);

        let event = ApproveEvent {
            asset_id,
            grantor: caller,
            grantee: to,
            value,
        };
        // Serialize the event; `match` avoids moving the Result before use.
        let event_str = match serde_json::to_string(&event) {
            Ok(s) => s,
            Err(e) => return ServiceResponse::<()>::from_error(103, format!("{:?}", e)),
        };
        ctx.emit_event(
            ASSET_SERVICE_NAME.to_owned(),
            "ApproveAsset".to_owned(),
            event_str,
        );
        ServiceResponse::<()>::from_succeed(())
    }

    /// Moves `value` of `asset_id` from `payload.sender` to
    /// `payload.recipient`, spending the allowance the sender previously
    /// granted to the transaction caller via `approve`.
    #[cycles(21_000)]
    #[write]
    pub fn transfer_from(
        &mut self,
        ctx: ServiceContext,
        payload: TransferFromPayload,
    ) -> ServiceResponse<()> {
        let caller = ctx.get_caller();
        let sender = payload.sender;
        let recipient = payload.recipient;
        let asset_id = payload.asset_id;
        let value = payload.value;

        if !self.assets.contains(&asset_id) {
            return ServiceResponse::<()>::from_error(101, "asset id not existed".to_owned());
        }

        let mut sender_asset_balance: AssetBalance = self
            .sdk
            .get_account_value(&sender, &asset_id)
            .unwrap_or(AssetBalance {
                value: 0,
                allowance: BTreeMap::new(),
            });

        // Hold the mutable entry once; the previous code looked the key up a
        // second time just to write the decremented value back.
        let sender_allowance = sender_asset_balance
            .allowance
            .entry(caller.clone())
            .or_insert(0);
        if *sender_allowance < value {
            // NOTE(review): this is an *allowance* shortfall; the message is
            // kept byte-identical for compatibility with existing consumers.
            return ServiceResponse::<()>::from_error(105, "insufficient balance".to_owned());
        }
        // Cannot underflow: checked `*sender_allowance >= value` just above.
        *sender_allowance -= value;

        self.sdk
            .set_account_value(&sender, asset_id.clone(), sender_asset_balance);

        // `_transfer` re-reads the sender record, which now carries the
        // decremented allowance persisted above.
        if let Err(e) = self._transfer(sender.clone(), recipient.clone(), asset_id.clone(), value) {
            return ServiceResponse::<()>::from_error(106, format!("{:?}", e));
        }

        let event = TransferFromEvent {
            asset_id,
            caller,
            sender,
            recipient,
            value,
        };
        let event_str = match serde_json::to_string(&event) {
            Ok(s) => s,
            Err(e) => return ServiceResponse::<()>::from_error(103, format!("{:?}", e)),
        };
        ctx.emit_event(
            ASSET_SERVICE_NAME.to_owned(),
            "TransferFrom".to_owned(),
            event_str,
        );
        ServiceResponse::<()>::from_succeed(())
    }

    /// Moves `value` of `asset_id` from `sender` to `recipient`.
    ///
    /// Credits the recipient before debiting the sender (order preserved from
    /// the original). Returns an error string on self-transfer, insufficient
    /// balance, or u64 overflow; state is only partially written if the
    /// (unreachable) debit underflow triggers.
    fn _transfer(
        &mut self,
        sender: Address,
        recipient: Address,
        asset_id: Hash,
        value: u64,
    ) -> Result<(), String> {
        if recipient == sender {
            return Err("cann't send value to yourself".to_owned());
        }

        let mut sender_asset_balance: AssetBalance = self
            .sdk
            .get_account_value(&sender, &asset_id)
            .unwrap_or(AssetBalance {
                value: 0,
                allowance: BTreeMap::new(),
            });
        if sender_asset_balance.value < value {
            return Err("insufficient balance".to_owned());
        }

        let mut to_asset_balance: AssetBalance = self
            .sdk
            .get_account_value(&recipient, &asset_id)
            .unwrap_or(AssetBalance {
                value: 0,
                allowance: BTreeMap::new(),
            });

        // checked_* replaces the overflowing_* + manual-flag pattern; same
        // error string, same control flow.
        to_asset_balance.value = to_asset_balance
            .value
            .checked_add(value)
            .ok_or_else(|| "u64 overflow".to_owned())?;
        self.sdk
            .set_account_value(&recipient, asset_id.clone(), to_asset_balance);

        // Cannot underflow after the balance check above; kept as a checked
        // op to match the original's defensive error path.
        sender_asset_balance.value = sender_asset_balance
            .value
            .checked_sub(value)
            .ok_or_else(|| "u64 overflow".to_owned())?;
        self.sdk
            .set_account_value(&sender, asset_id, sender_asset_balance);

        Ok(())
    }
}
================================================ FILE: built-in-services/asset/src/tests/mod.rs
================================================ use std::cell::RefCell; use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; use async_trait::async_trait; use cita_trie::MemoryDB; use framework::binding::sdk::{DefaultChainQuerier, DefaultServiceSDK}; use framework::binding::state::{GeneralServiceState, MPTTrie}; use protocol::traits::{CommonStorage, Context, Storage}; use protocol::types::{ Address, Block, BlockHeader, Hash, Proof, Receipt, ServiceContext, ServiceContextParams, SignedTransaction, }; use protocol::ProtocolResult; use crate::types::{ ApprovePayload, CreateAssetPayload, GetAllowancePayload, GetAssetPayload, GetBalancePayload, TransferFromPayload, TransferPayload, }; use crate::AssetService; #[test] fn test_create_asset() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let context = mock_context(cycles_limit, caller.clone()); let mut service = new_asset_service(); let supply = 1024 * 1024; // test create_asset let asset = service .create_asset(context.clone(), CreateAssetPayload { name: "test".to_owned(), symbol: "test".to_owned(), supply, }) .succeed_data; let new_asset = service .get_asset(context.clone(), GetAssetPayload { id: asset.id.clone(), }) .succeed_data; assert_eq!(asset, new_asset); let balance_res = service .get_balance(context, GetBalancePayload { asset_id: asset.id.clone(), user: caller, }) .succeed_data; assert_eq!(balance_res.balance, supply); assert_eq!(balance_res.asset_id, asset.id); } #[test] fn test_transfer() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let context = mock_context(cycles_limit, caller.clone()); let mut service = new_asset_service(); let supply = 1024 * 1024; // test create_asset let asset = service .create_asset(context.clone(), CreateAssetPayload { name: "test".to_owned(), symbol: "test".to_owned(), supply, }) 
.succeed_data; let to_address = Address::from_str("muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p").unwrap(); service.transfer(context.clone(), TransferPayload { asset_id: asset.id.clone(), to: to_address.clone(), value: 1024, }); let balance_res = service .get_balance(context, GetBalancePayload { asset_id: asset.id.clone(), user: caller, }) .succeed_data; assert_eq!(balance_res.balance, supply - 1024); let context = mock_context(cycles_limit, to_address.clone()); let balance_res = service .get_balance(context, GetBalancePayload { asset_id: asset.id, user: to_address, }) .succeed_data; assert_eq!(balance_res.balance, 1024); } #[test] fn test_approve() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let context = mock_context(cycles_limit, caller.clone()); let mut service = new_asset_service(); let supply = 1024 * 1024; let asset = service .create_asset(context.clone(), CreateAssetPayload { name: "test".to_owned(), symbol: "test".to_owned(), supply, }) .succeed_data; let to_address = Address::from_str("muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p").unwrap(); service.approve(context.clone(), ApprovePayload { asset_id: asset.id.clone(), to: to_address.clone(), value: 1024, }); let allowance_res = service .get_allowance(context, GetAllowancePayload { asset_id: asset.id.clone(), grantor: caller, grantee: to_address.clone(), }) .succeed_data; assert_eq!(allowance_res.asset_id, asset.id); assert_eq!(allowance_res.grantee, to_address); assert_eq!(allowance_res.value, 1024); } #[test] fn test_transfer_from() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let context = mock_context(cycles_limit, caller.clone()); let mut service = new_asset_service(); let supply = 1024 * 1024; let asset = service .create_asset(context.clone(), CreateAssetPayload { name: "test".to_owned(), symbol: 
"test".to_owned(),
            supply,
        })
        .succeed_data;
    let to_address = Address::from_str("muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p").unwrap();

    // The approve/transfer_from responses were previously discarded; assert
    // success so a silent failure cannot leave the later balance checks
    // exercising stale state.
    let approve_res = service.approve(context.clone(), ApprovePayload {
        asset_id: asset.id.clone(),
        to: to_address.clone(),
        value: 1024,
    });
    assert!(!approve_res.is_error());

    let to_context = mock_context(cycles_limit, to_address.clone());
    let transfer_from_res = service.transfer_from(to_context.clone(), TransferFromPayload {
        asset_id: asset.id.clone(),
        sender: caller.clone(),
        recipient: to_address.clone(),
        value: 24,
    });
    assert!(!transfer_from_res.is_error());

    // Allowance must drop by the transferred amount: 1024 - 24 = 1000.
    let allowance_res = service
        .get_allowance(context.clone(), GetAllowancePayload {
            asset_id: asset.id.clone(),
            grantor: caller.clone(),
            grantee: to_address.clone(),
        })
        .succeed_data;
    assert_eq!(allowance_res.asset_id, asset.id);
    assert_eq!(allowance_res.grantee, to_address);
    assert_eq!(allowance_res.value, 1000);

    let balance_res = service
        .get_balance(context, GetBalancePayload {
            asset_id: asset.id.clone(),
            user: caller,
        })
        .succeed_data;
    assert_eq!(balance_res.balance, supply - 24);

    let balance_res = service
        .get_balance(to_context, GetBalancePayload {
            asset_id: asset.id,
            user: to_address,
        })
        .succeed_data;
    assert_eq!(balance_res.balance, 24);
}

/// Builds an `AssetService` backed by an in-memory MPT and a stub storage.
///
/// NOTE(review): the generic arguments below were reconstructed — the source
/// extraction stripped `<...>` segments; confirm against the repository.
fn new_asset_service() -> AssetService<
    DefaultServiceSDK<GeneralServiceState<MemoryDB>, DefaultChainQuerier<MockStorage>>,
> {
    let chain_db = DefaultChainQuerier::new(Arc::new(MockStorage {}));
    let trie = MPTTrie::new(Arc::new(MemoryDB::new(false)));
    let state = GeneralServiceState::new(trie);
    let sdk = DefaultServiceSDK::new(Rc::new(RefCell::new(state)), Rc::new(chain_db));
    AssetService::new(sdk)
}

/// Builds a test `ServiceContext` with the given cycle limit and caller;
/// block height is fixed at 1 and no tx hash/nonce is attached.
fn mock_context(cycles_limit: u64, caller: Address) -> ServiceContext {
    let params = ServiceContextParams {
        tx_hash: None,
        nonce: None,
        cycles_limit,
        cycles_price: 1,
        cycles_used: Rc::new(RefCell::new(0)),
        caller,
        height: 1,
        timestamp: 0,
        service_name: "service_name".to_owned(),
        service_method: "service_method".to_owned(),
        service_payload: "service_payload".to_owned(),
        extra: None,
        events: Rc::new(RefCell::new(vec![])),
    };
    ServiceContext::new(params)
}

// Storage stub: every method is unreachable in these tests.
struct MockStorage;

#[async_trait]
impl CommonStorage for
MockStorage { async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult> { unimplemented!() } async fn set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn get_block_header( &self, _ctx: Context, _height: u64, ) -> ProtocolResult> { unimplemented!() } } #[async_trait] impl Storage for MockStorage { async fn insert_transactions( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn insert_receipts(&self, _ctx: Context, _: u64, _: Vec) -> ProtocolResult<()> { unimplemented!() } async fn update_latest_proof(&self, _ctx: Context, _: Proof) -> ProtocolResult<()> { unimplemented!() } async fn get_transaction_by_hash( &self, _ctx: Context, _: &Hash, ) -> ProtocolResult> { unimplemented!() } async fn get_transactions( &self, _ctx: Context, _: u64, _: &[Hash], ) -> ProtocolResult>> { unimplemented!() } async fn get_receipt_by_hash(&self, _ctx: Context, _: Hash) -> ProtocolResult> { unimplemented!() } async fn get_receipts( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult>> { unimplemented!() } async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } ================================================ FILE: built-in-services/asset/src/types.rs ================================================ use std::collections::BTreeMap; use muta_codec_derive::RlpFixedCodec; use serde::{Deserialize, Serialize}; use protocol::fixed_codec::{FixedCodec, FixedCodecError}; use 
protocol::types::{Address, Bytes, Hash}; use protocol::ProtocolResult; /// Payload #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct InitGenesisPayload { pub id: Hash, pub name: String, pub symbol: String, pub supply: u64, pub issuer: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct CreateAssetPayload { pub name: String, pub symbol: String, pub supply: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct GetAssetPayload { pub id: Hash, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct TransferPayload { pub asset_id: Hash, pub to: Address, pub value: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct TransferEvent { pub asset_id: Hash, pub from: Address, pub to: Address, pub value: u64, } pub type ApprovePayload = TransferPayload; #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct ApproveEvent { pub asset_id: Hash, pub grantor: Address, pub grantee: Address, pub value: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct TransferFromPayload { pub asset_id: Hash, pub sender: Address, pub recipient: Address, pub value: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct TransferFromEvent { pub asset_id: Hash, pub caller: Address, pub sender: Address, pub recipient: Address, pub value: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct GetBalancePayload { pub asset_id: Hash, pub user: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default)] pub struct GetBalanceResponse { pub asset_id: Hash, pub user: Address, pub balance: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct GetAllowancePayload { pub asset_id: Hash, pub grantor: Address, pub grantee: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default)] pub struct GetAllowanceResponse { 
pub asset_id: Hash, pub grantor: Address, pub grantee: Address, pub value: u64, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, PartialEq, Default)] pub struct Asset { pub id: Hash, pub name: String, pub symbol: String, pub supply: u64, pub issuer: Address, } pub struct AssetBalance { pub value: u64, pub allowance: BTreeMap, } #[derive(RlpFixedCodec)] struct AllowanceCodec { pub addr: Address, pub total: u64, } impl rlp::Decodable for AssetBalance { fn decode(rlp: &rlp::Rlp) -> Result { let value = rlp.at(0)?.as_val()?; let codec_list: Vec = rlp::decode_list(rlp.at(1)?.as_raw()); let mut allowance = BTreeMap::new(); for v in codec_list { allowance.insert(v.addr, v.total); } Ok(AssetBalance { value, allowance }) } } impl rlp::Encodable for AssetBalance { fn rlp_append(&self, s: &mut rlp::RlpStream) { s.begin_list(2); s.append(&self.value); let mut codec_list = Vec::with_capacity(self.allowance.len()); for (address, allowance) in self.allowance.iter() { let fixed_codec = AllowanceCodec { addr: address.clone(), total: *allowance, }; codec_list.push(fixed_codec); } s.append_list(&codec_list); } } impl FixedCodec for AssetBalance { fn encode_fixed(&self) -> ProtocolResult { Ok(Bytes::from(rlp::encode(self))) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(rlp::decode(bytes.as_ref()).map_err(FixedCodecError::from)?) 
} } ================================================ FILE: built-in-services/authorization/Cargo.toml ================================================ [package] name = "authorization" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] binding-macro = { path = "../../binding-macro" } protocol = { path = "../../protocol", package = "muta-protocol" } multi-signature = { path = "../multi-signature" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" lazy_static = "1.4" rlp = "0.4" bytes = "0.5" derive_more = "0.99" byteorder = "1.3" muta-codec-derive = "0.2" [dev-dependencies] cita_trie = "2.0" async-trait = "0.1" framework = { path = "../../framework" } ================================================ FILE: built-in-services/authorization/src/lib.rs ================================================ use binding_macro::{cycles, service}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK}; use protocol::types::{ServiceContext, SignedTransaction}; use serde::Deserialize; use multi_signature::MultiSignatureService; pub const AUTHORIZATION_SERVICE_NAME: &str = "authorization"; #[derive(Deserialize)] pub struct PtrSignedTransaction { ptr: usize, } pub struct AuthorizationService { _sdk: SDK, multi_sig: MultiSignatureService, } #[service] impl AuthorizationService { pub fn new(_sdk: SDK, multi_sig: MultiSignatureService) -> Self { Self { _sdk, multi_sig } } #[cycles(21_000)] #[read] fn check_authorization_by_ptr( &self, ctx: ServiceContext, payload: PtrSignedTransaction, ) -> ServiceResponse<()> { let stx: SignedTransaction = { let boxed = unsafe { Box::from_raw(payload.ptr as *mut SignedTransaction) }; *boxed }; self.check_authorization(ctx, stx) } #[cycles(21_000)] #[read] fn check_authorization( &self, ctx: ServiceContext, payload: SignedTransaction, ) -> 
ServiceResponse<()> { let resp = self.multi_sig.verify_signature(ctx, payload); if resp.is_error() { return ServiceResponse::<()>::from_error( 102, format!( "verify transaction signature error {:?}", resp.error_message ), ); } ServiceResponse::from_succeed(()) } } ================================================ FILE: built-in-services/metadata/Cargo.toml ================================================ [package] name = "metadata" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] binding-macro = { path = "../../binding-macro" } protocol = { path = "../../protocol", package = "muta-protocol" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" rlp = "0.4" bytes = "0.5" derive_more = "0.99" byteorder = "1.3" [dev-dependencies] hex = "0.4" cita_trie = "2.0" async-trait = "0.1" framework = { path = "../../framework" } ================================================ FILE: built-in-services/metadata/src/lib.rs ================================================ #[cfg(test)] mod tests; use binding_macro::{cycles, genesis, service}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK}; use protocol::types::{Metadata, ServiceContext, METADATA_KEY}; pub const METADATA_SERVICE_NAME: &str = "metadata"; pub trait MetaData { fn get_(&self, ctx: &ServiceContext) -> ServiceResponse; } pub struct MetadataService { sdk: SDK, } impl MetaData for MetadataService { fn get_(&self, ctx: &ServiceContext) -> ServiceResponse { self.get_metadata(ctx.clone()) } } #[service] impl MetadataService { pub fn new(sdk: SDK) -> Self { Self { sdk } } #[genesis] fn init_genesis(&mut self, metadata: Metadata) { self.sdk.set_value(METADATA_KEY.to_string(), metadata) } #[cycles(21_000)] #[read] fn get_metadata(&self, ctx: ServiceContext) -> ServiceResponse { let metadata: Metadata = self .sdk 
.get_value(&METADATA_KEY.to_owned()) .expect("metadata should not be none"); ServiceResponse::::from_succeed(metadata) } } ================================================ FILE: built-in-services/metadata/src/tests/mod.rs ================================================ use std::cell::RefCell; use std::rc::Rc; use std::str::FromStr; use std::sync::Arc; use async_trait::async_trait; use cita_trie::MemoryDB; use framework::binding::sdk::{DefaultChainQuerier, DefaultServiceSDK}; use framework::binding::state::{GeneralServiceState, MPTTrie}; use protocol::traits::{CommonStorage, Context, ServiceSDK, Storage}; use protocol::types::{ Address, Block, BlockHeader, Hash, Hex, Metadata, Proof, Receipt, ServiceContext, ServiceContextParams, SignedTransaction, ValidatorExtend, METADATA_KEY, }; use protocol::{types::Bytes, ProtocolResult}; use crate::MetadataService; #[test] fn test_get_metadata() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let context = mock_context(cycles_limit, caller); let init_metadata = mock_metadata(); let service = new_metadata_service_with_metadata(init_metadata.clone()); let metadata = service.get_metadata(context).succeed_data; assert_eq!(metadata, init_metadata); } fn new_metadata_service_with_metadata( metadata: Metadata, ) -> MetadataService< DefaultServiceSDK, DefaultChainQuerier>, > { let chain_db = DefaultChainQuerier::new(Arc::new(MockStorage {})); let trie = MPTTrie::new(Arc::new(MemoryDB::new(false))); let state = GeneralServiceState::new(trie); let mut sdk = DefaultServiceSDK::new(Rc::new(RefCell::new(state)), Rc::new(chain_db)); sdk.set_value(METADATA_KEY.to_string(), metadata); MetadataService::new(sdk) } fn mock_metadata() -> Metadata { Metadata { chain_id: Hash::digest(Bytes::from("test")), bech32_address_hrp: "muta".to_owned(), common_ref: Hex::from_string("0x703873635a6b51513451".to_string()).unwrap(), timeout_gap: 20, cycles_limit: 
99_999_999, cycles_price: 1, interval: 3000, verifier_list: [ValidatorExtend { bls_pub_key: Hex::from_string("0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724".to_owned()).unwrap(), pub_key: Hex::from_string("0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60".to_owned()).unwrap(), address: Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(), propose_weight: 1, vote_weight: 1, }] .to_vec(), propose_ratio: 10, prevote_ratio: 10, precommit_ratio: 10, brake_ratio: 7, tx_num_limit: 20000, max_tx_size: 1_073_741_824, } } fn mock_context(cycles_limit: u64, caller: Address) -> ServiceContext { let params = ServiceContextParams { tx_hash: None, nonce: None, cycles_limit, cycles_price: 1, cycles_used: Rc::new(RefCell::new(0)), caller, height: 1, timestamp: 0, service_name: "service_name".to_owned(), service_method: "service_method".to_owned(), service_payload: "service_payload".to_owned(), extra: None, events: Rc::new(RefCell::new(vec![])), }; ServiceContext::new(params) } struct MockStorage; #[async_trait] impl CommonStorage for MockStorage { async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult> { unimplemented!() } async fn set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn get_block_header( &self, _ctx: Context, _height: u64, ) -> 
ProtocolResult> { unimplemented!() } } #[async_trait] impl Storage for MockStorage { async fn insert_transactions( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn insert_receipts(&self, _ctx: Context, _: u64, _: Vec) -> ProtocolResult<()> { unimplemented!() } async fn update_latest_proof(&self, _ctx: Context, _: Proof) -> ProtocolResult<()> { unimplemented!() } async fn get_transaction_by_hash( &self, _ctx: Context, _: &Hash, ) -> ProtocolResult> { unimplemented!() } async fn get_transactions( &self, _ctx: Context, _: u64, _: &[Hash], ) -> ProtocolResult>> { unimplemented!() } async fn get_receipt_by_hash(&self, _ctx: Context, _: Hash) -> ProtocolResult> { unimplemented!() } async fn get_receipts( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult>> { unimplemented!() } async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } ================================================ FILE: built-in-services/multi-signature/Cargo.toml ================================================ [package] name = "multi-signature" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] binding-macro = { path = "../../binding-macro" } byteorder = "1.3" common-crypto = { path = "../../common/crypto" } derive_more = "0.99" hasher = { version="0.1", features = ["hash-keccak"] } hex = "0.4" lazy_static = "1.4" muta-codec-derive = "0.2" protocol = { path = "../../protocol", package = "muta-protocol" } rand = "0.7" rlp = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" [dev-dependencies] async-trait = "0.1" cita_trie = "2.0" framework = { path = "../../framework" } ================================================ FILE: built-in-services/multi-signature/src/lib.rs ================================================ 
#![allow(clippy::suspicious_else_formatting, clippy::mutable_key_type)] #[cfg(test)] mod tests; pub mod types; use std::collections::HashMap; use binding_macro::{cycles, genesis, service}; use derive_more::Display; use rlp::{Decodable, Rlp}; use common_crypto::{Crypto, Secp256k1}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK}; use protocol::types::{Address, Bytes, Hash, ServiceContext, SignedTransaction}; use crate::types::{ Account, AddAccountPayload, ChangeMemoPayload, ChangeOwnerPayload, GenerateMultiSigAccountPayload, GenerateMultiSigAccountResponse, GetMultiSigAccountPayload, GetMultiSigAccountResponse, InitGenesisPayload, MultiSigPermission, RemoveAccountPayload, RemoveAccountResult, SetAccountWeightPayload, SetThresholdPayload, SetWeightResult, UpdateAccountPayload, VerifySignaturePayload, Witness, }; pub const MULTI_SIG_SERVICE_NAME: &str = "multi_signature"; const MAX_MULTI_SIGNATURE_RECURSION_DEPTH: u8 = 8; const MAX_PERMISSION_ACCOUNTS: u8 = 16; pub trait MultiSignature { fn verify_signature_( &self, ctx: &ServiceContext, payload: SignedTransaction, ) -> ServiceResponse<()>; fn generate_account_( &mut self, ctx: &ServiceContext, payload: GenerateMultiSigAccountPayload, ) -> ServiceResponse; } pub struct MultiSignatureService { sdk: SDK, } impl MultiSignature for MultiSignatureService { fn verify_signature_( &self, ctx: &ServiceContext, payload: SignedTransaction, ) -> ServiceResponse<()> { self.verify_signature(ctx.clone(), payload) } fn generate_account_( &mut self, ctx: &ServiceContext, payload: GenerateMultiSigAccountPayload, ) -> ServiceResponse { self.generate_account(ctx.clone(), payload) } } #[service] impl MultiSignatureService { pub fn new(sdk: SDK) -> Self { MultiSignatureService { sdk } } #[genesis] fn init_genesis(&mut self, payload: InitGenesisPayload) { if payload.addr_with_weight.is_empty() || payload.addr_with_weight.len() > MAX_PERMISSION_ACCOUNTS as usize { panic!("Invalid account number"); } let weight_sum = 
payload .addr_with_weight .iter() .map(|item| item.weight as u32) .sum::(); if payload.threshold == 0 || weight_sum < payload.threshold { panic!("Invalid threshold or weights"); } let address = payload.address.clone(); let accounts = payload .addr_with_weight .iter() .map(|item| Account { address: item.address.clone(), weight: item.weight, is_multiple: false, }) .collect::>(); let permission = MultiSigPermission { accounts, owner: payload.owner, threshold: payload.threshold, memo: payload.memo, }; self.sdk.set_account_value(&address, 0u8, permission); } #[cycles(21_000)] #[write] fn generate_account( &mut self, ctx: ServiceContext, payload: GenerateMultiSigAccountPayload, ) -> ServiceResponse { if payload.addr_with_weight.is_empty() || payload.addr_with_weight.len() > MAX_PERMISSION_ACCOUNTS as usize { return ServiceError::InvalidAccountLength.into(); } let weight_sum = payload .addr_with_weight .iter() .map(|item| item.weight as u32) .sum::(); if payload.threshold == 0 || weight_sum < payload.threshold { return ServiceError::InvalidAccountWeights.into(); } // check the recursion depth if payload .addr_with_weight .iter() .map(|s| self._is_recursion_depth_overflow(&s.address, 0)) .any(|res| res) { return ServiceError::AboveMaxRecursionDepth.into(); } let tx_hash = match ctx.get_tx_hash() { Some(hash) => hash, None => return ServiceError::CtxMissingTxHash.into(), }; if let Ok(address) = Address::from_hash(Hash::digest(tx_hash.as_bytes())) { let accounts = payload .addr_with_weight .iter() .map(|item| Account { address: item.address.clone(), weight: item.weight, is_multiple: !self .get_account_from_address(ctx.clone(), GetMultiSigAccountPayload { multi_sig_address: item.address.clone(), }) .is_error(), }) .collect::>(); let owner = if payload.autonomy { address.clone() } else { payload.owner.clone() }; let permission = MultiSigPermission { accounts, owner, threshold: payload.threshold, memo: payload.memo, }; self.sdk.set_account_value(&address, 0u8, permission); 
ServiceResponse::::from_succeed( GenerateMultiSigAccountResponse { address }, ) } else { ServiceError::GenerateAddressFailed.into() } } #[cycles(10_000)] #[read] fn get_account_from_address( &self, _ctx: ServiceContext, payload: GetMultiSigAccountPayload, ) -> ServiceResponse { if let Some(permission) = self.sdk.get_account_value(&payload.multi_sig_address, &0u8) { ServiceResponse::::from_succeed( GetMultiSigAccountResponse { permission }, ) } else { ServiceError::AccountNotExsit.into() } } #[cycles(21_000)] #[read] pub fn verify_signature( &self, ctx: ServiceContext, payload: SignedTransaction, ) -> ServiceResponse<()> { let pubkeys = match decode_list::>(&payload.pubkey, "public key") { Ok(pks) => pks, Err(err) => return err.into(), }; let sigs = match decode_list::>(&payload.signature, "signature") { Ok(sig) => sig, Err(err) => return err.into(), }; self._inner_verify_signature(VerifySignaturePayload { tx_hash: payload.tx_hash, pubkeys: pubkeys.into_iter().map(Bytes::from).collect::>(), signatures: sigs.into_iter().map(Bytes::from).collect::>(), sender: payload.raw.sender, }) } #[cycles(21_000)] #[write] fn update_account( &mut self, ctx: ServiceContext, payload: UpdateAccountPayload, ) -> ServiceResponse<()> { if let Some(permission) = self .sdk .get_account_value::<_, MultiSigPermission>(&payload.account_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } // check if account contains itself if payload .addr_with_weight .iter() .map(|a| a.address.clone()) .any(|addr| addr == payload.account_address) { return ServiceError::AccountSelfContained.into(); } // check sum of weight if payload.addr_with_weight.is_empty() || payload.addr_with_weight.len() > MAX_PERMISSION_ACCOUNTS as usize { return ServiceError::InvalidAccountLength.into(); } let weight_sum = payload .addr_with_weight .iter() .map(|item| item.weight as u32) .sum::(); // check if sum of the weights is above threshold if 
payload.threshold == 0 || weight_sum < payload.threshold { return ServiceError::InvalidAccountWeights.into(); } // check the recursion depth if payload .addr_with_weight .iter() .map(|s| self._is_recursion_depth_overflow(&s.address, 0)) .any(|res| res) { return ServiceError::AboveMaxRecursionDepth.into(); } let accounts = payload .addr_with_weight .iter() .map(|item| Account { address: item.address.clone(), weight: item.weight, is_multiple: !self .get_account_from_address(ctx.clone(), GetMultiSigAccountPayload { multi_sig_address: item.address.clone(), }) .is_error(), }) .collect::>(); self.sdk .set_account_value(&payload.account_address, 0u8, MultiSigPermission { accounts, owner: payload.owner, threshold: payload.threshold, memo: payload.memo, }); return ServiceResponse::<()>::from_succeed(()); } ServiceError::AccountNotExsit.into() } #[cycles(21_000)] #[write] fn change_owner( &mut self, ctx: ServiceContext, payload: ChangeOwnerPayload, ) -> ServiceResponse<()> { if let Some(mut permission) = self .sdk .get_account_value::<_, MultiSigPermission>(&payload.multi_sig_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } // check new owner's recursion depth if self._is_recursion_depth_overflow(&payload.new_owner, 0) { return ServiceError::AboveMaxRecursionDepth.into(); } permission.set_owner(payload.new_owner); self.sdk .set_account_value(&payload.multi_sig_address, 0u8, permission); ServiceResponse::<()>::from_succeed(()) } else { ServiceError::AccountNotExsit.into() } } #[cycles(21_000)] #[write] fn change_memo( &mut self, ctx: ServiceContext, payload: ChangeMemoPayload, ) -> ServiceResponse<()> { if let Some(mut permission) = self .sdk .get_account_value::<_, MultiSigPermission>(&payload.multi_sig_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } permission.set_memo(payload.new_memo); self.sdk 
.set_account_value(&payload.multi_sig_address, 0u8, permission); ServiceResponse::<()>::from_succeed(()) } else { ServiceError::AccountNotExsit.into() } } #[cycles(21_000)] #[write] fn add_account( &mut self, ctx: ServiceContext, payload: AddAccountPayload, ) -> ServiceResponse<()> { if let Some(mut permission) = self .sdk .get_account_value::<_, MultiSigPermission>(&payload.multi_sig_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } // check whether reach the max count if permission.accounts.len() == MAX_PERMISSION_ACCOUNTS as usize { return ServiceError::AccountCountReachMaxValue.into(); } // check whether the new account above max recursion depth if self._is_recursion_depth_overflow(&payload.new_account.address, 1) { return ServiceError::AboveMaxRecursionDepth.into(); } permission.add_account(payload.new_account.clone()); self.sdk .set_account_value(&payload.multi_sig_address, 0u8, permission); ServiceResponse::<()>::from_succeed(()) } else { ServiceError::AccountNotExsit.into() } } #[cycles(21_000)] #[write] fn remove_account( &mut self, ctx: ServiceContext, payload: RemoveAccountPayload, ) -> ServiceResponse { if let Some(mut permission) = self .sdk .get_account_value::<_, MultiSigPermission>(&payload.multi_sig_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } match permission.remove_account(&payload.account_address) { RemoveAccountResult::Success(ret) => { self.sdk .set_account_value(&payload.multi_sig_address, 0u8, permission); return ServiceResponse::::from_succeed(ret); } RemoveAccountResult::BelowThreshold => { return ServiceError::InvalidAccountWeights.into(); } _ => (), } } ServiceError::AccountNotExsit.into() } #[cycles(21_000)] #[write] fn set_account_weight( &mut self, ctx: ServiceContext, payload: SetAccountWeightPayload, ) -> ServiceResponse<()> { if let Some(mut permission) = self .sdk 
.get_account_value::<_, MultiSigPermission>(&payload.multi_sig_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } match permission.set_account_weight(&payload.account_address, payload.new_weight) { SetWeightResult::Success => { self.sdk .set_account_value(&payload.multi_sig_address, 0u8, permission); return ServiceResponse::<()>::from_succeed(()); } SetWeightResult::InvalidNewWeight => { return ServiceError::InvalidAccountWeights.into(); } _ => (), } } ServiceError::AccountNotExsit.into() } #[cycles(21_000)] #[write] fn set_threshold( &mut self, ctx: ServiceContext, payload: SetThresholdPayload, ) -> ServiceResponse<()> { if let Some(mut permission) = self .sdk .get_account_value::<_, MultiSigPermission>(&payload.multi_sig_address, &0u8) { // check owner address if ctx.get_caller() != permission.owner { return ServiceError::InvalidOwner.into(); } // check new threshold if permission .accounts .iter() .map(|account| account.weight as u32) .sum::() < payload.new_threshold { return ServiceError::InvalidAccountWeights.into(); } permission.set_threshold(payload.new_threshold); self.sdk .set_account_value(&payload.multi_sig_address, 0u8, permission); ServiceResponse::<()>::from_succeed(()) } else { ServiceError::AccountNotExsit.into() } } fn _inner_verify_signature(&self, payload: VerifySignaturePayload) -> ServiceResponse<()> { if payload.pubkeys.len() != payload.signatures.len() { return ServiceError::PubkeyAndSignatureMismatch.into(); } if payload.pubkeys.len() == 1 { if let Ok(addr) = Address::from_pubkey_bytes(&payload.pubkeys[0]) { if addr == payload.sender { return self._verify_single_signature( &payload.tx_hash, &payload.signatures[0], &payload.pubkeys[0], ); } } else { return ServiceError::InvalidPublicKey.into(); } } self._verify_multi_signature( &payload.tx_hash, &Witness::new(payload.pubkeys, payload.signatures).into_addr_map(), &payload.sender, 0u8, ) } fn _verify_multi_signature( 
&self,
        tx_hash: &Hash,
        wit_map: &HashMap<Address, (Bytes, Bytes)>,
        sender: &Address,
        recursion_depth: u8,
    ) -> ServiceResponse<()> {
        // use local variable to do DFS
        let depth_clone = recursion_depth + 1;

        // check recursion depth
        if depth_clone >= MAX_MULTI_SIGNATURE_RECURSION_DEPTH {
            return ServiceError::AboveMaxRecursionDepth.into();
        }

        let mut weight_acc = 0u32;

        let permission = match self
            .sdk
            .get_account_value::<_, MultiSigPermission>(sender, &0u8)
        {
            Some(permission) => permission,
            None => return ServiceError::AccountNotExsit.into(),
        };

        for account in permission.accounts.iter() {
            if !account.is_multiple {
                // Plain account: count its weight when a matching witness
                // carries a valid signature over the tx hash.
                if let Some((pk, sig)) = wit_map.get(&account.address) {
                    if !self._verify_single_signature(tx_hash, sig, pk).is_error() {
                        weight_acc += account.weight as u32;
                    }
                }
            } else if !self
                ._verify_multi_signature(tx_hash, wit_map, &account.address, depth_clone)
                .is_error()
            {
                // Nested multi-sig account: recurse one level deeper.
                weight_acc += account.weight as u32;
            }

            // Short-circuit as soon as the threshold is reached.
            if weight_acc >= permission.threshold {
                return ServiceResponse::<()>::from_succeed(());
            }
        }

        ServiceError::VerifyMultiSignatureFailed.into()
    }

    /// Verify one secp256k1 signature over `tx_hash`.
    fn _verify_single_signature(
        &self,
        tx_hash: &Hash,
        sig: &Bytes,
        pubkey: &Bytes,
    ) -> ServiceResponse<()> {
        if Secp256k1::verify_signature(tx_hash.as_slice(), sig.as_ref(), pubkey.as_ref()).is_ok() {
            ServiceResponse::<()>::from_succeed(())
        } else {
            ServiceError::VerifyMultiSignatureFailed.into()
        }
    }

    /// Return true when linking `address` at `recursion_depth` would exceed
    /// MAX_MULTI_SIGNATURE_RECURSION_DEPTH. Plain (non multi-sig) addresses
    /// never overflow.
    fn _is_recursion_depth_overflow(&self, address: &Address, recursion_depth: u8) -> bool {
        let depth_clone = recursion_depth + 1;
        if depth_clone >= MAX_MULTI_SIGNATURE_RECURSION_DEPTH {
            return true;
        }

        if let Some(permission) = self
            .sdk
            .get_account_value::<_, MultiSigPermission>(address, &0u8)
        {
            permission
                .accounts
                .iter()
                .filter(|account| account.is_multiple)
                .any(|account| self._is_recursion_depth_overflow(&account.address, depth_clone))
        } else {
            false
        }
    }
}

#[derive(Debug, Display)]
pub enum ServiceError {
    #[display(fmt = "Decode {:?} error", _0)]
    DecodeErr(String),

    #[display(fmt = "accounts length must
be [1,16]")] InvalidAccountLength, #[display(fmt = "accounts weight or threshold not valid")] InvalidAccountWeights, #[display(fmt = "above max recursion depth")] AboveMaxRecursionDepth, #[display(fmt = "Can not get tx hash from service context")] CtxMissingTxHash, #[display(fmt = "generate address from tx_hash failed")] GenerateAddressFailed, #[display(fmt = "account is not existed")] AccountNotExsit, #[display(fmt = "invalid owner")] InvalidOwner, #[display(fmt = "account can not contain itself")] AccountSelfContained, #[display(fmt = "the account count reach max value")] AccountCountReachMaxValue, #[display(fmt = "pubkkeys len is not equal to signatures len")] PubkeyAndSignatureMismatch, #[display(fmt = "invalid public key")] InvalidPublicKey, #[display(fmt = "multi signature verified failed")] VerifyMultiSignatureFailed, } impl ServiceError { fn code(&self) -> u64 { match self { ServiceError::DecodeErr(_) => 101, ServiceError::InvalidAccountLength => 102, ServiceError::InvalidAccountWeights => 103, ServiceError::AboveMaxRecursionDepth => 104, ServiceError::CtxMissingTxHash => 105, ServiceError::GenerateAddressFailed => 106, ServiceError::AccountNotExsit => 107, ServiceError::InvalidOwner => 108, ServiceError::AccountSelfContained => 109, ServiceError::AccountCountReachMaxValue => 110, ServiceError::PubkeyAndSignatureMismatch => 111, ServiceError::InvalidPublicKey => 112, ServiceError::VerifyMultiSignatureFailed => 113, } } } impl From for ServiceResponse { fn from(err: ServiceError) -> ServiceResponse { ServiceResponse::from_error(err.code(), err.to_string()) } } fn decode_list(bytes: &[u8], ty: &str) -> Result, ServiceError> { Rlp::new(bytes) .as_list() .map_err(|_| ServiceError::DecodeErr(ty.to_string())) } ================================================ FILE: built-in-services/multi-signature/src/tests/curd_test.rs ================================================ use std::str::FromStr; use crate::types::{ AddAccountPayload, GenerateMultiSigAccountPayload, 
GetMultiSigAccountPayload, MultiSigPermission, RemoveAccountPayload, SetAccountWeightPayload, SetThresholdPayload, UpdateAccountPayload, }; use super::*; #[test] fn test_generate_multi_signature() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let context = mock_context(cycles_limit, caller); let mut service = new_multi_signature_service(); let owner = Address::from_pubkey_bytes(gen_one_keypair().1).unwrap(); // test permission accounts above the max value let accounts = gen_keypairs(17) .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service.generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner.clone(), autonomy: false, addr_with_weight: accounts, threshold: 12, memo: String::new(), }); assert!(multi_sig_address.is_error()); // test the threshold larger than the sum of weights let accounts = gen_keypairs(4) .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service.generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner.clone(), autonomy: false, addr_with_weight: accounts, threshold: 12, memo: String::new(), }); assert!(multi_sig_address.is_error()); // test generate a multi-signature address let accounts = gen_keypairs(4) .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service.generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner.clone(), autonomy: false, addr_with_weight: accounts.clone(), threshold: 3, memo: String::new(), }); assert!(!multi_sig_address.is_error()); // test get permission by multi-signature address let addr = multi_sig_address.succeed_data; let permission = service.get_account_from_address(context, GetMultiSigAccountPayload { multi_sig_address: addr.address, }); assert!(!permission.is_error()); 
assert_eq!(permission.succeed_data.permission, MultiSigPermission { owner, accounts: to_accounts_list(accounts), threshold: 3, memo: String::new(), }); } #[test] fn test_set_threshold() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let mut service = new_multi_signature_service(); let owner = gen_one_keypair(); let owner_address = Address::from_pubkey_bytes(owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address.clone()); let keypairs = gen_keypairs(4); let account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service .generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner_address, autonomy: false, addr_with_weight: account_pubkeys, threshold: 3, memo: String::new(), }) .succeed_data .address; // test new threshold above sum of the weights let res = service.set_threshold(context.clone(), SetThresholdPayload { multi_sig_address: multi_sig_address.clone(), new_threshold: 5, }); assert_eq!( res.error_message, "accounts weight or threshold not valid".to_owned() ); // test set new threshold success let res = service.set_threshold(context, SetThresholdPayload { multi_sig_address, new_threshold: 2, }); assert_eq!(res.error_message, "".to_owned()); } #[test] fn test_adeption_address() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let mut service = new_multi_signature_service(); let owner = gen_one_keypair(); let owner_address = Address::from_pubkey_bytes(owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address); let keypairs = gen_keypairs(15); let account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service .generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: Address::default(), autonomy: true, addr_with_weight: account_pubkeys, threshold: 3, memo: String::new(), }) .succeed_data .address; let permission = 
service.get_account_from_address(context, GetMultiSigAccountPayload { multi_sig_address: multi_sig_address.clone(), }); assert_eq!(multi_sig_address, permission.succeed_data.permission.owner); } #[test] fn test_add_account() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let mut service = new_multi_signature_service(); let owner = gen_one_keypair(); let owner_address = Address::from_pubkey_bytes(owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address.clone()); let keypairs = gen_keypairs(15); let mut account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service .generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner_address.clone(), autonomy: false, addr_with_weight: account_pubkeys.clone(), threshold: 3, memo: String::new(), }) .succeed_data .address; // test add new account success let new_keypair = gen_one_keypair(); account_pubkeys.push(to_multi_sig_account(new_keypair.1.clone())); let res = service.add_account(context.clone(), AddAccountPayload { multi_sig_address: multi_sig_address.clone(), new_account: to_multi_sig_account(new_keypair.1).into_signle_account(), }); assert_eq!(res.error_message, "".to_owned()); // test add new account success above max count value let new_keypair = gen_one_keypair(); let res = service.add_account(context.clone(), AddAccountPayload { multi_sig_address: multi_sig_address.clone(), new_account: to_multi_sig_account(new_keypair.1).into_signle_account(), }); assert_eq!( res.error_message, "the account count reach max value".to_owned() ); // test get permission after add a new account let permission = service.get_account_from_address(context, GetMultiSigAccountPayload { multi_sig_address }); assert_eq!(permission.succeed_data.permission, MultiSigPermission { owner: owner_address, accounts: to_accounts_list(account_pubkeys), threshold: 3, memo: String::new(), }); } #[test] fn test_update_account() { let cycles_limit 
= 1024 * 1024 * 1024; // 1073741824 let mut service = new_multi_signature_service(); let owner = gen_one_keypair(); let owner_address = Address::from_pubkey_bytes(owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address.clone()); let keypairs = gen_keypairs(4); let account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service .generate_account(context, GenerateMultiSigAccountPayload { owner: owner_address.clone(), autonomy: false, addr_with_weight: account_pubkeys, threshold: 4, memo: String::new(), }) .succeed_data .address; let new_owner = gen_one_keypair(); let new_owner_address = Address::from_pubkey_bytes(new_owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address); let account_pubkeys = vec![AddressWithWeight { address: multi_sig_address.clone(), weight: 1u8, }]; let res = service.update_account(context.clone(), UpdateAccountPayload { account_address: multi_sig_address.clone(), owner: new_owner_address.clone(), addr_with_weight: account_pubkeys, threshold: 1, memo: String::new(), }); assert!(res.is_error()); let keypairs = gen_keypairs(4); let account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let res = service.update_account(context, UpdateAccountPayload { account_address: multi_sig_address, owner: new_owner_address, addr_with_weight: account_pubkeys, threshold: 1, memo: String::new(), }); assert_eq!(res.is_error(), false); } #[test] fn test_set_weight() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let mut service = new_multi_signature_service(); let owner = gen_one_keypair(); let owner_address = Address::from_pubkey_bytes(owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address.clone()); let keypairs = gen_keypairs(4); let mut account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service 
.generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner_address.clone(), autonomy: false, addr_with_weight: account_pubkeys.clone(), threshold: 4, memo: String::new(), }) .succeed_data .address; let to_be_changed_address = Address::from_pubkey_bytes(keypairs[0].1.clone()).unwrap(); // test set weight success let res = service.set_account_weight(context.clone(), SetAccountWeightPayload { multi_sig_address: multi_sig_address.clone(), account_address: to_be_changed_address.clone(), new_weight: 2, }); assert_eq!(res.error_message, "".to_owned()); // test set an invalid weight let res = service.set_account_weight(context.clone(), SetAccountWeightPayload { multi_sig_address: multi_sig_address.clone(), account_address: to_be_changed_address, new_weight: 0, }); assert_eq!( res.error_message, "accounts weight or threshold not valid".to_owned() ); // test get permission after add a new account let permission = service.get_account_from_address(context, GetMultiSigAccountPayload { multi_sig_address }); account_pubkeys[0].weight = 2; assert_eq!(permission.succeed_data.permission, MultiSigPermission { owner: owner_address, accounts: to_accounts_list(account_pubkeys), threshold: 4, memo: String::new(), }); } #[test] fn test_remove_account() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let mut service = new_multi_signature_service(); let owner = gen_one_keypair(); let owner_address = Address::from_pubkey_bytes(owner.1).unwrap(); let context = mock_context(cycles_limit, owner_address.clone()); let keypairs = gen_keypairs(4); let mut account_pubkeys = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let multi_sig_address = service .generate_account(context.clone(), GenerateMultiSigAccountPayload { owner: owner_address.clone(), autonomy: false, addr_with_weight: account_pubkeys.clone(), threshold: 3, memo: String::new(), }) .succeed_data .address; let to_be_removed_address = 
Address::from_pubkey_bytes(keypairs[3].1.clone()).unwrap(); let res = service.remove_account(context.clone(), RemoveAccountPayload { multi_sig_address: multi_sig_address.clone(), account_address: to_be_removed_address, }); account_pubkeys.pop(); assert!(!res.is_error()); let to_be_removed_address = Address::from_pubkey_bytes(keypairs[2].1.clone()).unwrap(); let res = service.remove_account(context.clone(), RemoveAccountPayload { multi_sig_address: multi_sig_address.clone(), account_address: to_be_removed_address, }); assert_eq!( res.error_message, "accounts weight or threshold not valid".to_owned() ); let permission = service.get_account_from_address(context, GetMultiSigAccountPayload { multi_sig_address }); assert_eq!(permission.succeed_data.permission, MultiSigPermission { owner: owner_address, accounts: to_accounts_list(account_pubkeys), threshold: 3, memo: String::new(), }); } ================================================ FILE: built-in-services/multi-signature/src/tests/mod.rs ================================================ mod curd_test; mod recursion_test; use std::cell::RefCell; use std::convert::TryFrom; use std::rc::Rc; use std::sync::Arc; use async_trait::async_trait; use cita_trie::MemoryDB; use rand::{random, thread_rng}; use common_crypto::{ HashValue, PrivateKey, PublicKey, Secp256k1PrivateKey, Signature, ToPublicKey, }; use framework::binding::sdk::{DefaultChainQuerier, DefaultServiceSDK}; use framework::binding::state::{GeneralServiceState, MPTTrie}; use protocol::traits::{CommonStorage, Context, Storage}; use protocol::types::{ Address, Block, BlockHeader, Hash, Proof, Receipt, ServiceContext, ServiceContextParams, SignedTransaction, }; use protocol::{types::Bytes, ProtocolResult}; use crate::types::{Account, AddressWithWeight, VerifySignaturePayload}; use crate::MultiSignatureService; struct MockStorage; #[async_trait] impl CommonStorage for MockStorage { async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { 
unimplemented!() } async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult> { unimplemented!() } async fn set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_block_header( &self, _ctx: Context, _height: u64, ) -> ProtocolResult> { unimplemented!() } async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } #[async_trait] impl Storage for MockStorage { async fn insert_transactions( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn insert_receipts(&self, _ctx: Context, _: u64, _: Vec) -> ProtocolResult<()> { unimplemented!() } async fn update_latest_proof(&self, _ctx: Context, _: Proof) -> ProtocolResult<()> { unimplemented!() } async fn get_transaction_by_hash( &self, _ctx: Context, _: &Hash, ) -> ProtocolResult> { unimplemented!() } async fn get_transactions( &self, _ctx: Context, _: u64, _: &[Hash], ) -> ProtocolResult>> { unimplemented!() } async fn get_receipt_by_hash(&self, _ctx: Context, _: Hash) -> ProtocolResult> { unimplemented!() } async fn get_receipts( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult>> { unimplemented!() } async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } fn new_multi_signature_service() -> MultiSignatureService< DefaultServiceSDK, DefaultChainQuerier>, > { let chain_db = DefaultChainQuerier::new(Arc::new(MockStorage {})); let trie = MPTTrie::new(Arc::new(MemoryDB::new(false))); let state = GeneralServiceState::new(trie); let sdk = DefaultServiceSDK::new(Rc::new(RefCell::new(state)), Rc::new(chain_db)); MultiSignatureService::new(sdk) } fn 
mock_context(cycles_limit: u64, caller: Address) -> ServiceContext { let params = ServiceContextParams { tx_hash: Some(mock_hash()), nonce: None, cycles_limit, cycles_price: 1, cycles_used: Rc::new(RefCell::new(0)), caller, height: 1, timestamp: 0, service_name: "service_name".to_owned(), service_method: "service_method".to_owned(), service_payload: "service_payload".to_owned(), extra: None, events: Rc::new(RefCell::new(vec![])), }; ServiceContext::new(params) } fn mock_hash() -> Hash { Hash::digest(get_random_bytes(10)) } fn get_random_bytes(len: usize) -> Bytes { let vec: Vec = (0..len).map(|_| random::()).collect(); Bytes::from(vec) } fn gen_one_keypair() -> (Bytes, Bytes) { let sk = Secp256k1PrivateKey::generate(&mut thread_rng()); let pk = sk.pub_key(); (sk.to_bytes(), pk.to_bytes()) } fn gen_keypairs(num: usize) -> Vec<(Bytes, Bytes)> { (0..num).map(|_| gen_one_keypair()).collect::>() } fn to_multi_sig_account(pk: Bytes) -> AddressWithWeight { AddressWithWeight { address: Address::from_pubkey_bytes(pk).unwrap(), weight: 1u8, } } fn sign(privkey: &Bytes, hash: &Hash) -> Bytes { Secp256k1PrivateKey::try_from(privkey.as_ref()) .unwrap() .sign_message(&HashValue::try_from(hash.as_bytes().as_ref()).unwrap()) .to_bytes() } fn _gen_single_witness(privkey: &Bytes, hash: &Hash) -> VerifySignaturePayload { let privkey = Secp256k1PrivateKey::try_from(privkey.as_ref()).unwrap(); let pk = privkey.pub_key().to_bytes(); let sig = privkey .sign_message(&HashValue::try_from(hash.as_bytes().as_ref()).unwrap()) .to_bytes(); VerifySignaturePayload { pubkeys: vec![pk.clone()], signatures: vec![sig], sender: Address::from_pubkey_bytes(pk).unwrap(), tx_hash: hash.clone(), } } fn to_accounts_list(input: Vec) -> Vec { input .into_iter() .map(|item| item.into_signle_account()) .collect::>() } ================================================ FILE: built-in-services/multi-signature/src/tests/recursion_test.rs ================================================ use std::str::FromStr; use 
crate::types::{GenerateMultiSigAccountPayload, VerifySignaturePayload}; use super::*; #[test] fn test_recursion_verify_signature() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let mut service = new_multi_signature_service(); let owner = Address::from_pubkey_bytes(gen_one_keypair().1).unwrap(); let init_keypairs = gen_keypairs(4); let init_multi_sig_account = init_keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let sender = service .generate_account( mock_context(cycles_limit, caller.clone()), GenerateMultiSigAccountPayload { owner: owner.clone(), autonomy: false, addr_with_weight: init_multi_sig_account, threshold: 4, memo: String::new(), }, ) .succeed_data .address; let keypairs = gen_keypairs(3); let mut multi_sig_account = keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); multi_sig_account.push(AddressWithWeight { address: sender, weight: 1u8, }); let sender_new = service .generate_account( mock_context(cycles_limit, caller.clone()), GenerateMultiSigAccountPayload { owner, autonomy: false, addr_with_weight: multi_sig_account, threshold: 4, memo: String::new(), }, ) .succeed_data .address; let ctx = mock_context(cycles_limit, caller); let tx_hash = ctx.get_tx_hash().unwrap(); let mut pks = Vec::new(); let mut sigs = Vec::new(); for pair in init_keypairs.iter().chain(keypairs.iter()) { pks.push(pair.1.clone()); sigs.push(sign(&pair.0, &tx_hash)); } assert_eq!(pks.len(), sigs.len()); let res = service._inner_verify_signature(VerifySignaturePayload { pubkeys: pks, signatures: sigs, sender: sender_new, tx_hash, }); assert_eq!(res.is_error(), false); } #[test] fn test_recursion_depth() { let cycles_limit = 1024 * 1024 * 1024; // 1073741824 let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let mut service = new_multi_signature_service(); let owner_keypair = 
gen_one_keypair(); let owner = Address::from_pubkey_bytes(owner_keypair.1).unwrap(); let mut all_keypairs = Vec::new(); let init_keypairs = gen_keypairs(4); let mut init_keypairs_clone = init_keypairs.clone(); all_keypairs.append(&mut init_keypairs_clone); let init_multi_sig_account = init_keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); let mut sender = service .generate_account( mock_context(cycles_limit, caller.clone()), GenerateMultiSigAccountPayload { owner: owner.clone(), autonomy: false, addr_with_weight: init_multi_sig_account, threshold: 4, memo: String::new(), }, ) .succeed_data .address; for _i in 0..7 { let new_keypairs = gen_keypairs(3); let mut new_keypairs_clone = new_keypairs.clone(); all_keypairs.append(&mut new_keypairs_clone); let mut multi_sig_account = new_keypairs .iter() .map(|pair| to_multi_sig_account(pair.1.clone())) .collect::>(); multi_sig_account.push(AddressWithWeight { address: sender.clone(), weight: 1u8, }); let res = service.generate_account( mock_context(cycles_limit, caller.clone()), GenerateMultiSigAccountPayload { owner: owner.clone(), autonomy: false, addr_with_weight: multi_sig_account, threshold: 4, memo: String::new(), }, ); assert_eq!(res.is_error(), false); sender = res.succeed_data.address; } let res = service.generate_account( mock_context(cycles_limit, caller), GenerateMultiSigAccountPayload { owner, autonomy: false, addr_with_weight: vec![AddressWithWeight { address: sender, weight: 4u8, }], threshold: 1, memo: String::new(), }, ); assert!(res.is_error()); } ================================================ FILE: built-in-services/multi-signature/src/types.rs ================================================ use std::collections::HashMap; use muta_codec_derive::RlpFixedCodec; use serde::{Deserialize, Serialize}; use protocol::fixed_codec::{FixedCodec, FixedCodecError}; use protocol::types::{Address, Bytes, Hash}; use protocol::ProtocolResult; #[derive(Clone, Debug)] pub enum 
SetWeightResult { Success, NoAccount, InvalidNewWeight, } #[derive(Clone, Debug)] pub enum RemoveAccountResult { Success(Account), NoAccount, BelowThreshold, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct InitGenesisPayload { pub address: Address, pub owner: Address, pub addr_with_weight: Vec, pub threshold: u32, pub memo: String, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub struct GenerateMultiSigAccountPayload { pub owner: Address, pub autonomy: bool, pub addr_with_weight: Vec, pub threshold: u32, pub memo: String, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default)] pub struct GenerateMultiSigAccountResponse { pub address: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct VerifySignaturePayload { pub tx_hash: Hash, pub pubkeys: Vec, pub signatures: Vec, pub sender: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct GetMultiSigAccountPayload { pub multi_sig_address: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default)] pub struct GetMultiSigAccountResponse { pub permission: MultiSigPermission, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct ChangeOwnerPayload { pub multi_sig_address: Address, pub new_owner: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct ChangeMemoPayload { pub multi_sig_address: Address, pub new_memo: String, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct AddAccountPayload { pub multi_sig_address: Address, pub new_account: Account, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct RemoveAccountPayload { pub multi_sig_address: Address, pub account_address: Address, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)] pub struct SetAccountWeightPayload { pub multi_sig_address: Address, pub account_address: Address, pub new_weight: u8, 
}

/// Payload for changing the signature threshold of a multi-sig account.
#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)]
pub struct SetThresholdPayload {
    pub multi_sig_address: Address,
    pub new_threshold: u32,
}

/// Payload for replacing the owner, member set, threshold and memo of an
/// existing multi-sig account.
#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)]
pub struct UpdateAccountPayload {
    pub account_address: Address,
    pub owner: Address,
    // NOTE(review): generic parameters were stripped by extraction; restored
    // as Vec<AddressWithWeight> to match the AddressWithWeight type below --
    // confirm against upstream.
    pub addr_with_weight: Vec<AddressWithWeight>,
    pub threshold: u32,
    pub memo: String,
}

/// Stored permission data of one multi-sig account: the owner, the weighted
/// member accounts and the weight threshold a valid witness must reach.
#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct MultiSigPermission {
    pub owner: Address,
    pub accounts: Vec<Account>,
    pub threshold: u32,
    pub memo: String,
}

impl MultiSigPermission {
    /// Returns a clone of the member account with `addr`, if present.
    pub fn get_account(&self, addr: &Address) -> Option<Account> {
        self.accounts
            .iter()
            .find(|account| &account.address == addr)
            .cloned()
    }

    pub fn set_owner(&mut self, new_owner: Address) {
        self.owner = new_owner;
    }

    pub fn set_memo(&mut self, new_memo: String) {
        self.memo = new_memo;
    }

    pub fn add_account(&mut self, new_account: Account) {
        self.accounts.push(new_account);
    }

    /// Removes the member account with `address`.
    ///
    /// Refuses the removal (`BelowThreshold`) when the remaining weight sum
    /// would drop below the current threshold, since the permission could
    /// then never be satisfied again.
    pub fn remove_account(&mut self, address: &Address) -> RemoveAccountResult {
        let weight_sum = self
            .accounts
            .iter()
            .map(|account| account.weight as u32)
            .sum::<u32>();

        match self
            .accounts
            .iter()
            .position(|account| &account.address == address)
        {
            None => RemoveAccountResult::NoAccount,
            Some(idx) => {
                if weight_sum - self.accounts[idx].weight as u32 < self.threshold {
                    RemoveAccountResult::BelowThreshold
                } else {
                    RemoveAccountResult::Success(self.accounts.remove(idx))
                }
            }
        }
    }

    pub fn set_threshold(&mut self, new_threshold: u32) {
        self.threshold = new_threshold;
    }

    /// Sets the weight of one member account, rejecting the change
    /// (`InvalidNewWeight`) when the updated weight sum would fall below the
    /// threshold.
    pub fn set_account_weight(
        &mut self,
        account_address: &Address,
        new_weight: u8,
    ) -> SetWeightResult {
        let weight_sum = self
            .accounts
            .iter()
            .map(|account| account.weight as u32)
            .sum::<u32>();

        for account in self.accounts.iter_mut() {
            if &account.address == account_address {
                if weight_sum + (new_weight as u32) - (account.weight as u32) < self.threshold {
                    return SetWeightResult::InvalidNewWeight;
                }
                account.weight = new_weight;
                return SetWeightResult::Success;
            }
        }
        SetWeightResult::NoAccount
    }
}

/// One weighted member of a multi-sig account; `is_multiple` marks members
/// that are themselves multi-sig accounts.
#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct Account {
    pub address: Address,
    pub weight: u8,
    pub is_multiple: bool,
}

#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct AddressWithWeight {
    pub address: Address,
    pub weight: u8,
}

/// The pubkey/signature pairs attached to a transaction.
#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug)]
pub struct Witness {
    // NOTE(review): element types were stripped by extraction; restored as
    // Bytes (raw pubkey/signature bytes) -- confirm against upstream.
    pub pubkeys: Vec<Bytes>,
    pub signatures: Vec<Bytes>,
}

impl Witness {
    pub fn new(pubkeys: Vec<Bytes>, signatures: Vec<Bytes>) -> Self {
        Witness {
            pubkeys,
            signatures,
        }
    }

    /// Zips pubkeys with signatures and indexes them by the address derived
    /// from each pubkey; pairs whose pubkey cannot be converted to an address
    /// are silently dropped.
    pub fn into_addr_map(self) -> HashMap<Address, (Bytes, Bytes)> {
        let mut ret = HashMap::new();
        for (pk, sig) in self.pubkeys.into_iter().zip(self.signatures.into_iter()) {
            if let Ok(addr) = Address::from_pubkey_bytes(&pk) {
                ret.insert(addr, (pk, sig));
            }
        }
        ret
    }
}

#[cfg(test)]
impl AddressWithWeight {
    // Test-only helper; name keeps the upstream typo ("signle") so existing
    // test callers still compile.
    pub fn into_signle_account(self) -> Account {
        Account {
            address: self.address,
            weight: self.weight,
            is_multiple: false,
        }
    }
}

================================================
FILE: built-in-services/util/Cargo.toml
================================================
[package]
name = "util"
version = "0.2.1"
# NOTE(review): the author e-mail appears stripped by extraction
authors = ["Muta Dev "]
edition = "2018"
repository = "https://github.com/nervosnetwork/muta"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
binding-macro = { path = "../../binding-macro" }
protocol = { path = "../../protocol", package = "muta-protocol" }
hasher = { version="0.1", features = ["hash-keccak"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
rlp = "0.4"
bytes = "0.5"
derive_more = "0.15"
byteorder = "1.3"
common-crypto = { path = "../../common/crypto" }
hex = "0.4"
rand = "0.7"

[dev-dependencies]
cita_trie = "2.0"
async-trait = "0.1"
framework = { path = "../../framework" }
================================================ FILE: built-in-services/util/src/lib.rs ================================================ use bytes::Bytes; use hasher::{Hasher, HasherKeccak}; use binding_macro::{cycles, service}; use common_crypto::{Crypto, Secp256k1}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK}; use protocol::types::{Hash, ServiceContext}; use crate::types::{KeccakPayload, KeccakResponse, SigVerifyPayload, SigVerifyResponse}; #[cfg(test)] mod tests; pub mod types; pub const UTIL_SERVICE_NAME: &str = "util"; pub struct UtilService { _sdk: SDK, } #[service] impl UtilService { pub fn new(_sdk: SDK) -> Self { Self { _sdk } } #[cycles(10_000)] #[read] fn keccak256( &self, ctx: ServiceContext, payload: KeccakPayload, ) -> ServiceResponse { let keccak = HasherKeccak::new(); let data = hex::decode(payload.hex_str.as_string_trim0x()); if data.is_err() { return ServiceResponse::::from_error(107, "data not valid".to_owned()); } let hash_res = keccak.digest(data.unwrap().as_slice()); let response = KeccakResponse { result: Hash::from_bytes(Bytes::from(hash_res)).unwrap(), }; ServiceResponse::::from_succeed(response) } #[cycles(10_000)] #[read] fn verify( &self, ctx: ServiceContext, payload: SigVerifyPayload, ) -> ServiceResponse { let data_sig = hex::decode(payload.sig.as_string_trim0x()); if data_sig.is_err() { return ServiceResponse::::from_error( 108, "signature not valid".to_owned(), ); }; let data_pk = hex::decode(payload.pub_key.as_string_trim0x()); if data_pk.is_err() { return ServiceResponse::::from_error( 109, "public key not valid".to_owned(), ); }; let data_hash = payload.hash.as_bytes(); let response = SigVerifyResponse { is_ok: Secp256k1::verify_signature( data_hash.as_ref(), data_sig.unwrap().as_slice(), data_pk.unwrap().as_slice(), ) .is_ok(), }; ServiceResponse::::from_succeed(response) } } ================================================ FILE: built-in-services/util/src/tests/mod.rs 
================================================
use std::cell::RefCell;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::Arc;

use cita_trie::MemoryDB;
use rand::rngs::OsRng;

use async_trait::async_trait;
use common_crypto::{
    Crypto, PrivateKey, PublicKey, Secp256k1, Secp256k1PrivateKey, Signature, ToPublicKey,
};
use framework::binding::sdk::{DefaultChainQuerier, DefaultServiceSDK};
use framework::binding::state::{GeneralServiceState, MPTTrie};
use protocol::traits::{CommonStorage, Context, Storage};
use protocol::types::{
    Address, Block, BlockHeader, Hash, Hex, Proof, Receipt, ServiceContext, ServiceContextParams,
    SignedTransaction,
};
use protocol::ProtocolResult;

use crate::types::{KeccakPayload, SigVerifyPayload};
use crate::UtilService;

#[test]
fn test_hash() {
    let cycles_limit = 1024 * 1024 * 1024; // 1073741824
    let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap();
    let context = mock_context(cycles_limit, caller);
    let service = new_util_service();

    // keccak256(0x1234) pinned against a known test vector
    let res = service
        .keccak256(context, KeccakPayload {
            hex_str: Hex::from_string("0x1234".to_string()).unwrap(),
        })
        .succeed_data;

    assert_eq!(
        res.result.as_hex(),
        "0x56570de287d73cd1cb6092bb8fdee6173974955fdef345ae579ee9f475ea7432".to_string()
    )
}

#[test]
fn test_verify() {
    let cycles_limit = 1024 * 1024 * 1024; // 1073741824
    let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap();
    let context = mock_context(cycles_limit, caller);
    let service = new_util_service();

    // Sign a fixed hash with a fresh key pair, then expect verify() to accept.
    let priv_key = Secp256k1PrivateKey::generate(&mut OsRng);
    let pub_key = priv_key.pub_key();

    let mut input_pk: String = "0x".to_string();
    input_pk.push_str(hex::encode(pub_key.to_bytes()).as_str());
    let pub_key_data = Hex::from_string(input_pk).unwrap();

    let hash =
        Hash::from_hex("0x56570de287d73cd1cb6092bb8fdee6173974955fdef345ae579ee9f475ea7432")
            .unwrap();
    let sig = Secp256k1::sign_message(&hash.as_bytes(), &priv_key.to_bytes()).unwrap();

    let mut input_sig: String = "0x".to_string();
    input_sig.push_str(hex::encode(sig.to_bytes()).as_str());
    let sig_data = Hex::from_string(input_sig).unwrap();

    println!(
        "pubkey: {}\r\nsig: {}",
        pub_key_data.as_string(),
        sig_data.as_string()
    );

    let res = service
        .verify(context, SigVerifyPayload {
            hash,
            sig: sig_data,
            pub_key: pub_key_data,
        })
        .succeed_data;

    assert!(res.is_ok)
}

// NOTE(review): the generic arguments below were stripped by extraction and
// have been reconstructed from the constructor calls -- confirm upstream.
fn new_util_service(
) -> UtilService<DefaultServiceSDK<GeneralServiceState<MemoryDB>, DefaultChainQuerier<MockStorage>>>
{
    let chain_db = DefaultChainQuerier::new(Arc::new(MockStorage {}));
    let trie = MPTTrie::new(Arc::new(MemoryDB::new(false)));
    let state = GeneralServiceState::new(trie);

    let sdk = DefaultServiceSDK::new(Rc::new(RefCell::new(state)), Rc::new(chain_db));
    UtilService::new(sdk)
}

// Builds a ServiceContext with fixed height/timestamp for deterministic tests.
fn mock_context(cycles_limit: u64, caller: Address) -> ServiceContext {
    let params = ServiceContextParams {
        tx_hash: None,
        nonce: None,
        cycles_limit,
        cycles_price: 1,
        cycles_used: Rc::new(RefCell::new(0)),
        caller,
        height: 1,
        timestamp: 0,
        service_name: "service_name".to_owned(),
        service_method: "service_method".to_owned(),
        service_payload: "service_payload".to_owned(),
        extra: None,
        events: Rc::new(RefCell::new(vec![])),
    };

    ServiceContext::new(params)
}

// Storage stub: the util service never touches chain storage, so every
// method is unimplemented!().
struct MockStorage;

#[async_trait]
impl CommonStorage for MockStorage {
    async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        unimplemented!()
    }

    async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<Option<Block>> {
        unimplemented!()
    }

    async fn get_block_header(
        &self,
        _ctx: Context,
        _height: u64,
    ) -> ProtocolResult<Option<BlockHeader>> {
        unimplemented!()
    }

    async fn set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        unimplemented!()
    }

    async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> {
        unimplemented!()
    }

    async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult<Block> {
        unimplemented!()
    }

    async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        unimplemented!()
    }

    async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult<BlockHeader> {
        unimplemented!()
    }
}
#[async_trait] impl Storage for MockStorage { async fn insert_transactions( &self, _: Context, _: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn get_transactions( &self, _: Context, _height: u64, _: &[Hash], ) -> ProtocolResult>> { unimplemented!() } async fn get_transaction_by_hash( &self, _: Context, _: &Hash, ) -> ProtocolResult> { unimplemented!() } async fn insert_receipts( &self, _: Context, _height: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn get_receipt_by_hash(&self, _: Context, _: Hash) -> ProtocolResult> { unimplemented!() } async fn get_receipts( &self, _: Context, _height: u64, _: Vec, ) -> ProtocolResult>> { unimplemented!() } async fn update_latest_proof(&self, _: Context, _: Proof) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_proof(&self, _: Context) -> ProtocolResult { unimplemented!() } } ================================================ FILE: built-in-services/util/src/types.rs ================================================ use protocol::types::{Hash, Hex}; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Serialize, Clone, Debug)] pub struct KeccakPayload { pub hex_str: Hex, } #[derive(Deserialize, Serialize, Clone, Debug, Default)] pub struct KeccakResponse { pub result: Hash, } #[derive(Deserialize, Serialize, Clone, Debug)] pub struct SigVerifyPayload { pub hash: Hash, pub sig: Hex, pub pub_key: Hex, } #[derive(Deserialize, Serialize, Clone, Debug, Default)] pub struct SigVerifyResponse { pub is_ok: bool, } ================================================ FILE: byzantine/Cargo.toml ================================================ [package] name = "byzantine" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] common-apm = { path = "../common/apm" } common-config-parser = { path = 
"../common/config-parser" } common-crypto = { path = "../common/crypto" } common-logger = { path = "../common/logger" } common-merkle = { path = "../common/merkle" } protocol = { path = "../protocol", package = "muta-protocol" } core-api = { path = "../core/api" } core-storage = { path = "../core/storage" } core-mempool = { path = "../core/mempool" } core-network = { path = "../core/network" } core-consensus = { path = "../core/consensus" } overlord = "0.2" binding-macro = { path = "../binding-macro" } framework = { path = "../framework" } actix-rt = "1.0" async-trait = "0.1" derive_more = "0.99" lazy_static = "1.4" futures = "0.3" parking_lot = "0.11" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" log = "0.4" clap = "2.33" bytes = "0.5" hex = "0.4" rlp = "0.4" rand = "0.7" toml = "0.5" tokio = { version = "0.2", features = ["macros", "rt-core", "rt-util", "signal", "time"] } muta-apm = "0.1.0-alpha.7" futures-timer="3.0" ================================================ FILE: byzantine/README.md ================================================ # 1. 概述 拜占庭测试是通过模拟恶意攻击行为对系统进行安全性和稳定性测试。 对于区块链这样的分布式系统,节点是通过对外发送/不发送消息来对系统施加影响的。 因此通过控制消息的发送时间、内容、数量和接收方,我们就可以模拟出任意的恶意行为。 考虑到这些控制因素理论上可以组合出无穷多的恶意行为,而我们无法穷尽所有的可能, 所以在具体实现时一方面是通过随机组合来覆盖尽可能多的可能,另一方面要方便随时增加新的测试用例。 后者会在[架构设计](#3-架构设计)部分做详细的解释。 在`muta`系统中,可以将节点发送的消息分为`主动消息`和`被动消息`。 主动消息是指节点可以在任意时刻主动发起的消息,如发送新交易,提案,投票等。 被动消息是指节点需要被其他消息触发才能发起的消息, 比如节点在收到 `pull_txs` 的时候才会发送 `push_txs`。 本项目的核心思路就是精心构造许多不同的恶意消息类型, 再与其他因素随机组合,以模拟出能够覆盖大部分场景的恶意行为。 # 2. 
测试操作

首先根据测试需要,修改配置文件 `byzantine/generators.toml`,增加或删除某些测试用例。

```
interval = 500 // 主动消息的触发间隔,单位 ms

[[list]]
req_end = "/gossip/consensus/signed_proposal" // 主动消息需忽略该项,被动消息填触发消息的 end
msg_type = { RecvProposal = "InvalidHeight" } // 消息内容的类型
probability = 0.2 // 每次被触发时,生成该类型消息的概率,最大 1.0,表示 100%
num_range = [1, 10] // 消息数量的取值范围,实际数量从该范围内随机生成
priority = "Normal" // 消息发送的优先级,目前只有 Normal 和 High 两个选项
```

测试命令

```
// 启动三个正常节点
muta$ CONFIG=examples/config-1.toml GENESIS=examples/genesis.toml cargo run --release --example muta-chain
muta$ CONFIG=examples/config-2.toml GENESIS=examples/genesis.toml cargo run --release --example muta-chain
muta$ CONFIG=examples/config-3.toml GENESIS=examples/genesis.toml cargo run --release --example muta-chain

// 启动一个拜占庭节点
muta$ CONFIG=examples/config-4.toml GENESIS=examples/genesis.toml cargo run --release --example byzantine_node
```

# 3. 架构设计

核心需求:1. 方便随时增加新的测试用例;2. 提供未来可通过外部交互控制和组织编排恶意攻击的能力。

为了实现以上需求,将恶意消息的生成过程抽象成以下三个过程:

1. 配置文件 -> 行为生成器,由 `strategy` 模块实现

```rust
pub struct BehaviorGenerator {
    pub req_end: Option<String>,
    pub msg_type: MessageType,
    pub probability: f64,
    pub num_range: (u64, u64),
    pub priority: Priority,
}
```

2. 行为生成器 -> 行为,由 `commander` 模块实现

```rust
pub struct Behavior {
    pub msg_type: MessageType,
    pub msg_num: u64,
    pub request: Option<Request>, // 主动消息为 None, 被动消息为触发消息的内容
    pub send_to: Vec<Bytes>,
    pub priority: Priority,
}
```

3. 行为 -> 消息,由 `worker` 模块实现

由于通过配置生成的消息具有很强的随机性且无法从外部进行操纵,因此若未来有更加针对性的测试需求时,可以增加交互功能,通过交互命令指导 `commander` 模块触发特定的 `Behavior`。

![image](./resource/structure.png)

# 4.
文件列表 ``` byzantine ├── src │   ├── behaviors.rs # 定义 Behavior 和各种消息类型 │   ├── commander.rs # 根据 BehaviorGenerator 生成 Behavior │   ├── config.rs # 定义配置文件的数据结构 │   ├── default_start.rs # 启动逻辑 │   ├── invalid_types.rs # 生成各种恶意消息的方法实现 │   ├── lib.rs │   ├── message.rs # 将被动消息的触发消息传递给 commander │   ├── strategy.rs # 根据配置文件生成 BehaviorGenerator │   ├── utils.rs # 工具方法 └── └── worker.rs # 根据 Behavior 生成和发送消息 ``` ================================================ FILE: byzantine/generators.toml ================================================ interval = 500 ################# ##### NewTx ##### ################# [[list]] msg_type = { NewTxs = "Valid" } probability = 1.0 num_range = [10, 100] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidStruct" } probability = 0.01 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidHash" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidSig" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidChainID" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidCyclesPrice" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidCyclesLimit" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidNonceOfRandLen" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidNonceDup" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidRequest" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidTimeout" } probability = 1.0 num_range = [1, 10] priority = "Normal" [[list]] msg_type = { NewTxs = "InvalidSender" } probability = 1.0 num_range = [1, 10] priority = "Normal" ######################### ###### RecvProposal ##### ######################### [[list]] req_end 
= "/gossip/consensus/signed_proposal" msg_type = { RecvProposal = "InvalidHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/gossip/consensus/signed_proposal" msg_type = { RecvProposal = "InvalidHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/gossip/consensus/signed_proposal" msg_type = { RecvProposal = "NotExistTxs" } probability = 1.0 num_range = [1, 1000] priority = "High" [[list]] req_end = "/gossip/consensus/signed_proposal" msg_type = { RecvProposal = "InvalidStruct" } probability = 1.0 num_range = [1, 2] priority = "High" ########################## ####### SendProposal ##### ########################## [[list]] msg_type = { SendProposal = "InvalidStruct" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidChainId" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidPrevHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidExecHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidTimestamp" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidOrderRoot" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidSignedTxsHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidConfirmRoot" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidStateRoot" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidReceiptRoot" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidCyclesUsed" 
} probability = 1.0 num_range = [1, 10] priority = "High" # [[list]] msg_type = { SendProposal = "InvalidBlockProposer" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidProof" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidVersion" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidValidators" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidTxHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidSig" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidProposalHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidRound" } probability = 1.0 num_range = [1, 10] priority = "High" # ##[[list]] ##msg_type = { SendProposal = "InvalidContentStruct" } ## Cause panic ##probability = 1.0 ##num_range = [1, 10] ##priority = "High" # ##[[list]] ##msg_type = { SendProposal = "InvalidBlockHash" } ## Cause panic ##probability = 1.0 ##num_range = [1, 10] ##priority = "High" # [[list]] msg_type = { SendProposal = "InvalidLock" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendProposal = "InvalidProposalProposer" } probability = 1.0 num_range = [1, 10] priority = "High" #################### ##### SendVote ##### #################### [[list]] msg_type = { SendVote = "InvalidStruct" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendVote = "InvalidHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendVote = "InvalidRound" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendVote = "InvalidBlockHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { 
SendVote = "InvalidSig" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendVote = "InvalidVoter" } probability = 1.0 num_range = [1, 10] priority = "High" ################## ##### SendQC ##### ################## [[list]] msg_type = { SendQC = "InvalidStruct" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendQC = "InvalidHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendQC = "InvalidRound" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendQC = "InvalidBlockHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendQC = "InvalidSig" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendQC = "InvalidLeader" } probability = 1.0 num_range = [1, 10] priority = "High" ##################### ##### SendChoke ##### ##################### [[list]] msg_type = { SendChoke = "InvalidStruct" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendChoke = "InvalidHeight" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendChoke = "InvalidRound" } probability = 1.0 num_range = [1, 10] priority = "High" ##[[list]] ##msg_type = { SendChoke = "InvalidFrom" } ## Break liveness ##probability = 1.0 ##num_range = [1, 10] ##priority = "High" # [[list]] msg_type = { SendChoke = "InvalidSig" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] msg_type = { SendChoke = "InvalidAddress" } probability = 1.0 num_range = [1, 10] priority = "High" ##################### #### SendHeight ##### ##################### [[list]] msg_type = "SendHeight" probability = 1.0 num_range = [1, 10] priority = "High" ##################### #### PullTxs ##### ##################### [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidStruct" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = 
"/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidHash" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidSig" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidChainID" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidCyclesPrice" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidCyclesLimit" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidNonceOfRandLen" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidRequest" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidTimeout" } probability = 1.0 num_range = [1, 10] priority = "High" [[list]] req_end = "/rpc_call/mempool/pull_txs" msg_type = { PullTxs = "InvalidSender" } probability = 1.0 num_range = [1, 10] priority = "High" ================================================ FILE: byzantine/src/behaviors.rs ================================================ use bytes::Bytes; use derive_more::Constructor; use serde_derive::Deserialize; use core_consensus::message::{ Choke, Proposal, Vote, BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE, END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, QC, }; use core_mempool::{MsgNewTxs, MsgPullTxs, END_GOSSIP_NEW_TXS, RPC_PULL_TXS}; use protocol::traits::Priority; #[derive(Constructor, Clone, Debug)] pub struct Behavior { pub msg_type: MessageType, pub msg_num: u64, pub request: Option, pub send_to: Vec, pub priority: Priority, } #[allow(dead_code)] 
#[derive(Clone, Debug)] pub enum Request { NewTx(MsgNewTxs), PullTxs(MsgPullTxs), RecvProposal(Proposal), RecvVote(Vote), RecvQC(QC), RecvChoke(Choke), RecvHeight(u64), } impl Request { pub fn to_end(&self) -> &str { match self { Request::NewTx(_) => END_GOSSIP_NEW_TXS, Request::PullTxs(_) => RPC_PULL_TXS, Request::RecvProposal(_) => END_GOSSIP_SIGNED_PROPOSAL, Request::RecvVote(_) => END_GOSSIP_SIGNED_VOTE, Request::RecvQC(_) => END_GOSSIP_AGGREGATED_VOTE, Request::RecvChoke(_) => END_GOSSIP_SIGNED_CHOKE, Request::RecvHeight(_) => BROADCAST_HEIGHT, } } } #[allow(dead_code)] #[derive(Clone, Debug, Deserialize)] pub enum MessageType { NewTxs(NewTx), SendProposal(NewProposal), RecvProposal(PullTxs), SendVote(NewVote), RecvVote, SendQC(NewQC), RecvQC, SendChoke(NewChoke), RecvChoke, SendHeight, RecvHeight, PullTxs(NewTx), } #[derive(Clone, Debug, Deserialize)] pub enum NewTx { InvalidStruct, InvalidHash, InvalidSig, InvalidChainID, InvalidCyclesPrice, InvalidCyclesLimit, InvalidNonceOfRandLen, InvalidNonceDup, InvalidRequest, InvalidTimeout, InvalidSender, Valid, } #[derive(Clone, Debug, Deserialize)] pub enum PullTxs { Valid, InvalidStruct, InvalidHeight, InvalidHash, NotExistTxs, } #[derive(Clone, Debug, Deserialize)] pub enum NewProposal { Valid, InvalidStruct, InvalidChainId, InvalidPrevHash, InvalidHeight, InvalidExecHeight, InvalidTimestamp, InvalidOrderRoot, InvalidSignedTxsHash, InvalidConfirmRoot, InvalidStateRoot, InvalidReceiptRoot, InvalidCyclesUsed, InvalidBlockProposer, InvalidProof, InvalidVersion, InvalidValidators, InvalidTxHash, InvalidSig, InvalidProposalHeight, InvalidRound, InvalidContentStruct, InvalidBlockHash, InvalidLock, InvalidProposalProposer, } #[derive(Clone, Debug, Deserialize)] pub enum NewVote { InvalidStruct, InvalidHeight, InvalidRound, InvalidBlockHash, InvalidSig, InvalidVoter, } #[derive(Clone, Debug, Deserialize)] pub enum NewQC { InvalidStruct, InvalidHeight, InvalidRound, InvalidBlockHash, InvalidSig, InvalidLeader, } 
/// Ways to corrupt an outgoing signed choke.
#[derive(Clone, Debug, Deserialize)]
pub enum NewChoke {
    InvalidStruct,
    InvalidHeight,
    InvalidRound,
    InvalidFrom,
    InvalidSig,
    InvalidAddress,
}

#[derive(Clone, Debug, Deserialize)]
pub enum SyncPullBlock {
    Valid,
}

================================================
FILE: byzantine/src/commander.rs
================================================
use bytes::Bytes;
use futures::{
    channel::mpsc::{UnboundedReceiver, UnboundedSender},
    stream::StreamExt,
};
use tokio::time::{self, Duration};

use core_consensus::message::{
    BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE,
    END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE,
};
use protocol::traits::{Context, Priority};

use crate::behaviors::{Behavior, MessageType, PullTxs, Request};
use crate::config::Generators;
use crate::strategy::{BehaviorGenerator, DefaultStrategy, Strategy};

/// Turns configured behavior generators into concrete `Behavior`s -- both on
/// a fixed timer (active messages) and in reaction to network input (passive
/// messages) -- and forwards them to the worker.
pub struct Commander {
    generators: Generators,
    // NOTE(review): element/parameter types below were stripped by
    // extraction; restored as Bytes / Vec<Behavior> -- confirm upstream.
    pub_key_list: Vec<Bytes>,
    to_worker: UnboundedSender<(Context, Vec<Behavior>)>,
    from_network: UnboundedReceiver<(Context, Request)>,
}

impl Commander {
    pub fn new(
        generators: Generators,
        pub_key_list: Vec<Bytes>,
        to_worker: UnboundedSender<(Context, Vec<Behavior>)>,
        from_network: UnboundedReceiver<(Context, Request)>,
    ) -> Self {
        Commander {
            generators,
            pub_key_list,
            to_worker,
            from_network,
        }
    }

    /// Event loop: every `interval` milliseconds emit behaviors for active
    /// messages; whenever the network handler forwards a request, emit the
    /// passive behaviors it triggers. Send failures to the worker are
    /// deliberately ignored.
    pub async fn run(mut self) {
        let mut list = self.generators.list.clone();
        add_primitive_generator(&mut list);
        let strategy = DefaultStrategy::new(self.pub_key_list.clone(), list);
        let interval = self.generators.interval;
        let mut cnt = 0;

        loop {
            let mut delay = time::delay_for(Duration::from_millis(interval));

            tokio::select! {
                _ = &mut delay => {
                    let behaviors = strategy.get_behaviors(None);
                    cnt += behaviors.len();
                    println!("commander is working, accumulative gen {} behaviors", cnt);
                    let _ = self.to_worker.unbounded_send((Context::default(), behaviors));
                }

                Some((ctx, request)) = self.from_network.next() => {
                    let behaviors = strategy.get_behaviors(Some(request));
                    cnt += behaviors.len();
                    println!("commander receive message from network, accumulative gen {} behaviors", cnt);
                    let _ = self.to_worker.unbounded_send((ctx, behaviors));
                }
            }
        }
    }
}

/// Always react to incoming consensus messages with at least one well-formed
/// response, so the byzantine node keeps participating in the protocol.
fn add_primitive_generator(list: &mut Vec<BehaviorGenerator>) {
    let valid_recv_proposal_generator = BehaviorGenerator {
        req_end: Some(END_GOSSIP_SIGNED_PROPOSAL.to_string()),
        msg_type: MessageType::RecvProposal(PullTxs::Valid),
        probability: 1.0,
        num_range: (1, 2),
        priority: Priority::High,
    };
    list.push(valid_recv_proposal_generator);

    let valid_recv_vote_generator = BehaviorGenerator {
        req_end: Some(END_GOSSIP_SIGNED_VOTE.to_string()),
        msg_type: MessageType::RecvVote,
        probability: 1.0,
        num_range: (1, 2),
        priority: Priority::High,
    };
    list.push(valid_recv_vote_generator);

    let valid_recv_qc_generator = BehaviorGenerator {
        req_end: Some(END_GOSSIP_AGGREGATED_VOTE.to_string()),
        msg_type: MessageType::RecvQC,
        probability: 1.0,
        num_range: (1, 2),
        priority: Priority::High,
    };
    list.push(valid_recv_qc_generator);

    let valid_recv_choke_generator = BehaviorGenerator {
        req_end: Some(END_GOSSIP_SIGNED_CHOKE.to_string()),
        msg_type: MessageType::RecvChoke,
        probability: 1.0,
        num_range: (1, 2),
        priority: Priority::High,
    };
    list.push(valid_recv_choke_generator);

    let valid_recv_height_generator = BehaviorGenerator {
        req_end: Some(BROADCAST_HEIGHT.to_string()),
        msg_type: MessageType::RecvHeight,
        probability: 1.0,
        num_range: (1, 2),
        priority: Priority::High,
    };
    list.push(valid_recv_height_generator);
}

================================================
FILE: byzantine/src/config.rs
================================================
use std::collections::HashMap;
use std::net::SocketAddr;
use
std::path::PathBuf; use serde_derive::Deserialize; use core_mempool::{DEFAULT_BROADCAST_TXS_INTERVAL, DEFAULT_BROADCAST_TXS_SIZE}; use protocol::types::Hex; use crate::strategy::BehaviorGenerator; #[derive(Debug, Deserialize)] pub struct ConfigGraphQL { pub listening_address: SocketAddr, pub graphql_uri: String, pub graphiql_uri: String, #[serde(default)] pub workers: usize, #[serde(default)] pub maxconn: usize, #[serde(default)] pub max_payload_size: usize, pub tls: Option, } #[derive(Debug, Deserialize)] pub struct ConfigGraphQLTLS { pub private_key_file_path: PathBuf, pub certificate_chain_file_path: PathBuf, } #[derive(Debug, Deserialize)] pub struct ConfigNetwork { pub bootstraps: Option>, pub allowlist: Option>, pub allowlist_only: Option, pub trust_interval_duration: Option, pub trust_max_history_duration: Option, pub fatal_ban_duration: Option, pub soft_ban_duration: Option, pub max_connected_peers: Option, pub same_ip_conn_limit: Option, pub inbound_conn_limit: Option, pub listening_address: SocketAddr, pub rpc_timeout: Option, pub selfcheck_interval: Option, pub send_buffer_size: Option, pub write_timeout: Option, pub recv_buffer_size: Option, pub max_frame_length: Option, pub max_wait_streams: Option, pub ping_interval: Option, } #[derive(Debug, Deserialize)] pub struct ConfigNetworkBootstrap { pub peer_id: String, pub address: String, } #[derive(Debug, Deserialize)] pub struct ConfigConsensus { pub sync_txs_chunk_size: usize, } impl Default for ConfigConsensus { fn default() -> Self { Self { sync_txs_chunk_size: 5000, } } } fn default_broadcast_txs_size() -> usize { DEFAULT_BROADCAST_TXS_SIZE } fn default_broadcast_txs_interval() -> u64 { DEFAULT_BROADCAST_TXS_INTERVAL } #[derive(Debug, Deserialize)] pub struct ConfigMempool { pub pool_size: u64, #[serde(default = "default_broadcast_txs_size")] pub broadcast_txs_size: usize, #[serde(default = "default_broadcast_txs_interval")] pub broadcast_txs_interval: u64, } #[derive(Debug, Deserialize)] pub struct 
ConfigExecutor { pub light: bool, pub triedb_cache_size: usize, } #[derive(Debug, Deserialize)] pub struct ConfigRocksDB { pub max_open_files: i32, } impl Default for ConfigRocksDB { fn default() -> Self { Self { max_open_files: 64 } } } #[derive(Debug, Deserialize)] pub struct ConfigLogger { pub filter: String, pub log_to_console: bool, pub console_show_file_and_line: bool, pub log_to_file: bool, pub metrics: bool, pub log_path: PathBuf, #[serde(default)] pub modules_level: HashMap, } impl Default for ConfigLogger { fn default() -> Self { Self { filter: "info".into(), log_to_console: true, console_show_file_and_line: false, log_to_file: true, metrics: true, log_path: "logs/".into(), modules_level: HashMap::new(), } } } #[derive(Debug, Deserialize)] pub struct ConfigAPM { pub service_name: String, pub tracing_address: SocketAddr, pub tracing_batch_size: Option, } #[derive(Debug, Deserialize)] pub struct Config { // crypto pub privkey: Hex, // db config pub data_path: PathBuf, pub graphql: ConfigGraphQL, pub network: ConfigNetwork, pub mempool: ConfigMempool, pub executor: ConfigExecutor, #[serde(default)] pub consensus: ConfigConsensus, #[serde(default)] pub logger: ConfigLogger, #[serde(default)] pub rocksdb: ConfigRocksDB, pub apm: Option, } impl Config { pub fn data_path_for_state(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("rocksdb"); path_state.push("state_data"); path_state } pub fn data_path_for_block(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("rocksdb"); path_state.push("block_data"); path_state } pub fn data_path_for_txs_wal(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("txs_wal"); path_state } } #[derive(Debug, Deserialize)] pub struct Generators { pub interval: u64, // ms pub list: Vec, } ================================================ FILE: byzantine/src/default_start.rs ================================================ use 
std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; use bytes::Bytes; use common_crypto::{ BlsCommonReference, BlsPrivateKey, BlsPublicKey, PublicKey, Secp256k1PrivateKey, ToPublicKey, UncompressedPublicKey, }; use futures::channel::mpsc::unbounded; use futures::future; #[cfg(unix)] use tokio::signal::unix::{self as os_impl}; use core_consensus::message::{ BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE, END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, }; use core_consensus::util::OverlordCrypto; use core_mempool::{MsgPushTxs, END_GOSSIP_NEW_TXS, RPC_PULL_TXS, RPC_RESP_PULL_TXS}; use core_network::{NetworkConfig, NetworkService, PeerId, PeerIdExt}; use protocol::traits::{Context, Network}; use protocol::types::{Address, Genesis, Metadata, Validator}; use protocol::ProtocolResult; use crate::commander::Commander; use crate::config::{Config, Generators}; use crate::message::{ ChokeMessageHandler, NewTxsHandler, ProposalMessageHandler, PullTxsHandler, QCMessageHandler, RemoteHeightMessageHandler, VoteMessageHandler, }; use crate::worker::Worker; pub async fn start(config: Config, genesis: Genesis, generators: Generators) -> ProtocolResult<()> { log::info!("byzantine node starts"); // Init network let network_config = NetworkConfig::new() .max_connections(config.network.max_connected_peers)? .same_ip_conn_limit(config.network.same_ip_conn_limit) .inbound_conn_limit(config.network.inbound_conn_limit)? .allowlist_only(config.network.allowlist_only) .peer_trust_metric( config.network.trust_interval_duration, config.network.trust_max_history_duration, )? 
.peer_soft_ban(config.network.soft_ban_duration) .peer_fatal_ban(config.network.fatal_ban_duration) .rpc_timeout(config.network.rpc_timeout) .ping_interval(config.network.ping_interval) .selfcheck_interval(config.network.selfcheck_interval) .max_wait_streams(config.network.max_wait_streams) .max_frame_length(config.network.max_frame_length) .send_buffer_size(config.network.send_buffer_size) .write_timeout(config.network.write_timeout) .recv_buffer_size(config.network.recv_buffer_size); let network_privkey = config.privkey.as_string_trim0x(); let mut bootstrap_pairs = vec![]; if let Some(bootstrap) = &config.network.bootstraps { for bootstrap in bootstrap.iter() { bootstrap_pairs.push((bootstrap.peer_id.to_owned(), bootstrap.address.to_owned())); } } let allowlist = config.network.allowlist.clone().unwrap_or_default(); let network_config = network_config .bootstraps(bootstrap_pairs)? .allowlist(allowlist)? .secio_keypair(network_privkey)?; let mut network_service = NetworkService::new(network_config); network_service .listen(config.network.listening_address) .await?; // self private key let hex_privkey = hex::decode(config.privkey.as_string_trim0x()).expect("decode privkey error!"); let my_privkey = Secp256k1PrivateKey::try_from(hex_privkey.as_ref()).expect("get privkey failed!"); let my_pubkey = my_privkey.pub_key(); let my_address = Address::from_pubkey_bytes(my_pubkey.to_uncompressed_bytes())?; // get pub_key_list let metadata: Metadata = serde_json::from_str(genesis.get_payload("metadata")).expect("Decode metadata failed!"); let pub_key_list: Vec = metadata .verifier_list .iter() .map(|v| v.pub_key.decode()) .filter(|addr| addr != &my_pubkey.to_bytes()) .collect(); let validators: Vec = metadata .verifier_list .iter() .map(|v| Validator { pub_key: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect(); assert_ne!( pub_key_list.len(), 0, "It's meaningless to test a system contains only one node which is a byzantine node" 
); // get crypto let mut bls_pub_keys = HashMap::new(); for validator_extend in metadata.verifier_list.iter() { let address = validator_extend.pub_key.decode(); let hex_pubkey = hex::decode(validator_extend.bls_pub_key.as_string_trim0x()) .expect("decode pubkey failed"); let pub_key = BlsPublicKey::try_from(hex_pubkey.as_ref()).expect("try into BlsPublicKey failed"); bls_pub_keys.insert(address, pub_key); } let mut priv_key = Vec::new(); priv_key.extend_from_slice(&[0u8; 16]); let mut tmp = hex::decode(config.privkey.as_string_trim0x()).unwrap(); priv_key.append(&mut tmp); let bls_priv_key = BlsPrivateKey::try_from(priv_key.as_ref()).expect("try into BlsPrivateKey failed"); let hex_common_ref = hex::decode(metadata.common_ref.as_string_trim0x()).expect("decode common ref failed"); let common_ref: BlsCommonReference = std::str::from_utf8(hex_common_ref.as_ref()) .expect("transfer common_ref failed") .into(); let crypto = OverlordCrypto::new(bls_priv_key, bls_pub_keys, common_ref); let (network_tx, network_rx) = unbounded(); let (worker_tx, worker_rx) = unbounded(); // set chain id in network network_service.set_chain_id(metadata.chain_id.clone()); let peer_ids = metadata .verifier_list .iter() .map(|v| PeerId::from_pubkey_bytes(v.pub_key.decode()).map(PeerIdExt::into_bytes_ext)) .collect::, _>>()?; network_service .handle() .tag_consensus(Context::new(), peer_ids)?; // register broadcast new transaction network_service .register_endpoint_handler(END_GOSSIP_NEW_TXS, NewTxsHandler::new(network_tx.clone()))?; // register pull txs from other node network_service .register_endpoint_handler(RPC_PULL_TXS, PullTxsHandler::new(network_tx.clone()))?; network_service.register_rpc_response::(RPC_RESP_PULL_TXS)?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_PROPOSAL, ProposalMessageHandler::new(network_tx.clone()), )?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_VOTE, VoteMessageHandler::new(network_tx.clone()), )?; 
network_service.register_endpoint_handler( END_GOSSIP_AGGREGATED_VOTE, QCMessageHandler::new(network_tx.clone()), )?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_CHOKE, ChokeMessageHandler::new(network_tx.clone()), )?; network_service.register_endpoint_handler( BROADCAST_HEIGHT, RemoteHeightMessageHandler::new(network_tx.clone()), )?; let commander = Commander::new(generators, pub_key_list, worker_tx, network_rx); let worker = Worker::new( my_address, my_pubkey.to_bytes(), metadata, validators, crypto, Arc::new(network_service.handle()), worker_rx, ); // Run network tokio::spawn(network_service); // Run worker tokio::spawn(async move { worker.run().await; }); // run commander let (abortable_demon, abort_handle) = future::abortable(commander.run()); let exec_handler = tokio::task::spawn_local(abortable_demon); let ctrl_c_handler = tokio::task::spawn_local(async { #[cfg(windows)] let _ = tokio::signal::ctrl_c().await; #[cfg(unix)] { let mut sigtun_int = os_impl::signal(os_impl::SignalKind::interrupt()).unwrap(); let mut sigtun_term = os_impl::signal(os_impl::SignalKind::terminate()).unwrap(); tokio::select! { _ = sigtun_int.recv() => {} _ = sigtun_term.recv() => {} }; } }); tokio::select! 
{ _ = exec_handler =>{log::error!("exec_daemon is down, quit.")}, _ = ctrl_c_handler =>{log::info!("ctrl + c is pressed, quit.")}, }; abort_handle.abort(); Ok(()) } ================================================ FILE: byzantine/src/invalid_types.rs ================================================ use std::error::Error; use std::sync::Arc; use bytes::Bytes; use derive_more::Constructor; use overlord::types::SignedProposal; use overlord::{Codec, Crypto}; use rlp::Encodable; use common_crypto::Secp256k1PrivateKey; use core_consensus::util::OverlordCrypto; use core_mempool::MsgPullTxs; use protocol::traits::MessageCodec; use protocol::types::{Address, Hash, Metadata, SignedTransaction, Validator}; use protocol::ProtocolResult; use crate::utils::{ gen_invalid_address, gen_invalid_aggregate_sig, gen_invalid_chain_id, gen_invalid_content_struct_proposal, gen_invalid_from, gen_invalid_hash, gen_invalid_lock, gen_invalid_proof, gen_invalid_request, gen_invalid_sig, gen_invalid_validators, gen_positive_range, gen_random_bytes, gen_range, gen_signed_proposal_from_header, gen_signed_tx, gen_valid_block, gen_valid_block_header, gen_valid_choke, gen_valid_hash, gen_valid_proposal, gen_valid_qc, gen_valid_raw_tx, gen_valid_signed_choke, gen_valid_signed_proposal, gen_valid_signed_tx, gen_valid_signed_vote, gen_valid_vote, }; use crate::worker::State; #[derive(Constructor, Clone, Debug, Eq, PartialEq)] pub struct InvalidStruct { pub inner: Bytes, } impl InvalidStruct { pub fn gen(len: usize) -> Self { InvalidStruct { inner: gen_random_bytes(len), } } } impl MessageCodec for InvalidStruct { fn encode(&mut self) -> ProtocolResult { Ok(self.inner.clone()) } fn decode(bytes: Bytes) -> ProtocolResult { Ok(InvalidStruct::new(bytes)) } } impl Codec for InvalidStruct { fn encode(&self) -> Result> { let bytes = self.inner.clone(); Ok(bytes) } fn decode(data: Bytes) -> Result> { Ok(InvalidStruct::new(data)) } } //################################ //########## NewChoke ########## 
//########## ###################### pub fn gen_invalid_struct_new_choke( _state: &State, _crypto: &Arc, _my_pub_key: &Bytes, ) -> Vec { gen_random_bytes(100).to_vec() } pub fn gen_invalid_height_new_choke( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let mut choke = gen_valid_choke(state, my_pub_key); choke.height = gen_positive_range(state.height, 20); let signed_choke = gen_valid_signed_choke(choke, crypto, my_pub_key); signed_choke.rlp_bytes() } pub fn gen_invalid_round_new_choke( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let mut choke = gen_valid_choke(state, my_pub_key); choke.round = gen_positive_range(state.round, 20); let signed_choke = gen_valid_signed_choke(choke, crypto, my_pub_key); signed_choke.rlp_bytes() } pub fn gen_invalid_from_new_vote( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let mut choke = gen_valid_choke(state, my_pub_key); choke.from = gen_invalid_from(); let signed_choke = gen_valid_signed_choke(choke, crypto, my_pub_key); signed_choke.rlp_bytes() } pub fn gen_invalid_sig_new_choke( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let choke = gen_valid_choke(state, my_pub_key); let mut signed_choke = gen_valid_signed_choke(choke, crypto, my_pub_key); signed_choke.signature = gen_invalid_sig(); signed_choke.rlp_bytes() } pub fn gen_invalid_address_new_choke( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let choke = gen_valid_choke(state, my_pub_key); let mut signed_choke = gen_valid_signed_choke(choke, crypto, my_pub_key); signed_choke.address = gen_invalid_address().as_bytes(); signed_choke.rlp_bytes() } //############################# //########## NewQC ########## //########## ################### pub fn gen_invalid_struct_new_qc(_state: &State, _my_pub_key: &Bytes) -> Vec { gen_random_bytes(100).to_vec() } pub fn gen_invalid_height_new_qc(state: &State, my_pub_key: &Bytes) -> Vec { let mut qc = gen_valid_qc(state, my_pub_key); qc.height = 
gen_positive_range(state.height, 20); qc.rlp_bytes() } pub fn gen_invalid_round_new_qc(state: &State, my_pub_key: &Bytes) -> Vec { let mut qc = gen_valid_qc(state, my_pub_key); qc.round = gen_positive_range(state.round, 20); qc.rlp_bytes() } pub fn gen_invalid_block_hash_new_qc(state: &State, my_pub_key: &Bytes) -> Vec { let mut qc = gen_valid_qc(state, my_pub_key); qc.block_hash = gen_invalid_hash().as_bytes(); qc.rlp_bytes() } pub fn gen_invalid_sig_new_qc(state: &State, my_pub_key: &Bytes) -> Vec { let mut qc = gen_valid_qc(state, my_pub_key); qc.signature = gen_invalid_aggregate_sig(); qc.rlp_bytes() } pub fn gen_invalid_leader_new_qc(state: &State, my_pub_key: &Bytes) -> Vec { let mut qc = gen_valid_qc(state, my_pub_key); qc.leader = gen_invalid_address().as_bytes(); qc.rlp_bytes() } //############################### //########## NewVote ########## //########## ##################### pub fn gen_invalid_struct_new_vote( _state: &State, _crypto: &Arc, _my_pub_key: &Bytes, ) -> Vec { gen_random_bytes(100).to_vec() } pub fn gen_invalid_height_new_vote( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let mut vote = gen_valid_vote(state); vote.height = gen_positive_range(state.height, 20); let signed_vote = gen_valid_signed_vote(vote, crypto, my_pub_key); signed_vote.rlp_bytes() } pub fn gen_invalid_round_new_vote( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let mut vote = gen_valid_vote(state); vote.round = gen_positive_range(state.round, 20); let signed_vote = gen_valid_signed_vote(vote, crypto, my_pub_key); signed_vote.rlp_bytes() } pub fn gen_invalid_block_hash_new_vote( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let mut vote = gen_valid_vote(state); vote.block_hash = gen_invalid_hash().as_bytes(); let signed_vote = gen_valid_signed_vote(vote, crypto, my_pub_key); signed_vote.rlp_bytes() } pub fn gen_invalid_sig_new_vote( state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let vote = gen_valid_vote(state); let 
mut signed_vote = gen_valid_signed_vote(vote, crypto, my_pub_key);
    signed_vote.signature = gen_invalid_sig();
    signed_vote.rlp_bytes()
}

/// SignedVote whose voter field is random bytes.
pub fn gen_invalid_voter_new_vote(
    state: &State,
    crypto: &Arc<OverlordCrypto>,
    my_pub_key: &Bytes,
) -> Vec<u8> {
    let vote = gen_valid_vote(state);
    let mut signed_vote = gen_valid_signed_vote(vote, crypto, my_pub_key);
    signed_vote.voter = gen_random_bytes(100);
    signed_vote.rlp_bytes()
}

//###################################
//########## NewProposal ##########
//########## #########################

// NOTE(review): `Vec<Hash>` / `Vec<u8>` / `Arc<OverlordCrypto>` generics below
// were stripped by extraction and reconstructed; confirm against the original.

/// A fully valid proposal with random (but well-formed) tx hashes.
pub fn gen_valid_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let order_tx_hashes: Vec<Hash> = (0..gen_range(0, 1000)).map(|_| gen_valid_hash()).collect();
    let propose_tx_hashes: Vec<Hash> = (0..gen_range(0, 1000)).map(|_| gen_valid_hash()).collect();
    let header = gen_valid_block_header(
        state,
        metadata,
        my_address,
        validators,
        order_tx_hashes.clone(),
    );
    let block = gen_valid_block(header, order_tx_hashes);
    let proposal = gen_valid_proposal(block, state, my_pub_key, propose_tx_hashes);
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Proposal naming a bogus proposer.
pub fn gen_invalid_prop_proposer_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    let block = gen_valid_block(header, vec![]);
    let mut proposal = gen_valid_proposal(block, state, my_pub_key, vec![]);
    proposal.proposer = gen_invalid_address().as_bytes();
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Proposal carrying an invalid PoLC lock.
pub fn gen_invalid_lock_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    let block = gen_valid_block(header, vec![]);
    let mut proposal = gen_valid_proposal(block, state, my_pub_key, vec![]);
    proposal.lock = Some(gen_invalid_lock());
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Proposal whose block hash is bogus.
pub fn gen_invalid_block_hash_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    let block = gen_valid_block(header, vec![]);
    let mut proposal = gen_valid_proposal(block, state, my_pub_key, vec![]);
    proposal.block_hash = gen_invalid_hash().as_bytes();
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Proposal whose content is an undecodable InvalidStruct, but validly signed.
pub fn gen_invalid_content_struct_new_proposal(
    state: &State,
    _metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    _my_address: &Address,
    my_pub_key: &Bytes,
    _validators: &[Validator],
) -> Vec<u8> {
    let proposal = gen_invalid_content_struct_proposal(state, my_pub_key);
    let signature = crypto
        .sign(crypto.hash(proposal.content.inner.clone()))
        .expect("sign proposal failed");
    let signed_proposal = SignedProposal {
        signature,
        proposal,
    };
    signed_proposal.rlp_bytes()
}

/// Proposal with a round ahead of the real one.
pub fn gen_invalid_round_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    let block = gen_valid_block(header, vec![]);
    let mut proposal = gen_valid_proposal(block, state, my_pub_key, vec![]);
    proposal.round = gen_positive_range(state.round, 20);
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Proposal with a height ahead of the real one.
pub fn gen_invalid_prop_height_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    let block = gen_valid_block(header, vec![]);
    let mut proposal = gen_valid_proposal(block, state, my_pub_key, vec![]);
    proposal.height = gen_positive_range(state.height, 20);
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Well-formed proposal carrying a garbage signature.
pub fn gen_invalid_sig_new_proposal(
    state: &State,
    metadata: &Metadata,
    _crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    let block = gen_valid_block(header, vec![]);
    let proposal = gen_valid_proposal(block, state, my_pub_key, vec![]);
    let signed_proposal = SignedProposal {
        proposal,
        signature: gen_invalid_sig(),
    };
    signed_proposal.rlp_bytes()
}

/// Proposal whose ordered and proposed tx hashes are all malformed.
pub fn gen_invalid_tx_hash_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let order_tx_hashes: Vec<Hash> = (0..gen_range(0, 1000))
        .map(|_| gen_invalid_hash())
        .collect();
    let propose_tx_hashes: Vec<Hash> = (0..gen_range(0, 1000))
        .map(|_| gen_invalid_hash())
        .collect();
    let header = gen_valid_block_header(
        state,
        metadata,
        my_address,
        validators,
        order_tx_hashes.clone(),
    );
    let block = gen_valid_block(header, order_tx_hashes);
    let proposal = gen_valid_proposal(block, state, my_pub_key, propose_tx_hashes);
    let signed_proposal = gen_valid_signed_proposal(proposal, crypto);
    signed_proposal.rlp_bytes()
}

/// Proposal whose header names an invalid validator set.
pub fn gen_invalid_validators_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.validators = gen_invalid_validators();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries a random validator version.
pub fn gen_invalid_version_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.validator_version = gen_range(u64::MIN, u64::MAX);
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries an invalid proof.
pub fn gen_invalid_proof_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.proof = gen_invalid_proof();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header names a bogus block proposer.
pub fn gen_invalid_block_proposer_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.proposer = gen_invalid_address();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header claims a random cycles-used value.
pub fn gen_invalid_cycle_used_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.cycles_used = vec![gen_range(u64::MIN, u64::MAX)];
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries a malformed receipt root.
pub fn gen_invalid_receipt_root_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.receipt_root = vec![gen_invalid_hash()];
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries a malformed state root.
pub fn gen_invalid_state_root_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.state_root = gen_invalid_hash();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries a malformed confirm root.
pub fn gen_invalid_confirm_root_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.confirm_root = vec![gen_invalid_hash()];
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries a malformed ordered-signed-txs hash.
pub fn gen_invalid_signed_tx_hash_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.order_signed_transactions_hash = gen_invalid_hash();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header carries a malformed order root.
pub fn gen_invalid_order_root_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.order_root = gen_invalid_hash();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header timestamp jumps far ahead of the previous block.
pub fn gen_invalid_timestamp_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.timestamp = gen_positive_range(state.prev_timestamp, 1_000_000);
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header exec height is ahead of the real one.
pub fn gen_invalid_exec_height_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.exec_height = gen_positive_range(state.exec_height, 20);
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header height is ahead of the real one.
pub fn gen_invalid_height_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.height = gen_positive_range(state.height, 20);
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header prev-hash is malformed.
pub fn gen_invalid_prev_hash_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.prev_hash = gen_invalid_hash();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Proposal whose header names the wrong chain id.
pub fn gen_invalid_chain_id_new_proposal(
    state: &State,
    metadata: &Metadata,
    crypto: &Arc<OverlordCrypto>,
    my_address: &Address,
    my_pub_key: &Bytes,
    validators: &[Validator],
) -> Vec<u8> {
    let mut header = gen_valid_block_header(state, metadata, my_address, validators, vec![]);
    header.chain_id = gen_invalid_chain_id();
    gen_signed_proposal_from_header(header, state, crypto, my_pub_key)
}

/// Random garbage where a SignedProposal is expected.
pub fn gen_invalid_struct_new_proposal(
    _state: &State,
    _metadata: &Metadata,
    _crypto: &Arc<OverlordCrypto>,
    _my_address: &Address,
    _my_pub_key: &Bytes,
    _validators: &[Validator],
) -> Vec<u8> {
    gen_random_bytes(1000).to_vec()
}

//###############################
//########## PullTxs ##########
//########## #####################

/// Pull-txs request pointing at a height ahead of the real one.
pub fn gen_invalid_height_pull_txs(height: u64) -> MsgPullTxs {
    let tx_num = gen_positive_range(100, 300);
    let tx_hashes: Vec<Hash> = (0..tx_num).map(|_| gen_valid_hash()).collect();
    MsgPullTxs {
        height: Some(gen_positive_range(height, 100)),
        hashes: tx_hashes,
    }
}

/// Pull-txs request carrying malformed tx hashes.
pub fn gen_invalid_hash_pull_txs(_height: u64) -> MsgPullTxs {
    let tx_num = gen_positive_range(100, 300);
    let tx_hashes: Vec<Hash> = (0..tx_num).map(|_| gen_invalid_hash()).collect();
    MsgPullTxs {
        height: None,
        hashes: tx_hashes,
    }
}

pub fn
gen_not_exists_txs_pull_txs(_height: u64) -> MsgPullTxs { let tx_num = gen_positive_range(100, 300); let tx_hashes: Vec = (0..tx_num).map(|_| gen_valid_hash()).collect(); MsgPullTxs { height: None, hashes: tx_hashes, } } //############################# //########## NewTx ########## //########## ################### pub fn gen_invalid_hash_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let raw = gen_valid_raw_tx(pri_key, height, metadata); gen_signed_tx(raw, pri_key, Some(gen_random_bytes(100)), None) } pub fn gen_invalid_sig_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let raw = gen_valid_raw_tx(pri_key, height, metadata); gen_signed_tx(raw, pri_key, None, Some(gen_random_bytes(100))) } pub fn gen_invalid_chain_id_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.chain_id = gen_invalid_chain_id(); gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_cycles_price_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.cycles_price = gen_range(metadata.cycles_price + 1, u64::MAX); gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_cycles_limit_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.cycles_limit = gen_range(metadata.cycles_limit + 1, u64::MAX); gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_nonce_of_rand_len_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.nonce = gen_invalid_hash(); gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_nonce_dup_signed_tx( pri_key: &Secp256k1PrivateKey, 
height: u64, metadata: &Metadata, nonce: Hash, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.nonce = nonce; gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_request_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.request = gen_invalid_request(); gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_timeout_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.timeout = gen_positive_range(height + metadata.timeout_gap, 100); gen_valid_signed_tx(raw, pri_key) } pub fn gen_invalid_sender_signed_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let mut raw = gen_valid_raw_tx(pri_key, height, metadata); raw.sender = gen_invalid_address(); gen_valid_signed_tx(raw, pri_key) } pub fn gen_valid_tx( pri_key: &Secp256k1PrivateKey, height: u64, metadata: &Metadata, ) -> SignedTransaction { let raw = gen_valid_raw_tx(pri_key, height, metadata); gen_valid_signed_tx(raw, pri_key) } ================================================ FILE: byzantine/src/lib.rs ================================================ #![allow(clippy::mutable_key_type)] pub mod config; pub mod default_start; mod behaviors; mod commander; mod invalid_types; mod message; mod strategy; mod utils; mod worker; ================================================ FILE: byzantine/src/message.rs ================================================ use async_trait::async_trait; use derive_more::Constructor; use futures::channel::mpsc::UnboundedSender; use core_consensus::message::{Choke, Proposal, Vote, QC}; use core_mempool::{MsgNewTxs, MsgPullTxs}; use protocol::traits::{Context, MessageHandler, TrustFeedback}; use crate::behaviors::Request; #[derive(Constructor)] pub struct NewTxsHandler { to_commander: 
UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for NewTxsHandler {
    type Message = MsgNewTxs;

    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::NewTx(msg)))
            .unwrap();
        TrustFeedback::Neutral
    }
}

/// Forwards pull-txs RPC requests to the commander.
#[derive(Constructor)]
pub struct PullTxsHandler {
    to_commander: UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for PullTxsHandler {
    type Message = MsgPullTxs;

    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::PullTxs(msg)))
            .unwrap();
        TrustFeedback::Neutral
    }
}

/// Forwards signed proposals to the commander.
#[derive(Constructor)]
pub struct ProposalMessageHandler {
    to_commander: UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for ProposalMessageHandler {
    type Message = Proposal;

    #[muta_apm::derive::tracing_span(name = "handle_proposal", kind = "consensus.message")]
    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::RecvProposal(msg)))
            .unwrap();
        TrustFeedback::Good
    }
}

/// Forwards signed votes to the commander.
#[derive(Constructor)]
pub struct VoteMessageHandler {
    to_commander: UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for VoteMessageHandler {
    type Message = Vote;

    #[muta_apm::derive::tracing_span(name = "handle_vote", kind = "consensus.message")]
    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::RecvVote(msg)))
            .unwrap();
        TrustFeedback::Good
    }
}

/// Forwards aggregated QCs to the commander.
#[derive(Constructor)]
pub struct QCMessageHandler {
    to_commander: UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for QCMessageHandler {
    type Message = QC;

    // NOTE(review): span name "handle_vote" looks copy-pasted from the vote
    // handler (also on the choke/height handlers below); kept as-is since it
    // is runtime-visible tracing data -- confirm intent.
    #[muta_apm::derive::tracing_span(name = "handle_vote", kind = "consensus.message")]
    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::RecvQC(msg)))
            .unwrap();
        TrustFeedback::Good
    }
}

/// Forwards signed chokes to the commander.
#[derive(Constructor)]
pub struct ChokeMessageHandler {
    to_commander: UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for ChokeMessageHandler {
    type Message = Choke;

    #[muta_apm::derive::tracing_span(name = "handle_vote", kind = "consensus.message")]
    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::RecvChoke(msg)))
            .unwrap();
        TrustFeedback::Good
    }
}

/// Forwards remote-height broadcasts to the commander.
#[derive(Constructor)]
pub struct RemoteHeightMessageHandler {
    to_commander: UnboundedSender<(Context, Request)>,
}

#[async_trait]
impl MessageHandler for RemoteHeightMessageHandler {
    type Message = u64;

    #[muta_apm::derive::tracing_span(name = "handle_vote", kind = "consensus.message")]
    async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback {
        self.to_commander
            .unbounded_send((ctx, Request::RecvHeight(msg)))
            .unwrap();
        TrustFeedback::Good
    }
}

================================================
FILE: byzantine/src/strategy.rs
================================================
use bytes::Bytes;
use derive_more::Constructor;
use rand::seq::SliceRandom;
use serde_derive::Deserialize;

use protocol::traits::Priority;

use crate::behaviors::{Behavior, MessageType, Request};
use crate::utils::{gen_bool, gen_range};

/// Decides which byzantine behaviors to emit for a given (optional) request.
pub trait Strategy {
    fn get_behaviors(&self, request: Option<Request>) -> Vec<Behavior>;
}

/// One configured behavior generator (deserialized from generators.toml).
#[derive(Constructor, Clone, Debug, Deserialize)]
pub struct BehaviorGenerator {
    // NOTE(review): element type stripped by extraction; assumed to match
    // `Request::to_end()`'s return type -- confirm against the original.
    pub req_end:     Option<String>,
    pub msg_type:    MessageType,
    pub probability: f64,
    pub num_range:   (u64, u64),
    pub priority:    Priority,
}

impl BehaviorGenerator {
    /// With probability `self.probability`, produce a Behavior carrying a
    /// random message count (within `num_range`) aimed at a random subset of
    /// peers; otherwise `None`.
    fn gen_behavior(
        &self,
        pub_key_list: &mut Vec<Bytes>,
        req: Option<Request>,
    ) -> Option<Behavior> {
        if gen_bool(self.probability) {
            let msg_num = gen_range(self.num_range.0, self.num_range.1);
            let send_to = gen_rand_pub_key_list(pub_key_list);
            let behavior =
                Behavior::new(self.msg_type.clone(), msg_num, req, send_to, self.priority);
            Some(behavior)
        } else {
            None
        }
    }
}

#[derive(Constructor, Clone,
Debug)]
pub struct DefaultStrategy {
    // NOTE(review): generic arguments lost in extraction; restored from usage.
    pub_key_list: Vec<Bytes>,
    generators: Vec<BehaviorGenerator>,
}

impl Strategy for DefaultStrategy {
    /// Run every generator whose trigger matches `request` (generators with
    /// `req_end == None` fire only on timer ticks, i.e. when `request` is
    /// None) and collect the behaviors that actually triggered.
    fn get_behaviors(&self, request: Option<Request>) -> Vec<Behavior> {
        let mut pub_key_list = self.pub_key_list.to_vec();
        self.generators
            .iter()
            .filter(|gen| {
                if request.is_none() {
                    gen.req_end.is_none()
                } else {
                    gen.req_end.is_some()
                        && gen.req_end.as_ref().unwrap() == request.as_ref().unwrap().to_end()
                }
            })
            // Collapsed `.map(..).filter_map(|opt| opt)` into one filter_map.
            .filter_map(|gen| gen.gen_behavior(&mut pub_key_list, request.clone()))
            .collect()
    }
}

/// Shuffle `pub_key_list` in place and return a random-length suffix of it,
/// i.e. a random subset of peers to send to.
pub fn gen_rand_pub_key_list(pub_key_list: &mut Vec<Bytes>) -> Vec<Bytes> {
    let mut rng = rand::thread_rng();
    pub_key_list.shuffle(&mut rng);
    let mut new_list = pub_key_list.to_vec();
    let cut_num = gen_range(0, new_list.len());
    new_list.split_off(cut_num)
}

================================================
FILE: byzantine/src/utils.rs
================================================

use std::convert::TryFrom;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};

use bytes::Bytes;
use overlord::types::{
    AggregatedChoke, AggregatedSignature, AggregatedVote, Choke, PoLC, Proposal, SignedChoke,
    SignedProposal, SignedVote, UpdateFrom, Vote, VoteType,
};
use overlord::Crypto;
use rand::distributions::uniform::{SampleBorrow, SampleUniform};
use rand::distributions::Alphanumeric;
use rand::{random, Rng};
use rlp::{self, Encodable, RlpStream};

use common_crypto::{
    HashValue, PrivateKey, PublicKey, Secp256k1PrivateKey, Signature, ToPublicKey,
    UncompressedPublicKey,
};
use common_merkle::Merkle;
use core_consensus::fixed_types::FixedPill;
use core_consensus::util::OverlordCrypto;
use protocol::fixed_codec::FixedCodec;
use protocol::types::{
    Address, Block, BlockHeader, Hash, Metadata, Pill, Proof, RawTransaction, SignedTransaction,
    TransactionRequest, Validator,
};

use crate::invalid_types::InvalidStruct;
use crate::worker::State;

const VALIDATOR_VERSION: u64 = 0;
// Canonical byte lengths used when generating deliberately-invalid values.
const HASH_LEN: u64 = 32;
const ADDRESS_LEN: u64 = 20;
const SIGNATURE_LEN: u64 = 192;
const BITMAP_LEN: u64 = 1;

pub fn
time_now() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64
}

/// `len` random bytes.
pub fn gen_random_bytes(len: usize) -> Bytes {
    // NOTE(review): turbofish arguments were lost in extraction; restored as u8.
    let vec = (0..len).map(|_| random::<u8>()).collect::<Vec<u8>>();
    Bytes::from(vec)
}

/// `len` random alphanumeric characters.
pub fn gen_random_string(len: usize) -> String {
    rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(len)
        .collect()
}

/// Uniform sample in [low, high); thin wrapper over rand's two-arg gen_range.
// NOTE(review): generic parameters were lost in extraction; restored to match
// rand 0.7's `Rng::gen_range(low, high)` bounds — confirm upstream.
pub fn gen_range<T, B1, B2>(low: B1, high: B2) -> T
where
    T: SampleUniform,
    B1: SampleBorrow<T> + Sized,
    B2: SampleBorrow<T> + Sized,
{
    let mut rng = rand::thread_rng();
    rng.gen_range(low, high)
}

/// Bernoulli sample; p >= 1.0 always yields true (rand's gen_bool panics on
/// p > 1.0, so it is short-circuited here).
pub fn gen_bool(p: f64) -> bool {
    let mut rng = rand::thread_rng();
    if p >= 1.0 {
        true
    } else {
        rng.gen_bool(p)
    }
}

/// A well-formed raw transfer transaction with a random nonce and a timeout
/// inside [height, height + timeout_gap).
pub fn gen_valid_raw_tx(
    pri_key: &Secp256k1PrivateKey,
    height: u64,
    metadata: &Metadata,
) -> RawTransaction {
    RawTransaction {
        chain_id: metadata.chain_id.clone(),
        cycles_price: 100,
        cycles_limit: 1_000_000,
        nonce: gen_valid_hash(),
        request: gen_transfer_tx_request(),
        timeout: gen_range(height, height + metadata.timeout_gap),
        sender: gen_address_bytes(pri_key),
    }
}

/// A request against a service/method that (almost certainly) does not exist.
pub fn gen_invalid_request() -> TransactionRequest {
    TransactionRequest {
        method: gen_random_string(10),
        service_name: gen_random_string(10),
        payload: gen_random_string(100),
    }
}

/// A valid `transfer` call on the built-in `asset` service.
pub fn gen_transfer_tx_request() -> TransactionRequest {
    TransactionRequest {
        // Fixed: `method` and `service_name` were swapped — the service is
        // "asset" and the method on it is "transfer", not the other way round.
        method: "transfer".to_string(),
        service_name: "asset".to_string(),
        payload: "{ \"asset_id\": \"0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c\", \"to\":\"0x0000000000000000000000000000000000000001\", \"value\": 100 }".to_string(),
    }
}

/// Address derived from the uncompressed public key of `pri_key`.
pub fn gen_address_bytes(pri_key: &Secp256k1PrivateKey) -> Address {
    let pubkey = pri_key.pub_key();
    Address::from_pubkey_bytes(pubkey.to_uncompressed_bytes()).expect("get address failed")
}

/// Digest of 20 random bytes — structurally valid.
pub fn gen_valid_hash() -> Hash {
    Hash::digest(gen_random_bytes(20))
}

/// A "hash" of length 31 or 32 bytes (gen_positive_range samples
/// [HASH_LEN - 1, HASH_LEN + 1), so it may occasionally hit the valid length).
pub fn gen_invalid_hash() -> Hash {
    let rand_len = gen_positive_range(HASH_LEN, 1);
    Hash::from_invalid_bytes(gen_random_bytes(rand_len as usize))
}

/// An "address" whose byte length is near but possibly off the canonical 20.
pub fn gen_invalid_address() -> Address {
    let rand_len = gen_positive_range(ADDRESS_LEN, 1);
Address::from_invalid_bytes(gen_random_bytes(rand_len as usize)) } pub fn gen_valid_signed_tx( raw: RawTransaction, pri_key: &Secp256k1PrivateKey, ) -> SignedTransaction { gen_signed_tx(raw, pri_key, None, None) } pub fn gen_signed_tx( raw: RawTransaction, pri_key: &Secp256k1PrivateKey, fixed_bytes: Option, sig: Option, ) -> SignedTransaction { let fixed_bytes = fixed_bytes.unwrap_or_else(|| raw.encode_fixed().expect("get bytes from raw_tx failed!")); let tx_hash = Hash::digest(fixed_bytes); let hash_value = HashValue::try_from(tx_hash.as_bytes().as_ref()).unwrap(); let signature = sig.unwrap_or_else(|| pri_key.sign_message(&hash_value).to_bytes()); let pubkey = pri_key.pub_key().to_bytes(); let signature = Bytes::from(rlp::encode_list::, _>(&[signature.to_vec()])); let pubkey = Bytes::from(rlp::encode_list::, _>(&[pubkey.to_vec()])); SignedTransaction { raw, tx_hash, pubkey, signature, } } pub fn gen_valid_block_header( state: &State, metadata: &Metadata, my_address: &Address, validators: &[Validator], ordered_tx_hashes: Vec, ) -> BlockHeader { let order_root = Merkle::from_hashes(ordered_tx_hashes).get_root_hash(); BlockHeader { chain_id: metadata.chain_id.clone(), height: state.height, exec_height: state.exec_height, prev_hash: state.prev_hash.clone(), timestamp: time_now(), order_root: order_root.unwrap_or_else(Hash::from_empty), order_signed_transactions_hash: Hash::from_empty(), confirm_root: state.confirm_root.clone(), state_root: state.state_root.clone(), receipt_root: state.receipt_root.clone(), cycles_used: state.cycles_used.clone(), proposer: my_address.clone(), proof: state.proof.clone(), validator_version: VALIDATOR_VERSION, validators: validators.to_vec(), } } pub fn gen_valid_block(header: BlockHeader, ordered_tx_hashes: Vec) -> Block { Block { header, ordered_tx_hashes, } } pub fn gen_invalid_content_struct_proposal( state: &State, my_pub_key: &Bytes, ) -> Proposal { let content = InvalidStruct::gen(1000); let hash = 
Hash::digest(content.inner.clone()).as_bytes(); Proposal { height: state.height, round: state.round, content, block_hash: hash, lock: state.lock.clone(), proposer: my_pub_key.clone(), } } pub fn gen_valid_proposal( block: Block, state: &State, my_pub_key: &Bytes, propose_hashes: Vec, ) -> Proposal { let pill = Pill { block, propose_hashes, }; let fixed_pill = FixedPill { inner: pill.clone(), }; let hash = Hash::digest( pill.block .header .encode_fixed() .expect("encode block header failed"), ) .as_bytes(); Proposal { height: state.height, round: state.round, content: fixed_pill, block_hash: hash, lock: state.lock.clone(), proposer: my_pub_key.clone(), } } pub fn gen_valid_signed_proposal( proposal: Proposal, crypto: &Arc, ) -> SignedProposal { let signature = crypto .sign(crypto.hash(Bytes::from(rlp::encode(&proposal)))) .expect("sign proposal failed"); SignedProposal { signature, proposal, } } pub fn gen_signed_proposal_from_header( header: BlockHeader, state: &State, crypto: &Arc, my_pub_key: &Bytes, ) -> Vec { let block = gen_valid_block(header, vec![]); let proposal = gen_valid_proposal(block, state, my_pub_key, vec![]); let signed_proposal = gen_valid_signed_proposal(proposal, crypto); signed_proposal.rlp_bytes() } pub fn gen_invalid_chain_id() -> Hash { Hash::digest(gen_random_bytes(20)) } pub fn gen_positive_range(base: u64, range: u64) -> u64 { let low = if base < range { 0 } else { base - range }; let high = if u64::MAX - base < range { u64::MAX } else { base + range }; gen_range(low, high) } pub fn gen_invalid_sig() -> Bytes { gen_random_bytes(gen_positive_range(SIGNATURE_LEN, 1) as usize) } pub fn gen_invalid_proof() -> Proof { Proof { height: gen_range(u64::MIN, u64::MAX), round: gen_range(u64::MIN, u64::MAX), block_hash: gen_invalid_hash(), signature: gen_invalid_sig(), bitmap: gen_invalid_bitmap(), } } pub fn gen_invalid_bitmap() -> Bytes { gen_random_bytes(gen_positive_range(BITMAP_LEN, 1) as usize) } pub fn gen_invalid_validators() -> Vec { 
(0..gen_range(0, 100)) .map(|_| Validator { pub_key: gen_random_bytes(32), propose_weight: gen_range(u32::MIN, u32::MAX), vote_weight: gen_range(u32::MIN, u32::MAX), }) .collect() } pub fn gen_invalid_lock() -> PoLC { PoLC { lock_round: gen_range(u64::MIN, u64::MAX), lock_votes: gen_invalid_qc(), } } pub fn gen_invalid_qc() -> AggregatedVote { AggregatedVote { signature: gen_invalid_aggregate_sig(), vote_type: gen_vote_type(), height: gen_range(u64::MIN, u64::MAX), round: gen_range(u64::MIN, u64::MAX), block_hash: gen_invalid_hash().as_bytes(), leader: gen_invalid_address().as_bytes(), } } pub fn gen_invalid_aggregate_sig() -> AggregatedSignature { AggregatedSignature { signature: gen_invalid_sig(), address_bitmap: gen_invalid_bitmap(), } } pub fn gen_valid_qc(state: &State, my_pub_key: &Bytes) -> AggregatedVote { AggregatedVote { signature: gen_valid_aggregate_sig(), vote_type: gen_vote_type(), height: state.height, round: state.round, block_hash: gen_invalid_hash().as_bytes(), leader: my_pub_key.clone(), } } pub fn gen_valid_aggregate_sig() -> AggregatedSignature { AggregatedSignature { signature: gen_random_bytes(SIGNATURE_LEN as usize), address_bitmap: gen_random_bytes(BITMAP_LEN as usize), } } pub fn gen_valid_signed_vote( vote: Vote, crypto: &Arc, my_pub_key: &Bytes, ) -> SignedVote { let signature = crypto .sign(crypto.hash(Bytes::from(rlp::encode(&vote)))) .expect("sign proposal failed"); SignedVote { signature, vote, voter: my_pub_key.clone(), } } pub fn gen_valid_vote(state: &State) -> Vote { Vote { height: state.height, round: state.round, vote_type: gen_vote_type(), block_hash: gen_valid_hash().as_bytes(), } } pub fn gen_valid_choke(state: &State, my_pub_key: &Bytes) -> Choke { Choke { height: state.height, round: state.round, from: UpdateFrom::PrevoteQC(gen_valid_qc(state, my_pub_key)), } } pub fn gen_invalid_from() -> UpdateFrom { match gen_range(0, 100) % 3 { 0 => UpdateFrom::PrevoteQC(gen_invalid_qc()), 1 => 
UpdateFrom::PrecommitQC(gen_invalid_qc()), 2 => UpdateFrom::ChokeQC(gen_invalid_aggregated_choke()), _ => panic!("unreachable!"), } } pub fn gen_valid_signed_choke( choke: Choke, crypto: &Arc, my_pub_key: &Bytes, ) -> SignedChoke { let signature = crypto .sign(crypto.hash(Bytes::from(rlp::encode(&choke_to_hash(&choke))))) .expect("sign choke failed"); SignedChoke { signature, choke, address: my_pub_key.clone(), } } #[derive(Clone, Debug)] struct HashChoke { height: u64, round: u64, } impl Encodable for HashChoke { fn rlp_append(&self, s: &mut RlpStream) { s.begin_list(2).append(&self.height).append(&self.round); } } fn choke_to_hash(choke: &Choke) -> HashChoke { HashChoke { height: choke.height, round: choke.round, } } pub fn gen_invalid_aggregated_choke() -> AggregatedChoke { AggregatedChoke { height: gen_range(u64::MIN, u64::MAX), round: gen_range(u64::MIN, u64::MAX), signature: gen_invalid_sig(), voters: vec![gen_invalid_address().as_bytes()], } } fn gen_vote_type() -> VoteType { match gen_range(0, 100) % 2 { 0 => VoteType::Prevote, 1 => VoteType::Precommit, _ => panic!("unreachable!"), } } ================================================ FILE: byzantine/src/worker.rs ================================================ use std::convert::TryFrom; use std::sync::Arc; use bytes::Bytes; use futures::{channel::mpsc::UnboundedReceiver, stream::StreamExt}; use lazy_static::lazy_static; use overlord::types::{ AggregatedVote, Choke, PoLC, Proposal, SignedChoke, SignedProposal, SignedVote, Vote, VoteType, }; use rlp::Encodable; use common_crypto::Secp256k1PrivateKey; use core_consensus::fixed_types::FixedPill; use core_consensus::message::{ BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE, END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, }; use core_consensus::util::OverlordCrypto; use core_mempool::{ MsgNewTxs, MsgPullTxs, MsgPushTxs, END_GOSSIP_NEW_TXS, RPC_PULL_TXS, RPC_RESP_PULL_TXS, }; use core_network::{PeerId, PeerIdExt}; use 
protocol::traits::{Context, Gossip, MessageCodec, PeerTrust, Priority, Rpc};
use protocol::types::{
    Address, Hash, Hex, MerkleRoot, Metadata, Proof, SignedTransaction, Validator,
};

use crate::behaviors::{
    Behavior, MessageType, NewChoke, NewProposal, NewQC, NewTx, NewVote, PullTxs, Request,
};
// One generator per byzantine test case: each `gen_invalid_<field>_<message>`
// builds a wire message whose named field is deliberately broken.
use crate::invalid_types::{
    gen_invalid_address_new_choke, gen_invalid_block_hash_new_proposal,
    gen_invalid_block_hash_new_qc, gen_invalid_block_hash_new_vote,
    gen_invalid_block_proposer_new_proposal, gen_invalid_chain_id_new_proposal,
    gen_invalid_chain_id_signed_tx, gen_invalid_confirm_root_new_proposal,
    gen_invalid_content_struct_new_proposal, gen_invalid_cycle_used_new_proposal,
    gen_invalid_cycles_limit_signed_tx, gen_invalid_cycles_price_signed_tx,
    gen_invalid_exec_height_new_proposal, gen_invalid_from_new_vote, gen_invalid_hash_pull_txs,
    gen_invalid_hash_signed_tx, gen_invalid_height_new_choke, gen_invalid_height_new_proposal,
    gen_invalid_height_new_qc, gen_invalid_height_new_vote, gen_invalid_height_pull_txs,
    gen_invalid_leader_new_qc, gen_invalid_lock_new_proposal, gen_invalid_nonce_dup_signed_tx,
    gen_invalid_nonce_of_rand_len_signed_tx, gen_invalid_order_root_new_proposal,
    gen_invalid_prev_hash_new_proposal, gen_invalid_proof_new_proposal,
    gen_invalid_prop_height_new_proposal, gen_invalid_prop_proposer_new_proposal,
    gen_invalid_receipt_root_new_proposal, gen_invalid_request_signed_tx,
    gen_invalid_round_new_choke, gen_invalid_round_new_proposal, gen_invalid_round_new_qc,
    gen_invalid_round_new_vote, gen_invalid_sender_signed_tx, gen_invalid_sig_new_choke,
    gen_invalid_sig_new_proposal, gen_invalid_sig_new_qc, gen_invalid_sig_new_vote,
    gen_invalid_sig_signed_tx, gen_invalid_signed_tx_hash_new_proposal,
    gen_invalid_state_root_new_proposal, gen_invalid_struct_new_choke,
    gen_invalid_struct_new_proposal, gen_invalid_struct_new_qc, gen_invalid_struct_new_vote,
    gen_invalid_timeout_signed_tx, gen_invalid_timestamp_new_proposal,
    gen_invalid_tx_hash_new_proposal,
gen_invalid_validators_new_proposal, gen_invalid_version_new_proposal,
    gen_invalid_voter_new_vote, gen_not_exists_txs_pull_txs, gen_valid_new_proposal, gen_valid_tx,
    InvalidStruct,
};
use crate::utils::{
    gen_positive_range, gen_random_bytes, gen_valid_signed_choke, gen_valid_signed_vote, time_now,
};

lazy_static! {
    /// Well-known throwaway key used to sign every byzantine transaction.
    static ref TEST_PRI_KEY: Secp256k1PrivateKey = {
        let hex_prikey = Hex::from_string(
            "0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a".to_string(),
        )
        .unwrap();
        Secp256k1PrivateKey::try_from(hex_prikey.decode().as_ref())
            .expect("get test pri_key failed")
    };
}

// Spawn a task that builds `msg_num` signed txs with `$func` and gossips them.
macro_rules! send_new_tx {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {{
        let behavior = $behavior.clone();
        let metadata = $self_.metadata.clone();
        let height = $self_.state.height;
        let network = Arc::<_>::clone(&$self_.network);
        tokio::spawn(async move {
            // NOTE(review): Vec's element type was lost in extraction;
            // restored as SignedTransaction from MsgNewTxs::batch_stxs.
            let batch_stxs: Vec<SignedTransaction> = (0..behavior.msg_num)
                .map(|_| $func(&TEST_PRI_KEY, height, &metadata))
                .collect();
            let gossip_txs = MsgNewTxs { batch_stxs };
            send(&network, gossip_txs, $ctx, END_GOSSIP_NEW_TXS, &behavior).await;
        });
    }};
}

// Spawn a task that answers a pull-txs RPC with `msg_num` txs built by `$func`.
macro_rules! send_push_txs {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {{
        let behavior = $behavior.clone();
        let metadata = $self_.metadata.clone();
        let height = $self_.state.height;
        let network = Arc::<_>::clone(&$self_.network);
        tokio::spawn(async move {
            let batch_stxs: Vec<SignedTransaction> = (0..behavior.msg_num)
                .map(|_| $func(&TEST_PRI_KEY, height, &metadata))
                .collect();
            let push_txs = MsgPushTxs {
                sig_txs: batch_stxs,
            };
            // NOTE(review): turbofish content was lost in extraction; restored
            // as MsgPushTxs — confirm against the Rpc::response signature.
            let _ = network
                .response::<MsgPushTxs>($ctx, RPC_RESP_PULL_TXS, Ok(push_txs), behavior.priority)
                .await;
        });
    }};
}

macro_rules!
send_pull_txs {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {{
        let behavior = $behavior.clone();
        let height = $self_.state.height;
        let network = Arc::<_>::clone(&$self_.network);
        tokio::spawn(async move {
            for _ in 0..behavior.msg_num {
                let pull_msg = $func(height);
                // NOTE(review): turbofish content was lost in extraction;
                // restored as <_, MsgPushTxs> (pull-txs RPCs are answered with
                // MsgPushTxs) — confirm against the Rpc::call signature.
                let _ = network
                    .call::<_, MsgPushTxs>(
                        $ctx.clone(),
                        RPC_PULL_TXS,
                        pull_msg,
                        behavior.priority.clone(),
                    )
                    .await;
            }
        });
    }};
}

// Spawn a task that gossips `msg_num` proposals built by `$func`.
macro_rules! send_new_proposal {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {{
        let behavior = $behavior.clone();
        let state = $self_.state.clone();
        let metadata = $self_.metadata.clone();
        let crypto = $self_.crypto.clone();
        let address = $self_.address.clone();
        let pub_key = $self_.pub_key.clone();
        let validators = $self_.validators.clone();
        let network = Arc::<_>::clone(&$self_.network);
        tokio::spawn(async move {
            // NOTE(review): element type lost in extraction; the proposal
            // generators return rlp wire bytes, so Vec<Vec<u8>>.
            let messages: Vec<Vec<u8>> = (0..behavior.msg_num)
                .map(|_| $func(&state, &metadata, &crypto, &address, &pub_key, &validators))
                .collect();
            for msg in messages {
                send(
                    &network,
                    msg,
                    $ctx.clone(),
                    END_GOSSIP_SIGNED_PROPOSAL,
                    &behavior,
                )
                .await;
            }
        });
    }};
}

// Shared body for votes and chokes; `$end` selects the gossip endpoint.
macro_rules! send_new_vote_or_choke {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident, $end: ident) => {{
        let behavior = $behavior.clone();
        let state = $self_.state.clone();
        let crypto = $self_.crypto.clone();
        let pub_key = $self_.pub_key.clone();
        let network = Arc::<_>::clone(&$self_.network);
        tokio::spawn(async move {
            let messages: Vec<Vec<u8>> = (0..behavior.msg_num)
                .map(|_| $func(&state, &crypto, &pub_key))
                .collect();
            for msg in messages {
                send(&network, msg, $ctx.clone(), $end, &behavior).await;
            }
        });
    }};
}

macro_rules! send_new_vote {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {
        send_new_vote_or_choke!($self_, $ctx, $behavior, $func, END_GOSSIP_SIGNED_VOTE);
    };
}

macro_rules! send_new_choke {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {
        send_new_vote_or_choke!($self_, $ctx, $behavior, $func, END_GOSSIP_SIGNED_CHOKE);
    };
}

macro_rules!
send_new_qc {
    ($self_: ident, $ctx: ident, $behavior: ident, $func: ident) => {{
        let behavior = $behavior.clone();
        let state = $self_.state.clone();
        let pub_key = $self_.pub_key.clone();
        let network = Arc::<_>::clone(&$self_.network);
        tokio::spawn(async move {
            let messages: Vec<Vec<u8>> = (0..behavior.msg_num)
                .map(|_| $func(&state, &pub_key))
                .collect();
            for msg in messages {
                send(
                    &network,
                    msg,
                    $ctx.clone(),
                    END_GOSSIP_AGGREGATED_VOTE,
                    &behavior,
                )
                .await;
            }
        });
    }};
}

/// The byzantine node's view of the chain, updated from intercepted messages.
// NOTE(review): several collection element types were lost in extraction;
// restored from BlockHeader's field types (confirm_root/receipt_root:
// Vec<MerkleRoot>, cycles_used: Vec<u64>) and overlord's Option<PoLC>.
#[derive(Clone, Debug)]
pub struct State {
    pub height: u64,
    pub round: u64,
    pub exec_height: u64,
    pub prev_hash: Hash,
    // Timestamp of the last observed commit; read by the liveness watchdog.
    pub prev_timestamp: u64,
    pub state_root: MerkleRoot,
    pub confirm_root: Vec<MerkleRoot>,
    pub receipt_root: Vec<MerkleRoot>,
    pub cycles_used: Vec<u64>,
    pub lock: Option<PoLC>,
    pub proof: Proof,
}

impl Default for State {
    fn default() -> Self {
        State {
            height: 0,
            round: 0,
            exec_height: 0,
            prev_hash: Hash::from_empty(),
            prev_timestamp: time_now(),
            state_root: MerkleRoot::from_empty(),
            confirm_root: vec![],
            receipt_root: vec![],
            cycles_used: vec![],
            lock: None,
            proof: Proof {
                height: 0,
                round: 0,
                block_hash: Hash::from_empty(),
                signature: Bytes::new(),
                bitmap: Bytes::new(),
            },
        }
    }
}

/// Receives batches of behaviors from the commander and executes them.
// NOTE(review): the network type parameter was lost in extraction; restored
// as Worker<N> from the impl's `N: Rpc + PeerTrust + Gossip` bound.
pub struct Worker<N> {
    state: State,
    address: Address,
    pub_key: Bytes,
    metadata: Metadata,
    validators: Vec<Validator>,
    crypto: Arc<OverlordCrypto>,
    network: Arc<N>,
    from_timeout: UnboundedReceiver<(Context, Vec<Behavior>)>,
}

impl<N> Worker<N>
where
    N: Rpc + PeerTrust + Gossip + 'static,
{
    pub fn new(
        address: Address,
        pub_key: Bytes,
        metadata: Metadata,
        validators: Vec<Validator>,
        crypto: OverlordCrypto,
        network: Arc<N>,
        from_timeout: UnboundedReceiver<(Context, Vec<Behavior>)>,
    ) -> Worker<N> {
        Worker {
            state: State::default(),
            address,
            pub_key,
            crypto: Arc::new(crypto),
            metadata,
            validators,
            network,
            from_timeout,
        }
    }

    /// Main loop: take (ctx, behaviors) batches off the channel and run them.
    pub async fn run(mut self) {
        let mut cnt = 0;
        loop {
            let (ctx, behaviors) = self.from_timeout.next().await.expect("Channel is down!");
            for behavior in behaviors {
                cnt += 1;
                println!(
                    "[h: {}, r: {}] worker process {:?}, accumulative process {} behaviors",
                    self.state.height, self.state.round,
behavior.msg_type, cnt
                );
                self.process(ctx.clone(), &behavior).await;
            }
        }
    }

    /// Dispatch one behavior: each arm expands to a tokio task that builds
    /// and sends the corresponding (usually malformed) wire message.
    pub async fn process(&mut self, ctx: Context, behavior: &Behavior) {
        match &behavior.msg_type {
            // Gossiped mempool transactions with one field broken at a time.
            MessageType::NewTxs(new_tx) => match new_tx {
                NewTx::InvalidStruct => self.send_invalid_struct_of_new_tx(ctx, behavior).await,
                NewTx::InvalidHash => send_new_tx!(self, ctx, behavior, gen_invalid_hash_signed_tx),
                NewTx::InvalidSig => send_new_tx!(self, ctx, behavior, gen_invalid_sig_signed_tx),
                NewTx::InvalidChainID => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_chain_id_signed_tx)
                }
                NewTx::InvalidCyclesPrice => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_cycles_price_signed_tx)
                }
                NewTx::InvalidCyclesLimit => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_cycles_limit_signed_tx)
                }
                NewTx::InvalidNonceOfRandLen => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_nonce_of_rand_len_signed_tx)
                }
                NewTx::InvalidNonceDup => {
                    self.send_invalid_nonce_dup_of_new_tx(ctx, behavior).await
                }
                NewTx::InvalidRequest => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_request_signed_tx)
                }
                NewTx::InvalidTimeout => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_timeout_signed_tx)
                }
                NewTx::InvalidSender => {
                    send_new_tx!(self, ctx, behavior, gen_invalid_sender_signed_tx)
                }
                NewTx::Valid => send_new_tx!(self, ctx, behavior, gen_valid_tx),
            },
            // Reaction to an incoming proposal: either track it (Valid) or
            // fire malformed pull-txs RPCs back.
            MessageType::RecvProposal(pull_txs) => match pull_txs {
                PullTxs::Valid => self.set_state(behavior.request.as_ref()).await,
                PullTxs::InvalidHeight => {
                    send_pull_txs!(self, ctx, behavior, gen_invalid_height_pull_txs)
                }
                PullTxs::InvalidHash => {
                    send_pull_txs!(self, ctx, behavior, gen_invalid_hash_pull_txs)
                }
                PullTxs::NotExistTxs => {
                    send_pull_txs!(self, ctx, behavior, gen_not_exists_txs_pull_txs)
                }
                PullTxs::InvalidStruct => self.send_invalid_struct_of_pull_txs(ctx, behavior).await,
            },
            // Outbound proposals with one header/field broken at a time.
            MessageType::SendProposal(new_proposal) => match new_proposal {
                NewProposal::Valid => {
                    send_new_proposal!(self, ctx, behavior, gen_valid_new_proposal)
                }
                NewProposal::InvalidStruct => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_struct_new_proposal)
                }
                // Each arm gossips proposals whose named header field is corrupted.
                NewProposal::InvalidChainId => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_chain_id_new_proposal)
                }
                NewProposal::InvalidPrevHash => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_prev_hash_new_proposal)
                }
                NewProposal::InvalidHeight => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_height_new_proposal)
                }
                NewProposal::InvalidExecHeight => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_exec_height_new_proposal)
                }
                NewProposal::InvalidTimestamp => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_timestamp_new_proposal)
                }
                NewProposal::InvalidOrderRoot => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_order_root_new_proposal)
                }
                NewProposal::InvalidSignedTxsHash => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_signed_tx_hash_new_proposal)
                }
                NewProposal::InvalidConfirmRoot => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_confirm_root_new_proposal)
                }
                NewProposal::InvalidStateRoot => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_state_root_new_proposal)
                }
                NewProposal::InvalidReceiptRoot => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_receipt_root_new_proposal)
                }
                NewProposal::InvalidCyclesUsed => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_cycle_used_new_proposal)
                }
                NewProposal::InvalidBlockProposer => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_block_proposer_new_proposal)
                }
                NewProposal::InvalidProof => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_proof_new_proposal)
                }
                NewProposal::InvalidVersion => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_version_new_proposal)
                }
                NewProposal::InvalidValidators => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_validators_new_proposal)
                }
                NewProposal::InvalidTxHash => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_tx_hash_new_proposal)
                }
                NewProposal::InvalidSig => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_sig_new_proposal)
                }
                NewProposal::InvalidProposalHeight => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_prop_height_new_proposal)
                }
                NewProposal::InvalidRound => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_round_new_proposal)
                }
                NewProposal::InvalidContentStruct => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_content_struct_new_proposal)
                }
                NewProposal::InvalidBlockHash => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_block_hash_new_proposal)
                }
                NewProposal::InvalidLock => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_lock_new_proposal)
                }
                NewProposal::InvalidProposalProposer => {
                    send_new_proposal!(self, ctx, behavior, gen_invalid_prop_proposer_new_proposal)
                }
            },
            // Outbound votes with one field broken at a time.
            MessageType::SendVote(new_vote) => match new_vote {
                NewVote::InvalidStruct => {
                    send_new_vote!(self, ctx, behavior, gen_invalid_struct_new_vote)
                }
                NewVote::InvalidHeight => {
                    send_new_vote!(self, ctx, behavior, gen_invalid_height_new_vote)
                }
                NewVote::InvalidRound => {
                    send_new_vote!(self, ctx, behavior, gen_invalid_round_new_vote)
                }
                NewVote::InvalidBlockHash => {
                    send_new_vote!(self, ctx, behavior, gen_invalid_block_hash_new_vote)
                }
                NewVote::InvalidSig => {
                    send_new_vote!(self, ctx, behavior, gen_invalid_sig_new_vote)
                }
                NewVote::InvalidVoter => {
                    send_new_vote!(self, ctx, behavior, gen_invalid_voter_new_vote)
                }
            },
            // Outbound aggregated votes (QCs) with one field broken at a time.
            MessageType::SendQC(new_qc) => match new_qc {
                NewQC::InvalidStruct => {
                    send_new_qc!(self, ctx, behavior, gen_invalid_struct_new_qc)
                }
                NewQC::InvalidHeight => {
                    send_new_qc!(self, ctx, behavior, gen_invalid_height_new_qc)
                }
                NewQC::InvalidRound => send_new_qc!(self, ctx, behavior, gen_invalid_round_new_qc),
                NewQC::InvalidBlockHash => {
                    send_new_qc!(self, ctx, behavior, gen_invalid_block_hash_new_qc)
                }
                NewQC::InvalidSig => send_new_qc!(self, ctx, behavior, gen_invalid_sig_new_qc),
                NewQC::InvalidLeader => {
                    send_new_qc!(self, ctx, behavior, gen_invalid_leader_new_qc)
                }
            },
            // Outbound chokes with one field broken at a time.
            MessageType::SendChoke(new_choke) => match new_choke {
                NewChoke::InvalidStruct => {
                    send_new_choke!(self, ctx,
behavior, gen_invalid_struct_new_choke)
                }
                NewChoke::InvalidHeight => {
                    send_new_choke!(self, ctx, behavior, gen_invalid_height_new_choke)
                }
                NewChoke::InvalidRound => {
                    send_new_choke!(self, ctx, behavior, gen_invalid_round_new_choke)
                }
                NewChoke::InvalidFrom => {
                    // NOTE(review): reuses the *vote* generator for the choke's
                    // `from` field; no gen_invalid_from_new_choke is imported —
                    // presumably intentional, confirm upstream.
                    send_new_choke!(self, ctx, behavior, gen_invalid_from_new_vote)
                }
                NewChoke::InvalidSig => {
                    send_new_choke!(self, ctx, behavior, gen_invalid_sig_new_choke)
                }
                NewChoke::InvalidAddress => {
                    send_new_choke!(self, ctx, behavior, gen_invalid_address_new_choke)
                }
            },
            MessageType::SendHeight => self.send_invalid_new_height(ctx, behavior).await,
            // Responses to pull-txs RPCs, again one broken field at a time.
            MessageType::PullTxs(new_tx) => match new_tx {
                NewTx::InvalidStruct => self.send_invalid_struct_of_push_txs(ctx, behavior).await,
                NewTx::InvalidHash => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_hash_signed_tx)
                }
                NewTx::InvalidSig => send_push_txs!(self, ctx, behavior, gen_invalid_sig_signed_tx),
                NewTx::InvalidChainID => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_chain_id_signed_tx)
                }
                NewTx::InvalidCyclesPrice => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_cycles_price_signed_tx)
                }
                NewTx::InvalidCyclesLimit => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_cycles_limit_signed_tx)
                }
                NewTx::InvalidNonceOfRandLen => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_nonce_of_rand_len_signed_tx)
                }
                NewTx::InvalidRequest => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_request_signed_tx)
                }
                NewTx::InvalidTimeout => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_timeout_signed_tx)
                }
                NewTx::InvalidSender => {
                    send_push_txs!(self, ctx, behavior, gen_invalid_sender_signed_tx)
                }
                // InvalidNonceDup and Valid are not supported for push-txs.
                _ => panic!("not support yet!"),
            },
            // Intercepted inbound consensus traffic only updates local state.
            MessageType::RecvQC
            | MessageType::RecvVote
            | MessageType::RecvChoke
            | MessageType::RecvHeight => self.set_state(behavior.request.as_ref()).await,
        }
    }

    // Broadcast `msg_num` bogus remote heights near our current height.
    pub async fn send_invalid_new_height(&mut self, ctx: Context, behavior: &Behavior) {
        let behavior = behavior.clone();
        let height = self.state.height;
        let network = Arc::<_>::clone(&self.network);
tokio::spawn(async move { let messages: Vec = (0..behavior.msg_num) .map(|_| gen_positive_range(height, 20)) .collect(); for msg in messages { send(&network, msg, ctx.clone(), BROADCAST_HEIGHT, &behavior).await; } }); } pub async fn send_invalid_struct_of_pull_txs(&mut self, ctx: Context, behavior: &Behavior) { let behavior = behavior.clone(); let network = Arc::<_>::clone(&self.network); tokio::spawn(async move { for _ in 0..behavior.msg_num { let pull_msg = InvalidStruct::gen(100); let _ = network .call::( ctx.clone(), RPC_PULL_TXS, pull_msg, behavior.priority, ) .await; } }); } pub async fn send_invalid_struct_of_new_tx(&self, ctx: Context, behavior: &Behavior) { let behavior = behavior.clone(); let network = Arc::<_>::clone(&self.network); tokio::spawn(async move { let messages: Vec = (0..behavior.msg_num) .map(|_| InvalidStruct::gen(1000)) .collect(); for msg in messages { send(&network, msg, ctx.clone(), END_GOSSIP_NEW_TXS, &behavior).await; } }); } pub async fn send_invalid_struct_of_push_txs(&self, ctx: Context, behavior: &Behavior) { let behavior = behavior.clone(); let network = Arc::<_>::clone(&self.network); tokio::spawn(async move { let messages: Vec = (0..behavior.msg_num) .map(|_| InvalidStruct::gen(1000)) .collect(); for msg in messages { let _ = network .response::( ctx.clone(), RPC_RESP_PULL_TXS, Ok(msg), behavior.priority, ) .await; } }); } pub async fn send_invalid_nonce_dup_of_new_tx(&self, ctx: Context, behavior: &Behavior) { let nonce = Hash::digest(gen_random_bytes(20)); let behavior = behavior.clone(); let metadata = self.metadata.clone(); let height = self.state.height; let network = Arc::<_>::clone(&self.network); tokio::spawn(async move { let batch_stxs: Vec = (0..behavior.msg_num) .map(|_| { gen_invalid_nonce_dup_signed_tx(&TEST_PRI_KEY, height, &metadata, nonce.clone()) }) .collect(); let gossip_txs = MsgNewTxs { batch_stxs }; send(&network, gossip_txs, ctx, END_GOSSIP_NEW_TXS, &behavior).await; }); } async fn set_state(&mut self, 
req_opt: Option<&Request>) {
        if let Some(req) = req_opt {
            match req {
                // A proposal at or ahead of our view: adopt its header fields,
                // then prevote for it so consensus keeps moving.
                Request::RecvProposal(proposal) => {
                    // NOTE(review): generic argument lost in extraction;
                    // restored as SignedProposal<FixedPill> (content.inner is
                    // read as a FixedPill below) — confirm upstream.
                    let signed_proposal: SignedProposal<FixedPill> =
                        rlp::decode(&proposal.0).expect("decode signed_proposal failed");
                    let proposal = signed_proposal.proposal;
                    if proposal.height > self.state.height
                        || (proposal.height == self.state.height
                            && proposal.round >= self.state.round)
                    {
                        let header = proposal.content.inner.block.header.clone();
                        self.state.height = proposal.height;
                        self.state.round = proposal.round;
                        self.state.prev_hash = header.prev_hash;
                        self.state.proof = header.proof;
                        self.state.state_root = header.state_root;
                        self.state.exec_height = header.exec_height;
                        self.state.confirm_root = header.confirm_root;
                        self.state.receipt_root = header.receipt_root;
                        self.state.cycles_used = header.cycles_used;
                        self.state.lock = proposal.lock.clone();
                    }
                    self.send_prevote(&proposal).await;
                }
                // A non-prevote QC with a block hash means that block
                // committed: move to the next height and remember the proof.
                // An empty hash is a round-change QC: just bump the round.
                Request::RecvQC(qc) => {
                    let qc: AggregatedVote = rlp::decode(&qc.0).expect("decode qc failed");
                    if !qc.is_prevote_qc() && qc.height >= self.state.height {
                        if !qc.block_hash.is_empty() {
                            self.state.height = qc.height + 1;
                            self.state.round = 0;
                            self.state.prev_hash = Hash::from_bytes(qc.block_hash.clone()).unwrap();
                            self.state.proof = Proof {
                                height: qc.height,
                                round: qc.round,
                                block_hash: Hash::from_bytes(qc.block_hash.clone()).unwrap(),
                                signature: qc.signature.signature.clone(),
                                bitmap: qc.signature.address_bitmap,
                            };
                            self.state.confirm_root = vec![];
                            self.state.receipt_root = vec![];
                            self.state.cycles_used = vec![];
                            self.state.lock = None;
                            // A commit happened: reset the liveness watchdog.
                            self.state.prev_timestamp = time_now();
                        } else if qc.round >= self.state.round {
                            self.state.height = qc.height;
                            self.state.round = qc.round + 1;
                        }
                    }
                }
                // Votes only advance our (height, round) view.
                Request::RecvVote(vote) => {
                    let vote: SignedVote = rlp::decode(&vote.0).expect("decode vote failed");
                    if vote.vote.height > self.state.height
                        || (vote.vote.height == self.state.height
                            && vote.vote.round > self.state.round)
                    {
                        self.state.height = vote.vote.height;
                        self.state.round = vote.vote.round;
                    }
                }
Request::RecvChoke(choke) => { let choke: SignedChoke = rlp::decode(&choke.0).expect("decode choke failed"); if choke.choke.height > self.state.height || (choke.choke.height == self.state.height && choke.choke.round > self.state.round) { self.state.height = choke.choke.height; self.state.round = choke.choke.round; } self.send_choke(choke.choke.clone(), choke.address.clone()) .await; } Request::RecvHeight(height) => { if *height > self.state.height { self.state.height = *height; self.state.round = 0; } } _ => panic!("not support yet"), } } self.check_liveness(); } fn check_liveness(&self) { let current_time = time_now(); let gap = current_time - self.state.prev_timestamp; if gap > 10 * 60 * 1000 { panic!("liveness is seemly broken! do not reach consensus in past 10 min"); } else if gap > 5 * 60 * 1000 { println!("strong warning! do not reach consensus in past 5 min"); } else if gap > 60 * 1000 { println!("warning! do not reach consensus in past 60 s"); } } async fn send_prevote(&self, proposal: &Proposal) { let pre_vote = Vote { height: proposal.height, round: proposal.round, vote_type: VoteType::Prevote, block_hash: proposal.block_hash.clone(), }; let signed_vote = gen_valid_signed_vote(pre_vote, &self.crypto, &self.pub_key); let msg = signed_vote.rlp_bytes(); let peer_id = PeerId::from_pubkey_bytes(&proposal.proposer) .unwrap() .into_bytes_ext(); let _ = self .network .multicast( Context::default(), END_GOSSIP_SIGNED_VOTE, [peer_id], msg, Priority::High, ) .await; } async fn send_choke(&self, choke: Choke, sender: Bytes) { let signed_choke = gen_valid_signed_choke(choke, &self.crypto, &self.pub_key); let msg = signed_choke.rlp_bytes(); let peer_id = PeerId::from_pubkey_bytes(&sender).unwrap().into_bytes_ext(); let _ = self .network .multicast( Context::default(), END_GOSSIP_SIGNED_CHOKE, [peer_id], msg, Priority::High, ) .await; } } async fn send(network: &Arc, message: M, ctx: Context, end: &str, behavior: &Behavior) where M: MessageCodec, N: Rpc + PeerTrust + 
Gossip + 'static, { let peer_ids: Vec<_> = behavior .send_to .iter() .map(|pub_key| PeerId::from_pubkey_bytes(pub_key).unwrap().into_bytes_ext()) .collect(); let _ = network .multicast(ctx.clone(), end, peer_ids, message, behavior.priority) .await; } ================================================ FILE: byzantine/tests/byz.test.ts ================================================ import { parse } from 'toml'; import { find } from 'lodash'; import { AssetService, MultiSignatureService } from '@mutadev/service' import { readFileSync } from 'fs'; import * as sdk from '@mutadev/muta-sdk'; import { Muta } from "@mutadev/muta-sdk"; const genesis = parse(readFileSync('../../examples/genesis.toml', 'utf-8')); const metadata = JSON.parse( find(genesis.services, (s) => s.name === 'metadata').payload, ); const chain_id = metadata.chain_id; const client_0 = get_client('../../examples/config-1.toml', chain_id); const client_1 = get_client('../../examples/config-2.toml', chain_id); const client_2 = get_client('../../examples/config-3.toml', chain_id); describe("Byzantine test via @mutadev/muta-sdk-js", () => { test("getLatestBlock", async () => { const timeoutLoopTimes = process.env.TIMEOUT | 600; // seconds var last_height = 0; var cnt = 0; for (var i = 0; i < timeoutLoopTimes; i++) { let height_0 = await client_0.getLatestBlockHeight(); let height_1 = await client_1.getLatestBlockHeight(); let height_2 = await client_2.getLatestBlockHeight(); let max_height = Math.max(height_0, height_1, height_2); console.log(max_height); if (max_height > last_height) { last_height = max_height; cnt = 0; } else if (max_height == last_height) { cnt += 1; if (cnt > 600) { throw new Error('break liveness'); } } else { throw new Error('break safety'); } await sleep(1000); } }); }); function sleep(ms: number) { return new Promise((resolve) => setTimeout(resolve, ms)); } function get_client(file_path: string, chain_id: string) { const config = parse(readFileSync(file_path, 'utf-8')); const 
graphql_port = config.graphql.listening_address.split(':')[1]; const muta = new Muta({ endpoint: 'http://localhost:' + graphql_port + '/graphql', chainId: chain_id }); return muta.client(); } ================================================ FILE: byzantine/tests/jest.config.js ================================================ module.exports = { displayName: "Unit Tests", testRegex: "(/.*.(test|spec))\\.(ts?|js?)$", transform: { "^.+\\.ts?$": "ts-jest" }, moduleFileExtensions: ["ts", "js", "json"], testTimeout: 10000000 }; ================================================ FILE: byzantine/tests/package.json ================================================ { "name": "muta-e2e-tests", "version": "1.0.0", "description": "", "author": "wancencen", "license": "MIT", "scripts": { "test": "jest --color", "lint": "eslint --fix '{src,test}/**/*.{js,ts}'", "prettier": "prettier --write **/*.{js,ts,graphql}" }, "dependencies": { "@mutadev/muta-sdk": "0.2.0-rc.0", "@mutadev/service": "0.2.0-rc.0", "graphql": "^15.2.0", "graphql-tag": "^2.10.1", "toml": "^3.0.0", "lodash": "^4.17.15", "ts-node": "^8.3.0", "typescript": "^3.5.3" }, "devDependencies": { "@types/jest": "^24.0.23", "jest": "^24.9.0", "prettier": "^1.19.1", "ts-jest": "^26.0.0" } } ================================================ FILE: charts/deploy-chaos/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj .vscode/ ================================================ FILE: charts/deploy-chaos/Chart.yaml ================================================ apiVersion: v2 name: deploy-chaos description: A Helm chart for Kubernetes # A chart can be either an 'application' or a 'library' chart. 
# # Application charts are a collection of templates that can be packaged into versioned archives # to be deployed. # # Library charts provide useful utilities or functions for the chart developer. They're included as # a dependency of application charts to inject those utilities and functions into the rendering # pipeline. Library charts do not define any templates and therefore cannot be deployed. type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. version: 0.1.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. appVersion: 1.16.0 ================================================ FILE: charts/deploy-chaos/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "deploy-chaos.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "deploy-chaos.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- $name := default .Chart.Name .Values.nameOverride -}} {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{- end -}} {{/* Create chart name and version as used by the chart label. 
*/}} {{- define "deploy-chaos.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Common labels */}} {{- define "deploy-chaos.labels" -}} helm.sh/chart: {{ include "deploy-chaos.chart" . }} {{ include "deploy-chaos.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end -}} {{/* Selector labels */}} {{- define "deploy-chaos.selectorLabels" -}} app.kubernetes.io/name: {{ include "deploy-chaos.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end -}} {{/* Create the name of the service account to use */}} {{- define "deploy-chaos.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} {{ default (include "deploy-chaos.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} ================================================ FILE: charts/deploy-chaos/templates/muta-benchmark.yaml ================================================ {{- $chainName := (printf "chaos-%s-%s" .Values.repo_name .Values.version) -}} apiVersion: batch/v1beta1 kind: CronJob metadata: name: benchmark-{{ .Values.repo_name }}-{{ .Values.version }} namespace: {{ .Values.namespace }} # Only supports deployment to the mutadev namespace spec: concurrencyPolicy: Replace schedule: {{ .Values.benchmark.schedule | quote }} jobTemplate: spec: template: spec: containers: - name: benchmark image: {{ .Values.benchmark.image }} args: {{- range .Values.benchmark.args }} - {{ . 
| quote }} {{- end }} - --chain-id - {{ .Values.chain_genesis.metadata.chain_id }} {{- range $i, $e := until (.Values.size | int) }} - {{ printf "http://%s-%d:8000/graphql" $chainName $i }} {{- end }} restartPolicy: OnFailure ================================================ FILE: charts/deploy-chaos/templates/muta-chaos-crd.yaml ================================================ apiVersion: nervos.org/v1alpha1 kind: Muta metadata: name: chaos-{{ .Values.repo_name }}-{{ .Values.version }} namespace: {{ .Values.namespace }} # Only supports deployment to the mutadev namespace spec: image: mutadev/{{ .Values.repo_name }}:{{ .Values.version }} # docker image resources: limits: cpu: {{ .Values.resources.cpu }} memory: {{ .Values.resources.memory }} ephemeral-storage: {{ .Values.resources.storage }} requests: cpu: {{ .Values.resources.cpu }} memory: {{ .Values.resources.memory }} ephemeral-storage: {{ .Values.resources.storage }} chaos: # all / stable-network-corrupt / stable-network-delay / stable-network-duplicate / stable-network-loss / stable-network-partition / stable-node-failure / stable-node-kill {{- range .Values.chaos }} - {{ . }} {{- end }} size: {{ .Values.size }} # Node numbers persistent: {{ .Values.resources.persistent }} # Persistent data config: # see https://github.com/nervosnetwork/muta/blob/master/devtools/chain/config.toml data_path: "/muta-data" graphql: listening_address: "0.0.0.0:8000" graphql_uri: "/graphql" graphiql_uri: "/" workers: 0 # if 0, uses number of available logical cpu as threads count. 
maxconn: 25000 network: listening_address: "0.0.0.0:1337" rpc_timeout: {{ .Values.chain_config.network.rpc_timeout }} mempool: pool_size: {{ .Values.chain_config.mempool.pool_size }} broadcast_txs_size: {{ .Values.chain_config.mempool.broadcast_txs_size }} broadcast_txs_interval: {{ .Values.chain_config.mempool.broadcast_txs_interval }} executor: light: false logger: filter: "info" log_to_console: true console_show_file_and_line: false log_path: "/muta-data/logs/" log_to_file: true metrics: true modules_level: # "overlord::state::process": "debug" # "core_consensus": "error" genesis: # https://github.com/nervosnetwork/muta/blob/master/devtools/chain/genesis.toml prevhash: {{ .Values.chain_genesis.prevhash }} metadata: chain_id: {{ .Values.chain_genesis.metadata.chain_id }} bech32_address_hrp: {{ .Values.chain_genesis.metadata.bech32_address_hrp }} timeout_gap: {{ .Values.chain_genesis.metadata.timeout_gap }} cycles_limit: {{ .Values.chain_genesis.metadata.cycles_limit }} cycles_price: {{ .Values.chain_genesis.metadata.cycles_price }} interval: {{ .Values.chain_genesis.metadata.interval }} propose_ratio: {{ .Values.chain_genesis.metadata.propose_ratio }} prevote_ratio: {{ .Values.chain_genesis.metadata.prevote_ratio }} precommit_ratio: {{ .Values.chain_genesis.metadata.precommit_ratio }} brake_ratio: {{ .Values.chain_genesis.metadata.brake_ratio }} tx_num_limit: {{ .Values.chain_genesis.metadata.tx_num_limit }} max_tx_size: {{ .Values.chain_genesis.metadata.max_tx_size }} services: {{- range $service := .Values.chain_genesis.services }} - name: {{ $service.name }} payload: {{ $service.payload | toJson | quote }} {{- end }} ================================================ FILE: charts/deploy-chaos/values.yaml ================================================ # Default values for deploy-chaos. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
benchmark: schedule: "*/6 * * * *" image: mutadev/muta-benchmark:v0.1.12 args: - -d - 300s - -c - 16 - -g - 9999 - --cpu - 3 namespace: mutadev repo_name: muta version: latest resources: cpu: 1100m memory: 4Gi storage: 6Gi persistent: true chaos: - all size: 4 chain_config: network: rpc_timeout: 10 mempool: pool_size: 20000 broadcast_txs_size: 200 broadcast_txs_interval: 200 chain_genesis: prevhash: 0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472 metadata: chain_id: 0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036 bech32_address_hrp: muta timeout_gap: 9999 cycles_limit: 99999999 cycles_price: 1 interval: 3000 propose_ratio: 15 prevote_ratio: 15 precommit_ratio: 10 brake_ratio: 3 tx_num_limit: 10000 max_tx_size: 1073741824 services: - name: asset payload: { "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" } ================================================ FILE: charts/muta/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: charts/muta/Chart.yaml ================================================ apiVersion: v1 description: A Helm chart for Kubernetes icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-platform/d965bfa/images/rust.png name: muta version: 0.1.0-SNAPSHOT ================================================ FILE: charts/muta/Makefile ================================================ CHART_REPO := http://jenkins-x-chartmuseum:8080 CURRENT=$(pwd) NAME := muta OS := $(shell uname) RELEASE_VERSION := $(shell cat ../../VERSION) build: clean rm -rf requirements.lock helm dependency build helm lint install: clean build helm install . --name ${NAME} upgrade: clean build helm upgrade ${NAME} . delete: helm delete --purge ${NAME} clean: rm -rf charts rm -rf ${NAME}*.tgz release: clean helm dependency build helm lint helm init --client-only helm package . 
curl --fail -u $(CHARTMUSEUM_CREDS_USR):$(CHARTMUSEUM_CREDS_PSW) --data-binary "@$(NAME)-$(shell sed -n 's/^version: //p' Chart.yaml).tgz" $(CHART_REPO)/api/charts rm -rf ${NAME}*.tgz% tag: ifeq ($(OS),Darwin) sed -i "" -e "s/version:.*/version: $(RELEASE_VERSION)/" Chart.yaml sed -i "" -e "s/tag:.*/tag: $(RELEASE_VERSION)/" values.yaml else ifeq ($(OS),Linux) sed -i -e "s/version:.*/version: $(RELEASE_VERSION)/" Chart.yaml sed -i -e "s|repository:.*|repository: $(DOCKER_REGISTRY)\/nervosnetwork\/muta|" values.yaml sed -i -e "s/tag:.*/tag: $(RELEASE_VERSION)/" values.yaml else echo "platfrom $(OS) not supported to release from" exit -1 endif git add --all git commit -m "release $(RELEASE_VERSION)" --allow-empty # if first release then no verion update is performed git tag -fa v$(RELEASE_VERSION) -m "Release version $(RELEASE_VERSION)" git push origin v$(RELEASE_VERSION) ================================================ FILE: charts/muta/README.md ================================================ # Rust application ================================================ FILE: charts/muta/templates/NOTES.txt ================================================ Get the application URL by running these commands: kubectl get ingress {{ template "fullname" . }} ================================================ FILE: charts/muta/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. */}} {{- define "name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
*/}} {{- define "fullname" -}} {{- $name := default .Chart.Name .Values.nameOverride -}} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} ================================================ FILE: charts/muta/templates/canary.yaml ================================================ {{- if .Values.canary.enabled }} apiVersion: flagger.app/v1beta1 kind: Canary metadata: name: {{ template "fullname" . }} labels: draft: {{ default "draft-app" .Values.draft }} chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" spec: provider: istio targetRef: apiVersion: apps/v1 kind: Deployment name: {{ template "fullname" . }} progressDeadlineSeconds: {{ .Values.canary.progressDeadlineSeconds }} {{- if .Values.hpa.enabled }} autoscalerRef: apiVersion: autoscaling/v2beta1 kind: HorizontalPodAutoscaler name: {{ template "fullname" . }} {{- end }} service: port: {{ .Values.service.externalPort }} targetPort: {{ .Values.service.internalPort }} gateways: - {{ template "fullname" . 
}} hosts: - {{ .Values.canary.host }} analysis: interval: {{ .Values.canary.canaryAnalysis.interval }} threshold: {{ .Values.canary.canaryAnalysis.threshold }} maxWeight: {{ .Values.canary.canaryAnalysis.maxWeight }} stepWeight: {{ .Values.canary.canaryAnalysis.stepWeight }} metrics: - name: request-success-rate threshold: {{ .Values.canary.canaryAnalysis.metrics.requestSuccessRate.threshold }} interval: {{ .Values.canary.canaryAnalysis.metrics.requestSuccessRate.interval }} - name: latency templateRef: name: latency thresholdRange: max: {{ .Values.canary.canaryAnalysis.metrics.requestDuration.threshold }} interval: {{ .Values.canary.canaryAnalysis.metrics.requestDuration.interval }} --- apiVersion: flagger.app/v1beta1 kind: MetricTemplate metadata: name: latency spec: provider: type: prometheus address: http://prometheus.istio-system:9090 query: | histogram_quantile( 0.99, sum( rate( istio_request_duration_milliseconds_bucket{ reporter="destination", destination_workload_namespace="{{ "{{" }} namespace {{ "}}" }}", destination_workload=~"{{ "{{" }} target {{ "}}" }}" }[{{ "{{" }} interval {{ "}}" }}] ) ) by (le) ) --- apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: {{ template "fullname" . }} spec: selector: istio: ingressgateway servers: - port: number: {{ .Values.service.externalPort }} name: http protocol: HTTP hosts: - {{ .Values.canary.host }} {{- end }} ================================================ FILE: charts/muta/templates/deployment.yaml ================================================ {{- if .Values.knativeDeploy }} {{- else }} apiVersion: apps/v1 kind: Deployment metadata: name: {{ template "fullname" . }} labels: draft: {{ default "draft-app" .Values.draft }} chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" spec: selector: matchLabels: app: {{ template "fullname" . 
}} {{- if .Values.hpa.enabled }} {{- else }} replicas: {{ .Values.replicaCount }} {{- end }} template: metadata: labels: draft: {{ default "draft-app" .Values.draft }} app: {{ template "fullname" . }} {{- if .Values.podAnnotations }} annotations: {{ toYaml .Values.podAnnotations | indent 8 }} {{- end }} spec: containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: {{- range $pkey, $pval := .Values.env }} - name: {{ $pkey }} value: {{ quote $pval }} {{- end }} envFrom: {{ toYaml .Values.envFrom | indent 10 }} ports: - containerPort: {{ .Values.service.internalPort }} livenessProbe: httpGet: path: {{ .Values.probePath }} port: {{ .Values.service.internalPort }} initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.livenessProbe.periodSeconds }} successThreshold: {{ .Values.livenessProbe.successThreshold }} timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} readinessProbe: httpGet: path: {{ .Values.probePath }} port: {{ .Values.service.internalPort }} periodSeconds: {{ .Values.readinessProbe.periodSeconds }} successThreshold: {{ .Values.readinessProbe.successThreshold }} timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} resources: {{ toYaml .Values.resources | indent 12 }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} {{- end }} ================================================ FILE: charts/muta/templates/hpa.yaml ================================================ {{- if .Values.hpa.enabled }} apiVersion: autoscaling/v2beta1 kind: HorizontalPodAutoscaler metadata: name: {{ template "fullname" . }} labels: draft: {{ default "draft-app" .Values.draft }} chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: {{ template "fullname" . 
}} minReplicas: {{ .Values.hpa.minReplicas }} maxReplicas: {{ .Values.hpa.maxReplicas }} metrics: - type: Resource resource: name: cpu targetAverageUtilization: {{ .Values.hpa.cpuTargetAverageUtilization }} - type: Resource resource: name: memory targetAverageUtilization: {{ .Values.hpa.memoryTargetAverageUtilization }} {{- end }} ================================================ FILE: charts/muta/templates/ingress.yaml ================================================ {{- if and (.Values.jxRequirements.ingress.domain) (not .Values.knativeDeploy) }} apiVersion: {{ .Values.jxRequirements.ingress.apiVersion }} kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx {{- if .Values.ingress.annotations }} {{ toYaml .Values.ingress.annotations | indent 4 }} {{- end }} {{- if .Values.jxRequirements.ingress.annotations }} {{ toYaml .Values.jxRequirements.ingress.annotations | indent 4 }} {{- end }} name: {{ .Values.service.name }} spec: rules: - host: {{ .Values.service.name }}{{ .Values.jxRequirements.ingress.namespaceSubDomain }}{{ .Values.jxRequirements.ingress.domain }} http: paths: - backend: serviceName: {{ .Values.service.name }} servicePort: 80 {{- if .Values.jxRequirements.ingress.tls.enabled }} tls: - hosts: - {{ .Values.service.name }}{{ .Values.jxRequirements.ingress.namespaceSubDomain }}{{ .Values.jxRequirements.ingress.domain }} {{- if .Values.jxRequirements.ingress.tls.production }} secretName: "tls-{{ .Values.jxRequirements.ingress.domain | replace "." "-" }}-p" {{- else }} secretName: "tls-{{ .Values.jxRequirements.ingress.domain | replace "." "-" }}-s" {{- end }} {{- end }} {{- end }} ================================================ FILE: charts/muta/templates/ksvc.yaml ================================================ {{- if .Values.knativeDeploy }} apiVersion: serving.knative.dev/v1alpha1 kind: Service metadata: {{- if .Values.service.name }} name: {{ .Values.service.name }} {{- else }} name: {{ template "fullname" . 
}} {{- end }} labels: chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" spec: runLatest: configuration: revisionTemplate: spec: container: image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: {{- range $pkey, $pval := .Values.env }} - name: {{ $pkey }} value: {{ quote $pval }} {{- end }} livenessProbe: httpGet: path: {{ .Values.probePath }} initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.livenessProbe.periodSeconds }} successThreshold: {{ .Values.livenessProbe.successThreshold }} timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} readinessProbe: failureThreshold: {{ .Values.readinessProbe.failureThreshold }} httpGet: path: {{ .Values.probePath }} periodSeconds: {{ .Values.readinessProbe.periodSeconds }} successThreshold: {{ .Values.readinessProbe.successThreshold }} timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} resources: {{ toYaml .Values.resources | indent 14 }} {{- end }} ================================================ FILE: charts/muta/templates/service.yaml ================================================ {{- if or .Values.knativeDeploy .Values.canary.enabled }} {{- else }} apiVersion: v1 kind: Service metadata: {{- if .Values.service.name }} name: {{ .Values.service.name }} {{- else }} name: {{ template "fullname" . }} {{- end }} labels: chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.service.annotations }} annotations: {{ toYaml .Values.service.annotations | indent 4 }} {{- end }} spec: type: {{ .Values.service.type }} ports: - port: {{ .Values.service.externalPort }} targetPort: {{ .Values.service.internalPort }} protocol: TCP name: http selector: app: {{ template "fullname" . }} {{- end }} ================================================ FILE: charts/muta/values.yaml ================================================ # Default values for Rust projects. 
# This is a YAML-formatted file. # Declare variables to be passed into your templates. replicaCount: 1 image: repository: draft tag: dev pullPolicy: IfNotPresent # define environment variables here as a map of key: value env: # enable this flag to use knative serve to deploy the app knativeDeploy: false # HorizontalPodAutoscaler hpa: enabled: false minReplicas: 2 maxReplicas: 6 cpuTargetAverageUtilization: 80 memoryTargetAverageUtilization: 80 # Canary deployments # If enabled, Istio v1.5+ and Flagger need to be installed in the cluster canary: enabled: false progressDeadlineSeconds: 60 canaryAnalysis: interval: "1m" threshold: 5 maxWeight: 60 stepWeight: 20 # WARNING: Canary deployments will fail and rollback if there is no traffic that will generate the below specified metrics. metrics: requestSuccessRate: threshold: 99 interval: "1m" requestDuration: threshold: 1000 interval: "1m" # The host is using Istio Gateway and is currently not auto-generated # Please overwrite the `canary.host` in `values.yaml` in each environment repository (e.g., staging, production) host: acme.com service: name: muta type: ClusterIP externalPort: 80 internalPort: 8080 annotations: fabric8.io/expose: "true" fabric8.io/ingress.annotations: "kubernetes.io/ingress.class: nginx" resources: limits: cpu: 100m memory: 256Mi requests: cpu: 80m memory: 128Mi probePath: / livenessProbe: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 readinessProbe: failureThreshold: 1 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 # custom ingress annotations on this service ingress: annotations: # kubernetes.io/ingress.class: nginx # values we use from the `jx-requirements.yml` file if we are using helmfile and helm 3 jxRequirements: ingress: domain: "" externalDNS: false namespaceSubDomain: -jx. 
tls: email: "" enabled: false production: false # For Kubernetes v1.14+, use 'networking.k8s.io/v1beta1' apiVersion: "extensions/v1beta1" # shared ingress annotations on all services annotations: # kubernetes.io/ingress.class: nginx ================================================ FILE: charts/preview/Chart.yaml ================================================ apiVersion: v1 description: A Helm chart for Kubernetes icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-platform/d965bfa/images/rust.png name: preview version: 0.1.0-SNAPSHOT ================================================ FILE: charts/preview/Makefile ================================================ OS := $(shell uname) preview: ifeq ($(OS),Darwin) sed -i "" -e "s/version:.*/version: $(PREVIEW_VERSION)/" Chart.yaml sed -i "" -e "s/version:.*/version: $(PREVIEW_VERSION)/" ../*/Chart.yaml sed -i "" -e "s/tag:.*/tag: $(PREVIEW_VERSION)/" values.yaml else ifeq ($(OS),Linux) sed -i -e "s/version:.*/version: $(PREVIEW_VERSION)/" Chart.yaml sed -i -e "s/version:.*/version: $(PREVIEW_VERSION)/" ../*/Chart.yaml sed -i -e "s|repository:.*|repository: $(DOCKER_REGISTRY)\/nervosnetwork\/muta|" values.yaml sed -i -e "s/tag:.*/tag: $(PREVIEW_VERSION)/" values.yaml else echo "platfrom $(OS) not supported to release from" exit -1 endif echo " version: $(PREVIEW_VERSION)" >> requirements.yaml jx step helm build ================================================ FILE: charts/preview/requirements.yaml ================================================ # !! File must end with empty line !! dependencies: - alias: expose name: exposecontroller repository: http://chartmuseum.jenkins-x.io version: 2.3.92 - alias: cleanup name: exposecontroller repository: http://chartmuseum.jenkins-x.io version: 2.3.92 # !! "alias: preview" must be last entry in dependencies array !! # !! Place custom dependencies above !! 
- alias: preview name: muta repository: file://../muta ================================================ FILE: charts/preview/values.yaml ================================================ cleanup: Annotations: helm.sh/hook: pre-delete helm.sh/hook-delete-policy: hook-succeeded Args: - --cleanup expose: Annotations: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: hook-succeeded config: exposer: Ingress http: true tlsacme: false preview: image: pullPolicy: IfNotPresent repository: null tag: null namespace: jx-previews ================================================ FILE: clippy.toml ================================================ too-many-arguments-threshold = 12 ================================================ FILE: common/apm/Cargo.toml ================================================ [package] name = "common-apm" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] protocol = { path = "../../protocol", package = "muta-protocol" } muta-apm = "0.1.0-alpha.15" prometheus = "0.10" prometheus-static-metric = "0.5" derive_more = "0.99" lazy_static = "1.4" ================================================ FILE: common/apm/README.md ================================================ # Metrics documentation for promethues All current metrics and usage ## API | Metric name | Metric types | Related Grafana panel | |---|---|---| | muta_api_request_total | counter | | | muta_api_request_result_total | counter | processed_tx_request | | muta_api_request_time_cost_seconds | histogram | | ## Consensus
| Metric name | Metric types | Related Grafana panel |
|---|---|---|
| muta_concensus_result | counter | |
| muta_consensus_time_cost_seconds | histogram | exec_p90 |
| muta_consensus_round | gauge | consensus_round_cost |
| muta_executing_queue | gauge | executing_block_size |
| muta_consensus_height | gauge | get_cf_each_block_time_usage<br>put_cf_each_block_time_usage<br>current_height |
| muta_consensus_committed_tx_total | counter | TPS |
| muta_consensus_sync_block_duration | histogram | synced_block |
| muta_consensus_duration_seconds | histogram | consensus_p90 |
## Mempool
| Metric name | Metric types | Related Grafana panel |
|---|---|---|
| muta_mempool_counter | counter | |
| muta_mempool_result_counter | counter | |
| muta_mempool_cost_seconds | histogram | |
| muta_mempool_package_size_vec | histogram | |
| muta_mempool_current_size_vec | histogram | |
| muta_mempool_tx_count | gauge | mempool_cached_tx |
## Network
| Metric name | Metric types | Related Grafana panel |
|---|---|---|
| muta_network_message_total | counter | network_message_arrival_rate |
| muta_network_rpc_result_total | counter | |
| muta_network_protocol_time_cost_seconds | histogram | |
| muta_network_total_pending_data_size | gauge | |
| muta_network_ip_pending_data_size | gauge | |
| muta_network_received_message_in_processing_guage | gauge | Received messages in processing |
| muta_network_received_ip_message_in_processing_guage | gauge | Received messages in processing by ip |
| muta_network_connected_peers | gauge | Connected Peers |
| muta_network_ip_ping_in_ms | gauge | Ping (ms)<br>Ping by ip |
| muta_network_ip_disconnected_count | counter | Disconnected count(To other peers) |
| muta_network_outbound_connecting_peers | gauge | Connecting Peers |
| muta_network_unidentified_connections | gauge | Received messages in processing |
| muta_network_saved_peer_count | counter | Saved peers |
| muta_network_tagged_consensus_peers | gauge | Consensus peers |
| muta_network_connected_consensus_peers | gauge | Connected Consensus Peers (Minus itself) |
## Storage
| Metric name | Metric types | Related Grafana panel |
|---|---|---|
| muta_storage_put_cf_seconds | counter | put_cf_each_block_time_usage |
| muta_storage_put_cf_bytes | counter | |
| muta_storage_get_cf_seconds | counter | get_cf_each_block_time_usage |
| muta_storage_get_cf_total | counter | |
================================================ FILE: common/apm/src/lib.rs ================================================
// https://rust-lang.github.io/rust-clippy/master/index.html#float_cmp
#![allow(clippy::float_cmp)]

pub mod metrics;

// Re-exported so downstream crates link the exact same versions of the metric
// machinery that the macros in `metrics` expand against.
pub use muta_apm;
pub use lazy_static;
pub use prometheus;
pub use prometheus_static_metric;

================================================ FILE: common/apm/src/metrics/api.rs ================================================
//! API metrics: request counts, request results and request latency,
//! each keyed by request type.

use crate::metrics::{
    auto_flush_from, exponential_buckets, make_auto_flush_static_metric, register_histogram_vec,
    register_int_counter_vec, HistogramVec, IntCounterVec,
};
use lazy_static::lazy_static;

make_auto_flush_static_metric! {
    pub label_enum RequestKind {
        send_transaction,
        get_block,
    }

    pub label_enum SendTransactionResult {
        success,
        failure,
    }

    pub struct RequestCounterVec: LocalIntCounter {
        "type" => RequestKind,
    }

    pub struct RequestResultCounterVec: LocalIntCounter {
        "type" => RequestKind,
        "result" => SendTransactionResult,
    }

    pub struct RequestTimeHistogramVec: LocalHistogram {
        "type" => RequestKind,
    }
}

lazy_static! {
    pub static ref API_REQUEST_COUNTER_VEC: IntCounterVec =
        register_int_counter_vec!("muta_api_request_total", "Total number of request", &[
            "type"
        ])
        .expect("request total");
    pub static ref API_REQUEST_RESULT_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_api_request_result_total",
        "Total number of request result",
        &["type", "result"]
    )
    .expect("request result total");
    pub static ref API_REQUEST_TIME_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
        "muta_api_request_time_cost_seconds",
        "Request process time cost",
        &["type"],
        exponential_buckets(0.001, 2.0, 20).expect("api req time expontial")
    )
    .expect("request time cost");
}

// Auto-flushed, label-pre-resolved views over the vectors above; cheaper on
// the hot path than `with_label_values`.
lazy_static! {
    pub static ref API_REQUEST_COUNTER_VEC_STATIC: RequestCounterVec =
        auto_flush_from!(API_REQUEST_COUNTER_VEC, RequestCounterVec);
    pub static ref API_REQUEST_RESULT_COUNTER_VEC_STATIC: RequestResultCounterVec =
        auto_flush_from!(API_REQUEST_RESULT_COUNTER_VEC, RequestResultCounterVec);
    pub static ref API_REQUEST_TIME_HISTOGRAM_STATIC: RequestTimeHistogramVec =
        auto_flush_from!(API_REQUEST_TIME_HISTOGRAM_VEC, RequestTimeHistogramVec);
}

================================================ FILE: common/apm/src/metrics/consensus.rs ================================================
//! Consensus metrics: result counters, phase timings and engine gauges.

use crate::metrics::{
    auto_flush_from, exponential_buckets, make_auto_flush_static_metric, register_histogram,
    register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge,
    Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
};
use lazy_static::lazy_static;

make_auto_flush_static_metric! {
    pub label_enum ConsensusResultKind {
        get_block_from_remote,
    }

    pub label_enum ConsensusResult {
        success,
        failure,
    }

    pub struct ConsensusResultCounterVec: LocalIntCounter {
        "type" => ConsensusResultKind,
        "result" => ConsensusResult,
    }

    pub label_enum ConsensusTimeKind {
        commit,
        exec,
        block
    }

    pub struct ConsensusTimeHistogramVec: LocalHistogram {
        "type" => ConsensusTimeKind,
    }

    pub label_enum ConsensusRoundKind {
        round
    }

    pub struct ConsensusRoundHistogramVec: LocalHistogram {
        "type" => ConsensusRoundKind,
    }
}

lazy_static! {
    // NOTE(review): "concensus" is misspelled, but the metric name is part of
    // the monitoring interface (dashboards key on it) — left unchanged.
    pub static ref CONSENSUS_RESULT_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_concensus_result",
        "Total number of consensus result",
        &["type", "result"]
    )
    .unwrap();
    pub static ref CONSENSUS_TIME_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
        "muta_consensus_time_cost_seconds",
        "Consensus process time cost",
        &["type"],
        exponential_buckets(0.05, 1.2, 30).unwrap()
    )
    .unwrap();
}

lazy_static! {
    pub static ref CONSENSUS_RESULT_COUNTER_VEC_STATIC: ConsensusResultCounterVec =
        auto_flush_from!(CONSENSUS_RESULT_COUNTER_VEC, ConsensusResultCounterVec);
    pub static ref CONSENSUS_TIME_HISTOGRAM_VEC_STATIC: ConsensusTimeHistogramVec =
        auto_flush_from!(CONSENSUS_TIME_HISTOGRAM_VEC, ConsensusTimeHistogramVec);
    pub static ref ENGINE_ROUND_GAUGE: IntGauge =
        register_int_gauge!("muta_consensus_round", "Round count of consensus").unwrap();
    pub static ref ENGINE_HEIGHT_GAUGE: IntGauge =
        register_int_gauge!("muta_consensus_height", "Height of muta").unwrap();
    pub static ref ENGINE_EXECUTING_BLOCK_GAUGE: IntGauge =
        register_int_gauge!("muta_executing_block_count", "The executing blocks").unwrap();
    // NOTE(review): "COMMITED" typo kept — renaming a pub static would break
    // callers in other crates.
    pub static ref ENGINE_COMMITED_TX_COUNTER: IntCounter = register_int_counter!(
        "muta_consensus_committed_tx_total",
        "The committed transactions"
    )
    .unwrap();
    pub static ref ENGINE_ORDER_TX_GAUGE: IntGauge =
        register_int_gauge!("muta_proposal_order_tx_len", "The ordered transactions len").unwrap();
    pub static ref ENGINE_SYNC_TX_GAUGE: IntGauge =
        register_int_gauge!("muta_proposal_sync_tx_len", "The sync transactions len").unwrap();
    pub static ref ENGINE_SYNC_BLOCK_COUNTER: IntCounter = register_int_counter!(
        "muta_consensus_sync_block_total",
        "The counter for sync blocks from remote"
    )
    .unwrap();
    pub static ref ENGINE_SYNC_BLOCK_HISTOGRAM: Histogram = register_histogram!(
        "muta_consensus_sync_block_duration",
        "Histogram of consensus sync duration",
        exponential_buckets(0.5, 1.2, 20).expect("consensus duration time exponential")
    )
    .unwrap();
    pub static ref ENGINE_CONSENSUS_COST_TIME: Histogram = register_histogram!(
        "muta_consensus_duration_seconds",
        "Histogram of consensus duration from last block",
        exponential_buckets(1.0, 1.2, 15).expect("consensus duration time exponential")
    )
    .unwrap();
}

================================================ FILE: common/apm/src/metrics/mempool.rs ================================================
//! Mempool metrics: operation counters/results, timings and size
//! distributions.

use crate::metrics::{
    auto_flush_from, exponential_buckets, make_auto_flush_static_metric, register_histogram_vec,
    register_int_counter_vec, register_int_gauge, HistogramVec, IntCounterVec, IntGauge,
};
use lazy_static::lazy_static;

make_auto_flush_static_metric! {
    pub label_enum MempoolKind {
        insert_tx_from_p2p,
        package,
        current_size,
    }

    pub label_enum MempoolOpResult {
        success,
        failure,
    }

    pub struct MempoolCounterVec: LocalIntCounter {
        "type" => MempoolKind,
    }

    pub struct MempoolResultCounterVec: LocalIntCounter {
        "type" => MempoolKind,
        "result" => MempoolOpResult,
    }

    pub struct MempoolTimeHistogramVec: LocalHistogram {
        "type" => MempoolKind,
    }

    pub struct MempoolPackageSizeVec: LocalHistogram {
        "type" => MempoolKind,
    }

    pub struct MempoolCurrentSizeVec: LocalHistogram {
        "type" => MempoolKind,
    }
}

lazy_static! {
    pub static ref MEMPOOL_COUNTER_VEC: IntCounterVec =
        register_int_counter_vec!("muta_mempool_counter", "Counter in mempool", &["type"])
            .expect("failed init mempool counter vec");
    pub static ref MEMPOOL_RESULT_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_mempool_result_counter",
        "Result counter in mempool",
        &["type", "result"]
    )
    .expect("request result total");
    pub static ref MEMPOOL_TIME_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
        "muta_mempool_cost_seconds",
        "Time cost in mempool",
        &["type"],
        exponential_buckets(0.05, 2.0, 10).expect("mempool time expontial")
    )
    .expect("mempool time cost");
    pub static ref MEMPOOL_PACKAGE_SIZE_VEC: HistogramVec = register_histogram_vec!(
        "muta_mempool_package_size_vec",
        "Package size",
        &["type"],
        exponential_buckets(0.05, 2.0, 10).expect("mempool package size exponential")
    )
    .expect("mempool package size");
    // NOTE(review): registered with an empty label set (&[]) while
    // `MempoolCurrentSizeVec` maps a "type" label — an auto-flush lookup that
    // supplies a label value will fail against this vec; confirm intended.
    pub static ref MEMPOOL_CURRENT_SIZE_VEC: HistogramVec = register_histogram_vec!(
        "muta_mempool_current_size_vec",
        "Current size",
        &[],
        exponential_buckets(0.05, 2.0, 10).expect("mempool current size exponential")
    )
    .expect("mempool current size");
    pub static ref MEMPOOL_LEN_GAUGE: IntGauge =
        register_int_gauge!("muta_mempool_tx_count", "Tx len in mempool").unwrap();
}

lazy_static! {
    pub static ref MEMPOOL_COUNTER_STATIC: MempoolCounterVec =
        auto_flush_from!(MEMPOOL_COUNTER_VEC, MempoolCounterVec);
    pub static ref MEMPOOL_RESULT_COUNTER_STATIC: MempoolResultCounterVec =
        auto_flush_from!(MEMPOOL_RESULT_COUNTER_VEC, MempoolResultCounterVec);
    pub static ref MEMPOOL_TIME_STATIC: MempoolTimeHistogramVec =
        auto_flush_from!(MEMPOOL_TIME_HISTOGRAM_VEC, MempoolTimeHistogramVec);
    pub static ref MEMPOOL_PACKAGE_SIZE_VEC_STATIC: MempoolPackageSizeVec =
        auto_flush_from!(MEMPOOL_PACKAGE_SIZE_VEC, MempoolPackageSizeVec);
    pub static ref MEMPOOL_CURRENT_SIZE_VEC_STATIC: MempoolCurrentSizeVec =
        auto_flush_from!(MEMPOOL_CURRENT_SIZE_VEC, MempoolCurrentSizeVec);
}

================================================ FILE: common/apm/src/metrics/network.rs ================================================
//! Network metrics: message counters, rpc results, protocol timings, ping,
//! pending-data and peer gauges.

use lazy_static::lazy_static;

use crate::metrics::{
    auto_flush_from, exponential_buckets, linear_buckets, make_auto_flush_static_metric,
    register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge,
    register_int_gauge_vec, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec,
};

make_auto_flush_static_metric! {
    pub label_enum MessageDirection {
        sent,
        received,
    }

    pub label_enum ProtocolKind {
        rpc,
    }

    pub label_enum RPCResult {
        success,
        timeout,
    }

    // NOTE(review): "Taret" is a typo for "Target", kept because renaming a
    // pub item would break external users.
    pub label_enum MessageTaret {
        single,
        multi,
        all
    }

    pub struct MessageCounterVec: LocalIntCounter {
        "direction" => MessageDirection,
    }

    pub struct RPCResultCounterVec: LocalIntCounter {
        "result" => RPCResult,
    }

    pub struct ProtocolTimeHistogramVec: LocalHistogram {
        "type" => ProtocolKind,
    }
}

lazy_static! {
    pub static ref NETWORK_MESSAGE_COUNT_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_network_message_total",
        "Total number of network message",
        &["direction", "target", "type", "module", "action"]
    )
    .expect("network message total");
    pub static ref NETWORK_MESSAGE_SIZE_COUNT_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_network_message_size",
        "Accumulated compressed network message size",
        &["direction", "url"]
    )
    .expect("network message size");
    pub static ref NETWORK_RPC_RESULT_COUNT_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_network_rpc_result_total",
        "Total number of network rpc result",
        &["result"]
    )
    .expect("network rpc result total");
    pub static ref NETWORK_PROTOCOL_TIME_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
        "muta_network_protocol_time_cost_seconds",
        "Network protocol time cost",
        &["type"],
        exponential_buckets(0.01, 2.0, 20).expect("network protocol time expontial")
    )
    .expect("network protocol time cost");
    pub static ref NETWORK_PING_HISTOGRAM_VEC: HistogramVec = register_histogram_vec!(
        "muta_network_ping_in_ms",
        "Network peer ping time",
        &["ip"],
        linear_buckets(100.0, 200.0, 5).expect("network ping time linear buckets")
    )
    .expect("network ping time");
}

lazy_static! {
    pub static ref NETWORK_RPC_RESULT_COUNT_VEC_STATIC: RPCResultCounterVec =
        auto_flush_from!(NETWORK_RPC_RESULT_COUNT_VEC, RPCResultCounterVec);
    pub static ref NETWORK_PROTOCOL_TIME_HISTOGRAM_VEC_STATIC: ProtocolTimeHistogramVec = auto_flush_from!(
        NETWORK_PROTOCOL_TIME_HISTOGRAM_VEC,
        ProtocolTimeHistogramVec
    );
}

lazy_static! {
    pub static ref NETWORK_TOTAL_PENDING_DATA_SIZE: IntGauge = register_int_gauge!(
        "muta_network_total_pending_data_size",
        "Total pending data size"
    )
    .expect("network total pending data size");
    pub static ref NETWORK_IP_PENDING_DATA_SIZE_VEC: IntGaugeVec = register_int_gauge_vec!(
        "muta_network_ip_pending_data_size",
        "IP pending data size",
        &["ip"]
    )
    .expect("network ip pending data size");
    pub static ref NETWORK_RECEIVED_MESSAGE_IN_PROCESSING_GUAGE: IntGauge = register_int_gauge!(
        "muta_network_received_message_in_processing_guage",
        "Total number of network received message current in processing"
    )
    .expect("network received message in processing");
    pub static ref NETWORK_RECEIVED_IP_MESSAGE_IN_PROCESSING_GUAGE_VEC: IntGaugeVec = register_int_gauge_vec!(
        "muta_network_received_ip_message_in_processing_guage",
        "Number of network received messasge from ip current in processing",
        &["ip"]
    )
    .expect("network received ip message in processing");
    pub static ref NETWORK_CONNECTED_PEERS: IntGauge =
        register_int_gauge!("muta_network_connected_peers", "Total connected peer count")
            .expect("network total connecteds");
    pub static ref NETWORK_IP_DISCONNECTED_COUNT_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_network_ip_disconnected_count",
        "Total number of ip disconnected count",
        &["ip"]
    )
    .expect("network disconnect ip count");
    pub static ref NETWORK_OUTBOUND_CONNECTING_PEERS: IntGauge = register_int_gauge!(
        "muta_network_outbound_connecting_peers",
        "Total number of network outbound connecting peers"
    )
    .expect("network outbound connecting peer count");
    pub static ref NETWORK_UNIDENTIFIED_CONNECTIONS: IntGauge = register_int_gauge!(
        "muta_network_unidentified_connections",
        "Total number of network unidentified connections"
    )
    .expect("network unidentified connections");
    pub static ref NETWORK_SAVED_PEER_COUNT: IntCounter = register_int_counter!(
        "muta_network_saved_peer_count",
        "Total number of saved peer count"
    )
    .expect("network saved peer count");
    pub static ref NETWORK_TAGGED_CONSENSUS_PEERS: IntGauge = register_int_gauge!(
        "muta_network_tagged_consensus_peers",
        "Total number of consensus peers"
    )
    .expect("network tagged consensus peers");
    pub static ref NETWORK_CONNECTED_CONSENSUS_PEERS: IntGauge = register_int_gauge!(
        "muta_network_connected_consensus_peers",
        "Total number of connected consensus peers"
    )
    .expect("network connected consenss peers");
}

/// Split a broadcast url of the form "/<type>/<module>/<action>" and bump the
/// message counter with the derived labels. Urls with fewer than three
/// segments after the leading slash are silently ignored.
fn on_network_message(direction: &str, target: &str, url: &str, inc: i64) {
    let spliced: Vec<&str> = url.split('/').collect();
    if spliced.len() < 4 {
        return;
    }

    // spliced[0] is the empty string before the leading '/'.
    let network_type = spliced[1];
    let module = spliced[2];
    let action = spliced[3];

    NETWORK_MESSAGE_COUNT_VEC
        .with_label_values(&[direction, target, network_type, module, action])
        .inc_by(inc);
}

pub fn on_network_message_sent_all_target(url: &str) {
    on_network_message("sent", "all", url, 1)
}

pub fn on_network_message_sent_multi_target(url: &str, target_count: i64) {
    // FIX(review): the original recorded multi-target sends under the
    // "single" target label; "multi" matches the MessageTaret label set and
    // keeps the single-send series accurate.
    on_network_message("sent", "multi", url, target_count);
}

pub fn on_network_message_sent(url: &str) {
    on_network_message("sent", "single", url, 1);
}

pub fn on_network_message_received(url: &str) {
    on_network_message("received", "single", url, 1);
}

================================================ FILE: common/apm/src/metrics/storage.rs ================================================
//! Storage metrics: per-column-family put/get time usage, inserted bytes and
//! fetched key counts.

use std::time::Duration;

use lazy_static::lazy_static;

use protocol::traits::StorageCategory;

use crate::metrics::{
    auto_flush_from, duration_to_sec, make_auto_flush_static_metric, register_counter_vec,
    register_int_counter_vec, CounterVec, IntCounterVec,
};

make_auto_flush_static_metric! {
    pub label_enum COLUMN_FAMILY_TYPES {
        block,
        block_header,
        receipt,
        signed_tx,
        wal,
        hash_height,
        state,
    }

    pub struct StoragePutCfTimeUsageVec: LocalCounter {
        "cf" => COLUMN_FAMILY_TYPES
    }

    pub struct StoragePutCfBytesVec: LocalIntCounter {
        "cf" => COLUMN_FAMILY_TYPES
    }

    pub struct StorageGetCfTimeUsageVec: LocalCounter {
        "cf" => COLUMN_FAMILY_TYPES
    }

    pub struct StorageGetCfTotalVec: LocalIntCounter {
        "cf" => COLUMN_FAMILY_TYPES
    }
}

lazy_static! {
    pub static ref STORAGE_PUT_CF_TIME_USAGE_VEC: CounterVec = register_counter_vec!(
        "muta_storage_put_cf_seconds",
        "Storage put_cf time usage",
        &["cf"]
    )
    .unwrap();
    pub static ref STORAGE_PUT_CF_BYTES_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_storage_put_cf_bytes",
        "Storage total insert bytes",
        &["cf"]
    )
    .unwrap();
    pub static ref STORAGE_GET_CF_TIME_USAGE_VEC: CounterVec = register_counter_vec!(
        "muta_storage_get_cf_seconds",
        "Storage get_cf time usage",
        &["cf"]
    )
    .unwrap();
    pub static ref STORAGE_GET_CF_COUNTER_VEC: IntCounterVec = register_int_counter_vec!(
        "muta_storage_get_cf_total",
        "Storage total get_cf keys number",
        &["cf"]
    )
    .unwrap();
}

lazy_static! {
    pub static ref STORAGE_PUT_CF_TIME_USAGE: StoragePutCfTimeUsageVec =
        auto_flush_from!(STORAGE_PUT_CF_TIME_USAGE_VEC, StoragePutCfTimeUsageVec);
    pub static ref STORAGE_PUT_CF_BYTES_COUNTER: StoragePutCfBytesVec =
        auto_flush_from!(STORAGE_PUT_CF_BYTES_COUNTER_VEC, StoragePutCfBytesVec);
    pub static ref STORAGE_GET_CF_TIME_USAGE: StorageGetCfTimeUsageVec =
        auto_flush_from!(STORAGE_GET_CF_TIME_USAGE_VEC, StorageGetCfTimeUsageVec);
    pub static ref STORAGE_GET_CF_COUNTER: StorageGetCfTotalVec =
        auto_flush_from!(STORAGE_GET_CF_COUNTER_VEC, StorageGetCfTotalVec);
}

/// Record a state-column read: elapsed wall time plus number of keys fetched.
pub fn on_storage_get_state(duration: Duration, keys: i64) {
    let seconds = duration_to_sec(duration);

    STORAGE_GET_CF_TIME_USAGE.state.inc_by(seconds);
    STORAGE_GET_CF_COUNTER.state.inc_by(keys);
}

/// Record a state-column write: elapsed wall time plus bytes written.
pub fn on_storage_put_state(duration: Duration, size: i64) {
    let seconds = duration_to_sec(duration);

    STORAGE_PUT_CF_TIME_USAGE.state.inc_by(seconds);
    STORAGE_PUT_CF_BYTES_COUNTER.state.inc_by(size);
}

/// Record a read against the column family that `sc` maps to.
pub fn on_storage_get_cf(sc: StorageCategory, duration: Duration, keys: i64) {
    let seconds = duration_to_sec(duration);

    match sc {
        StorageCategory::Block => {
            STORAGE_GET_CF_TIME_USAGE.block.inc_by(seconds);
            STORAGE_GET_CF_COUNTER.block.inc_by(keys);
        }
        StorageCategory::BlockHeader => {
            STORAGE_GET_CF_TIME_USAGE.block_header.inc_by(seconds);
            STORAGE_GET_CF_COUNTER.block_header.inc_by(keys);
        }
        StorageCategory::Receipt => {
            STORAGE_GET_CF_TIME_USAGE.receipt.inc_by(seconds);
            STORAGE_GET_CF_COUNTER.receipt.inc_by(keys);
        }
        StorageCategory::Wal => {
            STORAGE_GET_CF_TIME_USAGE.wal.inc_by(seconds);
            STORAGE_GET_CF_COUNTER.wal.inc_by(keys);
        }
        StorageCategory::SignedTransaction => {
            STORAGE_GET_CF_TIME_USAGE.signed_tx.inc_by(seconds);
            STORAGE_GET_CF_COUNTER.signed_tx.inc_by(keys);
        }
        StorageCategory::HashHeight => {
            STORAGE_GET_CF_TIME_USAGE.hash_height.inc_by(seconds);
            STORAGE_GET_CF_COUNTER.hash_height.inc_by(keys);
        }
    }
}

/// Record a write against the column family that `sc` maps to.
pub fn on_storage_put_cf(sc: StorageCategory, duration: Duration, size: i64) {
    let seconds = duration_to_sec(duration);

    match sc {
        StorageCategory::Block => {
            STORAGE_PUT_CF_TIME_USAGE.block.inc_by(seconds);
            STORAGE_PUT_CF_BYTES_COUNTER.block.inc_by(size);
        }
        StorageCategory::BlockHeader => {
            STORAGE_PUT_CF_TIME_USAGE.block_header.inc_by(seconds);
            STORAGE_PUT_CF_BYTES_COUNTER.block_header.inc_by(size);
        }
        StorageCategory::Receipt => {
            STORAGE_PUT_CF_TIME_USAGE.receipt.inc_by(seconds);
            STORAGE_PUT_CF_BYTES_COUNTER.receipt.inc_by(size);
        }
        StorageCategory::Wal => {
            STORAGE_PUT_CF_TIME_USAGE.wal.inc_by(seconds);
            STORAGE_PUT_CF_BYTES_COUNTER.wal.inc_by(size);
        }
        StorageCategory::SignedTransaction => {
            STORAGE_PUT_CF_TIME_USAGE.signed_tx.inc_by(seconds);
            STORAGE_PUT_CF_BYTES_COUNTER.signed_tx.inc_by(size);
        }
        StorageCategory::HashHeight => {
            STORAGE_PUT_CF_TIME_USAGE.hash_height.inc_by(seconds);
            STORAGE_PUT_CF_BYTES_COUNTER.hash_height.inc_by(size);
        }
    }
}

================================================ FILE: common/apm/src/metrics.rs ================================================
pub mod api;
pub mod consensus;
pub mod mempool;
pub mod network;
pub mod storage;

pub use prometheus::{
    CounterVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec,
};

use derive_more::Display;
use prometheus::{
    exponential_buckets, linear_buckets, register_counter_vec, register_histogram,
    register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge,
    register_int_gauge_vec, Encoder, TextEncoder,
};
use prometheus_static_metric::{auto_flush_from, make_auto_flush_static_metric};
use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult};

use std::time::Duration;

// FIX(review): the generic parameters of the two `From` impls below were
// stripped by extraction ("impl From for Error"); they are pinned by the
// `fn from` signatures. Also fixed the "promtheus" typo in the display string.
#[derive(Debug, Display)]
enum Error {
    #[display(fmt = "prometheus {}", _0)]
    Prometheus(prometheus::Error),
}

impl From<prometheus::Error> for Error {
    fn from(err: prometheus::Error) -> Error {
        Error::Prometheus(err)
    }
}

impl From<Error> for ProtocolError {
    fn from(err: Error) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Metric, Box::new(err))
    }
}

impl std::error::Error for Error {}
pub fn duration_to_sec(d: Duration) -> f64 { d.as_secs_f64() } pub fn all_metrics() -> ProtocolResult> { let metric_families = prometheus::gather(); let encoder = TextEncoder::new(); let mut encoded_metrics = vec![]; encoder .encode(&metric_families, &mut encoded_metrics) .map_err(Error::Prometheus)?; Ok(encoded_metrics) } #[cfg(test)] mod tests { use super::duration_to_sec; use std::time::Duration; #[test] fn test_duration_to_sec() { let d = Duration::from_millis(1110); let sec = duration_to_sec(d); assert_eq!(sec, 1.11 as f64); } } ================================================ FILE: common/channel/Cargo.toml ================================================ [package] name = "common-channel" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] ================================================ FILE: common/channel/src/lib.rs ================================================ #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } } ================================================ FILE: common/config-parser/Cargo.toml ================================================ [package] name = "common-config-parser" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] reqwest = "0.9" serde = "1.0" serde_derive = "1.0" stringreader = "0.1" toml = "0.4" core-consensus = { path = "../../core/consensus" } core-mempool = { path = "../../core/mempool" } protocol = { path = "../../protocol", package = "muta-protocol" } ================================================ FILE: common/config-parser/src/lib.rs ================================================ pub mod types; use serde::de; use std::error; use std::fmt; use std::fs; use 
std::io; use std::path::Path; /// Parse a config from reader. pub fn parse_reader(r: &mut R) -> Result { let mut buf = Vec::new(); r.read_to_end(&mut buf)?; Ok(toml::from_slice(&buf)?) } /// Parse a config from file. /// /// Note: In most cases, function `parse` is better. pub fn parse_file(name: impl AsRef) -> Result { let mut f = fs::File::open(name)?; parse_reader(&mut f) } // FIXME: http is inscure, support https only /// Parse a config from method of HTTP GET. /// /// Note: In most cases, function `parse` is better. pub fn parse_http(name: &str) -> Result { let mut r = reqwest::get(name)?; parse_reader(&mut r) } /// If name is starts with "http", parse it by function `parse_http`, else /// `parse_file` in use. pub fn parse(name: &str) -> Result { if name.starts_with("http") { parse_http(name) } else { parse_file(name) } } #[derive(Debug)] pub enum ParseError { IO(io::Error), Deserialize(toml::de::Error), Reqwest(reqwest::Error), } impl error::Error for ParseError {} impl fmt::Display for ParseError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { ParseError::IO(e) => return write!(f, "{}", e), ParseError::Deserialize(e) => return write!(f, "{}", e), ParseError::Reqwest(e) => return write!(f, "{}", e), } } } impl From for ParseError { fn from(error: io::Error) -> ParseError { ParseError::IO(error) } } impl From for ParseError { fn from(error: toml::de::Error) -> ParseError { ParseError::Deserialize(error) } } impl From for ParseError { fn from(error: reqwest::Error) -> ParseError { ParseError::Reqwest(error) } } #[cfg(test)] mod tests { use super::{parse, parse_file, parse_http, parse_reader}; use serde_derive::Deserialize; use stringreader::StringReader; #[derive(Debug, Deserialize)] struct Config { global_string: Option, global_int: Option, } #[test] fn test_parse_reader() { let toml_str = r#" global_string = "Best Food" global_int = 42 "#; let mut toml_r = StringReader::new(toml_str); let config: Config = parse_reader(&mut 
toml_r).unwrap(); assert_eq!(config.global_string, Some(String::from("Best Food"))); assert_eq!(config.global_int, Some(42)); } #[ignore] #[test] fn test_parse_file() { let config: Config = parse_file("/tmp/config.toml").unwrap(); assert_eq!(config.global_string, Some(String::from("Best Food"))); assert_eq!(config.global_int, Some(42)); } #[ignore] #[test] fn test_parse_http() { let config: Config = parse_http("http://127.0.0.1:8080/config.toml").unwrap(); assert_eq!(config.global_string, Some(String::from("Best Food"))); assert_eq!(config.global_int, Some(42)); } #[ignore] #[test] fn test_parse() { let config: Config = parse("http://127.0.0.1:8080/config.toml").unwrap(); assert_eq!(config.global_string, Some(String::from("Best Food"))); assert_eq!(config.global_int, Some(42)); let config: Config = parse("/tmp/config.toml").unwrap(); assert_eq!(config.global_string, Some(String::from("Best Food"))); assert_eq!(config.global_int, Some(42)); } } ================================================ FILE: common/config-parser/src/types.rs ================================================ use std::collections::HashMap; use std::net::SocketAddr; use std::path::PathBuf; use serde_derive::Deserialize; use core_consensus::{DEFAULT_OVERLORD_GAP, DEFAULT_SYNC_TXS_CHUNK_SIZE}; use core_mempool::{DEFAULT_BROADCAST_TXS_INTERVAL, DEFAULT_BROADCAST_TXS_SIZE}; use protocol::types::Hex; #[derive(Debug, Deserialize)] pub struct ConfigGraphQL { pub listening_address: SocketAddr, pub graphql_uri: String, pub graphiql_uri: String, #[serde(default)] pub workers: usize, #[serde(default)] pub maxconn: usize, #[serde(default)] pub max_payload_size: usize, pub tls: Option, pub enable_dump_profile: Option, } #[derive(Debug, Deserialize)] pub struct ConfigGraphQLTLS { pub private_key_file_path: PathBuf, pub certificate_chain_file_path: PathBuf, } #[derive(Debug, Deserialize)] pub struct ConfigNetwork { pub bootstraps: Option>, pub allowlist: Option>, pub allowlist_only: Option, pub 
trust_interval_duration: Option, pub trust_max_history_duration: Option, pub fatal_ban_duration: Option, pub soft_ban_duration: Option, pub max_connected_peers: Option, pub same_ip_conn_limit: Option, pub inbound_conn_limit: Option, pub listening_address: SocketAddr, pub rpc_timeout: Option, pub selfcheck_interval: Option, pub send_buffer_size: Option, pub write_timeout: Option, pub recv_buffer_size: Option, pub max_frame_length: Option, pub max_wait_streams: Option, pub ping_interval: Option, } #[derive(Debug, Deserialize)] pub struct ConfigNetworkBootstrap { pub peer_id: String, pub address: String, } fn default_overlord_gap() -> usize { DEFAULT_OVERLORD_GAP } fn default_sync_txs_chunk_size() -> usize { DEFAULT_SYNC_TXS_CHUNK_SIZE } #[derive(Debug, Deserialize)] pub struct ConfigConsensus { #[serde(default = "default_overlord_gap")] pub overlord_gap: usize, #[serde(default = "default_sync_txs_chunk_size")] pub sync_txs_chunk_size: usize, } fn default_broadcast_txs_size() -> usize { DEFAULT_BROADCAST_TXS_SIZE } fn default_broadcast_txs_interval() -> u64 { DEFAULT_BROADCAST_TXS_INTERVAL } #[derive(Debug, Deserialize)] pub struct ConfigMempool { pub pool_size: u64, #[serde(default = "default_broadcast_txs_size")] pub broadcast_txs_size: usize, #[serde(default = "default_broadcast_txs_interval")] pub broadcast_txs_interval: u64, } #[derive(Debug, Deserialize)] pub struct ConfigExecutor { pub light: bool, pub triedb_cache_size: usize, } #[derive(Debug, Deserialize)] pub struct ConfigRocksDB { pub max_open_files: i32, } impl Default for ConfigRocksDB { fn default() -> Self { Self { max_open_files: 64 } } } #[derive(Debug, Deserialize)] pub struct ConfigLogger { pub filter: String, pub log_to_console: bool, pub console_show_file_and_line: bool, pub log_to_file: bool, pub metrics: bool, pub log_path: PathBuf, pub file_size_limit: u64, #[serde(default)] pub modules_level: HashMap, } impl Default for ConfigLogger { fn default() -> Self { Self { filter: "info".into(), 
log_to_console: true, console_show_file_and_line: false, log_to_file: true, metrics: true, log_path: "logs/".into(), file_size_limit: 1024 * 1024 * 1024, // GiB modules_level: HashMap::new(), } } } #[derive(Debug, Deserialize)] pub struct ConfigAPM { pub service_name: String, pub tracing_address: SocketAddr, pub tracing_batch_size: Option, } #[derive(Debug, Deserialize)] pub struct Config { // crypto pub privkey: Hex, // db config pub data_path: PathBuf, pub graphql: ConfigGraphQL, pub network: ConfigNetwork, pub mempool: ConfigMempool, pub executor: ConfigExecutor, pub consensus: ConfigConsensus, #[serde(default)] pub logger: ConfigLogger, #[serde(default)] pub rocksdb: ConfigRocksDB, pub apm: Option, } impl Config { pub fn data_path_for_state(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("rocksdb"); path_state.push("state_data"); path_state } pub fn data_path_for_block(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("rocksdb"); path_state.push("block_data"); path_state } pub fn data_path_for_txs_wal(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("txs_wal"); path_state } pub fn data_path_for_consensus_wal(&self) -> PathBuf { let mut path_state = self.data_path.clone(); path_state.push("consensus_wal"); path_state } } ================================================ FILE: common/crypto/Cargo.toml ================================================ [package] name = "common-crypto" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] ophelia-bls-amcl = "0.3" ophelia-secp256k1 = "0.3" ophelia = "0.3" [dev-dependencies] overlord = "0.2.0-alpha.11" protocol = { path = "../../protocol", package = "muta-protocol"} rand = "0.7" rlp = "0.4" ================================================ FILE: 
common/crypto/src/lib.rs ================================================ #![feature(test)] pub use ophelia::HashValue; pub use ophelia::{ BlsSignatureVerify, Crypto, Error, PrivateKey, PublicKey, Signature, ToBlsPublicKey, ToPublicKey, UncompressedPublicKey, }; pub use ophelia_bls_amcl::{BlsCommonReference, BlsPrivateKey, BlsPublicKey, BlsSignature}; pub use ophelia_secp256k1::{ Secp256k1, Secp256k1PrivateKey, Secp256k1PublicKey, Secp256k1Signature, }; #[rustfmt::skip] /// Bench in Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz (8 x 2200) /// test benches::bench_4_aggregated_sig ... bench: 20,325 ns/iter (+/- 1,251) /// test benches::bench_8_aggregated_sig ... bench: 40,178 ns/iter (+/- 4,191) /// test benches::bench_16_aggregated_sig ... bench: 78,256 ns/iter (+/- 5,680) /// test benches::bench_32_aggregated_sig ... bench: 156,514 ns/iter (+/- 14,312) /// test benches::bench_64_aggregated_sig ... bench: 313,124 ns/iter (+/- 16,774) /// test benches::bench_4_aggregated_sig_verify ... bench: 4,451,726 ns/iter (+/- 341,019) /// test benches::bench_8_aggregated_sig_verify ... bench: 4,347,873 ns/iter (+/- 247,429) /// test benches::bench_16_aggregated_sig_verify ... bench: 5,034,893 ns/iter (+/- 1,552,969) /// test benches::bench_32_aggregated_sig_verify ... bench: 4,439,291 ns/iter (+/- 452,905) /// test benches::bench_64_aggregated_sig_verify ... 
bench: 4,404,453 ns/iter (+/- 224,377) #[cfg(test)] mod benches { extern crate test; use std::convert::TryFrom; use overlord::types::{Vote, VoteType}; use rand::distributions::Alphanumeric; use rand::{random, Rng, RngCore}; use test::Bencher; use protocol::types::Hash; use protocol::{Bytes, BytesMut}; use super::*; fn gen_common_ref() -> String { rand::thread_rng() .sample_iter(&Alphanumeric) .take(10) .collect::() } fn mock_block_hash() -> Hash { let temp = (0..10).map(|_| random::()).collect::>(); Hash::digest(Bytes::from(temp)) } fn mock_vote() -> Vote { Vote { height: 0u64, round: 0u64, vote_type: VoteType::Prevote, block_hash: mock_block_hash().as_bytes(), } } fn gen_key_pair_sigs( size: usize, keypairs: &mut Vec<(BlsPrivateKey, BlsPublicKey)>, sigs: &mut Vec, hash: &HashValue, common_ref: &BlsCommonReference, ) { for _i in 0..size { let seckey = { let mut seed = [0u8; 32]; rand::rngs::OsRng.fill_bytes(&mut seed); Hash::digest(BytesMut::from(seed.as_ref()).freeze()).as_bytes() }; let bls_priv_key = BlsPrivateKey::try_from([&[0u8; 16], seckey.as_ref()].concat().as_ref()).unwrap(); let bls_pub_key = bls_priv_key.pub_key(common_ref); let sig = bls_priv_key.sign_message(&hash); keypairs.push((bls_priv_key, bls_pub_key)); sigs.push(sig); } } #[bench] fn bench_4_aggregated_sig(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 4, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); b.iter(move || { let _ = BlsSignature::combine(sigs_pubkeys.clone()); }) } #[bench] fn bench_8_aggregated_sig(b: &mut Bencher) { let common_ref: BlsCommonReference = 
gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 8, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); b.iter(move || { let _ = BlsSignature::combine(sigs_pubkeys.clone()); }) } #[bench] fn bench_16_aggregated_sig(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 16, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); b.iter(move || { let _ = BlsSignature::combine(sigs_pubkeys.clone()); }) } #[bench] fn bench_32_aggregated_sig(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 32, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); b.iter(move || { let _ = BlsSignature::combine(sigs_pubkeys.clone()); }) } #[bench] fn bench_64_aggregated_sig(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() 
.as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 64, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); b.iter(move || { let _ = BlsSignature::combine(sigs_pubkeys.clone()); }) } #[bench] fn bench_4_aggregated_sig_verify(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 4, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); let aggragated_sig = BlsSignature::combine(sigs_pubkeys); let aggregated_key = BlsPublicKey::aggregate( priv_pub_keys .iter() .map(|key_pair| key_pair.1.clone()) .collect::>(), ); b.iter(move || { aggragated_sig .clone() .verify(&vote_msg, &aggregated_key, &common_ref) .unwrap(); }) } #[bench] fn bench_8_aggregated_sig_verify(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 8, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); let aggragated_sig = BlsSignature::combine(sigs_pubkeys); let aggregated_key = BlsPublicKey::aggregate( priv_pub_keys .iter() .map(|key_pair| key_pair.1.clone()) .collect::>(), ); b.iter(move || { aggragated_sig .clone() 
.verify(&vote_msg, &aggregated_key, &common_ref) .unwrap(); }) } #[bench] fn bench_16_aggregated_sig_verify(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 16, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); let aggragated_sig = BlsSignature::combine(sigs_pubkeys); let aggregated_key = BlsPublicKey::aggregate( priv_pub_keys .iter() .map(|key_pair| key_pair.1.clone()) .collect::>(), ); b.iter(move || { aggragated_sig .clone() .verify(&vote_msg, &aggregated_key, &common_ref) .unwrap(); }) } #[bench] fn bench_32_aggregated_sig_verify(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 32, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); let aggragated_sig = BlsSignature::combine(sigs_pubkeys); let aggregated_key = BlsPublicKey::aggregate( priv_pub_keys .iter() .map(|key_pair| key_pair.1.clone()) .collect::>(), ); b.iter(move || { aggragated_sig .clone() .verify(&vote_msg, &aggregated_key, &common_ref) .unwrap(); }) } #[bench] fn bench_64_aggregated_sig_verify(b: &mut Bencher) { let common_ref: BlsCommonReference = gen_common_ref().as_str().into(); let vote_msg = HashValue::try_from( Hash::digest(Bytes::from(rlp::encode(&mock_vote()))) .as_bytes() .as_ref(), ) .unwrap(); 
let mut priv_pub_keys = Vec::new(); let mut signatures = Vec::new(); gen_key_pair_sigs( 64, &mut priv_pub_keys, &mut signatures, &vote_msg, &common_ref, ); let sigs_pubkeys = signatures .iter() .zip(priv_pub_keys.iter()) .map(|(sig, key_pair)| (sig.clone(), key_pair.1.clone())) .collect::>(); let aggragated_sig = BlsSignature::combine(sigs_pubkeys); let aggregated_key = BlsPublicKey::aggregate( priv_pub_keys .iter() .map(|key_pair| key_pair.1.clone()) .collect::>(), ); b.iter(move || { aggragated_sig .clone() .verify(&vote_msg, &aggregated_key, &common_ref) .unwrap(); }) } } ================================================ FILE: common/logger/Cargo.toml ================================================ [package] name = "common-logger" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] env_logger = "0.7" log = "0.4" # Turn off gzip feature, it hurts performance. For more information, reference # log4rs document. log4rs = { version = "0.13", features = ["all_components", "file", "yaml_format"] } json = "0.12" creep = "0.2" rustracing_jaeger = "0.5" serde = "1.0" serde_derive = "1.0" chrono = "0.4" ================================================ FILE: common/logger/README.md ================================================ # Logger Module Instruction ## Logger Config The logger config in `config.toml` is listed below with default values. ```toml [logger] filter = "info" log_to_console = true console_show_file_and_line = false log_path = "logs/" log_to_file = true metrics = true ``` `filter` is the root logger filter, must be one of `off`, `trace`, `debug`, `info`, `warn` and `error`. If `log_to_console` is `true`, logs like below will be logged to console. 
``` [2019-12-02T10:02:45.779337+08:00 INFO overlord::state::process] Overlord: state receive commit event height 11220, round 0 ``` If `console_show_file_and_line` is `true`, log file and line number will also be logged to console, pretty useful for debugging. ``` [2019-12-02T10:05:28.343228+08:00 INFO core_network::peer_manager core/network/src/peer_manager/mod.rs:1035] network: PeerId(QmYSZUy3G5Mf5GSTKfH7LXJeFJrVW59rX1qPPfapuH7AUw): connected peer_ip(s): [] ``` If `log_to_file` is true, logs like below will be logged to `{log_path}/muta.log`. It is json format, good for machine understanding. ``` {"time":"2019-12-01T22:01:57.839042+08:00","message":"network: PeerId(QmYSZUy3G5Mf5GSTKfH7LXJeFJrVW59rX1qPPfapuH7AUw): connect addrs [\"/ip4/0.0.0.0/tcp/1888\"]","module_path":"core_network::peer_manager","file":"core/network/src/peer_manager/mod.rs","line":591,"level":"INFO","target":"core_network::peer_manager","thread":"tokio-runtime-worker-0","thread_id":123145432756224,"mdc":{}} ``` This crate uses `log4rs` to init the logger, but you don't need to add dependency for that. After invoking the `init` function in this crate, you can use `log` crate to log. ## Metrics Metrics is an independent logger, it `metrics` is `true`, the metrics will be logged to `{log_path}/metrics.log`. ``` {"time":"2019-12-01T22:02:49.035084+08:00","message":"{\"height\":7943,\"name\":\"save_block\",\"ordered_tx_num\":0}","module_path":"common_logger","file":"common/logger/src/lib.rs","line":83,"level":"TRACE","target":"metrics","thread":"tokio-runtime-worker-3","thread_id":123145445486592,"mdc":{}} ``` If you want to use log metrics in a module, you need to add this crate as dependency and use the code below to add a metric. The `name` field is reserved, please avoid using this as a key in your metrics. ```rust common_logger::metrics("save_block", common_logger::object! 
{ "height" => block.header.height, "ordered_tx_num" => block.ordered_tx_hashes.len(), }); ``` This signature of the function is showed below. The `JsonValue` is a `enum` from [`json crate`](https://docs.rs/json/0.12.0/json/enum.JsonValue.html). ```rust pub fn metrics(name: &str, mut content: JsonValue) ``` ## Structured Event Log With TraceId Included Structured event log api provide a convenient way to log structured json data. It's signature is provided as below: ```rust pub fn log(level: Level, module: &str, event: &str, ctx: &Context, mut msg: JsonValue) ``` `module` should be your component name, `event` is just event name, better begin with 4 chars with 4 digits to identify this event. `Context` is used to extract trace id. `msg` is `JsonValue` which is same as `metrics`. Useage example: ```rust common_logger::log(Level::Info, "network", "netw0001", &ctx, common_logger::json!({"music", "beautiful world"; "movie", "fury"})); ``` ## Yaml File The `log.yml` in this crate is the yaml style config of log4rs with default logger config. If you need more customized configurations, you can copy the file to some config path, edit the file, and replace the `init` function with `log4rs::init_file("/path/to/log.yml", Default::default()).unwrap();`. ================================================ FILE: common/logger/log.yml ================================================ # This file is yaml style config, can make testing the logger more easily. # When you need to do some test, Add the code below to the `init` function. 
# log4rs::init_file("common/logger/log.yml", Default::default()).unwrap(); # reference: appenders: console: kind: console encoder: # this pattern below contains file name and line, usefule for debugging # pattern: "[{d} {h({l})} {t} {f}:{L}] {m}{n}" pattern: "[{d} {h({l})} {t}] {m}{n}" file: kind: file path: logs/muta.log encoder: kind: json metrics: kind: file path: logs/metrics.log encoder: kind: json root: level: info appenders: - console - file loggers: metrics: level: trace appenders: - metrics additive: false ================================================ FILE: common/logger/src/date_fixed_roller.rs ================================================ use std::error::Error; use std::fs; use std::path::Path; use chrono::prelude::Utc; use log4rs::append::rolling_file::policy::compound::roll::Roll; use log4rs::file::{Deserialize, Deserializers}; #[derive(serde_derive::Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct DateFixedWindowRollerConfig { pattern: String, } pub struct DateFixedWindowRollerBuilder; impl DateFixedWindowRollerBuilder { pub fn build( self, pattern: &str, ) -> Result> { if !pattern.contains("{date}") || !pattern.contains("{timestamp}") { return Err("pattern doesn't contain `{date}` or `{timestamp}`".into()); } let roller = DateFixedWindowRoller { pattern: pattern.into(), }; Ok(roller) } } /// The pattern takes two interpolation arguments, {date} and {timestamp}. /// {date} and {timestamp} will be replaced with actual date and timestamp /// value. /// /// For example: /// For pattern `log/{date}.muta.{timestamp}.log`, it will generate /// `log/2020-08-27.muta.83748392743.log`. 
#[derive(Debug)] pub struct DateFixedWindowRoller { pattern: String, } impl DateFixedWindowRoller { pub fn builder() -> DateFixedWindowRollerBuilder { DateFixedWindowRollerBuilder } fn roll_file( &self, cur_log: &Path, date: &str, timestamp: &str, ) -> Result<(), Box> { let archived_log = { let pattern = self.pattern.clone(); let partial_log = pattern.replace("{date}", date); partial_log.replace("{timestamp}", ×tamp) }; if let Some(parent) = Path::new(&archived_log).parent() { fs::create_dir_all(parent)?; } match fs::rename(cur_log, &archived_log) { Ok(()) => return Ok(()), Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(()), Err(_) => {} } // fall back to a copy fs::copy(cur_log, &archived_log).and_then(|_| fs::remove_file(cur_log))?; Ok(()) } } impl Roll for DateFixedWindowRoller { fn roll(&self, cur_log: &Path) -> Result<(), Box> { let now = Utc::now(); self.roll_file( cur_log, &now.format("%Y-%m-%d").to_string(), &now.timestamp().to_string(), ) } } pub struct DateFixedWindowRollerDeserializer; impl Deserialize for DateFixedWindowRollerDeserializer { type Config = DateFixedWindowRollerConfig; type Trait = dyn Roll; fn deserialize( &self, config: Self::Config, _: &Deserializers, ) -> Result, Box> { let roll = DateFixedWindowRoller { pattern: config.pattern, }; Ok(Box::new(roll)) } } #[cfg(test)] mod tests { use std::fs::File; use std::io::{Read, Write}; use chrono::prelude::Utc; use super::DateFixedWindowRoller; #[test] fn test_rotation() { let temp_dir = std::env::temp_dir(); let pattern = format!( "{}/{{date}}.muta.{{timestamp}}.log", temp_dir.as_path().to_string_lossy() ); let roller = DateFixedWindowRoller::builder().build(&pattern).unwrap(); let test_log = { let mut temp_file = temp_dir.clone(); temp_file.push("logger_test.log"); temp_file }; File::create(&test_log).unwrap().write_all(b"test").unwrap(); let now = Utc::now(); let date = &now.format("%Y-%m-%d").to_string(); let timestamp = &now.timestamp().to_string(); 
roller.roll_file(&test_log, &date, ×tamp).unwrap(); assert!(!test_log.exists()); let mut log_data = vec![]; let archived_log = { let mut temp_file = temp_dir; temp_file.push(&format!("{}.muta.{}.log", &date, ×tamp)); temp_file }; File::open(archived_log) .unwrap() .read_to_end(&mut log_data) .unwrap(); assert_eq!(log_data, b"test"); } } ================================================ FILE: common/logger/src/lib.rs ================================================ mod date_fixed_roller; use std::collections::HashMap; use std::path::PathBuf; use creep::Context; use json::JsonValue; use log::{Level, LevelFilter}; use log4rs::append::console::ConsoleAppender; use log4rs::append::rolling_file::policy::compound::trigger::size::SizeTrigger; use log4rs::append::rolling_file::policy::compound::CompoundPolicy; use log4rs::append::rolling_file::RollingFileAppender; use log4rs::config::{Appender, Config, Logger, Root}; use log4rs::encode::json::JsonEncoder; use log4rs::encode::pattern::PatternEncoder; use rustracing_jaeger::span::{SpanContext, TraceId}; use date_fixed_roller::DateFixedWindowRoller; pub use json::array; pub use json::object; use log4rs::append::file::FileAppender; // Example // ```rust // let json_obj = json!({ // "key_01", value_01; // "key_02", value_02; // }); // ``` #[macro_export] macro_rules! 
json { ({$($key: expr, $value: expr); *}) => {{ let mut evt = JsonValue::new_object(); $(evt[$key] = $value.into();)* evt }}; } pub fn init( filter: String, log_to_console: bool, console_show_file_and_line: bool, log_to_file: bool, metrics: bool, log_path: PathBuf, file_size_limit: u64, // bytes modules_level: HashMap, ) { let console_appender = ConsoleAppender::builder() .encoder(Box::new(PatternEncoder::new( if console_show_file_and_line { "[{d} {h({l})} {t} {f}:{L}] {m}{n}" } else { "[{d} {h({l})} {t}] {m}{n}" }, ))) .build(); let muta_roller_pat = log_path.join("{date}.muta.{timestamp}.log"); let metrics_roller_pat = log_path.join("{date}.metrics.{timestamp}.log"); let file_appender = { let size_trigger = SizeTrigger::new(file_size_limit); let roller = DateFixedWindowRoller::builder() .build(&muta_roller_pat.to_string_lossy()) .unwrap(); let policy = CompoundPolicy::new(Box::new(size_trigger), Box::new(roller)); RollingFileAppender::builder() .encoder(Box::new(JsonEncoder::new())) .build(log_path.join("muta.log"), Box::new(policy)) .unwrap() }; let cli_file_appender = FileAppender::builder() .encoder(Box::new(JsonEncoder::new())) .build(log_path.join("cli.log")) .unwrap(); let metrics_appender = { let size_trigger = SizeTrigger::new(file_size_limit); let roller = DateFixedWindowRoller::builder() .build(&metrics_roller_pat.to_string_lossy()) .unwrap(); let policy = CompoundPolicy::new(Box::new(size_trigger), Box::new(roller)); RollingFileAppender::builder() .encoder(Box::new(JsonEncoder::new())) .build(log_path.join("metrics.log"), Box::new(policy)) .unwrap() }; let mut root_builder = Root::builder(); if log_to_console { root_builder = root_builder.appender("console"); } if log_to_file { root_builder = root_builder.appender("file"); } let level_filter = convert_level(filter.as_ref()); let root = root_builder.build(level_filter); let metrics_logger = Logger::builder().additive(false).appender("metrics").build( "metrics", if metrics { LevelFilter::Trace } else { 
LevelFilter::Off }, ); let cli_logger = Logger::builder() .additive(false) .appender("cli") .appender("console") .build("cli", LevelFilter::Trace); let mut config_builder = Config::builder() .appender(Appender::builder().build("console", Box::new(console_appender))) .appender(Appender::builder().build("file", Box::new(file_appender))) .appender(Appender::builder().build("metrics", Box::new(metrics_appender))) .appender(Appender::builder().build("cli", Box::new(cli_file_appender))) .logger(metrics_logger) .logger(cli_logger); for (module, level) in &modules_level { let module_logger = Logger::builder() .additive(false) .appender("console") .appender("file") .build(module, convert_level(&level)); config_builder = config_builder.logger(module_logger); } let config = config_builder.build(root).unwrap(); log4rs::init_config(config).expect(""); } fn convert_level(level: &str) -> LevelFilter { match level { "off" => LevelFilter::Off, "error" => LevelFilter::Error, "info" => LevelFilter::Info, "warn" => LevelFilter::Warn, "debug" => LevelFilter::Debug, "trace" => LevelFilter::Trace, f => { println!("invalid logger.filter {}, use info", f); LevelFilter::Info } } } pub fn metrics(name: &str, mut content: JsonValue) { log::trace!(target: "metrics", "{}", { content["name"] = name.into(); content }); } // Usage: // log(Level::Info, "network", "netw0001", &ctx, common_logger::object!{"music" // : "beautiful world"}) pub fn log(level: Level, module: &str, event: &str, ctx: &Context, mut msg: JsonValue) { if let Some(trace_ctx) = trace_context(ctx) { msg["trace_id"] = trace_ctx.trace_id.to_string().into(); msg["span_id"] = trace_ctx.span_id.into(); } log::log!(target: module, level, "{}", { msg["event"] = event.into(); msg }); } #[derive(Debug, Clone, Copy)] struct TraceContext { trace_id: TraceId, span_id: u64, } // NOTE: Reference muta_apm::MutaTracer::span_state. // Copy code to avoid depends on muta_apm crate. 
fn trace_context(ctx: &Context) -> Option { match ctx.get::>("parent_span_ctx") { Some(Some(parent_ctx)) => { let state = parent_ctx.state(); let trace_ctx = TraceContext { trace_id: state.trace_id(), span_id: state.span_id(), }; Some(trace_ctx) } _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_json() { env_logger::init(); let json = json!({"height", 1; "msg", "asset_01"; "is_connected", true}); log( Level::Warn, "logger", "logg_001", &Context::new(), json.clone(), ); assert_eq!(json["height"], 1); assert_eq!(json["msg"], "asset_01"); assert_eq!(json["is_connected"], true); } } ================================================ FILE: common/merkle/Cargo.toml ================================================ [package] name = "common-merkle" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] protocol = { path = "../../protocol", package = "muta-protocol" } rayon = "1.3" static_merkle_tree = "1.1.0" [dev-dependencies] rand = "0.7" ================================================ FILE: common/merkle/src/lib.rs ================================================ #![feature(test)] use static_merkle_tree::Tree; use protocol::{types::Hash, Bytes}; #[derive(Debug, Clone)] pub struct ProofNode { pub is_right: bool, pub hash: Hash, } pub struct Merkle { tree: Tree, } impl Merkle { pub fn from_hashes(hashes: Vec) -> Self { let tree = Tree::from_hashes(hashes, merge); Merkle { tree } } pub fn get_root_hash(&self) -> Option { match self.tree.get_root_hash() { Some(hash) => Some(hash.clone()), None => None, } } pub fn get_proof_by_input_index(&self, input_index: usize) -> Option> { self.tree .get_proof_by_input_index(input_index) .map(|proof| { proof .0 .into_iter() .map(|node| ProofNode { is_right: node.is_right, hash: node.hash, }) .collect() }) } } fn merge(left: &Hash, right: &Hash) -> Hash 
{ let left = left.as_bytes(); let right = right.as_bytes(); let mut root = Vec::with_capacity(left.len() + right.len()); root.extend_from_slice(&left); root.extend_from_slice(&right); Hash::digest(Bytes::from(root)) } #[rustfmt::skip] /// Bench in Intel(R) Core(TM) i7-4770HQ CPU @2.20GHz (8 x 2200): /// test benches::bench_merkle_1000_hashes ... bench: 1,167,080 ns/iter (+/- 108,462) /// test benches::bench_merkle_2000_hashes ... bench: 2,338,504 ns/iter (+/- 137,184) /// test benches::bench_merkle_4000_hashes ... bench: 4,662,601 ns/iter (+/- 231,500) /// test benches::bench_merkle_8000_hashes ... bench: 9,336,278 ns/iter (+/- 900,731) /// test benches::bench_merkle_16000_hashes ... bench: 18,697,547 ns/iter (+/- 1,103,828) #[cfg(test)] mod benches { extern crate test; use rand::random; use test::Bencher; use super::*; fn mock_hash() -> Hash { Hash::digest(Bytes::from( (0..10).map(|_| random::()).collect::>(), )) } fn rand_hashes(size: usize) -> Vec { (0..size).map(|_| mock_hash()).collect::>() } #[bench] fn bench_merkle_1000_hashes(b: &mut Bencher) { let case = rand_hashes(1000); b.iter(|| { let _ = Merkle::from_hashes(case.clone()); }); } #[bench] fn bench_merkle_2000_hashes(b: &mut Bencher) { let case = rand_hashes(2000); b.iter(|| { let _ = Merkle::from_hashes(case.clone()); }); } #[bench] fn bench_merkle_4000_hashes(b: &mut Bencher) { let case = rand_hashes(4000); b.iter(|| { let _ = Merkle::from_hashes(case.clone()); }); } #[bench] fn bench_merkle_8000_hashes(b: &mut Bencher) { let case = rand_hashes(8000); b.iter(|| { let _ = Merkle::from_hashes(case.clone()); }); } #[bench] fn bench_merkle_16000_hashes(b: &mut Bencher) { let case = rand_hashes(16000); b.iter(|| { let _ = Merkle::from_hashes(case.clone()); }); } } ================================================ FILE: common/pubsub/Cargo.toml ================================================ [package] name = "common-pubsub" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = 
"https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] ================================================ FILE: common/pubsub/src/lib.rs ================================================ #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } } ================================================ FILE: core/api/Cargo.toml ================================================ [package] name = "core-api" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] protocol = { path = "../../protocol", package = "muta-protocol" } common-apm = { path = "../../common/apm" } common-crypto = { path = "../../common/crypto" } juniper = { git = "https://github.com/graphql-rust/juniper", rev = "eff086a", features = ["async"] } juniper_codegen = "0.14" async-trait = "0.1" hex = "0.4" futures = "0.3" derive_more = "0.15" cita_trie = "2.0" bytes = "0.5" actix-web = { version = "2.0.0", features = ["openssl"] } serde_json = "1.0" lazy_static = "1.4" num_cpus = "1.12" log = "0.4" openssl = "0.10" pprof = { version = "0.3", features = ["flamegraph", "protobuf"] } url = { version = "2.1" } tokio = { version = "0.2", features = [ "time" ] } ================================================ FILE: core/api/source/graphiql.html ================================================ GraphQL Playground
Loading GraphQL Playground
================================================ FILE: core/api/src/adapter/mod.rs ================================================ use std::marker::PhantomData; use std::sync::Arc; use async_trait::async_trait; use derive_more::Display; use protocol::traits::{ APIAdapter, Context, ExecutorFactory, ExecutorParams, MemPool, ServiceMapping, ServiceResponse, Storage, }; use protocol::types::{ Address, Block, BlockHeader, Hash, Receipt, SignedTransaction, TransactionRequest, }; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; #[derive(Debug, Display)] pub enum APIError { #[display( fmt = "Unexecuted block,try to {:?}, but now only reached {:?}", real, expect )] UnExecedError { expect: u64, real: u64 }, #[display(fmt = "not found")] NotFound, } impl std::error::Error for APIError {} impl From for ProtocolError { fn from(api_err: APIError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::API, Box::new(api_err)) } } pub struct DefaultAPIAdapter { mempool: Arc, storage: Arc, trie_db: Arc, service_mapping: Arc, pin_ef: PhantomData, } impl< EF: ExecutorFactory, M: MemPool, S: Storage, DB: cita_trie::DB, Mapping: ServiceMapping, > DefaultAPIAdapter { pub fn new( mempool: Arc, storage: Arc, trie_db: Arc, service_mapping: Arc, ) -> Self { Self { mempool, storage, trie_db, service_mapping, pin_ef: PhantomData, } } } #[async_trait] impl< EF: ExecutorFactory, M: MemPool, S: Storage, DB: cita_trie::DB, Mapping: ServiceMapping, > APIAdapter for DefaultAPIAdapter { async fn insert_signed_txs( &self, ctx: Context, signed_tx: SignedTransaction, ) -> ProtocolResult<()> { self.mempool.insert(ctx, signed_tx).await } async fn get_block_by_height( &self, ctx: Context, height: Option, ) -> ProtocolResult> { match height { Some(id) => self.storage.get_block(ctx.clone(), id).await, None => Ok(Some(self.storage.get_latest_block(ctx).await?)), } } async fn get_block_header_by_height( &self, ctx: Context, height: Option, ) -> ProtocolResult> { match height { Some(id) => 
self.storage.get_block_header(ctx.clone(), id).await, None => Ok(Some(self.storage.get_latest_block_header(ctx).await?)), } } async fn get_receipt_by_tx_hash( &self, ctx: Context, tx_hash: Hash, ) -> ProtocolResult> { let opt_receipt = self .storage .get_receipt_by_hash(ctx.clone(), tx_hash) .await?; let exec_height = self.storage.get_latest_block_header(ctx).await?.exec_height; match opt_receipt { Some(receipt) => { let height = receipt.height; if exec_height >= height { Ok(Some(receipt)) } else { Ok(None) } } None => Ok(None), } } async fn get_transaction_by_hash( &self, ctx: Context, tx_hash: Hash, ) -> ProtocolResult> { self.storage.get_transaction_by_hash(ctx, &tx_hash).await } async fn query_service( &self, ctx: Context, height: u64, cycles_limit: u64, cycles_price: u64, caller: Address, service_name: String, method: String, payload: String, ) -> ProtocolResult> { let header = self .get_block_header_by_height(ctx.clone(), Some(height)) .await? .ok_or(APIError::NotFound)?; let executor = EF::from_root( header.state_root.clone(), Arc::clone(&self.trie_db), Arc::clone(&self.storage), Arc::clone(&self.service_mapping), )?; let params = ExecutorParams { state_root: header.state_root, height, timestamp: header.timestamp, cycles_limit, proposer: header.proposer, }; executor.read(¶ms, &caller, cycles_price, &TransactionRequest { service_name, method, payload, }) } } ================================================ FILE: core/api/src/config.rs ================================================ use std::net::SocketAddr; use std::path::PathBuf; #[derive(Debug, Clone)] pub struct GraphQLConfig { pub listening_address: SocketAddr, pub graphql_uri: String, pub graphiql_uri: String, // Set number of workers to start. // By default http server uses number of available logical cpu as threads count. pub workers: usize, // Sets the maximum number of all concurrent connections. pub maxconn: usize, // Set the max payload size of graphql interface. 
// It is used to prevent DOS attacking through memory exhaustion. // The default value is 1024 * 1024, which is 1MB. pub max_payload_size: usize, pub tls: Option, pub enable_dump_profile: bool, } #[derive(Debug, Clone)] pub struct GraphQLTLS { pub private_key_file_path: PathBuf, pub certificate_chain_file_path: PathBuf, } impl Default for GraphQLConfig { fn default() -> Self { Self { listening_address: "127.0.0.1:8080" .parse() .expect("Unable to parse socket address"), graphql_uri: "/graphql".to_owned(), graphiql_uri: "/graphiql".to_owned(), workers: num_cpus::get(), maxconn: 25000, max_payload_size: 1024 * 1024, // 1MB tls: None, enable_dump_profile: false, } } } ================================================ FILE: core/api/src/lib.rs ================================================ pub mod adapter; pub mod config; mod schema; use std::cmp; use std::convert::TryFrom; use std::sync::Arc; use std::time::Instant; use actix_web::{web, App, Error, FromRequest, HttpResponse, HttpServer}; use futures::executor::block_on; use juniper::http::GraphQLRequest; use juniper::FieldResult; use lazy_static::lazy_static; use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; use common_crypto::{ HashValue, PrivateKey, PublicKey, Secp256k1PrivateKey, Signature, ToPublicKey, }; use protocol::fixed_codec::FixedCodec; use protocol::traits::{APIAdapter, Context}; use crate::config::GraphQLConfig; use crate::schema::{ to_signed_transaction, to_transaction, Address, Block, Bytes, Hash, InputRawTransaction, InputTransactionEncryption, Receipt, ServiceResponse, SignedTransaction, Uint64, }; lazy_static! { static ref GRAPHIQL_HTML: &'static str = include_str!("../source/graphiql.html"); } // This is accessible as state in Tide, and as executor context in Juniper. #[derive(Clone)] struct State { adapter: Arc>, schema: Arc, } // We define `Query` unit struct here. GraphQL queries will refer to this // struct. 
The struct itself doesn't have any associated state (and there's no // need to do so), but instead it exposes the accumulator state from the // context. struct Query; // Switch to async/await fn https://github.com/graphql-rust/juniper/issues/2 #[juniper::graphql_object(Context = State)] impl Query { #[graphql(name = "getBlock", description = "Get the block")] async fn get_block(state_ctx: &State, height: Option) -> FieldResult> { let ctx = Context::new(); let inst = Instant::now(); common_apm::metrics::api::API_REQUEST_COUNTER_VEC_STATIC .get_block .inc(); let height = match height { Some(id) => match id.try_into_u64() { Ok(id) => Some(id), Err(err) => { common_apm::metrics::api::API_REQUEST_RESULT_COUNTER_VEC_STATIC .get_block .failure .inc(); return Err(err.into()); } }, None => None, }; let opt_block = match state_ctx .adapter .get_block_by_height(ctx.clone(), height) .await { Ok(opt_block) => opt_block, Err(err) => { common_apm::metrics::api::API_REQUEST_RESULT_COUNTER_VEC_STATIC .get_block .failure .inc(); return Err(err.into()); } }; common_apm::metrics::api::API_REQUEST_RESULT_COUNTER_VEC_STATIC .get_block .success .inc(); common_apm::metrics::api::API_REQUEST_TIME_HISTOGRAM_STATIC .get_block .observe(common_apm::metrics::duration_to_sec(inst.elapsed())); Ok(opt_block.map(Block::from)) } #[graphql(name = "getTransaction", description = "Get the transaction by hash")] async fn get_transaction( state_ctx: &State, tx_hash: Hash, ) -> FieldResult> { let ctx = Context::new(); let hash = protocol::types::Hash::from_hex(&tx_hash.as_hex())?; let opt_stx = state_ctx .adapter .get_transaction_by_hash(ctx.clone(), hash) .await?; Ok(opt_stx.map(SignedTransaction::from)) } #[graphql( name = "getReceipt", description = "Get the receipt by transaction hash" )] async fn get_receipt(state_ctx: &State, tx_hash: Hash) -> FieldResult> { let ctx = Context::new(); let hash = protocol::types::Hash::from_hex(&tx_hash.as_hex())?; let opt_receipt = state_ctx .adapter 
.get_receipt_by_tx_hash(ctx.clone(), hash) .await?; Ok(opt_receipt.map(Receipt::from)) } #[graphql(name = "queryService", description = "query service")] async fn query_service( state_ctx: &State, height: Option, cycles_limit: Option, cycles_price: Option, caller: Address, service_name: String, method: String, payload: String, ) -> FieldResult { let ctx = Context::new(); let height = match height { Some(id) => id.try_into_u64()?, None => { block_on(state_ctx.adapter.get_block_by_height(Context::new(), None))? .expect("Always not none") .header .height } }; let cycles_limit = match cycles_limit { Some(cycles_limit) => cycles_limit.try_into_u64()?, None => std::u64::MAX, }; let cycles_price = match cycles_price { Some(cycles_price) => cycles_price.try_into_u64()?, None => 1, }; let address: protocol::types::Address = caller.to_str().parse()?; let exec_resp = state_ctx .adapter .query_service( ctx.clone(), height, cycles_limit, cycles_price, address, service_name, method, payload, ) .await?; Ok(ServiceResponse::from(exec_resp)) } } struct Mutation; // Switch to async/await fn https://github.com/graphql-rust/juniper/issues/2 #[juniper::graphql_object(Context = State)] impl Mutation { #[graphql(name = "sendTransaction", description = "send transaction")] async fn send_transaction( state_ctx: &State, input_raw: InputRawTransaction, input_encryption: InputTransactionEncryption, ) -> FieldResult { let ctx = Context::new(); let inst = Instant::now(); common_apm::metrics::api::API_REQUEST_COUNTER_VEC_STATIC .send_transaction .inc(); let stx = to_signed_transaction(input_raw, input_encryption)?; let tx_hash = stx.tx_hash.clone(); if let Err(err) = state_ctx.adapter.insert_signed_txs(ctx.clone(), stx).await { common_apm::metrics::api::API_REQUEST_RESULT_COUNTER_VEC_STATIC .send_transaction .failure .inc(); return Err(err.into()); } common_apm::metrics::api::API_REQUEST_RESULT_COUNTER_VEC_STATIC .send_transaction .success .inc(); 
common_apm::metrics::api::API_REQUEST_TIME_HISTOGRAM_STATIC .send_transaction .observe(common_apm::metrics::duration_to_sec(inst.elapsed())); Ok(Hash::from(tx_hash)) } #[graphql( name = "unsafeSendTransaction", deprecated = "DON'T use it in production! This is just for development." )] async fn unsafe_send_transaction( state_ctx: &State, input_raw: InputRawTransaction, input_privkey: Bytes, ) -> FieldResult { let ctx = Context::new(); let raw_tx = to_transaction(input_raw)?; let tx_hash = protocol::types::Hash::digest(raw_tx.encode_fixed()?); let privkey = Secp256k1PrivateKey::try_from(input_privkey.to_vec()?.as_ref())?; let pubkey = privkey.pub_key(); let hash_value = HashValue::try_from(tx_hash.as_bytes().as_ref())?; let signature = privkey.sign_message(&hash_value); let stx = protocol::types::SignedTransaction { raw: raw_tx, tx_hash: tx_hash.clone(), signature: signature.to_bytes(), pubkey: pubkey.to_bytes(), }; state_ctx .adapter .insert_signed_txs(ctx.clone(), stx) .await?; Ok(Hash::from(tx_hash)) } } // Adding `Query` and `Mutation` together we get `Schema`, which describes, // well, the whole GraphQL schema. 
// NOTE(review): generic parameters appear stripped by extraction in this
// chunk (e.g. `web::Data<State>`, `Result<Self, Self::Err>`); tokens kept
// exactly as found.
type Schema = juniper::RootNode<'static, Query, Mutation>;

/// Serve the bundled GraphiQL explorer page.
async fn graphiql() -> HttpResponse {
    HttpResponse::Ok()
        .content_type("text/html; charset=utf-8")
        .body(GRAPHIQL_HTML.to_owned())
}

/// Execute a GraphQL request against the schema and return the JSON result.
async fn graphql(
    st: web::Data,
    data: web::Json,
) -> Result {
    let result = data.execute_async(&st.schema, &st).await;
    let res = Ok::<_, serde_json::error::Error>(serde_json::to_string(&result)?)?;
    Ok(HttpResponse::Ok()
        .content_type("application/json")
        .body(res))
}

/// Metrics scrape endpoint; on failure the error text itself becomes the
/// body so the problem is visible to the scraper.
async fn metrics() -> HttpResponse {
    let metrics_data = match common_apm::metrics::all_metrics() {
        Ok(data) => data,
        Err(e) => e.to_string().into_bytes(),
    };

    HttpResponse::Ok()
        .content_type("text/plain; charset=utf-8")
        .body(metrics_data)
}

/// CPU-profiling endpoint support: parses profiling options from the query
/// string and renders the sampled profile as a flamegraph or pprof blob.
mod profile {
    use std::collections::HashMap;
    use std::str::FromStr;
    use std::time::Duration;

    use actix_web::error::{ErrorBadRequest, ErrorInternalServerError};
    use actix_web::{dev, FromRequest, HttpRequest, HttpResponse};
    use futures::future;
    use pprof::protos::Message;

    /// Output format of a profiling run.
    pub enum ProfileReport {
        /// Perf flamegraph
        FlameGraph,
        /// Go pprof
        PProf,
    }

    impl FromStr for ProfileReport {
        type Err = &'static str;

        fn from_str(report: &str) -> Result {
            match report {
                "flamegraph" => Ok(ProfileReport::FlameGraph),
                "pprof" => Ok(ProfileReport::PProf),
                _ => Err("invalid report type, only support flamegraph and pprof"),
            }
        }
    }

    /// Options for one profiling run: how long to sample, at what frequency,
    /// and which report format to emit.
    pub struct ProfileConfig {
        duration: Duration,
        frequency: i32,
        report: ProfileReport,
    }

    impl Default for ProfileConfig {
        fn default() -> Self {
            ProfileConfig {
                duration: Duration::from_secs(10),
                frequency: 99,
                report: ProfileReport::FlameGraph,
            }
        }
    }

    // Extract ProfileConfig from the request's query string; each field falls
    // back to its default when absent and 400s when unparseable.
    impl FromRequest for ProfileConfig {
        type Config = ();
        type Error = actix_web::Error;
        type Future = future::Ready>;

        fn from_request(req: &HttpRequest, _: &mut dev::Payload) -> Self::Future {
            let query = req.query_string();
            let query_pairs: HashMap<_, _> = url::form_urlencoded::parse(query.as_bytes()).collect();

            let duration: Duration = match query_pairs.get("duration").map(|val| val.parse()) {
                Some(Ok(val)) => Duration::from_secs(val),
                Some(Err(e)) => return future::err(ErrorBadRequest(e)),
                None => ProfileConfig::default().duration,
            };
            let frequency: i32 = match query_pairs.get("frequency").map(|val| val.parse()) {
                Some(Ok(val)) => val,
                Some(Err(e)) => return future::err(ErrorBadRequest(e)),
                None => ProfileConfig::default().frequency,
            };
            let report: ProfileReport = match query_pairs.get("report").map(|val| val.parse()) {
                Some(Ok(val)) => val,
                Some(Err(e)) => return future::err(ErrorBadRequest(e)),
                None => ProfileConfig::default().report,
            };

            future::ok(ProfileConfig {
                duration,
                frequency,
                report,
            })
        }
    }

    /// Sample the process for `config.duration`, then render the report in
    /// the requested format. Errors are mapped to 4xx/5xx responses.
    pub async fn dump_profile(maybe_config: actix_web::Result) -> HttpResponse {
        let config = match maybe_config {
            Ok(config) => config,
            Err(e) => return e.into(),
        };
        let guard = match pprof::ProfilerGuard::new(config.frequency) {
            Ok(guard) => guard,
            Err(e) => return ErrorInternalServerError(e).into(),
        };
        // Keep sampling for the requested window before building the report.
        tokio::time::delay_for(config.duration).await;
        let report = match guard.report().build() {
            Ok(report) => report,
            Err(e) => return ErrorInternalServerError(e).into(),
        };
        // Stop the profiler before serializing.
        drop(guard);

        let mut body = Vec::new();
        match config.report {
            ProfileReport::FlameGraph => match report.flamegraph(&mut body) {
                Ok(_) => {
                    log::info!("dump flamegraph successfully");
                    HttpResponse::Ok().body(body)
                }
                Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
            },
            // pprof encoding is doubly fallible: building the proto and
            // encoding it into `body`.
            ProfileReport::PProf => match report.pprof().map(|p| p.encode(&mut body)) {
                Ok(Ok(())) => {
                    log::info!("dump pprof successfully");
                    HttpResponse::Ok().body(body)
                }
                Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
                Ok(Err(err)) => HttpResponse::InternalServerError().body(err.to_string()),
            },
        }
    }
}

/// Build the GraphQL HTTP server from `cfg` and run it until shutdown.
pub async fn start_graphql(cfg: GraphQLConfig, adapter: Adapter) {
    let schema = Schema::new(Query, Mutation);
    let state = State {
        adapter: Arc::new(Box::new(adapter)),
        schema: Arc::new(schema),
    };

    let path_graphql_uri = cfg.graphql_uri.to_owned();
    let path_graphiql_uri = cfg.graphiql_uri.to_owned();
    let workers =
cfg.workers;
    let maxconn = cfg.maxconn;
    let add_listening_address = cfg.listening_address;
    let max_payload_size = cfg.max_payload_size;
    let enable_dump_profile = cfg.enable_dump_profile;

    // Start http server
    let server = HttpServer::new(move || {
        let app = App::new()
            .data(state.clone())
            .service(
                web::resource(&path_graphql_uri)
                    // Cap the accepted JSON payload size.
                    .app_data(web::Json::::configure(|cfg| {
                        cfg.limit(max_payload_size)
                    }))
                    .route(web::post().to(graphql)),
            )
            .service(web::resource(&path_graphiql_uri).route(web::get().to(graphiql)))
            .service(web::resource("/metrics").route(web::get().to(metrics)));
        // The profiling endpoint is opt-in via config.
        if enable_dump_profile {
            app.service(web::resource("/dump_profile").route(web::get().to(profile::dump_profile)))
        } else {
            app
        }
    })
    .workers(workers)
    // NOTE(review): `maxconn / workers` divides by zero when `workers == 0`,
    // which the sample config permits ("0 = use logical cpu count") --
    // confirm a guard exists upstream.
    .maxconn(cmp::max(maxconn / workers, 1));

    if let Some(tls) = cfg.tls {
        // load ssl keys
        let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
        builder
            .set_private_key_file(tls.private_key_file_path, SslFiletype::PEM)
            .unwrap();
        builder
            .set_certificate_chain_file(tls.certificate_chain_file_path)
            .unwrap();
        server
            .bind_openssl(add_listening_address, builder)
            .unwrap()
            .run()
            .await
            .unwrap()
    } else {
        server
            .bind(add_listening_address)
            .unwrap()
            .run()
            .await
            .unwrap()
    }
}

================================================
FILE: core/api/src/schema/block.rs
================================================
use protocol::fixed_codec::FixedCodec;
use protocol::types::Hash as PHash;

use crate::schema::{Address, Bytes, Hash, MerkleRoot, Uint64};

#[derive(juniper::GraphQLObject, Clone)]
#[graphql(
    description = "Block is a single digital record created within a blockchain. \
                   Each block contains a record of the previous Block, \
                   and when linked together these become the “chain”.\
                   A block is always composed of header and body."
)]
pub struct Block {
    #[graphql(description = "The header section of a block")]
    header: BlockHeader,
    #[graphql(description = "The body section of a block")]
    ordered_tx_hashes: Vec,
    #[graphql(description = "Hash of the block")]
    hash: Hash,
}

#[derive(juniper::GraphQLObject, Clone)]
#[graphql(description = "A block header is like the metadata of a block.")]
pub struct BlockHeader {
    #[graphql(
        description = "Identifier of a chain in order to prevent replay attacks across channels "
    )]
    pub chain_id: Hash,

    #[graphql(description = "block height")]
    pub height: Uint64,

    #[graphql(description = "The height to which the block has been executed")]
    pub exec_height: Uint64,

    #[graphql(description = "The hash of the serialized previous block")]
    pub prev_hash: Hash,

    #[graphql(description = "A timestamp that records when the block was created")]
    pub timestamp: Uint64,

    #[graphql(description = "The merkle root of ordered transactions")]
    pub order_root: MerkleRoot,

    #[graphql(description = "The hash of ordered signed transactions")]
    pub order_signed_transactions_hash: Hash,

    #[graphql(description = "The merkle roots of all the confirms")]
    pub confirm_root: Vec,

    #[graphql(description = "The merkle root of state root")]
    pub state_root: MerkleRoot,

    #[graphql(description = "The merkle roots of receipts")]
    pub receipt_root: Vec,

    #[graphql(description = "The sum of all transactions costs")]
    pub cycles_used: Vec,

    #[graphql(description = "The address descirbed who packed the block")]
    pub proposer: Address,

    pub proof: Proof,

    #[graphql(description = "The version of validator is designed for cross chain")]
    pub validator_version: Uint64,

    pub validators: Vec,
}

#[derive(juniper::GraphQLObject, Clone)]
#[graphql(description = "The verifier of the block header proved")]
pub struct Proof {
    pub height: Uint64,
    pub round: Uint64,
    pub block_hash: Hash,
    pub signature: Bytes,
    pub bitmap: Bytes,
}

#[derive(juniper::GraphQLObject, Clone)]
#[graphql(description = "Validator address set")]
pub struct Validator {
pub pubkey: Bytes, pub propose_weight: i32, pub vote_weight: i32, } impl From for BlockHeader { fn from(block_header: protocol::types::BlockHeader) -> Self { BlockHeader { chain_id: Hash::from(block_header.chain_id), height: Uint64::from(block_header.height), exec_height: Uint64::from(block_header.exec_height), prev_hash: Hash::from(block_header.prev_hash), timestamp: Uint64::from(block_header.timestamp), order_root: MerkleRoot::from(block_header.order_root), order_signed_transactions_hash: Hash::from(block_header.order_signed_transactions_hash), state_root: MerkleRoot::from(block_header.state_root), confirm_root: block_header .confirm_root .into_iter() .map(MerkleRoot::from) .collect(), receipt_root: block_header .receipt_root .into_iter() .map(MerkleRoot::from) .collect(), cycles_used: block_header .cycles_used .into_iter() .map(Uint64::from) .collect(), proposer: Address::from(block_header.proposer), proof: Proof::from(block_header.proof), validator_version: Uint64::from(block_header.validator_version), validators: block_header .validators .into_iter() .map(Validator::from) .collect(), } } } impl From for Block { fn from(block: protocol::types::Block) -> Self { Block { header: BlockHeader::from(block.header.clone()), ordered_tx_hashes: block .ordered_tx_hashes .clone() .into_iter() .map(MerkleRoot::from) .collect(), hash: Hash::from(PHash::digest( block.header.encode_fixed().expect("rlp encode never fail"), )), } } } impl From for Proof { fn from(proof: protocol::types::Proof) -> Self { Proof { height: Uint64::from(proof.height), round: Uint64::from(proof.round), block_hash: Hash::from(proof.block_hash), signature: Bytes::from(proof.signature), bitmap: Bytes::from(proof.bitmap), } } } impl From for Validator { fn from(validator: protocol::types::Validator) -> Self { Validator { pubkey: Bytes::from(validator.pub_key), propose_weight: validator.vote_weight as i32, vote_weight: validator.vote_weight as i32, } } } ================================================ 
FILE: core/api/src/schema/mod.rs
================================================
mod block;
mod receipt;
mod transaction;

use std::convert::From;

use derive_more::{Display, From};
use std::num::ParseIntError;

use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult};

pub use block::{Block, BlockHeader};
pub use receipt::{Event, Receipt, ReceiptResponse};
pub use transaction::{
    to_signed_transaction, to_transaction, InputRawTransaction, InputTransactionEncryption,
    SignedTransaction,
};

// NOTE(review): generic parameters appear stripped by extraction below
// (e.g. `From<protocol::traits::ServiceResponse<String>>`,
// `ProtocolResult<String>`); tokens kept exactly as found.

/// GraphQL mirror of a service call response: a numeric code plus either
/// success payload or error text.
#[derive(juniper::GraphQLObject, Clone)]
pub struct ServiceResponse {
    pub code: Uint64,
    pub succeed_data: String,
    pub error_message: String,
}

impl From> for ServiceResponse {
    fn from(resp: protocol::traits::ServiceResponse) -> Self {
        Self {
            code: Uint64::from(resp.code),
            succeed_data: resp.succeed_data,
            error_message: resp.error_message,
        }
    }
}

#[derive(juniper::GraphQLScalarValue, Clone)]
#[graphql(description = "The output digest of Keccak hash function")]
pub struct Hash(String);
pub type MerkleRoot = Hash;

#[derive(juniper::GraphQLScalarValue, Clone)]
#[graphql(description = "20 bytes of account address")]
pub struct Address(String);

#[derive(juniper::GraphQLScalarValue, Clone)]
#[graphql(description = "Uint64")]
pub struct Uint64(String);

#[derive(juniper::GraphQLScalarValue, Clone)]
#[graphql(description = "Bytes corresponding hex string.")]
pub struct Bytes(String);

impl Hash {
    // NOTE(review): unlike Uint64/Bytes below, this does NOT strip a "0x"
    // prefix before upper-casing -- confirm that asymmetry is intended.
    pub fn as_hex(&self) -> String {
        self.0.to_uppercase()
    }
}

impl Address {
    pub fn to_str(&self) -> &str {
        &self.0
    }
}

impl Uint64 {
    /// Strip the mandatory "0x" prefix and upper-case the hex digits.
    pub fn as_hex(&self) -> ProtocolResult {
        Ok(clean_0x(&self.0)?.to_uppercase())
    }

    /// Parse the 0x-prefixed hex string into a u64.
    pub fn try_into_u64(self) -> ProtocolResult {
        let n = u64::from_str_radix(&self.as_hex()?, 16).map_err(SchemaError::IntoU64)?;
        Ok(n)
    }
}

impl Bytes {
    pub fn as_hex(&self) -> ProtocolResult {
        Ok(clean_0x(&self.0)?.to_uppercase())
    }

    /// Decode the 0x-prefixed hex string into raw bytes.
    pub fn to_vec(&self) -> ProtocolResult> {
        let v = hex::decode(self.as_hex()?).map_err(SchemaError::FromHex)?;
        Ok(v)
    }
}

impl From for Hash {
    fn from(hash: protocol::types::Hash) -> Self {
        Hash(hash.as_hex())
    }
}

impl From for Address {
    fn from(address: protocol::types::Address) -> Self {
        Address(address.to_string())
    }
}

impl From for Uint64 {
    // Encode as 0x-prefixed big-endian hex (always 16 hex digits).
    fn from(n: u64) -> Self {
        Uint64("0x".to_owned() + &hex::encode(n.to_be_bytes().to_vec()))
    }
}

impl From for Bytes {
    fn from(bytes: protocol::Bytes) -> Self {
        Bytes("0x".to_owned() + &hex::encode(bytes))
    }
}

/// Drop a leading "0x"/"0X"; any other prefix is rejected as an error.
fn clean_0x(s: &str) -> ProtocolResult {
    if s.starts_with("0x") || s.starts_with("0X") {
        Ok(s[2..].to_owned())
    } else {
        Err(SchemaError::HexPrefix.into())
    }
}

/// Errors raised while converting GraphQL scalar strings into protocol types.
#[derive(Debug, Display, From)]
pub enum SchemaError {
    #[display(fmt = "into u64 {:?}", _0)]
    IntoU64(ParseIntError),

    #[display(fmt = "from hex {:?}", _0)]
    FromHex(hex::FromHexError),

    #[display(fmt = "hex should start with 0x")]
    HexPrefix,
}

impl std::error::Error for SchemaError {}

impl From for ProtocolError {
    fn from(err: SchemaError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::API, Box::new(err))
    }
}

================================================
FILE: core/api/src/schema/receipt.rs
================================================
use crate::schema::{Hash, MerkleRoot, ServiceResponse, Uint64};

/// GraphQL view of a transaction execution receipt.
#[derive(juniper::GraphQLObject, Clone)]
pub struct Receipt {
    pub state_root: MerkleRoot,
    pub height: Uint64,
    pub tx_hash: Hash,
    pub cycles_used: Uint64,
    pub events: Vec,
    pub response: ReceiptResponse,
}

/// An event emitted by a service during execution.
#[derive(juniper::GraphQLObject, Clone)]
pub struct Event {
    pub service: String,
    pub name: String,
    pub data: String,
}

/// The service/method that produced the receipt, plus its response payload.
#[derive(juniper::GraphQLObject, Clone)]
pub struct ReceiptResponse {
    pub service_name: String,
    pub method: String,
    pub response: ServiceResponse,
}

impl From for Receipt {
    fn from(receipt: protocol::types::Receipt) -> Self {
        Self {
            state_root: MerkleRoot::from(receipt.state_root),
            height: Uint64::from(receipt.height),
            tx_hash: Hash::from(receipt.tx_hash),
            cycles_used: Uint64::from(receipt.cycles_used),
            events: receipt.events.into_iter().map(Event::from).collect(),
            response:
ReceiptResponse::from(receipt.response),
        }
    }
}

impl From for Event {
    fn from(event: protocol::types::Event) -> Self {
        Self {
            service: event.service,
            name: event.name,
            data: event.data,
        }
    }
}

impl From for ReceiptResponse {
    fn from(response: protocol::types::ReceiptResponse) -> Self {
        Self {
            service_name: response.service_name,
            method: response.method,
            response: ServiceResponse::from(response.response),
        }
    }
}

================================================
FILE: core/api/src/schema/transaction.rs
================================================
use protocol::ProtocolResult;

use crate::schema::{Address, Bytes, Hash, SchemaError, Uint64};

/// GraphQL view of a fully signed transaction (raw fields + envelope).
#[derive(juniper::GraphQLObject, Clone)]
pub struct SignedTransaction {
    pub chain_id: Hash,
    pub cycles_limit: Uint64,
    pub cycles_price: Uint64,
    pub nonce: Hash,
    pub timeout: Uint64,
    pub sender: Address,
    pub service_name: String,
    pub method: String,
    pub payload: String,
    pub tx_hash: Hash,
    pub pubkey: Bytes,
    pub signature: Bytes,
}

impl From for SignedTransaction {
    fn from(stx: protocol::types::SignedTransaction) -> Self {
        Self {
            chain_id: Hash::from(stx.raw.chain_id),
            cycles_limit: Uint64::from(stx.raw.cycles_limit),
            cycles_price: Uint64::from(stx.raw.cycles_price),
            nonce: Hash::from(stx.raw.nonce),
            timeout: Uint64::from(stx.raw.timeout),
            sender: Address::from(stx.raw.sender),
            service_name: stx.raw.request.service_name,
            method: stx.raw.request.method,
            payload: stx.raw.request.payload,
            tx_hash: Hash::from(stx.tx_hash),
            pubkey: Bytes::from(stx.pubkey),
            signature: Bytes::from(stx.signature),
        }
    }
}

// #####################
// GraphQLInputObject
// #####################

#[derive(juniper::GraphQLInputObject, Clone)]
#[graphql(description = "There was many types of transaction in Muta, \
                         A transaction often require computing resources or write data to chain,\
                         these resources are valuable so we need to pay some token for them.\
                         InputRawTransaction describes information above")]
pub struct InputRawTransaction {
    #[graphql(description = "Identifier of the chain.")]
    pub chain_id: Hash,

    #[graphql(
        description = "Mostly like the gas limit in Ethereum, describes the fee that \
                       you are willing to pay the highest price for the transaction"
    )]
    pub cycles_limit: Uint64,

    pub cycles_price: Uint64,

    #[graphql(
        description = "Every transaction has its own id, unlike Ethereum's nonce,\
                       the nonce in Muta is an hash"
    )]
    pub nonce: Hash,

    #[graphql(description = "For security and performance reasons, \
                             Muta will only deal with trade request over a period of time,\
                             the `timeout` should be `timeout > current_block_height` and `timeout < current_block_height + timeout_gap`,\
                             the `timeout_gap` generally equal to 20.")]
    pub timeout: Uint64,

    pub service_name: String,
    pub method: String,
    pub payload: String,
    pub sender: Address,
}

#[derive(juniper::GraphQLInputObject, Clone)]
#[graphql(description = "Signature of the transaction")]
pub struct InputTransactionEncryption {
    #[graphql(description = "The digest of the transaction")]
    pub tx_hash: Hash,
    #[graphql(description = "The public key of transfer")]
    pub pubkey: Bytes,
    #[graphql(description = "The signature of the transaction")]
    pub signature: Bytes,
}

/// Assemble a protocol SignedTransaction from the raw GraphQL input plus
/// its hex-encoded hash/pubkey/signature envelope.
pub fn to_signed_transaction(
    raw: InputRawTransaction,
    encryption: InputTransactionEncryption,
) -> ProtocolResult {
    let pubkey: &[u8] = &hex::decode(encryption.pubkey.as_hex()?).map_err(SchemaError::from)?;
    let signature: &[u8] =
        &hex::decode(encryption.signature.as_hex()?).map_err(SchemaError::from)?;

    Ok(protocol::types::SignedTransaction {
        raw: to_transaction(raw)?,
        tx_hash: protocol::types::Hash::from_hex(&encryption.tx_hash.as_hex())?,
        pubkey: bytes::BytesMut::from(pubkey).freeze(),
        signature: bytes::BytesMut::from(signature).freeze(),
    })
}

/// Convert the GraphQL input object into a protocol RawTransaction,
/// parsing the hex-encoded hash and numeric fields.
pub fn to_transaction(raw: InputRawTransaction) -> ProtocolResult {
    Ok(protocol::types::RawTransaction {
        chain_id: protocol::types::Hash::from_hex(&raw.chain_id.as_hex())?,
        nonce: protocol::types::Hash::from_hex(&raw.nonce.as_hex())?,
        timeout: raw.timeout.try_into_u64()?,
cycles_price: raw.cycles_price.try_into_u64()?,
        cycles_limit: raw.cycles_limit.try_into_u64()?,
        request: protocol::types::TransactionRequest {
            // NOTE(review): `raw` is owned here, so these `.to_owned()` calls
            // clone strings that could simply be moved.
            service_name: raw.service_name.to_owned(),
            method: raw.method.to_owned(),
            payload: raw.payload.to_owned(),
        },
        sender: raw.sender.to_str().parse()?,
    })
}

================================================
FILE: core/cli/Cargo.toml
================================================
[package]
name = "cli"
version = "0.2.1"
authors = ["Muta Dev "]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
backtrace = "0.3"
actix-rt = "1.0"
derive_more = "0.99"
futures = "0.3"
parking_lot = "0.11"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
log = "0.4"
clap = "2.33"
bytes = "0.5"
hex = "0.4"
rlp = "0.4"
toml = "0.5"
tokio = { version = "0.2", features = ["macros", "sync", "rt-core", "rt-util", "signal", "time"] }
muta-apm = "0.1.0-alpha.7"
futures-timer="3.0"
cita_trie = "2.0"
fs_extra = "1.2.0"

byzantine = { path = "../../byzantine" }
common-apm = { path = "../../common/apm" }
common-config-parser = { path = "../../common/config-parser" }
common-crypto = { path = "../../common/crypto" }
common-logger = { path = "../../common/logger" }
protocol = { path = "../../protocol", package = "muta-protocol" }
core-api = { path = "../../core/api" }
core-storage = { path = "../../core/storage" }
core-mempool = { path = "../../core/mempool" }
core-network = { path = "../../core/network" }
core-consensus = { path = "../../core/consensus" }
binding-macro = { path = "../../binding-macro" }
framework = { path = "../../framework" }
run = {path = "../run"}

[dev-dependencies]
cita_trie = "2.0"
async-trait = "0.1"
toml = "0.5"
lazy_static = "1.4"
muta-codec-derive = "0.2"
asset = { path = "../../built-in-services/asset" }
multi-signature = { path = "../../built-in-services/multi-signature" }
authorization = { path = "../../built-in-services/authorization" }
metadata = { path = "../../built-in-services/metadata"}
util = { path = "../../built-in-services/util"}
rand = "0.7"
core-network = { path = "../../core/network", features = ["diagnostic"] }
tokio = { version = "0.2", features = ["full"] }

================================================
FILE: core/cli/src/error.rs
================================================
use std::error::Error;

use derive_more::{Display, From};

use protocol::{ProtocolError, ProtocolErrorKind};

/// CLI-level error type; converted into a `ProtocolError` (Storage kind)
/// at the boundary below.
#[derive(Debug, Display, From)]
pub enum CliError {
    #[display(fmt = "input is not a valid JSON format for target, {:?}", _0)]
    JSONFormat(serde_json::error::Error),

    #[display(fmt = "grammar error")]
    Grammar,

    #[display(fmt = "path not found: {}", _0)]
    Path(String),

    #[display(fmt = "io operation fails: {:?}", _0)]
    IO(std::io::Error),

    // NOTE(review): identical display text to `IO` above; distinct wording
    // would make logs easier to attribute.
    #[display(fmt = "io operation fails: {:?}", _0)]
    IO2(fs_extra::error::Error),

    #[display(fmt = "block for height {} not found", _0)]
    BlockNotFound(u64),

    #[display(fmt = "parsing error")]
    Parse,

    #[display(fmt = "unsupported command")]
    UnsupportedCommand,

    #[display(fmt = "genesis.toml is missing")]
    MissingGenesis,
}

impl Error for CliError {}

impl From for ProtocolError {
    fn from(err: CliError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Storage, Box::new(err))
    }
}

================================================
FILE: core/cli/src/lib.rs
================================================
mod error;
#[cfg(test)]
mod tests;

use std::fs;
use std::ops::RangeInclusive;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::Arc;

use clap::ArgMatches;

use common_config_parser::types::Config;
use core_consensus::wal::ConsensusWal;
use core_consensus::SignedTxsWAL;
use core_storage::adapter::rocks::RocksAdapter;
use core_storage::ImplStorage;
use protocol::traits::{Context, MaintenanceStorage, ServiceMapping};
use protocol::types::{Block, Genesis, SignedTransaction};
use protocol::ProtocolResult;

use crate::error::CliError;

// Shared warning printed whenever a destructive subcommand runs without -y.
const PLEASE_CONFIRM: &str = "Please use -y to confirm
modification and DO BACK UP YOUR DB DATA AND WAL";

/// Static metadata used to build the clap application.
pub struct CliConfig {
    pub app_name: &'static str,
    pub version: &'static str,
    pub author: &'static str,
    pub config_path: &'static str,
    pub genesis_patch: &'static str,
}

// NOTE(review): generic parameters appear stripped by extraction below
// (e.g. `Option<Genesis>`, `Arc<Mapping>`, `Option<Vec<String>>`); tokens
// kept exactly as found.

/// Top-level CLI: parsed arguments, loaded config, optional genesis, and
/// the service mapping used to boot the chain.
pub struct Cli<'a, Mapping>
where
    Mapping: 'static + ServiceMapping,
{
    pub matches: ArgMatches<'a>,
    pub config: Config,
    pub genesis: Option,
    pub service_mapping: Arc,
}

impl<'a, Mapping> Cli<'a, Mapping>
where
    Mapping: 'static + ServiceMapping,
{
    /// Parse arguments, run the selected subcommand, and log any error.
    pub fn run(
        service_mapping: Mapping,
        cli_config: CliConfig,
        target_commands: Option>,
    ) {
        let cli = Self::new(service_mapping, cli_config, target_commands);

        if let Err(e) = cli.start() {
            log::error!("{:?}", e)
        }
    }

    /// Build the CLI: parse matches, load config, register logging (except
    /// under test), and try to read the genesis file.
    pub fn new(
        service_mapping: Mapping,
        cli_config: CliConfig,
        target_commands: Option>,
    ) -> Self {
        let matches = Self::generate_matches(cli_config, target_commands);

        let config_path = matches.value_of("config").expect("missing config path");
        let genesis_path = matches.value_of("genesis").expect("missing genesis path");

        let config: Config =
            common_config_parser::parse(&config_path.trim()).expect("config path is not set");
        if !cfg!(test) {
            Self::register_log(&config)
        };

        // genesis may be absent for now
        // NOTE(review): a malformed genesis.toml is silently treated the same
        // as a missing one (both map to None) -- parse errors are swallowed.
        let genesis = match fs::read_to_string(&genesis_path.trim()) {
            Ok(genesis_content) => match toml::from_str::(&genesis_content) {
                Ok(genesis) => Some(genesis),
                Err(_) => None,
            },
            Err(_) => None,
        };

        Self {
            matches,
            config,
            genesis,
            service_mapping: Arc::new(service_mapping),
        }
    }

    // Initialize the global logger from the loaded config.
    fn register_log(config: &Config) {
        common_logger::init(
            config.logger.filter.clone(),
            config.logger.log_to_console,
            config.logger.console_show_file_and_line,
            config.logger.log_to_file,
            config.logger.metrics,
            config.logger.log_path.clone(),
            config.logger.file_size_limit,
            config.logger.modules_level.clone(),
        );
    }

    /// Dispatch on the subcommand; no subcommand defaults to `run`.
    pub fn start(self) -> ProtocolResult<()> {
        match self.matches.subcommand() {
            ("run", Some(_sub_cmd)) => {
                log::info!("run subcommand run");
                if let Some(genesis) = self.genesis {
                    let muta = run::Muta::new(self.config, genesis, self.service_mapping);
                    muta.run()
                } else {
                    log::error!("genesis.toml is missing");
                    Err(CliError::MissingGenesis.into())
                }
            }
            ("latest_block", Some(_sub_cmd)) => {
                log::info!("run subcommand latest_block");
                let maintenance_cli = self.generate_maintenance_cli();
                maintenance_cli.start()
            }
            ("block", Some(_sub_cmd)) => {
                log::info!("run subcommand block");
                let maintenance_cli = self.generate_maintenance_cli();
                maintenance_cli.start()
            }
            ("wal", Some(_sub_cmd)) => {
                log::info!("run subcommand wal");
                let maintenance_cli = self.generate_maintenance_cli();
                maintenance_cli.start()
            }
            ("backup", Some(_sub_cmd)) => {
                log::info!("run subcommand backup");
                let maintenance_cli = self.generate_maintenance_cli();
                maintenance_cli.start()
            }
            _ => {
                log::info!("run without any subcommand, default to run");
                if let Some(genesis) = self.genesis {
                    let muta = run::Muta::new(self.config, genesis, self.service_mapping);
                    muta.run()
                } else {
                    log::error!("genesis.toml is missing");
                    Err(CliError::MissingGenesis.into())
                }
            }
        }
    }

    /// Build the clap argument tree; `cmds` allows injecting argv for tests.
    pub fn generate_matches(cli_config: CliConfig, cmds: Option>) -> ArgMatches<'a> {
        let app = clap::App::new(cli_config.app_name)
            .version(cli_config.version)
            .author(cli_config.author)
            .arg(
                clap::Arg::with_name("config")
                    .short("c")
                    .long("config")
                    .value_name("FILE")
                    .help("a required file for the configuration")
                    .env("CONFIG")
                    .default_value(cli_config.config_path),
            )
            .arg(
                clap::Arg::with_name("genesis")
                    .short("g")
                    .long("genesis")
                    .value_name("FILE")
                    .help("a required file for the genesis")
                    .env("GENESIS")
                    .default_value(cli_config.genesis_patch),
            )
            .subcommand(clap::SubCommand::with_name("run").about("run the muta-chain"))
            .subcommand(
                clap::SubCommand::with_name("latest_block")
                    //.help("latest block")
                    .about("APIs for latest block operation")
                    .subcommand(
                        clap::SubCommand::with_name("set")
                            .arg(clap::Arg::with_name("BLOCK_HEIGHT").required(true))
                            .arg(clap::Arg::with_name("confirm").short("y").help("confirm to take effect"))
                            .about("set the latest block")
                    )
                    .subcommand(
                        clap::SubCommand::with_name("get")
                            .help("latest_block get")),
            )
            .subcommand(
                clap::SubCommand::with_name("block")
                    .about("APIs for block manipulation")
                    .subcommand(
                        clap::SubCommand::with_name("get")
                            .arg(clap::Arg::with_name("BLOCK_HEIGHT").required(true))
                            .about("get block of [BLOCK_HEIGHT]"),
                    )
                    .subcommand(
                        clap::SubCommand::with_name("set")
                            .arg(clap::Arg::with_name("BLOCK").required(true))
                            .arg(clap::Arg::with_name("confirm").short("y").help("confirm to take effect"))
                            .about("upsert target block by [BLOCK], [BLOCK] is in JSON format"),
                    ),
            )
            .subcommand(
                clap::SubCommand::with_name("wal")
                    .about("APIs for Write Ahead Log operation")
                    .subcommand(
                        clap::SubCommand::with_name("clear")
                            .arg(clap::Arg::with_name("confirm")
                                .short("y").help("confirm to take effect"))
                            .about("clear all wals, include mempool wal and consensus txs"),
                    )
                    .subcommand(
                        clap::SubCommand::with_name("mempool")
                            .about("handle mempool wal")
                            .subcommand(
                                clap::SubCommand::with_name("clear")
                                    .arg(clap::Arg::with_name("confirm").short("y").help("confirm to take effect"))
                                    .about("clear mempool wal"),
                            )
                            .subcommand(
                                clap::SubCommand::with_name("list").about("list mempool wal"),
                            )
                            .subcommand(
                                clap::SubCommand::with_name("get")
                                    .about("get mempool wal")
                                    .arg(clap::Arg::with_name("BLOCK_HEIGHT").required(true)),
                            ),
                    )
                    .subcommand(
                        clap::SubCommand::with_name("consensus")
                            .about("handle consensus wal")
                            .subcommand(
                                clap::SubCommand::with_name("clear")
                                    .arg(clap::Arg::with_name("confirm").short("y").help("confirm to take effect"))
                                    .about("clear consensus wal"),
                            ),
                    ),
            )
            .subcommand(
                clap::SubCommand::with_name("backup")
                    .about("APIs for backup operation")
                    .subcommand(
                        clap::SubCommand::with_name("save")
                            .about("save db to [TO] place")
                            .arg(clap::Arg::with_name("TO").required(true).help("path")),
                    )
                    .subcommand(
                        clap::SubCommand::with_name("restore")
                            .about("restore db from [FROM] place")
                            .arg(clap::Arg::with_name("FROM").required(true).help("path")),
                    ),
            );
        match cmds {
            Some(cmds) => app.get_matches_from(cmds),
            None => app.get_matches(),
        }
    }

    // Open the storage / WAL layers needed by the maintenance subcommands.
    fn generate_maintenance_cli(self) -> MaintenanceCli<'a, Mapping, ImplStorage> {
        let path_block = self.config.data_path_for_block();
        let rocks_adapter = match RocksAdapter::new(path_block, self.config.rocksdb.max_open_files)
        {
            Ok(adapter) => Arc::new(adapter),
            Err(e) => {
                log::error!("{:?}", e);
                panic!("rocks_adapter init fails")
            }
        };
        let storage = ImplStorage::new(rocks_adapter);

        // Init full transactions wal
        let txs_wal_path = self
            .config
            .data_path_for_txs_wal()
            .to_str()
            .unwrap()
            .to_string();
        let txs_wal = SignedTxsWAL::new(txs_wal_path);

        // Init consensus wal
        let consensus_wal_path = self
            .config
            .data_path_for_consensus_wal()
            .to_str()
            .unwrap()
            .to_string();
        let consensus_wal = ConsensusWal::new(consensus_wal_path);

        MaintenanceCli::new(
            self.matches,
            self.config,
            self.service_mapping,
            storage,
            txs_wal,
            consensus_wal,
        )
    }
}

/// Maintenance-mode CLI operating directly on storage and the WALs.
pub struct MaintenanceCli<'a, Mapping, S>
where
    Mapping: 'static + ServiceMapping,
    S: 'static + MaintenanceStorage,
{
    pub matches: ArgMatches<'a>,
    pub config: Config,
    pub service_mapping: Arc,
    pub storage: Arc,
    pub txs_wal: Arc,
    pub consensus_wal: Arc,
}

impl<'a, Mapping, S> MaintenanceCli<'a, Mapping, S>
where
    Mapping: 'static + ServiceMapping,
    S: 'static + MaintenanceStorage,
{
    pub fn new(
        matches: ArgMatches<'a>,
        config: Config,
        service_mapping: Arc,
        storage: S,
        txs_wal: SignedTxsWAL,
        consensus_wal: ConsensusWal,
    ) -> Self {
        Self {
            matches,
            config,
            service_mapping,
            storage: Arc::new(storage),
            txs_wal: Arc::new(txs_wal),
            consensus_wal: Arc::new(consensus_wal),
        }
    }

    /// Dispatch the maintenance subcommand.
    pub fn start(&self) -> ProtocolResult<()> {
        match self.matches.subcommand() {
            ("latest_block", Some(sub_cmd)) => self.latest_block(sub_cmd),
            ("block", Some(sub_cmd)) => self.block(sub_cmd),
            ("wal", Some(sub_cmd)) => self.wal(sub_cmd),
            ("backup", Some(sub_cmd)) => self.backup(sub_cmd),
            _ => Err(CliError::UnsupportedCommand.into()),
        }
    }

    /// `latest_block set|get`: set requires -y confirmation.
    pub fn latest_block(&self, sub_cmd: &ArgMatches) -> ProtocolResult<()> {
        let mut rt
= tokio::runtime::Runtime::new().expect("new tokio runtime"); match sub_cmd.subcommand() { ("set", Some(cmd)) => { let height = cmd .value_of("BLOCK_HEIGHT") .expect("missing [BLOCK_HEIGHT]"); let confirm = cmd.is_present("confirm"); if !confirm { log::info!("{}", PLEASE_CONFIRM); return Ok(()); } match u64::from_str_radix(height, 10) { Ok(height) => rt.block_on(async move { self.latest_block_set(height).await }), Err(_e) => Err(CliError::Parse.into()), } } ("get", Some(_cmd)) => { let block = rt.block_on(async move { self.latest_block_get().await })?; log::info!( "latest_block get {}", serde_json::to_string(&block).unwrap() ); Ok(()) } _ => Err(CliError::Grammar.into()), } } pub async fn latest_block_set(&self, height: u64) -> ProtocolResult<()> { let last = self.storage.get_latest_block(Context::new()).await?; let block = self.block_get(height).await?; let block = match block { Some(blk) => blk, None => return Err(CliError::BlockNotFound(height).into()), }; self.storage .insert_block(Context::new(), block.clone()) .await?; log::info!( "latest_block set successfully : {}", serde_json::to_string(&block).unwrap() ); // now remove 'future' blocks for idx in RangeInclusive::new(height + 1, last.header.height) { self.storage.remove_block(Context::new(), idx).await? 
} log::info!( "latest_block set, remove blocks from {} to {}", height + 1, last.header.height ); Ok(()) } pub async fn latest_block_get(&self) -> ProtocolResult { self.storage.get_latest_block(Context::new()).await } pub fn block(&self, sub_cmd: &ArgMatches) -> ProtocolResult<()> { let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); match sub_cmd.subcommand() { ("set", Some(cmd)) => { let confirm = cmd.is_present("confirm"); if !confirm { log::info!("{}", PLEASE_CONFIRM); return Ok(()); } let block_json = cmd.value_of("BLOCK").expect("missing [BLOCK]"); rt.block_on(async move { self.block_set(block_json).await })?; Ok(()) } ("get", Some(cmd)) => { let height = cmd .value_of("BLOCK_HEIGHT") .expect("missing height") .parse() .unwrap(); let res = rt.block_on(async move { self.block_get(height).await })?; match res { Some(block) => { log::info!("block_get: {}", serde_json::to_string(&block).unwrap()); } None => { log::info!("block not found for height {}", height); } } Ok(()) } _ => Err(CliError::Grammar.into()), } } pub async fn block_get(&self, height: u64) -> ProtocolResult> { self.storage.get_block(Context::new(), height).await } pub async fn block_set(&self, block_json: &str) -> ProtocolResult<()> { let block = serde_json::from_str::(block_json).map_err(|e| { log::info!("use 'block get 0' to get a example block JSON output"); CliError::JSONFormat(e) })?; self.storage .remove_block(Context::new(), block.header.height) .await?; self.storage .set_block(Context::new(), block.clone()) .await?; log::info!( "block set successfully: {}", serde_json::to_string(&block).unwrap() ); Ok(()) } pub fn wal(&self, sub_cmd: &ArgMatches) -> ProtocolResult<()> { match sub_cmd.subcommand() { ("mempool", Some(cmd)) => match cmd.subcommand() { ("clear", Some(cmd)) => { let confirm = cmd.is_present("confirm"); if !confirm { log::info!("{}", PLEASE_CONFIRM); return Ok(()); }; self.wal_txs_clear() } ("list", Some(_cmd)) => { self.wal_txs_list()?; Ok(()) } ("get", 
Some(cmd)) => {
                    let height = cmd
                        .value_of("BLOCK_HEIGHT")
                        .expect("missing [BLOCK_HEIGHT]")
                        .parse()
                        .unwrap();
                    self.wal_txs_get(height)?;
                    Ok(())
                }
                _ => Err(CliError::Grammar.into()),
            },
            ("consensus", Some(cmd)) => match cmd.subcommand() {
                ("clear", Some(cmd)) => {
                    let confirm = cmd.is_present("confirm");
                    if !confirm {
                        log::info!("{}", PLEASE_CONFIRM);
                        return Ok(());
                    };
                    self.wal_consensus_clear()
                }
                _ => Err(CliError::Grammar.into()),
            },
            ("clear", Some(cmd)) => {
                let confirm = cmd.is_present("confirm");
                if !confirm {
                    log::info!("{}", PLEASE_CONFIRM);
                    return Ok(());
                };
                // Clear both WALs; abort on the first failure so a partial
                // clear is visible to the operator.
                self.wal_consensus_clear()?;
                self.wal_txs_clear()?;
                log::info!("wal clear, successfully");
                Ok(())
            }
            _ => Err(CliError::Grammar.into()),
        }
    }

    /// Remove every entry from the mempool transaction WAL.
    pub fn wal_txs_clear(&self) -> ProtocolResult<()> {
        let res = self.txs_wal.remove_all();
        log::info!("wal_txs_clear: {:?}", res);
        res
    }

    /// List the heights for which the transaction WAL holds saved batches.
    // NOTE(review): generic parameters were lost in extraction; return type
    // reconstructed as `ProtocolResult<Vec<u64>>` — confirm against
    // `txs_wal.available_height()`.
    pub fn wal_txs_list(&self) -> ProtocolResult<Vec<u64>> {
        let res = self.txs_wal.available_height();
        log::info!("wal_txs_list: {:?}", res);
        res
    }

    /// Load the WAL-saved transactions for `height`.
    // NOTE(review): return type reconstructed as
    // `ProtocolResult<Vec<SignedTransaction>>`; `load_by_height` itself is
    // infallible here (its result is wrapped in `Ok`) — confirm upstream.
    pub fn wal_txs_get(&self, height: u64) -> ProtocolResult<Vec<SignedTransaction>> {
        let res = self.txs_wal.load_by_height(height);
        log::info!("wal_txs_get: {:?}", res);
        Ok(res)
    }

    /// Clear the consensus (overlord) WAL.
    pub fn wal_consensus_clear(&self) -> ProtocolResult<()> {
        let res = self.consensus_wal.clear();
        log::info!("wal_consensus_clear: {:?}", res);
        res
    }

    /// `backup` subcommand dispatcher: `backup save <TO>` / `backup restore <FROM>`.
    pub fn backup(&self, sub_cmd: &ArgMatches) -> ProtocolResult<()> {
        match sub_cmd.subcommand() {
            ("save", Some(cmd)) => {
                let to = cmd.value_of("TO").expect("missing [TO]");
                self.backup_save(PathBuf::from_str(to).map_err(|e| CliError::Path(e.to_string()))?)
} ("restore", Some(cmd)) => { let from = cmd.value_of("FROM").expect("missing [FROM]"); self.backup_restore( PathBuf::from_str(from).map_err(|e| CliError::Path(e.to_string()))?, ) } _ => Err(CliError::Grammar.into()), } } pub fn backup_save>(&self, to: P) -> ProtocolResult<()> { let to = to.as_ref(); let data_path = self.config.data_path.as_path(); fs_extra::dir::remove(to).map_err(CliError::IO2)?; fs_extra::dir::copy(data_path, to, &fs_extra::dir::CopyOptions { overwrite: true, skip_exist: false, buffer_size: 64000, // 64kb copy_inside: true, content_only: false, depth: 0, }) .map_err(CliError::IO2)?; log::info!("backup_save successfully to: {:?}", to.to_str()); Ok(()) } pub fn backup_restore>(&self, from: P) -> ProtocolResult<()> { let from = from.as_ref(); let data_path = self.config.data_path.as_path(); fs_extra::dir::remove(data_path).map_err(CliError::IO2)?; fs_extra::dir::copy(from, data_path, &fs_extra::dir::CopyOptions { overwrite: true, skip_exist: false, buffer_size: 64000, // 64kb copy_inside: true, content_only: false, depth: 0, }) .map_err(CliError::IO2)?; log::info!("backup_restore successfully to: {:?}", from.to_str()); Ok(()) } } ================================================ FILE: core/cli/src/tests/config.toml ================================================ # crypto privkey = "0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a" # db config data_path = "./free-space/data" [graphql] listening_address = "127.0.0.1:8000" graphql_uri = "/graphql" graphiql_uri = "/graphiql" workers = 0 # if 0, uses number of available logical cpu as threads count. 
maxconn = 25000 max_payload_size = 1048576 # [graphql.tls] # private_key_file_path = "key.pem" # certificate_chain_file_path = "cert.pem" [network] listening_address = "0.0.0.0:1337" rpc_timeout = 10 [consensus] overlord_gap = 5 sync_txs_chunk_size = 5000 [[network.bootstraps]] peer_id = "QmTEJkB5QKWsEq37huryZZfVvqBKb54sHnKn9TQcA6j3n9" address = "0.0.0.0:1888" [mempool] pool_size = 20000 broadcast_txs_size = 200 broadcast_txs_interval = 200 [executor] light = false triedb_cache_size = 2000 [logger] filter = "info" log_to_console = true console_show_file_and_line = false log_path = "./free-space/logs" log_to_file = true file_size_limit = 1073741824 # 1 GiB metrics = true # you can specify log level for modules with config below # modules_level = { "overlord::state::process" = "debug", core_consensus = "error" } [rocksdb] max_open_files = 64 # [apm] # service_name = "muta" # tracing_address = "127.0.0.1:6831" # tracing_batch_size = 50 ================================================ FILE: core/cli/src/tests/genesis.toml ================================================ timestamp = 0 prevhash = "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" [[services]] name = "asset" payload = ''' { "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" } ''' [[services]] name = "metadata" payload = ''' { "chain_id": "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", "bech32_address_hrp": "muta", "common_ref": "0x6c747758636859487038", "timeout_gap": 20, "cycles_limit": 4294967295, "cycles_price": 1, "interval": 3000, "verifier_list": [ { "bls_pub_key": "0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724", "pub_key": 
"0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60", "address": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705", "propose_weight": 1, "vote_weight": 1 } ], "propose_ratio": 15, "prevote_ratio": 10, "precommit_ratio": 10, "brake_ratio": 7, "tx_num_limit": 20000, "max_tx_size": 1024 } ''' ================================================ FILE: core/cli/src/tests/mod.rs ================================================ mod service_mapping; use std::path::PathBuf; use std::str::FromStr; use protocol::traits::{CommonStorage, Context}; use protocol::types::{Block, BlockHeader, Bytes, Hash, Proof}; use protocol::ProtocolResult; use crate::{Cli, CliConfig}; use service_mapping::DefaultServiceMapping; const SAVE_DIR: &str = "./free-space/save"; const DATA_DIR: &str = "./free-space/data"; const CONFIG_PATH: &str = "./src/tests/config.toml"; const GENESIS_PATH: &str = "./src/tests/genesis.toml"; #[test] fn test_lineally() { clean(); prepare(); save_restore(); clean(); // set "latest" test before "block" test due to latest block cache in storage prepare(); latest_get(23); clean(); prepare(); latest_set(); clean(); prepare(); block_get(); clean(); prepare(); block_set(); clean(); } fn save_restore() { println!("test save_restore"); let save = PathBuf::from_str(SAVE_DIR).expect("save_restore, path fails"); fs_extra::dir::remove(save.clone()).expect("save_restore, remove save_restore fails"); run(vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "backup", "save", SAVE_DIR, ]) .expect("save_restore, run save fails"); assert!(save.exists()); // now the data has gone clean(); run(vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "backup", "restore", SAVE_DIR, ]) .expect("save_restore, run restore fails"); let data = PathBuf::from_str(DATA_DIR).expect("save_restore, path fails"); assert!(data.exists()); fs_extra::dir::remove(save).expect("save_restore, remove save files fails"); println!("tested save_restore"); } fn 
block_get() -> Block { println!("test block_get"); let cmd = vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "block", "get", "11", ]; let maintenance_cli = Cli::new( DefaultServiceMapping {}, CliConfig { app_name: "Rodents", version: "Big Cheek", author: "Hamsters", config_path: "./cofnig.toml", genesis_patch: "./genesis.toml", }, Some(cmd), ) .generate_maintenance_cli(); let block = if let ("block", Some(sub_cmd)) = maintenance_cli.matches.subcommand() { let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); if let ("get", Some(_cmd)) = sub_cmd.subcommand() { let res = rt.block_on(async move { maintenance_cli.block_get(11).await }); let block = res .expect("block_get, block_get fails") .expect("block_get, block_get block not found"); assert_eq!(block.header.height, 11); block } else { panic!() } } else { panic!() }; println!("tested block_get"); block } fn block_set() { println!("test block_set"); // we chagne the exec height from 10 to 9 on height 11 let cmd = vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "block", "set", "-y", r#" 
{"header":{"chain_id":"0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036","height":11,"exec_height":9,"prev_hash":"0xc60d9652e5a7d18d34272ac4f8350086439520923d812b4cc4428a9b04d2dd01","timestamp":1598632570280,"order_root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","order_signed_transactions_hash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","confirm_root":["0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"],"state_root":"0xd26475337965236ee6bfb4db3f02ed8d21b710f4194e7de5a379fdde0f48c681","receipt_root":["0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"],"cycles_used":[0],"proposer":"muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705","proof":{"height":10,"round":0,"block_hash":"0xc60d9652e5a7d18d34272ac4f8350086439520923d812b4cc4428a9b04d2dd01","signature":[7,23,172,129,210,37,136,144,12,57,227,78,29,103,134,41,243,30,237,76,239,6,104,140,72,255,52,0,245,178,160,99,83,172,226,68,115,200,56,126,97,78,80,58,101,70,84,162,8,230,26,25,30,82,91,62,107,140,126,30,95,148,17,78,243,149,82,90,103,206,13,32,42,83,41,233,22,248,127,89,83,246,37,8,152,236,11,120,55,77,110,93,222,191,246,59,11,217,193,133,230,91,73,115,76,124,147,244,154,146,179,147,242,89,239,124,135,95,62,70,190,42,220,245,155,74,210,75,166,138,78,42,247,71,229,134,245,53,10,57,65,253,178,238,14,108,79,191,45,140,142,134,251,157,255,148,122,78,167,127,204,79,176,71,188,253,42,167,34,61,234,242,248,86,0,62,225,11,207,15,254,235,189,202,94,10,185,176,223,127,62,127],"bitmap":[128]},"validator_version":0,"validators":[{"pub_key":[2,239,12,176,215,188,108,24,180,190,161,245,144,141,145,6,82,43,53,171,60,57,147,105,96,93,66,66,82,91,218,126,96],"propose_weight":1,"vote_weight":1}]},"ordered_tx_hashes":[]} "#, ]; let maintenance_cli = Cli::new( DefaultServiceMapping {}, CliConfig { app_name: "Rodents", version: "Big Cheek", author: "Hamsters", config_path: "./cofnig.toml", genesis_patch: "./genesis.toml", }, Some(cmd), 
) .generate_maintenance_cli(); let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); rt.block_on(async move { if let ("block", Some(sub_cmd)) = maintenance_cli.matches.subcommand() { if let ("set", Some(cmd)) = sub_cmd.subcommand() { let block_json = cmd.value_of("BLOCK").expect("missing [BLOCK]"); let res = maintenance_cli.block_set(block_json).await; assert!(res.is_ok()); } else { panic!() } } else { panic!() } }); let changed = block_get(); assert_eq!(changed.header.exec_height, 9); println!("tested block_set"); } fn latest_get(expect: u64) -> Block { println!("test latest_get"); // we chagne the exec height from 10 to 9 on height 11 let cmd = vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "latest_block", "get", ]; let maintenance_cli = Cli::new( DefaultServiceMapping {}, CliConfig { app_name: "Rodents", version: "Big Cheek", author: "Hamsters", config_path: "./cofnig.toml", genesis_patch: "./genesis.toml", }, Some(cmd), ) .generate_maintenance_cli(); let block = if let ("latest_block", Some(sub_cmd)) = maintenance_cli.matches.subcommand() { if let ("get", Some(_cmd)) = sub_cmd.subcommand() { let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); let res = rt.block_on(async move { maintenance_cli.latest_block_get().await }); let block = res.expect("latest_get, latest_block_get fails"); assert_eq!(block.header.height, expect); block } else { panic!() } } else { panic!() }; println!("tested latest_get"); block } fn latest_set() { println!("test latest_set"); // we change the exec height from 10 to 9 on height 11 let cmd = vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "latest_block", "set", "-y", "10", ]; let maintenance_cli = Cli::new( DefaultServiceMapping {}, CliConfig { app_name: "Rodents", version: "Big Cheek", author: "Hamsters", config_path: "./cofnig.toml", genesis_patch: "./genesis.toml", }, Some(cmd), ) .generate_maintenance_cli(); if let ("latest_block", 
Some(sub_cmd)) = maintenance_cli.matches.subcommand() { if let ("set", Some(_cmd)) = sub_cmd.subcommand() { let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); let res = rt.block_on(async move { maintenance_cli.latest_block_set(10).await }); assert!(res.is_ok()); } else { panic!() } } else { panic!() } let changed = latest_get(10); assert_eq!(changed.header.height, 10); println!("tested latest_set"); } // test functional methods list below fn prepare() { let to = PathBuf::from_str(DATA_DIR).expect("prepare,data dir fails"); if to.exists() { fs_extra::dir::remove(to.as_path()).expect("prepare, remove to fails"); } // we just add a validation command, but we don't use the match yet let cmd = vec![ "muta-chain", "--config", CONFIG_PATH, "--genesis", GENESIS_PATH, "latest_block", "get", ]; let maintenance_cli = Cli::new( DefaultServiceMapping {}, CliConfig { app_name: "Rodents", version: "Big Cheek", author: "Hamsters", config_path: "./cofnig.toml", genesis_patch: "./genesis.toml", }, Some(cmd), ) .generate_maintenance_cli(); let storage = maintenance_cli.storage; // now we add fake blocks let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); for idx in 0..=23 { if let Err(e) = rt.block_on(storage.insert_block(Context::new(), Block { header: BlockHeader { chain_id: Default::default(), height: idx, exec_height: match idx { i if i > 0 => i - 1, _ => 0, }, prev_hash: Default::default(), timestamp: 0, order_root: Default::default(), order_signed_transactions_hash: Default::default(), confirm_root: vec![], state_root: Default::default(), receipt_root: vec![], cycles_used: vec![], proposer: Default::default(), proof: Proof { height: 0, round: 0, block_hash: Default::default(), signature: Default::default(), bitmap: Default::default(), }, validator_version: 0, validators: vec![], }, ordered_tx_hashes: vec![], })) { println!("{:?}", e); panic!("muta cli test prepare(), prepare rocksdb fails") }; } let tx_wal = maintenance_cli.txs_wal; if 
tx_wal .save( 23, Hash::from_hex("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") .unwrap(), vec![], ) .is_err() { panic!("muta cli test prepare(), prepare tx_wal fails") }; let consensus_wal = maintenance_cli.consensus_wal; if consensus_wal .update_overlord_wal( Context::new(), Bytes::from_static(b"1234567,doremifasolati"), ) .is_err() { panic!("muta cli test prepare(), prepare consense_wal fails") }; } fn clean() { let to = PathBuf::from_str(DATA_DIR).expect("clean, data dir fails"); if to.exists() { fs_extra::dir::remove(to.as_path()).expect("clean, remove to"); } } fn run(cmd: Vec<&str>) -> ProtocolResult<()> { Cli::new( service_mapping::DefaultServiceMapping {}, CliConfig { app_name: "Rodents", version: "Big Cheek", author: "Hamsters", config_path: "./cofnig.toml", genesis_patch: "./genesis.toml", }, Some(cmd), ) .start() } ================================================ FILE: core/cli/src/tests/service_mapping.rs ================================================ // This file is copied directly from example/muta-chain use derive_more::{Display, From}; use protocol::traits::{SDKFactory, Service, ServiceMapping, ServiceSDK}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; use asset::{AssetService, ASSET_SERVICE_NAME}; use authorization::{AuthorizationService, AUTHORIZATION_SERVICE_NAME}; use metadata::{MetadataService, METADATA_SERVICE_NAME}; use multi_signature::{MultiSignatureService, MULTI_SIG_SERVICE_NAME}; use util::{UtilService, UTIL_SERVICE_NAME}; pub struct DefaultServiceMapping; impl ServiceMapping for DefaultServiceMapping { fn get_service>( &self, name: &str, factory: &Factory, ) -> ProtocolResult> { let sdk = factory.get_sdk(name)?; let service = match name { AUTHORIZATION_SERVICE_NAME => { let multi_sig_sdk = factory.get_sdk("multi_signature")?; Box::new(AuthorizationService::new( sdk, MultiSignatureService::new(multi_sig_sdk), )) as Box } ASSET_SERVICE_NAME => Box::new(AssetService::new(sdk)) as Box, 
METADATA_SERVICE_NAME => Box::new(MetadataService::new(sdk)) as Box, MULTI_SIG_SERVICE_NAME => Box::new(MultiSignatureService::new(sdk)) as Box, UTIL_SERVICE_NAME => Box::new(UtilService::new(sdk)) as Box, _ => { return Err(MappingError::NotFoundService { service: name.to_owned(), } .into()); } }; Ok(service) } fn list_service_name(&self) -> Vec { vec![ ASSET_SERVICE_NAME.to_owned(), AUTHORIZATION_SERVICE_NAME.to_owned(), METADATA_SERVICE_NAME.to_owned(), MULTI_SIG_SERVICE_NAME.to_owned(), UTIL_SERVICE_NAME.to_owned(), ] } } #[derive(Debug, Display, From)] pub enum MappingError { #[display(fmt = "service {:?} was not found", service)] NotFoundService { service: String }, } impl std::error::Error for MappingError {} impl From for ProtocolError { fn from(err: MappingError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Service, Box::new(err)) } } ================================================ FILE: core/consensus/Cargo.toml ================================================ [package] name = "core-consensus" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] async-trait = "0.1" bincode = "1.3" cita_trie = "2.0" json = "0.12" creep = "0.2" derive_more = "0.99" futures = { version = "0.3", features = ["async-await"] } futures-timer = "3.0" hex = "0.4" log = "0.4" overlord = "0.2" parking_lot = "0.11" prost = "0.6" rlp = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tokio = { version = "0.2", features = ["macros", "sync", "rt-core", "rt-threaded"] } bytes = { version = "0.5", features = ["serde"] } lazy_static = "1.4" common-apm = { path = "../../common/apm" } common-crypto = { path = "../../common/crypto" } common-logger = { path = "../../common/logger" } common-merkle = { path = "../../common/merkle" } core-mempool = { path = "../../core/mempool" } 
core-storage = { path = "../../core/storage" } core-network = { path = "../../core/network" } protocol = { path = "../../protocol", package = "muta-protocol" } [dev-dependencies] bit-vec = "0.6" num-traits = "0.2" rand = "0.7" [features] default = [] random_leader = ["overlord/random_leader"] ================================================ FILE: core/consensus/src/adapter.rs ================================================ use std::boxed::Box; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; use std::time::Instant; use async_trait::async_trait; use overlord::types::{Node, OverlordMsg, Vote, VoteType}; use overlord::{extract_voters, Crypto, OverlordHandler}; use parking_lot::RwLock; use tokio::sync::mpsc::error::TrySendError; use tokio::sync::mpsc::{channel, Receiver, Sender}; use common_apm::muta_apm; use common_merkle::Merkle; use core_network::{PeerId, PeerIdExt}; use protocol::traits::{ CommonConsensusAdapter, ConsensusAdapter, Context, ExecutorFactory, ExecutorParams, ExecutorResp, Gossip, MemPool, MessageTarget, MixedTxHashes, Network, PeerTrust, Priority, Rpc, ServiceMapping, Storage, SynchronizationAdapter, TrustFeedback, }; use protocol::types::{ Address, Block, BlockHeader, Bytes, Hash, Hex, MerkleRoot, Metadata, Proof, Receipt, SignedTransaction, TransactionRequest, Validator, }; use protocol::{fixed_codec::FixedCodec, ProtocolResult}; use crate::consensus::gen_overlord_status; use crate::fixed_types::{ FixedBlock, FixedHeight, FixedPill, FixedProof, FixedSignedTxs, PullTxsRequest, }; use crate::message::{ BROADCAST_HEIGHT, RPC_SYNC_PULL_BLOCK, RPC_SYNC_PULL_PROOF, RPC_SYNC_PULL_TXS, }; use crate::status::{ExecutedInfo, StatusAgent}; use crate::util::{convert_hex_to_bls_pubkeys, ExecuteInfo, OverlordCrypto}; use crate::BlockHeaderField::{PreviousBlockHash, ProofHash, Proposer}; use crate::BlockProofField::{BitMap, HashMismatch, HeightMismatch, Signature, WeightNotFound}; use crate::{BlockHeaderField, BlockProofField, 
ConsensusError}; pub struct OverlordConsensusAdapter< EF: ExecutorFactory, M: MemPool, N: Rpc + PeerTrust + Gossip + Network + 'static, S: Storage, DB: cita_trie::DB, Mapping: ServiceMapping, > { network: Arc, mempool: Arc, storage: Arc, trie_db: Arc, service_mapping: Arc, overlord_handler: RwLock>>, exec_queue: Sender, exec_demons: Option>, crypto: Arc, } #[async_trait] impl ConsensusAdapter for OverlordConsensusAdapter where EF: ExecutorFactory, M: MemPool + 'static, N: Rpc + PeerTrust + Gossip + Network + 'static, S: Storage + 'static, DB: cita_trie::DB + 'static, Mapping: ServiceMapping + 'static, { #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_txs_from_mempool( &self, ctx: Context, _height: u64, cycle_limit: u64, tx_num_limit: u64, ) -> ProtocolResult { self.mempool.package(ctx, cycle_limit, tx_num_limit).await } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn sync_txs(&self, ctx: Context, txs: Vec) -> ProtocolResult<()> { self.mempool.sync_propose_txs(ctx, txs).await } #[muta_apm::derive::tracing_span(kind = "consensus.adapter", logs = "{'txs_len': 'txs.len()'}")] async fn get_full_txs( &self, ctx: Context, txs: &[Hash], ) -> ProtocolResult> { self.mempool.get_full_txs(ctx, None, txs).await } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn transmit( &self, ctx: Context, msg: Vec, end: &str, target: MessageTarget, ) -> ProtocolResult<()> { match target { MessageTarget::Broadcast => { self.network .broadcast(ctx.clone(), end, msg, Priority::High) .await } MessageTarget::Specified(pub_key) => { let peer_id_bytes = PeerId::from_pubkey_bytes(pub_key)?.into_bytes_ext(); self.network .multicast(ctx, end, [peer_id_bytes], msg, Priority::High) .await } } } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn execute( &self, ctx: Context, chain_id: Hash, order_root: MerkleRoot, height: u64, cycles_price: u64, proposer: Address, block_hash: Hash, signed_txs: Vec, cycles_limit: 
u64, timestamp: u64, ) -> ProtocolResult<()> { let exec_info = ExecuteInfo { ctx, height, chain_id, cycles_price, block_hash, signed_txs, order_root, proposer, cycles_limit, timestamp, }; let mut tx = self.exec_queue.clone(); tx.try_send(exec_info).map_err(|e| match e { TrySendError::Closed(_) => panic!("exec queue dropped!"), _ => ConsensusError::ExecuteErr(e.to_string()), })?; Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_last_validators( &self, ctx: Context, height: u64, ) -> ProtocolResult> { let header = self .storage .get_block_header(ctx, height) .await? .ok_or(ConsensusError::StorageItemNotFound)?; Ok(header.validators) } /// Get the current height from storage. #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_current_height(&self, ctx: Context) -> ProtocolResult { let header = self.storage.get_latest_block_header(ctx).await?; Ok(header.height) } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn pull_block(&self, ctx: Context, height: u64, end: &str) -> ProtocolResult { log::debug!("consensus: send rpc pull block {}", height); let res = self .network .call::(ctx, end, FixedHeight::new(height), Priority::High) .await?; Ok(res.inner) } #[muta_apm::derive::tracing_span(kind = "consensus.adapter", logs = "{'txs_len': 'txs.len()'}")] async fn verify_txs(&self, ctx: Context, height: u64, txs: &[Hash]) -> ProtocolResult<()> { if let Err(e) = self .mempool .ensure_order_txs(ctx.clone(), Some(height), txs) .await { log::error!("verify_txs error {:?}", e); return Err(ConsensusError::VerifyTransaction(height).into()); } Ok(()) } } #[async_trait] impl SynchronizationAdapter for OverlordConsensusAdapter where EF: ExecutorFactory, M: MemPool + 'static, N: Rpc + PeerTrust + Gossip + Network + 'static, S: Storage + 'static, DB: cita_trie::DB + 'static, Mapping: ServiceMapping + 'static, { #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] fn update_status( &self, ctx: Context, 
height: u64, consensus_interval: u64, propose_ratio: u64, prevote_ratio: u64, precommit_ratio: u64, brake_ratio: u64, validators: Vec, ) -> ProtocolResult<()> { self.overlord_handler .read() .as_ref() .expect("Please set the overlord handle first") .send_msg( ctx, OverlordMsg::RichStatus(gen_overlord_status( height + 1, consensus_interval, propose_ratio, prevote_ratio, precommit_ratio, brake_ratio, validators, )), ) .map_err(|e| ConsensusError::OverlordErr(Box::new(e)))?; Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus.adapter", logs = "{'txs_len': 'txs.len()'}")] fn sync_exec( &self, ctx: Context, params: &ExecutorParams, txs: &[SignedTransaction], ) -> ProtocolResult { let mut executor = EF::from_root( params.state_root.clone(), Arc::clone(&self.trie_db), Arc::clone(&self.storage), Arc::clone(&self.service_mapping), )?; let inst = Instant::now(); let resp = executor.exec(ctx, params, txs)?; common_apm::metrics::consensus::CONSENSUS_TIME_HISTOGRAM_VEC_STATIC .exec .observe(common_apm::metrics::duration_to_sec(inst.elapsed())); Ok(resp) } /// Pull some blocks from other nodes from `begin` to `end`. #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_block_from_remote(&self, ctx: Context, height: u64) -> ProtocolResult { let res = self .network .call::( ctx, RPC_SYNC_PULL_BLOCK, FixedHeight::new(height), Priority::High, ) .await; match res { Ok(data) => { common_apm::metrics::consensus::CONSENSUS_RESULT_COUNTER_VEC_STATIC .get_block_from_remote .success .inc(); Ok(data.inner) } Err(err) => { common_apm::metrics::consensus::CONSENSUS_RESULT_COUNTER_VEC_STATIC .get_block_from_remote .failure .inc(); Err(err) } } } /// Pull signed transactions corresponding to the given hashes from other /// nodes. 
#[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'txs_len': 'hashes.len()'}" )] async fn get_txs_from_remote( &self, ctx: Context, height: u64, hashes: &[Hash], ) -> ProtocolResult> { let res = self .network .call::( ctx, RPC_SYNC_PULL_TXS, PullTxsRequest::new(height, hashes.to_vec()), Priority::High, ) .await?; Ok(res.inner) } /// Pull a proof of certain block from other nodes #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_proof_from_remote(&self, ctx: Context, height: u64) -> ProtocolResult { let ret = self .network .call::( ctx.clone(), RPC_SYNC_PULL_PROOF, FixedHeight::new(height), Priority::High, ) .await?; Ok(ret.inner) } } #[async_trait] impl CommonConsensusAdapter for OverlordConsensusAdapter where EF: ExecutorFactory, M: MemPool + 'static, N: Rpc + PeerTrust + Gossip + Network + 'static, S: Storage + 'static, DB: cita_trie::DB + 'static, Mapping: ServiceMapping + 'static, { /// Save a block to the database. #[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'txs_len': 'block.ordered_tx_hashes.len()'}" )] async fn save_block(&self, ctx: Context, block: Block) -> ProtocolResult<()> { self.storage.insert_block(ctx, block).await } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn save_proof(&self, ctx: Context, proof: Proof) -> ProtocolResult<()> { self.storage.update_latest_proof(ctx, proof).await } /// Save some signed transactions to the database. 
#[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'txs_len': 'signed_txs.len()'}" )] async fn save_signed_txs( &self, ctx: Context, block_height: u64, signed_txs: Vec, ) -> ProtocolResult<()> { self.storage .insert_transactions(ctx, block_height, signed_txs) .await } #[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'receipts_len': 'receipts.len()'}" )] async fn save_receipts( &self, ctx: Context, height: u64, receipts: Vec, ) -> ProtocolResult<()> { self.storage.insert_receipts(ctx, height, receipts).await } /// Flush the given transactions in the mempool. #[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'flush_txs_len': 'ordered_tx_hashes.len()'}" )] async fn flush_mempool(&self, ctx: Context, ordered_tx_hashes: &[Hash]) -> ProtocolResult<()> { self.mempool.flush(ctx, ordered_tx_hashes).await } /// Get a block corresponding to the given height. #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_block_by_height(&self, ctx: Context, height: u64) -> ProtocolResult { self.storage .get_block(ctx, height) .await? .ok_or_else(|| ConsensusError::StorageItemNotFound.into()) } async fn get_block_header_by_height( &self, ctx: Context, height: u64, ) -> ProtocolResult { self.storage .get_block_header(ctx, height) .await? .ok_or_else(|| ConsensusError::StorageItemNotFound.into()) } /// Get the current height from storage. 
#[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn get_current_height(&self, ctx: Context) -> ProtocolResult { let header = self.storage.get_latest_block_header(ctx).await?; Ok(header.height) } #[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'txs_len': 'tx_hashes.len()'}" )] async fn get_txs_from_storage( &self, ctx: Context, tx_hashes: &[Hash], ) -> ProtocolResult> { let futs = tx_hashes .iter() .map(|tx_hash| self.storage.get_transaction_by_hash(ctx.clone(), tx_hash)) .collect::>(); futures::future::try_join_all(futs).await.map(|txs| { txs.into_iter() .filter_map(|opt_tx| opt_tx) .collect::>() }) } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn broadcast_height(&self, ctx: Context, height: u64) -> ProtocolResult<()> { self.network .broadcast(ctx.clone(), BROADCAST_HEIGHT, height, Priority::High) .await } /// Get metadata by the giving height. #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] fn get_metadata( &self, ctx: Context, state_root: MerkleRoot, height: u64, timestamp: u64, proposer: Address, ) -> ProtocolResult { let executor = EF::from_root( state_root.clone(), Arc::clone(&self.trie_db), Arc::clone(&self.storage), Arc::clone(&self.service_mapping), )?; let caller = Address::from_hash(Hash::digest(protocol::address_hrp().as_str()))?; let params = ExecutorParams { state_root, height, timestamp, cycles_limit: u64::max_value(), proposer, }; let exec_resp = executor.read(¶ms, &caller, 1, &TransactionRequest { service_name: "metadata".to_string(), method: "get_metadata".to_string(), payload: "".to_string(), })?; Ok(serde_json::from_str(&exec_resp.succeed_data).expect("Decode metadata failed!")) } fn tag_consensus(&self, ctx: Context, pub_keys: Vec) -> ProtocolResult<()> { let peer_ids_bytes = pub_keys .iter() .map(|pk| PeerId::from_pubkey_bytes(pk).map(PeerIdExt::into_bytes_ext)) .collect::>()?; self.network.tag_consensus(ctx, peer_ids_bytes) } #[muta_apm::derive::tracing_span(kind 
= "consensus.adapter")] fn report_bad(&self, ctx: Context, feedback: TrustFeedback) { self.network.report(ctx, feedback); } fn set_args(&self, _context: Context, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64) { self.mempool .set_args(timeout_gap, cycles_limit, max_tx_size); } /// this function verify all info in header except proof and roots #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn verify_block_header(&self, ctx: Context, block: &Block) -> ProtocolResult<()> { let previous_block_header = self .get_block_header_by_height(ctx.clone(), block.header.height - 1) .await .map_err(|e| { log::error!( "[consensus] verify_block_header, previous_block_header {} fails", block.header.height - 1, ); e })?; let previous_block_hash = Hash::digest(previous_block_header.encode_fixed()?); if previous_block_hash != block.header.prev_hash { log::error!( "[consensus] verify_block_header, previous_block_hash: {:?}, block.header.prev_hash: {:?}", previous_block_hash, block.header.prev_hash ); return Err( ConsensusError::VerifyBlockHeader(block.header.height, PreviousBlockHash).into(), ); } // the block 0 and 1 's proof is consensus-ed by community if block.header.height > 1u64 && block.header.prev_hash != block.header.proof.block_hash { log::error!( "[consensus] verify_block_header, verifying_block header : {:?}", block.header ); return Err(ConsensusError::VerifyBlockHeader(block.header.height, ProofHash).into()); } // verify proposer and validators let previous_metadata = self.get_metadata( ctx, previous_block_header.state_root.clone(), previous_block_header.height, previous_block_header.timestamp, previous_block_header.proposer, )?; let authority_map = previous_metadata .verifier_list .iter() .map(|v| { let address = v.pub_key.decode(); let node = Node { address: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }; (address, node) }) .collect::>(); // TODO: useless check // check proposer if block.header.height != 0 && 
!previous_metadata .verifier_list .iter() .any(|v| v.address == block.header.proposer) { log::error!( "[consensus] verify_block_header, block.header.proposer: {:?}, authority_map: {:?}", block.header.proposer, authority_map ); return Err(ConsensusError::VerifyBlockHeader(block.header.height, Proposer).into()); } // check validators for validator in block.header.validators.iter() { let validator_address = Address::from_pubkey_bytes(validator.pub_key.clone()); if !authority_map.contains_key(&validator.pub_key) { log::error!( "[consensus] verify_block_header, validator.address: {:?}, authority_map: {:?}", validator_address, authority_map ); return Err(ConsensusError::VerifyBlockHeader( block.header.height, BlockHeaderField::Validator, ) .into()); } else { let node = authority_map.get(&validator.pub_key).unwrap(); if node.vote_weight != validator.vote_weight || node.propose_weight != validator.vote_weight { log::error!( "[consensus] verify_block_header, validator.address: {:?}, authority_map: {:?}", validator_address, authority_map ); return Err(ConsensusError::VerifyBlockHeader( block.header.height, BlockHeaderField::Weight, ) .into()); } } } Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus.adapter")] async fn verify_proof( &self, ctx: Context, block_header: &BlockHeader, proof: &Proof, ) -> ProtocolResult<()> { // the block 0 has no proof, which is consensus-ed by community, not by chain if block_header.height == 0 { return Ok(()); }; if block_header.height != proof.height { log::error!( "[consensus] verify_proof, block_header.height: {}, proof.height: {}", block_header.height, proof.height ); return Err(ConsensusError::VerifyProof( block_header.height, HeightMismatch(block_header.height, proof.height), ) .into()); } let blockhash = Hash::digest(block_header.encode_fixed()?); if blockhash != proof.block_hash { log::error!( "[consensus] verify_proof, blockhash: {:?}, proof.block_hash: {:?}", blockhash, proof.block_hash ); return 
Err(ConsensusError::VerifyProof(block_header.height, HashMismatch).into());
        }

        // Fetch the previous header: it locates the metadata the proof's
        // authority set must come from.
        let previous_block_header = self
            .get_block_header_by_height(ctx.clone(), block_header.height - 1)
            .await
            .map_err(|e| {
                log::error!(
                    "[consensus] verify_proof, previous_block {} fails",
                    block_header.height - 1,
                );
                e
            })?;

        // the auth_list for the target should comes from previous height
        let metadata = self.get_metadata(
            ctx.clone(),
            previous_block_header.state_root.clone(),
            previous_block_header.height,
            previous_block_header.timestamp,
            previous_block_header.proposer,
        )?;

        let mut authority_list = metadata
            .verifier_list
            .iter()
            .map(|v| Node {
                address: v.pub_key.decode(),
                propose_weight: v.propose_weight,
                vote_weight: v.vote_weight,
            })
            .collect::>();

        // Decode the proof's bitmap into the concrete set of signers; fails
        // when the bitmap does not fit the authority list.
        let signed_voters = extract_voters(&mut authority_list, &proof.bitmap).map_err(|_| {
            log::error!("[consensus] extract_voters fails, bitmap error");
            ConsensusError::VerifyProof(block_header.height, BitMap)
        })?;

        // Reconstruct the precommit vote that the voters actually signed.
        let vote = Vote {
            height: proof.height,
            round: proof.round,
            vote_type: VoteType::Precommit,
            block_hash: proof.block_hash.as_bytes(),
        };

        let weight_map = authority_list
            .iter()
            .map(|node| (node.address.clone(), node.vote_weight))
            .collect::>();

        // Signers must hold strictly more than 2/3 of the total vote weight.
        self.verify_proof_weight(
            ctx.clone(),
            block_header.height,
            weight_map,
            signed_voters.clone(),
        )?;

        let vote_hash = self.crypto.hash(Bytes::from(rlp::encode(&vote)));
        // Collect the BLS public keys of exactly the voters that signed.
        let hex_pubkeys = metadata
            .verifier_list
            .iter()
            .filter_map(|v| {
                if signed_voters.contains(&v.pub_key.decode()) {
                    Some(v.bls_pub_key.clone())
                } else {
                    None
                }
            })
            .collect::>();

        self.verify_proof_signature(
            ctx.clone(),
            block_header.height,
            vote_hash.clone(),
            proof.signature.clone(),
            hex_pubkeys,
        ).map_err(|e| {
            log::error!("[consensus] verify_proof_signature error, height {}, vote: {:?}, vote_hash:{:?}, sig:{:?}, signed_voter:{:?}",
                block_header.height,
                vote,
                vote_hash,
                proof.signature,
                signed_voters,
            );
            e
        })?;
        Ok(())
    }

    /// Verify an aggregated BLS signature over `vote_hash` against the given
    /// hex-encoded voter public keys.
    #[muta_apm::derive::tracing_span(kind = "consensus.adapter")]
    fn verify_proof_signature(
        &self,
        ctx: Context,
block_height: u64,
        vote_hash: Bytes,
        aggregated_signature_bytes: Bytes,
        vote_keys: Vec,
    ) -> ProtocolResult<()> {
        // Decode every hex-encoded BLS public key before verification.
        let mut pub_keys = Vec::new();
        for hex in vote_keys.into_iter() {
            pub_keys.push(convert_hex_to_bls_pubkeys(hex)?)
        }

        self.crypto
            .inner_verify_aggregated_signature(vote_hash, pub_keys, aggregated_signature_bytes)
            .map_err(|e| {
                log::error!("[consensus] verify_proof_signature error: {}", e);
                ConsensusError::VerifyProof(block_height, Signature).into()
            })
    }

    /// Check that the voters recorded in a proof hold strictly more than 2/3
    /// of the total vote weight of the authority set.
    #[muta_apm::derive::tracing_span(kind = "consensus.adapter")]
    fn verify_proof_weight(
        &self,
        ctx: Context,
        block_height: u64,
        weight_map: HashMap,
        signed_voters: Vec,
    ) -> ProtocolResult<()> {
        let total_validator_weight: u64 = weight_map.iter().map(|pair| u64::from(*pair.1)).sum();

        // Accumulate the vote weight of every signer; a signer missing from
        // the weight map invalidates the whole proof.
        // (Cleanup: the original did `contains_key` followed by
        // `get(..).ok_or(WeightNotFound)`, making the `ok_or` branch
        // unreachable dead code; a single `get` lookup preserves the
        // observable behavior.)
        let mut accumulator = 0u64;
        for signed_voter_address in signed_voters {
            match weight_map.get(signed_voter_address.as_ref()) {
                Some(weight) => accumulator += u64::from(*weight),
                None => {
                    log::error!(
                        "[consensus] verify_proof_weight, weight not found, signed_voter_address: {:?}",
                        signed_voter_address
                    );
                    return Err(
                        ConsensusError::VerifyProof(block_height, BlockProofField::Validator).into(),
                    );
                }
            }
        }

        // BFT safety threshold: strictly more than 2/3 of the total weight.
        if 3 * accumulator <= 2 * total_validator_weight {
            log::error!(
                "[consensus] verify_proof_weight, accumulator: {}, total: {}",
                accumulator,
                total_validator_weight
            );
            return Err(ConsensusError::VerifyProof(block_height, BlockProofField::Weight).into());
        }
        Ok(())
    }
}

impl OverlordConsensusAdapter
where
    EF: ExecutorFactory,
    M: MemPool + 'static,
    N: Rpc + PeerTrust + Gossip + Network + 'static,
    S: Storage + 'static,
    DB: cita_trie::DB + 'static,
    Mapping: ServiceMapping + 'static,
{
    /// Build the adapter together with its execution demon; `gap` bounds the
    /// depth of the execution queue.
    pub fn new(
        network: Arc,
        mempool: Arc,
        storage: Arc,
        trie_db: Arc,
        service_mapping: Arc,
        status_agent: StatusAgent,
        crypto:
Arc, gap: usize, ) -> ProtocolResult { let (exec_queue, rx) = channel(gap); let exec_demons = Some(ExecDemons::new( Arc::clone(&storage), Arc::clone(&trie_db), Arc::clone(&service_mapping), rx, status_agent, )); let adapter = OverlordConsensusAdapter { network, mempool, storage, trie_db, service_mapping, overlord_handler: RwLock::new(None), exec_queue, exec_demons, crypto, }; Ok(adapter) } pub fn take_exec_demon(&mut self) -> ExecDemons { assert!(self.exec_demons.is_some()); self.exec_demons.take().unwrap() } pub fn set_overlord_handler(&self, handler: OverlordHandler) { *self.overlord_handler.write() = Some(handler) } } #[derive(Debug)] pub struct ExecDemons { storage: Arc, trie_db: Arc, service_mapping: Arc, pin_ef: PhantomData, queue: Receiver, status: StatusAgent, } impl ExecDemons where S: Storage, DB: cita_trie::DB, EF: ExecutorFactory, Mapping: ServiceMapping, { fn new( storage: Arc, trie_db: Arc, service_mapping: Arc, rx: Receiver, status_agent: StatusAgent, ) -> Self { ExecDemons { storage, trie_db, service_mapping, queue: rx, pin_ef: PhantomData, status: status_agent, } } pub async fn run(mut self) { loop { let inst = Instant::now(); if let Err(e) = self.process().await { log::error!("muta-consensus: executor demons error {:?}", e); } common_apm::metrics::consensus::CONSENSUS_TIME_HISTOGRAM_VEC_STATIC .block .observe(common_apm::metrics::duration_to_sec(inst.elapsed())); } } async fn process(&mut self) -> ProtocolResult<()> { if let Some(info) = self.queue.recv().await { self.exec(info.ctx.clone(), info).await } else { Err(ConsensusError::Other("Queue disconnect".to_string()).into()) } } #[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'height': 'info.height', 'txs_len': 'info.signed_txs.len()'}" )] async fn exec(&self, ctx: Context, info: ExecuteInfo) -> ProtocolResult<()> { let height = info.height; let txs = info.signed_txs; let order_root = info.order_root; let state_root = self.status.to_inner().get_latest_state_root(); let now 
= Instant::now(); let mut executor = EF::from_root( state_root.clone(), Arc::clone(&self.trie_db), Arc::clone(&self.storage), Arc::clone(&self.service_mapping), )?; let exec_params = ExecutorParams { state_root: state_root.clone(), height, timestamp: info.timestamp, cycles_limit: info.cycles_limit, proposer: info.proposer, }; let resp = executor.exec(ctx.clone(), &exec_params, &txs)?; common_apm::metrics::consensus::CONSENSUS_TIME_HISTOGRAM_VEC_STATIC .exec .observe(common_apm::metrics::duration_to_sec(now.elapsed())); log::info!( "[consensus-adapter]: exec transactions cost {:?} transactions len {:?}", now.elapsed(), txs.len(), ); let now = Instant::now(); self.save_receipts(info.ctx.clone(), height, resp.receipts.clone()) .await?; log::info!( "[consensus-adapter]: save receipts cost {:?} receipts len {:?}", now.elapsed(), resp.receipts.len(), ); self.status.update_by_executed(gen_executed_info( info.ctx.clone(), resp, height, order_root, )); Ok(()) } #[muta_apm::derive::tracing_span( kind = "consensus.adapter", logs = "{'receipts_len': 'receipts.len()'}" )] async fn save_receipts( &self, ctx: Context, height: u64, receipts: Vec, ) -> ProtocolResult<()> { self.storage.insert_receipts(ctx, height, receipts).await } } fn gen_executed_info( ctx: Context, exec_resp: ExecutorResp, height: u64, order_root: MerkleRoot, ) -> ExecutedInfo { let cycles = exec_resp.all_cycles_used; let receipt = Merkle::from_hashes( exec_resp .receipts .iter() .map(|r| Hash::digest(r.to_owned().encode_fixed().unwrap())) .collect::>(), ) .get_root_hash() .unwrap_or_else(Hash::from_empty); ExecutedInfo { ctx, exec_height: height, cycles_used: cycles, receipt_root: receipt, confirm_root: order_root, state_root: exec_resp.state_root, } } ================================================ FILE: core/consensus/src/consensus.rs ================================================ use std::sync::Arc; use async_trait::async_trait; use creep::Context; use futures::lock::Mutex; use overlord::types::{ 
AggregatedVote, Node, OverlordMsg, SignedChoke, SignedProposal, SignedVote, Status, }; use overlord::{DurationConfig, Overlord, OverlordHandler}; use common_apm::muta_apm; use protocol::traits::{Consensus, ConsensusAdapter, NodeInfo}; use protocol::types::Validator; use protocol::ProtocolResult; use crate::engine::ConsensusEngine; use crate::fixed_types::FixedPill; use crate::status::StatusAgent; use crate::util::OverlordCrypto; use crate::wal::{ConsensusWal, SignedTxsWAL}; use crate::{ConsensusError, ConsensusType}; /// Provide consensus pub struct OverlordConsensus { /// Overlord consensus protocol instance. inner: Arc< Overlord, OverlordCrypto, ConsensusEngine>, >, /// An overlord consensus protocol handler. handler: OverlordHandler, } #[async_trait] impl Consensus for OverlordConsensus { #[muta_apm::derive::tracing_span(kind = "consensus")] async fn set_proposal(&self, ctx: Context, proposal: Vec) -> ProtocolResult<()> { let signed_proposal: SignedProposal = rlp::decode(&proposal) .map_err(|_| ConsensusError::DecodeErr(ConsensusType::SignedProposal))?; let msg = OverlordMsg::SignedProposal(signed_proposal); tracing_overlord_message(ctx.clone(), &msg); self.handler .send_msg(ctx, msg) .expect("Overlord handler disconnect"); Ok(()) } async fn set_vote(&self, ctx: Context, vote: Vec) -> ProtocolResult<()> { let ctx = match muta_apm::MUTA_TRACER.span("consensus.set_vote", vec![ muta_apm::rustracing::tag::Tag::new("kind", "consensus"), ]) { Some(mut span) => { span.log(|log| { log.time(std::time::SystemTime::now()); }); ctx.with_value("parent_span_ctx", span.context().cloned()) } None => ctx, }; let signed_vote: SignedVote = rlp::decode(&vote).map_err(|_| ConsensusError::DecodeErr(ConsensusType::SignedVote))?; let msg = OverlordMsg::SignedVote(signed_vote); tracing_overlord_message(ctx.clone(), &msg); self.handler .send_msg(ctx, msg) .expect("Overlord handler disconnect"); Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus")] async fn set_qc(&self, ctx: 
Context, qc: Vec) -> ProtocolResult<()> { let aggregated_vote: AggregatedVote = rlp::decode(&qc) .map_err(|_| ConsensusError::DecodeErr(ConsensusType::AggregateVote))?; let msg = OverlordMsg::AggregatedVote(aggregated_vote); tracing_overlord_message(ctx.clone(), &msg); self.handler .send_msg(ctx, msg) .expect("Overlord handler disconnect"); Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus")] async fn set_choke(&self, ctx: Context, choke: Vec) -> ProtocolResult<()> { let signed_choke: SignedChoke = rlp::decode(&choke) .map_err(|_| ConsensusError::DecodeErr(ConsensusType::SignedChoke))?; let msg = OverlordMsg::SignedChoke(signed_choke); tracing_overlord_message(ctx.clone(), &msg); self.handler .send_msg(ctx, msg) .expect("Overlord handler disconnect"); Ok(()) } } impl OverlordConsensus { pub fn new( status_agent: StatusAgent, node_info: NodeInfo, crypto: Arc, txs_wal: Arc, adapter: Arc, lock: Arc>, consensus_wal: Arc, ) -> Self { let engine = Arc::new(ConsensusEngine::new( status_agent.clone(), node_info.clone(), txs_wal, Arc::clone(&adapter), Arc::clone(&crypto), lock, consensus_wal, )); let overlord = Overlord::new(node_info.self_pub_key, Arc::clone(&engine), crypto, engine); let overlord_handler = overlord.get_handler(); let status = status_agent.to_inner(); if status.latest_committed_height == 0 { overlord_handler .send_msg( Context::new(), OverlordMsg::RichStatus(gen_overlord_status( status.latest_committed_height + 1, status.consensus_interval, status.propose_ratio, status.prevote_ratio, status.precommit_ratio, status.brake_ratio, status.validators, )), ) .unwrap(); } Self { inner: Arc::new(overlord), handler: overlord_handler, } } pub fn get_overlord_handler(&self) -> OverlordHandler { self.handler.clone() } pub async fn run( &self, init_height: u64, interval: u64, authority_list: Vec, timer_config: Option, ) -> ProtocolResult<()> { self.inner .run(init_height, interval, authority_list, timer_config) .await .map_err(|e| 
ConsensusError::OverlordErr(Box::new(e)))?; Ok(()) } } pub fn gen_overlord_status( height: u64, interval: u64, propose_ratio: u64, prevote_ratio: u64, precommit_ratio: u64, brake_ratio: u64, validators: Vec, ) -> Status { let mut authority_list = validators .into_iter() .map(|v| Node { address: v.pub_key.clone(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect::>(); authority_list.sort(); Status { height, interval: Some(interval), timer_config: Some(DurationConfig { propose_ratio, prevote_ratio, precommit_ratio, brake_ratio, }), authority_list, } } trait OverlordMsgExt { fn get_height(&self) -> String; fn get_round(&self) -> String; } impl OverlordMsgExt for OverlordMsg { fn get_height(&self) -> String { match self { OverlordMsg::SignedProposal(sp) => sp.proposal.height.to_string(), OverlordMsg::SignedVote(sv) => sv.get_height().to_string(), OverlordMsg::AggregatedVote(av) => av.get_height().to_string(), OverlordMsg::RichStatus(s) => s.height.to_string(), OverlordMsg::SignedChoke(sc) => sc.choke.height.to_string(), _ => "".to_owned(), } } fn get_round(&self) -> String { match self { OverlordMsg::SignedProposal(sp) => sp.proposal.round.to_string(), OverlordMsg::SignedVote(sv) => sv.get_round().to_string(), OverlordMsg::AggregatedVote(av) => av.get_round().to_string(), OverlordMsg::SignedChoke(sc) => sc.choke.round.to_string(), _ => "".to_owned(), } } } #[muta_apm::derive::tracing_span( kind = "consensus", logs = "{ 'height': 'msg.get_height()', 'round': 'msg.get_round()' }" )] pub fn tracing_overlord_message(ctx: Context, msg: &OverlordMsg) { let _ = msg; } ================================================ FILE: core/consensus/src/engine.rs ================================================ use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::error::Error; use std::sync::Arc; use std::time::{Duration, Instant}; use async_trait::async_trait; use futures::lock::Mutex; use futures_timer::Delay; use json::JsonValue; use 
log::{error, info, warn}; use overlord::error::ConsensusError as OverlordError; use overlord::types::{Commit, Node, OverlordMsg, Status, ViewChangeReason}; use overlord::{Consensus as Engine, DurationConfig, Wal}; use parking_lot::RwLock; use rlp::Encodable; use common_apm::muta_apm; use common_crypto::BlsPublicKey; use common_logger::{json, log}; use common_merkle::Merkle; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ConsensusAdapter, Context, MessageTarget, NodeInfo, TrustFeedback}; use protocol::types::{ Address, Block, BlockHeader, Hash, MerkleRoot, Metadata, Pill, Proof, SignedTransaction, Validator, }; use protocol::{Bytes, ProtocolError, ProtocolResult}; use crate::fixed_types::FixedPill; use crate::message::{ END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE, END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, }; use crate::status::StatusAgent; use crate::util::{check_list_roots, digest_signed_transactions, time_now, OverlordCrypto}; use crate::wal::{ConsensusWal, SignedTxsWAL}; use crate::ConsensusError; const RETRY_COMMIT_INTERVAL: u64 = 1000; // 1s const RETRY_CHECK_ROOT_LIMIT: u8 = 15; const RETRY_CHECK_ROOT_INTERVAL: u64 = 100; // 100ms /// validator is for create new block, and authority is for build overlord /// status. 
pub struct ConsensusEngine { status_agent: StatusAgent, node_info: NodeInfo, exemption_hash: RwLock>, adapter: Arc, txs_wal: Arc, crypto: Arc, lock: Arc>, last_commit_time: RwLock, consensus_wal: Arc, last_check_block_fail_reason: RwLock, } #[async_trait] impl Engine for ConsensusEngine { #[muta_apm::derive::tracing_span( kind = "consensus.engine", logs = "{'next_height': 'next_height'}" )] async fn get_block( &self, ctx: Context, next_height: u64, ) -> Result<(FixedPill, Bytes), Box> { let current_consensus_status = self.status_agent.to_inner(); if current_consensus_status.latest_committed_height != current_consensus_status.current_proof.height { error!("[consensus] get_block for {}, error, current_consensus_status.current_height {} != current_consensus_status.current_proof.height, proof :{:?}", current_consensus_status.latest_committed_height, current_consensus_status.current_proof.height, current_consensus_status.current_proof) } let (ordered_tx_hashes, propose_hashes) = self .adapter .get_txs_from_mempool( ctx.clone(), next_height, current_consensus_status.cycles_limit, current_consensus_status.tx_num_limit, ) .await? 
.clap(); let signed_txs = self .adapter .get_full_txs(ctx.clone(), &ordered_tx_hashes) .await?; let order_signed_transactions_hash = digest_signed_transactions(&signed_txs)?; if current_consensus_status.latest_committed_height != next_height - 1 { return Err(ProtocolError::from(ConsensusError::MissingBlockHeader( current_consensus_status.latest_committed_height, )) .into()); } let order_root = Merkle::from_hashes(ordered_tx_hashes.clone()).get_root_hash(); let state_root = current_consensus_status.get_latest_state_root(); let header = BlockHeader { chain_id: self.node_info.chain_id.clone(), prev_hash: current_consensus_status.current_hash, height: next_height, exec_height: current_consensus_status.exec_height, timestamp: time_now(), order_root: order_root.unwrap_or_else(Hash::from_empty), order_signed_transactions_hash, confirm_root: current_consensus_status.list_confirm_root, state_root, receipt_root: current_consensus_status.list_receipt_root.clone(), cycles_used: current_consensus_status.list_cycles_used, proposer: self.node_info.self_address.clone(), proof: current_consensus_status.current_proof.clone(), validator_version: 0u64, validators: current_consensus_status.validators.clone(), }; if header.height != header.proof.height + 1 { error!( "[consensus] get_block for {}, proof error, proof height {} mismatch", header.height, header.proof.height, ); } let block = Block { header, ordered_tx_hashes, }; let pill = Pill { block, propose_hashes, }; let fixed_pill = FixedPill { inner: pill.clone(), }; let hash = Hash::digest(pill.block.header.encode_fixed()?).as_bytes(); let mut set = self.exemption_hash.write(); set.insert(hash.clone()); Ok((fixed_pill, hash)) } #[muta_apm::derive::tracing_span( kind = "consensus.engine", logs = "{'next_height': 'next_height', 'hash': 'Hash::from_bytes(hash.clone()).unwrap().as_hex()', 'txs_len': 'block.inner.block.ordered_tx_hashes.len()'}" )] async fn check_block( &self, ctx: Context, next_height: u64, hash: Bytes, block: 
FixedPill, ) -> Result<(), Box> { let time = Instant::now(); if block.inner.block.header.height != block.inner.block.header.proof.height + 1 { error!("[consensus-engine]: check_block for overlord receives a proposal, error, block height {}, block {:?}", block.inner.block.header.height,block.inner.block); } let order_hashes = block.get_ordered_hashes(); let order_hashes_len = order_hashes.len(); let exemption = { self.exemption_hash.read().contains(&hash) }; let sync_tx_hashes = block.get_propose_hashes(); let pill = block.inner; gauge_txs_len(&pill); // If the block is proposed by self, it does not need to check. Get full signed // transactions directly. if !exemption { if let Err(e) = self.inner_check_block(ctx.clone(), &pill.block).await { let mut reason = self.last_check_block_fail_reason.write(); *reason = e.to_string(); return Err(e.into()); } let adapter = Arc::clone(&self.adapter); let ctx_clone = ctx.clone(); tokio::spawn(async move { if let Err(e) = sync_txs(ctx_clone, adapter, sync_tx_hashes).await { error!("Consensus sync block error {}", e); } }); } info!( "[consensus-engine]: check block cost {:?}", Instant::now() - time ); let time = Instant::now(); let txs = self.adapter.get_full_txs(ctx, &order_hashes).await?; info!( "[consensus-engine]: get txs cost {:?}", Instant::now() - time ); let time = Instant::now(); self.txs_wal.save( next_height, pill.block.header.order_signed_transactions_hash, txs, )?; info!( "[consensus-engine]: write wal cost {:?} order_hashes_len {:?}", time.elapsed(), order_hashes_len ); Ok(()) } /// **TODO:** the overlord interface and process needs to be changed. /// Get the `FixedSignedTxs` from the argument rather than get it from /// mempool. 
#[muta_apm::derive::tracing_span(
        kind = "consensus.engine",
        logs = "{'current_height': 'current_height', 'txs_len': 'commit.content.inner.block.ordered_tx_hashes.len()'}"
    )]
    async fn commit(
        &self,
        ctx: Context,
        current_height: u64,
        commit: Commit,
    ) -> Result> {
        // A held lock means a sync is in progress; refuse to commit meanwhile.
        let lock = self.lock.try_lock();
        if lock.is_none() {
            return Err(ProtocolError::from(ConsensusError::LockInSync).into());
        }

        let current_consensus_status = self.status_agent.to_inner();

        // Already executed at this height: hand overlord the status for the
        // next height without re-committing anything.
        if current_consensus_status.exec_height == current_height {
            let status = Status {
                height: current_height + 1,
                interval: Some(current_consensus_status.consensus_interval),
                timer_config: Some(DurationConfig {
                    propose_ratio: current_consensus_status.propose_ratio,
                    prevote_ratio: current_consensus_status.prevote_ratio,
                    precommit_ratio: current_consensus_status.precommit_ratio,
                    brake_ratio: current_consensus_status.brake_ratio,
                }),
                // NOTE(review): `¤t_...` below looks like extraction-mangled
                // `&current_...`; tokens preserved as found.
                authority_list: covert_to_overlord_authority(¤t_consensus_status.validators),
            };
            return Ok(status);
        }

        // Commits must arrive strictly in height order.
        if current_height != current_consensus_status.latest_committed_height + 1 {
            return Err(ProtocolError::from(ConsensusError::OutdatedCommit(
                current_height,
                current_consensus_status.latest_committed_height,
            ))
            .into());
        }

        let pill = commit.content.inner;
        let block_hash = Hash::from_bytes(commit.proof.block_hash.clone())?;
        let signature = commit.proof.signature.signature.clone();
        let bitmap = commit.proof.signature.address_bitmap.clone();
        let txs_len = pill.block.ordered_tx_hashes.len();

        // Storage save the latest proof.
        let proof = Proof {
            height: commit.proof.height,
            round: commit.proof.round,
            block_hash: block_hash.clone(),
            signature,
            bitmap,
        };

        common_apm::metrics::consensus::ENGINE_ROUND_GAUGE.set(commit.proof.round as i64);

        self.adapter.save_proof(ctx.clone(), proof.clone()).await?;

        // Get full transactions from mempool. If is error, try get from wal.
        let ordered_tx_hashes = pill.block.ordered_tx_hashes.clone();
        let signed_txs = match self
            .adapter
            .get_full_txs(ctx.clone(), &ordered_tx_hashes)
            .await
        {
            Ok(txs) => txs,
            Err(_) => self.txs_wal.load(
                current_height,
                pill.block.header.order_signed_transactions_hash.clone(),
            )?,
        };

        // Execute transactions
        // Retry forever with a fixed delay: commit must not proceed until the
        // block has been queued for execution.
        loop {
            if self
                .exec(
                    ctx.clone(),
                    pill.block.header.order_root.clone(),
                    current_height,
                    pill.block.header.proposer.clone(),
                    pill.block.header.timestamp,
                    Hash::digest(pill.block.header.encode_fixed()?),
                    signed_txs.clone(),
                )
                .await
                .is_ok()
            {
                break;
            } else {
                Delay::new(Duration::from_millis(RETRY_COMMIT_INTERVAL)).await;
            }
        }

        let block_exec_height = pill.block.header.exec_height;
        let metadata = self.adapter.get_metadata(
            ctx.clone(),
            pill.block.header.state_root.clone(),
            pill.block.header.height,
            pill.block.header.timestamp,
            pill.block.header.proposer.clone(),
        )?;

        info!(
            "[consensus]: validator of height {} is {:?}",
            current_height + 1,
            metadata.verifier_list
        );

        // Persist block/receipts/txs and roll the local status forward.
        self.update_status(metadata, pill.block, proof, signed_txs)
            .await?;

        self.adapter
            .flush_mempool(ctx.clone(), &ordered_tx_hashes)
            .await?;
        self.adapter
            .broadcast_height(ctx.clone(), current_height)
            .await?;

        // WAL entries up to the executed height are no longer needed.
        self.txs_wal.remove(block_exec_height)?;

        // Clear the self-proposed exemption set for the next height.
        let mut set = self.exemption_hash.write();
        set.clear();

        let current_consensus_status = self.status_agent.to_inner();
        let status = Status {
            height: current_height + 1,
            interval: Some(current_consensus_status.consensus_interval),
            timer_config: Some(DurationConfig {
                propose_ratio: current_consensus_status.propose_ratio,
                prevote_ratio: current_consensus_status.prevote_ratio,
                precommit_ratio: current_consensus_status.precommit_ratio,
                brake_ratio: current_consensus_status.brake_ratio,
            }),
            authority_list: covert_to_overlord_authority(¤t_consensus_status.validators),
        };
        self.metric_commit(current_height, txs_len);

        Ok(status)
    }

    /// Only signed proposal and aggregated vote will be broadcast to others.
#[muta_apm::derive::tracing_span(kind = "consensus.engine")]
    async fn broadcast_to_other(
        &self,
        ctx: Context,
        msg: OverlordMsg,
    ) -> Result<(), Box> {
        // Map the overlord message to its gossip topic and RLP payload.
        let (end, msg) = match msg {
            OverlordMsg::SignedProposal(sp) => {
                let bytes = sp.rlp_bytes();
                (END_GOSSIP_SIGNED_PROPOSAL, bytes)
            }
            OverlordMsg::AggregatedVote(av) => {
                let bytes = av.rlp_bytes();
                (END_GOSSIP_AGGREGATED_VOTE, bytes)
            }
            OverlordMsg::SignedChoke(sc) => {
                let bytes = sc.rlp_bytes();
                (END_GOSSIP_SIGNED_CHOKE, bytes)
            }
            // Overlord never asks to broadcast any other variant.
            _ => unreachable!(),
        };

        self.adapter
            .transmit(ctx, msg, end, MessageTarget::Broadcast)
            .await?;
        Ok(())
    }

    /// Only signed vote will be transmit to the relayer.
    #[muta_apm::derive::tracing_span(
        kind = "consensus.engine",
        logs = "{'pub_key': 'hex::encode(pub_key.clone())'}"
    )]
    async fn transmit_to_relayer(
        &self,
        ctx: Context,
        pub_key: Bytes,
        msg: OverlordMsg,
    ) -> Result<(), Box> {
        match msg {
            OverlordMsg::SignedVote(sv) => {
                let msg = sv.rlp_bytes();
                self.adapter
                    .transmit(
                        ctx,
                        msg,
                        END_GOSSIP_SIGNED_VOTE,
                        MessageTarget::Specified(pub_key),
                    )
                    .await?;
            }
            OverlordMsg::AggregatedVote(av) => {
                let msg = av.rlp_bytes();
                self.adapter
                    .transmit(
                        ctx,
                        msg,
                        END_GOSSIP_AGGREGATED_VOTE,
                        MessageTarget::Specified(pub_key),
                    )
                    .await?;
            }
            // Only votes are relayed point-to-point.
            _ => unreachable!(),
        };
        Ok(())
    }

    /// This function is rarely used, so get the authority list from the
    /// RocksDB.
#[muta_apm::derive::tracing_span(
        kind = "consensus.engine",
        logs = "{'next_height': 'next_height'}"
    )]
    async fn get_authority_list(
        &self,
        ctx: Context,
        next_height: u64,
    ) -> Result, Box> {
        // Genesis has no authority list.
        if next_height == 0 {
            return Ok(vec![]);
        }

        // The authority set for `next_height` is defined by the metadata at
        // the previous block's state root.
        let old_block_header = self
            .adapter
            .get_block_header_by_height(ctx.clone(), next_height - 1)
            .await?;
        let old_metadata = self.adapter.get_metadata(
            ctx.clone(),
            old_block_header.state_root.clone(),
            // FIX: height and timestamp were passed in swapped order here;
            // every other get_metadata call site in this crate passes
            // (state_root, height, timestamp, proposer).
            old_block_header.height,
            old_block_header.timestamp,
            old_block_header.proposer,
        )?;
        let mut old_validators = old_metadata
            .verifier_list
            .into_iter()
            .map(|v| Node {
                address: v.pub_key.decode(),
                propose_weight: v.propose_weight,
                vote_weight: v.vote_weight,
            })
            .collect::>();
        // Overlord expects a sorted authority list.
        old_validators.sort();
        Ok(old_validators)
    }

    /// Report crypto/aggregation failures back to the network trust metric.
    fn report_error(&self, ctx: Context, err: OverlordError) {
        match err {
            OverlordError::CryptoErr(_) | OverlordError::AggregatedSignatureErr(_) => self
                .adapter
                .report_bad(ctx, TrustFeedback::Worse(err.to_string())),
            _ => (),
        }
    }

    /// Log every overlord view change; a failed block check additionally
    /// carries the recorded failure reason.
    fn report_view_change(&self, cx: Context, height: u64, round: u64, reason: ViewChangeReason) {
        let view_change_reason = match reason {
            ViewChangeReason::CheckBlockNotPass => {
                let e = self.last_check_block_fail_reason.read();
                reason.to_string() + " " + e.as_str()
            }
            _ => reason.to_string(),
        };
        log(
            log::Level::Warn,
            "consensus",
            "cons000",
            &cx,
            json!({"height", height; "round", round; "reason", view_change_reason}),
        );
    }
}

#[async_trait]
impl Wal for ConsensusEngine {
    // Persist overlord's own WAL blob.
    async fn save(&self, info: Bytes) -> Result<(), Box> {
        self.consensus_wal
            .update_overlord_wal(Context::new(), info)
            .map_err(|e| ProtocolError::from(ConsensusError::Other(e.to_string())))?;
        Ok(())
    }

    // Load overlord's WAL blob; absence is reported as None, not an error.
    async fn load(&self) -> Result, Box> {
        let res = self.consensus_wal.load_overlord_wal(Context::new()).ok();
        Ok(res)
    }
}

impl ConsensusEngine {
    pub fn new(
        status_agent: StatusAgent,
        node_info: NodeInfo,
        wal: Arc,
        adapter: Arc,
        crypto: Arc,
        lock: Arc>,
        consensus_wal: Arc,
    ) -> Self {
        Self {
            status_agent,
            node_info,
            exemption_hash:
RwLock::new(HashSet::new()), txs_wal: wal, adapter, crypto, lock, last_commit_time: RwLock::new(time_now()), consensus_wal, last_check_block_fail_reason: RwLock::new(String::new()), } } #[muta_apm::derive::tracing_span(kind = "consensus.engine")] pub async fn exec( &self, ctx: Context, order_root: MerkleRoot, height: u64, proposer: Address, timestamp: u64, block_hash: Hash, txs: Vec, ) -> ProtocolResult<()> { let status = self.status_agent.to_inner(); self.adapter .execute( ctx, self.node_info.chain_id.clone(), order_root, height, status.cycles_price, proposer, block_hash, txs, status.cycles_limit, timestamp, ) .await } async fn inner_check_block(&self, ctx: Context, block: &Block) -> ProtocolResult<()> { let current_timestamp = time_now(); self.adapter .verify_block_header(ctx.clone(), &block) .await .map_err(|e| { error!( "[consensus] check_block, verify_block_header error, block header: {:?}", block.header ); e })?; // verify the proof in the block for previous block // skip to get previous proof to compare because the node may just comes from // sync and waste a delay of read let previous_block_header = self .adapter .get_block_header_by_height(ctx.clone(), block.header.height - 1) .await?; // verify block timestamp. if !validate_timestamp( current_timestamp, block.header.timestamp, previous_block_header.timestamp, ) { return Err(ProtocolError::from(ConsensusError::InvalidTimestamp)); } self.adapter .verify_proof( ctx.clone(), &previous_block_header, &block.header.proof, ) .await .map_err(|e| { error!( "[consensus] check_block, verify_proof error, previous block header: {:?}, proof: {:?}", previous_block_header, block.header.proof ); e })?; self.adapter .verify_txs(ctx.clone(), block.header.height, &block.ordered_tx_hashes) .await .map_err(|e| { error!("[consensus] check_block, verify_txs error",); e })?; // If it is inconsistent with the state of the proposal, we will wait for a // period of time. 
let mut check_retry = 0; loop { match self.check_block_roots(ctx.clone(), &block.header) { Ok(()) => break, Err(e) => { if check_retry >= RETRY_CHECK_ROOT_LIMIT { return Err(e); } check_retry += 1; } } Delay::new(Duration::from_millis(RETRY_CHECK_ROOT_INTERVAL)).await; } let signed_txs = self .adapter .get_full_txs(ctx.clone(), &block.ordered_tx_hashes) .await?; self.check_order_transactions(ctx.clone(), &block, &signed_txs) } #[muta_apm::derive::tracing_span(kind = "consensus.engine")] fn check_block_roots(&self, ctx: Context, block: &BlockHeader) -> ProtocolResult<()> { let status = self.status_agent.to_inner(); // check previous hash if status.current_hash != block.prev_hash { return Err(ConsensusError::InvalidPrevhash { expect: status.current_hash, actual: block.prev_hash.clone(), } .into()); } // check state root if status.latest_committed_state_root != block.state_root && !status.list_state_root.contains(&block.state_root) { warn!( "invalid status list_state_root, latest {:?}, current list {:?}, block {:?}", status.latest_committed_state_root, status.list_state_root, block.state_root ); return Err(ConsensusError::InvalidStatusVec.into()); } // check confirm root if !check_list_roots(&status.list_confirm_root, &block.confirm_root) { error!( "current list confirm root {:?}, block confirm root {:?}", status.list_confirm_root, block.confirm_root ); return Err(ConsensusError::InvalidStatusVec.into()); } // check receipt root if !check_list_roots(&status.list_receipt_root, &block.receipt_root) { error!( "current list receipt root {:?}, block receipt root {:?}", status.list_receipt_root, block.receipt_root ); return Err(ConsensusError::InvalidStatusVec.into()); } // check cycles used if !check_list_roots(&status.list_cycles_used, &block.cycles_used) { error!( "current list cycles used {:?}, block cycles used {:?}", status.list_cycles_used, block.cycles_used ); return Err(ConsensusError::InvalidStatusVec.into()); } Ok(()) } #[muta_apm::derive::tracing_span( kind = 
"consensus.engine", logs = "{'txs_len': 'signed_txs.len()'}" )] fn check_order_transactions( &self, ctx: Context, block: &Block, signed_txs: &[SignedTransaction], ) -> ProtocolResult<()> { let order_root = Merkle::from_hashes(block.ordered_tx_hashes.clone()) .get_root_hash() .unwrap_or_else(Hash::from_empty); if order_root != block.header.order_root { return Err(ConsensusError::InvalidOrderRoot { expect: order_root, actual: block.header.order_root.clone(), } .into()); } let order_signed_transactions_hash = digest_signed_transactions(signed_txs)?; if order_signed_transactions_hash != block.header.order_signed_transactions_hash { return Err(ConsensusError::InvalidOrderSignedTransactionsHash { expect: order_signed_transactions_hash, actual: block.header.order_signed_transactions_hash.clone(), } .into()); } Ok(()) } /// After get the signed transactions: /// 1. Execute the signed transactions. /// 2. Save the signed transactions. /// 3. Save the latest proof. /// 4. Save the new block. /// 5. Save the receipt. pub async fn update_status( &self, metadata: Metadata, block: Block, proof: Proof, txs: Vec, ) -> ProtocolResult<()> { // Save signed transactions self.adapter .save_signed_txs(Context::new(), block.header.height, txs) .await?; // Save the block. 
self.adapter .save_block(Context::new(), block.clone()) .await?; // update timeout_gap of mempool self.adapter.set_args( Context::new(), metadata.timeout_gap, metadata.cycles_limit, metadata.max_tx_size, ); let pub_keys = metadata .verifier_list .iter() .map(|v| v.pub_key.decode()) .collect(); self.adapter.tag_consensus(Context::new(), pub_keys)?; let block_hash = Hash::digest(block.header.encode_fixed()?); if block.header.height != proof.height { info!("[consensus] update_status for handle_commit, error, before update, block height {}, proof height:{}, proof : {:?}", block.header.height, proof.height, proof.clone()); } self.status_agent .update_by_committed(metadata.clone(), block, block_hash, proof); let committed_status_agent = self.status_agent.to_inner(); if committed_status_agent.latest_committed_height != committed_status_agent.current_proof.height { error!("[consensus] update_status for handle_commit, error, current_height {} != current_proof.height {}, proof :{:?}", committed_status_agent.latest_committed_height, committed_status_agent.current_proof.height, committed_status_agent.current_proof) } self.update_overlord_crypto(metadata)?; Ok(()) } fn update_overlord_crypto(&self, metadata: Metadata) -> ProtocolResult<()> { self.crypto.update(generate_new_crypto_map(metadata)?); Ok(()) } fn metric_commit(&self, current_height: u64, txs_len: usize) { common_apm::metrics::consensus::ENGINE_HEIGHT_GAUGE.set((current_height + 1) as i64); common_apm::metrics::consensus::ENGINE_COMMITED_TX_COUNTER.inc_by(txs_len as i64); let now = time_now(); let last_commit_time = *(self.last_commit_time.read()); let elapsed = (now - last_commit_time) as f64; common_apm::metrics::consensus::ENGINE_CONSENSUS_COST_TIME.observe(elapsed / 1e3); let mut last_commit_time = self.last_commit_time.write(); *last_commit_time = now; } #[cfg(test)] pub fn get_current_status(&self) -> crate::status::CurrentConsensusStatus { self.status_agent.to_inner() } } pub fn 
generate_new_crypto_map(metadata: Metadata) -> ProtocolResult> {
    // Build the address -> BLS public key map that overlord uses to verify
    // aggregated signatures; keys come from the metadata's verifier list.
    let mut new_addr_pubkey_map = HashMap::new();
    for validator in metadata.verifier_list.into_iter() {
        let addr = validator.pub_key.decode();
        // BLS public keys are stored hex-encoded (0x-prefixed) in metadata.
        let hex_pubkey = hex::decode(validator.bls_pub_key.as_string_trim0x()).map_err(|err| {
            ConsensusError::Other(format!("hex decode metadata bls pubkey error {:?}", err))
        })?;
        let pubkey = BlsPublicKey::try_from(hex_pubkey.as_ref())
            .map_err(|err| ConsensusError::Other(format!("try from bls pubkey error {:?}", err)))?;
        new_addr_pubkey_map.insert(addr, pubkey);
    }
    Ok(new_addr_pubkey_map)
}

// Map validators into overlord `Node`s and sort them, so every node derives
// the same authority ordering from the same validator set.
// NOTE(review): "covert" is a typo for "convert"; kept because callers
// elsewhere in the crate reference this name.
fn covert_to_overlord_authority(validators: &[Validator]) -> Vec {
    let mut authority = validators
        .iter()
        .map(|v| Node {
            address: v.pub_key.clone(),
            propose_weight: v.propose_weight,
            vote_weight: v.vote_weight,
        })
        .collect::>();
    authority.sort();
    authority
}

// Thin forwarding helper: ask the adapter to pull the proposed tx hashes.
async fn sync_txs(
    ctx: Context,
    adapter: Arc,
    propose_hashes: Vec,
) -> ProtocolResult<()> {
    adapter.sync_txs(ctx, propose_hashes).await
}

// A proposal timestamp is valid only if it lies in the inclusive window
// [previous block's timestamp, local current time].
fn validate_timestamp(
    current_timestamp: u64,
    proposal_timestamp: u64,
    previous_timestamp: u64,
) -> bool {
    if proposal_timestamp < previous_timestamp {
        return false;
    }

    if proposal_timestamp > current_timestamp {
        return false;
    }

    true
}

// Export the ordered/proposed tx counts of a pill to the metrics gauges.
fn gauge_txs_len(pill: &Pill) {
    common_apm::metrics::consensus::ENGINE_ORDER_TX_GAUGE
        .set(pill.block.ordered_tx_hashes.len() as i64);
    common_apm::metrics::consensus::ENGINE_SYNC_TX_GAUGE.set(pill.propose_hashes.len() as i64);
}

#[cfg(test)]
mod tests {
    use super::validate_timestamp;

    #[test]
    fn test_validate_timestamp() {
        // current 10, proposal 9, previous 8. true
        assert_eq!(validate_timestamp(10, 9, 8), true);
        // current 10, proposal 11, previous 8: proposal is in the future -> false
        assert_eq!(validate_timestamp(10, 11, 8), false);
        // current 10, proposal 9, previous 11. 
// expected false: previous timestamp (11) is later than the proposal (9)
assert_eq!(validate_timestamp(10, 9, 11), false);
    }
}

================================================ FILE: core/consensus/src/fixed_types.rs ================================================

use std::error::Error;

use overlord::Codec;

use protocol::codec::{Deserialize, ProtocolCodecSync, Serialize};
use protocol::fixed_codec::FixedCodec;
use protocol::types::{Block, Hash, Pill, Proof, SignedTransaction};
use protocol::{traits::MessageCodec, Bytes, BytesMut, ProtocolResult};

use crate::{ConsensusError, ConsensusType};

// Requests sent over the consensus RPC: pull blocks by height, or pull a
// batch of transactions.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum ConsensusRpcRequest {
    PullBlocks(u64),
    PullTxs(PullTxsRequest),
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConsensusRpcResponse {
    PullBlocks(Box),
    PullTxs(Box),
}

impl MessageCodec for ConsensusRpcResponse {
    fn encode(&mut self) -> ProtocolResult {
        // A one-byte discriminant is appended to the payload: b"a" marks a
        // PullBlocks response, b"b" a PullTxs response (see `decode`).
        let bytes = match self {
            ConsensusRpcResponse::PullBlocks(ep) => {
                let mut tmp = BytesMut::from(ep.encode_fixed()?.as_ref());
                tmp.extend_from_slice(b"a");
                tmp
            }

            ConsensusRpcResponse::PullTxs(txs) => {
                let mut tmp = BytesMut::from(
                    bincode::serialize(&txs)
                        .map_err(|_| ConsensusError::EncodeErr(ConsensusType::RpcPullTxs))?
// NOTE(review): generic arguments (e.g. `ProtocolResult<Bytes>`, `Vec<Hash>`)
// appear stripped by text extraction throughout this chunk; kept as-is here —
// restore the exact parameters from VCS.
.as_slice(),
                );
                tmp.extend_from_slice(b"b");
                tmp
            }
        };

        Ok(bytes.freeze())
    }

    fn decode(mut bytes: Bytes) -> ProtocolResult {
        // This payload arrives from remote peers: the last byte is the variant
        // discriminant written by `encode`. Reject malformed input with a
        // decode error instead of panicking, since the data is untrusted.
        let len = bytes.len();
        if len == 0 {
            // was: implicit panic via `split_off(len - 1)` underflow on empty input
            return Err(ConsensusError::Other("empty consensus rpc response".to_string()).into());
        }
        let flag = bytes.split_off(len - 1);

        match flag.as_ref() {
            b"a" => {
                let res: Block = FixedCodec::decode_fixed(bytes)?;
                Ok(ConsensusRpcResponse::PullBlocks(Box::new(res)))
            }
            b"b" => {
                let res: FixedSignedTxs = bincode::deserialize(&bytes)
                    .map_err(|_| ConsensusError::DecodeErr(ConsensusType::RpcPullTxs))?;
                Ok(ConsensusRpcResponse::PullTxs(Box::new(res)))
            }
            // was: `_ => unreachable!()` — a peer could crash the node with a
            // single malformed message; surface a decode error instead.
            _ => Err(ConsensusError::Other(
                "unknown consensus rpc response flag".to_string(),
            )
            .into()),
        }
    }
}

// `Pill` wrapper implementing overlord's `Codec` via the fixed codec.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FixedPill {
    pub inner: Pill,
}

impl Codec for FixedPill {
    fn encode(&self) -> Result> {
        let bytes = self.inner.encode_fixed()?;
        Ok(bytes)
    }

    fn decode(data: Bytes) -> Result> {
        let inner: Pill = FixedCodec::decode_fixed(data)?;
        Ok(FixedPill { inner })
    }
}

impl FixedPill {
    pub fn get_ordered_hashes(&self) -> Vec {
        self.inner.block.ordered_tx_hashes.clone()
    }

    pub fn get_propose_hashes(&self) -> Vec {
        self.inner.propose_hashes.clone()
    }
}

// `Block` wrapper carrying the sync codec for network messages.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FixedBlock {
    pub inner: Block,
}

impl MessageCodec for FixedBlock {
    fn encode(&mut self) -> ProtocolResult {
        self.inner.encode_sync()
    }

    fn decode(bytes: Bytes) -> ProtocolResult {
        let inner: Block = ProtocolCodecSync::decode_sync(bytes)?;
        Ok(FixedBlock::new(inner))
    }
}

impl FixedBlock {
    pub fn new(inner: Block) -> Self {
        FixedBlock { inner }
    }
}

// `Proof` wrapper carrying the sync codec for network messages.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FixedProof {
    pub inner: Proof,
}

impl MessageCodec for FixedProof {
    fn encode(&mut self) -> ProtocolResult {
        self.inner.encode_sync()
    }

    fn decode(bytes: Bytes) -> ProtocolResult {
        let inner: Proof = ProtocolCodecSync::decode_sync(bytes)?;
        Ok(FixedProof::new(inner))
    }
}

impl FixedProof {
    pub fn new(inner: Proof) -> Self {
        FixedProof { inner }
    }
}

// Newtype around a block height used as an RPC message.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct FixedHeight {
    pub inner: u64,
}

impl FixedHeight {
    pub fn new(inner: u64) -> Self {
        FixedHeight { inner }
    }
}
#[derive(Serialize, Deserialize, Clone, Debug)] pub struct PullTxsRequest { pub height: u64, #[serde(with = "core_network::serde_multi")] pub inner: Vec, } impl PullTxsRequest { pub fn new(height: u64, inner: Vec) -> Self { PullTxsRequest { height, inner } } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct FixedSignedTxs { #[serde(with = "core_network::serde_multi")] pub inner: Vec, } impl FixedSignedTxs { pub fn new(inner: Vec) -> Self { FixedSignedTxs { inner } } } #[cfg(test)] mod test { use std::convert::From; use std::str::FromStr; use futures::executor; use rand::random; use protocol::types::{ Address, Block, BlockHeader, Hash, Proof, RawTransaction, SignedTransaction, TransactionRequest, }; use protocol::Bytes; use super::{FixedBlock, FixedSignedTxs}; const PUB_KEY_STR: &str = "02ee34d1ce8270cd236e9455d4ab9e756c4478779b1a20d7ce1c247af61ec2be3b"; fn gen_block(height: u64, block_hash: Hash) -> Block { let nonce = Hash::digest(Bytes::from("XXXX")); let addr_str = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705"; let header = BlockHeader { chain_id: nonce.clone(), height, exec_height: height - 1, prev_hash: nonce.clone(), timestamp: 1000, order_root: nonce.clone(), order_signed_transactions_hash: nonce.clone(), confirm_root: Vec::new(), state_root: nonce, receipt_root: Vec::new(), cycles_used: vec![999_999], proposer: Address::from_str(addr_str).unwrap(), proof: mock_proof(block_hash), validator_version: 1, validators: Vec::new(), }; Block { header, ordered_tx_hashes: Vec::new(), } } fn mock_proof(block_hash: Hash) -> Proof { Proof { height: 0, round: 0, block_hash, signature: Default::default(), bitmap: Default::default(), } } fn gen_random_bytes(len: usize) -> Vec { (0..len).map(|_| random::()).collect::>() } fn gen_signed_tx() -> SignedTransaction { use protocol::codec::ProtocolCodec; let nonce = Hash::digest(Bytes::from(gen_random_bytes(10))); let request = TransactionRequest { service_name: "test".to_owned(), method: 
"test".to_owned(), payload: "test".to_owned(), }; let mut raw = RawTransaction { chain_id: nonce.clone(), nonce, timeout: random::(), cycles_price: 1, cycles_limit: random::(), request, sender: Address::from_pubkey_bytes(Bytes::from(hex::decode(PUB_KEY_STR).unwrap())) .unwrap(), }; let raw_bytes = executor::block_on(async { raw.encode().await.unwrap() }); let tx_hash = Hash::digest(raw_bytes); SignedTransaction { raw, tx_hash, pubkey: Bytes::from(hex::decode(PUB_KEY_STR).unwrap()), signature: Bytes::from(gen_random_bytes(64)), } } #[test] fn test_txs_codec() { use super::ProtocolCodecSync; for _ in 0..10 { let fixed_txs = FixedSignedTxs { inner: (0..1000).map(|_| gen_signed_tx()).collect::>(), }; let bytes = fixed_txs.encode_sync().unwrap(); assert_eq!(fixed_txs, FixedSignedTxs::decode_sync(bytes).unwrap()); } } #[tokio::test] async fn test_block_codec() { use super::MessageCodec; let block = gen_block(random::(), Hash::from_empty()); let mut origin = FixedBlock::new(block.clone()); let bytes = origin.encode().unwrap(); let res: FixedBlock = MessageCodec::decode(bytes).unwrap(); assert_eq!(res.inner, block); } } ================================================ FILE: core/consensus/src/lib.rs ================================================ #![feature(test)] #![allow( clippy::type_complexity, clippy::suspicious_else_formatting, clippy::mutable_key_type )] pub mod adapter; pub mod consensus; mod engine; pub mod fixed_types; pub mod message; pub mod status; pub mod synchronization; #[cfg(test)] mod tests; pub mod util; pub mod wal; mod wal_proto; use std::error::Error; use derive_more::Display; use common_crypto::Error as CryptoError; use protocol::types::{Hash, MerkleRoot}; use protocol::{ProtocolError, ProtocolErrorKind}; pub use crate::adapter::OverlordConsensusAdapter; pub use crate::consensus::OverlordConsensus; pub use crate::synchronization::{OverlordSynchronization, RichBlock}; pub use crate::wal::{ConsensusWal, SignedTxsWAL}; pub use overlord::{types::Node, 
DurationConfig}; pub const DEFAULT_OVERLORD_GAP: usize = 5; pub const DEFAULT_SYNC_TXS_CHUNK_SIZE: usize = 5000; #[derive(Clone, Debug, Display, PartialEq, Eq)] pub enum ConsensusType { #[display(fmt = "Signed Proposal")] SignedProposal, #[display(fmt = "Signed Vote")] SignedVote, #[display(fmt = "Aggregated Vote")] AggregateVote, #[display(fmt = "Rich Height")] RichHeight, #[display(fmt = "Rpc Pull Blocks")] RpcPullBlocks, #[display(fmt = "Rpc Pull Transactions")] RpcPullTxs, #[display(fmt = "Signed Choke")] SignedChoke, #[display(fmt = "WAL Signed Transactions")] WALSignedTxs, } /// Consensus errors defines here. #[derive(Debug, Display)] pub enum ConsensusError { /// Check block error. #[display(fmt = "Check invalid prev_hash, expect {:?} get {:?}", expect, actual)] InvalidPrevhash { expect: Hash, actual: Hash }, #[display(fmt = "Check invalid order root, expect {:?} get {:?}", expect, actual)] InvalidOrderRoot { expect: MerkleRoot, actual: MerkleRoot, }, #[display( fmt = "Check invalid order signed transactions hash, expect {:?} get {:?}", expect, actual )] InvalidOrderSignedTransactionsHash { expect: Hash, actual: Hash }, #[display(fmt = "Check invalid status vec")] InvalidStatusVec, /// Decode consensus message error. #[display(fmt = "Decode {:?} message failed", _0)] DecodeErr(ConsensusType), /// Encode consensus message error. #[display(fmt = "Encode {:?} message failed", _0)] EncodeErr(ConsensusType), /// Overlord consensus protocol error. #[display(fmt = "Overlord error {:?}", _0)] OverlordErr(Box), /// Consensus missed last block proof. #[display(fmt = "Consensus missed proof of {} block", _0)] MissingProof(u64), /// Consensus missed the pill. #[display(fmt = "Consensus missed pill cooresponding {:?}", _0)] MissingPill(Hash), /// Invalid timestamp #[display(fmt = "Consensus invalid timestamp")] InvalidTimestamp, /// Consensus missed the block header. 
#[display(fmt = "Consensus missed block header of {} block", _0)] MissingBlockHeader(u64), /// This boxed error should be a `CryptoError`. #[display(fmt = "Crypto error {:?}", _0)] CryptoErr(Box), #[display(fmt = "Synchronization {} block error", _0)] VerifyTransaction(u64), #[display(fmt = "Synchronization/Consensus {} block error : {}", _0, _1)] VerifyBlockHeader(u64, BlockHeaderField), #[display(fmt = "Synchronization/Consensus {} block error : {}", _0, _1)] VerifyProof(u64, BlockProofField), /// #[display(fmt = "Execute transactions error {:?}", _0)] ExecuteErr(String), /// WALErr(std::io::Error), #[display(fmt = "Storage item not found")] StorageItemNotFound, #[display(fmt = "Lock in sync")] LockInSync, #[display(fmt = "Wal transactions mismatch, height {}", _0)] WalTxsMismatch(u64), #[display( fmt = "Commit an outdated block, block_height {}, last_committed_height {}", _0, _1 )] OutdatedCommit(u64, u64), /// Other error used for very few errors. #[display(fmt = "{:?}", _0)] Other(String), #[display(fmt = "{:?}", _0)] SystemTime(std::time::SystemTimeError), #[display(fmt = "parse file name into timestamp error")] FileNameTimestamp, #[display(fmt = "consensus wal dir doesn't exist")] ConsensusWalDirNotExist, #[display(fmt = "no consensus wal file available")] ConsensusWalNoWalFile, } #[derive(Debug, Display)] pub enum BlockHeaderField { #[display(fmt = "The prev_hash mismatch the previous block")] PreviousBlockHash, #[display(fmt = "The prev_hash mismatch the hash in the proof field")] ProofHash, #[display(fmt = "The proposer is not in the committee")] Proposer, #[display(fmt = "There is at least one validator not in the committee")] Validator, #[display(fmt = "There is at least one validator's weight mismatch")] Weight, } #[derive(Debug, Display)] pub enum BlockProofField { #[display(fmt = "The bit_map has error with committer, can't get signed voters")] BitMap, #[display(fmt = "The proof signature is fraud or error")] Signature, #[display(fmt = "Heights of 
block and proof diverse, block {}, proof {}", _0, _1)] HeightMismatch(u64, u64), #[display(fmt = "Hash of block and proof diverse")] HashMismatch, #[display(fmt = "There is at least one validator not in the committee")] Validator, #[display(fmt = "There is at least one validator's weight mismatch")] Weight, #[display(fmt = "There is at least one validator's weight missing")] WeightNotFound, } impl Error for ConsensusError {} impl From for ProtocolError { fn from(err: ConsensusError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Consensus, Box::new(err)) } } ================================================ FILE: core/consensus/src/message.rs ================================================ use std::sync::Arc; use async_trait::async_trait; use bincode::serialize; use futures::TryFutureExt; use log::warn; use overlord::types::{AggregatedVote, SignedChoke, SignedProposal, SignedVote}; use overlord::Codec; use rlp::Encodable; use serde::{Deserialize, Serialize}; use common_apm::muta_apm; use protocol::traits::{ Consensus, Context, MessageHandler, Priority, Rpc, Storage, Synchronization, TrustFeedback, }; use protocol::ProtocolError; use core_storage::StorageError; pub use crate::fixed_types::{FixedBlock, FixedHeight, FixedProof, FixedSignedTxs, PullTxsRequest}; pub const END_GOSSIP_SIGNED_PROPOSAL: &str = "/gossip/consensus/signed_proposal"; pub const END_GOSSIP_SIGNED_VOTE: &str = "/gossip/consensus/signed_vote"; pub const END_GOSSIP_AGGREGATED_VOTE: &str = "/gossip/consensus/qc"; pub const END_GOSSIP_SIGNED_CHOKE: &str = "/gossip/consensus/signed_choke"; pub const RPC_SYNC_PULL_BLOCK: &str = "/rpc_call/consensus/sync_pull_block"; pub const RPC_RESP_SYNC_PULL_BLOCK: &str = "/rpc_resp/consensus/sync_pull_block"; pub const RPC_SYNC_PULL_TXS: &str = "/rpc_call/consensus/sync_pull_txs"; pub const RPC_RESP_SYNC_PULL_TXS: &str = "/rpc_resp/consensus/sync_pull_txs"; pub const BROADCAST_HEIGHT: &str = "/gossip/consensus/broadcast_height"; pub const 
RPC_SYNC_PULL_PROOF: &str = "/rpc_call/consensus/sync_pull_proof"; pub const RPC_RESP_SYNC_PULL_PROOF: &str = "/rpc_resp/consensus/sync_pull_proof"; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct Proposal(pub Vec); impl From> for Proposal { fn from(proposal: SignedProposal) -> Self { Proposal(proposal.rlp_bytes()) } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct Vote(pub Vec); impl From for Vote { fn from(vote: SignedVote) -> Self { Vote(vote.rlp_bytes()) } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct QC(pub Vec); impl From for QC { fn from(aggregated_vote: AggregatedVote) -> Self { QC(aggregated_vote.rlp_bytes()) } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct RichHeight(pub Vec); impl From for RichHeight { fn from(id: FixedHeight) -> Self { RichHeight(serialize(&id).unwrap()) } } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct Choke(pub Vec); impl From for Choke { fn from(signed_choke: SignedChoke) -> Self { Choke(signed_choke.rlp_bytes()) } } pub struct ProposalMessageHandler { consensus: Arc, } impl ProposalMessageHandler { pub fn new(consensus: Arc) -> Self { Self { consensus } } } #[async_trait] impl MessageHandler for ProposalMessageHandler { type Message = Proposal; #[muta_apm::derive::tracing_span(name = "handle_proposal", kind = "consensus.message")] async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { if let Err(e) = self.consensus.set_proposal(ctx, msg.0).await { warn!("set proposal {}", e); return TrustFeedback::Worse(e.to_string()); } TrustFeedback::Good } } pub struct VoteMessageHandler { consensus: Arc, } impl VoteMessageHandler { pub fn new(consensus: Arc) -> Self { Self { consensus } } } #[async_trait] impl MessageHandler for VoteMessageHandler { type Message = Vote; #[muta_apm::derive::tracing_span(name = "handle_vote", kind = "consensus.message")] async fn process(&self, ctx: 
Context, msg: Self::Message) -> TrustFeedback { if let Err(e) = self.consensus.set_vote(ctx, msg.0).await { warn!("set vote {}", e); return TrustFeedback::Worse(e.to_string()); } TrustFeedback::Good } } pub struct QCMessageHandler { consensus: Arc, } impl QCMessageHandler { pub fn new(consensus: Arc) -> Self { Self { consensus } } } #[async_trait] impl MessageHandler for QCMessageHandler { type Message = QC; #[muta_apm::derive::tracing_span(name = "handle_qc", kind = "consensus.message")] async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { if let Err(e) = self.consensus.set_qc(ctx, msg.0).await { warn!("set qc {}", e); return TrustFeedback::Worse(e.to_string()); } TrustFeedback::Good } } pub struct ChokeMessageHandler { consensus: Arc, } impl ChokeMessageHandler { pub fn new(consensus: Arc) -> Self { Self { consensus } } } #[async_trait] impl MessageHandler for ChokeMessageHandler { type Message = Choke; #[muta_apm::derive::tracing_span(name = "handle_choke", kind = "consensus.message")] async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { if let Err(e) = self.consensus.set_choke(ctx, msg.0).await { warn!("set choke {}", e); return TrustFeedback::Worse(e.to_string()); } TrustFeedback::Good } } pub struct RemoteHeightMessageHandler { synchronization: Arc, } impl RemoteHeightMessageHandler { pub fn new(synchronization: Arc) -> Self { Self { synchronization } } } #[async_trait] impl MessageHandler for RemoteHeightMessageHandler { type Message = u64; #[muta_apm::derive::tracing_span(name = "handle_remote_height", kind = "consensus.message")] async fn process(&self, ctx: Context, remote_height: Self::Message) -> TrustFeedback { if let Err(e) = self .synchronization .receive_remote_block(ctx, remote_height) .await { warn!("sync: receive remote block {}", e); if e.to_string().contains("timeout") { return TrustFeedback::Bad("sync block timeout".to_owned()); } else { // Just in case, don't use worse here return 
TrustFeedback::Bad(e.to_string()); } } TrustFeedback::Good } } #[derive(Debug)] pub struct PullBlockRpcHandler { rpc: Arc, storage: Arc, } impl PullBlockRpcHandler where R: Rpc + 'static, S: Storage + 'static, { pub fn new(rpc: Arc, storage: Arc) -> Self { PullBlockRpcHandler { rpc, storage } } } #[async_trait] impl MessageHandler for PullBlockRpcHandler { type Message = FixedHeight; #[muta_apm::derive::tracing_span(name = "pull_block_rpc", kind = "consensus.message")] async fn process(&self, ctx: Context, msg: FixedHeight) -> TrustFeedback { let id = msg.inner; let ret = match self.storage.get_block(ctx.clone(), id).await { Ok(Some(block)) => Ok(FixedBlock::new(block)), Ok(None) => Err(StorageError::GetNone.into()), Err(e) => Err(e), }; self.rpc .response(ctx, RPC_RESP_SYNC_PULL_BLOCK, ret, Priority::High) .unwrap_or_else(move |e: ProtocolError| warn!("[core_consensus] push block {}", e)) .await; TrustFeedback::Neutral } } #[derive(Debug)] pub struct PullProofRpcHandler { rpc: Arc, storage: Arc, } impl PullProofRpcHandler where R: Rpc + 'static, S: Storage + 'static, { pub fn new(rpc: Arc, storage: Arc) -> Self { PullProofRpcHandler { rpc, storage } } } #[async_trait] impl MessageHandler for PullProofRpcHandler { type Message = FixedHeight; #[muta_apm::derive::tracing_span(name = "pull_proof_rpc", kind = "consensus.message")] async fn process(&self, ctx: Context, height: FixedHeight) -> TrustFeedback { let height = height.inner; let latest_proof = self.storage.get_latest_proof(ctx.clone()).await; let ret = match latest_proof { Ok(latest_proof) => match height { height if height < latest_proof.height => { match self.storage.get_block_header(ctx.clone(), height + 1).await { Ok(Some(next_header)) => Ok(next_header.proof), Ok(None) => Err(StorageError::GetNone.into()), Err(_) => Err(StorageError::GetNone.into()), } } height if height == latest_proof.height => Ok(latest_proof), _ => Err(StorageError::GetNone.into()), }, Err(_) => Err(StorageError::GetNone.into()), }; 
self.rpc .response( ctx, RPC_RESP_SYNC_PULL_PROOF, ret.map(FixedProof::new), Priority::High, ) .unwrap_or_else(move |e: ProtocolError| warn!("[core_consensus] push proof {}", e)) .await; TrustFeedback::Neutral } } #[derive(Debug)] pub struct PullTxsRpcHandler { rpc: Arc, storage: Arc, } impl PullTxsRpcHandler where R: Rpc + 'static, S: Storage + 'static, { pub fn new(rpc: Arc, storage: Arc) -> Self { PullTxsRpcHandler { rpc, storage } } } #[async_trait] impl MessageHandler for PullTxsRpcHandler { type Message = PullTxsRequest; #[muta_apm::derive::tracing_span(name = "pull_txs_rpc", kind = "consensus.message")] async fn process(&self, ctx: Context, msg: PullTxsRequest) -> TrustFeedback { let PullTxsRequest { height, inner } = msg; let ret = self .storage .get_transactions(ctx.clone(), height, &inner) .await .map(|txs| { txs.into_iter() .filter_map(|opt_tx| opt_tx) .collect::>() }) .map(FixedSignedTxs::new); self.rpc .response(ctx, RPC_RESP_SYNC_PULL_TXS, ret, Priority::High) .unwrap_or_else(move |e: ProtocolError| warn!("[core_consensus] push txs {}", e)) .await; TrustFeedback::Neutral } } ================================================ FILE: core/consensus/src/status.rs ================================================ use std::sync::Arc; use derive_more::Display; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use common_merkle::Merkle; use protocol::fixed_codec::FixedCodec; use protocol::traits::{Context, ExecutorResp}; use protocol::types::{Block, Hash, MerkleRoot, Metadata, Proof, Validator}; use crate::util::check_list_roots; #[derive(Clone, Debug)] pub struct StatusAgent { status: Arc>, } impl StatusAgent { pub fn new(status: CurrentConsensusStatus) -> Self { Self { status: Arc::new(RwLock::new(status)), } } pub fn update_by_executed(&self, info: ExecutedInfo) { self.status.write().update_by_executed(info); } pub fn update_by_committed( &self, metadata: Metadata, block: Block, block_hash: Hash, current_proof: Proof, ) { self.status .write() 
.update_by_committed(metadata, block, block_hash, current_proof) } // TODO(yejiayu): Is there a better way to write it? pub fn replace(&self, new_status: CurrentConsensusStatus) { let mut status = self.status.write(); status.cycles_price = new_status.cycles_price; status.cycles_limit = new_status.cycles_limit; status.latest_committed_height = new_status.latest_committed_height; status.exec_height = new_status.exec_height; status.current_hash = new_status.current_hash; status.latest_committed_state_root = new_status.latest_committed_state_root; status.list_confirm_root = new_status.list_confirm_root; status.list_state_root = new_status.list_state_root; status.list_receipt_root = new_status.list_receipt_root; status.list_cycles_used = new_status.list_cycles_used; status.current_proof = new_status.current_proof; status.validators = new_status.validators; status.consensus_interval = new_status.consensus_interval; } pub fn to_inner(&self) -> CurrentConsensusStatus { self.status.read().clone() } } #[derive(Serialize, Deserialize, Clone, Debug, Display, PartialEq, Eq)] #[display( fmt = "latest_committed_height {}, exec height {}, current_hash {:?}, latest_committed_state_root {:?} list state root {:?}, list receipt root {:?}, list confirm root {:?}, list cycle used {:?}", latest_committed_height, exec_height, current_hash, latest_committed_state_root, list_state_root, list_receipt_root, list_confirm_root, list_cycles_used )] pub struct CurrentConsensusStatus { pub cycles_price: u64, // metadata pub cycles_limit: u64, // metadata pub latest_committed_height: u64, // latest consented height pub exec_height: u64, pub current_hash: Hash, // as same as block of current height pub latest_committed_state_root: MerkleRoot, // latest consented height pub list_confirm_root: Vec, pub list_state_root: Vec, pub list_receipt_root: Vec, pub list_cycles_used: Vec, pub current_proof: Proof, // latest consented block's proof, not previous block pub validators: Vec, // metadate pub 
consensus_interval: u64, // metadata pub propose_ratio: u64, // metadata pub prevote_ratio: u64, // metadata pub precommit_ratio: u64, // metadata pub brake_ratio: u64, pub tx_num_limit: u64, pub max_tx_size: u64, } // metadata is as same as latest consented height impl CurrentConsensusStatus { pub fn get_latest_state_root(&self) -> MerkleRoot { self.list_state_root .last() .unwrap_or(&self.latest_committed_state_root) .clone() } pub(crate) fn update_by_executed(&mut self, info: ExecutedInfo) { if info.exec_height <= self.exec_height { return; } log::info!("update_by_executed: info {}", info,); log::info!("update_by_executed: current status {}", self); assert!(info.exec_height == self.exec_height + 1); self.exec_height += 1; self.list_cycles_used.push(info.cycles_used); self.list_confirm_root.push(info.confirm_root.clone()); self.list_receipt_root.push(info.receipt_root.clone()); self.list_state_root.push(info.state_root); common_apm::metrics::consensus::ENGINE_EXECUTING_BLOCK_GAUGE .set(self.latest_committed_height as i64 - self.exec_height as i64); } pub(crate) fn update_by_committed( &mut self, metadata: Metadata, block: Block, block_hash: Hash, current_proof: Proof, ) { self.set_metadata(metadata); assert!(block.header.height == self.latest_committed_height + 1); self.latest_committed_height = block.header.height; self.current_hash = block_hash; self.current_proof = current_proof; self.latest_committed_state_root = block.header.state_root.clone(); self.split_off(&block); common_apm::metrics::consensus::ENGINE_EXECUTING_BLOCK_GAUGE .set((self.latest_committed_height - self.exec_height) as i64); } pub(crate) fn set_metadata(&mut self, metadata: Metadata) { self.cycles_limit = metadata.cycles_limit; self.cycles_price = metadata.cycles_price; self.consensus_interval = metadata.interval; let validators: Vec = metadata .verifier_list .iter() .map(|v| Validator { pub_key: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect(); 
self.validators = validators; self.propose_ratio = metadata.propose_ratio; self.prevote_ratio = metadata.prevote_ratio; self.precommit_ratio = metadata.precommit_ratio; self.brake_ratio = metadata.brake_ratio; self.max_tx_size = metadata.max_tx_size; self.tx_num_limit = metadata.tx_num_limit; } fn split_off(&mut self, block: &Block) { let len = block.header.confirm_root.len(); if len != block.header.cycles_used.len() || len != block.header.receipt_root.len() { panic!("vec lengths do not match. {:?}", block); } if !check_list_roots(&self.list_cycles_used, &block.header.cycles_used) { panic!( "check list_cycles_used error current_roots: {:?}, committed_roots roots {:?}", self.list_cycles_used, block.header.cycles_used ); } if !check_list_roots(&self.list_confirm_root, &block.header.confirm_root) { panic!( "check list_confirm_root error current_roots: {:?}, committed_roots roots {:?}", self.list_confirm_root, block.header.confirm_root ); } if !check_list_roots(&self.list_receipt_root, &block.header.receipt_root) { panic!( "check list_receipt_root error current_roots: {:?}, committed_roots roots {:?}", self.list_receipt_root, block.header.receipt_root ); } self.list_cycles_used = self.list_cycles_used.split_off(len); self.list_confirm_root = self.list_confirm_root.split_off(len); self.list_receipt_root = self.list_receipt_root.split_off(len); self.list_state_root = self.list_state_root.split_off(len); } } #[derive(Clone, Debug, Display)] #[display( fmt = "exec height {}, cycles used {}, state root {:?}, receipt root {:?}, confirm root {:?}", exec_height, cycles_used, state_root, receipt_root, confirm_root )] pub struct ExecutedInfo { pub ctx: Context, pub exec_height: u64, pub cycles_used: u64, pub state_root: MerkleRoot, pub receipt_root: MerkleRoot, pub confirm_root: MerkleRoot, } impl ExecutedInfo { pub fn new(ctx: Context, height: u64, order_root: MerkleRoot, resp: ExecutorResp) -> Self { let cycles = resp.all_cycles_used; let receipt = Merkle::from_hashes( 
resp.receipts .iter() .map(|r| Hash::digest(r.to_owned().encode_fixed().unwrap())) .collect::>(), ) .get_root_hash() .unwrap_or_else(Hash::from_empty); Self { ctx, exec_height: height, cycles_used: cycles, receipt_root: receipt, confirm_root: order_root, state_root: resp.state_root, } } } ================================================ FILE: core/consensus/src/synchronization.rs ================================================ use std::sync::Arc; use std::time::{Duration, Instant}; use async_trait::async_trait; use futures::lock::Mutex; use futures_timer::Delay; use common_apm::muta_apm; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ Context, ExecutorParams, ExecutorResp, Synchronization, SynchronizationAdapter, }; use protocol::types::{Block, Hash, Proof, Receipt, SignedTransaction}; use protocol::ProtocolResult; use crate::engine::generate_new_crypto_map; use crate::status::{ExecutedInfo, StatusAgent}; use crate::util::{digest_signed_transactions, OverlordCrypto}; use crate::ConsensusError; const POLLING_BROADCAST: u64 = 2000; const WAIT_EXECUTION: u64 = 1000; const ONCE_SYNC_BLOCK_LIMIT: u64 = 50; #[derive(Clone, Debug)] pub struct RichBlock { pub block: Block, pub txs: Vec, } pub struct OverlordSynchronization { adapter: Arc, status: StatusAgent, crypto: Arc, lock: Arc>, syncing: Mutex<()>, sync_txs_chunk_size: usize, } #[async_trait] impl Synchronization for OverlordSynchronization { #[muta_apm::derive::tracing_span( kind = "consensus.sync", logs = "{'remote_height': 'remote_height'}" )] async fn receive_remote_block(&self, ctx: Context, remote_height: u64) -> ProtocolResult<()> { let syncing_lock = self.syncing.try_lock(); if syncing_lock.is_none() { return Ok(()); } if !self.need_sync(ctx.clone(), remote_height).await? { return Ok(()); } // Lock the consensus engine, block commit process. 
let commit_lock = self.lock.try_lock(); if commit_lock.is_none() { return Ok(()); } let current_height = self.status.to_inner().latest_committed_height; if remote_height <= current_height { return Ok(()); } log::info!( "[synchronization]: sync start, remote block height {:?} current block height {:?}", remote_height, current_height, ); let sync_status_agent = self.init_status_agent().await?; let sync_resp = self .start_sync( ctx.clone(), sync_status_agent.clone(), current_height, remote_height, ) .await; let sync_status = sync_status_agent.to_inner(); if let Err(e) = sync_resp { log::error!( "[synchronization]: err, current_height {:?} err_msg: {:?}", sync_status.latest_committed_height, e ); return Err(e); } log::info!( "[synchronization]: sync end, remote block height {:?} current block height {:?} current exec height {:?} current proof height {:?}", remote_height, sync_status.latest_committed_height, sync_status.exec_height, sync_status.current_proof.height, ); Ok(()) } } impl OverlordSynchronization { pub fn new( sync_txs_chunk_size: usize, adapter: Arc, status: StatusAgent, crypto: Arc, lock: Arc>, ) -> Self { let syncing = Mutex::new(()); Self { adapter, status, crypto, lock, syncing, sync_txs_chunk_size, } } pub async fn polling_broadcast(&self) -> ProtocolResult<()> { loop { let current_height = self.status.to_inner().latest_committed_height; if current_height != 0 { self.adapter .broadcast_height(Context::new(), current_height) .await?; } Delay::new(Duration::from_millis(POLLING_BROADCAST)).await; } } #[muta_apm::derive::tracing_span( kind = "consensus.sync", logs = "{'current_height': 'current_height', 'remote_height': 'remote_height'}" )] async fn start_sync( &self, ctx: Context, sync_status_agent: StatusAgent, current_height: u64, remote_height: u64, ) -> ProtocolResult<()> { let remote_height = if current_height + ONCE_SYNC_BLOCK_LIMIT > remote_height { remote_height } else { current_height + ONCE_SYNC_BLOCK_LIMIT }; let mut current_consented_height = 
current_height; while current_consented_height < remote_height { let inst = Instant::now(); let consenting_height = current_consented_height + 1; log::info!( "[synchronization]: try syncing block, current_consented_height:{},syncing_height:{}", current_consented_height, consenting_height ); let consenting_rich_block: RichBlock = self .get_rich_block_from_remote(ctx.clone(), consenting_height) .await .map_err(|e| { log::error!( "[synchronization]: get_rich_block_from_remote error, height: {:?}", consenting_height ); e })?; let consenting_proof: Proof = self .adapter .get_proof_from_remote(ctx.clone(), consenting_height) .await .map_err(|e| { log::error!( "[synchronization]: get_proof_from_remote error, height: {:?}", consenting_height ); e })?; self.adapter .verify_block_header(ctx.clone(), &consenting_rich_block.block) .await .map_err(|e| { log::error!( "[synchronization]: verify_block_header error, block header: {:?}", consenting_rich_block.block.header ); e })?; // verify syncing proof self.adapter .verify_proof( ctx.clone(), &consenting_rich_block.block.header, &consenting_proof, ) .await .map_err(|e| { log::error!( "[synchronization]: verify_proof error, syncing block header: {:?}, proof: {:?}", consenting_rich_block.block.header, consenting_proof, ); e })?; // verify previous proof let previous_block_header = self .adapter .get_block_header_by_height( ctx.clone(), consenting_rich_block.block.header.height - 1, ) .await .map_err(|e| { log::error!( "[synchronization] get previous block {} error", consenting_rich_block.block.header.height - 1 ); e })?; self.adapter .verify_proof( ctx.clone(), &previous_block_header, &consenting_rich_block.block.header.proof, ) .await .map_err(|e| { log::error!( "[synchronization]: verify_proof error, previous block header: {:?}, proof: {:?}", previous_block_header, consenting_rich_block.block.header.proof ); e })?; let order_signed_transactions_hash = digest_signed_transactions(&consenting_rich_block.txs)?; if 
order_signed_transactions_hash != consenting_rich_block .block .header .order_signed_transactions_hash { return Err(ConsensusError::InvalidOrderSignedTransactionsHash { expect: order_signed_transactions_hash, actual: consenting_rich_block .block .header .order_signed_transactions_hash .clone(), } .into()); } let inst = Instant::now(); self.commit_block( ctx.clone(), consenting_rich_block.clone(), consenting_proof, sync_status_agent.clone(), ) .await .map_err(|e| { log::error!( "[synchronization]: commit block {} error", consenting_rich_block.block.header.height ); e })?; self.update_status(ctx.clone(), sync_status_agent.clone())?; current_consented_height += 1; common_apm::metrics::consensus::ENGINE_SYNC_BLOCK_COUNTER.inc_by(1 as i64); common_apm::metrics::consensus::ENGINE_SYNC_BLOCK_HISTOGRAM .observe(common_apm::metrics::duration_to_sec(inst.elapsed())); } Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus.sync")] async fn commit_block( &self, ctx: Context, rich_block: RichBlock, proof: Proof, status_agent: StatusAgent, ) -> ProtocolResult<()> { let executor_resp = self .exec_block(ctx.clone(), rich_block.clone(), status_agent.clone()) .await?; let block = &rich_block.block; let block_hash = Hash::digest(block.header.encode_fixed()?); let metadata = self.adapter.get_metadata( ctx.clone(), block.header.state_root.clone(), block.header.height, block.header.timestamp, block.header.proposer.clone(), )?; self.crypto .update(generate_new_crypto_map(metadata.clone())?); self.adapter.set_args( ctx.clone(), metadata.timeout_gap, metadata.cycles_limit, metadata.max_tx_size, ); let pub_keys = metadata .verifier_list .iter() .map(|v| v.pub_key.decode()) .collect(); self.adapter.tag_consensus(ctx.clone(), pub_keys)?; log::info!( "[synchronization]: commit_block, committing block header: {}, committing proof:{:?}", block.header.clone(), proof.clone() ); status_agent.update_by_committed(metadata, block.clone(), block_hash, proof); self.save_chain_data( ctx.clone(), 
rich_block.txs.clone(), executor_resp.receipts.clone(), rich_block.block.clone(), ) .await?; // If there are transactions in the trasnaction pool that have been on chain // after this execution, make sure they are cleaned up. self.adapter .flush_mempool(ctx.clone(), &rich_block.block.ordered_tx_hashes) .await?; Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus.sync", logs = "{'height': 'height'}")] async fn get_rich_block_from_remote( &self, ctx: Context, height: u64, ) -> ProtocolResult { let block = self.get_block_from_remote(ctx.clone(), height).await?; let mut txs = Vec::with_capacity(block.ordered_tx_hashes.len()); for tx_hashes in block.ordered_tx_hashes.chunks(self.sync_txs_chunk_size) { let remote_txs = self .adapter .get_txs_from_remote(ctx.clone(), height, &tx_hashes) .await?; txs.extend(remote_txs); } Ok(RichBlock { block, txs }) } #[muta_apm::derive::tracing_span(kind = "consensus.sync", logs = "{'height': 'height'}")] async fn get_block_from_remote(&self, ctx: Context, height: u64) -> ProtocolResult { self.adapter .get_block_from_remote(ctx.clone(), height) .await } #[muta_apm::derive::tracing_span(kind = "consensus.sync", logs = "{'txs_len': 'txs.len()'}")] async fn save_chain_data( &self, ctx: Context, txs: Vec, receipts: Vec, block: Block, ) -> ProtocolResult<()> { self.adapter .save_signed_txs(ctx.clone(), block.header.height, txs) .await?; self.adapter .save_receipts(ctx.clone(), block.header.height, receipts) .await?; self.adapter .save_proof(ctx.clone(), block.header.proof.clone()) .await?; self.adapter.save_block(ctx.clone(), block).await?; Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus.sync")] pub async fn exec_block( &self, ctx: Context, rich_block: RichBlock, status_agent: StatusAgent, ) -> ProtocolResult { let current_status = status_agent.to_inner(); let cycles_limit = current_status.cycles_limit; let exec_params = ExecutorParams { state_root: current_status.get_latest_state_root(), height: 
rich_block.block.header.height, timestamp: rich_block.block.header.timestamp, cycles_limit, proposer: rich_block.block.header.proposer, }; let resp = self .adapter .sync_exec(ctx.clone(), &exec_params, &rich_block.txs)?; status_agent.update_by_executed(ExecutedInfo::new( ctx, rich_block.block.header.height, rich_block.block.header.order_root, resp.clone(), )); Ok(resp) } async fn init_status_agent(&self) -> ProtocolResult { loop { let current_status = self.status.to_inner(); if current_status.exec_height != current_status.latest_committed_height { Delay::new(Duration::from_millis(WAIT_EXECUTION)).await; } else { break; } } let current_status = self.status.to_inner(); Ok(StatusAgent::new(current_status)) } #[muta_apm::derive::tracing_span( kind = "consensus.sync", logs = "{'remote_height': 'remote_height'}" )] async fn need_sync(&self, ctx: Context, remote_height: u64) -> ProtocolResult { let mut current_height = self.status.to_inner().latest_committed_height; if remote_height == 0 { return Ok(false); } if remote_height <= current_height { return Ok(false); } if current_height == remote_height - 1 { let status = self.status.to_inner(); Delay::new(Duration::from_millis(status.consensus_interval)).await; current_height = self.status.to_inner().latest_committed_height; if current_height == remote_height { return Ok(false); } } let block = self .get_block_from_remote(ctx.clone(), remote_height) .await?; log::debug!( "[synchronization] get block from remote success {:?} ", remote_height ); if block.header.height != remote_height { log::error!("[synchronization]: block that doesn't match is found"); return Ok(false); } Ok(true) } fn update_status(&self, ctx: Context, sync_status_agent: StatusAgent) -> ProtocolResult<()> { let sync_status = sync_status_agent.to_inner(); self.status.replace(sync_status.clone()); self.adapter.update_status( ctx, sync_status.latest_committed_height, sync_status.consensus_interval, sync_status.propose_ratio, sync_status.prevote_ratio, 
sync_status.precommit_ratio, sync_status.brake_ratio, sync_status.validators, )?; log::info!( "[synchronization]: synced block, status: height:{}, exec_height:{}, proof_height:{}", sync_status.latest_committed_height, sync_status.exec_height, sync_status.current_proof.height ); Ok(()) } } ================================================ FILE: core/consensus/src/tests/engine.rs ================================================ use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; use async_trait::async_trait; use futures::lock::Mutex; use overlord::types::{AggregatedSignature, Commit, Proof as OverlordProof}; use overlord::Consensus; use common_crypto::BlsPrivateKey; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ CommonConsensusAdapter, ConsensusAdapter, Context, MessageTarget, MixedTxHashes, NodeInfo, TrustFeedback, }; use protocol::types::{ Address, Block, BlockHeader, Hash, Hex, MerkleRoot, Metadata, Pill, Proof, Receipt, SignedTransaction, Validator, }; use protocol::{Bytes, ProtocolResult}; use crate::engine::ConsensusEngine; use crate::fixed_types::FixedPill; use crate::status::StatusAgent; use crate::util::OverlordCrypto; use crate::wal::{ConsensusWal, SignedTxsWAL}; use super::*; static FULL_TXS_PATH: &str = "./free-space/engine/txs"; static FULL_CONSENSUS_PATH: &str = "./free-space/engine/consensus"; #[tokio::test] async fn test_repetitive_commit() { let init_status = mock_current_status(1); let engine = init_engine(init_status.clone()); let block = mock_block_from_status(&init_status); let res = engine .commit(Context::new(), 11, mock_commit(block.clone())) .await; assert!(res.is_ok()); let status = engine.get_current_status(); let res = engine .commit(Context::new(), 11, mock_commit(block.clone())) .await; assert!(res.is_err()); assert_eq!(status, engine.get_current_status()); } fn mock_commit(block: Block) -> Commit { let pill = Pill { block: block.clone(), propose_hashes: vec![], }; Commit { height: 11, content: 
FixedPill { inner: pill }, proof: OverlordProof { height: 11, round: 0, block_hash: Hash::digest(block.header.encode_fixed().unwrap()).as_bytes(), signature: AggregatedSignature { signature: get_random_bytes(32), address_bitmap: get_random_bytes(10), }, }, } } fn init_engine(init_status: CurrentConsensusStatus) -> ConsensusEngine { ConsensusEngine::new( StatusAgent::new(init_status), mock_node_info(), Arc::new(SignedTxsWAL::new(FULL_TXS_PATH)), Arc::new(MockConsensusAdapter {}), Arc::new(init_crypto()), Arc::new(Mutex::new(())), Arc::new(ConsensusWal::new(FULL_CONSENSUS_PATH)), ) } fn init_crypto() -> OverlordCrypto { let mut priv_key = Vec::new(); priv_key.extend_from_slice(&[0u8; 16]); let mut tmp = hex::decode("45c56be699dca666191ad3446897e0f480da234da896270202514a0e1a587c3f").unwrap(); priv_key.append(&mut tmp); OverlordCrypto::new( BlsPrivateKey::try_from(priv_key.as_ref()).unwrap(), HashMap::new(), std::str::from_utf8(hex::decode("").unwrap().as_ref()) .unwrap() .into(), ) } fn mock_node_info() -> NodeInfo { NodeInfo { self_pub_key: mock_pub_key().decode(), chain_id: mock_hash(), self_address: mock_address(), } } fn mock_metadata() -> Metadata { Metadata { chain_id: mock_hash(), bech32_address_hrp: "muta".to_owned(), common_ref: Hex::from_string("0x703873635a6b51513451".to_string()).unwrap(), timeout_gap: 20, cycles_limit: 600000, cycles_price: 1, interval: 3000, verifier_list: vec![], propose_ratio: 3, prevote_ratio: 3, precommit_ratio: 3, brake_ratio: 3, tx_num_limit: 3, max_tx_size: 3000, } } pub struct MockConsensusAdapter; #[async_trait] impl CommonConsensusAdapter for MockConsensusAdapter { async fn save_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { Ok(()) } async fn save_proof(&self, _ctx: Context, _proof: Proof) -> ProtocolResult<()> { Ok(()) } async fn save_signed_txs( &self, _ctx: Context, _block_height: u64, _signed_txs: Vec, ) -> ProtocolResult<()> { Ok(()) } async fn save_receipts( &self, _ctx: Context, _height: u64, 
_receipts: Vec, ) -> ProtocolResult<()> { Ok(()) } async fn flush_mempool( &self, _ctx: Context, _ordered_tx_hashes: &[Hash], ) -> ProtocolResult<()> { Ok(()) } async fn get_block_by_height(&self, _ctx: Context, _height: u64) -> ProtocolResult { unimplemented!() } async fn get_block_header_by_height( &self, _ctx: Context, _height: u64, ) -> ProtocolResult { unimplemented!() } async fn get_current_height(&self, _ctx: Context) -> ProtocolResult { Ok(10) } async fn get_txs_from_storage( &self, _ctx: Context, _tx_hashes: &[Hash], ) -> ProtocolResult> { unimplemented!() } async fn verify_block_header(&self, _ctx: Context, _block: &Block) -> ProtocolResult<()> { unimplemented!() } async fn verify_proof( &self, _ctx: Context, _block_header: &BlockHeader, _proof: &Proof, ) -> ProtocolResult<()> { Ok(()) } async fn broadcast_height(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> { Ok(()) } fn get_metadata( &self, _context: Context, _state_root: MerkleRoot, _height: u64, _timestamp: u64, _proposer: Address, ) -> ProtocolResult { Ok(mock_metadata()) } fn report_bad(&self, _ctx: Context, _feedback: TrustFeedback) {} fn set_args( &self, _context: Context, _timeout_gap: u64, _cycles_limit: u64, _max_tx_size: u64, ) { } fn tag_consensus(&self, _ctx: Context, _peer_ids: Vec) -> ProtocolResult<()> { Ok(()) } fn verify_proof_signature( &self, _ctx: Context, _block_height: u64, _vote_hash: Bytes, _aggregated_signature_bytes: Bytes, _vote_pubkeys: Vec, ) -> ProtocolResult<()> { Ok(()) } fn verify_proof_weight( &self, _ctx: Context, _block_height: u64, _weight_map: HashMap, _signed_voters: Vec, ) -> ProtocolResult<()> { Ok(()) } } #[async_trait] impl ConsensusAdapter for MockConsensusAdapter { async fn get_txs_from_mempool( &self, _ctx: Context, _height: u64, _cycles_limit: u64, _tx_num_limit: u64, ) -> ProtocolResult { unimplemented!() } async fn sync_txs(&self, _ctx: Context, _txs: Vec) -> ProtocolResult<()> { Ok(()) } async fn get_full_txs( &self, _ctx: Context, _txs: 
&[Hash], ) -> ProtocolResult> { Ok(vec![]) } async fn transmit( &self, _ctx: Context, _msg: Vec, _end: &str, _target: MessageTarget, ) -> ProtocolResult<()> { Ok(()) } async fn execute( &self, _ctx: Context, _chain_id: Hash, _order_root: MerkleRoot, _height: u64, _cycles_price: u64, _proposer: Address, _block_hash: Hash, _signed_txs: Vec, _cycles_limit: u64, _timestamp: u64, ) -> ProtocolResult<()> { Ok(()) } async fn get_last_validators( &self, _ctx: Context, _height: u64, ) -> ProtocolResult> { unimplemented!() } async fn pull_block(&self, _ctx: Context, _height: u64, _end: &str) -> ProtocolResult { unimplemented!() } async fn get_current_height(&self, _ctx: Context) -> ProtocolResult { Ok(10) } async fn verify_txs(&self, _ctx: Context, _height: u64, _txs: &[Hash]) -> ProtocolResult<()> { Ok(()) } } ================================================ FILE: core/consensus/src/tests/mod.rs ================================================ mod engine; mod status; mod synchronization; use rand::random; use protocol::types::{Address, Block, BlockHeader, Hash, Hex, MerkleRoot, Proof, Validator}; use protocol::Bytes; use crate::status::CurrentConsensusStatus; const HEIGHT_TEN: u64 = 10; fn mock_block_from_status(status: &CurrentConsensusStatus) -> Block { let block_header = BlockHeader { chain_id: mock_hash(), height: status.latest_committed_height + 1, exec_height: status.exec_height + 1, prev_hash: status.current_hash.clone(), timestamp: random::(), order_root: mock_hash(), order_signed_transactions_hash: mock_hash(), confirm_root: vec![status.list_confirm_root.first().cloned().unwrap()], state_root: status.list_state_root.first().cloned().unwrap(), receipt_root: vec![status.list_receipt_root.first().cloned().unwrap()], cycles_used: vec![*status.list_cycles_used.first().unwrap()], proposer: mock_address(), proof: mock_proof(status.latest_committed_height), validator_version: 1, validators: mock_validators(4), }; Block { header: block_header, ordered_tx_hashes: vec![], } } 
// Build a consensus status whose executed height trails the committed height
// by `exec_lag` blocks. Numeric parameters are randomized so tests cannot
// accidentally depend on particular values.
// NOTE(review): generic arguments below were stripped by extraction
// (`random::()`, `collect::>()`); they are reconstructed here.
fn mock_current_status(exec_lag: u64) -> CurrentConsensusStatus {
    let state_roots = mock_roots(exec_lag);

    CurrentConsensusStatus {
        cycles_price: random::<u64>(),
        cycles_limit: random::<u64>(),
        latest_committed_height: HEIGHT_TEN,
        exec_height: HEIGHT_TEN - exec_lag,
        current_hash: mock_hash(),
        // When there is no pending execution result, fall back to a random root.
        latest_committed_state_root: state_roots.last().cloned().unwrap_or_else(mock_hash),
        list_confirm_root: mock_roots(exec_lag),
        list_state_root: state_roots,
        list_receipt_root: mock_roots(exec_lag),
        list_cycles_used: (0..exec_lag).map(|_| random::<u64>()).collect::<Vec<_>>(),
        current_proof: mock_proof(HEIGHT_TEN + exec_lag),
        validators: mock_validators(4),
        consensus_interval: random::<u64>(),
        propose_ratio: random::<u64>(),
        prevote_ratio: random::<u64>(),
        precommit_ratio: random::<u64>(),
        brake_ratio: random::<u64>(),
        tx_num_limit: random::<u64>(),
        max_tx_size: random::<u64>(),
    }
}

// Proof for the given height with random signature/bitmap payloads.
fn mock_proof(proof_height: u64) -> Proof {
    Proof {
        height: proof_height,
        round: random::<u64>(),
        signature: get_random_bytes(64),
        bitmap: get_random_bytes(20),
        block_hash: mock_hash(),
    }
}

// `len` random merkle roots.
fn mock_roots(len: u64) -> Vec<MerkleRoot> {
    (0..len).map(|_| mock_hash()).collect::<Vec<_>>()
}

fn mock_hash() -> Hash {
    Hash::digest(get_random_bytes(10))
}

fn mock_address() -> Address {
    let hash = mock_hash();
    Address::from_hash(hash).unwrap()
}

fn get_random_bytes(len: usize) -> Bytes {
    let vec: Vec<u8> = (0..len).map(|_| random::<u8>()).collect();
    Bytes::from(vec)
}

// A fixed, well-formed secp256k1 public key shared by all mocked validators.
fn mock_pub_key() -> Hex {
    Hex::from_string(
        "0x026c184a9016f6f71a234c86b141621f38b68c78602ab06768db4d83682c616004".to_owned(),
    )
    .unwrap()
}

fn mock_validators(len: usize) -> Vec<Validator> {
    (0..len).map(|_| mock_validator()).collect::<Vec<_>>()
}

fn mock_validator() -> Validator {
    Validator {
        pub_key: mock_pub_key().decode(),
        // assumes weights are u32 as in `protocol::types::Validator` — TODO confirm
        propose_weight: random::<u32>(),
        vote_weight: random::<u32>(),
    }
}

================================================ FILE: core/consensus/src/tests/status.rs ================================================
use creep::Context;
use rand::random;

use protocol::fixed_codec::FixedCodec;
use protocol::types::{Hash, Hex, Metadata,
ValidatorExtend}; use crate::status::{CurrentConsensusStatus, ExecutedInfo}; use super::*; #[test] #[should_panic] fn test_update_by_executed() { let mut status = mock_current_status(2); let mut status_clone = status.clone(); let info = mock_executed_info(9); status.update_by_executed(info.clone()); status_clone.exec_height = 9; status_clone.list_cycles_used.push(info.cycles_used); status_clone .list_confirm_root .push(info.confirm_root.clone()); status_clone.list_state_root.push(info.state_root.clone()); status_clone.list_receipt_root.push(info.receipt_root); assert_eq!(status, status_clone); let info = mock_executed_info(9); status.update_by_executed(info); assert_eq!(status, status_clone); let info = mock_executed_info(11); status.update_by_executed(info); } #[test] #[should_panic] fn test_update_by_committed() { let mut status = mock_current_status(2); let status_clone = status.clone(); let block = mock_block_from_status(&status); let metadata = mock_metadata(); let block_hash = Hash::digest(block.encode_fixed().unwrap()); status.update_by_committed( metadata.clone(), block.clone(), block_hash.clone(), block.header.proof.clone(), ); assert_eq!(status.latest_committed_height, block.header.height); assert_eq!(status.current_hash, block_hash); assert_eq!(status.latest_committed_state_root, block.header.state_root); check_metadata(&status, &metadata); check_vec(&status_clone, &status); let mut block = mock_block_from_status(&status); block.header.height += 1; status.update_by_committed( metadata, block.clone(), Hash::digest(block.encode_fixed().unwrap()), block.header.proof, ); } fn check_metadata(status: &CurrentConsensusStatus, metadata: &Metadata) { assert_eq!(status.consensus_interval, metadata.interval); assert_eq!(status.propose_ratio, metadata.propose_ratio); assert_eq!(status.prevote_ratio, metadata.prevote_ratio); assert_eq!(status.precommit_ratio, metadata.precommit_ratio); assert_eq!(status.brake_ratio, metadata.brake_ratio); 
assert_eq!(status.tx_num_limit, metadata.tx_num_limit); assert_eq!(status.max_tx_size, metadata.max_tx_size); } fn check_vec(status_before: &CurrentConsensusStatus, status_after: &CurrentConsensusStatus) { assert!(status_after.list_cycles_used.len() == 1); assert!(status_after.list_confirm_root.len() == 1); assert!(status_after.list_receipt_root.len() == 1); assert!(status_after.list_state_root.len() == 1); assert!(status_before .list_cycles_used .ends_with(&status_after.list_cycles_used)); assert!(status_before .list_confirm_root .ends_with(&status_after.list_confirm_root)); assert!(status_before .list_receipt_root .ends_with(&status_after.list_receipt_root)); assert!(status_before .list_state_root .ends_with(&status_after.list_state_root)); } fn mock_metadata() -> Metadata { Metadata { chain_id: mock_hash(), bech32_address_hrp: "muta".to_owned(), common_ref: Hex::from_string( "0xd654c7a6747fc2e34808c1ebb1510bfb19b443d639f2fab6dc41fce9f634de37".to_string(), ) .unwrap(), timeout_gap: random::(), cycles_limit: random::(), cycles_price: random::(), verifier_list: mock_validators_extend(4), interval: random::(), propose_ratio: random::(), prevote_ratio: random::(), precommit_ratio: random::(), brake_ratio: random::(), tx_num_limit: random::(), max_tx_size: random::(), } } fn mock_validators_extend(len: usize) -> Vec { (0..len) .map(|_| ValidatorExtend { bls_pub_key: Hex::from_string( "0xd654c7a6747fc2e34808c1ebb1510bfb19b443d639f2fab6dc41fce9f634de37".to_string(), ) .unwrap(), pub_key: mock_pub_key(), address: mock_address(), propose_weight: random::(), vote_weight: random::(), }) .collect::>() } fn mock_executed_info(height: u64) -> ExecutedInfo { ExecutedInfo { ctx: Context::new(), exec_height: height, cycles_used: random::(), state_root: mock_hash(), receipt_root: mock_hash(), confirm_root: mock_hash(), } } ================================================ FILE: core/consensus/src/tests/synchronization.rs ================================================ use 
std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::str::FromStr; use std::sync::Arc; use async_trait::async_trait; use bit_vec::BitVec; use futures::executor::block_on; use futures::lock::Mutex; use overlord::types::{AggregatedSignature, AggregatedVote, Node, SignedVote, Vote, VoteType}; use overlord::{extract_voters, Crypto}; use parking_lot::RwLock; use common_crypto::{ BlsCommonReference, BlsPrivateKey, BlsPublicKey, HashValue, PrivateKey, PublicKey, Secp256k1PrivateKey, Secp256k1PublicKey, Signature, ToPublicKey, }; use common_merkle::Merkle; use protocol::fixed_codec::FixedCodec; use protocol::traits::{CommonConsensusAdapter, Synchronization, SynchronizationAdapter}; use protocol::traits::{Context, ExecutorParams, ExecutorResp, ServiceResponse, TrustFeedback}; use protocol::types::{ Address, Block, BlockHeader, Bytes, Hash, Hex, MerkleRoot, Metadata, Proof, RawTransaction, Receipt, ReceiptResponse, SignedTransaction, TransactionRequest, Validator, ValidatorExtend, }; use protocol::ProtocolResult; use crate::status::{CurrentConsensusStatus, StatusAgent}; use crate::synchronization::{OverlordSynchronization, RichBlock}; use crate::util::{convert_hex_to_bls_pubkeys, digest_signed_transactions, OverlordCrypto}; use crate::BlockHeaderField::{PreviousBlockHash, ProofHash, Proposer}; use crate::BlockProofField::{BitMap, HashMismatch, HeightMismatch, WeightNotFound}; use crate::{BlockHeaderField, BlockProofField, ConsensusError}; const PUB_KEY_STR: &str = "02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60"; // Test the blocks gap from 1 to 4. 
// End-to-end synchronization test: a local node holding only the genesis
// block pulls a remotely generated chain, first up to `max_height / 2` and
// then up to `max_height`, verifying headers and proofs along the way.
// The outer loop varies the exec-lag `gap` of the generated chain.
#[test]
fn sync_gap_test() {
    for gap in [1, 2, 3, 4].iter() {
        let key_tool = get_mock_key_tool();

        let max_height = 10 * *gap;

        let list_rich_block = mock_chained_rich_block(max_height, *gap, &key_tool);
        let remote_blocks = gen_remote_block_hashmap(list_rich_block.0.clone());
        let remote_proofs = gen_remote_proof_hashmap(list_rich_block.1.clone());
        let genesis_block = remote_blocks.read().get(&0).unwrap().clone();

        // The local node starts with nothing but the genesis block.
        let local_blocks = Arc::new(RwLock::new(HashMap::new()));
        local_blocks
            .write()
            .insert(genesis_block.header.height, genesis_block.clone());

        let local_transactions = Arc::new(RwLock::new(HashMap::new()));
        let remote_transactions = gen_remote_tx_hashmap(list_rich_block.0.clone());

        let adapter = Arc::new(MockCommonConsensusAdapter::new(
            0,
            local_blocks,
            remote_blocks,
            remote_proofs,
            local_transactions,
            remote_transactions,
            Arc::clone(&key_tool.overlord_crypto),
        ));
        let block_hash = Hash::digest(genesis_block.header.encode_fixed().unwrap());
        let status = CurrentConsensusStatus {
            cycles_price: 1,
            cycles_limit: 300_000_000,
            latest_committed_height: genesis_block.header.height,
            exec_height: genesis_block.header.exec_height,
            current_hash: block_hash,
            list_confirm_root: vec![],
            latest_committed_state_root: genesis_block.header.state_root.clone(),
            list_state_root: vec![],
            list_receipt_root: vec![],
            list_cycles_used: vec![],
            current_proof: genesis_block.header.proof,
            validators: genesis_block.header.validators,
            consensus_interval: 3000,
            propose_ratio: 15,
            prevote_ratio: 10,
            precommit_ratio: 10,
            brake_ratio: 3,
            tx_num_limit: 20000,
            max_tx_size: 1_073_741_824,
        };
        let status_agent = StatusAgent::new(status);
        let lock = Arc::new(Mutex::new(()));
        let sync = OverlordSynchronization::<_>::new(
            5000,
            Arc::clone(&adapter),
            status_agent.clone(),
            Arc::new(mock_crypto()),
            lock,
        );

        // simulate to get a block
        block_on(sync.receive_remote_block(Context::new(), max_height / 2)).unwrap();
        // get the current consensus status to check if the test works fine
        let status = status_agent.to_inner();
        let block =
            block_on(adapter.get_block_by_height(Context::new(), status.latest_committed_height))
                .unwrap();
        assert_sync(status, block);

        block_on(sync.receive_remote_block(Context::new(), max_height)).unwrap();
        let status = status_agent.to_inner();
        let block =
            block_on(adapter.get_block_by_height(Context::new(), status.latest_committed_height))
                .unwrap();
        assert_sync(status, block);

        let status = status_agent.to_inner();
        // status.current_height is consensus-ed height
        assert_eq!(status.latest_committed_height, max_height);
    }
}

/// Shared, lock-protected hash map used by the mock adapter.
/// NOTE(review): the generic parameters were stripped by extraction and are
/// reconstructed here — verify against the repository.
pub type SafeHashMap<K, V> = Arc<RwLock<HashMap<K, V>>>;

/// In-memory stand-in for the consensus adapter: the "local" maps play the
/// role of this node's storage, the "remote" maps the role of peers' data.
pub struct MockCommonConsensusAdapter {
    latest_height:       RwLock<u64>,
    local_blocks:        SafeHashMap<u64, Block>,
    remote_blocks:       SafeHashMap<u64, Block>,
    remote_proofs:       SafeHashMap<u64, Proof>,
    local_transactions:  SafeHashMap<Hash, SignedTransaction>,
    remote_transactions: SafeHashMap<Hash, SignedTransaction>,
    crypto:              Arc<OverlordCrypto>,
}

impl MockCommonConsensusAdapter {
    pub fn new(
        latest_height: u64,
        local_blocks: SafeHashMap<u64, Block>,
        remote_blocks: SafeHashMap<u64, Block>,
        remote_proofs: SafeHashMap<u64, Proof>,
        local_transactions: SafeHashMap<Hash, SignedTransaction>,
        remote_transactions: SafeHashMap<Hash, SignedTransaction>,
        crypto: Arc<OverlordCrypto>,
    ) -> Self {
        Self {
            latest_height: RwLock::new(latest_height),
            local_blocks,
            remote_blocks,
            remote_proofs,
            local_transactions,
            remote_transactions,
            crypto,
        }
    }
}

#[async_trait]
impl SynchronizationAdapter for MockCommonConsensusAdapter {
    fn update_status(
        &self,
        _: Context,
        _: u64,
        _: u64,
        _: u64,
        _: u64,
        _: u64,
        _: u64,
        _: Vec<Validator>,
    ) -> ProtocolResult<()> {
        Ok(())
    }

    // Execution is simulated synchronously by the `exec_txs` helper.
    fn sync_exec(
        &self,
        _: Context,
        params: &ExecutorParams,
        txs: &[SignedTransaction],
    ) -> ProtocolResult<ExecutorResp> {
        Ok(exec_txs(params.height, txs).0)
    }

    /// Pull some blocks from other nodes from `begin` to `end`.
    async fn get_block_from_remote(&self, _: Context, height: u64) -> ProtocolResult<Block> {
        Ok(self.remote_blocks.read().get(&height).unwrap().clone())
    }

    /// Pull signed transactions corresponding to the given hashes from other
    /// nodes.
async fn get_txs_from_remote( &self, _: Context, _: u64, tx_hashes: &[Hash], ) -> ProtocolResult> { let map = self.remote_transactions.read(); let mut txs = vec![]; for hash in tx_hashes.iter() { let tx = map.get(hash).unwrap(); txs.push(tx.clone()) } Ok(txs) } async fn get_proof_from_remote(&self, _: Context, height: u64) -> ProtocolResult { Ok(self.remote_proofs.read().get(&height).unwrap().clone()) } } #[async_trait] impl CommonConsensusAdapter for MockCommonConsensusAdapter { /// Save a block to the database. async fn save_block(&self, _: Context, block: Block) -> ProtocolResult<()> { self.local_blocks.write().insert(block.header.height, block); let mut height = self.latest_height.write(); *height += 1; Ok(()) } async fn save_proof(&self, _: Context, _: Proof) -> ProtocolResult<()> { Ok(()) } /// Save some signed transactions to the database. async fn save_signed_txs( &self, _: Context, _block_height: u64, signed_txs: Vec, ) -> ProtocolResult<()> { let mut map = self.local_transactions.write(); for tx in signed_txs.into_iter() { map.insert(tx.tx_hash.clone(), tx); } Ok(()) } async fn save_receipts(&self, _: Context, _: u64, _: Vec) -> ProtocolResult<()> { Ok(()) } /// Flush the given transactions in the mempool. async fn flush_mempool(&self, _: Context, _: &[Hash]) -> ProtocolResult<()> { Ok(()) } /// Get a block corresponding to the given height. async fn get_block_by_height(&self, _: Context, height: u64) -> ProtocolResult { Ok(self.local_blocks.read().get(&height).unwrap().clone()) } async fn get_block_header_by_height( &self, _ctx: Context, height: u64, ) -> ProtocolResult { Ok(self .local_blocks .read() .get(&height) .unwrap() .header .clone()) } /// Get the current height from storage. 
async fn get_current_height(&self, _: Context) -> ProtocolResult { Ok(*self.latest_height.read()) } async fn get_txs_from_storage( &self, _: Context, tx_hashes: &[Hash], ) -> ProtocolResult> { let map = self.local_transactions.read(); let mut txs = vec![]; for hash in tx_hashes.iter() { let tx = map.get(hash).unwrap(); txs.push(tx.clone()) } Ok(txs) } async fn broadcast_height(&self, _: Context, _: u64) -> ProtocolResult<()> { Ok(()) } fn get_metadata( &self, _context: Context, _state_root: MerkleRoot, _height: u64, _timestamp: u64, _proposer: Address, ) -> ProtocolResult { Ok(Metadata { chain_id: Hash::from_empty(), bech32_address_hrp: "muta".to_owned(), common_ref: Hex::from_string("0x6c747758636859487038".to_string()).unwrap(), timeout_gap: 20, cycles_limit: 9999, cycles_price: 1, interval: 3000, verifier_list: mock_verifier_list(), propose_ratio: 10, prevote_ratio: 10, precommit_ratio: 10, brake_ratio: 10, tx_num_limit: 20000, max_tx_size: 1_073_741_824, }) } fn tag_consensus(&self, _: Context, _: Vec) -> ProtocolResult<()> { Ok(()) } fn report_bad(&self, _ctx: Context, _feedback: TrustFeedback) {} fn set_args( &self, _context: Context, _timeout_gap: u64, _cycles_limit: u64, _max_tx_size: u64, ) { } /// this function verify all info in header except proof and roots async fn verify_block_header(&self, ctx: Context, block: &Block) -> ProtocolResult<()> { let previous_block = self .get_block_by_height(ctx.clone(), block.header.height - 1) .await?; let previous_block_hash = Hash::digest(previous_block.header.encode_fixed()?); if previous_block_hash != block.header.prev_hash { log::error!( "[consensus] verify_block_header, previous_block_hash: {:?}, block.header.prev_hash: {:?}", previous_block_hash, block.header.prev_hash ); return Err( ConsensusError::VerifyBlockHeader(block.header.height, PreviousBlockHash).into(), ); } // the block 0 and 1 's proof is consensus-ed by community if block.header.height > 1u64 && block.header.prev_hash != 
block.header.proof.block_hash { log::error!( "[consensus] verify_block_header, verifying_block : {:?}", block ); return Err(ConsensusError::VerifyBlockHeader(block.header.height, ProofHash).into()); } // verify proposer and validators let previous_metadata = self.get_metadata( ctx, previous_block.header.state_root.clone(), previous_block.header.height, previous_block.header.timestamp, previous_block.header.proposer, )?; let authority_map = previous_metadata .verifier_list .iter() .map(|v| { let address = v.pub_key.decode(); let node = Node { address: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }; (address, node) }) .collect::>(); // check proposer if block.header.height != 0 && !previous_metadata .verifier_list .iter() .any(|v| v.address == block.header.proposer) { log::error!( "[consensus] verify_block_header, block.header.proposer: {:?}, authority_map: {:?}", block.header.proposer, authority_map ); return Err(ConsensusError::VerifyBlockHeader(block.header.height, Proposer).into()); } // check validators for validator in block.header.validators.iter() { let validator_address = Address::from_pubkey_bytes(validator.pub_key.clone()); if !authority_map.contains_key(&validator.pub_key) { log::error!( "[consensus] verify_block_header, validator.address: {:?}, authority_map: {:?}", validator_address, authority_map ); return Err(ConsensusError::VerifyBlockHeader( block.header.height, BlockHeaderField::Validator, ) .into()); } else { let node = authority_map.get(&validator.pub_key).unwrap(); if node.vote_weight != validator.vote_weight || node.propose_weight != validator.vote_weight { log::error!( "[consensus] verify_block_header, validator.address: {:?}, authority_map: {:?}", validator_address, authority_map ); return Err(ConsensusError::VerifyBlockHeader( block.header.height, BlockHeaderField::Weight, ) .into()); } } } Ok(()) } async fn verify_proof( &self, ctx: Context, block_header: &BlockHeader, proof: &Proof, ) -> 
ProtocolResult<()> {
        // the block 0 has no proof, which is consensus-ed by community, not by chain
        if block_header.height == 0 {
            return Ok(());
        };

        if block_header.height != proof.height {
            log::error!(
                "[consensus] verify_proof, block_header.height: {}, proof.height: {}",
                block_header.height,
                proof.height
            );
            return Err(ConsensusError::VerifyProof(
                block_header.height,
                HeightMismatch(block_header.height, proof.height),
            )
            .into());
        }

        // the proof must commit to exactly this header's hash
        let blockhash = Hash::digest(block_header.clone().encode_fixed()?);

        if blockhash != proof.block_hash {
            log::error!(
                "[consensus] verify_proof, blockhash: {:?}, proof.block_hash: {:?}",
                blockhash,
                proof.block_hash
            );
            return Err(ConsensusError::VerifyProof(block_header.height, HashMismatch).into());
        }

        let previous_block = self
            .get_block_by_height(ctx.clone(), block_header.height - 1)
            .await?;

        // the auth_list for the target should comes from previous height
        let metadata = self.get_metadata(
            ctx.clone(),
            previous_block.header.state_root.clone(),
            previous_block.header.height,
            previous_block.header.timestamp,
            previous_block.header.proposer,
        )?;

        let mut authority_list = metadata
            .verifier_list
            .iter()
            .map(|v| Node {
                address:        v.pub_key.decode(),
                propose_weight: v.propose_weight,
                vote_weight:    v.vote_weight,
            })
            .collect::<Vec<Node>>();

        // who actually signed, according to the proof's bitmap
        let signed_voters = extract_voters(&mut authority_list, &proof.bitmap).map_err(|_| {
            log::error!("[consensus] extract_voters fails, bitmap error");
            ConsensusError::VerifyProof(block_header.height, BitMap)
        })?;

        // reconstruct the precommit vote the signature covers
        let vote = Vote {
            height:     proof.height,
            round:      proof.round,
            vote_type:  VoteType::Precommit,
            block_hash: proof.block_hash.as_bytes(),
        };

        let vote_hash = self.crypto.hash(protocol::Bytes::from(rlp::encode(&vote)));

        // only voters present in the bitmap contribute their BLS public keys
        let hex_pubkeys = metadata
            .verifier_list
            .iter()
            .filter_map(|v| {
                if signed_voters.contains(&v.pub_key.decode()) {
                    Some(v.bls_pub_key.clone())
                } else {
                    None
                }
            })
            .collect::<Vec<Hex>>();

        self.verify_proof_signature(
            ctx.clone(),
            block_header.height,
            vote_hash.clone(),
            proof.signature.clone(),
            hex_pubkeys,
)
        .map_err(|e| {
            log::error!("[consensus] verify_proof_signature error, height {}, vote: {:?}, vote_hash:{:?}, sig:{:?}, signed_voter:{:?}",
                block_header.height,
                vote,
                vote_hash,
                proof.signature,
                signed_voters,
            );
            e
        })?;

        // address -> vote weight, for the quorum check below
        let weight_map = authority_list
            .iter()
            .map(|node| (node.address.clone(), node.vote_weight))
            .collect::<HashMap<Bytes, u32>>();

        self.verify_proof_weight(ctx.clone(), block_header.height, weight_map, signed_voters)?;

        Ok(())
    }

    // Decode the hex BLS keys and check the aggregated signature over
    // `vote_hash` against them.
    fn verify_proof_signature(
        &self,
        _ctx: Context,
        block_height: u64,
        vote_hash: Bytes,
        aggregated_signature_bytes: Bytes,
        vote_keys: Vec<Hex>,
    ) -> ProtocolResult<()> {
        let mut pub_keys = Vec::new();
        for hex in vote_keys.into_iter() {
            pub_keys.push(convert_hex_to_bls_pubkeys(hex)?)
        }

        self.crypto
            .inner_verify_aggregated_signature(vote_hash, pub_keys, aggregated_signature_bytes)
            .map_err(|e| {
                log::error!("[consensus] verify_proof_signature error: {}", e);
                ConsensusError::VerifyProof(block_height, BlockProofField::Signature).into()
            })
    }

    // Check that every signed voter is a known validator and that the signed
    // vote weight reaches the BFT quorum.
    fn verify_proof_weight(
        &self,
        _ctx: Context,
        block_height: u64,
        weight_map: HashMap<Bytes, u32>,
        signed_voters: Vec<Bytes>,
    ) -> ProtocolResult<()> {
        let total_validator_weight: u64 = weight_map.iter().map(|pair| u64::from(*pair.1)).sum();

        let mut accumulator = 0u64;
        for signed_voter_address in signed_voters.iter() {
            if weight_map.contains_key(signed_voter_address) {
                let weight = weight_map.get(signed_voter_address).ok_or_else(|| {
                    log::error!(
                        "[consensus] verify_proof_weight, signed_voter_address: {:?}",
                        hex::encode(signed_voter_address)
                    );
                    ConsensusError::VerifyProof(block_height, WeightNotFound)
                })?;
                accumulator += u64::from(*(weight));
            } else {
                log::error!(
                    "[consensus] verify_proof_weight,signed_voter_address: {:?}",
                    hex::encode(signed_voter_address)
                );
                return Err(
                    ConsensusError::VerifyProof(block_height, BlockProofField::Validator).into(),
                );
            }
        }

        // BFT quorum: strictly more than 2/3 of total weight must have signed
        if 3 * accumulator <= 2 * total_validator_weight {
            log::error!(
                "[consensus] verify_proof_weight, accumulator: {}, total: {}",
                accumulator,
                total_validator_weight
            );
            return
Err(ConsensusError::VerifyProof(block_height, BlockProofField::Weight).into());
        }
        Ok(())
    }
}

// Build an `OverlordCrypto` from a fixed BLS private key and no peer keys.
fn mock_crypto() -> OverlordCrypto {
    let priv_key = BlsPrivateKey::try_from(hex::decode("00000000000000000000000000000000d654c7a6747fc2e34808c1ebb1510bfb19b443d639f2fab6dc41fce9f634de37").unwrap().as_ref()).unwrap();
    OverlordCrypto::new(priv_key, HashMap::new(), "muta".into())
}

fn gen_remote_tx_hashmap(list: Vec<RichBlock>) -> SafeHashMap<Hash, SignedTransaction> {
    let mut remote_txs = HashMap::new();

    for rich_block in list.into_iter() {
        for tx in rich_block.txs {
            remote_txs.insert(tx.tx_hash.clone(), tx);
        }
    }

    Arc::new(RwLock::new(remote_txs))
}

fn gen_remote_block_hashmap(list: Vec<RichBlock>) -> SafeHashMap<u64, Block> {
    let mut remote_blocks = HashMap::new();
    for rich_block in list.into_iter() {
        // `rich_block` is owned here, so the block can be moved in directly
        remote_blocks.insert(rich_block.block.header.height, rich_block.block);
    }

    Arc::new(RwLock::new(remote_blocks))
}

fn gen_remote_proof_hashmap(list: Vec<Proof>) -> SafeHashMap<u64, Proof> {
    let mut remote_proof = HashMap::new();
    for proof in list.into_iter() {
        // owned `proof` can be moved in; cloning it was redundant
        remote_proof.insert(proof.height, proof);
    }

    Arc::new(RwLock::new(remote_proof))
}

// Build a chain of `len` blocks on top of genesis; every `gap` blocks the
// pending ones are "executed" and folded into the next header. Returns the
// blocks together with one proof per block.
fn mock_chained_rich_block(len: u64, gap: u64, key_tool: &KeyTool) -> (Vec<RichBlock>, Vec<Proof>) {
    let mut list_rich_block = vec![];
    let mut list_proof = vec![];

    let genesis_rich_block = mock_genesis_rich_block();
    list_rich_block.push(genesis_rich_block.clone());
    // the proof of block 0 is n/a, we just stuff something here
    list_proof.push(genesis_rich_block.clone().block.header.proof);

    let mut last_rich_block = genesis_rich_block;

    let mut current_height = 1;

    let mut temp_rich_block: Vec<RichBlock> = vec![];
    let mut last_proof: Proof = Proof {
        height: 0,
        round: 0,
        block_hash: Hash::from_hex(
            "0x1122334455667788990011223344556677889900112233445566778899001122",
        )
        .unwrap(),
        signature: Default::default(),
        bitmap: Default::default(),
    };

    loop {
        let last_block_hash = Hash::digest(last_rich_block.block.header.encode_fixed().unwrap());
        let last_header = &last_rich_block.block.header;

        let txs = mock_tx_list(3, current_height);
        let
tx_hashes: Vec = txs.iter().map(|tx| tx.tx_hash.clone()).collect(); let order_root = Merkle::from_hashes(tx_hashes.clone()) .get_root_hash() .unwrap(); let order_signed_transactions_hash = digest_signed_transactions(&txs).unwrap(); let mut header = BlockHeader { chain_id: last_header.chain_id.clone(), height: current_height, exec_height: current_height, prev_hash: last_block_hash.clone(), timestamp: 0, order_root, order_signed_transactions_hash, confirm_root: vec![], state_root: Hash::from_empty(), receipt_root: vec![], cycles_used: vec![], proposer: Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(), proof: last_proof, validator_version: 0, validators: vec![Validator { pub_key: Hex::from_string( "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60" .to_owned(), ) .unwrap() .decode(), propose_weight: 5, vote_weight: 5, }], }; if last_header.height != 0 && current_height % gap == 0 { temp_rich_block.iter().for_each(|rich_block| { let height = rich_block.block.header.height; let confirm_root = rich_block.block.header.order_root.clone(); let (exec_resp, receipt_root) = exec_txs(height, &rich_block.txs); header.exec_height = height; header.confirm_root.push(confirm_root); header.state_root = exec_resp.state_root; header.receipt_root.push(receipt_root); header.cycles_used.push(exec_resp.all_cycles_used); }); temp_rich_block.clear(); } else if last_header.height != 0 && header.height != 1 { header.exec_height -= temp_rich_block.len() as u64 + 1; } else if header.height == 1 { header.exec_height -= 1; } let block = Block { header, ordered_tx_hashes: tx_hashes, }; let rich_block = RichBlock { block, txs }; list_rich_block.push(rich_block.clone()); temp_rich_block.push(rich_block.clone()); last_rich_block = rich_block.clone(); let current_block_hash = Hash::digest(rich_block.block.header.encode_fixed().unwrap()); // generate proof for current height and for next block use last_proof = mock_proof(current_block_hash.clone(), 
current_height, 0, &key_tool);
        list_proof.push(last_proof.clone());

        current_height += 1;

        if current_height > len {
            break;
        }
    }

    (list_rich_block, list_proof)
}

// Height-0 block: empty roots, zero-weight validator, placeholder proof.
fn mock_genesis_rich_block() -> RichBlock {
    let header = BlockHeader {
        chain_id: Hash::from_empty(),
        height: 0,
        exec_height: 0,
        prev_hash: Hash::from_empty(),
        timestamp: 0,
        order_root: Hash::from_empty(),
        order_signed_transactions_hash: Hash::from_empty(),
        confirm_root: vec![],
        state_root: Hash::from_empty(),
        receipt_root: vec![],
        cycles_used: vec![],
        proposer: "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705"
            .parse()
            .unwrap(),
        proof: Proof {
            height: 0,
            round: 0,
            block_hash: Hash::from_empty(),
            signature: Bytes::new(),
            bitmap: Bytes::new(),
        },
        validator_version: 0,
        validators: vec![Validator {
            pub_key: Hex::from_string(
                "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60".to_owned(),
            )
            .unwrap()
            .decode(),
            propose_weight: 0,
            vote_weight: 0,
        }],
    };
    let genesis_block = Block {
        header,
        ordered_tx_hashes: vec![],
    };

    RichBlock {
        block: genesis_block,
        txs:   vec![],
    }
}

// Deterministic mock receipt: success, consumes the tx's full cycle limit.
fn get_receipt(tx: &SignedTransaction, height: u64) -> Receipt {
    Receipt {
        state_root: MerkleRoot::from_empty(),
        height,
        tx_hash: tx.tx_hash.clone(),
        cycles_used: tx.raw.cycles_limit,
        events: vec![],
        response: ReceiptResponse {
            service_name: "sync".to_owned(),
            method: "sync_exec".to_owned(),
            response: ServiceResponse::<String> {
                code: 0,
                succeed_data: "ok".to_owned(),
                error_message: "".to_owned(),
            },
        },
    }
}

// gen a lot of txs
fn mock_tx_list(num: usize, height: u64) -> Vec<SignedTransaction> {
    let mut txs = vec![];

    for i in 0..num {
        let raw = RawTransaction {
            chain_id: Hash::from_empty(),
            // nonce derived from the index keeps the tx hashes distinct
            nonce: Hash::digest(Bytes::from(format!("{}", i))),
            timeout: height,
            cycles_price: 1,
            cycles_limit: 1,
            request: TransactionRequest {
                service_name: "test_service".to_owned(),
                method: "test_method".to_owned(),
                payload: "test_payload".to_owned(),
            },
            sender: Address::from_pubkey_bytes(Bytes::from(
                hex::decode(PUB_KEY_STR).unwrap(),
            ))
            .unwrap(),
        };

        let bytes =
raw.encode_fixed().unwrap(); // sign it vividly let hex_privkey = hex::decode("5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a") .unwrap(); let test_privkey = Secp256k1PrivateKey::try_from(hex_privkey.as_ref()).unwrap(); let test_pubkey = test_privkey.pub_key(); let _test_address = Address::from_pubkey_bytes(test_pubkey.to_bytes()).unwrap(); let tx_hash = Hash::digest(bytes); let hash_value = HashValue::try_from(tx_hash.as_bytes().as_ref()) .ok() .unwrap(); let signature = test_privkey.sign_message(&hash_value); let signed_tx = SignedTransaction { raw, tx_hash, pubkey: test_pubkey.to_bytes(), signature: signature.to_bytes(), }; txs.push(signed_tx) } txs } // only the bls_private_key in KeyTool.overlordCrypto.private_key signs the // Vote!!!!!!! fn mock_proof(block_hash: Hash, height: u64, round: u64, key_tool: &KeyTool) -> Proof { let vote = Vote { height, round, vote_type: VoteType::Precommit, block_hash: block_hash.as_bytes(), }; let vote_hash = key_tool .overlord_crypto .hash(Bytes::from(rlp::encode(&vote))); let bls_signature = key_tool.overlord_crypto.sign(vote_hash).unwrap(); let signed_vote = SignedVote { voter: key_tool.signer_node.secp_public_key.to_bytes(), signature: bls_signature, vote: vote.clone(), }; let signed_voter = vec![key_tool.signer_node.secp_public_key.to_bytes()] .iter() .cloned() .collect::>(); // let mut bit_map = BitVec::from_elem(3, false); let mut authority_list: Vec = key_tool .verifier_list .clone() .iter() .map(|v| Node { address: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect::>(); authority_list.sort(); for (index, node) in authority_list.iter().enumerate() { if signed_voter.contains(&node.address) { bit_map.set(index, true); } } let aggregated_signature = AggregatedSignature { signature: key_tool .overlord_crypto .aggregate_signatures(vec![signed_vote.signature], vec![signed_vote.voter]) .unwrap(), address_bitmap: Bytes::from(bit_map.to_bytes()), }; let 
aggregated_vote = AggregatedVote {
        signature: aggregated_signature,
        vote_type: vote.vote_type,
        height,
        round,
        block_hash: block_hash.as_bytes(),
        leader: key_tool.signer_node.secp_public_key.to_bytes(),
    };

    Proof {
        height: aggregated_vote.height,
        round: 0,
        block_hash: Hash::from_bytes(aggregated_vote.block_hash).unwrap(),
        signature: aggregated_vote.signature.signature.clone(),
        bitmap: aggregated_vote.signature.address_bitmap,
    }
}

// Fold mock receipts for `txs` into an `ExecutorResp` plus the receipt root.
fn exec_txs(height: u64, txs: &[SignedTransaction]) -> (ExecutorResp, MerkleRoot) {
    let mut receipts = vec![];
    let mut all_cycles_used = 0;

    for tx in txs.iter() {
        let receipt = get_receipt(tx, height);
        all_cycles_used += receipt.cycles_used;
        receipts.push(receipt);
    }

    let receipt_root = Merkle::from_hashes(
        receipts
            .iter()
            .map(|r| Hash::digest(r.to_owned().encode_fixed().unwrap()))
            .collect::<Vec<Hash>>(),
    )
    .get_root_hash()
    .unwrap_or_else(Hash::from_empty);

    (
        ExecutorResp {
            receipts,
            all_cycles_used,
            state_root: MerkleRoot::from_empty(),
        },
        receipt_root,
    )
}

// secp256k1 keypair of the one node that actually signs in these tests.
#[derive(Clone)]
struct SignerNode {
    secp_private_key: Secp256k1PrivateKey,
    secp_public_key:  Secp256k1PublicKey,
}

impl SignerNode {
    pub fn new(secp_private_key: Secp256k1PrivateKey, secp_public_key: Secp256k1PublicKey) -> Self {
        SignerNode {
            secp_private_key,
            secp_public_key,
        }
    }
}

// Bundle of signer keys, BLS crypto and the validator list used by mocks.
struct KeyTool {
    signer_node:     SignerNode,
    overlord_crypto: Arc<OverlordCrypto>,
    verifier_list:   Vec<ValidatorExtend>,
}

impl KeyTool {
    pub fn new(
        signer_node: SignerNode,
        overlord_crypto: Arc<OverlordCrypto>,
        verifier_list: Vec<ValidatorExtend>,
    ) -> Self {
        KeyTool {
            signer_node,
            overlord_crypto,
            verifier_list,
        }
    }
}

fn get_mock_key_tool() -> KeyTool {
    let hex_privkey =
        hex::decode("5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a").unwrap();
    let secp_privkey = Secp256k1PrivateKey::try_from(hex_privkey.as_ref()).unwrap();
    let secp_pubkey: Secp256k1PublicKey = secp_privkey.pub_key();
    let signer_node = SignerNode::new(secp_privkey, secp_pubkey);

    // generate BLS/OverlordCrypto: BLS key = 16 zero bytes ++ secp privkey
    let mut bls_priv_key = Vec::new();
    bls_priv_key.extend_from_slice(&[0u8; 16]);
    let mut tmp =
hex::decode("5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a").unwrap();
    bls_priv_key.append(&mut tmp);
    let bls_priv_key = BlsPrivateKey::try_from(bls_priv_key.as_ref()).unwrap();

    let (bls_pub_keys, common_ref) = get_mock_public_keys_and_common_ref();

    let mock_crypto = OverlordCrypto::new(bls_priv_key, bls_pub_keys, common_ref);

    KeyTool::new(signer_node, Arc::new(mock_crypto), mock_verifier_list())
}

// secp pubkey -> BLS pubkey table for the three mock validators, plus the
// BLS common reference string.
fn get_mock_public_keys_and_common_ref() -> (HashMap<Bytes, BlsPublicKey>, BlsCommonReference) {
    let mut bls_pub_keys: HashMap<Bytes, BlsPublicKey> = HashMap::new();

    // weight = 5
    let bls_hex = Hex::from_string("0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724".to_string()
    ).unwrap();
    let bls_hex = hex::decode(bls_hex.as_string_trim0x()).unwrap();
    bls_pub_keys.insert(
        Hex::from_string(
            "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60".to_owned(),
        )
        .unwrap()
        .decode(),
        BlsPublicKey::try_from(bls_hex.as_ref()).unwrap(),
    );

    // weight = 1
    let bls_hex = Hex::from_string("0x0418e16bd67ce0b58a575f506967706be733c96feef19a06bb37d510000d89905f2f61b7da4d831cb1bb01e2f99833362602a0a252dfd1e95c75c1eadb0db220e3722c9a077b730e7f6cec5f4a55bfc9a4d88db3e6c27684aa8335456824070501".to_string()
    ).unwrap();
    let bls_hex = hex::decode(bls_hex.as_string_trim0x()).unwrap();
    bls_pub_keys.insert(
        Hex::from_string(
            "0x03dbd1dbf3835efb4ec34a360ee671ee1d22425425368edfc5b9ffafc812e86200".to_owned(),
        )
        .unwrap()
        .decode(),
        BlsPublicKey::try_from(bls_hex.as_ref()).unwrap(),
    );

    // weight = 1
    let bls_hex = Hex::from_string("0x040944276f414c46330227f2c0c5a998aba3d400ed19cfc2d31d3e7fcc442ce9f91ea86e172dc3c1b6cedc364bd52ba1cf074529e52337cd80ab32a196a3d42ab46eee25120b44fdd2b5c4268bf3b84c72d068ea83d0530a5461dc30b6a63a60e9".to_string()
    ).unwrap();
    let bls_hex = hex::decode(bls_hex.as_string_trim0x()).unwrap();
    bls_pub_keys.insert(
        Hex::from_string(
"0x03cba4ae147eb24891d78c9527798577419b7db913b4b03ba548c28f40c5841166".to_owned(),
        )
        .unwrap()
        .decode(),
        BlsPublicKey::try_from(bls_hex.as_ref()).unwrap(),
    );

    // common ref is the ASCII decoding of these hex bytes
    let hex_common_ref = hex::decode("6c747758636859487038").unwrap();
    let common_ref: BlsCommonReference =
        std::str::from_utf8(hex_common_ref.as_ref()).unwrap().into();

    (bls_pub_keys, common_ref)
}

fn mock_verifier_list() -> Vec<ValidatorExtend> {
    vec![
        ValidatorExtend {
            bls_pub_key: Hex::from_string("0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724".to_owned()).unwrap(),
            pub_key: Hex::from_string("0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60".to_owned()).unwrap(),
            address: Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(),
            propose_weight: 5,
            vote_weight: 5,
        },
        ValidatorExtend {
            bls_pub_key: Hex::from_string("0x0418e16bd67ce0b58a575f506967706be733c96feef19a06bb37d510000d89905f2f61b7da4d831cb1bb01e2f99833362602a0a252dfd1e95c75c1eadb0db220e3722c9a077b730e7f6cec5f4a55bfc9a4d88db3e6c27684aa8335456824070501".to_owned()).unwrap(),
            pub_key: Hex::from_string("0x03dbd1dbf3835efb4ec34a360ee671ee1d22425425368edfc5b9ffafc812e86200".to_owned()).unwrap(),
            address: Address::from_str("muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p").unwrap(),
            propose_weight: 1,
            vote_weight: 1,
        },
        ValidatorExtend {
            bls_pub_key: Hex::from_string("0x040944276f414c46330227f2c0c5a998aba3d400ed19cfc2d31d3e7fcc442ce9f91ea86e172dc3c1b6cedc364bd52ba1cf074529e52337cd80ab32a196a3d42ab46eee25120b44fdd2b5c4268bf3b84c72d068ea83d0530a5461dc30b6a63a60e9".to_owned()).unwrap(),
            pub_key: Hex::from_string("0x03cba4ae147eb24891d78c9527798577419b7db913b4b03ba548c28f40c5841166".to_owned()).unwrap(),
            address: Address::from_str("muta1h99h6f54vytatam3ckftrmvcdpn4jlmnwm6hl0").unwrap(),
            propose_weight: 1,
            vote_weight: 1,
        },
    ]
}

#[rustfmt::skip]
// NOTE(review): the `//` markers of this reference key listing were detached
// from their content at line boundaries in the damaged extract; re-commented.
// {
//     "common_ref": "0x6c747758636859487038",
//     "keypairs": [
//         {
//             "index": 1,
//             "private_key": "0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a",
//             "public_key": "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60",
//             "address": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705",
//             "peer_id": "QmTEJkB5QKWsEq37huryZZfVvqBKb54sHnKn9TQcA6j3n9",
//             "bls_public_key": "0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724"
//         },
//         {
//             "index": 2,
//             "private_key": "0x8dfbd3c689308d29c058cce163984a2ae8d5fc5191ce6b1e18bd1d7b95a8c632",
//             "public_key": "0x03dbd1dbf3835efb4ec34a360ee671ee1d22425425368edfc5b9ffafc812e86200",
//             "address": "muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p",
//             "peer_id": "QmaEX2TxiC2YJufqcHRigVpnoxahX3hdR1gsFjD5Yf7K1Z",
//             "bls_public_key": "0x0418e16bd67ce0b58a575f506967706be733c96feef19a06bb37d510000d89905f2f61b7da4d831cb1bb01e2f99833362602a0a252dfd1e95c75c1eadb0db220e3722c9a077b730e7f6cec5f4a55bfc9a4d88db3e6c27684aa8335456824070501"
//         },
//         {
//             "index": 3,
//             "private_key": "0xfc659f0ed09a4ba0d2d1836af7520d1a050a7739d598dc98517bbbe7a2e38124",
//             "public_key": "0x03cba4ae147eb24891d78c9527798577419b7db913b4b03ba548c28f40c5841166",
//             "address": "muta1h99h6f54vytatam3ckftrmvcdpn4jlmnwm6hl0",
//             "peer_id": "QmbRmcYD3j2zMr27C6Ga2Bo5xB9t37NyAt36cSvUGYXE2B",
//             "bls_public_key": "0x040944276f414c46330227f2c0c5a998aba3d400ed19cfc2d31d3e7fcc442ce9f91ea86e172dc3c1b6cedc364bd52ba1cf074529e52337cd80ab32a196a3d42ab46eee25120b44fdd2b5c4268bf3b84c72d068ea83d0530a5461dc30b6a63a60e9"
//         },
//         {
//             "index": 4,
//             "private_key": "0x7c01d6539419cffc78ab0779dabe88fad3f70c20ef47a562ac4ba5b7bd704b8e",
//             "public_key": "0x0245a0c291f56c2c5751db1c0bf1ed986e703d29a0fe023df770fe92c7c2347316",
//             "address": "muta16xukzz73l5r6vulk9q697tave8c5mfu33mwud6",
//             "peer_id": "QmeqYprgrXwxzLP7qAFiiJ3Kfi3F6H9PPH2qPCEHr9cRYW",
//             "bls_public_key": "0x041342e9a35278b298a67006cd98d663053e3f7eb72a08ffe9835074e430b2112a866c1c8d981edcd793cb16d459fc952b0464007d876355eea671e74727588bae69740c6a0b49d8142b7b0821a78acd34b4d8012b9ef69444a476e03d5fea5330"
//         }
//     ]
// }

// Check the consensus status agrees with the latest committed block.
fn assert_sync(status: CurrentConsensusStatus, latest_block: Block) {
    let exec_gap = latest_block.header.height - latest_block.header.exec_height;

    assert_eq!(status.latest_committed_height, latest_block.header.height);
    assert_eq!(status.exec_height, latest_block.header.height);
    assert_eq!(status.current_proof.height, status.latest_committed_height);
    assert_eq!(status.list_confirm_root.len(), exec_gap as usize);
    assert_eq!(status.list_cycles_used.len(), exec_gap as usize);
    assert_eq!(status.list_receipt_root.len(), exec_gap as usize);
}

================================================ FILE: core/consensus/src/util.rs ================================================
use std::collections::HashMap;
use std::convert::TryFrom;
use std::error::Error;
use std::time::{SystemTime, UNIX_EPOCH};

use bytes::buf::BufMut;
use bytes::BytesMut;
use overlord::Crypto;
use parking_lot::RwLock;

use crate::ConsensusError;
use common_crypto::{
    BlsCommonReference, BlsPrivateKey, BlsPublicKey, BlsSignature, BlsSignatureVerify, HashValue,
    PrivateKey, Signature,
};
use protocol::fixed_codec::FixedCodec;
use protocol::traits::Context;
use protocol::types::{Address, Hash, Hex, MerkleRoot, SignedTransaction};
use protocol::{Bytes, ProtocolError, ProtocolResult};

/// Current unix time in milliseconds.
pub fn time_now() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64
}

/// BLS-backed implementation of overlord's `Crypto` trait: one local signing
/// key plus a (swappable) table mapping voter pubkey bytes to BLS pubkeys.
pub struct OverlordCrypto {
    private_key: BlsPrivateKey,
    addr_pubkey: RwLock<HashMap<Bytes, BlsPublicKey>>,
    common_ref:  BlsCommonReference,
}

impl Crypto for OverlordCrypto {
    fn hash(&self, msg: Bytes) -> Bytes {
        Hash::digest(msg).as_bytes()
    }

    fn sign(&self, hash: Bytes) -> Result<Bytes, Box<dyn Error>> {
        let hash = HashValue::try_from(hash.as_ref()).map_err(|_| {
            ProtocolError::from(ConsensusError::Other(
                "failed to convert hash value".to_string(),
            ))
        })?;
let sig = self.private_key.sign_message(&hash); Ok(sig.to_bytes()) } fn verify_signature( &self, signature: Bytes, hash: Bytes, voter: Bytes, ) -> Result<(), Box> { let map = self.addr_pubkey.read(); let hash = HashValue::try_from(hash.as_ref()).map_err(|_| { ProtocolError::from(ConsensusError::Other( "failed to convert hash value".to_string(), )) })?; let pub_key = map.get(&voter).ok_or_else(|| { ProtocolError::from(ConsensusError::Other("lose public key".to_string())) })?; let signature = BlsSignature::try_from(signature.as_ref()) .map_err(|e| ProtocolError::from(ConsensusError::CryptoErr(Box::new(e))))?; signature .verify(&hash, &pub_key, &self.common_ref) .map_err(|e| ProtocolError::from(ConsensusError::CryptoErr(Box::new(e))))?; Ok(()) } fn aggregate_signatures( &self, signatures: Vec, voters: Vec, ) -> Result> { if signatures.len() != voters.len() { return Err(ProtocolError::from(ConsensusError::Other( "signatures length does not match voters length".to_string(), )) .into()); } let map = self.addr_pubkey.read(); let mut sigs_pubkeys = Vec::with_capacity(signatures.len()); for (sig, addr) in signatures.iter().zip(voters.iter()) { let signature = BlsSignature::try_from(sig.as_ref()) .map_err(|e| ProtocolError::from(ConsensusError::CryptoErr(Box::new(e))))?; let pub_key = map.get(addr).ok_or_else(|| { ProtocolError::from(ConsensusError::Other("lose public key".to_string())) })?; sigs_pubkeys.push((signature, pub_key.to_owned())); } let sig = BlsSignature::combine(sigs_pubkeys); Ok(sig.to_bytes()) } fn verify_aggregated_signature( &self, aggregated_signature: Bytes, hash: Bytes, voters: Vec, ) -> Result<(), Box> { let map = self.addr_pubkey.read(); let mut pub_keys = Vec::new(); for addr in voters.iter() { let pub_key = map.get(addr).ok_or_else(|| { ProtocolError::from(ConsensusError::Other("lose public key".to_string())) })?; pub_keys.push(pub_key.clone()); } self.inner_verify_aggregated_signature(hash, pub_keys, aggregated_signature)?; Ok(()) } } impl 
OverlordCrypto { pub fn new( private_key: BlsPrivateKey, pubkey_to_bls_pubkey: HashMap, common_ref: BlsCommonReference, ) -> Self { OverlordCrypto { addr_pubkey: RwLock::new(pubkey_to_bls_pubkey), private_key, common_ref, } } pub fn update(&self, new_addr_pubkey: HashMap) { let mut map = self.addr_pubkey.write(); *map = new_addr_pubkey; } pub fn inner_verify_aggregated_signature( &self, hash: Bytes, pub_keys: Vec, signature: Bytes, ) -> ProtocolResult<()> { let aggregate_key = BlsPublicKey::aggregate(pub_keys); let aggregated_signature = BlsSignature::try_from(signature.as_ref()) .map_err(|e| ProtocolError::from(ConsensusError::CryptoErr(Box::new(e))))?; let hash = HashValue::try_from(hash.as_ref()).map_err(|_| { ProtocolError::from(ConsensusError::Other( "failed to convert hash value".to_string(), )) })?; aggregated_signature .verify(&hash, &aggregate_key, &self.common_ref) .map_err(|e| ProtocolError::from(ConsensusError::CryptoErr(Box::new(e))))?; Ok(()) } } #[derive(Clone, Debug)] pub struct ExecuteInfo { pub ctx: Context, pub height: u64, pub chain_id: Hash, pub block_hash: Hash, pub signed_txs: Vec, pub order_root: MerkleRoot, pub cycles_price: u64, pub proposer: Address, pub timestamp: u64, pub cycles_limit: u64, } pub fn check_list_roots(cache_roots: &[T], block_roots: &[T]) -> bool { block_roots.len() <= cache_roots.len() && cache_roots .iter() .zip(block_roots.iter()) .all(|(c_root, e_root)| c_root == e_root) } pub fn digest_signed_transactions(signed_txs: &[SignedTransaction]) -> ProtocolResult { if signed_txs.is_empty() { return Ok(Hash::from_empty()); } let mut list_bytes = BytesMut::new(); for signed_tx in signed_txs.iter() { let bytes = signed_tx.encode_fixed()?; list_bytes.put(bytes); } Ok(Hash::digest(list_bytes.freeze())) } pub fn convert_hex_to_bls_pubkeys(hex: Hex) -> ProtocolResult { let hex_pubkey = hex::decode(hex.as_string_trim0x()) .map_err(|e| ConsensusError::Other(format!("from hex error {:?}", e)))?; let ret = 
BlsPublicKey::try_from(hex_pubkey.as_ref()) .map_err(|e| ConsensusError::CryptoErr(Box::new(e)))?; Ok(ret) } #[cfg(test)] mod tests { use super::*; #[test] fn test_bls_amcl() { let private_keys = vec![ hex::decode("000000000000000000000000000000001abd6ffdb44427d9e1fcb6f84e7fe7d98f2b5b205b30a94992ec24d94bb0c970").unwrap(), hex::decode("00000000000000000000000000000000320b11d7c1ae66fdad1b4a75221244ae2d84903d3548c581d7d30dc135aac817").unwrap(), hex::decode("000000000000000000000000000000006a41e900d0426e615ca9d9393e6792baf9bda4398d5d407e59f77cb6c6f393cc").unwrap(), hex::decode("00000000000000000000000000000000125d81e0eb0a9c3746d868bf3b4f07760fdd430daded41d92f53b4e484ef3415").unwrap(), ]; let public_keys = vec![ hex::decode("041054fe9a65be0891094ed37fb3655e3ffb12353bc0a1b4f8673b52ad65d1ca481780cf7e988eb8dcdc05d8352f03605b0d11afb2525b3f1b55ec694509248bcfead39cbb292725d710e2a509c77ed051d1d49e15e429cf6d12b9be7c02179612").unwrap(), hex::decode("040c15c82ed07dc866ab7c3af3a070eb4340ac0439bf12bb49cbed5797d52707e009f7c17414777b0213b9a55c8a5c08290ce40c366d59322db418b7ff41277090bd25614174763c9fd725ede1f65f3e61ca9acdb35f59e33d556e738add14d536").unwrap(), hex::decode("040b3118acefdfbb11ded262a7f3c90dfca4fbc0200a92b4f6bb80210ab85e39f79458f7d47f7cb06864df0571e7591a4e0858df0b52a4c3ae19ae3adc32e1da0ec4cbdca108365ee433becdb1ccebb1b339647788dfad94ebae1cbd770fcfa4e5").unwrap(), hex::decode("040709f204e3ec5b8bdd9f2bb6edc9cb1704fc1e4952661ba7532ea8e37f3b159b8d41987ee6707d32bdf494e2deb00b7f049a4670a5ce1ad8e429fcacc5bbc69cb03b71a7f1d831d0b47dda5e62642d420ff0a545950cb1db19d42fe04e2c91d2").unwrap(), ]; let msg = Hash::digest(Bytes::from("muta-consensus")); let hash = HashValue::try_from(msg.as_bytes().as_ref()).unwrap(); let mut sigs_and_pub_keys = Vec::new(); for i in 0..3 { let sig = BlsPrivateKey::try_from(private_keys[i].as_ref()) .unwrap() .sign_message(&hash); let pub_key = BlsPublicKey::try_from(public_keys[i].as_ref()).unwrap(); sigs_and_pub_keys.push((sig, pub_key)); } let signature = 
BlsSignature::combine(sigs_and_pub_keys.clone()); let aggregate_key = BlsPublicKey::aggregate( sigs_and_pub_keys .iter() .map(|s| s.1.clone()) .collect::>(), ); let res = signature.verify(&hash, &aggregate_key, &"muta".into()); println!("{:?}", res); assert!(res.is_ok()); } #[test] fn test_aggregate_pubkeys_order() { let public_keys = vec![ hex::decode("041054fe9a65be0891094ed37fb3655e3ffb12353bc0a1b4f8673b52ad65d1ca481780cf7e988eb8dcdc05d8352f03605b0d11afb2525b3f1b55ec694509248bcfead39cbb292725d710e2a509c77ed051d1d49e15e429cf6d12b9be7c02179612").unwrap(), hex::decode("040c15c82ed07dc866ab7c3af3a070eb4340ac0439bf12bb49cbed5797d52707e009f7c17414777b0213b9a55c8a5c08290ce40c366d59322db418b7ff41277090bd25614174763c9fd725ede1f65f3e61ca9acdb35f59e33d556e738add14d536").unwrap(), hex::decode("040b3118acefdfbb11ded262a7f3c90dfca4fbc0200a92b4f6bb80210ab85e39f79458f7d47f7cb06864df0571e7591a4e0858df0b52a4c3ae19ae3adc32e1da0ec4cbdca108365ee433becdb1ccebb1b339647788dfad94ebae1cbd770fcfa4e5").unwrap(), hex::decode("040709f204e3ec5b8bdd9f2bb6edc9cb1704fc1e4952661ba7532ea8e37f3b159b8d41987ee6707d32bdf494e2deb00b7f049a4670a5ce1ad8e429fcacc5bbc69cb03b71a7f1d831d0b47dda5e62642d420ff0a545950cb1db19d42fe04e2c91d2").unwrap(), ]; let mut pub_keys = public_keys .into_iter() .map(|pk| BlsPublicKey::try_from(pk.as_ref()).unwrap()) .collect::>(); let pk_1 = BlsPublicKey::aggregate(pub_keys.clone()); pub_keys.reverse(); let pk_2 = BlsPublicKey::aggregate(pub_keys); assert_eq!(pk_1, pk_2); } #[test] fn test_zip_roots() { let roots_1 = vec![1, 2, 3, 4, 5]; let roots_2 = vec![1, 2, 3]; let roots_3 = vec![]; let roots_4 = vec![1, 2]; let roots_5 = vec![3, 4, 5, 6, 8]; assert!(check_list_roots(&roots_1, &roots_2)); assert!(!check_list_roots(&roots_3, &roots_2)); assert!(!check_list_roots(&roots_4, &roots_2)); assert!(!check_list_roots(&roots_5, &roots_2)); } #[test] fn test_convert_from_hex() { let hex_str = 
"0x04188ef9488c19458a963cc57b567adde7db8f8b6bec392d5cb7b67b0abc1ed6cd966edc451f6ac2ef38079460eb965e890d1f576e4039a20467820237cda753f07a8b8febae1ec052190973a1bcf00690ea8fc0168b3fbbccd1c4e402eda5ef22"; assert!( convert_hex_to_bls_pubkeys(Hex::from_string(String::from(hex_str)).unwrap()).is_ok() ); } } ================================================ FILE: core/consensus/src/wal.rs ================================================ use std::fs; use std::io::{ErrorKind, Read, Write}; use std::path::{Path, PathBuf}; use common_apm::muta_apm; use protocol::codec::ProtocolCodecSync; use protocol::types::{Bytes, Hash, SignedTransaction}; use protocol::ProtocolResult; use crate::fixed_types::FixedSignedTxs; use crate::ConsensusError; use bytes::{BufMut, BytesMut}; use creep::Context; use std::str::FromStr; use std::time::SystemTime; #[derive(Debug)] pub struct SignedTxsWAL { path: PathBuf, } impl SignedTxsWAL { pub fn new>(path: P) -> Self { if !path.as_ref().exists() { fs::create_dir_all(&path).expect("Failed to create wal directory"); } SignedTxsWAL { path: path.as_ref().to_path_buf(), } } pub fn save( &self, height: u64, ordered_signed_transactions_hash: Hash, txs: Vec, ) -> ProtocolResult<()> { let mut wal_path = self.path.clone(); wal_path.push(height.to_string()); if !wal_path.exists() { fs::create_dir(&wal_path).map_err(ConsensusError::WALErr)?; } wal_path.push(ordered_signed_transactions_hash.as_hex()); wal_path.set_extension("txt"); let mut wal_file = match fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(wal_path) { Ok(file) => file, Err(err) => { if err.kind() == ErrorKind::AlreadyExists { return Ok(()); } else { return Err(ConsensusError::WALErr(err).into()); } } }; let data = FixedSignedTxs::new(txs).encode_sync()?; wal_file .write_all(data.as_ref()) .map_err(ConsensusError::WALErr)?; Ok(()) } pub fn available_height(&self) -> ProtocolResult> { let dir_path = self.path.clone(); let mut availables = vec![]; for item in 
fs::read_dir(dir_path).map_err(ConsensusError::WALErr)? { let item = item.map_err(ConsensusError::WALErr)?; if item.path().is_dir() { availables.push(item.file_name().to_str().unwrap().parse().unwrap()) } } Ok(availables) } pub fn remove_all(&self) -> ProtocolResult<()> { for height in self.available_height()? { self.remove(height)? } Ok(()) } pub fn load( &self, height: u64, ordered_signed_transactions_hash: Hash, ) -> ProtocolResult> { let mut file_path = self.path.clone(); file_path.push(height.to_string()); file_path.push(ordered_signed_transactions_hash.as_hex()); file_path.set_extension("txt"); self.recover_stxs(file_path) } pub fn load_by_height(&self, height: u64) -> Vec { let mut dir = self.path.clone(); dir.push(height.to_string()); let dir = if let Ok(res) = fs::read_dir(dir) { res } else { return Vec::new(); }; let mut ret = Vec::new(); for entry in dir { if let Ok(file_dir) = entry { if let Ok(mut stxs) = self.recover_stxs(file_dir.path()) { ret.append(&mut stxs); } } } ret } pub fn remove(&self, committed_height: u64) -> ProtocolResult<()> { for entry in fs::read_dir(&self.path).map_err(ConsensusError::WALErr)? { let folder = entry.map_err(ConsensusError::WALErr)?.path(); let folder_name = folder .file_stem() .ok_or_else(|| ConsensusError::Other("file stem error".to_string()))? 
.to_os_string() .clone(); let folder_name = folder_name.into_string().map_err(|err| { ConsensusError::Other(format!("transfer os string to string error {:?}", err)) })?; let height = folder_name.parse::().map_err(|err| { ConsensusError::Other(format!("parse folder name {:?} error {:?}", folder, err)) })?; if height <= committed_height { fs::remove_dir_all(folder).map_err(ConsensusError::WALErr)?; } } Ok(()) } fn recover_stxs(&self, file_path: PathBuf) -> ProtocolResult> { let mut read_buf = Vec::new(); let mut file = fs::File::open(&file_path).map_err(ConsensusError::WALErr)?; let _ = file .read_to_end(&mut read_buf) .map_err(ConsensusError::WALErr)?; let txs = FixedSignedTxs::decode_sync(Bytes::from(read_buf))?; Ok(txs.inner) } } #[derive(Debug)] pub struct ConsensusWal { path: PathBuf, } impl ConsensusWal { pub fn new>(path: P) -> Self { if !path.as_ref().exists() { fs::create_dir_all(&path).expect("Failed to create wal directory"); } ConsensusWal { path: path.as_ref().to_path_buf(), } } #[muta_apm::derive::tracing_span(kind = "consensus_wal")] pub fn update_overlord_wal(&self, ctx: Context, info: Bytes) -> ProtocolResult<()> { // 1st, make sure the dir exists let dir_path = self.path.clone(); if !dir_path.exists() { fs::create_dir(&dir_path).map_err(ConsensusError::WALErr)?; } // 2nd, write info into file let check_sum = Hash::digest(info.clone()); let mut content = BytesMut::new(); content.put(check_sum.as_bytes()); content.put(info); let (data_path, timestamp) = { loop { let timestamp = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .map_err(ConsensusError::SystemTime)?; let timestamp = timestamp.as_millis(); let mut data_path = dir_path.clone(); data_path.push(timestamp.to_string()); if !data_path.exists() { break (data_path, timestamp); } } }; let mut data_file = match fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(data_path) { Ok(file) => file, Err(err) => { if err.kind() == ErrorKind::AlreadyExists { return Ok(()); } else { 
return Err(ConsensusError::WALErr(err).into()); } } }; data_file .write_all(content.as_ref()) .map_err(ConsensusError::WALErr)?; // 3rd, we can safely clean other old wal files for item in fs::read_dir(dir_path).map_err(ConsensusError::WALErr)? { let item = item.map_err(ConsensusError::WALErr)?; let file_name = item .file_name() .to_str() .ok_or(ConsensusError::FileNameTimestamp)? .to_owned(); let file_name_timestamp = u128::from_str(file_name.as_str()) .map_err(|e| ConsensusError::FileNameTimestamp)?; if file_name_timestamp < timestamp { fs::remove_file(item.path()).map_err(ConsensusError::WALErr)?; } } Ok(()) } #[muta_apm::derive::tracing_span(kind = "consensus_wal")] pub fn load_overlord_wal(&self, ctx: Context) -> ProtocolResult { // 1st, let dir_path = self.path.clone(); if !dir_path.exists() { return Err(ConsensusError::ConsensusWalDirNotExist.into()); } // 2 read all log files and sort by timestamp in their names let files = fs::read_dir(dir_path.clone()).map_err(ConsensusError::WALErr)?; let mut file_names_timestamps = files .filter_map(|item| { let item = item.ok()?; let file_name = item.file_name(); let file_name = file_name.to_str()?; let file_name_timestamp = u128::from_str(file_name).ok()?; Some(file_name_timestamp) }) .collect::>(); file_names_timestamps.sort_by_key(|&b| std::cmp::Reverse(b)); // 3rd, get a latest and valid wal if possible let mut index = 0; let content = loop { if index >= file_names_timestamps.len() { break None; } let file_name_timestamp = file_names_timestamps[index]; let mut log_path = dir_path.clone(); log_path.push(file_name_timestamp.to_string()); let mut read_buf = Vec::new(); let mut file = fs::File::open(&log_path).map_err(ConsensusError::WALErr)?; let res = file.read_to_end(&mut read_buf); if res.is_err() { continue; } let mut info = Bytes::from(read_buf); if info.len() < Hash::default().as_bytes().len() { continue; } let content = info.split_off(Hash::default().as_bytes().len()); if info == 
Hash::digest(content.clone()).as_bytes() { break Some(content); } else { index += 1; } }; content.ok_or_else(|| ConsensusError::ConsensusWalNoWalFile.into()) } pub fn clear(&self) -> ProtocolResult<()> { let dir_path = self.path.clone(); if !dir_path.exists() { return Ok(()); } for item in fs::read_dir(dir_path).map_err(ConsensusError::WALErr)? { let item = item.map_err(ConsensusError::WALErr)?; fs::remove_file(item.path()).map_err(ConsensusError::WALErr)?; } Ok(()) } } #[rustfmt::skip] /// Bench in Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz (8 x 2200): /// test wal::test::bench_save_wal_1000_txs ... bench: 2,346,611 ns/iter (+/- 754,074) /// test wal::test::bench_save_wal_16000_txs ... bench: 41,576,328 ns/iter (+/- 2,547,323) /// test wal::test::bench_save_wal_2000_txs ... bench: 4,759,015 ns/iter (+/- 460,748) /// test wal::test::bench_save_wal_4000_txs ... bench: 9,725,284 ns/iter (+/- 452,143) /// test wal::test::bench_save_wal_8000_txs ... bench: 19,971,012 ns/iter (+/- 1,620,755) /// test wal::test::bench_save_wal_16000_txs ... bench: 41,576,328 ns/iter (+/- 2,547,323) /// test wal::test::bench_txs_prost_encode ... bench: 40,020,365 ns/iter (+/- 2,800,361) /// test wal::test::bench_txs_rlp_encode ... 
bench: 40,792,370 ns/iter (+/- 1,908,695) #[cfg(test)] mod tests { extern crate test; use rand::random; use test::Bencher; use protocol::types::{Address, Hash, RawTransaction, TransactionRequest}; use protocol::Bytes; use super::*; static FULL_TXS_PATH: &str = "./free-space/wal/txs"; static FULL_CONSENSUS_PATH: &str = "./free-space/wal/consensus"; fn mock_hash() -> Hash { Hash::digest(get_random_bytes(10)) } fn mock_address() -> Address { let hash = mock_hash(); Address::from_hash(hash).unwrap() } fn mock_raw_tx() -> RawTransaction { RawTransaction { chain_id: mock_hash(), nonce: mock_hash(), timeout: 100, cycles_price: 1, cycles_limit: 100, request: mock_transaction_request(), sender: mock_address(), } } pub fn mock_transaction_request() -> TransactionRequest { TransactionRequest { service_name: "mock-service".to_owned(), method: "mock-method".to_owned(), payload: "mock-payload".to_owned(), } } pub fn mock_sign_tx() -> SignedTransaction { SignedTransaction { raw: mock_raw_tx(), tx_hash: mock_hash(), pubkey: Default::default(), signature: Default::default(), } } pub fn mock_wal_txs(size: usize) -> Vec { (0..size).map(|_| mock_sign_tx()).collect::>() } pub fn get_random_bytes(len: usize) -> Bytes { let vec: Vec = (0..len).map(|_| random::()).collect(); Bytes::from(vec) } #[test] fn test_txs_wal() { fs::remove_dir_all(PathBuf::from_str(FULL_TXS_PATH).unwrap()).unwrap(); let wal = SignedTxsWAL::new(FULL_TXS_PATH.to_string()); let txs_01 = mock_wal_txs(100); let hash_01 = Hash::digest(Bytes::from(rlp::encode_list(&txs_01))); wal.save(1u64, hash_01.clone(), txs_01.clone()).unwrap(); let txs_02 = mock_wal_txs(100); let hash_02 = Hash::digest(Bytes::from(rlp::encode_list(&txs_02))); wal.save(3u64, hash_02.clone(), txs_02.clone()).unwrap(); let txs_03 = mock_wal_txs(100); let hash_03 = Hash::digest(Bytes::from(rlp::encode_list(&txs_03))); wal.save(3u64, hash_03, txs_03.clone()).unwrap(); let res = wal.load_by_height(3); assert_eq!(res.len(), 200); for tx in res.iter() { 
assert!(txs_02.contains(tx) || txs_03.contains(tx)); } assert_eq!(wal.load(1u64, hash_01.clone()).unwrap(), txs_01); assert_eq!(wal.load(3u64, hash_02.clone()).unwrap(), txs_02); wal.remove(2u64).unwrap(); assert!(wal.load(1u64, hash_01).is_err()); assert!(wal.load(2u64, hash_02).is_err()); wal.remove(1u64).unwrap(); wal.remove(3u64).unwrap(); } #[test] fn test_consensus_wal() { // write one, read one let wal = ConsensusWal::new(FULL_CONSENSUS_PATH.to_string()); let info = get_random_bytes(1000); wal.update_overlord_wal(Context::new(),info.clone()).unwrap(); let load = wal.load_overlord_wal(Context::new()).unwrap(); assert_eq!(load,info); // write three, read latest fs::remove_dir_all(PathBuf::from_str(FULL_CONSENSUS_PATH).unwrap()).unwrap(); let info = get_random_bytes(1000); wal.update_overlord_wal(Context::new(),get_random_bytes(1000)).unwrap(); wal.update_overlord_wal(Context::new(),get_random_bytes(1000)).unwrap(); wal.update_overlord_wal(Context::new(),info.clone()).unwrap(); let load = wal.load_overlord_wal(Context::new()).unwrap(); assert_eq!(load,info); // remove all, read nothing fs::remove_dir_all(PathBuf::from_str(FULL_CONSENSUS_PATH).unwrap()).unwrap(); let load = wal.load_overlord_wal(Context::new()); assert!(load.is_err()); // write a old correct one and a new wrong one, read old // old one //fs::remove_dir_all(PathBuf::from_str(FULL_CONSENSUS_PATH).unwrap()).unwrap(); let info = get_random_bytes(1000); wal.update_overlord_wal(Context::new(),info.clone()).unwrap(); // -> copy and modify to a new fake one let mut files = fs::read_dir(FULL_CONSENSUS_PATH).unwrap(); let file = files.next().unwrap().unwrap(); let from = u128::from_str( file.file_name().to_str().unwrap()).unwrap(); let to = file.path().parent().unwrap().join((from+1).to_string()); let mut new_file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(to).unwrap(); new_file .write_all(get_random_bytes(1000).as_ref()).unwrap(); let load = 
wal.load_overlord_wal(Context::new()).unwrap(); assert_eq!(load,info); fs::remove_dir_all(PathBuf::from_str(FULL_CONSENSUS_PATH).unwrap()).unwrap(); } #[test] fn test_wal_txs_codec() { for _ in 0..10 { let txs = FixedSignedTxs::new(mock_wal_txs(100)); assert_eq!( FixedSignedTxs::decode_sync(txs.encode_sync().unwrap()).unwrap(), txs ); } } #[bench] fn bench_txs_rlp_encode(b: &mut Bencher) { let txs = mock_wal_txs(20000); b.iter(move || { let _ = rlp::encode_list(&txs); }); } #[bench] fn bench_txs_prost_encode(b: &mut Bencher) { let txs = FixedSignedTxs::new(mock_wal_txs(20000)); b.iter(move || { let _ = txs.encode_sync(); }); } #[bench] fn bench_save_wal_1000_txs(b: &mut Bencher) { let wal = SignedTxsWAL::new(FULL_TXS_PATH.to_string()); let txs = mock_wal_txs(1000); let txs_hash = Hash::digest(Bytes::from(rlp::encode_list(&txs))); b.iter(move || { wal.save(1u64, txs_hash.clone(), txs.clone()).unwrap(); }) } #[bench] fn bench_save_wal_2000_txs(b: &mut Bencher) { let wal = SignedTxsWAL::new(FULL_TXS_PATH.to_string()); let txs = mock_wal_txs(2000); let txs_hash = Hash::digest(Bytes::from(rlp::encode_list(&txs))); b.iter(move || { wal.save(1u64, txs_hash.clone(), txs.clone()).unwrap(); }) } #[bench] fn bench_save_wal_4000_txs(b: &mut Bencher) { let wal = SignedTxsWAL::new(FULL_TXS_PATH.to_string()); let txs = mock_wal_txs(4000); let txs_hash = Hash::digest(Bytes::from(rlp::encode_list(&txs))); b.iter(move || { wal.save(1u64, txs_hash.clone(), txs.clone()).unwrap(); }) } #[bench] fn bench_save_wal_8000_txs(b: &mut Bencher) { let wal = SignedTxsWAL::new(FULL_TXS_PATH.to_string()); let txs = mock_wal_txs(8000); let txs_hash = Hash::digest(Bytes::from(rlp::encode_list(&txs))); b.iter(move || { wal.save(1u64, txs_hash.clone(), txs.clone()).unwrap(); }) } #[bench] fn bench_save_wal_16000_txs(b: &mut Bencher) { let wal = SignedTxsWAL::new(FULL_TXS_PATH.to_string()); let txs = mock_wal_txs(16000); let txs_hash = Hash::digest(Bytes::from(rlp::encode_list(&txs))); b.iter(move || 
{ wal.save(1u64, txs_hash.clone(), txs.clone()).unwrap(); }) } } ================================================ FILE: core/consensus/src/wal_proto.rs ================================================ use std::convert::TryFrom; use prost::Message; use protocol::codec::{transaction, ProtocolCodecSync}; use protocol::types::SignedTransaction; use protocol::{Bytes, ProtocolError, ProtocolResult}; use crate::{fixed_types, ConsensusError, ConsensusType}; #[derive(Clone, Message)] pub struct FixedSignedTxs { #[prost(message, repeated, tag = "1")] pub inner: Vec, } impl From for FixedSignedTxs { fn from(txs: fixed_types::FixedSignedTxs) -> FixedSignedTxs { let inner = txs .inner .into_iter() .map(transaction::SignedTransaction::from) .collect::>(); FixedSignedTxs { inner } } } impl TryFrom for fixed_types::FixedSignedTxs { type Error = ProtocolError; fn try_from(txs: FixedSignedTxs) -> Result { let mut inner = Vec::new(); for tx in txs.inner.into_iter() { let tmp = SignedTransaction::try_from(tx)?; inner.push(tmp); } Ok(fixed_types::FixedSignedTxs { inner }) } } impl ProtocolCodecSync for fixed_types::FixedSignedTxs { fn encode_sync(&self) -> ProtocolResult { let ser_type = FixedSignedTxs::from(self.clone()); let mut buf = Vec::with_capacity(ser_type.encoded_len()); ser_type .encode(&mut buf) .map_err(|_| ConsensusError::EncodeErr(ConsensusType::WALSignedTxs))?; Ok(Bytes::from(buf)) } fn decode_sync(data: Bytes) -> ProtocolResult { let ser_type = FixedSignedTxs::decode(data) .map_err(|_| ConsensusError::DecodeErr(ConsensusType::WALSignedTxs))?; fixed_types::FixedSignedTxs::try_from(ser_type) } } ================================================ FILE: core/mempool/Cargo.toml ================================================ [package] name = "core-mempool" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] 
protocol = { path = "../../protocol", package = "muta-protocol" } common-apm = { path = "../../common/apm" } common-crypto = { path = "../../common/crypto" } core-network = { path = "../network" } futures = { version = "0.3", features = [ "async-await" ] } crossbeam-queue = "0.2" derive_more = "0.99" async-trait = "0.1" num-traits = "0.2" bytes = "0.5" rand = "0.7" hex = "0.4" serde_derive = "1.0" serde_json = "1.0" serde = "1.0" futures-timer = "3.0" log = "0.4" tokio = { version = "0.2", features = ["macros", "rt-core", "sync", "blocking"]} muta-apm = "0.1.0-alpha.7" cita_trie = "2.0" [dev-dependencies] chashmap = "2.2" parking_lot = "0.11" ================================================ FILE: core/mempool/src/adapter/message.rs ================================================ use std::sync::Arc; use std::time::Instant; use async_trait::async_trait; use futures::future::{try_join_all, TryFutureExt}; use protocol::{ traits::{Context, MemPool, MessageHandler, Priority, Rpc, TrustFeedback}, types::{Hash, SignedTransaction}, }; use serde_derive::{Deserialize, Serialize}; use crate::context::TxContext; pub const END_GOSSIP_NEW_TXS: &str = "/gossip/mempool/new_txs"; pub const RPC_PULL_TXS: &str = "/rpc_call/mempool/pull_txs"; pub const RPC_RESP_PULL_TXS: &str = "/rpc_resp/mempool/pull_txs"; pub const RPC_RESP_PULL_TXS_SYNC: &str = "/rpc_resp/mempool/pull_txs_sync"; #[derive(Clone, Debug, Serialize, Deserialize)] pub struct MsgNewTxs { #[serde(with = "core_network::serde_multi")] pub batch_stxs: Vec, } pub struct NewTxsHandler { mem_pool: Arc, } impl NewTxsHandler where M: MemPool, { pub fn new(mem_pool: Arc) -> Self { NewTxsHandler { mem_pool } } } #[async_trait] impl MessageHandler for NewTxsHandler where M: MemPool + 'static, { type Message = MsgNewTxs; async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { let ctx = ctx.mark_network_origin_new_txs(); let insert_stx = |stx| -> _ { let mem_pool = Arc::clone(&self.mem_pool); let ctx = 
ctx.clone(); tokio::spawn(async move { let inst = Instant::now(); common_apm::metrics::mempool::MEMPOOL_COUNTER_STATIC .insert_tx_from_p2p .inc(); if mem_pool.insert(ctx, stx).await.is_err() { common_apm::metrics::mempool::MEMPOOL_RESULT_COUNTER_STATIC .insert_tx_from_p2p .failure .inc(); } common_apm::metrics::mempool::MEMPOOL_RESULT_COUNTER_STATIC .insert_tx_from_p2p .success .inc(); common_apm::metrics::mempool::MEMPOOL_TIME_STATIC .insert_tx_from_p2p .observe(common_apm::metrics::duration_to_sec(inst.elapsed())); }) }; // Concurrently insert them if try_join_all( msg.batch_stxs .into_iter() .map(insert_stx) .collect::>(), ) .await .map(|_| ()) .is_err() { log::error!("[core_mempool] mempool batch insert error"); } TrustFeedback::Neutral } } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct MsgPullTxs { pub height: Option, #[serde(with = "core_network::serde_multi")] pub hashes: Vec, } #[derive(Debug, Serialize, Deserialize)] pub struct MsgPushTxs { #[serde(with = "core_network::serde_multi")] pub sig_txs: Vec, } pub struct PullTxsHandler { network: Arc, mem_pool: Arc, } impl PullTxsHandler where N: Rpc + 'static, M: MemPool + 'static, { pub fn new(network: Arc, mem_pool: Arc) -> Self { PullTxsHandler { network, mem_pool } } } #[async_trait] impl MessageHandler for PullTxsHandler where N: Rpc + 'static, M: MemPool + 'static, { type Message = MsgPullTxs; async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { let push_txs = async move { let ret = self .mem_pool .get_full_txs(ctx.clone(), msg.height, &msg.hashes) .await .map(|sig_txs| MsgPushTxs { sig_txs }); self.network .response::(ctx, RPC_RESP_PULL_TXS, ret, Priority::High) .await }; push_txs .unwrap_or_else(move |err| log::warn!("[core_mempool] push txs {}", err)) .await; TrustFeedback::Neutral } } ================================================ FILE: core/mempool/src/adapter/mod.rs ================================================ use super::TxContext; pub mod message; use 
std::{ error::Error, marker::PhantomData, sync::atomic::{AtomicU64, Ordering}, sync::Arc, time::Duration, }; use async_trait::async_trait; use derive_more::Display; use futures::{ channel::mpsc::{ channel, unbounded, Receiver, Sender, TrySendError, UnboundedReceiver, UnboundedSender, }, lock::Mutex, select, stream::StreamExt, }; use futures_timer::Delay; use log::{debug, error}; use common_crypto::Crypto; use protocol::{ fixed_codec::FixedCodec, traits::{ Context, ExecutorFactory, ExecutorParams, Gossip, MemPoolAdapter, PeerTrust, Priority, Rpc, ServiceMapping, ServiceResponse, Storage, TrustFeedback, }, types::{Address, Hash, SignedTransaction, TransactionRequest}, ProtocolError, ProtocolErrorKind, ProtocolResult, }; use crate::adapter::message::{ MsgNewTxs, MsgPullTxs, MsgPushTxs, END_GOSSIP_NEW_TXS, RPC_PULL_TXS, }; use crate::MemPoolError; pub const DEFAULT_BROADCAST_TXS_SIZE: usize = 200; pub const DEFAULT_BROADCAST_TXS_INTERVAL: u64 = 200; // milliseconds struct IntervalTxsBroadcaster; impl IntervalTxsBroadcaster { pub async fn broadcast( stx_rx: UnboundedReceiver, interval_reached: Receiver<()>, tx_size: usize, gossip: G, err_tx: UnboundedSender, ) where G: Gossip + Clone + Unpin + 'static, { let mut stx_rx = stx_rx.fuse(); let mut interval_rx = interval_reached.fuse(); let mut txs_cache = Vec::with_capacity(tx_size); loop { select! 
{ opt_stx = stx_rx.next() => { if let Some(stx) = opt_stx { txs_cache.push(stx); if txs_cache.len() == tx_size { Self::do_broadcast(&mut txs_cache, &gossip, err_tx.clone()).await } } else { debug!("mempool: default mempool adapter dropped") } }, signal = interval_rx.next() => { if signal.is_some() { Self::do_broadcast(&mut txs_cache, &gossip, err_tx.clone()).await } }, complete => break, }; } } pub async fn timer(mut signal_tx: Sender<()>, interval: u64) { let interval = Duration::from_millis(interval); loop { Delay::new(interval).await; if let Err(err) = signal_tx.try_send(()) { // This means previous interval signal hasn't processed // yet, simply drop this one. if err.is_full() { debug!("mempool: interval signal channel full"); } if err.is_disconnected() { error!("mempool: interval broadcaster dropped"); } } } } async fn do_broadcast( txs_cache: &mut Vec, gossip: &G, err_tx: UnboundedSender, ) where G: Gossip + Unpin, { if txs_cache.is_empty() { return; } let batch_stxs = txs_cache.drain(..).collect::>(); let gossip_msg = MsgNewTxs { batch_stxs }; let ctx = Context::new(); let end = END_GOSSIP_NEW_TXS; let report_if_err = move |ret: ProtocolResult<()>| { if let Err(err) = ret { if err_tx.unbounded_send(err).is_err() { error!("mempool: default mempool adapter dropped"); } } }; report_if_err( gossip .broadcast(ctx, end, gossip_msg, Priority::Normal) .await, ) } } pub struct DefaultMemPoolAdapter { network: N, storage: Arc, trie_db: Arc, service_mapping: Arc, timeout_gap: AtomicU64, cycles_limit: AtomicU64, max_tx_size: AtomicU64, stx_tx: UnboundedSender, err_rx: Mutex>, pin_c: PhantomData, pin_ef: PhantomData, } impl DefaultMemPoolAdapter where EF: ExecutorFactory, C: Crypto, N: Rpc + PeerTrust + Gossip + Clone + Unpin + 'static, S: Storage, DB: cita_trie::DB + 'static, Mapping: ServiceMapping + 'static, { pub fn new( network: N, storage: Arc, trie_db: Arc, service_mapping: Arc, broadcast_txs_size: usize, broadcast_txs_interval: u64, ) -> Self { let (stx_tx, 
stx_rx) = unbounded(); let (err_tx, err_rx) = unbounded(); let (signal_tx, interval_reached) = channel(1); tokio::spawn(IntervalTxsBroadcaster::timer( signal_tx, broadcast_txs_interval, )); tokio::spawn(IntervalTxsBroadcaster::broadcast( stx_rx, interval_reached, broadcast_txs_size, network.clone(), err_tx, )); DefaultMemPoolAdapter { network, storage, trie_db, service_mapping, timeout_gap: AtomicU64::new(0), cycles_limit: AtomicU64::new(0), max_tx_size: AtomicU64::new(0), stx_tx, err_rx: Mutex::new(err_rx), pin_c: PhantomData, pin_ef: PhantomData, } } } #[async_trait] impl MemPoolAdapter for DefaultMemPoolAdapter where EF: ExecutorFactory, C: Crypto + Send + Sync + 'static, N: Rpc + PeerTrust + Gossip + Clone + Unpin + 'static, S: Storage + 'static, DB: cita_trie::DB + 'static, Mapping: ServiceMapping + 'static, { #[muta_apm::derive::tracing_span( kind = "mempool.adapter", logs = "{'txs_len': 'tx_hashes.len()'}" )] async fn pull_txs( &self, ctx: Context, height: Option, tx_hashes: Vec, ) -> ProtocolResult> { let pull_msg = MsgPullTxs { height, hashes: tx_hashes, }; let resp_msg = self .network .call::(ctx, RPC_PULL_TXS, pull_msg, Priority::High) .await?; Ok(resp_msg.sig_txs) } async fn broadcast_tx(&self, _ctx: Context, stx: SignedTransaction) -> ProtocolResult<()> { self.stx_tx .unbounded_send(stx) .map_err(AdapterError::from)?; if let Some(mut err_rx) = self.err_rx.try_lock() { match err_rx.try_next() { Ok(Some(err)) => return Err(err), // Error means receiver channel is empty, is ok here Ok(None) | Err(_) => return Ok(()), } } Ok(()) } async fn check_authorization( &self, ctx: Context, tx: Box, ) -> ProtocolResult<()> { let network = self.network.clone(); let ctx_clone = ctx.clone(); let header = self.storage.get_latest_block_header(ctx.clone()).await?; let trie_db_clone = Arc::clone(&self.trie_db); let storage_clone = Arc::clone(&self.storage); let service_mapping_clone = Arc::clone(&self.service_mapping); let tx_hash = tx.tx_hash.clone(); let blocking_res: 
ProtocolResult> = tokio::task::spawn_blocking(move || { // Verify transaction hash let fixed_bytes = tx.raw.encode_fixed()?; let tx_hash = Hash::digest(fixed_bytes); if tx_hash != tx.tx_hash { if ctx_clone.is_network_origin_txs() { network.report( ctx_clone, TrustFeedback::Worse(format!( "Mempool wrong tx_hash of tx {:?}", tx.tx_hash )), ); } return Err(MemPoolError::CheckHash { expect: tx.tx_hash, actual: tx_hash, } .into()); } // Verify transaction signatures let caller = Address::from_hash(Hash::digest(protocol::address_hrp().as_str()))?; let executor = EF::from_root( header.state_root.clone(), Arc::clone(&trie_db_clone), Arc::clone(&storage_clone), Arc::clone(&service_mapping_clone), )?; let params = ExecutorParams { state_root: header.state_root, height: header.height, timestamp: header.timestamp, cycles_limit: 99999, proposer: header.proposer, }; let stx_ptr_json = format!("{{ \"ptr\": {} }}", Box::into_raw(tx) as usize); let check_resp = executor.read(¶ms, &caller, 1, &TransactionRequest { service_name: "authorization".to_string(), method: "check_authorization_by_ptr".to_string(), payload: stx_ptr_json, })?; Ok(check_resp) }) .await .map_err(|_| AdapterError::Internal)?; let check_resp = blocking_res?; if check_resp.is_error() { if ctx.is_network_origin_txs() { self.network.report( ctx, TrustFeedback::Worse(format!( "Mempool check authorization failed tx hash {:?}", tx_hash )), ) } return Err(MemPoolError::CheckAuthorization { tx_hash, err_info: check_resp.error_message, } .into()); } Ok(()) } async fn check_transaction(&self, ctx: Context, stx: &SignedTransaction) -> ProtocolResult<()> { let fixed_bytes = stx.raw.encode_fixed()?; let size = fixed_bytes.len() as u64; let tx_hash = stx.tx_hash.clone(); // check tx size let max_tx_size = self.max_tx_size.load(Ordering::SeqCst); if size > max_tx_size { if ctx.is_network_origin_txs() { self.network.report( ctx.clone(), TrustFeedback::Bad(format!( "Mempool exceed size limit of tx {:?}", stx.tx_hash )), ); } 
return Err(MemPoolError::ExceedSizeLimit { tx_hash, max_tx_size, size, } .into()); } // check cycle limit let cycles_limit_config = self.cycles_limit.load(Ordering::SeqCst); let cycles_limit_tx = stx.raw.cycles_limit; if cycles_limit_tx > cycles_limit_config { if ctx.is_network_origin_txs() { self.network.report( ctx.clone(), TrustFeedback::Bad(format!( "Mempool exceed cycle limit of tx {:?}", stx.tx_hash )), ); } return Err(MemPoolError::ExceedCyclesLimit { tx_hash, cycles_limit_tx, cycles_limit_config, } .into()); } // Verify chain id let latest_header = self.storage.get_latest_block_header(ctx.clone()).await?; if latest_header.chain_id != stx.raw.chain_id { if ctx.is_network_origin_txs() { self.network.report( ctx.clone(), TrustFeedback::Worse(format!("Mempool wrong chain of tx {:?}", stx.tx_hash)), ); } let wrong_chain_id = MemPoolError::WrongChain { tx_hash: stx.tx_hash.clone(), }; return Err(wrong_chain_id.into()); } // Verify timeout let latest_height = latest_header.height; let timeout_gap = self.timeout_gap.load(Ordering::SeqCst); if stx.raw.timeout > latest_height + timeout_gap { let invalid_timeout = MemPoolError::InvalidTimeout { tx_hash: stx.tx_hash.clone(), }; return Err(invalid_timeout.into()); } if stx.raw.timeout < latest_height { let timeout = MemPoolError::Timeout { tx_hash: stx.tx_hash.clone(), timeout: stx.raw.timeout, }; return Err(timeout.into()); } Ok(()) } async fn check_storage_exist(&self, ctx: Context, tx_hash: &Hash) -> ProtocolResult<()> { match self.storage.get_transaction_by_hash(ctx, tx_hash).await { Ok(Some(_)) => Err(MemPoolError::CommittedTx { tx_hash: tx_hash.clone(), } .into()), Ok(None) => Ok(()), Err(err) => Err(err), } } async fn get_latest_height(&self, ctx: Context) -> ProtocolResult { let height = self.storage.get_latest_block_header(ctx).await?.height; Ok(height) } async fn get_transactions_from_storage( &self, ctx: Context, block_height: Option, tx_hashes: &[Hash], ) -> ProtocolResult>> { if let Some(height) = 
block_height { self.storage.get_transactions(ctx, height, tx_hashes).await } else { let futs = tx_hashes .iter() .map(|tx_hash| self.storage.get_transaction_by_hash(ctx.clone(), tx_hash)) .collect::>(); futures::future::try_join_all(futs).await } } fn report_good(&self, ctx: Context) { if ctx.is_network_origin_txs() { self.network.report(ctx, TrustFeedback::Good); } } fn set_args(&self, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64) { self.timeout_gap.store(timeout_gap, Ordering::Relaxed); self.cycles_limit.store(cycles_limit, Ordering::Relaxed); self.max_tx_size.store(max_tx_size, Ordering::Relaxed); } } #[derive(Debug, Display)] pub enum AdapterError { #[display(fmt = "adapter: interval broadcaster drop")] IntervalBroadcasterDrop, #[display(fmt = "adapter: internal error")] Internal, } impl Error for AdapterError {} impl From> for AdapterError { fn from(_error: TrySendError) -> AdapterError { AdapterError::IntervalBroadcasterDrop } } impl From for ProtocolError { fn from(error: AdapterError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Mempool, Box::new(error)) } } #[cfg(test)] mod tests { use super::IntervalTxsBroadcaster; use crate::{adapter::message::MsgNewTxs, tests::default_mock_txs}; use protocol::{ traits::{Context, Gossip, MessageCodec, Priority}, Bytes, ProtocolResult, }; use async_trait::async_trait; use futures::{ channel::mpsc::{channel, unbounded, UnboundedSender}, stream::StreamExt, }; use parking_lot::Mutex; use std::{ ops::Sub, sync::Arc, time::{Duration, Instant}, }; #[derive(Clone)] struct MockGossip { msgs: Arc>>, signal_tx: UnboundedSender<()>, } impl MockGossip { pub fn new(signal_tx: UnboundedSender<()>) -> Self { MockGossip { msgs: Default::default(), signal_tx, } } } #[async_trait] impl Gossip for MockGossip { async fn broadcast( &self, _: Context, _: &str, mut msg: M, _: Priority, ) -> ProtocolResult<()> where M: MessageCodec, { let bytes = msg.encode().expect("encode message fail"); self.msgs.lock().push(bytes); 
self.signal_tx .unbounded_send(()) .expect("send broadcast signal fail"); Ok(()) } async fn multicast<'a, M, P>( &self, _: Context, _: &str, _: P, _: M, _: Priority, ) -> ProtocolResult<()> where M: MessageCodec, P: AsRef<[Bytes]> + Send + 'a, { unreachable!() } } macro_rules! pop_msg { ($msgs:expr) => {{ let msg = $msgs.pop().expect("should have one message"); MsgNewTxs::decode(msg).expect("decode MsgNewTxs fail") }}; } #[tokio::test] async fn test_interval_timer() { let (tx, mut rx) = channel(1); let interval = Duration::from_millis(200); let now = Instant::now(); tokio::spawn(IntervalTxsBroadcaster::timer(tx, 200)); rx.next().await.expect("await interval signal fail"); assert!(now.elapsed().sub(interval).as_millis() < 100u128); } #[tokio::test] async fn test_interval_broadcast_reach_cache_size() { let (stx_tx, stx_rx) = unbounded(); let (err_tx, _err_rx) = unbounded(); let (_signal_tx, interval_reached) = channel(1); let tx_size = 10; let (broadcast_signal_tx, mut broadcast_signal_rx) = unbounded(); let gossip = MockGossip::new(broadcast_signal_tx); tokio::spawn(IntervalTxsBroadcaster::broadcast( stx_rx, interval_reached, tx_size, gossip.clone(), err_tx, )); for stx in default_mock_txs(11).into_iter() { stx_tx.unbounded_send(stx).expect("send stx fail"); } broadcast_signal_rx.next().await; let mut msgs = gossip.msgs.lock().drain(..).collect::>(); assert_eq!(msgs.len(), 1, "should only have one message"); let msg = pop_msg!(msgs); assert_eq!(msg.batch_stxs.len(), 10, "should only have 10 stx"); } #[tokio::test] async fn test_interval_broadcast_reach_interval() { let (stx_tx, stx_rx) = unbounded(); let (err_tx, _err_rx) = unbounded(); let (signal_tx, interval_reached) = channel(1); let tx_size = 10; let (broadcast_signal_tx, mut broadcast_signal_rx) = unbounded(); let gossip = MockGossip::new(broadcast_signal_tx); tokio::spawn(IntervalTxsBroadcaster::timer(signal_tx, 200)); tokio::spawn(IntervalTxsBroadcaster::broadcast( stx_rx, interval_reached, tx_size, 
gossip.clone(),
err_tx,
));

// Nine txs stay below the batch size (10), so only the 200ms interval
// tick can flush them: expect a single broadcast carrying all 9.
for stx in default_mock_txs(9).into_iter() {
    stx_tx.unbounded_send(stx).expect("send stx fail");
}

broadcast_signal_rx.next().await;

let mut msgs = gossip.msgs.lock().drain(..).collect::>();
assert_eq!(msgs.len(), 1, "should only have one message");

let msg = pop_msg!(msgs);
assert_eq!(msg.batch_stxs.len(), 9, "should only have 9 stx");
}

/// 19 txs with batch size 10: the first 10 flush on reaching the cache
/// size, the remaining 9 flush on the next interval tick.
#[tokio::test]
async fn test_interval_broadcast() {
    let (stx_tx, stx_rx) = unbounded();
    let (err_tx, _err_rx) = unbounded();
    let (signal_tx, interval_reached) = channel(1);
    let tx_size = 10;

    let (broadcast_signal_tx, mut broadcast_signal_rx) = unbounded();
    let gossip = MockGossip::new(broadcast_signal_tx);

    tokio::spawn(IntervalTxsBroadcaster::timer(signal_tx, 200));
    tokio::spawn(IntervalTxsBroadcaster::broadcast(
        stx_rx,
        interval_reached,
        tx_size,
        gossip.clone(),
        err_tx,
    ));

    for stx in default_mock_txs(19).into_iter() {
        stx_tx.unbounded_send(stx).expect("send stx fail");
    }

    // Expect two broadcasts: one size-triggered, one interval-triggered.
    broadcast_signal_rx.next().await;
    broadcast_signal_rx.next().await;

    let mut msgs = gossip.msgs.lock().drain(..).collect::>();
    assert_eq!(msgs.len(), 2, "should only have two messages");

    // `pop_msg!` pops from the back, so the interval batch (9 txs)
    // comes out first, then the size-triggered batch (10 txs).
    let msg = pop_msg!(msgs);
    assert_eq!(
        msg.batch_stxs.len(),
        9,
        "last message should only have 9 stx"
    );

    let msg = pop_msg!(msgs);
    assert_eq!(
        msg.batch_stxs.len(),
        10,
        "first message should only have 10 stx"
    );
}
}

================================================
FILE: core/mempool/src/context.rs
================================================
use protocol::traits::Context;

// Context key marking txs that arrived over the network, as opposed to
// txs submitted locally through the node's own API.
const TXS_ORIGINAL_KEY: &str = "txs_original";
const NETWORK_TXS: usize = 1;

/// Mempool-internal extension of `Context` recording tx origin, used to
/// decide whether to re-broadcast a tx and whether to report peer trust.
pub(crate) trait TxContext {
    /// Return a context tagged as carrying network-originated txs.
    fn mark_network_origin_new_txs(&self) -> Self;

    /// True iff the context was tagged by `mark_network_origin_new_txs`.
    fn is_network_origin_txs(&self) -> bool;
}

impl TxContext for Context {
    fn mark_network_origin_new_txs(&self) -> Self {
        self.with_value::(TXS_ORIGINAL_KEY, NETWORK_TXS)
    }

    fn is_network_origin_txs(&self) -> bool {
        self.get::(TXS_ORIGINAL_KEY) == Some(&NETWORK_TXS)
    }
}
================================================ FILE: core/mempool/src/lib.rs ================================================ #![feature(async_closure, test)] #![allow(clippy::suspicious_else_formatting, clippy::mutable_key_type)] mod adapter; mod context; mod map; #[cfg(test)] mod tests; mod tx_cache; pub use adapter::message::{ MsgNewTxs, MsgPullTxs, MsgPushTxs, NewTxsHandler, PullTxsHandler, END_GOSSIP_NEW_TXS, RPC_PULL_TXS, RPC_RESP_PULL_TXS, RPC_RESP_PULL_TXS_SYNC, }; pub use adapter::DefaultMemPoolAdapter; pub use adapter::{DEFAULT_BROADCAST_TXS_INTERVAL, DEFAULT_BROADCAST_TXS_SIZE}; use std::collections::HashSet; use std::error::Error; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use std::time::Instant; use async_trait::async_trait; use derive_more::Display; use futures::future::try_join_all; use tokio::sync::RwLock; use protocol::traits::{Context, MemPool, MemPoolAdapter, MixedTxHashes}; use protocol::types::{Hash, SignedTransaction}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; use crate::context::TxContext; use crate::map::Map; use crate::tx_cache::TxCache; /// Memory pool for caching transactions. pub struct HashMemPool { /// Pool size limit. pool_size: usize, /// A system param limits the life time of an off-chain transaction. timeout_gap: AtomicU64, /// A structure for caching new transactions and responsible transactions of /// propose-sync. tx_cache: TxCache, /// A structure for caching fresh transactions in order transaction hashes. callback_cache: Arc>, /// Supply necessary functions from outer modules. adapter: Arc, /// exclusive flush_memory and insert_tx to avoid repeat txs insertion. 
flush_lock: RwLock<()>, } impl HashMemPool where Adapter: MemPoolAdapter, { pub async fn new( pool_size: usize, adapter: Adapter, initial_txs: Vec, ) -> Self { let mempool = HashMemPool { pool_size, timeout_gap: AtomicU64::new(0), tx_cache: TxCache::new(pool_size * 2), callback_cache: Arc::new(Map::new(pool_size)), adapter: Arc::new(adapter), flush_lock: RwLock::new(()), }; for tx in initial_txs.into_iter() { if let Err(e) = mempool.initial_insert(Context::new(), tx).await { log::warn!("[mempool]: initial insert tx failed {:?}", e); } } mempool } pub fn get_tx_cache(&self) -> &TxCache { &self.tx_cache } pub fn get_callback_cache(&self) -> &Map { &self.callback_cache } pub fn get_adapter(&self) -> &Adapter { &self.adapter } async fn show_unknown_txs(&self, tx_hashes: &[Hash]) -> Vec { let tx_hashes = self.tx_cache.show_unknown(tx_hashes).await; let mut unknown_hashes = vec![]; for tx_hash in tx_hashes.into_iter() { if !self.callback_cache.contains_key(&tx_hash).await { unknown_hashes.push(tx_hash) } } unknown_hashes } async fn initial_insert(&self, ctx: Context, tx: SignedTransaction) -> ProtocolResult<()> { let _lock = self.flush_lock.read().await; self.tx_cache.check_exist(&tx.tx_hash).await?; self.adapter .check_storage_exist(ctx.clone(), &tx.tx_hash) .await?; self.tx_cache.insert_propose_tx(tx).await } async fn insert_tx( &self, ctx: Context, tx: SignedTransaction, tx_type: TxType, ) -> ProtocolResult<()> { let _lock = self.flush_lock.read().await; let tx = Box::new(tx); let tx_hash = &tx.tx_hash; self.tx_cache.check_reach_limit(self.pool_size).await?; self.tx_cache.check_exist(tx_hash).await?; self.adapter .check_authorization(ctx.clone(), tx.clone()) .await?; self.adapter.check_transaction(ctx.clone(), &tx).await?; self.adapter .check_storage_exist(ctx.clone(), tx_hash) .await?; match tx_type { TxType::NewTx => self.tx_cache.insert_new_tx(*tx.clone()).await?, TxType::ProposeTx => self.tx_cache.insert_propose_tx(*tx.clone()).await?, } if 
!ctx.is_network_origin_txs() {
    // Locally-submitted tx: hand it to the interval broadcaster.
    self.adapter.broadcast_tx(ctx, *tx).await?;
} else {
    // Network tx that passed all checks: reward the sender's trust.
    self.adapter.report_good(ctx);
}
Ok(())
}

/// Run authorization / validity / storage-existence checks for a batch
/// of txs concurrently, one spawned task per tx.
///
/// `tx_ptrs` are `Box::into_raw` pointers produced by the caller
/// (`ensure_order_txs`); each spawned task reclaims ownership of
/// exactly one pointer via `Box::from_raw`.
async fn verify_tx_in_parallel(&self, ctx: Context, tx_ptrs: Vec) -> ProtocolResult<()> {
    let now = Instant::now();
    let len = tx_ptrs.len();

    let futs = tx_ptrs
        .into_iter()
        .map(|ptr| {
            let adapter = Arc::clone(&self.adapter);
            let ctx = ctx.clone();

            tokio::spawn(async move {
                // SAFETY: `ptr` was produced by `Box::into_raw` on a
                // boxed `SignedTransaction` by the caller, and each
                // pointer is consumed exactly once, here.
                let boxed_stx = unsafe { Box::from_raw(ptr as *mut SignedTransaction) };
                let signed_tx = *(boxed_stx.clone());

                adapter.check_authorization(ctx.clone(), boxed_stx).await?;
                adapter.check_transaction(ctx.clone(), &signed_tx).await?;
                adapter
                    .check_storage_exist(ctx.clone(), &signed_tx.tx_hash)
                    .await
            })
        })
        .collect::>();

    // `try_join_all` over spawned tasks only fails on panic or
    // cancellation; the per-tx verification results are the Ok payload
    // and must be inspected separately.
    let check_results = try_join_all(futs).await.map_err(|e| {
        log::error!("[mempool] verify batch txs error {:?}", e);
        MemPoolError::VerifyBatchTransactions
    })?;
    // FIX: these per-tx results were previously discarded, so a tx
    // failing authorization / validity / storage checks was still
    // treated as verified. Propagate the first failure instead.
    for check_result in check_results {
        check_result?;
    }

    log::info!(
        "[mempool] verify txs done, size {:?} cost {:?}",
        len,
        now.elapsed()
    );
    Ok(())
}
}

#[async_trait]
impl MemPool for HashMemPool
where
    Adapter: MemPoolAdapter,
{
    /// Insert a fresh tx submitted through the API or gossip.
    async fn insert(&self, ctx: Context, tx: SignedTransaction) -> ProtocolResult<()> {
        self.insert_tx(ctx, tx, TxType::NewTx).await
    }

    /// Pick txs for a new block proposal, bounded by `tx_num_limit` and
    /// `cycles_limit`, recording package-size and latency metrics.
    async fn package(
        &self,
        ctx: Context,
        cycles_limit: u64,
        tx_num_limit: u64,
    ) -> ProtocolResult {
        let current_height = self.adapter.get_latest_height(ctx.clone()).await?;
        log::info!(
            "[core_mempool]: {:?} txs in map and {:?} txs in queue while package",
            self.tx_cache.len().await,
            self.tx_cache.queue_len(),
        );

        let inst = Instant::now();
        let result = self
            .tx_cache
            .package(
                cycles_limit,
                tx_num_limit,
                current_height,
                current_height + self.timeout_gap.load(Ordering::Relaxed),
            )
            .await;

        match result {
            Ok(txs) => {
                common_apm::metrics::mempool::MEMPOOL_PACKAGE_SIZE_VEC_STATIC
                    .package
                    .observe((txs.order_tx_hashes.len()) as f64);
                common_apm::metrics::mempool::MEMPOOL_TIME_STATIC
                    .package
                    .observe(common_apm::metrics::duration_to_sec(inst.elapsed()));
                Ok(txs)
            }
            Err(e) => {
common_apm::metrics::mempool::MEMPOOL_RESULT_COUNTER_STATIC .package .failure .inc(); Err(e) } } } async fn flush(&self, ctx: Context, tx_hashes: &[Hash]) -> ProtocolResult<()> { let _lock = self.flush_lock.write().await; let current_height = self.adapter.get_latest_height(ctx.clone()).await?; log::info!( "[core_mempool]: flush mempool with {:?} tx_hashes", tx_hashes.len(), ); self.tx_cache .flush( &tx_hashes, current_height, current_height + self.timeout_gap.load(Ordering::Relaxed), ) .await; self.callback_cache.clear().await; Ok(()) } async fn get_full_txs( &self, ctx: Context, height: Option, tx_hashes: &[Hash], ) -> ProtocolResult> { let len = tx_hashes.len(); let mut missing_hashes = vec![]; let mut full_txs = Vec::with_capacity(len); for tx_hash in tx_hashes.iter() { if let Some(tx) = self.tx_cache.get(tx_hash).await { full_txs.push(tx); } else if let Some(tx) = self.callback_cache.get(tx_hash).await { full_txs.push(tx); } else { missing_hashes.push(tx_hash.clone()); } } // for push txs when local mempool is flushed, but the remote node still fetch // full block if !missing_hashes.is_empty() { let txs = self .adapter .get_transactions_from_storage(ctx, height, &missing_hashes) .await?; let txs = txs .into_iter() .filter_map(|opt_tx| opt_tx) .collect::>(); full_txs.extend(txs); } if full_txs.len() != len { Err(MemPoolError::MisMatch { require: len, response: full_txs.len(), } .into()) } else { Ok(full_txs) } } async fn ensure_order_txs( &self, ctx: Context, height: Option, order_tx_hashes: &[Hash], ) -> ProtocolResult<()> { check_dup_order_hashes(order_tx_hashes)?; let unknown_hashes = self.show_unknown_txs(order_tx_hashes).await; if !unknown_hashes.is_empty() { let unknown_len = unknown_hashes.len(); let txs = self .adapter .pull_txs(ctx.clone(), height, unknown_hashes) .await?; // Make sure response signed_txs is the same size of request hashes. 
if txs.len() != unknown_len { return Err(MemPoolError::EnsureBreak { require: unknown_len, response: txs.len(), } .into()); } let (tx_ptrs, txs): (Vec<_>, Vec<_>) = txs .into_iter() .map(|tx| { let boxed = Box::new(tx); (Box::into_raw(boxed.clone()) as usize, boxed) }) .unzip(); self.verify_tx_in_parallel(ctx.clone(), tx_ptrs).await?; for signed_tx in txs.into_iter() { self.callback_cache .insert(signed_tx.tx_hash.clone(), *signed_tx) .await; } self.adapter.report_good(ctx); } Ok(()) } async fn sync_propose_txs( &self, ctx: Context, propose_tx_hashes: Vec, ) -> ProtocolResult<()> { let unknown_hashes = self.show_unknown_txs(&propose_tx_hashes).await; if !unknown_hashes.is_empty() { let txs = self .adapter .pull_txs(ctx.clone(), None, unknown_hashes) .await?; // TODO: concurrently insert for tx in txs.into_iter() { // Should not handle error here, it is normal that transactions // response here are exist in pool. let _ = self.insert_tx(ctx.clone(), tx, TxType::ProposeTx).await; } } Ok(()) } fn set_args(&self, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64) { self.adapter .set_args(timeout_gap, cycles_limit, max_tx_size); self.timeout_gap.store(timeout_gap, Ordering::Relaxed); } } fn check_dup_order_hashes(order_tx_hashes: &[Hash]) -> ProtocolResult<()> { let mut dup_set = HashSet::with_capacity(order_tx_hashes.len()); for hash in order_tx_hashes.iter() { if dup_set.contains(hash) { return Err(MemPoolError::EnsureDup { hash: hash.clone() }.into()); } dup_set.insert(hash.clone()); } Ok(()) } pub enum TxType { NewTx, ProposeTx, } #[derive(Debug, Display)] pub enum MemPoolError { #[display( fmt = "Tx: {:?} exceeds size limit, now: {}, limit: {} Bytes", tx_hash, size, max_tx_size )] ExceedSizeLimit { tx_hash: Hash, max_tx_size: u64, size: u64, }, #[display( fmt = "Tx: {:?} exceeds cycle limit, tx: {}, config: {}", tx_hash, cycles_limit_tx, cycles_limit_config )] ExceedCyclesLimit { tx_hash: Hash, cycles_limit_config: u64, cycles_limit_tx: u64, }, #[display(fmt = 
"Tx: {:?} inserts failed", tx_hash)] Insert { tx_hash: Hash }, #[display(fmt = "Mempool reaches limit: {}", pool_size)] ReachLimit { pool_size: usize }, #[display(fmt = "Tx: {:?} exists in pool", tx_hash)] Dup { tx_hash: Hash }, #[display(fmt = "Pull txs, require: {}, response: {}", require, response)] EnsureBreak { require: usize, response: usize }, #[display( fmt = "There is duplication in order transactions. duplication tx_hash {:?}", hash )] EnsureDup { hash: Hash }, #[display(fmt = "Fetch full txs, require: {}, response: {}", require, response)] MisMatch { require: usize, response: usize }, #[display(fmt = "Tx inserts candidate_queue failed, len: {}", len)] InsertCandidate { len: usize }, #[display(fmt = "Tx: {:?} check authorization error {:?}", tx_hash, err_info)] CheckAuthorization { tx_hash: Hash, err_info: String }, #[display(fmt = "Check_hash failed, expect: {:?}, get: {:?}", expect, actual)] CheckHash { expect: Hash, actual: Hash }, #[display(fmt = "Tx: {:?} already commit", tx_hash)] CommittedTx { tx_hash: Hash }, #[display(fmt = "Tx: {:?} doesn't match our chain id", tx_hash)] WrongChain { tx_hash: Hash }, #[display(fmt = "Tx: {:?} timeout {}", tx_hash, timeout)] Timeout { tx_hash: Hash, timeout: u64 }, #[display(fmt = "Tx: {:?} invalid timeout", tx_hash)] InvalidTimeout { tx_hash: Hash }, #[display(fmt = "Batch transaction validation failed")] VerifyBatchTransactions, #[display(fmt = "Encode transaction to JSON failed")] EncodeJson, } impl Error for MemPoolError {} impl From for ProtocolError { fn from(error: MemPoolError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Mempool, Box::new(error)) } } ================================================ FILE: core/mempool/src/map.rs ================================================ use std::collections::HashMap; use std::sync::Arc; use futures::future::try_join_all; use tokio::sync::RwLock; use protocol::types::Hash; /// The "Map" is a concurrent HashMap that uses 16 buckets to /// decentralize 
store transactions.
/// Why 16 buckets? We take byte 0 of each `tx_hash` and shift it 4 bits
/// to the right, yielding its high nibble in the range 0~15, which
/// indexes the 16 buckets (see `get_index`).
pub struct Map {
    buckets: Vec>>,
}

impl Map
where
    V: Send + Sync + Clone + 'static,
{
    pub fn new(cache_size: usize) -> Self {
        let mut buckets = Vec::with_capacity(16);
        for _ in 0..16 {
            buckets.push(Arc::new(Bucket {
                // Allocate enough space to avoid triggering resize.
                store: RwLock::new(HashMap::with_capacity(cache_size)),
            }));
        }

        Self { buckets }
    }

    pub async fn insert(&self, hash: Hash, value: V) -> Option {
        let bucket = self.get_bucket(&hash);
        bucket.insert(hash, value).await
    }

    pub async fn contains_key(&self, hash: &Hash) -> bool {
        let bucket = self.get_bucket(hash);
        bucket.contains_key(hash).await
    }

    pub async fn get(&self, hash: &Hash) -> Option {
        let bucket = self.get_bucket(hash);
        bucket.get(hash).await
    }

    pub async fn remove(&self, hash: &Hash) {
        let bucket = self.get_bucket(hash);
        bucket.remove(hash).await
    }

    /// Group `hashes` by bucket index, then remove each group from its
    /// bucket concurrently on spawned tasks.
    pub async fn remove_batch(&self, hashes: &[Hash]) {
        let mut h: HashMap> = HashMap::new();
        for hash in hashes.iter() {
            let index = get_index(hash);
            h.entry(index).or_insert_with(Vec::new).push(hash.clone());
        }

        let futs = h
            .into_iter()
            .map(|(index, hashes)| {
                let bucket = Arc::clone(&self.buckets[index]);
                tokio::spawn(async move { bucket.remove_batch(hashes).await })
            })
            .collect::>();
        try_join_all(futs)
            .await
            .expect("[mempool]: the runtime panics.");
    }

    /// Total entry count, summed bucket by bucket (not a snapshot: the
    /// buckets are read-locked one at a time, not all at once).
    pub async fn len(&self) -> usize {
        let mut len = 0;
        for bucket in self.buckets.iter() {
            len += bucket.len().await;
        }
        len
    }

    /// Clear all 16 buckets concurrently on spawned tasks.
    pub async fn clear(&self) {
        let futs = self
            .buckets
            .iter()
            .map(|bucket| {
                let bucket = Arc::clone(bucket);
                tokio::spawn(async move { bucket.clear().await })
            })
            .collect::>();
        try_join_all(futs)
            .await
            .expect("[mempool]: the runtime panics.");
    }

    fn get_bucket(&self, hash: &Hash) -> &Bucket {
        &self.buckets[get_index(hash)]
    }
}

// Bucket index = high nibble of the hash's first byte (0..=15).
fn get_index(hash: &Hash) -> usize {
    (hash.as_bytes()[0] >> 4) as usize
}
struct Bucket { store: RwLock>, } impl Bucket where V: Send + Sync + Clone, { /// Before inserting a transaction into the bucket, you must check whether /// the transaction is in the bucket first. Never use the insert function to /// check this. async fn insert(&self, hash: Hash, value: V) -> Option { let mut lock_data = self.store.write().await; if lock_data.contains_key(&hash) { Some(value) } else { lock_data.insert(hash, value) } } async fn contains_key(&self, hash: &Hash) -> bool { self.store.read().await.contains_key(hash) } async fn get(&self, hash: &Hash) -> Option { self.store.read().await.get(hash).map(Clone::clone) } async fn remove(&self, hash: &Hash) { let mut store = self.store.write().await; store.remove(hash); } async fn remove_batch(&self, hashes: Vec) { let mut store = self.store.write().await; for hash in hashes { store.remove(&hash); } } async fn len(&self) -> usize { self.store.read().await.len() } async fn clear(&self) { self.store.write().await.clear(); } } #[cfg(test)] mod tests { extern crate test; use std::collections::HashMap; use std::sync::{Arc, RwLock}; use chashmap::CHashMap; use rand::random; use test::Bencher; use protocol::{types::Hash, Bytes}; use crate::map::Map; const GEN_TX_SIZE: usize = 1000; #[bench] fn bench_map_insert(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let txs = mock_txs(GEN_TX_SIZE); b.iter(move || { let cache = Map::new(GEN_TX_SIZE); txs.iter().for_each(|(hash, tx)| { runtime.block_on(cache.insert(hash.clone(), tx.clone())); }); }); } #[bench] fn bench_std_map_insert(b: &mut Bencher) { let txs = mock_txs(GEN_TX_SIZE); b.iter(move || { let cache = Arc::new(RwLock::new(HashMap::new())); txs.iter().for_each(|(hash, tx)| { cache.write().unwrap().insert(hash, tx); }); }); } #[bench] fn bench_chashmap_insert(b: &mut Bencher) { let txs = mock_txs(GEN_TX_SIZE); b.iter(move || { let cache = CHashMap::new(); txs.iter().for_each(|(hash, tx)| { cache.insert(hash, tx); }); }); } fn 
mock_txs(size: usize) -> Vec<(Hash, Hash)> { let mut txs = Vec::with_capacity(size); for _ in 0..size { let tx: Vec = (0..10).map(|_| random::()).collect(); let tx = Hash::digest(Bytes::from(tx)); txs.push((tx.clone(), tx)); } txs } } ================================================ FILE: core/mempool/src/tests/mempool.rs ================================================ use std::sync::Arc; use test::Bencher; use protocol::types::Hash; use super::*; macro_rules! insert { (normal($pool_size: expr, $input: expr, $output: expr)) => { insert!(inner($pool_size, 1, $input, 0, $output)); }; (repeat($repeat: expr, $input: expr, $output: expr)) => { insert!(inner($input * 10, $repeat, $input, 0, $output)); }; (invalid($valid: expr, $invalid: expr, $output: expr)) => { insert!(inner($valid * 10, 1, $valid, $invalid, $output)); }; (inner($pool_size: expr, $repeat: expr, $valid: expr, $invalid: expr, $output: expr)) => { let mempool = Arc::new(new_mempool($pool_size, TIMEOUT_GAP, CYCLE_LIMIT, MAX_TX_SIZE).await); let txs = mock_txs($valid, $invalid, TIMEOUT); for _ in 0..$repeat { concurrent_insert(txs.clone(), Arc::clone(&mempool)).await; } assert_eq!(mempool.get_tx_cache().len().await, $output); }; } #[test] fn test_dup_order_hashes() { let hashes = vec![ Hash::digest(Bytes::from("test1")), Hash::digest(Bytes::from("test2")), Hash::digest(Bytes::from("test3")), Hash::digest(Bytes::from("test4")), Hash::digest(Bytes::from("test2")), ]; assert_eq!(check_dup_order_hashes(&hashes).is_err(), true); let hashes = vec![ Hash::digest(Bytes::from("test1")), Hash::digest(Bytes::from("test2")), Hash::digest(Bytes::from("test3")), Hash::digest(Bytes::from("test4")), ]; assert_eq!(check_dup_order_hashes(&hashes).is_err(), false); } #[tokio::test] async fn test_insert() { // 1. insertion under pool size. insert!(normal(100, 100, 100)); // 3. invalid insertion insert!(invalid(80, 10, 80)); } macro_rules! 
package { (normal($tx_num_limit: expr, $insert: expr, $expect_order: expr, $expect_propose: expr)) => { package!(inner( $tx_num_limit, TIMEOUT_GAP, TIMEOUT, $insert, $expect_order, $expect_propose )); }; (timeout($timeout_gap: expr, $timeout: expr, $insert: expr, $expect: expr)) => { package!(inner($insert, $timeout_gap, $timeout, $insert, $expect, 0)); }; (inner($tx_num_limit: expr, $timeout_gap: expr, $timeout: expr, $insert: expr, $expect_order: expr, $expect_propose: expr)) => { let mempool = &Arc::new(new_mempool($insert * 10, $timeout_gap, CYCLE_LIMIT, MAX_TX_SIZE).await); let txs = mock_txs($insert, 0, $timeout); concurrent_insert(txs.clone(), Arc::clone(mempool)).await; let mixed_tx_hashes = exec_package(Arc::clone(mempool), CYCLE_LIMIT, $tx_num_limit).await; assert_eq!(mixed_tx_hashes.order_tx_hashes.len(), $expect_order); assert_eq!(mixed_tx_hashes.propose_tx_hashes.len(), $expect_propose); }; } #[tokio::test] async fn test_package() { // 1. pool_size <= tx_num_limit package!(normal(100, 50, 50, 0)); package!(normal(100, 100, 100, 0)); // 2. tx_num_limit < pool_size <= 2 * tx_num_limit package!(normal(100, 101, 100, 1)); package!(normal(100, 200, 100, 100)); // 3. 2 * tx_num_limit < pool_size package!(normal(100, 201, 100, 100)); // 4. current_height >= tx.timeout package!(timeout(50, CURRENT_HEIGHT, 10, 0)); package!(timeout(50, CURRENT_HEIGHT - 10, 10, 0)); // 5. current_height + timeout_gap < tx.timeout package!(timeout(50, CURRENT_HEIGHT + 51, 10, 0)); package!(timeout(50, CURRENT_HEIGHT + 60, 10, 0)); // 6. 
tx.timeout - timeout_gap =< current_height < tx.timeout package!(timeout(50, CURRENT_HEIGHT + 50, 10, 10)); package!(timeout(50, CURRENT_HEIGHT + 1, 10, 10)); } #[tokio::test] async fn test_package_order_consistent_with_insert_order() { let mempool = &Arc::new(default_mempool().await); let txs = default_mock_txs(100); for tx in txs.iter() { exec_insert(tx.clone(), Arc::clone(mempool)).await; } let mixed_tx_hashes = exec_package(Arc::clone(mempool), CYCLE_LIMIT, TX_NUM_LIMIT).await; assert!(check_order_consistant(&mixed_tx_hashes, &txs)); // flush partial txs and test order consistency let (remove_txs, reserve_txs) = txs.split_at(50); let remove_hashes: Vec = remove_txs.iter().map(|tx| tx.tx_hash.clone()).collect(); exec_flush(remove_hashes, Arc::clone(mempool)).await; let mixed_tx_hashes = exec_package(Arc::clone(mempool), CYCLE_LIMIT, TX_NUM_LIMIT).await; assert!(check_order_consistant(&mixed_tx_hashes, reserve_txs)); } #[tokio::test] async fn test_flush() { let mempool = Arc::new(default_mempool().await); // insert txs let txs = default_mock_txs(555); concurrent_insert(txs.clone(), Arc::clone(&mempool)).await; assert_eq!(mempool.get_tx_cache().len().await, 555); let callback_cache = mempool.get_callback_cache(); for tx in txs.iter() { callback_cache.insert(tx.tx_hash.clone(), tx.clone()).await; } assert_eq!(callback_cache.len().await, 555); // flush exist txs let (remove_txs, _) = txs.split_at(123); let remove_hashes: Vec = remove_txs.iter().map(|tx| tx.tx_hash.clone()).collect(); exec_flush(remove_hashes, Arc::clone(&mempool)).await; assert_eq!(mempool.get_tx_cache().len().await, 432); assert_eq!(mempool.get_tx_cache().queue_len(), 432); exec_package(Arc::clone(&mempool), CYCLE_LIMIT, TX_NUM_LIMIT).await; assert_eq!(mempool.get_tx_cache().queue_len(), 432); assert_eq!(callback_cache.len().await, 0); // flush absent txs let txs = default_mock_txs(222); let remove_hashes: Vec = txs.iter().map(|tx| tx.tx_hash.clone()).collect(); exec_flush(remove_hashes, 
Arc::clone(&mempool)).await; assert_eq!(mempool.get_tx_cache().len().await, 432); assert_eq!(mempool.get_tx_cache().queue_len(), 432); } macro_rules! ensure_order_txs { ($in_pool: expr, $out_pool: expr) => { let mempool = &Arc::new(default_mempool().await); let txs = &default_mock_txs($in_pool + $out_pool); let (in_pool_txs, out_pool_txs) = txs.split_at($in_pool); concurrent_insert(in_pool_txs.to_vec(), Arc::clone(mempool)).await; concurrent_broadcast(out_pool_txs.to_vec(), Arc::clone(mempool)).await; let tx_hashes: Vec = txs.iter().map(|tx| tx.tx_hash.clone()).collect(); exec_ensure_order_txs(tx_hashes.clone(), Arc::clone(mempool)).await; assert_eq!(mempool.get_callback_cache().len().await, $out_pool); let fetch_txs = exec_get_full_txs(tx_hashes, Arc::clone(mempool)).await; assert_eq!(fetch_txs.len(), txs.len()); }; } #[tokio::test] async fn test_ensure_order_txs() { // all txs are in pool ensure_order_txs!(100, 0); // 50 txs are not in pool ensure_order_txs!(50, 50); // all txs are not in pool ensure_order_txs!(0, 100); } #[tokio::test] async fn test_sync_propose_txs() { let mempool = &Arc::new(default_mempool().await); let txs = &default_mock_txs(50); let (exist_txs, need_sync_txs) = txs.split_at(20); concurrent_insert(exist_txs.to_vec(), Arc::clone(mempool)).await; concurrent_broadcast(need_sync_txs.to_vec(), Arc::clone(mempool)).await; let tx_hashes: Vec = txs.iter().map(|tx| tx.tx_hash.clone()).collect(); exec_sync_propose_txs(tx_hashes, Arc::clone(mempool)).await; assert_eq!(mempool.get_tx_cache().len().await, 50); } #[rustfmt::skip] /// Bench in Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz (8 x 2200): /// test tests::mempool::bench_check_sig ... bench: 2,881,140 ns/iter (+/- 907,215) /// test tests::mempool::bench_check_sig_serial_1 ... bench: 94,666 ns/iter (+/- 11,070) /// test tests::mempool::bench_check_sig_serial_10 ... bench: 966,800 ns/iter (+/- 97,227) /// test tests::mempool::bench_check_sig_serial_100 ... 
bench: 10,098,216 ns/iter (+/- 1,289,584) /// test tests::mempool::bench_check_sig_serial_1000 ... bench: 100,396,727 ns/iter (+/- 10,665,143) /// test tests::mempool::bench_flush ... bench: 3,504,193 ns/iter (+/- 1,096,699) /// test tests::mempool::bench_get_10000_full_txs ... bench: 14,997,762 ns/iter (+/- 2,697,725) /// test tests::mempool::bench_get_20000_full_txs ... bench: 31,858,720 ns/iter (+/- 3,822,648) /// test tests::mempool::bench_get_40000_full_txs ... bench: 65,027,639 ns/iter (+/- 3,926,768) /// test tests::mempool::bench_get_80000_full_txs ... bench: 131,066,149 ns/iter (+/- 11,457,417) /// test tests::mempool::bench_insert ... bench: 9,320,879 ns/iter (+/- 710,246) /// test tests::mempool::bench_insert_serial_1 ... bench: 4,588 ns/iter (+/- 349) /// test tests::mempool::bench_insert_serial_10 ... bench: 44,027 ns/iter (+/- 4,168) /// test tests::mempool::bench_insert_serial_100 ... bench: 432,974 ns/iter (+/- 43,058) /// test tests::mempool::bench_insert_serial_1000 ... bench: 4,449,648 ns/iter (+/- 560,818) /// test tests::mempool::bench_mock_txs ... bench: 5,890,752 ns/iter (+/- 583,029) /// test tests::mempool::bench_package ... bench: 3,684,431 ns/iter (+/- 278,575) /// test tx_cache::tests::bench_flush ... bench: 3,034,868 ns/iter (+/- 371,514) /// test tx_cache::tests::bench_flush_insert ... bench: 2,954,223 ns/iter (+/- 389,002) /// test tx_cache::tests::bench_gen_txs ... bench: 2,479,226 ns/iter (+/- 399,728) /// test tx_cache::tests::bench_insert ... bench: 2,742,422 ns/iter (+/- 641,587) /// test tx_cache::tests::bench_package ... bench: 70,563 ns/iter (+/- 16,723) /// test tx_cache::tests::bench_package_insert ... 
bench: 2,654,196 ns/iter (+/- 285,460) #[bench] fn bench_insert(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = &Arc::new(default_mempool_sync()); b.iter(|| { let txs = default_mock_txs(100); runtime.block_on(concurrent_insert(txs, Arc::clone(mempool))); }); } #[bench] fn bench_insert_serial_1(b: &mut Bencher) { let mempool = &Arc::new(default_mempool_sync()); let txs = default_mock_txs(1); b.iter(move || { futures::executor::block_on(async { for tx in txs.clone().into_iter() { let _ = mempool.insert(Context::new(), tx).await; } }); }) } #[bench] fn bench_insert_serial_10(b: &mut Bencher) { let mempool = &Arc::new(default_mempool_sync()); let txs = default_mock_txs(10); b.iter(move || { futures::executor::block_on(async { for tx in txs.clone().into_iter() { let _ = mempool.insert(Context::new(), tx).await; } }); }) } #[bench] fn bench_insert_serial_100(b: &mut Bencher) { let mempool = &Arc::new(default_mempool_sync()); let txs = default_mock_txs(100); b.iter(move || { futures::executor::block_on(async { for tx in txs.clone().into_iter() { let _ = mempool.insert(Context::new(), tx).await; } }); }) } #[bench] fn bench_insert_serial_1000(b: &mut Bencher) { let mempool = &Arc::new(default_mempool_sync()); let txs = default_mock_txs(1000); b.iter(move || { futures::executor::block_on(async { for tx in txs.clone().into_iter() { let _ = mempool.insert(Context::new(), tx).await; } }); }) } #[bench] fn bench_package(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = Arc::new(default_mempool_sync()); let txs = default_mock_txs(50_000); runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool))); b.iter(|| { runtime.block_on(exec_package( Arc::clone(&mempool), CYCLE_LIMIT, TX_NUM_LIMIT, )); }); } #[bench] fn bench_get_10000_full_txs(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = Arc::new(default_mempool_sync()); let txs = 
default_mock_txs(10_000); let tx_hashes = txs.iter().map(|tx| tx.tx_hash.clone()).collect::>(); runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool))); b.iter(|| { runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool))); }); } #[bench] fn bench_get_20000_full_txs(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = Arc::new(default_mempool_sync()); let txs = default_mock_txs(20_000); let tx_hashes = txs.iter().map(|tx| tx.tx_hash.clone()).collect::>(); runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool))); b.iter(|| { runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool))); }); } #[bench] fn bench_get_40000_full_txs(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = Arc::new(default_mempool_sync()); let txs = default_mock_txs(40_000); let tx_hashes = txs.iter().map(|tx| tx.tx_hash.clone()).collect::>(); runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool))); b.iter(|| { runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool))); }); } #[bench] fn bench_get_80000_full_txs(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = Arc::new(default_mempool_sync()); let txs = default_mock_txs(80_000); let tx_hashes = txs.iter().map(|tx| tx.tx_hash.clone()).collect::>(); runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool))); b.iter(|| { runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool))); }); } #[bench] fn bench_flush(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let mempool = &Arc::new(default_mempool_sync()); let txs = &default_mock_txs(100); let remove_hashes: &Vec = &txs.iter().map(|tx| tx.tx_hash.clone()).collect(); b.iter(|| { runtime.block_on(concurrent_insert(txs.clone(), Arc::clone(mempool))); runtime.block_on(exec_flush(remove_hashes.clone(), Arc::clone(mempool))); 
runtime.block_on(exec_package(Arc::clone(mempool), CYCLE_LIMIT, TX_NUM_LIMIT)); }); } #[tokio::test] async fn bench_sign_with_spawn_list() { let adapter = Arc::new(HashMemPoolAdapter::new()); let txs = default_mock_txs(30000); let len = txs.len(); let now = std::time::Instant::now(); let futs = txs .into_iter() .map(|tx| { let adapter = Arc::clone(&adapter); tokio::spawn(async move { adapter .check_authorization(Context::new(), Box::new(tx)) .await .unwrap(); }) }) .collect::>(); futures::future::try_join_all(futs).await.unwrap(); println!( "bench_sign_with_spawn_list size {:?} cost {:?}", len, now.elapsed() ); } #[tokio::test] async fn bench_sign() { let adapter = HashMemPoolAdapter::new(); let txs = default_mock_txs(30000) .into_iter() .map(Box::new) .collect::>(); let now = std::time::Instant::now(); for tx in txs.iter() { adapter .check_authorization(Context::new(), tx.clone()) .await .unwrap(); } println!("bench_sign size {:?} cost {:?}", txs.len(), now.elapsed()); } #[bench] fn bench_mock_txs(b: &mut Bencher) { b.iter(|| { default_mock_txs(100); }); } #[bench] fn bench_check_sig(b: &mut Bencher) { let mut runtime = tokio::runtime::Runtime::new().unwrap(); let txs = &default_mock_txs(100); b.iter(|| { runtime.block_on(concurrent_check_sig(txs.clone())); }); } #[bench] fn bench_check_sig_serial_1(b: &mut Bencher) { let txs = default_mock_txs(1); b.iter(|| { for tx in txs.iter() { let _ = check_sig(&tx); } }) } #[bench] fn bench_check_sig_serial_10(b: &mut Bencher) { let txs = default_mock_txs(10); b.iter(|| { for tx in txs.iter() { let _ = check_sig(&tx); } }) } #[bench] fn bench_check_sig_serial_100(b: &mut Bencher) { let txs = default_mock_txs(100); b.iter(|| { for tx in txs.iter() { let _ = check_sig(&tx); } }) } #[bench] fn bench_check_sig_serial_1000(b: &mut Bencher) { let txs = default_mock_txs(1000); b.iter(|| { for tx in txs.iter() { let _ = check_sig(&tx); } }) } ================================================ FILE: core/mempool/src/tests/mod.rs 
================================================ extern crate test; mod mempool; use std::convert::{From, TryFrom}; use std::sync::Arc; use async_trait::async_trait; use chashmap::CHashMap; use futures::executor; use rand::random; use rand::rngs::OsRng; use common_crypto::{ Crypto, PrivateKey, PublicKey, Secp256k1, Secp256k1PrivateKey, Secp256k1PublicKey, Secp256k1Signature, Signature, ToPublicKey, }; use protocol::codec::ProtocolCodec; use protocol::traits::{Context, MemPool, MemPoolAdapter, MixedTxHashes}; use protocol::types::{Address, Hash, RawTransaction, SignedTransaction, TransactionRequest}; use protocol::{Bytes, ProtocolResult}; use crate::{check_dup_order_hashes, HashMemPool, MemPoolError}; const CYCLE_LIMIT: u64 = 1_000_000; const TX_NUM_LIMIT: u64 = 10_000; const CURRENT_HEIGHT: u64 = 999; const POOL_SIZE: usize = 100_000; const MAX_TX_SIZE: u64 = 1024; // 1KB const TIMEOUT: u64 = 1000; const TIMEOUT_GAP: u64 = 100; const TX_CYCLE: u64 = 1; pub struct HashMemPoolAdapter { network_txs: CHashMap, } impl HashMemPoolAdapter { fn new() -> HashMemPoolAdapter { HashMemPoolAdapter { network_txs: CHashMap::new(), } } } #[async_trait] impl MemPoolAdapter for HashMemPoolAdapter { async fn pull_txs( &self, _ctx: Context, _height: Option, tx_hashes: Vec, ) -> ProtocolResult> { let mut vec = Vec::new(); for hash in tx_hashes { if let Some(tx) = self.network_txs.get(&hash) { vec.push(tx.clone()); } } Ok(vec) } async fn broadcast_tx(&self, _ctx: Context, tx: SignedTransaction) -> ProtocolResult<()> { self.network_txs.insert(tx.tx_hash.clone(), tx); Ok(()) } async fn check_authorization( &self, _ctx: Context, tx: Box, ) -> ProtocolResult<()> { check_hash(&tx.clone()).await?; check_sig(&tx) } async fn check_transaction( &self, _ctx: Context, _tx: &SignedTransaction, ) -> ProtocolResult<()> { Ok(()) } async fn check_storage_exist(&self, _ctx: Context, _tx_hash: &Hash) -> ProtocolResult<()> { Ok(()) } async fn get_latest_height(&self, _ctx: Context) -> ProtocolResult { 
Ok(CURRENT_HEIGHT) } async fn get_transactions_from_storage( &self, _ctx: Context, _height: Option, _tx_hashes: &[Hash], ) -> ProtocolResult>> { Ok(vec![]) } fn report_good(&self, _ctx: Context) {} fn set_args(&self, _timeout_gap: u64, _cycles_limit: u64, _max_tx_size: u64) {} } pub fn default_mock_txs(size: usize) -> Vec { mock_txs(size, 0, TIMEOUT) } fn mock_txs(valid_size: usize, invalid_size: usize, timeout: u64) -> Vec { let mut vec = Vec::new(); let priv_key = Secp256k1PrivateKey::generate(&mut OsRng); let pub_key = priv_key.pub_key(); for i in 0..valid_size + invalid_size { vec.push(mock_signed_tx(&priv_key, &pub_key, timeout, i < valid_size)); } vec } fn default_mempool_sync() -> HashMemPool { let mut rt = tokio::runtime::Runtime::new().unwrap(); rt.block_on(default_mempool()) } async fn default_mempool() -> HashMemPool { new_mempool(POOL_SIZE, TIMEOUT_GAP, CYCLE_LIMIT, MAX_TX_SIZE).await } async fn new_mempool( pool_size: usize, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64, ) -> HashMemPool { let adapter = HashMemPoolAdapter::new(); let mempool = HashMemPool::new(pool_size, adapter, vec![]).await; mempool.set_args(timeout_gap, cycles_limit, max_tx_size); mempool } async fn check_hash(tx: &SignedTransaction) -> ProtocolResult<()> { let mut raw = tx.raw.clone(); let raw_bytes = raw.encode().await?; let tx_hash = Hash::digest(raw_bytes); if tx_hash != tx.tx_hash { return Err(MemPoolError::CheckHash { expect: tx.tx_hash.clone(), actual: tx_hash, } .into()); } Ok(()) } fn check_sig(tx: &SignedTransaction) -> ProtocolResult<()> { if Secp256k1::verify_signature(&tx.tx_hash.as_bytes(), &tx.signature, &tx.pubkey).is_err() { return Err(MemPoolError::CheckAuthorization { tx_hash: tx.tx_hash.clone(), err_info: "".to_string(), } .into()); } Ok(()) } async fn concurrent_check_sig(txs: Vec) { let futs = txs .into_iter() .map(|tx| tokio::task::spawn_blocking(move || check_sig(&tx).unwrap())) .collect::>(); futures::future::try_join_all(futs).await.unwrap(); } 
async fn concurrent_insert( txs: Vec, mempool: Arc>, ) { let futs = txs .into_iter() .map(|tx| { let mempool = Arc::clone(&mempool); tokio::spawn(async { exec_insert(tx, mempool).await }) }) .collect::>(); futures::future::try_join_all(futs).await.unwrap(); } async fn concurrent_broadcast( txs: Vec, mempool: Arc>, ) { let futs = txs .into_iter() .map(|tx| { let mempool = Arc::clone(&mempool); tokio::spawn(async move { mempool .get_adapter() .broadcast_tx(Context::new(), tx) .await .unwrap() }) }) .collect::>(); futures::future::try_join_all(futs).await.unwrap(); } async fn exec_insert(signed_tx: SignedTransaction, mempool: Arc>) { let _ = mempool.insert(Context::new(), signed_tx).await.is_ok(); } async fn exec_flush(remove_hashes: Vec, mempool: Arc>) { mempool.flush(Context::new(), &remove_hashes).await.unwrap() } async fn exec_package( mempool: Arc>, cycle_limit: u64, tx_num_limit: u64, ) -> MixedTxHashes { mempool .package(Context::new(), cycle_limit, tx_num_limit) .await .unwrap() } async fn exec_ensure_order_txs( require_hashes: Vec, mempool: Arc>, ) { mempool .ensure_order_txs(Context::new(), None, &require_hashes) .await .unwrap(); } async fn exec_sync_propose_txs( require_hashes: Vec, mempool: Arc>, ) { mempool .sync_propose_txs(Context::new(), require_hashes) .await .unwrap(); } async fn exec_get_full_txs( require_hashes: Vec, mempool: Arc>, ) -> Vec { mempool .get_full_txs(Context::new(), None, &require_hashes) .await .unwrap() } fn mock_signed_tx( priv_key: &Secp256k1PrivateKey, pub_key: &Secp256k1PublicKey, timeout: u64, valid: bool, ) -> SignedTransaction { let nonce = Hash::digest(Bytes::from(get_random_bytes(10))); let request = TransactionRequest { service_name: "test".to_owned(), method: "test".to_owned(), payload: "test".to_owned(), }; let mut raw = RawTransaction { chain_id: nonce.clone(), nonce, timeout, cycles_limit: TX_CYCLE, cycles_price: 1, request, sender: Address::from_pubkey_bytes(pub_key.to_bytes()).unwrap(), }; let raw_bytes = 
executor::block_on(async { raw.encode().await.unwrap() });
    let tx_hash = Hash::digest(raw_bytes);
    let signature = if valid {
        Secp256k1::sign_message(&tx_hash.as_bytes(), &priv_key.to_bytes()).unwrap()
    } else {
        // Deliberately invalid: an all-zero signature that cannot verify.
        Secp256k1Signature::try_from([0u8; 64].as_ref()).unwrap()
    };

    SignedTransaction {
        raw,
        tx_hash,
        pubkey: pub_key.to_bytes(),
        signature: signature.to_bytes(),
    }
}

fn get_random_bytes(len: usize) -> Vec<u8> {
    (0..len).map(|_| random::<u8>()).collect()
}

/// True when the packaged order hashes follow the insertion order of `txs`.
/// FIX(review): the original used `.any(...)`, which returns true as soon as a
/// single position matches; order consistency requires *every* position to
/// match, i.e. `.all(...)`.
fn check_order_consistant(mixed_tx_hashes: &MixedTxHashes, txs: &[SignedTransaction]) -> bool {
    mixed_tx_hashes
        .order_tx_hashes
        .iter()
        .enumerate()
        .all(|(i, hash)| hash == &txs.get(i).unwrap().tx_hash)
}

================================================
FILE: core/mempool/src/tx_cache.rs
================================================
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;

use crossbeam_queue::ArrayQueue;

use protocol::traits::MixedTxHashes;
use protocol::types::{Hash, SignedTransaction};
use protocol::ProtocolResult;

use crate::map::Map;
use crate::MemPoolError;

/// Wrap `SignedTransaction` with two marks for mempool management.
///
/// Each new transaction inserting into mempool will set `removed` false,
/// while transaction from propose-transaction-sync will additionally set
/// `proposed` true. When shared transaction in `TxCache` removed from map,
/// it will set `removed` true. The `removed` and `proposed` marks will remind
/// queue in `TxCache` to appropriately process elements while packaging
/// transaction hashes for consensus.
pub struct TxWrapper {
    /// Content.
    tx: SignedTransaction,
    /// While map removes a `shared_tx` during flush, it will mark `removed`
    /// true. Afterwards, queue removes the transaction which marks
    /// `removed` true during package.
    removed: AtomicBool,
    /// The response transactions in propose-syncing will insert into `TxCache`
    /// marking `proposed` true.
    /// While collecting propose_tx_hashes during package,
    /// it will skip transactions which mark `proposed` true.
    proposed: AtomicBool,
}

impl TxWrapper {
    #[allow(dead_code)]
    pub(crate) fn new(tx: SignedTransaction) -> Self {
        TxWrapper {
            tx,
            removed: AtomicBool::new(false),
            proposed: AtomicBool::new(false),
        }
    }

    pub(crate) fn propose(tx: SignedTransaction) -> Self {
        TxWrapper {
            tx,
            removed: AtomicBool::new(false),
            proposed: AtomicBool::new(true),
        }
    }

    pub(crate) fn set_removed(&self) {
        self.removed.store(true, Ordering::SeqCst);
    }

    #[inline]
    pub(crate) fn is_removed(&self) -> bool {
        self.removed.load(Ordering::SeqCst)
    }

    #[inline]
    fn is_proposed(&self) -> bool {
        self.proposed.load(Ordering::SeqCst)
    }

    /// A tx is "timeout" when its declared timeout height has passed, or lies
    /// beyond the acceptable window (`timeout` = current_height + timeout_gap).
    #[inline]
    fn is_timeout(&self, current_height: u64, timeout: u64) -> bool {
        let tx_timeout = self.tx.raw.timeout;
        tx_timeout <= current_height || tx_timeout > timeout
    }
}

/// Share `TxWrapper` for collections in `TxCache`.
pub type SharedTx = Arc<TxWrapper>;

/// An enum stands for package stage
#[derive(PartialEq, Eq)]
enum Stage {
    /// Packing order_tx_hashes
    OrderTxs,
    /// Packing propose_tx_hashes
    ProposeTxs,
    /// Packing finished. Only insert transactions into temp queue.
    Finished,
}

impl Stage {
    fn next(&self) -> Self {
        match self {
            Stage::OrderTxs => Stage::ProposeTxs,
            Stage::ProposeTxs => Stage::Finished,
            Stage::Finished => panic!("There is no next stage after finished stage!"),
        }
    }
}

/// Queue role. Incumbent is for insertion and package.
struct QueueRole {
    incumbent: Arc<ArrayQueue<SharedTx>>,
    candidate: Arc<ArrayQueue<SharedTx>>,
}

/// This is the core structure for caching new transactions and
/// feeding transactions in batch for consensus.
///
/// The queues are served for packaging a batch of transactions in insertion
/// order. The `map` is served for randomly search and removal.
/// All these collections should support concurrent insertion.
/// We set two queues, `queue_0` and `queue_1`, to make package concurrent with
/// insertion. When `queue_0` served for insertion and package begins,
/// transactions pop from `queue_0` and push into `queue_1` while new
/// transactions still insert into `queue_0` concurrently. While `queue_0` pops
/// out, `queue_1` switches to insertion queue.
pub struct TxCache {
    /// One queue.
    queue_0: Arc<ArrayQueue<SharedTx>>,
    /// Another queue.
    queue_1: Arc<ArrayQueue<SharedTx>>,
    /// A map for randomly search and removal.
    map: Map<SharedTx>,
    /// This is used to pick a queue for insertion,
    /// If true selects `queue_0`, else `queue_1`.
    is_zero: AtomicBool,
    /// This is an atomic state to solve concurrent insertion problem during
    /// package. While switching insertion queues, some transactions may
    /// still insert into the old queue. We use this state to make sure
    /// switch insertions *happen-before* old queue re-pop.
    concurrent_count: AtomicUsize,
}

impl TxCache {
    pub fn new(pool_size: usize) -> Self {
        TxCache {
            // 2x capacity: both queues must absorb a full pool during a switch.
            queue_0: Arc::new(ArrayQueue::new(pool_size * 2)),
            queue_1: Arc::new(ArrayQueue::new(pool_size * 2)),
            map: Map::new(pool_size * 2),
            is_zero: AtomicBool::new(true),
            concurrent_count: AtomicUsize::new(0),
        }
    }

    pub async fn len(&self) -> usize {
        self.map.len().await
    }

    pub async fn insert_new_tx(&self, signed_tx: SignedTransaction) -> ProtocolResult<()> {
        let tx_hash = signed_tx.tx_hash.clone();
        let tx_wrapper = TxWrapper::new(signed_tx);
        let shared_tx = Arc::new(tx_wrapper);
        self.insert(tx_hash, shared_tx).await
    }

    pub async fn insert_propose_tx(&self, signed_tx: SignedTransaction) -> ProtocolResult<()> {
        let tx_hash = signed_tx.tx_hash.clone();
        let tx_wrapper = TxWrapper::propose(signed_tx);
        let shared_tx = Arc::new(tx_wrapper);
        self.insert(tx_hash, shared_tx).await
    }

    /// Return the subset of `tx_hashes` that is not cached yet.
    pub async fn show_unknown(&self, tx_hashes: &[Hash]) -> Vec<Hash> {
        let mut unknow_hashes = vec![];
        for tx_hash in tx_hashes.iter() {
            if !self.contain(tx_hash).await {
                unknow_hashes.push(tx_hash.clone());
            }
        }
        unknow_hashes
    }

    pub async fn flush(&self, tx_hashes: &[Hash], current_height: u64, timeout: u64) {
        for tx_hash in tx_hashes {
            let opt =
self.map.get(tx_hash).await;
            if let Some(shared_tx) = opt {
                shared_tx.set_removed();
            }
        }
        // Dividing set removed and remove into two loops is to avoid lock competition.
        self.map.remove_batch(tx_hashes).await;
        self.flush_incumbent_queue(current_height, timeout).await;
    }

    /// Drain the incumbent queue into `MixedTxHashes` for consensus:
    /// the first `tx_num_limit` valid txs become order hashes, the next
    /// `tx_num_limit` (skipping already-proposed ones) become propose hashes.
    /// Valid txs are preserved by pushing them into the candidate queue, which
    /// then becomes the incumbent.
    pub async fn package(
        &self,
        _cycles_limit: u64,
        tx_num_limit: u64,
        current_height: u64,
        timeout: u64,
    ) -> ProtocolResult<MixedTxHashes> {
        let queue_role = self.get_queue_role();

        let mut order_tx_hashes = Vec::new();
        let mut propose_tx_hashes = Vec::new();
        let mut timeout_tx_hashes = Vec::new();

        let mut tx_count: u64 = 0;
        let mut stage = Stage::OrderTxs;

        loop {
            if let Ok(shared_tx) = queue_role.incumbent.pop() {
                let tx_hash = &shared_tx.tx.tx_hash;

                if shared_tx.is_removed() {
                    continue;
                }
                if shared_tx.is_timeout(current_height, timeout) {
                    timeout_tx_hashes.push(tx_hash.clone());
                    continue;
                }
                // After previous filter, tx is valid and should be cached in temp_queue.
                if queue_role
                    .candidate
                    .push(Arc::<TxWrapper>::clone(&shared_tx))
                    .is_err()
                {
                    log::error!(
                        "[core_mempool]: candidate queue is full while package, delete {:?}",
                        &shared_tx.tx.tx_hash
                    );
                    self.map.remove(&shared_tx.tx.tx_hash).await;
                }

                if stage == Stage::Finished
                    || (stage == Stage::ProposeTxs && shared_tx.is_proposed())
                {
                    continue;
                }

                tx_count += 1;
                if tx_count > tx_num_limit {
                    stage = stage.next();
                    tx_count = 1;
                }

                match stage {
                    Stage::OrderTxs => order_tx_hashes.push(tx_hash.clone()),
                    Stage::ProposeTxs => propose_tx_hashes.push(tx_hash.clone()),
                    Stage::Finished => {}
                }
            } else {
                // Switch queue_roles
                let new_role = self.switch_queue_role();
                // Transactions may insert into previous incumbent queue during role switch.
                self.process_omission_txs(new_role).await;
                break;
            }
        }
        // Remove timeout tx in map
        self.map.remove_batch(&timeout_tx_hashes).await;

        Ok(MixedTxHashes {
            order_tx_hashes,
            propose_tx_hashes,
        })
    }

    pub async fn check_exist(&self, tx_hash: &Hash) -> ProtocolResult<()> {
        if self.contain(tx_hash).await {
            return Err(MemPoolError::Dup {
                tx_hash: tx_hash.clone(),
            }
            .into());
        }
        Ok(())
    }

    pub async fn check_reach_limit(&self, pool_size: usize) -> ProtocolResult<()> {
        if self.len().await >= pool_size {
            return Err(MemPoolError::ReachLimit { pool_size }.into());
        }
        Ok(())
    }

    pub async fn contain(&self, tx_hash: &Hash) -> bool {
        self.map.contains_key(tx_hash).await
    }

    pub async fn get(&self, tx_hash: &Hash) -> Option<SignedTransaction> {
        self.map
            .get(tx_hash)
            .await
            .map(|shared_tx| shared_tx.tx.clone())
    }

    pub fn queue_len(&self) -> usize {
        if self.is_zero.load(Ordering::Relaxed) {
            self.queue_0.len()
        } else {
            self.queue_1.len()
        }
    }

    async fn insert(&self, tx_hash: Hash, shared_tx: SharedTx) -> ProtocolResult<()> {
        // If multiple transactions exactly the same insert concurrently,
        // this will prevent them to be both insert successfully into queue.
        if self
            .map
            .insert(tx_hash.clone(), Arc::<TxWrapper>::clone(&shared_tx))
            .await
            .is_some()
        {
            return Err(MemPoolError::Dup { tx_hash }.into());
        }

        self.concurrent_count.fetch_add(1, Ordering::SeqCst);
        let rst = self
            .get_queue_role()
            .incumbent
            .push(Arc::<TxWrapper>::clone(&shared_tx));
        self.concurrent_count.fetch_sub(1, Ordering::SeqCst);

        // If queue insertion failed, removes from map.
        if rst.is_err() {
            // If tx_hash exists, it will panic. So repeat check must do before insertion.
            self.map.remove(&tx_hash).await;
            Err(MemPoolError::Insert { tx_hash }.into())
        } else {
            Ok(())
        }
    }

    // Process transactions insert into previous incumbent queue during role switch.
    async fn process_omission_txs(&self, queue_role: QueueRole) {
        'outer: loop {
            // When there are no transaction insertions processing,
            // pop off previous incumbent queue and push them into current incumbent queue.
            if self.concurrent_count.load(Ordering::SeqCst) == 0 {
                while let Ok(shared_tx) = queue_role.candidate.pop() {
                    if queue_role
                        .incumbent
                        .push(Arc::<TxWrapper>::clone(&shared_tx))
                        .is_err()
                    {
                        log::error!(
                            "[core_mempool]: incumbent queue is full while process_omission_txs, delete {:?}",
                            &shared_tx.tx.tx_hash
                        );
                        self.map.remove(&shared_tx.tx.tx_hash).await;
                    }
                }
                break 'outer;
            }
        }
    }

    /// Like `package` but without collecting hashes: rotate the incumbent
    /// queue, dropping removed/timeout transactions on the way.
    async fn flush_incumbent_queue(&self, current_height: u64, timeout: u64) {
        let queue_role = self.get_queue_role();
        let mut timeout_tx_hashes = Vec::new();

        loop {
            if let Ok(shared_tx) = queue_role.incumbent.pop() {
                let tx_hash = &shared_tx.tx.tx_hash;

                if shared_tx.is_removed() {
                    continue;
                }
                if shared_tx.is_timeout(current_height, timeout) {
                    timeout_tx_hashes.push(tx_hash.clone());
                    continue;
                }
                // After previous filter, tx is valid and should be cached in temp_queue.
                if queue_role
                    .candidate
                    .push(Arc::<TxWrapper>::clone(&shared_tx))
                    .is_err()
                {
                    log::error!(
                        "[core_mempool]: candidate queue is full while flush_incumbent_queue, delete {:?}",
                        &shared_tx.tx.tx_hash
                    );
                    self.map.remove(&shared_tx.tx.tx_hash).await;
                }
            } else {
                // Switch queue_roles
                let new_role = self.switch_queue_role();
                // Transactions may insert into previous incumbent queue during role switch.
                self.process_omission_txs(new_role).await;
                break;
            }
        }
        // Remove timeout tx in map
        self.map.remove_batch(&timeout_tx_hashes).await;
    }

    fn switch_queue_role(&self) -> QueueRole {
        self.is_zero.fetch_xor(true, Ordering::SeqCst);
        self.get_queue_role()
    }

    fn get_queue_role(&self) -> QueueRole {
        let (incumbent, candidate) = if self.is_zero.load(Ordering::SeqCst) {
            (&self.queue_0, &self.queue_1)
        } else {
            (&self.queue_1, &self.queue_0)
        };
        QueueRole {
            incumbent: Arc::clone(incumbent),
            candidate: Arc::clone(candidate),
        }
    }
}

#[cfg(test)]
mod tests {
    extern crate test;

    use std::sync::Arc;

    use rand::random;
    use test::Bencher;

    use protocol::types::{
        Address, Bytes, Hash, RawTransaction, SignedTransaction, TransactionRequest,
    };

    use crate::map::Map;
    use crate::tx_cache::{TxCache, TxWrapper};

    const POOL_SIZE: usize = 1000;
    const BYTES_LEN: usize = 10;
    const TX_NUM: usize = 1000;
    const TX_CYCLE: u64 = 1;
    const TX_NUM_LIMIT: u64 = 20000;
    const CYCLE_LIMIT: u64 = 500;
    const CURRENT_H: u64 = 100;
    const TIMEOUT: u64 = 150;

    fn gen_bytes() -> Vec<u8> {
        (0..BYTES_LEN).map(|_| random::<u8>()).collect()
    }

    fn gen_signed_txs(n: usize) -> Vec<SignedTransaction> {
        let mut vec = Vec::new();
        for _ in 0..n {
            vec.push(mock_signed_tx(gen_bytes()));
        }
        vec
    }

    /// A structurally valid tx with a fixed pubkey and a fake signature —
    /// good enough for cache tests, which never verify signatures.
    fn mock_signed_tx(bytes: Vec<u8>) -> SignedTransaction {
        let rand_hash = Hash::digest(Bytes::from(bytes));
        let chain_id = rand_hash.clone();
        let nonce = rand_hash.clone();
        let tx_hash = rand_hash;
        let pubkey = {
            let hex_str = "03380295981e77dcd0a3f50c1d58867e590f2837f03daf639d683ec5e995c02984";
            Bytes::from(hex::decode(hex_str).unwrap())
        };
        let fake_sig = Hash::digest(pubkey.clone()).as_bytes();
        let request = TransactionRequest {
            service_name: "test".to_owned(),
            method: "test".to_owned(),
            payload: "test".to_owned(),
        };
        let raw = RawTransaction {
            chain_id,
            nonce,
            timeout: TIMEOUT,
            cycles_limit: TX_CYCLE,
            cycles_price: 1,
            request,
            sender: Address::from_pubkey_bytes(pubkey.clone()).unwrap(),
        };

        SignedTransaction {
            raw,
            tx_hash,
            pubkey,
            signature: fake_sig,
        }
    }

    async fn
concurrent_insert(txs: Vec<SignedTransaction>, tx_cache: Arc<TxCache>) {
        let futs = txs
            .into_iter()
            .map(|tx| {
                let tx_cache = Arc::clone(&tx_cache);
                tokio::spawn(async move { tx_cache.insert_new_tx(tx.clone()).await })
            })
            .collect::<Vec<_>>();
        futures::future::try_join_all(futs).await.unwrap();
    }

    async fn concurrent_flush(tx_cache: Arc<TxCache>, tx_hashes: Vec<Hash>, height: u64) {
        tokio::spawn(async move {
            tx_cache.flush(&tx_hashes, height, height + TIMEOUT).await;
        })
        .await
        .unwrap();
    }

    async fn concurrent_package(tx_cache: Arc<TxCache>) {
        tokio::spawn(async move {
            tx_cache
                .package(CYCLE_LIMIT, TX_NUM_LIMIT, CURRENT_H, TIMEOUT)
                .await
                .unwrap();
        })
        .await
        .unwrap();
    }

    #[tokio::test]
    async fn test_concurrent_insert() {
        // Each tx is inserted five times; only one copy may survive.
        let txs = gen_signed_txs(POOL_SIZE / 2);
        let txs: Vec<SignedTransaction> = txs
            .iter()
            .flat_map(|tx| (0..5).map(|_| tx.clone()).collect::<Vec<_>>())
            .collect();
        let tx_cache = Arc::new(TxCache::new(POOL_SIZE));
        concurrent_insert(txs, Arc::clone(&tx_cache)).await;
        assert_eq!(tx_cache.len().await, POOL_SIZE / 2);
    }

    #[tokio::test]
    async fn test_insert_overlap() {
        let txs = gen_signed_txs(1);
        let tx = txs.get(0).unwrap();
        let map = Map::new(POOL_SIZE);

        let tx_wrapper_0 = TxWrapper::new(tx.clone());
        tx_wrapper_0.set_removed();
        map.insert(tx.tx_hash.clone(), Arc::new(tx_wrapper_0)).await;
        let shared_tx_0 = map.get(&tx.tx_hash).await.unwrap();
        assert!(shared_tx_0.is_removed());

        // Re-inserting the same hash must NOT replace the removed entry.
        let tx_wrapper_1 = TxWrapper::new(tx.clone());
        map.insert(tx.tx_hash.clone(), Arc::new(tx_wrapper_1)).await;
        let shared_tx_1 = map.get(&tx.tx_hash).await.unwrap();
        assert!(shared_tx_1.is_removed());
    }

    #[bench]
    fn bench_gen_txs(b: &mut Bencher) {
        b.iter(|| {
            gen_signed_txs(TX_NUM);
        });
    }

    #[bench]
    fn bench_insert(b: &mut Bencher) {
        let mut runtime = tokio::runtime::Runtime::new().unwrap();
        let txs = gen_signed_txs(TX_NUM);
        b.iter(|| {
            let tx_cache = Arc::new(TxCache::new(POOL_SIZE));
            runtime.block_on(concurrent_insert(txs.clone(), Arc::clone(&tx_cache)));
            assert_eq!(runtime.block_on(tx_cache.len()), TX_NUM);
            assert_eq!(tx_cache.queue_len(), TX_NUM);
        });
    }

    #[bench]
    fn bench_flush(b: &mut Bencher) {
        let mut runtime = tokio::runtime::Runtime::new().unwrap();
        let txs = gen_signed_txs(TX_NUM);
        let tx_hashes: Vec<Hash> = txs
            .iter()
            .map(|signed_tx| signed_tx.tx_hash.clone())
            .collect();
        b.iter(|| {
            let tx_cache = Arc::new(TxCache::new(POOL_SIZE));
            runtime.block_on(concurrent_insert(txs.clone(), Arc::clone(&tx_cache)));
            assert_eq!(runtime.block_on(tx_cache.len()), TX_NUM);
            assert_eq!(tx_cache.queue_len(), TX_NUM);
            runtime.block_on(tx_cache.flush(tx_hashes.as_slice(), CURRENT_H, CURRENT_H + TIMEOUT));
            assert_eq!(runtime.block_on(tx_cache.len()), 0);
            assert_eq!(tx_cache.queue_len(), 0);
        });
    }

    #[bench]
    fn bench_flush_insert(b: &mut Bencher) {
        let mut runtime = tokio::runtime::Runtime::new().unwrap();
        let txs_base = gen_signed_txs(TX_NUM / 2);
        let txs_insert = gen_signed_txs(TX_NUM / 2);
        let txs_flush: Vec<Hash> = txs_base
            .iter()
            .map(|signed_tx| signed_tx.tx_hash.clone())
            .collect();
        b.iter(|| {
            let tx_cache = Arc::new(TxCache::new(POOL_SIZE));
            runtime.block_on(concurrent_insert(txs_base.clone(), Arc::clone(&tx_cache)));
            runtime.block_on(concurrent_flush(
                Arc::clone(&tx_cache),
                txs_flush.clone(),
                CURRENT_H,
            ));
            runtime.block_on(concurrent_insert(txs_insert.clone(), Arc::clone(&tx_cache)));
            assert_eq!(runtime.block_on(tx_cache.len()), TX_NUM / 2);
            assert_eq!(tx_cache.queue_len(), TX_NUM / 2);
        });
    }

    #[bench]
    fn bench_package(b: &mut Bencher) {
        let mut runtime = tokio::runtime::Runtime::new().unwrap();
        let txs = gen_signed_txs(TX_NUM);
        let tx_cache = Arc::new(TxCache::new(POOL_SIZE));
        runtime.block_on(concurrent_insert(txs, Arc::clone(&tx_cache)));
        b.iter(|| {
            // NOTE(review): arguments appear swapped vs. `package(cycles_limit,
            // tx_num_limit, ...)`; the assertion below compensates because
            // CYCLE_LIMIT lands in the tx_num_limit slot — confirm intent.
            let mixed_tx_hashes = runtime
                .block_on(tx_cache.package(TX_NUM_LIMIT, CYCLE_LIMIT, CURRENT_H, TIMEOUT))
                .unwrap();
            assert_eq!(
                mixed_tx_hashes.order_tx_hashes.len(),
                (CYCLE_LIMIT / TX_CYCLE) as usize
            );
        });
    }

    #[bench]
    fn bench_package_insert(b: &mut Bencher) {
        let mut runtime = tokio::runtime::Runtime::new().unwrap();
        let txs = gen_signed_txs(TX_NUM / 2);
        let txs_insert = gen_signed_txs(TX_NUM / 2);
        b.iter(|| {
            let tx_cache = Arc::new(TxCache::new(POOL_SIZE));
            runtime.block_on(concurrent_insert(txs.clone(), Arc::clone(&tx_cache)));
            runtime.block_on(concurrent_package(Arc::clone(&tx_cache)));
            runtime.block_on(concurrent_insert(txs_insert.clone(), Arc::clone(&tx_cache)));
            assert_eq!(runtime.block_on(tx_cache.len()), TX_NUM);
            assert_eq!(tx_cache.queue_len(), TX_NUM);
        });
    }
}

================================================
FILE: core/network/Cargo.toml
================================================
[package]
name = "core-network"
version = "0.2.1"
# NOTE(review): the author email was stripped by extraction; reconstructed
# from the project's convention — confirm against the repository.
authors = ["Muta Dev <muta@nervos.org>"]
edition = "2018"
repository = "https://github.com/nervosnetwork/muta"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
protocol = { path = "../../protocol", package = "muta-protocol" }
common-apm = { path = "../../common/apm" }

async-trait = "0.1"
bincode = "1.2"
derive_more = "0.99"
futures-timer = "2.0"
futures = { version = "0.3", features = [ "compat" ] }
hex = "0.4"
log = "0.4"
parking_lot = "0.11"
prost = "0.6"
bytes = "0.5"
rand = "0.7"
serde = "1.0"
serde_derive = "1.0"
snap = "0.2"
tentacle = { git = "http://github.com/zeroqn/p2p", rev = "b2682d2", features = ["molc"]}
tokio = { version = "0.2", features = ["macros", "rt-core"]}
tokio-util = { version = "0.2", features = ["codec"] }
hostname = "0.3"
lazy_static = "1.4"
bs58 = "0.3"
arc-swap = "0.4"

[dev-dependencies]
env_logger = "0.6"
quickcheck = "0.9"
quickcheck_macros = "0.8"
tokio = { version = "0.2", features = ["macros", "rt-core"]}

[features]
default = []
global_ip_only = []
diagnostic = []
tentacle_metrics = ["tentacle/metrics"]

[[test]]
name = "broadcast"
path = "tests/gossip_test.rs"

================================================
FILE: core/network/examples/buycopy.rs
================================================
use std::{
    net::{IpAddr, Ipv4Addr, SocketAddr},
    thread,
    time::Duration,
};

use async_trait::async_trait;
use
bytes::Bytes; use log::{info, warn}; use serde_derive::{Deserialize, Serialize}; use tentacle::secio::SecioKeyPair; use core_network::{NetworkConfig, NetworkService}; use protocol::{ traits::{Context, Gossip, MessageHandler, Priority, Rpc, TrustFeedback}, types::Hash, ProtocolError, }; const IP_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); const RELEASE_CHANNEL: &str = "/gossip/cprd/cyperpunk7702_released"; const SHOP_CASH_CHANNEL: &str = "/rpc_call/v3/steam"; const SHOP_CHANNEL: &str = "/rpc_resp/v3/steam"; // Gossip message #[derive(Debug, Serialize, Deserialize)] struct Cyber7702Released { pub shop: String, #[serde(with = "core_network::serde")] pub hash: Hash, } // Gossip message handler struct TakeMyMoney { pub shop: N, } #[async_trait] impl MessageHandler for TakeMyMoney { type Message = Cyber7702Released; async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { let sell = async move { println!("Rush to {}. Shut up, take my money", msg.shop); let copy: ACopy = self .shop .call(ctx, SHOP_CASH_CHANNEL, BuyACopy, Priority::High) .await?; println!("Got my copy {:?}", copy); Ok::<(), ProtocolError>(()) }; match sell.await { Ok(_) => TrustFeedback::Good, Err(e) => { warn!("sell {}", e); TrustFeedback::Bad("sell failed".to_owned()) } } } } // Rpc message #[derive(Debug, Serialize, Deserialize)] struct BuyACopy; #[derive(Debug, Serialize, Deserialize)] struct ACopy { #[serde(with = "core_network::serde")] pub hash: Hash, #[serde(with = "core_network::serde_multi")] pub gifs: Vec, } // Rpc call message handler struct Checkout { dealer: N, } #[async_trait] impl MessageHandler for Checkout { type Message = BuyACopy; async fn process(&self, ctx: Context, _msg: Self::Message) -> TrustFeedback { let acopy = ACopy { hash: Hash::digest(Bytes::new()), gifs: vec![ Hash::digest("jacket"), Hash::digest("map"), Hash::digest("book"), ], }; match self .dealer .response(ctx, SHOP_CHANNEL, Ok(acopy), Priority::High) .await { Ok(_) => TrustFeedback::Good, 
Err(e) => TrustFeedback::Bad(format!("send copy {}", e.to_string())), } } } #[tokio::main] pub async fn main() { env_logger::init(); let bt_seckey_bytes = "8".repeat(32); let bt_seckey = hex::encode(&bt_seckey_bytes); let bt_keypair = SecioKeyPair::secp256k1_raw_key(bt_seckey_bytes).expect("keypair"); let bt_pubkey = hex::encode(bt_keypair.public_key().inner()); let bt_addr = SocketAddr::new(IP_ADDR, 1337); if std::env::args().nth(1) == Some("server".to_string()) { info!("Starting server"); let bt_conf = NetworkConfig::new() .secio_keypair(bt_seckey) .expect("set keypair"); let mut bootstrap = NetworkService::new(bt_conf); let handle = bootstrap.handle(); bootstrap.listen(bt_addr).await.unwrap(); let check_out = Checkout { dealer: handle.clone(), }; bootstrap .register_endpoint_handler(SHOP_CASH_CHANNEL, check_out) .unwrap(); tokio::spawn(bootstrap); thread::sleep(Duration::from_secs(10)); let released = Cyber7702Released { shop: "steam".to_owned(), hash: Hash::digest(Bytes::from("buy".repeat(3))), }; let ctx = Context::default(); handle .broadcast(ctx.clone(), RELEASE_CHANNEL, released, Priority::High) .await .unwrap(); thread::sleep(Duration::from_secs(10)); } else { info!("Starting client"); let port = std::env::args().nth(1).unwrap().parse::().unwrap(); let peer_addr = SocketAddr::new(IP_ADDR, port); let peer_conf = NetworkConfig::new() .bootstraps(vec![(bt_pubkey, bt_addr.to_string())]) .unwrap(); let mut peer = NetworkService::new(peer_conf); let handle = peer.handle(); peer.listen(peer_addr).await.unwrap(); let take_my_money = TakeMyMoney { shop: handle.clone(), }; peer.register_endpoint_handler(RELEASE_CHANNEL, take_my_money) .unwrap(); peer.register_rpc_response::(SHOP_CHANNEL).unwrap(); peer.await; } } ================================================ FILE: core/network/src/common.rs ================================================ use crate::traits::MultiaddrExt; use derive_more::Display; use futures::{pin_mut, task::AtomicWaker}; use 
futures_timer::Delay;
use serde_derive::{Deserialize, Serialize};
use tentacle::{
    multiaddr::{Multiaddr, Protocol},
    secio::PeerId,
};

use std::{
    borrow::Cow,
    future::Future,
    net::{IpAddr, SocketAddr, ToSocketAddrs},
    ops::Add,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
    time::{Duration, Instant},
    vec::IntoIter,
};

// Break out of the enclosing loop when the polled value is `Pending`;
// otherwise evaluate to the ready value.
#[macro_export]
macro_rules! loop_ready {
    ($poll:expr) => {
        match $poll {
            Poll::Pending => break,
            Poll::Ready(v) => v,
        }
    };
}

// Like `loop_ready!`, but for `Option`-yielding polls: `None` means the
// underlying stream terminated, so log the service name and resolve the
// enclosing service future with `()`.
#[macro_export]
macro_rules! service_ready {
    ($service:expr, $poll:expr) => {
        match crate::loop_ready!($poll) {
            Some(v) => v,
            None => {
                log::info!("network: {} exit", $service);
                return Poll::Ready(());
            }
        }
    };
}

/// Convert a `SocketAddr` into an ip/tcp multiaddr.
pub fn socket_to_multi_addr(socket_addr: SocketAddr) -> Multiaddr {
    let mut multi_addr = Multiaddr::from(socket_addr.ip());
    multi_addr.push(Protocol::TCP(socket_addr.port()));
    multi_addr
}

/// Extract an ip/tcp socket address from `multiaddr`.
///
/// Returns `None` when no ip component is present. A missing tcp component
/// yields port 0 (the initial value).
pub fn multiaddr_to_socket(multiaddr: &Multiaddr) -> Option {
    let mut extract_ip = None;
    let mut extract_port = 0u16;

    for proto in multiaddr.iter() {
        match proto {
            Protocol::IP4(ip) => extract_ip = Some(IpAddr::V4(ip)),
            Protocol::IP6(ip) => extract_ip = Some(IpAddr::V6(ip)),
            Protocol::TCP(port) => extract_port = port,
            _ => (),
        }
    }

    if let Some(ip) = extract_ip {
        Some(SocketAddr::new(ip, extract_port))
    } else {
        None
    }
}

/// If `multiaddr` carries an unspecified ip (e.g. 0.0.0.0), resolve it to a
/// concrete address via the local hostname, preserving the ip family
/// (v4/v6) and re-attaching the original p2p id.
///
/// `Err(())` covers: address is not ip/tcp, ip is already specified, the
/// multiaddr has no p2p id, or hostname lookup/resolution fails.
pub fn resolve_if_unspecified(multiaddr: &Multiaddr) -> Result {
    // Pick the first resolved socket whose family matches the input's.
    let match_socket = |iter: IntoIter, be_v4: bool| -> Option {
        for socket in iter {
            match socket {
                SocketAddr::V4(_) if be_v4 => {
                    return Some(socket);
                }
                SocketAddr::V6(_) if !be_v4 => {
                    return Some(socket);
                }
                _ => (),
            }
        }
        None
    };

    let sock = multiaddr_to_socket(&multiaddr).ok_or(())?;
    if !sock.ip().is_unspecified() {
        return Err(());
    }

    let peer_id = multiaddr.id_bytes().clone().ok_or(())?;
    let hs = hostname::get().map_err(|_| ())?;
    let hostname_port = hs
        .to_str()
        .map(|s| format!("{}:{}", s, sock.port()))
        .ok_or(())?;
    let socks_iter = hostname_port.to_socket_addrs().map_err(|_| ())?;
    let socket = match_socket(socks_iter, sock.ip().is_ipv4()).ok_or_else(|| ())?;

    let
mut resolved_addr = socket_to_multi_addr(socket); resolved_addr.push(Protocol::P2P(peer_id)); Ok(resolved_addr) } impl MultiaddrExt for Multiaddr { fn id_bytes(&self) -> Option> { for proto in self.iter() { if let Protocol::P2P(bytes) = proto { return Some(bytes); } } None } fn has_id(&self) -> bool { self.iter().any(|proto| matches!(proto, Protocol::P2P(_))) } fn push_id(&mut self, peer_id: PeerId) { self.push(Protocol::P2P(Cow::Owned(peer_id.as_bytes().to_vec()))) } } pub struct HeartBeat { waker: Arc, interval: Duration, delay: Delay, } impl HeartBeat { pub fn new(waker: Arc, interval: Duration) -> Self { let delay = Delay::new(interval); HeartBeat { waker, interval, delay, } } } // # Note // // Delay returns an error after default global timer gone away. impl Future for HeartBeat { type Output = ::Output; fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let ecg = &mut self.as_mut(); loop { let interval = ecg.interval; let delay = &mut ecg.delay; pin_mut!(delay); crate::loop_ready!(delay.poll(ctx)); let next_time = Instant::now().add(interval); ecg.delay.reset(next_time); ecg.waker.wake(); } Poll::Pending } } #[derive(Debug, Display, PartialEq, Eq, Serialize, Deserialize, Clone, Hash)] #[display(fmt = "{}:{}", host, port)] pub struct ConnectedAddr { pub host: String, pub port: u16, } impl From<&Multiaddr> for ConnectedAddr { fn from(multiaddr: &Multiaddr) -> Self { use tentacle::multiaddr::Protocol::{DNS4, DNS6, IP4, IP6, TCP, TLS}; let mut host = None; let mut port = 0u16; for comp in multiaddr.iter() { match comp { IP4(ip_addr) => host = Some(ip_addr.to_string()), IP6(ip_addr) => host = Some(ip_addr.to_string()), DNS4(dns_addr) | DNS6(dns_addr) => host = Some(dns_addr.to_string()), TLS(tls_addr) => host = Some(tls_addr.to_string()), TCP(p) => port = p, _ => (), } } let host = host.unwrap_or_else(|| multiaddr.to_string()); ConnectedAddr { host, port } } } ================================================ FILE: 
core/network/src/compression/mod.rs
================================================
mod snappy;

pub use snappy::Snappy;

================================================
FILE: core/network/src/compression/snappy.rs
================================================
use std::io;

use protocol::Bytes;

use crate::{error::NetworkError, traits::Compression};

/// Snappy compression codec for network payloads.
#[derive(Clone)]
pub struct Snappy;

impl Compression for Snappy {
    /// Compress `bytes` through a snappy frame writer.
    ///
    /// Errors if the encoder consumed fewer bytes than the input length
    /// (mapped to an `io::ErrorKind::Other` "fail to compress" error).
    fn compress(&self, bytes: Bytes) -> Result {
        let mut vec_bytes = Vec::with_capacity(bytes.len());
        {
            // The writer borrows `vec_bytes`; the inner scope drops it (and
            // flushes the frame) before we take the buffer back.
            let mut writer = snap::Writer::new(&mut vec_bytes);
            let n = io::copy(&mut bytes.as_ref(), &mut writer)?;

            if n as usize != bytes.len() {
                let kind = io::ErrorKind::Other;
                return Err(io::Error::new(kind, "snappy: fail to compress").into());
            }
        }

        Ok(Bytes::from(vec_bytes))
    }

    /// Decompress snappy-framed `bytes` into a freshly allocated buffer.
    fn decompress(&self, bytes: Bytes) -> Result {
        let mut vec_bytes = vec![];
        let mut reader = snap::Reader::new(bytes.as_ref());
        let _ = io::copy(&mut reader, &mut vec_bytes)? as usize;

        Ok(Bytes::from(vec_bytes))
    }
}

================================================
FILE: core/network/src/config.rs
================================================
use std::{
    default::Default,
    net::{IpAddr, Ipv4Addr, SocketAddr},
    path::{Path, PathBuf},
    str::FromStr,
    sync::Arc,
    time::Duration,
};

use log::error;
use protocol::ProtocolResult;
use tentacle::{
    multiaddr::{multiaddr, Multiaddr, Protocol},
    secio::{PeerId, SecioKeyPair},
};

use crate::{
    common::socket_to_multi_addr,
    connection::ConnectionConfig,
    error::NetworkError,
    peer_manager::{ArcPeer, PeerManagerConfig, TrustMetricConfig},
    selfcheck::SelfCheckConfig,
    traits::MultiaddrExt,
    PeerIdExt,
};

// TODO: 0.0.0.0 expose? 127.0.0.1 doesn't work because of tentacle-discovery.
// Default listen address: 0.0.0.0:2337 pub const DEFAULT_LISTEN_IP_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); pub const DEFAULT_LISTEN_PORT: u16 = 2337; // Default max connections pub const DEFAULT_MAX_CONNECTIONS: usize = 40; // Default connection stream frame window lenght pub const DEFAULT_MAX_FRAME_LENGTH: usize = 4 * 1024 * 1024; // 4 Mib pub const DEFAULT_BUFFER_SIZE: usize = 24 * 1024 * 1024; // same as tentacle // Default max wait streams for accept pub const DEFAULT_MAX_WAIT_STREAMS: usize = 256; // Default write timeout pub const DEFAULT_WRITE_TIMEOUT: u64 = 10; // seconds pub const DEFAULT_SAME_IP_CONN_LIMIT: usize = 1; pub const DEFAULT_INBOUND_CONN_LIMIT: usize = 20; // Default peer trust metric pub const DEFAULT_PEER_TRUST_INTERVAL_DURATION: Duration = Duration::from_secs(60); pub const DEFAULT_PEER_TRUST_MAX_HISTORY_DURATION: Duration = Duration::from_secs(24 * 60 * 60 * 10); // 10 day const DEFAULT_PEER_FATAL_BAN_DURATION: Duration = Duration::from_secs(60 * 60); // 1 hour const DEFAULT_PEER_SOFT_BAN_DURATION: Duration = Duration::from_secs(60 * 10); // 10 minutes // Default peer data persistent path pub const DEFAULT_PEER_FILE_NAME: &str = "peers"; pub const DEFAULT_PEER_FILE_EXT: &str = "dat"; pub const DEFAULT_PEER_DAT_FILE: &str = "./peers.dat"; pub const DEFAULT_PING_INTERVAL: u64 = 15; pub const DEFAULT_PING_TIMEOUT: u64 = 30; pub const DEFAULT_DISCOVERY_SYNC_INTERVAL: u64 = 60 * 60; // 1 hour pub const DEFAULT_PEER_MANAGER_HEART_BEAT_INTERVAL: u64 = 30; pub const DEFAULT_SELF_HEART_BEAT_INTERVAL: u64 = 35; pub const DEFAULT_RPC_TIMEOUT: u64 = 10; // Selfcheck pub const DEFAULT_SELF_CHECK_INTERVAL: u64 = 30; pub type PrivateKeyHexStr = String; pub type PeerAddrStr = String; pub type PeerIdBase58Str = String; // Example: // example.com:2077 struct DnsAddr { host: String, port: u16, } impl FromStr for DnsAddr { type Err = NetworkError; fn from_str(s: &str) -> Result { use NetworkError::UnexpectedPeerAddr; let comps = 
s.split(':').collect::>(); if comps.len() != 2 { return Err(UnexpectedPeerAddr(s.to_owned())); } let port = comps[1] .parse::() .map_err(|_| UnexpectedPeerAddr(s.to_owned()))?; Ok(DnsAddr { host: comps[0].to_owned(), port, }) } } // TODO: support Dns6 impl From for Multiaddr { fn from(addr: DnsAddr) -> Self { multiaddr!(DNS4(&addr.host), TCP(addr.port)) } } #[derive(Debug)] pub struct NetworkConfig { // connection pub default_listen: Multiaddr, pub max_connections: usize, pub max_frame_length: usize, pub send_buffer_size: usize, pub recv_buffer_size: usize, pub max_wait_streams: usize, pub write_timeout: u64, // peer manager pub bootstraps: Vec, pub allowlist: Vec, pub allowlist_only: bool, pub enable_save_restore: bool, pub peer_dat_file: PathBuf, pub peer_trust_interval: Duration, pub peer_trust_max_history: Duration, pub peer_fatal_ban: Duration, pub peer_soft_ban: Duration, pub same_ip_conn_limit: usize, pub inbound_conn_limit: usize, // identity and encryption pub secio_keypair: SecioKeyPair, // protocol pub ping_interval: Duration, pub ping_timeout: Duration, pub discovery_sync_interval: Duration, // routine pub peer_manager_heart_beat_interval: Duration, pub heart_beat_interval: Duration, // rpc pub rpc_timeout: Duration, // self check pub selfcheck_interval: Duration, } impl NetworkConfig { pub fn new() -> Self { let mut listen_addr = Multiaddr::from(DEFAULT_LISTEN_IP_ADDR); listen_addr.push(Protocol::TCP(DEFAULT_LISTEN_PORT)); let peer_manager_hb_interval = Duration::from_secs(DEFAULT_PEER_MANAGER_HEART_BEAT_INTERVAL); NetworkConfig { default_listen: listen_addr, max_connections: DEFAULT_MAX_CONNECTIONS, max_frame_length: DEFAULT_MAX_FRAME_LENGTH, send_buffer_size: DEFAULT_BUFFER_SIZE, recv_buffer_size: DEFAULT_BUFFER_SIZE, max_wait_streams: DEFAULT_MAX_WAIT_STREAMS, write_timeout: DEFAULT_WRITE_TIMEOUT, bootstraps: Default::default(), allowlist: Default::default(), allowlist_only: false, enable_save_restore: false, peer_dat_file: 
PathBuf::from(DEFAULT_PEER_DAT_FILE.to_owned()), peer_trust_interval: DEFAULT_PEER_TRUST_INTERVAL_DURATION, peer_trust_max_history: DEFAULT_PEER_TRUST_MAX_HISTORY_DURATION, peer_fatal_ban: DEFAULT_PEER_FATAL_BAN_DURATION, peer_soft_ban: DEFAULT_PEER_SOFT_BAN_DURATION, same_ip_conn_limit: DEFAULT_SAME_IP_CONN_LIMIT, inbound_conn_limit: DEFAULT_INBOUND_CONN_LIMIT, secio_keypair: SecioKeyPair::secp256k1_generated(), ping_interval: Duration::from_secs(DEFAULT_PING_INTERVAL), ping_timeout: Duration::from_secs(DEFAULT_PING_TIMEOUT), discovery_sync_interval: Duration::from_secs(DEFAULT_DISCOVERY_SYNC_INTERVAL), peer_manager_heart_beat_interval: peer_manager_hb_interval, heart_beat_interval: Duration::from_secs(DEFAULT_SELF_HEART_BEAT_INTERVAL), rpc_timeout: Duration::from_secs(DEFAULT_RPC_TIMEOUT), selfcheck_interval: Duration::from_secs(DEFAULT_SELF_CHECK_INTERVAL), } } pub fn max_connections(mut self, max: Option) -> ProtocolResult { if let Some(max) = max { if max <= self.inbound_conn_limit { return Err(NetworkError::InboundLimitEqualOrSmallerThanMaxConn.into()); } self.max_connections = max; } Ok(self) } pub fn same_ip_conn_limit(mut self, limit: Option) -> Self { if let Some(limit) = limit { self.same_ip_conn_limit = limit; } self } pub fn inbound_conn_limit(mut self, limit: Option) -> ProtocolResult { if let Some(limit) = limit { if self.max_connections <= limit { return Err(NetworkError::InboundLimitEqualOrSmallerThanMaxConn.into()); } self.inbound_conn_limit = limit; } Ok(self) } pub fn max_frame_length(mut self, max: Option) -> Self { if let Some(max) = max { self.max_frame_length = max; } self } pub fn send_buffer_size(mut self, size: Option) -> Self { if let Some(size) = size { self.send_buffer_size = size; } self } pub fn recv_buffer_size(mut self, size: Option) -> Self { if let Some(size) = size { self.recv_buffer_size = size; } self } pub fn max_wait_streams(mut self, max: Option) -> Self { if let Some(max) = max { self.max_wait_streams = max; } self } pub 
fn write_timeout(mut self, timeout: Option) -> Self { if let Some(timeout) = timeout { self.write_timeout = timeout; } self } pub fn bootstraps( mut self, pairs: Vec<(PeerIdBase58Str, PeerAddrStr)>, ) -> ProtocolResult { let to_peer = |(pid_str, peer_addr): (PeerIdBase58Str, PeerAddrStr)| -> _ { let peer_id = PeerId::from_str_ext(&pid_str)?; let mut multiaddr = Self::parse_peer_addr(peer_addr)?; let peer = ArcPeer::new(peer_id.clone()); if let Some(id_bytes) = multiaddr.id_bytes() { if id_bytes != peer_id.as_bytes() { error!("network: pubkey doesn't match peer id in {}", multiaddr); return Ok(peer); } } if !multiaddr.has_id() { multiaddr.push_id(peer_id); } peer.multiaddrs.insert_raw(multiaddr); Ok(peer) }; let bootstrap_peers = pairs .into_iter() .map(to_peer) .collect::>>()?; self.bootstraps = bootstrap_peers; Ok(self) } pub fn allowlist>(mut self, peer_id_strs: S) -> ProtocolResult { let peer_ids = { let str_iter = peer_id_strs.as_ref().iter(); let to_peer_ids = str_iter.map(PeerId::from_str_ext); to_peer_ids.collect::, _>>()? 
}; self.allowlist = peer_ids; Ok(self) } pub fn allowlist_only(mut self, flag: Option) -> Self { if let Some(flag) = flag { self.allowlist_only = flag; } self } pub fn peer_dat_file>(mut self, path: P) -> Self { let mut path = path.as_ref().to_owned(); path.push(DEFAULT_PEER_FILE_NAME); path.set_extension(DEFAULT_PEER_FILE_EXT); self.peer_dat_file = path; self } pub fn peer_trust_metric( mut self, interval: Option, max_history: Option, ) -> ProtocolResult { if let Some(interval) = interval { self.peer_trust_interval = Duration::from_secs(interval); } if let Some(max_hist) = max_history { self.peer_trust_max_history = Duration::from_secs(max_hist); } if self.peer_trust_max_history < self.peer_trust_interval * 20 { let interval = self.peer_trust_interval.as_secs(); Err(NetworkError::SmallTrustMaxHistory(interval * 20).into()) } else { Ok(self) } } pub fn peer_fatal_ban(mut self, duration: Option) -> Self { if let Some(duration) = duration { self.peer_fatal_ban = Duration::from_secs(duration); } self } pub fn peer_soft_ban(mut self, duration: Option) -> Self { if let Some(duration) = duration { self.peer_soft_ban = Duration::from_secs(duration); } self } pub fn secio_keypair(mut self, sk_hex: PrivateKeyHexStr) -> ProtocolResult { let maybe_skp = hex::decode(sk_hex).map(SecioKeyPair::secp256k1_raw_key); if let Ok(Ok(skp)) = maybe_skp { self.secio_keypair = skp; Ok(self) } else { Err(NetworkError::InvalidPrivateKey.into()) } } pub fn ping_interval(mut self, interval: Option) -> Self { if let Some(interval) = interval { self.ping_interval = Duration::from_secs(interval); } self } pub fn ping_timeout(mut self, timeout: u64) -> Self { self.ping_timeout = Duration::from_secs(timeout); self } pub fn discovery_sync_interval(mut self, interval: u64) -> Self { self.discovery_sync_interval = Duration::from_secs(interval); self } pub fn peer_manager_heart_beat_interval(mut self, interval: u64) -> Self { self.peer_manager_heart_beat_interval = Duration::from_secs(interval); self } 
pub fn heart_beat_interval(mut self, interval: u64) -> Self { self.heart_beat_interval = Duration::from_secs(interval); self } pub fn rpc_timeout(mut self, timeout: Option) -> Self { if let Some(timeout) = timeout { self.rpc_timeout = Duration::from_secs(timeout); } self } pub fn selfcheck_interval(mut self, interval: Option) -> Self { if let Some(interval) = interval { self.selfcheck_interval = Duration::from_secs(interval); } self } fn parse_peer_addr(addr: PeerAddrStr) -> ProtocolResult { if let Ok(socket_addr) = addr.parse::() { Ok(socket_to_multi_addr(socket_addr)) } else if let Ok(dns_addr) = addr.parse::() { Ok(Multiaddr::from(dns_addr)) } else { Err(NetworkError::UnexpectedPeerAddr(addr).into()) } } } impl Default for NetworkConfig { fn default() -> Self { NetworkConfig::new() } } impl From<&NetworkConfig> for ConnectionConfig { fn from(config: &NetworkConfig) -> ConnectionConfig { ConnectionConfig { secio_keypair: config.secio_keypair.clone(), max_frame_length: Some(config.max_frame_length), send_buffer_size: Some(config.send_buffer_size), recv_buffer_size: Some(config.recv_buffer_size), max_wait_streams: Some(config.max_wait_streams), write_timeout: Some(config.write_timeout), } } } impl From<&NetworkConfig> for PeerManagerConfig { fn from(config: &NetworkConfig) -> PeerManagerConfig { let peer_trust_config = TrustMetricConfig::new(config.peer_trust_interval, config.peer_trust_max_history); PeerManagerConfig { our_id: config.secio_keypair.peer_id(), pubkey: config.secio_keypair.public_key(), bootstraps: config.bootstraps.clone(), allowlist: config.allowlist.clone(), allowlist_only: config.allowlist_only, peer_trust_config: Arc::new(peer_trust_config), peer_fatal_ban: config.peer_fatal_ban, peer_soft_ban: config.peer_soft_ban, max_connections: config.max_connections, same_ip_conn_limit: config.same_ip_conn_limit, inbound_conn_limit: config.inbound_conn_limit, outbound_conn_limit: config.max_connections - config.inbound_conn_limit, routine_interval: 
config.peer_manager_heart_beat_interval, peer_dat_file: config.peer_dat_file.clone(), } } } #[derive(Debug, Clone, Copy)] pub struct TimeoutConfig { pub rpc: Duration, } impl From<&NetworkConfig> for TimeoutConfig { fn from(config: &NetworkConfig) -> TimeoutConfig { TimeoutConfig { rpc: config.rpc_timeout, } } } impl From<&NetworkConfig> for SelfCheckConfig { fn from(config: &NetworkConfig) -> SelfCheckConfig { SelfCheckConfig { interval: config.selfcheck_interval, } } } ================================================ FILE: core/network/src/connection/control.rs ================================================ use tentacle::error::SendErrorKind; use tentacle::service::{ServiceControl, TargetSession}; use tentacle::ProtocolId; use protocol::traits::Priority; use protocol::Bytes; pub struct ProtocolMessage { pub protocol_id: ProtocolId, pub target: TargetSession, pub data: Bytes, pub priority: Priority, } #[derive(Clone)] pub struct ConnectionServiceControl { inner: ServiceControl, } impl ConnectionServiceControl { pub fn new(control: ServiceControl) -> Self { ConnectionServiceControl { inner: control } } pub fn send(&self, message: ProtocolMessage) -> Result<(), SendErrorKind> { let ProtocolMessage { target, protocol_id, data, .. 
} = message;

        // High priority messages go through tentacle's "quick" broadcast
        // path so they jump ahead of normally buffered traffic.
        match message.priority {
            Priority::High => self.inner.quick_filter_broadcast(target, protocol_id, data),
            Priority::Normal => self.inner.filter_broadcast(target, protocol_id, data),
        }
    }
}

================================================
FILE: core/network/src/connection/keeper.rs
================================================
use std::sync::atomic::{AtomicBool, Ordering};

use futures::channel::mpsc::UnboundedSender;
use log::{debug, error};
use tentacle::secio::error::SecioError;
use tentacle::{
    context::ServiceContext,
    error::{DialerErrorKind, HandshakeErrorKind, ListenErrorKind},
    multiaddr::Multiaddr,
    service::{ServiceError, ServiceEvent},
    traits::ServiceHandle,
};

use crate::{
    error::{ErrorKind, NetworkError},
    event::{
        ConnectionErrorKind, ConnectionType, PeerManagerEvent, ProtocolIdentity,
        SessionErrorKind,
    },
};

#[cfg(test)]
use crate::test::mock::SessionContext;

// This macro tries to extract a PublicKey from a SessionContext; the key is
// optional. If it is `None`, simply `return` to exit the caller function.
// Otherwise, evaluate to a PublicKey reference.
macro_rules!
peer_pubkey { ($session_context:expr) => {{ let opt_pk = $session_context.remote_pubkey.as_ref(); debug_assert!(opt_pk.is_some(), "secio is enforced, no way it's None here"); if let Some(pubkey) = opt_pk { pubkey } else { return; } }}; } pub struct ConnectionServiceKeeper { peer_mgr: UnboundedSender, sys_reporter: UnboundedSender, sys_shutdown: AtomicBool, } impl ConnectionServiceKeeper { pub fn new( peer_mgr: UnboundedSender, sys_reporter: UnboundedSender, ) -> Self { ConnectionServiceKeeper { peer_mgr, sys_reporter, sys_shutdown: AtomicBool::new(false), } } fn is_sys_shutdown(&self) -> bool { self.sys_shutdown.load(Ordering::SeqCst) } fn sys_shutdown(&self) { self.sys_shutdown.store(true, Ordering::SeqCst); } fn report_error(&self, kind: ErrorKind) { debug!("network: connection error: {}", kind); if !self.is_sys_shutdown() { let error = NetworkError::from(kind); if self.sys_reporter.unbounded_send(error).is_err() { error!("network: connection: error report channel dropped"); self.sys_shutdown(); } } } fn report_peer(&self, event: PeerManagerEvent) { if self.peer_mgr.unbounded_send(event).is_err() { self.report_error(ErrorKind::Offline("peer manager")); } } fn process_dailer_error(&self, addr: Multiaddr, error: DialerErrorKind) { use DialerErrorKind::{ HandshakeError, IoError, PeerIdNotMatch, RepeatedConnection, TransportError, }; let kind = match error { IoError(err) => ConnectionErrorKind::Io(err), PeerIdNotMatch => ConnectionErrorKind::PeerIdNotMatch, RepeatedConnection(sid) => { let ty = ConnectionType::Outbound; let repeated_connection = PeerManagerEvent::RepeatedConnection { ty, sid, addr }; return self.report_peer(repeated_connection); } HandshakeError(HandshakeErrorKind::Timeout(reason)) => { ConnectionErrorKind::TimeOut(reason) } HandshakeError(HandshakeErrorKind::SecioError(SecioError::IoError(err))) => { ConnectionErrorKind::Io(err) } HandshakeError(err) => ConnectionErrorKind::SecioHandshake(Box::new(err)), TransportError(err) => 
ConnectionErrorKind::from(err), }; let dail_failed = PeerManagerEvent::ConnectFailed { addr, kind }; self.report_peer(dail_failed); } fn process_listen_error(&self, addr: Multiaddr, error: ListenErrorKind) { use ListenErrorKind::{IoError, RepeatedConnection, TransportError}; let kind = match error { IoError(err) => ConnectionErrorKind::Io(err), RepeatedConnection(sid) => { let ty = ConnectionType::Outbound; let repeated_connection = PeerManagerEvent::RepeatedConnection { ty, sid, addr }; return self.report_peer(repeated_connection); } TransportError(err) => ConnectionErrorKind::from(err), }; let listen_failed = PeerManagerEvent::ConnectFailed { addr, kind }; self.report_peer(listen_failed); } } #[rustfmt::skip] impl ServiceHandle for ConnectionServiceKeeper { fn handle_error(&mut self, _ctx: &mut ServiceContext, err: ServiceError) { match err { ServiceError::DialerError { error, address } => { self.process_dailer_error(address, error) } ServiceError::ListenError { error, address } => { self.process_listen_error(address, error) } ServiceError::ProtocolSelectError { session_context, proto_name } => { let protocol_identity = if let Some(proto_name) = proto_name { Some(ProtocolIdentity::Name(proto_name)) } else { None }; let kind = SessionErrorKind::Protocol { identity: protocol_identity, cause: None, }; let protocol_select_failure = PeerManagerEvent::SessionFailed { sid: session_context.id, kind, }; self.report_peer(protocol_select_failure); } ServiceError::ProtocolError { id, error, proto_id } => { let kind = SessionErrorKind::Protocol { identity: Some(ProtocolIdentity::Id(proto_id)), cause: Some(Box::new(error)), }; let broken_protocol = PeerManagerEvent::SessionFailed { sid: id, kind }; self.report_peer(broken_protocol); } ServiceError::SessionTimeout { session_context } => { let kind = SessionErrorKind::Io(std::io::ErrorKind::TimedOut.into()); let session_timeout = PeerManagerEvent::SessionFailed { sid: session_context.id, kind, }; 
self.report_peer(session_timeout); } ServiceError::MuxerError { session_context, error } => { let muxer_broken = PeerManagerEvent::SessionFailed { sid: session_context.id, kind: SessionErrorKind::Io(error) }; self.report_peer(muxer_broken); } // Bad protocol code, will cause memory leaks/abnormal CPU usage ServiceError::ProtocolHandleError { error, proto_id } => { error!("network: bad protocol {} implement: {}", proto_id, error); let kind = ErrorKind::BadProtocolHandle {proto_id, cause : Box::new(error)}; self.report_error(kind); } // Partial protocol task logic take long time to process, usually // indicate bad protocol implement. ServiceError::SessionBlocked { session_context } => { #[cfg(test)] let session_context = SessionContext::from(session_context).arced(); let session_blocked = PeerManagerEvent::SessionBlocked { ctx: session_context }; self.report_peer(session_blocked); } } } fn handle_event(&mut self, ctx: &mut ServiceContext, evt: ServiceEvent) { match evt { ServiceEvent::SessionOpen { session_context } => { if session_context.remote_pubkey.is_none() { // Peer without encryption will not be able to connect to us error!("impossible, got connection from/to {:?} without public key, disconnect it", session_context.address); // Just in case if let Err(e) = ctx.disconnect(session_context.id) { error!("disconnect session {} {}", session_context.id, e); } return; } let pubkey = peer_pubkey!(&session_context).clone(); let pid = pubkey.peer_id(); #[cfg(test)] let session_context = SessionContext::from(session_context).arced(); let new_unidentified_session = PeerManagerEvent::UnidentifiedSession { pid, pubkey, ctx: session_context }; self.report_peer(new_unidentified_session); } ServiceEvent::SessionClose { session_context } => { let pid = peer_pubkey!(&session_context).peer_id(); let sid = session_context.id; let peer_session_closed = PeerManagerEvent::SessionClosed { pid, sid }; self.report_peer(peer_session_closed); } ServiceEvent::ListenStarted { address } => { 
let start_listen = PeerManagerEvent::AddNewListenAddr { addr: address }; self.report_peer(start_listen); } ServiceEvent::ListenClose { address } => { let close_listen = PeerManagerEvent::RemoveListenAddr { addr: address }; self.report_peer(close_listen); } } } } ================================================ FILE: core/network/src/connection/mod.rs ================================================ mod control; mod keeper; pub use control::{ConnectionServiceControl, ProtocolMessage}; pub use keeper::ConnectionServiceKeeper; use std::collections::VecDeque; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; use futures::channel::mpsc::UnboundedReceiver; use futures::stream::Stream; use log::debug; use tentacle::builder::ServiceBuilder; use tentacle::error::SendErrorKind; use tentacle::multiaddr::Multiaddr; use tentacle::secio::SecioKeyPair; use tentacle::service::Service; use crate::error::NetworkError; use crate::event::ConnectionEvent; use crate::traits::NetworkProtocol; pub struct ConnectionConfig { /// Secio keypair for stream encryption and peer identity pub secio_keypair: SecioKeyPair, /// Max stream window size pub max_frame_length: Option, /// Send buffer size pub send_buffer_size: Option, /// Write buffer size pub recv_buffer_size: Option, /// Max wait streams pub max_wait_streams: Option, /// Write timeout pub write_timeout: Option, } pub struct ConnectionService { inner: Service, event_rx: UnboundedReceiver, // Temporary store events for later processing under high load pending_events: VecDeque, // Indicate which protocol this connection service tries to open pin_protocol: PhantomData

, } impl ConnectionService

{ pub fn new( protocol: P, config: ConnectionConfig, keeper: ConnectionServiceKeeper, event_rx: UnboundedReceiver, ) -> Self { let mut builder = ServiceBuilder::default() .key_pair(config.secio_keypair) .forever(true); let mut yamux_config = tentacle::yamux::Config::default(); if let Some(max) = config.max_wait_streams { yamux_config.accept_backlog = max; } if let Some(timeout) = config.write_timeout { yamux_config.connection_write_timeout = Duration::from_secs(timeout); } builder = builder.yamux_config(yamux_config); if let Some(max) = config.max_frame_length { builder = builder.max_frame_length(max); } if let Some(size) = config.send_buffer_size { builder = builder.set_send_buffer_size(size); } if let Some(size) = config.recv_buffer_size { builder = builder.set_recv_buffer_size(size); } for proto_meta in protocol.metas().into_iter() { debug!("network: connection: insert protocol {}", proto_meta.name()); builder = builder.insert_protocol(proto_meta); } ConnectionService { inner: builder.build(keeper), event_rx, pending_events: Default::default(), pin_protocol: PhantomData, } } pub async fn listen(&mut self, address: Multiaddr) -> Result<(), NetworkError> { self.inner.listen(address).await?; Ok(()) } pub fn control(&self) -> ConnectionServiceControl { ConnectionServiceControl::new(self.inner.control().clone()) } // BrokenPipe means service is closed. // WouldBlock means service is temporary unavailable. // // If WouldBlock is returned, we should try again later. pub fn process_event(&mut self, event: ConnectionEvent) { enum State { Closed, Busy, // limit to 2048 in tentacle } macro_rules! 
try_do { ($ctrl_op:expr) => {{ let ret = $ctrl_op.map_err(|err| match &err { SendErrorKind::BrokenPipe => State::Closed, SendErrorKind::WouldBlock => State::Busy, }); match ret { Ok(_) => Ok(()), Err(state) => match state { State::Closed => return, // Early abort func State::Busy => Err::<(), ()>(()), }, } }}; } let control = self.inner.control(); match event { ConnectionEvent::Connect { addrs, .. } => { let mut pending_addrs = Vec::new(); let target_protocol = P::target(); for addr in addrs.into_iter() { if let Err(()) = try_do!(control.dial(addr.clone(), target_protocol.clone())) { pending_addrs.push(addr); } } if !pending_addrs.is_empty() { let pending_connect = ConnectionEvent::Connect { addrs: pending_addrs, proto: target_protocol, }; self.pending_events.push_back(pending_connect); } } ConnectionEvent::Disconnect(sid) => { if let Err(()) = try_do!(control.disconnect(sid)) { let pending_disconnect = ConnectionEvent::Disconnect(sid); self.pending_events.push_back(pending_disconnect); } } } } } impl Future for ConnectionService

{ type Output = (); fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let serv_mut = &mut self.as_mut(); // Process commands // Pending commands first let mut pending_events = std::mem::replace(&mut serv_mut.pending_events, VecDeque::new()); for event in pending_events.drain(..) { debug!("network: pending event {}", event); serv_mut.process_event(event); } // Now received events // No-empty means service is temporary unavailable, try later while serv_mut.pending_events.is_empty() { let event_rx = &mut serv_mut.event_rx; futures::pin_mut!(event_rx); let event = crate::service_ready!("connection service", event_rx.poll_next(ctx)); debug!("network: event [{}]", event); serv_mut.process_event(event); } // Advance service state loop { let inner = &mut serv_mut.inner; futures::pin_mut!(inner); crate::service_ready!("connection service", inner.poll_next(ctx)); } Poll::Pending } } ================================================ FILE: core/network/src/endpoint.rs ================================================ use std::{ cmp::PartialEq, convert::TryFrom, hash::{Hash, Hasher}, str::FromStr, }; use derive_more::{Display, From}; use crate::error::{ErrorKind, NetworkError}; pub const GOSSIP_SCHEME: &str = "/gossip"; pub const RPC_CALL_SCHEME: &str = "/rpc_call"; pub const RPC_RESPONSE_SCHEME: &str = "/rpc_resp"; pub const MAX_ENDPOINT_LENGTH: usize = 120; #[derive(Debug, Display, PartialEq, Eq)] pub enum EndpointScheme { #[display(fmt = "{}", GOSSIP_SCHEME)] Gossip, #[display(fmt = "{}", RPC_CALL_SCHEME)] RpcCall, #[display(fmt = "{}", RPC_RESPONSE_SCHEME)] RpcResponse, } // For example // // gossip: /gossip/cprd/7702_cnpukpeyr_release_date // rpc: /rpc_call/cykppeunr_7702/create_a_character/{rpc_id} // // NOTE: Endpoint only care about first three url comps. So // as its PartialEq, Eq and Hash implement. 
#[derive(Debug, Clone, Display)] #[display(fmt = "{}", _0)] pub struct Endpoint(String); impl Endpoint { pub fn starts_with(&self, pat: &str) -> bool { self.0.starts_with(pat) } pub fn scheme(&self) -> EndpointScheme { if self.starts_with(GOSSIP_SCHEME) { EndpointScheme::Gossip } else if self.starts_with(RPC_CALL_SCHEME) { EndpointScheme::RpcCall } else if self.starts_with(RPC_RESPONSE_SCHEME) { EndpointScheme::RpcResponse } else { unreachable!() } } // Root part, the first three comps pub fn root(&self) -> String { let url = &self.0; let comps = url .split('/') .filter(|comp| !comp.is_empty()) .collect::>(); format!("/{}/{}/{}", comps[0], comps[1], comps[2]) } pub fn full_url(&self) -> &str { &self.0 } pub fn extend(&self, comp: &str) -> Result { let comp = comp.trim_start_matches('/'); format!("{}/{}", self.0, comp).parse::() } } impl PartialEq for Endpoint { fn eq(&self, other: &Self) -> bool { self.root() == other.root() } } impl Eq for Endpoint {} impl Hash for Endpoint { fn hash(&self, state: &mut H) { self.root().hash(state) } } impl FromStr for Endpoint { type Err = NetworkError; fn from_str(end: &str) -> Result { if end.is_empty() || end.len() > MAX_ENDPOINT_LENGTH { return Err(NetworkError::NotEndpoint); } // Check scheme if !end.starts_with(GOSSIP_SCHEME) && !end.starts_with(RPC_CALL_SCHEME) && !end.starts_with(RPC_RESPONSE_SCHEME) { return Err(NetworkError::UnexpectedScheme(end.to_owned())); } // Count components let comps = end .split('/') .filter(|comp| !comp.is_empty()) .collect::>(); // Right now, gossip takes 3 comps and rpc has 4 comps if comps.len() < 3 || comps.len() > 4 { return Err(NetworkError::NotEndpoint); } Ok(Endpoint(end.to_owned())) } } #[derive(Debug, PartialEq, Eq, From, Display, Hash, Clone, Copy)] #[display(fmt = "{}", _0)] pub struct RpcId(u64); impl RpcId { pub fn value(self) -> u64 { self.0 } } #[derive(Debug, Clone, From, Display)] #[display(fmt = "{}/{}", end, rid)] pub struct RpcEndpoint { end: Endpoint, rid: RpcId, } impl 
RpcEndpoint { pub fn endpoint(&self) -> &Endpoint { &self.end } pub fn rpc_id(&self) -> RpcId { self.rid } fn extract_rpc_id_from(end: &Endpoint) -> Result { let end = end.full_url(); // Rpc id should be the last comp let r_sep_idx = end.rfind('/').ok_or(NetworkError::NotEndpoint)?; if end.len() == (r_sep_idx + 1) { // Last separator '/' should not be the last char return Err(NetworkError::NotEndpoint); } // Extract rid let rid = &end[(r_sep_idx + 1)..]; // Parse it let rid = rid.parse::().map_err(ErrorKind::NotIdString)?; Ok(rid.into()) } } impl TryFrom for RpcEndpoint { type Error = NetworkError; fn try_from(end: Endpoint) -> Result { let rid = Self::extract_rpc_id_from(&end)?; Ok(RpcEndpoint { end, rid }) } } impl FromStr for RpcEndpoint { type Err = NetworkError; fn from_str(end: &str) -> Result { let end = end.parse::()?; if !end.starts_with(RPC_CALL_SCHEME) && !end.starts_with(RPC_RESPONSE_SCHEME) { return Err(NetworkError::UnexpectedScheme(end.root())); } let rid = Self::extract_rpc_id_from(&end)?; Ok(RpcEndpoint { end, rid }) } } #[cfg(test)] mod tests { use super::Endpoint; #[test] fn should_able_parse_valid_endpoint_url() { let end = "/gossip/crpd/watch_cpunpyker7702"; let expect = Endpoint(end.to_owned()); let endpoint = end.parse::().unwrap(); assert_eq!(endpoint, expect); } } ================================================ FILE: core/network/src/error.rs ================================================ use std::{error::Error, num::ParseIntError}; use derive_more::Display; use tentacle::{ multiaddr::Multiaddr, secio::{PeerId, PublicKey}, ProtocolId, SessionId, }; use protocol::{types::Address, Bytes, ProtocolError, ProtocolErrorKind}; use crate::common::ConnectedAddr; #[derive(Debug, Display)] pub enum ErrorKind { #[display(fmt = "{} offline", _0)] Offline(&'static str), #[display(fmt = "protocol {} missing", _0)] MissingProtocol(&'static str), #[display(fmt = "kind: bad protocl logic code")] BadProtocolHandle { proto_id: ProtocolId, cause: Box, }, 
#[display(fmt = "kind: given string isn't an id: {}", _0)] NotIdString(ParseIntError), #[display(fmt = "kind: unable to encode or decode: {}", _0)] BadMessage(Box), #[display(fmt = "kind: unknown rid {} from session {}", rid, sid)] UnknownRpc { sid: SessionId, rid: u64 }, #[display(fmt = "kind: unexpected rpc sender, wrong type")] UnexpectedRpcSender, #[display(fmt = "kind: more than one arc rpc sender, cannot unwrap it")] MoreArcRpcSender, #[display(fmt = "kind: session id not found in context")] NoSessionId, #[display(fmt = "kind: remote peer id not found in context")] NoRemotePeerId, #[display(fmt = "kind: rpc id not found in context")] NoRpcId, #[display(fmt = "kind: rpc future dropped {:?}", _0)] RpcDropped(Option), #[display(fmt = "kind: rpc timeout {:?}", _0)] RpcTimeout(Option), #[display(fmt = "kind: not reactor register for {}", _0)] NoReactor(String), #[display( fmt = "kind: cannot create chain address from bytes {:?} {}", pubkey, cause )] NoChainAddress { pubkey: Bytes, cause: Box, }, #[display(fmt = "kind: public key {:?} not match {:?}", pubkey, id)] PublicKeyNotMatchId { pubkey: PublicKey, id: PeerId }, #[display(fmt = "kind: untaggable {}", _0)] Untaggable(String), #[display(fmt = "kind: internal {}", _0)] Internal(String), } impl Error for ErrorKind {} #[derive(Debug, Display)] #[display(fmt = "peer id not found in {}", _0)] pub struct PeerIdNotFound(pub(crate) Multiaddr); impl Error for PeerIdNotFound {} #[derive(Debug, Display)] pub enum NetworkError { #[display(fmt = "io error: {}", _0)] IoError(std::io::Error), #[display(fmt = "temporary unavailable, try again later")] Busy, #[display(fmt = "send incompletely, blocked {:?}, other {:?}", blocked, other)] Send { blocked: Option>, other: Option>, }, #[display( fmt = "send incompletely, unconnected {:?}, other {:?}", unconnected, other )] MultiCast { unconnected: Option>, other: Option>, }, #[display(fmt = "shutdown")] Shutdown, #[display(fmt = "unexected error: {}", _0)] UnexpectedError(Box), 
#[display(fmt = "cannot decode public key bytes")] InvalidPublicKey, #[display(fmt = "cannot decode private key bytes")] InvalidPrivateKey, #[display(fmt = "cannot decode peer id")] InvalidPeerId, #[display(fmt = "unsupported peer address {}", _0)] UnexpectedPeerAddr(String), #[display(fmt = "unknown endpoint scheme {}", _0)] UnexpectedScheme(String), #[display(fmt = "cannot serde encode or decode: {}", _0)] SerdeError(Box), #[display(fmt = "malformat or exceed maximum length, /[scheme]/[name]/[method] etc")] NotEndpoint, #[display(fmt = "{:?} account addrs aren't connecting, try connect them", miss)] PartialRouteMessage { miss: Vec

}, #[display(fmt = "remote response {}", _0)] RemoteResponse(Box), #[display(fmt = "trust max history should be longer than {} secs", _0)] SmallTrustMaxHistory(u64), #[display(fmt = "transport {}", _0)] Transport(tentacle::error::TransportErrorKind), #[display(fmt = "inbound connection limit is equal or smaller than max connections")] InboundLimitEqualOrSmallerThanMaxConn, #[display(fmt = "internal error: {}", _0)] Internal(Box), } impl Error for NetworkError {} impl From for NetworkError { fn from(err: PeerIdNotFound) -> NetworkError { NetworkError::Internal(Box::new(err)) } } impl From for NetworkError { fn from(kind: ErrorKind) -> NetworkError { NetworkError::Internal(Box::new(kind)) } } impl From> for NetworkError { fn from(kind: Box) -> NetworkError { NetworkError::SerdeError(Box::new(kind)) } } impl From for ProtocolError { fn from(err: NetworkError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Network, Box::new(err)) } } impl From for NetworkError { fn from(err: std::io::Error) -> NetworkError { NetworkError::IoError(err) } } impl From for NetworkError { fn from(err: tentacle::error::TransportErrorKind) -> NetworkError { NetworkError::Transport(err) } } impl From for Box { fn from(err: NetworkError) -> Box { err.boxed() } } impl NetworkError { pub fn boxed(self) -> Box { Box::new(self) as Box } } ================================================ FILE: core/network/src/event.rs ================================================ use std::{error::Error, sync::Arc}; use derive_more::Display; use protocol::traits::TrustFeedback; #[cfg(not(test))] use tentacle::context::SessionContext; use tentacle::{ error::TransportErrorKind, multiaddr::Multiaddr, secio::{PeerId, PublicKey}, service::TargetProtocol, ProtocolId, SessionId, }; #[cfg(test)] use crate::test::mock::SessionContext; #[derive(Debug, Display)] pub enum ConnectionEvent { #[display(fmt = "connect addrs {:?}, proto: {:?}", addrs, proto)] Connect { addrs: Vec, proto: TargetProtocol, }, #[display(fmt 
= "disconnect session {}", _0)] Disconnect(SessionId), } #[derive(Debug, Display)] pub enum ProtocolIdentity { #[display(fmt = "protocol id {}", _0)] Id(ProtocolId), #[display(fmt = "protocol name {}", _0)] Name(String), } #[derive(Debug, Display)] pub enum ConnectionErrorKind { #[display(fmt = "io {:?}", _0)] Io(std::io::Error), #[display(fmt = "dns resolver {}", _0)] DNSResolver(Box), #[display(fmt = "multiaddr {} is not supported", _0)] MultiaddrNotSuppored(Multiaddr), #[display(fmt = "handshake {}", _0)] SecioHandshake(Box), #[display(fmt = "timeout {}", _0)] TimeOut(String), #[display(fmt = "remote peer doesn't match one in multiaddr")] PeerIdNotMatch, #[display(fmt = "protocol handle block or abnormally closed")] ProtocolHandle, } impl From for ConnectionErrorKind { fn from(err: TransportErrorKind) -> ConnectionErrorKind { match err { TransportErrorKind::Io(err) => ConnectionErrorKind::Io(err), TransportErrorKind::NotSupported(addr) => { ConnectionErrorKind::MultiaddrNotSuppored(addr) } TransportErrorKind::DNSResolverError(_, _) => { ConnectionErrorKind::DNSResolver(Box::new(err)) } } } } #[derive(Debug, Display)] pub enum SessionErrorKind { #[display(fmt = "io {:?}", _0)] Io(std::io::Error), // Maybe unknown protocol, protocol version incompatible, protocol codec // error #[display(fmt = "protocol identity {:?} {:?}", identity, cause)] Protocol { identity: Option, cause: Option>, }, #[display(fmt = "unexpect {}", _0)] #[allow(dead_code)] Unexpected(Box), } #[derive(Debug, Display)] pub enum MisbehaviorKind { #[display(fmt = "discovery")] Discovery, #[display(fmt = "ping time out")] PingTimeout, // Maybe message codec or nonce incorrect #[display(fmt = "ping unexpect")] PingUnexpect, } #[derive(Debug, Display, PartialEq, Eq)] pub enum ConnectionType { #[allow(dead_code)] #[display(fmt = "Receive an repeated connection")] Inbound, #[display(fmt = "Dial an repeated connection")] Outbound, } #[derive(Debug, Display)] pub enum PeerManagerEvent { // Peer 
#[display(fmt = "connect peers {:?} now", pids)] ConnectPeersNow { pids: Vec }, #[display(fmt = "connect to {} failed, kind: {}", addr, kind)] ConnectFailed { addr: Multiaddr, kind: ConnectionErrorKind, }, #[display( fmt = "new session {} peer {:?} addr {} ty {:?}", "ctx.id", pid, "ctx.address", "ctx.ty" )] NewSession { pid: PeerId, pubkey: PublicKey, ctx: Arc, }, #[display( fmt = "unidentified session {} peer {:?} addr {} ty {:?}", "ctx.id", pid, "ctx.address", "ctx.ty" )] UnidentifiedSession { pid: PeerId, pubkey: PublicKey, ctx: Arc, }, #[display(fmt = "repeated connection type {} session {} addr {}", ty, sid, addr)] RepeatedConnection { ty: ConnectionType, sid: SessionId, addr: Multiaddr, }, #[display( fmt = "session {} blocked, pending data size {}", "ctx.id", "ctx.pending_data_size()" )] SessionBlocked { ctx: Arc }, #[display(fmt = "peer {:?} session {} closed", pid, sid)] SessionClosed { pid: PeerId, sid: SessionId }, #[display(fmt = "session {} failed, kind: {}", sid, kind)] SessionFailed { sid: SessionId, kind: SessionErrorKind, }, #[display(fmt = "peer {:?} alive", pid)] PeerAlive { pid: PeerId }, #[display(fmt = "peer {:?} misbehave {}", pid, kind)] Misbehave { pid: PeerId, kind: MisbehaviorKind }, #[display(fmt = "peer {:?} trust metric feedback {}", pid, feedback)] TrustMetric { pid: PeerId, feedback: TrustFeedback, }, // Address #[display(fmt = "discover multi addrs {:?}", addrs)] DiscoverMultiAddrs { addrs: Vec }, #[display(fmt = "identify pid {:?} addrs {:?}", pid, addrs)] IdentifiedAddrs { pid: PeerId, addrs: Vec, }, // Self #[display(fmt = "add listen addr {}", addr)] AddNewListenAddr { addr: Multiaddr }, #[display(fmt = "rmeove listen addr {}", addr)] RemoveListenAddr { addr: Multiaddr }, } ================================================ FILE: core/network/src/lib.rs ================================================ mod common; mod compression; mod config; mod connection; mod endpoint; mod error; mod event; mod message; mod metrics; mod outbound; 
mod peer_manager;
mod protocols;
mod reactor;
mod rpc;
mod selfcheck;
mod service;
#[cfg(test)]
mod test;
mod traits;

pub use config::NetworkConfig;
pub use error::NetworkError;
pub use message::{serde, serde_multi};
pub use service::{NetworkService, NetworkServiceHandle};

#[cfg(feature = "diagnostic")]
pub use peer_manager::diagnostic::{DiagnosticEvent, TrustReport};

pub use tentacle::secio::PeerId;

use protocol::Bytes;
use tentacle::secio::PublicKey;

/// Extension helpers for converting between `PeerId` and raw byte / string
/// representations used across the crate.
pub trait PeerIdExt {
    /// Derives a peer id from raw secp256k1 public key bytes.
    fn from_pubkey_bytes<'a, B: AsRef<[u8]> + 'a>(bytes: B) -> Result<PeerId, NetworkError> {
        let pubkey = PublicKey::secp256k1_raw_key(bytes.as_ref())
            .map_err(|_| NetworkError::InvalidPublicKey)?;

        Ok(PeerId::from_public_key(&pubkey))
    }

    /// Decodes a peer id from its raw byte encoding.
    fn from_bytes<'a, B: AsRef<[u8]> + 'a>(bytes: B) -> Result<PeerId, NetworkError> {
        PeerId::from_bytes(bytes.as_ref().to_vec()).map_err(|_| NetworkError::InvalidPeerId)
    }

    fn to_string(&self) -> String;

    fn into_bytes_ext(self) -> Bytes;

    /// Parses a peer id from its base58 string form.
    fn from_str_ext<'a, S: AsRef<str> + 'a>(s: S) -> Result<PeerId, NetworkError> {
        s.as_ref().parse().map_err(|_| NetworkError::InvalidPeerId)
    }
}

impl PeerIdExt for PeerId {
    fn into_bytes_ext(self) -> Bytes {
        Bytes::from(self.into_bytes())
    }

    fn to_string(&self) -> String {
        self.to_base58()
    }
}

================================================
FILE: core/network/src/message/mod.rs
================================================
pub mod serde;
pub mod serde_multi;

use std::{collections::HashMap, str::FromStr};

use common_apm::muta_apm::rustracing_jaeger::span::TraceId;
use prost::Message;
use protocol::Bytes;

use crate::endpoint::Endpoint;
use crate::error::{ErrorKind, NetworkError};

/// Key-value metadata attached to every wire message (tracing ids etc).
pub struct Headers(HashMap<String, Vec<u8>>);

impl Default for Headers {
    fn default() -> Self {
        Headers(Default::default())
    }
}

impl Headers {
    pub fn set_trace_id(&mut self, id: TraceId) {
        self.0
            .insert("trace_id".to_owned(), id.to_string().into_bytes());
    }

    pub fn set_span_id(&mut self, id: u64) {
        self.0
            .insert("span_id".to_owned(), id.to_be_bytes().to_vec());
    }
}

/// Protobuf envelope carrying an endpoint url, headers and payload bytes.
#[derive(Message)]
pub struct NetworkMessage {
    #[prost(map = "string, bytes", tag = "1")]
    pub headers: HashMap<String, Vec<u8>>,

    #[prost(string, tag = "2")]
    pub url: String,

    #[prost(bytes, tag = "3")]
    pub content: Vec<u8>,
}

impl NetworkMessage {
    pub fn new(endpoint: Endpoint, content: Bytes, headers: Headers) -> Self {
        NetworkMessage {
            headers: headers.0,
            url: endpoint.full_url().to_owned(),
            content: content.to_vec(),
        }
    }

    /// Extracts the jaeger trace id, if a well-formed one is present.
    pub fn trace_id(&self) -> Option<TraceId> {
        self.headers
            .get("trace_id")
            .and_then(|id| String::from_utf8(id.to_owned()).ok())
            .and_then(|s| TraceId::from_str(&s).ok())
    }

    /// Extracts the span id, if a well-formed one is present.
    ///
    /// FIX: headers arrive from remote peers; the previous implementation
    /// did `copy_from_slice(&id[..8])` unconditionally and panicked on a
    /// `span_id` header shorter than 8 bytes (remote crash vector). A
    /// malformed header now simply yields `None`.
    pub fn span_id(&self) -> Option<u64> {
        let id = self.headers.get("span_id")?;
        if id.len() < 8 {
            return None;
        }

        let mut buf = [0u8; 8];
        buf.copy_from_slice(&id[..8]);
        Some(u64::from_be_bytes(buf))
    }

    pub fn encode(self) -> Result<Bytes, NetworkError> {
        let mut buf = Vec::with_capacity(self.encoded_len());

        <Self as Message>::encode(&self, &mut buf)
            .map_err(|e| ErrorKind::BadMessage(Box::new(e)))?;

        Ok(Bytes::from(buf))
    }

    pub fn decode(bytes: Bytes) -> Result<Self, NetworkError> {
        <Self as Message>::decode(bytes).map_err(|e| ErrorKind::BadMessage(Box::new(e)).into())
    }
}

#[cfg(test)]
mod tests {
    use protocol::{types::Hash, Bytes};
    use quickcheck_macros::quickcheck;
    use serde_derive::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    struct Hashes {
        #[serde(with = "super::serde_multi")]
        hashes: Vec<Hash>,
    }

    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct QHash {
        #[serde(with = "super::serde")]
        hash: Hash,
    }

    impl quickcheck::Arbitrary for QHash {
        fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> QHash {
            let msg = Bytes::from(String::arbitrary(g));
            let hash_val = Hash::digest(msg);

            QHash { hash: hash_val }
        }
    }

    impl From<Vec<QHash>> for Hashes {
        fn from(q_hashes: Vec<QHash>) -> Hashes {
            let hashes = q_hashes
                .into_iter()
                .map(|qhash| qhash.hash)
                .collect::<Vec<_>>();

            Hashes { hashes }
        }
    }

    #[quickcheck]
    fn prop_protocol_type_serialization(hash: QHash) -> bool {
        bincode::deserialize::<QHash>(&bincode::serialize(&hash).unwrap()).is_ok()
    }

    #[quickcheck]
    fn prop_vec_protocol_type_serialization(hashes: Vec<QHash>) -> bool {
        let hashes = Hashes::from(hashes);

        bincode::deserialize::<Hashes>(&bincode::serialize(&hashes).unwrap()).is_ok()
    }
}
================================================ FILE: core/network/src/message/serde.rs ================================================ use std::fmt; use protocol::codec::ProtocolCodecSync; use protocol::Bytes; use serde::{de, ser, Deserializer, Serializer}; pub fn serialize(val: &T, s: S) -> Result where S: Serializer, T: ProtocolCodecSync, { let bytes = val.encode_sync().map_err(ser::Error::custom)?; s.serialize_bytes(&bytes.to_vec()) } struct BytesVisit; pub fn deserialize<'de, T, D>(deserializer: D) -> Result where D: Deserializer<'de>, T: ProtocolCodecSync, { let bytes = deserializer.deserialize_byte_buf(BytesVisit)?; ::decode_sync(bytes).map_err(de::Error::custom) } impl<'de> de::Visitor<'de> for BytesVisit { type Value = Bytes; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("byte array") } #[inline] fn visit_byte_buf(self, v: Vec) -> Result where E: de::Error, { Ok(Bytes::from(v)) } } ================================================ FILE: core/network/src/message/serde_multi.rs ================================================ use std::{fmt, iter::FromIterator, marker::PhantomData}; use derive_more::Constructor; use protocol::codec::ProtocolCodecSync; use serde::{de, ser::SerializeStruct, Deserializer, Serializer}; use serde_derive::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize)] struct TWrapper { #[serde(with = "super::serde")] inner: T, } #[derive(Constructor, Serialize)] struct VecT { inner: Vec>, } pub fn serialize<'se, V, T, S>(val: &'se V, s: S) -> Result where S: Serializer, V: IntoIterator + Clone, T: ProtocolCodecSync + 'se + Clone, { let val_cloned = val.clone().into_iter(); let inner = val_cloned .map(|t| TWrapper { inner: t }) .collect::>(); let vec_t = VecT { inner }; let mut state = s.serialize_struct("VecT", 1)?; state.serialize_field("inner", &vec_t.inner)?; state.end() } pub fn deserialize<'de, T, V, D>(deserializer: D) -> Result where D: Deserializer<'de>, V: FromIterator, T: 
ProtocolCodecSync, { #[derive(Deserialize)] #[serde(field_identifier, rename_all = "lowercase")] enum Field { Inner, } struct VecTVisitor { pin_t: PhantomData, } impl VecTVisitor { pub fn new() -> Self { VecTVisitor { pin_t: PhantomData } } } impl<'de, T> de::Visitor<'de> for VecTVisitor where T: ProtocolCodecSync, { type Value = VecT; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("serde multi") } fn visit_seq(self, mut seq: V) -> Result where V: de::SeqAccess<'de>, { let inner = seq .next_element()? .ok_or_else(|| de::Error::invalid_length(0, &self))?; Ok(VecT::new(inner)) } fn visit_map(self, mut map: V) -> Result where V: de::MapAccess<'de>, { let mut inner = None; while let Some(key) = map.next_key()? { match key { Field::Inner => { if inner.is_some() { return Err(de::Error::duplicate_field("inner")); } inner = Some(map.next_value()?); } } } let inner = inner.ok_or_else(|| de::Error::missing_field("inner"))?; Ok(VecT::new(inner)) } } const FIELDS: &[&str] = &["inner"]; let vec_t = deserializer.deserialize_struct("VecT", FIELDS, VecTVisitor::new())?; Ok(V::from_iter( vec_t.inner.into_iter().map(|wrap_t| wrap_t.inner), )) } ================================================ FILE: core/network/src/metrics.rs ================================================ use std::{ future::Future, pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration, }; use futures::task::AtomicWaker; use crate::{ common::{ConnectedAddr, HeartBeat}, traits::SharedSessionBook, }; const METRICS_INTERVAL: Duration = Duration::from_secs(1); pub(crate) struct Metrics { sessions: S, heart_beat: Option, hb_waker: Arc, } impl Metrics where S: SharedSessionBook + Send + Unpin + 'static, { pub fn new(sessions: S) -> Self { let waker = Arc::new(AtomicWaker::new()); let heart_beat = HeartBeat::new(Arc::clone(&waker), METRICS_INTERVAL); Metrics { sessions, heart_beat: Some(heart_beat), hb_waker: waker, } } fn report_pending_data(&self) { let sids = 
self.sessions.all(); let total_size: usize = sids .iter() .map(|sid| { let data_size = self.sessions.pending_data_size(*sid); if let Some(ConnectedAddr { host, .. }) = self.sessions.connected_addr(*sid) { let guage = common_apm::metrics::network::NETWORK_IP_PENDING_DATA_SIZE_VEC .with_label_values(&[&host]); guage.set(data_size as i64); } data_size }) .sum(); common_apm::metrics::network::NETWORK_TOTAL_PENDING_DATA_SIZE.set(total_size as i64); } } impl Future for Metrics where S: SharedSessionBook + Send + Unpin + 'static, { type Output = (); fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { self.hb_waker.register(ctx.waker()); // Spawn heart beat if let Some(heart_beat) = self.heart_beat.take() { tokio::spawn(heart_beat); // No needed for first run return Poll::Pending; } self.as_ref().report_pending_data(); Poll::Pending } } ================================================ FILE: core/network/src/outbound/gossip.rs ================================================ use async_trait::async_trait; use protocol::traits::{Context, Gossip, MessageCodec, Priority}; use protocol::{Bytes, ProtocolResult}; use tentacle::secio::PeerId; use tentacle::service::TargetSession; use crate::endpoint::Endpoint; use crate::error::NetworkError; use crate::message::{Headers, NetworkMessage}; use crate::protocols::{Recipient, Transmitter, TransmitterMessage}; use crate::traits::{Compression, NetworkContext}; use crate::PeerIdExt; #[derive(Clone)] pub struct NetworkGossip { transmitter: Transmitter, } impl NetworkGossip { pub fn new(transmitter: Transmitter) -> Self { NetworkGossip { transmitter } } async fn package_message( &self, ctx: Context, endpoint: &str, mut msg: M, ) -> ProtocolResult where M: MessageCodec, { let endpoint = endpoint.parse::()?; let data = msg.encode()?; let mut headers = Headers::default(); if let Some(state) = common_apm::muta_apm::MutaTracer::span_state(&ctx) { headers.set_trace_id(state.trace_id()); headers.set_span_id(state.span_id()); 
log::info!("no trace id found for gossip {}", endpoint.full_url()); } let net_msg = NetworkMessage::new(endpoint, data, headers).encode()?; let msg = self.transmitter.compressor().compress(net_msg)?; Ok(msg) } async fn send_to_sessions( &self, ctx: Context, target_session: TargetSession, data: Bytes, priority: Priority, ) -> Result<(), NetworkError> { let msg = TransmitterMessage { recipient: Recipient::Session(target_session), priority, data, ctx, }; self.transmitter.behaviour.send(msg).await } async fn send_to_peers<'a, P: AsRef<[Bytes]> + 'a>( &self, ctx: Context, peer_ids: P, data: Bytes, priority: Priority, ) -> Result<(), NetworkError> { let peer_ids = { let byteses = peer_ids.as_ref().iter(); let maybe_ids = byteses.map(::from_bytes); maybe_ids.collect::, _>>()? }; let msg = TransmitterMessage { recipient: Recipient::PeerId(peer_ids), priority, data, ctx, }; self.transmitter.behaviour.send(msg).await } } #[async_trait] impl Gossip for NetworkGossip { async fn broadcast( &self, mut cx: Context, endpoint: &str, msg: M, priority: Priority, ) -> ProtocolResult<()> where M: MessageCodec, { let msg = self.package_message(cx.clone(), endpoint, msg).await?; let ctx = cx.set_url(endpoint.to_owned()); self.send_to_sessions(ctx, TargetSession::All, msg, priority) .await?; common_apm::metrics::network::on_network_message_sent_all_target(endpoint); Ok(()) } async fn multicast<'a, M, P>( &self, mut cx: Context, endpoint: &str, peer_ids: P, msg: M, priority: Priority, ) -> ProtocolResult<()> where M: MessageCodec, P: AsRef<[Bytes]> + Send + 'a, { let msg = self.package_message(cx.clone(), endpoint, msg).await?; let multicast_count = peer_ids.as_ref().len(); let ctx = cx.set_url(endpoint.to_owned()); self.send_to_peers(ctx, peer_ids, msg, priority).await?; common_apm::metrics::network::on_network_message_sent_multi_target( endpoint, multicast_count as i64, ); Ok(()) } } ================================================ FILE: core/network/src/outbound/mod.rs 
================================================
mod gossip;
mod rpc;

pub use gossip::NetworkGossip;
pub use rpc::NetworkRpc;

================================================
FILE: core/network/src/outbound/rpc.rs
================================================
use std::time::Instant;

use async_trait::async_trait;
use futures::future::{self, Either};
use futures_timer::Delay;
use protocol::traits::{Context, MessageCodec, Priority, Rpc};
use protocol::{Bytes, ProtocolResult};
use tentacle::service::TargetSession;
use tentacle::SessionId;

use crate::config::TimeoutConfig;
use crate::endpoint::Endpoint;
use crate::error::{ErrorKind, NetworkError};
use crate::message::{Headers, NetworkMessage};
use crate::protocols::{Recipient, Transmitter, TransmitterMessage};
use crate::rpc::{RpcErrorMessage, RpcResponse, RpcResponseCode};
use crate::traits::{Compression, NetworkContext};

/// Outbound RPC handle: compresses and sends request/response frames to a
/// single remote session through the shared `Transmitter`.
#[derive(Clone)]
pub struct NetworkRpc {
    transmitter: Transmitter,
    timeout:     TimeoutConfig,
}

impl NetworkRpc {
    pub fn new(transmitter: Transmitter, timeout: TimeoutConfig) -> Self {
        NetworkRpc {
            transmitter,
            timeout,
        }
    }

    /// Compress `data` and deliver it to the given session with `priority`.
    async fn send(
        &self,
        ctx: Context,
        session_id: SessionId,
        data: Bytes,
        priority: Priority,
    ) -> Result<(), NetworkError> {
        let compressed_data = self.transmitter.compressor().compress(data)?;
        let msg = TransmitterMessage {
            recipient: Recipient::Session(TargetSession::Single(session_id)),
            priority,
            data: compressed_data,
            ctx,
        };

        self.transmitter.behaviour.send(msg).await
    }
}

#[async_trait]
impl Rpc for NetworkRpc {
    /// Send `msg` to the remote endpoint and wait (bounded by the configured
    /// rpc timeout) for the matching response.
    ///
    /// NOTE(review): generic parameters were lost in extraction and have been
    /// reconstructed as `<M, R>` from the `where` clause — confirm upstream.
    async fn call<M, R>(
        &self,
        mut cx: Context,
        endpoint: &str,
        mut msg: M,
        priority: Priority,
    ) -> ProtocolResult<R>
    where
        M: MessageCodec,
        R: MessageCodec,
    {
        let endpoint = endpoint.parse::<Endpoint>()?;
        let sid = cx.session_id()?;
        let rpc_map = &self.transmitter.router.rpc_map;
        let rid = rpc_map.next_rpc_id();
        let connected_addr = cx.remote_connected_addr();
        // Register the pending rpc slot before sending, so the response
        // cannot race ahead of the registration.
        let done_rx = rpc_map.insert::<RpcResponse>(sid, rid);
        let inst = Instant::now();

        // Drop guard: whatever way this function exits (success, timeout,
        // error or `?`), the pending rpc slot is cleaned up.
        struct _Guard {
            transmitter: Transmitter,
            sid:         SessionId,
            rid:         u64,
        }

        impl Drop for _Guard {
            fn drop(&mut self) {
                // Simple take then drop if there is one
                let rpc_map = &self.transmitter.router.rpc_map;
                let _ = rpc_map.take::<RpcResponse>(self.sid, self.rid);
            }
        }

        let _guard = _Guard {
            transmitter: self.transmitter.clone(),
            sid,
            rid,
        };

        let data = msg.encode()?;
        // The rpc id is appended to the endpoint so the remote can route the
        // response back to our pending slot.
        let endpoint = endpoint.extend(&rid.to_string())?;

        let mut headers = Headers::default();
        if let Some(state) = common_apm::muta_apm::MutaTracer::span_state(&cx) {
            headers.set_trace_id(state.trace_id());
            headers.set_span_id(state.span_id());
        } else {
            // FIX: this log previously sat inside the branch where a trace id
            // WAS found, contradicting its own message.
            log::info!("no trace id found for rpc {}", endpoint.full_url());
        }
        common_apm::metrics::network::on_network_message_sent(endpoint.full_url());

        let ctx = cx.set_url(endpoint.root());
        let net_msg = NetworkMessage::new(endpoint, data, headers).encode()?;
        self.send(ctx, sid, net_msg, priority).await?;

        // Race the response against the rpc timeout.
        let timeout = Delay::new(self.timeout.rpc);
        let ret = match future::select(done_rx, timeout).await {
            Either::Left((ret, _timeout)) => {
                ret.map_err(|_| NetworkError::from(ErrorKind::RpcDropped(connected_addr)))?
            }
            Either::Right((_unresolved, _timeout)) => {
                common_apm::metrics::network::NETWORK_RPC_RESULT_COUNT_VEC_STATIC
                    .timeout
                    .inc();
                return Err(NetworkError::from(ErrorKind::RpcTimeout(connected_addr)).into());
            }
        };

        match ret {
            RpcResponse::Success(v) => {
                common_apm::metrics::network::NETWORK_RPC_RESULT_COUNT_VEC_STATIC
                    .success
                    .inc();
                common_apm::metrics::network::NETWORK_PROTOCOL_TIME_HISTOGRAM_VEC_STATIC
                    .rpc
                    .observe(common_apm::metrics::duration_to_sec(inst.elapsed()));

                Ok(R::decode(v)?)
            }
            RpcResponse::Error(e) => Err(NetworkError::RemoteResponse(Box::new(e)).into()),
        }
    }

    /// Encode `ret` (success payload or its error string) and send it back to
    /// the session/rpc-id recorded in the incoming context.
    async fn response<M>(
        &self,
        mut cx: Context,
        endpoint: &str,
        ret: ProtocolResult<M>,
        priority: Priority,
    ) -> ProtocolResult<()>
    where
        M: MessageCodec,
    {
        let endpoint = endpoint.parse::<Endpoint>()?;
        let sid = cx.session_id()?;
        let rid = cx.rpc_id()?;

        // Server-side errors are flattened to their display string.
        let mut resp = match ret.map_err(|e| e.to_string()) {
            Ok(mut m) => RpcResponse::Success(m.encode()?),
            Err(err_msg) => RpcResponse::Error(RpcErrorMessage {
                code: RpcResponseCode::ServerError,
                msg:  err_msg,
            }),
        };

        let encoded_resp = resp.encode()?;
        let endpoint = endpoint.extend(&rid.to_string())?;

        let mut headers = Headers::default();
        if let Some(state) = common_apm::muta_apm::MutaTracer::span_state(&cx) {
            headers.set_trace_id(state.trace_id());
            headers.set_span_id(state.span_id());
        } else {
            // FIX: same misplaced log as in `call` — only report a missing
            // trace id when there actually is none.
            log::info!("no trace id found for rpc {}", endpoint.full_url());
        }
        common_apm::metrics::network::on_network_message_sent(endpoint.full_url());

        let ctx = cx.set_url(endpoint.root());
        let net_msg = NetworkMessage::new(endpoint, encoded_resp, headers).encode()?;
        self.send(ctx, sid, net_msg, priority).await?;

        Ok(())
    }
}

================================================
FILE: core/network/src/peer_manager/addr_set.rs
================================================
use super::{PeerMultiaddr, MAX_RETRY_COUNT};

use std::{
    borrow::{Borrow, Cow},
    collections::HashSet,
    hash::{Hash, Hasher},
    ops::Deref,
    sync::atomic::{AtomicUsize, Ordering},
};

use parking_lot::RwLock;
use tentacle::{multiaddr::Multiaddr, secio::PeerId};

use crate::traits::MultiaddrExt;

const MAX_ADDR_FAILURE: u8 = MAX_RETRY_COUNT;

/// A peer multiaddr plus its dial-failure counter. Equality/hashing are by
/// address only, so the counter can be mutated in-place inside a `HashSet`.
#[derive(Debug)]
struct AddrInfo {
    addr:    PeerMultiaddr,
    failure: AtomicUsize,
}

impl AddrInfo {
    pub fn owned_addr(&self) -> PeerMultiaddr {
        self.addr.to_owned()
    }

    pub fn owned_raw_addr(&self) -> Multiaddr {
        (*self.addr).to_owned()
    }

    #[cfg(test)]
    pub fn failure(&self) -> usize {
        self.failure.load(Ordering::SeqCst)
    }

    pub fn inc_failure(&self) {
        self.failure.fetch_add(1, Ordering::SeqCst);
    }

    /// Push the counter past `MAX_ADDR_FAILURE` so `connectable()` is false.
    pub fn give_up(&self) {
        self.failure
            .store(MAX_ADDR_FAILURE as usize + 1, Ordering::SeqCst);
    }

    pub fn reset_failure(&self) {
        self.failure.store(0, Ordering::SeqCst);
    }

    pub fn connectable(&self) -> bool {
        self.failure.load(Ordering::SeqCst) <= MAX_ADDR_FAILURE as usize
    }
}

impl Deref for AddrInfo {
    type Target = PeerMultiaddr;

    fn deref(&self) -> &Self::Target {
        &self.addr
    }
}

impl From<PeerMultiaddr> for AddrInfo {
    fn from(pma: PeerMultiaddr) -> AddrInfo {
        AddrInfo {
            addr:    pma,
            failure: AtomicUsize::new(0),
        }
    }
}

// Allows `HashSet<AddrInfo>` lookups keyed by `&PeerMultiaddr`.
impl Borrow<PeerMultiaddr> for AddrInfo {
    fn borrow(&self) -> &PeerMultiaddr {
        &self.addr
    }
}

impl PartialEq for AddrInfo {
    fn eq(&self, other: &Self) -> bool {
        self.addr == other.addr
    }
}

impl Eq for AddrInfo {}

impl Hash for AddrInfo {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.addr.hash(state)
    }
}

/// The known multiaddrs of one peer, with per-address failure tracking.
/// Addresses whose embedded peer id does not match `peer_id` are rejected.
#[derive(Debug)]
pub struct PeerAddrSet {
    peer_id: PeerId,
    inner:   RwLock<HashSet<AddrInfo>>,
}

impl PeerAddrSet {
    pub fn new(peer_id: PeerId) -> Self {
        PeerAddrSet {
            peer_id,
            inner: Default::default(),
        }
    }

    pub fn insert(&self, multiaddrs: Vec<PeerMultiaddr>) {
        let multiaddrs = {
            let set = self.inner.read();
            // Filter already exists multiaddrs, we dont reset failure.
            multiaddrs
                .into_iter()
                .filter(|pma| self.match_peer_id(pma) && !set.contains(pma))
                .map(Into::into)
                .collect::<Vec<AddrInfo>>()
        };

        self.inner.write().extend(multiaddrs);
    }

    /// Replace the whole set (existing failure counters are discarded).
    pub fn set(&self, multiaddrs: Vec<PeerMultiaddr>) {
        let multiaddrs = multiaddrs
            .into_iter()
            .filter(|pma| self.match_peer_id(pma))
            .map(Into::into)
            .collect::<HashSet<AddrInfo>>();

        *self.inner.write() = multiaddrs;
    }

    pub(crate) fn insert_raw(&self, multiaddr: Multiaddr) {
        if let Some(id_bytes) = multiaddr.id_bytes() {
            if id_bytes != self.peer_id.as_bytes() {
                return;
            }
        }

        self.insert(vec![PeerMultiaddr::new(multiaddr, &self.peer_id)]);
    }

    pub fn remove(&self, multiaddr: &PeerMultiaddr) {
        self.inner.write().remove(multiaddr);
    }

    pub fn contains(&self, multiaddr: &PeerMultiaddr) -> bool {
        self.inner.read().contains(multiaddr)
    }

    pub fn all(&self) -> Vec<PeerMultiaddr> {
        self.inner.read().iter().map(AddrInfo::owned_addr).collect()
    }

    pub fn all_raw(&self) -> Vec<Multiaddr> {
        self.inner
            .read()
            .iter()
            .map(AddrInfo::owned_raw_addr)
            .collect()
    }

    /// Only addresses that have not exceeded the failure threshold.
    pub fn connectable(&self) -> Vec<PeerMultiaddr> {
        let to_pma = |a: &'_ AddrInfo| -> Option<PeerMultiaddr> {
            if a.connectable() {
                Some(a.owned_addr())
            } else {
                None
            }
        };

        self.inner.read().iter().filter_map(to_pma).collect()
    }

    pub fn len(&self) -> usize {
        self.inner.read().len()
    }

    pub fn connectable_len(&self) -> usize {
        self.inner.read().iter().filter(|a| a.connectable()).count()
    }

    #[cfg(test)]
    pub fn failure(&self, pma: &PeerMultiaddr) -> Option<usize> {
        self.inner.read().get(pma).map(|a| a.failure())
    }

    pub fn inc_failure(&self, pma: &PeerMultiaddr) {
        if let Some(info) = self.inner.read().get(pma) {
            info.inc_failure();
        }
    }

    pub fn give_up(&self, pma: &PeerMultiaddr) {
        if let Some(info) = self.inner.read().get(pma) {
            info.give_up();
        }
    }

    pub fn reset_failure(&self, pma: &PeerMultiaddr) {
        if let Some(info) = self.inner.read().get(pma) {
            info.reset_failure();
        }
    }

    fn match_peer_id(&self, pma: &PeerMultiaddr) -> bool {
        pma.has_id() && pma.id_bytes() == Some(Cow::Borrowed(self.peer_id.as_bytes()))
    }
}

================================================
FILE:
core/network/src/peer_manager/diagnostic.rs
================================================
use super::{Inner, WORSE_TRUST_SCALAR_RATIO};
use crate::event::PeerManagerEvent;

use derive_more::Display;
use protocol::traits::TrustFeedback;
use tentacle::{secio::PeerId, SessionId};

use std::sync::Arc;

/// Error returned when the requested session or its trust metric is absent.
#[derive(Debug, Display)]
#[display(fmt = "not found")]
pub struct NotFound {}

impl std::error::Error for NotFound {}

/// Events surfaced to diagnostic hooks (feature = "diagnostic").
#[derive(Debug, Display, Clone)]
pub enum DiagnosticEvent {
    #[display(fmt = "new session")]
    NewSession,
    #[display(fmt = "session closed")]
    SessionClosed,
    #[display(fmt = "trust metric feedback {}", feedback)]
    TrustMetric { feedback: TrustFeedback },
    #[display(fmt = "trust new interval report {}", report)]
    TrustNewInterval { report: TrustReport },
    #[display(fmt = "remote height {}", height)]
    RemoteHeight { height: u64 },
}

impl From<&PeerManagerEvent> for Option<DiagnosticEvent> {
    fn from(event: &PeerManagerEvent) -> Self {
        use PeerManagerEvent::{NewSession, SessionClosed, TrustMetric};

        match event {
            NewSession { .. } => Some(DiagnosticEvent::NewSession),
            SessionClosed { .. } => Some(DiagnosticEvent::SessionClosed),
            TrustMetric { feedback, .. } => Some(DiagnosticEvent::TrustMetric {
                feedback: feedback.to_owned(),
            }),
            _ => None,
        }
    }
}

// NOTE(review): the boxed fn signature was lost in extraction; reconstructed
// as taking the event by reference — confirm against upstream source.
pub type DiagnosticHookFn = Box<dyn Fn(&DiagnosticEvent) + Send + 'static>;

/// Snapshot of a peer's trust metric at the end of an interval.
///
/// FIX: the display attribute previously paired the label `good {}` with
/// `bad_events` and `bad {}` with `good_events`; the argument order now
/// matches the labels.
#[derive(Debug, Display, Clone, Copy)]
#[display(
    fmt = "score {}, good {}, bad {}, worse scalar ratio {}",
    score,
    good_events,
    bad_events,
    worse_scalar_ratio
)]
pub struct TrustReport {
    pub score:              u8,
    pub bad_events:         usize,
    pub good_events:        usize,
    pub worse_scalar_ratio: usize,
}

/// Read-only diagnostic view over the peer manager's shared state.
#[derive(Clone)]
pub struct Diagnostic(Arc<Inner>);

impl Diagnostic {
    pub(super) fn new(inner: Arc<Inner>) -> Self {
        Diagnostic(inner)
    }

    /// Session id of a connected peer; session id 0 means "not connected".
    pub fn session(&self, peer_id: &PeerId) -> Option<SessionId> {
        match self.0.peer(peer_id).map(|p| p.session_id()) {
            Some(sid) if sid != SessionId::new(0) => Some(sid),
            _ => None,
        }
    }

    /// Snapshot the session's trust metric, then start a new interval.
    pub fn new_trust_interval(&self, sid: SessionId) -> Result<TrustReport, NotFound> {
        let session = self.0.session(sid).ok_or(NotFound {})?;
        let metric = session.peer.trust_metric().ok_or(NotFound {})?;

        let score = metric.trust_score();
        let (good_events, bad_events) = metric.events();
        let report = TrustReport {
            score,
            good_events,
            bad_events,
            worse_scalar_ratio: WORSE_TRUST_SCALAR_RATIO,
        };

        metric.enter_new_interval();
        Ok(report)
    }
}

================================================
FILE: core/network/src/peer_manager/mod.rs
================================================
#![allow(clippy::mutable_key_type)]

mod addr_set;
mod peer;
mod retry;
mod save_restore;
mod session_book;
mod shared;
mod tags;
mod time;
mod trust_metric;

#[cfg(feature = "diagnostic")]
pub mod diagnostic;
#[cfg(test)]
mod test_manager;

use std::borrow::Borrow;
use std::cmp::PartialEq;
use std::collections::HashSet;
use std::convert::{TryFrom, TryInto};
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::ops::Deref;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};

use arc_swap::ArcSwap;
use derive_more::Display;
use futures::channel::mpsc::{UnboundedReceiver, UnboundedSender};
use
futures::stream::Stream; use futures::task::AtomicWaker; use log::{debug, error, info, warn}; use parking_lot::RwLock; use protocol::traits::{PeerTag, TrustFeedback}; use rand::seq::IteratorRandom; use serde_derive::{Deserialize, Serialize}; use tentacle::multiaddr::Multiaddr; use tentacle::secio::{PeerId, PublicKey}; use tentacle::service::SessionType; use tentacle::SessionId; use crate::common::{resolve_if_unspecified, HeartBeat}; use crate::error::{NetworkError, PeerIdNotFound}; use crate::event::{ ConnectionErrorKind, ConnectionEvent, ConnectionType, MisbehaviorKind, PeerManagerEvent, SessionErrorKind, }; use crate::protocols::identify::{self, Identify, WaitIdentification}; use crate::protocols::CoreProtocol; use crate::traits::{MultiaddrExt, NetworkProtocol}; use addr_set::PeerAddrSet; use retry::Retry; use save_restore::{NoPeerDatFile, PeerDatFile, SaveRestore}; use session_book::{AcceptableSession, ArcSession, SessionContext}; use tags::Tags; pub use peer::{ArcPeer, Connectedness}; pub use session_book::SessionBook; pub use shared::SharedSessions; pub use trust_metric::{TrustMetric, TrustMetricConfig}; const SAME_IP_LIMIT_BAN: Duration = Duration::from_secs(5 * 60); const REPEATED_CONNECTION_TIMEOUT: u64 = 30; // seconds const BACKOFF_BASE: u64 = 2; const MAX_RETRY_INTERVAL: u64 = 512; // seconds const MAX_RETRY_COUNT: u8 = 30; const SHORT_ALIVE_SESSION: u64 = 3; // seconds const MAX_CONNECTING_MARGIN: usize = 10; const MAX_RANDOM_NEXT_RETRY: u64 = 10; const MAX_CONNECTING_TIMEOUT: Duration = Duration::from_secs(30); const GOOD_TRUST_SCORE: u8 = 80u8; const WORSE_TRUST_SCALAR_RATIO: usize = 10; #[derive(Debug, Display)] pub enum NewSessionPreCheckError { #[display(fmt = "peer banned")] PeerBanned, #[display(fmt = "allow list peer only")] AllowListOnly, #[display(fmt = "reach max connection")] ReachMaxConnection, #[display(fmt = "peer already connected, only allow one connection per peer")] PeerAlreadyConnected, #[display(fmt = "{}", _0)] 
ReachSessionLimit(session_book::Error), } #[derive(Debug, Clone, Display, Serialize, Deserialize)] #[display(fmt = "{}", _0)] pub struct PeerMultiaddr(Multiaddr); impl PeerMultiaddr { pub fn new(mut ma: Multiaddr, peer_id: &PeerId) -> Self { if !ma.has_id() { ma.push_id(peer_id.to_owned()); } PeerMultiaddr(ma) } pub fn peer_id(&self) -> PeerId { Self::extract_id(&self.0).expect("impossible, should be verified already") } fn extract_id(ma: &Multiaddr) -> Option { if let Some(Ok(peer_id)) = ma .id_bytes() .map(|bytes| PeerId::from_bytes(bytes.to_vec())) { Some(peer_id) } else { None } } } impl Borrow for PeerMultiaddr { fn borrow(&self) -> &Multiaddr { &self.0 } } impl PartialEq for PeerMultiaddr { fn eq(&self, other: &PeerMultiaddr) -> bool { self.0 == other.0 } } impl Eq for PeerMultiaddr {} impl Hash for PeerMultiaddr { fn hash(&self, state: &mut H) { self.0.hash(state) } } impl Deref for PeerMultiaddr { type Target = Multiaddr; fn deref(&self) -> &Self::Target { &self.0 } } impl TryFrom for PeerMultiaddr { type Error = PeerIdNotFound; fn try_from(ma: Multiaddr) -> Result { if Self::extract_id(&ma).is_some() { Ok(PeerMultiaddr(ma)) } else { Err(PeerIdNotFound(ma)) } } } impl Into for PeerMultiaddr { fn into(self) -> Multiaddr { self.0 } } #[derive(Debug)] struct ConnectingAttempt { peer: ArcPeer, multiaddrs: HashSet, at: Instant, } impl ConnectingAttempt { fn new(peer: ArcPeer) -> Self { let multiaddrs = HashSet::from_iter(peer.multiaddrs.connectable()); let at = Instant::now(); ConnectingAttempt { peer, multiaddrs, at, } } fn multiaddrs(&self) -> usize { self.multiaddrs.len() } fn complete_one_multiaddr(&mut self, multiaddr: &PeerMultiaddr) { self.multiaddrs.remove(multiaddr); } fn is_timeout(&self) -> bool { self.at.elapsed() >= MAX_CONNECTING_TIMEOUT } #[cfg(test)] fn set_at(&mut self, duration: Duration) { self.at = self.at.checked_sub(duration).unwrap(); } } impl Borrow for ConnectingAttempt { fn borrow(&self) -> &PeerId { &self.peer.id } } impl PartialEq for 
ConnectingAttempt { fn eq(&self, other: &ConnectingAttempt) -> bool { self.peer.id == other.peer.id } } impl Eq for ConnectingAttempt {} impl Hash for ConnectingAttempt { fn hash(&self, state: &mut H) { self.peer.id.hash(state) } } struct Inner { our_id: Arc, chain_id: ArcSwap, sessions: SessionBook, consensus: RwLock>, peers: RwLock>, listen: RwLock>, } impl Inner { pub fn new(our_id: PeerId, sessions: SessionBook) -> Self { Inner { our_id: Arc::new(our_id), chain_id: ArcSwap::new(Arc::new(protocol::types::Hash::from_empty())), sessions, consensus: Default::default(), peers: Default::default(), listen: Default::default(), } } pub fn add_listen(&self, multiaddr: PeerMultiaddr) { self.listen.write().insert(multiaddr); } pub fn listen(&self) -> HashSet { self.listen.read().clone() } pub fn remove_listen(&self, multiaddr: &PeerMultiaddr) { self.listen.write().remove(multiaddr); } pub fn set_chain_id(&self, chain_id: protocol::types::Hash) { self.chain_id.store(Arc::new(chain_id)); } pub fn chain_id(&self) -> Arc { self.chain_id.load_full() } pub fn connected(&self) -> usize { self.sessions.len() } /// If peer exists, return false pub fn add_peer(&self, peer: ArcPeer) -> bool { common_apm::metrics::network::NETWORK_SAVED_PEER_COUNT.inc(); self.peers.write().insert(peer) } pub fn peer_count(&self) -> usize { self.peers.read().len() } pub fn peer(&self, peer_id: &PeerId) -> Option { self.peers.read().get(peer_id).cloned() } pub fn contains(&self, peer_id: &PeerId) -> bool { self.peers.read().contains(peer_id) } pub fn connectable_peers(&self, max: usize, addition_filter: F) -> Vec where F: Fn(&ArcPeer) -> bool + 'static, { let connectable = |p: &'_ &ArcPeer| -> bool { (p.connectedness() == Connectedness::NotConnected || p.connectedness() == Connectedness::CanConnect) && p.retry.ready() && p.multiaddrs.connectable_len() > 0 && !p.banned() && addition_filter(p) }; let mut rng = rand::thread_rng(); let book = self.peers.read(); let qualified_peers = 
book.iter().filter(connectable).map(ArcPeer::to_owned); qualified_peers.choose_multiple(&mut rng, max) } pub fn session(&self, sid: SessionId) -> Option { self.sessions.get(&sid) } pub fn share_sessions(&self) -> Vec { self.sessions.all() } pub fn remove_session(&self, sid: SessionId) -> Option { self.sessions.remove(&sid) } pub fn package_peers(&self) -> Vec { self.peers.read().iter().cloned().collect() } fn restore(&self, peers: Vec) { self.peers.write().extend(peers); } fn outbound_count(&self) -> usize { self.sessions.outbound_count() } } struct UnidentifiedSessionEvent { pubkey: PublicKey, ctx: Arc, } struct UnidentifiedSession { event: UnidentifiedSessionEvent, ident_fut: WaitIdentification, connected_at: Instant, } impl UnidentifiedSession { fn new(event: UnidentifiedSessionEvent, ident_fut: WaitIdentification) -> Self { UnidentifiedSession { event, ident_fut, connected_at: Instant::now(), } } fn peer_id(&self) -> PeerId { self.event.pubkey.peer_id() } } impl Borrow for UnidentifiedSession { fn borrow(&self) -> &SessionId { &self.event.ctx.id } } impl PartialEq for UnidentifiedSession { fn eq(&self, other: &UnidentifiedSession) -> bool { self.event.ctx.id == other.event.ctx.id } } impl Eq for UnidentifiedSession {} impl Hash for UnidentifiedSession { fn hash(&self, state: &mut H) { self.event.ctx.id.hash(state) } } #[derive(Debug, Clone)] pub struct PeerManagerConfig { /// Our Peer ID pub our_id: PeerId, /// Our public key pub pubkey: PublicKey, /// Bootstrap peers pub bootstraps: Vec, /// Always accept/connect peers in list pub allowlist: Vec, /// Only accept/conect peers in allowlist pub allowlist_only: bool, /// Limit connections from same ip pub same_ip_conn_limit: usize, /// Limit inbound connections pub inbound_conn_limit: usize, /// Limit outbound connections pub outbound_conn_limit: usize, /// Trust metric config pub peer_trust_config: Arc, pub peer_fatal_ban: Duration, pub peer_soft_ban: Duration, /// Max connections pub max_connections: usize, /// 
Routine job interval pub routine_interval: Duration, /// Peer dat file path pub peer_dat_file: PathBuf, } #[derive(Clone)] pub struct PeerManagerHandle { inner: Arc, } impl PeerManagerHandle { pub fn peer_id(&self, sid: SessionId) -> Option { self.inner.session(sid).map(|s| s.peer.owned_id()) } pub fn set_chain_id(&self, chain_id: protocol::types::Hash) { self.inner.set_chain_id(chain_id); } pub fn chain_id(&self) -> Arc { self.inner.chain_id() } pub fn contains_session(&self, session_id: SessionId) -> bool { self.inner.session(session_id).is_some() } pub fn random_addrs(&self, max: usize, sid: SessionId) -> Vec { let mut rng = rand::thread_rng(); let book = self.inner.peers.read(); let peers = book.iter().choose_multiple(&mut rng, max); let is_self_consensus = self .inner .peer(&self.inner.our_id) .map(|p| p.tags.contains(&PeerTag::Consensus)) .unwrap_or_else(|| false); let is_remote_consensus = self .inner .session(sid) .map(|s| s.peer.tags.contains(&PeerTag::Consensus)) .unwrap_or_else(|| false); let condidates = peers .into_iter() .filter_map(|p| { if !is_remote_consensus && p.tags.contains(&PeerTag::Consensus) { None } else { Some(p.multiaddrs.all_raw()) } }) .flatten(); if !is_self_consensus { // Should always include our self let our_self = self.listen_addrs(); our_self.into_iter().chain(condidates).take(max).collect() } else { condidates.take(max).collect() } } pub fn listen_addrs(&self) -> Vec { let listen = self.inner.listen(); debug_assert!(!listen.is_empty(), "listen should alway be set"); let sanitize = |pma: PeerMultiaddr| -> Multiaddr { let ma: Multiaddr = pma.into(); match resolve_if_unspecified(&ma) { Ok(resolved) => resolved, Err(_) => ma, } }; listen.into_iter().map(sanitize).collect() } pub fn tag(&self, peer_id: &PeerId, tag: PeerTag) -> Result<(), NetworkError> { let consensus_tag = tag == PeerTag::Consensus; if let Some(peer) = self.inner.peer(peer_id) { peer.tags.insert(tag)?; } else { let peer = ArcPeer::new(peer_id.to_owned()); 
peer.tags.insert(tag)?; self.inner.add_peer(peer); } if consensus_tag { self.inner.consensus.write().insert(peer_id.to_owned()); } Ok(()) } pub fn untag(&self, peer_id: &PeerId, tag: &PeerTag) { if let Some(peer) = self.inner.peer(peer_id) { peer.tags.remove(tag); } if tag == &PeerTag::Consensus { self.inner.consensus.write().remove(peer_id); } } pub fn tag_consensus(&self, peer_ids: Vec) { common_apm::metrics::network::NETWORK_TAGGED_CONSENSUS_PEERS.set(peer_ids.len() as i64); { for peer_id in self.inner.consensus.read().iter() { if let Some(peer) = self.inner.peer(peer_id) { peer.tags.remove(&PeerTag::Consensus) } } } for peer_id in peer_ids.iter() { let _ = self.tag(peer_id, PeerTag::Consensus); } { let id_set = HashSet::from_iter(peer_ids); *self.inner.consensus.write() = id_set; } } } pub struct PeerManager { // core peer pool inner: Arc, config: PeerManagerConfig, peer_id: PeerId, bootstraps: HashSet, // peers currently connecting connecting: HashSet, // unidentified session backlog unidentified_backlog: HashSet, event_rx: UnboundedReceiver, conn_tx: UnboundedSender, // heart beat, for current connections check, etc heart_beat: Option, hb_waker: Arc, // save restore peer_dat_file: Box, // diagnostic event hook #[cfg(feature = "diagnostic")] diagnostic_hook: Option, } impl PeerManager { pub fn new( config: PeerManagerConfig, event_rx: UnboundedReceiver, conn_tx: UnboundedSender, ) -> Self { let peer_id = config.our_id.clone(); let session_config = session_book::Config::from(&config); let session_book = SessionBook::new(session_config); let inner = Arc::new(Inner::new(config.our_id.clone(), session_book)); let bootstraps = HashSet::from_iter(config.bootstraps.clone()); let waker = Arc::new(AtomicWaker::new()); let heart_beat = HeartBeat::new(Arc::clone(&waker), config.routine_interval); let peer_dat_file = Box::new(NoPeerDatFile); for peer_id in config.allowlist.iter().cloned() { assert_eq!(inner.peer_count(), 0, "should be empty before bootstrapped"); let peer 
= ArcPeer::new(peer_id); let _ = peer.tags.insert(PeerTag::AlwaysAllow); inner.add_peer(peer); } PeerManager { inner, config, peer_id, bootstraps, connecting: Default::default(), unidentified_backlog: Default::default(), event_rx, conn_tx, heart_beat: Some(heart_beat), hb_waker: waker, peer_dat_file, #[cfg(feature = "diagnostic")] diagnostic_hook: None, } } pub fn handle(&self) -> PeerManagerHandle { PeerManagerHandle { inner: Arc::clone(&self.inner), } } pub fn share_session_book(&self, config: shared::Config) -> SharedSessions { SharedSessions::new(Arc::clone(&self.inner), config) } #[cfg(feature = "diagnostic")] pub fn register_diagnostic_hook(&mut self, f: diagnostic::DiagnosticHookFn) { self.diagnostic_hook = Some(f); } #[cfg(feature = "diagnostic")] pub fn diagnostic(&self) -> diagnostic::Diagnostic { diagnostic::Diagnostic::new(Arc::clone(&self.inner)) } pub fn enable_save_restore(&mut self) { let peer_dat_file = PeerDatFile::new(&self.config.peer_dat_file); self.peer_dat_file = Box::new(peer_dat_file); } pub fn restore_peers(&self) -> Result<(), NetworkError> { let peers = self.peer_dat_file.restore()?; self.inner.restore(peers); Ok(()) } pub fn bootstrap(&mut self) { // Insert bootstrap peers for peer in self.bootstraps.iter() { info!("network: {:?}: bootstrap peer: {}", self.peer_id, peer); if let Some(peer_exist) = self.inner.peer(&peer.id) { info!("restored peer {:?} found, insert multiaddr only", peer.id); peer_exist.multiaddrs.insert(peer.multiaddrs.all()); } else { self.inner.add_peer(peer.clone()); } } self.connect_peers(self.bootstraps.iter().cloned().collect()); } pub fn disconnect_session(&self, sid: SessionId) { let disconnect_peer = ConnectionEvent::Disconnect(sid); if self.conn_tx.unbounded_send(disconnect_peer).is_err() { error!("network: connection service exit"); } } #[cfg(test)] fn inner(&self) -> Arc { Arc::clone(&self.inner) } #[cfg(test)] fn config(&self) -> PeerManagerConfig { self.config.clone() } #[cfg(test)] fn set_connecting(&mut 
self, peers: Vec) { for peer in peers.into_iter() { self.connecting.insert(ConnectingAttempt::new(peer)); } } fn new_session_pre_check( &mut self, pubkey: &PublicKey, ctx: &Arc, ) -> Result { let remote_peer_id = pubkey.peer_id(); let remote_multiaddr = PeerMultiaddr::new(ctx.address.to_owned(), &remote_peer_id); // Remove from connecting if we dial this peer or create new one self.connecting.remove(&remote_peer_id); let opt_peer = self.inner.peer(&remote_peer_id); let remote_peer = opt_peer.unwrap_or_else(|| ArcPeer::new(remote_peer_id.clone())); // Inbound address is client address, it's useless match ctx.ty { SessionType::Inbound => remote_peer.multiaddrs.remove(&remote_multiaddr), SessionType::Outbound => { if remote_peer.multiaddrs.contains(&remote_multiaddr) { remote_peer.multiaddrs.reset_failure(&remote_multiaddr); } else { remote_peer.multiaddrs.insert(vec![remote_multiaddr]); } } } if remote_peer.banned() { info!("banned peer {:?} incomming", remote_peer_id); remote_peer.mark_disconnected(); self.disconnect_session(ctx.id); return Err(NewSessionPreCheckError::PeerBanned); } if self.config.allowlist_only && !remote_peer.tags.contains(&PeerTag::AlwaysAllow) && !remote_peer.tags.contains(&PeerTag::Consensus) { debug!("allowlist_only enabled, reject peer {:?}", remote_peer.id); remote_peer.mark_disconnected(); self.disconnect_session(ctx.id); return Err(NewSessionPreCheckError::AllowListOnly); } if self.inner.connected() >= self.config.max_connections { let found_replacement = || -> bool { let incoming_trust_score = match remote_peer.trust_metric() { Some(trust_metric) => trust_metric.trust_score(), None => return false, }; for session in self.inner.share_sessions() { let session_trust_score = match session.peer.trust_metric() { Some(trust_metric) => trust_metric.trust_score(), None => { // Impossible error!("session peer {:?} trust metric not found", session.peer.id); return false; } }; // Ensure that session be replaced has traveled enough // intervals if 
incoming_trust_score > session_trust_score && !session.peer.tags.contains(&PeerTag::AlwaysAllow) && !session.peer.tags.contains(&PeerTag::Consensus) && session.peer.alive() > self.config.peer_trust_config.interval().as_secs() * 20 { info!( "session peer {:?} is been replaced by peer {:?}", session.peer.id, remote_peer.id ); self.disconnect_session(session.id); return true; } } false }; if !remote_peer.tags.contains(&PeerTag::AlwaysAllow) && !remote_peer.tags.contains(&PeerTag::Consensus) && !found_replacement() { info!("reject peer {:?} due to max conn limit", remote_peer.id); remote_peer.mark_disconnected(); self.disconnect_session(ctx.id); return Err(NewSessionPreCheckError::ReachMaxConnection); } } let connectedness = remote_peer.connectedness(); if connectedness == Connectedness::Connected { // This should not happen, because of repeated connection event error!("got new session event on same peer {:?}", remote_peer.id); let exist_sid = remote_peer.session_id(); if exist_sid != ctx.id && self.inner.session(exist_sid).is_some() { // We don't support multiple connections, disconnect new one self.disconnect_session(ctx.id); return Err(NewSessionPreCheckError::PeerAlreadyConnected); } if self.inner.session(exist_sid).is_none() { // We keep new session, outdated will be updated after we insert // it. error!("network: impossible, peer session {} outdated", exist_sid); } } let session = ArcSession::new(remote_peer.clone(), Arc::clone(&ctx)); info!("check new session from {}", session.connected_addr); // Always allow peer in allowlist and consensus peer if !remote_peer.tags.contains(&PeerTag::AlwaysAllow) && !remote_peer.tags.contains(&PeerTag::Consensus) { if let Err(err) = self.inner.sessions.acceptable(&session) { warn!("session {} unacceptable {}", ctx.id, err); // Ban this peer for a while so we won't choose it again // NOTE: Always allowed and consensus peer cannot be banned. 
if let Err(err) = remote_peer.tags.insert_ban(SAME_IP_LIMIT_BAN) { warn!("ban same ip peer {:?} failed: {}", remote_peer.id, err); } remote_peer.mark_disconnected(); self.disconnect_session(ctx.id); return Err(NewSessionPreCheckError::ReachSessionLimit(err)); } } Ok(session) } fn new_unidentified_session(&mut self, pubkey: PublicKey, ctx: Arc) { let peer_id = pubkey.peer_id(); if let Err(err) = self.new_session_pre_check(&pubkey, &ctx) { log::info!("reject unidentified session due to {}", err); Identify::wait_failed(&peer_id, err.to_string()); return; } common_apm::metrics::network::NETWORK_UNIDENTIFIED_CONNECTIONS.inc(); let event = UnidentifiedSessionEvent { pubkey, ctx }; let ident_fut = Identify::wait_identified(peer_id); let unidentified_session = UnidentifiedSession::new(event, ident_fut); self.unidentified_backlog.insert(unidentified_session); } fn new_session(&mut self, pubkey: PublicKey, ctx: Arc) { let session = match self.new_session_pre_check(&pubkey, &ctx) { Ok(session) => session, Err(err) => { log::info!("reject new session due to {}", err); return; } }; info!("new session from {}", session.connected_addr); if !session.peer.has_pubkey() { if let Err(e) = session.peer.set_pubkey(pubkey) { error!("impossible, set public key failed {}", e); } } // Currently we only save accepted peer. 
// TODO: save to database if !self.inner.contains(&session.peer.id) { self.inner.add_peer(session.peer.clone()); } let remote_peer = session.peer.clone(); self.inner.sessions.insert(AcceptableSession(session)); remote_peer.mark_connected(ctx.id); common_apm::metrics::network::NETWORK_CONNECTED_PEERS.inc(); if remote_peer.tags.contains(&PeerTag::Consensus) { common_apm::metrics::network::NETWORK_CONNECTED_CONSENSUS_PEERS.inc(); } match remote_peer.trust_metric() { Some(trust_metric) => trust_metric.start(), None => { let trust_metric = TrustMetric::new(Arc::clone(&self.config.peer_trust_config)); trust_metric.start(); remote_peer.set_trust_metric(trust_metric); } } } fn session_closed(&mut self, pid: PeerId, sid: SessionId) { debug!("peer {:?} session {} closed", pid, sid); // Check unidentified session let opt_unidentified_session = self.unidentified_backlog.take(&sid); if opt_unidentified_session.is_none() { common_apm::metrics::network::NETWORK_CONNECTED_PEERS.dec(); } if self.connecting.take(&pid).is_some() { log::info!("connecting peer {:?} session closed", pid); } // Session may be removed by other event or rejected let opt_session = self.inner.remove_session(sid); if let Some(ref session) = opt_session { common_apm::metrics::network::NETWORK_IP_DISCONNECTED_COUNT_VEC .with_label_values(&[&session.connected_addr.host]) .inc(); log::info!("{} session closed", session.connected_addr); } let remote_peer = { match opt_session.map_or_else(|| self.inner.peer(&pid), |s| Some(s.peer.to_owned())) { Some(peer) => peer, None => { log::info!("close unsaved peer session, peer {:?}", pid); return; } } }; remote_peer.mark_disconnected(); if remote_peer.tags.contains(&PeerTag::Consensus) && opt_unidentified_session.is_none() { common_apm::metrics::network::NETWORK_CONNECTED_CONSENSUS_PEERS.dec(); } match remote_peer.trust_metric() { Some(trust_metric) => trust_metric.pause(), None => { warn!("session peer {:?} trust metric not found", remote_peer.id); let trust_metric = 
TrustMetric::new(Arc::clone(&self.config.peer_trust_config)); remote_peer.set_trust_metric(trust_metric); } } if remote_peer.alive() < SHORT_ALIVE_SESSION { // NOTE: peer maybe abnormally disconnect from others. When we try // to reconnect, other peers may treat this as repeated connection, // then disconnect. We have to wait for timeout. warn!( "increase peer {:?} retry due to repeated short live session", remote_peer.id ); while remote_peer.retry.eta() < REPEATED_CONNECTION_TIMEOUT { remote_peer.retry.inc(); } } else { // Set up a short ban, so we won't retry this peer immediately if remote_peer.tags.contains(&PeerTag::Consensus) || remote_peer.tags.contains(&PeerTag::AlwaysAllow) { return; } let rand_next_retry = { let mut duration = rand::random::() % MAX_RANDOM_NEXT_RETRY; if duration < 2 { duration = 2; // At least 2 seconds } Duration::from_secs(duration) }; if let Err(err) = remote_peer.tags.insert_ban(rand_next_retry) { log::info!("random retry for peer {:?} failed: {}", remote_peer.id, err); } } } fn connect_failed(&mut self, addr: Multiaddr, error_kind: ConnectionErrorKind) { use ConnectionErrorKind::{ DNSResolver, Io, MultiaddrNotSuppored, PeerIdNotMatch, ProtocolHandle, SecioHandshake, TimeOut, }; log::info!("connect to {:?} failed: {}", addr, error_kind); let peer_addr: PeerMultiaddr = match addr.clone().try_into() { Ok(pma) => pma, Err(e) => { // All multiaddrs we dial have peer id included error!("unconnectable multiaddr {} without peer id {}", addr, e); return; } }; let peer_id = peer_addr.peer_id(); let peer = match self.inner.peer(&peer_id) { Some(p) => p, None => { // Impossibe error!("outbound connecting peer not found {:?}", peer_id); return; } }; match error_kind { Io(_) | DNSResolver(_) => peer.multiaddrs.inc_failure(&peer_addr), MultiaddrNotSuppored(_) => { info!("give up unsupported multiaddr {}", addr); peer.multiaddrs.give_up(&peer_addr); } PeerIdNotMatch => { warn!("give up multiaddr {} because peer id not match", peer_addr); 
peer.multiaddrs.give_up(&peer_addr);
            }
            TimeOut(reason) => {
                // Timeout is transient: keep the address but record a failure.
                info!("connect timeout {}", reason);
                peer.multiaddrs.inc_failure(&peer_addr);
            }
            // Handshake/protocol-handle failure means the remote end is not
            // speaking our protocol; retrying this peer is pointless.
            SecioHandshake(_) | ProtocolHandle => {
                // Fix typo in log message: "becasue" -> "because".
                warn!("give up peer {:?} because {}", peer.id, error_kind);
                peer.set_connectedness(Connectedness::Unconnectable);
            }
        }

        // If this address belongs to an in-flight outbound attempt, record
        // the failure against that attempt.
        if let Some(mut attempt) = self.connecting.take(&peer_id) {
            if attempt.peer.connectedness() == Connectedness::Unconnectable {
                // We already gave this peer up above; drop the attempt.
                return;
            }

            attempt.complete_one_multiaddr(&peer_addr);

            // No more connecting multiaddrs from this peer.
            // This means all multiaddrs failed.
            if attempt.multiaddrs() == 0 {
                log::info!("peer {:?} increase retry", attempt.peer.id);

                attempt.peer.retry.inc();
                attempt.peer.set_connectedness(Connectedness::CanConnect);

                if attempt.peer.retry.run_out() {
                    warn!("give up peer {:?} due to retry run out", attempt.peer.id);
                    attempt.peer.set_connectedness(Connectedness::Unconnectable);
                }

                // FIXME
                // if let Some(trust_metric) = attempt.peer.trust_metric() {
                //     trust_metric.bad_events(1);
                // }
            } else {
                // Wait for the results of the other connecting multiaddrs.
                self.connecting.insert(attempt);
            }
        }
    }

    /// Handle a failure on an established session: remove and disconnect
    /// the session, penalize the peer's trust metric, then either increase
    /// retry (I/O error) or give the peer up (protocol/unexpected error).
    fn session_failed(&self, sid: SessionId, error_kind: SessionErrorKind) {
        warn!("session {} failed {}", sid, error_kind);
        use SessionErrorKind::{Io, Protocol, Unexpected};

        let session = match self.inner.remove_session(sid) {
            Some(s) => s,
            None => return, /* Session may be removed by other event or rejected
                             * due to max connections before insert */
        };
        // Ensure we disconnect this peer
        self.disconnect_session(sid);
        session.peer.mark_disconnected();

        match session.peer.trust_metric() {
            Some(trust_metric) => trust_metric.bad_events(1),
            None => {
                // Peer somehow lost its trust metric; recreate it and still
                // record the bad event so the failure is not forgotten.
                warn!("session peer {:?} trust metric not found", session.peer.id);
                let trust_metric = TrustMetric::new(Arc::clone(&self.config.peer_trust_config));
                trust_metric.bad_events(1);
                session.peer.set_trust_metric(trust_metric);
            }
        }

        match error_kind {
            Io(_) => {
                info!("peer {:?} session failed, increase retry", session.peer.id);
session.peer.retry.inc(); } Protocol { .. } | Unexpected(_) => { let pid = &session.peer.id; let remote_addr = &session.connected_addr; warn!("give up peer {:?} from {} {}", pid, remote_addr, error_kind); session.peer.set_connectedness(Connectedness::Unconnectable); } } } fn update_peer_alive(&self, pid: &PeerId) { if let Some(peer) = self.inner.peer(pid) { let sid = peer.session_id(); if sid != 0.into() { if let Some(session) = self.inner.session(sid) { info!("peer {:?} {} alive", pid, session.connected_addr); } } peer.retry.reset(); // Just in case peer.update_alive(); } } fn peer_misbehave(&self, pid: PeerId, kind: MisbehaviorKind) { warn!("peer {:?} misbehave {}", pid, kind); use MisbehaviorKind::{Discovery, PingTimeout, PingUnexpect}; let peer = match self.inner.peer(&pid) { Some(p) => p, None => { error!("misbehave peer {:?} not found", pid); return; } }; match peer.trust_metric() { Some(trust_metric) => trust_metric.bad_events(1), None => { warn!("session peer {:?} trust metric not found", peer.id); let trust_metric = TrustMetric::new(Arc::clone(&self.config.peer_trust_config)); trust_metric.start(); trust_metric.bad_events(1); peer.set_trust_metric(trust_metric); } } let sid = peer.session_id(); if sid == SessionId::new(0) { // Impossible, connected session always bigger than 0 error!("misbehave peer with session id 0"); return; } self.inner.remove_session(sid); peer.mark_disconnected(); // Ensure we disconnect from this peer self.disconnect_session(sid); match kind { PingTimeout => peer.retry.inc(), PingUnexpect | Discovery => { warn!("give up peer {:?} because of {}", peer.id, kind); peer.set_connectedness(Connectedness::Unconnectable) } } } fn trust_metric_feedback(&self, pid: PeerId, feedback: TrustFeedback) { use TrustFeedback::{Bad, Fatal, Good, Neutral, Worse}; let peer = match self.inner.peer(&pid) { Some(p) => p, None => { error!("fatal peer {:?} not found", pid); return; } }; let peer_trust_metric = match peer.trust_metric() { Some(t) => t, None 
=> { warn!("session peer {:?} trust metric not found", peer.id); let trust_metric = TrustMetric::new(Arc::clone(&self.config.peer_trust_config)); trust_metric.start(); peer.set_trust_metric(trust_metric.clone()); trust_metric } }; match &feedback { Fatal(reason) => { warn!("peer {:?} trust feedback fatal {}", pid, reason); if peer.tags.contains(&PeerTag::AlwaysAllow) || peer.tags.contains(&PeerTag::Consensus) { return; } let fatal_ban = self.config.peer_fatal_ban; info!("peer {:?} ban {} seconds", pid, fatal_ban.as_secs()); peer_trust_metric.pause(); if let Err(e) = peer.tags.insert_ban(fatal_ban) { warn!("ban peer {}", e); debug!("impossible, we already make sure peer isn't in allowlist"); } if let Some(session) = self.inner.remove_session(peer.session_id()) { self.disconnect_session(session.id); } peer.mark_disconnected(); } Bad(_) | Worse(_) => { match &feedback { Bad(reason) => { info!("peer {:?} trust feedback bad {}", pid, reason); peer_trust_metric.bad_events(1); } Worse(reason) => { warn!("peer {:?} trust feedback worse {}", pid, reason); peer_trust_metric.bad_events(WORSE_TRUST_SCALAR_RATIO); } _ => unreachable!(), }; if peer_trust_metric.knock_out() && !peer.tags.contains(&PeerTag::AlwaysAllow) && !peer.tags.contains(&PeerTag::Consensus) { let soft_ban = self.config.peer_soft_ban.as_secs(); info!("peer {:?} knocked out, soft ban {} seconds", pid, soft_ban); peer_trust_metric.pause(); if let Err(e) = peer.tags.insert_ban(Duration::from_secs(soft_ban)) { warn!("ban peer {}", e); debug!("impossible, we already make sure peer isn't in allowlist"); } if let Some(session) = self.inner.remove_session(peer.session_id()) { self.disconnect_session(session.id); } peer.mark_disconnected(); } } Neutral => (), Good => peer_trust_metric.good_events(1), } } fn session_blocked(&self, ctx: Arc) { warn!( "session {} blocked, pending data size {}", ctx.id, ctx.pending_data_size() ); if let Some(session) = self.inner.session(ctx.id) { session.block(); match 
session.peer.trust_metric() { Some(trust_metric) => trust_metric.bad_events(1), None => { warn!("session peer {:?} trust metric not found", session.peer.id); let trust_metric = TrustMetric::new(Arc::clone(&self.config.peer_trust_config)); trust_metric.start(); trust_metric.bad_events(1); session.peer.set_trust_metric(trust_metric); } }; } } fn connect_peers_now(&mut self, peers: Vec) { let peer_addrs = peers.into_iter().map(|peer| { peer.set_connectedness(Connectedness::Connecting); let addrs = peer.multiaddrs.all_raw(); self.connecting.insert(ConnectingAttempt::new(peer)); addrs }); let addrs = peer_addrs.flatten().collect(); info!("connect addrs {:?}", addrs); let connect_attempt = ConnectionEvent::Connect { addrs, proto: CoreProtocol::target(), }; if self.conn_tx.unbounded_send(connect_attempt).is_err() { error!("network: connection service exit"); } } fn connect_peers(&mut self, peers: Vec) { let connectable = |p: ArcPeer| -> Option { if p.multiaddrs.len() == 0 { log::info!("peer {:?} has no multiaddress", p.id); return None; } if self.config.allowlist_only && !p.tags.contains(&PeerTag::AlwaysAllow) && !p.tags.contains(&PeerTag::Consensus) { debug!("filter peer {:?} not in allowlist", p.id); return None; } let connectedness = p.connectedness(); if connectedness != Connectedness::CanConnect && connectedness != Connectedness::NotConnected { if connectedness == Connectedness::Unconnectable && p.tags.contains(&PeerTag::Consensus) { // For consensus peer, just try again. 
Some(p) } else { log::info!("peer {:?} connectedness {}", p.id, connectedness); None } } else { Some(p) } }; let connectable_peers: Vec<_> = peers.into_iter().filter_map(connectable).collect(); if !connectable_peers.is_empty() { self.connect_peers_now(connectable_peers); } } fn connect_peers_by_id(&mut self, pids: Vec) { let peers_to_connect = { let book = self.inner.peers.read(); pids.iter() .filter_map(|pid| book.get(pid).cloned()) .collect() }; log::info!("connect to peers {:?} found {:?}", pids, peers_to_connect); self.connect_peers(peers_to_connect); } fn discover_multiaddr(&mut self, addr: Multiaddr) { let peer_addr: PeerMultiaddr = match addr.try_into() { Ok(pma) => pma, _ => return, // Ignore multiaddr without peer id }; // Ignore our self if peer_addr.peer_id() == self.peer_id { return; } let peer_id = peer_addr.peer_id(); if let Some(peer) = self.inner.peer(&peer_id) { peer.multiaddrs.insert(vec![peer_addr]); } else { let new_peer = ArcPeer::new(peer_addr.peer_id()); new_peer.multiaddrs.insert(vec![peer_addr]); self.inner.add_peer(new_peer); } } fn dicover_multi_multiaddrs(&mut self, addrs: Vec) { for addr in addrs.into_iter() { self.discover_multiaddr(addr); } } fn identified_addrs(&self, pid: &PeerId, addrs: Vec) { info!("peer {:?} multi identified addrs {:?}", pid, addrs); if let Some(peer) = self.inner.peer(pid) { // Make sure all addresses include peer id let peer_addrs = addrs .into_iter() .map(|a| PeerMultiaddr::new(a, pid)) .collect(); peer.multiaddrs.insert(peer_addrs); } } fn repeated_connection(&mut self, ty: ConnectionType, sid: SessionId, addr: Multiaddr) { info!( "repeated session {:?}, ty {}, remote addr {:?}", sid, ty, addr ); let peer_id = { let opt_unidentified_session = self.unidentified_backlog.get(&sid); let opt_pid = opt_unidentified_session.map_or_else( || self.inner.session(sid).map(|s| s.peer.owned_id()), |unidentified_session| Some(unidentified_session.peer_id()), ); match opt_pid { Some(pid) => pid, None => { // Impossibl 
error!("repeated connection but session {} not found", sid); return; } } }; if let Some(peer) = self.inner.peer(&peer_id) { let peer_addr = PeerMultiaddr::new(addr, &peer_id); match ty { ConnectionType::Inbound => peer.multiaddrs.remove(&peer_addr), ConnectionType::Outbound => peer.multiaddrs.reset_failure(&peer_addr), } } } fn process_event(&mut self, event: PeerManagerEvent) { match event { PeerManagerEvent::ConnectPeersNow { pids } => self.connect_peers_by_id(pids), PeerManagerEvent::ConnectFailed { addr, kind } => self.connect_failed(addr, kind), PeerManagerEvent::UnidentifiedSession { pubkey, ctx, .. } => { self.new_unidentified_session(pubkey, ctx) } PeerManagerEvent::NewSession { pubkey, ctx, .. } => self.new_session(pubkey, ctx), // NOTE: Alice may disconnect to Bob, but bob didn't know // that, so the next time, Alice try to connect to Bob will // cause repeated connection. The only way to fix this right // now is wait for time out. PeerManagerEvent::RepeatedConnection { ty, sid, addr } => { self.repeated_connection(ty, sid, addr) } PeerManagerEvent::SessionBlocked { ctx, .. 
} => self.session_blocked(ctx), PeerManagerEvent::SessionClosed { sid, pid } => self.session_closed(pid, sid), PeerManagerEvent::SessionFailed { sid, kind } => self.session_failed(sid, kind), PeerManagerEvent::PeerAlive { pid } => self.update_peer_alive(&pid), PeerManagerEvent::Misbehave { pid, kind } => self.peer_misbehave(pid, kind), PeerManagerEvent::TrustMetric { pid, feedback } => { self.trust_metric_feedback(pid, feedback) } PeerManagerEvent::DiscoverMultiAddrs { addrs } => self.dicover_multi_multiaddrs(addrs), PeerManagerEvent::IdentifiedAddrs { pid, addrs } => self.identified_addrs(&pid, addrs), PeerManagerEvent::AddNewListenAddr { addr } => { let peer_addr = PeerMultiaddr::new(addr, &self.peer_id); self.inner.add_listen(peer_addr); } PeerManagerEvent::RemoveListenAddr { addr } => { self.inner .remove_listen(&PeerMultiaddr::new(addr, &self.peer_id)); } } } } // Save peers during shutdown impl Drop for PeerManager { fn drop(&mut self) { let peers = self.inner.package_peers(); if let Err(err) = self.peer_dat_file.save(peers) { error!("network: peer dat file: {}", err); } } } impl Future for PeerManager { type Output = (); fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { self.hb_waker.register(ctx.waker()); // Spawn heart beat if let Some(heart_beat) = self.heart_beat.take() { tokio::spawn(heart_beat); } // Process unidentified sessions let unidentified_sessions = self.unidentified_backlog.drain().collect::>(); for mut session in unidentified_sessions { let peer_id = session.event.pubkey.peer_id(); let ident_fut = &mut session.ident_fut; futures::pin_mut!(ident_fut); match ident_fut.poll(ctx) { Poll::Pending => { if session.connected_at.elapsed() >= identify::DEFAULT_TIMEOUT { warn!("reject peer {:?} due to identification timeout", peer_id); self.disconnect_session(session.event.ctx.id); if let Some(peer) = self.inner.peer(&peer_id) { peer.mark_disconnected(); } } else { self.unidentified_backlog.insert(session); } } Poll::Ready(ret) => match 
ret { Ok(()) => { let UnidentifiedSession { event, .. } = session; let new_session_event = PeerManagerEvent::NewSession { pid: event.pubkey.peer_id(), pubkey: event.pubkey, ctx: event.ctx, }; // TODO: Remove duplicate diag code #[cfg(feature = "diagnostic")] let diag_event: Option< diagnostic::DiagnosticEvent, > = From::from(&new_session_event); self.process_event(new_session_event); #[cfg(feature = "diagnostic")] if let (Some(hook), Some(event)) = (self.diagnostic_hook.as_ref(), diag_event) { hook(event) } } Err(err) => { warn!( "reject peer {:?} due to identification failed: {}", peer_id, err ); self.disconnect_session(session.event.ctx.id); if let Some(peer) = self.inner.peer(&peer_id) { peer.mark_disconnected(); } } }, } } common_apm::metrics::network::NETWORK_UNIDENTIFIED_CONNECTIONS .set(self.unidentified_backlog.len() as i64); // Process manager events loop { let event_rx = &mut self.as_mut().event_rx; futures::pin_mut!(event_rx); // service ready in common let event = crate::service_ready!("peer manager", event_rx.poll_next(ctx)); log::debug!("network: {:?}: event {}", self.peer_id, event); #[cfg(feature = "diagnostic")] let diag_event: Option = From::from(&event); self.process_event(event); #[cfg(feature = "diagnostic")] if let (Some(hook), Some(event)) = (self.diagnostic_hook.as_ref(), diag_event) { hook(event) } } // Check connecting timeout let timeout_reason = format!("exceed {} seconds", MAX_CONNECTING_TIMEOUT.as_secs()); let timeouted_mutiaddrs = { let connecting_attempts = self.connecting.iter(); let timeouted_attempts = connecting_attempts.filter_map(|attempt| { if !attempt.is_timeout() { return None; } Some(attempt.multiaddrs.iter().cloned().collect::>()) }); timeouted_attempts.flatten().collect::>() }; if !timeouted_mutiaddrs.is_empty() { log::info!("timeouted connecting found: {:?}", timeouted_mutiaddrs); } for peer_multiaddr in timeouted_mutiaddrs { self.connect_failed( Into::::into(peer_multiaddr), 
ConnectionErrorKind::TimeOut(timeout_reason.clone()), ) } common_apm::metrics::network::NETWORK_OUTBOUND_CONNECTING_PEERS .set(self.connecting.len() as i64); // Check connecting count let connected_count = self.inner.connected(); let outbound_count = self.inner.outbound_count(); let connection_attempts = outbound_count + self.connecting.len(); let max_connection_attempts = self.config.outbound_conn_limit + MAX_CONNECTING_MARGIN; if connected_count < self.config.max_connections && outbound_count < self.config.outbound_conn_limit && connection_attempts < max_connection_attempts { let filter_good_peer = |peer: &ArcPeer| -> bool { if let Some(trust_metric) = peer.trust_metric() { trust_metric.trust_score() > GOOD_TRUST_SCORE } else { false } }; let just_enough = |_: &ArcPeer| -> bool { true }; let remain_count = max_connection_attempts - connection_attempts; let mut connectable_peers = self.inner.connectable_peers(remain_count, filter_good_peer); if connectable_peers.is_empty() { connectable_peers = self.inner.connectable_peers(remain_count, just_enough); } let candidate_count = connectable_peers.len(); debug!( "network: {:?}: connections not fullfill, {} candidate peers found", self.peer_id, candidate_count ); if !connectable_peers.is_empty() { self.connect_peers(connectable_peers); common_apm::metrics::network::NETWORK_OUTBOUND_CONNECTING_PEERS .set(self.connecting.len() as i64); } } Poll::Pending } } ================================================ FILE: core/network/src/peer_manager/peer.rs ================================================ use super::{time, PeerAddrSet, Retry, Tags, TrustMetric, MAX_RETRY_COUNT}; use std::{ borrow::Borrow, fmt, hash::{Hash, Hasher}, ops::Deref, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, }, time::{Duration, SystemTime, UNIX_EPOCH}, }; use derive_more::Display; use parking_lot::RwLock; use protocol::traits::PeerTag; use tentacle::{ secio::{PeerId, PublicKey}, SessionId, }; use crate::error::ErrorKind; #[derive(Debug, Eq, 
PartialEq, Ord, PartialOrd, Clone, Copy, Display)]
#[repr(usize)]
pub enum Connectedness {
    /// We have not connected to this peer yet.
    #[display(fmt = "not connected")]
    NotConnected = 0,
    /// Peer is known and considered reachable.
    #[display(fmt = "can connect")]
    CanConnect = 1,
    /// A session to this peer is currently established.
    #[display(fmt = "connected")]
    Connected = 2,
    /// Peer was given up (retry run out, protocol failure, ...).
    #[display(fmt = "unconnectable")]
    Unconnectable = 3,
    /// An outbound connection attempt is in flight.
    #[display(fmt = "connecting")]
    Connecting = 4,
}

// NOTE(review): the extracted text lost generic parameters here; `<usize>`
// is restored from the matching `From<Connectedness> for usize` impl and
// the conversion tests below — confirm against upstream.
impl From<usize> for Connectedness {
    fn from(src: usize) -> Connectedness {
        use self::Connectedness::{CanConnect, Connected, Connecting, NotConnected, Unconnectable};

        match src {
            0 => NotConnected,
            1 => CanConnect,
            2 => Connected,
            3 => Unconnectable,
            4 => Connecting,
            // Unknown discriminants (e.g. from a stale peer dat file)
            // degrade to the safe default instead of panicking.
            _ => NotConnected,
        }
    }
}

impl From<Connectedness> for usize {
    fn from(src: Connectedness) -> usize {
        src as usize
    }
}

/// A known network peer and its connection bookkeeping.
///
/// All mutable state uses interior mutability (locks / atomics) so a
/// `Peer` can be shared behind `ArcPeer` without external locking.
#[derive(Debug)]
pub struct Peer {
    pub id: PeerId,
    pub multiaddrs: PeerAddrSet,
    pub retry: Retry,
    pub tags: Tags,
    // NOTE(review): generic parameters restored from the accessor
    // signatures (`owned_pubkey` clones an `Option<PublicKey>`,
    // `trust_metric` clones an `Option<TrustMetric>`) — confirm upstream.
    pubkey: RwLock<Option<PublicKey>>,
    trust_metric: RwLock<Option<TrustMetric>>,
    // `Connectedness` stored as its `usize` repr.
    connectedness: AtomicUsize,
    // 0 means "no session"; code elsewhere treats session id 0 as
    // not-connected.
    session_id: AtomicUsize,
    // Unix timestamps in seconds.
    connected_at: AtomicU64,
    disconnected_at: AtomicU64,
    // Duration of the latest session, in seconds.
    alive: AtomicU64,
}

impl Peer {
    /// Create a peer in `NotConnected` state with no known multiaddrs,
    /// no public key and no trust metric.
    pub fn new(peer_id: PeerId) -> Self {
        Peer {
            id: peer_id.clone(),
            multiaddrs: PeerAddrSet::new(peer_id),
            retry: Retry::new(MAX_RETRY_COUNT),
            tags: Tags::default(),
            pubkey: RwLock::new(None),
            trust_metric: RwLock::new(None),
            connectedness: AtomicUsize::new(Connectedness::NotConnected as usize),
            session_id: AtomicUsize::new(0),
            connected_at: AtomicU64::new(0),
            disconnected_at: AtomicU64::new(0),
            alive: AtomicU64::new(0),
        }
    }

    /// Build a peer from its public key; the peer id is derived from the
    /// same key, so `set_pubkey` cannot reject it here.
    pub fn from_pubkey(pubkey: PublicKey) -> Result<Peer, ErrorKind> {
        let peer = Peer::new(pubkey.peer_id());
        peer.set_pubkey(pubkey)?;
        Ok(peer)
    }

    pub fn owned_id(&self) -> PeerId {
        self.id.to_owned()
    }

    pub fn has_pubkey(&self) -> bool {
        self.pubkey.read().is_some()
    }

    pub fn owned_pubkey(&self) -> Option<PublicKey> {
        self.pubkey.read().clone()
    }

    /// Set the peer's public key; rejects a key whose derived peer id
    /// does not match `self.id`.
    pub fn set_pubkey(&self, pubkey: PublicKey) -> Result<(), ErrorKind> {
        if pubkey.peer_id() != self.id {
            Err(ErrorKind::PublicKeyNotMatchId {
                pubkey,
                id: self.id.clone(),
            })
        } else {
            *self.pubkey.write() = Some(pubkey);
            Ok(())
        }
    }

    pub fn trust_metric(&self) ->
Option { self.trust_metric.read().clone() } pub fn set_trust_metric(&self, metric: TrustMetric) { *self.trust_metric.write() = Some(metric); } #[cfg(test)] pub fn remove_trust_metric(&self) { *self.trust_metric.write() = None; } pub fn connectedness(&self) -> Connectedness { Connectedness::from(self.connectedness.load(Ordering::SeqCst)) } pub fn set_connectedness(&self, flag: Connectedness) { self.connectedness .store(usize::from(flag), Ordering::SeqCst); } pub fn set_session_id(&self, sid: SessionId) { self.session_id.store(sid.value(), Ordering::SeqCst); } pub fn session_id(&self) -> SessionId { self.session_id.load(Ordering::SeqCst).into() } pub fn connected_at(&self) -> u64 { self.connected_at.load(Ordering::SeqCst) } pub(super) fn set_connected_at(&self, at: u64) { self.connected_at.store(at, Ordering::SeqCst); } pub fn disconnected_at(&self) -> u64 { self.disconnected_at.load(Ordering::SeqCst) } pub(super) fn set_disconnected_at(&self, at: u64) { self.disconnected_at.store(at, Ordering::SeqCst); } pub fn alive(&self) -> u64 { self.alive.load(Ordering::SeqCst) } pub fn update_alive(&self) { let connected_at = UNIX_EPOCH + Duration::from_secs(self.connected_at.load(Ordering::SeqCst)); let alive = time::duration_since(SystemTime::now(), connected_at).as_secs(); self.alive.store(alive, Ordering::SeqCst); } pub(super) fn set_alive(&self, live: u64) { self.alive.store(live, Ordering::SeqCst); } pub fn mark_connected(&self, sid: SessionId) { self.set_connectedness(Connectedness::Connected); self.set_session_id(sid); self.retry.reset(); self.update_connected(); } pub fn mark_disconnected(&self) { self.set_connectedness(Connectedness::CanConnect); self.set_session_id(0.into()); self.update_disconnected(); self.update_alive(); } pub fn banned(&self) -> bool { if let Some(until) = self.tags.get_banned_until() { if time::now() < until { return true; } self.tags.remove(&PeerTag::ban_key()); if let Some(trust_metric) = self.trust_metric() { // TODO: Reset just in case, may 
remove in // the future. trust_metric.reset_history(); } } false } fn update_connected(&self) { self.connected_at.store(time::now(), Ordering::SeqCst); } fn update_disconnected(&self) { self.disconnected_at.store(time::now(), Ordering::SeqCst); } } impl fmt::Display for Peer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{:?} multiaddr {:?} tags {:?} last connected at {} alive {} retry {} current {}", self.id, self.multiaddrs.all(), self.tags, self.connected_at.load(Ordering::SeqCst), self.alive.load(Ordering::SeqCst), self.retry.count(), Connectedness::from(self.connectedness.load(Ordering::SeqCst)) ) } } #[derive(Debug, Display, Clone)] #[display(fmt = "{}", _0)] pub struct ArcPeer(Arc); impl ArcPeer { pub fn new(peer_id: PeerId) -> Self { ArcPeer(Arc::new(Peer::new(peer_id))) } pub fn from_pubkey(pubkey: PublicKey) -> Result { Ok(ArcPeer(Arc::new(Peer::from_pubkey(pubkey)?))) } } impl Deref for ArcPeer { type Target = Peer; fn deref(&self) -> &Self::Target { &self.0 } } impl Borrow for ArcPeer { fn borrow(&self) -> &PeerId { &self.id } } impl PartialEq for ArcPeer { fn eq(&self, other: &ArcPeer) -> bool { self.id == other.id } } impl Eq for ArcPeer {} impl Hash for ArcPeer { fn hash(&self, state: &mut H) { self.id.hash(state) } } #[cfg(test)] mod tests { use super::{ArcPeer, Connectedness}; use crate::peer_manager::{time, TrustMetric, TrustMetricConfig}; use tentacle::secio::SecioKeyPair; use std::sync::Arc; #[test] fn should_reset_trust_metric_history_after_unban() { let keypair = SecioKeyPair::secp256k1_generated(); let pubkey = keypair.public_key(); let peer = ArcPeer::from_pubkey(pubkey).expect("make peer"); let peer_trust_config = Arc::new(TrustMetricConfig::default()); let trust_metric = TrustMetric::new(Arc::clone(&peer_trust_config)); peer.set_trust_metric(trust_metric.clone()); for _ in 0..2 { trust_metric.bad_events(10); trust_metric.enter_new_interval(); } assert!(trust_metric.trust_score() < 40, "should lower score"); 
peer.tags.set_ban_until(time::now() - 20);
        assert!(!peer.banned(), "should unban");
        assert_eq!(
            trust_metric.intervals(),
            0,
            "should reset peer trust history"
        );
    }

    #[test]
    fn should_be_able_to_convert_between_connectedness_and_usize() {
        assert_eq!(usize::from(Connectedness::NotConnected), 0usize);
        assert_eq!(usize::from(Connectedness::CanConnect), 1usize);
        assert_eq!(usize::from(Connectedness::Connected), 2usize);
        assert_eq!(usize::from(Connectedness::Unconnectable), 3usize);
        assert_eq!(usize::from(Connectedness::Connecting), 4usize);

        assert_eq!(Connectedness::from(0usize), Connectedness::NotConnected);
        assert_eq!(Connectedness::from(1usize), Connectedness::CanConnect);
        assert_eq!(Connectedness::from(2usize), Connectedness::Connected);
        assert_eq!(Connectedness::from(3usize), Connectedness::Unconnectable);
        assert_eq!(Connectedness::from(4usize), Connectedness::Connecting);
        // Unknown discriminants fall back to NotConnected.
        assert_eq!(Connectedness::from(5usize), Connectedness::NotConnected);
    }
}


================================================
FILE: core/network/src/peer_manager/retry.rs
================================================
use super::{time, BACKOFF_BASE, MAX_RETRY_INTERVAL};

use std::sync::{
    atomic::{AtomicU64, AtomicU8, Ordering},
    Arc,
};
use std::time::Duration;

// Exponential-backoff retry state shared between clones (fields are
// `Arc`ed atomics, so cloning a `Retry` shares the same counters).
//
// NOTE(review): generic parameters (`Arc<AtomicU8>`, `Arc<AtomicU64>`)
// appear to have been stripped by extraction — confirm against upstream.
#[derive(Debug, Clone)]
pub struct Retry {
    // Maximum retry count before `run_out()` reports true.
    max: u8,
    count: Arc,
    // Unix timestamp (seconds) of the next scheduled attempt.
    next_attempt_at: Arc,
}

impl Retry {
    pub fn new(max: u8) -> Self {
        Retry {
            max,
            count: Arc::new(AtomicU8::new(0)),
            next_attempt_at: Arc::new(AtomicU64::new(0)),
        }
    }

    // Bump the retry count and schedule the next attempt at
    // now + BACKOFF_BASE^count seconds, capped at MAX_RETRY_INTERVAL.
    pub fn inc(&self) {
        let count = self.count.fetch_add(1, Ordering::SeqCst).saturating_add(1);

        let mut secs = BACKOFF_BASE.pow(count as u32);
        if secs > MAX_RETRY_INTERVAL {
            secs = MAX_RETRY_INTERVAL;
        }

        let at = time::now().saturating_add(secs);
        self.next_attempt_at.store(at, Ordering::SeqCst);
    }

    // Seconds remaining until the next attempt (0 when already due).
    pub fn eta(&self) -> u64 {
        let next_attempt_at = self.next_attempt_at.load(Ordering::SeqCst);

        next_attempt_at.saturating_sub(time::now())
    }

    // Reset the retry count (the scheduled timestamp is left as-is).
    pub fn reset(&self) {
        self.count.store(0, Ordering::SeqCst);
    }

    pub fn ready(&self) ->
bool { let next_attempt_at = Duration::from_secs(self.next_attempt_at.load(Ordering::SeqCst)); time::now() > next_attempt_at.as_secs() } pub fn count(&self) -> u8 { self.count.load(Ordering::SeqCst) } pub fn next_attempt_at(&self) -> u64 { self.next_attempt_at.load(Ordering::SeqCst) } pub fn run_out(&self) -> bool { self.count() > self.max } // For test and save_restore pub(crate) fn set_next_attempt_at(&self, at: u64) { self.next_attempt_at.store(at, Ordering::SeqCst); } // For test and save_restore pub(crate) fn set(&self, n: u8) { self.count.store(n, Ordering::SeqCst); } } ================================================ FILE: core/network/src/peer_manager/save_restore.rs ================================================ use super::{ArcPeer, Connectedness, PeerMultiaddr}; use std::{ convert::TryFrom, fmt, fs::File, io::{BufReader, Read, Write}, path::{Path, PathBuf}, }; use serde::{de, ser}; use serde_derive::{Deserialize, Serialize}; use tentacle::{ multiaddr::Multiaddr, secio::{PeerId, PublicKey}, }; use crate::error::NetworkError; // TODO: remove skip tag on retry and next_attempt_at // TODO: save multiaddr failure count #[derive(Debug, Serialize, Deserialize)] struct SerdePeer { id: SerdePeerId, pubkey: Option, multiaddrs: Vec, connectedness: usize, #[serde(skip)] retry: u8, #[serde(skip)] next_attempt_at: u64, connected_at: u64, disconnected_at: u64, alive: u64, } impl From for SerdePeer { fn from(peer: ArcPeer) -> SerdePeer { let connectedness = match peer.connectedness() { Connectedness::Unconnectable => Connectedness::Unconnectable, _ => Connectedness::CanConnect, }; SerdePeer { id: SerdePeerId(peer.owned_id()), pubkey: peer.owned_pubkey().map(SerdePubKey), multiaddrs: peer.multiaddrs.all(), connectedness: connectedness as usize, retry: peer.retry.count(), next_attempt_at: peer.retry.next_attempt_at(), connected_at: peer.connected_at(), disconnected_at: peer.disconnected_at(), alive: peer.alive(), } } } impl TryFrom for ArcPeer { type Error = 
NetworkError; fn try_from(serde_peer: SerdePeer) -> Result { let peer_id = serde_peer.id.0; let peer = ArcPeer::new(peer_id.clone()); if let Some(pubkey) = serde_peer.pubkey { peer.set_pubkey(pubkey.0)?; } let multiaddrs = serde_peer .multiaddrs .into_iter() .map(|ma| { // Just ensure that our recovered multiaddr has id let ma: Multiaddr = ma.into(); PeerMultiaddr::new(ma, &peer_id) }) .collect(); peer.multiaddrs.set(multiaddrs); peer.set_connectedness(Connectedness::from(serde_peer.connectedness)); peer.retry.set(serde_peer.retry); peer.retry.set_next_attempt_at(serde_peer.next_attempt_at); peer.set_connected_at(serde_peer.connected_at); peer.set_disconnected_at(serde_peer.disconnected_at); peer.set_alive(serde_peer.alive); Ok(peer) } } // TODO: Async support, right now, it's ok since we only restore/save data once. pub(super) trait SaveRestore: Send + Sync { fn save(&self, peers: Vec) -> Result<(), NetworkError>; fn restore(&self) -> Result, NetworkError>; } #[derive(Clone)] pub(super) struct PeerDatFile { path: PathBuf, } impl PeerDatFile { pub fn new>(path: P) -> Self { PeerDatFile { path: path.as_ref().to_owned(), } } } impl SaveRestore for PeerDatFile { fn save(&self, peers: Vec) -> Result<(), NetworkError> { let mut file = File::create(&self.path)?; let peers_to_save = peers.into_iter().map(SerdePeer::from).collect::>(); let data = bincode::serialize(&peers_to_save)?; file.write_all(data.as_slice())?; Ok(()) } // restore data only happen once during network service starting fn restore(&self) -> Result, NetworkError> { let file = File::open(&self.path)?; let mut buf_reader = BufReader::new(file); let mut data = Vec::new(); buf_reader.read_to_end(&mut data)?; let peers_to_restore: Vec = bincode::deserialize(&data)?; let mut peers = Vec::with_capacity(peers_to_restore.len()); for p in peers_to_restore { if let Ok(p) = ArcPeer::try_from(p) { peers.push(p); } } Ok(peers) } } #[derive(Clone)] pub(super) struct NoPeerDatFile; impl SaveRestore for NoPeerDatFile { fn 
save(&self, _peers: Vec<ArcPeer>) -> Result<(), NetworkError> {
        Ok(())
    }

    fn restore(&self) -> Result<Vec<ArcPeer>, NetworkError> {
        Ok(vec![])
    }
}

/// Serde wrapper encoding a secio `PublicKey` as raw bytes.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SerdePubKey(PublicKey);

impl ser::Serialize for SerdePubKey {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        serializer.serialize_bytes(self.0.clone().encode().as_ref())
    }
}

impl<'de> de::Deserialize<'de> for SerdePubKey {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        struct Visitor;

        impl<'de> de::Visitor<'de> for Visitor {
            type Value = SerdePubKey;

            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
                formatter.write_str("peer pubkey")
            }

            // Some formats deliver bytes as a sequence of u8; collect and
            // delegate to the byte-buffer path.
            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: de::SeqAccess<'de>,
            {
                let mut buf: Vec<u8> = Vec::with_capacity(seq.size_hint().unwrap_or(0));
                while let Some(val) = seq.next_element()? {
                    buf.push(val);
                }
                self.visit_byte_buf(buf)
            }

            fn visit_byte_buf<E: de::Error>(self, v: Vec<u8>) -> Result<Self::Value, E> {
                self.visit_bytes(v.as_slice())
            }

            fn visit_bytes<E: de::Error>(self, v: &[u8]) -> Result<Self::Value, E> {
                PublicKey::decode(v)
                    .ok_or_else(|| de::Error::custom("not valid public key"))
                    .map(SerdePubKey)
            }
        }

        deserializer.deserialize_bytes(Visitor)
    }
}

/// Serde wrapper encoding a `PeerId` as raw bytes.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct SerdePeerId(PeerId);

impl ser::Serialize for SerdePeerId {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        serializer.serialize_bytes(self.0.as_bytes())
    }
}

impl<'de> de::Deserialize<'de> for SerdePeerId {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        struct Visitor;

        impl<'de> de::Visitor<'de> for Visitor {
            type Value = SerdePeerId;

            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
                // NOTE(review): message says "pubkey" but this visitor
                // parses a peer id — looks copy-pasted; kept verbatim to
                // preserve behavior (error text is runtime output).
                formatter.write_str("peer pubkey")
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: de::SeqAccess<'de>,
            {
                let mut buf: Vec<u8> = Vec::with_capacity(seq.size_hint().unwrap_or(0));
                while let Some(val) = seq.next_element()? {
                    buf.push(val);
                }
                self.visit_byte_buf(buf)
            }

            fn visit_byte_buf<E: de::Error>(self, v: Vec<u8>) -> Result<Self::Value, E> {
                self.visit_bytes(v.as_slice())
            }

            fn visit_bytes<E: de::Error>(self, v: &[u8]) -> Result<Self::Value, E> {
                PeerId::from_bytes(v.to_vec())
                    .map_err(|_| de::Error::custom("not valid peer id"))
                    .map(SerdePeerId)
            }
        }

        deserializer.deserialize_bytes(Visitor)
    }
}
================================================ FILE: core/network/src/peer_manager/session_book.rs ================================================
use std::borrow::Borrow;
use std::collections::{HashMap, HashSet};
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;

use derive_more::Display;
use parking_lot::RwLock;
use tentacle::service::SessionType;
use tentacle::SessionId;

use super::{ArcPeer, PeerManagerConfig};
use crate::common::ConnectedAddr;
use crate::config::{
    DEFAULT_INBOUND_CONN_LIMIT, DEFAULT_MAX_CONNECTIONS, DEFAULT_SAME_IP_CONN_LIMIT,
};

#[cfg(test)]
pub use crate::test::mock::SessionContext;
#[cfg(not(test))]
pub use tentacle::context::SessionContext;

type Host = String;
type Count = usize;

/// Reasons a new session may be refused by the book.
#[derive(Debug, Display, PartialEq, Eq)]
pub enum Error {
    #[display(fmt = "reach same ip connections limit")]
    ReachSameIPConnLimit,
    #[display(fmt = "reach inbound connections limit")]
    ReachInboundConnLimit,
    #[display(fmt = "reach outbound connections limit")]
    ReachOutboundConnLimit,
}

/// Connection-limit configuration for the session book.
#[derive(Debug)]
pub struct Config {
    same_ip_conn_limit: usize,
    inbound_conn_limit: usize,
    outbound_conn_limit: usize,
}

impl Default for Config {
    fn default() -> Self {
        Config {
            same_ip_conn_limit: DEFAULT_SAME_IP_CONN_LIMIT,
            inbound_conn_limit: DEFAULT_INBOUND_CONN_LIMIT,
            // Outbound budget is whatever remains of the global cap.
            outbound_conn_limit: DEFAULT_MAX_CONNECTIONS - DEFAULT_INBOUND_CONN_LIMIT,
        }
    }
}

impl From<&PeerManagerConfig> for Config {
    fn from(config: &PeerManagerConfig) -> Config {
        Config {
            same_ip_conn_limit: config.same_ip_conn_limit,
            inbound_conn_limit: config.inbound_conn_limit,
            outbound_conn_limit: config.outbound_conn_limit,
}
    }
}

/// A live connection to a peer plus its bookkeeping state.
#[derive(Debug)]
pub struct Session {
    pub(crate) id: SessionId,
    pub(crate) ctx: Arc<SessionContext>,
    pub(crate) peer: ArcPeer,
    /// Back-pressure flag: a blocked session is excluded from sending.
    blocked: AtomicBool,
    pub(crate) connected_addr: ConnectedAddr,
}

/// Cheaply clonable shared handle to a `Session`; identity is the
/// session id (see `PartialEq`/`Hash`/`Borrow` below).
#[derive(Debug, Clone)]
pub struct ArcSession(Arc<Session>);

impl ArcSession {
    pub fn new(peer: ArcPeer, ctx: Arc<SessionContext>) -> Self {
        let connected_addr = ConnectedAddr::from(&ctx.address);
        let session = Session {
            id: ctx.id,
            ctx,
            peer,
            blocked: AtomicBool::new(false),
            connected_addr,
        };

        ArcSession(Arc::new(session))
    }

    pub fn ty(&self) -> SessionType {
        self.ctx.ty
    }

    pub fn block(&self) {
        self.blocked.store(true, Ordering::SeqCst);
    }

    pub fn is_blocked(&self) -> bool {
        self.blocked.load(Ordering::SeqCst)
    }

    pub fn unblock(&self) {
        self.blocked.store(false, Ordering::SeqCst);
    }
}

// Allows HashSet lookup by &SessionId without building an ArcSession.
impl Borrow<SessionId> for ArcSession {
    fn borrow(&self) -> &SessionId {
        &self.id
    }
}

impl PartialEq for ArcSession {
    fn eq(&self, other: &ArcSession) -> bool {
        self.id == other.id
    }
}

impl Eq for ArcSession {}

impl Hash for ArcSession {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state)
    }
}

impl Deref for ArcSession {
    type Target = Session;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// Proof-of-admission newtype: only sessions that passed `acceptable()`
/// should be wrapped and handed to `insert()`.
pub struct AcceptableSession(pub ArcSession);

/// Registry of live sessions with per-host and per-direction limits.
pub struct SessionBook {
    config: Config,
    /// Connection count per remote host (IP), for the same-IP limit.
    hosts: RwLock<HashMap<Host, Count>>,
    sessions: RwLock<HashSet<ArcSession>>,
    inbound_count: AtomicUsize,
    outbound_count: AtomicUsize,
}

impl Default for SessionBook {
    fn default() -> SessionBook {
        let config = Config::default();
        SessionBook::new(config)
    }
}

impl SessionBook {
    pub fn new(config: Config) -> Self {
        SessionBook {
            config,
            hosts: Default::default(),
            sessions: Default::default(),
            inbound_count: AtomicUsize::new(0),
            outbound_count: AtomicUsize::new(0),
        }
    }

    pub fn len(&self) -> usize {
        self.sessions.read().len()
    }

    pub fn get(&self, sid: &SessionId) -> Option<ArcSession> {
        self.sessions.read().get(sid).cloned()
    }

    pub fn all(&self) -> Vec<ArcSession> {
        self.sessions.read().iter().cloned().collect()
    }

    /// Run `f` over an iterator of all sessions while holding the read
    /// lock — avoids cloning the whole set for simple scans.
    pub fn iter_fn<F, R>(&self, f: F) -> R
    where
        F: for<'a> FnOnce(&mut dyn Iterator<Item = &'a ArcSession>) -> R,
    {
        let sessions = self.sessions.read();
        f(&mut sessions.iter())
    }

    pub fn inbound_count(&self) -> usize {
        self.inbound_count.load(Ordering::SeqCst)
    }

    pub fn outbound_count(&self) -> usize {
        self.outbound_count.load(Ordering::SeqCst)
    }

    /// Check a candidate session against the same-IP and directional
    /// connection limits.
    pub fn acceptable(&self, session: &ArcSession) -> Result<(), self::Error> {
        let session_host = &session.connected_addr.host;
        let host_count = {
            let hosts = self.hosts.read();
            hosts.get(session_host).cloned().unwrap_or(0)
        };
        // The MAX check guards the `+ 1` below against overflow.
        if host_count == usize::MAX || host_count + 1 > self.config.same_ip_conn_limit {
            return Err(self::Error::ReachSameIPConnLimit);
        }

        match session.ty() {
            SessionType::Inbound if self.inbound_count() >= self.config.inbound_conn_limit => {
                Err(self::Error::ReachInboundConnLimit)
            }
            SessionType::Outbound if self.outbound_count() >= self.config.outbound_conn_limit => {
                Err(self::Error::ReachOutboundConnLimit)
            }
            _ => Ok(()),
        }
    }

    /// Register an admitted session, updating host and direction counts.
    pub fn insert(&self, AcceptableSession(session): AcceptableSession) {
        let session_host = &session.connected_addr.host;

        let mut hosts = self.hosts.write();
        hosts
            .entry(session_host.to_owned())
            .and_modify(|c| *c += 1)
            .or_insert(1);

        match session.ty() {
            SessionType::Inbound => self.inbound_count.fetch_add(1, Ordering::SeqCst),
            SessionType::Outbound => self.outbound_count.fetch_add(1, Ordering::SeqCst),
        };

        self.sessions.write().insert(session);
    }

    /// Drop a session by id, decrementing (or removing) its host entry
    /// and the matching direction counter. Returns the removed session.
    pub fn remove(&self, sid: &SessionId) -> Option<ArcSession> {
        let session = self.sessions.write().take(sid);

        if let Some(connected_addr) = session.as_ref().map(|s| &s.connected_addr) {
            let session_host = &connected_addr.host;

            let mut hosts = self.hosts.write();
            if hosts.get(session_host) == Some(&1) {
                hosts.remove(session_host);
            } else if let Some(count) = hosts.get_mut(session_host) {
                *count -= 1;
            }
        }

        if let Some(ty) = session.as_ref().map(|s| s.ty()) {
            match ty {
                SessionType::Inbound => self.inbound_count.fetch_sub(1, Ordering::SeqCst),
                SessionType::Outbound => self.outbound_count.fetch_sub(1, Ordering::SeqCst),
            };
        }

        session
    }
}

#[cfg(test)]
mod tests {
    use std::convert::TryInto;
    use std::sync::Arc;

    use
tentacle::multiaddr::Multiaddr; use tentacle::secio::{PeerId, SecioKeyPair}; use tentacle::service::SessionType; use tentacle::SessionId; use super::{AcceptableSession, ArcSession, Config, Error, SessionBook}; use crate::peer_manager::{ArcPeer, PeerMultiaddr}; use crate::test::mock::SessionContext; use crate::traits::MultiaddrExt; fn make_multiaddr(port: u16, id: Option) -> Multiaddr { let mut multiaddr = format!("/ip4/127.0.0.1/tcp/{}", port) .parse::() .expect("peer multiaddr"); if let Some(id) = id { multiaddr.push_id(id); } multiaddr } fn make_peer_multiaddr(port: u16, id: PeerId) -> PeerMultiaddr { make_multiaddr(port, Some(id)) .try_into() .expect("try into peer multiaddr") } fn make_peer(port: u16) -> ArcPeer { let keypair = SecioKeyPair::secp256k1_generated(); let pubkey = keypair.public_key(); let peer_id = pubkey.peer_id(); let peer = ArcPeer::from_pubkey(pubkey).expect("make peer"); let multiaddr = make_peer_multiaddr(port, peer_id); peer.multiaddrs.set(vec![multiaddr]); peer } fn make_session(port: u16, sid: SessionId, ty: SessionType) -> ArcSession { let peer = make_peer(port); let multiaddr = peer.multiaddrs.all_raw().pop().unwrap(); let ctx = SessionContext::make(sid, multiaddr, ty, peer.owned_pubkey().unwrap()); ArcSession::new(peer, Arc::new(ctx)) } #[test] fn should_reject_session_when_reach_same_ip_conn_limit() { let config = Config { same_ip_conn_limit: 1, inbound_conn_limit: 20, outbound_conn_limit: 20, }; let book = SessionBook::new(config); let session = make_session(100, 1.into(), SessionType::Inbound); assert!(book.acceptable(&session).is_ok()); book.insert(AcceptableSession(session.clone())); assert_eq!( book.hosts.read().get(&session.connected_addr.host), Some(&1) ); let same_ip_session = make_session(101, 2.into(), SessionType::Inbound); assert_eq!( book.acceptable(&same_ip_session), Err(Error::ReachSameIPConnLimit) ); } #[test] fn should_reduce_host_count() { let config = Config { same_ip_conn_limit: 5, inbound_conn_limit: 20, 
outbound_conn_limit: 20, }; let book = SessionBook::new(config); let session = make_session(100, 1.into(), SessionType::Inbound); assert!(book.acceptable(&session).is_ok()); book.insert(AcceptableSession(session.clone())); assert_eq!( book.hosts.read().get(&session.connected_addr.host), Some(&1) ); book.remove(&(1.into())); assert_eq!(book.hosts.read().get(&session.connected_addr.host), None); } #[test] fn should_reject_inbound_session_when_reach_inbound_limit() { let config = Config { same_ip_conn_limit: 5, inbound_conn_limit: 1, outbound_conn_limit: 20, }; let book = SessionBook::new(config); let session = make_session(100, 1.into(), SessionType::Inbound); assert!(book.acceptable(&session).is_ok()); book.insert(AcceptableSession(session.clone())); assert_eq!( book.hosts.read().get(&session.connected_addr.host), Some(&1) ); assert_eq!(book.inbound_count(), 1); let same_ip_session = make_session(101, 2.into(), SessionType::Inbound); assert_eq!( book.acceptable(&same_ip_session), Err(Error::ReachInboundConnLimit) ); } #[test] fn should_reject_outbound_session_when_reach_outbound_limit() { let config = Config { same_ip_conn_limit: 5, inbound_conn_limit: 10, outbound_conn_limit: 1, }; let book = SessionBook::new(config); let session = make_session(100, 1.into(), SessionType::Outbound); assert!(book.acceptable(&session).is_ok()); book.insert(AcceptableSession(session.clone())); assert_eq!( book.hosts.read().get(&session.connected_addr.host), Some(&1) ); assert_eq!(book.outbound_count(), 1); let same_ip_session = make_session(101, 2.into(), SessionType::Outbound); assert_eq!( book.acceptable(&same_ip_session), Err(Error::ReachOutboundConnLimit) ); } } ================================================ FILE: core/network/src/peer_manager/shared.rs ================================================ use std::sync::Arc; use log::debug; use protocol::traits::PeerTag; use tentacle::secio::PeerId; use tentacle::SessionId; use super::{Connectedness, Inner}; use 
crate::common::ConnectedAddr; use crate::peer_manager::SessionBook; use crate::traits::SharedSessionBook; use crate::NetworkConfig; pub struct Config { pub max_stream_window_size: usize, pub write_timeout: u64, } // TODO: checkout max_frame_length impl From<&NetworkConfig> for Config { fn from(config: &NetworkConfig) -> Self { Config { write_timeout: config.write_timeout, max_stream_window_size: config.max_frame_length, } } } #[derive(Clone)] pub struct SharedSessions { inner: Arc, config: Arc, } impl SharedSessions { pub(super) fn new(inner: Arc, config: Config) -> Self { SharedSessions { inner, config: Arc::new(config), } } fn sessions(&self) -> &SessionBook { &self.inner.sessions } } impl SharedSessionBook for SharedSessions { fn all_sendable(&self) -> Vec { self.sessions().iter_fn(|iter| { iter.filter_map(|s| if !s.is_blocked() { Some(s.id) } else { None }) .collect() }) } fn all_blocked(&self) -> Vec { self.sessions().iter_fn(|iter| { iter.filter_map(|s| if s.is_blocked() { Some(s.id) } else { None }) .collect() }) } fn refresh_blocked(&self) { let all_blocked = self .sessions() .iter_fn(|iter| iter.filter(|s| s.is_blocked()).cloned().collect::>()); for session in all_blocked { let pending_data_size = session.ctx.pending_data_size(); // FIXME: multi streams let estimated_time = (pending_data_size / self.config.max_stream_window_size) as u64; if estimated_time < self.config.write_timeout { debug!("unblock session {}", session.id); session.unblock() } } } fn peers(&self, pids: Vec) -> (Vec, Vec) { let mut connected = Vec::new(); let mut unconnected = Vec::new(); for peer_id in pids { match self.inner.peer(&peer_id) { Some(peer) if peer.connectedness() == Connectedness::Connected => { connected.push(peer.session_id()) } _ => unconnected.push(peer_id), } } (connected, unconnected) } fn all(&self) -> Vec { self.sessions().iter_fn(|iter| iter.map(|s| s.id).collect()) } fn connected_addr(&self, sid: SessionId) -> Option { self.sessions() .get(&sid) .map(|s| 
s.connected_addr.to_owned())
    }

    fn pending_data_size(&self, sid: SessionId) -> usize {
        self.sessions()
            .get(&sid)
            .map(|s| s.ctx.pending_data_size())
            // Unknown session id means nothing pending.
            .unwrap_or(0)
    }

    /// Ids of connected peers tagged `AlwaysAllow`.
    fn allowlist(&self) -> Vec<PeerId> {
        self.sessions().iter_fn(|iter| {
            iter.filter_map(|s| {
                if s.peer.tags.contains(&PeerTag::AlwaysAllow) {
                    Some(s.peer.id.to_owned())
                } else {
                    None
                }
            })
            .collect()
        })
    }

    fn len(&self) -> usize {
        self.sessions().len()
    }
}

#[cfg(test)]
mod tests {
    use super::{Config, SharedSessionBook, SharedSessions};
    use crate::peer_manager::{Inner, SessionBook};

    use tentacle::secio::SecioKeyPair;

    use std::sync::Arc;

    #[test]
    fn should_return_unconnected_peer_ids() {
        let sess_conf = Config {
            max_stream_window_size: 10,
            write_timeout: 10,
        };
        let keypair = SecioKeyPair::secp256k1_generated();
        let pubkey = keypair.public_key();
        let self_peer_id = pubkey.peer_id();
        let inner = Arc::new(Inner::new(self_peer_id, SessionBook::default()));
        let sessions = SharedSessions::new(Arc::clone(&inner), sess_conf);

        let keypair = SecioKeyPair::secp256k1_generated();
        let pubkey = keypair.public_key();
        let peer_id = pubkey.peer_id();
        assert!(inner.peer(&peer_id).is_none(), "should not be registered");

        let (_, unconnected) = sessions.peers(vec![peer_id.clone()]);
        assert!(unconnected.contains(&peer_id));
    }
}
================================================ FILE: core/network/src/peer_manager/tags.rs ================================================
use super::time;
use crate::error::NetworkError;

use derive_more::Display;
use parking_lot::RwLock;
use protocol::traits::PeerTag;

use std::{collections::HashSet, time::Duration};

/// Errors from tag mutation.
#[derive(Debug, Display, PartialEq, Eq)]
pub enum TagError {
    #[display(fmt = "cannot ban always allowed or consensus peer")]
    AlwaysAllow,
}

impl std::error::Error for TagError {}

impl From<TagError> for NetworkError {
    fn from(err: TagError) -> NetworkError {
        NetworkError::Internal(Box::new(err))
    }
}

/// Thread-safe set of `PeerTag`s attached to a peer.
#[derive(Debug)]
pub struct Tags(RwLock<HashSet<PeerTag>>);

impl Default for Tags {
    fn default() -> Self {
        Tags(Default::default())
    }
}

impl Tags {
    /// Unix timestamp until which the peer is banned, if a ban tag exists.
    pub fn get_banned_until(&self) -> Option<u64> {
        // Scoped read so the lock is released before matching.
        let opt_banned = { self.0.read().get(&PeerTag::ban_key()).cloned() };

        if let Some(PeerTag::Ban { until }) = opt_banned {
            Some(until)
        } else {
            None
        }
    }

    /// Ban the peer for `timeout` starting now; refused for consensus
    /// or always-allowed peers (see `insert`).
    pub fn insert_ban(&self, timeout: Duration) -> Result<(), TagError> {
        let until = Duration::from_secs(time::now()) + timeout;
        self.insert(PeerTag::ban(until.as_secs()))
    }

    #[cfg(test)]
    pub fn set_ban_until(&self, until: u64) {
        self.0.write().insert(PeerTag::ban(until));
    }

    pub fn insert(&self, tag: PeerTag) -> Result<(), TagError> {
        // Protected peers can never be banned.
        if let PeerTag::Ban { .. } = tag {
            if self.contains(&PeerTag::Consensus) || self.contains(&PeerTag::AlwaysAllow) {
                return Err(TagError::AlwaysAllow);
            }
        }

        self.0.write().insert(tag);
        Ok(())
    }

    pub fn remove(&self, tag: &PeerTag) {
        self.0.write().remove(tag);
    }

    pub fn contains(&self, tag: &PeerTag) -> bool {
        self.0.read().contains(tag)
    }
}
================================================ FILE: core/network/src/peer_manager/test_manager.rs ================================================
#![allow(clippy::needless_collect)]

use super::{
    time, ArcPeer, Connectedness, ConnectingAttempt, Inner, MisbehaviorKind, PeerManager,
    PeerManagerConfig, PeerMultiaddr, TrustMetric, TrustMetricConfig, GOOD_TRUST_SCORE,
    MAX_CONNECTING_MARGIN, MAX_CONNECTING_TIMEOUT, MAX_RANDOM_NEXT_RETRY, MAX_RETRY_COUNT,
    REPEATED_CONNECTION_TIMEOUT, SAME_IP_LIMIT_BAN, SHORT_ALIVE_SESSION,
};
use crate::{
    common::ConnectedAddr,
    event::{
        ConnectionErrorKind, ConnectionEvent, ConnectionType, PeerManagerEvent, SessionErrorKind,
    },
    test::mock::SessionContext,
    traits::MultiaddrExt,
};

use futures::{
    channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender},
    StreamExt,
};
use protocol::traits::{PeerTag, TrustFeedback};
use tentacle::{
    multiaddr::Multiaddr,
    secio::{PeerId, PublicKey, SecioKeyPair},
    service::SessionType,
    SessionId,
};

use std::{
    borrow::Cow,
    collections::HashSet,
    convert::TryInto,
    future::Future,
    pin::Pin,
    sync::Arc,
task::{Context, Poll}, time::Duration, }; fn make_multiaddr(port: u16, id: Option) -> Multiaddr { let mut multiaddr = format!("/ip4/127.0.0.1/tcp/{}", port) .parse::() .expect("peer multiaddr"); if let Some(id) = id { multiaddr.push_id(id); } multiaddr } fn make_peer_multiaddr(port: u16, id: PeerId) -> PeerMultiaddr { make_multiaddr(port, Some(id)) .try_into() .expect("try into peer multiaddr") } fn make_peer(port: u16) -> ArcPeer { let keypair = SecioKeyPair::secp256k1_generated(); let pubkey = keypair.public_key(); let peer_id = pubkey.peer_id(); let peer = ArcPeer::from_pubkey(pubkey).expect("make peer"); let multiaddr = make_peer_multiaddr(port, peer_id); peer.multiaddrs.set(vec![multiaddr]); peer } fn make_bootstraps(num: usize) -> Vec { let mut init_port = 5000; (0..num) .map(|_| { let peer = make_peer(init_port); init_port += 1; peer }) .collect() } struct MockManager { event_tx: UnboundedSender, inner: PeerManager, } impl MockManager { pub fn new(inner: PeerManager, event_tx: UnboundedSender) -> Self { MockManager { event_tx, inner } } pub async fn poll_event(&mut self, event: PeerManagerEvent) { self.event_tx.unbounded_send(event).expect("send event"); self.await } pub async fn poll(&mut self) { self.await } pub fn config(&self) -> PeerManagerConfig { self.inner.config() } pub fn connecting(&self) -> &HashSet { &self.inner.connecting } pub fn connecting_mut(&mut self) -> &mut HashSet { &mut self.inner.connecting } pub fn core_inner(&self) -> Arc { self.inner.inner() } } impl Future for MockManager { type Output = (); fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let _ = Future::poll(Pin::new(&mut self.as_mut().inner), ctx); Poll::Ready(()) } } fn make_manager( bootstrap_num: usize, max_connections: usize, ) -> (MockManager, UnboundedReceiver) { let manager_pubkey = make_pubkey(); let manager_id = manager_pubkey.peer_id(); let bootstraps = make_bootstraps(bootstrap_num); let mut peer_dat_file = std::env::temp_dir(); 
peer_dat_file.push("peer.dat"); let peer_trust_config = Arc::new(TrustMetricConfig::default()); let peer_fatal_ban = Duration::from_secs(50); let peer_soft_ban = Duration::from_secs(10); let inbound_conn_limit = max_connections / 2; let config = PeerManagerConfig { our_id: manager_id, pubkey: manager_pubkey, bootstraps, allowlist: Default::default(), allowlist_only: false, peer_trust_config, peer_fatal_ban, peer_soft_ban, max_connections, same_ip_conn_limit: max_connections, inbound_conn_limit, outbound_conn_limit: max_connections - inbound_conn_limit, routine_interval: Duration::from_secs(10), peer_dat_file, }; let (conn_tx, conn_rx) = unbounded(); let (mgr_tx, mgr_rx) = unbounded(); let manager = PeerManager::new(config, mgr_rx, conn_tx); (MockManager::new(manager, mgr_tx), conn_rx) } fn make_pubkey() -> PublicKey { let keypair = SecioKeyPair::secp256k1_generated(); keypair.public_key() } async fn make_sessions( mgr: &mut MockManager, num: u16, init_port: u16, sess_ty: SessionType, ) -> Vec { let mut next_sid = 1; let mut peers = Vec::with_capacity(num as usize); let inbound_limit = mgr.config().inbound_conn_limit; let outbound_limit = mgr.config().max_connections - inbound_limit; let inner = mgr.core_inner(); for n in (0..num).into_iter() { let remote_pubkey = make_pubkey(); let remote_pid = remote_pubkey.peer_id(); let remote_addr = make_multiaddr(init_port + n, Some(remote_pid.clone())); let ty = if sess_ty == SessionType::Outbound && inner.outbound_count() == outbound_limit { // Switch to create inbound session SessionType::Inbound } else { sess_ty }; let sess_ctx = SessionContext::make( SessionId::new(next_sid), remote_addr.clone(), ty, remote_pubkey.clone(), ); next_sid += 1; let new_session = PeerManagerEvent::NewSession { pid: remote_pid.clone(), pubkey: remote_pubkey, ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; peers.push(inner.peer(&remote_pid).expect("make peer session")); } assert_eq!(inner.connected(), num as usize, "make some 
sessions"); peers } #[tokio::test] async fn should_accept_new_peer_inbound_connection_on_new_session() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_pubkey = make_pubkey(); let remote_peer_id = remote_pubkey.peer_id(); let remote_addr = make_multiaddr(6000, Some(remote_pubkey.peer_id())); let sess_ctx = SessionContext::make( SessionId::new(1), remote_addr.clone(), SessionType::Inbound, remote_pubkey.clone(), ); let new_session = PeerManagerEvent::NewSession { pid: remote_peer_id.clone(), pubkey: remote_pubkey.clone(), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 1, "should have one without bootstrap"); let saved_peer = inner.peer(&remote_peer_id).expect("should save peer"); assert_eq!(saved_peer.session_id(), 1.into()); assert!(saved_peer.has_pubkey(), "should have public key"); assert_eq!(saved_peer.connectedness(), Connectedness::Connected); assert_eq!(saved_peer.retry.count(), 0, "should reset retry"); let saved_session = inner.session(1.into()).expect( "should save session", ); assert_eq!(saved_session.peer.id, remote_pubkey.peer_id()); assert!(!saved_session.is_blocked()); assert_eq!( saved_session.connected_addr, ConnectedAddr::from(&remote_addr) ); } #[tokio::test] async fn should_accept_outbound_connection_and_remove_mached_connecting_on_new_session() { let (mut mgr, _conn_rx) = make_manager(0, 20); let test_peer = make_peer(9527); let test_multiaddr = test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"); let target_attempt = ConnectingAttempt::new(test_peer.clone()); let inner = mgr.core_inner(); assert_eq!(inner.connected(), 0, "should have zero connected"); mgr.connecting_mut().insert(target_attempt); assert_eq!( mgr.connecting().len(), 1, "should have one connecting attempt" ); let sess_ctx = SessionContext::make( SessionId::new(1), test_multiaddr.clone(), SessionType::Outbound, test_peer.owned_pubkey().expect("pubkey"), ); let new_session = 
PeerManagerEvent::NewSession { pid: test_peer.owned_id(), pubkey: test_peer.owned_pubkey().expect("pubkey"), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; assert_eq!( mgr.connecting().len(), 0, "should have 0 connecting attempt" ); assert_eq!(inner.connected(), 1, "should have 1 connected"); assert!(inner.peer(&test_peer.id).is_some(), "should match peer"); } #[tokio::test] async fn should_set_matched_peer_pubkey_on_new_session() { let (mut mgr, _conn_rx) = make_manager(0, 2); let inner = mgr.core_inner(); let test_pubkey = make_pubkey(); let test_peer = ArcPeer::new(test_pubkey.peer_id()); inner.add_peer(test_peer.clone()); let sess_ctx = SessionContext::make( SessionId::new(1), make_multiaddr(9527, None), SessionType::Outbound, test_pubkey.clone(), ); let new_session = PeerManagerEvent::NewSession { pid: test_pubkey.peer_id(), pubkey: test_pubkey.clone(), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 1, "should one connection"); assert_eq!( test_peer.owned_pubkey(), Some(test_pubkey), "should set peer pubkey" ); } #[tokio::test] async fn should_reset_outbound_peer_multiaddr_failure_count_on_new_session() { let (mut mgr, _conn_rx) = make_manager(0, 2); let inner = mgr.core_inner(); let test_peer = make_peer(9527); inner.add_peer(test_peer.clone()); let test_multiaddr = test_peer.multiaddrs.all().pop().expect("test multiaddr"); test_peer.multiaddrs.inc_failure(&test_multiaddr); assert_eq!( test_peer.multiaddrs.failure(&test_multiaddr), Some(1), "should have one failure" ); let sess_ctx = SessionContext::make( SessionId::new(1), make_multiaddr(9527, None), SessionType::Outbound, test_peer.owned_pubkey().expect("pubkey"), ); let new_session = PeerManagerEvent::NewSession { pid: test_peer.owned_id(), pubkey: test_peer.owned_pubkey().expect("pubkey"), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 1, "should 
one connection"); assert_eq!( test_peer.multiaddrs.failure(&test_multiaddr), Some(0), "should reset matched outbound multiaddr's failure" ); } #[tokio::test] async fn should_ignore_inbound_address_on_new_session() { let (mut mgr, _conn_rx) = make_manager(2, 20); let remote_pubkey = make_pubkey(); let remote_peer_id = remote_pubkey.peer_id(); let remote_addr = make_multiaddr(6000, Some(remote_pubkey.peer_id())); let sess_ctx = SessionContext::make( SessionId::new(1), remote_addr.clone(), SessionType::Inbound, remote_pubkey.clone(), ); let new_session = PeerManagerEvent::NewSession { pid: remote_peer_id.clone(), pubkey: remote_pubkey.clone(), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 1, "should have one without bootstrap"); let saved_peer = inner.peer(&remote_peer_id).expect("should save peer"); assert_eq!( saved_peer.multiaddrs.len(), 0, "should not save inbound multiaddr" ); } #[tokio::test] async fn should_enforce_id_in_multiaddr_on_new_session() { let (mut mgr, _conn_rx) = make_manager(2, 20); let remote_pubkey = make_pubkey(); let remote_peer_id = remote_pubkey.peer_id(); let remote_addr = make_multiaddr(6000, None); let sess_ctx = SessionContext::make( SessionId::new(1), remote_addr.clone(), SessionType::Outbound, remote_pubkey.clone(), ); let new_session = PeerManagerEvent::NewSession { pid: remote_pubkey.peer_id(), pubkey: remote_pubkey.clone(), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 1, "should have one without bootstrap"); let saved_peer = inner.peer(&remote_peer_id).expect("should save peer"); let saved_addrs = saved_peer.multiaddrs.all_raw(); assert_eq!(saved_addrs.len(), 1, "should save outbound multiaddr"); let remote_addr = saved_addrs.first().expect("get first multiaddr"); assert!(remote_addr.has_id()); assert_eq!( remote_addr.id_bytes(), Some(Cow::Borrowed(remote_pubkey.peer_id().as_bytes())), 
"id should match" ); } #[tokio::test] async fn should_add_new_outbound_multiaddr_to_peer_on_new_session() { let (mut mgr, _conn_rx) = make_manager(2, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 1, "should have one without bootstrap"); let test_peer = remote_peers.first().expect("get first"); let session_closed = PeerManagerEvent::SessionClosed { pid: test_peer.owned_id(), sid: test_peer.session_id(), }; mgr.poll_event(session_closed).await; let new_multiaddr = make_multiaddr(9999, None); let sess_ctx = SessionContext::make( SessionId::new(2), new_multiaddr, SessionType::Outbound, test_peer.owned_pubkey().expect("pubkey"), ); let new_session = PeerManagerEvent::NewSession { pid: test_peer.owned_id(), pubkey: test_peer.owned_pubkey().expect("pubkey"), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; assert_eq!(test_peer.multiaddrs.len(), 2, "should have 2 addrs"); let test_peer_multiaddr = make_peer_multiaddr(9999, test_peer.owned_id()); assert!( test_peer.multiaddrs.contains(&test_peer_multiaddr), "should have this new multiaddr" ); } #[tokio::test] async fn should_always_remove_inbound_multiaddr_even_if_we_reach_max_connections_on_new_session() { let (mut mgr, _conn_rx) = make_manager(0, 2); let _remote_peers = make_sessions(&mut mgr, 2, 5000, SessionType::Outbound).await; let inner = mgr.core_inner(); let test_peer = make_peer(9527); inner.add_peer(test_peer.clone()); assert_eq!( test_peer.multiaddrs.len(), 1, "should have on inbound address" ); let sess_ctx = SessionContext::make( SessionId::new(1), make_multiaddr(9527, Some(test_peer.owned_id())), SessionType::Inbound, test_peer.owned_pubkey().expect("pubkey"), ); let new_session = PeerManagerEvent::NewSession { pid: test_peer.owned_id(), pubkey: test_peer.owned_pubkey().expect("pubkey"), ctx: sess_ctx.arced(), }; mgr.poll_event(new_session).await; let inner = mgr.core_inner(); 
assert_eq!(inner.connected(), 2, "should not increase conn count");
    assert_eq!(
        test_peer.multiaddrs.len(),
        0,
        "should remove inbound address"
    );
}

// Matching against the session context address must work even when that
// address has no /p2p id component attached.
#[tokio::test]
async fn should_remove_matched_peer_inbound_address_from_ctx_even_if_it_doesnt_have_id_on_new_session(
) {
    let (mut mgr, _conn_rx) = make_manager(0, 2);

    let inner = mgr.core_inner();
    let test_peer = make_peer(9527);
    inner.add_peer(test_peer.clone());
    // NOTE(review): message typo — "on" is presumably "one".
    assert_eq!(
        test_peer.multiaddrs.len(),
        1,
        "should have on inbound address"
    );

    let sess_ctx = SessionContext::make(
        SessionId::new(1),
        make_multiaddr(9527, None),
        SessionType::Inbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 1, "should have one connection");
    assert_eq!(
        test_peer.multiaddrs.len(),
        0,
        "should remove inbound address"
    );
}

// A second session from an already-connected peer is dropped; the original
// session id must be kept and the duplicate disconnected.
#[tokio::test]
async fn should_reject_new_connection_for_same_peer_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    let expect_sid = test_peer.session_id();

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 1, "should not increase conn count");
    assert_eq!(
        test_peer.session_id(),
        expect_sid,
        "should not change peer session id"
    );

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// If the recorded session is stale (already removed from inner state), the new
// connection should be kept and the peer's session id updated in place.
#[tokio::test]
async fn should_keep_new_connection_for_error_outdated_peer_session_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let inner = mgr.core_inner();
    let test_peer = remote_peers.first().expect("get first peer");
    // Simulate an outdated record: the session is gone but the peer still
    // carries its id.
    inner.remove_session(test_peer.session_id());

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    assert_eq!(inner.connected(), 1, "should not increase conn count");
    assert_eq!(
        test_peer.session_id(),
        99.into(),
        "should update session id"
    );

    match conn_rx.try_next() {
        Err(_) => (), // Err means channel is empty, it's expected
        _ => panic!("should not have any connection event"),
    }
}

// A brand-new peer arriving after the connection limit is reached gets its
// session disconnected.
#[tokio::test]
async fn should_reject_new_connections_when_we_reach_max_connections_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 10); // set max to 10
    let _remote_peers = make_sessions(&mut mgr, 10, 7000, SessionType::Outbound).await;

    let remote_pubkey = make_pubkey();
    let remote_addr = make_multiaddr(2077, Some(remote_pubkey.peer_id()));
    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        remote_addr,
        SessionType::Outbound,
        remote_pubkey.clone(),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: remote_pubkey.peer_id(),
        pubkey: remote_pubkey.clone(),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 10, "should not increase conn count");

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// Even when the session is rejected at the connection limit, the pending
// ConnectingAttempt for that peer must be cleaned up.
#[tokio::test]
async fn should_remove_connecting_even_if_session_is_reject_due_to_reach_max_connections_on_new_session(
) {
    let (mut mgr, mut conn_rx) = make_manager(0, 5); // set max to 5
    let _remote_peers = make_sessions(&mut mgr, 5, 7000, SessionType::Outbound).await;

    let test_peer = make_peer(2020);
    let inner = mgr.core_inner();
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));
    assert_eq!(mgr.connecting().len(), 1, "should have one attempt");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    assert_eq!(inner.connected(), 5, "should not increase conn count");
    assert_eq!(
        mgr.connecting().len(),
        0,
        "should remove connecting attempt"
    );

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// A peer carrying an active ban tag must be disconnected immediately.
#[tokio::test]
async fn should_reject_banned_peer_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 10);

    let test_peer = make_peer(2077);
    let inner = mgr.core_inner();
    inner.add_peer(test_peer.clone());
    test_peer
        .tags
        .insert_ban(Duration::from_secs(10))
        .expect("insert ban tag");
    assert!(test_peer.banned(), "should be banned");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session =
PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    assert_eq!(inner.connected(), 0, "should not increase conn count");

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// Accepting a session should also (re)start the peer's trust metric ticking.
#[tokio::test]
async fn should_start_trust_metric_on_connected_peer_on_new_session() {
    let (mut mgr, _conn_rx) = make_manager(0, 10);

    let test_peer = make_peer(2077);
    let inner = mgr.core_inner();
    inner.add_peer(test_peer.clone());

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let trust_metric = test_peer.trust_metric().expect("trust metric");
    assert!(trust_metric.is_started(), "should start trust metric");
}

// At the connection limit, an incoming peer with a clearly better trust score
// may evict a connected low-score peer that is old enough to be judged.
#[tokio::test]
async fn should_replace_low_quality_peer_with_better_one_due_to_max_connections_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 10);
    let remote_peers = make_sessions(&mut mgr, 10, 5000, SessionType::Outbound).await;

    let target_peer = remote_peers.first().expect("get first peer");
    let peer_trust_config = Arc::new(TrustMetricConfig::default());

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 10, "should reach max connections");

    // Drive the target peer's score down with mixed good/bad intervals.
    if let Some(metric) = target_peer.trust_metric() {
        for _ in 0..30 {
            metric.good_events(1);
            metric.bad_events(1);
            metric.enter_new_interval();
        }
        assert!(metric.trust_score() < 80, "should less than 80");
    }

    // Update alive, only old enough peer can be replaced
    target_peer.set_alive(peer_trust_config.interval().as_secs() * 20 + 20);

    let test_peer = make_peer(2077);
    inner.add_peer(test_peer.clone());

    // Give the incoming peer a high score so it qualifies as a replacement.
    let trust_metric = TrustMetric::new(Arc::clone(&peer_trust_config));
    test_peer.set_trust_metric(trust_metric.clone());
    for _ in 0..10 {
        trust_metric.good_events(1);
        trust_metric.enter_new_interval();
    }
    assert!(trust_metric.trust_score() > 90, "should have better score");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    // The evicted session must be the low-score target's, not the new one.
    let target_sid = target_peer.session_id();
    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, target_sid, "should be replaced session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// An incoming peer with no trust metric at all cannot evict anyone; its own
// session (id 99) is the one disconnected.
#[tokio::test]
async fn should_not_replace_any_peer_if_incoming_hasnt_trust_score_due_to_max_connections_on_new_session(
) {
    let (mut mgr, mut conn_rx) = make_manager(0, 10);
    let remote_peers = make_sessions(&mut mgr, 10, 5000, SessionType::Outbound).await;

    let target_peer = remote_peers.first().expect("get first peer");
    let peer_trust_config = Arc::new(TrustMetricConfig::default());

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 10, "should reach max connections");

    if let Some(metric) = target_peer.trust_metric() {
        for _ in 0..30 {
            metric.good_events(1);
            metric.bad_events(1);
            metric.enter_new_interval();
        }
        assert!(metric.trust_score() < 80, "should less than 80");
    }

    // Update alive, only old enough peer can be replaced
    target_peer.set_alive(peer_trust_config.interval().as_secs() * 20 + 20);

    // No trust metric is attached to the incoming peer here — that is the
    // condition under test.
    let test_peer = make_peer(2077);
    inner.add_peer(test_peer.clone());

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        // NOTE(review): message is misleading — sid 99 is the REJECTED new
        // session, no peer was replaced.
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, 99.into(), "should be replaced session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// When every connected peer scores higher than the incoming one, the incoming
// session is the one rejected.
#[tokio::test]
async fn should_not_replace_any_higher_score_peer_due_to_max_connections_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 10);
    let remote_peers = make_sessions(&mut mgr, 10, 5000, SessionType::Outbound).await;

    let target_peer = remote_peers.first().expect("get first peer");
    let peer_trust_config = Arc::new(TrustMetricConfig::default());

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 10, "should reach max connections");

    // Connected peer gets only good intervals — a high score.
    if let Some(metric) = target_peer.trust_metric() {
        for _ in 0..30 {
            metric.good_events(1);
            metric.enter_new_interval();
        }
        assert!(metric.trust_score() > 90, "should have better score");
    }

    // Update alive, only old enough peer can be replaced
    target_peer.set_alive(peer_trust_config.interval().as_secs() * 20 + 20);

    let test_peer = make_peer(2077);
    inner.add_peer(test_peer.clone());

    // Incoming peer gets mixed intervals — a lower score than the target.
    let trust_metric = TrustMetric::new(Arc::clone(&peer_trust_config));
    test_peer.set_trust_metric(trust_metric.clone());
    for _ in 0..30 {
        trust_metric.good_events(1);
        trust_metric.bad_events(1);
        trust_metric.enter_new_interval();
    }
    assert!(trust_metric.trust_score() < 90, "should have lower score");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey:
test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        // NOTE(review): message is misleading — sid 99 is the REJECTED new
        // session, no peer was replaced.
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, 99.into(), "should be replaced session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// An AlwaysAllow-tagged peer is protected from eviction even by a peer with a
// strictly better trust score.
#[tokio::test]
async fn should_not_replace_peer_in_allowlist_with_better_score_peer_due_to_max_connections_on_new_session(
) {
    let (mut mgr, mut conn_rx) = make_manager(0, 1);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let target_peer = remote_peers.first().expect("get first peer");
    let peer_trust_config = Arc::new(TrustMetricConfig::default());

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 1, "should reach max connections");

    // Drive the protected peer's score down so only the allowlist tag saves it.
    if let Some(metric) = target_peer.trust_metric() {
        for _ in 0..30 {
            metric.good_events(1);
            metric.bad_events(1);
            metric.enter_new_interval();
        }
        assert!(metric.trust_score() < 80, "should less than 80");
    }

    // Update alive, only old enough peer can be replaced
    target_peer.set_alive(peer_trust_config.interval().as_secs() * 20 + 20);
    // Add always allow tag
    target_peer.tags.insert(PeerTag::AlwaysAllow).unwrap();

    let test_peer = make_peer(2077);
    inner.add_peer(test_peer.clone());

    let trust_metric = TrustMetric::new(Arc::clone(&peer_trust_config));
    test_peer.set_trust_metric(trust_metric.clone());
    for _ in 0..10 {
        trust_metric.good_events(1);
        trust_metric.enter_new_interval();
    }
    assert!(trust_metric.trust_score() > 90, "should have better score");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        // NOTE(review): message is misleading — sid 99 is the REJECTED new
        // session, the allowlisted peer was kept.
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, 99.into(), "should be replaced session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// A low-score peer whose alive time is too short is not yet eligible for
// eviction; the incoming session is rejected instead.
#[tokio::test]
async fn should_not_replace_peer_not_old_enough_due_to_max_connections_on_new_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 10);
    let remote_peers = make_sessions(&mut mgr, 10, 5000, SessionType::Outbound).await;

    let target_peer = remote_peers.first().expect("get first peer");
    let peer_trust_config = Arc::new(TrustMetricConfig::default());

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 10, "should reach max connections");

    if let Some(metric) = target_peer.trust_metric() {
        for _ in 0..30 {
            metric.good_events(1);
            metric.bad_events(1);
            metric.enter_new_interval();
        }
        assert!(metric.trust_score() < 80, "should less than 80");
    }

    // Unlike the replacement test above, set_alive() is deliberately NOT
    // called here: the target peer stays too young to be replaced.
    let test_peer = make_peer(2077);
    inner.add_peer(test_peer.clone());

    let trust_metric = TrustMetric::new(Arc::clone(&peer_trust_config));
    test_peer.set_trust_metric(trust_metric.clone());
    for _ in 0..10 {
        trust_metric.good_events(1);
        trust_metric.enter_new_interval();
    }
    assert!(trust_metric.trust_score() > 90, "should have better score");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        // NOTE(review): message is misleading — sid 99 is the REJECTED new
        // session, no peer was replaced.
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, 99.into(), "should be replaced session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// Closing a long-enough-alive session removes it and leaves the peer in
// CanConnect with its retry counter untouched.
#[tokio::test]
async fn should_remove_session_on_session_closed() {
    let (mut mgr, _conn_rx) = make_manager(2, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    assert_eq!(
        test_peer.retry.count(),
        0,
        "should reset retry after connect"
    );

    // Set connected at to older timestamp to increase peer alive
    test_peer.set_connected_at(time::now() - SHORT_ALIVE_SESSION - 1);

    let session_closed = PeerManagerEvent::SessionClosed {
        pid: test_peer.owned_id(),
        sid: test_peer.session_id(),
    };
    mgr.poll_event(session_closed).await;

    let inner = mgr.core_inner();
    // NOTE(review): message typos — "shoulld", "connectednes", "have't".
    assert_eq!(inner.connected(), 0, "shoulld have zero connected");
    assert_eq!(inner.share_sessions().len(), 0, "should have no session");
    assert_eq!(
        test_peer.connectedness(),
        Connectedness::CanConnect,
        "should set peer connectednes to Connecting since we have't reach max connection"
    );
    assert_eq!(test_peer.retry.count(), 0, "should keep retry to 0");
}

// After a close, a short random ban prevents an immediate reconnect attempt
// to the same peer.
#[tokio::test]
async fn should_not_reconnect_to_closed_session_immediately_after_session_closed() {
    let (mut mgr, _conn_rx) = make_manager(2, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    assert_eq!(
        test_peer.retry.count(),
        0,
        "should reset retry after connect"
    );

    // Set connected at to older timestamp to increase peer alive
    test_peer.set_connected_at(time::now() - SHORT_ALIVE_SESSION - 1);

    let session_closed = PeerManagerEvent::SessionClosed {
        pid: test_peer.owned_id(),
        sid: test_peer.session_id(),
    };
    mgr.poll_event(session_closed).await;

    let inner = mgr.core_inner();
    let random_short_ban = {
        let opt_banned = test_peer.tags.get_banned_until();
        opt_banned.expect("should have a random short ban")
    };

    // NOTE(review): message typo — "shoulld".
    assert_eq!(inner.connected(), 0, "shoulld have zero connected");
    assert_eq!(inner.share_sessions().len(), 0, "should have no session");
    assert!(
        random_short_ban <= (time::now() + MAX_RANDOM_NEXT_RETRY),
        "should have a random short ban, so we dont reconnect to this peer immediately"
    );
    assert_eq!(
mgr.connecting().len(),
        0,
        "should not reconnect immediately"
    );
    assert_eq!(
        test_peer.connectedness(),
        Connectedness::CanConnect,
        "should set peer connectednes to Connecting since we have't reach max connection"
    );
    assert_eq!(test_peer.retry.count(), 0, "should keep retry to 0");
}

// Closing a session also pauses the peer's trust metric ticking.
#[tokio::test]
async fn should_pause_trust_metric_on_session_closed() {
    let (mut mgr, _conn_rx) = make_manager(2, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first");
    let session_closed = PeerManagerEvent::SessionClosed {
        pid: test_peer.owned_id(),
        sid: test_peer.session_id(),
    };
    mgr.poll_event(session_closed).await;

    let trust_metric = test_peer.trust_metric().expect("get trust metric");
    assert!(!trust_metric.is_started(), "should pause trust metric");
}

// A session that dies quickly (connected_at is NOT aged here) bumps retry far
// enough to outlast the repeated-connection timeout.
#[tokio::test]
async fn should_increase_retry_for_short_alive_session_on_session_closed() {
    let (mut mgr, _conn_rx) = make_manager(2, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    assert_eq!(
        test_peer.retry.count(),
        0,
        "should reset retry after connect"
    );

    let session_closed = PeerManagerEvent::SessionClosed {
        pid: test_peer.owned_id(),
        sid: test_peer.session_id(),
    };
    mgr.poll_event(session_closed).await;

    let inner = mgr.core_inner();
    assert_eq!(
        inner.connected(),
        0,
        "should have no session because of retry"
    );
    assert_eq!(inner.share_sessions().len(), 0, "should have no session");
    assert_eq!(test_peer.connectedness(), Connectedness::CanConnect);
    assert!(
        test_peer.retry.eta() > REPEATED_CONNECTION_TIMEOUT,
        "should increase retry count enough to cover repeated connection timeout"
    );
}

// A SessionClosed for an already-removed session must still normalize the
// peer's connectedness.
#[tokio::test]
async fn should_properly_update_peer_state_even_if_session_not_found_on_session_closed() {
    let (mut mgr, _conn_rx) = make_manager(2, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 1, "should have one session");
    // Remove the session first so the event refers to a missing session.
    inner.remove_session(test_peer.session_id());
    assert_eq!(inner.connected(), 0, "should have no session");

    let session_closed = PeerManagerEvent::SessionClosed {
        pid: test_peer.owned_id(),
        sid: test_peer.session_id(),
    };
    mgr.poll_event(session_closed).await;

    assert_eq!(test_peer.connectedness(), Connectedness::CanConnect);
}

// An I/O connect error counts one failure against the dialed multiaddr.
#[tokio::test]
async fn should_inc_peer_multiaddr_failure_count_for_io_error_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(1, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::Io(std::io::ErrorKind::Other.into()),
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should increase failure count"
    );
}

// A DNS-resolution connect error also counts one failure against the address.
#[tokio::test]
async fn should_inc_peer_multiaddr_failure_count_for_dns_error_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(1, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        // NOTE(review): the Box's generic parameters appear lost in this
        // extract — confirm against the original file.
        kind: ConnectionErrorKind::DNSResolver(Box::new(std::io::Error::from(
            std::io::ErrorKind::Other,
        )) as Box),
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should increase failure count"
    );
}

// A peer-id mismatch permanently disqualifies that multiaddr (no more
// connectable addresses afterwards).
#[tokio::test]
async fn should_give_up_peer_multiaddr_if_peer_id_not_match_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(1, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    assert_eq!(
        test_peer.multiaddrs.connectable_len(),
        1,
        "should have one connectable multiaddr"
    );

    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::PeerIdNotMatch,
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(
        test_peer.multiaddrs.connectable_len(),
        0,
        "should not have any connectable multiaddr"
    );
}

// A secio handshake failure marks the whole peer Unconnectable.
#[tokio::test]
async fn should_give_up_peer_itself_if_secio_handshake_error_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(1, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        // NOTE(review): the Box's generic parameters appear lost in this
        // extract — confirm against the original file.
        kind: ConnectionErrorKind::SecioHandshake(Box::new(std::io::Error::from(
            std::io::ErrorKind::Other,
        )) as Box),
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(test_peer.connectedness(), Connectedness::Unconnectable);
}

// A protocol-handle failure likewise marks the peer Unconnectable.
#[tokio::test]
async fn should_give_up_peer_itself_if_protocol_handle_error_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(1, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::ProtocolHandle,
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(test_peer.connectedness(),
Connectedness::Unconnectable);
}

// When the last in-flight multiaddr fails, the attempt is dropped and the
// peer's retry counter is bumped (peer remains connectable).
// NOTE(review): "conect" in the test name is a typo for "connect".
#[tokio::test]
async fn should_increase_peer_retry_if_all_multiaddrs_failed_on_conect_failed() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::Io(std::io::ErrorKind::Other.into()),
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(mgr.connecting().len(), 0, "should not have any connecting");
    assert_eq!(test_peer.retry.count(), 1, "should have 1 retry");
    assert_eq!(test_peer.connectedness(), Connectedness::CanConnect);
}

// Exceeding MAX_RETRY_COUNT on a connect failure gives up the peer entirely.
#[tokio::test]
async fn should_give_up_peer_if_run_out_retry_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));
    // Start at the retry ceiling so the next failure pushes past it.
    test_peer.retry.set(MAX_RETRY_COUNT);

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::Io(std::io::ErrorKind::Other.into()),
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(mgr.connecting().len(), 0, "should not have any connecting");
    assert_eq!(
        test_peer.retry.count(),
        MAX_RETRY_COUNT + 1,
        "should exceed max retry"
    );
    assert_eq!(test_peer.connectedness(), Connectedness::Unconnectable);
}

// Once a peer is given up (ProtocolHandle), the handler returns early and
// must not keep counting retries.
#[tokio::test]
async fn should_return_early_if_we_already_give_up_peer_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::ProtocolHandle,
    };
    mgr.poll_event(connect_failed).await;

    assert_eq!(mgr.connecting().len(), 0, "should not have any connecting");
    assert_eq!(test_peer.connectedness(), Connectedness::Unconnectable);
    assert_eq!(test_peer.retry.count(), 0, "should not touch peer retry");
}

// While other multiaddrs of the same attempt are still dialing, one failure
// only shrinks the attempt instead of removing it.
#[tokio::test]
async fn should_wait_for_other_connecting_multiaddrs_if_we_dont_give_up_peer_on_connect_failed() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);

    let inner = mgr.core_inner();
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");
    // Add a second address so the attempt tracks two dials.
    test_peer
        .multiaddrs
        .insert(vec![make_peer_multiaddr(2020, test_peer.owned_id())]);
    assert_eq!(
        test_peer.multiaddrs.connectable_len(),
        2,
        "should have two connectable multiaddrs"
    );

    inner.add_peer(test_peer.clone());
    mgr.connecting_mut()
        .insert(ConnectingAttempt::new(test_peer.clone()));
    let attempt = mgr.connecting().iter().next().expect("attempt");
    assert_eq!(
        attempt.multiaddrs(),
        2,
        "should still have two connecting multiaddrs"
    );

    let connect_failed = PeerManagerEvent::ConnectFailed {
        addr: (*test_multiaddr).to_owned(),
        kind: ConnectionErrorKind::Io(std::io::ErrorKind::Other.into()),
    };
    mgr.poll_event(connect_failed).await;

    // NOTE(review): message contradicts the asserted value 1 — the attempt is
    // expected to STILL be present here.
    assert_eq!(mgr.connecting().len(), 1, "should not have any connecting");
    let attempt = mgr.connecting().iter().next().expect("attempt");
    assert_eq!(
        attempt.multiaddrs(),
        1,
        "should still have one connecting multiaddr"
    );
}

// A SessionFailed event must both drop manager state and actively emit a
// Disconnect to the connection service.
#[tokio::test]
async fn should_ensure_disconnect_session_on_session_failed() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    let expect_sid = test_peer.session_id();

    let session_failed = PeerManagerEvent::SessionFailed {
        sid: expect_sid,
        kind: SessionErrorKind::Io(std::io::ErrorKind::Other.into()),
    };
    mgr.poll_event(session_failed).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.share_sessions().len(), 0, "should disconnect session");
    assert_eq!(inner.connected(), 0, "should disconnect session");
    assert_eq!(
        test_peer.connectedness(),
        Connectedness::CanConnect,
        "should disconnect peer"
    );

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, expect_sid, "should disconnect session")
        }
        _ => panic!("should be disconnect event"),
    }
}

// An I/O session failure is recoverable: it increments retry instead of
// giving up the peer.
#[tokio::test]
async fn should_increase_retry_for_io_error_on_session_failed() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    let expect_sid = test_peer.session_id();

    let session_failed = PeerManagerEvent::SessionFailed {
        sid: expect_sid,
        kind: SessionErrorKind::Io(std::io::ErrorKind::Other.into()),
    };
    mgr.poll_event(session_failed).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 0, "should disconnect session");
    // NOTE(review): message typo — "onen" is presumably "one".
    assert_eq!(test_peer.retry.count(), 1, "should increase onen retry");
}

// A protocol-level session failure is unrecoverable: the peer is given up.
#[tokio::test]
async fn should_give_up_peer_for_protocol_error_on_session_failed() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let test_peer = remote_peers.first().expect("get first peer");
    let expect_sid = test_peer.session_id();

    let session_failed = PeerManagerEvent::SessionFailed {
        sid: expect_sid,
        kind: SessionErrorKind::Protocol {
            identity: None,
            cause: None,
        },
    };
    mgr.poll_event(session_failed).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 0, "should disconnect session");
    assert_eq!(
        test_peer.connectedness(),
        Connectedness::Unconnectable,
        "should give up peer"
    );
}

#[tokio::test]
async fn
should_give_up_peer_for_unexpected_error_on_session_failed() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); let expect_sid = test_peer.session_id(); let session_failed = PeerManagerEvent::SessionFailed { sid: expect_sid, kind: SessionErrorKind::Unexpected( Box::new(std::io::Error::from(std::io::ErrorKind::Other)) as Box, ), }; mgr.poll_event(session_failed).await; let inner = mgr.core_inner(); assert_eq!(inner.connected(), 0, "should disconnect session"); assert_eq!( test_peer.connectedness(), Connectedness::Unconnectable, "should give up peer" ); } #[tokio::test] async fn should_reduce_trust_score_on_session_failed() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); let trust_metric = test_peer.trust_metric().expect("get trust metric"); for _ in 0..10 { trust_metric.good_events(1); trust_metric.enter_new_interval(); } let before_failed_score = trust_metric.trust_score(); assert!(before_failed_score > 90, "should trust score"); let expect_sid = test_peer.session_id(); let session_failed = PeerManagerEvent::SessionFailed { sid: expect_sid, kind: SessionErrorKind::Io(std::io::ErrorKind::Other.into()), }; mgr.poll_event(session_failed).await; assert!( trust_metric.trust_score() < before_failed_score, "should reduce trust score" ) } #[tokio::test] async fn should_update_peer_alive_on_peer_alive() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); let old_alive = test_peer.alive(); // Set connected at to older timestamp to increase peer alive test_peer.set_connected_at(time::now() - SHORT_ALIVE_SESSION - 1); let peer_alive = 
PeerManagerEvent::PeerAlive {
        pid: test_peer.owned_id(),
    };
    mgr.poll_event(peer_alive).await;

    assert_eq!(
        test_peer.alive(),
        old_alive + SHORT_ALIVE_SESSION + 1,
        "should update peer alive"
    );
}

// PeerAlive should also clear any accumulated dial-retry count for the peer.
#[tokio::test]
async fn should_reset_peer_retry_on_peer_alive() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    assert_eq!(test_peer.retry.count(), 0, "should have 0 retry");
    test_peer.retry.inc();
    assert_eq!(test_peer.retry.count(), 1, "should now have 1 retry");

    let peer_alive = PeerManagerEvent::PeerAlive {
        pid: test_peer.owned_id(),
    };
    mgr.poll_event(peer_alive).await;

    assert_eq!(test_peer.retry.count(), 0, "should reset retry");
}

// A ping-timeout misbehave tears down the peer's session and emits a
// Disconnect event carrying that session id.
#[tokio::test]
async fn should_disconnect_peer_on_misbehave() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let expect_sid = test_peer.session_id();

    let peer_misbehave = PeerManagerEvent::Misbehave {
        pid: test_peer.owned_id(),
        kind: MisbehaviorKind::PingTimeout,
    };
    mgr.poll_event(peer_misbehave).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 0, "should disconnect session");
    assert_eq!(inner.share_sessions().len(), 0, "should disconnect session");

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, expect_sid, "should disconnect session")
        }
        _ => panic!("should be disconnect event"),
    }
}

// Misbehaving should lower a previously high trust score.
#[tokio::test]
async fn should_reduce_trust_score_on_misbehave() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("get trust metric");

    // Build up a high score first so the drop is observable.
    for _ in 0..10 {
        trust_metric.good_events(1);
        trust_metric.enter_new_interval();
    }
    let before_failed_score = trust_metric.trust_score();
    assert!(before_failed_score > 90, "should trust score");

    let peer_misbehave = PeerManagerEvent::Misbehave {
        pid: test_peer.owned_id(),
        kind: MisbehaviorKind::PingTimeout,
    };
    mgr.poll_event(peer_misbehave).await;

    assert!(
        trust_metric.trust_score() < before_failed_score,
        "should reduce trust score"
    )
}

// Ping timeout is recoverable: the peer keeps its record but its retry
// counter goes up.
#[tokio::test]
async fn should_increase_retry_for_ping_timeout_on_misbehave() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let peer_misbehave = PeerManagerEvent::Misbehave {
        pid: test_peer.owned_id(),
        kind: MisbehaviorKind::PingTimeout,
    };
    mgr.poll_event(peer_misbehave).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 0, "should disconnect session");
    assert_eq!(test_peer.retry.count(), 1, "should increase retry");
}

// An unexpected ping marks the peer Unconnectable instead of merely retried.
#[tokio::test]
async fn should_give_up_peer_for_ping_unexpect_on_misbehave() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let peer_misbehave = PeerManagerEvent::Misbehave {
        pid: test_peer.owned_id(),
        kind: MisbehaviorKind::PingUnexpect,
    };
    mgr.poll_event(peer_misbehave).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 0, "should disconnect session");
    assert_eq!(
        test_peer.connectedness(),
        Connectedness::Unconnectable,
        "should give up peer"
    );
}

// Discovery-protocol misbehavior also marks the peer Unconnectable.
#[tokio::test]
async fn should_give_up_peer_for_discovery_on_misbehave() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let peer_misbehave = PeerManagerEvent::Misbehave {
        pid: test_peer.owned_id(),
        kind: MisbehaviorKind::Discovery,
    };
    mgr.poll_event(peer_misbehave).await;

    let inner = mgr.core_inner();
    assert_eq!(inner.connected(), 0, "should disconnect session");
    assert_eq!(
        test_peer.connectedness(),
        Connectedness::Unconnectable,
        "should give up peer"
    );
}

// A SessionBlocked event should flag the live session as blocked.
#[tokio::test]
async fn should_mark_session_blocked_on_session_blocked() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let sess_ctx = SessionContext::make(
        test_peer.session_id(),
        test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let session_blocked = PeerManagerEvent::SessionBlocked {
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(session_blocked).await;

    let inner = mgr.core_inner();
    let session = inner
        .session(test_peer.session_id())
        .expect("should have a session");
    assert!(session.is_blocked(), "should be blocked");
}

// Blocking a session counts as one bad trust event for the peer.
#[tokio::test]
async fn should_add_one_bad_event_on_session_blocked() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("get trust metric");

    let sess_ctx = SessionContext::make(
        test_peer.session_id(),
        test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let session_blocked = PeerManagerEvent::SessionBlocked {
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(session_blocked).await;

    assert_eq!(
        trust_metric.bad_events_count(),
        1,
        "should add one bad event"
    );
}

// ConnectPeersNow should dial every multiaddr of every requested peer.
#[tokio::test]
async fn should_try_all_peer_multiaddrs_on_connect_peers_now() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let peers = (0..10)
        .map(|port| {
            // Every peer has two multiaddrs
            let p = make_peer(port + 7000);
            p.multiaddrs
.insert(vec![make_peer_multiaddr(port + 8000, p.owned_id())]);
            p
        })
        // FIX: restore the `Vec<_>` turbofish target lost during extraction.
        .collect::<Vec<_>>();

    let inner = mgr.core_inner();
    for peer in peers.iter() {
        inner.add_peer(peer.clone());
    }
    assert_eq!(
        mgr.connecting().len(),
        0,
        "should have 0 connecting attempt"
    );

    let connect_peers = PeerManagerEvent::ConnectPeersNow {
        pids: peers.iter().map(|p| p.owned_id()).collect(),
    };
    mgr.poll_event(connect_peers).await;
    assert_eq!(
        mgr.connecting().len(),
        10,
        "should have all peer in connecting attempt"
    );

    let conn_event = conn_rx.next().await.expect("should have connect event");
    let multiaddrs_in_event = match conn_event {
        ConnectionEvent::Connect { addrs, .. } => addrs,
        _ => panic!("should be connect event"),
    };

    // `flat_map` replaces the original `map(..).flatten()` chain (same result).
    let expect_multiaddrs = peers
        .into_iter()
        .flat_map(|p| p.multiaddrs.all_raw())
        .collect::<Vec<_>>();
    assert_eq!(
        multiaddrs_in_event.len(),
        expect_multiaddrs.len(),
        "should have same number of multiaddrs"
    );
    assert!(
        !multiaddrs_in_event
            .iter()
            .any(|ma| !expect_multiaddrs.contains(ma)),
        "all multiaddrs should be included"
    );
}

// Peers already Connected or marked Unconnectable must be skipped by
// ConnectPeersNow: no connection event should be emitted for them.
#[tokio::test]
async fn should_skip_peers_not_in_can_connect_or_not_connected_connectedness_on_connect_peers_now() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let peer_in_connected = make_peer(2020);
    let peer_in_unconnectable = make_peer(2059);
    peer_in_unconnectable.set_connectedness(Connectedness::Unconnectable);
    peer_in_connected.set_connectedness(Connectedness::Connected);

    let inner = mgr.core_inner();
    inner.add_peer(peer_in_connected.clone());
    inner.add_peer(peer_in_unconnectable.clone());

    let connect_peers = PeerManagerEvent::ConnectPeersNow {
        pids: vec![
            peer_in_unconnectable.owned_id(),
            peer_in_connected.owned_id(),
        ],
    };
    mgr.poll_event(connect_peers).await;

    match conn_rx.try_next() {
        Err(_) => (), // Err means channel is empty, it's expected
        _ => panic!("should not have any connection event"),
    }
}

// An explicit ConnectPeersNow bypasses the retry back-off window.
#[tokio::test]
async fn should_connect_peers_even_if_they_are_not_retry_ready_on_connect_peers_now() {
    let (mut mgr, mut conn_rx) =
make_manager(0, 20);
    let not_ready_peer = make_peer(2077);
    not_ready_peer.retry.inc();

    let inner = mgr.core_inner();
    inner.add_peer(not_ready_peer.clone());

    let connect_peers = PeerManagerEvent::ConnectPeersNow {
        pids: vec![not_ready_peer.owned_id()],
    };
    mgr.poll_event(connect_peers).await;

    let conn_event = conn_rx.next().await.expect("should have connect event");
    let multiaddrs_in_event = match conn_event {
        ConnectionEvent::Connect { addrs, .. } => addrs,
        _ => panic!("should be connect event"),
    };

    let expect_multiaddrs = not_ready_peer.multiaddrs.all_raw();
    assert_eq!(
        multiaddrs_in_event.len(),
        expect_multiaddrs.len(),
        "should have same number of multiaddrs"
    );
    assert!(
        !multiaddrs_in_event
            .iter()
            .any(|ma| !expect_multiaddrs.contains(ma)),
        "all multiaddrs should be included"
    );
}

// Discovered multiaddrs for unknown peers should create new peer records.
#[tokio::test]
async fn should_insert_peers_on_discover_multi_addrs() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    // FIX: restore the `Vec<_>` turbofish targets lost during extraction.
    let peers = (0..10)
        .map(|port| make_peer(port + 7000))
        .collect::<Vec<_>>();
    let peer_ids = peers
        .clone()
        .into_iter()
        .map(|p| p.owned_id())
        .collect::<Vec<_>>();
    let test_multiaddrs = peers
        .into_iter()
        .map(|p| p.multiaddrs.all_raw().pop().expect("multiaddr"))
        .collect::<Vec<_>>();

    let discover_multi_addrs = PeerManagerEvent::DiscoverMultiAddrs {
        addrs: test_multiaddrs,
    };
    mgr.poll_event(discover_multi_addrs).await;

    let inner = mgr.core_inner();
    assert!(
        !peer_ids.iter().any(|pid| !inner.contains(pid)),
        "should insert all discovered peers"
    );
}

// Re-discovering a known multiaddr must not wipe its failure statistics.
// NOTE(review): `test_peer` is never added to the manager here, so the event
// inserts a separate record — confirm this still exercises the no-reset path.
#[tokio::test]
async fn should_not_reset_exist_multiaddr_failure_count_on_discover_multi_addrs() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let test_peer = make_peer(2077);
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");

    test_peer.multiaddrs.inc_failure(&test_multiaddr);
    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should have one failure"
    );

    let discover_multi_addrs = PeerManagerEvent::DiscoverMultiAddrs {
        addrs: vec![test_multiaddr.clone().into()],
    };
mgr.poll_event(discover_multi_addrs).await;

    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should have one failure"
    );
}

// Our own listen multiaddrs must never be inserted as a remote peer.
#[tokio::test]
async fn should_skip_our_listen_multiaddrs_on_discover_multi_addrs() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let self_id = mgr.inner.peer_id.to_owned();
    let inner = mgr.core_inner();

    let listen_multiaddr = make_peer_multiaddr(2020, self_id.clone());
    inner.add_listen(listen_multiaddr.clone());
    assert!(
        inner.listen().contains(&listen_multiaddr),
        "should contains listen addr"
    );

    let discover_multi_addrs = PeerManagerEvent::DiscoverMultiAddrs {
        addrs: vec![make_multiaddr(2020, Some(self_id.clone()))],
    };
    mgr.poll_event(discover_multi_addrs).await;

    assert!(!inner.contains(&self_id), "should not add our self peer id");
}

// IdentifiedAddrs should append all reported multiaddrs to the peer record.
#[tokio::test]
async fn should_add_multiaddrs_to_peer_on_identified_addrs() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first");
    let old_multiaddrs_len = test_peer.multiaddrs.len();

    let test_multiaddrs: Vec<_> = (0..2)
        .map(|port| make_multiaddr(port + 9000, Some(test_peer.owned_id())))
        .collect();
    let identified_addrs = PeerManagerEvent::IdentifiedAddrs {
        pid: test_peer.owned_id(),
        addrs: test_multiaddrs.clone(),
    };
    mgr.poll_event(identified_addrs).await;

    assert_eq!(
        test_peer.multiaddrs.len(),
        old_multiaddrs_len + 2,
        "should have correct multiaddrs len"
    );
    assert!(
        !test_multiaddrs
            .iter()
            .any(|ma| !test_peer.multiaddrs.all_raw().contains(ma)),
        "should add all multiaddrs to peer"
    );
}

// A multiaddr without the peer id must get the id appended before storage.
#[tokio::test]
async fn should_push_id_to_multiaddrs_if_not_included_on_identified_addrs() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first");
    let test_multiaddr = make_multiaddr(2077, None);

    let identified_addrs = PeerManagerEvent::IdentifiedAddrs {
        pid: test_peer.owned_id(),
        addrs: vec![test_multiaddr.clone()],
    };
    mgr.poll_event(identified_addrs).await;

    assert!(
        !test_peer.multiaddrs.all_raw().contains(&test_multiaddr),
        "should not contain multiaddr without id included"
    );
    let with_id = make_peer_multiaddr(2077, test_peer.owned_id());
    assert!(
        test_peer.multiaddrs.contains(&with_id),
        "should push id to multiaddr when add it to peer"
    );
}

// Identifying an already-known multiaddr must not reset its failure count.
#[tokio::test]
async fn should_not_reset_exist_multiaddr_failure_count_on_identified_addrs() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first");
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");

    test_peer.multiaddrs.inc_failure(&test_multiaddr);
    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should have one failure"
    );

    let identified_addrs = PeerManagerEvent::IdentifiedAddrs {
        pid: test_peer.owned_id(),
        addrs: vec![test_multiaddr.clone().into()],
    };
    mgr.poll_event(identified_addrs).await;

    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should have one failure"
    );
}

// A repeated outbound connection proves the multiaddr is reachable, so its
// failure count is reset to zero.
#[tokio::test]
async fn should_reset_peer_failure_for_outbound_multiaddr_on_repeated_connection() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first");
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");

    test_peer.multiaddrs.inc_failure(&test_multiaddr);
    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should have one failure"
    );

    let repeated_connection = PeerManagerEvent::RepeatedConnection {
        ty: ConnectionType::Outbound,
        sid: test_peer.session_id(),
        addr: test_multiaddr.clone().into(),
    };
    mgr.poll_event(repeated_connection).await;

    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        // FIX: message said "should have one failure" while asserting Some(0).
        Some(0),
        "should reset failure count"
    );
}

// Inbound addresses are ephemeral ports, so a repeated inbound connection
// removes the stored multiaddr instead of keeping it.
#[tokio::test]
async fn should_remove_inbound_multiaddr_on_repeated_connection() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first");
    let test_multiaddr = make_peer_multiaddr(2077, test_peer.owned_id());
    test_peer.multiaddrs.insert(vec![test_multiaddr.clone()]);

    let repeated_connection = PeerManagerEvent::RepeatedConnection {
        ty: ConnectionType::Inbound,
        sid: test_peer.session_id(),
        addr: test_multiaddr.clone().into(),
    };
    mgr.poll_event(repeated_connection).await;

    assert!(
        !test_peer.multiaddrs.contains(&test_multiaddr),
        "should remove inbound multiaddr"
    );
}

// NOTE(review): this test's body duplicates
// `should_reset_peer_failure_for_outbound_multiaddr_on_repeated_connection`
// and never submits an id-less multiaddr, so the "enforce id" behavior in its
// name is not actually exercised — confirm intent.
#[tokio::test]
async fn should_enforce_id_if_not_included_on_repeated_connection() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first");
    let test_multiaddr = test_peer.multiaddrs.all().pop().expect("multiaddr");

    test_peer.multiaddrs.inc_failure(&test_multiaddr);
    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        Some(1),
        "should have one failure"
    );

    let repeated_connection = PeerManagerEvent::RepeatedConnection {
        ty: ConnectionType::Outbound,
        sid: test_peer.session_id(),
        addr: test_multiaddr.clone().into(),
    };
    mgr.poll_event(repeated_connection).await;

    assert_eq!(
        test_peer.multiaddrs.failure(&test_multiaddr),
        // FIX: message said "should have one failure" while asserting Some(0).
        Some(0),
        "should reset failure count"
    );
}

// AddNewListenAddr should append a new listen multiaddr.
#[tokio::test]
async fn should_add_new_listen_on_add_new_listen_addr() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let self_id = mgr.inner.peer_id.to_owned();
    let inner = mgr.core_inner();

    let listen_multiaddr = make_peer_multiaddr(2020, self_id.clone());
    inner.add_listen(listen_multiaddr.clone());
    assert!(!inner.listen().is_empty(), "should have listen address");

    let test_multiaddr = make_multiaddr(2077, Some(self_id));
    assert!(test_multiaddr != *listen_multiaddr);

    let add_listen_addr = PeerManagerEvent::AddNewListenAddr {
        addr: test_multiaddr.clone(),
    };
    mgr.poll_event(add_listen_addr).await;

    assert_eq!(inner.listen().len(), 2, "should have 2 listen addrs");
    assert!(
        inner.listen().contains(&test_multiaddr),
        "should add new listen multiaddr"
    );
}

// A listen multiaddr without our id must get the id appended before storage.
#[tokio::test]
async fn should_push_id_to_listen_multiaddr_if_not_included_on_add_new_listen_addr() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let self_id = mgr.inner.peer_id.to_owned();
    let inner = mgr.core_inner();

    let test_multiaddr = make_multiaddr(2077, None);
    assert!(inner.listen().is_empty(), "should not have any listen addr");

    let add_listen_addr = PeerManagerEvent::AddNewListenAddr {
        addr: test_multiaddr.clone(),
    };
    mgr.poll_event(add_listen_addr).await;

    let with_id = make_multiaddr(2077, Some(self_id));
    assert_eq!(inner.listen().len(), 1, "should have one listen addr");
    assert!(
        inner.listen().contains(&with_id),
        "should add new listen multiaddr"
    );
}

// RemoveListenAddr should drop a previously registered listen multiaddr.
#[tokio::test]
async fn should_remove_listen_on_remove_listen_addr() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let self_id = mgr.inner.peer_id.to_owned();
    let inner = mgr.core_inner();

    let listen_multiaddr = make_peer_multiaddr(2020, self_id.clone());
    inner.add_listen(listen_multiaddr.clone());
    assert!(
        inner.listen().contains(&listen_multiaddr),
        "should contains listen addr"
    );

    let remove_listen_addr = PeerManagerEvent::RemoveListenAddr {
        addr: make_multiaddr(2020, Some(self_id)),
    };
    mgr.poll_event(remove_listen_addr).await;

    assert_eq!(inner.listen().len(), 0, "should have 0 listen addrs");
}

// Removal must also match a listen addr stored WITH our id when the event's
// addr omits it.
#[tokio::test]
async fn should_remove_listen_even_if_no_peer_id_included_on_remove_listen_addr() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let self_id = mgr.inner.peer_id.to_owned();
    let inner = mgr.core_inner();

    let listen_multiaddr = make_peer_multiaddr(2020, self_id.clone());
    inner.add_listen(listen_multiaddr.clone());
    assert!(
        inner.listen().contains(&listen_multiaddr),
        "should contains listen addr"
    );

    let remove_listen_addr = PeerManagerEvent::RemoveListenAddr {
        addr: make_multiaddr(2020, None),
    };
    mgr.poll_event(remove_listen_addr).await;

    assert_eq!(inner.listen().len(), 0, "should have 0 listen addrs");
}

// random_addrs must always include our own listen addresses.
#[tokio::test]
async fn should_always_include_our_listen_addrs_in_return_from_manager_handle_random_addrs() {
    let (mgr, _conn_rx) = make_manager(0, 20);
    let self_id = mgr.inner.peer_id.to_owned();
    let inner = mgr.core_inner();

    // FIX: restore the `Vec<_>` turbofish target lost during extraction.
    let listen_multiaddrs = (0..5)
        .map(|port| make_peer_multiaddr(port + 9000, self_id.clone()))
        .collect::<Vec<_>>();
    for ma in listen_multiaddrs.iter() {
        inner.add_listen(ma.clone());
    }

    let handle = mgr.inner.handle();
    let addrs = handle.random_addrs(100, 1.into());
    assert!(
        !listen_multiaddrs.iter().any(|lma| !addrs.contains(&*lma)),
        "should include our listen addresses"
    );
}

// AlwaysAllow-tagged peers may connect even past the connection limit.
#[tokio::test]
async fn should_accept_always_allow_peer_even_if_we_reach_max_connections_on_new_session() {
    let (mut mgr, _conn_rx) = make_manager(0, 10);
    let _remote_peers = make_sessions(&mut mgr, 10, 5000, SessionType::Outbound).await;
    let peer = make_peer(2019);
    let always_allow_peer = make_peer(2077);
    always_allow_peer.tags.insert(PeerTag::AlwaysAllow).unwrap();

    let inner = mgr.core_inner();
    inner.add_peer(always_allow_peer.clone());
    assert_eq!(inner.connected(), 10, "should have 10 connections");

    // First one without AlwaysAllow tag
    let sess_ctx = SessionContext::make(
        SessionId::new(233),
        peer.multiaddrs.all_raw().pop().expect("peer multiaddr"),
        SessionType::Inbound,
        peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: peer.owned_id(),
        pubkey: peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;
    assert_eq!(inner.connected(), 10, "should remain 10 connections");

    // Now peer has AlwaysAllow tag
    let sess_ctx = SessionContext::make(
        SessionId::new(666),
        always_allow_peer
            .multiaddrs
            .all_raw()
            .pop()
            .expect("peer multiaddr"),
        SessionType::Inbound,
        always_allow_peer
.owned_pubkey()
            .expect("always allow peer's pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: always_allow_peer.owned_id(),
        pubkey: always_allow_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    // FIX: message said "remain 11" although the count just grew from 10 to 11.
    assert_eq!(inner.connected(), 11, "should now have 11 connections");
    let session = inner.session(666.into()).expect("should have session");
    assert_eq!(
        session.peer.id,
        always_allow_peer.id,
        // FIX: typo "alway" in assertion message.
        "should be always allow peer"
    );
}

// With allowlist_only enabled, routine dialing must only target allowlisted
// peers (which are inserted by the config and tagged AlwaysAllow).
#[tokio::test]
async fn should_only_connect_peers_in_allowlist_if_enable_allowlist_only() {
    let manager_pubkey = make_pubkey();
    let manager_id = manager_pubkey.peer_id();
    let mut peer_dat_file = std::env::temp_dir();
    peer_dat_file.push("peer.dat");

    let peer_trust_config = Arc::new(TrustMetricConfig::default());
    let peer_fatal_ban = Duration::from_secs(50);
    let peer_soft_ban = Duration::from_secs(10);
    let test_peer = make_peer(2077);
    let another_peer = make_peer(2020);

    let config = PeerManagerConfig {
        our_id: manager_id,
        pubkey: manager_pubkey,
        bootstraps: Default::default(),
        allowlist: vec![test_peer.id.to_owned()],
        allowlist_only: true,
        peer_trust_config,
        peer_fatal_ban,
        peer_soft_ban,
        max_connections: 10,
        same_ip_conn_limit: 99,
        inbound_conn_limit: 5,
        outbound_conn_limit: 5,
        routine_interval: Duration::from_secs(10),
        peer_dat_file,
    };

    let (conn_tx, mut conn_rx) = unbounded();
    let (mgr_tx, mgr_rx) = unbounded();
    let manager = PeerManager::new(config, mgr_rx, conn_tx);
    let inner = manager.inner();
    inner.add_peer(another_peer);

    let allowed_peer = inner
        .peer(&test_peer.id)
        .expect("should be inserted through config");
    // Add multiaddrs to peer inserted by allowlist
    allowed_peer.multiaddrs.insert(test_peer.multiaddrs.all());
    assert!(allowed_peer.tags.contains(&PeerTag::AlwaysAllow));

    let mut manager = MockManager::new(manager, mgr_tx);
    manager.poll().await;

    let conn_event = conn_rx.next().await.expect("should have connect event");
    let multiaddrs_in_event = match conn_event {
        ConnectionEvent::Connect { addrs, .. } => addrs,
        _ => panic!("should be connect event"),
    };
    assert_eq!(
        multiaddrs_in_event.len(),
        1,
        // FIX: typo "on" in assertion message.
        "should have one multiaddr to connect"
    );

    let test_peer_multiaddr = test_peer.multiaddrs.all_raw().pop().expect("get multiaddr");
    assert_eq!(
        multiaddrs_in_event[0],
        test_peer_multiaddr,
        // FIX: typo "alway" in assertion message.
        "should be always allow peer"
    );
}

// With allowlist_only enabled, inbound sessions from peers outside the
// allowlist must be rejected.
#[tokio::test]
async fn should_only_accept_incoming_from_peer_in_allowlist_if_enable_allowlist_only() {
    let manager_pubkey = make_pubkey();
    let manager_id = manager_pubkey.peer_id();
    let mut peer_dat_file = std::env::temp_dir();
    peer_dat_file.push("peer.dat");

    let peer_trust_config = Arc::new(TrustMetricConfig::default());
    let peer_fatal_ban = Duration::from_secs(50);
    let peer_soft_ban = Duration::from_secs(10);
    let test_peer = make_peer(2077);
    let another_peer = make_peer(2020);

    let config = PeerManagerConfig {
        our_id: manager_id,
        pubkey: manager_pubkey,
        bootstraps: Default::default(),
        allowlist: vec![test_peer.id.to_owned()],
        allowlist_only: true,
        peer_trust_config,
        peer_fatal_ban,
        peer_soft_ban,
        max_connections: 10,
        same_ip_conn_limit: 9,
        inbound_conn_limit: 5,
        outbound_conn_limit: 5,
        routine_interval: Duration::from_secs(10),
        peer_dat_file,
    };

    let (conn_tx, _conn_rx) = unbounded();
    let (mgr_tx, mgr_rx) = unbounded();
    let manager = PeerManager::new(config, mgr_rx, conn_tx);
    let inner = manager.inner();
    inner.add_peer(another_peer.clone());

    let allowed_peer = inner
        .peer(&test_peer.id)
        .expect("should be inserted through config");
    assert!(allowed_peer.tags.contains(&PeerTag::AlwaysAllow));

    let mut manager = MockManager::new(manager, mgr_tx);
    assert_eq!(inner.connected(), 0, "should have zero connections");

    // First one without AlwaysAllow tag
    let sess_ctx = SessionContext::make(
        SessionId::new(233),
        another_peer
            .multiaddrs
            .all_raw()
            .pop()
            .expect("peer multiaddr"),
        SessionType::Inbound,
        another_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: another_peer.owned_id(),
        pubkey: another_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    manager.poll_event(new_session).await;
    assert_eq!(inner.connected(), 0, "should remain 0 connections");

    // Now with AlwaysAllow tag
    let sess_ctx = SessionContext::make(
        SessionId::new(666),
        test_peer
            .multiaddrs
            .all_raw()
            .pop()
            .expect("peer multiaddr"),
        SessionType::Inbound,
        test_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
        ctx: sess_ctx.arced(),
    };
    manager.poll_event(new_session).await;
    assert_eq!(inner.connected(), 1, "should have 1 connection");
}

// Fatal trust feedback bans the peer for the fatal-ban duration, stops its
// trust metric and disconnects its session.
#[tokio::test]
async fn should_disconnect_and_ban_peer_for_fatal_feedback_on_trust_metric() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let target_sid = test_peer.session_id();

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Fatal("fatal".to_owned()),
    };
    mgr.poll_event(feedback).await;

    assert!(test_peer.banned(), "should be banned");
    assert_eq!(
        test_peer.tags.get_banned_until().expect("get banned until"),
        time::now() + mgr.config().peer_fatal_ban.as_secs(),
        "should use fatal ban duration"
    );
    let trust_metric = test_peer.trust_metric().expect("get trust metric");
    assert!(!trust_metric.is_started(), "should stop trust metric");

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => {
            assert_eq!(sid, target_sid, "should be disconnected session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// AlwaysAllow peers are exempt from the fatal-feedback ban.
#[tokio::test]
async fn should_exclude_always_allow_peer_from_fatal_feedback_ban_on_trust_metric() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let inner = mgr.core_inner();
    test_peer.tags.insert(PeerTag::AlwaysAllow).unwrap();
    inner.add_peer(test_peer.clone());

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Fatal("fatal".to_owned()),
    };
    mgr.poll_event(feedback).await;

    assert!(!test_peer.banned(), "should not ban");
    let trust_metric = test_peer.trust_metric().expect("get trust metric");
    assert!(trust_metric.is_started(), "should continue trust metric");
    assert_eq!(inner.connected(), 1, "should not disconnect peer");
}

// Bad feedback records exactly one bad trust event.
#[tokio::test]
async fn should_add_one_bad_event_for_bad_feedback_on_trust_metric() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("trust metric");

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Bad("bad".to_owned()),
    };
    mgr.poll_event(feedback).await;

    assert_eq!(
        trust_metric.bad_events_count(),
        1,
        "should have one bad event count"
    );
}

// Worse feedback records ten bad trust events.
#[tokio::test]
async fn should_add_ten_bad_events_for_worse_feedback_on_trust_metric() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("trust metric");

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Worse("worse".to_owned()),
    };
    mgr.poll_event(feedback).await;

    assert_eq!(
        trust_metric.bad_events_count(),
        10,
        "should have ten bad events count"
    );
}

// Worse feedback on a peer whose score already fell below 40 triggers a
// soft ban plus disconnect.
#[tokio::test]
async fn should_disconnect_and_soft_ban_peer_if_below_fourty_score_on_worse_feedback_on_trust_metric() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("trust metric");
    let test_sid = test_peer.session_id();

    for _ in 0..4 {
        trust_metric.bad_events(1);
        trust_metric.enter_new_interval();
    }
    assert!(
        trust_metric.trust_score() < 40,
        "should have score lower than 40"
    );

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Worse("worse".to_owned()),
    };
    mgr.poll_event(feedback).await;

    assert!(test_peer.banned(), "should be banned");
    assert_eq!(
        test_peer.tags.get_banned_until().expect("get banned until"),
        time::now() + mgr.config().peer_soft_ban.as_secs(),
        "should use soft ban duration"
    );
    let trust_metric = test_peer.trust_metric().expect("get trust metric");
    assert!(!trust_metric.is_started(), "should stop trust metric");

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => {
            // FIX: message said "replaced session id"; nothing is replaced here —
            // this is the disconnected session.
            assert_eq!(sid, test_sid, "should be disconnected session id")
        }
        _ => panic!("should be disconnect event"),
    }
}

// A freshly created trust metric (zero intervals) must not get the peer
// knocked out by a single Worse feedback.
#[tokio::test]
async fn should_not_knock_out_peer_just_set_up_trust_metric_on_worse_feedback_on_trust_metric() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("trust metric");

    assert_eq!(
        trust_metric.good_events_count(),
        0,
        "should not have any good events"
    );
    assert_eq!(
        trust_metric.bad_events_count(),
        0,
        "should not have any bad events"
    );
    assert_eq!(trust_metric.intervals(), 0, "should not have any intervals");

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Worse("worse".to_owned()),
    };
    mgr.poll_event(feedback).await;

    let inner = mgr.core_inner();
    assert!(!test_peer.banned(), "should not ban");
    let trust_metric = test_peer.trust_metric().expect("get trust metric");
    assert!(trust_metric.is_started(), "should continue trust metric");
    assert_eq!(inner.connected(), 1, "should still connected");
}

// AlwaysAllow peers are exempt from the below-40-score soft ban as well.
#[tokio::test]
async fn should_not_punish_always_allow_peer_when_its_score_below_fourty_on_worse_feedback_on_trust_metric() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("trust metric");

    for _ in 0..4 {
        trust_metric.bad_events(1);
        trust_metric.enter_new_interval();
    }
    assert!(
        trust_metric.trust_score() < 40,
        "should have score lower than 40"
    );

    let inner = mgr.core_inner();
    test_peer.tags.insert(PeerTag::AlwaysAllow).unwrap();
    inner.add_peer(test_peer.clone());

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Worse("worse".to_owned()),
    };
    mgr.poll_event(feedback).await;

    assert!(!test_peer.banned(), "should not ban");
    let trust_metric = test_peer.trust_metric().expect("get trust metric");
    assert!(trust_metric.is_started(), "should continue trust metric");
    assert_eq!(inner.connected(), 1, "should still connected");
}

// Neutral feedback must not touch either event counter.
#[tokio::test]
async fn should_do_nothing_for_neutral_feedback_on_trust_metric() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);
    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");
    let trust_metric = test_peer.trust_metric().expect("trust metric");

    let feedback = PeerManagerEvent::TrustMetric {
        pid: test_peer.owned_id(),
        feedback: TrustFeedback::Neutral,
    };
    mgr.poll_event(feedback).await;

    assert_eq!(
        trust_metric.good_events_count(),
        0,
        "should not increase good events"
    );
    assert_eq!(
        trust_metric.bad_events_count(),
        0,
        "should not increase bad events"
    );
}

#[tokio::test]
async fn should_add_one_bad_event_for_good_feedback_on_trust_metric() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); let trust_metric = test_peer.trust_metric().expect("trust metric"); let feedback = PeerManagerEvent::TrustMetric { pid: test_peer.owned_id(), feedback: TrustFeedback::Good, }; mgr.poll_event(feedback).await; assert_eq!( trust_metric.good_events_count(), 1, "should increase one good event" ); } #[tokio::test] async fn should_pick_good_peer_first_on_finding_connectable_peers() { let (mut mgr, mut conn_rx) = make_manager(0, 4); let outbound_conn_limit = mgr.config().outbound_conn_limit; let pre_connected_count = outbound_conn_limit - 1; let _remote_peers = make_sessions( &mut mgr, pre_connected_count as u16, 5000, SessionType::Outbound, ) .await; let inner = mgr.core_inner(); assert_eq!( inner.connected(), pre_connected_count, "should have pre connected connections just one below outbound conn limit" ); // Fill connecting attempts, left one for our test let fill_peers = (3..4 + MAX_CONNECTING_MARGIN - 1) .map(|port| make_peer(6000u16 + port as u16)) .collect::>(); mgr.inner.set_connecting(fill_peers); let good_peer = make_peer(2077); let normal_peer = make_peer(2020); inner.add_peer(good_peer.clone()); inner.add_peer(normal_peer); let trust_metric = TrustMetric::new(Arc::clone(&mgr.config().peer_trust_config)); good_peer.set_trust_metric(trust_metric.clone()); for _ in 0..10 { trust_metric.good_events(1); trust_metric.enter_new_interval(); } assert!( trust_metric.trust_score() > GOOD_TRUST_SCORE, "should have better score" ); mgr.poll().await; let conn_event = conn_rx.next().await.expect("should have connect event"); let multiaddrs_in_event = match conn_event { ConnectionEvent::Connect { addrs, .. 
} => addrs, _ => panic!("should be connect event"), }; assert_eq!( multiaddrs_in_event.len(), 1, "should have one connecting multiaddr" ); let expect_multiaddrs = good_peer.multiaddrs.all_raw(); assert_eq!( multiaddrs_in_event, expect_multiaddrs, "should be peer with better score" ); } #[tokio::test] async fn should_setup_trust_metric_if_none_on_session_closed() { let (mut mgr, _conn_rx) = make_manager(2, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); test_peer.remove_trust_metric(); let session_closed = PeerManagerEvent::SessionClosed { pid: test_peer.owned_id(), sid: test_peer.session_id(), }; mgr.poll_event(session_closed).await; assert!( test_peer.trust_metric().is_some(), "should set up trust metric" ); } #[tokio::test] async fn should_setup_trust_metric_if_none_on_session_failed() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); test_peer.remove_trust_metric(); let expect_sid = test_peer.session_id(); let session_failed = PeerManagerEvent::SessionFailed { sid: expect_sid, kind: SessionErrorKind::Io(std::io::ErrorKind::Other.into()), }; mgr.poll_event(session_failed).await; assert!( test_peer.trust_metric().is_some(), "should set up trust metric" ); let trust_metric = test_peer.trust_metric().expect("trust metric"); assert!(!trust_metric.is_started(), "should not start"); assert_eq!( trust_metric.bad_events_count(), 1, "should have 1 bad event" ); } #[tokio::test] async fn should_setup_trust_metric_if_none_on_peer_misbehave() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); test_peer.remove_trust_metric(); let peer_misbehave = PeerManagerEvent::Misbehave { pid: 
test_peer.owned_id(), kind: MisbehaviorKind::PingTimeout, }; mgr.poll_event(peer_misbehave).await; assert!( test_peer.trust_metric().is_some(), "should set up trust metric" ); let trust_metric = test_peer.trust_metric().expect("trust metric"); assert!(trust_metric.is_started(), "should be started"); assert_eq!( trust_metric.bad_events_count(), 1, "should have 1 bad event" ); } #[tokio::test] async fn should_setup_trust_metric_if_none_on_session_blocked() { let (mut mgr, _conn_rx) = make_manager(0, 20); let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let test_peer = remote_peers.first().expect("get first peer"); test_peer.remove_trust_metric(); let sess_ctx = SessionContext::make( test_peer.session_id(), test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"), SessionType::Outbound, test_peer.owned_pubkey().expect("pubkey"), ); let session_blocked = PeerManagerEvent::SessionBlocked { ctx: sess_ctx.arced(), }; mgr.poll_event(session_blocked).await; assert!( test_peer.trust_metric().is_some(), "should set up trust metric" ); let trust_metric = test_peer.trust_metric().expect("trust metric"); assert!(trust_metric.is_started(), "should be started"); assert_eq!( trust_metric.bad_events_count(), 1, "should have 1 bad event" ); } #[tokio::test] async fn should_able_to_tag_peer() { let (mgr, _conn_rx) = make_manager(0, 20); let handle = mgr.inner.handle(); let peer = make_peer(2077); handle.tag(&peer.id, PeerTag::Consensus).unwrap(); let peer = mgr.core_inner().peer(&peer.id).unwrap(); assert!(peer.tags.contains(&PeerTag::Consensus)); } #[tokio::test] async fn should_able_to_untag_peer() { let (mgr, _conn_rx) = make_manager(0, 20); let handle = mgr.inner.handle(); let peer = make_peer(2077); handle.tag(&peer.id, PeerTag::Consensus).unwrap(); let peer = mgr.core_inner().peer(&peer.id).unwrap(); assert!(peer.tags.contains(&PeerTag::Consensus)); handle.untag(&peer.id, &PeerTag::Consensus); 
assert!(!peer.tags.contains(&PeerTag::Consensus)); } #[tokio::test] async fn should_remove_old_consensus_peer_tag_when_tag_consensus() { let (mgr, _conn_rx) = make_manager(0, 20); let handle = mgr.inner.handle(); let peer = make_peer(2077); handle.tag(&peer.id, PeerTag::Consensus).unwrap(); let peer = mgr.core_inner().peer(&peer.id).unwrap(); assert!(peer.tags.contains(&PeerTag::Consensus)); let new_consensus = make_peer(3077); handle.tag_consensus(vec![new_consensus.owned_id()]); let new_consensus = mgr.core_inner().peer(&new_consensus.id).unwrap(); assert!(new_consensus.tags.contains(&PeerTag::Consensus)); assert!(!peer.tags.contains(&PeerTag::Consensus)); } #[tokio::test] async fn should_reject_same_ip_connection_when_reach_limit_on_new_session() { let manager_pubkey = make_pubkey(); let manager_id = manager_pubkey.peer_id(); let mut peer_dat_file = std::env::temp_dir(); peer_dat_file.push("peer.dat"); let peer_trust_config = Arc::new(TrustMetricConfig::default()); let peer_fatal_ban = Duration::from_secs(50); let peer_soft_ban = Duration::from_secs(10); let config = PeerManagerConfig { our_id: manager_id, pubkey: manager_pubkey, bootstraps: Default::default(), allowlist: vec![], allowlist_only: false, peer_trust_config, peer_fatal_ban, peer_soft_ban, max_connections: 10, same_ip_conn_limit: 1, inbound_conn_limit: 5, outbound_conn_limit: 5, routine_interval: Duration::from_secs(10), peer_dat_file, }; let (conn_tx, mut conn_rx) = unbounded(); let (mgr_tx, mgr_rx) = unbounded(); let manager = PeerManager::new(config, mgr_rx, conn_tx); let mut mgr = MockManager::new(manager, mgr_tx); make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await; let same_ip_peer = make_peer(9527); let expect_sid = same_ip_peer.session_id(); // Save same ip peer let inner = mgr.core_inner(); inner.add_peer(same_ip_peer.clone()); let sess_ctx = SessionContext::make( SessionId::new(99), same_ip_peer.multiaddrs.all_raw().pop().unwrap(), SessionType::Outbound, 
// Tail of `should_reject_same_ip_connection_when_reach_limit_on_new_session`:
// completes the SessionContext::make(..) call opened on the previous line.
        same_ip_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: same_ip_peer.owned_id(),
        pubkey: same_ip_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    // Session must be refused: count unchanged, peer keeps its old session id.
    assert_eq!(inner.connected(), 1, "should not increase conn count");
    assert_eq!(
        same_ip_peer.session_id(),
        expect_sid,
        "should not change peer session id"
    );

    // The offender gets a temporary same-ip ban.
    let inserted_same_ip_peer = inner.peer(&same_ip_peer.id).unwrap();
    assert_eq!(
        inserted_same_ip_peer.tags.get_banned_until(),
        Some(time::now() + SAME_IP_LIMIT_BAN.as_secs())
    );

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// Once the outbound connection limit is reached, polling the manager must not
// emit any new connect attempt.
// NOTE(review): "dail" looks like a typo for "dial" — left unchanged because
// this pass only touches comments.
#[tokio::test]
async fn should_not_dail_new_peer_after_reach_outbound_conn_limit() {
    let (mut mgr, mut conn_rx) = make_manager(0, 4);
    let outbound_conn_limit = mgr.config().outbound_conn_limit;

    let _remote_peers = make_sessions(
        &mut mgr,
        outbound_conn_limit as u16,
        5000,
        SessionType::Outbound,
    )
    .await;

    let inner = mgr.core_inner();
    assert_eq!(
        inner.connected(),
        outbound_conn_limit,
        "should have pre connected connections just one below outbound conn limit"
    );

    mgr.poll().await;

    // try_next() returning Err means the channel is empty — no event emitted.
    match conn_rx.try_next() {
        Err(_) => (),
        _ => panic!("should not have any event"),
    }
}

// An inbound session beyond the inbound connection limit is disconnected.
#[tokio::test]
async fn should_reject_inbound_conn_when_reach_inbound_conn_limit() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);
    let inbound_conn_limit = mgr.config().inbound_conn_limit;

    let _remote_peers = make_sessions(
        &mut mgr,
        inbound_conn_limit as u16,
        5000,
        SessionType::Inbound,
    )
    .await;

    let inner = mgr.core_inner();
    assert_eq!(
        inner.connected(),
        inbound_conn_limit,
        "should have reach inbound conn limit"
    );

    let remote_pubkey = make_pubkey();
    let remote_peer_id = remote_pubkey.peer_id();
    let remote_addr = make_multiaddr(6000, Some(remote_pubkey.peer_id()));

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        remote_addr.clone(),
        SessionType::Inbound,
        remote_pubkey.clone(),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: remote_peer_id.clone(),
        pubkey: remote_pubkey.clone(),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    assert_eq!(
        inner.connected(),
        inbound_conn_limit,
        "should not accept inbound connection"
    );

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// Peers listed in the config allowlist receive the AlwaysAllow tag and are
// accepted even when the inbound connection limit is already reached.
#[tokio::test]
async fn should_accept_peer_in_allowlist_even_reach_inbound_conn_limit() {
    let manager_pubkey = make_pubkey();
    let manager_id = manager_pubkey.peer_id();
    let mut peer_dat_file = std::env::temp_dir();
    peer_dat_file.push("peer.dat");

    let peer_trust_config = Arc::new(TrustMetricConfig::default());
    let peer_fatal_ban = Duration::from_secs(50);
    let peer_soft_ban = Duration::from_secs(10);

    let test_peer = make_peer(2077);

    let config = PeerManagerConfig {
        our_id: manager_id,
        pubkey: manager_pubkey,
        bootstraps: Default::default(),
        allowlist: vec![test_peer.id.to_owned()],
        allowlist_only: false,
        peer_trust_config,
        peer_fatal_ban,
        peer_soft_ban,
        max_connections: 10,
        same_ip_conn_limit: 9,
        inbound_conn_limit: 5,
        outbound_conn_limit: 5,
        routine_interval: Duration::from_secs(10),
        peer_dat_file,
    };

    let (conn_tx, _conn_rx) = unbounded();
    let (mgr_tx, mgr_rx) = unbounded();
    let manager = PeerManager::new(config, mgr_rx, conn_tx);

    let inner = manager.inner();
    let allowed_peer = inner
        .peer(&test_peer.id)
        .expect("should be inserted through config");
    assert!(allowed_peer.tags.contains(&PeerTag::AlwaysAllow));

    let mut manager = MockManager::new(manager, mgr_tx);
    assert_eq!(inner.connected(), 0, "should have zero connections");

    let inbound_conn_limit = manager.config().inbound_conn_limit;
    let _remote_peers = make_sessions(
        &mut manager,
        inbound_conn_limit as u16,
        5000,
        SessionType::Inbound,
    )
    .await;

    let sess_ctx = SessionContext::make(
        SessionId::new(666),
        test_peer
            .multiaddrs
            .all_raw()
            .pop()
            .expect("peer multiaddr"),
        SessionType::Inbound,
        test_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
    );
    let new_session = PeerManagerEvent::NewSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
        ctx: sess_ctx.arced(),
    };
    manager.poll_event(new_session).await;

    assert_eq!(
        inner.connected(),
        inbound_conn_limit + 1,
        "should accept peer in allowlist"
    );
}

// A second, unidentified session from an already-connected peer is dropped.
#[tokio::test]
async fn should_reject_new_connection_for_same_peer_on_unidentified_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 20);

    let remote_peers = make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;
    let test_peer = remote_peers.first().expect("get first peer");

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        test_peer.multiaddrs.all_raw().pop().expect("get multiaddr"),
        SessionType::Outbound,
        test_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::UnidentifiedSession {
        pid: test_peer.owned_id(),
        pubkey: test_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// Same-ip limit is also enforced for unidentified sessions; the offender is
// banned and the new session disconnected.
#[tokio::test]
async fn should_reject_same_ip_connection_when_reach_limit_on_unidentified_session() {
    let manager_pubkey = make_pubkey();
    let manager_id = manager_pubkey.peer_id();
    let mut peer_dat_file = std::env::temp_dir();
    peer_dat_file.push("peer.dat");

    let peer_trust_config = Arc::new(TrustMetricConfig::default());
    let peer_fatal_ban = Duration::from_secs(50);
    let peer_soft_ban = Duration::from_secs(10);

    let config = PeerManagerConfig {
        our_id: manager_id,
        pubkey: manager_pubkey,
        bootstraps: Default::default(),
        allowlist: vec![],
        allowlist_only: false,
        peer_trust_config,
        peer_fatal_ban,
        peer_soft_ban,
        max_connections: 10,
        same_ip_conn_limit: 1,
        inbound_conn_limit: 5,
        outbound_conn_limit: 5,
        routine_interval: Duration::from_secs(10),
        peer_dat_file,
    };

    let (conn_tx, mut conn_rx) = unbounded();
    let (mgr_tx, mgr_rx) = unbounded();
    let manager = PeerManager::new(config, mgr_rx, conn_tx);
    let mut mgr = MockManager::new(manager, mgr_tx);

    make_sessions(&mut mgr, 1, 5000, SessionType::Outbound).await;

    let same_ip_peer = make_peer(9527);

    // Save same ip peer
    let inner = mgr.core_inner();
    inner.add_peer(same_ip_peer.clone());

    let sess_ctx = SessionContext::make(
        SessionId::new(99),
        same_ip_peer.multiaddrs.all_raw().pop().unwrap(),
        SessionType::Outbound,
        same_ip_peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::UnidentifiedSession {
        pid: same_ip_peer.owned_id(),
        pubkey: same_ip_peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let inserted_same_ip_peer = inner.peer(&same_ip_peer.id).unwrap();
    assert_eq!(
        inserted_same_ip_peer.tags.get_banned_until(),
        Some(time::now() + SAME_IP_LIMIT_BAN.as_secs())
    );

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 99.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }
}

// AlwaysAllow bypasses max_connections: a normal peer is disconnected, the
// AlwaysAllow peer is kept.
#[tokio::test]
async fn should_accept_always_allow_peer_even_if_we_reach_max_connections_on_unidentified_session() {
    let (mut mgr, mut conn_rx) = make_manager(0, 10);

    let _remote_peers = make_sessions(&mut mgr, 10, 5000, SessionType::Outbound).await;

    let peer = make_peer(2019);
    let always_allow_peer = make_peer(2077);
    always_allow_peer.tags.insert(PeerTag::AlwaysAllow).unwrap();

    let inner = mgr.core_inner();
    inner.add_peer(always_allow_peer.clone());
    assert_eq!(inner.connected(), 10, "should have 10 connections");

    // First one without AlwaysAllow tag
    let sess_ctx = SessionContext::make(
        SessionId::new(233),
        peer.multiaddrs.all_raw().pop().expect("peer multiaddr"),
        SessionType::Inbound,
        peer.owned_pubkey().expect("pubkey"),
    );
    let new_session = PeerManagerEvent::UnidentifiedSession {
        pid: peer.owned_id(),
        pubkey: peer.owned_pubkey().expect("pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    let conn_event = conn_rx.next().await.expect("should have disconnect event");
    match conn_event {
        ConnectionEvent::Disconnect(sid) => assert_eq!(sid, 233.into(), "should be new session id"),
        _ => panic!("should be disconnect event"),
    }

    // Now peer has AlwaysAllow tag
    let sess_ctx = SessionContext::make(
        SessionId::new(666),
        always_allow_peer
            .multiaddrs
            .all_raw()
            .pop()
            .expect("peer multiaddr"),
        SessionType::Inbound,
        always_allow_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
    );
    let new_session = PeerManagerEvent::UnidentifiedSession {
        pid: always_allow_peer.owned_id(),
        pubkey: always_allow_peer
            .owned_pubkey()
            .expect("always allow peer's pubkey"),
        ctx: sess_ctx.arced(),
    };
    mgr.poll_event(new_session).await;

    match conn_rx.try_next() {
        Err(_) => (), // Err means channel is empty, it's expected
        _ => panic!("should not have any disconnect event"),
    }
}

// A connecting attempt older than MAX_CONNECTING_TIMEOUT is dropped on poll.
#[tokio::test]
async fn should_remove_connecting_attempt_when_reach_timeout() {
    let (mut mgr, _conn_rx) = make_manager(0, 20);

    let test_peer = make_peer(9527);
    let mut target_attempt = ConnectingAttempt::new(test_peer.clone());
    // Backdate the attempt past the timeout.
    target_attempt.set_at(MAX_CONNECTING_TIMEOUT + Duration::from_secs(1));

    let inner = mgr.core_inner();
    inner.add_peer(test_peer);
    assert_eq!(inner.connected(), 0, "should have zero connected");

    mgr.connecting_mut().insert(target_attempt);
    assert_eq!(
        mgr.connecting().len(),
        1,
        "should have one connecting attempt"
    );

    mgr.poll().await;

    assert_eq!(
        mgr.connecting().len(),
        0,
        "should have 0 connecting attempt"
    );
    assert_eq!(inner.connected(), 0, "should have 0 connected");
}

================================================
FILE: core/network/src/peer_manager/time.rs
================================================

use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Current unix timestamp in whole seconds.
///
/// Saturates via `duration_since`'s error recovery instead of panicking if the
/// system clock reports a time before `UNIX_EPOCH`.
pub fn now() -> u64 {
    duration_since(SystemTime::now(), UNIX_EPOCH).as_secs()
}

/// Duration between two `SystemTime`s that never fails.
///
/// `SystemTime::duration_since` errs when `early` is actually later than
/// `now`; in that case the error still carries the (positive) gap, which we
/// return instead — so callers always get the absolute distance.
pub fn duration_since(now: SystemTime, early: SystemTime) -> Duration {
    now.duration_since(early).unwrap_or_else(|e| e.duration())
}

================================================
FILE: core/network/src/peer_manager/trust_metric.rs
================================================

use futures::{
    future::{self, AbortHandle},
    pin_mut,
};
use futures_timer::Delay;
use parking_lot::RwLock;

use std::{
    future::Future,
    ops::{Add, Deref},
    pin::Pin,
    sync::atomic::{AtomicUsize, Ordering::SeqCst},
    sync::Arc,
    task::{Context, Poll},
    time::{Duration, Instant},
};

pub const PROPORTIONAL_WEIGHT: f64 = 0.4;
pub const INTERGRAL_WEIGHT: f64 = 0.6;
pub const OPTIMISTIC_HISTORY_WEIGHT: f64 = 0.8;
pub const DERIVATIVE_POSITIVE_WEIGHT: f64 = 0.0;
pub const DERIVATIVE_NEGATIVE_WEIGHT: f64 = 0.1;
pub const INITIAL_HISTORY_VALUE: f64 = 0.8f64;
pub const KNOCK_OUT_SCORE: u8 = 40;
pub const GOOD_INTERVAL_CAP: usize = 30;
pub const DEFAULT_INTERVAL_DURATION: Duration = Duration::from_secs(60);
pub const DEFAULT_MAX_HISTORY_DURATION: Duration = Duration::from_secs(24 * 60 * 60 * 10); // 10 day

// HISTORY_TRUST_WEIGHTS depends only on max_intervals and
// OPTIMISTIC_HISTORY_WEIGHT. Right now, all peers share the same
// configuration, so these values can be calculated once.
lazy_static::lazy_static!
{ static ref HISTORY_TRUST_WEIGHTS: Arc>> = Arc::new(RwLock::new(Vec::new())); } #[derive(Debug)] pub struct TrustMetricConfig { interval: Duration, max_history: Duration, max_intervals: u64, max_faded_memorys: u64, } impl TrustMetricConfig { pub fn new(interval: Duration, max_history: Duration) -> Self { let partial_config = TrustMetricConfig { interval, max_history, max_intervals: 0, max_faded_memorys: 0, }; partial_config.finish() } pub fn interval(&self) -> Duration { self.interval } fn finish(mut self) -> Self { self.max_intervals = self.max_history.as_secs() / self.interval.as_secs(); self.max_faded_memorys = ((self.max_intervals as f64).log2().floor() as u64) + 1; log::debug!(target: "network-trust-metric", "max intervals {}", self.max_intervals); log::debug!(target: "network-trust-metric", "max faded memorys {}", self.max_faded_memorys); { *HISTORY_TRUST_WEIGHTS.write() = (1..=self.max_intervals) .map(|k| OPTIMISTIC_HISTORY_WEIGHT.powf((k - 1) as f64)) .collect::>(); } self } } impl Default for TrustMetricConfig { fn default() -> Self { let partial_config = TrustMetricConfig { interval: DEFAULT_INTERVAL_DURATION, max_history: DEFAULT_MAX_HISTORY_DURATION, max_intervals: 0, max_faded_memorys: 0, }; partial_config.finish() } } #[derive(Debug, Clone, Copy, PartialEq)] struct FadedMemory(f64); impl Deref for FadedMemory { type Target = f64; fn deref(&self) -> &Self::Target { &self.0 } } impl FadedMemory { fn new(history_value: f64) -> Self { FadedMemory(history_value) } } #[derive(Debug)] struct History { max_intervals: u64, max_memorys: u64, intervals: u64, memorys: Vec, aggregate_trust: f64, weights_sum: f64, } impl History { fn new(max_intervals: u64, max_memorys: u64) -> History { History { max_intervals, max_memorys, intervals: 0, memorys: Vec::new(), aggregate_trust: INITIAL_HISTORY_VALUE, weights_sum: 0f64, } } #[cfg(test)] fn intervals(&self) -> u64 { self.intervals } fn latest_trust_value(&self) -> f64 { self.memorys.first().map(|v| 
**v).unwrap_or_else(|| 0f64) } fn remember_interval(&mut self, trust_value: f64) { if self.intervals < self.max_intervals { self.intervals += 1; let i = self.intervals; self.weights_sum += match HISTORY_TRUST_WEIGHTS.read().get(i as usize - 1).cloned() { Some(v) => v, None => { log::warn!(target: "network-trust-metric", "precalculated history interval {} trust weight not found", i); OPTIMISTIC_HISTORY_WEIGHT.powf((i - 1) as f64) } }; } if self.intervals <= self.max_memorys { self.memorys.insert(0, FadedMemory::new(trust_value)); return; } // Update faded memorys let memento = self.memorys.len() - 1; self.memorys = (1..=memento) .map(|j| { let w = 2f64.powf(j as f64); let ftv = (*self.memorys[j - 1] + (*self.memorys[j] * (w - 1f64))) / w; FadedMemory::new(ftv) }) .collect::>(); self.memorys.insert(0, FadedMemory::new(trust_value)); } fn update_aggregate_trust(&mut self) { let intervals = self.intervals; if intervals < 1 { return; } self.aggregate_trust = (1..=intervals).map(|i| { let memory_idx = (i as f64).log2().floor() as usize; let i_hist_trust = match self.memorys.get(memory_idx).cloned() { Some(v) => *v, None => { log::error!(target: "network-trust-metric", "history interval {} trust value not found", i); 0f64 } }; let i_hist_weight = match HISTORY_TRUST_WEIGHTS.read().get(i as usize - 1).cloned() { Some(v) => v, None => { log::warn!(target: "network-trust-metric", "precalculated history interval {} weight not found", i); OPTIMISTIC_HISTORY_WEIGHT.powf((i - 1) as f64) } }; i_hist_trust * (i_hist_weight / self.weights_sum) }).sum::(); log::debug!(target: "network-trust-metric", "aggregate trust {}", self.aggregate_trust); } } #[derive(Debug)] pub struct Inner { config: Arc, history: RwLock, good_events: AtomicUsize, bad_events: AtomicUsize, } impl Inner { pub fn new(config: Arc) -> Self { let max_intervals = config.max_intervals; let max_memorys = config.max_faded_memorys; Inner { config, history: RwLock::new(History::new(max_intervals, max_memorys)), 
good_events: AtomicUsize::new(0), bad_events: AtomicUsize::new(0), } } pub fn trust_score(&self) -> u8 { (self.trust_value() * 100f64) as u8 } pub fn good_events(&self, num: usize) { let curr_good_events = self.good_events.load(SeqCst); if curr_good_events + num <= GOOD_INTERVAL_CAP { self.good_events.fetch_add(num, SeqCst); } else if curr_good_events < GOOD_INTERVAL_CAP { self.good_events.store(GOOD_INTERVAL_CAP, SeqCst); } } pub fn bad_events(&self, num: usize) { self.bad_events.fetch_add(num, SeqCst); } pub fn knock_out(&self) -> bool { self.trust_score() < KNOCK_OUT_SCORE } pub fn events(&self) -> (usize, usize) { let good_events = self.good_events.load(SeqCst); let bad_events = self.bad_events.load(SeqCst); (good_events, bad_events) } pub fn enter_new_interval(&self) { let latest_trust_value = self.trust_value(); log::debug!(target: "network-trust-metric", "enter new interval, lastest trust value {}", latest_trust_value); { let mut history = self.history.write(); history.remember_interval(latest_trust_value); history.update_aggregate_trust(); } self.good_events.store(0, SeqCst); self.bad_events.store(0, SeqCst); } pub fn reset_history(&self) { let max_intervals = self.config.max_intervals; let max_memorys = self.config.max_faded_memorys; *self.history.write() = History::new(max_intervals, max_memorys); self.good_events.store(0, SeqCst); self.bad_events.store(0, SeqCst); } fn trust_value(&self) -> f64 { let proportional_value = match self.proportional_value() { Some(v) => v, None => return self.history.read().latest_trust_value(), }; let intergral_value = self.intergral_value(); let deviation_value = proportional_value - intergral_value; let derivative_value = if deviation_value >= 0f64 { DERIVATIVE_POSITIVE_WEIGHT * deviation_value } else { DERIVATIVE_NEGATIVE_WEIGHT * deviation_value }; log::debug!(target: "network-trust-metric", "trust value components: r {:?}, h {}, d {}", proportional_value, intergral_value, derivative_value); proportional_value + 
intergral_value + derivative_value } fn proportional_value(&self) -> Option { let good_events = self.good_events.load(SeqCst); let total = good_events + self.bad_events.load(SeqCst); if total > 0 { Some((good_events as f64 / total as f64) * PROPORTIONAL_WEIGHT) } else { None } } fn intergral_value(&self) -> f64 { self.history.read().aggregate_trust * INTERGRAL_WEIGHT } } struct HeartBeat { inner: Arc, interval: Duration, delay: Delay, pause_save: Arc>>, interval_start: Instant, } impl HeartBeat { pub fn new( inner: Arc, interval: Duration, resume: Option, pause_save: Arc>>, ) -> Self { let delay = match resume { Some(resume) if interval > resume => Delay::new(interval - resume), // None or resume > interval _ => Delay::new(interval), }; HeartBeat { inner, interval, delay, pause_save, interval_start: Instant::now(), } } } impl Drop for HeartBeat { fn drop(&mut self) { let elapsed = self.interval_start.elapsed(); *self.pause_save.write() = Some(elapsed); } } impl Future for HeartBeat { type Output = ::Output; fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { let ecg = &mut self.as_mut(); loop { let interval = ecg.interval; let delay = &mut ecg.delay; pin_mut!(delay); crate::loop_ready!(delay.poll(ctx)); ecg.inner.enter_new_interval(); ecg.interval_start = Instant::now(); let next_interval = Instant::now().add(interval); ecg.delay.reset(next_interval); } Poll::Pending } } #[derive(Debug, Clone)] pub struct TrustMetric { inner: Arc, hb_handle: Arc>>, pause: Arc>>, } impl TrustMetric { pub fn new(config: Arc) -> Self { TrustMetric { inner: Arc::new(Inner::new(config)), hb_handle: Arc::new(RwLock::new(None)), pause: Arc::new(RwLock::new(None)), } } pub fn start(&self) { if self.hb_handle.read().is_some() { // Already started return; } let interval = self.inner.config.interval; let resume = self.pause.write().take(); let heart_beat = HeartBeat::new( Arc::clone(&self.inner), interval, resume, Arc::clone(&self.pause), ); let (heart_beat, hb_handle) = 
future::abortable(heart_beat); *self.hb_handle.write() = Some(hb_handle); tokio::spawn(heart_beat); } #[cfg(test)] pub fn is_started(&self) -> bool { self.hb_handle.read().is_some() } pub fn pause(&self) { if let Some(abort_handle) = self.hb_handle.write().take() { abort_handle.abort(); } } #[cfg(test)] pub fn bad_events_count(&self) -> usize { self.inner.bad_events.load(SeqCst) } #[cfg(test)] pub fn good_events_count(&self) -> usize { self.inner.good_events.load(SeqCst) } #[cfg(test)] pub fn intervals(&self) -> u64 { self.inner.history.read().intervals() } } impl Deref for TrustMetric { type Target = Arc; fn deref(&self) -> &Self::Target { &self.inner } } #[cfg(test)] mod tests { use super::{Inner, TrustMetricConfig, GOOD_INTERVAL_CAP}; use std::sync::{atomic::Ordering::SeqCst, Arc}; #[test] fn basic_metric_test() { // env_logger::init(); let config = Arc::new(TrustMetricConfig::default()); let metric = Inner::new(config); for _ in 0..20 { metric.good_events(1); metric.enter_new_interval(); } assert!(metric.trust_score() >= 95); for _ in 0..4 { metric.bad_events(1); metric.enter_new_interval(); } assert!(metric.trust_score() < 40); // For S for _ in 0..20 { metric.good_events(1); metric.enter_new_interval(); } assert!(metric.trust_score() > 90 && metric.trust_score() < 95); for i in 0..17 { metric.bad_events(10); metric.good_events(1); metric.enter_new_interval(); if i != 16 { metric.good_events(1); metric.enter_new_interval(); } } assert!(metric.trust_score() < 40); // For Z for _ in 0..20 { metric.good_events(1); metric.enter_new_interval(); } assert!(metric.trust_score() >= 90 && metric.trust_score() < 95); for _ in 0..200 { metric.bad_events(1); metric.good_events(1); metric.enter_new_interval(); } assert!(metric.trust_score() > 40); } #[test] fn good_interval_cap_test() { let config = Arc::new(TrustMetricConfig::default()); let metric = Inner::new(config); metric.good_events(GOOD_INTERVAL_CAP - 1); assert_eq!(metric.good_events.load(SeqCst), GOOD_INTERVAL_CAP 
- 1); metric.good_events(20); assert_eq!(metric.good_events.load(SeqCst), GOOD_INTERVAL_CAP); } } ================================================ FILE: core/network/src/protocols/core.rs ================================================ use std::collections::{HashMap, HashSet}; use std::iter::FromIterator; use std::time::Duration; use futures::channel::mpsc::UnboundedSender; use lazy_static::lazy_static; use parking_lot::RwLock; use tentacle::secio::PeerId; use tentacle::service::{ProtocolMeta, TargetProtocol}; use tentacle::ProtocolId; use crate::compression::Snappy; use crate::event::PeerManagerEvent; use crate::peer_manager::PeerManagerHandle; use crate::protocols::discovery::Discovery; use crate::protocols::identify::Identify; use crate::protocols::ping::Ping; use crate::protocols::transmitter::Transmitter; use crate::reactor::MessageRouter; use crate::traits::NetworkProtocol; pub const PING_PROTOCOL_ID: usize = 1; pub const IDENTIFY_PROTOCOL_ID: usize = 2; pub const DISCOVERY_PROTOCOL_ID: usize = 3; pub const TRANSMITTER_PROTOCOL_ID: usize = 4; lazy_static! 
{ // NOTE: Use peer id here because trust metric integrated test run in one process static ref PEER_OPENED_PROTOCOLS: RwLock>> = RwLock::new(HashMap::new()); } pub struct OpenedProtocols {} impl OpenedProtocols { pub fn register(peer_id: PeerId, proto_id: ProtocolId) { PEER_OPENED_PROTOCOLS .write() .entry(peer_id) .and_modify(|protos| { protos.insert(proto_id); }) .or_insert_with(|| HashSet::from_iter(vec![proto_id])); } #[allow(dead_code)] pub fn unregister(peer_id: &PeerId, proto_id: ProtocolId) { if let Some(ref mut proto_ids) = PEER_OPENED_PROTOCOLS.write().get_mut(peer_id) { proto_ids.remove(&proto_id); } } pub fn remove(peer_id: &PeerId) { PEER_OPENED_PROTOCOLS.write().remove(peer_id); } #[cfg(test)] pub fn is_open(peer_id: &PeerId, proto_id: &ProtocolId) -> bool { PEER_OPENED_PROTOCOLS .read() .get(peer_id) .map(|ids| ids.contains(proto_id)) .unwrap_or_else(|| false) } pub fn is_all_opened(peer_id: &PeerId) -> bool { PEER_OPENED_PROTOCOLS .read() .get(peer_id) .map(|ids| ids.len() == 4) .unwrap_or_else(|| false) } } #[derive(Default)] pub struct CoreProtocolBuilder { ping: Option, identify: Option, discovery: Option, transmitter: Option, } pub struct CoreProtocol { metas: Vec, transmitter: Transmitter, } impl CoreProtocol { pub fn build() -> CoreProtocolBuilder { CoreProtocolBuilder::new() } pub fn transmitter(&self) -> Transmitter { self.transmitter.clone() } } impl NetworkProtocol for CoreProtocol { fn target() -> TargetProtocol { TargetProtocol::Single(ProtocolId::new(IDENTIFY_PROTOCOL_ID)) } fn metas(self) -> Vec { self.metas } } impl CoreProtocolBuilder { pub fn new() -> Self { CoreProtocolBuilder { ping: None, identify: None, discovery: None, transmitter: None, } } pub fn ping( mut self, interval: Duration, timeout: Duration, event_tx: UnboundedSender, ) -> Self { let ping = Ping::new(interval, timeout, event_tx); self.ping = Some(ping); self } pub fn identify( mut self, peer_mgr: PeerManagerHandle, event_tx: UnboundedSender, ) -> Self { let identify 
= Identify::new(peer_mgr, event_tx);

        self.identify = Some(identify);
        self
    }

    pub fn discovery(
        mut self,
        peer_mgr: PeerManagerHandle,
        event_tx: UnboundedSender<PeerManagerEvent>,
        sync_interval: Duration,
    ) -> Self {
        let discovery = Discovery::new(peer_mgr, event_tx, sync_interval);

        self.discovery = Some(discovery);
        self
    }

    pub fn transmitter(
        mut self,
        message_router: MessageRouter,
        peer_mgr: PeerManagerHandle,
    ) -> Self {
        let transmitter = Transmitter::new(message_router, peer_mgr);

        self.transmitter = Some(transmitter);
        self
    }

    /// Assemble the final `CoreProtocol` from the configured components.
    ///
    /// # Panics
    /// Panics if any of the four protocols was not configured.
    pub fn build(self) -> CoreProtocol {
        let mut metas = Vec::with_capacity(4);

        let CoreProtocolBuilder {
            ping,
            identify,
            discovery,
            transmitter,
        } = self;

        let ping = ping.expect("init: missing protocol ping");
        let identify = identify.expect("init: missing protocol identify");
        let discovery = discovery.expect("init: missing protocol discovery");
        let transmitter = transmitter.expect("init: missing protocol transmitter");

        metas.push(ping.build_meta(PING_PROTOCOL_ID.into()));
        metas.push(identify.build_meta(IDENTIFY_PROTOCOL_ID.into()));
        metas.push(discovery.build_meta(DISCOVERY_PROTOCOL_ID.into()));
        metas.push(
            transmitter
                .clone()
                .build_meta(TRANSMITTER_PROTOCOL_ID.into()),
        );

        CoreProtocol { metas, transmitter }
    }
}

================================================
FILE: core/network/src/protocols/discovery/addr.rs
================================================

use crate::{
    event::{MisbehaviorKind, PeerManagerEvent},
    peer_manager::PeerManagerHandle,
};

use futures::channel::mpsc::UnboundedSender;
use log::{error, warn};
use tentacle::{
    bytes::{Bytes, BytesMut},
    multiaddr::{Multiaddr, Protocol},
    utils::is_reachable,
    SessionId,
};

use std::{
    collections::{BTreeMap, HashMap, HashSet},
    net::{IpAddr, SocketAddr},
    time::Instant,
};

pub(crate) const DEFAULT_MAX_KNOWN: usize = 5000;

/// Reasons a discovery peer may be judged misbehaving.
pub enum Misbehavior {
    // Already received GetNodes message
    DuplicateGetNodes,
    // Already received Nodes(announce=false) message
    DuplicateFirstNodes,
    // Nodes message include too many items
TooManyItems { announce: bool, length: usize }, // Too many address in one item TooManyAddresses(usize), } /// Misbehavior report result pub enum MisbehaveResult { /// Continue to run #[allow(dead_code)] Continue, /// Disconnect this peer Disconnect, } impl MisbehaveResult { pub fn is_disconnect(&self) -> bool { matches!(self, MisbehaveResult::Disconnect) } } struct AddrReporter { inner: UnboundedSender, shutdown: bool, } impl AddrReporter { pub fn new(reporter: UnboundedSender) -> Self { AddrReporter { inner: reporter, shutdown: false, } } // TODO: upstream heart-beat check pub fn report(&mut self, event: PeerManagerEvent) { if self.shutdown { return; } if self.inner.unbounded_send(event).is_err() { error!("network: discovery: peer manager offline"); self.shutdown = true; } } } pub struct AddressManager { peer_mgr: PeerManagerHandle, reporter: AddrReporter, } // FIXME: Should be peer store? impl AddressManager { pub fn new(peer_mgr: PeerManagerHandle, event_tx: UnboundedSender) -> Self { let reporter = AddrReporter::new(event_tx); AddressManager { peer_mgr, reporter } } pub fn add_new_addr(&mut self, _sid: SessionId, addr: Multiaddr) { let add_addr = PeerManagerEvent::DiscoverMultiAddrs { addrs: vec![addr] }; self.reporter.report(add_addr); } pub fn add_new_addrs(&mut self, _sid: SessionId, addrs: Vec) { let add_multi_addrs = PeerManagerEvent::DiscoverMultiAddrs { addrs }; self.reporter.report(add_multi_addrs); } // TODO: reduce peer score based on kind pub fn misbehave(&mut self, sid: SessionId, _kind: Misbehavior) -> MisbehaveResult { warn!("network: session {} misbehave", sid); let pid = match self.peer_mgr.peer_id(sid) { Some(id) => id, None => { error!("network: session {} peer id not found", sid); return MisbehaveResult::Disconnect; } }; // Right now, we just remove peer let kind = MisbehaviorKind::Discovery; let peer_misbehave = PeerManagerEvent::Misbehave { pid, kind }; self.reporter.report(peer_misbehave); MisbehaveResult::Disconnect } pub fn 
get_random(&mut self, n: usize, sid: SessionId) -> Vec<Multiaddr> {
        self.peer_mgr.random_addrs(n, sid).into_iter().collect()
    }
}

// bitcoin: bloom.h, bloom.cpp => CRollingBloomFilter
/// Bounded set of already-seen connectable addresses; once `max_known`
/// is exceeded the oldest entry is evicted (insertion-time ordering kept
/// in `time_addrs`).
pub struct AddrKnown {
    max_known:  usize,
    addrs:      HashSet<ConnectableAddr>,
    addr_times: HashMap<ConnectableAddr, Instant>,
    time_addrs: BTreeMap<Instant, ConnectableAddr>,
}

impl AddrKnown {
    pub(crate) fn new(max_known: usize) -> AddrKnown {
        AddrKnown {
            max_known,
            addrs: HashSet::default(),
            addr_times: HashMap::default(),
            time_addrs: BTreeMap::default(),
        }
    }

    pub(crate) fn insert(&mut self, key: ConnectableAddr) {
        let now = Instant::now();
        self.addrs.insert(key.clone());
        self.time_addrs.insert(now, key.clone());
        self.addr_times.insert(key, now);

        // Over capacity: evict the entry with the oldest insertion time.
        if self.addrs.len() > self.max_known {
            let first_time = {
                let (first_time, first_key) = self.time_addrs.iter().next().unwrap();
                self.addrs.remove(&first_key);
                self.addr_times.remove(&first_key);
                *first_time
            };
            self.time_addrs.remove(&first_time);
        }
    }

    pub(crate) fn contains(&self, addr: &ConnectableAddr) -> bool {
        self.addrs.contains(addr)
    }

    pub(crate) fn remove<'a>(&mut self, addrs: impl Iterator<Item = &'a ConnectableAddr>) {
        addrs.for_each(|addr| {
            self.addrs.remove(addr);
            if let Some(time) = self.addr_times.remove(addr) {
                self.time_addrs.remove(&time);
            }
        })
    }
}

impl Default for AddrKnown {
    fn default() -> AddrKnown {
        AddrKnown::new(DEFAULT_MAX_KNOWN)
    }
}

/// A multiaddr reduced to its dialable (host, port) core so logically
/// equal addresses compare equal.
#[derive(Clone, Debug, PartialOrd, Ord, Eq, PartialEq, Hash)]
pub struct ConnectableAddr {
    host: Bytes,
    port: u16,
}

impl From<&Multiaddr> for ConnectableAddr {
    fn from(addr: &Multiaddr) -> ConnectableAddr {
        use tentacle::multiaddr::Protocol::{DNS4, DNS6, IP4, IP6, TCP, TLS};

        let mut host = None;
        let mut port = 0u16;

        for proto in addr.iter() {
            match proto {
                IP4(_) | IP6(_) | DNS4(_) | DNS6(_) | TLS(_) => {
                    let mut buf = BytesMut::new();
                    proto.write_to_bytes(&mut buf);
                    host = Some(buf.freeze());
                }
                TCP(p) => port = p,
                _ => (),
            }
        }

        let host = host.expect("impossible, unsupported host protocol");

        ConnectableAddr { host, port }
    }
}

impl From<Multiaddr> for ConnectableAddr {
    fn from(addr: Multiaddr) -> ConnectableAddr
{
        ConnectableAddr::from(&addr)
    }
}

impl From<SocketAddr> for ConnectableAddr {
    fn from(addr: SocketAddr) -> ConnectableAddr {
        let proto = match addr.ip() {
            IpAddr::V4(ipv4) => Protocol::IP4(ipv4),
            IpAddr::V6(ipv6) => Protocol::IP6(ipv6),
        };

        let mut buf = BytesMut::new();
        proto.write_to_bytes(&mut buf);

        ConnectableAddr {
            host: buf.freeze(),
            port: addr.port(),
        }
    }
}

#[allow(dead_code)]
impl ConnectableAddr {
    pub fn port(&self) -> u16 {
        self.port
    }

    /// Whether the host is a publicly reachable IP. Non-IP hosts
    /// (e.g. DNS names) are assumed reachable.
    pub fn is_reachable(&self) -> bool {
        let (proto, _) =
            Protocol::from_bytes(&self.host).expect("impossible invalid host protocol");

        match proto {
            Protocol::IP4(ipv4) => is_reachable(IpAddr::V4(ipv4)),
            Protocol::IP6(ipv6) => is_reachable(IpAddr::V6(ipv6)),
            _ => true,
        }
    }
}

================================================
FILE: core/network/src/protocols/discovery/behaviour.rs
================================================

use std::collections::{HashMap, HashSet, VecDeque};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};

use futures::channel::mpsc::{channel, Receiver, Sender};
use futures::stream::FusedStream;
use futures::Stream;
use log::debug;
use rand::seq::SliceRandom;
use tentacle::multiaddr::Multiaddr;
use tentacle::utils::{is_reachable, multiaddr_to_socketaddr};
use tentacle::SessionId;
use tokio::time::Interval;

use crate::peer_manager::PeerManagerHandle;

use super::addr::{AddressManager, ConnectableAddr, DEFAULT_MAX_KNOWN};
use super::message::{DiscoveryMessage, Nodes};
use super::substream::{RemoteAddress, Substream, SubstreamKey, SubstreamValue};

const CHECK_INTERVAL: Duration = Duration::from_secs(3);

pub struct DiscoveryBehaviour {
    // Default: 5000
    max_known: usize,

    // Address Manager
    addr_mgr: AddressManager, // TODO: Remove address manager

    // Peer Manager
    peer_mgr: PeerManagerHandle,

    // The Nodes not yet been yield
    pending_nodes: VecDeque<(SubstreamKey, SessionId, Nodes)>,

    // For manage those substreams
    substreams: HashMap<SubstreamKey, SubstreamValue>,

    // For add new substream to Discovery
    substream_sender:
Sender, // For add new substream to Discovery substream_receiver: Receiver, dead_keys: HashSet, dynamic_query_cycle: Option, check_interval: Option, } #[derive(Clone)] pub struct DiscoveryBehaviourHandle { pub substream_sender: Sender, pub peer_mgr: PeerManagerHandle, } impl DiscoveryBehaviourHandle { pub fn contains_session(&self, session_id: SessionId) -> bool { self.peer_mgr.contains_session(session_id) } } impl DiscoveryBehaviour { /// Query cycle means checking and synchronizing the cycle time of the /// currently connected node, default is 24 hours pub fn new( addr_mgr: AddressManager, peer_mgr: PeerManagerHandle, query_cycle: Option, ) -> DiscoveryBehaviour { let (substream_sender, substream_receiver) = channel(8); DiscoveryBehaviour { check_interval: None, max_known: DEFAULT_MAX_KNOWN, addr_mgr, peer_mgr, pending_nodes: VecDeque::default(), substreams: HashMap::default(), substream_sender, substream_receiver, dead_keys: HashSet::default(), dynamic_query_cycle: query_cycle, } } pub fn handle(&self) -> DiscoveryBehaviourHandle { DiscoveryBehaviourHandle { substream_sender: self.substream_sender.clone(), peer_mgr: self.peer_mgr.clone(), } } fn recv_substreams(&mut self, cx: &mut Context) { loop { if self.substream_receiver.is_terminated() { break; } match Pin::new(&mut self.substream_receiver) .as_mut() .poll_next(cx) { Poll::Ready(Some(substream)) => { let key = substream.key(); debug!("Received a substream: key={:?}", key); let value = SubstreamValue::new( key.direction, substream, self.max_known, self.dynamic_query_cycle, ); self.substreams.insert(key, value); } Poll::Ready(None) => unreachable!(), Poll::Pending => { debug!("DiscoveryBehaviour.substream_receiver Async::NotReady"); break; } } } } fn check_interval(&mut self, cx: &mut Context) { if self.check_interval.is_none() { self.check_interval = Some(tokio::time::interval(CHECK_INTERVAL)); } let mut interval = self.check_interval.take().unwrap(); loop { match Pin::new(&mut 
interval).as_mut().poll_next(cx) {
                Poll::Ready(Some(_)) => {}
                Poll::Ready(None) => {
                    debug!("DiscoveryBehaviour check_interval poll finished");
                    break;
                }
                Poll::Pending => break,
            }
        }
        self.check_interval = Some(interval);
    }

    /// Poll every substream: fire announce timers, collect received node
    /// lists into `pending_nodes`, and gather this node's addresses that
    /// should be announced to peers.
    fn poll_substreams(&mut self, cx: &mut Context, announce_multiaddrs: &mut Vec<Multiaddr>) {
        #[cfg(feature = "global_ip_only")]
        let global_ip_only = true;
        #[cfg(not(feature = "global_ip_only"))]
        let global_ip_only = false;

        // Only announce globally reachable addresses when the
        // "global_ip_only" feature is enabled.
        let announce_fn = |announce_multiaddrs: &mut Vec<Multiaddr>, addr: &Multiaddr| {
            if !global_ip_only
                || multiaddr_to_socketaddr(addr)
                    .map(|addr| is_reachable(addr.ip()))
                    .unwrap_or_default()
            {
                announce_multiaddrs.push(addr.clone());
            }
        };

        for (key, value) in self.substreams.iter_mut() {
            value.check_timer();

            match value.receive_messages(cx, &mut self.addr_mgr) {
                Ok(Some((session_id, nodes_list))) => {
                    for nodes in nodes_list {
                        self.pending_nodes
                            .push_back((key.clone(), session_id, nodes));
                    }
                }
                Ok(None) => {
                    // stream close
                    self.dead_keys.insert(key.clone());
                }
                Err(err) => {
                    debug!("substream {:?} receive messages error: {:?}", key, err);
                    // remove the substream
                    self.dead_keys.insert(key.clone());
                }
            }

            match value.send_messages(cx) {
                Ok(_) => {}
                Err(err) => {
                    debug!("substream {:?} send messages error: {:?}", key, err);
                    // remove the substream
                    self.dead_keys.insert(key.clone());
                }
            }

            if value.announce {
                if let RemoteAddress::Listen(ref addr) = value.remote_addr {
                    announce_fn(announce_multiaddrs, addr)
                }
                value.announce = false;
                value.last_announce = Some(Instant::now());
            }
        }
    }

    /// Drop substreams marked dead and forget their addresses in every
    /// remaining substream's known-address set.
    fn remove_dead_stream(&mut self) {
        let mut dead_addr = Vec::default();
        for key in self.dead_keys.drain() {
            if let Some(addr) = self.substreams.remove(&key) {
                dead_addr.push(ConnectableAddr::from(addr.remote_addr.into_inner()));
            }
        }

        if !dead_addr.is_empty() {
            self.substreams
                .values_mut()
                .for_each(|value| value.addr_known.remove(dead_addr.iter()));
        }
    }

    /// Package queued announce addresses into Nodes messages and flush
    /// every substream's pending messages.
    fn send_messages(&mut self, cx: &mut Context) {
        for (key, value) in self.substreams.iter_mut() {
            let announce_multiaddrs =
value.announce_multiaddrs.split_off(0); if !announce_multiaddrs.is_empty() { let items = announce_multiaddrs .into_iter() .map(|addr| vec![addr]) .collect::>(); let announce = true; value .pending_messages .push_back(DiscoveryMessage::new_nodes(announce, items)); } match value.send_messages(cx) { Ok(_) => {} Err(err) => { debug!("substream {:?} send messages error: {:?}", key, err); // remove the substream self.dead_keys.insert(key.clone()); } } } } } impl Stream for DiscoveryBehaviour { type Item = (); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { debug!("DiscoveryBehaviour.poll()"); self.recv_substreams(cx); self.check_interval(cx); let mut announce_multiaddrs = Vec::new(); self.poll_substreams(cx, &mut announce_multiaddrs); self.remove_dead_stream(); let mut rng = rand::thread_rng(); let mut remain_keys = self.substreams.keys().cloned().collect::>(); debug!("announce_multiaddrs: {:?}", announce_multiaddrs); for announce_multiaddr in announce_multiaddrs.into_iter() { let announce_addr = ConnectableAddr::from(announce_multiaddr.clone()); remain_keys.shuffle(&mut rng); for i in 0..2 { if let Some(key) = remain_keys.get(i) { if let Some(value) = self.substreams.get_mut(key) { debug!( ">> send {} to: {:?}, contains: {}", announce_multiaddr, value.remote_addr, value.addr_known.contains(&announce_addr) ); if value.announce_multiaddrs.len() < 10 && !value.addr_known.contains(&announce_addr) { value.announce_multiaddrs.push(announce_multiaddr.clone()); value.addr_known.insert(announce_addr.clone()); } } } } } self.send_messages(cx); match self.pending_nodes.pop_front() { Some((_key, session_id, nodes)) => { let addrs = nodes .items .into_iter() .flat_map(|node| node.addrs()) .collect::>(); self.addr_mgr.add_new_addrs(session_id, addrs); Poll::Ready(Some(())) } None => Poll::Pending, } } } ================================================ FILE: core/network/src/protocols/discovery/message.rs ================================================ use 
std::convert::TryFrom;

use prost::{Message, Oneof};
use tentacle::multiaddr::Multiaddr;

#[derive(Clone, Copy, PartialEq, Eq, Oneof)]
pub enum ListenPort {
    #[prost(uint32, tag = "3")]
    On(u32),
}

#[derive(Clone, PartialEq, Eq, Message)]
pub struct GetNodes {
    #[prost(uint32, tag = "1")]
    pub version: u32,
    #[prost(uint32, tag = "2")]
    pub count: u32,
    #[prost(oneof = "ListenPort", tags = "3")]
    pub listen_port: Option<ListenPort>,
}

impl GetNodes {
    /// The advertised listen port, if set and within `u16` range.
    pub fn listen_port(&self) -> Option<u16> {
        match self.listen_port {
            Some(ListenPort::On(port)) if port <= u16::MAX as u32 => Some(port as u16),
            _ => None,
        }
    }
}

#[derive(Clone, PartialEq, Eq, Message)]
pub struct Node {
    #[prost(bytes, repeated, tag = "1")]
    pub addrs: Vec<Vec<u8>>,
}

impl Node {
    /// Decode the raw address bytes, silently dropping invalid multiaddrs.
    pub fn addrs(self) -> Vec<Multiaddr> {
        let addrs = self.addrs.into_iter();
        let to_multiaddrs = addrs.filter_map(|bytes| Multiaddr::try_from(bytes).ok());
        to_multiaddrs.collect::<Vec<_>>()
    }

    pub fn with_addrs(addrs: Vec<Multiaddr>) -> Self {
        Node {
            addrs: addrs.into_iter().map(|addr| addr.to_vec()).collect(),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Message)]
pub struct Nodes {
    #[prost(bool, tag = "1")]
    pub announce: bool,
    #[prost(message, repeated, tag = "2")]
    pub items: Vec<Node>,
}

#[derive(Clone, PartialEq, Eq, Oneof)]
pub enum Payload {
    #[prost(message, tag = "1")]
    GetNodes(GetNodes),
    #[prost(message, tag = "2")]
    Nodes(Nodes),
}

#[derive(Clone, PartialEq, Eq, Message)]
pub struct DiscoveryMessage {
    #[prost(oneof = "Payload", tags = "1, 2")]
    pub payload: Option<Payload>,
}

impl DiscoveryMessage {
    pub fn new_get_nodes(version: u32, count: u32, listen_port: Option<u16>) -> Self {
        let listen_port = listen_port.map(|port| ListenPort::On(port as u32));
        DiscoveryMessage {
            payload: Some(Payload::GetNodes(GetNodes {
                version,
                count,
                listen_port,
            })),
        }
    }

    pub fn new_nodes(announce: bool, nodes: Vec<Vec<Multiaddr>>) -> Self {
        DiscoveryMessage {
            payload: Some(Payload::Nodes(Nodes {
                announce,
                items: nodes.into_iter().map(Node::with_addrs).collect(),
            })),
        }
    }
}

impl std::fmt::Display for DiscoveryMessage {
    fn fmt(&self, f: &mut
std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self { DiscoveryMessage { payload: Some(Payload::GetNodes(GetNodes { version, count, .. })), } => { write!(f, "Payload::GetNodes(version:{}, count:{})", version, count)?; } DiscoveryMessage { payload: Some(Payload::Nodes(Nodes { announce, items })), } => { write!( f, "Payload::Nodes(announce:{}, items.length:{})", announce, items.len() )?; } DiscoveryMessage { payload: None } => write!(f, "Empty payload")?, } Ok(()) } } #[cfg(test)] mod tests { use super::*; use prost::Message; use protocol::BytesMut; #[test] fn discovery_message_serialize_deserialize() { let msg = DiscoveryMessage::new_get_nodes(0, 50, Some(1337)); let mut buf = BytesMut::with_capacity(msg.encoded_len()); msg.encode(&mut buf).unwrap(); let decoded_msg = DiscoveryMessage::decode(buf.freeze()).unwrap(); assert_eq!(decoded_msg, msg); } } ================================================ FILE: core/network/src/protocols/discovery/protocol.rs ================================================ use std::collections::HashMap; use futures::channel::mpsc::{channel, Sender}; use futures::stream::StreamExt; use futures::FutureExt; use log::{debug, warn}; use tentacle::context::{ProtocolContext, ProtocolContextMutRef}; use tentacle::traits::ServiceProtocol; use tentacle::SessionId; use super::behaviour::{DiscoveryBehaviour, DiscoveryBehaviourHandle}; use super::substream::Substream; pub struct DiscoveryProtocol { behaviour: Option, behaviour_handle: DiscoveryBehaviourHandle, discovery_senders: HashMap>>, } impl DiscoveryProtocol { pub fn new(behaviour: DiscoveryBehaviour) -> DiscoveryProtocol { let behaviour_handle = behaviour.handle(); DiscoveryProtocol { behaviour: Some(behaviour), behaviour_handle, discovery_senders: HashMap::default(), } } } impl ServiceProtocol for DiscoveryProtocol { fn init(&mut self, context: &mut ProtocolContext) { debug!("protocol [discovery({})]: init", context.proto_id); let discovery_task = self .behaviour .take() .map(|mut 
behaviour| {
                debug!("Start discovery future_task");
                async move {
                    loop {
                        if behaviour.next().await.is_none() {
                            warn!("discovery stream shutdown");
                            break;
                        }
                    }
                }
                .boxed()
            })
            .unwrap();

        if context.future_task(discovery_task).is_err() {
            warn!("start discovery fail");
        };
    }

    fn connected(&mut self, context: ProtocolContextMutRef, _: &str) {
        let session = context.session;
        debug!(
            "protocol [discovery] open on session [{}], address: [{}], type: [{:?}]",
            session.id, session.address, session.ty
        );

        // Reject sessions the peer manager does not know about.
        if !self.behaviour_handle.contains_session(session.id) {
            let _ = context.close_protocol(session.id, context.proto_id());
            return;
        }

        let peer_id = match context.session.remote_pubkey.as_ref() {
            Some(pubkey) => pubkey.peer_id(),
            None => {
                log::warn!("peer connection must be encrypted");
                let _ = context.disconnect(context.session.id);
                return;
            }
        };

        crate::protocols::OpenedProtocols::register(peer_id, context.proto_id());

        let (sender, receiver) = channel(8);
        self.discovery_senders.insert(session.id, sender);
        let substream = Substream::new(context, receiver);
        match self.behaviour_handle.substream_sender.try_send(substream) {
            Ok(_) => {
                debug!("Send substream success");
            }
            Err(err) => {
                // TODO: handle channel is full (wait for poll API?)
                warn!("Send substream failed : {:?}", err);
            }
        }
    }

    fn disconnected(&mut self, context: ProtocolContextMutRef) {
        self.discovery_senders.remove(&context.session.id);
        debug!(
            "protocol [discovery] close on session [{}]",
            context.session.id
        );
    }

    fn received(&mut self, context: ProtocolContextMutRef, data: bytes::Bytes) {
        debug!("[received message]: length={}", data.len());

        if let Some(ref mut sender) = self.discovery_senders.get_mut(&context.session.id) {
            // TODO: handle channel is full (wait for poll API?)
if let Err(err) = sender.try_send(data.to_vec()) { if err.is_full() { warn!("channel is full"); } else if err.is_disconnected() { warn!("channel is disconnected"); } else { warn!("other channel error: {:?}", err); } } } } } ================================================ FILE: core/network/src/protocols/discovery/substream.rs ================================================ use super::{ addr::{AddrKnown, AddressManager, ConnectableAddr, Misbehavior}, message::{DiscoveryMessage, Nodes, Payload}, }; use bytes::{BufMut, BytesMut}; use futures::{channel::mpsc::Receiver, Sink, Stream}; use log::{debug, trace, warn}; use prost::Message; use tentacle::{ context::ProtocolContextMutRef, error::SendErrorKind, multiaddr::{Multiaddr, Protocol}, service::{ServiceControl, SessionType}, utils::multiaddr_to_socketaddr, ProtocolId, SessionId, }; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::{length_delimited::LengthDelimitedCodec, Decoder, Encoder, Framed}; use std::{ collections::VecDeque, io, pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, }; // FIXME: should be a more high level version number const VERSION: u32 = 0; // The maximum number of new addresses to accumulate before announcing. 
const MAX_ADDR_TO_SEND: u32 = 1000; // Every 24 hours send announce nodes message const ANNOUNCE_INTERVAL: u64 = 3600 * 24; const ANNOUNCE_THRESHOLD: usize = 10; // The maximum number addresses in on Nodes item const MAX_ADDRS: usize = 3; pub(crate) struct DiscoveryCodec { inner: LengthDelimitedCodec, } impl Default for DiscoveryCodec { fn default() -> DiscoveryCodec { DiscoveryCodec { inner: LengthDelimitedCodec::new(), } } } impl Decoder for DiscoveryCodec { type Error = io::Error; type Item = DiscoveryMessage; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { match self.inner.decode(src) { Ok(Some(frame)) => { let maybe_msg = DiscoveryMessage::decode(frame.freeze()); maybe_msg.map(Some).map_err(|err| { debug!("deserialize {}", err); io::ErrorKind::InvalidData.into() }) } Ok(None) => Ok(None), Err(err) => { debug!("codec decode {}", err); Err(io::ErrorKind::InvalidData.into()) } } } } impl Encoder for DiscoveryCodec { type Error = io::Error; type Item = DiscoveryMessage; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { let mut buf = BytesMut::with_capacity(item.encoded_len()); item.encode(&mut buf).map_err(|err| { warn!("serialize {}", err); io::ErrorKind::InvalidData })?; self.inner.encode(buf.freeze(), dst) } } #[derive(Eq, PartialEq, Hash, Debug, Clone)] pub struct SubstreamKey { pub(crate) direction: SessionType, pub(crate) session_id: SessionId, pub(crate) proto_id: ProtocolId, } pub struct StreamHandle { data_buf: BytesMut, proto_id: ProtocolId, session_id: SessionId, pub(crate) receiver: Receiver>, pub(crate) sender: ServiceControl, } impl AsyncRead for StreamHandle { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8], ) -> Poll> { for _ in 0..10 { match Pin::new(&mut self.receiver).as_mut().poll_next(cx) { Poll::Ready(Some(data)) => { self.data_buf.reserve(data.len()); self.data_buf.put(data.as_slice()); } Poll::Ready(None) => { return 
Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()));
                }
                Poll::Pending => {
                    break;
                }
            }
        }

        let n = std::cmp::min(buf.len(), self.data_buf.len());
        if n == 0 {
            return Poll::Pending;
        }

        let b = self.data_buf.split_to(n);
        buf[..n].copy_from_slice(&b);
        Poll::Ready(Ok(n))
    }
}

impl AsyncWrite for StreamHandle {
    fn poll_write(self: Pin<&mut Self>, _cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
        self.sender
            .send_message_to(self.session_id, self.proto_id, BytesMut::from(buf).freeze())
            .map(|()| buf.len())
            .map_err(|e| match e {
                SendErrorKind::WouldBlock => io::ErrorKind::WouldBlock.into(),
                SendErrorKind::BrokenPipe => io::ErrorKind::BrokenPipe.into(),
            })
            .into()
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> {
        Poll::Ready(Ok(()))
    }
}

/// Per-substream discovery state machine: framed codec, queued outgoing
/// messages, the peer's known-address filter and announce bookkeeping.
pub struct SubstreamValue {
    framed_stream: Framed<StreamHandle, DiscoveryCodec>,
    // received pending messages
    pub(crate) pending_messages: VecDeque<DiscoveryMessage>,
    pub(crate) addr_known: AddrKnown,
    // FIXME: Remote listen address, resolved by id protocol
    pub(crate) remote_addr: RemoteAddress,
    pub(crate) announce: bool,
    pub(crate) last_announce: Option<Instant>,
    pub(crate) announce_multiaddrs: Vec<Multiaddr>,
    session_id: SessionId,
    announce_interval: Duration,
    received_get_nodes: bool,
    received_nodes: bool,
    remote_closed: bool,
}

impl SubstreamValue {
    pub(crate) fn new(
        direction: SessionType,
        substream: Substream,
        max_known: usize,
        query_cycle: Option<Duration>,
    ) -> SubstreamValue {
        let session_id = substream.stream.session_id;
        let mut pending_messages = VecDeque::default();
        debug!("direction: {:?}", direction);

        let mut addr_known = AddrKnown::new(max_known);
        let remote_addr = if direction.is_outbound() {
            // Outbound: we dialed the peer, so its address is a listen
            // address; immediately ask it for more nodes.
            pending_messages.push_back(DiscoveryMessage::new_get_nodes(
                VERSION,
                MAX_ADDR_TO_SEND,
                substream.listen_port,
            ));

            addr_known.insert(ConnectableAddr::from(&substream.remote_addr));

            RemoteAddress::Listen(substream.remote_addr)
        } else {
            RemoteAddress::Init(substream.remote_addr)
        };

        SubstreamValue {
            framed_stream:
Framed::new(substream.stream, DiscoveryCodec::default()),
            last_announce: None,
            announce_interval: query_cycle
                .unwrap_or_else(|| Duration::from_secs(ANNOUNCE_INTERVAL)),
            pending_messages,
            addr_known,
            remote_addr,
            session_id,
            announce: false,
            announce_multiaddrs: Vec::new(),
            received_get_nodes: false,
            received_nodes: false,
            remote_closed: false,
        }
    }

    /// Remote address reduced to its connectable (host, port) form.
    fn remote_connectable_addr(&self) -> ConnectableAddr {
        ConnectableAddr::from(self.remote_addr.to_inner())
    }

    /// Mark this substream for announcement when the announce interval has
    /// elapsed, or when it has never announced yet.
    pub(crate) fn check_timer(&mut self) {
        if self
            .last_announce
            .map(|time| time.elapsed() > self.announce_interval)
            .unwrap_or(true)
        {
            debug!("announce this session: {:?}", self.session_id);
            self.announce = true;
        }
    }

    /// Write out queued messages in order; stops early (re-queuing the
    /// current message) if the sink is not ready.
    pub(crate) fn send_messages(&mut self, cx: &mut Context) -> Result<(), io::Error> {
        let mut sink = Pin::new(&mut self.framed_stream);

        while let Some(message) = self.pending_messages.pop_front() {
            debug!("Discovery sending message: {}", message);

            match sink.as_mut().poll_ready(cx)? {
                Poll::Pending => {
                    self.pending_messages.push_front(message);
                    return Ok(());
                }
                Poll::Ready(()) => {
                    sink.as_mut().start_send(message)?;
                }
            }
        }

        let _ = sink.as_mut().poll_flush(cx)?;
        Ok(())
    }

    pub(crate) fn handle_message(
        &mut self,
        message: DiscoveryMessage,
        addr_mgr: &mut AddressManager,
    ) -> Result<Option<Nodes>, io::Error> {
        match message {
            DiscoveryMessage {
                payload: Some(Payload::GetNodes(get_nodes)),
            } => {
                if self.received_get_nodes {
                    // TODO: misbehavior
                    if addr_mgr
                        .misbehave(self.session_id, Misbehavior::DuplicateGetNodes)
                        .is_disconnect()
                    {
                        // TODO: more clear error type
                        warn!("Already received get nodes");
                        return Err(io::ErrorKind::Other.into());
                    }
                } else {
                    // TODO: magic number
                    // must get the item first, otherwise it is possible to load
                    // the address of peer listen.
let mut items = addr_mgr.get_random(2500, self.session_id); // change client random outbound port to client listen port let listen_port = get_nodes.listen_port(); debug!("listen port: {:?}", listen_port); if let Some(port) = listen_port { self.remote_addr.update_port(port); self.addr_known.insert(self.remote_connectable_addr()); // add client listen address to manager if let RemoteAddress::Listen(ref addr) = self.remote_addr { addr_mgr.add_new_addr(self.session_id, addr.clone()); } } while items.len() > 1000 { if let Some(last_item) = items.pop() { let idx = rand::random::() % 1000; items[idx] = last_item; } } let announce = false; let items = items.into_iter().map(|addr| vec![addr]).collect::>(); self.pending_messages .push_back(DiscoveryMessage::new_nodes(announce, items)); self.received_get_nodes = true; } } DiscoveryMessage { payload: Some(Payload::Nodes(nodes)), } => { for item in &nodes.items { if item.addrs.len() > MAX_ADDRS { let misbehavior = Misbehavior::TooManyAddresses(item.addrs.len()); if addr_mgr .misbehave(self.session_id, misbehavior) .is_disconnect() { // TODO: more clear error type return Err(io::ErrorKind::Other.into()); } } } if nodes.announce { if nodes.items.len() > ANNOUNCE_THRESHOLD { warn!("Nodes items more than {}", ANNOUNCE_THRESHOLD); // TODO: misbehavior let misbehavior = Misbehavior::TooManyItems { announce: nodes.announce, length: nodes.items.len(), }; if addr_mgr .misbehave(self.session_id, misbehavior) .is_disconnect() { // TODO: more clear error type return Err(io::ErrorKind::Other.into()); } } else { return Ok(Some(nodes)); } } else if self.received_nodes { warn!("already received Nodes(announce=false) message"); // TODO: misbehavior if addr_mgr .misbehave(self.session_id, Misbehavior::DuplicateFirstNodes) .is_disconnect() { // TODO: more clear error type return Err(io::ErrorKind::Other.into()); } } else if nodes.items.len() > MAX_ADDR_TO_SEND as usize { warn!( "Too many items (announce=false) length={}", nodes.items.len() ); // 
TODO: misbehavior let misbehavior = Misbehavior::TooManyItems { announce: nodes.announce, length: nodes.items.len(), }; if addr_mgr .misbehave(self.session_id, misbehavior) .is_disconnect() { // TODO: more clear error type return Err(io::ErrorKind::Other.into()); } } else { self.received_nodes = true; return Ok(Some(nodes)); } } DiscoveryMessage { payload: None } => { // TODO: misbehavior } } Ok(None) } pub(crate) fn receive_messages( &mut self, cx: &mut Context, addr_mgr: &mut AddressManager, ) -> Result)>, io::Error> { if self.remote_closed { return Ok(None); } let mut nodes_list = Vec::new(); loop { match Pin::new(&mut self.framed_stream).as_mut().poll_next(cx) { Poll::Ready(Some(res)) => { let message = res?; trace!("received message {}", message); if let Some(nodes) = self.handle_message(message, addr_mgr)? { // Add to known address list for node in &nodes.items { for addr in node.clone().addrs() { trace!("received address: {}", addr); self.addr_known.insert(ConnectableAddr::from(addr)); } } nodes_list.push(nodes); } } Poll::Ready(None) => { debug!("remote closed"); self.remote_closed = true; break; } Poll::Pending => { break; } } } Ok(Some((self.session_id, nodes_list))) } } pub struct Substream { pub remote_addr: Multiaddr, pub direction: SessionType, pub stream: StreamHandle, pub listen_port: Option, } impl Substream { pub fn new(context: ProtocolContextMutRef, receiver: Receiver>) -> Substream { let stream = StreamHandle { data_buf: BytesMut::default(), proto_id: context.proto_id, session_id: context.session.id, receiver, sender: context.control().clone(), }; let listen_port = if context.session.ty.is_outbound() { context .listens() .iter() .map(|address| multiaddr_to_socketaddr(address).unwrap().port()) .next() } else { None }; Substream { remote_addr: context.session.address.clone(), direction: context.session.ty, stream, listen_port, } } pub fn key(&self) -> SubstreamKey { SubstreamKey { direction: self.direction, session_id: self.stream.session_id, 
proto_id: self.stream.proto_id, } } } #[derive(Eq, PartialEq, Hash, Debug, Clone)] pub(crate) enum RemoteAddress { /// Inbound init remote address Init(Multiaddr), /// Outbound init remote address or Inbound listen address Listen(Multiaddr), } impl RemoteAddress { fn to_inner(&self) -> &Multiaddr { match self { RemoteAddress::Init(ref addr) | RemoteAddress::Listen(ref addr) => addr, } } pub(crate) fn into_inner(self) -> Multiaddr { match self { RemoteAddress::Init(addr) | RemoteAddress::Listen(addr) => addr, } } fn update_port(&mut self, port: u16) { if let RemoteAddress::Init(ref addr) = self { let addr = addr .into_iter() .map(|proto| { match proto { // TODO: other transport, UDP for example Protocol::TCP(_) => Protocol::TCP(port), value => value, } }) .collect(); *self = RemoteAddress::Listen(addr); } } } ================================================ FILE: core/network/src/protocols/discovery.rs ================================================ mod addr; mod behaviour; mod message; mod protocol; mod substream; use self::protocol::DiscoveryProtocol; use addr::AddressManager; use behaviour::DiscoveryBehaviour; use crate::{event::PeerManagerEvent, peer_manager::PeerManagerHandle}; use futures::channel::mpsc::UnboundedSender; use tentacle::{ builder::MetaBuilder, service::{ProtocolHandle, ProtocolMeta}, ProtocolId, }; use std::time::Duration; pub const NAME: &str = "chain_discovery"; pub const SUPPORT_VERSIONS: [&str; 1] = ["0.1"]; pub struct Discovery(DiscoveryProtocol); impl Discovery { pub fn new( peer_mgr: PeerManagerHandle, event_tx: UnboundedSender, sync_interval: Duration, ) -> Self { #[cfg(feature = "global_ip_only")] log::info!("turn on global ip only"); #[cfg(not(feature = "global_ip_only"))] log::info!("turn off global ip only"); let address_manager = AddressManager::new(peer_mgr.clone(), event_tx); let behaviour = DiscoveryBehaviour::new(address_manager, peer_mgr, Some(sync_interval)); Discovery(DiscoveryProtocol::new(behaviour)) } pub fn 
build_meta(self, protocol_id: ProtocolId) -> ProtocolMeta {
        MetaBuilder::new()
            .id(protocol_id)
            .name(name!(NAME))
            .support_versions(support_versions!(SUPPORT_VERSIONS))
            .service_handle(move || ProtocolHandle::Callback(Box::new(self.0)))
            .build()
    }
}

================================================
FILE: core/network/src/protocols/identify/behaviour.rs
================================================
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use futures::channel::mpsc::UnboundedSender;
use tentacle::multiaddr::Multiaddr;
use tentacle::secio::PeerId;
use tentacle::service::SessionType;

use crate::event::PeerManagerEvent;
use crate::peer_manager::PeerManagerHandle;

use super::common::reachable;
use super::message;
use super::protocol::StateContext;

/// Sender that forwards events to the peer manager and latches shut after the
/// first failed send, so later reports become cheap no-ops.
// NOTE(review): extraction stripped generics here; restored
// `UnboundedSender<PeerManagerEvent>` and `Arc<AtomicBool>`.
#[derive(Clone)]
struct AddrReporter {
    inner: UnboundedSender<PeerManagerEvent>,
    shutdown: Arc<AtomicBool>,
}

impl AddrReporter {
    pub fn new(reporter: UnboundedSender<PeerManagerEvent>) -> Self {
        AddrReporter {
            inner: reporter,
            shutdown: Arc::new(AtomicBool::new(false)),
        }
    }

    // TODO: upstream heart-beat check
    /// Forward `event` to the peer manager; mark ourselves shut down on the
    /// first send failure (receiver gone).
    pub fn report(&self, event: PeerManagerEvent) {
        if self.shutdown.load(Ordering::SeqCst) {
            return;
        }

        if self.inner.unbounded_send(event).is_err() {
            log::debug!("network: discovery: peer manager offline");
            self.shutdown.store(true, Ordering::SeqCst);
        }
    }
}

/// Identify protocol
pub struct IdentifyBehaviour {
    peer_mgr: PeerManagerHandle,
    addr_reporter: AddrReporter,
}

// Allow dead code for cfg(test)
#[allow(dead_code)]
impl IdentifyBehaviour {
    pub fn new(peer_mgr: PeerManagerHandle, event_tx: UnboundedSender<PeerManagerEvent>) -> Self {
        let addr_reporter = AddrReporter::new(event_tx);

        IdentifyBehaviour {
            peer_mgr,
            addr_reporter,
        }
    }

    /// Hex-encoded chain id, used to reject peers from other chains.
    pub fn chain_id(&self) -> String {
        self.peer_mgr.chain_id().as_ref().as_hex()
    }

    /// Our reachable listen addresses, capped at `message::MAX_LISTEN_ADDRS`.
    pub fn local_listen_addrs(&self) -> Vec<Multiaddr> {
        let addrs = self.peer_mgr.listen_addrs();
        let reachable_addrs = addrs.into_iter().filter(reachable);

        reachable_addrs.take(message::MAX_LISTEN_ADDRS).collect()
    }

    /// Send our identity (chain id + addresses) to the remote peer;
    /// disconnects on encode failure.
    pub fn send_identity(&self, context: &StateContext) {
        let address_info = {
            let listen_addrs = self.local_listen_addrs();
            let observed_addr = context.observed_addr();

            message::AddressInfo::new(listen_addrs, observed_addr)
        };

        let identity = {
            let msg = message::Identity::new(self.chain_id(), address_info);
            match msg.into_bytes() {
                Ok(msg) => msg,
                Err(err) => {
                    log::warn!("encode identity msg failed {}", err);
                    context.disconnect();
                    return;
                }
            }
        };

        context.send_message(identity);
    }

    /// Acknowledge a verified identity with our own addresses;
    /// disconnects on encode failure.
    pub fn send_ack(&self, context: &StateContext) {
        let address_info = {
            let listen_addrs = self.local_listen_addrs();
            let observed_addr = context.observed_addr();

            message::AddressInfo::new(listen_addrs, observed_addr)
        };

        let acknowledge = {
            let msg = message::Acknowledge::new(address_info);
            match msg.into_bytes() {
                Ok(msg) => msg,
                Err(err) => {
                    log::warn!("encode acknowledge msg failed {}", err);
                    context.disconnect();
                    return;
                }
            }
        };

        context.send_message(acknowledge);
    }

    /// Reject peers whose chain id differs from ours.
    pub fn verify_remote_identity(
        &self,
        identity: &message::Identity,
    ) -> Result<(), super::protocol::Error> {
        if identity.chain_id != self.chain_id() {
            Err(super::protocol::Error::WrongChainId)
        } else {
            Ok(())
        }
    }

    /// Report the peer's reachable listen addresses to the peer manager.
    pub fn process_listens(&self, context: &StateContext, listens: Vec<Multiaddr>) {
        let peer_id = &context.remote_peer.id;
        log::debug!("listen addresses: {:?}", listens);

        let reachable_addrs = listens.into_iter().filter(reachable).collect::<Vec<_>>();
        let identified_addrs = PeerManagerEvent::IdentifiedAddrs {
            pid: peer_id.to_owned(),
            addrs: reachable_addrs,
        };
        self.addr_reporter.report(identified_addrs);
    }

    /// Handle the address the remote peer observed for us.
    pub fn process_observed(&self, context: &StateContext, observed: Multiaddr) {
        let peer_id = &context.remote_peer.id;
        let session_type = context.session_context.ty;
        log::debug!("observed addr {:?} from {}", observed, context.remote_peer);

        let unobservable = |observed| -> bool {
            self.add_observed_addr(peer_id, observed, session_type)
                .is_err()
        };

        if reachable(&observed) && unobservable(observed.clone()) {
            log::warn!("unobservable {} from {}", observed, context.remote_peer);
context.disconnect();
        }
    }

    /// Record an address the peer observed for us. Currently a no-op that
    /// always succeeds; kept as an extension point.
    pub fn add_observed_addr(
        &self,
        peer: &PeerId,
        addr: Multiaddr,
        ty: SessionType,
    ) -> Result<(), ()> {
        log::debug!("add observed: {:?}, addr {:?}, ty: {:?}", peer, addr, ty);

        // Noop right now
        Ok(())
    }
}

================================================
FILE: core/network/src/protocols/identify/common.rs
================================================
use tentacle::{
    multiaddr::Multiaddr,
    utils::{is_reachable, multiaddr_to_socketaddr},
};

/// Whether `addr` is usable as a dial target. With the `global_ip_only`
/// feature enabled the ip must also be globally routable.
pub fn reachable(addr: &Multiaddr) -> bool {
    #[cfg(feature = "global_ip_only")]
    let global_ip_only = true;
    #[cfg(not(feature = "global_ip_only"))]
    let global_ip_only = false;

    multiaddr_to_socketaddr(addr)
        .map(|socket_addr| !global_ip_only || is_reachable(socket_addr.ip()))
        .unwrap_or(false)
}

================================================
FILE: core/network/src/protocols/identify/identification.rs
================================================
use std::borrow::Borrow;
use std::collections::HashSet;
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll, Waker};

use parking_lot::Mutex;

type Index = usize;

/// Future that resolves once the peer's identification finishes with a
/// verdict (pass or fail).
// NOTE(review): extraction stripped generics in this file; restored
// `Arc<Mutex<IdentificationStatus>>`, `Poll<Self::Output>`, `Borrow<Index>`,
// `hash<H: Hasher>` and `HashSet<IndexedWaker>`.
pub struct WaitIdentification {
    // Slot of our waker in the shared waker set; `usize::MAX` means
    // "not registered yet".
    idx: Index,
    ident_status: Arc<Mutex<IdentificationStatus>>,
}

impl WaitIdentification {
    fn new(ident_status: Arc<Mutex<IdentificationStatus>>) -> Self {
        WaitIdentification {
            idx: usize::MAX,
            ident_status,
        }
    }
}

impl Future for WaitIdentification {
    type Output = Result<(), super::protocol::Error>;

    fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        let insert_idx = {
            let idx = self.idx;

            match &mut *self.ident_status.lock() {
                IdentificationStatus::Done(ret) => return Poll::Ready(ret.to_owned()),
                // Waker already registered on an earlier poll
                IdentificationStatus::Pending(_) if idx != usize::MAX => return Poll::Pending,
                IdentificationStatus::Pending(wakerset) => wakerset.insert(ctx.waker().to_owned()),
            }
        };

        self.idx = insert_idx;
        Poll::Pending
    }
}

impl Drop for WaitIdentification {
    fn drop(&mut self) {
        // Deregister our waker so the shared set does not grow unboundedly
        if let IdentificationStatus::Pending(wakerset) = &mut *self.ident_status.lock() {
            wakerset.remove(self.idx);
        }
    }
}

/// Shared identification state; delivering a verdict wakes all waiters.
pub struct Identification {
    status: Arc<Mutex<IdentificationStatus>>,
}

impl Identification {
    pub(crate) fn new() -> Self {
        Identification {
            status: Default::default(),
        }
    }

    /// Create a future that resolves when this identification completes.
    pub fn wait(&self) -> WaitIdentification {
        WaitIdentification::new(Arc::clone(&self.status))
    }

    pub fn pass(&self) {
        self.done(Ok(()))
    }

    pub fn failed(&self, error: super::protocol::Error) {
        self.done(Err(error))
    }

    /// Fail pending waiters if no verdict was ever delivered (used on drop).
    fn fail_if_not_done(&self) {
        {
            let status = self.status.lock();
            if let IdentificationStatus::Done(_) = &*status {
                return;
            }
        }

        self.failed(super::protocol::Error::WaitFutDropped)
    }

    /// Record the verdict and wake all registered waiters. The first verdict
    /// wins; later calls are ignored.
    fn done(&self, ret: Result<(), super::protocol::Error>) {
        let wakerset = {
            let mut status = self.status.lock();

            if let IdentificationStatus::Pending(wakerset) =
                std::mem::replace(&mut *status, IdentificationStatus::Done(ret))
            {
                wakerset
            } else {
                return;
            }
        };

        wakerset.wake()
    }
}

impl Drop for Identification {
    fn drop(&mut self) {
        self.fail_if_not_done()
    }
}

/// Waker tagged with its slot index so it can be removed on future drop.
struct IndexedWaker {
    idx: Index,
    waker: Waker,
}

impl IndexedWaker {
    fn wake(self) {
        self.waker.wake()
    }
}

impl Borrow<Index> for IndexedWaker {
    fn borrow(&self) -> &Index {
        &self.idx
    }
}

impl PartialEq for IndexedWaker {
    fn eq(&self, other: &IndexedWaker) -> bool {
        self.idx == other.idx
    }
}

impl Eq for IndexedWaker {}

impl Hash for IndexedWaker {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.idx.hash(state)
    }
}

/// Set of waiters' wakers keyed by a monotonically increasing index.
struct WakerSet {
    id: Index,
    wakers: HashSet<IndexedWaker>,
}

impl WakerSet {
    fn new() -> WakerSet {
        WakerSet {
            id: 0,
            wakers: HashSet::new(),
        }
    }

    /// Register `waker`, returning its slot index.
    fn insert(&mut self, waker: Waker) -> Index {
        debug_assert!(self.id != std::usize::MAX);

        self.id += 1;
        let indexed_waker = IndexedWaker {
            idx: self.id,
            waker,
        };

        self.wakers.insert(indexed_waker);
        self.id
    }

    fn remove(&mut self, idx: Index) {
        self.wakers.remove(&idx);
    }

    /// Wake every registered waiter, consuming the set.
    fn wake(self) {
        for waker in self.wakers {
            waker.wake()
        }
    }
}

/// Pending (with registered waiters) or finished with a verdict.
enum IdentificationStatus {
    Pending(WakerSet),
    Done(Result<(), super::protocol::Error>),
}

impl Default for IdentificationStatus {
    fn default() -> Self {
        IdentificationStatus::Pending(WakerSet::new())
    }
}

================================================
FILE: core/network/src/protocols/identify/message.rs
================================================
use std::convert::TryFrom;

use derive_more::Display;
use prost::{EncodeError, Message};
use protocol::{Bytes, BytesMut};
use tentacle::multiaddr::Multiaddr;

pub const MAX_LISTEN_ADDRS: usize = 10;

#[derive(Debug, Display)]
pub enum Error {
    #[display(fmt = "too many listen addrs")]
    TooManyListenAddrs,

    #[display(fmt = "no observed addrs")]
    NoObservedAddr,

    #[display(fmt = "no addr info")]
    NoAddrInfo,
}

/// Read access to an optional `AddressInfo`, so callers don't have to unwrap
/// the prost `Option` field themselves.
// NOTE(review): extraction stripped generics in this file; restored
// `Option<AddressInfo>`, `Vec<Multiaddr>`, `Option<Multiaddr>`,
// `Vec<Vec<u8>>`, `Vec<u8>` and `Result<Bytes, EncodeError>`.
pub trait AddressInfoMessage {
    fn validate(&self) -> Result<(), self::Error>;
    fn listen_addrs(&self) -> Vec<Multiaddr>;
    fn observed_addr(&self) -> Option<Multiaddr>;
}

impl AddressInfoMessage for Option<AddressInfo> {
    fn listen_addrs(&self) -> Vec<Multiaddr> {
        self.as_ref()
            .map(|ai| ai.listen_addrs())
            .unwrap_or_else(Vec::new)
    }

    fn observed_addr(&self) -> Option<Multiaddr> {
        self.as_ref().map(|ai| ai.observed_addr()).flatten()
    }

    fn validate(&self) -> Result<(), self::Error> {
        match self.as_ref() {
            Some(addr_info) => addr_info.validate(),
            None => Err(self::Error::NoAddrInfo),
        }
    }
}

/// Listen addresses plus the address we observed the remote from, carried as
/// raw multiaddr bytes on the wire.
#[derive(Message)]
pub struct AddressInfo {
    #[prost(bytes, repeated, tag = "1")]
    pub listen_addrs: Vec<Vec<u8>>,

    #[prost(bytes, tag = "2")]
    pub observed_addr: Vec<u8>,
}

impl AddressInfo {
    pub fn new(listen_addrs: Vec<Multiaddr>, observed_addr: Multiaddr) -> Self {
        AddressInfo {
            listen_addrs: listen_addrs.into_iter().map(|addr| addr.to_vec()).collect(),
            observed_addr: observed_addr.to_vec(),
        }
    }

    /// Decode listen addresses, silently dropping malformed entries.
    pub fn listen_addrs(&self) -> Vec<Multiaddr> {
        let addrs = self.listen_addrs.iter().cloned();
        let to_multiaddrs = addrs.filter_map(|bytes| Multiaddr::try_from(bytes).ok());
        to_multiaddrs.collect()
    }

    pub fn observed_addr(&self) -> Option<Multiaddr> {
        Multiaddr::try_from(self.observed_addr.clone()).ok()
    }

    /// Enforce the listen-addr cap and require a decodable observed address.
    pub fn validate(&self) -> Result<(), self::Error> {
        if self.listen_addrs.len() > MAX_LISTEN_ADDRS {
            return Err(self::Error::TooManyListenAddrs);
        }

        if self.observed_addr().is_none() {
            return Err(self::Error::NoObservedAddr);
        }

        Ok(())
    }

    #[cfg(test)]
    pub fn mock_valid() -> Self {
        let listen_addr: Multiaddr = "/ip4/47.111.169.36/tcp/2000".parse().unwrap();
        let observed_addr: Multiaddr = "/ip4/47.111.169.36/tcp/2001".parse().unwrap();

        AddressInfo {
            listen_addrs: vec![listen_addr.to_vec()],
            observed_addr: observed_addr.to_vec(),
        }
    }

    #[cfg(test)]
    pub fn mock_invalid() -> Self {
        AddressInfo {
            listen_addrs: vec![],
            observed_addr: b"xxx".to_vec(),
        }
    }
}

/// First message of the identify handshake: chain id + addresses.
#[derive(Message)]
pub struct Identity {
    #[prost(string, tag = "1")]
    pub chain_id: String,

    #[prost(message, tag = "2")]
    pub addr_info: Option<AddressInfo>,
}

impl Identity {
    pub fn new(chain_id: String, addr_info: AddressInfo) -> Self {
        Identity {
            chain_id,
            addr_info: Some(addr_info),
        }
    }

    pub fn validate(&self) -> Result<(), self::Error> {
        self.addr_info.validate()
    }

    pub fn into_bytes(self) -> Result<Bytes, EncodeError> {
        let mut buf = BytesMut::with_capacity(self.encoded_len());
        self.encode(&mut buf)?;

        Ok(buf.freeze())
    }

    #[cfg(test)]
    pub fn mock_valid() -> Self {
        use protocol::types::Hash;

        Identity {
            chain_id: Hash::digest(Bytes::from_static(b"hello")).as_hex(),
            addr_info: Some(AddressInfo::mock_valid()),
        }
    }

    #[cfg(test)]
    pub fn mock_invalid() -> Self {
        use protocol::types::Hash;

        let identity = Identity {
            chain_id: Hash::digest(Bytes::from_static(b"hello")).as_hex(),
            addr_info: Some(AddressInfo::mock_invalid()),
        };
        assert!(identity.validate().is_err());

        identity
    }
}

/// Server's reply to a verified `Identity`.
#[derive(Message)]
pub struct Acknowledge {
    #[prost(message, tag = "1")]
    pub addr_info: Option<AddressInfo>,
}

impl Acknowledge {
    pub fn new(addr_info: AddressInfo) -> Self {
        Acknowledge {
            addr_info: Some(addr_info),
        }
    }

    pub fn validate(&self) -> Result<(), self::Error> {
        self.addr_info.validate()
    }

    pub fn into_bytes(self) -> Result<Bytes, EncodeError> {
        let mut buf = BytesMut::with_capacity(self.encoded_len());
        self.encode(&mut buf)?;

        Ok(buf.freeze())
    }

    #[cfg(test)]
    pub fn mock_valid() -> Self {
        Acknowledge {
            addr_info: Some(AddressInfo::mock_valid()),
        }
    }

    #[cfg(test)]
    pub fn mock_invalid() -> Self {
        Acknowledge {
            addr_info: Some(AddressInfo::mock_invalid()),
        }
    }
}
================================================ FILE: core/network/src/protocols/identify/protocol.rs ================================================ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use derive_more::Display; use futures::future::{self, AbortHandle}; use futures_timer::Delay; use lazy_static::lazy_static; use parking_lot::RwLock; use prost::Message; use protocol::Bytes; use tentacle::multiaddr::{Multiaddr, Protocol}; use tentacle::secio::PeerId; use tentacle::service::{SessionType, TargetProtocol}; use tentacle::traits::SessionProtocol; use tentacle::{ProtocolId, SessionId}; #[cfg(test)] use crate::test::mock::{ServiceControl, SessionContext}; #[cfg(not(test))] use tentacle::context::{ProtocolContextMutRef, SessionContext}; #[cfg(not(test))] use tentacle::service::ServiceControl; #[cfg(not(test))] use super::behaviour::IdentifyBehaviour; #[cfg(test)] use super::tests::MockIdentifyBehaviour; use super::identification::{Identification, WaitIdentification}; use super::message::{Acknowledge, AddressInfoMessage, Identity}; pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(8); pub const MAX_MESSAGE_SIZE: usize = 5 * 1000; // 5KB lazy_static! 
{ // NOTE: Use peer id here because trust metric integrated test run in one process static ref PEER_IDENTIFICATION_BACKLOG: RwLock> = RwLock::new(HashMap::new()); } #[derive(Debug, Display, Clone)] pub enum Error { #[display(fmt = "wrong chain id")] WrongChainId, #[display(fmt = "timeout")] Timeout, #[display(fmt = "exceed max message size")] ExceedMaxMessageSize, #[display(fmt = "decode indentity failed")] DecodeIdentityFailed, #[display(fmt = "decode ack failed")] DecodeAckFailed, #[display(fmt = "{}", _0)] InvalidMessage(String), #[display(fmt = "wait future dropped")] WaitFutDropped, #[display(fmt = "disconnected")] Disconnected, #[display(fmt = "{}", _0)] Other(String), } // Wrap ProtocolContextMutRef for easy mock and test #[cfg(not(test))] pub struct IdentifyProtocolContext<'a>(ProtocolContextMutRef<'a>); #[cfg(test)] pub struct IdentifyProtocolContext<'a>(pub &'a crate::test::mock::ProtocolContext); #[derive(Debug, Display)] #[display(fmt = "peer {:?} addr {:?}", id, addr)] pub struct RemotePeer { pub id: PeerId, pub sid: SessionId, pub addr: Multiaddr, } pub struct NoEncryption; impl RemotePeer { pub fn from_proto_context( proto_context: &IdentifyProtocolContext, ) -> Result { match proto_context.0.session.remote_pubkey.as_ref() { None => Err(NoEncryption), Some(pubkey) => { let remote_peer = RemotePeer { id: pubkey.peer_id(), sid: proto_context.0.session.id, addr: proto_context.0.session.address.to_owned(), }; Ok(remote_peer) } } } } pub struct StateContext { pub remote_peer: Arc, pub proto_id: ProtocolId, pub service_control: ServiceControl, pub session_context: SessionContext, pub timeout_abort_handle: Option, } impl StateContext { pub fn from_proto_context( proto_context: &IdentifyProtocolContext, ) -> Result { let remote_peer = RemotePeer::from_proto_context(proto_context)?; let state_context = StateContext { remote_peer: Arc::new(remote_peer), proto_id: proto_context.0.proto_id(), service_control: proto_context.0.control().clone(), session_context: 
proto_context.0.session.clone(), timeout_abort_handle: None, }; Ok(state_context) } pub fn observed_addr(&self) -> Multiaddr { let remote_addr = self.session_context.address.iter(); remote_addr .filter(|proto| !matches!(proto, Protocol::P2P(_))) .collect() } pub fn send_message(&self, msg: Bytes) { if let Err(err) = self.service_control .quick_send_message_to(self.remote_peer.sid, self.proto_id, msg) { log::warn!( "internal error: quick send message to {} failed {}", self.remote_peer, err ); } } pub fn disconnect(&self) { let _ = self.service_control.disconnect(self.remote_peer.sid); } pub fn open_protocols(&self) { if let Err(err) = self .service_control .open_protocols(self.remote_peer.sid, TargetProtocol::All) { log::warn!("open protocols to peer {} failed {}", self.remote_peer, err); self.disconnect() } } pub fn set_open_protocols_timeout(&mut self, timeout: Duration) { let service_control = self.service_control.clone(); let remote_peer = Arc::clone(&self.remote_peer); tokio::spawn(async move { Delay::new(timeout).await; if crate::protocols::OpenedProtocols::is_all_opened(&remote_peer.id) { return; } log::warn!("peer {} open protocols timeout, disconnect it", remote_peer); finish_identify(&remote_peer, Err(self::Error::Timeout)); let _ = service_control.disconnect(remote_peer.sid); }); } pub fn set_timeout(&mut self, description: &'static str, timeout: Duration) { let service_control = self.service_control.clone(); let remote_peer = Arc::clone(&self.remote_peer); let (timeout, timeout_abort_handle) = future::abortable(async move { Delay::new(timeout).await; log::warn!( "{} timeout from peer {}, disconnect it", description, remote_peer, ); finish_identify(&remote_peer, Err(self::Error::Timeout)); let _ = service_control.disconnect(remote_peer.sid); }); self.timeout_abort_handle = Some(timeout_abort_handle); tokio::spawn(timeout); } pub fn cancel_timeout(&self) { if let Some(timeout) = self.timeout_abort_handle.as_ref() { timeout.abort() } } } impl Drop for 
StateContext { fn drop(&mut self) { // Something wrong happend, disconnect self.disconnect(); finish_identify( &self.remote_peer, Err(Error::Other("StateContext dropped".to_owned())), ); } } #[derive(Debug, Clone, PartialEq, Eq, Display)] pub enum ClientProcedure { #[display(fmt = "client wait for server identity acknowledge")] WaitAck, #[display(fmt = "client open other protocols")] OpenOtherProtocols, #[display(fmt = "server failed identification")] Failed, } #[derive(Debug, Clone, PartialEq, Eq, Display)] pub enum ServerProcedure { #[display(fmt = "server wait for client identity")] WaitIdentity, #[display(fmt = "server wait for client open protocols")] WaitOpenProtocols, // After accept session #[display(fmt = "client failed identification")] Failed, } pub enum State { SessionProtocolInited, FailedWithoutEncryption, FailedWithExceedMsgSize, ClientNegotiate { procedure: ClientProcedure, context: StateContext, }, ServerNegotiate { procedure: ServerProcedure, context: StateContext, }, } pub struct IdentifyProtocol { pub(crate) state: State, #[cfg(not(test))] behaviour: Arc, #[cfg(test)] pub(crate) behaviour: Arc, } impl IdentifyProtocol { #[cfg(not(test))] pub fn new(behaviour: Arc) -> Self { IdentifyProtocol { state: State::SessionProtocolInited, behaviour, } } #[cfg(test)] pub fn new() -> Self { IdentifyProtocol { state: State::SessionProtocolInited, behaviour: Arc::new(MockIdentifyBehaviour::new()), } } pub fn wait(peer_id: PeerId) -> WaitIdentification { let mut backlog = PEER_IDENTIFICATION_BACKLOG.write(); let identification = backlog.entry(peer_id).or_insert_with(Identification::new); identification.wait() } pub fn wait_failed(peer_id: &PeerId, error: String) { if let Some(identification) = { PEER_IDENTIFICATION_BACKLOG.write().remove(peer_id) } { identification.failed(self::Error::Other(error)) } } pub fn on_connected(&mut self, protocol_context: &IdentifyProtocolContext) { let mut state_context = match StateContext::from_proto_context(protocol_context) { 
Ok(ctx) => ctx, Err(_no) => { // Without peer id, there's no way to register a wait identification.No // need to clean it. log::warn!( "session from {:?} without encryption, disconnect it", protocol_context.0.session.address ); self.state = State::FailedWithoutEncryption; let _ = protocol_context.0.disconnect(protocol_context.0.session.id); return; } }; log::debug!("connected from {:?}", state_context.remote_peer); crate::protocols::OpenedProtocols::register( state_context.remote_peer.id.to_owned(), state_context.proto_id, ); match protocol_context.0.session.ty { SessionType::Inbound => { log::info!( "enter identify inbound procedure for {}", protocol_context.0.session.address ); state_context.set_timeout("wait client identity", DEFAULT_TIMEOUT); self.state = State::ServerNegotiate { procedure: ServerProcedure::WaitIdentity, context: state_context, }; } SessionType::Outbound => { log::info!( "enter identify outbound procedure for {}", protocol_context.0.session.address ); self.behaviour.send_identity(&state_context); state_context.set_timeout("wait server ack", DEFAULT_TIMEOUT); self.state = State::ClientNegotiate { procedure: ClientProcedure::WaitAck, context: state_context, }; } } } pub fn on_disconnected(&mut self, protocol_context: &IdentifyProtocolContext) { // Without peer id, there's no way to register a wait identification. No // need to clean it. 
let peer_id = match protocol_context.0.session.remote_pubkey.as_ref() { Some(pubkey) => pubkey.peer_id(), None => return, }; // TODO: Remove from upper level crate::protocols::OpenedProtocols::remove(&peer_id); if let Some(identification) = PEER_IDENTIFICATION_BACKLOG.write().remove(&peer_id) { identification.failed(self::Error::Disconnected); } } pub fn on_received(&mut self, protocol_context: &IdentifyProtocolContext, data: Bytes) { { if data.len() > MAX_MESSAGE_SIZE { let peer_id = match protocol_context.0.session.remote_pubkey.as_ref() { Some(pubkey) => pubkey.peer_id(), None => return, }; if let Some(identification) = PEER_IDENTIFICATION_BACKLOG.write().remove(&peer_id) { identification.failed(self::Error::ExceedMaxMessageSize); self.state = State::FailedWithExceedMsgSize; let _ = protocol_context.0.disconnect(protocol_context.0.session.id); return; } } } match &mut self.state { State::ServerNegotiate { ref mut procedure, context, } => match procedure { ServerProcedure::WaitIdentity => { let identity = match Identity::decode(data) { Ok(ident) => ident, Err(_) => { log::warn!("received invalid identity from {:?}", context.remote_peer); finish_identify( &context.remote_peer, Err(self::Error::DecodeIdentityFailed), ); *procedure = ServerProcedure::Failed; context.disconnect(); return; } }; context.cancel_timeout(); if let Err(err) = identity.validate() { finish_identify( &context.remote_peer, Err(self::Error::InvalidMessage(err.to_string())), ); *procedure = ServerProcedure::Failed; context.disconnect(); return; } if let Err(err) = self.behaviour.verify_remote_identity(&identity) { finish_identify(&context.remote_peer, Err(err)); *procedure = ServerProcedure::Failed; context.disconnect(); return; } finish_identify(&context.remote_peer, Ok(())); let listen_addrs = identity.addr_info.listen_addrs(); self.behaviour.process_listens(&context, listen_addrs); if let Some(observed_addr) = identity.addr_info.observed_addr() { self.behaviour.process_observed(&context, 
observed_addr); } self.behaviour.send_ack(&context); context.set_open_protocols_timeout(DEFAULT_TIMEOUT); *procedure = ServerProcedure::WaitOpenProtocols; } ServerProcedure::Failed | ServerProcedure::WaitOpenProtocols => { log::warn!( "should not received any more message from {} after acked identity", context.remote_peer ); context.disconnect(); } }, State::ClientNegotiate { ref mut procedure, context, } => match procedure { ClientProcedure::WaitAck => { let acknowledge = match Acknowledge::decode(data) { Ok(ack) => ack, Err(_) => { log::warn!("received invalid ack from {:?}", context.remote_peer); finish_identify( &context.remote_peer, Err(self::Error::DecodeAckFailed), ); *procedure = ClientProcedure::Failed; context.disconnect(); return; } }; context.cancel_timeout(); if let Err(err) = acknowledge.validate() { finish_identify( &context.remote_peer, Err(self::Error::InvalidMessage(err.to_string())), ); *procedure = ClientProcedure::Failed; context.disconnect(); return; } finish_identify(&context.remote_peer, Ok(())); let listen_addrs = acknowledge.addr_info.listen_addrs(); self.behaviour.process_listens(&context, listen_addrs); if let Some(observed_addr) = acknowledge.addr_info.observed_addr() { self.behaviour.process_observed(&context, observed_addr); } context.open_protocols(); *procedure = ClientProcedure::OpenOtherProtocols; } ClientProcedure::OpenOtherProtocols | ClientProcedure::Failed => { log::warn!( "should not received any more message from {} after open protocols", context.remote_peer ); context.disconnect(); } }, _ => { log::warn!( "should not received message from {} out of negotiate state", protocol_context.0.session.address ); let _ = protocol_context.0.disconnect(protocol_context.0.session.id); } } } } #[cfg(test)] impl SessionProtocol for IdentifyProtocol {} #[cfg(not(test))] impl SessionProtocol for IdentifyProtocol { fn connected(&mut self, protocol_context: ProtocolContextMutRef, _version: &str) { 
self.on_connected(&IdentifyProtocolContext(protocol_context)); } fn disconnected(&mut self, protocol_context: ProtocolContextMutRef) { self.on_disconnected(&IdentifyProtocolContext(protocol_context)); } fn received(&mut self, protocol_context: ProtocolContextMutRef, data: bytes::Bytes) { self.on_received(&IdentifyProtocolContext(protocol_context), data) } } fn finish_identify(peer: &RemotePeer, result: Result<(), self::Error>) { let identification = match { PEER_IDENTIFICATION_BACKLOG.write().remove(&peer.id) } { Some(ident) => ident, None => { log::debug!("peer {:?} identification has finished already", peer); return; } }; match result { Ok(()) => identification.pass(), Err(err) => { log::warn!("identification for peer {} failed: {}", peer, err); identification.failed(err); } } } ================================================ FILE: core/network/src/protocols/identify/tests.rs ================================================ use std::time::Duration; use futures_timer::Delay; use parking_lot::Mutex; use protocol::Bytes; use tentacle::multiaddr::Multiaddr; use tentacle::service::{SessionType, TargetProtocol}; use super::message; use super::protocol::{ ClientProcedure, Error, IdentifyProtocol, IdentifyProtocolContext, ServerProcedure, State, StateContext, MAX_MESSAGE_SIZE, }; use crate::test::mock::{ControlEvent, ProtocolContext}; const PROTOCOL_ID: usize = 2; const SESSION_ID: usize = 2; #[derive(Debug, Clone)] pub enum BehaviourEvent { SendIdentity, SendAck, ProcessListen, ProcessObserved, VerifyRemoteIdentity, } pub struct MockIdentifyBehaviour { event: Mutex>, skip_chain_id_verify: Mutex, } impl MockIdentifyBehaviour { pub fn new() -> Self { MockIdentifyBehaviour { event: Mutex::new(None), skip_chain_id_verify: Mutex::new(true), } } pub fn event(&self) -> Option { self.event.lock().clone() } pub fn send_identity(&self, _: &StateContext) { *self.event.lock() = Some(BehaviourEvent::SendIdentity) } pub fn send_ack(&self, _: &StateContext) { *self.event.lock() = 
Some(BehaviourEvent::SendAck) } pub fn process_listens(&self, _: &StateContext, _listen_addrs: Vec) { *self.event.lock() = Some(BehaviourEvent::ProcessListen) } pub fn process_observed(&self, _: &StateContext, _observed_addr: Multiaddr) { *self.event.lock() = Some(BehaviourEvent::ProcessObserved) } pub fn verify_remote_identity(&self, _identity: &message::Identity) -> Result<(), Error> { { *self.event.lock() = Some(BehaviourEvent::VerifyRemoteIdentity); } if *self.skip_chain_id_verify.lock() { Ok(()) } else { Err(Error::WrongChainId) } } pub fn skip_chain_id_verify(&self, result: bool) { *self.skip_chain_id_verify.lock() = result; } } #[test] fn should_reject_unencrypted_connection() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make_no_encrypted( PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound, ); identify.on_connected(&IdentifyProtocolContext(&proto_context)); match identify.state { State::FailedWithoutEncryption => (), _ => panic!("should enter failed state"), } match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_wait_client_identity_for_inbound_connection() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); match identify.state { State::ServerNegotiate { procedure: ServerProcedure::WaitIdentity, context, } => assert!( context.timeout_abort_handle.is_some(), "should set up wait timeout" ), _ => panic!("should enter failed state"), } } #[tokio::test] async fn should_disconnect_if_wait_client_identity_timeout() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); 
identify.on_connected(&IdentifyProtocolContext(&proto_context)); let mut context = match identify.state { State::ServerNegotiate { procedure: ServerProcedure::WaitIdentity, context, } => { assert!( context.timeout_abort_handle.is_some(), "should set up wait timeout" ); context } _ => panic!("should enter failed state"), }; context.set_timeout("override wait identity", Duration::from_millis(300)); Delay::new(Duration::from_millis(700)).await; match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_register_opened_protocol() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let peer_id = proto_context .session .remote_pubkey .as_ref() .unwrap() .peer_id(); assert!(crate::protocols::OpenedProtocols::is_open( &peer_id, &PROTOCOL_ID.into() )); } #[tokio::test] async fn should_send_identity_to_server_for_outbound_connection() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); match identify.state { State::ClientNegotiate { procedure: ClientProcedure::WaitAck, context, } => assert!( context.timeout_abort_handle.is_some(), "should set up wait timeout" ), _ => panic!("should enter failed state"), } match identify.behaviour.event() { Some(BehaviourEvent::SendIdentity) => (), _ => panic!("should send identity"), } } #[tokio::test] async fn should_disconnect_if_wait_server_ack_timeout() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let mut context 
= match identify.state { State::ClientNegotiate { procedure: ClientProcedure::WaitAck, context, } => { assert!( context.timeout_abort_handle.is_some(), "should set up wait timeout" ); context } _ => panic!("should enter failed state"), }; match identify.behaviour.event() { Some(BehaviourEvent::SendIdentity) => (), _ => panic!("should send identity"), } context.set_timeout("override wait ack", Duration::from_millis(300)); Delay::new(Duration::from_millis(700)).await; match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_disconnect_if_exceed_max_message_size() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); let msg = Bytes::from("a".repeat(MAX_MESSAGE_SIZE + 1)); identify.on_received(&IdentifyProtocolContext(&proto_context), msg); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_send_ack_if_identity_is_valid_on_server_side() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let identity = message::Identity::mock_valid().into_bytes().unwrap(); identify.behaviour.skip_chain_id_verify(true); identify.on_received(&IdentifyProtocolContext(&proto_context), identity); match identify.state { State::ServerNegotiate { procedure: ServerProcedure::WaitOpenProtocols, context, } => assert!( context.timeout_abort_handle.is_some(), "should set up wait open protocols timeout" ), _ => panic!("should enter wait open protocols state"), } match identify.behaviour.event() { Some(BehaviourEvent::SendAck) => (), _ => panic!("should send 
ack"), } } #[tokio::test] async fn should_disconnect_if_client_open_protocols_timeout() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let identity = message::Identity::mock_valid().into_bytes().unwrap(); identify.behaviour.skip_chain_id_verify(true); identify.on_received(&IdentifyProtocolContext(&proto_context), identity); let mut context = match identify.state { State::ServerNegotiate { procedure: ServerProcedure::WaitOpenProtocols, context, } => { assert!( context.timeout_abort_handle.is_some(), "should set up wait open protocols timeout" ); context } _ => panic!("should enter wait open protocols state"), }; match identify.behaviour.event() { Some(BehaviourEvent::SendAck) => (), _ => panic!("should send ack"), } context.set_timeout("override wait open protocols", Duration::from_millis(300)); Delay::new(Duration::from_millis(700)).await; match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_disconnect_if_client_send_undecodeable_identity() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let msg = Bytes::from("a"); identify.on_received(&IdentifyProtocolContext(&proto_context), msg); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } match identify.state { State::ServerNegotiate { procedure: ServerProcedure::Failed, .. 
} => (), _ => panic!("should enter failed state"), } } #[tokio::test] async fn should_disconnect_if_client_send_invalid_identity() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let msg = message::Identity::mock_invalid().into_bytes().unwrap(); identify.on_received(&IdentifyProtocolContext(&proto_context), msg); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } match identify.state { State::ServerNegotiate { procedure: ServerProcedure::Failed, .. } => (), _ => panic!("should enter failed state"), } } #[tokio::test] async fn should_disconnect_if_client_send_different_chain_id() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let msg = message::Identity::mock_valid().into_bytes().unwrap(); identify.behaviour.skip_chain_id_verify(false); identify.on_received(&IdentifyProtocolContext(&proto_context), msg); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } match identify.state { State::ServerNegotiate { procedure: ServerProcedure::Failed, .. 
} => (), _ => panic!("should enter failed state"), } } #[tokio::test] async fn should_disconnect_if_client_send_data_during_open_protocols() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let identity = message::Identity::mock_valid().into_bytes().unwrap(); identify.behaviour.skip_chain_id_verify(true); identify.on_received(&IdentifyProtocolContext(&proto_context), identity); match &identify.state { State::ServerNegotiate { procedure: ServerProcedure::WaitOpenProtocols, context, } => assert!( context.timeout_abort_handle.is_some(), "should set up wait open protocols timeout" ), _ => panic!("should enter wait open protocols state"), } match identify.behaviour.event() { Some(BehaviourEvent::SendAck) => (), _ => panic!("should send ack"), } identify.on_received( &IdentifyProtocolContext(&proto_context), Bytes::from_static(b"test"), ); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_open_protocols_after_receive_valid_ack_from_server() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let ack = message::Acknowledge::mock_valid().into_bytes().unwrap(); identify.on_received(&IdentifyProtocolContext(&proto_context), ack); match identify.state { State::ClientNegotiate { procedure: ClientProcedure::OpenOtherProtocols, .. 
} => (), _ => panic!("should enter wait open protocols state"), } match proto_context.control().event() { Some(ControlEvent::OpenProtocols { session_id, target_proto, }) if session_id == SESSION_ID.into() && target_proto == TargetProtocol::All => (), _ => panic!("should open protocols"), } } #[tokio::test] async fn should_disconnect_if_server_send_undecodeable_ack() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); identify.on_received( &IdentifyProtocolContext(&proto_context), Bytes::from_static(b"xxx"), ); match identify.state { State::ClientNegotiate { procedure: ClientProcedure::Failed, .. } => (), _ => panic!("should enter failed state"), } match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_disconnect_if_server_send_invalid_ack() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let ack = message::Acknowledge::mock_invalid().into_bytes().unwrap(); identify.on_received(&IdentifyProtocolContext(&proto_context), ack); match identify.state { State::ClientNegotiate { procedure: ClientProcedure::Failed, .. 
} => (), _ => panic!("should enter failed state"), } match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_disconnect_if_server_send_data_during_open_protocols() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_connected(&IdentifyProtocolContext(&proto_context)); let ack = message::Acknowledge::mock_valid().into_bytes().unwrap(); identify.on_received(&IdentifyProtocolContext(&proto_context), ack); match &identify.state { State::ClientNegotiate { procedure: ClientProcedure::OpenOtherProtocols, .. } => (), _ => panic!("should enter wait open protocols state"), } match proto_context.control().event() { Some(ControlEvent::OpenProtocols { session_id, target_proto, }) if session_id == SESSION_ID.into() && target_proto == TargetProtocol::All => (), _ => panic!("should open protocols"), } identify.on_received( &IdentifyProtocolContext(&proto_context), Bytes::from_static(b"test"), ); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_disconnect_if_either_send_data_no_in_negotiate_procedure() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); identify.on_received( &IdentifyProtocolContext(&proto_context), Bytes::from_static(b"test"), ); match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } } #[tokio::test] async fn should_wake_wait_identification_after_call_finish_identify() { let mut identify = IdentifyProtocol::new(); let proto_context = 
ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Inbound); let peer_id = proto_context .session .remote_pubkey .as_ref() .unwrap() .peer_id(); let wait_fut = IdentifyProtocol::wait(peer_id); tokio::spawn(async move { identify.on_connected(&IdentifyProtocolContext(&proto_context)); let identity = message::Identity::mock_valid().into_bytes().unwrap(); identify.behaviour.skip_chain_id_verify(true); identify.on_received(&IdentifyProtocolContext(&proto_context), identity); match identify.state { State::ServerNegotiate { procedure: ServerProcedure::WaitOpenProtocols, context, } => assert!( context.timeout_abort_handle.is_some(), "should set up wait open protocols timeout" ), _ => panic!("should enter wait open protocols state"), } match identify.behaviour.event() { Some(BehaviourEvent::SendAck) => (), _ => panic!("should send ack"), } }); assert!(wait_fut.await.is_ok(), "should be ok if pass identify"); } #[tokio::test] async fn should_pass_error_to_wait_identification_result_if_failed_identify() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); let peer_id = proto_context .session .remote_pubkey .as_ref() .unwrap() .peer_id(); let wait_fut = IdentifyProtocol::wait(peer_id); tokio::spawn(async move { identify.on_connected(&IdentifyProtocolContext(&proto_context)); identify.on_received( &IdentifyProtocolContext(&proto_context), Bytes::from_static(b"xxx"), ); match identify.state { State::ClientNegotiate { procedure: ClientProcedure::Failed, .. 
} => (), _ => panic!("should enter failed state"), } match proto_context.control().event() { Some(ControlEvent::Disconnect { session_id }) if session_id == SESSION_ID.into() => (), _ => panic!("should disconnect"), } }); match wait_fut.await { Err(Error::DecodeAckFailed) => (), _ => panic!("should pass decode failed error"), } } #[tokio::test] async fn should_pass_disconnected_to_wait_identification_result_if_still_wait_identify_but_disconnected( ) { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); let peer_id = proto_context .session .remote_pubkey .as_ref() .unwrap() .peer_id(); let wait_fut = IdentifyProtocol::wait(peer_id); tokio::spawn(async move { identify.on_connected(&IdentifyProtocolContext(&proto_context)); identify.on_disconnected(&IdentifyProtocolContext(&proto_context)); }); match wait_fut.await { Err(Error::Disconnected) => (), _ => panic!("should pass disconnected error"), } } #[tokio::test] async fn should_remove_from_opened_protocols_after_disconnect() { let mut identify = IdentifyProtocol::new(); let proto_context = ProtocolContext::make(PROTOCOL_ID.into(), SESSION_ID.into(), SessionType::Outbound); let peer_id = proto_context .session .remote_pubkey .as_ref() .unwrap() .peer_id(); identify.on_connected(&IdentifyProtocolContext(&proto_context)); identify.on_disconnected(&IdentifyProtocolContext(&proto_context)); assert_eq!( crate::protocols::OpenedProtocols::is_open(&peer_id, &PROTOCOL_ID.into()), false ); } ================================================ FILE: core/network/src/protocols/identify.rs ================================================ mod behaviour; mod common; mod identification; mod message; mod protocol; #[cfg(test)] mod tests; use std::sync::Arc; use futures::channel::mpsc::UnboundedSender; use tentacle::builder::MetaBuilder; use tentacle::secio::PeerId; use tentacle::service::{ProtocolHandle, ProtocolMeta}; use 
tentacle::ProtocolId; use crate::event::PeerManagerEvent; use crate::peer_manager::PeerManagerHandle; use self::protocol::IdentifyProtocol; use behaviour::IdentifyBehaviour; pub use self::identification::WaitIdentification; pub use self::protocol::{Error, DEFAULT_TIMEOUT}; pub const NAME: &str = "chain_identify"; pub const SUPPORT_VERSIONS: [&str; 1] = ["0.2"]; pub struct Identify { behaviour: Arc, } impl Identify { pub fn new(peer_mgr: PeerManagerHandle, event_tx: UnboundedSender) -> Self { #[cfg(feature = "global_ip_only")] log::info!("turn on global ip only"); #[cfg(not(feature = "global_ip_only"))] log::info!("turn off global ip only"); let behaviour = Arc::new(IdentifyBehaviour::new(peer_mgr, event_tx)); Identify { behaviour } } #[cfg(not(test))] pub fn build_meta(self, protocol_id: ProtocolId) -> ProtocolMeta { let behaviour = self.behaviour; MetaBuilder::new() .id(protocol_id) .name(name!(NAME)) .support_versions(support_versions!(SUPPORT_VERSIONS)) .session_handle(move || { ProtocolHandle::Callback(Box::new(IdentifyProtocol::new(Arc::clone(&behaviour)))) }) .build() } #[cfg(test)] pub fn build_meta(self, protocol_id: ProtocolId) -> ProtocolMeta { let _ = self.behaviour; MetaBuilder::new() .id(protocol_id) .name(name!(NAME)) .support_versions(support_versions!(SUPPORT_VERSIONS)) .session_handle(move || ProtocolHandle::Callback(Box::new(IdentifyProtocol::new()))) .build() } pub fn wait_identified(peer_id: PeerId) -> WaitIdentification { IdentifyProtocol::wait(peer_id) } pub fn wait_failed(peer_id: &PeerId, error: String) { IdentifyProtocol::wait_failed(peer_id, error) } } ================================================ FILE: core/network/src/protocols/macro.rs ================================================ /// `Fn` protocol name generator #[macro_export] macro_rules! name { ($proto_name:expr) => { |id| format!("{}/{}", $proto_name, id) }; } /// Create `Vec` support versions from constant `[&str, N]` #[macro_export] macro_rules! 
support_versions { ($versions:expr) => { $versions.to_vec().into_iter().map(String::from).collect() }; } ================================================ FILE: core/network/src/protocols/mod.rs ================================================ #[macro_use] mod r#macro; mod core; mod discovery; mod ping; mod transmitter; pub mod identify; pub use self::core::{CoreProtocol, CoreProtocolBuilder, OpenedProtocols}; pub use transmitter::{ReceivedMessage, Recipient, Transmitter, TransmitterMessage}; ================================================ FILE: core/network/src/protocols/ping/behaviour.rs ================================================ use super::protocol::PingEvent; use crate::event::{MisbehaviorKind, PeerManagerEvent}; use futures::{ channel::mpsc::{Receiver, UnboundedSender}, Future, Stream, }; use log::debug; use std::{ pin::Pin, sync::atomic::{AtomicBool, Ordering}, task::{Context, Poll}, }; pub struct PingEventReporter { inner: UnboundedSender, mgr_shutdown: AtomicBool, } impl PingEventReporter { pub fn new(inner: UnboundedSender) -> Self { PingEventReporter { inner, mgr_shutdown: AtomicBool::new(false), } } fn is_mgr_shutdown(&self) -> bool { self.mgr_shutdown.load(Ordering::SeqCst) } fn mgr_shutdown(&self) { debug!("network: ping: peer manager shutdown"); self.mgr_shutdown.store(true, Ordering::SeqCst); } } #[derive(derive_more::Constructor)] pub struct EventTranslator { rx: Receiver, reporter: PingEventReporter, } impl Future for EventTranslator { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { if self.reporter.is_mgr_shutdown() { return Poll::Ready(()); } loop { let event = match Stream::poll_next(Pin::new(&mut self.as_mut().rx), cx) { Poll::Pending => break, Poll::Ready(None) => return Poll::Ready(()), Poll::Ready(Some(event)) => event, }; let mgr_event = match event { PingEvent::Ping(ref _pid) => continue, PingEvent::Pong(ref pid, ref connected_addr, ping_time) => { let host = &connected_addr.host; 
common_apm::metrics::network::NETWORK_PING_HISTOGRAM_VEC .with_label_values(&[host]) .observe(ping_time.as_millis() as f64); PeerManagerEvent::PeerAlive { pid: pid.clone() } } PingEvent::Timeout(ref pid) => { let kind = MisbehaviorKind::PingTimeout; PeerManagerEvent::Misbehave { pid: pid.clone(), kind, } } PingEvent::UnexpectedError(ref pid) => { let kind = MisbehaviorKind::PingUnexpect; PeerManagerEvent::Misbehave { pid: pid.clone(), kind, } } }; if self.reporter.inner.unbounded_send(mgr_event).is_err() { self.reporter.mgr_shutdown(); return Poll::Ready(()); } } Poll::Pending } } ================================================ FILE: core/network/src/protocols/ping/message.rs ================================================ use prost::{EncodeError, Message, Oneof}; use protocol::{Bytes, BytesMut}; #[derive(Clone, Copy, PartialEq, Eq, Oneof)] pub enum PingPayload { #[prost(uint32, tag = "1")] Ping(u32), #[prost(uint32, tag = "2")] Pong(u32), } #[derive(Clone, PartialEq, Message)] pub struct PingMessage { #[prost(oneof = "PingPayload", tags = "1, 2")] pub payload: Option, } impl PingMessage { pub fn new_pong(nonce: u32) -> Self { PingMessage { payload: Some(PingPayload::Pong(nonce)), } } pub fn new_ping(nonce: u32) -> Self { PingMessage { payload: Some(PingPayload::Ping(nonce)), } } pub fn into_bytes(self) -> Result { let mut buf = BytesMut::with_capacity(self.encoded_len()); self.encode(&mut buf)?; Ok(buf.freeze()) } } ================================================ FILE: core/network/src/protocols/ping/protocol.rs ================================================ use super::message::{PingMessage, PingPayload}; use futures::channel::mpsc::Sender; use log::{debug, error, warn}; use prost::Message; use tentacle::{ context::{ProtocolContext, ProtocolContextMutRef}, secio::PeerId, service::TargetSession, traits::ServiceProtocol, SessionId, }; use crate::common::ConnectedAddr; use std::{ collections::HashMap, str, time::{Duration, SystemTime, UNIX_EPOCH}, }; const 
SEND_PING_TOKEN: u64 = 0; const CHECK_TIMEOUT_TOKEN: u64 = 1; /// Ping protocol events #[derive(Debug)] pub enum PingEvent { /// Peer send ping to us. Ping(PeerId), /// Peer send pong to us. Pong(PeerId, ConnectedAddr, Duration), /// Peer is timeout. Timeout(PeerId), /// Peer cause a unexpected error. UnexpectedError(PeerId), } /// PingStatus of a peer #[derive(Clone, Debug)] struct PingStatus { /// Are we currently pinging this peer? processing: bool, /// The time we last send ping to this peer. last_ping: SystemTime, peer_id: PeerId, } impl PingStatus { /// A meaningless value, peer must send a pong has same nonce to respond a /// ping. fn nonce(&self) -> u32 { self.last_ping .duration_since(UNIX_EPOCH) .map(|dur| dur.as_secs()) .unwrap_or(0) as u32 } /// Time duration since we last send ping. fn elapsed(&self) -> Duration { self.last_ping .elapsed() .unwrap_or_else(|_| Duration::from_secs(0)) } } /// Ping protocol handler. /// /// The interval means that we send ping to peers. /// The timeout means that consider peer is timeout if during a timeout we still /// have not received pong from a peer pub struct PingProtocol { interval: Duration, timeout: Duration, connected_session_ids: HashMap, event_sender: Sender, } impl PingProtocol { pub fn new( interval: Duration, timeout: Duration, event_sender: Sender, ) -> PingProtocol { PingProtocol { interval, timeout, connected_session_ids: Default::default(), event_sender, } } pub fn send_event(&mut self, event: PingEvent) { if let Err(err) = self.event_sender.try_send(event) { error!("send ping event error: {}", err); } } } impl ServiceProtocol for PingProtocol { fn init(&mut self, context: &mut ProtocolContext) { // send ping to peers periodically let proto_id = context.proto_id; if context .set_service_notify(proto_id, self.interval, SEND_PING_TOKEN) .is_err() { warn!("start ping fail"); } if context .set_service_notify(proto_id, self.timeout, CHECK_TIMEOUT_TOKEN) .is_err() { warn!("start ping fail"); } } fn 
connected(&mut self, context: ProtocolContextMutRef, version: &str) { let session = context.session; match session.remote_pubkey { Some(ref pubkey) => { let peer_id = pubkey.peer_id(); self.connected_session_ids .entry(session.id) .or_insert_with(|| PingStatus { last_ping: SystemTime::now(), processing: false, peer_id: peer_id.clone(), }); debug!( "proto id [{}] open on session [{}], address: [{}], type: [{:?}], version: {}", context.proto_id, session.id, session.address, session.ty, version ); debug!("connected sessions are: {:?}", self.connected_session_ids); crate::protocols::OpenedProtocols::register(peer_id, context.proto_id); } None => { if context.disconnect(session.id).is_err() { debug!("disconnect fail"); } } } } fn disconnected(&mut self, context: ProtocolContextMutRef) { let session = context.session; self.connected_session_ids.remove(&session.id); debug!( "proto id [{}] close on session [{}]", context.proto_id, session.id ); } fn received(&mut self, context: ProtocolContextMutRef, data: bytes::Bytes) { let session = context.session; if let Some(peer_id) = self .connected_session_ids .get(&session.id) .map(|ps| ps.peer_id.clone()) { match PingMessage::decode(data) { Err(err) => { warn!("decode message {}", err); self.send_event(PingEvent::UnexpectedError(peer_id)) } Ok(PingMessage { payload: None }) => { self.send_event(PingEvent::UnexpectedError(peer_id)) } Ok(PingMessage { payload: Some(pld) }) => match pld { PingPayload::Ping(nonce) => { let pong = match PingMessage::new_pong(nonce).into_bytes() { Ok(p) => p, Err(err) => { warn!("encode pong {}", err); return; } }; if let Err(err) = context.send_message(pong) { debug!("send message {}", err); } self.send_event(PingEvent::Ping(peer_id)); } PingPayload::Pong(nonce) => { // check pong if self .connected_session_ids .get(&session.id) .map(|ps| (ps.processing, ps.nonce())) == Some((true, nonce)) { let ping_time = match self.connected_session_ids.get_mut(&session.id) { Some(ps) => { ps.processing = false; 
ps.elapsed()
                                    }
                                    None => return,
                                };
                            let connected_addr = ConnectedAddr::from(&context.session.address);
                            self.send_event(PingEvent::Pong(peer_id, connected_addr, ping_time));
                        } else {
                            // ignore if nonce is incorrect
                            self.send_event(PingEvent::UnexpectedError(peer_id));
                        }
                    }
                },
            }
        }
    }

    // Periodic timers: SEND_PING_TOKEN broadcasts a ping to every idle
    // session; CHECK_TIMEOUT_TOKEN reports peers whose pong is overdue.
    fn notify(&mut self, context: &mut ProtocolContext, token: u64) {
        match token {
            SEND_PING_TOKEN => {
                debug!("proto [{}] start ping peers", context.proto_id);
                let now = SystemTime::now();
                // Mark every non-processing session in-flight, stamping them
                // all with the same `now` (so all nonces are identical).
                let peers: Vec<(SessionId, u32)> = self
                    .connected_session_ids
                    .iter_mut()
                    .filter_map(|(session_id, ps)| {
                        if ps.processing {
                            None
                        } else {
                            ps.processing = true;
                            ps.last_ping = now;
                            Some((*session_id, ps.nonce()))
                        }
                    })
                    .collect();
                if !peers.is_empty() {
                    // One ping message is reused for the whole broadcast;
                    // peers[0].1 equals every other nonce (shared `now`).
                    let ping = match PingMessage::new_ping(peers[0].1).into_bytes() {
                        Ok(p) => p,
                        Err(err) => {
                            warn!("encode ping {}", err);
                            return;
                        }
                    };
                    // NOTE(review): `Vec` element type looks stripped by
                    // extraction (presumably Vec<SessionId>).
                    let peer_ids: Vec = peers
                        .into_iter()
                        .map(|(session_id, _)| session_id)
                        .collect();
                    let proto_id = context.proto_id;
                    let target = TargetSession::Multi(peer_ids);
                    if let Err(err) = context.filter_broadcast(target, proto_id, ping) {
                        debug!("send message {}", err);
                    }
                }
            }
            CHECK_TIMEOUT_TOKEN => {
                debug!("proto [{}] check ping timeout", context.proto_id);
                let timeout = self.timeout;
                // NOTE(review): turbofish argument looks stripped by
                // extraction (presumably collect::<Vec<_>>()). Collecting
                // first ends the immutable borrow before send_event.
                for peer_id in self
                    .connected_session_ids
                    .values()
                    .filter(|ps| ps.processing && ps.elapsed() >= timeout)
                    .map(|ps| ps.peer_id.clone())
                    .collect::>()
                {
                    self.send_event(PingEvent::Timeout(peer_id));
                }
            }
            _ => panic!("unknown token {}", token),
        }
    }
}

================================================
FILE: core/network/src/protocols/ping.rs
================================================

mod behaviour;
mod message;
mod protocol;

use self::protocol::PingProtocol;
use behaviour::{EventTranslator, PingEventReporter};

use crate::event::PeerManagerEvent;

use futures::channel::mpsc::{self, UnboundedSender};
use tentacle::{
    builder::MetaBuilder,
    service::{ProtocolHandle, ProtocolMeta},
    ProtocolId,
};

use std::time::Duration;

// Protocol name announced to remote peers.
pub const NAME: &str =
"chain_ping";
// Protocol versions this handler accepts.
pub const SUPPORT_VERSIONS: [&str; 1] = ["0.1"];

/// Wires PingProtocol to its event-reporting behaviour task.
pub struct Ping(PingProtocol);

impl Ping {
    pub fn new(
        interval: Duration,
        timeout: Duration,
        // NOTE(review): generic parameter looks stripped by extraction
        // (presumably UnboundedSender<PeerManagerEvent>).
        sender: UnboundedSender,
    ) -> Self {
        let reporter = PingEventReporter::new(sender);
        // Bounded channel between the protocol handler and the translator.
        let (tx, rx) = mpsc::channel(1000);
        let translator = EventTranslator::new(rx, reporter);
        // Translator runs as a detached background task.
        tokio::spawn(translator);

        Ping(PingProtocol::new(interval, timeout, tx))
    }

    // Consumes self into tentacle protocol metadata.
    pub fn build_meta(self, protocol_id: ProtocolId) -> ProtocolMeta {
        MetaBuilder::new()
            .id(protocol_id)
            .name(name!(NAME))
            .support_versions(support_versions!(SUPPORT_VERSIONS))
            .service_handle(move || ProtocolHandle::Callback(Box::new(self.0)))
            .build()
    }
}

================================================
FILE: core/network/src/protocols/transmitter/behaviour.rs
================================================

use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};

use arc_swap::ArcSwapOption;
use futures::channel::mpsc::{self, UnboundedReceiver, UnboundedSender};
use futures::channel::oneshot;
use futures::stream::Stream;
use protocol::traits::Priority;
use protocol::Bytes;
use tentacle::error::SendErrorKind;
use tentacle::secio::PeerId;
use tentacle::service::TargetSession;
use tentacle::SessionId;

use super::message::{Recipient, SeqChunkMessage, TransmitterMessage};
use super::MAX_CHUNK_SIZE;
use crate::connection::{ConnectionServiceControl, ProtocolMessage};
use crate::error::{ErrorKind, NetworkError};
use crate::event::PeerManagerEvent;
use crate::peer_manager::SharedSessions;
use crate::protocols::core::TRANSMITTER_PROTOCOL_ID;
use crate::traits::{NetworkContext, SharedSessionBook};

// TODO: Refactor connection service, decouple protocol and service
// initialization.
#[derive(Clone)]
pub struct TransmitterBehaviour {
    // Lazily-initialized sender into the background sending task; stays None
    // until `init` runs.
    // NOTE(review): generic parameters look stripped by extraction here.
    pending_sending_tx: ArcSwapOption>,
}

impl TransmitterBehaviour {
    pub fn new() -> Self {
        let pending_sending_tx = ArcSwapOption::from(None);

        TransmitterBehaviour { pending_sending_tx }
    }

    // Spawns the BackgroundSending task and publishes its input channel so
    // later `send` calls can reach it.
    pub fn init(
        &self,
        conn_ctrl: ConnectionServiceControl,
        peers_serv: UnboundedSender,
        sessions: SharedSessions,
    ) {
        let (pending_sending_tx, pending_sending_rx) = mpsc::unbounded();
        let background_sending =
            BackgroundSending::new(conn_ctrl, peers_serv, sessions, pending_sending_rx);
        tokio::spawn(background_sending);

        self.pending_sending_tx
            .store(Some(Arc::new(pending_sending_tx)))
    }

    // Queues a message for the background task and resolves with that task's
    // send result; errors when `init` has not run or the task is gone.
    pub fn send(&self, msg: TransmitterMessage) -> impl Future> {
        let (tx, rx) = oneshot::channel();
        let pending_sending = PendingSending { msg, tx };
        let tx_guard = self.pending_sending_tx.load();

        async move {
            match tx_guard.as_ref() {
                Some(tx) => {
                    if let Err(e) = tx.unbounded_send(pending_sending) {
                        log::error!("pending sending tx dropped");
                        return Err(NetworkError::Internal(Box::new(e)));
                    }
                }
                None => {
                    log::error!("transmitter behaviour isn't inited");
                    return Err(NetworkError::Internal(Box::new(ErrorKind::Internal(
                        "transmitter behaviour isn't inited".to_owned(),
                    ))));
                }
            }

            // A dropped oneshot (task died before replying) also maps to an
            // internal error.
            match rx.await {
                Ok(Err(e)) => Err(NetworkError::Internal(Box::new(e))),
                Err(e) => Err(NetworkError::Internal(Box::new(e))),
                Ok(Ok(_)) => Ok(()),
            }
        }
    }
}

// A queued message plus the oneshot used to report its send result.
struct PendingSending {
    msg: TransmitterMessage,
    tx: oneshot::Sender>,
}

// Long-lived task performing the actual sends for TransmitterBehaviour.
struct BackgroundSending {
    conn_ctrl: ConnectionServiceControl,
    peers_serv: UnboundedSender,
    sessions: SharedSessions,
    // Monotonic sequence number tagging each logical (possibly chunked)
    // message.
    data_seq: AtomicU64,
    pending_sending_rx: UnboundedReceiver,
}

impl BackgroundSending {
    pub fn new(
        conn_ctrl: ConnectionServiceControl,
        peers_serv: UnboundedSender,
        sessions: SharedSessions,
        pending_sending_rx: UnboundedReceiver,
    ) -> Self {
        BackgroundSending {
            conn_ctrl,
            peers_serv,
            sessions,
            data_seq: AtomicU64::new(0),
            pending_sending_rx,
        }
    }

    // Borrow a lightweight sending context over this task's state.
    pub fn context(&self) -> SendingContext<'_> {
        SendingContext {
            conn_ctrl: &self.conn_ctrl,
peers_serv: &self.peers_serv,
            sessions: &self.sessions,
            data_seq: &self.data_seq,
        }
    }
}

impl Future for BackgroundSending {
    type Output = ();

    // Drains queued PendingSending items until the channel closes, replying
    // to each caller through its oneshot.
    fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll {
        loop {
            let pending_sending_rx = &mut self.as_mut().pending_sending_rx;
            futures::pin_mut!(pending_sending_rx);

            match futures::ready!(pending_sending_rx.poll_next(ctx)) {
                Some(PendingSending { msg, tx }) => {
                    if let Err(e) = tx.send(self.context().send(msg)) {
                        log::warn!("pending sending result {:?}", e);
                    }
                }
                None => {
                    log::error!("transmitter pending tx dropped");
                    return Poll::Ready(());
                }
            }
        }
    }
}

type MessageContext = protocol::traits::Context;

// Borrowed view over BackgroundSending's state used for one send operation.
struct SendingContext<'a> {
    conn_ctrl: &'a ConnectionServiceControl,
    peers_serv: &'a UnboundedSender,
    sessions: &'a SharedSessions,
    data_seq: &'a AtomicU64,
}

impl<'a> SendingContext<'a> {
    // Dispatch by recipient kind.
    fn send(&self, msg: TransmitterMessage) -> Result<(), NetworkError> {
        let TransmitterMessage { priority, data, .. } = msg;
        match msg.recipient {
            Recipient::Session(target) => self.send_to_sessions(target, data, priority, msg.ctx),
            Recipient::PeerId(peer_ids) => self.send_to_peers(peer_ids, data, priority, msg.ctx),
        }
    }

    // Sends `data` to the given sessions, filtering out blocked ones first,
    // chunking payloads >= MAX_CHUNK_SIZE under one sequence number. Any
    // blocked session turns the overall result into NetworkError::Send.
    fn send_to_sessions(
        &self,
        target: TargetSession,
        mut data: Bytes,
        priority: Priority,
        msg_ctx: MessageContext,
    ) -> Result<(), NetworkError> {
        let (target, opt_blocked) = match self.filter_blocked(target) {
            (None, None) => unreachable!(),
            (None, blocked) => {
                // Every targeted session is blocked: nothing sendable.
                return Err(NetworkError::Send {
                    blocked,
                    other: None,
                });
            }
            (Some(tar), opt_blocked) => (tar, opt_blocked),
        };

        let url = msg_ctx.url().unwrap_or("");
        // Metric accounting: payload size multiplied by fan-out.
        let data_size = match &target {
            TargetSession::Single(_) => data.len(),
            TargetSession::Multi(sessions) => data.len().saturating_mul(sessions.len()),
            TargetSession::All => data.len().saturating_mul(self.sessions.len()),
        };
        common_apm::metrics::network::NETWORK_MESSAGE_SIZE_COUNT_VEC
            .with_label_values(&["send", url])
            .inc_by(data_size as i64);

        let seq = self.data_seq.fetch_add(1, Ordering::SeqCst);
        log::debug!("seq {} data size {}", seq, data.len());

        // Small payload: single eof chunk, done.
        if data.len() < MAX_CHUNK_SIZE {
            let internal_msg = SeqChunkMessage {
                seq,
                eof: true,
                data,
            };
            let proto_msg = ProtocolMessage {
                protocol_id: TRANSMITTER_PROTOCOL_ID.into(),
                target,
                data: internal_msg.encode(),
                priority,
            };

            let ret = self.conn_ctrl.send(proto_msg).map_err(|err| match &err {
                SendErrorKind::BrokenPipe => NetworkError::Shutdown,
                SendErrorKind::WouldBlock => NetworkError::Busy,
            });
            if ret.is_err() || opt_blocked.is_some() {
                let other = ret.err();
                return Err(NetworkError::Send {
                    blocked: opt_blocked,
                    other: other.map(NetworkError::boxed),
                });
            }

            return Ok(());
        }

        // Large payload: stream MAX_CHUNK_SIZE chunks, final chunk carries
        // eof = true.
        while !data.is_empty() {
            if data.len() > MAX_CHUNK_SIZE {
                let chunk = data.split_to(MAX_CHUNK_SIZE);
                let internal_msg = SeqChunkMessage {
                    seq,
                    eof: false,
                    data: chunk,
                };
                let proto_msg = ProtocolMessage {
                    protocol_id: TRANSMITTER_PROTOCOL_ID.into(),
                    target: target.clone(),
                    data: internal_msg.encode(),
                    priority,
                };

                let ret = self.conn_ctrl.send(proto_msg).map_err(|err| match &err {
                    SendErrorKind::BrokenPipe => NetworkError::Shutdown,
                    SendErrorKind::WouldBlock => NetworkError::Busy,
                });
                if ret.is_err() {
                    let other = ret.err();
                    return Err(NetworkError::Send {
                        blocked: opt_blocked,
                        other: other.map(NetworkError::boxed),
                    });
                }
            } else {
                // Final chunk: move the remaining bytes out of `data`, which
                // also terminates the loop.
                let last_data = std::mem::replace(&mut data, Bytes::new());
                let internal_msg = SeqChunkMessage {
                    seq,
                    eof: true,
                    data: last_data,
                };
                let proto_msg = ProtocolMessage {
                    protocol_id: TRANSMITTER_PROTOCOL_ID.into(),
                    target: target.clone(),
                    data: internal_msg.encode(),
                    priority,
                };

                let ret = self.conn_ctrl.send(proto_msg).map_err(|err| match &err {
                    SendErrorKind::BrokenPipe => NetworkError::Shutdown,
                    SendErrorKind::WouldBlock => NetworkError::Busy,
                });
                if ret.is_err() || opt_blocked.is_some() {
                    let other = ret.err();
                    return Err(NetworkError::Send {
                        blocked: opt_blocked,
                        other: other.map(NetworkError::boxed),
                    });
                }
            }
        }

        Ok(())
    }

    // Resolves peer ids into connected sessions, sends to those, and asks the
    // peer manager to dial any unconnected peers.
    // NOTE(review): `Vec` element type looks stripped by extraction
    // (presumably Vec<PeerId>).
    fn send_to_peers(
        &self,
        peer_ids: Vec,
        data: Bytes,
        priority: Priority,
        msg_ctx: MessageContext,
    ) -> Result<(),
NetworkError> {
        let (connected, unconnected) = self.sessions.peers(peer_ids);
        let send_ret =
            self.send_to_sessions(TargetSession::Multi(connected), data, priority, msg_ctx);

        // All peers connected: result is whatever the session send returned.
        if unconnected.is_empty() {
            return send_ret;
        }

        // Ask the peer manager to dial the missing peers; failure only means
        // the manager task has exited.
        let connect_peers = PeerManagerEvent::ConnectPeersNow {
            pids: unconnected.clone(),
        };
        if self.peers_serv.unbounded_send(connect_peers).is_err() {
            log::error!("network: peer manager service exit");
        }

        // Reached only with a non-empty `unconnected`, so this always reports
        // a MultiCast error carrying the unreached peers (and any send error).
        if send_ret.is_err() || !unconnected.is_empty() {
            let other = send_ret.err().map(NetworkError::boxed);
            let unconnected = if unconnected.is_empty() {
                None
            } else {
                Some(unconnected)
            };

            return Err(NetworkError::MultiCast { unconnected, other });
        }

        Ok(())
    }

    // Splits `target` into (still-sendable target, blocked session ids).
    // Invariant: at least one side of the returned pair is Some.
    // NOTE(review): Option/Vec generic arguments look stripped by extraction
    // in this signature.
    fn filter_blocked(
        &self,
        target: TargetSession,
    ) -> (Option, Option>) {
        self.sessions.refresh_blocked();

        let all_blocked = self.sessions.all_blocked();
        if all_blocked.is_empty() {
            return (Some(target), None);
        }

        match target {
            TargetSession::Single(sid) => {
                if all_blocked.contains(&sid) {
                    (None, Some(vec![sid]))
                } else {
                    (Some(TargetSession::Single(sid)), None)
                }
            }
            TargetSession::Multi(sids) => {
                let (sendable, blocked): (Vec, Vec) =
                    sids.into_iter().partition(|sid| !all_blocked.contains(sid));

                if sendable.is_empty() && blocked.is_empty() {
                    unreachable!()
                } else if sendable.is_empty() {
                    (None, Some(blocked))
                } else if blocked.is_empty() {
                    (Some(TargetSession::Multi(sendable)), None)
                } else {
                    (Some(TargetSession::Multi(sendable)), Some(blocked))
                }
            }
            TargetSession::All => {
                // Broadcast collapses to "every sendable session".
                let sendable = self.sessions.all_sendable();
                (Some(TargetSession::Multi(sendable)), Some(all_blocked))
            }
        }
    }
}

================================================
FILE: core/network/src/protocols/transmitter/message.rs
================================================

use bytes::{Buf, BufMut};
use protocol::traits::{Context, Priority};
use protocol::{Bytes, BytesMut};
use tentacle::secio::PeerId;
use tentacle::service::TargetSession;
use tentacle::SessionId;

// Who a TransmitterMessage is addressed to: concrete sessions or peer ids
// that still need session resolution.
// NOTE(review): `Vec` element type looks stripped by extraction (presumably
// Vec<PeerId>).
pub enum Recipient {
    Session(TargetSession),
    PeerId(Vec),
}

pub struct
TransmitterMessage {
    pub recipient: Recipient,
    pub priority: Priority,
    pub data: Bytes,
    pub ctx: Context, // For metric
}

// A fully reassembled inbound message plus its origin.
pub struct ReceivedMessage {
    pub session_id: SessionId,
    pub peer_id: PeerId,
    pub data: Bytes,
}

// Wire chunk: 8-byte seq + 1-byte eof flag + payload.
pub(crate) struct SeqChunkMessage {
    pub seq: u64,
    pub eof: bool,
    pub data: Bytes,
}

impl SeqChunkMessage {
    // Layout: u64 seq (big-endian via put_u64), u8 eof (1/0), then raw data.
    pub fn encode(self) -> Bytes {
        let eof = if self.eof { 1u8 } else { 0u8 };

        let mut buf = BytesMut::with_capacity(9 + self.data.len());
        buf.put_u64(self.seq);
        buf.put_u8(eof);
        buf.extend_from_slice(self.data.as_ref());

        buf.freeze()
    }

    // Note: already check data size in protocol received.
    pub fn decode(mut bytes: Bytes) -> Self {
        // Split payload off first; the remaining 9 bytes are the header.
        let data = bytes.split_off(9);
        let seq = bytes.get_u64();
        let eof = bytes.get_u8() == 1;

        SeqChunkMessage { seq, eof, data }
    }
}

#[cfg(test)]
mod tests {
    use super::SeqChunkMessage;
    use protocol::Bytes;

    #[test]
    fn test_internal_message_codec() {
        let data = b"hello muta";
        let chunk = SeqChunkMessage {
            seq: 1u64,
            eof: false,
            data: Bytes::from_static(data),
        };

        // Round-trip must preserve payload and eof flag.
        let chunk = SeqChunkMessage::decode(chunk.encode());
        assert_eq!(chunk.data, Bytes::from_static(data));
        assert_eq!(chunk.eof, false);
    }
}

================================================
FILE: core/network/src/protocols/transmitter/protocol.rs
================================================

use std::time::Instant;

use protocol::Bytes;
use tentacle::context::ProtocolContextMutRef;
use tentacle::traits::SessionProtocol;

use crate::compression::Snappy;
use crate::peer_manager::PeerManagerHandle;
use crate::reactor::{MessageRouter, RemotePeer};

use super::message::{ReceivedMessage, SeqChunkMessage};
use super::{DATA_SEQ_TIMEOUT, MAX_CHUNK_SIZE};

// Per-session protocol handler that reassembles chunked messages and routes
// them onward.
// NOTE(review): generic parameters look stripped by extraction (MessageRouter
// and data_buf: Vec element type).
pub struct TransmitterProtocol {
    router: MessageRouter,
    peer_mgr: PeerManagerHandle,
    // Reassembly buffer for the in-progress sequence.
    data_buf: Vec,
    current_data_seq: u64,
    // When the first chunk of the current sequence arrived; used for timeout.
    first_seq_bytes_at: Instant,
}

impl TransmitterProtocol {
    pub fn new(router: MessageRouter, peer_mgr: PeerManagerHandle) -> Self {
        TransmitterProtocol {
            router,
            peer_mgr,
            data_buf: Vec::new(),
            current_data_seq:
0,
            first_seq_bytes_at: Instant::now(),
        }
    }
}

impl SessionProtocol for TransmitterProtocol {
    // Accept the protocol only for sessions the peer manager knows about and
    // that are encrypted; otherwise close/disconnect.
    fn connected(&mut self, context: ProtocolContextMutRef, _version: &str) {
        if !self.peer_mgr.contains_session(context.session.id) {
            let _ = context.close_protocol(context.session.id, context.proto_id());
            return;
        }

        let peer_id = match context.session.remote_pubkey.as_ref() {
            Some(pubkey) => pubkey.peer_id(),
            None => {
                log::warn!("peer connection must be encrypted");
                let _ = context.disconnect(context.session.id);
                return;
            }
        };

        crate::protocols::OpenedProtocols::register(peer_id, context.proto_id());
    }

    fn received(&mut self, ctx: ProtocolContextMutRef, data: Bytes) {
        let peer_id = match ctx.session.remote_pubkey.as_ref() {
            Some(pk) => pk.peer_id(),
            None => {
                // Dont care result here, connection/keeper will also handle this.
                let _ = ctx.disconnect(ctx.session.id);
                return;
            }
        };
        let session_id = ctx.session.id;

        // Seq u64 takes 8 bytes, and eof bool take 1 byte, so a valid data length
        // must be bigger or equal than 10.
if data.len() < 10 {
            log::warn!("session {} data size < 10, drop it", session_id);
            return;
        }

        let SeqChunkMessage { seq, eof, data } = SeqChunkMessage::decode(data);
        log::debug!("recived seq {} eof {} data size {}", seq, eof, data.len());

        // Reject oversized chunks outright.
        if data.len() > MAX_CHUNK_SIZE {
            log::warn!(
                "session {} data size > {}, drop it",
                session_id,
                MAX_CHUNK_SIZE
            );
            return;
        }

        if seq == self.current_data_seq {
            // Continuation of the in-progress sequence; abandon it if the
            // sequence has been accumulating longer than DATA_SEQ_TIMEOUT.
            if self.first_seq_bytes_at.elapsed() > DATA_SEQ_TIMEOUT {
                log::warn!(
                    "session {} data seq {} timeout, drop it",
                    session_id,
                    self.current_data_seq
                );
                self.data_buf.clear();
                return;
            }

            self.data_buf.extend(data.as_ref());
            log::debug!("data buf size {}", self.data_buf.len());
        } else {
            // First chunk of a new sequence: reset the buffer and the timer.
            log::debug!("new data seq {}", seq);

            self.current_data_seq = seq;
            self.data_buf.clear();
            self.data_buf.extend(data.as_ref());
            self.data_buf.shrink_to_fit();
            self.first_seq_bytes_at = Instant::now();
        }

        // Wait for the final chunk before routing.
        if !eof {
            return;
        }

        // Take ownership of the assembled payload, leaving an empty buffer.
        let data = std::mem::replace(&mut self.data_buf, Vec::new());
        log::debug!("final seq {} data size {}", seq, data.len());

        let remote_peer = match RemotePeer::from_proto_context(&ctx) {
            Ok(peer) => peer,
            Err(_err) => {
                log::warn!("received data from unencrypted peer, impossible, drop it");
                return;
            }
        };

        let recv_msg = ReceivedMessage {
            session_id,
            peer_id,
            data: Bytes::from(data),
        };

        let host = remote_peer.connected_addr.host.to_owned();
        let route_fut = self.router.route_message(remote_peer.clone(), recv_msg);
        // Routing happens on a spawned task so the protocol callback returns
        // quickly; the guages track in-flight routed messages.
        tokio::spawn(async move {
            common_apm::metrics::network::NETWORK_RECEIVED_MESSAGE_IN_PROCESSING_GUAGE.inc();
            common_apm::metrics::network::NETWORK_RECEIVED_IP_MESSAGE_IN_PROCESSING_GUAGE_VEC
                .with_label_values(&[&host])
                .inc();

            if let Err(err) = route_fut.await {
                log::warn!("route message from {} failed: {}", remote_peer, err);
            }

            common_apm::metrics::network::NETWORK_RECEIVED_MESSAGE_IN_PROCESSING_GUAGE.dec();
            common_apm::metrics::network::NETWORK_RECEIVED_IP_MESSAGE_IN_PROCESSING_GUAGE_VEC
                .with_label_values(&[&host])
                .dec();
        });
    }
}
================================================
FILE: core/network/src/protocols/transmitter.rs
================================================

mod behaviour;
mod message;
mod protocol;

use std::time::Duration;

use tentacle::builder::MetaBuilder;
use tentacle::service::{ProtocolHandle, ProtocolMeta};
use tentacle::ProtocolId;

use crate::compression::Snappy;
use crate::peer_manager::PeerManagerHandle;
use crate::reactor::MessageRouter;
use crate::traits::Compression;

use self::behaviour::TransmitterBehaviour;
use self::protocol::TransmitterProtocol;

pub use message::{ReceivedMessage, Recipient, TransmitterMessage};

pub const NAME: &str = "chain_transmitter";
pub const SUPPORT_VERSIONS: [&str; 1] = ["0.3"];
// How long a partially assembled chunk sequence may live before being dropped.
pub const DATA_SEQ_TIMEOUT: Duration = Duration::from_secs(60);
pub const MAX_CHUNK_SIZE: usize = 4 * 1000 * 1000; // 4MB

// Facade tying the router, the sending behaviour and the per-session protocol
// together.
// NOTE(review): MessageRouter generic parameter looks stripped by extraction.
#[derive(Clone)]
pub struct Transmitter {
    pub(crate) router: MessageRouter,
    pub(crate) behaviour: TransmitterBehaviour,
    peer_mgr: PeerManagerHandle,
}

impl Transmitter {
    pub fn new(router: MessageRouter, peer_mgr: PeerManagerHandle) -> Self {
        let behaviour = TransmitterBehaviour::new();
        Transmitter {
            router,
            behaviour,
            peer_mgr,
        }
    }

    // Builds tentacle metadata; a fresh TransmitterProtocol is created per
    // session via the session_handle closure.
    pub fn build_meta(self, protocol_id: ProtocolId) -> ProtocolMeta {
        MetaBuilder::new()
            .id(protocol_id)
            .name(name!(NAME))
            .support_versions(support_versions!(SUPPORT_VERSIONS))
            .session_handle(move || {
                let proto = TransmitterProtocol::new(self.router.clone(), self.peer_mgr.clone());
                ProtocolHandle::Callback(Box::new(proto))
            })
            .build()
    }

    // The compression codec paired with this protocol.
    pub fn compressor(&self) -> impl Compression {
        Snappy
    }
}

================================================
FILE: core/network/src/reactor/mod.rs
================================================

mod router;
mod rpc_map;

use std::convert::TryFrom;
use std::marker::PhantomData;

use async_trait::async_trait;
use protocol::traits::{Context, MessageCodec, MessageHandler, TrustFeedback};
use protocol::{Bytes, ProtocolResult};

use crate::endpoint::{Endpoint, EndpointScheme, RpcEndpoint};
use crate::message::NetworkMessage;
use crate::rpc::RpcResponse;
use crate::traits::NetworkContext;

pub(crate) use router::{MessageRouter, RemotePeer, RouterContext};

/// A reactor consumes a decoded NetworkMessage for one endpoint.
#[async_trait]
pub trait Reactor: Send + Sync {
    async fn react(
        &self,
        context: RouterContext,
        endpoint: Endpoint,
        network_message: NetworkMessage,
    ) -> ProtocolResult<()>;
}

// NOTE(review): generic parameters look stripped by extraction throughout the
// next few items (MessageReactor/generate/rpc_resp and the impl header).
pub struct MessageReactor> {
    msg_handler: H,
}

// Wraps a message handler into a reactor.
pub fn generate>(h: H) -> MessageReactor {
    MessageReactor { msg_handler: h }
}

// Reactor for RPC responses: payload delivery happens via the rpc map, so the
// handler itself is a no-op.
pub fn rpc_resp() -> MessageReactor> {
    MessageReactor {
        msg_handler: NoopHandler::new(),
    }
}

#[async_trait]
impl> Reactor for MessageReactor {
    async fn react(
        &self,
        context: RouterContext,
        endpoint: Endpoint,
        network_message: NetworkMessage,
    ) -> ProtocolResult<()> {
        // Base context carries the remote peer's identity and address.
        let ctx = Context::new()
            .set_session_id(context.remote_peer.session_id)
            .set_remote_peer_id(context.remote_peer.peer_id.clone())
            .set_remote_connected_addr(context.remote_peer.connected_addr.clone());

        // Attach tracing span state when the message carried one.
        let mut ctx = match (network_message.trace_id(), network_message.span_id()) {
            (Some(trace_id), Some(span_id)) => {
                let span_state = common_apm::muta_apm::MutaTracer::new_state(trace_id, span_id);
                common_apm::muta_apm::MutaTracer::inject_span_state(ctx, span_state)
            }
            _ => ctx,
        };

        let session_id = context.remote_peer.session_id;
        let raw_context = Bytes::from(network_message.content);
        let feedback = match endpoint.scheme() {
            EndpointScheme::Gossip => {
                let content = M::decode(raw_context)?;
                self.msg_handler.process(ctx, content).await
            }
            EndpointScheme::RpcCall => {
                let content = M::decode(raw_context)?;
                let rpc_endpoint = RpcEndpoint::try_from(endpoint)?;
                // Inbound RPC call: expose the rpc id so the handler can
                // address its response.
                let ctx = ctx.set_rpc_id(rpc_endpoint.rpc_id().value());
                self.msg_handler.process(ctx, content).await
            }
            EndpointScheme::RpcResponse => {
                let content = RpcResponse::decode(raw_context)?;
                let rpc_endpoint = RpcEndpoint::try_from(endpoint)?;
                let rpc_id = rpc_endpoint.rpc_id().value();
                // Unknown (likely timed-out) rpc ids are logged and dropped.
                if !context.rpc_map.contains(session_id, rpc_id) {
                    let full_url = rpc_endpoint.endpoint().full_url();
                    log::warn!(
"rpc to {} from {} not found, maybe timeout",
                        full_url,
                        context.remote_peer
                    );
                    return Ok(());
                }

                let rpc_id = rpc_endpoint.rpc_id().value();
                // Hand the response back to the waiting rpc caller.
                // NOTE(review): turbofish argument looks stripped by
                // extraction here.
                let resp_tx = context.rpc_map.take::(session_id, rpc_id)?;
                if resp_tx.send(content).is_err() {
                    let end = rpc_endpoint.endpoint().full_url();
                    log::warn!("network: reactor: {} rpc dropped on {}", session_id, end);
                }

                // Responses produce no trust feedback.
                return Ok(());
            }
        };

        context.report_feedback(feedback);

        Ok(())
    }
}

// Handler that accepts any message and reports neutral trust; used for rpc
// response endpoints where delivery is handled out-of-band.
// NOTE(review): generic parameters look stripped by extraction on the items
// below (PhantomData fn-pointer type, impl headers).
#[derive(Debug)]
pub struct NoopHandler {
    pin_m: PhantomData M>,
}

impl NoopHandler
where
    M: MessageCodec,
{
    pub fn new() -> Self {
        NoopHandler { pin_m: PhantomData }
    }
}

#[async_trait]
impl MessageHandler for NoopHandler
where
    M: MessageCodec,
{
    type Message = M;

    async fn process(&self, _: Context, _: Self::Message) -> TrustFeedback {
        TrustFeedback::Neutral
    }
}

================================================
FILE: core/network/src/reactor/router.rs
================================================

use std::collections::HashMap;
use std::future::Future;
use std::sync::Arc;

use derive_more::Display;
use futures::channel::mpsc::UnboundedSender;
use parking_lot::RwLock;
use protocol::traits::{MessageCodec, MessageHandler, TrustFeedback};
use protocol::ProtocolResult;
use tentacle::context::ProtocolContextMutRef;
use tentacle::secio::PeerId;
use tentacle::SessionId;

use crate::common::ConnectedAddr;
use crate::endpoint::Endpoint;
use crate::error::{ErrorKind, NetworkError};
use crate::event::PeerManagerEvent;
use crate::message::NetworkMessage;
use crate::protocols::ReceivedMessage;
use crate::traits::Compression;

use super::rpc_map::RpcMap;
use super::Reactor;

// Error marker: the session has no remote public key.
#[derive(Debug, Display)]
#[display(fmt = "connection isnt encrypted, no peer id")]
pub struct NoEncryption {}

// Identity + address snapshot of the remote end of a session.
#[derive(Debug, Display, Clone)]
#[display(fmt = "remote peer {:?} addr {}", peer_id, connected_addr)]
pub struct RemotePeer {
    pub session_id: SessionId,
    pub peer_id: PeerId,
    pub connected_addr: ConnectedAddr,
}

impl RemotePeer {
    pub fn from_proto_context(
        protocol_context:
&ProtocolContextMutRef,
    ) -> Result {
        // Fails with NoEncryption when the session has no remote pubkey.
        // NOTE(review): Result type arguments look stripped by extraction.
        let session = protocol_context.session;
        let pubkey = session
            .remote_pubkey
            .as_ref()
            .ok_or_else(|| NoEncryption {})?;

        Ok(RemotePeer {
            session_id: session.id,
            peer_id: pubkey.peer_id(),
            connected_addr: ConnectedAddr::from(&session.address),
        })
    }
}

// Per-message context handed to reactors: who sent it, the rpc map, and a
// channel for trust feedback.
pub struct RouterContext {
    pub(crate) remote_peer: RemotePeer,
    pub(crate) rpc_map: Arc,
    trust_tx: UnboundedSender,
}

impl RouterContext {
    fn new(
        remote_peer: RemotePeer,
        rpc_map: Arc,
        trust_tx: UnboundedSender,
    ) -> Self {
        RouterContext {
            remote_peer,
            rpc_map,
            trust_tx,
        }
    }

    // Forwards trust feedback to the peer manager; send failure is only
    // logged.
    pub fn report_feedback(&self, feedback: TrustFeedback) {
        let feedback_event = PeerManagerEvent::TrustMetric {
            pid: self.remote_peer.peer_id.clone(),
            feedback,
        };
        if let Err(e) = self.trust_tx.unbounded_send(feedback_event) {
            log::error!("send peer {} feedback failed {}", self.remote_peer, e);
        }
    }
}

// NOTE(review): generic arguments look stripped by extraction on this alias
// and on the MessageRouter struct/impl headers below.
type ReactorMap = HashMap>>;

#[derive(Clone)]
pub struct MessageRouter {
    // Endpoint to reactor channel map
    reactor_map: Arc>,

    // Rpc map
    pub(crate) rpc_map: Arc,

    // Sender for peer trust metric feedback
    trust_tx: UnboundedSender,

    // Compression to decompress message
    compression: C,
}

impl MessageRouter
where
    C: Compression + Send + Clone + 'static,
{
    pub fn new(trust_tx: UnboundedSender, compression: C) -> Self {
        MessageRouter {
            reactor_map: Default::default(),
            rpc_map: Arc::new(RpcMap::new()),
            trust_tx,
            compression,
        }
    }

    // Registers the reactor handling messages for `endpoint`.
    pub fn register_reactor(
        &self,
        endpoint: Endpoint,
        message_handler: impl MessageHandler,
    ) {
        let reactor = super::generate(message_handler);
        self.reactor_map
            .write()
            .insert(endpoint, Arc::new(Box::new(reactor)));
    }

    // Registers the no-op rpc-response reactor for `endpoint`.
    pub fn register_rpc_response(&self, endpoint: Endpoint) {
        let reactor = super::rpc_resp::<()>();
        self.reactor_map
            .write()
            .insert(endpoint, Arc::new(Box::new(reactor)));
    }

    // Builds a future that decompresses, decodes and dispatches one received
    // message to its endpoint's reactor.
    pub fn route_message(
        &self,
        remote_peer: RemotePeer,
        recv_msg: ReceivedMessage,
    ) -> impl Future> {
        let reactor_map = Arc::clone(&self.reactor_map);
        let compression = self.compression.clone();
        let router_context =
RouterContext::new(
            remote_peer,
            Arc::clone(&self.rpc_map),
            self.trust_tx.clone(),
        );
        let raw_data_size = recv_msg.data.len();

        async move {
            // Decompress then decode the wire payload.
            let network_message = {
                let decompressed = compression.decompress(recv_msg.data)?;
                NetworkMessage::decode(decompressed)?
            };
            common_apm::metrics::network::on_network_message_received(&network_message.url);

            // NOTE(review): parse turbofish argument looks stripped by
            // extraction (presumably parse::<Endpoint>()).
            let endpoint = network_message.url.parse::()?;
            common_apm::metrics::network::NETWORK_MESSAGE_SIZE_COUNT_VEC
                .with_label_values(&["received", &endpoint.root()])
                .inc_by(raw_data_size as i64);

            // Look up the registered reactor; unknown endpoints error out.
            let reactor = {
                let opt_reactor = reactor_map.read().get(&endpoint).cloned();
                opt_reactor
                    .ok_or_else(|| NetworkError::from(ErrorKind::NoReactor(endpoint.root())))?
            };
            let ret = reactor
                .react(router_context, endpoint.clone(), network_message)
                .await;

            if let Err(err) = ret.as_ref() {
                log::error!("process {} message failed: {}", endpoint, err);
            }

            ret
        }
    }
}

================================================
FILE: core/network/src/reactor/rpc_map.rs
================================================

use std::{
    any::Any,
    collections::HashMap,
    sync::atomic::{AtomicU64, Ordering},
    sync::Arc,
};

use derive_more::Constructor;
use futures::channel::oneshot::{self, Receiver, Sender};
use parking_lot::RwLock;
use tentacle::SessionId;

use crate::error::{ErrorKind, NetworkError};

// Map key: (session id, rpc id).
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Constructor)]
struct Key {
    sid: SessionId,
    rid: u64,
}

// Type-erased oneshot sender; the concrete type is recovered via downcast in
// `take`.
// NOTE(review): generic arguments look stripped by extraction on this struct
// and on the RpcMap items below.
struct BackSender(Box>);

// Registry of in-flight rpc calls awaiting responses.
#[derive(Default)]
pub struct RpcMap {
    next_id: Arc,
    map: Arc>>,
}

impl RpcMap {
    pub fn new() -> Self {
        RpcMap {
            next_id: Arc::new(AtomicU64::new(0)),
            map: Default::default(),
        }
    }

    // Hands out unique rpc ids.
    pub fn next_rpc_id(&self) -> u64 {
        self.next_id.fetch_add(1, Ordering::SeqCst)
    }

    // Registers a pending rpc and returns the receiver for its response.
    pub fn insert(&self, sid: SessionId, rid: u64) -> Receiver {
        let key = Key::new(sid, rid);

        let (done_tx, done_rx) = oneshot::channel();
        let sender = BackSender(Box::new(Arc::new(done_tx)));
        self.map.write().insert(key, sender);

        done_rx
    }

    pub fn contains(&self, sid: SessionId, rid: u64) -> bool {
let key = Key::new(sid, rid);

        self.map.read().contains_key(&key)
    }

    /// Remove and return the oneshot sender registered for `(sid, rid)`.
    ///
    /// Errors with `UnknownRpc` when no such call is pending, with
    /// `UnexpectedRpcSender` when the stored sender is not a `Sender<T>`
    /// for the requested `T`, and with `MoreArcRpcSender` when the sender
    /// is still shared elsewhere.
    // NOTE(review): generic parameters were stripped by extraction and
    // are reconstructed here — verify against upstream.
    pub fn take<T: Send + 'static>(
        &self,
        sid: SessionId,
        rid: u64,
    ) -> Result<Sender<T>, NetworkError> {
        let key = Key::new(sid, rid);

        // Remove under a single write lock. A separate read-lock
        // `contains_key` pre-check would only re-report the same
        // UnknownRpc error while opening a check-then-act race between
        // the two lock acquisitions.
        let BackSender(boxed_any) = {
            let opt_sender = self.map.write().remove(&key);
            opt_sender.ok_or_else(|| ErrorKind::UnknownRpc { sid, rid })?
        };

        let arc_sender = boxed_any
            .downcast::<Arc<Sender<T>>>()
            .map_err(|_| ErrorKind::UnexpectedRpcSender)?;

        Arc::try_unwrap(*arc_sender).map_err(|_| ErrorKind::MoreArcRpcSender.into())
    }
}

================================================
FILE: core/network/src/rpc.rs
================================================

use derive_more::Display;
use serde::{Deserialize, Serialize};

use protocol::Bytes;

/// Error category carried by a failed RPC response.
#[derive(Debug, Deserialize, Serialize, Display)]
#[repr(u8)]
pub enum RpcResponseCode {
    ServerError,
    Other(u8),
}

/// Error payload sent back to an RPC caller.
#[derive(Debug, Deserialize, Serialize, Display)]
#[display(fmt = "rpc err code {} msg {}", code, msg)]
pub struct RpcErrorMessage {
    pub code: RpcResponseCode,
    pub msg: String,
}

impl std::error::Error for RpcErrorMessage {}

/// Wire-level result of an RPC call: raw encoded payload on success,
/// structured error message on failure.
#[derive(Debug, Deserialize, Serialize)]
pub enum RpcResponse {
    Success(Bytes),
    Error(RpcErrorMessage),
}

================================================
FILE: core/network/src/selfcheck.rs
================================================

use std::{
    future::Future,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
    time::Duration,
};

use futures::task::AtomicWaker;
use log::info;

use crate::{common::HeartBeat, traits::SharedSessionBook};

/// Configuration for the periodic self-check reporter.
pub struct SelfCheckConfig {
    pub interval: Duration,
}

// NOTE(review): the `<S>` parameters below were stripped by extraction
// and are reconstructed — verify against upstream.
pub(crate) struct SelfCheck<S> {
    sessions: S,

    heart_beat: Option<HeartBeat>,
    hb_waker: Arc<AtomicWaker>,
}

impl<S> SelfCheck<S>
where
    S: SharedSessionBook + Send + Unpin + 'static,
{
    pub fn new(sessions: S, config: SelfCheckConfig) -> Self {
        let waker = Arc::new(AtomicWaker::new());
        let heart_beat = HeartBeat::new(Arc::clone(&waker), config.interval);

        SelfCheck {
            sessions,
            heart_beat:
Some(heart_beat),
            hb_waker: waker,
        }
    }

    /// Log the peers currently in the allowlist.
    fn report_allowlist(&self) {
        info!("peers in allowlist: {:?}", self.sessions.allowlist());
    }

    /// Log every connected session together with its pending outbound
    /// data size (per session and total, truncated to whole MB).
    fn report_pending_data(&self) {
        let sids = self.sessions.all();
        let mut total_size = 0;

        let peer_reports = sids
            .into_iter()
            .map(|sid| {
                let connected_addr = self.sessions.connected_addr(sid);
                let data_size = self.sessions.pending_data_size(sid) / (1000 * 1000); // MB not MiB

                total_size += data_size;
                (connected_addr, data_size)
            })
            .collect::<Vec<_>>();

        info!(
            "total connected peers: {}, pending size {} MB, session(s) {:?}",
            peer_reports.len(),
            total_size,
            peer_reports
        );
    }
}

// NOTE(review): `<S>` parameters were stripped by extraction and are
// reconstructed here — verify against upstream.
impl<S> Future for SelfCheck<S>
where
    S: SharedSessionBook + Send + Unpin + 'static,
{
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
        self.hb_waker.register(ctx.waker());

        // Spawn heart beat on the first poll only; thereafter the heart
        // beat wakes this future on every interval tick.
        if let Some(heart_beat) = self.heart_beat.take() {
            tokio::spawn(heart_beat);

            // No report needed for the first run
            return Poll::Pending;
        }

        self.as_ref().report_pending_data();
        self.as_ref().report_allowlist();

        // Never completes; reports again on the next heart-beat wake.
        Poll::Pending
    }
}

================================================
FILE: core/network/src/service.rs
================================================

use std::future::Future;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context as TaskContext, Poll};

use async_trait::async_trait;
use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
use futures::stream::Stream;
use futures::task::AtomicWaker;
use log::{debug, error, info};
use protocol::traits::{
    Context, Gossip, MessageCodec, MessageHandler, Network, PeerTag, PeerTrust, Priority, Rpc,
    TrustFeedback,
};
use protocol::types::Hash;
use protocol::{Bytes, ProtocolResult};
use tentacle::secio::PeerId;

use crate::common::{socket_to_multi_addr, HeartBeat};
use crate::compression::Snappy;
use crate::connection::{ConnectionConfig, ConnectionService, ConnectionServiceKeeper};
use crate::endpoint::{Endpoint, EndpointScheme};
use crate::error::NetworkError;
use
crate::event::{ConnectionEvent, PeerManagerEvent}; use crate::metrics::Metrics; use crate::outbound::{NetworkGossip, NetworkRpc}; #[cfg(feature = "diagnostic")] use crate::peer_manager::diagnostic::{Diagnostic, DiagnosticHookFn}; use crate::peer_manager::{PeerManager, PeerManagerConfig, PeerManagerHandle, SharedSessions}; use crate::protocols::{CoreProtocol, Transmitter}; use crate::reactor::MessageRouter; use crate::selfcheck::SelfCheck; use crate::traits::NetworkContext; use crate::{NetworkConfig, PeerIdExt}; #[derive(Clone)] pub struct NetworkServiceHandle { gossip: NetworkGossip, rpc: NetworkRpc, peer_trust: UnboundedSender, peer_state: PeerManagerHandle, #[cfg(feature = "diagnostic")] pub diagnostic: Diagnostic, } #[async_trait] impl Gossip for NetworkServiceHandle { async fn broadcast(&self, cx: Context, end: &str, msg: M, p: Priority) -> ProtocolResult<()> where M: MessageCodec, { self.gossip.broadcast(cx, end, msg, p).await } async fn multicast<'a, M, P>( &self, cx: Context, end: &str, peer_ids: P, msg: M, p: Priority, ) -> ProtocolResult<()> where M: MessageCodec, P: AsRef<[Bytes]> + Send + 'a, { self.gossip.multicast(cx, end, peer_ids, msg, p).await } } #[async_trait] impl Rpc for NetworkServiceHandle { async fn call(&self, cx: Context, end: &str, msg: M, p: Priority) -> ProtocolResult where M: MessageCodec, R: MessageCodec, { self.rpc.call(cx, end, msg, p).await } async fn response( &self, cx: Context, end: &str, msg: ProtocolResult, p: Priority, ) -> ProtocolResult<()> where M: MessageCodec, { self.rpc.response(cx, end, msg, p).await } } impl PeerTrust for NetworkServiceHandle { fn report(&self, ctx: Context, feedback: TrustFeedback) { let remote_peer_id = match ctx.remote_peer_id() { Ok(id) => id, Err(e) => { log::error!( "peer id not found on trust report ctx, repoort {}, err {}", feedback, e ); return; } }; let feedback = PeerManagerEvent::TrustMetric { pid: remote_peer_id, feedback, }; if let Err(e) = self.peer_trust.unbounded_send(feedback) { 
log::error!("peer manager offline {}", e); } } } impl Network for NetworkServiceHandle { fn tag(&self, _: Context, peer_id: Bytes, tag: PeerTag) -> ProtocolResult<()> { let peer_id = ::from_bytes(peer_id)?; self.peer_state.tag(&peer_id, tag)?; Ok(()) } fn untag(&self, _: Context, peer_id: Bytes, tag: &PeerTag) -> ProtocolResult<()> { let peer_id = ::from_bytes(peer_id)?; self.peer_state.untag(&peer_id, tag); Ok(()) } fn tag_consensus(&self, _: Context, peer_ids: Vec) -> ProtocolResult<()> { let peer_ids = peer_ids .into_iter() .map(::from_bytes) .collect::, _>>()?; self.peer_state.tag_consensus(peer_ids); Ok(()) } } enum NetworkConnectionService { NoListen(ConnectionService), // no listen address yet Ready(ConnectionService), } pub struct NetworkService { sys_rx: UnboundedReceiver, // Heart beats conn_tx: UnboundedSender, mgr_tx: UnboundedSender, heart_beat: Option, hb_waker: Arc, // Config backup config: NetworkConfig, // Public service components gossip: NetworkGossip, rpc: NetworkRpc, transmitter: Transmitter, // Core service net_conn_srv: Option, peer_mgr: Option, peer_mgr_handle: PeerManagerHandle, // Metrics metrics: Option>, // Self check selfcheck: Option>, // Diagnostic #[cfg(feature = "diagnostic")] diagnostic: Diagnostic, } impl NetworkService { pub fn new(config: NetworkConfig) -> Self { let (mgr_tx, mgr_rx) = unbounded(); let (conn_tx, conn_rx) = unbounded(); let (sys_tx, sys_rx) = unbounded(); let hb_waker = Arc::new(AtomicWaker::new()); let heart_beat = HeartBeat::new(Arc::clone(&hb_waker), config.heart_beat_interval); let mgr_config = PeerManagerConfig::from(&config); let conn_config = ConnectionConfig::from(&config); // Build peer manager let mut peer_mgr = PeerManager::new(mgr_config, mgr_rx, conn_tx.clone()); let peer_mgr_handle = peer_mgr.handle(); let session_book = peer_mgr.share_session_book((&config).into()); #[cfg(feature = "diagnostic")] let diagnostic = peer_mgr.diagnostic(); if config.enable_save_restore { peer_mgr.enable_save_restore(); 
} if let Err(err) = peer_mgr.restore_peers() { error!("network: peer manager: load peers failure: {}", err); } if !config.bootstraps.is_empty() { peer_mgr.bootstrap(); } // Build service protocol let disc_sync_interval = config.discovery_sync_interval; let message_router = MessageRouter::new(mgr_tx.clone(), Snappy); let proto = CoreProtocol::build() .ping(config.ping_interval, config.ping_timeout, mgr_tx.clone()) .identify(peer_mgr_handle.clone(), mgr_tx.clone()) .discovery(peer_mgr_handle.clone(), mgr_tx.clone(), disc_sync_interval) .transmitter(message_router, peer_mgr_handle.clone()) .build(); let transmitter = proto.transmitter(); // Build connection service let keeper = ConnectionServiceKeeper::new(mgr_tx.clone(), sys_tx); let conn_srv = ConnectionService::::new(proto, conn_config, keeper, conn_rx); let conn_ctrl = conn_srv.control(); transmitter .behaviour .init(conn_ctrl, mgr_tx.clone(), session_book.clone()); // Build public service components let gossip = NetworkGossip::new(transmitter.clone()); let rpc = NetworkRpc::new(transmitter.clone(), (&config).into()); // Build metrics service let metrics = Metrics::new(session_book.clone()); // Build selfcheck service let selfcheck = SelfCheck::new(session_book, (&config).into()); NetworkService { sys_rx, conn_tx, mgr_tx, hb_waker, heart_beat: Some(heart_beat), config, gossip, rpc, transmitter, net_conn_srv: Some(NetworkConnectionService::NoListen(conn_srv)), peer_mgr: Some(peer_mgr), peer_mgr_handle, metrics: Some(metrics), selfcheck: Some(selfcheck), #[cfg(feature = "diagnostic")] diagnostic, } } pub fn register_endpoint_handler( &mut self, end: &str, handler: impl MessageHandler, ) -> ProtocolResult<()> where M: MessageCodec, { let endpoint = end.parse::()?; if endpoint.scheme() == EndpointScheme::RpcResponse { let err = "use register_rpc_response() instead".to_owned(); return Err(NetworkError::UnexpectedScheme(err).into()); } self.transmitter.router.register_reactor(endpoint, handler); Ok(()) } // Currently 
rpc response dont invoke message handler, so we create a dummy // for it. pub fn register_rpc_response(&mut self, end: &str) -> ProtocolResult<()> where M: MessageCodec, { let endpoint = end.parse::()?; if endpoint.scheme() != EndpointScheme::RpcResponse { return Err(NetworkError::UnexpectedScheme(end.to_owned()).into()); } self.transmitter.router.register_rpc_response(endpoint); Ok(()) } #[cfg(feature = "diagnostic")] pub fn register_diagnostic_hook(&mut self, f: DiagnosticHookFn) { if let Some(peer_mgr) = self.peer_mgr.as_mut() { peer_mgr.register_diagnostic_hook(f); } } pub fn handle(&self) -> NetworkServiceHandle { NetworkServiceHandle { gossip: self.gossip.clone(), rpc: self.rpc.clone(), peer_trust: self.mgr_tx.clone(), peer_state: self.peer_mgr_handle.clone(), #[cfg(feature = "diagnostic")] diagnostic: self.diagnostic.clone(), } } pub fn peer_id(&self) -> PeerId { self.config.secio_keypair.peer_id() } pub fn set_chain_id(&self, chain_id: Hash) { self.peer_mgr_handle.set_chain_id(chain_id); } pub async fn listen(&mut self, socket_addr: SocketAddr) -> ProtocolResult<()> { if let Some(NetworkConnectionService::NoListen(conn_srv)) = &mut self.net_conn_srv { debug!("network: listen to {}", socket_addr); let addr = socket_to_multi_addr(socket_addr); conn_srv.listen(addr.clone()).await?; // Update service state if let Some(NetworkConnectionService::NoListen(conn_srv)) = self.net_conn_srv.take() { self.net_conn_srv = Some(NetworkConnectionService::Ready(conn_srv)); } else { unreachable!("connection service must be there"); } } Ok(()) } } impl Future for NetworkService { type Output = (); fn poll(mut self: Pin<&mut Self>, ctx: &mut TaskContext<'_>) -> Poll { self.hb_waker.register(ctx.waker()); macro_rules! 
service_ready { ($poll:expr) => { match $poll { Poll::Pending => break, Poll::Ready(Some(v)) => v, Poll::Ready(None) => { info!("network shutdown"); return Poll::Ready(()); } } }; } // Preflight if let Some(conn_srv) = self.net_conn_srv.take() { let default_listen = self.config.default_listen.clone(); tokio::spawn(async move { let conn_srv = match conn_srv { NetworkConnectionService::NoListen(mut conn_srv) => { conn_srv .listen(default_listen) .await .expect("fail to listen default address"); conn_srv } NetworkConnectionService::Ready(conn_srv) => conn_srv, }; conn_srv.await }); } if let Some(peer_mgr) = self.peer_mgr.take() { tokio::spawn(peer_mgr); } if let Some(metrics) = self.metrics.take() { tokio::spawn(metrics); } if let Some(selfcheck) = self.selfcheck.take() { tokio::spawn(selfcheck); } // Heart beats if let Some(heart_beat) = self.heart_beat.take() { tokio::spawn(heart_beat); } // TODO: Reboot ceased service? Right now we just assume that it's // normal shutdown, simple log it and let it go. 
// // let it go ~~~ , let it go ~~~ // i am one with the wind and sky // let it go, let it go // you'll never see me cry // bla bla bal ~~~ if self.conn_tx.is_closed() { info!("network: connection service closed"); } if self.mgr_tx.is_closed() { info!("network: peer manager closed"); } // Process system error report loop { let sys_rx = &mut self.as_mut().sys_rx; futures::pin_mut!(sys_rx); let sys_err = service_ready!(sys_rx.poll_next(ctx)); error!("network: system error: {}", sys_err); } Poll::Pending } } ================================================ FILE: core/network/src/test/mock.rs ================================================ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use parking_lot::Mutex; use protocol::Bytes; use tentacle::multiaddr::Multiaddr; use tentacle::secio::{PublicKey, SecioKeyPair}; use tentacle::service::{SessionType, TargetProtocol}; use tentacle::{ProtocolId, SessionId}; #[derive(Clone, Debug)] pub struct SessionContext { pub id: SessionId, pub address: Multiaddr, pub ty: SessionType, pub remote_pubkey: Option, pending_data_size: Arc, } impl SessionContext { pub fn no_encrypted(id: SessionId, ty: SessionType) -> Self { let address = "/ip4/47.111.169.36/tcp/3000".parse().expect("multiaddr"); SessionContext { id, address, ty, remote_pubkey: None, pending_data_size: Arc::new(AtomicUsize::new(0)), } } pub fn random(id: SessionId, ty: SessionType) -> Self { let keypair = SecioKeyPair::secp256k1_generated(); let pubkey = keypair.public_key(); let peer_id = pubkey.peer_id(); let address = { let addr_str = format!("/ip4/47.111.169.36/tcp/3000/p2p/{}", peer_id.to_base58()); addr_str.parse().expect("multiaddr") }; SessionContext { id, address, ty, remote_pubkey: Some(pubkey), pending_data_size: Arc::new(AtomicUsize::new(0)), } } pub fn make(id: SessionId, address: Multiaddr, ty: SessionType, pubkey: PublicKey) -> Self { SessionContext { id, address, ty, remote_pubkey: Some(pubkey), pending_data_size: 
Arc::new(AtomicUsize::new(0)), } } pub fn pending_data_size(&self) -> usize { self.pending_data_size.load(Ordering::SeqCst) } pub fn arced(self) -> Arc { Arc::new(self) } } impl From> for SessionContext { fn from(ctx: Arc) -> Self { SessionContext { id: ctx.id, address: ctx.address.to_owned(), ty: ctx.ty, remote_pubkey: ctx.remote_pubkey.clone(), pending_data_size: Arc::new(AtomicUsize::new(ctx.pending_data_size())), } } } #[derive(Clone, PartialEq, Eq)] pub enum ControlEvent { SendMessage { proto_id: ProtocolId, session_id: SessionId, msg: Bytes, }, Disconnect { session_id: SessionId, }, OpenProtocols { session_id: SessionId, target_proto: TargetProtocol, }, } #[derive(Clone)] pub struct ServiceControl { pub event: Arc>>, } impl Default for ServiceControl { fn default() -> Self { ServiceControl { event: Arc::new(Mutex::new(None)), } } } impl ServiceControl { pub fn event(&self) -> Option { self.event.lock().clone() } pub fn quick_send_message_to( &self, session_id: SessionId, proto_id: ProtocolId, msg: Bytes, ) -> Result<(), String> { *self.event.lock() = Some(ControlEvent::SendMessage { session_id, proto_id, msg, }); Ok(()) } pub fn disconnect(&self, session_id: SessionId) { *self.event.lock() = Some(ControlEvent::Disconnect { session_id }); } pub fn open_protocols( &self, session_id: SessionId, target_proto: TargetProtocol, ) -> Result<(), String> { *self.event.lock() = Some(ControlEvent::OpenProtocols { session_id, target_proto, }); Ok(()) } } pub struct ProtocolContext { proto_id: ProtocolId, pub session: SessionContext, pub control: ServiceControl, } impl ProtocolContext { pub fn make_no_encrypted(proto_id: ProtocolId, id: SessionId, ty: SessionType) -> Self { ProtocolContext { proto_id, session: SessionContext::no_encrypted(id, ty), control: ServiceControl::default(), } } pub fn make(proto_id: ProtocolId, id: SessionId, ty: SessionType) -> Self { ProtocolContext { proto_id, session: SessionContext::random(id, ty), control: ServiceControl::default(), } } pub 
fn proto_id(&self) -> ProtocolId {
        self.proto_id
    }

    pub fn control(&self) -> &ServiceControl {
        &self.control
    }

    pub fn disconnect(&self, session_id: SessionId) {
        self.control.disconnect(session_id)
    }
}

================================================
FILE: core/network/src/test.rs
================================================

pub mod mock;

================================================
FILE: core/network/src/traits.rs
================================================

use std::borrow::Cow;

use protocol::traits::Context;
use protocol::Bytes;
use tentacle::multiaddr::Multiaddr;
use tentacle::secio::PeerId;
use tentacle::service::{ProtocolMeta, TargetProtocol};
use tentacle::SessionId;

use crate::common::ConnectedAddr;
use crate::error::{ErrorKind, NetworkError};

// NOTE(review): generic return types in the traits below were stripped
// by extraction and are reconstructed — verify against upstream.

/// A tentacle protocol bundle: its open target and its protocol metas.
pub trait NetworkProtocol {
    fn target() -> TargetProtocol;

    fn metas(self) -> Vec<ProtocolMeta>;
}

/// Symmetric compression applied to wire payloads.
pub trait Compression {
    fn compress(&self, bytes: Bytes) -> Result<Bytes, NetworkError>;

    fn decompress(&self, bytes: Bytes) -> Result<Bytes, NetworkError>;
}

/// Network-related accessors piggybacked on `protocol::traits::Context`.
///
/// Setters consume-and-return so calls can be chained builder-style.
pub trait NetworkContext: Sized {
    fn session_id(&self) -> Result<SessionId, NetworkError>;

    fn set_session_id(&mut self, sid: SessionId) -> Self;

    fn remote_peer_id(&self) -> Result<PeerId, NetworkError>;

    fn set_remote_peer_id(&mut self, pid: PeerId) -> Self;

    // This connected address is for debug purpose, so soft failure is ok.
fn remote_connected_addr(&self) -> Option; fn set_remote_connected_addr(&mut self, addr: ConnectedAddr) -> Self; fn rpc_id(&self) -> Result; fn set_rpc_id(&mut self, rid: u64) -> Self; fn url(&self) -> Result<&str, ()>; fn set_url(&mut self, url: String) -> Self; } pub trait ListenExchangeManager { fn listen_addr(&self) -> Multiaddr; fn add_remote_listen_addr(&mut self, pid: PeerId, addr: Multiaddr); fn misbehave(&mut self, sid: SessionId); } pub trait SharedSessionBook { fn all_sendable(&self) -> Vec; fn all_blocked(&self) -> Vec; fn refresh_blocked(&self); fn peers(&self, pids: Vec) -> (Vec, Vec); fn all(&self) -> Vec; fn connected_addr(&self, sid: SessionId) -> Option; fn pending_data_size(&self, sid: SessionId) -> usize; fn allowlist(&self) -> Vec; fn len(&self) -> usize; } pub trait MultiaddrExt { fn id_bytes(&self) -> Option>; fn has_id(&self) -> bool; fn push_id(&mut self, peer_id: PeerId); } #[derive(Debug, Clone)] struct CtxRpcId(u64); impl NetworkContext for Context { fn session_id(&self) -> Result { self.get::("session_id") .map(|sid| SessionId::new(*sid)) .ok_or_else(|| ErrorKind::NoSessionId.into()) } #[must_use] fn set_session_id(&mut self, sid: SessionId) -> Self { self.with_value::("session_id", sid.value()) } fn remote_peer_id(&self) -> Result { self.get::("remote_peer_id") .map(ToOwned::to_owned) .ok_or_else(|| ErrorKind::NoRemotePeerId.into()) } #[must_use] fn set_remote_peer_id(&mut self, pid: PeerId) -> Self { self.with_value::("remote_peer_id", pid) } fn remote_connected_addr(&self) -> Option { self.get::("remote_connected_addr") .map(ToOwned::to_owned) } #[must_use] fn set_remote_connected_addr(&mut self, addr: ConnectedAddr) -> Self { self.with_value::("remote_connected_addr", addr) } fn rpc_id(&self) -> Result { self.get::("rpc_id") .map(|ctx_rid| ctx_rid.0) .ok_or_else(|| ErrorKind::NoRpcId.into()) } #[must_use] fn set_rpc_id(&mut self, rid: u64) -> Self { self.with_value::("rpc_id", CtxRpcId(rid)) } fn url(&self) -> Result<&str, ()> { 
self.get::<String>("url")
            .map(String::as_str)
            .ok_or(()) // unit error is free to construct; no closure needed
    }

    #[must_use]
    fn set_url(&mut self, url: String) -> Self {
        self.with_value::<String>("url", url)
    }
}

================================================
FILE: core/network/tests/common.rs
================================================

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

use lazy_static::lazy_static;
use tentacle::secio::SecioKeyPair;

use core_network::{NetworkConfig, NetworkService};

pub const IP_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::new(10, 137, 0, 25));
pub const BOOTSTRAP_PORT: u16 = 1337;

lazy_static! {
    // Fixed test seckey so the bootstrap's public key is reproducible.
    pub static ref BOOTSTRAP_SECKEY: String = hex::encode("8".repeat(32));
    pub static ref BOOTSTRAP_PUBKEY: String = hex::encode(
        SecioKeyPair::secp256k1_raw_key("8".repeat(32))
            .expect("seckey")
            .public_key()
            .inner()
    );
    pub static ref BOOTSTRAP_ADDR: SocketAddr = SocketAddr::new(IP_ADDR, BOOTSTRAP_PORT);
}

/// Start a network service listening on the fixed bootstrap address,
/// keyed with the fixed test secret key.
pub async fn setup_bootstrap() -> NetworkService {
    let bootstrap_conf = NetworkConfig::new()
        .secio_keypair(BOOTSTRAP_SECKEY.to_string())
        .expect("bootstrap secio keypair");
    let mut bootstrap = NetworkService::new(bootstrap_conf);

    bootstrap
        .listen(*BOOTSTRAP_ADDR)
        .await
        .expect("bootstrap listen");

    bootstrap
}

/// Start a network service on `port` configured to bootstrap from the
/// fixed bootstrap node above.
pub async fn setup_peer(port: u16) -> NetworkService {
    let peer_conf = NetworkConfig::new()
        .bootstraps(vec![(
            BOOTSTRAP_PUBKEY.to_string(),
            (*BOOTSTRAP_ADDR).to_string(),
        )])
        .expect("peer bootstraps");
    let mut peer = NetworkService::new(peer_conf);

    peer.listen(SocketAddr::new(IP_ADDR, port))
        .await
        .expect("peer listen");

    peer
}

================================================
FILE: core/network/tests/gossip_test.rs
================================================

mod common;

use std::{
    sync::atomic::{AtomicBool, Ordering},
    thread,
    time::{Duration, SystemTime},
};

use async_trait::async_trait;
use futures::{
    channel::mpsc::{unbounded, UnboundedSender},
    stream::StreamExt,
};
use protocol::traits::{Context, Gossip, MessageHandler, Priority, TrustFeedback};

const END_TEST_BROADCAST: &str =
"/gossip/test/message"; const TEST_MESSAGE: &str = "spike lee action started"; const BROADCAST_TEST_TIMEOUT: u64 = 30; enum TestResult { TimeOut, Success, } struct NewsReader { sent: AtomicBool, done_tx: UnboundedSender<()>, } impl NewsReader { pub fn new(done_tx: UnboundedSender<()>) -> Self { NewsReader { sent: AtomicBool::new(false), done_tx, } } pub fn sent(&self) -> bool { self.sent.load(Ordering::SeqCst) } pub fn set_sent(&self) { self.sent.store(true, Ordering::SeqCst); } } #[async_trait] impl MessageHandler for NewsReader { type Message = String; async fn process(&self, _ctx: Context, msg: Self::Message) -> TrustFeedback { if !self.sent() { assert_eq!(&msg, TEST_MESSAGE); self.done_tx.unbounded_send(()).expect("news reader done"); self.set_sent(); } TrustFeedback::Neutral } } // FIXME: sometimes timeout #[tokio::test] #[ignore] async fn broadcast() { env_logger::init(); let (test_tx, mut test_rx) = unbounded(); // Init bootstrap node let mut bootstrap = common::setup_bootstrap().await; let (done_tx, mut bootstrap_done) = unbounded(); bootstrap .register_endpoint_handler(END_TEST_BROADCAST, NewsReader::new(done_tx)) .expect("bootstrap register news reader"); tokio::spawn(bootstrap); // Init peer alpha let mut alpha = common::setup_peer(common::BOOTSTRAP_PORT + 1).await; let (done_tx, mut alpha_done) = unbounded(); alpha .register_endpoint_handler(END_TEST_BROADCAST, NewsReader::new(done_tx)) .expect("alpha register news reader"); tokio::spawn(alpha); // Init peer brova let mut brova = common::setup_peer(common::BOOTSTRAP_PORT + 2).await; let (done_tx, mut brova_done) = unbounded(); brova .register_endpoint_handler(END_TEST_BROADCAST, NewsReader::new(done_tx)) .expect("brova register news reader"); tokio::spawn(brova); // Init peer charlie let charlie = common::setup_peer(common::BOOTSTRAP_PORT + 3).await; let broadcaster = charlie.handle(); tokio::spawn(charlie); // Sleep a while for bootstrap phrase, so peers can connect to each other 
thread::sleep(Duration::from_secs(3)); // Loop broadcast test message until all peers receive test message let test_tx_clone = test_tx.clone(); tokio::spawn(async move { let ctx = Context::new(); let end = END_TEST_BROADCAST; let msg = TEST_MESSAGE.to_owned(); let start = SystemTime::now(); loop { if SystemTime::now() .duration_since(start) .expect("duration") .as_secs() > BROADCAST_TEST_TIMEOUT { test_tx_clone .unbounded_send(TestResult::TimeOut) .expect("timeout send"); } broadcaster .broadcast(ctx.clone(), end, msg.clone(), Priority::Normal) .await .expect("gossip broadcast"); thread::sleep(Duration::from_secs(2)); } }); tokio::spawn(async move { bootstrap_done.next().await.expect("bootstrap done"); alpha_done.next().await.expect("alpha done"); brova_done.next().await.expect("brova done"); test_tx .unbounded_send(TestResult::Success) .expect("success send"); }); match test_rx.next().await { Some(TestResult::TimeOut) => panic!("timeout"), Some(TestResult::Success) => (), None => panic!("fail"), } } ================================================ FILE: core/run/Cargo.toml ================================================ [package] name = "run" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] backtrace = "0.3" actix-rt = "1.0" derive_more = "0.99" futures = "0.3" parking_lot = "0.11" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" log = "0.4" clap = "2.33" bytes = "0.5" hex = "0.4" rlp = "0.4" toml = "0.5" futures-timer="3.0" cita_trie = "2.0" tokio = { version = "0.2", features = ["macros", "sync", "rt-core", "rt-util", "signal", "time"] } byzantine = { path = "../../byzantine" } common-apm = { path = "../../common/apm" } common-config-parser = { path = "../../common/config-parser" } common-crypto = { path = "../../common/crypto" } common-logger = { path = "../../common/logger" } protocol = { path = "../../protocol", package = 
"muta-protocol" } core-api = { path = "../../core/api" } core-storage = { path = "../../core/storage" } core-mempool = { path = "../../core/mempool" } core-network = { path = "../../core/network" } core-consensus = { path = "../../core/consensus" } binding-macro = { path = "../../binding-macro" } framework = { path = "../../framework" } ================================================ FILE: core/run/src/lib.rs ================================================ #![feature(async_closure)] #![allow(clippy::mutable_key_type)] use derive_more::{Display, From}; use protocol::{ProtocolError, ProtocolErrorKind}; use std::collections::HashMap; use std::convert::TryFrom; use std::panic; use std::sync::Arc; use std::thread; use std::time::Duration; use backtrace::Backtrace; use bytes::Bytes; use futures::stream::StreamExt; use futures::{future, lock::Mutex}; use futures_timer::Delay; #[cfg(unix)] use tokio::signal::unix::{self as os_impl}; use common_config_parser::types::Config; use common_crypto::{ BlsCommonReference, BlsPrivateKey, BlsPublicKey, PublicKey, Secp256k1, Secp256k1PrivateKey, ToPublicKey, UncompressedPublicKey, }; use core_api::adapter::DefaultAPIAdapter; use core_api::config::{GraphQLConfig, GraphQLTLS}; use core_consensus::fixed_types::{FixedBlock, FixedProof, FixedSignedTxs}; use core_consensus::message::{ ChokeMessageHandler, ProposalMessageHandler, PullBlockRpcHandler, PullProofRpcHandler, PullTxsRpcHandler, QCMessageHandler, RemoteHeightMessageHandler, VoteMessageHandler, BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE, END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, RPC_RESP_SYNC_PULL_BLOCK, RPC_RESP_SYNC_PULL_PROOF, RPC_RESP_SYNC_PULL_TXS, RPC_SYNC_PULL_BLOCK, RPC_SYNC_PULL_PROOF, RPC_SYNC_PULL_TXS, }; use core_consensus::status::{CurrentConsensusStatus, StatusAgent}; use core_consensus::util::OverlordCrypto; use core_consensus::{ ConsensusWal, DurationConfig, Node, OverlordConsensus, OverlordConsensusAdapter, 
OverlordSynchronization, RichBlock, SignedTxsWAL, }; use core_mempool::{ DefaultMemPoolAdapter, HashMemPool, MsgPushTxs, NewTxsHandler, PullTxsHandler, END_GOSSIP_NEW_TXS, RPC_PULL_TXS, RPC_RESP_PULL_TXS, RPC_RESP_PULL_TXS_SYNC, }; use core_network::{NetworkConfig, NetworkService, PeerId, PeerIdExt}; use core_storage::{adapter::rocks::RocksAdapter, ImplStorage, StorageError}; use framework::binding::state::RocksTrieDB; use framework::executor::{ServiceExecutor, ServiceExecutorFactory}; use protocol::traits::{ APIAdapter, CommonStorage, Context, MemPool, Network, NodeInfo, ServiceMapping, Storage, }; use protocol::types::{Address, Block, BlockHeader, Genesis, Hash, Metadata, Proof, Validator}; use protocol::{fixed_codec::FixedCodec, ProtocolResult}; use common_apm::muta_apm; pub struct Muta where Mapping: ServiceMapping, { config: Config, genesis: Genesis, service_mapping: Arc, } impl Muta { pub fn new(config: Config, genesis: Genesis, service_mapping: Arc) -> Self { Self { config, genesis, service_mapping, } } pub fn run(self) -> ProtocolResult<()> { if let Some(apm_config) = &self.config.apm { muta_apm::global_tracer_register( &apm_config.service_name, apm_config.tracing_address, apm_config.tracing_batch_size, ); log::info!("muta_apm start"); } // run muta let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); let local = tokio::task::LocalSet::new(); local.block_on(&mut rt, async move { self.create_genesis().await?; self.start().await })?; Ok(()) } pub async fn create_genesis(&self) -> ProtocolResult { log::info!("Genesis data: {:?}", self.genesis); let metadata_payload = self.genesis.get_payload("metadata"); let hrp = Metadata::get_hrp_from_json(metadata_payload.to_string()); // Set bech32 address hrp if !protocol::address_hrp_inited() { protocol::init_address_hrp(hrp.into()); } // Init Block db let path_block = self.config.data_path_for_block(); let rocks_adapter = Arc::new(RocksAdapter::new( path_block, self.config.rocksdb.max_open_files, 
)?); let storage = Arc::new(ImplStorage::new(rocks_adapter)); match storage.get_latest_block(Context::new()).await { Ok(genesis_block) => { log::info!("The Genesis block has been initialized."); return Ok(genesis_block); } Err(e) => { if !e.to_string().contains("GetNone") { return Err(e); } } }; // Init trie db let path_state = self.config.data_path_for_state(); let trie_db = Arc::new(RocksTrieDB::new( path_state, self.config.executor.light, self.config.rocksdb.max_open_files, self.config.executor.triedb_cache_size, )?); let metadata: Metadata = serde_json::from_str(self.genesis.get_payload("metadata")) .expect("Decode metadata failed!"); let validators: Vec = metadata .verifier_list .iter() .map(|v| Validator { pub_key: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect(); // Init genesis let genesis_state_root = ServiceExecutor::create_genesis( self.genesis.services.clone(), Arc::clone(&trie_db), Arc::clone(&storage), Arc::clone(&self.service_mapping), )?; // Build genesis block. 
let proposer = Address::from_hash(Hash::digest(protocol::address_hrp().as_str()))?; let genesis_block_header = BlockHeader { chain_id: metadata.chain_id.clone(), height: 0, exec_height: 0, prev_hash: Hash::from_empty(), timestamp: self.genesis.timestamp, order_root: Hash::from_empty(), order_signed_transactions_hash: Hash::from_empty(), confirm_root: vec![], state_root: genesis_state_root, receipt_root: vec![], cycles_used: vec![], proposer, proof: Proof { height: 0, round: 0, block_hash: Hash::from_empty(), signature: Bytes::new(), bitmap: Bytes::new(), }, validator_version: 0, validators, }; let latest_proof = genesis_block_header.proof.clone(); let genesis_block = Block { header: genesis_block_header, ordered_tx_hashes: vec![], }; storage .insert_block(Context::new(), genesis_block.clone()) .await?; storage .update_latest_proof(Context::new(), latest_proof) .await?; log::info!("The genesis block is created {:?}", genesis_block); Ok(genesis_block) } pub async fn start(self) -> ProtocolResult<()> { log::info!("node starts"); let config = self.config; let service_mapping = self.service_mapping; // Init Block db let path_block = config.data_path_for_block(); log::info!("Data path for block: {:?}", path_block); let rocks_adapter = Arc::new(RocksAdapter::new( path_block.clone(), config.rocksdb.max_open_files, )?); let storage = Arc::new(ImplStorage::new(Arc::clone(&rocks_adapter))); // Init network let network_config = NetworkConfig::new() .max_connections(config.network.max_connected_peers)? .same_ip_conn_limit(config.network.same_ip_conn_limit) .inbound_conn_limit(config.network.inbound_conn_limit)? .allowlist_only(config.network.allowlist_only) .peer_trust_metric( config.network.trust_interval_duration, config.network.trust_max_history_duration, )? 
.peer_soft_ban(config.network.soft_ban_duration) .peer_fatal_ban(config.network.fatal_ban_duration) .rpc_timeout(config.network.rpc_timeout) .ping_interval(config.network.ping_interval) .selfcheck_interval(config.network.selfcheck_interval) .max_wait_streams(config.network.max_wait_streams) .max_frame_length(config.network.max_frame_length) .send_buffer_size(config.network.send_buffer_size) .write_timeout(config.network.write_timeout) .recv_buffer_size(config.network.recv_buffer_size); let network_privkey = config.privkey.as_string_trim0x(); let mut bootstrap_pairs = vec![]; if let Some(bootstrap) = &config.network.bootstraps { for bootstrap in bootstrap.iter() { bootstrap_pairs.push((bootstrap.peer_id.to_owned(), bootstrap.address.to_owned())); } } let allowlist = config.network.allowlist.clone().unwrap_or_default(); let network_config = network_config .bootstraps(bootstrap_pairs)? .allowlist(allowlist)? .secio_keypair(network_privkey)?; let mut network_service = NetworkService::new(network_config); network_service .listen(config.network.listening_address) .await?; // Init trie db let path_state = config.data_path_for_state(); let trie_db = Arc::new(RocksTrieDB::new( path_state, config.executor.light, config.rocksdb.max_open_files, config.executor.triedb_cache_size, )?); // Init full transactions wal let txs_wal_path = config.data_path_for_txs_wal().to_str().unwrap().to_string(); let txs_wal = Arc::new(SignedTxsWAL::new(txs_wal_path)); // Init consensus wal let consensus_wal_path = config .data_path_for_consensus_wal() .to_str() .unwrap() .to_string(); let consensus_wal = Arc::new(ConsensusWal::new(consensus_wal_path)); // Recover signed transactions of current height let current_block = storage.get_latest_block(Context::new()).await?; let current_stxs = txs_wal.load_by_height(current_block.header.height + 1); log::info!( "Recover {} tx of height {} from wal", current_stxs.len(), current_block.header.height + 1 ); // Init mempool let mempool_adapter = 
DefaultMemPoolAdapter::::new( network_service.handle(), Arc::clone(&storage), Arc::clone(&trie_db), Arc::clone(&service_mapping), config.mempool.broadcast_txs_size, config.mempool.broadcast_txs_interval, ); let mempool = Arc::new( HashMemPool::new( config.mempool.pool_size as usize, mempool_adapter, current_stxs, ) .await, ); let monitor_mempool = Arc::clone(&mempool); tokio::spawn(async move { let interval = Duration::from_millis(1000); loop { Delay::new(interval).await; common_apm::metrics::mempool::MEMPOOL_LEN_GAUGE .set(monitor_mempool.get_tx_cache().len().await as i64); } }); // self private key let hex_privkey = hex::decode(config.privkey.as_string_trim0x()).map_err(MainError::FromHex)?; let my_privkey = Secp256k1PrivateKey::try_from(hex_privkey.as_ref()).map_err(MainError::Crypto)?; let my_pubkey = my_privkey.pub_key(); let my_address = Address::from_pubkey_bytes(my_pubkey.to_uncompressed_bytes())?; // Get metadata let api_adapter = DefaultAPIAdapter::::new( Arc::clone(&mempool), Arc::clone(&storage), Arc::clone(&trie_db), Arc::clone(&service_mapping), ); let exec_resp = api_adapter .query_service( Context::new(), current_block.header.height, u64::max_value(), 1, my_address.clone(), "metadata".to_string(), "get_metadata".to_string(), "".to_string(), ) .await?; let metadata: Metadata = serde_json::from_str(&exec_resp.succeed_data).expect("Decode metadata failed!"); // Set bech32 address hrp if !protocol::address_hrp_inited() { protocol::init_address_hrp(metadata.bech32_address_hrp.into()); } // set chain id in network network_service.set_chain_id(metadata.chain_id.clone()); // set args in mempool mempool.set_args( metadata.timeout_gap, metadata.cycles_limit, metadata.max_tx_size, ); // register broadcast new transaction network_service.register_endpoint_handler( END_GOSSIP_NEW_TXS, NewTxsHandler::new(Arc::clone(&mempool)), )?; // register pull txs from other node network_service.register_endpoint_handler( RPC_PULL_TXS, 
PullTxsHandler::new(Arc::new(network_service.handle()), Arc::clone(&mempool)), )?; network_service.register_rpc_response::(RPC_RESP_PULL_TXS)?; network_service.register_rpc_response::(RPC_RESP_PULL_TXS_SYNC)?; // Init Consensus let validators: Vec = metadata .verifier_list .iter() .map(|v| Validator { pub_key: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect(); let node_info = NodeInfo { chain_id: metadata.chain_id.clone(), self_address: my_address.clone(), self_pub_key: my_pubkey.to_bytes(), }; let current_header = ¤t_block.header; let block_hash = Hash::digest(current_block.header.encode_fixed()?); let current_height = current_block.header.height; let exec_height = current_block.header.exec_height; let proof = if let Ok(temp) = storage.get_latest_proof(Context::new()).await { temp } else { current_header.proof.clone() }; let current_consensus_status = CurrentConsensusStatus { cycles_price: metadata.cycles_price, cycles_limit: metadata.cycles_limit, latest_committed_height: current_block.header.height, exec_height: current_block.header.exec_height, current_hash: block_hash, latest_committed_state_root: current_header.state_root.clone(), list_confirm_root: vec![], list_state_root: vec![], list_receipt_root: vec![], list_cycles_used: vec![], current_proof: proof, validators: validators.clone(), consensus_interval: metadata.interval, propose_ratio: metadata.propose_ratio, prevote_ratio: metadata.prevote_ratio, precommit_ratio: metadata.precommit_ratio, brake_ratio: metadata.brake_ratio, max_tx_size: metadata.max_tx_size, tx_num_limit: metadata.tx_num_limit, }; let consensus_interval = current_consensus_status.consensus_interval; let status_agent = StatusAgent::new(current_consensus_status); let mut bls_pub_keys = HashMap::new(); for validator_extend in metadata.verifier_list.iter() { let address = validator_extend.pub_key.decode(); let hex_pubkey = hex::decode(validator_extend.bls_pub_key.as_string_trim0x()) 
.map_err(MainError::FromHex)?; let pub_key = BlsPublicKey::try_from(hex_pubkey.as_ref()).map_err(MainError::Crypto)?; bls_pub_keys.insert(address, pub_key); } let mut priv_key = Vec::new(); priv_key.extend_from_slice(&[0u8; 16]); let mut tmp = hex::decode(config.privkey.as_string_trim0x()).unwrap(); priv_key.append(&mut tmp); let bls_priv_key = BlsPrivateKey::try_from(priv_key.as_ref()).map_err(MainError::Crypto)?; let hex_common_ref = hex::decode(metadata.common_ref.as_string_trim0x()).map_err(MainError::FromHex)?; let common_ref: BlsCommonReference = std::str::from_utf8(hex_common_ref.as_ref()) .map_err(MainError::Utf8)? .into(); let crypto = Arc::new(OverlordCrypto::new(bls_priv_key, bls_pub_keys, common_ref)); let mut consensus_adapter = OverlordConsensusAdapter::::new( Arc::new(network_service.handle()), Arc::clone(&mempool), Arc::clone(&storage), Arc::clone(&trie_db), Arc::clone(&service_mapping), status_agent.clone(), Arc::clone(&crypto), config.consensus.overlord_gap, )?; let exec_demon = consensus_adapter.take_exec_demon(); let consensus_adapter = Arc::new(consensus_adapter); let lock = Arc::new(Mutex::new(())); let overlord_consensus = Arc::new(OverlordConsensus::new( status_agent.clone(), node_info, Arc::clone(&crypto), Arc::clone(&txs_wal), Arc::clone(&consensus_adapter), Arc::clone(&lock), Arc::clone(&consensus_wal), )); consensus_adapter.set_overlord_handler(overlord_consensus.get_overlord_handler()); let synchronization = Arc::new(OverlordSynchronization::<_>::new( config.consensus.sync_txs_chunk_size, consensus_adapter, status_agent.clone(), crypto, lock, )); let peer_ids = metadata .verifier_list .iter() .map(|v| PeerId::from_pubkey_bytes(v.pub_key.decode()).map(PeerIdExt::into_bytes_ext)) .collect::, _>>()?; network_service .handle() .tag_consensus(Context::new(), peer_ids)?; // Re-execute block from exec_height + 1 to current_height, so that init the // lost current status. 
log::info!("Re-execute from {} to {}", exec_height + 1, current_height); for height in exec_height + 1..=current_height { let block = storage .get_block(Context::new(), height) .await? .ok_or(StorageError::GetNone)?; let txs = storage .get_transactions( Context::new(), block.header.height, &block.ordered_tx_hashes, ) .await? .into_iter() .filter_map(|opt_stx| opt_stx) .collect::>(); if txs.len() != block.ordered_tx_hashes.len() { return Err(StorageError::GetNone.into()); } let rich_block = RichBlock { block, txs }; let _ = synchronization .exec_block(Context::new(), rich_block, status_agent.clone()) .await?; } // register consensus network_service.register_endpoint_handler( END_GOSSIP_SIGNED_PROPOSAL, ProposalMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( END_GOSSIP_AGGREGATED_VOTE, QCMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_VOTE, VoteMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_CHOKE, ChokeMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( BROADCAST_HEIGHT, RemoteHeightMessageHandler::new(Arc::clone(&synchronization)), )?; network_service.register_endpoint_handler( RPC_SYNC_PULL_BLOCK, PullBlockRpcHandler::new(Arc::new(network_service.handle()), Arc::clone(&storage)), )?; network_service.register_endpoint_handler( RPC_SYNC_PULL_PROOF, PullProofRpcHandler::new(Arc::new(network_service.handle()), Arc::clone(&storage)), )?; network_service.register_endpoint_handler( RPC_SYNC_PULL_TXS, PullTxsRpcHandler::new(Arc::new(network_service.handle()), Arc::clone(&storage)), )?; network_service.register_rpc_response::(RPC_RESP_SYNC_PULL_BLOCK)?; network_service.register_rpc_response::(RPC_RESP_SYNC_PULL_PROOF)?; network_service.register_rpc_response::(RPC_RESP_SYNC_PULL_TXS)?; // Run network tokio::spawn(network_service); // 
Run sync tokio::spawn(async move { if let Err(e) = synchronization.polling_broadcast().await { log::error!("synchronization: {:?}", e); } }); // Run consensus let authority_list = validators .iter() .map(|v| Node { address: v.pub_key.clone(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect::>(); let timer_config = DurationConfig { propose_ratio: metadata.propose_ratio, prevote_ratio: metadata.prevote_ratio, precommit_ratio: metadata.precommit_ratio, brake_ratio: metadata.brake_ratio, }; tokio::spawn(async move { if let Err(e) = overlord_consensus .run( current_height, consensus_interval, authority_list, Some(timer_config), ) .await { log::error!("muta-consensus: {:?} error", e); } }); let (abortable_demon, abort_handle) = future::abortable(exec_demon.run()); let exec_handler = tokio::task::spawn_local(abortable_demon); // Init graphql let mut graphql_config = GraphQLConfig::default(); graphql_config.listening_address = config.graphql.listening_address; graphql_config.graphql_uri = config.graphql.graphql_uri.clone(); graphql_config.graphiql_uri = config.graphql.graphiql_uri.clone(); if config.graphql.workers != 0 { graphql_config.workers = config.graphql.workers; } if config.graphql.maxconn != 0 { graphql_config.maxconn = config.graphql.maxconn; } if config.graphql.max_payload_size != 0 { graphql_config.max_payload_size = config.graphql.max_payload_size; } if let Some(tls) = config.graphql.tls { graphql_config.tls = Some(GraphQLTLS { private_key_file_path: tls.private_key_file_path, certificate_chain_file_path: tls.certificate_chain_file_path, }) } graphql_config.enable_dump_profile = config.graphql.enable_dump_profile.unwrap_or(false); tokio::task::spawn_local(async move { let local = tokio::task::LocalSet::new(); let actix_rt = actix_rt::System::run_in_tokio("muta-graphql", &local); tokio::task::spawn_local(actix_rt); core_api::start_graphql(graphql_config, api_adapter).await; }); let ctrl_c_handler = tokio::task::spawn_local(async { 
#[cfg(windows)] let _ = tokio::signal::ctrl_c().await; #[cfg(unix)] { let mut sigtun_int = os_impl::signal(os_impl::SignalKind::interrupt()).unwrap(); let mut sigtun_term = os_impl::signal(os_impl::SignalKind::terminate()).unwrap(); tokio::select! { _ = sigtun_int.recv() => {} _ = sigtun_term.recv() => {} }; } }); // register channel of panic let (panic_sender, mut panic_receiver) = tokio::sync::mpsc::channel::<()>(1); panic::set_hook(Box::new(move |info: &panic::PanicInfo| { let mut panic_sender = panic_sender.clone(); Self::panic_log(info); panic_sender.try_send(()).expect("panic_receiver is droped"); })); tokio::select! { _ = exec_handler =>{log::error!("exec_daemon is down, quit.")}, _ = ctrl_c_handler =>{log::info!("ctrl + c is pressed, quit.")}, _ = panic_receiver.next() =>{log::info!("child thraed panic, quit.")}, }; abort_handle.abort(); Ok(()) } fn panic_log(info: &panic::PanicInfo) { let backtrace = Backtrace::new(); let thread = thread::current(); let name = thread.name().unwrap_or("unnamed"); let location = info.location().unwrap(); // The current implementation always returns Some let msg = match info.payload().downcast_ref::<&'static str>() { Some(s) => *s, None => match info.payload().downcast_ref::() { Some(s) => &*s, None => "Box", }, }; log::error!( target: "panic", "thread '{}' panicked at '{}': {}:{} {:?}", name, msg, location.file(), location.line(), backtrace, ); } } #[derive(Debug, Display, From)] pub enum MainError { #[display(fmt = "The muta configuration read failed {:?}", _0)] ConfigParse(common_config_parser::ParseError), #[display(fmt = "{:?}", _0)] Io(std::io::Error), #[display(fmt = "Toml fails to parse genesis {:?}", _0)] GenesisTomlDe(toml::de::Error), #[display(fmt = "hex error {:?}", _0)] FromHex(hex::FromHexError), #[display(fmt = "crypto error {:?}", _0)] Crypto(common_crypto::Error), #[display(fmt = "{:?}", _0)] Utf8(std::str::Utf8Error), #[display(fmt = "{:?}", _0)] JSONParse(serde_json::error::Error), #[display(fmt = "other 
error {:?}", _0)] Other(String), } impl std::error::Error for MainError {} impl From for ProtocolError { fn from(error: MainError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Main, Box::new(error)) } } ================================================ FILE: core/storage/Cargo.toml ================================================ [package] name = "core-storage" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] common-apm = { path = "../../common/apm" } protocol = { path = "../../protocol", package = "muta-protocol" } futures = "0.3" derive_more = "0.15" lazy_static = "1.4" parking_lot = "0.11" async-trait = "0.1" rocksdb = "0.14" tokio = "0.2" arc-swap = "0.4" [dev-dependencies] num-traits = "0.2" rand = "0.6" hex = "0.4" tokio = { version = "0.2", features = ["macros", "rt-core", "rt-util", "signal", "time"]} ================================================ FILE: core/storage/examples/bench.rs ================================================ use core_storage::{adapter::rocks::RocksAdapter, CommonHashKey, ImplStorage}; use protocol::{ traits::{Context, Storage}, types::{Bytes, Hash, RawTransaction, SignedTransaction, TransactionRequest}, }; use std::{ fs::OpenOptions, io::prelude::*, io::{BufReader, LineWriter}, path::PathBuf, str::FromStr, sync::Arc, time::Instant, }; const NUMBER_OF_TXS_PER_ROUND: usize = 15_000; // 1.5W, 2.5M const ADDRESS_STR: &str = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705"; #[tokio::main] pub async fn main() { if std::env::args().nth(1) == Some("generate".to_string()) { println!("generate 1.5W txs"); let mut height = 1u64; let mut count = std::env::args() .nth(2) .expect("number of round(1.5W txs per round, 2.5M)") .parse::() .expect("number of round(1.5W txs per round, 2.5M)"); let db_path = std::env::args().nth(3).expect("db patch"); let max_fd = 
std::env::args() .nth(4) .expect("max open files for rocksdb") .parse::() .expect("max open files for rocksdb"); let mut hash_keys_file = { let mut file_path = PathBuf::from(db_path.clone()); file_path.push("hash_keys"); let file = OpenOptions::new() .write(true) .append(true) .create_new(true) .open(file_path) .expect("tx hashes file"); LineWriter::new(file) }; let adapter = RocksAdapter::new(db_path, max_fd).expect("create adapter"); let storage = ImplStorage::new(Arc::new(adapter)); let mut hash_keys = Vec::with_capacity(NUMBER_OF_TXS_PER_ROUND); while count > 0 { let stxs = (0..NUMBER_OF_TXS_PER_ROUND) .map(|_| { let bytes = get_random_bytes(); let hash = Hash::digest(bytes); hash_keys.push(CommonHashKey::new(height, hash.clone())); mock_signed_tx(hash) }) .collect::>(); for key in hash_keys.drain(..) { let encoded_key = key.to_string(); hash_keys_file .write_all(encoded_key.as_bytes()) .expect("write tx hash"); hash_keys_file.write_all(b"\n").expect("write line"); } storage .insert_transactions(Context::new(), height, stxs) .await .expect("insert transaction"); count -= 1; height += 1; } println!("insert complete, height {}", height - 1); } else if std::env::args().nth(1) == Some("fetch".to_string()) { let db_path = std::env::args().nth(2).expect("db patch"); let max_fd = std::env::args() .nth(3) .expect("max open files for rocksdb") .parse::() .expect("max open files for rocksdb"); let height = std::env::args() .nth(4) .expect("height") .parse::() .expect("height"); let hash_keys_file = { let mut file_path = PathBuf::from(db_path.clone()); file_path.push("hash_keys"); let file = OpenOptions::new() .read(true) .open(file_path) .expect("tx hashes file"); BufReader::new(file).lines() }; let hashes = hash_keys_file .skip((height - 1) as usize * NUMBER_OF_TXS_PER_ROUND) .take(NUMBER_OF_TXS_PER_ROUND) .map(|l| { let key = CommonHashKey::from_str(&l.expect("read line")).expect("key"); key.hash().to_owned() }) .collect::>(); let adapter = RocksAdapter::new(db_path, 
max_fd).expect("create adapter"); let storage = ImplStorage::new(Arc::new(adapter)); let now = Instant::now(); let stxs = storage .get_transactions(Context::new(), height, &hashes) .await .expect("fetch"); println!("total {}, fetch {}", NUMBER_OF_TXS_PER_ROUND, stxs.len()); println!("fetch cost {} ms", now.elapsed().as_millis()); } else { println!( r#" Usage: generate [round] [db path] [fd] fetch [db path] [fd] [height] "# ); } } fn get_random_bytes() -> Bytes { let mut buf = [0u8; 32]; for u in &mut buf { *u = rand::random::(); } Bytes::copy_from_slice(&buf) } fn mock_signed_tx(tx_hash: Hash) -> SignedTransaction { let nonce = Hash::digest(Bytes::from("XXXX")); let request = TransactionRequest { service_name: "test".to_owned(), method: "test".to_owned(), payload: "test".to_owned(), }; let raw = RawTransaction { chain_id: nonce.clone(), nonce, timeout: 10, cycles_limit: 10, cycles_price: 1, request, sender: ADDRESS_STR.parse().unwrap(), }; SignedTransaction { raw, tx_hash, pubkey: Default::default(), signature: Default::default(), } } ================================================ FILE: core/storage/src/adapter/memory.rs ================================================ use std::collections::{hash_map, HashMap}; use std::error::Error; use std::marker::PhantomData; use std::sync::Arc; use async_trait::async_trait; use derive_more::{Display, From}; use parking_lot::RwLock; use protocol::codec::ProtocolCodecSync; use protocol::traits::{ IntoIteratorByRef, StorageAdapter, StorageBatchModify, StorageIterator, StorageSchema, }; use protocol::Bytes; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; type Category = HashMap, Vec>; #[derive(Debug)] pub struct MemoryAdapter { db: Arc>>, } impl MemoryAdapter { pub fn new() -> Self { MemoryAdapter { db: Arc::new(RwLock::new(HashMap::new())), } } } impl Default for MemoryAdapter { fn default() -> Self { MemoryAdapter { db: Arc::new(RwLock::new(HashMap::new())), } } } pub struct MemoryIterator<'a, S: 
StorageSchema> { inner: hash_map::Iter<'a, Vec, Vec>, pin_s: PhantomData, } impl<'a, S: StorageSchema> Iterator for MemoryIterator<'a, S> { type Item = ProtocolResult<(::Key, ::Value)>; fn next(&mut self) -> Option { let kv_decode = |(k_bytes, v_bytes): (&Vec, &Vec)| -> ProtocolResult<_> { let k_bytes = Bytes::copy_from_slice(k_bytes.as_ref()); let key = <_>::decode_sync(k_bytes)?; let v_bytes = Bytes::copy_from_slice(&v_bytes.as_ref()); let val = <_>::decode_sync(v_bytes)?; Ok((key, val)) }; self.inner.next().map(kv_decode) } } pub struct MemoryIntoIterator<'a, S: StorageSchema> { inner: parking_lot::RwLockReadGuard<'a, HashMap>, pin_s: PhantomData, } impl<'a, 'b: 'a, S: StorageSchema> IntoIterator for &'b MemoryIntoIterator<'a, S> { type IntoIter = StorageIterator<'a, S>; type Item = ProtocolResult<(::Key, ::Value)>; fn into_iter(self) -> Self::IntoIter { Box::new(MemoryIterator { inner: self .inner .get(&S::category().to_string()) .expect("impossible, already ensure we have category in prepare_iter") .iter(), pin_s: PhantomData::, }) } } impl<'c, S: StorageSchema> IntoIteratorByRef for MemoryIntoIterator<'c, S> { fn ref_to_iter<'a, 'b: 'a>(&'b self) -> StorageIterator<'a, S> { self.into_iter() } } #[async_trait] impl StorageAdapter for MemoryAdapter { async fn insert( &self, key: ::Key, val: ::Value, ) -> ProtocolResult<()> { let key = key.encode_sync()?.to_vec(); let val = val.encode_sync()?.to_vec(); let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); db.insert(key, val); Ok(()) } async fn get( &self, key: ::Key, ) -> ProtocolResult::Value>> { let key = key.encode_sync()?; let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); let opt_bytes = db.get(&key.to_vec()).cloned(); if let Some(bytes) = opt_bytes { let val = <_>::decode_sync(Bytes::copy_from_slice(&bytes))?; Ok(Some(val)) } else { Ok(None) } } async fn remove(&self, key: ::Key) -> 
ProtocolResult<()> { let key = key.encode_sync()?.to_vec(); let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); db.remove(&key); Ok(()) } async fn contains( &self, key: ::Key, ) -> ProtocolResult { let key = key.encode_sync()?.to_vec(); let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); Ok(db.get(&key).is_some()) } async fn batch_modify( &self, keys: Vec<::Key>, vals: Vec>, ) -> ProtocolResult<()> { if keys.len() != vals.len() { return Err(MemoryAdapterError::BatchLengthMismatch.into()); } let mut pairs: Vec<(Bytes, Option)> = Vec::with_capacity(keys.len()); for (key, value) in keys.into_iter().zip(vals.into_iter()) { let key = key.encode_sync()?; let value = match value { StorageBatchModify::Insert(value) => Some(value.encode_sync()?), StorageBatchModify::Remove => None, }; pairs.push((key, value)) } let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); for (key, value) in pairs.into_iter() { match value { Some(value) => db.insert(key.to_vec(), value.to_vec()), None => db.remove(&key.to_vec()), }; } Ok(()) } fn prepare_iter<'a, 'b: 'a, S: StorageSchema + 'static, P: AsRef<[u8]> + 'a>( &'b self, _prefix: &P, ) -> ProtocolResult + 'a>> { { self.db .write() .entry(S::category().to_string()) .or_insert_with(HashMap::new); } Ok(Box::new(MemoryIntoIterator { inner: self.db.read(), pin_s: PhantomData::, })) } } #[derive(Debug, Display, From)] pub enum MemoryAdapterError { #[display(fmt = "batch length dont match")] BatchLengthMismatch, } impl Error for MemoryAdapterError {} impl From for ProtocolError { fn from(err: MemoryAdapterError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Storage, Box::new(err)) } } ================================================ FILE: core/storage/src/adapter/mod.rs ================================================ pub mod memory; pub mod rocks; 
================================================ FILE: core/storage/src/adapter/rocks.rs ================================================ use std::error::Error; use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use std::time::Instant; use derive_more::{Display, From}; use rocksdb::{ColumnFamily, DBIterator, Options, WriteBatch, DB}; use async_trait::async_trait; use common_apm::metrics::storage::on_storage_put_cf; use protocol::codec::ProtocolCodecSync; use protocol::traits::{ IntoIteratorByRef, StorageAdapter, StorageBatchModify, StorageCategory, StorageIterator, StorageSchema, }; use protocol::Bytes; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; #[derive(Debug)] pub struct RocksAdapter { db: Arc, } impl RocksAdapter { pub fn new>(path: P, max_open_files: i32) -> ProtocolResult { let mut opts = Options::default(); opts.create_if_missing(true); opts.create_missing_column_families(true); opts.set_max_open_files(max_open_files); let categories = [ map_category(StorageCategory::Block), map_category(StorageCategory::BlockHeader), map_category(StorageCategory::Receipt), map_category(StorageCategory::SignedTransaction), map_category(StorageCategory::Wal), map_category(StorageCategory::HashHeight), ]; let db = DB::open_cf(&opts, path, categories.iter()).map_err(RocksAdapterError::from)?; Ok(RocksAdapter { db: Arc::new(db) }) } } macro_rules! 
db { ($db:expr, $op:ident, $column:expr, $key:expr) => { $db.$op($column, $key).map_err(RocksAdapterError::from) }; ($db:expr, $op:ident, $column:expr, $key:expr, $val:expr) => { $db.$op($column, $key, $val) .map_err(RocksAdapterError::from) }; } pub struct RocksIterator<'a, S: StorageSchema> { inner: DBIterator<'a>, pin_s: PhantomData, } impl<'a, S: StorageSchema> Iterator for RocksIterator<'a, S> { type Item = ProtocolResult<(::Key, ::Value)>; fn next(&mut self) -> Option { let kv_decode = |(k_bytes, v_bytes): (Box<[u8]>, Box<[u8]>)| -> ProtocolResult<_> { let k_bytes = Bytes::copy_from_slice(k_bytes.as_ref()); let key = <_>::decode_sync(k_bytes)?; let v_bytes = Bytes::copy_from_slice(&v_bytes.as_ref()); let val = <_>::decode_sync(v_bytes)?; Ok((key, val)) }; self.inner.next().map(kv_decode) } } pub struct RocksIntoIterator<'a, S: StorageSchema, P: AsRef<[u8]>> { db: Arc, column: &'a ColumnFamily, prefix: &'a P, pin_s: PhantomData, } impl<'a, 'b: 'a, S: StorageSchema, P: AsRef<[u8]>> IntoIterator for &'b RocksIntoIterator<'a, S, P> { type IntoIter = StorageIterator<'a, S>; type Item = ProtocolResult<(::Key, ::Value)>; fn into_iter(self) -> Self::IntoIter { let iter: DBIterator<'_> = self.db.prefix_iterator_cf(self.column, self.prefix); Box::new(RocksIterator { inner: iter, pin_s: PhantomData::, }) } } impl<'c, S: StorageSchema, P: AsRef<[u8]>> IntoIteratorByRef for RocksIntoIterator<'c, S, P> { fn ref_to_iter<'a, 'b: 'a>(&'b self) -> StorageIterator<'a, S> { self.into_iter() } } #[async_trait] impl StorageAdapter for RocksAdapter { async fn insert(&self, key: S::Key, val: S::Value) -> ProtocolResult<()> { let inst = Instant::now(); let column = get_column::(&self.db)?; let key = key.encode_sync()?.to_vec(); let val = val.encode_sync()?.to_vec(); let size = val.len() as i64; db!(self.db, put_cf, column, key, val)?; on_storage_put_cf(S::category(), inst.elapsed(), size); Ok(()) } async fn get( &self, key: ::Key, ) -> ProtocolResult::Value>> { let column = 
get_column::(&self.db)?; let key = key.encode_sync()?; let opt_bytes = { db!(self.db, get_cf, column, key)?.map(|db_vec| Bytes::copy_from_slice(&db_vec)) }; if let Some(bytes) = opt_bytes { let val = <_>::decode_sync(bytes)?; Ok(Some(val)) } else { Ok(None) } } async fn remove(&self, key: ::Key) -> ProtocolResult<()> { let column = get_column::(&self.db)?; let key = key.encode_sync()?.to_vec(); db!(self.db, delete_cf, column, key)?; Ok(()) } async fn contains( &self, key: ::Key, ) -> ProtocolResult { let column = get_column::(&self.db)?; let key = key.encode_sync()?.to_vec(); let val = db!(self.db, get_cf, column, key)?; Ok(val.is_some()) } async fn batch_modify( &self, keys: Vec<::Key>, vals: Vec>, ) -> ProtocolResult<()> { if keys.len() != vals.len() { return Err(RocksAdapterError::BatchLengthMismatch.into()); } let column = get_column::(&self.db)?; let mut pairs: Vec<(Bytes, Option)> = Vec::with_capacity(keys.len()); for (key, value) in keys.into_iter().zip(vals.into_iter()) { let key = key.encode_sync()?; let value = match value { StorageBatchModify::Insert(value) => Some(value.encode_sync()?), StorageBatchModify::Remove => None, }; pairs.push((key, value)) } let mut batch = WriteBatch::default(); let mut insert_size = 0usize; let inst = Instant::now(); for (key, value) in pairs.into_iter() { match value { Some(value) => { insert_size += value.len(); batch.put_cf(column, key, value) } None => batch.delete_cf(column, key), } } on_storage_put_cf(S::category(), inst.elapsed(), insert_size as i64); self.db.write(batch).map_err(RocksAdapterError::from)?; Ok(()) } fn prepare_iter<'a, 'b: 'a, S: StorageSchema + 'static, P: AsRef<[u8]> + 'a>( &'b self, prefix: &'a P, ) -> ProtocolResult + 'a>> { let column = get_column::(&self.db)?; let rocks_iter = RocksIntoIterator { db: Arc::clone(&self.db), column, prefix, pin_s: PhantomData::, }; Ok(Box::new(rocks_iter)) } } #[derive(Debug, Display, From)] pub enum RocksAdapterError { #[display(fmt = "category {} not found", _0)] 
CategoryNotFound(&'static str), #[display(fmt = "rocksdb {}", _0)] RocksDB(rocksdb::Error), #[display(fmt = "parameters do not match")] InsertParameter, #[display(fmt = "batch length dont match")] BatchLengthMismatch, } impl Error for RocksAdapterError {} impl From for ProtocolError { fn from(err: RocksAdapterError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Storage, Box::new(err)) } } const C_BLOCKS: &str = "c1"; const C_SIGNED_TRANSACTIONS: &str = "c2"; const C_RECEIPTS: &str = "c3"; const C_WALS: &str = "c4"; const C_HASH_HEIGHT_MAP: &str = "c5"; const C_BLOCK_HEADERS: &str = "c6"; fn map_category(c: StorageCategory) -> &'static str { match c { StorageCategory::Block => C_BLOCKS, StorageCategory::BlockHeader => C_BLOCK_HEADERS, StorageCategory::Receipt => C_RECEIPTS, StorageCategory::SignedTransaction => C_SIGNED_TRANSACTIONS, StorageCategory::Wal => C_WALS, StorageCategory::HashHeight => C_HASH_HEIGHT_MAP, } } fn get_column(db: &DB) -> Result<&ColumnFamily, RocksAdapterError> { let category = map_category(S::category()); let column = db .cf_handle(category) .ok_or_else(|| RocksAdapterError::from(category))?; Ok(column) } ================================================ FILE: core/storage/src/lib.rs ================================================ #![feature(test)] #![allow(clippy::mutable_key_type)] #[cfg(test)] mod tests; pub mod adapter; use std::collections::{HashMap, HashSet}; use std::convert::From; use std::error::Error; use std::str::FromStr; use std::sync::Arc; use std::time::Instant; use arc_swap::ArcSwap; use async_trait::async_trait; use derive_more::{Display, From}; use lazy_static::lazy_static; use common_apm::metrics::storage::on_storage_get_cf; use common_apm::muta_apm; use protocol::codec::ProtocolCodecSync; use protocol::traits::{ CommonStorage, Context, MaintenanceStorage, Storage, StorageAdapter, StorageBatchModify, StorageCategory, StorageSchema, }; use protocol::types::{Block, BlockHeader, Hash, Proof, Receipt, 
SignedTransaction};
use protocol::Bytes;
use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult};

// Above this many values, decoding is sharded across tokio blocking tasks.
const BATCH_VALUE_DECODE_NUMBER: usize = 1000;

lazy_static! {
    // Well-known keys for the single "latest" block / proof entries.
    pub static ref LATEST_BLOCK_KEY: Hash = Hash::digest(Bytes::from("latest_hash"));
    pub static ref LATEST_PROOF_KEY: Hash = Hash::digest(Bytes::from("latest_proof"));
}

// FIXME: https://github.com/facebook/rocksdb/wiki/Transactions
// Inserts a vec of tx-hash-bearing items under (height, hash) keys and also
// records hash -> height so items can later be found by hash alone.
macro_rules! batch_insert {
    ($self_: ident, $block_height:expr, $vec: expr, $schema: ident) => {
        let (hashes, heights) = $vec
            .iter()
            .map(|item| {
                (
                    item.tx_hash.clone(),
                    StorageBatchModify::Insert($block_height),
                )
            })
            .unzip();

        let (keys, batch_stxs): (Vec<_>, Vec<_>) = $vec
            .into_iter()
            .map(|item| {
                (
                    CommonHashKey::new($block_height, item.tx_hash.clone()),
                    StorageBatchModify::Insert(item),
                )
            })
            .unzip();

        $self_
            .adapter
            .batch_modify::<$schema>(keys, batch_stxs)
            .await?;

        $self_
            .adapter
            .batch_modify::<HashHeightSchema>(hashes, heights)
            .await?;
    };
}

macro_rules! get {
    ($self_: ident, $key: expr, $schema: ident) => {{
        $self_.adapter.get::<$schema>($key).await
    }};
}

// Like get!, but a missing value becomes StorageError::GetNone.
macro_rules! ensure_get {
    ($self_: ident, $key: expr, $schema: ident) => {{
        let opt = get!($self_, $key, $schema)?;
        opt.ok_or(StorageError::GetNone)?
    }};
}

// Declares a unit struct implementing StorageSchema with the given
// key/value types and storage category.
macro_rules! impl_storage_schema_for {
    ($name: ident, $key: ident, $val: ident, $category: ident) => {
        pub struct $name;

        impl StorageSchema for $name {
            type Key = $key;
            type Value = $val;

            fn category() -> StorageCategory {
                StorageCategory::$category
            }
        }
    };
}

/// Storage facade over any `StorageAdapter`, with an in-memory cache of the
/// latest block (`ArcSwap` gives lock-free reads).
#[derive(Debug)]
pub struct ImplStorage<Adapter> {
    adapter: Arc<Adapter>,

    latest_block: ArcSwap<Option<Block>>,
}

impl<Adapter: StorageAdapter> ImplStorage<Adapter> {
    pub fn new(adapter: Arc<Adapter>) -> Self {
        Self {
            adapter,
            latest_block: ArcSwap::from(Arc::new(None)),
        }
    }
}

/// 8-byte big-endian block height used as a key prefix, so rocksdb prefix
/// iteration visits a block's entries contiguously in height order.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct CommonPrefix {
    block_height: [u8; 8], // BigEndian
}

impl CommonPrefix {
    pub fn new(block_height: u64) -> Self {
        CommonPrefix {
            block_height: block_height.to_be_bytes(),
        }
    }

    pub fn len() -> usize {
        8
    }

    pub fn height(self) -> u64 {
        u64::from_be_bytes(self.block_height)
    }

    /// Concatenate prefix (8 bytes) and hash (32 bytes) into a 40-byte key.
    pub fn make_hash_key(self, hash: &Hash) -> [u8; 40] {
        debug_assert!(hash.as_bytes().len() == 32);

        let mut key = [0u8; 40];
        key[0..8].copy_from_slice(&self.block_height);
        key[8..40].copy_from_slice(&hash.as_bytes()[..32]);

        key
    }
}

impl AsRef<[u8]> for CommonPrefix {
    fn as_ref(&self) -> &[u8] {
        &self.block_height
    }
}

impl From<&[u8]> for CommonPrefix {
    fn from(bytes: &[u8]) -> CommonPrefix {
        debug_assert!(bytes.len() >= 8);

        let mut h_buf = [0u8; 8];
        h_buf.copy_from_slice(&bytes[0..8]);

        CommonPrefix {
            block_height: h_buf,
        }
    }
}

impl ProtocolCodecSync for CommonPrefix {
    fn encode_sync(&self) -> ProtocolResult<Bytes> {
        Ok(Bytes::copy_from_slice(&self.block_height))
    }

    fn decode_sync(bytes: Bytes) -> ProtocolResult<Self> {
        Ok(CommonPrefix::from(&bytes[..8]))
    }
}

/// Composite key: (block height, tx hash). Sorts by height first.
#[derive(Debug, Clone)]
pub struct CommonHashKey {
    prefix: CommonPrefix,
    hash:   Hash,
}

impl CommonHashKey {
    pub fn new(block_height: u64, hash: Hash) -> Self {
        CommonHashKey {
            prefix: CommonPrefix::new(block_height),
            hash,
        }
    }

    pub fn height(&self) -> u64 {
        self.prefix.height()
    }

    pub fn hash(&self) -> &Hash {
        &self.hash
    }
}

impl ProtocolCodecSync for CommonHashKey {
    fn encode_sync(&self) -> ProtocolResult<Bytes> {
        Ok(Bytes::copy_from_slice(
            &self.prefix.make_hash_key(&self.hash),
        ))
    }

    fn decode_sync(mut bytes: Bytes) -> ProtocolResult<Self> {
        debug_assert!(bytes.len() >= CommonPrefix::len());

        let prefix = CommonPrefix::from(&bytes[0..CommonPrefix::len()]);
        let hash = Hash::from_bytes(bytes.split_off(CommonPrefix::len()))?;

        Ok(CommonHashKey { prefix, hash })
    }
}

impl ToString for CommonHashKey {
    fn to_string(&self) -> String {
        format!("{}:{}", self.prefix.height(), self.hash.as_hex())
    }
}

impl FromStr for CommonHashKey {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split(':').collect::<Vec<_>>();
        debug_assert!(parts.len() == 2);

        let height = parts[0].parse::<u64>().map_err(|_| ())?;
        let hash = Hash::from_hex(parts[1]).map_err(|_| ())?;

        Ok(CommonHashKey::new(height, hash))
    }
}

pub type BlockKey = CommonPrefix;

impl_storage_schema_for!(
    TransactionSchema,
    CommonHashKey,
    SignedTransaction,
    SignedTransaction
);
impl_storage_schema_for!(
    TransactionBytesSchema,
    CommonHashKey,
    Bytes,
    SignedTransaction
);
impl_storage_schema_for!(BlockSchema, BlockKey, Block, Block);
impl_storage_schema_for!(BlockHeaderSchema, BlockKey, BlockHeader, BlockHeader);
impl_storage_schema_for!(ReceiptSchema, CommonHashKey, Receipt, Receipt);
impl_storage_schema_for!(ReceiptBytesSchema, CommonHashKey, Bytes, Receipt);
impl_storage_schema_for!(HashHeightSchema, Hash, u64, HashHeight);
impl_storage_schema_for!(LatestBlockSchema, Hash, Block, Block);
impl_storage_schema_for!(LatestProofSchema, Hash, Proof, Block);

#[async_trait]
impl<Adapter: StorageAdapter> MaintenanceStorage for ImplStorage<Adapter> {}

#[async_trait]
impl<Adapter: StorageAdapter> Storage for ImplStorage<Adapter> {
    #[muta_apm::derive::tracing_span(kind = "storage")]
    async fn insert_transactions(
        &self,
        ctx: Context,
        block_height: u64,
        signed_txs: Vec<SignedTransaction>,
    ) -> ProtocolResult<()> {
        batch_insert!(self, block_height, signed_txs, TransactionSchema);

        Ok(())
    }

    #[muta_apm::derive::tracing_span(kind = "storage")]
    async fn get_transactions(
        &self,
        ctx: Context,
        block_height: u64,
        hashes: &[Hash],
    ) -> ProtocolResult<Vec<Option<SignedTransaction>>> {
        let key_prefix =
CommonPrefix::new(block_height); let mut found = Vec::with_capacity(hashes.len()); { let inst = Instant::now(); let prepare_iter = self .adapter .prepare_iter::(&key_prefix)?; let mut iter = prepare_iter.ref_to_iter(); let set = hashes.iter().collect::>(); let mut count = hashes.len(); on_storage_get_cf( StorageCategory::SignedTransaction, inst.elapsed(), count as i64, ); while count > 0 { let (key, stx_bytes) = match iter.next() { None => break, Some(Ok(key_to_stx_bytes)) => key_to_stx_bytes, Some(Err(err)) => return Err(err), }; // Note: fix clippy::suspicious_else_formatting if key.height() != block_height { break; } else if !set.contains(&key.hash) { continue; } else { found.push((key.hash, stx_bytes)); count -= 1; } } } let mut found = { if found.len() <= BATCH_VALUE_DECODE_NUMBER { found .drain(..) .map(|(k, v): (Hash, Bytes)| SignedTransaction::decode_sync(v).map(|v| (k, v))) .collect::>>()? .into_iter() .collect::>() } else { let futs = found .chunks(BATCH_VALUE_DECODE_NUMBER) .map(|vals| { let vals = vals.to_owned(); // FIXME: cancel decode tokio::spawn(async move { vals.into_iter() .map(|(k, v)| <_>::decode_sync(v).map(|v| (k, v))) .collect::>>() }) }) .collect::>(); futures::future::try_join_all(futs) .await .map_err(|_| StorageError::BatchDecode)? .into_iter() .collect::>>>()? .into_iter() .flatten() .collect::>() } }; Ok(hashes.iter().map(|h| found.remove(&h)).collect::>()) } async fn get_transaction_by_hash( &self, _ctx: Context, hash: &Hash, ) -> ProtocolResult> { if let Some(block_height) = get!(self, hash.clone(), HashHeightSchema)? 
{ get!( self, CommonHashKey::new(block_height, hash.clone()), TransactionSchema ) } else { Ok(None) } } #[muta_apm::derive::tracing_span(kind = "storage")] async fn insert_receipts( &self, ctx: Context, block_height: u64, receipts: Vec, ) -> ProtocolResult<()> { batch_insert!(self, block_height, receipts, ReceiptSchema); Ok(()) } #[muta_apm::derive::tracing_span(kind = "storage")] async fn get_receipts( &self, ctx: Context, block_height: u64, hashes: Vec, ) -> ProtocolResult>> { let key_prefix = CommonPrefix::new(block_height); let mut found = Vec::with_capacity(hashes.len()); { let inst = Instant::now(); let prepare_iter = self .adapter .prepare_iter::(&key_prefix)?; let mut iter = prepare_iter.ref_to_iter(); let set = hashes.iter().collect::>(); let mut count = hashes.len(); on_storage_get_cf(StorageCategory::Receipt, inst.elapsed(), count as i64); while count > 0 { let (key, stx_bytes) = match iter.next() { None => break, Some(Ok(key_to_stx_bytes)) => key_to_stx_bytes, Some(Err(err)) => return Err(err), }; // Note: fix clippy::suspicious_else_formatting if key.height() != block_height { break; } else if !set.contains(&key.hash) { continue; } else { found.push((key.hash, stx_bytes)); count -= 1; } } } let mut found = { if found.len() <= BATCH_VALUE_DECODE_NUMBER { found .drain(..) .map(|(k, v): (Hash, Bytes)| Receipt::decode_sync(v).map(|v| (k, v))) .collect::>>()? .into_iter() .collect::>() } else { let futs = found .chunks(BATCH_VALUE_DECODE_NUMBER) .map(|vals| { let vals = vals.to_owned(); // FIXME: cancel decode tokio::spawn(async move { vals.into_iter() .map(|(k, v)| <_>::decode_sync(v).map(|v| (k, v))) .collect::>>() }) }) .collect::>(); futures::future::try_join_all(futs) .await .map_err(|_| StorageError::BatchDecode)? .into_iter() .collect::>>>()? 
.into_iter() .flatten() .collect::>() } }; Ok(hashes .into_iter() .map(|h| found.remove(&h)) .collect::>()) } async fn get_receipt_by_hash( &self, _ctx: Context, hash: Hash, ) -> ProtocolResult> { if let Some(block_height) = get!(self, hash.clone(), HashHeightSchema)? { get!(self, CommonHashKey::new(block_height, hash), ReceiptSchema) } else { Ok(None) } } async fn update_latest_proof(&self, _ctx: Context, proof: Proof) -> ProtocolResult<()> { self.adapter .insert::(LATEST_PROOF_KEY.clone(), proof) .await?; Ok(()) } async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult { let proof = ensure_get!(self, LATEST_PROOF_KEY.clone(), LatestProofSchema); Ok(proof) } } #[async_trait] impl CommonStorage for ImplStorage { async fn insert_block(&self, ctx: Context, block: Block) -> ProtocolResult<()> { self.set_block(ctx.clone(), block.clone()).await?; self.set_latest_block(ctx, block).await?; Ok(()) } async fn get_block(&self, _ctx: Context, height: u64) -> ProtocolResult> { self.adapter.get::(BlockKey::new(height)).await } async fn get_block_header( &self, ctx: Context, height: u64, ) -> ProtocolResult> { let opt_header = self .adapter .get::(BlockKey::new(height)) .await?; if opt_header.is_some() { return Ok(opt_header); } Ok(self.get_block(ctx, height).await?.map(|b| b.header)) } // !!!be careful, the prev_hash may mismatch and latest block may diverse!!! async fn set_block(&self, _ctx: Context, block: Block) -> ProtocolResult<()> { self.adapter .insert::(BlockKey::new(block.header.height), block.clone()) .await?; self.adapter .insert::(BlockKey::new(block.header.height), block.header.clone()) .await?; Ok(()) } // !be careful, only call this function in maintenance mode! 
async fn remove_block(&self, _ctx: Context, height: u64) -> ProtocolResult<()> { self.adapter .remove::(BlockKey::new(height)) .await } async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult { if let Some(block) = self.latest_block.load().as_ref().clone() { Ok(block) } else { let block = ensure_get!(self, LATEST_BLOCK_KEY.clone(), LatestBlockSchema); Ok(block) } } async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult { let opt_header = { let guard = self.latest_block.load(); let opt_block = guard.as_ref(); opt_block.as_ref().map(|b| b.header.clone()) }; if let Some(header) = opt_header { Ok(header) } else { let block = ensure_get!(self, LATEST_BLOCK_KEY.clone(), LatestBlockSchema); Ok(block.header) } } async fn set_latest_block(&self, _ctx: Context, block: Block) -> ProtocolResult<()> { self.adapter .insert::(LATEST_BLOCK_KEY.clone(), block.clone()) .await?; self.latest_block.store(Arc::new(Some(block))); Ok(()) } } #[derive(Debug, Display, From)] pub enum StorageError { #[display(fmt = "get none")] GetNone, #[display(fmt = "decode batch value")] BatchDecode, } impl Error for StorageError {} impl From for ProtocolError { fn from(err: StorageError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Storage, Box::new(err)) } } ================================================ FILE: core/storage/src/tests/adapter.rs ================================================ use protocol::traits::{StorageAdapter, StorageBatchModify}; use protocol::types::Hash; use crate::adapter::memory::MemoryAdapter; use crate::adapter::rocks::RocksAdapter; use crate::tests::{get_random_bytes, mock_signed_tx}; use crate::{CommonHashKey, TransactionSchema}; #[tokio::test] async fn test_adapter_insert() { adapter_insert_test(MemoryAdapter::new()).await; adapter_insert_test(RocksAdapter::new("rocksdb/test_adapter_insert".to_string(), 64).unwrap()) .await } #[tokio::test] async fn test_adapter_batch_modify() { 
adapter_batch_modify_test(MemoryAdapter::new()).await; adapter_batch_modify_test( RocksAdapter::new("rocksdb/test_adapter_batch_modify".to_string(), 64).unwrap(), ) .await } #[tokio::test] async fn test_adapter_remove() { adapter_remove_test(MemoryAdapter::new()).await; adapter_remove_test(RocksAdapter::new("rocksdb/test_adapter_remove".to_string(), 64).unwrap()) .await } async fn adapter_insert_test(db: impl StorageAdapter) { let tx_hash = Hash::digest(get_random_bytes(10)); let tx_key = CommonHashKey::new(1, tx_hash.clone()); let stx = mock_signed_tx(tx_hash.clone()); db.insert::(tx_key.clone(), stx.clone()) .await .unwrap(); let stx = db.get::(tx_key).await.unwrap().unwrap(); assert_eq!(tx_hash, stx.tx_hash); } async fn adapter_batch_modify_test(db: impl StorageAdapter) { let mut stxs = Vec::new(); let mut keys = Vec::new(); let mut inserts = Vec::new(); for _ in 0..10 { let tx_hash = Hash::digest(get_random_bytes(10)); keys.push(CommonHashKey::new(1, tx_hash.clone())); let stx = mock_signed_tx(tx_hash); stxs.push(stx.clone()); inserts.push(StorageBatchModify::Insert::(stx)); } db.batch_modify::(keys.clone(), inserts) .await .unwrap(); let opt_stxs = db.get_batch::(keys).await.unwrap(); for i in 0..10 { assert_eq!( stxs.get(i).unwrap().tx_hash, opt_stxs.get(i).unwrap().as_ref().unwrap().tx_hash ); } } async fn adapter_remove_test(db: impl StorageAdapter) { let tx_hash = Hash::digest(get_random_bytes(10)); let tx_key = CommonHashKey::new(1, tx_hash.clone()); let is_exist = db .contains::(tx_key.clone()) .await .unwrap(); assert!(!is_exist); let stx = &mock_signed_tx(tx_hash); db.insert::(tx_key.clone(), stx.clone()) .await .unwrap(); let is_exist = db .contains::(tx_key.clone()) .await .unwrap(); assert!(is_exist); db.remove::(tx_key.clone()) .await .unwrap(); let is_exist = db.contains::(tx_key).await.unwrap(); assert!(!is_exist); } ================================================ FILE: core/storage/src/tests/mod.rs 
================================================

extern crate test;

mod adapter;
mod storage;

use rand::random;

use protocol::traits::ServiceResponse;
use protocol::types::{
    Block, BlockHeader, Hash, Proof, RawTransaction, Receipt, ReceiptResponse, SignedTransaction,
    TransactionRequest,
};
use protocol::Bytes;

const ADDRESS_STR: &str = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705";

// Builds a minimal signed tx; only tx_hash varies, signatures are empty.
fn mock_signed_tx(tx_hash: Hash) -> SignedTransaction {
    let nonce = Hash::digest(Bytes::from("XXXX"));

    let request = TransactionRequest {
        service_name: "test".to_owned(),
        method:       "test".to_owned(),
        payload:      "test".to_owned(),
    };

    let raw = RawTransaction {
        chain_id: nonce.clone(),
        nonce,
        timeout: 10,
        cycles_limit: 10,
        cycles_price: 1,
        request,
        sender: ADDRESS_STR.parse().unwrap(),
    };

    SignedTransaction {
        raw,
        tx_hash,
        pubkey: Default::default(),
        signature: Default::default(),
    }
}

// Builds a minimal success receipt keyed by tx_hash.
fn mock_receipt(tx_hash: Hash) -> Receipt {
    let nonce = Hash::digest(Bytes::from("XXXX"));

    let response = ReceiptResponse {
        service_name: "test".to_owned(),
        method:       "test".to_owned(),
        response:     ServiceResponse::<String> {
            code:          0,
            succeed_data:  "ok".to_owned(),
            error_message: "".to_owned(),
        },
    };

    Receipt {
        state_root: nonce,
        height: 10,
        tx_hash,
        cycles_used: 10,
        events: vec![],
        response,
    }
}

// Builds a block at `height` whose proof commits to `block_hash`.
fn mock_block(height: u64, block_hash: Hash) -> Block {
    let nonce = Hash::digest(Bytes::from("XXXX"));

    let header = BlockHeader {
        chain_id: nonce.clone(),
        height,
        exec_height: height - 1,
        prev_hash: nonce.clone(),
        timestamp: 1000,
        order_root: nonce.clone(),
        order_signed_transactions_hash: nonce.clone(),
        confirm_root: Vec::new(),
        state_root: nonce,
        receipt_root: Vec::new(),
        cycles_used: vec![999_999],
        proposer: ADDRESS_STR.parse().unwrap(),
        proof: mock_proof(block_hash),
        validator_version: 1,
        validators: Vec::new(),
    };

    Block {
        header,
        ordered_tx_hashes: Vec::new(),
    }
}

fn mock_proof(block_hash: Hash) -> Proof {
    Proof {
        height: 0,
        round: 0,
        block_hash,
        signature: Default::default(),
        bitmap: Default::default(),
    }
}

// Random payload of `len` bytes.
fn get_random_bytes(len: usize) -> Bytes {
    let vec: Vec<u8> = (0..len).map(|_| random::<u8>()).collect();
    Bytes::from(vec)
}

================================================ FILE: core/storage/src/tests/storage.rs ================================================

extern crate test;

use std::sync::Arc;
use test::Bencher;

use protocol::traits::{CommonStorage, Context, Storage};
use protocol::types::Hash;
use tokio::runtime::Runtime;

use crate::adapter::memory::MemoryAdapter;
use crate::tests::{get_random_bytes, mock_block, mock_proof, mock_receipt, mock_signed_tx};
use crate::ImplStorage;
use crate::BATCH_VALUE_DECODE_NUMBER;

#[tokio::test]
async fn test_storage_block_insert() {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));

    let height = 100;
    let block = mock_block(height, Hash::digest(get_random_bytes(10)));

    storage.insert_block(Context::new(), block).await.unwrap();

    // insert_block updates both the latest-block cache and the height index.
    let block = storage.get_latest_block(Context::new()).await.unwrap();
    assert_eq!(height, block.header.height);

    let block = storage.get_block(Context::new(), height).await.unwrap();
    assert_eq!(Some(height), block.map(|b| b.header.height));
}

#[tokio::test]
async fn test_storage_receipts_insert() {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2077;

    let mut receipts = Vec::new();
    let mut hashes = Vec::new();

    for _ in 0..10 {
        let tx_hash = Hash::digest(get_random_bytes(10));
        hashes.push(tx_hash.clone());

        let receipt = mock_receipt(tx_hash.clone());
        receipts.push(receipt);
    }

    storage
        .insert_receipts(Context::new(), height, receipts.clone())
        .await
        .unwrap();

    let receipts_2 = storage
        .get_receipts(Context::new(), height, hashes)
        .await
        .unwrap();

    for i in 0..10 {
        assert_eq!(
            Some(receipts.get(i).unwrap()),
            receipts_2.get(i).unwrap().as_ref()
        );
    }
}

// Exceeds BATCH_VALUE_DECODE_NUMBER to exercise the concurrent decode path.
#[tokio::test]
async fn test_storage_receipts_get_batch_decode() {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2077;
    let count = BATCH_VALUE_DECODE_NUMBER + 100;

    let mut receipts = Vec::new();
    let
mut hashes = Vec::new();

    for _ in 0..count {
        let tx_hash = Hash::digest(get_random_bytes(10));
        hashes.push(tx_hash.clone());

        let receipt = mock_receipt(tx_hash.clone());
        receipts.push(receipt);
    }

    storage
        .insert_receipts(Context::new(), height, receipts.clone())
        .await
        .unwrap();

    let receipts_2 = storage
        .get_receipts(Context::new(), height, hashes)
        .await
        .unwrap();

    for i in 0..count {
        assert_eq!(
            Some(receipts.get(i).unwrap()),
            receipts_2.get(i).unwrap().as_ref()
        );
    }
}

#[tokio::test]
async fn test_storage_transactions_insert() {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2020;

    let mut transactions = Vec::new();
    let mut hashes = Vec::new();

    for _ in 0..10 {
        let tx_hash = Hash::digest(get_random_bytes(10));
        hashes.push(tx_hash.clone());

        let transaction = mock_signed_tx(tx_hash.clone());
        transactions.push(transaction);
    }

    storage
        .insert_transactions(Context::new(), height, transactions.clone())
        .await
        .unwrap();

    let transactions_2 = storage
        .get_transactions(Context::new(), height, &hashes)
        .await
        .unwrap();

    for i in 0..10 {
        assert_eq!(
            Some(transactions.get(i).unwrap()),
            transactions_2.get(i).unwrap().as_ref()
        );
    }
}

// Exceeds BATCH_VALUE_DECODE_NUMBER to exercise the concurrent decode path.
#[tokio::test]
async fn test_storage_transactions_get_batch_decode() {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2020;
    let count = BATCH_VALUE_DECODE_NUMBER + 100;

    let mut transactions = Vec::new();
    let mut hashes = Vec::new();

    for _ in 0..count {
        let tx_hash = Hash::digest(get_random_bytes(10));
        hashes.push(tx_hash.clone());

        let transaction = mock_signed_tx(tx_hash.clone());
        transactions.push(transaction);
    }

    storage
        .insert_transactions(Context::new(), height, transactions.clone())
        .await
        .unwrap();

    let transactions_2 = storage
        .get_transactions(Context::new(), height, &hashes)
        .await
        .unwrap();

    for i in 0..count {
        assert_eq!(
            Some(transactions.get(i).unwrap()),
            transactions_2.get(i).unwrap().as_ref()
        );
    }
}

#[tokio::test]
async fn test_storage_latest_proof_insert() {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));

    let block_hash = Hash::digest(get_random_bytes(10));
    let proof = mock_proof(block_hash);

    storage
        .update_latest_proof(Context::new(), proof.clone())
        .await
        .unwrap();

    let proof_2 = storage.get_latest_proof(Context::new()).await.unwrap();
    assert_eq!(proof.block_hash, proof_2.block_hash);
}

#[rustfmt::skip]
/// Bench in Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz (8 x 2200)
/// test tests::storage::bench_insert_10000_receipts ... bench:  33,954,916 ns/iter (+/- 3,818,780)
/// test tests::storage::bench_insert_20000_receipts ... bench:  69,476,334 ns/iter (+/- 25,206,468)
/// test tests::storage::bench_insert_40000_receipts ... bench: 138,903,121 ns/iter (+/- 26,053,433)
/// test tests::storage::bench_insert_80000_receipts ... bench: 289,629,756 ns/iter (+/- 114,583,692)
/// test tests::storage::bench_insert_10000_txs      ... bench:  37,900,652 ns/iter (+/- 19,055,351)
/// test tests::storage::bench_insert_20000_txs      ... bench:  76,499,664 ns/iter (+/- 17,883,127)
/// test tests::storage::bench_insert_40000_txs      ... bench: 148,111,340 ns/iter (+/- 5,637,411)
/// test tests::storage::bench_insert_80000_txs      ... bench: 311,861,163 ns/iter (+/- 16,891,290)
#[bench]
fn bench_insert_10000_receipts(b: &mut Bencher) {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2045;

    let receipts = (0..10000)
        .map(|_| mock_receipt(Hash::digest(get_random_bytes(10))))
        .collect::<Vec<_>>();

    let mut rt = Runtime::new().unwrap();

    b.iter(|| {
        rt.block_on(storage.insert_receipts(Context::new(), height, receipts.clone())).unwrap()
    })
}

#[bench]
fn bench_insert_20000_receipts(b: &mut Bencher) {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2045;

    let receipts = (0..20000)
        .map(|_| mock_receipt(Hash::digest(get_random_bytes(10))))
        .collect::<Vec<_>>();

    let mut rt = Runtime::new().unwrap();

    b.iter(move || {
        rt.block_on(storage.insert_receipts(Context::new(), height, receipts.clone()))
            .unwrap()
    })
}

#[bench]
fn bench_insert_40000_receipts(b: &mut Bencher) {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2077;

    let receipts = (0..40000)
        .map(|_| mock_receipt(Hash::digest(get_random_bytes(10))))
        .collect::<Vec<_>>();

    let mut rt = Runtime::new().unwrap();

    b.iter(move || {
        rt.block_on(storage.insert_receipts(Context::new(), height, receipts.clone()))
            .unwrap()
    })
}

#[bench]
fn bench_insert_80000_receipts(b: &mut Bencher) {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2077;

    let receipts = (0..80000)
        .map(|_| mock_receipt(Hash::digest(get_random_bytes(10))))
        .collect::<Vec<_>>();

    let mut rt = Runtime::new().unwrap();

    b.iter(move || {
        rt.block_on(storage.insert_receipts(Context::new(), height, receipts.clone()))
            .unwrap()
    })
}

#[bench]
fn bench_insert_10000_txs(b: &mut Bencher) {
    let storage = ImplStorage::new(Arc::new(MemoryAdapter::new()));
    let height = 2077;

    let txs = (0..10000)
        .map(|_| mock_signed_tx(Hash::digest(get_random_bytes(10))))
        .collect::<Vec<_>>();

    let mut rt = Runtime::new().unwrap();

    b.iter(move || {
        rt.block_on(storage.insert_transactions(Context::new(), height, txs.clone()))
            .unwrap()
    })
}

#[bench]
fn
bench_insert_20000_txs(b: &mut Bencher) { let storage = ImplStorage::new(Arc::new(MemoryAdapter::new())); let height = 2077; let txs = (0..20000) .map(|_| mock_signed_tx(Hash::digest(get_random_bytes(10)))) .collect::>(); let mut rt = Runtime::new().unwrap(); b.iter(move || { rt.block_on(storage.insert_transactions(Context::new(), height, txs.clone())) .unwrap() }) } #[bench] fn bench_insert_40000_txs(b: &mut Bencher) { let storage = ImplStorage::new(Arc::new(MemoryAdapter::new())); let height = 2077; let txs = (0..40000) .map(|_| mock_signed_tx(Hash::digest(get_random_bytes(10)))) .collect::>(); let mut rt = Runtime::new().unwrap(); b.iter(move || { rt.block_on(storage.insert_transactions(Context::new(), height, txs.clone())) .unwrap() }) } #[bench] fn bench_insert_80000_txs(b: &mut Bencher) { let storage = ImplStorage::new(Arc::new(MemoryAdapter::new())); let height = 2077; let txs = (0..80000) .map(|_| mock_signed_tx(Hash::digest(get_random_bytes(10)))) .collect::>(); let mut rt = Runtime::new().unwrap(); b.iter(move || { rt.block_on(storage.insert_transactions(Context::new(), height, txs.clone())) .unwrap() }) } ================================================ FILE: devtools/chain/README.md ================================================ # A simple config set for creating a new chain Address in genesis: Address | Asset(MTT) | PrivKey | Pubkey | --------------------------------------------- | ------------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------- | `muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705` | `0x100000000` | `0x8dfbd3c689308d29c058cce163984a2ae8d5fc5191ce6b1e18bd1d7b95a8c632` | `0x03dbd1dbf3835efb4ec34a360ee671ee1d22425425368edfc5b9ffafc812e86200` | ================================================ FILE: devtools/chain/config.toml ================================================ # crypto privkey = 
"0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a" # db config data_path = "./devtools/chain/data" [graphql] listening_address = "127.0.0.1:8000" graphql_uri = "/graphql" graphiql_uri = "/graphiql" workers = 0 # if 0, uses number of available logical cpu as threads count. maxconn = 25000 max_payload_size = 1048576 # enable_dump_profile = false # [graphql.tls] # private_key_file_path = "key.pem" # certificate_chain_file_path = "cert.pem" [network] listening_address = "0.0.0.0:1337" rpc_timeout = 10 [consensus] overlord_gap = 5 sync_txs_chunk_size = 5000 [[network.bootstraps]] peer_id = "QmTEJkB5QKWsEq37huryZZfVvqBKb54sHnKn9TQcA6j3n9" address = "0.0.0.0:1888" [mempool] pool_size = 20000 broadcast_txs_size = 200 broadcast_txs_interval = 200 [executor] light = false triedb_cache_size = 2000 [logger] filter = "info" log_to_console = true console_show_file_and_line = false log_path = "logs/" log_to_file = true file_size_limit = 1073741824 # 1 GiB metrics = true # you can specify log level for modules with config below # modules_level = { "overlord::state::process" = "debug", core_consensus = "error" } [rocksdb] max_open_files = 64 # [apm] # service_name = "muta" # tracing_address = "127.0.0.1:6831" # tracing_batch_size = 50 ================================================ FILE: devtools/chain/genesis.toml ================================================ timestamp = 0 prevhash = "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" [[services]] name = "asset" payload = ''' { "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" } ''' [[services]] name = "metadata" payload = ''' { "chain_id": "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", "bech32_address_hrp": "muta", "common_ref": "0x6c747758636859487038", "timeout_gap": 20, "cycles_limit": 4294967295, "cycles_price": 1, "interval": 
3000, "verifier_list": [ { "bls_pub_key": "0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724", "pub_key": "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60", "address": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705", "propose_weight": 1, "vote_weight": 1 } ], "propose_ratio": 15, "prevote_ratio": 10, "precommit_ratio": 10, "brake_ratio": 7, "tx_num_limit": 20000, "max_tx_size": 1024 } ''' ================================================ FILE: devtools/docker-build/Dockerfile ================================================ FROM ubuntu:18.04 LABEL maintainer="yejiayu.fe@gmail.com" COPY target/release/examples/muta-chain . COPY devtools/chain/config.toml devtools/chain/config.toml COPY devtools/chain/genesis.toml devtools/chain/genesis.toml EXPOSE 1337 8000 CMD ["./muta-chain"] ================================================ FILE: devtools/docker-build/Dockerfile.build-env ================================================ FROM ubuntu:18.04 LABEL maintainer="yejiayu.fe@gmail.com" RUN set -eux; \ apt-get update; \ apt-get install -y --no-install-recommends \ ca-certificates \ gcc \ libc6-dev \ wget \ git \ build-essential \ pkg-config \ openssl \ libssl-dev \ libclang-dev clang; \ rm -rf /var/lib/apt/lists/* ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH \ RUSTUP_VERSION=1.21.1 \ RUSTUP_SHA256=ad1f8b5199b3b9e231472ed7aa08d2e5d1d539198a15c5b1e53c746aad81d27b \ RUST_ARCH=x86_64-unknown-linux-gnu RUN set -eux; \ url="https://static.rust-lang.org/rustup/archive/${RUSTUP_VERSION}/${RUST_ARCH}/rustup-init"; \ wget "$url"; \ echo "${RUSTUP_SHA256} *rustup-init" | sha256sum -c -; \ chmod +x rustup-init ENV RUST_VERSION=1.41.0 RUN set -eux; \ ./rustup-init -y --no-modify-path --default-toolchain $RUST_VERSION; \ rm rustup-init; \ chmod -R a+w $RUSTUP_HOME 
$CARGO_HOME; \ rustup --version; \ cargo --version; \ rustc --version; \ openssl version; ================================================ FILE: devtools/docker-build/Dockerfile.e2e-env ================================================ FROM mutadev/muta-build-env:v0.3.0 LABEL maintainer="yejiayu.fe@gmail.com" RUN set -eux; \ apt-get update; \ apt-get install -y --no-install-recommends \ curl \ curl -sL https://deb.nodesource.com/setup_12.x | bash -; \ apt-get install -y nodejs; \ rm -rf /var/lib/apt/lists/* RUN npm i yarn -g; ================================================ FILE: devtools/keypair/Cargo.toml ================================================ [package] name = "muta-keypair" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" include = ["Cargo.toml", "src/*"] repository = "https://github.com/nervosnetwork/muta/tree/master/devtools/keypair" license = "MIT" description = "A tool to generate keypairs for muta framework" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] clap = { version = "2.33", features = ["yaml"] } hex = "0.4" ophelia-bls-amcl = "0.3" ophelia = "0.3" protocol = { path = "../../protocol", package = "muta-protocol" } rand = "0.7" serde = {version = "1.0", features = ["derive"]} serde_json = "1.0" tentacle-secio = { version = "0.1", features = [ "molc" ] } ================================================ FILE: devtools/keypair/src/keypair.yml ================================================ name: muta_keypair version: "0.1" about: a tool to generate keypairs for muta author: Muta Dev args: - number: help: Number of keypairs to generate short: n long: number default_value: "4" - private_keys: help: Generate keypairs from a given private key vector short: p long: private_keys multiple: true takes_value: true - common_ref: help: common_ref for bls signature, it will be randomly generated if not passed short: c long: common_ref default_value: "" 
================================================ FILE: devtools/keypair/src/main.rs ================================================

#[macro_use]
extern crate clap;

use std::convert::TryFrom;
use std::default::Default;

use clap::App;
use ophelia::{PublicKey, ToBlsPublicKey};
use ophelia_bls_amcl::BlsPrivateKey;
use protocol::types::{Address, Hash};
use protocol::{Bytes, BytesMut};
use rand::distributions::Alphanumeric;
use rand::Rng;
use rand::{rngs::OsRng, RngCore};
use serde::Serialize;
use tentacle_secio::SecioKeyPair;

/// One generated account: hex-encoded secp256k1 keypair plus the derived
/// bech32 address, libp2p peer id and BLS public key.
#[derive(Default, Serialize, Debug)]
struct Keypair {
    pub index:          usize,
    pub private_key:    String,
    pub public_key:     String,
    pub address:        String,
    pub peer_id:        String,
    pub bls_public_key: String,
}

/// Tool output: the BLS `common_ref` plus all generated keypairs.
#[derive(Default, Serialize, Debug)]
struct Output {
    pub common_ref: String,
    // NOTE(review): extraction stripped the generic parameter here; restored
    // as `Vec<Keypair>`, the only type pushed into this vector below.
    pub keypairs:   Vec<Keypair>,
}

/// Generate `number` keypairs (optionally seeded from the `private_keys`
/// CLI argument) and print them to stdout as pretty JSON.
///
/// Panics if more private keys are supplied than `number`, or if any
/// hex/crypto decoding step fails — acceptable for a dev CLI tool.
#[allow(clippy::needless_range_loop)]
pub fn main() {
    let yml = load_yaml!("keypair.yml");
    let m = App::from(yml).get_matches();

    let number = value_t!(m, "number", usize).unwrap();
    let priv_keys = values_t!(m.values_of("private_keys"), String).unwrap_or_default();
    let len = priv_keys.len();
    if len > number {
        panic!("private keys length can not be larger than number");
    }

    // common_ref: use the provided hex string, or a random 10-char
    // alphanumeric string when none was passed.
    let common_ref_encoded = value_t!(m, "common_ref", String).unwrap();
    let common_ref = if common_ref_encoded.is_empty() {
        rand::thread_rng()
            .sample_iter(&Alphanumeric)
            .take(10)
            .collect::<String>()
    } else {
        String::from_utf8(
            hex::decode(common_ref_encoded).expect("common_ref should be a hex string"),
        )
        .expect("common_ref should be a valid utf8 string")
    };

    let mut output = Output {
        common_ref: add_0x(hex::encode(common_ref.clone())),
        keypairs:   vec![],
    };

    for i in 0..number {
        let mut k = Keypair::default();

        // Use the caller-provided private key when available; otherwise
        // derive one from 32 OS-random bytes hashed with Keccak.
        let seckey = if i < len {
            Bytes::from(hex::decode(&priv_keys[i]).expect("decode hex private key"))
        } else {
            let mut seed = [0u8; 32];
            OsRng.fill_bytes(&mut seed);
            Hash::digest(BytesMut::from(seed.as_ref()).freeze()).as_bytes()
        };

        let keypair =
            SecioKeyPair::secp256k1_raw_key(seckey.as_ref()).expect("secp256k1 keypair");
        let pubkey = keypair.to_public_key().inner();
        let address = Address::from_pubkey_bytes(pubkey.clone()).expect("address");

        k.private_key = add_0x(hex::encode(seckey.as_ref()));
        k.public_key = add_0x(hex::encode(pubkey));
        k.peer_id = keypair.to_public_key().peer_id().to_base58();
        k.address = address.to_string();

        // The BLS secret key is 48 bytes: left-pad the 32-byte secp256k1
        // secret with 16 zero bytes.
        let priv_key =
            BlsPrivateKey::try_from([&[0u8; 16], seckey.as_ref()].concat().as_ref()).unwrap();
        let pub_key = priv_key.pub_key(&common_ref.as_str().into());
        k.bls_public_key = add_0x(hex::encode(pub_key.to_bytes()));

        // index is 1-based for human-friendly output.
        k.index = i + 1;
        output.keypairs.push(k);
    }

    let output_str = serde_json::to_string_pretty(&output).unwrap();
    println!("{}", output_str);
}

/// Prefix a hex string with "0x".
fn add_0x(s: String) -> String {
    "0x".to_owned() + &s
}

================================================ FILE: devtools/kube/deploy-chaos-crd-template.yml ================================================
apiVersion: nervos.org/v1alpha1
kind: Muta
metadata:
  name: chaos-${REPO_NAME}-${VERSION}
  namespace: mutadev # Only supports deployment to the mutadev namespace
spec:
  image: mutadev/muta:latest # docker image
  resources:
    limits:
      cpu: 1100m
      memory: 3Gi
      ephemeral-storage: 5Gi
    requests:
      cpu: 1100m
      memory: 3Gi
      ephemeral-storage: 5Gi
  chaos: # all / stable-network-corrupt / stable-network-delay / stable-network-duplicate / stable-network-loss / stable-network-partition / stable-node-failure / stable-node-kill
    - all
  size: 4 # Node numbers
  persistent: false # Persistent data
  config: # see https://github.com/nervosnetwork/muta/blob/master/devtools/chain/config.toml
    data_path: "/app/data"
    graphql:
      listening_address: "0.0.0.0:8000"
      graphql_uri: "/graphql"
      graphiql_uri: "/"
      workers: 0 # if 0, uses number of available logical cpu as threads count.
maxconn: 25000 network: listening_address: "0.0.0.0:1337" rpc_timeout: 10 mempool: pool_size: 20000 broadcast_txs_size: 200 broadcast_txs_interval: 200 executor: light: false logger: filter: "info" log_to_console: true console_show_file_and_line: false log_path: "logs/" log_to_file: true metrics: true modules_level: # "overlord::state::process": "debug" # "core_consensus": "error" genesis: # https://github.com/nervosnetwork/muta/blob/master/devtools/chain/genesis.toml prevhash: "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" metadata: chain_id: "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036" timeout_gap: 20 cycles_limit: 99999999 cycles_price: 1 interval: 3000 propose_ratio: 15 prevote_ratio: 15 precommit_ratio: 10 brake_ratio: 3 tx_num_limit: 20000 max_tx_size: 1073741824 services: - name: asset payload: '{ "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" }' ================================================ FILE: docs/_config.yml ================================================ theme: jekyll-theme-minimal ================================================ FILE: docs/build/gql_api.sh ================================================ #!/usr/bin/env bash BASEDIR=$(dirname "$0") function check() { if ! type "$1" > /dev/null; then echo "$1 is required, install first $2" echo $2 exit 1 fi } check node check graphql-markdown "run npm install graphql-markdown --global" endpoint="http://127.0.0.1:8000/graphql" if [ ! 
-z "$1" ]; then endpoint=$1 fi #res_code=$(curl --write-out %{http_code} --silent --output /dev/null \ # -X POST -d 'query q{\n getBlock(height:"0x00"){\n hash \n }\n}' \ # $endpoint) res_code=$(curl $endpoint --write-out %{http_code} --silent --output /dev/null -H 'content-type: application/json' --data-binary '{"operationName":"q","variables":{},"query":"query q {\n getBlock(height: \"0x00\") {\n hash\n }\n}\n"}') if [ $res_code -ne 200 ]; then echo "$endpoint GraphQL endpoint request failed" echo "start API server at first or use the custom endpoint make doc-api http://x.x.x.x:8000/graphql" exit 1; fi prologue=" >[GraphQL](https://graphql.org) is a query language for APIs and a runtime for fulfilling those queries with your existing data. GraphQL provides a complete and understandable description of the data in your API, gives clients the power to ask for exactly what they need and nothing more, makes it easier to evolve APIs over time, and enables powerful developer tools. Muta has embeded a [Graph**i**QL](https://github.com/graphql/graphiql) for checking and calling API. Started a the Muta node, and then try open http://127.0.0.1:8000/graphiql in the browser. " graphql-markdown $endpoint --title "Muta GraphQL API" --prologue "$prologue" > $BASEDIR/../graphql_api.md sed -i -E 's///g' $BASEDIR/../graphql_api.md ================================================ FILE: docs/graphql_api.md ================================================ # Muta GraphQL API >[GraphQL](https://graphql.org) is a query language for APIs and a runtime for fulfilling those queries with your existing data. GraphQL provides a complete and understandable description of the data in your API, gives clients the power to ask for exactly what they need and nothing more, makes it easier to evolve APIs over time, and enables powerful developer tools. Muta has embeded a [Graph**i**QL](https://github.com/graphql/graphiql) for checking and calling API. 
Start a Muta node, and then try opening http://127.0.0.1:8000/graphiql in the browser.
Table of Contents * [Query](#query) * [Mutation](#mutation) * [Objects](#objects) * [Block](#block) * [BlockHeader](#blockheader) * [Event](#event) * [Proof](#proof) * [Receipt](#receipt) * [ReceiptResponse](#receiptresponse) * [ServiceResponse](#serviceresponse) * [SignedTransaction](#signedtransaction) * [Validator](#validator) * [Inputs](#inputs) * [InputRawTransaction](#inputrawtransaction) * [InputTransactionEncryption](#inputtransactionencryption) * [Scalars](#scalars) * [Address](#address) * [Boolean](#boolean) * [Bytes](#bytes) * [Hash](#hash) * [Int](#int) * [String](#string) * [Uint64](#uint64)
## Query
Field Argument Type Description
getBlock Block Get the block
height Uint64
getTransaction SignedTransaction Get the transaction by hash
txHash Hash!
getReceipt Receipt Get the receipt by transaction hash
txHash Hash!
queryService ServiceResponse! query service
height Uint64
cyclesLimit Uint64
cyclesPrice Uint64
caller Address!
serviceName String!
method String!
payload String!
## Mutation
Field Argument Type Description
sendTransaction Hash! send transaction
inputRaw InputRawTransaction!
inputEncryption InputTransactionEncryption!
unsafeSendTransaction ⚠️ Hash!

⚠️ DEPRECATED

DON'T use it in production! This is just for development.
inputRaw InputRawTransaction!
inputPrivkey Bytes!
## Objects ### Block Block is a single digital record created within a blockchain. Each block contains a record of the previous Block, and when linked together these become the “chain”. A block is always composed of a header and a body.
Field Argument Type Description
header BlockHeader! The header section of a block
orderedTxHashes [Hash!]! The body section of a block
hash Hash! Hash of the block
### BlockHeader A block header is like the metadata of a block.
Field Argument Type Description
chainId Hash! Identifier of a chain in order to prevent replay attacks across channels
height Uint64! block height
execHeight Uint64! The height to which the block has been executed
prevHash Hash! The hash of the serialized previous block
timestamp Uint64! A timestamp that records when the block was created
orderRoot Hash! The merkle root of ordered transactions
orderSignedTransactionsHash Hash! The hash of ordered signed transactions
confirmRoot [Hash!]! The merkle roots of all the confirms
stateRoot Hash! The merkle root of state root
receiptRoot [Hash!]! The merkle roots of receipts
cyclesUsed [Uint64!]! The sum of all transactions costs
proposer Address! The address of the proposer who packed the block
proof Proof!
validatorVersion Uint64! The version of the validator set, designed for cross-chain use
validators [Validator!]!
### Event
Field Argument Type Description
service String!
name String!
data String!
### Proof The verifier of the block header proved
Field Argument Type Description
height Uint64!
round Uint64!
blockHash Hash!
signature Bytes!
bitmap Bytes!
### Receipt
Field Argument Type Description
stateRoot Hash!
height Uint64!
txHash Hash!
cyclesUsed Uint64!
events [Event!]!
response ReceiptResponse!
### ReceiptResponse
Field Argument Type Description
serviceName String!
method String!
response ServiceResponse!
### ServiceResponse
Field Argument Type Description
code Uint64!
succeedData String!
errorMessage String!
### SignedTransaction
Field Argument Type Description
chainId Hash!
cyclesLimit Uint64!
cyclesPrice Uint64!
nonce Hash!
timeout Uint64!
sender Address!
serviceName String!
method String!
payload String!
txHash Hash!
pubkey Bytes!
signature Bytes!
### Validator Validator address set
Field Argument Type Description
pubkey Bytes!
proposeWeight Int!
voteWeight Int!
## Inputs ### InputRawTransaction There are many types of transactions in Muta. A transaction often requires computing resources or writes data to the chain; these resources are valuable, so we need to pay some tokens for them. InputRawTransaction describes the information above.
Field Type Description
chainId Hash! Identifier of the chain.
cyclesLimit Uint64! Much like the gas limit in Ethereum, this describes the highest fee you are willing to pay for the transaction
cyclesPrice Uint64!
nonce Hash! Every transaction has its own id; unlike Ethereum's nonce, the nonce in Muta is a hash
timeout Uint64! For security and performance reasons, Muta will only process a transaction within a limited period of time; the `timeout` should satisfy `timeout > current_block_height` and `timeout < current_block_height + timeout_gap`, where `timeout_gap` is generally equal to 20.
serviceName String!
method String!
payload String!
sender Address!
### InputTransactionEncryption Signature of the transaction
Field Type Description
txHash Hash! The digest of the transaction
pubkey Bytes! The public key of transfer
signature Bytes! The signature of the transaction
## Scalars ### Address 20 bytes of account address ### Boolean ### Bytes Bytes corresponding hex string. ### Hash The output digest of Keccak hash function ### Int ### String ### Uint64 Uint64 ================================================ FILE: docs/how_to_deploy_a_core_crate.md ================================================ # How to develop a core crate. > This document will show you how to develop a core crate. Now, take a look at an example, we are going to develop a `storage` crate, which is used to store data from the blockchain. ## Step 0 Define the trait of the crate. ```rust // muta/protocol/src/traits/storage.rs use async_trait::async_trait; #[async_trait] pub trait Storage: Send + Sync { async fn insert_transactions(&self, signed_txs: Vec) -> ProtocolResult<()>; async fn get_transaction_by_hash( &self, tx_hash: Hash, ) -> ProtocolResult>; } ``` **Starting with the first line of code, you will first see a macro: `#[async_trait]`:** [`#[async_trait]`](https://crates.io/crates/async-trait) is a macro that allows you to define `async fn` in a `trait`. In most cases you should use async to define your `fn`. **Next is the second line, a trait declaration:** Here we constrain this trait to `Send` + `Sync`,if you don't understand the semantics of `Send` and `Sync` you can get knowledge from [the official documentation](https://doc.rust-lang.org/std/marker/index.html). In short, this constraint is necessary because our runtime is always asynchronous, and you must ensure that your crate satisfies the constraints under asynchronous conditions. **Define the function signature:** You only need to pay attention to two points: 1. Always use `&self` and handle the internal variables yourself. 2. The return value is uniformly used with `ProtocolResult`, `ProtocolResult` is wrap to `Result `, and `ProtocolError` is a global error type. 
## Step 1 The adapter that defines crate Earlier we mentioned that the role of `storage` is to store blockchain data, but it does not care where the final data is stored. It can be memory, network database, hard drive, etc. The existence of a `StorageAdapter` is decoupled persistence logic that specifies a set of key-value database interfaces that implement various `StorageAdapters` to enforce data persistence requirements in a variety of situations. ```rust // muta/protocol/src/traits/storage.rs use async_trait::async_trait; use bytes::Bytes; #[async_trait] pub trait Storage: Send + Sync { async fn insert_transactions(&self, signed_txs: Vec) -> ProtocolResult<()>; async fn get_transaction_by_hash( &self, tx_hash: Hash, ) -> ProtocolResult>; } #[async_trait] pub trait StorageAdapter: Send + Sync { async fn get(&self, c: StorageCategory, key: Bytes) -> ProtocolResult>; async fn get_batch( &self, c: StorageCategory, keys: Vec, ) -> ProtocolResult>>; async fn insert(&self, c: StorageCategory, key: Bytes, value: Bytes) -> ProtocolResult<()>; async fn insert_batch( &self, c: StorageCategory, keys: Vec, values: Vec, ) -> ProtocolResult<()>; async fn contains(&self, c: StorageCategory, key: Bytes) -> ProtocolResult; async fn remove(&self, c: StorageCategory, key: Bytes) -> ProtocolResult<()>; async fn remove_batch(&self, c: StorageCategory, keys: Vec) -> ProtocolResult<()>; } ``` Finally, don't forget to add the `pub trait Storage` constraint to the `Storage`. Its purpose is to make you remember to always rely on an adapter. ## Step 3 Implement storage crate See: https://github.com/nervosnetwork/muta/blob/master/core/storage/src/lib.rs Note: 1. Core crate does not allow dependencies on other cores crate. 2. The adapter can rely on other core crate. ================================================ FILE: docs/layout.md ================================================ ## Layout ```sh . 
├── common │   ├── channel │   ├── config-parser │   ├── crypto │   ├── logger │   ├── merkle │   └── metrics │   └── pubsub ├── core │   ├── api │   ├── consensus │   ├── database │   ├── executor │   ├── network │   ├── storage │   └── mempool ├── devtools │   └── ci ├── docs │   └── menu.md ├── protocol │   ├── codec │   ├── traits │   └── types ├── src    └── main.rs ``` A brief description: - `common` Contains utilities for muta-chain. - `core` Contains implementations of module traits. - `devtools` Contains scripts and configurations for better use of the this repository. - `docs` for project documentations. - `protocol` Contains types, serialization, core traits for muta-chain. - `src` Contains main packages ================================================ FILE: docs/resources.md ================================================ # Resources ## SDK - [muta-sdk-java](https://dl.bintray.com/lycrushamster/Muta-Java-SDK/org/nervos/muta-sdk-java/1.4/) - The Java SDK - [muta-sdk-js](https://www.npmjs.com/package/@mutadev/muta-sdk/v/0.2.0-alpha.1) - The JavaScript SDK ## Others - [muta-cli](https://github.com/nervosnetwork/muta-cli) - A command-line util for new to Muta - [muta-bench](https://github.com/nervosnetwork/muta-benchmark/tree/v0.1.12) - A transfer-based performance test script - [hermit-purple](https://github.com/homura/hermit-purple-server) - Cache server for Muta ================================================ FILE: examples/byzantine_node.rs ================================================ use std::fs; use byzantine::config::{Config, Generators}; use byzantine::default_start::start; use protocol::types::Genesis; fn main() { let config_path = std::env::var("CONFIG").unwrap_or_else(|_| "devtools/chain/config.toml".to_owned()); let genesis_path = std::env::var("GENESIS").unwrap_or_else(|_| "devtools/chain/genesis.toml".to_owned()); let generators_path = std::env::var("GENERATORS").unwrap_or_else(|_| "byzantine/generators.toml".to_owned()); let config: 
Config = common_config_parser::parse(&config_path).expect("parse config failed"); let genesis_toml = fs::read_to_string(&genesis_path).expect("read genesis.toml failed"); let genesis: Genesis = toml::from_str(&genesis_toml).expect("parse genesis failed"); let generators_toml = fs::read_to_string(&generators_path).expect("read generators.toml failed"); let generators: Generators = toml::from_str(&generators_toml).expect("parse generators failed"); let mut rt = tokio::runtime::Runtime::new().expect("new tokio runtime"); let local = tokio::task::LocalSet::new(); local .block_on( &mut rt, async move { start(config, genesis, generators).await }, ) .expect("start failed"); } ================================================ FILE: examples/config-1.toml ================================================ data_path = "./devtools/chain/data/1" privkey = "0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a" [network] listening_address = "0.0.0.0:1337" rpc_timeout = 10 [graphql] graphiql_uri = "/graphiql" listening_address = "0.0.0.0:8000" graphql_uri = "/graphql" workers = 0 # if 0, uses number of available logical cpu as threads count. 
maxconn = 25000 max_payload_size = 1048576 [executor] light = false triedb_cache_size = 2000 [mempool] broadcast_txs_size = 200 broadcast_txs_interval = 200 pool_size = 1000 [logger] metrics = false log_path = "./devtools/chain/logs/1" log_to_console = true filter = "info" log_to_file = true console_show_file_and_line = false file_size_limit = 1073741824 # 1 GiB [rocksdb] max_open_files = 64 ================================================ FILE: examples/config-2.toml ================================================ data_path = "./devtools/chain/data/2" privkey = "0x8dfbd3c689308d29c058cce163984a2ae8d5fc5191ce6b1e18bd1d7b95a8c632" [network] listening_address = "0.0.0.0:1338" rpc_timeout = 10 [[network.bootstraps]] peer_id = "QmTEJkB5QKWsEq37huryZZfVvqBKb54sHnKn9TQcA6j3n9" address = "127.0.0.1:1337" # Replace it with your IP [graphql] graphiql_uri = "/graphiql" listening_address = "0.0.0.0:8001" graphql_uri = "/graphql" workers = 0 # if 0, uses number of available logical cpu as threads count. 
maxconn = 25000 max_payload_size = 1048576 [executor] light = false triedb_cache_size = 2000 [mempool] broadcast_txs_size = 200 broadcast_txs_interval = 200 pool_size = 1000 [logger] metrics = false log_path = "./devtools/chain/logs/2" log_to_console = true filter = "info" log_to_file = true console_show_file_and_line = false file_size_limit = 1073741824 # 1 GiB [rocksdb] max_open_files = 64 ================================================ FILE: examples/config-3.toml ================================================ data_path = "./devtools/chain/data/3" privkey = "0xfc659f0ed09a4ba0d2d1836af7520d1a050a7739d598dc98517bbbe7a2e38124" [network] listening_address = "0.0.0.0:1339" rpc_timeout = 10 [[network.bootstraps]] peer_id = "QmTEJkB5QKWsEq37huryZZfVvqBKb54sHnKn9TQcA6j3n9" address = "127.0.0.1:1337" # Replace it with your IP [graphql] graphiql_uri = "/graphiql" listening_address = "0.0.0.0:8002" graphql_uri = "/graphql" workers = 0 # if 0, uses number of available logical cpu as threads count. 
maxconn = 25000 max_payload_size = 1048576 [executor] light = false triedb_cache_size = 2000 [mempool] broadcast_txs_size = 200 broadcast_txs_interval = 200 pool_size = 1000 [logger] metrics = false log_path = "./devtools/chain/logs/3" log_to_console = true filter = "info" log_to_file = true console_show_file_and_line = false file_size_limit = 1073741824 # 1 GiB [rocksdb] max_open_files = 64 ================================================ FILE: examples/config-4.toml ================================================ data_path = "./devtools/chain/data/4" privkey = "0x7c01d6539419cffc78ab0779dabe88fad3f70c20ef47a562ac4ba5b7bd704b8e" [network] listening_address = "0.0.0.0:1340" rpc_timeout = 10 [[network.bootstraps]] peer_id = "QmTEJkB5QKWsEq37huryZZfVvqBKb54sHnKn9TQcA6j3n9" address = "127.0.0.1:1337" # Replace it with your IP [graphql] graphiql_uri = "/graphiql" listening_address = "0.0.0.0:8004" graphql_uri = "/graphql" workers = 0 # if 0, uses number of available logical cpu as threads count. 
maxconn = 25000 max_payload_size = 1048576 [executor] light = false triedb_cache_size = 2000 [mempool] broadcast_txs_size = 200 broadcast_txs_interval = 200 pool_size = 1000 [logger] metrics = false log_path = "./devtools/chain/logs/4" log_to_console = true filter = "info" log_to_file = true console_show_file_and_line = false file_size_limit = 1073741824 # 1 GiB [rocksdb] max_open_files = 64 ================================================ FILE: examples/genesis.toml ================================================ timestamp = 0 prevhash = "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" [[services]] name = "asset" payload = ''' { "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" } ''' # private keys: # 0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a # 0x8dfbd3c689308d29c058cce163984a2ae8d5fc5191ce6b1e18bd1d7b95a8c632 # 0xfc659f0ed09a4ba0d2d1836af7520d1a050a7739d598dc98517bbbe7a2e38124 # 0x7c01d6539419cffc78ab0779dabe88fad3f70c20ef47a562ac4ba5b7bd704b8e [[services]] name = "metadata" payload = ''' { "chain_id": "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036", "bech32_address_hrp": "muta", "common_ref": "0x6c747758636859487038", "timeout_gap": 20, "cycles_limit": 999999999999, "cycles_price": 1, "interval": 3000, "verifier_list": [ { "bls_pub_key": "0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724", "pub_key": "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60", "address": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705", "propose_weight": 1, "vote_weight": 1 }, { "bls_pub_key": 
"0x0418e16bd67ce0b58a575f506967706be733c96feef19a06bb37d510000d89905f2f61b7da4d831cb1bb01e2f99833362602a0a252dfd1e95c75c1eadb0db220e3722c9a077b730e7f6cec5f4a55bfc9a4d88db3e6c27684aa8335456824070501", "pub_key": "0x03dbd1dbf3835efb4ec34a360ee671ee1d22425425368edfc5b9ffafc812e86200", "address": "muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p", "propose_weight": 1, "vote_weight": 1 }, { "bls_pub_key": "0x040944276f414c46330227f2c0c5a998aba3d400ed19cfc2d31d3e7fcc442ce9f91ea86e172dc3c1b6cedc364bd52ba1cf074529e52337cd80ab32a196a3d42ab46eee25120b44fdd2b5c4268bf3b84c72d068ea83d0530a5461dc30b6a63a60e9", "pub_key": "0x03cba4ae147eb24891d78c9527798577419b7db913b4b03ba548c28f40c5841166", "address": "muta1h99h6f54vytatam3ckftrmvcdpn4jlmnwm6hl0", "propose_weight": 1, "vote_weight": 1 }, { "bls_pub_key": "0x041342e9a35278b298a67006cd98d663053e3f7eb72a08ffe9835074e430b2112a866c1c8d981edcd793cb16d459fc952b0464007d876355eea671e74727588bae69740c6a0b49d8142b7b0821a78acd34b4d8012b9ef69444a476e03d5fea5330", "pub_key": "0x0245a0c291f56c2c5751db1c0bf1ed986e703d29a0fe023df770fe92c7c2347316", "address": "muta16xukzz73l5r6vulk9q697tave8c5mfu33mwud6", "propose_weight": 1, "vote_weight": 1 } ], "propose_ratio": 15, "prevote_ratio": 10, "precommit_ratio": 10, "brake_ratio": 7, "tx_num_limit": 20000, "max_tx_size": 1024 } ''' ================================================ FILE: examples/muta-chain.rs ================================================ use derive_more::{Display, From}; use protocol::traits::{SDKFactory, Service, ServiceMapping, ServiceSDK}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; use asset::{AssetService, ASSET_SERVICE_NAME}; use authorization::{AuthorizationService, AUTHORIZATION_SERVICE_NAME}; use metadata::{MetadataService, METADATA_SERVICE_NAME}; use multi_signature::{MultiSignatureService, MULTI_SIG_SERVICE_NAME}; use util::{UtilService, UTIL_SERVICE_NAME}; struct DefaultServiceMapping; impl ServiceMapping for DefaultServiceMapping { fn get_service>( 
&self, name: &str, factory: &Factory, ) -> ProtocolResult> { let sdk = factory.get_sdk(name)?; let service = match name { AUTHORIZATION_SERVICE_NAME => { let multi_sig_sdk = factory.get_sdk("multi_signature")?; Box::new(AuthorizationService::new( sdk, MultiSignatureService::new(multi_sig_sdk), )) as Box } ASSET_SERVICE_NAME => Box::new(AssetService::new(sdk)) as Box, METADATA_SERVICE_NAME => Box::new(MetadataService::new(sdk)) as Box, MULTI_SIG_SERVICE_NAME => Box::new(MultiSignatureService::new(sdk)) as Box, UTIL_SERVICE_NAME => Box::new(UtilService::new(sdk)) as Box, _ => { return Err(MappingError::NotFoundService { service: name.to_owned(), } .into()); } }; Ok(service) } fn list_service_name(&self) -> Vec { vec![ ASSET_SERVICE_NAME.to_owned(), AUTHORIZATION_SERVICE_NAME.to_owned(), METADATA_SERVICE_NAME.to_owned(), MULTI_SIG_SERVICE_NAME.to_owned(), UTIL_SERVICE_NAME.to_owned(), ] } } pub fn main() { muta::run( DefaultServiceMapping, "muta-chain", "v0.2.1", "Muta Dev ", "./devtools/chain/config.toml", "./devtools/chain/genesis.toml", None, ) } #[derive(Debug, Display, From)] pub enum MappingError { #[display(fmt = "service {:?} was not found", service)] NotFoundService { service: String }, } impl std::error::Error for MappingError {} impl From for ProtocolError { fn from(err: MappingError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Service, Box::new(err)) } } ================================================ FILE: framework/Cargo.toml ================================================ [package] name = "framework" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] common-apm = { path = "../common/apm" } protocol = { path = "../protocol", package = "muta-protocol" } asset = { path = "../built-in-services/asset"} metadata = { path = "../built-in-services/metadata"} util = { path = 
"../built-in-services/util"} hasher = { version = "0.1", features = ['hash-keccak'] } cita_trie = "2.0" bytes = "0.5" derive_more = "0.99" rocksdb = "0.14" lazy_static = "1.4" byteorder = "1.3" rlp = "0.4" futures = "0.3" json = "0.12" hex = "0.4" serde_json = "1.0" log = "0.4" rayon = "1.3" lru-cache = "0.1" lru = "0.6" parking_lot = "0.11" rand = { version = "0.7", features = ["small_rng"]} [dev-dependencies] async-trait = "0.1" toml = "0.5" binding-macro = { path = "../binding-macro" } serde = { version = "1.0", features = ["derive"] } muta-codec-derive = "0.2" ================================================ FILE: framework/src/binding/mod.rs ================================================ #[cfg(test)] mod tests; pub mod sdk; pub mod state; pub mod store; ================================================ FILE: framework/src/binding/sdk/chain_querier.rs ================================================ use std::sync::Arc; use derive_more::{Display, From}; use futures::executor::block_on; use protocol::traits::{ChainQuerier, Context, Storage}; use protocol::types::{Block, Hash, Receipt, SignedTransaction}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; pub struct DefaultChainQuerier { storage: Arc, } impl DefaultChainQuerier { pub fn new(storage: Arc) -> Self { Self { storage } } } impl ChainQuerier for DefaultChainQuerier { fn get_transaction_by_hash(&self, tx_hash: &Hash) -> ProtocolResult> { let ret = block_on( self.storage .get_transaction_by_hash(Context::new(), &tx_hash), ) .map_err(|_| ChainQueryError::AsyncStorage)?; Ok(ret) } fn get_block_by_height(&self, height: Option) -> ProtocolResult> { if let Some(u) = height { let ret = block_on(self.storage.get_block(Context::new(), u)) .map_err(|_| ChainQueryError::AsyncStorage)?; Ok(ret) } else { let ret = block_on(self.storage.get_latest_block(Context::new())) .map_err(|_| ChainQueryError::AsyncStorage)?; Ok(Some(ret)) } } fn get_receipt_by_hash(&self, tx_hash: &Hash) -> ProtocolResult> { let 
ret = block_on( self.storage .get_receipt_by_hash(Context::new(), tx_hash.clone()), ) .map_err(|_| ChainQueryError::AsyncStorage)?; Ok(ret) } } #[derive(Debug, Display, From)] pub enum ChainQueryError { #[display(fmt = "get error when call async method of storage")] AsyncStorage, } impl std::error::Error for ChainQueryError {} impl From for ProtocolError { fn from(err: ChainQueryError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Binding, Box::new(err)) } } ================================================ FILE: framework/src/binding/sdk/mod.rs ================================================ mod chain_querier; pub use chain_querier::{ChainQueryError, DefaultChainQuerier}; use std::cell::RefCell; use std::rc::Rc; use cita_trie::DB as TrieDB; use derive_more::Display; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ ChainQuerier, SDKFactory, ServiceSDK, ServiceState, StoreArray, StoreBool, StoreMap, StoreString, StoreUint64, }; use protocol::types::{Address, Block, Hash, Receipt, SignedTransaction}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; use crate::binding::state::GeneralServiceState; use crate::binding::store::{ DefaultStoreArray, DefaultStoreBool, DefaultStoreMap, DefaultStoreString, DefaultStoreUint64, }; use crate::executor::ServiceStateMap; pub struct DefaultSDKFactory { states: Rc>, chain_querier: Rc, } impl DefaultSDKFactory { pub fn new(states: Rc>, chain_querier: Rc) -> Self { DefaultSDKFactory { states, chain_querier, } } } impl SDKFactory, C>> for DefaultSDKFactory { fn get_sdk(&self, name: &str) -> ProtocolResult, C>> { let state = self.states.get(name).ok_or(SDKError::NotFoundService { service: name.to_owned(), })?; Ok(DefaultServiceSDK::new( Rc::clone(state), Rc::clone(&self.chain_querier), )) } } pub struct DefaultServiceSDK { state: Rc>, chain_querier: Rc, } impl DefaultServiceSDK { pub fn new(state: Rc>, chain_querier: Rc) -> Self { Self { state, chain_querier, } } } impl ServiceSDK for 
DefaultServiceSDK { // Alloc or recover a `Map` by` var_name` fn alloc_or_recover_map< K: 'static + Send + FixedCodec + Clone + PartialEq, V: 'static + FixedCodec, >( &mut self, var_name: &str, ) -> Box> { Box::new(DefaultStoreMap::::new( Rc::clone(&self.state), var_name, )) } // Alloc or recover a `Array` by` var_name` fn alloc_or_recover_array( &mut self, var_name: &str, ) -> Box> { Box::new(DefaultStoreArray::::new( Rc::clone(&self.state), var_name, )) } // Alloc or recover a `Uint64` by` var_name` fn alloc_or_recover_uint64(&mut self, var_name: &str) -> Box { Box::new(DefaultStoreUint64::new(Rc::clone(&self.state), var_name)) } // Alloc or recover a `String` by` var_name` fn alloc_or_recover_string(&mut self, var_name: &str) -> Box { Box::new(DefaultStoreString::new(Rc::clone(&self.state), var_name)) } // Alloc or recover a `Bool` by` var_name` fn alloc_or_recover_bool(&mut self, var_name: &str) -> Box { Box::new(DefaultStoreBool::new(Rc::clone(&self.state), var_name)) } // Get a value from the service state by key fn get_value(&self, key: &Key) -> Option { self.state .borrow() .get(key) .unwrap_or_else(|e| panic!("service sdk get value failed: {}", e)) } // Set a value to the service state by key fn set_value(&mut self, key: Key, val: Val) { self.state .borrow_mut() .insert(key, val) .unwrap_or_else(|e| panic!("service sdk set value failed: {}", e)); } // Get a value from the specified address by key fn get_account_value( &self, address: &Address, key: &Key, ) -> Option { self.state .borrow() .get_account_value(address, key) .unwrap_or_else(|e| panic!("service sdk get account value failed: {}", e)) } // Insert a pair of key / value to the specified address fn set_account_value( &mut self, address: &Address, key: Key, val: Val, ) { self.state .borrow_mut() .set_account_value(address, key, val) .unwrap_or_else(|e| panic!("service sdk set account value failed: {}", e)); } // Get a signed transaction by `tx_hash` // if not found on the chain, return None fn 
get_transaction_by_hash(&self, tx_hash: &Hash) -> Option<SignedTransaction> {
        self.chain_querier
            .get_transaction_by_hash(tx_hash)
            .unwrap_or_else(|e| panic!("service sdk get transaction by hash failed: {}", e))
    }

    // Get a block by `height`; returns `None` when it is not on the chain.
    // When `height` is `None`, the latest (executing) block is returned.
    fn get_block_by_height(&self, height: Option<u64>) -> Option<Block> {
        self.chain_querier
            .get_block_by_height(height)
            .unwrap_or_else(|e| panic!("service sdk get block by height failed: {}", e))
    }

    // Get a receipt by `tx_hash`; returns `None` when it is not on the chain.
    fn get_receipt_by_hash(&self, tx_hash: &Hash) -> Option<Receipt> {
        self.chain_querier
            .get_receipt_by_hash(tx_hash)
            .unwrap_or_else(|e| panic!("service sdk get receipt by hash failed: {}", e))
    }
}

/// Errors raised by the service SDK binding layer.
#[derive(Debug, Display)]
pub enum SDKError {
    #[display(fmt = "service {:?} was not found", service)]
    NotFoundService { service: String },
}

impl std::error::Error for SDKError {}

impl From<SDKError> for ProtocolError {
    fn from(err: SDKError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Binding, Box::new(err))
    }
}

// ================================================
// FILE: framework/src/binding/state/mod.rs
// ================================================

mod trie;
pub mod trie_db;

pub use trie::{MPTTrie, MPTTrieError};
pub use trie_db::{RocksTrieDB, RocksTrieDBError};

use std::collections::HashMap;

use bytes::Bytes;
use cita_trie::DB as TrieDB;

use protocol::fixed_codec::FixedCodec;
use protocol::traits::ServiceState;
use protocol::types::{Address, Hash, MerkleRoot};
use protocol::ProtocolResult;

/// Three-tier service state: a per-transaction `cache_map`, a `stash_map`
/// holding data that survived `stash()`, and the MPT as the persisted
/// backing store.  An empty `Bytes` value acts as a deletion tombstone on
/// every tier.
pub struct GeneralServiceState<DB: TrieDB> {
    trie: MPTTrie<DB>,

    // TODO(@yejiayu): The value of HashMap should be changed to Box<dyn Any>
    // to avoid multiple serializations.
    cache_map: HashMap<Bytes, Bytes>,
    stash_map: HashMap<Bytes, Bytes>,
}

impl<DB: TrieDB> GeneralServiceState<DB> {
    pub fn new(trie: MPTTrie<DB>) -> Self {
        Self {
            trie,
            cache_map: HashMap::new(),
            stash_map: HashMap::new(),
        }
    }

    /// Look up `key` in cache, then stash, then the trie.
    /// An empty value on any tier is a tombstone and reads as `None`.
    fn get_bytes_value(&self, key: Bytes) -> ProtocolResult<Option<Bytes>> {
        // Map the tombstone (empty bytes) to `None`.
        fn non_empty(value: Bytes) -> Option<Bytes> {
            if value.is_empty() {
                None
            } else {
                Some(value)
            }
        }

        if let Some(value_bytes) = self.cache_map.get(&key) {
            return Ok(non_empty(value_bytes.clone()));
        }

        if let Some(value_bytes) = self.stash_map.get(&key) {
            return Ok(non_empty(value_bytes.clone()));
        }

        if let Some(value_bytes) = self.trie.get(&key)? {
            return Ok(non_empty(value_bytes));
        }

        Ok(None)
    }
}

impl<DB: TrieDB> ServiceState for GeneralServiceState<DB> {
    fn get<Key: FixedCodec, Ret: FixedCodec>(&self, key: &Key) -> ProtocolResult<Option<Ret>> {
        let encoded_key = key.encode_fixed()?;

        if let Some(value_bytes) = self.get_bytes_value(encoded_key)? {
            let inst = <_>::decode_fixed(value_bytes)?;
            Ok(Some(inst))
        } else {
            Ok(None)
        }
    }

    fn contains<Key: FixedCodec>(&self, key: &Key) -> ProtocolResult<bool> {
        let encoded_key = key.encode_fixed()?;
        Ok(self.get_bytes_value(encoded_key)?.is_some())
    }

    // Insert a pair of key / value.
    // Note: the pair goes into the cache first and is not persisted to the
    // MPT until `commit` is called.
    fn insert<Key: FixedCodec, Value: FixedCodec>(
        &mut self,
        key: Key,
        value: Value,
    ) -> ProtocolResult<()> {
        self.cache_map
            .insert(key.encode_fixed()?, value.encode_fixed()?);
        Ok(())
    }

    fn get_account_value<Key: FixedCodec, Ret: FixedCodec>(
        &self,
        address: &Address,
        key: &Key,
    ) -> ProtocolResult<Option<Ret>> {
        let hash_key = get_address_key(address, key)?;
        self.get(&hash_key)
    }

    fn set_account_value<Key: FixedCodec, Val: FixedCodec>(
        &mut self,
        address: &Address,
        key: Key,
        val: Val,
    ) -> ProtocolResult<()> {
        let hash_key = get_address_key(address, &key)?;
        self.insert(hash_key, val)
    }

    // Roll back all data in the cache.
    fn revert_cache(&mut self) -> ProtocolResult<()> {
        self.cache_map.clear();
        Ok(())
    }

    // Move data from cache to stash.
    fn stash(&mut self) -> ProtocolResult<()> {
        for (k, v) in self.cache_map.drain() {
            self.stash_map.insert(k, v);
        }
        Ok(())
    }

    // Persist data from stash into the MPT and return the new state root.
    fn commit(&mut self) -> ProtocolResult<MerkleRoot> {
        for (key, value) in self.stash_map.drain() {
            self.trie.insert(key, value)?;
        }

        let root = self.trie.commit()?;
        Ok(root)
    }
}

// Derive the per-account storage key: keccak(address || encoded key).
fn get_address_key<Key: FixedCodec>(address: &Address, key: &Key) -> ProtocolResult<Hash> {
    let mut hash_bytes = address.as_bytes().to_vec();
    hash_bytes.extend_from_slice(key.encode_fixed()?.as_ref());

    Ok(Hash::digest(Bytes::from(hash_bytes)))
}

#[cfg(test)]
mod tests {
    use bytes::Bytes;
    use std::sync::Arc;

    use cita_trie::MemoryDB;

    use protocol::traits::ServiceState;

    use super::*;
    use crate::binding::state::MPTTrie;

    #[test]
    fn test_get_trie() {
        let mut state = GeneralServiceState::new(MPTTrie::new(Arc::new(MemoryDB::new(false))));
        let key = Bytes::from("test");
        let value = Bytes::from("test");
        state.insert(key.clone(), value.clone()).unwrap();
        assert_eq!(state.get::<_, Bytes>(&key).unwrap(), Some(value));

        // Inserting empty bytes acts as a delete (tombstone).
        state.insert(key.clone(), Bytes::new()).unwrap();
        assert_eq!(state.get::<_, Bytes>(&key).unwrap().is_some(), false);
        assert_eq!(state.contains(&key).unwrap(), false);
    }
}
cita_trie::{PatriciaTrie, Trie, TrieError, DB as TrieDB}; use derive_more::{Display, From}; use hasher::HasherKeccak; use lazy_static::lazy_static; use protocol::types::{Hash, MerkleRoot}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; lazy_static! { static ref HASHER_INST: Arc = Arc::new(HasherKeccak::new()); } pub struct MPTTrie { root: MerkleRoot, trie: PatriciaTrie, } impl MPTTrie { pub fn new(db: Arc) -> Self { let trie = PatriciaTrie::new(db, Arc::clone(&HASHER_INST)); Self { root: Hash::from_empty(), trie, } } pub fn from(root: MerkleRoot, db: Arc) -> ProtocolResult { let trie = PatriciaTrie::from(db, Arc::clone(&HASHER_INST), &root.as_bytes()) .map_err(MPTTrieError::from)?; Ok(Self { root, trie }) } pub fn get(&self, key: &Bytes) -> ProtocolResult> { Ok(self .trie .get(key) .map_err(MPTTrieError::from)? .map(Bytes::from)) } pub fn contains(&self, key: &Bytes) -> ProtocolResult { Ok(self.trie.contains(key).map_err(MPTTrieError::from)?) } pub fn insert(&mut self, key: Bytes, value: Bytes) -> ProtocolResult<()> { self.trie .insert(key.to_vec(), value.to_vec()) .map_err(MPTTrieError::from)?; Ok(()) } pub fn commit(&mut self) -> ProtocolResult { let root_bytes = self.trie.root().map_err(MPTTrieError::from)?; let root = MerkleRoot::from_bytes(Bytes::from(root_bytes))?; self.root = root; Ok(self.root.clone()) } } #[derive(Debug, Display, From)] pub enum MPTTrieError { #[display(fmt = "{:?}", _0)] Trie(TrieError), } impl std::error::Error for MPTTrieError {} impl From for ProtocolError { fn from(err: MPTTrieError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Binding, Box::new(err)) } } ================================================ FILE: framework/src/binding/state/trie_db.rs ================================================ use std::collections::HashMap; use std::path::Path; use std::sync::Arc; use std::time::Instant; use bytes::Bytes; use derive_more::{Display, From}; use parking_lot::RwLock; use rand::{rngs::SmallRng, Rng, 
SeedableRng}; use rocksdb::{Options, WriteBatch, DB}; use common_apm::metrics::storage::{on_storage_get_state, on_storage_put_state}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; // 49999 is the largest prime number within 50000. const RAND_SEED: u64 = 49999; pub struct RocksTrieDB { light: bool, db: Arc, cache_size: usize, cache: RwLock, Vec>>, } impl RocksTrieDB { pub fn new>( path: P, light: bool, max_open_files: i32, cache_size: usize, ) -> ProtocolResult { let mut opts = Options::default(); opts.create_if_missing(true); opts.create_missing_column_families(true); opts.set_max_open_files(max_open_files); let db = DB::open(&opts, path).map_err(RocksTrieDBError::from)?; // Init HashMap with capacity 2 * cache_size to avoid reallocate memory. Ok(RocksTrieDB { light, db: Arc::new(db), cache: RwLock::new(HashMap::with_capacity(cache_size + cache_size)), cache_size, }) } fn inner_get(&self, key: &[u8]) -> Result>, RocksTrieDBError> { let res = { let cache = self.cache.read(); cache.get(key).cloned() }; if res.is_none() { let inst = Instant::now(); let ret = self.db.get(key).map_err(to_store_err)?; on_storage_get_state(inst.elapsed(), 1i64); if let Some(val) = ret.clone() { let mut cache = self.cache.write(); cache.insert(key.to_owned(), val); } return Ok(ret); } Ok(res) } #[cfg(test)] pub fn insert_batch_without_cache(&self, keys: Vec>, values: Vec>) { let mut _total_size = 0; let mut batch = WriteBatch::default(); assert_eq!(keys.len(), values.len()); for (key, val) in keys.iter().zip(values.iter()) { _total_size += key.len(); _total_size += val.len(); batch.put(key, val); } self.db.write(batch).unwrap(); } #[cfg(test)] pub fn insert_without_cache(&self, key: Vec, value: Vec) { self.db.put(key, value).unwrap(); } #[cfg(test)] pub fn get_without_cache(&self, key: &[u8]) -> Option> { self.db.get(key).unwrap() } #[cfg(test)] pub fn cache(&self) -> HashMap, Vec> { let cache = self.cache.read(); cache.clone() } } impl cita_trie::DB for RocksTrieDB { 
type Error = RocksTrieDBError; fn get(&self, key: &[u8]) -> Result>, Self::Error> { self.inner_get(key) } fn contains(&self, key: &[u8]) -> Result { let res = { let cache = self.cache.read(); cache.contains_key(key) }; if res { Ok(true) } else { if let Some(val) = self.db.get(key).map_err(to_store_err)? { let mut cache = self.cache.write(); cache.insert(key.to_owned(), val); return Ok(true); } Ok(false) } } fn insert(&self, key: Vec, value: Vec) -> Result<(), Self::Error> { let inst = Instant::now(); let size = key.len() + value.len(); { let mut cache = self.cache.write(); cache.insert(key.clone(), value.clone()); } self.db .put(Bytes::from(key), Bytes::from(value)) .map_err(to_store_err)?; on_storage_put_state(inst.elapsed(), size as i64); Ok(()) } fn insert_batch(&self, keys: Vec>, values: Vec>) -> Result<(), Self::Error> { if keys.len() != values.len() { return Err(RocksTrieDBError::BatchLengthMismatch); } let mut total_size = 0; let mut batch = WriteBatch::default(); { let mut cache = self.cache.write(); for (key, val) in keys.iter().zip(values.iter()) { total_size += key.len(); total_size += val.len(); batch.put(key, val); cache.insert(key.clone(), val.clone()); } } let inst = Instant::now(); self.db.write(batch).map_err(to_store_err)?; on_storage_put_state(inst.elapsed(), total_size as i64); Ok(()) } fn remove(&self, key: &[u8]) -> Result<(), Self::Error> { if self.light { { let mut cache = self.cache.write(); cache.remove(key); } self.db.delete(key).map_err(to_store_err)?; } Ok(()) } fn remove_batch(&self, keys: &[Vec]) -> Result<(), Self::Error> { if self.light { let mut batch = WriteBatch::default(); { let mut cache = self.cache.write(); for key in keys { batch.delete(key); cache.remove(key); } } self.db.write(batch).map_err(to_store_err)?; } Ok(()) } fn flush(&self) -> Result<(), Self::Error> { let mut cache = self.cache.write(); let len = cache.len(); if len <= self.cache_size { return Ok(()); } let keys = cache.keys().collect::>(); let remove_list = 
rand_remove_list(keys, len - self.cache_size); for item in remove_list.iter() { cache.remove(item); } Ok(()) } } fn rand_remove_list(keys: Vec<&T>, num: usize) -> Vec { let mut len = keys.len() - 1; let mut idx_list = (0..len).collect::>(); let mut rng = SmallRng::seed_from_u64(RAND_SEED); let mut ret = Vec::new(); for _ in 0..num { let tmp = rng.gen_range(0, len); let idx = idx_list.remove(tmp); ret.push(keys[idx].to_owned()); len -= 1; } ret } #[derive(Debug, Display, From)] pub enum RocksTrieDBError { #[display(fmt = "store error")] Store, #[display(fmt = "rocksdb {}", _0)] RocksDB(rocksdb::Error), #[display(fmt = "parameters do not match")] InsertParameter, #[display(fmt = "batch length dont match")] BatchLengthMismatch, } impl std::error::Error for RocksTrieDBError {} impl From for ProtocolError { fn from(err: RocksTrieDBError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Binding, Box::new(err)) } } fn to_store_err(e: rocksdb::Error) -> RocksTrieDBError { log::error!("[framework] trie db {:?}", e); RocksTrieDBError::Store } #[cfg(test)] mod tests { extern crate test; use test::Bencher; use super::*; #[bench] fn bench_rand(b: &mut Bencher) { b.iter(|| { let mut rng = SmallRng::seed_from_u64(RAND_SEED); for _ in 0..10000 { rng.gen_range(10, 1000000); } }) } #[test] fn test_rand_remove() { let list = (0..10).collect::>(); let keys = list.iter().collect::>(); for num in 1..10 { let res = rand_remove_list(keys.clone(), num); assert_eq!(res.len(), num); } } } ================================================ FILE: framework/src/binding/store/array.rs ================================================ use std::cell::RefCell; use std::marker::PhantomData; use std::rc::Rc; use bytes::Bytes; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ServiceState, StoreArray}; use protocol::types::Hash; use protocol::ProtocolResult; use crate::binding::store::FixedKeys; pub struct DefaultStoreArray { state: Rc>, var_name: Hash, keys: FixedKeys, phantom: 
PhantomData, } impl DefaultStoreArray { pub fn new(state: Rc>, name: &str) -> Self { let var_name = Hash::digest(Bytes::from(name.to_owned() + "array")); let opt_bs: Option = state .borrow() .get(&var_name) .expect("get array should not fail"); let keys = if let Some(bs) = opt_bs { <_>::decode_fixed(bs).expect("decode keys should not fail") } else { FixedKeys { inner: Vec::new() } }; Self { state, var_name, keys, phantom: PhantomData, } } fn inner_get(&self, index: u64) -> ProtocolResult> { if let Some(k) = self.keys.inner.get(index as usize) { self.state .borrow() .get(k)? .map_or_else(|| Ok(None), |v| Ok(Some(v))) } else { Ok(None) } } // TODO(@zhounan): Atomicity of insert(k, v) and insert self.keys to // ServiceState is not guaranteed for now That must be settled soon after. fn inner_push(&mut self, elm: E) -> ProtocolResult<()> { let key = Hash::digest(elm.encode_fixed()?); self.keys.inner.push(key.clone()); self.state .borrow_mut() .insert(self.var_name.clone(), self.keys.encode_fixed()?)?; self.state.borrow_mut().insert(key, elm) } // TODO(@zhounan): Atomicity of insert(k, v) and insert self.keys to // ServiceState is not guaranteed for now That must be settled soon after. 
fn inner_remove(&mut self, index: u64) -> ProtocolResult<()> { let key = self.keys.inner.remove(index as usize); self.state .borrow_mut() .insert(self.var_name.clone(), self.keys.encode_fixed()?)?; self.state.borrow_mut().insert(key, Bytes::new()) } } impl StoreArray for DefaultStoreArray { fn get(&self, index: u64) -> Option { self.inner_get(index) .unwrap_or_else(|e| panic!("StoreArray get value failed: {}", e)) } fn push(&mut self, elm: E) { self.inner_push(elm) .unwrap_or_else(|e| panic!("StoreArray push value failed: {}", e)); } fn remove(&mut self, index: u64) { self.inner_remove(index) .unwrap_or_else(|e| panic!("StoreArray remove value failed: {}", e)); } fn len(&self) -> u64 { self.keys.inner.len() as u64 } fn is_empty(&self) -> bool { self.len() == 0 } fn iter<'a>(&'a self) -> Box + 'a> { Box::new(ArrayIter::::new(0, self)) } } struct ArrayIter<'a, E: FixedCodec, A: StoreArray> { idx: u64, array: &'a A, phantom: PhantomData, } impl<'a, E: FixedCodec, A: StoreArray> ArrayIter<'a, E, A> { pub fn new(idx: u64, array: &'a A) -> Self { ArrayIter { idx, array, phantom: PhantomData, } } } impl<'a, E: FixedCodec, A: StoreArray> Iterator for ArrayIter<'a, E, A> { type Item = (u64, E); fn next(&mut self) -> Option { if self.idx < self.array.len() { let ele = self .array .get(self.idx) .expect("StoreArray should get Some when index inbound"); self.idx += 1; Some((self.idx - 1, ele)) } else { None } } } ================================================ FILE: framework/src/binding/store/map.rs ================================================ use std::cell::RefCell; use std::iter::Iterator; use std::marker::PhantomData; use std::rc::Rc; use bytes::Bytes; use rayon::prelude::*; use protocol::fixed_codec::FixedCodec; use protocol::traits::{ServiceState, StoreMap}; use protocol::types::Hash; use protocol::ProtocolResult; use crate::binding::store::{get_bucket_index, Bucket, FixedBuckets}; pub struct DefaultStoreMap { state: Rc>, var_name: String, keys: RefCell>, len_key: 
Bytes, len: u64, phantom: PhantomData, } impl DefaultStoreMap where S: 'static + ServiceState, K: 'static + Send + FixedCodec + PartialEq, V: 'static + FixedCodec, { pub fn new(state: Rc>, name: &str) -> Self { let len_key = Bytes::from(name.to_string() + "_map_len"); let len = state .borrow() .get(&len_key) .expect("Get len failed") .unwrap_or(0u64); DefaultStoreMap { state, len_key, len, var_name: name.to_string(), keys: RefCell::new(FixedBuckets::new()), phantom: PhantomData, } } fn inner_insert(&mut self, key: K, value: V) -> ProtocolResult<()> { let key_bytes = key.encode_fixed()?; let mk = self.get_map_key(&key_bytes); let bkt_idx = get_bucket_index(&key_bytes); if !self.inner_contains(bkt_idx, &key)? { self.keys.borrow_mut().insert(bkt_idx, key); self.state.borrow_mut().insert( self.get_bucket_name(bkt_idx), self.keys.borrow().get_bucket(bkt_idx).encode_fixed()?, )?; self.len_add_one()?; } self.state.borrow_mut().insert(mk, value) } fn inner_get(&self, key: &K) -> ProtocolResult> { let key_bytes = key.encode_fixed()?; let bkt_idx = get_bucket_index(&key_bytes); if self.inner_contains(bkt_idx, &key)? { self.state .borrow() .get(&self.get_map_key(&key_bytes))? .map_or_else(|| Ok(None), |v| Ok(Some(v))) } else { Ok(None) } } fn inner_remove(&mut self, key: &K) -> ProtocolResult> { let key_bytes = key.encode_fixed()?; let bkt_idx = get_bucket_index(&key_bytes); if self.inner_contains(bkt_idx, &key)? 
{ let value = self.inner_get(key)?.expect("value should be existed"); let bkt_idx = get_bucket_index(&key_bytes); let bkt_name = self.get_bucket_name(bkt_idx); let _ = self.keys.borrow_mut().remove_item(bkt_idx, key)?; self.state.borrow_mut().insert( bkt_name, self.keys.borrow().get_bucket(bkt_idx).encode_fixed()?, )?; self.state .borrow_mut() .insert(self.get_map_key(&key_bytes), Bytes::new())?; self.len_sub_one()?; Ok(Some(value)) } else { Ok(None) } } #[inline(always)] fn inner_contains(&self, bkt_idx: usize, key: &K) -> ProtocolResult { if self.keys.borrow().is_bucket_recovered(bkt_idx) { return Ok(self.keys.borrow().contains(bkt_idx, key)); } let bkt = if let Some(bytes) = self.state.borrow().get(&self.get_bucket_name(bkt_idx))? { <_>::decode_fixed(bytes)? } else { Bucket::new() }; let ret = bkt.contains(key); self.keys.borrow_mut().recover_bucket(bkt_idx, bkt); Ok(ret) } fn get_map_key(&self, key_bytes: &Bytes) -> Bytes { let mut name_bytes = self.var_name.as_bytes().to_vec(); name_bytes.extend_from_slice(key_bytes); if key_bytes.len() > 32 { Hash::digest(Bytes::from(name_bytes)).as_bytes() } else { Bytes::from(name_bytes) } } fn get_bucket_name(&self, index: usize) -> Bytes { let mut bytes = (self.var_name.clone() + "_bucket_").as_bytes().to_vec(); bytes.extend_from_slice(&index.to_le_bytes()); Bytes::from(bytes) } fn len_add_one(&mut self) -> ProtocolResult<()> { self.len += 1; self.state .borrow_mut() .insert(self.len_key.clone(), self.len.encode_fixed()?) } fn len_sub_one(&mut self) -> ProtocolResult<()> { self.len -= 1; self.state .borrow_mut() .insert(self.len_key.clone(), self.len.encode_fixed()?) 
} fn recover_all_buckets(&self) { let idxs = self .keys .borrow() .is_recovered .iter() .enumerate() .filter_map(|(i, &res)| if !res { Some(i) } else { None }) .collect::>(); let opt_bytes = idxs .iter() .map(|idx| { let name = self.get_bucket_name(*idx); self.state.borrow().get(&name).unwrap() }) .collect::>(); let buckets = opt_bytes .into_par_iter() .map(|bytes| { if let Some(bs) = bytes { <_>::decode_fixed(bs).expect("Decode bucket failed") } else { Bucket::new() } }) .collect::>(); for (idx, bkt) in idxs.into_iter().zip(buckets.into_iter()) { self.keys.borrow_mut().recover_bucket(idx, bkt); } } #[cfg(test)] fn get_buckets(self) -> FixedBuckets { self.keys.into_inner() } } impl StoreMap for DefaultStoreMap where S: 'static + ServiceState, K: 'static + Send + FixedCodec + Clone + PartialEq, V: 'static + FixedCodec, { fn get(&self, key: &K) -> Option { self.inner_get(key) .unwrap_or_else(|e| panic!("StoreMap get failed: {}", e)) } fn insert(&mut self, key: K, value: V) { self.inner_insert(key, value) .unwrap_or_else(|e| panic!("StoreMap insert failed: {}", e)); } fn remove(&mut self, key: &K) -> Option { self.inner_remove(key) .unwrap_or_else(|e| panic!("StoreMap remove failed: {}", e)) } fn contains(&self, key: &K) -> bool { if let Ok(bytes) = key.encode_fixed() { self.inner_contains(get_bucket_index(&bytes), &key) .unwrap_or(false) } else { false } } fn len(&self) -> u64 { self.len } fn is_empty(&self) -> bool { self.len == 0 } fn iter<'a>(&'a self) -> Box + 'a> { self.recover_all_buckets(); Box::new(NewMapIter::::new(0, self)) } } pub struct NewMapIter< 'a, S: 'static + ServiceState, K: 'static + FixedCodec + PartialEq, V: 'static + FixedCodec, > { idx: u64, map: &'a DefaultStoreMap, } impl<'a, S, K, V> NewMapIter<'a, S, K, V> where S: 'static + ServiceState, K: 'static + FixedCodec + PartialEq, V: 'static + FixedCodec, { pub fn new(idx: u64, map: &'a DefaultStoreMap) -> Self { Self { idx, map } } } impl<'a, S, K, V> Iterator for NewMapIter<'a, S, K, V> where 
S: 'static + ServiceState, K: 'static + Send + FixedCodec + Clone + PartialEq, V: 'static + FixedCodec, { type Item = (K, V); fn next(&mut self) -> Option { let idx = self.idx; if idx >= self.map.len { return None; } for i in 0..16 { let (left, right) = self.map.keys.borrow().get_abs_index_interval(i); if left <= idx && idx < right { let index = idx - left; let key = self.map.keys.borrow().keys_bucket[i] .0 .get(index as usize) .cloned() .expect("get key should not fail"); self.idx += 1; return Some(( key.clone(), self.map.get(&key).expect("get value should not fail"), )); } } None } } #[cfg(test)] mod tests { use std::sync::Arc; use cita_trie::MemoryDB; use rand::random; use crate::binding::state::{GeneralServiceState, MPTTrie}; use crate::binding::store::map::DefaultStoreMap; use super::*; fn gen_bytes() -> Bytes { Bytes::from((0..16).map(|_| random::()).collect::>()) } #[test] fn test_map_and_bucket() { let state = Rc::new(RefCell::new(GeneralServiceState::new(MPTTrie::new( Arc::new(MemoryDB::new(false)), )))); let mut map = DefaultStoreMap::<_, Bytes, Bytes>::new(Rc::clone(&state), "test"); let key_1 = gen_bytes(); let val_1 = gen_bytes(); let key_2 = gen_bytes(); let val_2 = gen_bytes(); let key_idx_1 = get_bucket_index(&key_1.encode_fixed().unwrap()); let key_idx_2 = get_bucket_index(&key_2.encode_fixed().unwrap()); map.insert(key_1, val_1); map.insert(key_2, val_2); assert_eq!(map.len(), 2); let fbkt = map.get_buckets(); assert!(fbkt.is_recovered[key_idx_1]); assert!(fbkt.is_recovered[key_idx_2]); assert_eq!(fbkt.len(), 2); let max = key_idx_1.max(key_idx_2); let min = key_idx_1.min(key_idx_2); let res = (0..17) .map(|i| { if i > max { 2u64 } else if i > min { 1u64 } else { 0u64 } }) .collect::>(); assert_eq!(fbkt.bucket_lens, res); } } ================================================ FILE: framework/src/binding/store/mod.rs ================================================ mod array; mod map; mod primitive; use bytes::Bytes; use derive_more::{Display, From}; 
use protocol::fixed_codec::{FixedCodec, FixedCodecError}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; pub use array::DefaultStoreArray; pub use map::DefaultStoreMap; pub use primitive::{DefaultStoreBool, DefaultStoreString, DefaultStoreUint64}; pub struct FixedKeys { pub inner: Vec, } impl rlp::Encodable for FixedKeys { fn rlp_append(&self, s: &mut rlp::RlpStream) { let inner: Vec> = self .inner .iter() .map(|k| k.encode_fixed().expect("encode should not fail").to_vec()) .collect(); s.begin_list(1).append_list::, _>(&inner); } } impl rlp::Decodable for FixedKeys { fn decode(r: &rlp::Rlp) -> Result { let inner_u8: Vec> = rlp::decode_list(r.at(0)?.as_raw()); let inner_k: Result, _> = inner_u8 .into_iter() .map(|v| <_>::decode_fixed(Bytes::from(v))) .collect(); let inner = inner_k.map_err(|_| rlp::DecoderError::Custom("decode K from bytes fail"))?; Ok(FixedKeys { inner }) } } impl FixedCodec for FixedKeys { fn encode_fixed(&self) -> ProtocolResult { Ok(Bytes::from(rlp::encode(self))) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(rlp::decode(bytes.as_ref()).map_err(FixedCodecError::from)?) 
} } pub struct FixedBuckets { pub keys_bucket: Vec>, pub bucket_lens: Vec, pub is_recovered: Vec, } impl FixedBuckets { fn new() -> Self { let mut keys_bucket = Vec::new(); let mut bucket_lens = vec![0]; let mut is_recovered = Vec::new(); for _i in 0..16 { keys_bucket.push(Bucket::new()); bucket_lens.push(0u64); is_recovered.push(false); } FixedBuckets { keys_bucket, bucket_lens, is_recovered, } } fn recover_bucket(&mut self, index: usize, bucket: Bucket) { self.keys_bucket[index] = bucket; self.is_recovered[index] = true; self.update_index_interval(index); } fn insert(&mut self, index: usize, key: K) { let bkt = self.keys_bucket.get_mut(index).unwrap(); bkt.push(key); self.update_index_interval(index); } fn contains(&self, index: usize, key: &K) -> bool { self.keys_bucket[index].contains(key) } fn remove_item(&mut self, index: usize, key: &K) -> ProtocolResult { let bkt = self.keys_bucket.get_mut(index).unwrap(); if bkt.contains(key) { let val = bkt.remove_item(key)?; self.update_index_interval(index); Ok(val) } else { Err(StoreError::GetNone.into()) } } fn get_bucket(&self, index: usize) -> &Bucket { self.keys_bucket .get(index) .expect("index must less than 16") } /// The function will panic when index is greater than or equal 16. 
fn get_abs_index_interval(&self, index: usize) -> (u64, u64) { (self.bucket_lens[index], self.bucket_lens[index + 1]) } fn is_bucket_recovered(&self, index: usize) -> bool { self.is_recovered[index] } fn update_index_interval(&mut self, index: usize) { let start = index + 1; let mut acc = self.bucket_lens[index]; for i in start..17 { acc += self.keys_bucket[i - 1].len() as u64; self.bucket_lens[i] = acc; } } #[cfg(test)] fn len(&self) -> u64 { self.bucket_lens[16] } #[cfg(test)] fn is_empty(&self) -> bool { self.len() == 0 } } pub struct Bucket(Vec); impl Bucket { fn new() -> Self { Bucket(Vec::new()) } fn len(&self) -> usize { self.0.len() } fn contains(&self, x: &K) -> bool { self.0.contains(x) } fn push(&mut self, value: K) { self.0.push(value); } fn remove_item(&mut self, key: &K) -> ProtocolResult { let mut idx = self.len(); for (i, item) in self.0.iter().enumerate() { if item == key { idx = i; break; } } if idx < self.len() { Ok(self.0.remove(idx)) } else { Err(StoreError::GetNone.into()) } } } impl rlp::Encodable for Bucket { fn rlp_append(&self, s: &mut rlp::RlpStream) { let inner: Vec> = self .0 .iter() .map(|k| k.encode_fixed().expect("encode should not fail").to_vec()) .collect(); s.begin_list(1).append_list::, _>(&inner); } } impl rlp::Decodable for Bucket { fn decode(r: &rlp::Rlp) -> Result { let inner_u8: Vec> = rlp::decode_list(r.at(0)?.as_raw()); let inner_k: Result, _> = inner_u8 .into_iter() .map(|v| <_>::decode_fixed(Bytes::from(v))) .collect(); let inner = inner_k.map_err(|_| rlp::DecoderError::Custom("decode K from bytes fail"))?; Ok(Bucket(inner)) } } impl FixedCodec for Bucket { fn encode_fixed(&self) -> ProtocolResult { Ok(Bytes::from(rlp::encode(self))) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(rlp::decode(bytes.as_ref()).map_err(FixedCodecError::from)?) 
} } #[inline(always)] fn get_bucket_index(bytes: &Bytes) -> usize { let len = bytes.len() - 1; (bytes[len] >> 4) as usize } #[derive(Debug, Display, From)] pub enum StoreError { #[display(fmt = "the key not existed")] GetNone, #[display(fmt = "access array out of range")] OutRange, #[display(fmt = "decode error")] DecodeError, #[display(fmt = "overflow when calculating")] Overflow, } impl std::error::Error for StoreError {} impl From for ProtocolError { fn from(err: StoreError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Binding, Box::new(err)) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_insert() { let mut buckets = FixedBuckets::new(); assert!(buckets.is_empty()); for i in 0..=255u8 { let key = Bytes::from(vec![i]); buckets.insert(get_bucket_index(&key), key); } println!("{:?}", buckets.bucket_lens); let intervals = (0u64..=16).map(|i| i * 16).collect::>(); assert!(intervals == buckets.bucket_lens); assert!(buckets.len() == 256); for i in 0..16 { assert!(buckets.get_bucket(i).len() == 16); } let mut buckets = FixedBuckets::new(); for i in 0..8 { let key = Bytes::from(vec![i]); buckets.insert(get_bucket_index(&key), key); } assert!(buckets.get_bucket(0).len() == 8); assert!(buckets.len() == 8); for i in 1..16 { assert!(buckets.get_bucket(i).len() == 0); } } #[test] fn test_remove() { let mut buckets = FixedBuckets::new(); for i in 0..=255u8 { let key = Bytes::from(vec![i]); buckets.insert(get_bucket_index(&key), key); } let key = Bytes::from(vec![0]); let _ = buckets .remove_item(get_bucket_index(&key.encode_fixed().unwrap()), &key) .unwrap(); let intervals = (0u64..=16) .map(|i| if i == 0 { 0 } else { i * 16 - 1 }) .collect::>(); assert!(buckets.len() == 255); assert!(intervals == buckets.bucket_lens); } #[test] fn test_contains() { let mut buckets = FixedBuckets::new(); for i in 0..3u8 { let key = Bytes::from(vec![i]); buckets.insert(get_bucket_index(&key), key); } let key = Bytes::from(vec![0]); 
assert!(buckets.contains(get_bucket_index(&key.encode_fixed().unwrap()), &key)); let key = Bytes::from(vec![5]); assert!(!buckets.contains(get_bucket_index(&key.encode_fixed().unwrap()), &key)); let key = Bytes::from(vec![20]); assert!(!buckets.contains(get_bucket_index(&key.encode_fixed().unwrap()), &key)); } } ================================================ FILE: framework/src/binding/store/primitive.rs ================================================ use std::cell::RefCell; use std::rc::Rc; use bytes::Bytes; use protocol::traits::{ServiceState, StoreBool, StoreString, StoreUint64}; use protocol::types::Hash; use protocol::ProtocolResult; pub struct DefaultStoreBool { state: Rc>, key: Hash, } impl DefaultStoreBool { pub fn new(state: Rc>, var_name: &str) -> Self { Self { state, key: Hash::digest(Bytes::from(var_name.to_owned() + "bool")), } } fn inner_get(&self) -> ProtocolResult { let b: Option = self.state.borrow().get(&self.key)?; match b { Some(v) => Ok(v), None => { self.state.borrow_mut().insert(self.key.clone(), false)?; Ok(false) } } } fn inner_set(&mut self, b: bool) -> ProtocolResult<()> { self.state.borrow_mut().insert(self.key.clone(), b)?; Ok(()) } } impl StoreBool for DefaultStoreBool { fn get(&self) -> bool { self.inner_get() .unwrap_or_else(|e| panic!("StoreBool get failed: {}", e)) } fn set(&mut self, b: bool) { self.inner_set(b) .unwrap_or_else(|e| panic!("StoreBool set failed: {}", e)); } } pub struct DefaultStoreUint64 { state: Rc>, key: Hash, } impl DefaultStoreUint64 { pub fn new(state: Rc>, var_name: &str) -> Self { Self { state, key: Hash::digest(Bytes::from(var_name.to_owned() + "uint64")), } } fn inner_get(&self) -> u64 { let u: Option = self .state .borrow() .get(&self.key) .unwrap_or_else(|e| panic!("StoreUint64 get failed: {}", e)); match u { Some(v) => v, None => { self.state .borrow_mut() .insert(self.key.clone(), 0u64) .unwrap_or_else(|e| panic!("StoreUint64 get failed: {}", e)); 0 } } } fn inner_set(&mut self, val: u64) { 
self.state .borrow_mut() .insert(self.key.clone(), val) .unwrap_or_else(|e| panic!("StoreUint64 set failed: {}", e)); } // Add val with self // And set the result back to self fn inner_add(&mut self, val: u64) -> bool { let sv = self.inner_get(); match val.overflowing_add(sv) { (sum, false) => { self.inner_set(sum); false } _ => true, } } // Self minus val // And set the result back to self fn inner_sub(&mut self, val: u64) -> bool { let sv = self.inner_get(); if sv >= val { self.inner_set(sv - val); false } else { true } } // Multiply val with self // And set the result back to self fn inner_mul(&mut self, val: u64) -> bool { let sv = self.inner_get(); match val.overflowing_mul(sv) { (mul, false) => { self.inner_set(mul); false } _ => true, } } // Power of self // And set the result back to self fn inner_pow(&mut self, val: u32) -> bool { let sv = self.inner_get(); match sv.overflowing_pow(val) { (pow, false) => { self.inner_set(pow); false } _ => true, } } // Self divided by val // And set the result back to self fn inner_div(&mut self, val: u64) -> bool { let sv = self.inner_get(); if let 0 = val { true } else { self.inner_set(sv / val); false } } // Remainder of self // And set the result back to self fn inner_rem(&mut self, val: u64) -> bool { let sv = self.inner_get(); if let 0 = val { true } else { self.inner_set(sv % val); false } } } impl StoreUint64 for DefaultStoreUint64 { fn get(&self) -> u64 { self.inner_get() } fn set(&mut self, val: u64) { self.inner_set(val); } // Add val with self // And set the result back to self fn safe_add(&mut self, val: u64) -> bool { self.inner_add(val) } // Self minus val // And set the result back to self fn safe_sub(&mut self, val: u64) -> bool { self.inner_sub(val) } // Multiply val with self // And set the result back to self fn safe_mul(&mut self, val: u64) -> bool { self.inner_mul(val) } // Power of self // And set the result back to self fn safe_pow(&mut self, val: u32) -> bool { self.inner_pow(val) } // Self divided 
by val // And set the result back to self fn safe_div(&mut self, val: u64) -> bool { self.inner_div(val) } // Remainder of self // And set the result back to self fn safe_rem(&mut self, val: u64) -> bool { self.inner_rem(val) } } pub struct DefaultStoreString { state: Rc>, key: Hash, } impl DefaultStoreString { pub fn new(state: Rc>, var_name: &str) -> Self { Self { state, key: Hash::digest(Bytes::from(var_name.to_owned() + "string")), } } fn inner_set(&mut self, val: &str) -> ProtocolResult<()> { self.state .borrow_mut() .insert(self.key.clone(), val.to_string())?; Ok(()) } fn inner_get(&self) -> ProtocolResult { let s: Option = self.state.borrow().get(&self.key)?; match s { Some(v) => Ok(v), None => { self.state .borrow_mut() .insert(self.key.clone(), "".to_string())?; Ok("".to_string()) } } } fn inner_len(&self) -> ProtocolResult { self.inner_get().map(|s| s.len() as u64) } fn is_empty_(&self) -> ProtocolResult { self.inner_get().map(|s| s.is_empty()) } } impl StoreString for DefaultStoreString { fn get(&self) -> String { self.inner_get() .unwrap_or_else(|e| panic!("StoreString get failed: {}", e)) } fn set(&mut self, val: &str) { self.inner_set(val) .unwrap_or_else(|e| panic!("StoreString set failed: {}", e)); } fn len(&self) -> u64 { self.inner_len() .unwrap_or_else(|e| panic!("StoreString get length failed: {}", e)) } fn is_empty(&self) -> bool { self.is_empty_() .unwrap_or_else(|e| panic!("StoreString get is_empty failed: {}", e)) } } ================================================ FILE: framework/src/binding/tests/mod.rs ================================================ mod sdk; mod state; mod store; ================================================ FILE: framework/src/binding/tests/sdk.rs ================================================ use std::cell::RefCell; use std::rc::Rc; use std::sync::Arc; use async_trait::async_trait; use bytes::Bytes; use cita_trie::MemoryDB; use protocol::traits::{CommonStorage, Context, ServiceResponse, ServiceSDK, Storage}; use 
protocol::types::{
    Address, Block, BlockHeader, Event, Hash, MerkleRoot, Proof, RawTransaction, Receipt,
    ReceiptResponse, SignedTransaction, TransactionRequest, Validator,
};
use protocol::ProtocolResult;

use crate::binding::sdk::{DefaultChainQuerier, DefaultServiceSDK};
use crate::binding::store::StoreError;
use crate::binding::tests::state::new_state;

/// End-to-end smoke test of `DefaultServiceSDK`: exercises every store kind
/// (bool/string/uint64/map/array), account/plain key-value access, and the
/// chain-query path backed by `MockStorage` below.
#[test]
fn test_service_sdk() {
    let memdb = Arc::new(MemoryDB::new(false));
    let state = new_state(Arc::clone(&memdb), None);
    let rs = Rc::new(RefCell::new(state));

    let arcs = Arc::new(MockStorage {});
    let cq = DefaultChainQuerier::new(Arc::clone(&arcs));

    let mut sdk = DefaultServiceSDK::new(Rc::clone(&rs), Rc::new(cq));

    // test sdk store bool
    let mut sdk_bool = sdk.alloc_or_recover_bool("test_bool");
    sdk_bool.set(true);
    assert_eq!(sdk_bool.get(), true);

    // test sdk store string
    let mut sdk_string = sdk.alloc_or_recover_string("test_string");
    sdk_string.set("hello");
    assert_eq!(sdk_string.get(), "hello".to_owned());

    // test sdk store uint64
    let mut sdk_uint64 = sdk.alloc_or_recover_uint64("test_uint64");
    sdk_uint64.set(99);
    assert_eq!(sdk_uint64.get(), 99);

    // test sdk map
    let mut sdk_map = sdk.alloc_or_recover_map::("test_map");
    assert_eq!(sdk_map.is_empty(), true);
    sdk_map.insert(Hash::digest(Bytes::from("key_1")), Bytes::from("val_1"));
    assert_eq!(
        sdk_map.get(&Hash::digest(Bytes::from("key_1"))).unwrap(),
        Bytes::from("val_1")
    );
    let mut it = sdk_map.iter();
    assert_eq!(
        it.next().unwrap(),
        (Hash::digest(Bytes::from("key_1")), Bytes::from("val_1"))
    );
    assert_eq!(it.next().is_none(), true);

    // test sdk array
    let mut sdk_array = sdk.alloc_or_recover_array::("test_array");
    assert_eq!(sdk_array.is_empty(), true);
    sdk_array.push(Hash::digest(Bytes::from("key_1")));
    assert_eq!(
        sdk_array.get(0).unwrap(),
        Hash::digest(Bytes::from("key_1"))
    );
    let mut it = sdk_array.iter();
    assert_eq!(it.next().unwrap(), (0, Hash::digest(Bytes::from("key_1"))));
    assert_eq!(it.next().is_none(), true);

    // test get/set account value
sdk.set_account_value(&mock_address(), Bytes::from("ak"), Bytes::from("av"));
    let account_value: Bytes = sdk
        .get_account_value(&mock_address(), &Bytes::from("ak"))
        .unwrap();
    assert_eq!(Bytes::from("av"), account_value);

    // test get/set value
    sdk.set_value(Bytes::from("ak"), Bytes::from("av"));
    let value: Bytes = sdk.get_value(&Bytes::from("ak")).unwrap();
    assert_eq!(Bytes::from("av"), value);

    // test query chain — MockStorage always answers with the canned fixtures,
    // so these assert the SDK plumbing, not real storage.
    let tx_data = sdk
        .get_transaction_by_hash(&Hash::digest(Bytes::from("param")))
        .unwrap();
    assert_eq!(mock_signed_tx(), tx_data);

    let receipt_data = sdk
        .get_receipt_by_hash(&Hash::digest(Bytes::from("param")))
        .unwrap();
    assert_eq!(mock_receipt(), receipt_data);

    let block_data = sdk.get_block_by_height(Some(1)).unwrap();
    assert_eq!(mock_block(1), block_data);
}

/// Test double: writes succeed silently, reads return the mock fixtures.
struct MockStorage;

#[async_trait]
impl CommonStorage for MockStorage {
    async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        Ok(())
    }

    async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult> {
        Ok(Some(mock_block(1)))
    }

    async fn get_block_header(
        &self,
        _ctx: Context,
        _height: u64,
    ) -> ProtocolResult> {
        Ok(Some(mock_block(1).header))
    }

    async fn set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        Ok(())
    }

    async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> {
        Ok(())
    }

    async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult {
        Ok(mock_block(1))
    }

    async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        Ok(())
    }

    async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult {
        Ok(mock_block(1).header)
    }
}

#[async_trait]
impl Storage for MockStorage {
    async fn insert_transactions(
        &self,
        _ctx: Context,
        _: u64,
        _: Vec,
    ) -> ProtocolResult<()> {
        Ok(())
    }

    async fn insert_receipts(&self, _ctx: Context, _: u64, _: Vec) -> ProtocolResult<()> {
        Ok(())
    }

    async fn update_latest_proof(&self, _ctx: Context, _: Proof) -> ProtocolResult<()> {
        Ok(())
    }

    async fn
get_transaction_by_hash(
        &self,
        _ctx: Context,
        _tx_hash: &Hash,
    ) -> ProtocolResult> {
        Ok(Some(mock_signed_tx()))
    }

    // Batch getters deliberately fail: the SDK test only uses the by-hash paths.
    async fn get_transactions(
        &self,
        _ctx: Context,
        _height: u64,
        _hashes: &[Hash],
    ) -> ProtocolResult>> {
        Err(StoreError::GetNone.into())
    }

    async fn get_receipt_by_hash(&self, _ctx: Context, _: Hash) -> ProtocolResult> {
        Ok(Some(mock_receipt()))
    }

    async fn get_receipts(
        &self,
        _ctx: Context,
        _: u64,
        _: Vec,
    ) -> ProtocolResult>> {
        Err(StoreError::GetNone.into())
    }

    async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult {
        Err(StoreError::GetNone.into())
    }
}

// #####################
// Mock Primitive
// #####################

// All fixtures are deterministic so equality asserts are stable.
pub fn mock_address() -> Address {
    let hash = mock_hash();
    Address::from_hash(hash).unwrap()
}

pub fn mock_pub_key(s: &'static str) -> Bytes {
    Hash::digest(Bytes::from(s)).as_bytes()
}

pub fn mock_hash() -> Hash {
    Hash::digest(Bytes::from("mock"))
}

pub fn mock_merkle_root() -> MerkleRoot {
    Hash::digest(Bytes::from("mock"))
}

// #####################
// Mock Transaction
// #####################

pub fn mock_transaction_request() -> TransactionRequest {
    TransactionRequest {
        service_name: "mock-service".to_owned(),
        method:       "mock-method".to_owned(),
        payload:      "mock-payload".to_owned(),
    }
}

pub fn mock_raw_tx() -> RawTransaction {
    RawTransaction {
        chain_id:     mock_hash(),
        nonce:        mock_hash(),
        timeout:      100,
        cycles_price: 1,
        cycles_limit: 100,
        request:      mock_transaction_request(),
        sender:       mock_address(),
    }
}

pub fn mock_signed_tx() -> SignedTransaction {
    SignedTransaction {
        raw:       mock_raw_tx(),
        tx_hash:   mock_hash(),
        pubkey:    Default::default(),
        signature: Default::default(),
    }
}

// #####################
// Mock Receipt
// #####################

pub fn mock_receipt() -> Receipt {
    Receipt {
        state_root:  mock_merkle_root(),
        height:      13,
        tx_hash:     mock_hash(),
        cycles_used: 100,
        events:      vec![mock_event()],
        response:    mock_receipt_response(),
    }
}

pub fn mock_receipt_response() -> ReceiptResponse {
    ReceiptResponse {
        service_name:
"mock-service".to_owned(),
        method: "mock-method".to_owned(),
        response: ServiceResponse:: {
            code:          0,
            succeed_data:  "ok".to_owned(),
            error_message: "".to_owned(),
        },
    }
}

pub fn mock_event() -> Event {
    Event {
        service: "mock-event".to_owned(),
        name:    "mock-method".to_owned(),
        data:    "mock-data".to_owned(),
    }
}

// #####################
// Mock Block
// #####################

pub fn mock_validator(s: &'static str) -> Validator {
    Validator {
        pub_key:        mock_pub_key(s),
        propose_weight: 1,
        vote_weight:    1,
    }
}

pub fn mock_proof() -> Proof {
    Proof {
        height:     4,
        round:      99,
        block_hash: mock_hash(),
        signature:  Default::default(),
        bitmap:     Default::default(),
    }
}

pub fn mock_block_header() -> BlockHeader {
    BlockHeader {
        chain_id: mock_hash(),
        height: 42,
        exec_height: 41,
        prev_hash: mock_hash(),
        timestamp: 420_000_000,
        order_root: mock_merkle_root(),
        order_signed_transactions_hash: mock_hash(),
        confirm_root: vec![mock_hash(), mock_hash()],
        state_root: mock_merkle_root(),
        receipt_root: vec![mock_hash(), mock_hash()],
        cycles_used: vec![999_999],
        proposer: mock_address(),
        proof: mock_proof(),
        validator_version: 1,
        validators: vec![
            mock_validator("a"),
            mock_validator("b"),
            mock_validator("c"),
            mock_validator("d"),
        ],
    }
}

/// `order_size` controls how many (identical) tx hashes the block carries.
pub fn mock_block(order_size: usize) -> Block {
    Block {
        header:            mock_block_header(),
        ordered_tx_hashes: (0..order_size).map(|_| mock_hash()).collect(),
    }
}

================================================ FILE: framework/src/binding/tests/state.rs ================================================
extern crate test;

use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;

use bytes::Bytes;
use cita_trie::{MemoryDB, DB};
use test::Bencher;

use protocol::traits::ServiceState;
use protocol::types::{Address, Hash, MerkleRoot};

use crate::binding::state::{GeneralServiceState, MPTTrie, RocksTrieDB};

#[rustfmt::skip]
/// Bench in AMD Ryzen 7 3800X 8-Core Processor (16 x 4250)
/// test binding::tests::state::bench_get_cache_hit ... bench: 47 ns/iter (+/- 3)
/// test binding::tests::state::bench_get_cache_miss ... bench: 1,063 ns/iter (+/- 35)
/// test binding::tests::state::bench_get_without_cache ... bench: 526 ns/iter (+/- 19)
/// test binding::tests::state::bench_insert_batch_with_cache ... bench: 1,113,015 ns/iter (+/- 489,068)
/// test binding::tests::state::bench_insert_batch_without_cache ... bench: 979,408 ns/iter (+/- 510,953)
/// test binding::tests::state::bench_insert_with_cache ... bench: 2,716 ns/iter (+/- 602)
/// test binding::tests::state::bench_insert_without_cache ... bench: 2,491 ns/iter (+/- 486)
#[bench]
fn bench_insert_batch_with_cache(b: &mut Bencher) {
    let triedb = new_triedb("bench_insert_batch_with_cache");

    let keys = (0..1000).map(|_| rand_bytes()).collect::>();
    let values = (0..1000).map(|_| rand_bytes()).collect::>();

    b.iter(|| {
        triedb.insert_batch(keys.clone(), values.clone()).unwrap();
    })
}

#[bench]
fn bench_insert_batch_without_cache(b: &mut Bencher) {
    let triedb = new_triedb("bench_insert_batch_without_cache");

    let keys = (0..1000).map(|_| rand_bytes()).collect::>();
    let values = (0..1000).map(|_| rand_bytes()).collect::>();

    b.iter(|| {
        triedb.insert_batch_without_cache(keys.clone(), values.clone());
    })
}

#[bench]
fn bench_insert_with_cache(b: &mut Bencher) {
    let triedb = new_triedb("bench_insert_with_cache");

    let key = rand_bytes();
    let value = rand_bytes();

    b.iter(|| {
        triedb.insert(key.clone(), value.clone()).unwrap();
    })
}

#[bench]
fn bench_insert_without_cache(b: &mut Bencher) {
    let triedb = new_triedb("bench_insert_without_cache");

    let key = rand_bytes();
    let value = rand_bytes();

    b.iter(|| {
        triedb.insert_without_cache(key.clone(), value.clone());
    })
}

#[bench]
fn bench_get_cache_hit(b: &mut Bencher) {
    let triedb = new_triedb("bench_get_cache_hit");

    let keys = (0..1000).map(|_| rand_bytes()).collect::>();
    let values = (0..1000).map(|_| rand_bytes()).collect::>();
    triedb.insert_batch(keys.clone(), values).unwrap();

    let key = keys[0].clone();
    b.iter(|
| {
        let _ = triedb.get(&key).unwrap();
    })
}

/// Benchmark a `get` whose key is guaranteed absent from the cache.
#[bench]
fn bench_get_cache_miss(b: &mut Bencher) {
    let triedb = new_triedb("bench_get_cache_miss");

    let keys = (0..1000).map(|_| rand_bytes()).collect::<Vec<_>>();
    let values = (0..1000).map(|_| rand_bytes()).collect::<Vec<_>>();
    triedb.insert_batch(keys.clone(), values).unwrap();

    // Draw random keys until one does not collide with an inserted key.
    let keys = keys.iter().collect::<HashSet<_>>();
    let key = {
        let mut tmp = rand_bytes();
        while keys.contains(&tmp) {
            tmp = rand_bytes();
        }
        tmp
    };

    b.iter(|| {
        let _ = triedb.get(&key).unwrap();
    })
}

/// Benchmark `get_without_cache` (bypasses the in-memory cache entirely).
#[bench]
fn bench_get_without_cache(b: &mut Bencher) {
    let triedb = new_triedb("bench_get_without_cache");

    let keys = (0..1000).map(|_| rand_bytes()).collect::<Vec<_>>();
    let values = (0..1000).map(|_| rand_bytes()).collect::<Vec<_>>();
    triedb.insert_batch_without_cache(keys.clone(), values);

    let key = keys[0].clone();
    b.iter(|| {
        let _ = triedb.get_without_cache(&key).unwrap();
    })
}

/// Round-trips single and batch inserts through the trie DB and checks the
/// cache size across a flush.
///
/// FIX: the original wrote `let _ = keys.iter().map(|k| assert!(…));` —
/// iterator adapters are lazy, so with the iterator never consumed those
/// assertions never executed. Rewritten as `for` loops so every element is
/// actually checked. (Stripped turbofish `::<Vec<_>>` also restored.)
#[test]
fn test_trie_db() {
    let triedb = new_triedb("test_trie_db");

    let key = rand_bytes();
    let value = rand_bytes();
    triedb.insert(key.clone(), value.clone()).unwrap();
    assert_eq!(triedb.get(&key).unwrap().unwrap(), value);

    let keys = (0..3000).map(|_| rand_bytes()).collect::<Vec<_>>();
    let values = (0..3000).map(|_| rand_bytes()).collect::<Vec<_>>();
    triedb.insert_batch(keys.clone(), values.clone()).unwrap();

    // Was a never-consumed lazy `map`; now every pair is asserted.
    for (k, v) in keys.iter().zip(values.iter()) {
        assert_eq!(&triedb.get(k).unwrap().unwrap(), v);
    }

    // 3000 batch entries + 1 single insert.
    assert_eq!(triedb.cache().len(), 3001);
    triedb.flush().unwrap();
    assert_eq!(triedb.cache().len(), 2000);

    // Was also a never-consumed lazy `map`.
    for k in keys.iter() {
        assert!(triedb.contains(k).unwrap());
    }
}

/// Insert, commit, and confirm the value survives reloading state from the
/// committed root.
#[test]
fn test_state_insert() {
    let memdb = Arc::new(MemoryDB::new(false));
    let mut state = new_state(Arc::clone(&memdb), None);

    let key = Hash::digest(Bytes::from("key".to_owned()));
    let value = Hash::digest(Bytes::from("value".to_owned()));
    state.insert(key.clone(), value.clone()).unwrap();
    let val: Hash = state.get(&key).unwrap().unwrap();
    assert_eq!(val, value);

    state.stash().unwrap();
    let new_root = state.commit().unwrap();

    let val: Hash
= state.get(&key).unwrap().unwrap();
    assert_eq!(val, value);

    // Reload from the committed root and verify persistence.
    let new_state = new_state(Arc::clone(&memdb), Some(new_root));
    let val: Hash = new_state.get(&key).unwrap().unwrap();
    assert_eq!(val, value);
}

/// Same persistence round-trip as test_state_insert, but through the
/// per-account key-value namespace.
#[test]
fn test_state_account() {
    let memdb = Arc::new(MemoryDB::new(false));
    let mut state = new_state(Arc::clone(&memdb), None);

    let address = Address::from_hash(Hash::digest(Bytes::from("test-address"))).unwrap();
    let key = Hash::digest(Bytes::from("key".to_owned()));
    let value = Hash::digest(Bytes::from("value".to_owned()));

    state
        .set_account_value(&address, key.clone(), value.clone())
        .unwrap();
    let val: Hash = state.get_account_value(&address, &key).unwrap().unwrap();
    assert_eq!(val, value);

    state.stash().unwrap();
    let new_root = state.commit().unwrap();

    let new_state = new_state(Arc::clone(&memdb), Some(new_root));
    let val: Hash = new_state
        .get_account_value(&address, &key)
        .unwrap()
        .unwrap();
    assert_eq!(val, value);
}

/// Build a service state over `memdb`, resuming from `root` when given.
pub fn new_state(memdb: Arc, root: Option) -> GeneralServiceState {
    let trie = match root {
        Some(root) => MPTTrie::from(root, memdb).unwrap(),
        None => MPTTrie::new(memdb),
    };

    GeneralServiceState::new(trie)
}

// Each bench/test gets its own RocksDB directory under ./free-space/.
fn new_triedb(name: &str) -> RocksTrieDB {
    let mut path = PathBuf::from("./free-space/");
    path.push(name);

    RocksTrieDB::new(path, false, 1024, 2000).unwrap()
}

// 32 random bytes per key/value.
fn rand_bytes() -> Vec {
    (0..32).map(|_| rand::random::()).collect::>()
}

================================================ FILE: framework/src/binding/tests/store.rs ================================================
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;

use bytes::Bytes;
use cita_trie::MemoryDB;

use protocol::traits::{StoreArray, StoreBool, StoreMap, StoreString, StoreUint64};
use protocol::types::Hash;

use crate::binding::store::{
    DefaultStoreArray, DefaultStoreBool, DefaultStoreMap, DefaultStoreString, DefaultStoreUint64,
};
use crate::binding::tests::state::new_state;

/// Default is false; set/get round-trips both values.
#[test]
fn test_default_store_bool() {
    let memdb = Arc::new(MemoryDB::new(false));
    let
state = new_state(Arc::clone(&memdb), None);

    let mut sb = DefaultStoreBool::new(Rc::new(RefCell::new(state)), "test");
    assert_eq!(sb.get(), false);
    sb.set(true);
    assert_eq!(sb.get(), true);
    sb.set(false);
    assert_eq!(sb.get(), false);
}

/// Checked arithmetic on StoreUint64: the safe_* methods return the overflow
/// flag (false = no overflow) and write the result back in place.
#[test]
fn test_default_store_uint64() {
    let memdb = Arc::new(MemoryDB::new(false));
    let state = new_state(Arc::clone(&memdb), None);

    let mut su = DefaultStoreUint64::new(Rc::new(RefCell::new(state)), "test");
    assert_eq!(su.get(), 0u64);

    su.set(8u64);
    assert_eq!(su.get(), 8u64);

    assert_eq!(su.safe_add(12u64), false);
    assert_eq!(su.get(), 20u64);

    assert_eq!(su.safe_sub(10u64), false);
    assert_eq!(su.get(), 10u64);

    assert_eq!(su.safe_mul(8u64), false);
    assert_eq!(su.get(), 80u64);

    assert_eq!(su.safe_div(10u64), false);
    assert_eq!(su.get(), 8u64);

    assert_eq!(su.safe_pow(2u32), false);
    assert_eq!(su.get(), 64u64);

    assert_eq!(su.safe_rem(5u64), false);
    assert_eq!(su.get(), 4u64);
}

/// Empty default, set/get round-trip, and byte length.
#[test]
fn test_default_store_string() {
    let memdb = Arc::new(MemoryDB::new(false));
    let state = new_state(Arc::clone(&memdb), None);
    let rs = Rc::new(RefCell::new(state));

    let mut ss = DefaultStoreString::new(Rc::clone(&rs), "test");
    assert_eq!(ss.get(), "");

    ss.set("");
    assert_eq!(ss.get(), "");
    assert_eq!(ss.is_empty(), true);

    ss.set("ok");
    assert_eq!(ss.get(), String::from("ok"));
    assert_eq!(ss.len(), 2u64);
}

/// Insert/iterate/remove on the store map; also checks that a map recovered
/// under the same name sees previously inserted entries.
#[test]
fn test_default_store_map() {
    let memdb = Arc::new(MemoryDB::new(false));
    let state = new_state(Arc::clone(&memdb), None);
    let rs = Rc::new(RefCell::new(state));

    let mut sm = DefaultStoreMap::<_, Hash, Bytes>::new(Rc::clone(&rs), "test");
    assert_eq!(sm.get(&Hash::digest(Bytes::from("key_1"))).is_none(), true);

    sm.insert(Hash::digest(Bytes::from("key_1")), Bytes::from("val_1"));
    sm.insert(Hash::digest(Bytes::from("key_2")), Bytes::from("val_2"));

    {
        // NOTE(review): iteration yields key_2 before key_1 — order appears to
        // follow internal storage, not insertion; confirm against StoreMap impl.
        let mut it = sm.iter();
        assert_eq!(
            it.next().unwrap(),
            (Hash::digest(Bytes::from("key_2")), Bytes::from("val_2"))
        );
        assert_eq!(
            it.next().unwrap(),
            (Hash::digest(Bytes::from("key_1")),
Bytes::from("val_1"))
        );
        assert_eq!(it.next().is_none(), true);
    }

    assert_eq!(
        sm.get(&Hash::digest(Bytes::from("key_1"))).unwrap(),
        Bytes::from("val_1")
    );
    assert_eq!(
        sm.get(&Hash::digest(Bytes::from("key_2"))).unwrap(),
        Bytes::from("val_2")
    );

    sm.remove(&Hash::digest(Bytes::from("key_1"))).unwrap();
    assert_eq!(sm.contains(&Hash::digest(Bytes::from("key_1"))), false);
    assert_eq!(sm.len(), 1u64);

    // Recovering the map under the same name sees the surviving entry.
    let sm = DefaultStoreMap::<_, Hash, Bytes>::new(Rc::clone(&rs), "test");
    assert_eq!(
        sm.get(&Hash::digest(Bytes::from("key_2"))).unwrap(),
        Bytes::from("val_2")
    );
}

/// Push/iterate/remove on the store array; removal shifts later elements down.
#[test]
fn test_default_store_array() {
    let memdb = Arc::new(MemoryDB::new(false));
    let state = new_state(Arc::clone(&memdb), None);
    let rs = Rc::new(RefCell::new(state));

    let mut sa = DefaultStoreArray::<_, Bytes>::new(Rc::clone(&rs), "test");
    assert_eq!(sa.len(), 0u64);
    assert_eq!(sa.get(0u64).is_none(), true);

    sa.push(Bytes::from("111"));
    sa.push(Bytes::from("222"));
    assert_eq!(sa.get(3u64).is_none(), true);

    {
        let mut it = sa.iter();
        assert_eq!(it.next().unwrap(), (0u64, Bytes::from("111")));
        assert_eq!(it.next().unwrap(), (1u64, Bytes::from("222")));
        assert_eq!(it.next().is_none(), true);
    }

    assert_eq!(sa.get(0u64).unwrap(), Bytes::from("111"));
    assert_eq!(sa.get(1u64).unwrap(), Bytes::from("222"));

    sa.remove(0u64);
    assert_eq!(sa.len(), 1u64);
    assert_eq!(sa.get(0u64).unwrap(), Bytes::from("222"));
}

================================================ FILE: framework/src/executor/error.rs ================================================
use derive_more::Display;
use protocol::{ProtocolError, ProtocolErrorKind};
use std::any::Any;

/// Errors surfaced by the service executor; Display strings come from
/// derive_more. (NOTE(review): `Box)` below had its generic argument stripped
/// by extraction — presumably `Box<dyn Any + Send>`; tokens preserved as-is.)
#[derive(Debug, Display)]
pub enum ExecutorError {
    #[display(fmt = "service {:?} was not found", service)]
    NotFoundService { service: String },

    #[display(fmt = "service {:?} method {:?} was not found", service, method)]
    NotFoundMethod { service: String, method: String },

    #[display(fmt = "Parsing payload to json failed {:?}", _0)]
    JsonParse(serde_json::Error),

    #[display(fmt = "Init service genesis failed: {:?}", _0)]
    InitService(String),

    #[display(fmt = "Query service failed: {:?}", _0)]
    QueryService(String),

    #[display(fmt = "Call service failed: {:?}", _0)]
    CallService(String),

    // Carries the raw panic payload from a tx hook.
    #[display(fmt = "Tx hook panic: {:?}", _0)]
    TxHook(Box),
}

impl std::error::Error for ExecutorError {}

impl From for ProtocolError {
    fn from(err: ExecutorError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Executor, Box::new(err))
    }
}

================================================ FILE: framework/src/executor/factory.rs ================================================
use std::sync::Arc;

use protocol::traits::{Executor, ExecutorFactory, ServiceMapping, Storage};
use protocol::types::MerkleRoot;
use protocol::ProtocolResult;

use crate::executor::ServiceExecutor;

/// Thin factory: builds a boxed ServiceExecutor from a state root.
pub struct ServiceExecutorFactory;

impl ExecutorFactory for ServiceExecutorFactory {
    fn from_root(
        root: MerkleRoot,
        db: Arc,
        storage: Arc,
        mapping: Arc,
    ) -> ProtocolResult> {
        let executor = ServiceExecutor::with_root(root, db, storage, mapping)?;
        Ok(Box::new(executor))
    }
}

================================================ FILE: framework/src/executor/mod.rs ================================================
mod error;
mod factory;
#[cfg(test)]
mod tests;

pub use factory::ServiceExecutorFactory;

use std::{
    cell::RefCell,
    collections::HashMap,
    marker::PhantomData,
    ops::{Deref, DerefMut},
    panic::{self, AssertUnwindSafe},
    rc::Rc,
    sync::Arc,
};

use cita_trie::DB as TrieDB;
use common_apm::muta_apm;

use protocol::traits::{
    Context, Executor, ExecutorParams, ExecutorResp, Service, ServiceMapping, ServiceResponse,
    ServiceState, Storage,
};
use protocol::types::{
    Address, Event, Hash, MerkleRoot, Receipt, ReceiptResponse, ServiceContext,
    ServiceContextParams, ServiceParam, SignedTransaction, TransactionRequest,
};
use protocol::{ProtocolError, ProtocolResult};

use crate::binding::sdk::{DefaultChainQuerier, DefaultSDKFactory};
use crate::binding::state::{GeneralServiceState, MPTTrie};
use
crate::executor::error::ExecutorError;

// NOTE(review): generic parameters throughout this file were stripped by
// extraction (e.g. `HashMap>>>`, `fn kan ServiceResponse>`); tokens kept as-is.

// Error code returned when a tx targets an unregistered service.
const SERVICE_NOT_FOUND_CODE: u64 = 62077;

/// Per-transaction hook pair; the default impls are no-ops that report success.
trait TxHooks {
    fn before(
        &mut self,
        _: Context,
        _: ServiceContext,
    ) -> ProtocolResult>> {
        Ok(vec![ServiceResponse::from_succeed(
            "default_implement".to_owned(),
        )])
    }

    fn after(
        &mut self,
        _: Context,
        _: ServiceContext,
    ) -> ProtocolResult>> {
        Ok(vec![ServiceResponse::from_succeed(
            "default_implement".to_owned(),
        )])
    }
}

// Unit type = "no hooks", used for read-only execution.
impl TxHooks for () {}

enum HookType {
    Before,
    After,
}

#[derive(Clone, Copy)]
enum ExecType {
    Read,
    Write,
}

/// Map from service name to its (shared, mutable) service state.
pub struct ServiceStateMap(HashMap>>>);

impl ServiceStateMap {
    fn new() -> ServiceStateMap {
        Self(HashMap::new())
    }
}

impl Deref for ServiceStateMap {
    type Target = HashMap>>>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for ServiceStateMap {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl ServiceStateMap {
    // Stash pending writes of every service state.
    fn stash(&self) -> ProtocolResult<()> {
        for state in self.0.values() {
            state.borrow_mut().stash()?;
        }

        Ok(())
    }

    // Drop pending writes of every service state.
    fn revert_cache(&self) -> ProtocolResult<()> {
        for state in self.0.values() {
            state.borrow_mut().revert_cache()?;
        }

        Ok(())
    }
}

/// Runs every registered service's tx hooks, stashing state on success and
/// reverting it when a hook panics.
struct CommitHooks {
    inner: Vec>>>,
    states: Rc>,
}

impl CommitHooks {
    fn new(
        hooks: Vec>>>,
        states: Rc>,
    ) -> CommitHooks {
        Self {
            inner: hooks,
            states,
        }
    }

    // bagua kan 101 :)
    // Run one hook under catch_unwind: stash state on success, revert and
    // halt (hook panic could otherwise fork the chain) on panic.
    fn kan ServiceResponse>(
        _context: ServiceContext,
        states: Rc>,
        hook: H,
    ) -> ProtocolResult> {
        match panic::catch_unwind(AssertUnwindSafe(hook)) {
            Ok(res) => {
                states.stash()?;
                Ok(res)
            }
            Err(e) => {
                states.revert_cache()?;
                // something really bad happens, chain maybe fork, must halt
                Err(ProtocolError::from(ExecutorError::TxHook(e)))
            }
        }
    }
}

impl TxHooks for CommitHooks {
    fn before(
        &mut self,
        _context: Context,
        service_context: ServiceContext,
    ) -> ProtocolResult>> {
        let mut ret: Vec> = Vec::new();
        for hook in self.inner.iter_mut() {
            let resp = Self::kan(service_context.clone(), Rc::clone(&self.states), || {
                hook.borrow_mut().tx_hook_before_(service_context.clone())
            })?;
            ret.push(resp);
        }
        Ok(ret)
    }

    fn after(
&mut self,
        _context: Context,
        service_context: ServiceContext,
    ) -> ProtocolResult>> {
        let mut ret: Vec> = Vec::new();
        for hook in self.inner.iter_mut() {
            let resp = Self::kan(service_context.clone(), Rc::clone(&self.states), || {
                hook.borrow_mut().tx_hook_after_(service_context.clone())
            })?;
            ret.push(resp);
        }
        Ok(ret)
    }
}

/// The framework executor: owns per-service states, the root state that maps
/// service name -> service state root, and the instantiated services.
pub struct ServiceExecutor {
    service_mapping: Arc,
    states: Rc>,
    root_state: GeneralServiceState,
    services: HashMap>>>,
    phantom: PhantomData,
}

impl ServiceExecutor {
    /// Run every service's genesis_ with its configured payload over fresh
    /// tries, then commit each service root into a new root state and return
    /// the overall genesis state root.
    pub fn create_genesis(
        services: Vec,
        trie_db: Arc,
        storage: Arc,
        mapping: Arc,
    ) -> ProtocolResult {
        let querier = Rc::new(DefaultChainQuerier::new(Arc::clone(&storage)));

        let mut states = ServiceStateMap::new();
        for name in mapping.list_service_name().into_iter() {
            let trie = MPTTrie::new(Arc::clone(&trie_db));

            states.insert(name, Rc::new(RefCell::new(GeneralServiceState::new(trie))));
        }

        let states = Rc::new(states);
        let sdk_factory = DefaultSDKFactory::new(Rc::clone(&states), Rc::clone(&querier));

        for params in services.into_iter() {
            let state = states
                .get(&params.name)
                .ok_or(ExecutorError::NotFoundService {
                    service: params.name.to_owned(),
                })?;

            let mut service = mapping.get_service(&params.name, &sdk_factory)?;

            // A panicking genesis aborts startup with InitService.
            panic::catch_unwind(AssertUnwindSafe(|| {
                service.genesis_(params.payload.clone())
            }))
            .map_err(|e| ProtocolError::from(ExecutorError::InitService(format!("{:?}", e))))?;

            state.borrow_mut().stash()?;
        }

        let trie = MPTTrie::new(Arc::clone(&trie_db));
        let mut root_state = GeneralServiceState::new(trie);

        // root state maps each service name to its committed state root
        for (name, state) in states.iter() {
            let root = state.borrow_mut().commit()?;
            root_state.insert(name.to_owned(), root)?;
        }
        root_state.stash()?;
        root_state.commit()
    }

    /// Rebuild the executor from a committed root: reopen the root state,
    /// each service's state trie, and reinstantiate every service.
    pub fn with_root(
        root: MerkleRoot,
        trie_db: Arc,
        storage: Arc,
        service_mapping: Arc,
    ) -> ProtocolResult {
        let querier = Rc::new(DefaultChainQuerier::new(Arc::clone(&storage)));

        let trie = MPTTrie::from(root, Arc::clone(&trie_db))?;
        let root_state = GeneralServiceState::new(trie);

        let list_service_name =
service_mapping.list_service_name();
        let mut states = ServiceStateMap::new();
        for name in list_service_name.iter() {
            // Missing root in root_state means a service added after genesis:
            // start it from an empty trie.
            let trie = match root_state.get(name)? {
                Some(service_root) => MPTTrie::from(service_root, Arc::clone(&trie_db))?,
                None => MPTTrie::new(Arc::clone(&trie_db)),
            };

            let service_state = GeneralServiceState::new(trie);
            states.insert(name.to_owned(), Rc::new(RefCell::new(service_state)));
        }

        let states = Rc::new(states);
        let sdk_factory = DefaultSDKFactory::new(Rc::clone(&states), Rc::clone(&querier));

        let mut services = HashMap::new();
        for name in list_service_name.iter() {
            let service = service_mapping.get_service(name, &sdk_factory)?;
            services.insert(name.clone(), Rc::new(RefCell::new(service)));
        }

        Ok(Self {
            service_mapping,
            states,
            root_state,
            services,
            phantom: PhantomData,
        })
    }

    /// Commit every service state and fold the roots into the root state,
    /// returning the new overall state root.
    #[muta_apm::derive::tracing_span(kind = "executor.commit")]
    fn commit(&mut self, ctx: Context) -> ProtocolResult {
        for (name, state) in self.states.iter() {
            let root = state.borrow_mut().commit()?;
            self.root_state.insert(name.to_owned(), root)?;
        }
        self.root_state.stash()?;
        self.root_state.commit()
    }

    fn stash(&mut self) -> ProtocolResult<()> {
        self.states.stash()
    }

    fn revert_cache(&mut self) -> ProtocolResult<()> {
        self.states.revert_cache()
    }

    /// Run hook_before_/hook_after_ on every service; a panicking hook has
    /// its state changes reverted, a successful one gets them stashed.
    #[muta_apm::derive::tracing_span(
        kind = "executor.before_hook",
        tags = "{'hook_type': 'hook_type'}"
    )]
    fn hook(
        &mut self,
        ctx: Context,
        hook_type: HookType,
        exec_params: &ExecutorParams,
    ) -> ProtocolResult<()> {
        for name in self.service_mapping.list_service_name().into_iter() {
            let service = self.get_service(name.as_str())?;

            let hook_ret = match hook_type {
                HookType::Before => panic::catch_unwind(AssertUnwindSafe(|| {
                    service.borrow_mut().hook_before_(exec_params)
                })),
                HookType::After => panic::catch_unwind(AssertUnwindSafe(|| {
                    service.borrow_mut().hook_after_(exec_params)
                })),
            };

            if hook_ret.is_err() {
                self.revert_cache()?;
            } else {
                self.stash()?;
            }
        }
        Ok(())
    }

    // Look up an instantiated service by name.
    fn get_service(&self, service: &str) -> ProtocolResult>>> {
        self.services
.get(service)
            .map(|s| Rc::clone(s))
            .ok_or_else(|| {
                ExecutorError::NotFoundService {
                    service: service.to_owned(),
                }
                .into()
            })
    }

    /// Assemble a ServiceContext for one request; cycles_used starts at 0 and
    /// is shared via Rc<RefCell> so nested calls accumulate into it.
    fn get_context(
        &self,
        tx_hash: Option,
        nonce: Option,
        caller: &Address,
        cycles_price: u64,
        cycles_limit: u64,
        params: &ExecutorParams,
        request: &TransactionRequest,
        event: Rc>>,
    ) -> ProtocolResult {
        let ctx_params = ServiceContextParams {
            tx_hash,
            nonce,
            cycles_limit,
            cycles_price,
            cycles_used: Rc::new(RefCell::new(0)),
            caller: caller.clone(),
            height: params.height,
            timestamp: params.timestamp,
            service_name: request.service_name.to_owned(),
            service_method: request.method.to_owned(),
            service_payload: request.payload.to_owned(),
            extra: None,
            events: event,
        };

        Ok(ServiceContext::new(ctx_params))
    }

    // Reads get no tx hooks (unit impl); writes get CommitHooks over every
    // registered service.
    fn get_tx_hooks(&self, exec_type: ExecType) -> Box {
        match exec_type {
            ExecType::Read => Box::new(()),
            ExecType::Write => {
                let mut tx_hooks = vec![];

                for name in self.service_mapping.list_service_name().into_iter() {
                    let tx_hook_service = self.get_service(name.as_str()).expect("no service");
                    tx_hooks.push(tx_hook_service);
                }

                Box::new(CommitHooks::new(tx_hooks, Rc::clone(&self.states)))
            }
        }
    }

    /// Run one call between its before/after tx hooks, stashing or reverting
    /// state (and truncating emitted events) according to each stage's outcome.
    fn catch_call(
        &mut self,
        context: Context,
        service_context: ServiceContext,
        exec_type: ExecType,
        event: Rc>>,
    ) -> ProtocolResult> {
        let mut tx_hooks = self.get_tx_hooks(exec_type);

        let resp = tx_hooks.before(context.clone(), service_context.clone())?;
        self.states.stash()?;

        // Remember how many events existed before the call so failures can
        // discard only the events this tx produced.
        let event_index = event.borrow_mut().len();

        let ret = if resp.iter().any(|r| r.is_error()) {
            // A failing before-hook skips the tx body entirely.
            self.revert_cache()?;
            event.borrow_mut().truncate(event_index);
            ServiceResponse::from_error(65535, "skip_tx_run".to_owned())
        } else {
            match panic::catch_unwind(AssertUnwindSafe(|| {
                self.call(service_context.clone(), exec_type)
            })) {
                Ok(r) => Ok(r),
                Err(e) => {
                    self.revert_cache()?;
                    log::error!("inner chain error occurred when calling service: {:?}", e);
                    Err(ProtocolError::from(ExecutorError::CallService(format!(
                        "{:?}",
                        e
                    ))))
                }
            }?
};

        if ret.is_error() {
            // Failed tx: drop its events and pending state, mark it cancelled.
            event.borrow_mut().truncate(event_index);
            self.states.revert_cache()?;
            service_context.cancel("tx_exec_return_code_not_zero".to_owned());
        }

        let resp = tx_hooks.after(context, service_context)?;
        if resp.iter().any(|r| r.is_error()) {
            event.borrow_mut().truncate(event_index);
            self.states.revert_cache()?;
        } else {
            self.states.stash()?;
        }

        Ok(ret)
    }

    // Dispatch one request to its service's read_ or write_ entry point.
    fn call(&self, context: ServiceContext, exec_type: ExecType) -> ServiceResponse {
        let service_name = context.get_service_name();

        let service = self.get_service(service_name);
        if service.is_err() {
            return ServiceResponse::from_error(
                SERVICE_NOT_FOUND_CODE,
                "can not found service".to_owned(),
            );
        }
        let service = service.unwrap();

        match exec_type {
            ExecType::Read => service.borrow().read_(context),
            ExecType::Write => service.borrow_mut().write_(context),
        }
    }
}

impl Executor for ServiceExecutor {
    /// Execute a batch of txs: global before-hooks, per-tx catch_call (each
    /// producing a Receipt), global after-hooks, then commit.
    #[muta_apm::derive::tracing_span(kind = "executor.exec", logs = "{'tx_len': 'txs.len()'}")]
    fn exec(
        &mut self,
        ctx: Context,
        params: &ExecutorParams,
        txs: &[SignedTransaction],
    ) -> ProtocolResult {
        self.hook(ctx.clone(), HookType::Before, params)?;

        let mut receipts = txs
            .iter()
            .map(|stx| {
                let event = Rc::new(RefCell::new(vec![]));
                let service_context = self.get_context(
                    Some(stx.tx_hash.clone()),
                    Some(stx.raw.nonce.clone()),
                    &stx.raw.sender,
                    stx.raw.cycles_price,
                    stx.raw.cycles_limit,
                    params,
                    &stx.raw.request,
                    Rc::clone(&event),
                )?;

                let exec_resp = self.catch_call(
                    ctx.clone(),
                    service_context.clone(),
                    ExecType::Write,
                    Rc::clone(&event),
                )?;

                // state_root is patched in after commit, below in exec.
                Ok(Receipt {
                    state_root: MerkleRoot::from_empty(),
                    height: service_context.get_current_height(),
                    tx_hash: stx.tx_hash.clone(),
                    cycles_used: service_context.get_cycles_used(),
                    events: service_context.get_events(),
                    response: ReceiptResponse {
                        service_name: service_context.get_service_name().to_owned(),
                        method: service_context.get_service_method().to_owned(),
                        response: exec_resp,
                    },
                })
            })
            .collect::, ProtocolError>>()?;

        self.hook(ctx.clone(), HookType::After, params)?;

        let
state_root = self.commit(ctx)?;
        let mut all_cycles_used = 0;

        // Back-fill the committed root and total up cycles across receipts.
        for receipt in receipts.iter_mut() {
            receipt.state_root = state_root.clone();
            all_cycles_used += receipt.cycles_used;
        }

        Ok(ExecutorResp {
            receipts,
            all_cycles_used,
            state_root,
        })
    }

    /// Read-only query: no tx hooks, unlimited cycles, panics mapped to
    /// QueryService errors.
    fn read(
        &self,
        params: &ExecutorParams,
        caller: &Address,
        cycles_price: u64,
        request: &TransactionRequest,
    ) -> ProtocolResult> {
        let context = self.get_context(
            None,
            None,
            caller,
            cycles_price,
            std::u64::MAX,
            params,
            request,
            Rc::new(RefCell::new(vec![])),
        )?;

        panic::catch_unwind(AssertUnwindSafe(|| self.call(context, ExecType::Read)))
            .map_err(|e| ProtocolError::from(ExecutorError::QueryService(format!("{:?}", e))))
    }
}

================================================ FILE: framework/src/executor/tests/framework.rs ================================================
use crate::executor::ServiceExecutor;
use async_trait::async_trait;
use binding_macro::{cycles, service, tx_hook_after, tx_hook_before};
use bytes::{Bytes, BytesMut};
use cita_trie::MemoryDB;
use protocol::traits::{
    CommonStorage, Context, Executor, ExecutorParams, ExecutorResp, SDKFactory, Service,
    ServiceMapping, ServiceResponse, ServiceSDK, Storage,
};
use protocol::types::{
    Address, Block, BlockHeader, Genesis, Hash, Proof, RawTransaction, Receipt, ServiceContext,
    SignedTransaction, TransactionRequest,
};
use protocol::ProtocolResult;
use std::sync::Arc;

lazy_static::lazy_static! {
    // Fixed sender used by every constructed tx in these tests.
    pub static ref ADMIN_ACCOUNT: Address = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705".parse().unwrap();
}

macro_rules!
exec_txs {
    // Build a genesis + executor from the test fixture toml, construct one
    // signed tx per ($service, $method, $payload) triple, and execute them.
    ($exec_cycle_limit: expr, $tx_cycle_limit: expr $(, ($service: expr, $method: expr, $payload: expr))*) => {
        {
            let memdb = Arc::new(MemoryDB::new(false));
            let arcs = Arc::new(MockStorage {});

            let toml_str = include_str!("./framework_genesis_services.toml");
            let genesis: Genesis = toml::from_str(toml_str).unwrap();

            let root = ServiceExecutor::create_genesis(
                genesis.services,
                Arc::clone(&memdb),
                Arc::new(MockStorage {}),
                Arc::new(MockServiceMapping {}),
            )
            .unwrap();

            let mut executor = ServiceExecutor::with_root(
                root.clone(),
                Arc::clone(&memdb),
                Arc::clone(&arcs),
                Arc::new(MockServiceMapping {}),
            )
            .unwrap();

            let params = ExecutorParams {
                state_root: root,
                height: 1,
                timestamp: 0,
                cycles_limit: $exec_cycle_limit,
                proposer: ADMIN_ACCOUNT.clone(),
            };

            let mut stxs = Vec::new();
            $(stxs.push(construct_stx(
                $tx_cycle_limit,
                $service.to_owned(),
                $method.to_owned(),
                serde_json::to_string(&$payload).unwrap()
            ));
            )*

            let resp : ExecutorResp = executor.exec(Context::new(), &params, &stxs).unwrap();
            resp
        }
    };
}

/// Build a signed tx from ADMIN_ACCOUNT with a fixed pubkey and empty
/// signature (the test executor does not verify signatures).
pub fn construct_stx(
    tx_cycle_limit: u64,
    service_name: String,
    method: String,
    payload: String,
) -> SignedTransaction {
    let raw_tx = RawTransaction {
        chain_id: Hash::from_empty(),
        nonce: Hash::from_empty(),
        timeout: 0,
        cycles_price: 1,
        cycles_limit: tx_cycle_limit,
        request: TransactionRequest {
            service_name,
            method,
            payload,
        },
        sender: ADMIN_ACCOUNT.clone(),
    };

    SignedTransaction {
        raw: raw_tx,
        tx_hash: Hash::from_empty(),
        pubkey: Bytes::from(
            hex::decode("031288a6788678c25952eba8693b2f278f66e2187004b64ac09416d07f83f96d5b")
                .unwrap(),
        ),
        signature: BytesMut::from("").freeze(),
    }
}

/// Storage double for these tests: nothing here is expected to be called.
struct MockStorage;

#[async_trait]
impl CommonStorage for MockStorage {
    async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> {
        unimplemented!()
    }

    async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult> {
        unimplemented!()
    }

    async fn get_block_header(
        &self,
        _ctx: Context,
        _height: u64,
    ) -> ProtocolResult> {
        unimplemented!()
    }

    async fn
set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } #[async_trait] impl Storage for MockStorage { async fn insert_transactions( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn insert_receipts(&self, _ctx: Context, _: u64, _: Vec) -> ProtocolResult<()> { unimplemented!() } async fn update_latest_proof(&self, _ctx: Context, _: Proof) -> ProtocolResult<()> { unimplemented!() } async fn get_transaction_by_hash( &self, _ctx: Context, _: &Hash, ) -> ProtocolResult> { unimplemented!() } async fn get_transactions( &self, _ctx: Context, _: u64, _: &[Hash], ) -> ProtocolResult>> { unimplemented!() } async fn get_receipt_by_hash(&self, _ctx: Context, _: Hash) -> ProtocolResult> { unimplemented!() } async fn get_receipts( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult>> { unimplemented!() } async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } pub struct MockServiceMapping; impl ServiceMapping for MockServiceMapping { fn get_service>( &self, name: &str, factory: &Factory, ) -> ProtocolResult> { let sdk = factory.get_sdk(name)?; let service = match name { "TestService" => Box::new(TestService::new(sdk)) as Box, _ => panic!("not found service"), }; Ok(service) } fn list_service_name(&self) -> Vec { vec!["TestService".to_owned()] } } pub struct TestService { _sdk: SDK, } #[service] impl TestService { pub fn new(sdk: SDK) -> Self { Self { _sdk: sdk } } #[cycles(10_000)] #[read] fn test_read(&self, _ctx: ServiceContext) -> ServiceResponse { 
ServiceResponse::from_succeed("".to_owned()) } #[cycles(300_00)] #[write] fn test_write(&mut self, ctx: ServiceContext) -> ServiceResponse { ctx.emit_event( "test_service".to_owned(), "write".to_owned(), "write".to_owned(), ); ServiceResponse::from_succeed("".to_owned()) } #[tx_hook_before] fn test_tx_hook_before(&mut self, ctx: ServiceContext) -> ServiceResponse<()> { // we emit an event ctx.emit_event( "test_service".to_owned(), "before".to_owned(), "before".to_owned(), ); if ctx.get_payload().contains("before") { return ServiceResponse::from_error(2, "before_error".to_owned()); } ServiceResponse::from_succeed(()) } #[tx_hook_after] fn test_tx_hook_after(&mut self, ctx: ServiceContext) -> ServiceResponse<()> { if ctx.get_payload().contains("after") { return ServiceResponse::from_error(2, "after_error".to_owned()); } ctx.emit_event( "test_service".to_owned(), "after".to_owned(), "after".to_owned(), ); ServiceResponse::from_succeed(()) } } #[test] fn test_tx_hook_ok_ok() { let resp: ExecutorResp = exec_txs!(50000, 50000, ("TestService", "test_write", "a test string")); assert_eq!(3, resp.receipts.get(0).unwrap().events.len()); let resp: ExecutorResp = exec_txs!(50000, 50000, ("TestService", "test_write", "before")); assert_eq!(2, resp.receipts.get(0).unwrap().events.len()); assert!(resp .receipts .get(0) .unwrap() .events .iter() .any(|e| { e.name.as_str() == "after" })); assert!(resp .receipts .get(0) .unwrap() .events .iter() .any(|e| { e.name.as_str() == "before" })); let resp: ExecutorResp = exec_txs!(50000, 50000, ("TestService", "test_write", "after")); assert_eq!(1, resp.receipts.get(0).unwrap().events.len()); assert!(resp .receipts .get(0) .unwrap() .events .iter() .any(|e| { e.name.as_str() == "before" })); let resp: ExecutorResp = exec_txs!(50000, 50000, ("TestService", "test_write", "before_after")); assert_eq!(1, resp.receipts.get(0).unwrap().events.len()); assert!(resp .receipts .get(0) .unwrap() .events .iter() .any(|e| { e.name.as_str() == "before" 
})); } ================================================ FILE: framework/src/executor/tests/framework_genesis_services.toml ================================================ timestamp = 0 prevhash = "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" [[services]] name = "TestService" payload = '' ================================================ FILE: framework/src/executor/tests/genesis_services.toml ================================================ timestamp = 0 prevhash = "0x44915be5b6c20b0678cf05fcddbbaa832e25d7e6ac538784cd5c24de00d47472" [[services]] name = "asset" payload = '''{ "id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "name": "MutaToken", "symbol": "MT", "supply": 320000011, "issuer": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705" }''' ================================================ FILE: framework/src/executor/tests/mod.rs ================================================ extern crate test; #[cfg(test)] mod framework; mod test_service; use std::str::FromStr; use std::sync::Arc; use async_trait::async_trait; use bytes::{Bytes, BytesMut}; use cita_trie::MemoryDB; use test::Bencher; use asset::types::{Asset, GetBalanceResponse}; use asset::AssetService; use metadata::MetadataService; use protocol::traits::{ CommonStorage, Context, Executor, ExecutorParams, SDKFactory, Service, ServiceMapping, ServiceSDK, Storage, }; use protocol::types::{ Address, Block, BlockHeader, Genesis, Hash, Proof, RawTransaction, Receipt, SignedTransaction, TransactionRequest, }; use protocol::ProtocolResult; use crate::executor::{ServiceExecutor, SERVICE_NOT_FOUND_CODE}; use test_service::TestService; macro_rules! 
read { ($executor:expr, $params:expr, $caller:expr, $payload:expr) => {{ let request = TransactionRequest { service_name: "test".to_owned(), method: "test_read".to_owned(), payload: $payload.to_owned(), }; $executor .read($params, $caller, 1, &request) .expect(&format!("read {}", $payload)) }}; } pub const PUB_KEY_STR: &str = "02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60"; #[test] fn test_create_genesis() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let request = TransactionRequest { service_name: "asset".to_owned(), method: "get_balance".to_owned(), payload: r#"{"asset_id": "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c", "user": "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705"}"# .to_owned(), }; let res = executor.read(¶ms, &caller, 1, &request).unwrap(); let resp: GetBalanceResponse = serde_json::from_str(&res.succeed_data).unwrap(); assert_eq!(resp.balance, 320_000_011); } #[test] fn test_exec() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), 
Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let stx = mock_signed_tx(); let txs = vec![stx]; let executor_resp = executor.exec(Context::new(), ¶ms, &txs).unwrap(); let receipt = &executor_resp.receipts[0]; assert_eq!(receipt.response.response.code, 0); let asset: Asset = serde_json::from_str(&receipt.response.response.succeed_data).unwrap(); assert_eq!(asset.name, "MutaToken2"); assert_eq!(asset.symbol, "MT2"); assert_eq!(asset.supply, 320_000_011); } #[test] fn test_emit_event() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let mut stx = mock_signed_tx(); stx.raw.request.service_name = "test".to_owned(); stx.raw.request.method = "test_event".to_owned(); stx.raw.request.payload = r#"{ "key": "", "value": "", "extra": "" }"# .to_owned(); let txs = vec![stx]; let executor_resp = executor.exec(Context::new(), ¶ms, &txs).unwrap(); let receipt = &executor_resp.receipts[0]; assert_eq!(receipt.response.response.code, 0); assert_eq!(receipt.events.len(), 1); assert_eq!(&receipt.events[0].data, "test"); assert_eq!(&receipt.events[0].name, "test-name"); assert_eq!(&receipt.events[0].service, "wow"); } #[test] fn test_revert_event_on_exec_error() { let toml_str = include_str!("./genesis_services.toml"); let genesis: 
Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let mut stx = mock_signed_tx(); stx.raw.request.service_name = "test".to_owned(); stx.raw.request.method = "test_revert_event".to_owned(); stx.raw.request.payload = r#"{ "key": "", "value": "", "extra": "" }"# .to_owned(); let txs = vec![stx]; let executor_resp = executor.exec(Context::new(), ¶ms, &txs).unwrap(); let receipt = &executor_resp.receipts[0]; assert_eq!(receipt.response.response.code, 111); assert_eq!(receipt.events.len(), 0); } #[test] fn test_service_not_found_panic() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let mut stx = mock_signed_tx(); stx.raw.request.service_name = "FlyMeToTheMars".to_owned(); let txs = vec![stx]; let executor_resp = executor .exec(Context::new(), ¶ms, &txs) .expect("should not panic on service not found"); let receipt = &executor_resp.receipts[0]; assert_eq!(receipt.response.response.code, SERVICE_NOT_FOUND_CODE); } 
#[test] fn test_tx_hook() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; // no tx hook let mut stx = mock_signed_tx(); stx.raw.request.service_name = "test".to_owned(); stx.raw.request.method = "test_write".to_owned(); stx.raw.request.payload = r#"{ "key": "foo", "value": "bar", "extra": "" }"# .to_owned(); let txs = vec![stx.clone()]; let executor_resp = executor.exec(Context::new(), ¶ms, &txs).unwrap(); let receipt = &executor_resp.receipts[0]; assert_eq!(receipt.response.response.code, 0); assert_eq!(receipt.events.len(), 0); // tx hook stx.raw.request.payload = r#"{ "key": "foo", "value": "bar", "extra": "test_hook_before; test_hook_after" }"# .to_owned(); let txs = vec![stx.clone()]; let executor_resp = executor.exec(Context::new(), ¶ms, &txs).unwrap(); let receipt = &executor_resp.receipts[0]; assert_eq!(receipt.response.response.code, 0); assert_eq!(receipt.events.len(), 2); assert_eq!(&receipt.events[0].data, "test_tx_hook_before invoked"); assert_eq!(&receipt.events[1].data, "test_tx_hook_after invoked"); // test_service_call_invoke_hook_only_once stx.raw.request.method = "test_service_call_invoke_hook_only_once".to_owned(); stx.raw.request.payload = r#"{ "key": "foo", "value": "bar", "extra": "test_hook_before; test_hook_after" }"# .to_owned(); let txs = vec![stx]; let executor_resp = executor.exec(Context::new(), ¶ms, &txs).unwrap(); let receipt = &executor_resp.receipts[0]; 
assert_eq!(receipt.response.response.code, 0); assert_eq!(receipt.events.len(), 2); assert_eq!(&receipt.events[0].data, "test_tx_hook_before invoked"); assert_eq!(&receipt.events[1].data, "test_tx_hook_after invoked"); } #[test] fn test_commit_tx_hook_use_panic_tx() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let mut stx = mock_signed_tx(); stx.raw.request.service_name = "test".to_owned(); stx.raw.request.method = "test_panic".to_owned(); stx.raw.request.payload = r#""""#.to_owned(); let txs = vec![stx]; let error_resp = executor.exec(Context::new(), ¶ms, &txs); assert!(error_resp.is_err()); let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let before = read!(executor, ¶ms, &caller, r#""before""#); assert_eq!(before.succeed_data, r#""before""#); let after = read!(executor, ¶ms, &caller, r#""after""#); assert_eq!(after.succeed_data, r#""""#); } #[test] fn test_tx_hook_before_panic() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { 
state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let mut stx = mock_signed_tx(); stx.raw.request.service_name = "test".to_owned(); stx.raw.request.method = "tx_hook_before_panic".to_owned(); stx.raw.request.payload = r#""""#.to_owned(); let txs = vec![stx]; let error_resp = executor.exec(Context::new(), ¶ms, &txs); assert!(error_resp.is_err()); let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let before = read!(executor, ¶ms, &caller, r#""before""#); assert_eq!(before.succeed_data, r#""""#); let tx_hook_before_panic = read!(executor, ¶ms, &caller, r#""tx_hook_before_panic""#); assert_eq!(tx_hook_before_panic.succeed_data, r#""""#); let after = read!(executor, ¶ms, &caller, r#""after""#); assert_eq!(after.succeed_data, r#""""#); } #[test] fn test_tx_hook_after_panic() { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let params = ExecutorParams { state_root: root, height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let mut stx = mock_signed_tx(); stx.raw.request.service_name = "test".to_owned(); stx.raw.request.method = "tx_hook_after_panic".to_owned(); stx.raw.request.payload = r#""""#.to_owned(); let txs = vec![stx]; let error_resp = executor.exec(Context::new(), ¶ms, &txs); assert!(error_resp.is_err()); let caller = Address::from_str("muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705").unwrap(); let before = read!(executor, ¶ms, &caller, r#""before""#); assert_eq!(before.succeed_data, 
r#""before""#); let tx_hook_after_panic = read!(executor, ¶ms, &caller, r#""tx_hook_after_panic""#); assert_eq!(tx_hook_after_panic.succeed_data, r#""tx_hook_after_panic""#); let after = read!(executor, ¶ms, &caller, r#""after""#); assert_eq!(after.succeed_data, r#""""#); } #[bench] fn bench_execute(b: &mut Bencher) { let toml_str = include_str!("./genesis_services.toml"); let genesis: Genesis = toml::from_str(toml_str).unwrap(); let db = Arc::new(MemoryDB::new(false)); let root = ServiceExecutor::create_genesis( genesis.services, Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let mut executor = ServiceExecutor::with_root( root.clone(), Arc::clone(&db), Arc::new(MockStorage {}), Arc::new(MockServiceMapping {}), ) .unwrap(); let txs: Vec = (0..1000).map(|_| mock_signed_tx()).collect(); b.iter(|| { let params = ExecutorParams { state_root: root.clone(), height: 1, timestamp: 0, cycles_limit: std::u64::MAX, proposer: Address::from_hash(Hash::from_empty()).unwrap(), }; let txs = txs.clone(); executor.exec(Context::new(), ¶ms, &txs).unwrap(); }); } fn mock_signed_tx() -> SignedTransaction { let raw = RawTransaction { chain_id: Hash::from_empty(), nonce: Hash::from_empty(), timeout: 0, cycles_price: 1, cycles_limit: std::u64::MAX, request: TransactionRequest { service_name: "asset".to_owned(), method: "create_asset".to_owned(), payload: r#"{ "name": "MutaToken2", "symbol": "MT2", "supply": 320000011 }"# .to_owned(), }, sender: Address::from_pubkey_bytes(Bytes::from(hex::decode(PUB_KEY_STR).unwrap())) .unwrap(), }; SignedTransaction { raw, tx_hash: Hash::from_empty(), pubkey: Bytes::from(hex::decode(PUB_KEY_STR).unwrap()), signature: BytesMut::from("").freeze(), } } struct MockServiceMapping; impl ServiceMapping for MockServiceMapping { fn get_service>( &self, name: &str, factory: &Factory, ) -> ProtocolResult> { let sdk = factory.get_sdk(name)?; let service = match name { "asset" => Box::new(AssetService::new(sdk)) as Box, 
"metadata" => Box::new(MetadataService::new(sdk)) as Box, "test" => Box::new(TestService::new(sdk)) as Box, _ => panic!("not found service"), }; Ok(service) } fn list_service_name(&self) -> Vec { vec!["asset".to_owned(), "metadata".to_owned(), "test".to_owned()] } } struct MockStorage; #[async_trait] impl CommonStorage for MockStorage { async fn insert_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_block(&self, _ctx: Context, _height: u64) -> ProtocolResult> { unimplemented!() } async fn get_block_header( &self, _ctx: Context, _height: u64, ) -> ProtocolResult> { unimplemented!() } async fn set_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn remove_block(&self, _ctx: Context, _height: u64) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } async fn set_latest_block(&self, _ctx: Context, _block: Block) -> ProtocolResult<()> { unimplemented!() } async fn get_latest_block_header(&self, _ctx: Context) -> ProtocolResult { unimplemented!() } } #[async_trait] impl Storage for MockStorage { async fn insert_transactions( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult<()> { unimplemented!() } async fn insert_receipts(&self, _ctx: Context, _: u64, _: Vec) -> ProtocolResult<()> { unimplemented!() } async fn update_latest_proof(&self, _ctx: Context, _: Proof) -> ProtocolResult<()> { unimplemented!() } async fn get_transaction_by_hash( &self, _ctx: Context, _: &Hash, ) -> ProtocolResult> { unimplemented!() } async fn get_transactions( &self, _ctx: Context, _: u64, _: &[Hash], ) -> ProtocolResult>> { unimplemented!() } async fn get_receipt_by_hash(&self, _ctx: Context, _: Hash) -> ProtocolResult> { unimplemented!() } async fn get_receipts( &self, _ctx: Context, _: u64, _: Vec, ) -> ProtocolResult>> { unimplemented!() } async fn get_latest_proof(&self, _ctx: Context) -> ProtocolResult { 
unimplemented!() } } ================================================ FILE: framework/src/executor/tests/test_service.rs ================================================ use serde::{Deserialize, Serialize}; use binding_macro::{cycles, service, tx_hook_after, tx_hook_before}; use protocol::traits::{ExecutorParams, ServiceResponse, ServiceSDK}; use protocol::types::ServiceContext; pub struct TestService { sdk: SDK, } #[derive(Deserialize, Serialize, Clone, Debug)] pub struct TestWritePayload { pub key: String, pub value: String, pub extra: String, } #[derive(Deserialize, Serialize, Clone, Debug, Default)] pub struct TestWriteResponse {} #[service] impl TestService { pub fn new(sdk: SDK) -> Self { Self { sdk } } #[cycles(10_000)] #[read] fn test_read(&self, ctx: ServiceContext, payload: String) -> ServiceResponse { let value: String = self.sdk.get_value(&payload).unwrap_or_default(); ServiceResponse::from_succeed(value) } #[cycles(21_000)] #[write] fn test_write( &mut self, ctx: ServiceContext, payload: TestWritePayload, ) -> ServiceResponse { self.sdk.set_value(payload.key, payload.value); ServiceResponse::::from_succeed(TestWriteResponse {}) } #[cycles(21_000)] #[write] fn test_revert_event( &mut self, ctx: ServiceContext, _: TestWritePayload, ) -> ServiceResponse { ServiceResponse::from_error(111, "error".to_owned()) } #[cycles(21_000)] #[write] fn test_event( &mut self, ctx: ServiceContext, _: TestWritePayload, ) -> ServiceResponse { ctx.emit_event("wow".to_owned(), "test-name".to_owned(), "test".to_owned()); ServiceResponse::from_succeed(TestWriteResponse::default()) } #[cycles(21_000)] #[write] fn test_service_call_invoke_hook_only_once( &mut self, ctx: ServiceContext, payload: TestWritePayload, ) -> ServiceResponse { self.test_write(ctx, payload); ServiceResponse::::from_succeed(TestWriteResponse {}) } #[cycles(21_000)] #[write] fn test_panic(&mut self, ctx: ServiceContext, _payload: String) -> ServiceResponse<()> { panic!("hello panic"); } #[cycles(21_000)] 
#[write]
fn tx_hook_before_panic(
    &mut self,
    ctx: ServiceContext,
    _payload: String,
) -> ServiceResponse<()> {
    // Marker write: if the before-hook panics, this value must NOT persist.
    self.sdk.set_value(
        "tx_hook_before_panic".to_owned(),
        "tx_hook_before_panic".to_owned(),
    );
    ServiceResponse::from_succeed(())
}

#[cycles(21_000)]
#[write]
fn tx_hook_after_panic(
    &mut self,
    ctx: ServiceContext,
    _payload: String,
) -> ServiceResponse<()> {
    // Marker write: the tx body itself succeeds; only the after-hook panics.
    self.sdk.set_value(
        "tx_hook_after_panic".to_owned(),
        "tx_hook_after_panic".to_owned(),
    );
    ServiceResponse::from_succeed(())
}

/// Before-hook: emits an event when asked to via the payload, panics when
/// the target method is `tx_hook_before_panic`, and records a "before"
/// marker in state.
#[tx_hook_before]
fn test_tx_hook_before(&mut self, ctx: ServiceContext) -> ServiceResponse<()> {
    if ctx.get_service_name() == "test"
        && ctx.get_payload().to_owned().contains("test_hook_before")
    {
        ctx.emit_event(
            "test_service".to_owned(),
            "test-name".to_owned(),
            "test_tx_hook_before invoked".to_owned(),
        );
    }
    if ctx.get_service_method() == "tx_hook_before_panic" {
        panic!("tx hook before");
    }
    self.sdk.set_value("before".to_owned(), "before".to_owned());
    ServiceResponse::from_succeed(())
}

/// After-hook: mirror of `test_tx_hook_before` for the post-tx phase.
#[tx_hook_after]
fn test_tx_hook_after(&mut self, ctx: ServiceContext) -> ServiceResponse<()> {
    if ctx.get_service_name() == "test"
        && ctx.get_payload().to_owned().contains("test_hook_after")
    {
        ctx.emit_event(
            "test_service".to_owned(),
            "test-name".to_owned(),
            "test_tx_hook_after invoked".to_owned(),
        );
    }
    if ctx.get_service_method() == "tx_hook_after_panic" {
        // FIX: message previously said "tx hook before" (copy-paste from the
        // before-hook). Tests only assert the exec result is an error, so
        // correcting the message is safe.
        panic!("tx hook after");
    }
    self.sdk.set_value("after".to_owned(), "after".to_owned());
    ServiceResponse::from_succeed(())
}
}

================================================
FILE: framework/src/lib.rs
================================================
#![feature(vec_remove_item)]
#![feature(test)]

pub mod binding;
pub mod executor;

================================================
FILE: jenkins-x-chaos.yml
================================================
buildPack: none
pipelineConfig:
  pipelines:
    pullRequest:
      pipeline:
        agent:
          image: mutadev/muta-build-env:v0.3.0
        options:
          timeout:
            time: 180 # 3H
            unit: minutes
        stages:
          - name: chaos
            environment:
- name: BASE_WORKSPACE value: /workspace/source - name: NODE_SIZE value: "4" - name: CHAIN_GENESIS_TIMEOUT_GAP value: "9999" options: containerOptions: volumeMounts: - name: jenkins-docker-cfg mountPath: /kaniko/.docker resources: limits: cpu: 4 memory: 8Gi requests: cpu: 2 memory: 8Gi volumes: - name: jenkins-docker-cfg secret: secretName: jenkins-docker-cfg items: - key: config.json path: config.json steps: - name: build-release image: mutadev/muta-build-env:v0.3.0 env: - name: OPENSSL_STATIC value: "1" - name: OPENSSL_LIB_DIR value: /usr/lib/x86_64-linux-gnu - name: OPENSSL_INCLUDE_DIR value: /usr/include/openssl command: cargo args: - build - --release - --example - muta-chain - name: push-image image: gcr.io/kaniko-project/executor:9912ccbf8d22bbafbf971124600fbb0b13b9cbd6 command: /kaniko/executor args: - --dockerfile=/workspace/source/devtools/docker-build/Dockerfile - --destination=mutadev/${REPO_NAME}:pr-${PULL_NUMBER}-${BUILD_NUMBER} - --context=/workspace/source - name: create-chaos-crd image: alpine/helm:3.2.4 command: helm args: - install - chaos-${REPO_NAME}-pr-${PULL_NUMBER}-${BUILD_NUMBER} - charts/deploy-chaos - --namespace - mutadev - --set - size=${NODE_SIZE},repo_name=${REPO_NAME},version=pr-${PULL_NUMBER}-${BUILD_NUMBER},resources.cpu=1100m,resources.memory=8Gi,chain_genesis.metadata.timeout_gap=${CHAIN_GENESIS_TIMEOUT_GAP} - name: watchdog image: mutadev/muta-watchdog:v0.2.0-rc env: - name: WATCH_DURATION value: 1H - name: APP_NAMESPACE value: mutadev - name: APP_PORT value: "8000" - name: APP_GRAPHQL_URL value: graphql - name: JOB_BENCHMARK_DURATION value: "300" - name: JOB_BENCHMARK_TIMEOUT_GAP value: "9999" - name: JOB_BENCHMARK_CPU value: "3" command: APP_NAME=chaos-${REPO_NAME}-pr-${PULL_NUMBER}-${BUILD_NUMBER} node /watchdog/index.js - name: delete-chaos-crd image: alpine/helm:3.2.4 command: helm args: - uninstall - chaos-${REPO_NAME}-pr-${PULL_NUMBER}-${BUILD_NUMBER} - --namespace - mutadev 
================================================ FILE: jenkins-x-e2e.yml ================================================ buildPack: none pipelineConfig: pipelines: pullRequest: pipeline: agent: image: mutadev/muta-e2e-env:v0.3.0 options: timeout: time: 30 unit: minutes stages: - name: e2e options: containerOptions: resources: limits: cpu: 4 memory: 8Gi requests: cpu: 2 memory: 8Gi steps: - name: e2e command: make args: - e2e-test ================================================ FILE: jenkins-x-lint.yml ================================================ buildPack: none pipelineConfig: pipelines: pullRequest: pipeline: agent: image: mutadev/muta-build-env:v0.3.0 options: timeout: time: 30 unit: minutes stages: - name: lint options: containerOptions: resources: limits: cpu: 4 memory: 8Gi requests: cpu: 2 memory: 8Gi steps: - name: fmt command: make args: - fmt - name: clippy command: make args: - clippy ================================================ FILE: jenkins-x-unit.yml ================================================ buildPack: none pipelineConfig: pipelines: pullRequest: pipeline: agent: image: mutadev/muta-build-env:v0.3.0 options: timeout: time: 60 unit: minutes stages: - name: unit options: containerOptions: resources: limits: cpu: 4 memory: 12Gi requests: cpu: 2 memory: 12Gi steps: - name: unit command: make args: - test ================================================ FILE: jenkins-x.yml ================================================ buildPack: none noReleasePrepare: true pipelineConfig: pipelines: release: pipeline: agent: image: mutadev/muta-build-env:v0.3.0 stages: - name: release environment: - name: BASE_WORKSPACE value: /workspace/source options: containerOptions: volumeMounts: - name: jenkins-docker-cfg mountPath: /kaniko/.docker resources: limits: cpu: 4 memory: 8Gi requests: cpu: 2 memory: 8Gi volumes: - name: jenkins-docker-cfg secret: secretName: jenkins-docker-cfg items: - key: config.json path: config.json steps: - name: build-release image: 
mutadev/muta-build-env:v0.3.0 env: - name: OPENSSL_STATIC value: "1" - name: OPENSSL_LIB_DIR value: /usr/lib/x86_64-linux-gnu - name: OPENSSL_INCLUDE_DIR value: /usr/include/openssl command: cargo args: - build - --release - --example - muta-chain - name: push-image image: gcr.io/kaniko-project/executor:9912ccbf8d22bbafbf971124600fbb0b13b9cbd6 command: /kaniko/executor args: - --dockerfile=/workspace/source/devtools/docker-build/Dockerfile - --destination=mutadev/${REPO_NAME}:latest - --context=/workspace/source ================================================ FILE: protocol/Cargo.toml ================================================ [package] name = "muta-protocol" version = "0.2.1" authors = ["Muta Dev "] edition = "2018" repository = "https://github.com/nervosnetwork/muta" license = "MIT" description = "Contains all the core data types and traits of the muta framework" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] futures = "0.3" derive_more = "0.99" async-trait = "0.1" lazy_static = "1.4" hex = "0.4" prost = "0.6" bytes = { version = "0.5", features = ["serde"] } hasher = { version = "0.1", features = ['hash-keccak'] } creep = "0.2" bincode = "1.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" rlp = "0.4" cita_trie = "2.0" json = "0.12" byteorder = "1.3" muta-codec-derive = "0.2" ophelia = "0.3" ophelia-secp256k1 = "0.3" bech32 = "0.7" arc-swap = "0.4" smol_str = "0.1" log = "0.4" [dev-dependencies] rayon = "1.3" rand = "0.7" ================================================ FILE: protocol/src/codec/block.rs ================================================ use std::convert::TryFrom; use bytes::Bytes; use prost::Message; use crate::{ codec::{ primitive::{Address, Hash}, CodecError, ProtocolCodecSync, }, field, impl_default_bytes_codec_for, types::primitive as protocol_primitive, ProtocolError, ProtocolResult, }; // ##################### // Protobuf // ##################### 
#[derive(Clone, Message)] pub struct Block { #[prost(message, tag = "1")] pub header: Option, #[prost(message, repeated, tag = "2")] pub ordered_tx_hashes: Vec, } #[derive(Clone, Message)] pub struct BlockHeader { #[prost(message, tag = "1")] pub chain_id: Option, #[prost(uint64, tag = "2")] pub height: u64, #[prost(message, tag = "3")] pub prev_hash: Option, #[prost(uint64, tag = "4")] pub timestamp: u64, #[prost(message, tag = "5")] pub order_root: Option, #[prost(message, tag = "6")] pub order_signed_transactions_hash: Option, #[prost(message, repeated, tag = "7")] pub confirm_root: Vec, #[prost(message, tag = "8")] pub state_root: Option, #[prost(message, repeated, tag = "9")] pub receipt_root: Vec, #[prost(message, repeated, tag = "10")] pub cycles_used: Vec, #[prost(message, tag = "11")] pub proposer: Option
, #[prost(message, tag = "12")] pub proof: Option, #[prost(uint64, tag = "13")] pub validator_version: u64, #[prost(message, repeated, tag = "14")] pub validators: Vec, #[prost(uint64, tag = "15")] pub exec_height: u64, } #[derive(Clone, Message)] pub struct Proof { #[prost(uint64, tag = "1")] pub height: u64, #[prost(uint64, tag = "2")] pub round: u64, #[prost(message, tag = "3")] pub block_hash: Option, #[prost(bytes, tag = "4")] pub signature: Vec, #[prost(bytes, tag = "5")] pub bitmap: Vec, } #[derive(Clone, Message)] pub struct Validator { #[prost(bytes, tag = "1")] pub pub_key: Vec, #[prost(uint32, tag = "2")] pub propose_weight: u32, #[prost(uint32, tag = "3")] pub vote_weight: u32, } #[derive(Clone, Message)] pub struct Pill { #[prost(message, tag = "1")] pub block: Option, #[prost(message, repeated, tag = "2")] pub propose_hashes: Vec, } // ################# // Conversion // ################# // Block impl From for Block { fn from(block: block::Block) -> Block { let header = Some(BlockHeader::from(block.header)); let ordered_tx_hashes = block .ordered_tx_hashes .into_iter() .map(Hash::from) .collect::>(); Block { header, ordered_tx_hashes, } } } impl TryFrom for block::Block { type Error = ProtocolError; fn try_from(block: Block) -> Result { let header = field!(block.header, "Block", "header")?; let mut ordered_tx_hashes = Vec::new(); for hash in block.ordered_tx_hashes { ordered_tx_hashes.push(protocol_primitive::Hash::try_from(hash)?); } let block = block::Block { header: block::BlockHeader::try_from(header)?, ordered_tx_hashes, }; Ok(block) } } // BlockHeader impl From for BlockHeader { fn from(block_header: block::BlockHeader) -> BlockHeader { let chain_id = Some(Hash::from(block_header.chain_id)); let prev_hash = Some(Hash::from(block_header.prev_hash)); let order_root = Some(Hash::from(block_header.order_root)); let order_signed_transactions_hash = Some(Hash::from(block_header.order_signed_transactions_hash)); let state_root = 
Some(Hash::from(block_header.state_root)); let proposer = Some(Address::from(block_header.proposer)); let proof = Some(Proof::from(block_header.proof)); let confirm_root = block_header .confirm_root .into_iter() .map(Hash::from) .collect::>(); let receipt_root = block_header .receipt_root .into_iter() .map(Hash::from) .collect::>(); let validators = block_header .validators .into_iter() .map(Validator::from) .collect::>(); BlockHeader { chain_id, height: block_header.height, exec_height: block_header.exec_height, prev_hash, timestamp: block_header.timestamp, order_root, order_signed_transactions_hash, confirm_root, state_root, receipt_root, cycles_used: block_header.cycles_used, proposer, proof, validator_version: block_header.validator_version, validators, } } } impl TryFrom for block::BlockHeader { type Error = ProtocolError; fn try_from(block_header: BlockHeader) -> Result { let chain_id = field!(block_header.chain_id, "BlockHeader", "chain_id")?; let prev_hash = field!(block_header.prev_hash, "BlockHeader", "prev_hash")?; let order_root = field!(block_header.order_root, "BlockHeader", "order_root")?; let order_signed_transactions_hash = field!( block_header.order_signed_transactions_hash, "BlockHeader", "order_signed_transactions_hash" )?; let state_root = field!(block_header.state_root, "BlockHeader", "state_root")?; let proposer = field!(block_header.proposer, "BlockHeader", "proposer")?; let proof = field!(block_header.proof, "BlockHeader", "proof")?; let mut confirm_root = Vec::new(); for root in block_header.confirm_root { confirm_root.push(protocol_primitive::Hash::try_from(root)?); } let mut receipt_root = Vec::new(); for root in block_header.receipt_root { receipt_root.push(protocol_primitive::Hash::try_from(root)?); } let mut validators = Vec::new(); for validator in block_header.validators { validators.push(block::Validator::try_from(validator)?); } let proof = block::BlockHeader { chain_id: protocol_primitive::Hash::try_from(chain_id)?, height: 
block_header.height, exec_height: block_header.exec_height, prev_hash: protocol_primitive::Hash::try_from(prev_hash)?, timestamp: block_header.timestamp, order_root: protocol_primitive::Hash::try_from(order_root)?, order_signed_transactions_hash: protocol_primitive::Hash::try_from( order_signed_transactions_hash, )?, confirm_root, state_root: protocol_primitive::Hash::try_from(state_root)?, receipt_root, cycles_used: block_header.cycles_used, proposer: protocol_primitive::Address::try_from(proposer)?, proof: block::Proof::try_from(proof)?, validator_version: block_header.validator_version, validators, }; Ok(proof) } } // Proof impl From for Proof { fn from(proof: block::Proof) -> Proof { let block_hash = Some(Hash::from(proof.block_hash)); Proof { height: proof.height, round: proof.round, block_hash, signature: proof.signature.to_vec(), bitmap: proof.bitmap.to_vec(), } } } impl TryFrom for block::Proof { type Error = ProtocolError; fn try_from(proof: Proof) -> Result { let block_hash = field!(proof.block_hash, "Proof", "block_hash")?; let proof = block::Proof { height: proof.height, round: proof.round, block_hash: protocol_primitive::Hash::try_from(block_hash)?, signature: Bytes::from(proof.signature), bitmap: Bytes::from(proof.bitmap), }; Ok(proof) } } // Validator impl From for Validator { fn from(validator: block::Validator) -> Validator { Validator { pub_key: validator.pub_key.to_vec(), propose_weight: validator.propose_weight, vote_weight: validator.vote_weight, } } } impl TryFrom for block::Validator { type Error = ProtocolError; fn try_from(validator: Validator) -> Result { let validator = block::Validator { pub_key: Bytes::from(validator.pub_key), propose_weight: validator.propose_weight, vote_weight: validator.vote_weight, }; Ok(validator) } } // Pill impl From for Pill { fn from(pill: block::Pill) -> Pill { let block = Some(Block::from(pill.block)); let propose_hashes = pill .propose_hashes .into_iter() .map(Hash::from) .collect::>(); Pill { block, 
propose_hashes, } } } impl TryFrom for block::Pill { type Error = ProtocolError; fn try_from(pill: Pill) -> Result { let block = field!(pill.block, "Pill", "block")?; let mut propose_hashes = Vec::new(); for hash in pill.propose_hashes { propose_hashes.push(protocol_primitive::Hash::try_from(hash)?); } let pill = block::Pill { block: block::Block::try_from(block)?, propose_hashes, }; Ok(pill) } } // ################# // Codec // ################# impl_default_bytes_codec_for!(block, [Block, BlockHeader, Proof, Validator, Pill]); #[cfg(test)] mod test { #[test] fn test_u8_convert_u32() { for i in u8::min_value()..u8::max_value() { let j = u32::from(i); assert_eq!(i, (j as u8)); } } } ================================================ FILE: protocol/src/codec/macro.rs ================================================ #[macro_export] macro_rules! field { ($opt_field:expr, $type:expr, $field:expr) => { $opt_field.ok_or_else(|| crate::codec::CodecError::MissingField { r#type: $type, field: $field, }) }; } #[macro_export] macro_rules! 
impl_default_bytes_codec_for { ($category:ident, [$($type:ident),+]) => ( use crate::types::$category; $( impl ProtocolCodecSync for $category::$type { fn encode_sync(&self) -> ProtocolResult { let ser_type = $type::from(self.clone()); let mut buf = Vec::with_capacity(ser_type.encoded_len()); ser_type.encode(&mut buf).map_err(CodecError::from)?; Ok(Bytes::from(buf)) } fn decode_sync(bytes: Bytes) -> ProtocolResult { let ser_type = $type::decode(bytes).map_err(CodecError::from)?; $category::$type::try_from(ser_type) } } )+ ) } ================================================ FILE: protocol/src/codec/mod.rs ================================================ // TODO: change Vec to Bytes // pin: https://github.com/danburkert/prost/pull/190 #[macro_use] mod r#macro; pub mod block; pub mod primitive; pub mod receipt; #[cfg(test)] mod tests; pub mod transaction; use std::error::Error; use async_trait::async_trait; use bytes::Bytes; use derive_more::{Display, From}; use crate::{ProtocolError, ProtocolErrorKind, ProtocolResult}; pub use serde::{Deserialize, Serialize}; #[async_trait] pub trait ProtocolCodec: Sized + Send + ProtocolCodecSync { // Note: We take mut reference so that it can be pinned. This removes Sync // requirement. async fn encode(&mut self) -> ProtocolResult; async fn decode + Send>(bytes: B) -> ProtocolResult; } // Sync version is still useful in some cases, for example, use in Stream. 
// This also work around #[async_trait] problem inside macro #[doc(hidden)] pub trait ProtocolCodecSync: Sized + Send { fn encode_sync(&self) -> ProtocolResult; fn decode_sync(bytes: Bytes) -> ProtocolResult; } #[async_trait] impl ProtocolCodec for T { async fn encode(&mut self) -> ProtocolResult { ::encode_sync(self) } async fn decode + Send>(bytes: B) -> ProtocolResult { let bytes: Bytes = bytes.into(); ::decode_sync(bytes) } } impl ProtocolCodecSync for Bytes { fn encode_sync(&self) -> ProtocolResult { Ok(self.clone()) } fn decode_sync(bytes: Bytes) -> ProtocolResult { Ok(bytes) } } #[derive(Debug, From, Display)] pub enum CodecError { #[display(fmt = "prost encode: {}", _0)] ProtobufEncode(prost::EncodeError), #[display(fmt = "prost decode: {}", _0)] ProtobufDecode(prost::DecodeError), #[display(fmt = "{} missing field {}", r#type, field)] MissingField { r#type: &'static str, field: &'static str, }, #[display(fmt = "invalid contract type {}", _0)] InvalidContractType(i32), #[display(fmt = "wrong bytes length: {{ expect: {}, got: {} }}", expect, real)] WrongBytesLength { expect: usize, real: usize }, #[display(fmt = "from string {}", _0)] FromStringUtf8(std::string::FromUtf8Error), } impl Error for CodecError {} // TODO: derive macro impl From for ProtocolError { fn from(err: CodecError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Codec, Box::new(err)) } } ================================================ FILE: protocol/src/codec/primitive.rs ================================================ use std::{convert::TryFrom, default::Default, mem}; use byteorder::{ByteOrder, LittleEndian}; use bytes::{Bytes, BytesMut}; use derive_more::From; use prost::Message; use crate::{ codec::{CodecError, ProtocolCodecSync}, field, impl_default_bytes_codec_for, types::primitive as protocol_primitive, ProtocolError, ProtocolResult, }; // ##################### // Protobuf // ##################### #[derive(Clone, Message, From)] pub struct Hash { #[prost(bytes, tag = 
"1")] pub value: Vec, } #[derive(Clone, Message, From)] pub struct MerkleRoot { #[prost(message, tag = "1")] pub value: Option, } #[derive(Clone, Message, From)] pub struct Address { #[prost(bytes, tag = "1")] pub value: Vec, } // ##################### // Conversion // ##################### // Hash impl From for Hash { fn from(hash: protocol_primitive::Hash) -> Hash { let value = hash.as_bytes().to_vec(); Hash { value } } } impl TryFrom for protocol_primitive::Hash { type Error = ProtocolError; fn try_from(hash: Hash) -> Result { let bytes = Bytes::from(hash.value); protocol_primitive::Hash::from_bytes(bytes) } } // Address impl From for Address { fn from(address: protocol_primitive::Address) -> Address { let value = address.as_bytes().to_vec(); Address { value } } } impl TryFrom
for protocol_primitive::Address { type Error = ProtocolError; fn try_from(address: Address) -> Result { let bytes = Bytes::from(address.value); protocol_primitive::Address::from_bytes(bytes) } } // MerkleRoot impl From for MerkleRoot { fn from(root: protocol_primitive::MerkleRoot) -> MerkleRoot { let value = Some(Hash::from(root)); MerkleRoot { value } } } impl TryFrom for protocol_primitive::MerkleRoot { type Error = ProtocolError; fn try_from(root: MerkleRoot) -> Result { let hash = field!(root.value, "MerkleRoot", "value")?; protocol_primitive::Hash::try_from(hash) } } // ##################### // Codec // ##################### // MerkleRoot and AssetID are just Hash aliases impl_default_bytes_codec_for!(primitive, [Hash, Address]); impl ProtocolCodecSync for u64 { fn encode_sync(&self) -> ProtocolResult { let mut buf = [0u8; mem::size_of::()]; LittleEndian::write_u64(&mut buf, *self); Ok(BytesMut::from(buf.as_ref()).freeze()) } fn decode_sync(bytes: Bytes) -> ProtocolResult { Ok(LittleEndian::read_u64(bytes.as_ref())) } } // ##################### // Util // ##################### #[allow(dead_code)] fn ensure_len(real: usize, expect: usize) -> Result<(), CodecError> { if real != expect { return Err(CodecError::WrongBytesLength { expect, real }); } Ok(()) } ================================================ FILE: protocol/src/codec/receipt.rs ================================================ use std::convert::TryFrom; use bytes::Bytes; use prost::Message; use crate::{ codec::{primitive::Hash, CodecError, ProtocolCodecSync}, field, impl_default_bytes_codec_for, traits::ServiceResponse, types::primitive as protocol_primitive, types::receipt as protocol_receipt, ProtocolError, ProtocolResult, }; // ##################### // Protobuf // ##################### #[derive(Clone, Message)] pub struct Receipt { #[prost(message, tag = "1")] pub state_root: Option, #[prost(uint64, tag = "2")] pub height: u64, #[prost(message, tag = "3")] pub tx_hash: Option, #[prost(uint64, tag = 
"4")] pub cycles_used: u64, #[prost(message, repeated, tag = "5")] pub events: Vec, #[prost(message, tag = "6")] pub response: Option, } #[derive(Clone, Message)] pub struct ReceiptResponse { #[prost(bytes, tag = "1")] pub service_name: Vec, #[prost(bytes, tag = "2")] pub method: Vec, #[prost(uint64, tag = "3")] pub code: u64, #[prost(bytes, tag = "4")] pub succeed_data: Vec, #[prost(bytes, tag = "5")] pub error_message: Vec, } #[derive(Clone, Message)] pub struct Event { #[prost(bytes, tag = "1")] pub service: Vec, #[prost(bytes, tag = "2")] pub name: Vec, #[prost(bytes, tag = "3")] pub data: Vec, } // ################# // Conversion // ################# // ReceiptResult impl From for ReceiptResponse { fn from(response: receipt::ReceiptResponse) -> ReceiptResponse { ReceiptResponse { service_name: response.service_name.as_bytes().to_vec(), method: response.method.as_bytes().to_vec(), code: response.response.code, succeed_data: response.response.succeed_data.as_bytes().to_vec(), error_message: response.response.error_message.as_bytes().to_vec(), } } } impl TryFrom for receipt::ReceiptResponse { type Error = ProtocolError; fn try_from(response: ReceiptResponse) -> Result { Ok(receipt::ReceiptResponse { service_name: String::from_utf8(response.service_name) .map_err(CodecError::FromStringUtf8)?, method: String::from_utf8(response.method).map_err(CodecError::FromStringUtf8)?, response: ServiceResponse { code: response.code, succeed_data: String::from_utf8(response.succeed_data) .map_err(CodecError::FromStringUtf8)?, error_message: String::from_utf8(response.error_message) .map_err(CodecError::FromStringUtf8)?, }, }) } } // Receipt impl From for Receipt { fn from(receipt: receipt::Receipt) -> Receipt { let state_root = Some(Hash::from(receipt.state_root)); let tx_hash = Some(Hash::from(receipt.tx_hash)); let events = receipt.events.into_iter().map(Event::from).collect(); let response = Some(ReceiptResponse::from(receipt.response)); Receipt { state_root, height: 
receipt.height, tx_hash, cycles_used: receipt.cycles_used, events, response, } } } impl TryFrom for receipt::Receipt { type Error = ProtocolError; fn try_from(receipt: Receipt) -> Result { let state_root = field!(receipt.state_root, "Receipt", "state_root")?; let tx_hash = field!(receipt.tx_hash, "Receipt", "tx_hash")?; let response = field!(receipt.response, "Receipt", "response")?; let events = receipt .events .into_iter() .map(protocol_receipt::Event::try_from) .collect::, ProtocolError>>()?; let receipt = receipt::Receipt { state_root: protocol_primitive::Hash::try_from(state_root)?, height: receipt.height, tx_hash: protocol_primitive::Hash::try_from(tx_hash)?, cycles_used: receipt.cycles_used, events, response: receipt::ReceiptResponse::try_from(response)?, }; Ok(receipt) } } // Event impl From for Event { fn from(event: receipt::Event) -> Event { Event { service: event.service.as_bytes().to_vec(), name: event.name.as_bytes().to_vec(), data: event.data.as_bytes().to_vec(), } } } impl TryFrom for receipt::Event { type Error = ProtocolError; fn try_from(event: Event) -> Result { Ok(receipt::Event { service: String::from_utf8(event.service).map_err(CodecError::FromStringUtf8)?, name: String::from_utf8(event.name).map_err(CodecError::FromStringUtf8)?, data: String::from_utf8(event.data).map_err(CodecError::FromStringUtf8)?, }) } } // ################# // Codec // ################# impl_default_bytes_codec_for!(receipt, [Receipt]); ================================================ FILE: protocol/src/codec/tests/mod.rs ================================================ extern crate test; use std::convert::TryInto; use bytes::Bytes; use test::Bencher; use crate::codec::ProtocolCodecSync; use crate::types::block::Block; use crate::types::transaction::SignedTransaction; use crate::{codec, types}; use crate::fixed_codec::tests::*; macro_rules! 
test { ($mod: ident, $r#type: ident, $mock_func: ident $(, $arg: expr)*) => { { let before_val = $mock_func($($arg),*); let codec_val: codec::$mod::$r#type = before_val.into(); let after_val: types::$mod::$r#type = codec_val.try_into().unwrap(); after_val } }; } #[test] fn test_codec() { test!(primitive, Hash, mock_hash); test!(primitive, MerkleRoot, mock_merkle_root); test!(receipt, Receipt, mock_receipt); test!(transaction, TransactionRequest, mock_transaction_request); test!(transaction, RawTransaction, mock_raw_tx); test!(transaction, SignedTransaction, mock_sign_tx); test!(block, Validator, mock_validator); test!(block, Proof, mock_proof); test!(block, BlockHeader, mock_block_header); test!(block, Block, mock_block, 100); test!(block, Pill, mock_pill, 100, 200); } #[test] fn test_signed_tx_serialize_size() { let txs: Vec = (0..50_000) .map(|_| mock_sign_tx().encode_sync().unwrap()) .collect(); let size = &txs.iter().fold(0, |acc, x| acc + x.len()); println!("1 tx size {:?}", txs[1].len()); println!("50_000 tx size {:?}", size); } #[bench] fn bench_signed_tx_serialize(b: &mut Bencher) { let txs: Vec = (0..50_000).map(|_| mock_sign_tx()).collect(); b.iter(|| { txs.iter().for_each(|signed_tx| { signed_tx.encode_sync().unwrap(); }); }); } #[bench] fn bench_signed_tx_deserialize(b: &mut Bencher) { let txs: Vec = (0..50_000) .map(|_| mock_sign_tx().encode_sync().unwrap()) .collect(); b.iter(|| { txs.iter().for_each(|signed_tx| { SignedTransaction::decode_sync(signed_tx.clone()).unwrap(); }); }); } #[bench] fn bench_block_serialize(b: &mut Bencher) { let block = mock_block(50_000); b.iter(|| { block.encode_sync().unwrap(); }); } #[bench] fn bench_block_try_into(b: &mut Bencher) { let block = mock_block(50_000).encode_sync().unwrap(); b.iter(|| { Block::decode_sync(block.clone()).unwrap(); }); } ================================================ FILE: protocol/src/codec/transaction.rs ================================================ use std::convert::TryFrom; use 
bytes::Bytes; use prost::Message; use crate::{ codec::primitive::{Address, Hash}, codec::{CodecError, ProtocolCodecSync}, field, impl_default_bytes_codec_for, types::primitive as protocol_primitive, ProtocolError, ProtocolResult, }; #[derive(Clone, Message)] pub struct TransactionRequest { #[prost(bytes, tag = "1")] pub service_name: Vec, #[prost(bytes, tag = "2")] pub method: Vec, #[prost(bytes, tag = "3")] pub payload: Vec, } #[derive(Clone, Message)] pub struct RawTransaction { #[prost(message, tag = "1")] pub chain_id: Option, #[prost(message, tag = "2")] pub nonce: Option, #[prost(uint64, tag = "3")] pub timeout: u64, #[prost(uint64, tag = "4")] pub cycles_price: u64, #[prost(uint64, tag = "5")] pub cycles_limit: u64, #[prost(message, tag = "6")] pub request: Option, #[prost(message, tag = "7")] pub sender: Option
, } #[derive(Clone, Message)] pub struct SignedTransaction { #[prost(message, tag = "1")] pub raw: Option, #[prost(message, tag = "2")] pub tx_hash: Option, #[prost(bytes, tag = "3")] pub pubkey: Vec, #[prost(bytes, tag = "4")] pub signature: Vec, } // ################# // Conversion // ################# // TransactionAction impl From for TransactionRequest { fn from(request: transaction::TransactionRequest) -> TransactionRequest { TransactionRequest { service_name: request.service_name.as_bytes().to_vec(), method: request.method.as_bytes().to_vec(), payload: request.payload.as_bytes().to_vec(), } } } impl TryFrom for transaction::TransactionRequest { type Error = ProtocolError; fn try_from( request: TransactionRequest, ) -> Result { Ok(transaction::TransactionRequest { service_name: String::from_utf8(request.service_name) .map_err(CodecError::FromStringUtf8)?, method: String::from_utf8(request.method).map_err(CodecError::FromStringUtf8)?, payload: String::from_utf8(request.payload).map_err(CodecError::FromStringUtf8)?, }) } } // RawTransaction impl From for RawTransaction { fn from(raw: transaction::RawTransaction) -> RawTransaction { let chain_id = Some(Hash::from(raw.chain_id)); let nonce = Some(Hash::from(raw.nonce)); let request = Some(TransactionRequest::from(raw.request)); let sender = Some(Address::from(raw.sender)); RawTransaction { chain_id, nonce, cycles_price: raw.cycles_price, timeout: raw.timeout, cycles_limit: raw.cycles_limit, request, sender, } } } impl TryFrom for transaction::RawTransaction { type Error = ProtocolError; fn try_from(raw: RawTransaction) -> Result { let chain_id = field!(raw.chain_id, "RawTransaction", "chain_id")?; let nonce = field!(raw.nonce, "RawTransaction", "nonce")?; let request = field!(raw.request, "RawTransaction", "request")?; let sender = field!(raw.sender, "RawTransaction", "sender")?; let raw_tx = transaction::RawTransaction { chain_id: protocol_primitive::Hash::try_from(chain_id)?, nonce: 
protocol_primitive::Hash::try_from(nonce)?, timeout: raw.timeout, cycles_price: raw.cycles_price, cycles_limit: raw.cycles_limit, request: transaction::TransactionRequest::try_from(request)?, sender: protocol_primitive::Address::try_from(sender)?, }; Ok(raw_tx) } } // SignedTransaction impl From for SignedTransaction { fn from(stx: transaction::SignedTransaction) -> SignedTransaction { let raw = RawTransaction::from(stx.raw); let tx_hash = Hash::from(stx.tx_hash); SignedTransaction { raw: Some(raw), tx_hash: Some(tx_hash), pubkey: stx.pubkey.to_vec(), signature: stx.signature.to_vec(), } } } impl TryFrom for transaction::SignedTransaction { type Error = ProtocolError; fn try_from(stx: SignedTransaction) -> Result { let raw = field!(stx.raw, "SignedTransaction", "raw")?; let tx_hash = field!(stx.tx_hash, "SignedTransaction", "tx_hash")?; let stx = transaction::SignedTransaction { raw: transaction::RawTransaction::try_from(raw)?, tx_hash: protocol_primitive::Hash::try_from(tx_hash)?, pubkey: Bytes::from(stx.pubkey), signature: Bytes::from(stx.signature), }; Ok(stx) } } // ################# // Codec // ################# impl_default_bytes_codec_for!(transaction, [RawTransaction, SignedTransaction]); ================================================ FILE: protocol/src/fixed_codec/mod.rs ================================================ pub mod primitive; pub mod receipt; #[cfg(test)] pub mod tests; pub mod transaction; use std::error::Error; use bytes::Bytes; use derive_more::{Display, From}; use crate::{ProtocolError, ProtocolErrorKind, ProtocolResult}; // Consistent serialization trait using rlp-algorithm pub trait FixedCodec: Sized { fn encode_fixed(&self) -> ProtocolResult; fn decode_fixed(bytes: Bytes) -> ProtocolResult; } #[derive(Debug, Display, From)] pub enum FixedCodecError { Decoder(rlp::DecoderError), StringUTF8(std::string::FromUtf8Error), #[display(fmt = "wrong bytes of bool")] DecodeBool, #[display(fmt = "wrong bytes of u8")] DecodeUint8, } impl Error for 
FixedCodecError {} impl From for ProtocolError { fn from(err: FixedCodecError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::FixedCodec, Box::new(err)) } } ================================================ FILE: protocol/src/fixed_codec/primitive.rs ================================================ use std::mem; use byteorder::{ByteOrder, LittleEndian}; use crate::fixed_codec::{FixedCodec, FixedCodecError}; use crate::types::{Bytes, BytesMut, Hex}; use crate::ProtocolResult; impl FixedCodec for bool { fn encode_fixed(&self) -> ProtocolResult { let bs = if *self { [1u8; mem::size_of::()] } else { [0u8; mem::size_of::()] }; Ok(BytesMut::from(bs.as_ref()).freeze()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { let u = *bytes.to_vec().get(0).ok_or(FixedCodecError::DecodeBool)?; match u { 0 => Ok(false), 1 => Ok(true), _ => Err(FixedCodecError::DecodeBool.into()), } } } impl FixedCodec for u8 { fn encode_fixed(&self) -> ProtocolResult { Ok(BytesMut::from([*self].as_ref()).freeze()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { let u = *bytes.to_vec().get(0).ok_or(FixedCodecError::DecodeUint8)?; Ok(u) } } impl FixedCodec for u16 { fn encode_fixed(&self) -> ProtocolResult { let mut buf = [0u8; mem::size_of::()]; LittleEndian::write_u16(&mut buf, *self); Ok(BytesMut::from(buf.as_ref()).freeze()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(LittleEndian::read_u16(bytes.as_ref())) } } impl FixedCodec for u32 { fn encode_fixed(&self) -> ProtocolResult { let mut buf = [0u8; mem::size_of::()]; LittleEndian::write_u32(&mut buf, *self); Ok(BytesMut::from(buf.as_ref()).freeze()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(LittleEndian::read_u32(bytes.as_ref())) } } impl FixedCodec for u64 { fn encode_fixed(&self) -> ProtocolResult { let mut buf = [0u8; mem::size_of::()]; LittleEndian::write_u64(&mut buf, *self); Ok(BytesMut::from(buf.as_ref()).freeze()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { 
Ok(LittleEndian::read_u64(bytes.as_ref())) } } impl FixedCodec for u128 { fn encode_fixed(&self) -> ProtocolResult { let mut buf = [0u8; mem::size_of::()]; LittleEndian::write_u128(&mut buf, *self); Ok(BytesMut::from(buf.as_ref()).freeze()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(LittleEndian::read_u128(bytes.as_ref())) } } impl FixedCodec for String { fn encode_fixed(&self) -> ProtocolResult { Ok(Bytes::from(self.clone())) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { String::from_utf8(bytes.to_vec()).map_err(|e| FixedCodecError::StringUTF8(e).into()) } } impl FixedCodec for Bytes { fn encode_fixed(&self) -> ProtocolResult { Ok(self.clone()) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(bytes) } } impl FixedCodec for Vec { fn encode_fixed(&self) -> ProtocolResult { Ok(Bytes::from(self.clone())) } fn decode_fixed(bytes: Bytes) -> ProtocolResult { Ok(bytes.to_vec()) } } impl FixedCodec for Hex { fn encode_fixed(&self) -> ProtocolResult { let bytes = self.as_string_trim0x().as_bytes().to_vec(); Ok(bytes::Bytes::from(bytes)) } fn decode_fixed(bytes: bytes::Bytes) -> ProtocolResult { let s = String::from_utf8(bytes.to_vec()).map_err(FixedCodecError::StringUTF8)?; Ok(Hex::from_string("0x".to_owned() + s.as_str())?) 
} } ================================================ FILE: protocol/src/fixed_codec/receipt.rs ================================================ use crate::fixed_codec::{FixedCodec, FixedCodecError}; use crate::traits::ServiceResponse; use crate::types::receipt::ReceiptResponse; use crate::ProtocolResult; impl rlp::Encodable for ReceiptResponse { fn rlp_append(&self, s: &mut rlp::RlpStream) { s.begin_list(5) .append(&self.response.code) .append(&self.response.succeed_data) .append(&self.response.error_message) .append(&self.method) .append(&self.service_name); } } impl rlp::Decodable for ReceiptResponse { fn decode(r: &rlp::Rlp) -> Result { if !r.is_list() && r.size() != 5 { return Err(rlp::DecoderError::RlpIncorrectListLen); } let code = r.at(0)?.as_val()?; let succeed_data = r.at(1)?.as_val()?; let error_message = r.at(2)?.as_val()?; let method = r.at(3)?.as_val()?; let service_name = r.at(4)?.as_val()?; Ok(ReceiptResponse { service_name, method, response: ServiceResponse { code, succeed_data, error_message, }, }) } } impl FixedCodec for ReceiptResponse { fn encode_fixed(&self) -> ProtocolResult { Ok(bytes::Bytes::from(rlp::encode(self))) } fn decode_fixed(bytes: bytes::Bytes) -> ProtocolResult { Ok(rlp::decode(bytes.as_ref()).map_err(FixedCodecError::from)?) } } ================================================ FILE: protocol/src/fixed_codec/tests/fixed_codec.rs ================================================ extern crate test; use test::Bencher; use crate::fixed_codec::FixedCodec; use crate::types; use super::*; macro_rules! 
test_eq { ($category: ident, $r#type: ident, $mock_func: ident $(, $arg: expr)*) => { let before_val = $mock_func($($arg),*); let rlp_bytes = before_val.encode_fixed().unwrap(); let after_val: types::$category::$r#type = <_>::decode_fixed(rlp_bytes.clone()).unwrap(); assert_eq!(before_val, after_val); }; } #[test] fn test_fixed_codec_primitive() { let bs = true.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), true); let bs = false.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), false); let bs = 0u8.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), 0u8); let bs = 8u8.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), 8u8); let bs = 8u32.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), 8u32); let bs = 8u64.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), 8u64); let bs = 8u128.encode_fixed().unwrap(); assert_eq!(::decode_fixed(bs).unwrap(), 8u64); let bs = "test".to_owned().encode_fixed().unwrap(); assert_eq!( ::decode_fixed(bs).unwrap(), "test".to_owned() ); } #[test] fn test_fixed_codec() { test_eq!(primitive, Hash, mock_hash); test_eq!(transaction, RawTransaction, mock_raw_tx); test_eq!(transaction, SignedTransaction, mock_sign_tx); test_eq!(block, Proof, mock_proof); test_eq!(block, BlockHeader, mock_block_header); test_eq!(block, Block, mock_block, 33); test_eq!(block, Pill, mock_pill, 22, 33); test_eq!(block, Validator, mock_validator); test_eq!(receipt, Receipt, mock_receipt); test_eq!(receipt, Receipt, mock_receipt); test_eq!(receipt, Receipt, mock_receipt); test_eq!(receipt, Receipt, mock_receipt); } #[test] fn test_signed_tx_serialize_size() { let txs: Vec = (0..50_000) .map(|_| mock_sign_tx().encode_fixed().unwrap()) .collect(); let size = &txs.iter().fold(0, |acc, x| acc + x.len()); println!("1 tx size {:?}", txs[1].len()); println!("50_000 tx size {:?}", size); } #[bench] fn bench_signed_tx_serialize(b: &mut Bencher) { let txs: Vec = (0..50_000).map(|_| 
mock_sign_tx()).collect(); b.iter(|| { txs.iter().for_each(|signed_tx| { signed_tx.encode_fixed().unwrap(); }); }); } #[bench] fn bench_signed_tx_deserialize(b: &mut Bencher) { let txs: Vec = (0..50_000) .map(|_| mock_sign_tx().encode_fixed().unwrap()) .collect(); b.iter(|| { txs.iter().for_each(|signed_tx| { SignedTransaction::decode_fixed(signed_tx.clone()).unwrap(); }); }); } #[bench] fn bench_block_serialize(b: &mut Bencher) { let block = mock_block(50_000); b.iter(|| { block.encode_fixed().unwrap(); }); } #[bench] fn bench_block_deserialize(b: &mut Bencher) { let block = mock_block(50_000).encode_fixed().unwrap(); b.iter(|| { Block::decode_fixed(block.clone()).unwrap(); }); } ================================================ FILE: protocol/src/fixed_codec/tests/mod.rs ================================================ mod fixed_codec; use bytes::Bytes; use rand::random; use crate::traits::ServiceResponse; use crate::types::block::{Block, BlockHeader, Pill, Proof, Validator}; use crate::types::primitive::{Address, Hash, MerkleRoot}; use crate::types::receipt::{Event, Receipt, ReceiptResponse}; use crate::types::transaction::{RawTransaction, SignedTransaction, TransactionRequest}; // ##################### // Mock Primitive // ##################### pub fn mock_hash() -> Hash { Hash::digest(get_random_bytes(10)) } pub fn mock_merkle_root() -> MerkleRoot { Hash::digest(get_random_bytes(10)) } pub fn mock_address() -> Address { let hash = mock_hash(); Address::from_hash(hash).unwrap() } // ##################### // Mock Receipt // ##################### pub fn mock_receipt_response() -> ReceiptResponse { ReceiptResponse { service_name: "mock-service".to_owned(), method: "mock-method".to_owned(), response: ServiceResponse:: { code: 0, succeed_data: "ok".to_owned(), error_message: "".to_owned(), }, } } pub fn mock_receipt() -> Receipt { Receipt { state_root: mock_merkle_root(), height: 13, tx_hash: mock_hash(), cycles_used: 100, events: vec![mock_event()], response: 
mock_receipt_response(), } } pub fn mock_event() -> Event { Event { service: "mock-event".to_owned(), name: "mock-name".to_owned(), data: "mock-data".to_owned(), } } // ##################### // Mock Transaction // ##################### pub fn mock_transaction_request() -> TransactionRequest { TransactionRequest { service_name: "mock-service".to_owned(), method: "mock-method".to_owned(), payload: "mock-payload".to_owned(), } } pub fn mock_raw_tx() -> RawTransaction { RawTransaction { chain_id: mock_hash(), nonce: mock_hash(), timeout: 100, cycles_price: 1, cycles_limit: 100, request: mock_transaction_request(), sender: mock_address(), } } pub fn mock_sign_tx() -> SignedTransaction { SignedTransaction { raw: mock_raw_tx(), tx_hash: mock_hash(), pubkey: Default::default(), signature: Default::default(), } } // ##################### // Mock Block // ##################### pub fn mock_validator() -> Validator { Validator { pub_key: get_random_bytes(32), propose_weight: 1, vote_weight: 1, } } pub fn mock_proof() -> Proof { Proof { height: 4, round: 99, block_hash: mock_hash(), signature: Default::default(), bitmap: Default::default(), } } pub fn mock_block_header() -> BlockHeader { BlockHeader { chain_id: mock_hash(), height: 42, exec_height: 41, prev_hash: mock_hash(), timestamp: 420_000_000, order_root: mock_merkle_root(), order_signed_transactions_hash: Hash::default(), confirm_root: vec![mock_hash(), mock_hash()], state_root: mock_merkle_root(), receipt_root: vec![mock_hash(), mock_hash()], cycles_used: vec![999_999], proposer: mock_address(), proof: mock_proof(), validator_version: 1, validators: vec![ mock_validator(), mock_validator(), mock_validator(), mock_validator(), ], } } pub fn mock_block(order_size: usize) -> Block { Block { header: mock_block_header(), ordered_tx_hashes: (0..order_size).map(|_| mock_hash()).collect(), } } pub fn mock_pill(order_size: usize, propose_size: usize) -> Pill { Pill { block: mock_block(order_size), propose_hashes: 
(0..propose_size).map(|_| mock_hash()).collect(),
    }
}

/// `len` bytes drawn from the thread-local RNG.
pub fn get_random_bytes(len: usize) -> Bytes {
    let vec: Vec<u8> = (0..len).map(|_| random::<u8>()).collect();
    Bytes::from(vec)
}

================================================
FILE: protocol/src/fixed_codec/transaction.rs
================================================
use bytes::BytesMut;

use crate::fixed_codec::{FixedCodec, FixedCodecError};
use crate::types::{Hash, RawTransaction, TransactionRequest};
use crate::ProtocolResult;

impl rlp::Encodable for RawTransaction {
    // Field order here IS the wire format; `Decodable::decode` below must read
    // the indices in exactly the same order:
    // 0 chain_id, 1 cycles_limit, 2 cycles_price, 3 nonce, 4 method,
    // 5 service_name, 6 payload, 7 timeout, 8 sender.
    fn rlp_append(&self, s: &mut rlp::RlpStream) {
        s.begin_list(9);
        s.append(&self.chain_id.as_bytes().to_vec());
        s.append(&self.cycles_limit);
        s.append(&self.cycles_price);
        s.append(&self.nonce.as_bytes().to_vec());
        s.append(&self.request.method);
        s.append(&self.request.service_name);
        s.append(&self.request.payload);
        s.append(&self.timeout);
        s.append(&self.sender);
    }
}

impl rlp::Decodable for RawTransaction {
    fn decode(r: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
        // A malformed hash payload is surfaced as an RLP length error so the
        // caller sees a plain `DecoderError` rather than a protocol error.
        let chain_id = Hash::from_bytes(BytesMut::from(r.at(0)?.data()?).freeze())
            .map_err(|_| rlp::DecoderError::RlpInvalidLength)?;
        let cycles_limit: u64 = r.at(1)?.as_val()?;
        let cycles_price: u64 = r.at(2)?.as_val()?;
        let nonce = Hash::from_bytes(BytesMut::from(r.at(3)?.data()?).freeze())
            .map_err(|_| rlp::DecoderError::RlpInvalidLength)?;
        let request = TransactionRequest {
            method: r.at(4)?.as_val()?,
            service_name: r.at(5)?.as_val()?,
            payload: r.at(6)?.as_val()?,
        };
        let timeout = r.at(7)?.as_val()?;
        let sender = r.at(8)?.as_val()?;

        Ok(Self {
            chain_id,
            cycles_price,
            cycles_limit,
            nonce,
            request,
            timeout,
            sender,
        })
    }
}

impl FixedCodec for RawTransaction {
    fn encode_fixed(&self) -> ProtocolResult<bytes::Bytes> {
        Ok(bytes::Bytes::from(rlp::encode(self)))
    }

    fn decode_fixed(bytes: bytes::Bytes) -> ProtocolResult<Self> {
        Ok(rlp::decode(bytes.as_ref()).map_err(FixedCodecError::from)?)
    }
}

================================================
FILE: protocol/src/lib.rs
================================================
#![feature(test)]
#![allow(clippy::mutable_key_type)]

pub mod codec;
pub mod fixed_codec;
pub mod traits;
pub mod types;

use std::error::Error;

pub use async_trait::async_trait;
pub use bytes::{Buf, BufMut, Bytes, BytesMut};
use derive_more::{Constructor, Display};

pub use types::{address_hrp, address_hrp_inited, init_address_hrp};

/// Coarse classification of where a `ProtocolError` originated.
#[derive(Debug, Clone)]
pub enum ProtocolErrorKind {
    // traits
    API,
    Consensus,
    Executor,
    Mempool,
    Network,
    Storage,
    Runtime,
    Binding,
    BindingMacro,
    Service,
    Main,

    // codec
    Codec,

    // fixed codec
    FixedCodec,

    // types
    Types,

    // metric
    Metric,

    Cli,
}

// refer to https://github.com/rust-lang/rust/blob/a17951c4f80eb5208030f91fdb4ae93919fa6b12/src/libstd/io/error.rs#L73
#[derive(Debug, Constructor, Display)]
#[display(fmt = "[ProtocolError] Kind: {:?} Error: {:?}", kind, error)]
pub struct ProtocolError {
    kind:  ProtocolErrorKind,
    error: Box<dyn Error + Send>,
}

impl From<ProtocolError> for Box<dyn Error + Send> {
    fn from(error: ProtocolError) -> Self {
        Box::new(error) as Box<dyn Error + Send>
    }
}

impl Error for ProtocolError {}

/// The `Result` alias used throughout the protocol crates.
pub type ProtocolResult<T> = Result<T, ProtocolError>;

================================================
FILE: protocol/src/traits/api.rs
================================================
use async_trait::async_trait;

use crate::traits::{Context, ServiceResponse};
use crate::types::{Address, Block, BlockHeader, Hash, Receipt, SignedTransaction};
use crate::ProtocolResult;

/// Adapter consumed by the API layer; `height: None` means "latest".
#[async_trait]
pub trait APIAdapter: Send + Sync {
    async fn insert_signed_txs(
        &self,
        ctx: Context,
        signed_tx: SignedTransaction,
    ) -> ProtocolResult<()>;

    async fn get_block_by_height(
        &self,
        ctx: Context,
        height: Option<u64>,
    ) -> ProtocolResult<Option<Block>>;

    async fn get_block_header_by_height(
        &self,
        ctx: Context,
        height: Option<u64>,
    ) -> ProtocolResult<Option<BlockHeader>>;

    async fn get_receipt_by_tx_hash(
        &self,
        ctx: Context,
        tx_hash: Hash,
    ) -> ProtocolResult<Option<Receipt>>;

    async fn get_transaction_by_hash(
        &self,
        ctx: Context,
        tx_hash: Hash,
    ) -> ProtocolResult<Option<SignedTransaction>>;
async fn query_service( &self, ctx: Context, height: u64, cycles_limit: u64, cycles_price: u64, caller: Address, service_name: String, method: String, payload: String, ) -> ProtocolResult>; } ================================================ FILE: protocol/src/traits/binding.rs ================================================ use std::iter::Iterator; use crate::fixed_codec::FixedCodec; use crate::traits::{ExecutorParams, ServiceResponse}; use crate::types::{Address, Block, Hash, MerkleRoot, Receipt, ServiceContext, SignedTransaction}; use crate::ProtocolResult; #[macro_export] macro_rules! try_service_response { ($service_resp: expr) => {{ if $service_resp.is_error() { return ServiceResponse::from_error($service_resp.code, $service_resp.error_message); } $service_resp.succeed_data }}; } pub trait SDKFactory { fn get_sdk(&self, name: &str) -> ProtocolResult; } pub trait ServiceMapping: Send + Sync { fn get_service>( &self, name: &str, factory: &Factory, ) -> ProtocolResult>; fn list_service_name(&self) -> Vec; } // `ServiceState` provides access to` world state` and `account` for` service`. // The bottom layer is an MPT tree. // // Each `service` will have a separate` ServiceState`, so their states are // isolated from each other. pub trait ServiceState { fn get(&self, key: &Key) -> ProtocolResult>; fn contains(&self, key: &Key) -> ProtocolResult; // Insert a pair of key / value // Note: This key/value pair will go into the cache first // and will not be persisted to MPT until `commit` is called. 
fn insert( &mut self, key: Key, value: Value, ) -> ProtocolResult<()>; fn get_account_value( &self, address: &Address, key: &Key, ) -> ProtocolResult>; fn set_account_value( &mut self, address: &Address, key: Key, val: Val, ) -> ProtocolResult<()>; // Roll back all data in the cache fn revert_cache(&mut self) -> ProtocolResult<()>; // Move data from cache to stash fn stash(&mut self) -> ProtocolResult<()>; // Persist data from stash into MPT fn commit(&mut self) -> ProtocolResult; } pub trait ChainQuerier { fn get_transaction_by_hash(&self, tx_hash: &Hash) -> ProtocolResult>; // To get the latest `Block` of finality, set `height` to `None` fn get_block_by_height(&self, height: Option) -> ProtocolResult>; fn get_receipt_by_hash(&self, tx_hash: &Hash) -> ProtocolResult>; } // Admission control will be called before entering service pub trait AdmissionControl { fn next(&self, ctx: ServiceContext, sdk: SDK) -> ProtocolResult<()>; } // Developers can use service to customize blockchain business // // It contains: // - init: Initialize the service. // - hooks: A pair of hooks that allow inserting a piece of logic before and // after the block is executed. 
// - read: Provide some read-only functions for users or other services to call // - write: provide some writable functions for users or other services to call pub trait Service { // Executed to create genesis states when starting chain fn genesis_(&mut self, _payload: String) {} // Called before block execution fn hook_before_(&mut self, _params: &ExecutorParams) {} // Called after block execution fn hook_after_(&mut self, _params: &ExecutorParams) {} // Called before tx execution fn tx_hook_before_(&mut self, _ctx: ServiceContext) -> ServiceResponse; // Called after tx execution fn tx_hook_after_(&mut self, _ctx: ServiceContext) -> ServiceResponse; fn write_(&mut self, ctx: ServiceContext) -> ServiceResponse; fn read_(&self, ctx: ServiceContext) -> ServiceResponse; } // `ServiceSDK` provides multiple rich interfaces for `service` developers // // It contains: // // - Various data structures that store data to `world state`(call // `alloc_or_recover_*`) // - Access and modify `account` // - Access service state // - Event triggered // - Access to data on the chain (block, transaction, receipt) // - Read / write other `service` // // In fact, these functions depend on: // // - ChainDB // - ServiceState pub trait ServiceSDK { // Alloc or recover a `Map` by` var_name` fn alloc_or_recover_map< Key: 'static + Send + FixedCodec + Clone + PartialEq, Val: 'static + FixedCodec, >( &mut self, var_name: &str, ) -> Box>; // Alloc or recover a `Array` by` var_name` fn alloc_or_recover_array( &mut self, var_name: &str, ) -> Box>; // Alloc or recover a `Uint64` by` var_name` fn alloc_or_recover_uint64(&mut self, var_name: &str) -> Box; // Alloc or recover a `String` by` var_name` fn alloc_or_recover_string(&mut self, var_name: &str) -> Box; // Alloc or recover a `Bool` by` var_name` fn alloc_or_recover_bool(&mut self, var_name: &str) -> Box; // Get a value from the service state by key fn get_value(&self, key: &Key) -> Option; // Set a value to the service state by key fn 
set_value(&mut self, key: Key, val: Val); // Get a value from the specified address by key fn get_account_value( &self, address: &Address, key: &Key, ) -> Option; // Insert a pair of key / value to the specified address fn set_account_value( &mut self, address: &Address, key: Key, val: Val, ); // Get a signed transaction by `tx_hash` // if not found on the chain, return None fn get_transaction_by_hash(&self, tx_hash: &Hash) -> Option; // Get a block by `height` // if not found on the chain, return None // When the parameter `height` is None, get the latest (executing)` block` fn get_block_by_height(&self, height: Option) -> Option; // Get a receipt by `tx_hash` // if not found on the chain, return None fn get_receipt_by_hash(&self, tx_hash: &Hash) -> Option; } pub trait StoreMap { fn get(&self, key: &K) -> Option; fn contains(&self, key: &K) -> bool; fn insert(&mut self, key: K, value: V); fn remove(&mut self, key: &K) -> Option; fn len(&self) -> u64; fn is_empty(&self) -> bool; fn iter<'a>(&'a self) -> Box + 'a>; } pub trait StoreArray { fn get(&self, index: u64) -> Option; fn push(&mut self, element: E); fn remove(&mut self, index: u64); fn len(&self) -> u64; fn is_empty(&self) -> bool; fn iter<'a>(&'a self) -> Box + 'a>; } pub trait StoreUint64 { fn get(&self) -> u64; fn set(&mut self, val: u64); // Add val with self // And set the result back to self fn safe_add(&mut self, val: u64) -> bool; // Self minus val // And set the result back to self fn safe_sub(&mut self, val: u64) -> bool; // Multiply val with self // And set the result back to self fn safe_mul(&mut self, val: u64) -> bool; // Power of self // And set the result back to self fn safe_pow(&mut self, val: u32) -> bool; // Self divided by val // And set the result back to self fn safe_div(&mut self, val: u64) -> bool; // Remainder of self // And set the result back to self fn safe_rem(&mut self, val: u64) -> bool; } pub trait StoreString { fn get(&self) -> String; fn set(&mut self, val: &str); fn 
len(&self) -> u64; fn is_empty(&self) -> bool; } pub trait StoreBool { fn get(&self) -> bool; fn set(&mut self, b: bool); } ================================================ FILE: protocol/src/traits/consensus.rs ================================================ use std::collections::HashMap; use async_trait::async_trait; use creep::Context; use crate::traits::{ExecutorParams, ExecutorResp, TrustFeedback}; use crate::types::{ Address, Block, BlockHeader, Bytes, Hash, Hex, MerkleRoot, Metadata, Proof, Receipt, SignedTransaction, Validator, }; use crate::{traits::mempool::MixedTxHashes, ProtocolResult}; #[derive(Clone, Debug, PartialEq, Eq)] pub enum MessageTarget { Broadcast, Specified(Bytes), } #[derive(Debug, Clone)] pub struct NodeInfo { pub chain_id: Hash, pub self_pub_key: Bytes, pub self_address: Address, } #[async_trait] pub trait Consensus: Send + Sync { /// Network set a received signed proposal to consensus. async fn set_proposal(&self, ctx: Context, proposal: Vec) -> ProtocolResult<()>; /// Network set a received signed vote to consensus. async fn set_vote(&self, ctx: Context, vote: Vec) -> ProtocolResult<()>; /// Network set a received quorum certificate to consensus. async fn set_qc(&self, ctx: Context, qc: Vec) -> ProtocolResult<()>; /// Network set a received signed choke to consensus. 
async fn set_choke(&self, ctx: Context, choke: Vec) -> ProtocolResult<()>; } #[async_trait] pub trait Synchronization: Send + Sync { async fn receive_remote_block(&self, ctx: Context, remote_height: u64) -> ProtocolResult<()>; } #[async_trait] pub trait SynchronizationAdapter: CommonConsensusAdapter + Send + Sync { fn update_status( &self, ctx: Context, height: u64, consensus_interval: u64, propose_ratio: u64, prevote_ratio: u64, precommit_ratio: u64, brake_ratio: u64, validators: Vec, ) -> ProtocolResult<()>; fn sync_exec( &self, ctx: Context, params: &ExecutorParams, txs: &[SignedTransaction], ) -> ProtocolResult; /// Pull some blocks from other nodes from `begin` to `end`. async fn get_block_from_remote(&self, ctx: Context, height: u64) -> ProtocolResult; /// Pull signed transactions corresponding to the given hashes from other /// nodes. async fn get_txs_from_remote( &self, ctx: Context, height: u64, hashes: &[Hash], ) -> ProtocolResult>; async fn get_proof_from_remote(&self, ctx: Context, height: u64) -> ProtocolResult; } #[async_trait] pub trait CommonConsensusAdapter: Send + Sync { /// Save a block to the database. async fn save_block(&self, ctx: Context, block: Block) -> ProtocolResult<()>; async fn save_proof(&self, ctx: Context, proof: Proof) -> ProtocolResult<()>; /// Save some signed transactions to the database. async fn save_signed_txs( &self, ctx: Context, block_height: u64, signed_txs: Vec, ) -> ProtocolResult<()>; async fn save_receipts( &self, ctx: Context, height: u64, receipts: Vec, ) -> ProtocolResult<()>; /// Flush the given transactions in the mempool. async fn flush_mempool(&self, ctx: Context, ordered_tx_hashes: &[Hash]) -> ProtocolResult<()>; /// Get a block corresponding to the given height. async fn get_block_by_height(&self, ctx: Context, height: u64) -> ProtocolResult; async fn get_block_header_by_height( &self, ctx: Context, height: u64, ) -> ProtocolResult; /// Get the current height from storage. 
async fn get_current_height(&self, ctx: Context) -> ProtocolResult; async fn get_txs_from_storage( &self, ctx: Context, tx_hashes: &[Hash], ) -> ProtocolResult>; async fn broadcast_height(&self, ctx: Context, height: u64) -> ProtocolResult<()>; /// Get metadata by the giving state_root. fn get_metadata( &self, context: Context, state_root: MerkleRoot, height: u64, timestamp: u64, proposer: Address, ) -> ProtocolResult; fn tag_consensus(&self, ctx: Context, peer_ids: Vec) -> ProtocolResult<()>; fn report_bad(&self, ctx: Context, feedback: TrustFeedback); fn set_args(&self, context: Context, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64); async fn verify_proof( &self, ctx: Context, block_header: &BlockHeader, proof: &Proof, ) -> ProtocolResult<()>; async fn verify_block_header(&self, ctx: Context, block: &Block) -> ProtocolResult<()>; fn verify_proof_signature( &self, ctx: Context, block_height: u64, vote_hash: Bytes, aggregated_signature_bytes: Bytes, vote_pubkeys: Vec, ) -> ProtocolResult<()>; fn verify_proof_weight( &self, ctx: Context, block_height: u64, weight_map: HashMap, signed_voters: Vec, ) -> ProtocolResult<()>; } #[async_trait] pub trait ConsensusAdapter: CommonConsensusAdapter + Send + Sync { /// Get some transaction hashes of the given height. The amount of the /// transactions is limited by the given cycle limit and return a /// `MixedTxHashes` struct. async fn get_txs_from_mempool( &self, ctx: Context, height: u64, cycle_limit: u64, tx_num_limit: u64, ) -> ProtocolResult; /// Synchronous signed transactions. async fn sync_txs(&self, ctx: Context, propose_txs: Vec) -> ProtocolResult<()>; /// Get the signed transactions corresponding to the given hashes. async fn get_full_txs( &self, ctx: Context, order_txs: &[Hash], ) -> ProtocolResult>; /// Consensus transmit a message to the given target. async fn transmit( &self, ctx: Context, msg: Vec, end: &str, target: MessageTarget, ) -> ProtocolResult<()>; /// Execute some transactions. 
#[allow(clippy::too_many_arguments)] async fn execute( &self, ctx: Context, chain_id: Hash, order_root: MerkleRoot, height: u64, cycles_price: u64, proposer: Address, block_hash: Hash, signed_txs: Vec, cycles_limit: u64, timestamp: u64, ) -> ProtocolResult<()>; /// Get the validator list of the given last block. async fn get_last_validators( &self, ctx: Context, height: u64, ) -> ProtocolResult>; /// Get the current height from storage. async fn get_current_height(&self, ctx: Context) -> ProtocolResult; /// Pull some blocks from other nodes from `begin` to `end`. async fn pull_block(&self, ctx: Context, height: u64, end: &str) -> ProtocolResult; async fn verify_txs(&self, ctx: Context, height: u64, txs: &[Hash]) -> ProtocolResult<()>; } ================================================ FILE: protocol/src/traits/executor.rs ================================================ use std::sync::Arc; use creep::Context; use crate::traits::{ServiceMapping, Storage}; use crate::types::{Address, MerkleRoot, Receipt, SignedTransaction, TransactionRequest}; use crate::ProtocolResult; #[derive(Debug, Clone)] pub struct ExecutorResp { pub receipts: Vec, pub all_cycles_used: u64, pub state_root: MerkleRoot, } #[derive(Debug, Clone)] pub struct ExecutorParams { pub state_root: MerkleRoot, pub height: u64, pub timestamp: u64, pub cycles_limit: u64, pub proposer: Address, } #[derive(Debug, Clone, Default)] pub struct ServiceResponse { pub code: u64, pub succeed_data: T, pub error_message: String, } impl ServiceResponse { pub fn from_error(code: u64, error_message: String) -> Self { Self { code, succeed_data: T::default(), error_message, } } pub fn from_succeed(succeed_data: T) -> Self { Self { code: 0, succeed_data, error_message: "".to_owned(), } } pub fn is_error(&self) -> bool { self.code != 0 } } impl PartialEq for ServiceResponse { fn eq(&self, other: &Self) -> bool { self.code == other.code && self.succeed_data == other.succeed_data && self.error_message == other.error_message } } 
impl Eq for ServiceResponse {} pub trait ExecutorFactory: Send + Sync { fn from_root( root: MerkleRoot, db: Arc, storage: Arc, mapping: Arc, ) -> ProtocolResult>; } pub trait Executor { fn exec( &mut self, ctx: Context, params: &ExecutorParams, txs: &[SignedTransaction], ) -> ProtocolResult; fn read( &self, params: &ExecutorParams, caller: &Address, cycles_price: u64, request: &TransactionRequest, ) -> ProtocolResult>; } ================================================ FILE: protocol/src/traits/mempool.rs ================================================ use async_trait::async_trait; use creep::Context; use crate::types::{Hash, SignedTransaction}; use crate::ProtocolResult; #[allow(dead_code)] pub struct MixedTxHashes { pub order_tx_hashes: Vec, pub propose_tx_hashes: Vec, } impl MixedTxHashes { pub fn clap(self) -> (Vec, Vec) { (self.order_tx_hashes, self.propose_tx_hashes) } } #[async_trait] pub trait MemPool: Send + Sync { async fn insert(&self, ctx: Context, tx: SignedTransaction) -> ProtocolResult<()>; async fn package( &self, ctx: Context, cycles_limit: u64, tx_num_limit: u64, ) -> ProtocolResult; async fn flush(&self, ctx: Context, tx_hashes: &[Hash]) -> ProtocolResult<()>; async fn get_full_txs( &self, ctx: Context, height: Option, tx_hashes: &[Hash], ) -> ProtocolResult>; async fn ensure_order_txs( &self, ctx: Context, height: Option, order_tx_hashes: &[Hash], ) -> ProtocolResult<()>; async fn sync_propose_txs( &self, ctx: Context, propose_tx_hashes: Vec, ) -> ProtocolResult<()>; fn set_args(&self, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64); } #[async_trait] pub trait MemPoolAdapter: Send + Sync { async fn pull_txs( &self, ctx: Context, height: Option, tx_hashes: Vec, ) -> ProtocolResult>; async fn broadcast_tx(&self, ctx: Context, tx: SignedTransaction) -> ProtocolResult<()>; async fn check_authorization( &self, ctx: Context, tx: Box, ) -> ProtocolResult<()>; async fn check_transaction(&self, ctx: Context, tx: &SignedTransaction) -> 
ProtocolResult<()>; async fn check_storage_exist(&self, ctx: Context, tx_hash: &Hash) -> ProtocolResult<()>; async fn get_latest_height(&self, ctx: Context) -> ProtocolResult; async fn get_transactions_from_storage( &self, ctx: Context, block_height: Option, tx_hashes: &[Hash], ) -> ProtocolResult>>; fn report_good(&self, ctx: Context); fn set_args(&self, timeout_gap: u64, cycles_limit: u64, max_tx_size: u64); } ================================================ FILE: protocol/src/traits/mod.rs ================================================ mod api; mod binding; mod consensus; mod executor; mod mempool; mod network; mod storage; pub use api::APIAdapter; pub use binding::{ AdmissionControl, ChainQuerier, SDKFactory, Service, ServiceMapping, ServiceSDK, ServiceState, StoreArray, StoreBool, StoreMap, StoreString, StoreUint64, }; pub use consensus::{ CommonConsensusAdapter, Consensus, ConsensusAdapter, MessageTarget, NodeInfo, Synchronization, SynchronizationAdapter, }; pub use executor::{Executor, ExecutorFactory, ExecutorParams, ExecutorResp, ServiceResponse}; pub use mempool::{MemPool, MemPoolAdapter, MixedTxHashes}; pub use network::{ Gossip, MessageCodec, MessageHandler, Network, PeerTag, PeerTrust, Priority, Rpc, TrustFeedback, }; pub use storage::{ CommonStorage, IntoIteratorByRef, MaintenanceStorage, Storage, StorageAdapter, StorageBatchModify, StorageCategory, StorageIterator, StorageSchema, }; pub use creep::{Cloneable, Context}; ================================================ FILE: protocol/src/traits/network.rs ================================================ use std::{ error::Error, fmt::Debug, hash::{Hash, Hasher}, }; use async_trait::async_trait; use bytes::Bytes; use derive_more::Display; use serde::{Deserialize, Serialize}; use crate::{traits::Context, ProtocolError, ProtocolErrorKind, ProtocolResult}; #[derive(Clone, Debug, Copy, Deserialize)] pub enum Priority { High, Normal, } #[derive(Debug, Display, Clone)] pub enum TrustFeedback { #[display(fmt 
= "fatal {}", _0)] Fatal(String), #[display(fmt = "worse {}", _0)] Worse(String), #[display(fmt = "bad {}", _0)] Bad(String), #[display(fmt = "neutral")] Neutral, #[display(fmt = "good")] Good, } #[derive(Debug, Display, Clone)] pub enum PeerTag { #[display(fmt = "consensus")] Consensus, #[display(fmt = "always allow")] AlwaysAllow, #[display(fmt = "banned, until {}", until)] Ban { until: u64 }, // timestamp #[display(fmt = "{}", _0)] Custom(String), // TODO: Hide custom constructor } impl PeerTag { pub fn ban(until: u64) -> Self { PeerTag::Ban { until } } pub fn ban_key() -> Self { PeerTag::Ban { until: 0 } } pub fn custom>(s: S) -> Result { let custom_str = s.as_ref(); match custom_str { "consensus" | "always_allow" | "ban" => Err(()), _ => Ok(PeerTag::Custom(custom_str.to_owned())), } } pub fn str(&self) -> &str { match self { PeerTag::Consensus => "consensus", PeerTag::AlwaysAllow => "always_allow", PeerTag::Ban { .. } => "ban", PeerTag::Custom(str) => str, } } } impl PartialEq for PeerTag { fn eq(&self, other: &PeerTag) -> bool { self.str() == other.str() } } impl Eq for PeerTag {} impl Hash for PeerTag { fn hash(&self, state: &mut H) { self.str().hash(state) } } pub trait MessageCodec: Sized + Send + Debug + 'static { fn encode(&mut self) -> ProtocolResult; fn decode(bytes: Bytes) -> ProtocolResult; } #[derive(Debug, Display)] #[display(fmt = "cannot serde encode or decode: {}", _0)] struct SerdeError(Box); impl Error for SerdeError {} impl From for ProtocolError { fn from(err: SerdeError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Network, Box::new(err)) } } impl MessageCodec for T where T: Serialize + for<'a> Deserialize<'a> + Send + Debug + 'static, { fn encode(&mut self) -> ProtocolResult { let bytes = bincode::serialize(self).map_err(|e| SerdeError(Box::new(e)))?; Ok(bytes.into()) } fn decode(bytes: Bytes) -> ProtocolResult { bincode::deserialize::(&bytes.as_ref()).map_err(|e| SerdeError(Box::new(e)).into()) } } #[async_trait] pub trait 
Gossip: Send + Sync { async fn broadcast(&self, cx: Context, end: &str, msg: M, p: Priority) -> ProtocolResult<()> where M: MessageCodec; async fn multicast<'a, M, P>( &self, cx: Context, end: &str, peer_ids: P, msg: M, p: Priority, ) -> ProtocolResult<()> where M: MessageCodec, P: AsRef<[Bytes]> + Send + 'a; } #[async_trait] pub trait Rpc: Send + Sync { async fn call(&self, ctx: Context, end: &str, msg: M, pri: Priority) -> ProtocolResult where M: MessageCodec, R: MessageCodec; async fn response( &self, cx: Context, end: &str, ret: ProtocolResult, p: Priority, ) -> ProtocolResult<()> where M: MessageCodec; } pub trait Network: Send + Sync { fn tag(&self, ctx: Context, peer_id: Bytes, tag: PeerTag) -> ProtocolResult<()>; fn untag(&self, ctx: Context, peer_id: Bytes, tag: &PeerTag) -> ProtocolResult<()>; fn tag_consensus(&self, ctx: Context, peer_ids: Vec) -> ProtocolResult<()>; } pub trait PeerTrust: Send + Sync { fn report(&self, ctx: Context, feedback: TrustFeedback); } #[async_trait] pub trait MessageHandler: Sync + Send + 'static { type Message: MessageCodec; async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback; } ================================================ FILE: protocol/src/traits/storage.rs ================================================ use async_trait::async_trait; use derive_more::Display; use crate::codec::ProtocolCodec; use crate::traits::Context; use crate::types::block::{Block, BlockHeader, Proof}; use crate::types::receipt::Receipt; use crate::types::{Hash, SignedTransaction}; use crate::ProtocolResult; #[derive(Debug, Copy, Clone, Display)] pub enum StorageCategory { Block, BlockHeader, Receipt, SignedTransaction, Wal, HashHeight, } pub type StorageIterator<'a, S> = Box< dyn Iterator::Key, ::Value)>> + 'a, >; pub trait StorageSchema { type Key: ProtocolCodec + Send; type Value: ProtocolCodec + Send; fn category() -> StorageCategory; } pub trait IntoIteratorByRef { fn ref_to_iter<'a, 'b: 'a>(&'b self) -> 
StorageIterator<'a, S>; } #[async_trait] pub trait CommonStorage: Send + Sync { async fn insert_block(&self, ctx: Context, block: Block) -> ProtocolResult<()>; async fn get_block(&self, ctx: Context, height: u64) -> ProtocolResult>; async fn get_block_header( &self, ctx: Context, height: u64, ) -> ProtocolResult>; async fn set_block(&self, _ctx: Context, block: Block) -> ProtocolResult<()>; async fn remove_block(&self, ctx: Context, height: u64) -> ProtocolResult<()>; async fn get_latest_block(&self, ctx: Context) -> ProtocolResult; async fn set_latest_block(&self, ctx: Context, block: Block) -> ProtocolResult<()>; async fn get_latest_block_header(&self, ctx: Context) -> ProtocolResult; } #[async_trait] pub trait Storage: CommonStorage { async fn insert_transactions( &self, ctx: Context, block_height: u64, signed_txs: Vec, ) -> ProtocolResult<()>; async fn get_transactions( &self, ctx: Context, block_height: u64, hashes: &[Hash], ) -> ProtocolResult>>; async fn get_transaction_by_hash( &self, ctx: Context, hash: &Hash, ) -> ProtocolResult>; async fn insert_receipts( &self, ctx: Context, block_height: u64, receipts: Vec, ) -> ProtocolResult<()>; async fn get_receipt_by_hash( &self, ctx: Context, hash: Hash, ) -> ProtocolResult>; async fn get_receipts( &self, ctx: Context, block_height: u64, hashes: Vec, ) -> ProtocolResult>>; async fn update_latest_proof(&self, ctx: Context, proof: Proof) -> ProtocolResult<()>; async fn get_latest_proof(&self, ctx: Context) -> ProtocolResult; } #[async_trait] pub trait MaintenanceStorage: CommonStorage {} pub enum StorageBatchModify { Remove, Insert(::Value), } #[async_trait] pub trait StorageAdapter: Send + Sync { async fn insert( &self, key: ::Key, val: ::Value, ) -> ProtocolResult<()>; async fn get( &self, key: ::Key, ) -> ProtocolResult::Value>>; async fn get_batch( &self, keys: Vec<::Key>, ) -> ProtocolResult::Value>>> { let mut vec = Vec::new(); for key in keys { vec.push(self.get::(key).await?); } Ok(vec) } async fn 
remove<S: StorageSchema>(&self, key: <S as StorageSchema>::Key) -> ProtocolResult<()>;

    async fn contains<S: StorageSchema>(
        &self,
        key: <S as StorageSchema>::Key,
    ) -> ProtocolResult<bool>;

    // Apply removes/inserts in one batch; `keys` and `vals` are paired by
    // index — NOTE(review): lengths are presumably required to match, enforced
    // by the implementor (not visible here).
    async fn batch_modify<S: StorageSchema>(
        &self,
        keys: Vec<<S as StorageSchema>::Key>,
        vals: Vec<StorageBatchModify<S>>,
    ) -> ProtocolResult<()>;

    fn prepare_iter<'a, 'b: 'a, S: StorageSchema + 'static, P: AsRef<[u8]> + 'a>(
        &'b self,
        prefix: &'a P,
    ) -> ProtocolResult<Box<dyn IntoIteratorByRef<S> + 'a>>;
}

================================================
FILE: protocol/src/types/block.rs
================================================
use bytes::Bytes;
use derive_more::Display;
use muta_codec_derive::RlpFixedCodec;
use serde::{Deserialize, Serialize};

use crate::fixed_codec::{FixedCodec, FixedCodecError};
use crate::types::{Address, Hash, MerkleRoot};
use crate::ProtocolResult;

#[derive(RlpFixedCodec, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct Block {
    pub header:            BlockHeader,
    pub ordered_tx_hashes: Vec<Hash>,
}

#[derive(RlpFixedCodec, Clone, Debug, Display, PartialEq, Eq, Deserialize, Serialize)]
#[display(
    fmt = "chain id {:?}, height {}, exec height {}, previous hash {:?}, ordered root {:?}, order_signed_transactions_hash {:?}, confirm root {:?}, state root {:?}, receipt root {:?},cycles_used {:?}, proposer {:?}, proof {:?}, validators {:?}",
    chain_id,
    height,
    exec_height,
    prev_hash,
    order_root,
    order_signed_transactions_hash,
    confirm_root,
    state_root,
    receipt_root,
    cycles_used,
    proposer,
    proof,
    validators
)]
pub struct BlockHeader {
    pub chain_id: Hash,
    pub height: u64,
    // Height of the latest executed block; may lag `height`.
    pub exec_height: u64,
    pub prev_hash: Hash,
    pub timestamp: u64,
    pub order_root: MerkleRoot,
    pub order_signed_transactions_hash: Hash,
    pub confirm_root: Vec<MerkleRoot>,
    pub state_root: MerkleRoot,
    pub receipt_root: Vec<MerkleRoot>,
    pub cycles_used: Vec<u64>,
    pub proposer: Address,
    pub proof: Proof,
    pub validator_version: u64,
    pub validators: Vec<Validator>,
}

#[derive(RlpFixedCodec, Serialize, Deserialize, Clone, Debug, Hash, PartialEq, Eq)]
pub struct Proof {
    pub height:     u64,
    pub round:      u64,
    pub block_hash: Hash,
    pub signature:  Bytes,
    pub bitmap:     Bytes,
}

#[derive(RlpFixedCodec, Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct Validator {
    pub pub_key:        Bytes,
    pub propose_weight: u32,
    pub vote_weight:    u32,
}

/// A block plus the propose-stage tx hashes that travel with it.
#[derive(RlpFixedCodec, Clone, Debug, PartialEq, Eq)]
pub struct Pill {
    pub block:          Block,
    pub propose_hashes: Vec<Hash>,
}

================================================
FILE: protocol/src/types/genesis.rs
================================================
use bytes::Bytes;
use muta_codec_derive::RlpFixedCodec;
use serde::Deserialize;

use crate::fixed_codec::{FixedCodec, FixedCodecError};
use crate::{types::primitive::Hex, ProtocolResult};

#[derive(RlpFixedCodec, Clone, Debug, Deserialize, PartialEq, Eq)]
pub struct Genesis {
    pub timestamp: u64,
    pub prevhash:  Hex,
    pub services:  Vec<ServiceParam>,
}

impl Genesis {
    /// Return the genesis payload of the named service.
    ///
    /// Panics when the service is absent: genesis config is a boot-time
    /// invariant, so failing fast is the intended behavior.
    pub fn get_payload(&self, name: &str) -> &str {
        &self
            .services
            .iter()
            .find(|&service| service.name == name)
            .unwrap_or_else(|| panic!("miss {:?} service!", name))
            .payload
    }
}

#[derive(RlpFixedCodec, Clone, Debug, Deserialize, PartialEq, Eq)]
pub struct ServiceParam {
    pub name:    String,
    pub payload: String,
}

================================================
FILE: protocol/src/types/mod.rs
================================================
pub(crate) mod block;
pub(crate) mod genesis;
pub(crate) mod primitive;
pub(crate) mod receipt;
pub(crate) mod service_context;
pub(crate) mod transaction;

use std::error::Error;

use derive_more::{Display, From};

use crate::{ProtocolError, ProtocolErrorKind};

pub use block::{Block, BlockHeader, Pill, Proof, Validator};
pub use bytes::{Bytes, BytesMut};
pub use genesis::{Genesis, ServiceParam};
pub use primitive::{
    address_hrp, address_hrp_inited, init_address_hrp, Address, Hash, Hex, JsonString, MerkleRoot,
    Metadata, ValidatorExtend, GENESIS_HEIGHT, METADATA_KEY,
};
pub use receipt::{Event, Receipt, ReceiptResponse};
pub use service_context::{ServiceContext, ServiceContextError, ServiceContextParams};
pub use transaction::{RawTransaction, SignedTransaction, TransactionRequest};

#[derive(Debug, Display, From)]
pub enum
TypesError { #[display(fmt = "Expect {:?}, get {:?}.", expect, real)] LengthMismatch { expect: usize, real: usize }, #[display(fmt = "{:?}", error)] FromHex { error: hex::FromHexError }, #[display(fmt = "{:?} is an invalid address", address)] InvalidAddress { address: String }, #[display(fmt = "{}", error)] Bech32 { error: bech32::Error }, #[display(fmt = "Hex should start with 0x")] HexPrefix, #[display(fmt = "Invalid public key")] InvalidPublicKey, } impl Error for TypesError {} impl From for ProtocolError { fn from(error: TypesError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Types, Box::new(error)) } } ================================================ FILE: protocol/src/types/primitive.rs ================================================ use std::convert::TryFrom; use std::fmt; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use arc_swap::ArcSwap; use bech32::{self, FromBase32, ToBase32}; use bytes::Bytes; use hasher::{Hasher, HasherKeccak}; use lazy_static::lazy_static; use muta_codec_derive::RlpFixedCodec; use ophelia::{PublicKey, UncompressedPublicKey}; use ophelia_secp256k1::Secp256k1PublicKey; use serde::de; use serde::{Deserialize, Serialize}; use serde_json::Value; use smol_str::SmolStr; use crate::fixed_codec::{FixedCodec, FixedCodecError}; use crate::types::TypesError; use crate::ProtocolResult; pub const METADATA_KEY: &str = "metadata"; lazy_static! 
{ static ref HASHER_INST: HasherKeccak = HasherKeccak::new(); static ref ADDRESS_HRP: ArcSwap = ArcSwap::from(Arc::new("muta".into())); static ref ADDRESS_HRP_INITED: AtomicBool = AtomicBool::new(false); } pub fn address_hrp() -> SmolStr { ADDRESS_HRP.load().as_ref().clone() } pub fn init_address_hrp(address_hrp: SmolStr) { if ADDRESS_HRP_INITED.load(Ordering::SeqCst) { panic!("address hrp can only be inited once"); } if address_hrp.is_heap_allocated() { log::warn!("address hrp too long"); } // Verify address hrp let hash = HASHER_INST.digest(b"hello muta"); assert_eq!(hash.len(), 32); let bytes = &hash[12..]; assert_eq!(bytes.len(), 20); bech32::encode(&address_hrp, bytes.to_base32()).expect("invalid address hrp"); // Set address hrp ADDRESS_HRP.store(Arc::new(address_hrp)); ADDRESS_HRP_INITED.store(true, Ordering::SeqCst); } pub fn address_hrp_inited() -> bool { ADDRESS_HRP_INITED.load(Ordering::SeqCst) } /// The height of the genesis block. pub const GENESIS_HEIGHT: u64 = 0; /// Hash length const HASH_LEN: usize = 32; // Should started with 0x #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Hex(String); impl Hex { pub fn from_string(s: String) -> ProtocolResult { if (!s.starts_with("0x") && !s.starts_with("0X")) || s.len() < 3 { return Err(TypesError::HexPrefix.into()); } hex::decode(&s[2..]).map_err(|error| TypesError::FromHex { error })?; Ok(Hex(s)) } pub fn as_string(&self) -> String { self.0.to_owned() } pub fn as_string_trim0x(&self) -> String { (&self.0[2..]).to_owned() } pub fn decode(&self) -> Bytes { Bytes::from(hex::decode(&self.0[2..]).expect("impossible, already checked in from_string")) } } impl Default for Hex { fn default() -> Self { Hex::from_string("0x1".to_owned()).expect("Hex must start with 0x") } } impl Serialize for Hex { fn serialize(&self, serializer: S) -> Result where S: serde::ser::Serializer, { serializer.serialize_str(&self.0) } } struct HexVisitor; impl<'de> de::Visitor<'de> for HexVisitor { type Value = Hex; fn 
expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("Expect a hex string") } fn visit_string(self, v: String) -> Result where E: de::Error, { Hex::from_string(v).map_err(|e| de::Error::custom(e.to_string())) } fn visit_str(self, v: &str) -> Result where E: de::Error, { Hex::from_string(v.to_owned()).map_err(|e| de::Error::custom(e.to_string())) } } impl<'de> Deserialize<'de> for Hex { fn deserialize(deserializer: D) -> Result where D: de::Deserializer<'de>, { deserializer.deserialize_string(HexVisitor) } } #[derive(RlpFixedCodec, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct Hash(Bytes); /// Merkel root hash pub type MerkleRoot = Hash; /// Json string pub type JsonString = String; impl Serialize for Hash { fn serialize(&self, serializer: S) -> Result where S: serde::ser::Serializer, { serializer.serialize_str(&self.as_hex()) } } struct HashVisitor; impl<'de> de::Visitor<'de> for HashVisitor { type Value = Hash; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("Expect a hex string") } fn visit_string(self, v: String) -> Result where E: de::Error, { Hash::from_hex(&v).map_err(|e| de::Error::custom(e.to_string())) } fn visit_str(self, v: &str) -> Result where E: de::Error, { Hash::from_hex(&v).map_err(|e| de::Error::custom(e.to_string())) } } impl<'de> Deserialize<'de> for Hash { fn deserialize(deserializer: D) -> Result where D: de::Deserializer<'de>, { deserializer.deserialize_string(HashVisitor) } } impl Hash { /// Enter an array of bytes to get a 32-bit hash. /// Note: sha3 is used for the time being and may be replaced with other /// hashing algorithms later. pub fn digest>(bytes: B) -> Self { let out = HASHER_INST.digest(bytes.as_ref()); Self(Bytes::from(out)) } pub fn from_empty() -> Self { let out = HASHER_INST.digest(&rlp::NULL_RLP); Self(Bytes::from(out)) } /// Converts the byte array to a Hash type. 
/// Note: if you want to compute the hash value of the byte array, you /// should call `fn digest`. pub fn from_bytes(bytes: Bytes) -> ProtocolResult { ensure_len(bytes.len(), HASH_LEN)?; Ok(Self(bytes)) } pub fn from_hex(s: &str) -> ProtocolResult { let s = clean_0x(s)?; let bytes = hex::decode(s).map_err(TypesError::from)?; let bytes = Bytes::from(bytes); Self::from_bytes(bytes) } pub fn as_bytes(&self) -> Bytes { self.0.clone() } pub fn as_slice(&self) -> &[u8] { &self.0 } pub fn as_hex(&self) -> String { "0x".to_owned() + &hex::encode(self.0.clone()) } /// Used for byzantine test pub fn from_invalid_bytes(bytes: Bytes) -> Self { Self(bytes) } } impl Default for Hash { fn default() -> Self { Hash::from_empty() } } impl fmt::Debug for Hash { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.as_hex()) } } /// Address length. const ADDRESS_LEN: usize = 20; #[derive(RlpFixedCodec, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct Address(Bytes); impl Default for Address { fn default() -> Self { Address::from_hex("0x0000000000000000000000000000000000000000") .expect("Address must consist of 20 bytes") } } impl Serialize for Address { fn serialize(&self, serializer: S) -> Result where S: serde::ser::Serializer, { serializer.serialize_str(&self.to_string()) } } struct AddressVisitor; impl<'de> de::Visitor<'de> for AddressVisitor { type Value = Address; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("Expect a bech32 string") } fn visit_string(self, v: String) -> Result where E: de::Error, { Address::from_str(&v).map_err(|e| de::Error::custom(e.to_string())) } fn visit_str(self, v: &str) -> Result where E: de::Error, { Address::from_str(&v).map_err(|e| de::Error::custom(e.to_string())) } } impl<'de> Deserialize<'de> for Address { fn deserialize(deserializer: D) -> Result where D: de::Deserializer<'de>, { deserializer.deserialize_string(AddressVisitor) } } impl Address { pub fn 
from_pubkey_bytes>(bytes: B) -> ProtocolResult { let compressed_pubkey_len = ::LENGTH; let uncompressed_pubkey_len = ::LENGTH; let slice = bytes.as_ref(); if slice.len() != compressed_pubkey_len && slice.len() != uncompressed_pubkey_len { return Err(TypesError::InvalidPublicKey.into()); } // Drop first byte let hash = { if slice.len() == compressed_pubkey_len { let pubkey = Secp256k1PublicKey::try_from(slice) .map_err(|_| TypesError::InvalidPublicKey)?; Hash::digest(&(pubkey.to_uncompressed_bytes())[1..]) } else { Hash::digest(&slice[1..]) } }; Self::from_hash(hash) } pub fn from_hash(hash: Hash) -> ProtocolResult { let hash_val = hash.as_slice(); ensure_len(hash_val.len(), HASH_LEN)?; Self::from_bytes(Bytes::copy_from_slice(&hash_val[12..])) } pub fn from_bytes(bytes: Bytes) -> ProtocolResult { ensure_len(bytes.len(), ADDRESS_LEN)?; Ok(Self(bytes)) } pub fn as_bytes(&self) -> Bytes { self.0.clone() } pub fn as_slice(&self) -> &[u8] { &self.0 } pub fn from_hex(s: &str) -> ProtocolResult { let s = clean_0x(s)?; let bytes = hex::decode(s).map_err(TypesError::from)?; let bytes = Bytes::from(bytes); Self::from_bytes(bytes) } /// Used for byzantine test pub fn from_invalid_bytes(bytes: Bytes) -> Self { Self(bytes) } } impl FromStr for Address { type Err = TypesError; fn from_str(s: &str) -> Result { let (hrp, data) = bech32::decode(s).map_err(TypesError::from)?; if hrp != address_hrp() { return Err(TypesError::InvalidAddress { address: s.to_owned(), }); } let bytes = Vec::::from_base32(&data).map_err(TypesError::from)?; Ok(Address(Bytes::from(bytes))) } } impl fmt::Debug for Address { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // NOTE: ADDRESS_HRP was verified in init_address_hrp fn bech32::encode_to_fmt(f, address_hrp().as_ref(), &self.0.to_base32()).unwrap() } } impl fmt::Display for Address { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // NOTE: ADDRESS_HRP was verified in init_address_hrp fn bech32::encode_to_fmt(f, address_hrp().as_ref(), 
&self.0.to_base32()).unwrap()
    }
}

/// Chain-wide consensus and resource parameters, stored under
/// `METADATA_KEY` and updatable through governance.
#[derive(RlpFixedCodec, Deserialize, Default, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct Metadata {
    pub chain_id:           Hash,
    pub bech32_address_hrp: String,
    pub common_ref:         Hex,
    pub timeout_gap:        u64,
    pub cycles_limit:       u64,
    pub cycles_price:       u64,
    pub interval:           u64,
    pub verifier_list:      Vec<ValidatorExtend>,
    pub propose_ratio:      u64,
    pub prevote_ratio:      u64,
    pub precommit_ratio:    u64,
    pub brake_ratio:        u64,
    pub tx_num_limit:       u64,
    pub max_tx_size:        u64,
}

impl Metadata {
    /// Extracts `bech32_address_hrp` from the metadata genesis payload so the
    /// hrp can be initialized before any address is deserialized.
    ///
    /// # Panics
    ///
    /// Panics if the payload is not valid JSON or the field is not a string;
    /// both are fatal genesis misconfigurations.
    pub fn get_hrp_from_json(payload: String) -> String {
        let nodes: Value = serde_json::from_str(payload.as_str())
            .expect("metadata's genesis payload is invalid JSON");
        nodes["bech32_address_hrp"]
            .as_str()
            .expect("bech32_address_hrp in genesis payload is not string?")
            .to_string()
    }
}

/// A validator entry in the metadata verifier list, extended with its BLS
/// public key for aggregated consensus signatures.
#[derive(RlpFixedCodec, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
pub struct ValidatorExtend {
    pub bls_pub_key:    Hex,
    pub pub_key:        Hex,
    pub address:        Address,
    pub propose_weight: u32,
    pub vote_weight:    u32,
}

impl fmt::Debug for ValidatorExtend {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let bls_pub_key = self.bls_pub_key.as_string_trim0x();
        // Only show the first 8 hex chars of the BLS key to keep logs short.
        let pk = if bls_pub_key.len() > 8 {
            unsafe { bls_pub_key.get_unchecked(0..8) }
        } else {
            bls_pub_key.as_str()
        };

        write!(
            f,
            "bls public key {:?}, public key {:?}, address {:?} propose weight {}, vote weight {}",
            pk, self.pub_key, self.address, self.propose_weight, self.vote_weight
        )
    }
}

/// Strips a mandatory "0x"/"0X" prefix, erroring when it is absent.
fn clean_0x(s: &str) -> ProtocolResult<&str> {
    if s.starts_with("0x") || s.starts_with("0X") {
        Ok(&s[2..])
    } else {
        Err(TypesError::HexPrefix.into())
    }
}

/// Returns `LengthMismatch` unless `real == expect`.
fn ensure_len(real: usize, expect: usize) -> ProtocolResult<()> {
    if real != expect {
        Err(TypesError::LengthMismatch { expect, real }.into())
    } else {
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use bech32::{self, FromBase32};
    use bytes::Bytes;

    use super::{address_hrp, init_address_hrp, Address, Hash, ValidatorExtend};
    use crate::types::Metadata;
    use crate::{fixed_codec::FixedCodec, types::Hex};

    #[test]
    fn test_hash() {
        let hash = Hash::digest(Bytes::from("xxxxxx"));
        let bytes = hash.as_bytes();
        Hash::from_bytes(bytes).unwrap();
    }

    #[test]
    fn test_from_hex() {
        let address_hex = "0x755cdba6ae4f479f7164792b318b2a06c759833b";
        let address_bech32 = "muta1w4wdhf4wfare7uty0y4nrze2qmr4nqem9j7teu";
        let address = Address::from_hex(address_hex).unwrap();
        assert_eq!(address.to_string(), address_bech32);
    }

    #[test]
    fn test_from_pubkey_bytes() {
        let pubkey = "02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60";
        let expect_addr = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705";

        let pubkey_bytes = Bytes::from(hex::decode(pubkey).unwrap());
        let addr = Address::from_pubkey_bytes(pubkey_bytes).unwrap();
        assert_eq!(addr.to_string(), expect_addr);
    }

    #[test]
    fn test_address() {
        let add_str = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705";
        let (_, data) = bech32::decode(add_str).unwrap();
        let bytes = Bytes::from(Vec::<u8>::from_base32(&data).unwrap());

        let address = Address::from_bytes(bytes).unwrap();
        assert_eq!(add_str, &address.to_string());
    }

    #[test]
    fn test_hex() {
        let hex_str = "0x112233445566AABBcc";
        let hex = Hex::from_string(hex_str.to_owned()).unwrap();
        assert_eq!(hex_str, hex.0.as_str());
    }

    #[test]
    fn test_validator_extend() {
        let extend = ValidatorExtend {
            bls_pub_key: Hex::from_string("0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724".to_owned()).unwrap(),
            pub_key: Hex::from_string("0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60".to_owned()).unwrap(),
            address: "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705".parse().unwrap(),
            propose_weight: 1,
            vote_weight: 1,
        };

        let decoded = ValidatorExtend::decode_fixed(extend.encode_fixed().unwrap()).unwrap();
        assert_eq!(decoded, extend);
    }

    // Note: All tests run in same process, change ADDRESS_HRP affects other tests
    #[test]
    #[should_panic(expected = "must set hrp before deserialization")]
    fn test_init_address_hrp() {
        assert_eq!(address_hrp(), "muta", "default value");

        let metadata_payload = r#"
        {
            "chain_id": "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036",
            "bech32_address_hrp": "ham",
            "common_ref": "0x6c747758636859487038",
            "timeout_gap": 20,
            "cycles_limit": 4294967295,
            "cycles_price": 1,
            "interval": 3000,
            "verifier_list": [
                {
                    "bls_pub_key": "0x04102947214862a503c73904deb5818298a186d68c7907bb609583192a7de6331493835e5b8281f4d9ee705537c0e765580e06f86ddce5867812fceb42eecefd209f0eddd0389d6b7b0100f00fb119ef9ab23826c6ea09aadcc76fa6cea6a32724",
                    "pub_key": "0x02ef0cb0d7bc6c18b4bea1f5908d9106522b35ab3c399369605d4242525bda7e60",
                    "address": "ham14e0lmgck835vm2dfm0w3ckv6svmez8fdmq5fts",
                    "propose_weight": 1,
                    "vote_weight": 1
                }
            ],
            "propose_ratio": 15,
            "prevote_ratio": 10,
            "precommit_ratio": 10,
            "brake_ratio": 7,
            "tx_num_limit": 20000,
            "max_tx_size": 1024
        }
        "#;

        let hrp = Metadata::get_hrp_from_json(metadata_payload.to_string());
        assert_eq!("ham".to_string(), hrp, "should be same");

        // this should fail because we did not set hrp to ham like
        // init_address_hrp(hrp);
        serde_json::from_str::<Metadata>(metadata_payload)
            .expect("must set hrp before deserialization");
    }

    #[test]
    #[should_panic(expected = "address hrp can only be inited once")]
    fn test_init_address_hrp_twice() {
        init_address_hrp("muta".into());
        init_address_hrp("muta".into());
    }
}

================================================ FILE: protocol/src/types/receipt.rs ================================================
use bytes::Bytes;
use muta_codec_derive::RlpFixedCodec;

use crate::fixed_codec::{FixedCodec, FixedCodecError};
use crate::types::{Hash, MerkleRoot};
use crate::{traits::ServiceResponse, ProtocolResult};

/// An event emitted by a service during transaction execution.
#[derive(RlpFixedCodec, Debug, Clone, PartialEq, Eq)]
pub struct Event {
    pub service: String,
    pub name:    String,
    pub data:    String,
}

/// The execution receipt of a single transaction.
#[derive(RlpFixedCodec, Clone, Debug, PartialEq, Eq)]
pub struct Receipt {
    pub state_root: MerkleRoot,
    pub height:     u64,
    pub tx_hash:
Hash,
    pub cycles_used: u64,
    pub events:      Vec<Event>,
    pub response:    ReceiptResponse,
}

/// The service call that produced a receipt and its response payload.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ReceiptResponse {
    pub service_name: String,
    pub method:       String,
    // NOTE(review): the generic parameter was stripped during extraction;
    // `ServiceResponse<String>` reconstructed — verify against `traits`.
    pub response:     ServiceResponse<String>,
}

================================================ FILE: protocol/src/types/service_context.rs ================================================
use std::cell::RefCell;
use std::rc::Rc;

use bytes::Bytes;
use derive_more::{Display, From};

use crate::types::{Address, Event, Hash};
use crate::{ProtocolError, ProtocolErrorKind};

/// Everything needed to build a `ServiceContext`; the `Rc<RefCell<…>>`
/// fields are shared with the executor so cycle accounting and events
/// accumulate across nested service calls.
#[derive(Debug, Clone)]
pub struct ServiceContextParams {
    pub tx_hash:         Option<Hash>,
    pub nonce:           Option<Hash>,
    pub cycles_limit:    u64,
    pub cycles_price:    u64,
    pub cycles_used:     Rc<RefCell<u64>>,
    pub caller:          Address,
    pub height:          u64,
    pub service_name:    String,
    pub service_method:  String,
    pub service_payload: String,
    pub extra:           Option<Bytes>,
    pub timestamp:       u64,
    pub events:          Rc<RefCell<Vec<Event>>>,
}

pub type Reason = String;

/// Per-call execution context handed to a service method. Clones share the
/// same cycle counter, event list and cancellation flag.
#[derive(Debug, Clone, PartialEq)]
pub struct ServiceContext {
    tx_hash:         Option<Hash>,
    nonce:           Option<Hash>,
    cycles_limit:    u64,
    cycles_price:    u64,
    cycles_used:     Rc<RefCell<u64>>,
    caller:          Address,
    height:          u64,
    service_name:    String,
    service_method:  String,
    service_payload: String,
    extra:           Option<Bytes>,
    timestamp:       u64,
    events:          Rc<RefCell<Vec<Event>>>,
    canceled:        Rc<RefCell<Option<Reason>>>,
}

impl ServiceContext {
    pub fn new(params: ServiceContextParams) -> Self {
        Self {
            tx_hash: params.tx_hash,
            nonce: params.nonce,
            cycles_limit: params.cycles_limit,
            cycles_price: params.cycles_price,
            cycles_used: params.cycles_used,
            caller: params.caller,
            height: params.height,
            service_name: params.service_name,
            service_method: params.service_method,
            service_payload: params.service_payload,
            extra: params.extra,
            timestamp: params.timestamp,
            events: params.events,
            canceled: Rc::new(RefCell::new(None)),
        }
    }

    /// Builds a child context for a cross-service call: fresh call target,
    /// shared cycle counter, events and cancellation flag.
    pub fn with_context(
        context: &ServiceContext,
        extra: Option<Bytes>,
        service_name: String,
        service_method: String,
        service_payload: String,
    ) -> Self {
        Self {
            tx_hash: context.tx_hash.clone(),
            nonce: context.nonce.clone(),
            cycles_limit: context.cycles_limit,
            cycles_price: context.cycles_price,
            cycles_used: Rc::clone(&context.cycles_used),
            caller: context.caller.clone(),
            height: context.height,
            service_name,
            service_method,
            service_payload,
            extra,
            timestamp: context.get_timestamp(),
            events: Rc::clone(&context.events),
            canceled: Rc::clone(&context.canceled),
        }
    }

    pub fn get_tx_hash(&self) -> Option<Hash> {
        self.tx_hash.clone()
    }

    pub fn get_nonce(&self) -> Option<Hash> {
        self.nonce.clone()
    }

    pub fn get_events(&self) -> Vec<Event> {
        self.events.borrow().clone()
    }

    /// Charges `cycles` against the shared counter; returns `false` (without
    /// charging) when the limit would be exceeded.
    pub fn sub_cycles(&self, cycles: u64) -> bool {
        if self.get_cycles_used() + cycles <= self.cycles_limit {
            *self.cycles_used.borrow_mut() = self.get_cycles_used() + cycles;
            true
        } else {
            false
        }
    }

    pub fn get_cycles_price(&self) -> u64 {
        self.cycles_price
    }

    pub fn get_cycles_limit(&self) -> u64 {
        self.cycles_limit
    }

    pub fn get_cycles_used(&self) -> u64 {
        *self.cycles_used.borrow()
    }

    pub fn get_caller(&self) -> Address {
        self.caller.clone()
    }

    pub fn get_current_height(&self) -> u64 {
        self.height
    }

    pub fn get_service_name(&self) -> &str {
        &self.service_name
    }

    pub fn get_service_method(&self) -> &str {
        &self.service_method
    }

    pub fn get_payload(&self) -> &str {
        &self.service_payload
    }

    pub fn get_extra(&self) -> Option<Bytes> {
        self.extra.clone()
    }

    pub fn get_timestamp(&self) -> u64 {
        self.timestamp
    }

    /// Whether this call chain has been canceled (flag is shared by clones).
    pub fn canceled(&self) -> bool {
        self.canceled.borrow().is_some()
    }

    pub fn cancel_reason(&self) -> Option<Reason> {
        self.canceled.borrow().to_owned()
    }

    pub fn cancel(&self, reason: String) {
        *self.canceled.borrow_mut() = Some(reason);
    }

    pub fn emit_event(&self, service: String, name: String, message: String) {
        self.events.borrow_mut().push(Event {
            service,
            name,
            data: message,
        })
    }
}

#[derive(Debug, Display, From)]
pub enum ServiceContextError {
    #[display(fmt = "out of cycles")]
    OutOfCycles,
}

impl std::error::Error for ServiceContextError {}

impl From<ServiceContextError> for ProtocolError {
    fn from(err: ServiceContextError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Service, Box::new(err))
    }
}

#[cfg(test)]
mod tests {
    use std::cell::RefCell;
    use std::rc::Rc;

    use super::{ServiceContext, ServiceContextParams};
    use crate::types::{Address, Hash};

    #[test]
    fn test_request_context() {
        let params = ServiceContextParams {
            tx_hash: None,
            nonce: None,
            cycles_limit: 100,
            cycles_price: 8,
            cycles_used: Rc::new(RefCell::new(10)),
            caller: Address::from_hash(Hash::from_empty()).unwrap(),
            height: 1,
            timestamp: 0,
            service_name: "service_name".to_owned(),
            service_method: "service_method".to_owned(),
            service_payload: "service_payload".to_owned(),
            extra: None,
            events: Rc::new(RefCell::new(vec![])),
        };
        let ctx = ServiceContext::new(params);

        ctx.sub_cycles(8);
        assert_eq!(ctx.get_cycles_used(), 18);
        assert_eq!(ctx.get_cycles_limit(), 100);
        assert_eq!(ctx.get_cycles_price(), 8);
        assert_eq!(
            ctx.get_caller(),
            Address::from_hash(Hash::from_empty()).unwrap()
        );
        assert_eq!(ctx.get_current_height(), 1);
        assert_eq!(ctx.get_timestamp(), 0);
        assert_eq!(ctx.get_service_name(), "service_name");
        assert_eq!(ctx.get_service_method(), "service_method");
        assert_eq!(ctx.get_payload(), "service_payload");

        let bro = ctx.clone();
        let reason = "hurry up, bus is about to leave".to_owned();
        ctx.cancel(reason.clone());
        assert!(ctx.canceled());
        assert!(bro.canceled());
        assert_eq!(bro.cancel_reason(), Some(reason));
    }
}

================================================ FILE: protocol/src/types/transaction.rs ================================================
use bytes::Bytes;
use muta_codec_derive::RlpFixedCodec;
use serde::{Deserialize, Serialize};

use crate::fixed_codec::{FixedCodec, FixedCodecError};
use crate::types::primitive::{Address, Hash, JsonString};
use crate::ProtocolResult;

/// An unsigned transaction: chain binding, fee limits, anti-replay nonce and
/// timeout, the service call request, and the declared sender.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct RawTransaction {
    pub chain_id:     Hash,
    pub cycles_price: u64,
    pub cycles_limit: u64,
    pub nonce:        Hash,
    pub request:      TransactionRequest,
    pub timeout:      u64,
    pub sender:       Address,
}

#[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct
TransactionRequest { pub method: String, pub service_name: String, pub payload: JsonString, } #[derive(RlpFixedCodec, Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub struct SignedTransaction { pub raw: RawTransaction, pub tx_hash: Hash, pub pubkey: Bytes, pub signature: Bytes, } ================================================ FILE: rust-toolchain ================================================ nightly-2020-09-20 ================================================ FILE: rustfmt.toml ================================================ # Convert /* */ comments to // comments where possible # # Default value: false # Possible values: true, false # Stable: No (tracking issue: #3350) # false (default): # // Lorem ipsum: # fn dolor() -> usize {} # # /* sit amet: */ # fn adipiscing() -> usize {} # true: # // Lorem ipsum: # fn dolor() -> usize {} # # // sit amet: # fn adipiscing() -> usize {} normalize_comments = true # Reorder impl items. type and const are put first, then macros and methods. # # Default value: false # Possible values: true, false # Stable: No (tracking issue: #3363) # false (default) # struct Dummy; # # impl Iterator for Dummy { # fn next(&mut self) -> Option { # None # } # # type Item = i32; # } # true # struct Dummy; # # impl Iterator for Dummy { # type Item = i32; # # fn next(&mut self) -> Option { # None # } # } reorder_impl_items = true # The maximum diff of width between struct fields to be aligned with each other. # # Default value : 0 # Possible values: any non-negative integer # Stable: No (tracking issue: #3371) # 0 (default): # struct Foo { # x: u32, # yy: u32, # zzz: u32, # } # 20: # struct Foo { # x: u32, # yy: u32, # zzz: u32, # } struct_field_align_threshold = 25 # Use field initialize shorthand if possible. 
# # Default value: false # Possible values: true, false # Stable: Yes # false (default): # struct Foo { # x: u32, # y: u32, # z: u32, # } # # fn main() { # let x = 1; # let y = 2; # let z = 3; # let a = Foo { x: x, y: y, z: z }; # } # true: # struct Foo { # x: u32, # y: u32, # z: u32, # } # # fn main() { # let x = 1; # let y = 2; # let z = 3; # let a = Foo { x, y, z }; # } use_field_init_shorthand = true # Replace uses of the try! macro by the ? shorthand # # Default value: false # Possible values: true, false # Stable: Yes # false (default): # fn main() { # let lorem = try!(ipsum.map(|dolor| dolor.sit())); # } # true: # fn main() { # let lorem = ipsum.map(|dolor| dolor.sit())?; # } use_try_shorthand = true # Break comments to fit on the line # # Default value: false # Possible values: true, false # Stable: No (tracking issue: #3347) # false (default): # // Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. # true: # // Lorem ipsum dolor sit amet, consectetur adipiscing elit, # // sed do eiusmod tempor incididunt ut labore et dolore # // magna aliqua. Ut enim ad minim veniam, quis nostrud # // exercitation ullamco laboris nisi ut aliquip ex ea # // commodo consequat. wrap_comments = true # When structs, slices, arrays, and block/array-like macros are used as the last argument in an expression list, allow them to overflow (like blocks/closures) instead of being indented on a new line. 
# # Default value: false # Possible values: true, false # Stable: No (tracking issue: #3370) # false (default): # fn example() { # foo(ctx, |param| { # action(); # foo(param) # }); # # foo( # ctx, # Bar { # x: value, # y: value2, # }, # ); # # foo( # ctx, # &[ # MAROON_TOMATOES, # PURPLE_POTATOES, # ORGANE_ORANGES, # GREEN_PEARS, # RED_APPLES, # ], # ); # # foo( # ctx, # vec![ # MAROON_TOMATOES, # PURPLE_POTATOES, # ORGANE_ORANGES, # GREEN_PEARS, # RED_APPLES, # ], # ); # } # true: # fn example() { # foo(ctx, |param| { # action(); # foo(param) # }); # # foo(ctx, Bar { # x: value, # y: value2, # }); # # foo(ctx, &[ # MAROON_TOMATOES, # PURPLE_POTATOES, # ORGANE_ORANGES, # GREEN_PEARS, # RED_APPLES, # ]); # # foo(ctx, vec![ # MAROON_TOMATOES, # PURPLE_POTATOES, # ORGANE_ORANGES, # GREEN_PEARS, # RED_APPLES, # ]); # } overflow_delimited_expr = true ================================================ FILE: src/lib.rs ================================================ #![feature(async_closure)] #![allow(clippy::mutable_key_type)] use protocol::traits::ServiceMapping; use cli::{Cli, CliConfig}; pub fn run( service_mapping: Mapping, app_name: &'static str, version: &'static str, author: &'static str, config_path: &'static str, genesis_patch: &'static str, target_commands: Option>, ) { Cli::run( service_mapping, CliConfig { app_name, version, author, config_path, genesis_patch, }, target_commands, ) } ================================================ FILE: tests/common/mod.rs ================================================ #![allow(clippy::mutable_key_type)] pub mod node; use std::net::TcpListener; use std::path::PathBuf; use std::sync::atomic::{AtomicU16, Ordering}; use protocol::types::Hash; use protocol::BytesMut; use rand::{rngs::OsRng, RngCore}; static AVAILABLE_PORT: AtomicU16 = AtomicU16::new(2000); pub fn tmp_dir() -> PathBuf { let mut tmp_dir = std::env::temp_dir(); let sub_dir = { let mut random_bytes = [0u8; 32]; OsRng.fill_bytes(&mut random_bytes); 
Hash::digest(BytesMut::from(random_bytes.as_ref()).freeze()).as_hex() }; tmp_dir.push(sub_dir + "/"); tmp_dir } pub fn available_port_pair() -> (u16, u16) { (available_port(), available_port()) } fn available_port() -> u16 { let is_available = |port| -> bool { TcpListener::bind(("127.0.0.1", port)).is_ok() }; loop { let port = AVAILABLE_PORT.fetch_add(1, Ordering::SeqCst); if is_available(port) { return port; } } } ================================================ FILE: tests/common/node/config.rs ================================================ use std::collections::HashMap; use std::net::SocketAddr; use std::path::PathBuf; use serde_derive::Deserialize; use core_mempool::{DEFAULT_BROADCAST_TXS_INTERVAL, DEFAULT_BROADCAST_TXS_SIZE}; use protocol::types::Hex; #[derive(Debug, Deserialize)] pub struct ConfigNetwork { pub bootstraps: Option>, pub allowlist: Option>, pub allowlist_only: Option, pub trust_interval_duration: Option, pub trust_max_history_duration: Option, pub fatal_ban_duration: Option, pub soft_ban_duration: Option, pub max_connected_peers: Option, pub listening_address: SocketAddr, pub rpc_timeout: Option, pub selfcheck_interval: Option, pub send_buffer_size: Option, pub write_timeout: Option, pub recv_buffer_size: Option, pub max_frame_length: Option, pub max_wait_streams: Option, pub ping_interval: Option, } #[derive(Debug, Deserialize)] pub struct ConfigNetworkBootstrap { pub peer_id: String, pub address: String, } #[derive(Debug, Deserialize)] pub struct ConfigConsensus { pub overlord_gap: usize, pub sync_txs_chunk_size: usize, } impl Default for ConfigConsensus { fn default() -> Self { Self { overlord_gap: 5, sync_txs_chunk_size: 5000, } } } fn default_broadcast_txs_size() -> usize { DEFAULT_BROADCAST_TXS_SIZE } fn default_broadcast_txs_interval() -> u64 { DEFAULT_BROADCAST_TXS_INTERVAL } #[derive(Debug, Deserialize)] pub struct ConfigMempool { pub pool_size: u64, #[serde(default = "default_broadcast_txs_size")] pub broadcast_txs_size: usize, 
#[serde(default = "default_broadcast_txs_interval")] pub broadcast_txs_interval: u64, } #[derive(Debug, Deserialize)] pub struct ConfigExecutor { pub light: bool, } #[derive(Debug, Deserialize)] pub struct ConfigLogger { pub filter: String, pub log_to_console: bool, pub console_show_file_and_line: bool, pub log_to_file: bool, pub metrics: bool, pub log_path: PathBuf, #[serde(default)] pub modules_level: HashMap, } impl Default for ConfigLogger { fn default() -> Self { Self { filter: "info".into(), log_to_console: true, console_show_file_and_line: false, log_to_file: true, metrics: true, log_path: "logs/".into(), modules_level: HashMap::new(), } } } #[derive(Debug, Deserialize)] pub struct Config { // crypto pub privkey: Hex, pub network: ConfigNetwork, pub mempool: ConfigMempool, pub executor: ConfigExecutor, #[serde(default)] pub consensus: ConfigConsensus, #[serde(default)] pub logger: ConfigLogger, } ================================================ FILE: tests/common/node/consts.rs ================================================ pub const CHAIN_CONFIG_PATH: &str = "devtools/chain/config.toml"; pub const CHAIN_GENESIS_PATH: &str = "devtools/chain/genesis.toml"; pub const CHAIN_ID: &str = "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036"; // Disable ping pub const NETWORK_PING_INTERVAL: Option = Some(99999); // Enough interval for tests pub const NETWORK_TRUST_METRIC_INTERVAL: Option = Some(99); // Trust metric soft hard ban duration pub const NETWORK_SOFT_BAND_DURATION: Option = Some(5); pub const MEMPOOL_POOL_SIZE: usize = 10; ================================================ FILE: tests/common/node/diagnostic.rs ================================================ use super::sync::Sync; use core_network::{DiagnosticEvent, NetworkServiceHandle}; use protocol::{ async_trait, traits::{Context, MessageHandler, PeerTrust, TrustFeedback}, }; use serde_derive::{Deserialize, Serialize}; use std::ops::Deref; pub const GOSSIP_TRUST_NEW_INTERVAL: &str = 
"/gossip/diagnostic/trust_new_interval"; pub const GOSSIP_TRUST_TWIN_EVENT: &str = "/gossip/diagnostic/trust_twin_event"; #[derive(Debug, Serialize, Deserialize)] pub struct TrustNewIntervalReq(pub u8); pub struct TrustNewIntervalHandler { pub sync: Sync, pub network: NetworkServiceHandle, } impl TrustNewIntervalHandler { pub fn new(sync: Sync, network: NetworkServiceHandle) -> Self { TrustNewIntervalHandler { sync, network } } } #[async_trait] impl MessageHandler for TrustNewIntervalHandler { type Message = TrustNewIntervalReq; async fn process(&self, ctx: Context, _msg: Self::Message) -> TrustFeedback { let session_id = ctx .get::("session_id") .cloned() .expect("impossible, session id not found"); let report = self .network .diagnostic .new_trust_interval(session_id.into()) .expect("failed to enter new trust interval"); self.sync.emit(DiagnosticEvent::TrustNewInterval { report }); TrustFeedback::Neutral } } #[repr(u8)] #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] pub enum TwinEvent { Good = 0, Bad = 1, Worse = 2, Both = 3, } #[derive(Debug, Serialize, Deserialize)] pub struct TrustTwinEventReq(pub TwinEvent); pub struct TrustTwinEventHandler(pub NetworkServiceHandle); #[async_trait] impl MessageHandler for TrustTwinEventHandler { type Message = TrustTwinEventReq; async fn process(&self, ctx: Context, msg: Self::Message) -> TrustFeedback { match msg.0 { TwinEvent::Good => self.report(ctx, TrustFeedback::Good), TwinEvent::Bad => self.report(ctx, TrustFeedback::Bad("twin bad".to_owned())), TwinEvent::Worse => self.report(ctx, TrustFeedback::Worse("twin worse".to_owned())), TwinEvent::Both => { self.report(ctx.clone(), TrustFeedback::Good); self.report(ctx, TrustFeedback::Bad("twin bad".to_owned())); } } TrustFeedback::Neutral } } impl Deref for TrustTwinEventHandler { type Target = NetworkServiceHandle; fn deref(&self) -> &Self::Target { &self.0 } } ================================================ FILE: 
tests/common/node/full_node/builder.rs
================================================

// Builder for an in-process full node used by integration tests.
use super::{
    config::Config,
    default_start::{create_genesis, start},
    error::MainError,
    memory_db::MemoryDB,
    Sync,
};

use std::{
    fs,
    net::{IpAddr, Ipv4Addr, SocketAddr},
    sync::Arc,
};

use protocol::traits::ServiceMapping;
use protocol::types::{Block, Genesis};
use protocol::ProtocolResult;

// NOTE(review): generic parameters on this struct look stripped by extraction —
// presumably `Option<String>` for the paths and `Option<Arc<Mapping>>` for the
// mapping; confirm against upstream. (`servive_mapping` typo preserved as-is.)
#[derive(Default)]
pub struct MutaBuilder {
    config_path: Option,
    genesis_path: Option,
    servive_mapping: Option>,
}

impl MutaBuilder {
    pub fn new() -> Self {
        Self {
            servive_mapping: None,
            config_path: None,
            genesis_path: None,
        }
    }

    // NOTE(review): the `Mapping` type parameter and the return's generic
    // arguments appear stripped here as well.
    pub fn service_mapping(mut self, mapping: Mapping) -> MutaBuilder {
        self.servive_mapping = Some(Arc::new(mapping));
        self
    }

    pub fn config_path(mut self, path: &str) -> MutaBuilder {
        self.config_path = Some(path.to_owned());
        self
    }

    pub fn genesis_path(mut self, path: &str) -> MutaBuilder {
        self.genesis_path = Some(path.to_owned());
        self
    }

    /// Parse the config and genesis from the configured paths and assemble a
    /// `Muta` instance listening on 127.0.0.1:`listen_port`.
    ///
    /// Panics (via `expect`) if `config_path`, `genesis_path` or the service
    /// mapping were not set on the builder.
    pub fn build(self, listen_port: u16) -> ProtocolResult> {
        let mut config: Config =
            common_config_parser::parse(&self.config_path.expect("config path is not set"))
                .map_err(MainError::ConfigParse)?;

        // Override listening address
        let listen_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port);
        config.network.listening_address = listen_addr;

        let genesis_toml = fs::read_to_string(&self.genesis_path.expect("genesis path is not set"))
            .map_err(MainError::Io)?;
        let genesis: Genesis = toml::from_str(&genesis_toml).map_err(MainError::GenesisTomlDe)?;

        Ok(Muta::new(
            config,
            genesis,
            self.servive_mapping
                .expect("service mapping cannot be None"),
        ))
    }
}

/// A fully configured test node; start it with `run()`.
pub struct Muta {
    config: Config,
    genesis: Genesis,
    service_mapping: Arc,
}

impl Muta {
    pub fn new(config: Config, genesis: Genesis, service_mapping: Arc) -> Self {
        Self {
            config,
            genesis,
            service_mapping,
        }
    }

    /// Create the genesis block in a fresh in-memory DB, then start the node
    /// and await until it exits.
    pub async fn run(self, seckey: String, sync: Sync) -> ProtocolResult<()> {
        // run muta
        let memory_db = MemoryDB::default();
        self.create_genesis(memory_db.clone()).await?;

        start(
            self.config,
            Arc::clone(&self.service_mapping),
            memory_db,
            seckey,
            sync,
        )
        .await?;

        Ok(())
    }

    // NOTE(review): return generic stripped — presumably `ProtocolResult<Block>`.
    async fn create_genesis(&self, db: MemoryDB) -> ProtocolResult {
        create_genesis(&self.genesis, Arc::clone(&self.service_mapping), db).await
    }
}

================================================
FILE: tests/common/node/full_node/default_start.rs
================================================

use super::diagnostic::{
    TrustNewIntervalHandler, TrustTwinEventHandler, GOSSIP_TRUST_NEW_INTERVAL,
    GOSSIP_TRUST_TWIN_EVENT,
};
/// Almost same as src/default_start.rs, only remove graphql service.
use super::{config::Config, consts, error::MainError, memory_db::MemoryDB, Sync};

use std::collections::HashMap;
use std::convert::TryFrom;
use std::sync::Arc;

use bytes::Bytes;
use futures::lock::Mutex;

use common_crypto::{
    BlsCommonReference, BlsPrivateKey, BlsPublicKey, PublicKey, Secp256k1, Secp256k1PrivateKey,
    ToPublicKey, UncompressedPublicKey,
};
use core_api::adapter::DefaultAPIAdapter;
use core_consensus::fixed_types::{FixedBlock, FixedProof, FixedSignedTxs};
use core_consensus::message::{
    ChokeMessageHandler, ProposalMessageHandler, PullBlockRpcHandler, PullProofRpcHandler,
    PullTxsRpcHandler, QCMessageHandler, RemoteHeightMessageHandler, VoteMessageHandler,
    BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE,
    END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, RPC_RESP_SYNC_PULL_BLOCK,
    RPC_RESP_SYNC_PULL_PROOF, RPC_RESP_SYNC_PULL_TXS, RPC_SYNC_PULL_BLOCK, RPC_SYNC_PULL_PROOF,
    RPC_SYNC_PULL_TXS,
};
use core_consensus::status::{CurrentConsensusStatus, StatusAgent};
use core_consensus::util::OverlordCrypto;
use core_consensus::{
    ConsensusWal, DurationConfig, Node, OverlordConsensus, OverlordConsensusAdapter,
    OverlordSynchronization, RichBlock, SignedTxsWAL,
};
use core_mempool::{
    DefaultMemPoolAdapter, HashMemPool, MsgPushTxs, NewTxsHandler, PullTxsHandler,
    END_GOSSIP_NEW_TXS, RPC_PULL_TXS, RPC_RESP_PULL_TXS,
};
use core_network::{DiagnosticEvent, NetworkConfig, NetworkService, PeerId, PeerIdExt}; use core_storage::{ImplStorage, StorageError}; use framework::executor::{ServiceExecutor, ServiceExecutorFactory}; use protocol::traits::{ APIAdapter, CommonStorage, Context, MemPool, Network, NodeInfo, ServiceMapping, Storage, }; use protocol::types::{Address, Block, BlockHeader, Genesis, Hash, Metadata, Proof, Validator}; use protocol::{fixed_codec::FixedCodec, ProtocolResult}; pub async fn create_genesis( genesis: &Genesis, servive_mapping: Arc, db: MemoryDB, ) -> ProtocolResult { let metadata: Metadata = serde_json::from_str(genesis.get_payload("metadata")).expect("Decode metadata failed!"); let validators: Vec = metadata .verifier_list .iter() .map(|v| Validator { pub_key: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect(); // Read genesis. log::info!("Genesis data: {:?}", genesis); // Init Block db let storage = Arc::new(ImplStorage::new(Arc::new(db.clone()))); match storage.get_latest_block(Context::new()).await { Ok(genesis_block) => { log::info!("The Genesis block has been initialized."); return Ok(genesis_block); } Err(e) => { if !e.to_string().contains("GetNone") { return Err(e); } } }; // Init genesis let genesis_state_root = ServiceExecutor::create_genesis( genesis.services.clone(), Arc::new(db), Arc::clone(&storage), servive_mapping, )?; // Build genesis block. 
let proposer = Address::from_hash(Hash::digest(protocol::address_hrp().as_str()))?; let genesis_block_header = BlockHeader { chain_id: metadata.chain_id.clone(), height: 0, exec_height: 0, prev_hash: Hash::from_empty(), timestamp: genesis.timestamp, order_root: Hash::from_empty(), order_signed_transactions_hash: Hash::from_empty(), confirm_root: vec![], state_root: genesis_state_root, receipt_root: vec![], cycles_used: vec![], proposer, proof: Proof { height: 0, round: 0, block_hash: Hash::from_empty(), signature: Bytes::new(), bitmap: Bytes::new(), }, validator_version: 0, validators, }; let latest_proof = genesis_block_header.proof.clone(); let genesis_block = Block { header: genesis_block_header, ordered_tx_hashes: vec![], }; storage .insert_block(Context::new(), genesis_block.clone()) .await?; storage .update_latest_proof(Context::new(), latest_proof) .await?; log::info!("The genesis block is created {:?}", genesis_block); Ok(genesis_block) } pub async fn start( config: Config, service_mapping: Arc, db: MemoryDB, seckey: String, sync: Sync, ) -> ProtocolResult<()> { log::info!("node starts"); // Init Block db let storage = Arc::new(ImplStorage::new(Arc::new(db.clone()))); // Init network let network_config = NetworkConfig::new() .max_connections(config.network.max_connected_peers)? .allowlist_only(config.network.allowlist_only) .peer_trust_metric( consts::NETWORK_TRUST_METRIC_INTERVAL, config.network.trust_max_history_duration, )? 
.peer_soft_ban(consts::NETWORK_SOFT_BAND_DURATION) .peer_fatal_ban(config.network.fatal_ban_duration) .rpc_timeout(config.network.rpc_timeout) .ping_interval(consts::NETWORK_PING_INTERVAL) .selfcheck_interval(config.network.selfcheck_interval) .max_wait_streams(config.network.max_wait_streams) .max_frame_length(config.network.max_frame_length) .send_buffer_size(config.network.send_buffer_size) .write_timeout(config.network.write_timeout) .recv_buffer_size(config.network.recv_buffer_size); let mut bootstrap_pairs = vec![]; if let Some(bootstrap) = &config.network.bootstraps { for bootstrap in bootstrap.iter() { bootstrap_pairs.push((bootstrap.peer_id.to_owned(), bootstrap.address.to_owned())); } } let allowlist = config.network.allowlist.clone().unwrap_or_default(); let network_config = network_config .bootstraps(bootstrap_pairs)? .allowlist(allowlist)? .secio_keypair(seckey.clone())?; let mut network_service = NetworkService::new(network_config); network_service .listen(config.network.listening_address) .await?; // Register diagnostic network_service.register_endpoint_handler( GOSSIP_TRUST_NEW_INTERVAL, TrustNewIntervalHandler::new(sync.clone(), network_service.handle()), )?; network_service.register_endpoint_handler( GOSSIP_TRUST_TWIN_EVENT, TrustTwinEventHandler(network_service.handle()), )?; let hook_fn = |sync: Sync| -> _ { Box::new(move |event: DiagnosticEvent| sync.emit(event)) }; network_service.register_diagnostic_hook(hook_fn(sync.clone())); // Init mempool let current_block = storage.get_latest_block(Context::new()).await?; let mempool_adapter = DefaultMemPoolAdapter::::new( network_service.handle(), Arc::clone(&storage), Arc::new(db.clone()), Arc::clone(&service_mapping), config.mempool.broadcast_txs_size, config.mempool.broadcast_txs_interval, ); let mempool = Arc::new(HashMemPool::new(consts::MEMPOOL_POOL_SIZE, mempool_adapter, vec![]).await); // self private key let hex_privkey = 
hex::decode(config.privkey.as_string_trim0x()).map_err(MainError::FromHex)?; let my_privkey = Secp256k1PrivateKey::try_from(hex_privkey.as_ref()).map_err(MainError::Crypto)?; let my_pubkey = my_privkey.pub_key(); let my_address = Address::from_pubkey_bytes(my_pubkey.to_uncompressed_bytes())?; // Get metadata let api_adapter = DefaultAPIAdapter::::new( Arc::clone(&mempool), Arc::clone(&storage), Arc::new(db.clone()), Arc::clone(&service_mapping), ); // Create full transactions wal let wal_path = crate::common::tmp_dir() .to_str() .expect("wal path string") .to_string(); let txs_wal = Arc::new(SignedTxsWAL::new(wal_path)); // Init consensus wal let wal_path = crate::common::tmp_dir() .to_str() .expect("wal path string") .to_string(); let consensus_wal = Arc::new(ConsensusWal::new(wal_path)); let exec_resp = api_adapter .query_service( Context::new(), current_block.header.height, u64::max_value(), 1, my_address.clone(), "metadata".to_string(), "get_metadata".to_string(), "".to_string(), ) .await?; let metadata: Metadata = serde_json::from_str(&exec_resp.succeed_data).expect("Decode metadata failed!"); // set chain id in network network_service.set_chain_id(Hash::from_hex(consts::CHAIN_ID).expect("chain id")); // set args in mempool mempool.set_args( metadata.timeout_gap, metadata.cycles_limit, metadata.max_tx_size, ); // register broadcast new transaction network_service .register_endpoint_handler(END_GOSSIP_NEW_TXS, NewTxsHandler::new(Arc::clone(&mempool)))?; // register pull txs from other node network_service.register_endpoint_handler( RPC_PULL_TXS, PullTxsHandler::new(Arc::new(network_service.handle()), Arc::clone(&mempool)), )?; network_service.register_rpc_response::(RPC_RESP_PULL_TXS)?; // Init Consensus let validators: Vec = metadata .verifier_list .iter() .map(|v| Validator { pub_key: v.pub_key.decode(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) .collect(); let node_info = NodeInfo { chain_id: metadata.chain_id.clone(), self_address: 
my_address.clone(), self_pub_key: my_pubkey.to_bytes(), }; let current_header = ¤t_block.header; let block_hash = Hash::digest(current_block.header.encode_fixed()?); let current_height = current_block.header.height; let exec_height = current_block.header.exec_height; let current_consensus_status = CurrentConsensusStatus { cycles_price: metadata.cycles_price, cycles_limit: metadata.cycles_limit, latest_committed_height: current_block.header.height, exec_height: current_block.header.exec_height, current_hash: block_hash, latest_committed_state_root: current_header.state_root.clone(), list_confirm_root: vec![], list_state_root: vec![], list_receipt_root: vec![], list_cycles_used: vec![], current_proof: current_header.proof.clone(), validators: validators.clone(), consensus_interval: metadata.interval, propose_ratio: metadata.propose_ratio, prevote_ratio: metadata.prevote_ratio, precommit_ratio: metadata.precommit_ratio, brake_ratio: metadata.brake_ratio, max_tx_size: metadata.max_tx_size, tx_num_limit: metadata.tx_num_limit, }; let consensus_interval = current_consensus_status.consensus_interval; let status_agent = StatusAgent::new(current_consensus_status); let mut bls_pub_keys = HashMap::new(); for validator_extend in metadata.verifier_list.iter() { let address = validator_extend.pub_key.decode(); let hex_pubkey = hex::decode(validator_extend.bls_pub_key.as_string_trim0x()) .map_err(MainError::FromHex)?; let pub_key = BlsPublicKey::try_from(hex_pubkey.as_ref()).map_err(MainError::Crypto)?; bls_pub_keys.insert(address, pub_key); } let mut priv_key = Vec::new(); priv_key.extend_from_slice(&[0u8; 16]); let mut tmp = hex::decode(config.privkey.as_string_trim0x()).unwrap(); priv_key.append(&mut tmp); let bls_priv_key = BlsPrivateKey::try_from(priv_key.as_ref()).map_err(MainError::Crypto)?; let hex_common_ref = hex::decode(metadata.common_ref.as_string_trim0x()).map_err(MainError::FromHex)?; let common_ref: BlsCommonReference = std::str::from_utf8(hex_common_ref.as_ref()) 
.map_err(MainError::Utf8)? .into(); let crypto = Arc::new(OverlordCrypto::new(bls_priv_key, bls_pub_keys, common_ref)); let mut consensus_adapter = OverlordConsensusAdapter::::new( Arc::new(network_service.handle()), Arc::clone(&mempool), Arc::clone(&storage), Arc::new(db), Arc::clone(&service_mapping), status_agent.clone(), Arc::clone(&crypto), config.consensus.overlord_gap, )?; let exec_demon = consensus_adapter.take_exec_demon(); let consensus_adapter = Arc::new(consensus_adapter); let lock = Arc::new(Mutex::new(())); let overlord_consensus = Arc::new(OverlordConsensus::new( status_agent.clone(), node_info, Arc::clone(&crypto), Arc::clone(&txs_wal), Arc::clone(&consensus_adapter), Arc::clone(&lock), Arc::clone(&consensus_wal), )); consensus_adapter.set_overlord_handler(overlord_consensus.get_overlord_handler()); let synchronization = Arc::new(OverlordSynchronization::<_>::new( config.consensus.sync_txs_chunk_size, consensus_adapter, status_agent.clone(), crypto, lock, )); let peer_ids = metadata .verifier_list .iter() .map(|v| PeerId::from_pubkey_bytes(v.pub_key.decode()).map(PeerIdExt::into_bytes_ext)) .collect::, _>>()?; network_service .handle() .tag_consensus(Context::new(), peer_ids)?; // Re-execute block from exec_height + 1 to current_height, so that init the // lost current status. log::info!("Re-execute from {} to {}", exec_height + 1, current_height); for height in exec_height + 1..=current_height { let block = storage .get_block(Context::new(), height) .await? .ok_or(StorageError::GetNone)?; let txs = storage .get_transactions( Context::new(), block.header.height, &block.ordered_tx_hashes, ) .await? 
.into_iter() .filter_map(|opt_stx| opt_stx) .collect::>(); if txs.len() != block.ordered_tx_hashes.len() { return Err(StorageError::GetNone.into()); } let rich_block = RichBlock { block, txs }; let _ = synchronization .exec_block(Context::new(), rich_block, status_agent.clone()) .await?; } // register consensus network_service.register_endpoint_handler( END_GOSSIP_SIGNED_PROPOSAL, ProposalMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( END_GOSSIP_AGGREGATED_VOTE, QCMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_VOTE, VoteMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( END_GOSSIP_SIGNED_CHOKE, ChokeMessageHandler::new(Arc::clone(&overlord_consensus)), )?; network_service.register_endpoint_handler( BROADCAST_HEIGHT, RemoteHeightMessageHandler::new(Arc::clone(&synchronization)), )?; network_service.register_endpoint_handler( RPC_SYNC_PULL_BLOCK, PullBlockRpcHandler::new(Arc::new(network_service.handle()), Arc::clone(&storage)), )?; network_service.register_endpoint_handler( RPC_SYNC_PULL_PROOF, PullProofRpcHandler::new(Arc::new(network_service.handle()), Arc::clone(&storage)), )?; network_service.register_endpoint_handler( RPC_SYNC_PULL_TXS, PullTxsRpcHandler::new(Arc::new(network_service.handle()), Arc::clone(&storage)), )?; network_service.register_rpc_response::(RPC_RESP_SYNC_PULL_BLOCK)?; network_service.register_rpc_response::(RPC_RESP_SYNC_PULL_PROOF)?; network_service.register_rpc_response::(RPC_RESP_SYNC_PULL_TXS)?; // Run network tokio::spawn(network_service); sync.wait().await; // Run sync tokio::spawn(async move { if let Err(e) = synchronization.polling_broadcast().await { log::error!("synchronization: {:?}", e); } }); // Run consensus let authority_list = validators .iter() .map(|v| Node { address: v.pub_key.clone(), propose_weight: v.propose_weight, vote_weight: v.vote_weight, }) 
.collect::>();
// NOTE(review): the turbofish above appears stripped by extraction —
// presumably `.collect::<Vec<Node>>()`; confirm against upstream.

// Overlord timer ratios come straight from on-chain metadata.
let timer_config = DurationConfig {
    propose_ratio: metadata.propose_ratio,
    prevote_ratio: metadata.prevote_ratio,
    precommit_ratio: metadata.precommit_ratio,
    brake_ratio: metadata.brake_ratio,
};

// Drive consensus on its own task; errors are logged, not propagated.
let consensus_handle = tokio::spawn(async move {
    if let Err(e) = overlord_consensus
        .run(
            current_height,
            consensus_interval,
            authority_list,
            Some(timer_config),
        )
        .await
    {
        log::error!("muta-consensus: {:?} error", e);
    }
});

// Run the exec demon on the current task; only returns when it stops,
// after which we wait for the consensus task to finish as well.
exec_demon.run().await;
let _ = consensus_handle.await;
let _ = sync;

Ok(())
}

================================================
FILE: tests/common/node/full_node/error.rs
================================================

use derive_more::{Display, From};
use protocol::{ProtocolError, ProtocolErrorKind};

/// Top-level error for the test full node; each variant wraps the failure of
/// one startup stage (config parse, I/O, genesis decode, hex/key decode, ...).
#[derive(Debug, Display, From)]
pub enum MainError {
    #[display(fmt = "The muta configuration read failed {:?}", _0)]
    ConfigParse(common_config_parser::ParseError),

    #[display(fmt = "{:?}", _0)]
    Io(std::io::Error),

    #[display(fmt = "Toml fails to parse genesis {:?}", _0)]
    GenesisTomlDe(toml::de::Error),

    #[display(fmt = "hex error {:?}", _0)]
    FromHex(hex::FromHexError),

    #[display(fmt = "crypto error {:?}", _0)]
    Crypto(common_crypto::Error),

    #[display(fmt = "{:?}", _0)]
    Utf8(std::str::Utf8Error),

    #[display(fmt = "other error {:?}", _0)]
    Other(String),
}

impl std::error::Error for MainError {}

// NOTE(review): source generic stripped by extraction — presumably
// `impl From<MainError> for ProtocolError`.
impl From for ProtocolError {
    fn from(error: MainError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Main, Box::new(error))
    }
}

================================================
FILE: tests/common/node/full_node/memory_db.rs
================================================

// In-memory storage backend for tests; implements both `cita_trie::DB`
// (for state trie nodes) and the protocol `StorageAdapter` (for block data).
use derive_more::Display;
use parking_lot::RwLock;
use protocol::{
    async_trait,
    codec::ProtocolCodecSync,
    traits::{
        IntoIteratorByRef, StorageAdapter, StorageBatchModify, StorageIterator, StorageSchema,
    },
    Bytes, ProtocolError, ProtocolErrorKind, ProtocolResult,
};
use std::{
    collections::{hash_map, HashMap},
    marker::PhantomData,
    ops::Deref,
    sync::Arc,
};

#[derive(Debug,
Display)] pub enum MemoryDBError { #[display(fmt = "batch length dont match")] BatchLengthMismatch, } impl std::error::Error for MemoryDBError {} impl From for ProtocolError { fn from(err: MemoryDBError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Storage, Box::new(err)) } } type Category = HashMap, Vec>; #[derive(Clone)] pub struct MemoryDB { trie: Arc, Vec>>>, db: Arc>>, } impl Default for MemoryDB { fn default() -> Self { MemoryDB { trie: Default::default(), db: Default::default(), } } } impl Deref for MemoryDB { type Target = Arc, Vec>>>; fn deref(&self) -> &Self::Target { &self.trie } } impl cita_trie::DB for MemoryDB { type Error = MemoryDBError; fn get(&self, key: &[u8]) -> Result>, Self::Error> { Ok(self.read().get(key).cloned()) } fn contains(&self, key: &[u8]) -> Result { Ok(self.read().contains_key(key)) } fn insert(&self, key: Vec, value: Vec) -> Result<(), Self::Error> { self.write().insert(key, value); Ok(()) } fn insert_batch(&self, keys: Vec>, values: Vec>) -> Result<(), Self::Error> { if keys.len() != values.len() { return Err(MemoryDBError::BatchLengthMismatch); } for (key, value) in keys.into_iter().zip(values.into_iter()) { self.write().insert(key, value); } Ok(()) } fn remove(&self, key: &[u8]) -> Result<(), Self::Error> { self.write().remove(key); Ok(()) } fn remove_batch(&self, keys: &[Vec]) -> Result<(), Self::Error> { for key in keys { self.write().remove(key); } Ok(()) } fn flush(&self) -> Result<(), Self::Error> { Ok(()) } } pub struct MemoryIterator<'a, S: StorageSchema> { inner: hash_map::Iter<'a, Vec, Vec>, pin_s: PhantomData, } impl<'a, S: StorageSchema> Iterator for MemoryIterator<'a, S> { type Item = ProtocolResult<(::Key, ::Value)>; fn next(&mut self) -> Option { let kv_decode = |(k_bytes, v_bytes): (&Vec, &Vec)| -> ProtocolResult<_> { let k_bytes = Bytes::copy_from_slice(k_bytes.as_ref()); let key = <_>::decode_sync(k_bytes)?; let v_bytes = Bytes::copy_from_slice(&v_bytes.as_ref()); let val = 
<_>::decode_sync(v_bytes)?; Ok((key, val)) }; self.inner.next().map(kv_decode) } } pub struct MemoryIntoIterator<'a, S: StorageSchema> { inner: parking_lot::RwLockReadGuard<'a, HashMap>, pin_s: PhantomData, } impl<'a, 'b: 'a, S: StorageSchema> IntoIterator for &'b MemoryIntoIterator<'a, S> { type IntoIter = StorageIterator<'a, S>; type Item = ProtocolResult<(::Key, ::Value)>; fn into_iter(self) -> Self::IntoIter { Box::new(MemoryIterator { inner: self .inner .get(&S::category().to_string()) .expect("impossible, already ensure we have category in prepare_iter") .iter(), pin_s: PhantomData::, }) } } impl<'c, S: StorageSchema> IntoIteratorByRef for MemoryIntoIterator<'c, S> { fn ref_to_iter<'a, 'b: 'a>(&'b self) -> StorageIterator<'a, S> { self.into_iter() } } #[async_trait] impl StorageAdapter for MemoryDB { async fn insert( &self, key: ::Key, val: ::Value, ) -> ProtocolResult<()> { let key = key.encode_sync()?.to_vec(); let val = val.encode_sync()?.to_vec(); let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); db.insert(key, val); Ok(()) } async fn get( &self, key: ::Key, ) -> ProtocolResult::Value>> { let key = key.encode_sync()?; let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); let opt_bytes = db.get(&key.to_vec()).cloned(); if let Some(bytes) = opt_bytes { let val = <_>::decode_sync(Bytes::copy_from_slice(&bytes))?; Ok(Some(val)) } else { Ok(None) } } async fn remove(&self, key: ::Key) -> ProtocolResult<()> { let key = key.encode_sync()?.to_vec(); let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); db.remove(&key); Ok(()) } async fn contains( &self, key: ::Key, ) -> ProtocolResult { let key = key.encode_sync()?.to_vec(); let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); Ok(db.get(&key).is_some()) } async fn batch_modify( &self, keys: 
Vec<::Key>, vals: Vec>, ) -> ProtocolResult<()> { if keys.len() != vals.len() { return Err(MemoryDBError::BatchLengthMismatch.into()); } let mut pairs: Vec<(Bytes, Option)> = Vec::with_capacity(keys.len()); for (key, value) in keys.into_iter().zip(vals.into_iter()) { let key = key.encode_sync()?; let value = match value { StorageBatchModify::Insert(value) => Some(value.encode_sync()?), StorageBatchModify::Remove => None, }; pairs.push((key, value)) } let mut db = self.db.write(); let db = db .entry(S::category().to_string()) .or_insert_with(HashMap::new); for (key, value) in pairs.into_iter() { match value { Some(value) => db.insert(key.to_vec(), value.to_vec()), None => db.remove(&key.to_vec()), }; } Ok(()) } fn prepare_iter<'a, 'b: 'a, S: StorageSchema + 'static, P: AsRef<[u8]> + 'a>( &'b self, _prefix: &P, ) -> ProtocolResult + 'a>> { { self.db .write() .entry(S::category().to_string()) .or_insert_with(HashMap::new); } Ok(Box::new(MemoryIntoIterator { inner: self.db.read(), pin_s: PhantomData::, })) } } ================================================ FILE: tests/common/node/full_node.rs ================================================ mod builder; mod default_start; mod error; mod memory_db; use super::{config, consts, diagnostic, sync::Sync}; use builder::MutaBuilder; use asset::AssetService; use authorization::AuthorizationService; use derive_more::{Display, From}; use metadata::MetadataService; use multi_signature::MultiSignatureService; use protocol::traits::{SDKFactory, Service, ServiceMapping, ServiceSDK}; use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult}; struct DefaultServiceMapping; impl ServiceMapping for DefaultServiceMapping { fn get_service>( &self, name: &str, factory: &Factory, ) -> ProtocolResult> { let sdk = factory.get_sdk(name)?; let service = match name { "authorization" => { let multi_sig_sdk = factory.get_sdk("multi_signature")?; Box::new(AuthorizationService::new( sdk, MultiSignatureService::new(multi_sig_sdk), )) as Box } 
"asset" => Box::new(AssetService::new(sdk)) as Box, "metadata" => Box::new(MetadataService::new(sdk)) as Box, "multi_signature" => Box::new(MultiSignatureService::new(sdk)) as Box, _ => { return Err(MappingError::NotFoundService { service: name.to_owned(), } .into()) } }; Ok(service) } fn list_service_name(&self) -> Vec { vec![ "asset".to_owned(), "authorization".to_owned(), "metadata".to_owned(), "multi_signature".to_owned(), ] } } #[derive(Debug, Display, From)] enum MappingError { #[display(fmt = "service {:?} was not found", service)] NotFoundService { service: String }, } impl std::error::Error for MappingError {} impl From for ProtocolError { fn from(err: MappingError) -> ProtocolError { ProtocolError::new(ProtocolErrorKind::Service, Box::new(err)) } } // Note: inject runnning_status pub async fn run(listen_port: u16, seckey: String, sync: Sync) { let builder = MutaBuilder::new() .config_path(consts::CHAIN_CONFIG_PATH) .genesis_path(consts::CHAIN_GENESIS_PATH) .service_mapping(DefaultServiceMapping {}); let muta = builder.build(listen_port).expect("build"); muta.run(seckey, sync).await.expect("run"); } ================================================ FILE: tests/common/node/sync.rs ================================================ use core_network::{DiagnosticEvent, TrustReport}; use derive_more::Display; use protocol::traits::TrustFeedback; use tokio::sync::{ broadcast::{channel, Receiver, RecvError, Sender}, Barrier, BarrierWaitResult, Mutex, }; use tokio::time::timeout; use std::{ sync::atomic::{AtomicBool, Ordering}, sync::Arc, time::Duration, }; const SYNC_RECV_TIMEOUT: Duration = Duration::from_secs(60); #[derive(Debug, Display)] pub enum SyncError { #[display(fmt = "timeout")] Timeout, #[display(fmt = "recv {}", _0)] Recv(RecvError), #[display(fmt = "disconnected")] Disconected, } #[derive(Debug, Display)] pub enum SyncEvent { #[display(fmt = "connected")] Connected, #[display(fmt = "remote height {}", _0)] RemoteHeight(u64), #[display(fmt = "feedback 
{}", _0)] TrustMetric(TrustFeedback), #[display(fmt = "report {}", _0)] TrustReport(TrustReport), } #[derive(Clone)] pub struct Sync { diag_tx: Sender, diag_rx: Arc>>, barrier: Arc, connected: Arc, } impl Sync { pub fn new() -> Self { let (diag_tx, diag_rx) = channel(10); let barrier = Arc::new(Barrier::new(2)); let connected = Arc::new(AtomicBool::new(false)); let diag_rx = Arc::new(Mutex::new(diag_rx)); Sync { diag_tx, diag_rx, barrier, connected, } } pub fn is_connected(&self) -> bool { self.connected.load(Ordering::SeqCst) } pub fn set_connected(&self) { self.connected.store(true, Ordering::SeqCst); } pub fn disconnect(&self) { self.connected.store(false, Ordering::SeqCst); } pub async fn wait(&self) -> BarrierWaitResult { self.barrier.wait().await } // # Panic pub async fn wait_connected(&self) { let mut count: usize = 2; // Wait client node and full node both be connected to each other while count > 0 { match self.recv().await { Ok(SyncEvent::Connected) => count -= 1, Ok(event) => panic!("wait connected, but receive {}", event), Err(err) => panic!("connect to full node failed {:?}", err), } } self.set_connected(); loop { match self.recv().await { Ok(SyncEvent::RemoteHeight(height)) if height > 0 => break, Ok(event) => panic!("wait remote height, but receive {}", event), Err(err) => panic!("wait remote height failed {:?}", err), } } } pub fn emit(&self, event: DiagnosticEvent) { self.diag_tx.send(event).unwrap(); } pub async fn recv(&self) -> Result { match timeout(SYNC_RECV_TIMEOUT, self.diag_rx.lock().await.recv()).await { Err(_) if !self.is_connected() => Err(SyncError::Disconected), Err(_) => Err(SyncError::Timeout), Ok(Err(e)) => Err(SyncError::Recv(e)), Ok(Ok(event)) => match event { DiagnosticEvent::SessionClosed => { self.disconnect(); Err(SyncError::Disconected) } DiagnosticEvent::RemoteHeight { height } => Ok(SyncEvent::RemoteHeight(height)), DiagnosticEvent::TrustMetric { feedback } => Ok(SyncEvent::TrustMetric(feedback)), 
DiagnosticEvent::TrustNewInterval { report } => Ok(SyncEvent::TrustReport(report)), DiagnosticEvent::NewSession => Ok(SyncEvent::Connected), }, } } } impl Default for Sync { fn default() -> Self { Sync::new() } } impl Drop for Sync { fn drop(&mut self) { self.connected.store(false, Ordering::SeqCst); } } ================================================ FILE: tests/common/node.rs ================================================ pub mod config; pub mod consts; pub mod diagnostic; pub mod full_node; pub mod sync; pub use diagnostic::TwinEvent; ================================================ FILE: tests/e2e/jest.config.js ================================================ module.exports = { displayName: "Unit Tests", testRegex: "(/.*.(test|spec))\\.(ts?|js?)$", transform: { "^.+\\.ts?$": "ts-jest" }, moduleFileExtensions: ["ts", "js", "json"], testTimeout: 50000 }; ================================================ FILE: tests/e2e/package.json ================================================ { "name": "muta-e2e-tests", "version": "1.0.0", "description": "", "author": "huwenchao", "license": "MIT", "scripts": { "test": "jest --color", "lint": "eslint --fix '{src,test}/**/*.{js,ts}'", "prettier": "prettier --write **/*.{js,ts,graphql}" }, "dependencies": { "@mutadev/muta-sdk": "0.2.0-rc.0", "@mutadev/service": "0.2.0-rc.0", "@types/node": "^14.0.14", "@types/node-fetch": "^2.5.7", "apollo-boost": "^0.4.4", "graphql": "^15.2.0", "graphql-tag": "^2.10.1", "node-fetch": "^2.6.0", "toml": "^3.0.0", "ts-node": "^8.3.0", "typescript": "^3.5.3" }, "devDependencies": { "@types/jest": "^24.0.23", "jest": "^24.9.0", "prettier": "^1.19.1", "ts-jest": "^26.0.0" } } ================================================ FILE: tests/e2e/sdk.test.ts ================================================ import { AssetService, MultiSignatureService } from '@mutadev/service' import * as sdk from '@mutadev/muta-sdk'; import { mutaClient } from './utils'; const { Account, retry } = sdk; const { toHex } = 
sdk.utils; describe("API test via @mutadev/muta-sdk-js", () => { test("getLatestBlock", async () => { let current_height = await mutaClient.getLatestBlockHeight(); expect(current_height).toBeGreaterThan(0); }); test("getNoneBlock", async () => { let block = await mutaClient.getBlock("0xffffffff"); expect(block).toBe(null); }) test("getNoneTransaction", async () => { let tx = await mutaClient.getTransaction("0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c"); expect(tx).toBe(null); }) test("getNoneReceipt", async () => { let receipt = await mutaClient.getReceipt("0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c"); expect(receipt).toBe(null); }) test("transfer work", async () => { const from_addr = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705"; const from_pk = "0x5ec982173d54d830b6789cbbbe43eaa2853a5ff752d1ebc1b266cf9790314f8a"; const to_addr = "muta15a8a9ksxe3hhjpw3l7wz7ry778qg8h9wz8y35p"; const asset_id = "0xf56924db538e77bb5951eb5ff0d02b88983c49c45eea30e8ae3e7234b311436c"; const account = new sdk.Account(from_pk); const assetService = new AssetService(mutaClient, account); const from_balance_before = await assetService.read.get_balance({ user: from_addr, asset_id: asset_id })!; const to_balance_before = await assetService.read.get_balance({ user: to_addr, asset_id: asset_id, })!; // transfer expect(account.address).toBe(from_addr); await assetService.write.transfer({ asset_id: asset_id, to: to_addr, value: 0x01, }) // check result let from_balance_after = await assetService.read.get_balance({ user: from_addr, asset_id: asset_id, })!; const to_balance_after = await assetService.read.get_balance({ user: to_addr, asset_id: asset_id, })!; const c1 = from_balance_before.succeedData.balance as number; expect(from_balance_after.succeedData.balance).toBe(c1 - 1); const c2 = to_balance_before.succeedData.balance as number; expect(to_balance_after.succeedData.balance).toBe(c2 + 1); }); test('multisig', async () => { const wangYe = 
Account.fromPrivateKey( '0x1000000000000000000000000000000000000000000000000000000000000000', ); const qing = Account.fromPrivateKey( '0x2000000000000000000000000000000000000000000000000000000000000000', ); const multiSigService = new MultiSignatureService(mutaClient, wangYe); var GenerateMultiSigAccountPayload = { owner: wangYe.address, autonomy: false, addr_with_weight: [{ address: wangYe.address, weight: 1 }, { address: qing.address, weight: 1 }], threshold: 2, memo: 'welcome to BiYouCun' }; const generated = await multiSigService.write.generate_account(GenerateMultiSigAccountPayload); expect(Number(generated.response.response.code)).toBe(0); const multiSigAddress = generated.response.response.succeedData.address; const createAssetTx = await mutaClient.composeTransaction({ method: 'create_asset', payload: { name: 'miao', supply: 2077, symbol: '😺', }, serviceName: 'asset', sender: multiSigAddress, }); const signedCreateAssetTx = wangYe.signTransaction(createAssetTx); try { await mutaClient.sendTransaction(signedCreateAssetTx); throw 'should failed'; } catch(e) { expect(String(e)).toContain('CheckAuthorization'); } const bothSignedCreateAssetTx = qing.signTransaction(signedCreateAssetTx); const txHash = await mutaClient.sendTransaction(bothSignedCreateAssetTx); const receipt = await retry(() => mutaClient.getReceipt(toHex(txHash))); expect(Number(receipt.response.response.code)).toBe(0); // MultiSig address balance const asset = JSON.parse(receipt.response.response.succeedData as string); const assetService = new AssetService(mutaClient, wangYe); const balance = await assetService.read.get_balance({ asset_id: asset.id, user: multiSigAddress, }); expect(Number(balance.code)).toBe(0); expect(Number(balance.succeedData.balance)).toBe(2077); const updateAccountPayload = { account_address: multiSigAddress, owner: wangYe.address, addr_with_weight: [{ address: wangYe.address, weight: 3 }, { address: qing.address, weight: 1 }], threshold: 4, memo: 'welcome to BiYouCun' }; 
const update = await multiSigService.write.update_account(updateAccountPayload); expect(Number(update.response.response.code)).toBe(0); const fei = Account.fromPrivateKey( '0x3000000000000000000000000000000000000000000000000000000000000000', ); var GenerateMultiSigAccountPayload = { owner: wangYe.address, autonomy: false, addr_with_weight: [{ address: multiSigAddress, weight: 2 }, { address: fei.address, weight: 1 }], threshold: 2, memo: 'welcome to CiYouCun' }; const newGenerate = await multiSigService.write.generate_account(GenerateMultiSigAccountPayload); expect(Number(newGenerate.response.response.code)).toBe(0); const newMultiSigAddress = newGenerate.response.response.succeedData.address; const newAssetTx = await mutaClient.composeTransaction({ method: 'create_asset', payload: { name: 'miaomiao', supply: 2078, symbol: '😺😺', }, serviceName: 'asset', sender: newMultiSigAddress, }); const newSignedCreateAssetTx = wangYe.signTransaction(newAssetTx); const newBothCreateAssetTx = qing.signTransaction(newSignedCreateAssetTx); const newTxHash = await mutaClient.sendTransaction(newBothCreateAssetTx); const newReceipt = await retry(() => mutaClient.getReceipt(toHex(newTxHash))); expect(Number(newReceipt.response.response.code)).toBe(0); const newAsset = JSON.parse(newReceipt.response.response.succeedData as string); const newAssetService = new AssetService(mutaClient, wangYe); const newBalance = await newAssetService.read.get_balance({ asset_id: newAsset.id, user: newMultiSigAddress, }); expect(Number(newBalance.code)).toBe(0); expect(Number(newBalance.succeedData.balance)).toBe(2078); }); }); ================================================ FILE: tests/e2e/tsconfig.json ================================================ { "compilerOptions": { "target": "es2017", "module": "commonjs", "strict": true, "skipLibCheck": true, "declaration": true, "esModuleInterop": true, "noUnusedLocals": true, "noUnusedParameters": true, "noImplicitReturns": true, 
"noFallthroughCasesInSwitch": true, "traceResolution": false, "listEmittedFiles": false, "listFiles": false, "pretty": true, "composite": true, "lib": ["es2017"], "sourceMap": true, "inlineSources": true, "outDir": "lib", "rootDir": "src" }, "files": ["./sdk.test.ts", "./utils.ts"], "references": [ ] } ================================================ FILE: tests/e2e/utils.ts ================================================ import fetch from "node-fetch"; import { createHttpLink } from "apollo-link-http"; import { InMemoryCache } from "apollo-cache-inmemory"; import ApolloClient from "apollo-client"; import { Muta } from "@mutadev/muta-sdk"; export const CHAIN_ID = "0xb6a4d7da21443f5e816e8700eea87610e6d769657d6b8ec73028457bf2ca4036"; export const API_URL = process.env.API_URL || "http://localhost:8000/graphql"; export const client = new ApolloClient({ link: createHttpLink({ uri: API_URL, fetch: fetch }), cache: new InMemoryCache(), defaultOptions: { query: { fetchPolicy: "no-cache" } } }); export const muta = new Muta({ endpoint: API_URL, chainId: CHAIN_ID }); export const mutaClient = muta.client(); export function makeid(length: number) { var result = ""; var characters = "abcdef0123456789"; var charactersLength = characters.length; for (var i = 0; i < length; i++) { result += characters.charAt(Math.floor(Math.random() * charactersLength)); } return result; } export function getNonce() { return makeid(64); } export function delay(ms: number) { return new Promise(resolve => setTimeout(resolve, ms)); } ================================================ FILE: tests/e2e/wait-for-it.sh ================================================ #!/usr/bin/env bash # Use this script to test if a given TCP host/port are available # copy from https://github.com/vishnubob/wait-for-it WAITFORIT_cmdname=${0##*/} echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } usage() { cat << USAGE >&2 Usage: $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] -h HOST 
| --host=HOST Host or IP under test -p PORT | --port=PORT TCP port under test Alternatively, you specify the host and port as host:port -s | --strict Only execute subcommand if the test succeeds -q | --quiet Don't output any status messages -t TIMEOUT | --timeout=TIMEOUT Timeout in seconds, zero for no timeout -- COMMAND ARGS Execute command with args after the test finishes USAGE exit 1 } wait_for() { if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" else echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" fi WAITFORIT_start_ts=$(date +%s) while : do if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then nc -z $WAITFORIT_HOST $WAITFORIT_PORT WAITFORIT_result=$? else (echo > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 WAITFORIT_result=$? fi if [[ $WAITFORIT_result -eq 0 ]]; then WAITFORIT_end_ts=$(date +%s) echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" break fi sleep 1 done return $WAITFORIT_result } wait_for_wrapper() { # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 if [[ $WAITFORIT_QUIET -eq 1 ]]; then timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & else timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & fi WAITFORIT_PID=$! trap "kill -INT -$WAITFORIT_PID" INT wait $WAITFORIT_PID WAITFORIT_RESULT=$? 
if [[ $WAITFORIT_RESULT -ne 0 ]]; then echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" fi return $WAITFORIT_RESULT } # process arguments while [[ $# -gt 0 ]] do case "$1" in *:* ) WAITFORIT_hostport=(${1//:/ }) WAITFORIT_HOST=${WAITFORIT_hostport[0]} WAITFORIT_PORT=${WAITFORIT_hostport[1]} shift 1 ;; --child) WAITFORIT_CHILD=1 shift 1 ;; -q | --quiet) WAITFORIT_QUIET=1 shift 1 ;; -s | --strict) WAITFORIT_STRICT=1 shift 1 ;; -h) WAITFORIT_HOST="$2" if [[ $WAITFORIT_HOST == "" ]]; then break; fi shift 2 ;; --host=*) WAITFORIT_HOST="${1#*=}" shift 1 ;; -p) WAITFORIT_PORT="$2" if [[ $WAITFORIT_PORT == "" ]]; then break; fi shift 2 ;; --port=*) WAITFORIT_PORT="${1#*=}" shift 1 ;; -t) WAITFORIT_TIMEOUT="$2" if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi shift 2 ;; --timeout=*) WAITFORIT_TIMEOUT="${1#*=}" shift 1 ;; --) shift WAITFORIT_CLI=("$@") break ;; --help) usage ;; *) echoerr "Unknown argument: $1" usage ;; esac done if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then echoerr "Error: you need to provide a host and port to test." usage fi WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} # check to see if timeout is from busybox? WAITFORIT_TIMEOUT_PATH=$(type -p timeout) WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then WAITFORIT_ISBUSY=1 WAITFORIT_BUSYTIMEFLAG="-t" else WAITFORIT_ISBUSY=0 WAITFORIT_BUSYTIMEFLAG="" fi if [[ $WAITFORIT_CHILD -gt 0 ]]; then wait_for WAITFORIT_RESULT=$? exit $WAITFORIT_RESULT else if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then wait_for_wrapper WAITFORIT_RESULT=$? else wait_for WAITFORIT_RESULT=$? 
fi fi if [[ $WAITFORIT_CLI != "" ]]; then if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" exit $WAITFORIT_RESULT fi exec "${WAITFORIT_CLI[@]}" else exit $WAITFORIT_RESULT fi ================================================ FILE: tests/trust_metric.rs ================================================ /// NOTE: Test may panic after drop full node future, which is /// expected. pub mod common; mod trust_metric_all; ================================================ FILE: tests/trust_metric_all/client_node.rs ================================================ use std::collections::HashSet; use std::convert::TryFrom; use std::iter::FromIterator; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Deref; use std::str::FromStr; use common_crypto::{PrivateKey, PublicKey, Secp256k1PrivateKey, ToPublicKey}; use core_consensus::message::{ FixedBlock, FixedHeight, BROADCAST_HEIGHT, RPC_RESP_SYNC_PULL_BLOCK, RPC_SYNC_PULL_BLOCK, }; use core_network::{ DiagnosticEvent, NetworkConfig, NetworkService, NetworkServiceHandle, PeerId, PeerIdExt, TrustReport, }; use derive_more::Display; use protocol::traits::{ Context, Gossip, MessageCodec, MessageHandler, Priority, Rpc, TrustFeedback, }; use protocol::types::{Address, Block, BlockHeader, Hash, Proof}; use protocol::{async_trait, Bytes}; use crate::common::node::consts; use crate::common::node::diagnostic::{ TrustNewIntervalReq, TrustTwinEventReq, TwinEvent, GOSSIP_TRUST_NEW_INTERVAL, GOSSIP_TRUST_TWIN_EVENT, }; use crate::common::node::sync::{Sync, SyncError, SyncEvent}; #[derive(Debug, Display)] pub enum ClientNodeError { #[display(fmt = "not connected")] NotConnected, #[display(fmt = "unexpected {}", _0)] Unexpected(String), } impl std::error::Error for ClientNodeError {} impl From for ClientNodeError { fn from(err: SyncError) -> Self { match err { SyncError::Recv(err) => ClientNodeError::Unexpected(err.to_string()), SyncError::Timeout => 
ClientNodeError::Unexpected(err.to_string()), SyncError::Disconected => ClientNodeError::NotConnected, } } } type ClientResult = Result; struct DummyPullBlockRpcHandler(NetworkServiceHandle); #[async_trait] impl MessageHandler for DummyPullBlockRpcHandler { type Message = FixedHeight; async fn process(&self, ctx: Context, msg: FixedHeight) -> TrustFeedback { let block = FixedBlock::new(mock_block(msg.inner)); self.0 .response(ctx, RPC_RESP_SYNC_PULL_BLOCK, Ok(block), Priority::High) .await .expect("dummy response pull block"); TrustFeedback::Neutral } } struct ReceiveRemoteHeight(Sync); #[async_trait] impl MessageHandler for ReceiveRemoteHeight { type Message = u64; async fn process(&self, _: Context, msg: u64) -> TrustFeedback { self.0.emit(DiagnosticEvent::RemoteHeight { height: msg }); TrustFeedback::Neutral } } pub struct ClientNode { pub network: NetworkServiceHandle, pub remote_peer_id: PeerId, pub priv_key: Secp256k1PrivateKey, pub sync: Sync, } pub async fn connect( full_node_port: u16, full_seckey: String, listen_port: u16, sync: Sync, ) -> ClientNode { let full_node_peer_id = full_node_peer_id(&full_seckey); let full_node_addr = format!("127.0.0.1:{}", full_node_port); let config = NetworkConfig::new() .ping_interval(consts::NETWORK_PING_INTERVAL) .peer_trust_metric(consts::NETWORK_TRUST_METRIC_INTERVAL, None) .expect("peer trust") .bootstraps(vec![(full_node_peer_id.to_base58(), full_node_addr)]) .expect("test node config"); let priv_key = Secp256k1PrivateKey::generate(&mut rand::rngs::OsRng); let mut network = NetworkService::new(config); let handle = network.handle(); network.set_chain_id(Hash::from_hex(consts::CHAIN_ID).expect("chain id")); network .register_endpoint_handler( RPC_SYNC_PULL_BLOCK, DummyPullBlockRpcHandler(handle.clone()), ) .expect("register consensus rpc pull block"); network .register_rpc_response::(RPC_RESP_SYNC_PULL_BLOCK) .expect("register consensus rpc response pull block"); network .register_endpoint_handler(BROADCAST_HEIGHT, 
ReceiveRemoteHeight(sync.clone())) .expect("register remote height"); let hook_fn = |sync: Sync| -> _ { Box::new(move |event: DiagnosticEvent| { // We only care connected event on client node if let DiagnosticEvent::NewSession = event { sync.emit(event) } }) }; network.register_diagnostic_hook(hook_fn(sync.clone())); network .listen(SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), listen_port, )) .await .expect("test node listen"); tokio::spawn(network); sync.wait_connected().await; ClientNode { network: handle, remote_peer_id: full_node_peer_id, priv_key, sync, } } impl ClientNode { // # Panic pub async fn wait_connected(&self) { self.sync.wait_connected().await } pub fn connected(&self) -> bool { let diagnostic = &self.network.diagnostic; let opt_session = diagnostic.session(&self.remote_peer_id); self.sync.is_connected() && opt_session.is_some() } pub fn connected_session(&self, peer_id: &PeerId) -> Option { if !self.connected() { None } else { let diagnostic = &self.network.diagnostic; let opt_session = diagnostic.session(peer_id); opt_session.map(|sid| sid.value()) } } pub async fn broadcast(&self, endpoint: &str, msg: M) -> ClientResult<()> { use Priority::High; let sid = match self.connected_session(&self.remote_peer_id) { Some(sid) => sid, None => return Err(ClientNodeError::NotConnected), }; let ctx = Context::new().with_value::("session_id", sid); let peers = vec![Bytes::from(self.remote_peer_id.clone().into_bytes())]; match self.multicast(ctx, endpoint, peers, msg, High).await { Err(_) if !self.connected() => Err(ClientNodeError::NotConnected), Err(e) => { let err_msg = format!("broadcast to {} {}", endpoint, e); Err(ClientNodeError::Unexpected(err_msg)) } Ok(_) => Ok(()), } } pub async fn rpc(&self, endpoint: &str, msg: M) -> ClientResult where M: MessageCodec, R: MessageCodec, { let sid = match self.connected_session(&self.remote_peer_id) { Some(sid) => sid, None => return Err(ClientNodeError::NotConnected), }; let ctx = 
Context::new().with_value::("session_id", sid); match self.call::(ctx, endpoint, msg, Priority::High).await { Ok(resp) => Ok(resp), Err(e) if e.to_string().to_lowercase().contains("timeout") && !self.connected() => { Err(ClientNodeError::NotConnected) } Err(e) => { let err_msg = format!("rpc to {} {}", endpoint, e); Err(ClientNodeError::Unexpected(err_msg)) } } } pub async fn get_block(&self, height: u64) -> ClientResult { let resp = self .rpc::<_, FixedBlock>(RPC_SYNC_PULL_BLOCK, FixedHeight::new(height)) .await?; Ok(resp.inner) } pub async fn trust_twin_event(&self, event: TwinEvent) -> ClientResult<()> { self.broadcast(GOSSIP_TRUST_TWIN_EVENT, TrustTwinEventReq(event)) .await?; let mut targets: HashSet = if event == TwinEvent::Both { HashSet::from_iter(vec![TwinEvent::Good, TwinEvent::Bad]) } else { HashSet::from_iter(vec![event]) }; while !targets.is_empty() { let _ = match self.until_trust_processed().await? { TrustFeedback::Bad(_) => targets.remove(&TwinEvent::Bad), TrustFeedback::Good => targets.remove(&TwinEvent::Good), TrustFeedback::Worse(_) => targets.remove(&TwinEvent::Worse), TrustFeedback::Neutral | TrustFeedback::Fatal(_) => { // No Fatal action yet println!("skip neutral or fatal feedback"); continue; } }; } Ok(()) } pub async fn until_trust_processed(&self) -> ClientResult { loop { let event = self.sync.recv().await?; match event { SyncEvent::TrustMetric(feedback) => return Ok(feedback), SyncEvent::RemoteHeight(_) => continue, _ => return Err(ClientNodeError::Unexpected(event.to_string())), } } } pub async fn trust_new_interval(&self) -> ClientResult { self.broadcast(GOSSIP_TRUST_NEW_INTERVAL, TrustNewIntervalReq(0)) .await?; loop { let event = self.sync.recv().await?; match event { SyncEvent::TrustReport(report) => return Ok(report), SyncEvent::Connected => { return Err(ClientNodeError::Unexpected("connected".to_owned())) } SyncEvent::TrustMetric(_) | SyncEvent::RemoteHeight(_) => { println!("skip event {}", event); continue; } } } } } impl Deref 
for ClientNode { type Target = NetworkServiceHandle; fn deref(&self) -> &Self::Target { &self.network } } fn full_node_peer_id(full_seckey: &str) -> PeerId { let seckey = { let key = hex::decode(full_seckey).expect("hex private key string"); Secp256k1PrivateKey::try_from(key.as_ref()).expect("valid private key") }; let pubkey = seckey.pub_key(); PeerId::from_pubkey_bytes(pubkey.to_bytes()).expect("valid public key") } fn mock_block(height: u64) -> Block { let block_hash = Hash::digest(Bytes::from("22")); let nonce = Hash::digest(Bytes::from("33")); let addr_str = "muta14e0lmgck835vm2dfm0w3ckv6svmez8fdgdl705"; let proof = Proof { height: 0, round: 0, block_hash, signature: Default::default(), bitmap: Default::default(), }; let header = BlockHeader { chain_id: nonce.clone(), height, exec_height: height - 1, prev_hash: nonce.clone(), timestamp: 1000, order_root: nonce.clone(), order_signed_transactions_hash: nonce.clone(), confirm_root: Vec::new(), state_root: nonce, receipt_root: Vec::new(), cycles_used: vec![999_999], proposer: Address::from_str(addr_str).unwrap(), proof, validator_version: 1, validators: Vec::new(), }; Block { header, ordered_tx_hashes: Vec::new(), } } ================================================ FILE: tests/trust_metric_all/common.rs ================================================ use common_crypto::{ Crypto, PrivateKey, PublicKey, Secp256k1, Secp256k1PrivateKey, Signature, ToPublicKey, }; use protocol::fixed_codec::FixedCodec; use protocol::types::{ Address, Hash, JsonString, RawTransaction, SignedTransaction, TransactionRequest, }; use protocol::{Bytes, BytesMut}; use rand::{rngs::OsRng, RngCore}; use crate::common::node::consts; pub struct SignedTransactionBuilder { chain_id: Hash, timeout: u64, cycles_limit: u64, payload: JsonString, } impl Default for SignedTransactionBuilder { fn default() -> Self { let chain_id = Hash::from_hex(consts::CHAIN_ID).expect("chain id"); let timeout = 19; let cycles_limit = 314_159; let payload = 
"test".to_owned(); SignedTransactionBuilder { chain_id, timeout, cycles_limit, payload, } } } impl SignedTransactionBuilder { pub fn chain_id(mut self, chain_id_bytes: Bytes) -> Self { self.chain_id = Hash::digest(chain_id_bytes); self } pub fn cycles_limit(mut self, cycles_limit: u64) -> Self { self.cycles_limit = cycles_limit; self } pub fn payload(mut self, payload: JsonString) -> Self { self.payload = payload; self } pub fn build(self, pk: &Secp256k1PrivateKey) -> SignedTransaction { let nonce = { let mut random_bytes = [0u8; 32]; OsRng.fill_bytes(&mut random_bytes); Hash::digest(BytesMut::from(random_bytes.as_ref()).freeze()) }; let request = TransactionRequest { service_name: "metadata".to_owned(), method: "get_metadata".to_owned(), payload: self.payload, }; let raw = RawTransaction { chain_id: self.chain_id, nonce, timeout: self.timeout, cycles_limit: self.cycles_limit, cycles_price: 1, request, sender: Address::from_pubkey_bytes(pk.pub_key().to_bytes()).unwrap(), }; let raw_bytes = raw.encode_fixed().expect("encode raw tx"); let tx_hash = Hash::digest(raw_bytes); let sig = Secp256k1::sign_message(&tx_hash.as_bytes(), &pk.to_bytes()).expect("sign tx"); SignedTransaction { raw, tx_hash, pubkey: Bytes::from(rlp::encode_list::, _>(&[pk .pub_key() .to_bytes() .to_vec()])), signature: Bytes::from(rlp::encode_list::, _>(&[sig.to_bytes().to_vec()])), } } } pub fn stx_builder() -> SignedTransactionBuilder { SignedTransactionBuilder::default() } ================================================ FILE: tests/trust_metric_all/consensus.rs ================================================ use core_consensus::message::{ Choke, Proposal, Vote, BROADCAST_HEIGHT, END_GOSSIP_AGGREGATED_VOTE, END_GOSSIP_SIGNED_CHOKE, END_GOSSIP_SIGNED_PROPOSAL, END_GOSSIP_SIGNED_VOTE, QC, }; use protocol::traits::TrustFeedback; use super::client_node::ClientNodeError; use super::trust_test; #[test] fn should_be_disconnected_for_repeated_undecodeable_proposal_within_four_intervals() { 
trust_test(move |client_node| { Box::pin(async move { let proposal = Proposal(vec![0000]); for _ in 0..4u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node .broadcast(END_GOSSIP_SIGNED_PROPOSAL, proposal.clone()) .await { panic!("unexpected {}", e); } loop { match client_node.until_trust_processed().await { Ok(TrustFeedback::Worse(_)) => break, Ok(TrustFeedback::Neutral) => continue, Ok(feedback) => panic!("unexpected feedback {}", feedback), _ => panic!("fetch trust report"), } } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), } } assert!(!client_node.connected()); }) }); } #[test] fn should_be_disconnected_for_repeated_undecodeable_vote_within_four_intervals() { trust_test(move |client_node| { Box::pin(async move { let vote = Vote(vec![0000]); for _ in 0..4u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node .broadcast(END_GOSSIP_SIGNED_VOTE, vote.clone()) .await { panic!("unexpected {}", e); } loop { match client_node.until_trust_processed().await { Ok(TrustFeedback::Worse(_)) => break, Ok(TrustFeedback::Neutral) => continue, Ok(feedback) => panic!("unexpected feedback {}", feedback), _ => panic!("fetch trust report"), } } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), } } assert!(!client_node.connected()); }) }); } #[test] fn should_be_disconnected_for_repeated_undecodeable_qc_within_four_intervals() { trust_test(move |client_node| { Box::pin(async move { let qc = QC(vec![0000]); for _ in 0..4u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node .broadcast(END_GOSSIP_AGGREGATED_VOTE, qc.clone()) .await { panic!("unexpected {}", e); } loop { match client_node.until_trust_processed().await { Ok(TrustFeedback::Worse(_)) => break, Ok(TrustFeedback::Neutral) => continue, Ok(feedback) => panic!("unexpected feedback {}", feedback), _ => 
panic!("fetch trust report"), } } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), } } assert!(!client_node.connected()); }) }); } #[test] fn should_be_disconnected_for_repeated_undecodeable_choke_within_four_intervals() { trust_test(move |client_node| { Box::pin(async move { let choke = Choke(vec![0000]); for _ in 0..4u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node .broadcast(END_GOSSIP_SIGNED_CHOKE, choke.clone()) .await { panic!("unexpected {}", e); } loop { match client_node.until_trust_processed().await { Ok(TrustFeedback::Worse(_)) => break, Ok(TrustFeedback::Neutral) => continue, Ok(feedback) => panic!("unexpected feedback {}", feedback), _ => panic!("fetch trust report"), } } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), } } assert!(!client_node.connected()); }) }); } #[test] fn should_be_disconnected_for_repeated_malicious_new_height_broadcast_within_four_intervals() { trust_test(move |client_node| { Box::pin(async move { for _ in 0..4u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node.broadcast(BROADCAST_HEIGHT, 99u64).await { panic!("unexpected {}", e); } loop { match client_node.until_trust_processed().await { Ok(TrustFeedback::Bad(_)) => break, Ok(TrustFeedback::Neutral) => continue, Ok(feedback) => panic!("unexpected feedback {}", feedback), _ => panic!("fetch trust report"), } } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), } } assert!(!client_node.connected()); }) }); } ================================================ FILE: tests/trust_metric_all/logger.rs ================================================ use std::{collections::HashMap, path::PathBuf}; const LOGGER_FILTER: &str = "warn"; const LOGGER_LOG_TO_CONSOLE: bool = 
true; const LOGGER_CONSOLE_SHOW_FILE_AND_LINE: bool = false; const LOGGER_LOG_TO_FILE: bool = false; const LOGGER_METRICS: bool = false; const LOGGER_FILE_SIZE_LIMIT: u64 = 1024 * 1024 * 1024; #[allow(dead_code)] pub fn init() { let log_path = PathBuf::new(); let mut modules_level = HashMap::new(); modules_level.insert("core_network".to_owned(), "debug".to_owned()); common_logger::init( LOGGER_FILTER.to_owned(), LOGGER_LOG_TO_CONSOLE, LOGGER_CONSOLE_SHOW_FILE_AND_LINE, LOGGER_LOG_TO_FILE, LOGGER_METRICS, log_path, LOGGER_FILE_SIZE_LIMIT, modules_level, ) } ================================================ FILE: tests/trust_metric_all/mempool.rs ================================================ use core_mempool::{MsgNewTxs, END_GOSSIP_NEW_TXS}; use protocol::{traits::TrustFeedback, types::Hash, Bytes}; use super::client_node::ClientNodeError; use super::common; use super::trust_test; #[test] fn should_report_good_on_valid_transaction() { trust_test(move |client_node| { Box::pin(async move { let stx = common::stx_builder().build(&client_node.priv_key); let msg_stxs = MsgNewTxs { batch_stxs: vec![stx.clone()], }; client_node .broadcast(END_GOSSIP_NEW_TXS, msg_stxs) .await .expect("broadcast stx"); match client_node.until_trust_processed().await { Ok(TrustFeedback::Good) => {} Ok(_) => panic!("should be good report"), _ => panic!("fetch trust report"), } }) }); } #[test] fn should_be_disconnected_for_repeated_wrong_signature_only_within_four_intervals() { trust_test(move |client_node| { Box::pin(async move { let mut stx = common::stx_builder().build(&client_node.priv_key); stx.signature = Bytes::from(vec![0]); for _ in 0..4u8 { let msg_stxs = MsgNewTxs { batch_stxs: vec![stx.clone()], }; if let Err(ClientNodeError::Unexpected(e)) = client_node.broadcast(END_GOSSIP_NEW_TXS, msg_stxs).await { panic!("unexpected {}", e); } loop { match client_node.until_trust_processed().await { Ok(TrustFeedback::Worse(_)) => break, Ok(TrustFeedback::Neutral) => continue, Ok(feedback) => 
panic!("unexpected feedback {}", feedback),
                        _ => panic!("fetch trust report"),
                    }
                }
                match client_node.trust_new_interval().await {
                    Ok(_) => (),
                    Err(ClientNodeError::NotConnected) => return,
                    Err(e) => panic!("unexpected error {}", e),
                }
            }
            assert!(!client_node.connected());
        })
    });
}

/// A transaction whose hash does not match its raw payload must be reported
/// as `Worse` trust feedback; repeating it over four intervals should get
/// this peer disconnected.
#[test]
fn should_be_disconnected_for_repeated_wrong_tx_hash_only_within_four_intervals() {
    trust_test(move |client_node| {
        Box::pin(async move {
            // Build a valid signed tx, then corrupt its hash so the full
            // node's hash check fails.
            let mut stx = common::stx_builder().build(&client_node.priv_key);
            stx.tx_hash = Hash::digest(Bytes::from(vec![0]));

            for _ in 0..4u8 {
                let msg_stxs = MsgNewTxs {
                    batch_stxs: vec![stx.clone()],
                };

                if let Err(ClientNodeError::Unexpected(e)) =
                    client_node.broadcast(END_GOSSIP_NEW_TXS, msg_stxs).await
                {
                    panic!("unexpected {}", e);
                }

                loop {
                    match client_node.until_trust_processed().await {
                        Ok(TrustFeedback::Worse(_)) => break,
                        Ok(TrustFeedback::Neutral) => continue,
                        // FIX: previously said "should be good report" even
                        // though this test expects Worse feedback; report the
                        // actual unexpected feedback instead (matches the
                        // consensus.rs tests).
                        Ok(feedback) => panic!("unexpected feedback {}", feedback),
                        _ => panic!("fetch trust report"),
                    }
                }

                match client_node.trust_new_interval().await {
                    Ok(_) => (),
                    Err(ClientNodeError::NotConnected) => return,
                    Err(e) => panic!("unexpected error {}", e),
                }
            }

            assert!(!client_node.connected());
        })
    });
}

/// An oversize transaction payload must be reported as `Bad` trust feedback;
/// repeating it over four intervals should get this peer disconnected.
#[test]
fn should_be_disconnected_for_repeated_exceed_tx_size_limit_only_within_four_intervals() {
    trust_test(move |client_node| {
        Box::pin(async move {
            let stx = common::stx_builder()
                .payload("trust-metric".repeat(1_000))
                .build(&client_node.priv_key);

            for _ in 0..4u8 {
                let msg_stxs = MsgNewTxs {
                    batch_stxs: vec![stx.clone()],
                };

                if let Err(ClientNodeError::Unexpected(e)) =
                    client_node.broadcast(END_GOSSIP_NEW_TXS, msg_stxs).await
                {
                    panic!("unexpected {}", e);
                }

                loop {
                    match client_node.until_trust_processed().await {
                        Ok(TrustFeedback::Bad(_)) => break,
                        Ok(TrustFeedback::Neutral) => continue,
                        // FIX: misleading "should be good report" message
                        // replaced; this test expects Bad feedback.
                        Ok(feedback) => panic!("unexpected feedback {}", feedback),
                        _ => panic!("fetch trust report"),
                    }
                }

                match client_node.trust_new_interval().await {
                    Ok(_) => (),
                    Err(ClientNodeError::NotConnected) => return,
                    Err(e) => panic!("unexpected error {}",
e), } }
            assert!(!client_node.connected());
        })
    });
}

/// A transaction exceeding the cycles limit must be reported as `Bad` trust
/// feedback; repeating it over four intervals should get this peer
/// disconnected.
#[test]
fn should_be_disconnected_for_repeated_exceed_cycles_limit_only_within_four_intervals() {
    trust_test(move |client_node| {
        Box::pin(async move {
            let stx = common::stx_builder()
                .cycles_limit(999_999_999_999)
                .build(&client_node.priv_key);

            for _ in 0..4u8 {
                let msg_stxs = MsgNewTxs {
                    batch_stxs: vec![stx.clone()],
                };

                if let Err(ClientNodeError::Unexpected(e)) =
                    client_node.broadcast(END_GOSSIP_NEW_TXS, msg_stxs).await
                {
                    panic!("unexpected {}", e);
                }

                loop {
                    match client_node.until_trust_processed().await {
                        Ok(TrustFeedback::Bad(_)) => break,
                        Ok(TrustFeedback::Neutral) => continue,
                        // FIX: previously said "should be good report" even
                        // though this test expects Bad feedback; report the
                        // actual unexpected feedback instead (matches the
                        // consensus.rs tests).
                        Ok(feedback) => panic!("unexpected feedback {}", feedback),
                        _ => panic!("fetch trust report"),
                    }
                }

                match client_node.trust_new_interval().await {
                    Ok(_) => (),
                    Err(ClientNodeError::NotConnected) => return,
                    Err(e) => panic!("unexpected error {}", e),
                }
            }

            assert!(!client_node.connected());
        })
    });
}

/// A transaction carrying the wrong chain id must be reported as `Worse`
/// trust feedback; repeating it over four intervals should get this peer
/// disconnected.
#[test]
fn should_be_disconnected_for_repeated_wrong_chain_id_only_within_four_intervals() {
    trust_test(move |client_node| {
        Box::pin(async move {
            let stx = common::stx_builder()
                .chain_id(Bytes::from(vec![0]))
                .build(&client_node.priv_key);

            for _ in 0..4u8 {
                let msg_stxs = MsgNewTxs {
                    batch_stxs: vec![stx.clone()],
                };

                if let Err(ClientNodeError::Unexpected(e)) =
                    client_node.broadcast(END_GOSSIP_NEW_TXS, msg_stxs).await
                {
                    panic!("unexpected {}", e);
                }

                loop {
                    match client_node.until_trust_processed().await {
                        Ok(TrustFeedback::Worse(_)) => break,
                        Ok(TrustFeedback::Neutral) => continue,
                        // FIX: misleading "should be good report" message
                        // replaced; this test expects Worse feedback.
                        Ok(feedback) => panic!("unexpected feedback {}", feedback),
                        _ => panic!("fetch trust report"),
                    }
                }

                match client_node.trust_new_interval().await {
                    Ok(_) => (),
                    Err(ClientNodeError::NotConnected) => return,
                    Err(e) => panic!("unexpected error {}", e),
                }
            }

            assert!(!client_node.connected());
        })
    });
}

================================================
FILE: tests/trust_metric_all/mod.rs
================================================
#![allow(clippy::mutable_key_type)]

mod client_node;
mod common;
mod 
consensus; mod logger; mod mempool; use std::panic; use common_crypto::{PrivateKey, Secp256k1PrivateKey}; use futures::future::BoxFuture; use crate::common::node::sync::Sync; use crate::common::{available_port_pair, node}; use client_node::{ClientNode, ClientNodeError}; fn trust_test(test: impl FnOnce(ClientNode) -> BoxFuture<'static, ()> + Send + 'static) { let (full_port, client_port) = available_port_pair(); let mut rt = tokio::runtime::Runtime::new().expect("create runtime"); let local = tokio::task::LocalSet::new(); local.block_on(&mut rt, async move { let sync = Sync::new(); let full_seckey = { let key = Secp256k1PrivateKey::generate(&mut rand::rngs::OsRng); hex::encode(key.to_bytes()).to_string() }; tokio::task::spawn_local(node::full_node::run( full_port, full_seckey.clone(), sync.clone(), )); // Wait full node network initialization sync.wait().await; let handle = tokio::spawn(async move { let client_node = client_node::connect(full_port, full_seckey, client_port, sync).await; test(client_node).await; }); handle.await.expect("test failed"); }); } #[test] fn trust_metric_basic_setup_test() { trust_test(move |client_node| { Box::pin(async move { let block = client_node.get_block(0).await.expect("get genesis"); assert_eq!(block.header.height, 0); }) }); } #[test] fn should_have_working_trust_diagnostic() { trust_test(move |client_node| { Box::pin(async move { client_node .trust_twin_event(node::TwinEvent::Both) .await .expect("test trust twin event"); let report = client_node.trust_new_interval().await.unwrap(); assert_eq!(report.good_events, 1, "should have 1 good event"); assert_eq!(report.bad_events, 1, "should have 1 good event"); }) }); } #[test] fn should_be_disconnected_for_repeated_bad_only_within_four_intervals_from_max_score() { trust_test(move |client_node| { Box::pin(async move { // Repeat at least 30 interval let mut count = 30u8; while count > 0 { count -= 1; client_node .trust_twin_event(node::TwinEvent::Good) .await .expect("test trust twin 
event"); let report = client_node .trust_new_interval() .await .expect("test trust new interval"); if report.score >= 95 { break; } } for _ in 0..4u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Bad).await { panic!("unexpected {}", e); } match client_node.trust_new_interval().await { Ok(_) => continue, Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), } } assert!(!client_node.connected()); }) }); } #[test] fn should_be_disconnected_for_repeated_s_strategy_within_17_intervals_from_max_score() { trust_test(move |client_node| { Box::pin(async move { // Repeat at least 30 interval let mut count = 30u8; while count > 0 { count -= 1; client_node .trust_twin_event(node::TwinEvent::Good) .await .expect("test trust twin event"); let report = client_node .trust_new_interval() .await .expect("test trust new interval"); if report.score >= 95 { break; } } for _ in 0..17u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Worse).await { panic!("unexpected {}", e); } if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Good).await { panic!("unexpected {}", e); } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), }; if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Good).await { panic!("unexpected {}", e); } match client_node.trust_new_interval().await { Ok(_) => continue, Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), }; } assert!(!client_node.connected()); }) }); } #[test] fn should_keep_connected_for_z_strategy_but_have_lower_score() { trust_test(move |client_node| { Box::pin(async move { let mut base_report = None; // Repeat at least 30 interval let mut count = 30u8; while count > 0 { count -= 1; client_node 
.trust_twin_event(node::TwinEvent::Good) .await .expect("test trust twin event"); let report = client_node .trust_new_interval() .await .expect("test trust new interval"); if report.score >= 95 { base_report = Some(report); break; } } let mut report = base_report.expect("should have base report"); for _ in 0..100u8 { if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Bad).await { panic!("unexpected {}", e); } if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Good).await { panic!("unexpected {}", e); } let latest_report = match client_node.trust_new_interval().await { Ok(report) => report, Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), }; assert!(latest_report.score <= report.score); report = latest_report; } assert!(client_node.connected(), "should be connected"); }) }); } #[test] fn should_able_to_reconnect_after_trust_metric_soft_ban() { trust_test(move |client_node| { Box::pin(async move { let mut count = 30u8; while count > 0 { count -= 1; if let Err(ClientNodeError::Unexpected(e)) = client_node.trust_twin_event(node::TwinEvent::Bad).await { panic!("unexpected {}", e); } match client_node.trust_new_interval().await { Ok(_) => (), Err(ClientNodeError::NotConnected) => return, Err(e) => panic!("unexpected error {}", e), }; if !client_node.connected() { break; } } assert!(!client_node.connected(), "should be disconnected"); // Ensure we we dont sleep longer than back-off time let soft_ban_duration = node::consts::NETWORK_SOFT_BAND_DURATION.expect("soft ban") * 2u64; tokio::time::delay_for(std::time::Duration::from_secs(soft_ban_duration)).await; client_node.wait_connected().await; }) }); } ================================================ FILE: tests/verify_chain_id.rs ================================================ /// NOTE: Test may panic after drop full node future, which is /// expected. 
pub mod common; use std::convert::TryFrom; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Deref; use common_crypto::{PrivateKey, PublicKey, Secp256k1PrivateKey, ToPublicKey}; use core_consensus::message::{ FixedBlock, FixedHeight, BROADCAST_HEIGHT, RPC_RESP_SYNC_PULL_BLOCK, RPC_SYNC_PULL_BLOCK, }; use core_network::{ DiagnosticEvent, NetworkConfig, NetworkService, NetworkServiceHandle, PeerId, PeerIdExt, }; use derive_more::Display; use protocol::traits::{Context, MessageCodec, MessageHandler, Priority, Rpc, TrustFeedback}; use protocol::types::{Block, Hash}; use protocol::{async_trait, Bytes}; use crate::common::available_port_pair; use crate::common::node::consts; use crate::common::node::full_node; use crate::common::node::sync::{Sync, SyncError}; #[test] fn should_be_disconnected_due_to_different_chain_id() { let (full_port, client_port) = available_port_pair(); let mut rt = tokio::runtime::Runtime::new().expect("create runtime"); let local = tokio::task::LocalSet::new(); local.block_on(&mut rt, async move { let sync = Sync::new(); let full_seckey = { let key = Secp256k1PrivateKey::generate(&mut rand::rngs::OsRng); hex::encode(key.to_bytes()).to_string() }; tokio::task::spawn_local(full_node::run(full_port, full_seckey.clone(), sync.clone())); // Wait full node network initialization sync.wait().await; let chain_id = Hash::digest(Bytes::from_static(b"beautiful world")); let full_node_peer_id = full_node_peer_id(&full_seckey); let full_node_addr = format!("127.0.0.1:{}", full_port); let config = NetworkConfig::new() .ping_interval(consts::NETWORK_PING_INTERVAL) .peer_trust_metric(consts::NETWORK_TRUST_METRIC_INTERVAL, None) .expect("peer trust") .bootstraps(vec![(full_node_peer_id.to_base58(), full_node_addr)]) .expect("test node config"); let mut network = NetworkService::new(config); network.set_chain_id(chain_id); network .register_endpoint_handler(BROADCAST_HEIGHT, ReceiveRemoteHeight(sync.clone())) .expect("register remote height"); let hook_fn 
= |sync: Sync| -> _ { Box::new(move |event: DiagnosticEvent| { // We only care connected event on client node if let DiagnosticEvent::NewSession = event { sync.emit(event) } }) }; network.register_diagnostic_hook(hook_fn(sync.clone())); network .listen(SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), client_port, )) .await .expect("test node listen"); tokio::spawn(network); match sync.recv().await { Err(SyncError::Disconected) => (), Err(err) => panic!("unexpected err {}", err), Ok(event) => panic!("unexpected event {}", event), } }); } #[test] fn should_be_connected_with_same_chain_id() { let (full_port, client_port) = available_port_pair(); let mut rt = tokio::runtime::Runtime::new().expect("create runtime"); let local = tokio::task::LocalSet::new(); local.block_on(&mut rt, async move { let sync = Sync::new(); let full_seckey = { let key = Secp256k1PrivateKey::generate(&mut rand::rngs::OsRng); hex::encode(key.to_bytes()).to_string() }; tokio::task::spawn_local(full_node::run(full_port, full_seckey.clone(), sync.clone())); // Wait full node network initialization sync.wait().await; let chain_id = Hash::from_hex(consts::CHAIN_ID).expect("chain id"); let client_node = connect(full_port, full_seckey, chain_id, client_port, sync.clone()).await; let block = client_node.get_block(0).await.expect("get genesis"); assert_eq!(block.header.height, 0); }); } #[derive(Debug, Display)] enum ClientNodeError { #[display(fmt = "not connected")] NotConnected, #[display(fmt = "unexpected {}", _0)] Unexpected(String), } impl std::error::Error for ClientNodeError {} impl From for ClientNodeError { fn from(err: SyncError) -> Self { match err { SyncError::Recv(err) => ClientNodeError::Unexpected(err.to_string()), SyncError::Timeout => ClientNodeError::Unexpected(err.to_string()), SyncError::Disconected => ClientNodeError::NotConnected, } } } type ClientResult = Result; struct ReceiveRemoteHeight(Sync); #[async_trait] impl MessageHandler for ReceiveRemoteHeight { type Message = 
u64; async fn process(&self, _: Context, msg: u64) -> TrustFeedback { self.0.emit(DiagnosticEvent::RemoteHeight { height: msg }); TrustFeedback::Neutral } } struct ClientNode { pub network: NetworkServiceHandle, pub remote_peer_id: PeerId, pub priv_key: Secp256k1PrivateKey, pub sync: Sync, } async fn connect( full_node_port: u16, full_seckey: String, chain_id: Hash, listen_port: u16, sync: Sync, ) -> ClientNode { let full_node_peer_id = full_node_peer_id(&full_seckey); let full_node_addr = format!("127.0.0.1:{}", full_node_port); let config = NetworkConfig::new() .ping_interval(consts::NETWORK_PING_INTERVAL) .peer_trust_metric(consts::NETWORK_TRUST_METRIC_INTERVAL, None) .expect("peer trust") .bootstraps(vec![(full_node_peer_id.to_base58(), full_node_addr)]) .expect("test node config"); let priv_key = Secp256k1PrivateKey::generate(&mut rand::rngs::OsRng); let mut network = NetworkService::new(config); let handle = network.handle(); network.set_chain_id(chain_id); network .register_rpc_response::(RPC_RESP_SYNC_PULL_BLOCK) .expect("register consensus rpc response pull block"); network .register_endpoint_handler(BROADCAST_HEIGHT, ReceiveRemoteHeight(sync.clone())) .expect("register remote height"); let hook_fn = |sync: Sync| -> _ { Box::new(move |event: DiagnosticEvent| { // We only care connected event on client node if let DiagnosticEvent::NewSession = event { sync.emit(event) } }) }; network.register_diagnostic_hook(hook_fn(sync.clone())); network .listen(SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), listen_port, )) .await .expect("test node listen"); tokio::spawn(network); sync.wait_connected().await; ClientNode { network: handle, remote_peer_id: full_node_peer_id, priv_key, sync, } } impl ClientNode { pub fn connected(&self) -> bool { let diagnostic = &self.network.diagnostic; let opt_session = diagnostic.session(&self.remote_peer_id); self.sync.is_connected() && opt_session.is_some() } pub fn connected_session(&self, peer_id: &PeerId) -> Option { if 
!self.connected() { None } else { let diagnostic = &self.network.diagnostic; let opt_session = diagnostic.session(peer_id); opt_session.map(|sid| sid.value()) } } pub async fn rpc(&self, endpoint: &str, msg: M) -> ClientResult where M: MessageCodec, R: MessageCodec, { let sid = match self.connected_session(&self.remote_peer_id) { Some(sid) => sid, None => return Err(ClientNodeError::NotConnected), }; let ctx = Context::new().with_value::("session_id", sid); match self.call::(ctx, endpoint, msg, Priority::High).await { Ok(resp) => Ok(resp), Err(e) if e.to_string().to_lowercase().contains("timeout") && !self.connected() => { Err(ClientNodeError::NotConnected) } Err(e) => { let err_msg = format!("rpc to {} {}", endpoint, e); Err(ClientNodeError::Unexpected(err_msg)) } } } pub async fn get_block(&self, height: u64) -> ClientResult { let resp = self .rpc::<_, FixedBlock>(RPC_SYNC_PULL_BLOCK, FixedHeight::new(height)) .await?; Ok(resp.inner) } } impl Deref for ClientNode { type Target = NetworkServiceHandle; fn deref(&self) -> &Self::Target { &self.network } } fn full_node_peer_id(full_seckey: &str) -> PeerId { let seckey = { let key = hex::decode(full_seckey).expect("hex private key string"); Secp256k1PrivateKey::try_from(key.as_ref()).expect("valid private key") }; let pubkey = seckey.pub_key(); PeerId::from_pubkey_bytes(pubkey.to_bytes()).expect("valid public key") }