Repository: ArweaveTeam/arweave Branch: master Commit: 138acae765c7 Files: 999 Total size: 32.1 MB Directory structure: gitextract_87p354ak/ ├── .cursor/ │ ├── BUGBOT.md │ └── rules/ │ ├── build.mdc │ └── protocol.mdc ├── .gitattributes ├── .github/ │ └── workflows/ │ ├── e2e-test.yml │ ├── gitstamp.yaml │ ├── release.yml │ ├── test-amd64-ubuntu-22.04.yml │ ├── test-arm64-macos-26.yml │ ├── x-build.yml │ ├── x-common-test.yml │ ├── x-release-linux.yml │ ├── x-release-macos.yml │ ├── x-test-canary.yml │ ├── x-test-full.yml │ ├── x-test-on-demand.yml │ └── x-test-vdf.yml ├── .gitignore ├── .gitmodules ├── .jupyter/ │ └── jupyter_server_config.py ├── CANARY.md ├── CONTRIBUTING.md ├── LICENSE.md ├── README.md ├── apps/ │ ├── ar_sqlite3/ │ │ ├── c_src/ │ │ │ ├── erl_comm.o │ │ │ ├── sqlite3.o │ │ │ └── sqlite3_driver.o │ │ └── priv/ │ │ └── ar_sqlite3_driver │ ├── arweave/ │ │ ├── c_src/ │ │ │ ├── Makefile │ │ │ ├── ar_nif.c │ │ │ ├── ar_nif.h │ │ │ ├── randomx/ │ │ │ │ ├── ar_randomx_impl.h │ │ │ │ ├── crc32.h │ │ │ │ ├── feistel_msgsize_key_cipher.cpp │ │ │ │ ├── feistel_msgsize_key_cipher.h │ │ │ │ ├── randomx_long_with_entropy.cpp │ │ │ │ ├── randomx_long_with_entropy.h │ │ │ │ ├── randomx_squared.cpp │ │ │ │ ├── randomx_squared.h │ │ │ │ ├── rx4096/ │ │ │ │ │ └── ar_rx4096_nif.c │ │ │ │ ├── rx512/ │ │ │ │ │ └── ar_rx512_nif.c │ │ │ │ └── rxsquared/ │ │ │ │ └── ar_rxsquared_nif.c │ │ │ ├── secp256k1/ │ │ │ │ └── secp256k1_nif.c │ │ │ └── vdf/ │ │ │ ├── ar_vdf_nif.c │ │ │ ├── sha256-armv8.S │ │ │ ├── vdf.cpp │ │ │ ├── vdf.h │ │ │ ├── vdf_fused_arm.cpp │ │ │ ├── vdf_fused_x86.cpp │ │ │ └── vdf_hiopt_arm.cpp │ │ ├── include/ │ │ │ ├── ar.hrl │ │ │ ├── ar_blacklist_middleware.hrl │ │ │ ├── ar_block.hrl │ │ │ ├── ar_chain_stats.hrl │ │ │ ├── ar_chunk_storage.hrl │ │ │ ├── ar_consensus.hrl │ │ │ ├── ar_data_discovery.hrl │ │ │ ├── ar_data_sync.hrl │ │ │ ├── ar_header_sync.hrl │ │ │ ├── ar_inflation.hrl │ │ │ ├── ar_mining.hrl │ │ │ ├── ar_mining_cache.hrl │ │ │ ├── 
ar_peers.hrl │ │ │ ├── ar_poa.hrl │ │ │ ├── ar_pool.hrl │ │ │ ├── ar_pricing.hrl │ │ │ ├── ar_repack.hrl │ │ │ ├── ar_sup.hrl │ │ │ ├── ar_sync_buckets.hrl │ │ │ ├── ar_vdf.hrl │ │ │ ├── ar_verify_chunks.hrl │ │ │ ├── ar_wallets.hrl │ │ │ └── user_default.hrl │ │ ├── src/ │ │ │ ├── ar.erl │ │ │ ├── ar_base32.erl │ │ │ ├── ar_bench_hash.erl │ │ │ ├── ar_bench_packing.erl │ │ │ ├── ar_bench_timer.erl │ │ │ ├── ar_bench_vdf.erl │ │ │ ├── ar_blacklist_middleware.erl │ │ │ ├── ar_block.erl │ │ │ ├── ar_block_cache.erl │ │ │ ├── ar_block_index.erl │ │ │ ├── ar_block_pre_validator.erl │ │ │ ├── ar_block_pre_validator_sup.erl │ │ │ ├── ar_block_propagation_worker.erl │ │ │ ├── ar_block_time_history.erl │ │ │ ├── ar_bridge.erl │ │ │ ├── ar_bridge_sup.erl │ │ │ ├── ar_chain_stats.erl │ │ │ ├── ar_chunk_copy.erl │ │ │ ├── ar_chunk_storage.erl │ │ │ ├── ar_chunk_storage_sup.erl │ │ │ ├── ar_chunk_visualization.erl │ │ │ ├── ar_cli_parser.erl │ │ │ ├── ar_config.erl │ │ │ ├── ar_coordination.erl │ │ │ ├── ar_data_discovery.erl │ │ │ ├── ar_data_doctor.erl │ │ │ ├── ar_data_root_sync.erl │ │ │ ├── ar_data_root_sync_sup.erl │ │ │ ├── ar_data_sync.erl │ │ │ ├── ar_data_sync_coordinator.erl │ │ │ ├── ar_data_sync_sup.erl │ │ │ ├── ar_data_sync_worker.erl │ │ │ ├── ar_deep_hash.erl │ │ │ ├── ar_device_lock.erl │ │ │ ├── ar_diff_dag.erl │ │ │ ├── ar_difficulty.erl │ │ │ ├── ar_disk_cache.erl │ │ │ ├── ar_disksup.erl │ │ │ ├── ar_doctor_bench.erl │ │ │ ├── ar_doctor_dump.erl │ │ │ ├── ar_doctor_inspect.erl │ │ │ ├── ar_doctor_merge.erl │ │ │ ├── ar_domain.erl │ │ │ ├── ar_entropy_cache.erl │ │ │ ├── ar_entropy_gen.erl │ │ │ ├── ar_entropy_storage.erl │ │ │ ├── ar_ets_intervals.erl │ │ │ ├── ar_events.erl │ │ │ ├── ar_events_sup.erl │ │ │ ├── ar_footprint_record.erl │ │ │ ├── ar_fork.erl │ │ │ ├── ar_fraction.erl │ │ │ ├── ar_global_sync_record.erl │ │ │ ├── ar_header_sync.erl │ │ │ ├── ar_header_sync_sup.erl │ │ │ ├── ar_http.erl │ │ │ ├── ar_http_iface_client.erl │ │ │ ├── 
ar_http_iface_middleware.erl │ │ │ ├── ar_http_iface_rate_limiter_middleware.erl │ │ │ ├── ar_http_iface_server.erl │ │ │ ├── ar_http_req.erl │ │ │ ├── ar_http_sup.erl │ │ │ ├── ar_http_util.erl │ │ │ ├── ar_ignore_registry.erl │ │ │ ├── ar_inflation.erl │ │ │ ├── ar_info.erl │ │ │ ├── ar_intervals.erl │ │ │ ├── ar_join.erl │ │ │ ├── ar_kv.erl │ │ │ ├── ar_kv_sup.erl │ │ │ ├── ar_localnet.erl │ │ │ ├── ar_localnet_mining_server.erl │ │ │ ├── ar_localnet_mining_sup.erl │ │ │ ├── ar_logger.erl │ │ │ ├── ar_mempool.erl │ │ │ ├── ar_merkle.erl │ │ │ ├── ar_metrics.erl │ │ │ ├── ar_metrics_collector.erl │ │ │ ├── ar_mine_randomx.erl │ │ │ ├── ar_mining_cache.erl │ │ │ ├── ar_mining_hash.erl │ │ │ ├── ar_mining_io.erl │ │ │ ├── ar_mining_server.erl │ │ │ ├── ar_mining_server_behaviour.erl │ │ │ ├── ar_mining_stats.erl │ │ │ ├── ar_mining_sup.erl │ │ │ ├── ar_mining_worker.erl │ │ │ ├── ar_network_middleware.erl │ │ │ ├── ar_node.erl │ │ │ ├── ar_node_sup.erl │ │ │ ├── ar_node_utils.erl │ │ │ ├── ar_node_worker.erl │ │ │ ├── ar_nonce_limiter.erl │ │ │ ├── ar_nonce_limiter_client.erl │ │ │ ├── ar_nonce_limiter_server.erl │ │ │ ├── ar_nonce_limiter_server_worker.erl │ │ │ ├── ar_nonce_limiter_sup.erl │ │ │ ├── ar_packing_server.erl │ │ │ ├── ar_packing_sup.erl │ │ │ ├── ar_patricia_tree.erl │ │ │ ├── ar_peer_intervals.erl │ │ │ ├── ar_peer_worker.erl │ │ │ ├── ar_peer_worker_sup.erl │ │ │ ├── ar_peers.erl │ │ │ ├── ar_poa.erl │ │ │ ├── ar_poller.erl │ │ │ ├── ar_poller_sup.erl │ │ │ ├── ar_poller_worker.erl │ │ │ ├── ar_pool.erl │ │ │ ├── ar_pool_cm_job_poller.erl │ │ │ ├── ar_pool_job_poller.erl │ │ │ ├── ar_pricing.erl │ │ │ ├── ar_pricing_transition.erl │ │ │ ├── ar_process_sampler.erl │ │ │ ├── ar_prometheus_cowboy_handler.erl │ │ │ ├── ar_prometheus_cowboy_labels.erl │ │ │ ├── ar_rate_limiter.erl │ │ │ ├── ar_repack.erl │ │ │ ├── ar_repack_fsm.erl │ │ │ ├── ar_repack_io.erl │ │ │ ├── ar_replica_2_9.erl │ │ │ ├── ar_retarget.erl │ │ │ ├── ar_rewards.erl │ │ │ ├── 
ar_rx4096_nif.erl │ │ │ ├── ar_rx512_nif.erl │ │ │ ├── ar_rxsquared_nif.erl │ │ │ ├── ar_semaphore.erl │ │ │ ├── ar_serialize.erl │ │ │ ├── ar_shutdown_manager.erl │ │ │ ├── ar_storage.erl │ │ │ ├── ar_storage_module.erl │ │ │ ├── ar_storage_sup.erl │ │ │ ├── ar_sup.erl │ │ │ ├── ar_sync_buckets.erl │ │ │ ├── ar_sync_record.erl │ │ │ ├── ar_sync_record_sup.erl │ │ │ ├── ar_testnet.erl │ │ │ ├── ar_timer.erl │ │ │ ├── ar_tx.erl │ │ │ ├── ar_tx_blacklist.erl │ │ │ ├── ar_tx_db.erl │ │ │ ├── ar_tx_emitter.erl │ │ │ ├── ar_tx_emitter_sup.erl │ │ │ ├── ar_tx_emitter_worker.erl │ │ │ ├── ar_tx_poller.erl │ │ │ ├── ar_tx_replay_pool.erl │ │ │ ├── ar_tx_validator.erl │ │ │ ├── ar_unbalanced_merkle.erl │ │ │ ├── ar_util.erl │ │ │ ├── ar_vdf.erl │ │ │ ├── ar_vdf_nif.erl │ │ │ ├── ar_verify_chunks.erl │ │ │ ├── ar_verify_chunks_reporter.erl │ │ │ ├── ar_verify_chunks_sup.erl │ │ │ ├── ar_wallet.erl │ │ │ ├── ar_wallets.erl │ │ │ ├── ar_watchdog.erl │ │ │ ├── ar_weave.erl │ │ │ ├── ar_webhook.erl │ │ │ ├── ar_webhook_sup.erl │ │ │ ├── arweave.app.src │ │ │ ├── e2e/ │ │ │ │ ├── ar_e2e.erl │ │ │ │ ├── ar_repack_in_place_mine_tests.erl │ │ │ │ ├── ar_repack_mine_tests.erl │ │ │ │ ├── ar_sync_pack_mine_tests.erl │ │ │ │ └── fixtures/ │ │ │ │ └── wallets/ │ │ │ │ ├── wallet_a.json │ │ │ │ ├── wallet_b.json │ │ │ │ ├── wallet_c.json │ │ │ │ └── wallet_d.json │ │ │ ├── rsa_pss.erl │ │ │ ├── secp256k1_nif.erl │ │ │ └── user_default.erl │ │ └── test/ │ │ ├── ar_base64_compatibility_tests.erl │ │ ├── ar_canary.erl │ │ ├── ar_config_tests.erl │ │ ├── ar_config_tests_config_fixture.json │ │ ├── ar_coordinated_mining_tests.erl │ │ ├── ar_data_roots_sync_tests.erl │ │ ├── ar_data_sync_disk_pool_rotation_test.erl │ │ ├── ar_data_sync_enqueue_intervals_test.erl │ │ ├── ar_data_sync_mines_off_only_last_chunks_test.erl │ │ ├── ar_data_sync_mines_off_only_second_last_chunks_test.erl │ │ ├── ar_data_sync_records_footprints_test.erl │ │ ├── ar_data_sync_recovers_from_corruption_test.erl │ │ ├── 
ar_data_sync_syncs_after_joining_test.erl │ │ ├── ar_data_sync_syncs_data_test.erl │ │ ├── ar_difficulty_tests.erl │ │ ├── ar_ecdsa_tests.erl │ │ ├── ar_forced_validation_tests.erl │ │ ├── ar_fork_recovery_tests.erl │ │ ├── ar_get_chunk_tests.erl │ │ ├── ar_header_sync_tests.erl │ │ ├── ar_http_iface_tests.erl │ │ ├── ar_http_util_tests.erl │ │ ├── ar_info_tests.erl │ │ ├── ar_mempool_tests.erl │ │ ├── ar_mine_randomx_tests.erl │ │ ├── ar_mine_vdf_tests.erl │ │ ├── ar_mining_io_tests.erl │ │ ├── ar_mining_server_tests.erl │ │ ├── ar_mining_worker_tests.erl │ │ ├── ar_node_tests.erl │ │ ├── ar_nonce_limiter_tests.erl │ │ ├── ar_packing_tests.erl │ │ ├── ar_peer_intervals_discovery_test.erl │ │ ├── ar_poa_tests.erl │ │ ├── ar_poller_tests.erl │ │ ├── ar_post_block_tests.erl │ │ ├── ar_pricing_tests.erl │ │ ├── ar_reject_chunks_tests.erl │ │ ├── ar_replica_2_9_nif_tests.erl │ │ ├── ar_semaphore_tests.erl │ │ ├── ar_serialize_tests.erl │ │ ├── ar_start_from_block_tests.erl │ │ ├── ar_sync_record_tests.erl │ │ ├── ar_test_data_sync.erl │ │ ├── ar_test_inet_mock.erl │ │ ├── ar_test_node.erl │ │ ├── ar_test_runner.erl │ │ ├── ar_tx_blacklist_tests.erl │ │ ├── ar_tx_replay_pool_tests.erl │ │ ├── ar_tx_tests.erl │ │ ├── ar_vdf_block_validation_tests.erl │ │ ├── ar_vdf_external_update_tests.erl │ │ ├── ar_vdf_server_tests.erl │ │ ├── ar_vdf_tests.erl │ │ ├── ar_wallet_tests.erl │ │ ├── ar_wallet_tests_ES256K_fixture.json │ │ ├── ar_wallet_tests_Ed25519_fixture.json │ │ ├── ar_wallet_tests_PS256_65537_fixture.json │ │ ├── ar_webhook_tests.erl │ │ └── fixtures/ │ │ └── ar_packing_tests/ │ │ ├── spora25.100kb │ │ ├── spora25.256kb │ │ ├── spora26.100kb │ │ ├── spora26.256kb │ │ ├── unpacked.100kb │ │ └── unpacked.256kb │ ├── arweave_config/ │ │ ├── README.md │ │ ├── include/ │ │ │ ├── arweave_config.hrl │ │ │ └── arweave_config_spec.hrl │ │ ├── priv/ │ │ │ └── .gitkeep │ │ ├── src/ │ │ │ ├── arweave_config.app.src │ │ │ ├── arweave_config.erl │ │ │ ├── 
arweave_config_arguments.erl │ │ │ ├── arweave_config_arguments_legacy.erl │ │ │ ├── arweave_config_bootstrap.erl │ │ │ ├── arweave_config_environment.erl │ │ │ ├── arweave_config_file.erl │ │ │ ├── arweave_config_file_path.erl │ │ │ ├── arweave_config_format_json.erl │ │ │ ├── arweave_config_format_legacy.erl │ │ │ ├── arweave_config_format_toml.erl │ │ │ ├── arweave_config_format_yaml.erl │ │ │ ├── arweave_config_fsm.erl │ │ │ ├── arweave_config_http_server.erl │ │ │ ├── arweave_config_legacy.erl │ │ │ ├── arweave_config_parameters.erl │ │ │ ├── arweave_config_parser.erl │ │ │ ├── arweave_config_serializer.erl │ │ │ ├── arweave_config_signal_handler.erl │ │ │ ├── arweave_config_spec.erl │ │ │ ├── arweave_config_specs/ │ │ │ │ ├── arweave_config_spec_default.erl │ │ │ │ ├── arweave_config_spec_deprecated.erl │ │ │ │ ├── arweave_config_spec_enabled.erl │ │ │ │ ├── arweave_config_spec_environment.erl │ │ │ │ ├── arweave_config_spec_handle_get.erl │ │ │ │ ├── arweave_config_spec_handle_set.erl │ │ │ │ ├── arweave_config_spec_inherit.erl │ │ │ │ ├── arweave_config_spec_legacy.erl │ │ │ │ ├── arweave_config_spec_long_argument.erl │ │ │ │ ├── arweave_config_spec_long_description.erl │ │ │ │ ├── arweave_config_spec_nargs.erl │ │ │ │ ├── arweave_config_spec_parameter_key.erl │ │ │ │ ├── arweave_config_spec_runtime.erl │ │ │ │ ├── arweave_config_spec_short_argument.erl │ │ │ │ ├── arweave_config_spec_short_description.erl │ │ │ │ └── arweave_config_spec_type.erl │ │ │ ├── arweave_config_store.erl │ │ │ ├── arweave_config_sup.erl │ │ │ └── arweave_config_type.erl │ │ └── test/ │ │ ├── arweave_config_SUITE.erl │ │ ├── arweave_config_arguments_SUITE.erl │ │ ├── arweave_config_arguments_legacy_SUITE.erl │ │ ├── arweave_config_bootstrap_SUITE.erl │ │ ├── arweave_config_environment_SUITE.erl │ │ ├── arweave_config_file_SUITE.erl │ │ ├── arweave_config_file_SUITE_data/ │ │ │ ├── config_invalid.json │ │ │ ├── config_invalid.toml │ │ │ ├── config_invalid.yaml │ │ │ ├── 
config_unsupported.xml │ │ │ ├── config_valid.json │ │ │ ├── config_valid.toml │ │ │ └── config_valid.yaml │ │ ├── arweave_config_format_SUITE.erl │ │ ├── arweave_config_fsm_SUITE.erl │ │ ├── arweave_config_http_server_SUITE.erl │ │ ├── arweave_config_legacy_SUITE.erl │ │ ├── arweave_config_serializer_SUITE.erl │ │ ├── arweave_config_spec_SUITE.erl │ │ ├── arweave_config_store_SUITE.erl │ │ └── arweave_config_type_SUITE.erl │ ├── arweave_diagnostic/ │ │ ├── README.md │ │ ├── include/ │ │ │ └── .gitkeep │ │ ├── priv/ │ │ │ └── .gitkeep │ │ └── src/ │ │ ├── arweave_diagnostic.app.src │ │ └── arweave_diagnostic.erl │ ├── arweave_limiter/ │ │ ├── include/ │ │ │ └── .gitkeep │ │ ├── priv/ │ │ │ └── .gitkeep │ │ ├── src/ │ │ │ ├── arweave_limiter.app.src │ │ │ ├── arweave_limiter.erl │ │ │ ├── arweave_limiter_group.erl │ │ │ ├── arweave_limiter_metrics.erl │ │ │ ├── arweave_limiter_metrics_collector.erl │ │ │ ├── arweave_limiter_sup.erl │ │ │ └── arweave_limiter_time.erl │ │ └── test/ │ │ ├── arweave_limiter_group_tests.erl │ │ └── arweave_limiter_metrics_collector_tests.erl │ └── randomx_square_latency_tester/ │ ├── .gitignore │ ├── Makefile │ └── main.cpp ├── ar-rebar3 ├── arweave-server ├── arweave_styleguide.md ├── bin/ │ ├── arweave │ ├── benchmark-hash │ ├── benchmark-packing │ ├── benchmark-vdf │ ├── console │ ├── create-ecdsa-wallet │ ├── create-wallet │ ├── data-doctor │ ├── debug-logs │ ├── e2e │ ├── e2e_shell │ ├── gen-dev-certs │ ├── localnet_shell │ ├── logs │ ├── shell │ ├── start │ ├── start-localnet │ ├── stop │ └── test ├── config/ │ ├── sys.config │ ├── vm.args │ ├── vm.args.dev │ └── vm.args.src ├── default.nix ├── deploy/ │ ├── Dockerfile.base.ubuntu20.04 │ ├── Dockerfile.base.ubuntu22.04 │ ├── Dockerfile.rocky │ ├── Dockerfile.ubuntu │ ├── Makefile │ ├── build.sh │ └── create_storage_modules.sh ├── doc/ │ ├── ar-ipfs-howto.md │ ├── gateway_setup_guide.md │ ├── path-manifest-schema.md │ └── transaction_blacklists.md ├── erlang_ls.config ├── flake.nix 
├── genesis_data/ │ ├── genesis_txs/ │ │ ├── -M5_EBM4MayX8ZpuLFoANHO00c4pdrSmAQbPYv7fq4U.json │ │ ├── -wzIQJ19Hq8Zyf1L85Ga3uGTrdWA2W-UNyr8aH4a4iE.json │ │ ├── 00nFXThK86Aog_HfLJc9j0nnXzXSlU6VdGC8qZc5ekI.json │ │ ├── 06dr4mrXcKlfPbK8t9vWOBCDJznyG-AsKxED-Jr0U88.json │ │ ├── 07u3F6WH-ohqBclh6UanAQ9Tau089eLJrIYM-8qkAbw.json │ │ ├── 0EzNUQy_5b7CwNNLVAi7CnameMgnxVh-XyahT2kn74Y.json │ │ ├── 0FJrLrxrFkVTBwRrzCCh88Gm2tG1xPxg8s_IuRZDVzw.json │ │ ├── 0Mxvgz6_wL0FBOxJmHcRcNwiaV8B90whDxG4Vh_GFic.json │ │ ├── 0O-UnzBvSFYoMQrbcsKHRH_YqNNylC1n9KWXmm-rr90.json │ │ ├── 0_GKZOdtRH-nc094U5kFBlvQSjPz_oX0tcIroqLFD3U.json │ │ ├── 0biLy8DoOhucpeYzOj5jnopxxwe0XDRfCOMjyz_a74U.json │ │ ├── 0mFNtCi-u34uwOj3BimQTPOT9PgLGE8uqCbtXhnwoKI.json │ │ ├── 0ogs8DTdSrNxfE2LzrScPvnyf7CQ7jMdFaS_l0-K-GU.json │ │ ├── 0ooE635sVsd6vdhX3Pb8Ufvuqd7XRjfUbG2eXde_CmI.json │ │ ├── 0qob-AeHGTS5EDamY6Mtsnxf1MCyUk18l09bqHAYQjU.json │ │ ├── 128KaPgVaZyrl8Vuzt795ZlWidERzih15pNDAJgahI0.json │ │ ├── 1Lwuom2q3FFI2pZz5EYgOzJRymgVWE3F9ZIl4vi3-kU.json │ │ ├── 1Q2plP5JFTLwdTC27VfIgDJ-ri5h3mVsKxZploTrRmQ.json │ │ ├── 1QoMjs6Q3XKklJ9LfovRmGbe4bAy9xY247JfDZqN3Eo.json │ │ ├── 1nu07yo-0eB5GLxIJzzlxZW6nFTFiZ3XCDobJUcNyP4.json │ │ ├── 1qVeYpf2sY8Qkz0iVomVPVb15NA7QUtF3eFDoMwa8PI.json │ │ ├── 1xh_NCIFYbprcgNM4AVvZ47jRxsQmJYvCG-L-oEK4iE.json │ │ ├── 1yvqJKdnb9SRRKoBg1m0kWAsSh9S0R5r9T9TE0YHfRQ.json │ │ ├── 21Kfm2Apa8QWeqdMqyQAcxg9HbiluZXfQFu4-6xe-AY.json │ │ ├── 24VRr4yT-_fOndcFYtK2oSO-p9Pm6lNtzQv8E-U43Bc.json │ │ ├── 2vn7V0FR0JMXrVbj3Ofvc_2nvrFYCCpRoFjc7UYpJcA.json │ │ ├── 328-6fOVCfCid4QTxHjkAMkQLMHZgDg-hZo5PnVfp2Q.json │ │ ├── 3BSgxVi4vtVtgMBtDE8xPMqU0PmkiKtKX6P_Iw0kMsM.json │ │ ├── 3MMMUrHDmjbCn_-TOZJJHvjLBp8PffZKUNfm_Ziy0Vk.json │ │ ├── 3Q5gJrbqc-PeOvD4QQ4WCNp-f5cYzTyHyg6P9b-WvwM.json │ │ ├── 3T6mnguMWl8GeiqZWiBZrGXHHtwm12mIWciusoSACkQ.json │ │ ├── 3khTH_o8WZHSCzP-AThkmt7zZL-d_lcqUKC8nz7c8lk.json │ │ ├── 3ku6XelnvBsaRjoNxDWb_kT_PRlQ88U0pbWURziCj7s.json │ │ ├── 4LwZwAVcaBXhXsP5b4mnE11tUXefuRUTtTibtvoozDQ.json │ │ ├── 
4UEhkNbsGdJUjx1lJQgX9KorwSf_RRZG8VMW6jMmf8Y.json │ │ ├── 4bPVo0hCI3E-ry2mBjvOZsBpNwPM108NT0vnJCxCeJw.json │ │ ├── 4ewYAvsgaT-6Oy23qPqK29O_AgfvNbhLvol13yN1PdQ.json │ │ ├── 4gLPD5njSRtiaJwjcjmNOyI5Vw8sFBQQWOefmy4SPmQ.json │ │ ├── 4pNPqxodBesN6jQl51nH17GA1fWYfHVm8cIEfusnPLY.json │ │ ├── 576xa7WLVidNoEcYPhAm7OlyYgbrp7Z1RBIfqLbVFzw.json │ │ ├── 5FL2C4l-5cTl9wg4CblgIxzko8hGsB5URVA_yTAd4Nk.json │ │ ├── 5Hatfzkj7ivvIsUIDjdOSp-4CdkClH6B7S_SNX0B2-o.json │ │ ├── 5OdjYWAipCjWzpqfNoNhyJ673d4pRMNva8la_SFfu_c.json │ │ ├── 5WKzIeQrDGC86IQvl2NhRtgPNKHGRA9oyjRByV1F7p4.json │ │ ├── 5dsjbEwH2r-EWCkfOznV4JkCOLSK9vNY-0iqPr4RZUM.json │ │ ├── 5mt79Uz6p83vdLtYRiByyWLqLI2GZBeSTutDRmzw7tM.json │ │ ├── 5qRekKepIlFbUhGMq_nNy89bzx_K44e4GmUKYAe9MRU.json │ │ ├── 5ynd-L6Z1vrR7Vlyr-rkrga_Jw2ibALkIgldNmsVRcQ.json │ │ ├── 6GNIVQ-23jPJTxQkQITbSKE7SYm6J3MF4qbSgH3-AXU.json │ │ ├── 6J1sN2nhGpqe9iJwgdfnxxCK4af88__HoEG8MLeqtyM.json │ │ ├── 6NaT-Mz8QAiQS8atFaOu_ezqZnfu_XaQb-Grng-hvHc.json │ │ ├── 6YbxtptbO-sidrnYdgn0G_CiNBh-az5ZzWrSCP9DYKA.json │ │ ├── 71M1E7A4e0PFW_6C0gly77iCg7ykX17647i00eEiA-s.json │ │ ├── 7SfLhJLtevo0zu-1bo8q6zX98WbGgpDNuY6PXbzS_j0.json │ │ ├── 7kT0is0QnxdjqkPi0BKamhLW6z6_SK55LMAVKQC6F0M.json │ │ ├── 87ieWrloTFUdW7YjJqJcINd1M_PBWCzA1dIRFzF4RKM.json │ │ ├── 8b-7D96aRFJgDm8z5Tg47vBbdjseW0rRi17TYDcaQ5Q.json │ │ ├── 8gTAwQ3f17PKI9KCX1cjuXCs9F8Hcdz8KyhsecKuCJ0.json │ │ ├── 8rKBfpmkPlxnnYr6t0xIpUDubdidK0Fpnois7-xQJtc.json │ │ ├── 8y-ghHqMT2lEHQn86jRXkQ8I5cLWWtKW1CQROp8mzIs.json │ │ ├── 96Ijx5TWSxZmZaDH1pteGHFjIYY0aHmGWNHiMYeSYIM.json │ │ ├── 98kadyXY0OPfEZKeeZcCyQ7z5mRToZklK-D6f1a-Lxw.json │ │ ├── 9JWfraRekKtgXiIjssn0tVSzhaCaN682jECsrKtR0_E.json │ │ ├── AN48OPO2-1mh4PKtpyoNm7SWJK2j8dF0-TFLU7Z1C9g.json │ │ ├── AX6ZZxDpFlNhoN5Am5Hi4DER4zOBGVnQm_bse5PfHNw.json │ │ ├── Achd6pqJVZ-1vNMLC977Lu8f20eBmgAv4dIddXql51s.json │ │ ├── Ah6I8y8q0jb15KXjn0PyNfe7FR3v2xobg09Lfj7n1Mo.json │ │ ├── AoSTMf_ZxlcY12bK6_sWj02kssD00K4E-vkHx2vRxG4.json │ │ ├── B4e9FBfqZGBszHAhZqTq-TNjb-oG7rYdlMWrQa4CPZU.json │ │ ├── 
BQ2RVL6XY99AIkPKDBCfUfRmJGejkZ8YKgKZc2LewhU.json │ │ ├── BRD5ARo8tiY64RqIoxYZ6jwbE-LQT_7jA513nHwWyRE.json │ │ ├── BYJCPwCLpd9a5K1HFy5F6ZvnemPiPFtV4hz5wMHr1NI.json │ │ ├── ByvrfeR4UNmWJwF2fU41mBo6ThFl49u24rEGpbeSI0Q.json │ │ ├── C3auX8HXhc2dChmvSBUfgGyYynuAr6P3g0p7420GG78.json │ │ ├── CEXuGv3KvVtkf5gkV0ip3g1FF-i12WIDo6IOigORIZA.json │ │ ├── CMr-rV5FdlQcRBo4loZzj66EFqwHBmA36tWiRMKGigQ.json │ │ ├── COXhhpbcLSEe2iP2kp4SDj5NjjBAC8CucsAgOHRF_lc.json │ │ ├── CSkFcCmNgvnp7jp7aK0tEGsLWiZVMF-QBkEFaJrAG48.json │ │ ├── CUu1gtu6L5tJxkOAu13tNBGDKECohV8M4qgCOOPNtas.json │ │ ├── CZ181FVir4NaSJ7JsVb50-xCaZtd3dmKbDer7jpTSyI.json │ │ ├── CbV_CDXgVNjV6fyoBDkYmbAcaC5VsLDYXgEIwj2Ewyo.json │ │ ├── DC6gmByeCki7uyXHJhX_A9x3pkMgmJ8Tv6wDRnh7vGs.json │ │ ├── DDrS8BD0XTUVJt5E8kwisVTBX4PBWp0lCnSkSD3PJto.json │ │ ├── DJf1SRoKaPo1h3F-7oKIMu4A-r9dXXMjE57WQilPdTk.json │ │ ├── DMtXbcR_qHwdYXvkuCGOQARs_QtN9iWPw4x6TTaWOcw.json │ │ ├── DQ6WaBfLEMEFhKoMoutuPyO_zFg1hWTDXT13CD8n1nw.json │ │ ├── DTGNdsYZDXoU1nE82yEjG5ZEssxwUmkFTkM3_i6oSx8.json │ │ ├── DkBAprUInkCbFa6A_WJJNL1z_PnhEavvyZtF09lmyvw.json │ │ ├── DpEoi9F4g952ajGuT4g1HWY-xndyE77dn0VfdNXkrC8.json │ │ ├── Dxrsx0xuPVY7oz9yHbL6wOFxo6ws7ycVe778C2bc9J8.json │ │ ├── EPZ0hBh1wp-7T4JED4v6DOItd-9MNWkRfbLyizDLBsE.json │ │ ├── EQh5rYFJ5Z5yESi4DIuvl2n6iVZS899tA6V6rf2Xwhk.json │ │ ├── EUMtkWCJU0L23RnhXKfQ1wtD3Jh2O-vpFnLcQXynoAQ.json │ │ ├── Eeo6rANLMAXonDFLDG2nu7n99O3Ymfk01wYXJBbEixY.json │ │ ├── EnPMt9yzTsxLPR5mD9zUvndxicdYBUNzOlcCPvQlOK8.json │ │ ├── EvKHSfokNyuiTarFKOuQ_-SaBwtllGpQGc7IFkRfBfc.json │ │ ├── F5R2EA-gM8AtQ9_NymKwtr_Im3_ljMR38ndzCs5c77Y.json │ │ ├── FTYnf3Z3QqEpNzTigfAlGTkgpgCWtFA7R8i-I1ik_Vo.json │ │ ├── FkZzg_-5eSdFlbq9XnHe3wRhYidHJPXwUQ6YLuJijS0.json │ │ ├── FmfkuPmh0vkdv_qbjXBUX1sQ-DmwBFbjuC4punobGy0.json │ │ ├── G1GqspPmLkJTiT35QUTWBT4def7j5ORSfHCtrYzrrng.json │ │ ├── G5FyMvm8E0_07vFgz-XISJN3VEviSrbtih9_Wptef9w.json │ │ ├── GlWMQUuiL80knS07G7NpoYat3w18VMuyLEuC_Pmijng.json │ │ ├── GypgExivgblZSA-1n7KjdI0SJOyXwFJkuzzPWS4NID8.json │ │ ├── 
HFUR5ZwLihdaonJWHRHBuLay6cw8ZMV0bM870xhE6Qk.json │ │ ├── HSlgnBu2Yxros7zyehPgiu2u7h80dJfCCqrA88UnkB4.json │ │ ├── HTt6lPYQfcIgUxKPjUt3aQrpwE5e3UA4UT2EI9RxSbw.json │ │ ├── H_0S6x36tsFH-x1h77jV_zzGGp97V8UjmgC0RZYwbtM.json │ │ ├── I6s8Z6gEPLQABFstkCoLVv_gdQNGb-uuMMut-R7q2hA.json │ │ ├── IACLRsWq-T6aesGEAjfFTZJd2sy7sFvWL7O6FI9A39U.json │ │ ├── IJsiiIbd-Qs39TAJ67hiRJFsBye_rgQdU9GBid_PnZw.json │ │ ├── IQgiEwMLp1bb6muuB_G7Q3sRaaZ3OZHUSjgshUq5YMU.json │ │ ├── ISiC3yaTW9KnZmgs39osghIg0HP8ISh77bzH7u2m55Q.json │ │ ├── IpwG_74praZjsu9L91_KWYHrVTpEDwyHZrsHgum4Z8o.json │ │ ├── IvyUOghXQ31LnYE3bYEkS82gTAvpIa1rGGQKmiJuuMk.json │ │ ├── IwSIt1P5I_mM-gAeAvXiyxRVb73hqkQAMfxLIHbbZYk.json │ │ ├── Juzb8MlmGd2qomIUwgfGzIFO7c7ZcY87kJPmqpSkt18.json │ │ ├── K47jh6Jr6TmZeZ_TadmyLLy1V6ZvLNpvV5FWcICohnk.json │ │ ├── KOm2FJzmNXa_yjYC-58DkysCdk7FRFMcRmBx3DF6S9A.json │ │ ├── KPNGfBMOznCXZwOVvCXHRR6sVJx1akVkmXTV98lCMKY.json │ │ ├── K_ae8Bfvql0dGhIfRH-R7W-zWoeB95kYGJNi3HjFyrs.json │ │ ├── Kgr-XWwHYos5Y95ZJ9mAUwjYjj_rP0I-GnWctQDNlp8.json │ │ ├── KhQeu3CG_X1zoHbyy99GUlC9gVFFexf6vVPOlLgCj9I.json │ │ ├── Kl1zrMIDIC9yW8yLMnSKQYDoV0PY41ymzJQw91qaZvY.json │ │ ├── L8tkBBP7fyYfK4txqP-fGk_ODOU4UfIgFV79O-qd5vY.json │ │ ├── L9J9SkTWI_Fx5KhujeWGokIchHTSFlSIC0blr0JIz80.json │ │ ├── LBTipZADoYfO-9UecE07Z83ijiLl0f2wAGXyRFQqKCY.json │ │ ├── LC-_5GDhs09OvN7r8GPmjMa6A9xSeVtsAmDgYCgspvc.json │ │ ├── LFQ5iV6E5wyBbJmJoFJdH39ZxfW-y7mZFKou2H-ONvg.json │ │ ├── LJ2QSdjHftgyCOSgy9Ub0OkTTN25rxCY7D7mt6u8Uy8.json │ │ ├── LUdFh6g9auj1LRtk8IUwLoY3e91jIkcSyPKuQQekPY4.json │ │ ├── LiitFWnODMUA7esa_f49IiMEdN7cTKoKw1cgG2J_eNE.json │ │ ├── LixFbPqM1ZZ-5JWo339FMfPCpD_6M85rVK8IVmmt8m8.json │ │ ├── M7oOLbk7TPBanLCS0pzkJSbV1CYoJabbsSDe_pCjhEo.json │ │ ├── MOoLwb8S881q3-gM4GK7DuCEoh5CZnF1tMIZG300X58.json │ │ ├── MPP4fxmSkvM2BVq8rumeT5yvDNu3QAT_kqpOlAq5s2E.json │ │ ├── M_wQsQbFGtGiEaH0uW2swBubAnFab3ZcCN8IYWZvVzo.json │ │ ├── Mk8XJgQPSOIsx_QX_XDPxdEG5NcKgO92q9i37uLZsrs.json │ │ ├── Ms9gCRdVwT9u8-ewYd6c-T0bet-n24n_q_Hn0-BlMow.json │ │ ├── 
Mv-TFhA3639O4JbKzoO3wo8LNPcFwA_vaaOLHfWRfSo.json │ │ ├── N3lqe8CUwPfChinYVV4OZZQNjtXc26JkOJyqgoKhq7E.json │ │ ├── N6-1fOVDkoeDwKyoNdLxCVoyy-c0EF178A_oQeEchs8.json │ │ ├── NBxewjnZAfekK0hKmwL_OpF1521JTeIpLk2a2TLDnTk.json │ │ ├── NE7AIvW60iQL_6aagNTSiaMpmLfAfRwbxau5FZLA10g.json │ │ ├── NEXnMz8Yuw-xfIPprKT2iwx5A1UjWwRHCH7XCpeXIPg.json │ │ ├── NPLj86idALmTczSq2vrZdTs0bjI-e-KI0j3EOWWpu54.json │ │ ├── NptjIrqZrQMSdLbXAGyQCr8audCzArV3EofsjRCqrQw.json │ │ ├── NvGRQrdis2HV22enpSpPqsb0M8s-pN_nl7eJtalZyC4.json │ │ ├── O6qlkPRgr7H3WLHjVov-CTm-q66Q4TuvhP6GC-c5ZjY.json │ │ ├── OILhne7UcvACtB4peA4osAjRMthaZZSW9OWhe3NpLBw.json │ │ ├── OIOqGvvuafD_5J9QzfxyPiNlnqzIcL96i6u4PTUeDmA.json │ │ ├── OaumRLT8oE6J8gqrQ9DrY_grMuSfWtai95VnqrX24hs.json │ │ ├── Osgzf9EDK9j7TMlqSJ_5Y1rzZgOA6qfR7ktiakLPk4A.json │ │ ├── P_pvvzlCIX7Yaiuv6zt1voLcn69gb9jAHPRhHaHjLng.json │ │ ├── PjeEg7GpKT8twlBkp8GHAsEqfMvmNd3RaAx-l0R_i2w.json │ │ ├── PySb_0NIjROmsIgwz4kMwC9MVmeY1MwuKdil0WeUzxw.json │ │ ├── QDBM2PowqCX0eUCKzgV-DgdzeDz5TXLKYS3HVXLyqoo.json │ │ ├── QDbVk-efwdVbHDGL1vZO3mQ3g65ol5RR-1wOvPLUkkE.json │ │ ├── QJlE99-614f6XzZ-7VctQjX9DYe5wnO21aHSgg1RhnA.json │ │ ├── QR75we1zHW-qO7dsI932kXX0YrAIyuC2XIDRhfmK-fE.json │ │ ├── R0Mhun4e-WmLLGxnJq4SDTRqyNvTDTKC-uXuol1s63A.json │ │ ├── R2h2i6y-KFxuHukxmHIjSncPZSiS4tpuzH0tD1NAooI.json │ │ ├── RU5mkM_3UrjRMffwgj7ovDMYxxjhfXvliozhpIqw0sA.json │ │ ├── S5Uv2W6erubrzYjzm9QHKij51XE-j-GFdYwcV2uPIAA.json │ │ ├── SBhaeMSTQm3rS6puYacdT-4wzlnkBlZ1agn6IW6Oyg8.json │ │ ├── SCN8yn0cQASui1DeV4mMYeQrRn8eXKr7Cp9ll7L3UfI.json │ │ ├── SHxtj5_gLdJMI-6CcspsDbFBuU_74df3I4-sAJkAr6w.json │ │ ├── SJXMM0tlXown7l3ffjhsiKf311FDTRa7QkKX8tgyEZ8.json │ │ ├── SWNkfm9ZZPCiYKFg6oIW_IgqJp5Ypbp-Fs9S7YgPm0c.json │ │ ├── TFX7m_Kf56rV6LNuyQ31NeVoDHJ3x0YqhIv4-IBQ-3s.json │ │ ├── TGdhJ01pPw49A0ZIaCCcYBnL-RPK_3KZH3cA6E9dVqc.json │ │ ├── TGp-18LYjSWQQ36gs5prU-vDgteOL79aywxXoDS-w0c.json │ │ ├── TUIdVI5yQH50laHvkxgAnTV6uuE2LXXH3pxIe6Q2S7I.json │ │ ├── TkN4QLdC4tu-_Po50RYwF33shyHcanHSe_BKpryK0JA.json │ │ ├── 
Tnf6b1F67AEV2r9Flj8ktSSHYoV8SeL9dFvHRkavlZo.json │ │ ├── UMk64563QZfxgZr_vKOTDrcp5XJNENF82Pji4a078YY.json │ │ ├── UYoJMT0QxMtB6ctUB-9iQlcx6fF8R3s8ahM4_iF4wiQ.json │ │ ├── UdCfZG1jBYUKgeLc13zjRxmQHO4_13B-NigE57jmJ5A.json │ │ ├── VUfaTp1eAzjnbxLR6xx_qQGVn_WOTna3rTolM8wY5BA.json │ │ ├── VuXQZjhUaZ2Hyi6Pl8_VTOu2mUWjoEemYb5TKXPFOS0.json │ │ ├── WE5eBi6hEq90HQvDjtJr-EmZATWJthgxh3HPPuQ7410.json │ │ ├── WsYJKhqhppBF6_eGbd0OACdu3LU6-CUuMcLeG3ST2qc.json │ │ ├── X9biR_ZA-rnpzk4gfLi0-pBSsjjT2l9Rk0VfYwf1WMo.json │ │ ├── Xjz72yVLd_Qzl8_GfSPqZA1MAkxxhjr2Lsf2tGCj_ZQ.json │ │ ├── XtDRu-1SyoRL21gpKcxWtxyksVwTF9kvW26hvQ_bPzE.json │ │ ├── XxgirNr3QGaJTKxPWqK9byYLj7SdbfZudKd9rbynWyM.json │ │ ├── Y0PLaTBQ73JXn_jHvldOKC3jdbqDbqTMkcW0x65_Jek.json │ │ ├── YIEEyYfNIRSjzm_gzv6l5CelyL4AOzKX9M4XPXRk2Yo.json │ │ ├── YfHEyNUGsOUiuqCgHV127cg2Z5Yap9tcQB1LH7tq9ZA.json │ │ ├── Ykh5TAI6koBN4UTQZ3GNIDr_uHNjlpHH9HsvtEkoWLA.json │ │ ├── YlalzFjBD8CgZxDlI6eNWE3PIIflHGzXyY9VzPPeCFo.json │ │ ├── YukfPvGxtYmXFF6wJjDiZcvqmH5YItxwsoLbMxWCVFg.json │ │ ├── Yzj2WZ-3q5vKkBJtrmGlVjZND7iqtzvMRafS0TnQiLE.json │ │ ├── Z5e9G5QMZ_scJQ62qoqUs2XSuhknTuuAIhhGmfg3Ye8.json │ │ ├── Z6IgRWClifhTSnomxJet2WLw8UUaslmqAi2nynj3Ke4.json │ │ ├── Z7gfizrPOypT4Pagg3oli5g8wA8pbKB0ZJnrw-FVyys.json │ │ ├── ZAk05et7CFN69E9NwET2mSRI0ISRigjMEjcy8kbO-Y8.json │ │ ├── ZC44Bxrx6AtNJYLwhvpALuINZRBXklme3tpeJbJ2rdw.json │ │ ├── ZEB62vqKvkPK2s_RmxgQ2IhafMxJ_TXCGswrrKLhYiQ.json │ │ ├── Znw-6H_ayGJBReeQm9z9WKulBH1ZzrOovdMsNPcIe_Y.json │ │ ├── _01J_SIBJ164H0EedSfQ8h0dMfqet66WKHwcOFQEsMc.json │ │ ├── _Hf1lw_E6Lyd-0PGkCRQaN10cdEx4M-hl9y-zWiDo8k.json │ │ ├── _QEE09XylMYgab9MYPvrrMy7v1jKWh0bGwqFvsBsO8s.json │ │ ├── _fLFu_BOzTEPdX35rqUruuyNxi7f_La8T1_JG7pIPd0.json │ │ ├── _u44CiJCcYiOrGffgZoQSmUrJe8CfYD7Nw0MdPX0tUw.json │ │ ├── aGqWG70qjD5P8spXLMtyXnYxS9k7Net-u932EyIFl28.json │ │ ├── aPxbCROotxwkdovWbQEhw18UNAzVy-AmjYwjo9lb5u4.json │ │ ├── b96k6w6qUyLSSWZlmupyBmav6XYMsdt0xTc2yIUZtOA.json │ │ ├── bnT7410oaZtnCdurp5jNgLKju9d_RRxhgggnxa5frMQ.json │ │ ├── 
bqhG__MMablNhNpiSp8nopeKDCzXy97jLuSBlsKk_u8.json │ │ ├── cIXdvNTNHJSmA6Rt5UgSNfMcGfvxDnYTa3a1ulS1SiY.json │ │ ├── cgU_TlXi5gJ7hShSBYsS4UVi-sLTtfFv1y1sy2nNhos.json │ │ ├── chdl-kIl4zG7VcJbKk0Q_5TeGwuH8Xp2YFPLRJJKTWw.json │ │ ├── dYBZuFcCEgGVcfXgS9tmeJsue_qwaCRO3Mg2OHCZh_A.json │ │ ├── daTnztzTMlA8Ras9XgQ05Fr9ZYwOg4-UDfjW875yQeQ.json │ │ ├── dn3p_BqD1gIcZQqdA8r6TucwycKGave22IqNjzKSHqI.json │ │ ├── drYsyF85HcvC7LM1hkzPPgTj3_zp3amcNVNobBmOxvc.json │ │ ├── duSw-WaGKAabAztyg2zkj6hjgaVaRGBrJuvZ5Gd2Pzk.json │ │ ├── dv28G4IsYul7liWrycsx4UKSYHA4sWUY6xFQzRPi4p4.json │ │ ├── eGhF0za2qN5WuadlVZ1iak1S5LxXswHRzIa3j_P-sUM.json │ │ ├── eJ2aSQ4nm-i8XAZW2pcRq6GoEjW9K8EBM6w7rLiuSHw.json │ │ ├── efqI0eDfp0OcYB-Ms5ELukIUr8-qtlX7Ica-ikhVZLU.json │ │ ├── ez-ItWkyBvBZ6J7_Mobrpqc9RTp6I2JBmkPDV_xCQVY.json │ │ ├── f3jE7NK419FZzwkx9VjTkrcX5FEgl2Ky3KSK0vH-wj0.json │ │ ├── f6MY8LMCwGbKZqXd4dkCROQK0qFMjS5OJAbZq-UhMGA.json │ │ ├── fBVa04p7MEL8BsPpyD_Pwv3uqBnBMVzG9YpXsCwZLtc.json │ │ ├── fkbFeVpiaAOtvt_-M9_U4HzbA8Elh5sa8xJXObrItYM.json │ │ ├── fx1EmDF4yioha3ms_VbddDQjl4bt6pBLpFCESuEIT6E.json │ │ ├── g19-Tkf4xuM9golcjx0mA1RkJUYocQJ3uYnH8MU1ePs.json │ │ ├── g6TUtTIi_rwlAHNuO6ACsQqIChWACugTPmZxaaJltDM.json │ │ ├── g8ZQaQTNUbg-jGeE61og18FrGqpFeZxjFDypGuhT7zI.json │ │ ├── gE-2fjp2ncJ0ZRg12UBfqnCBb75OtAOksEX3wGZguqw.json │ │ ├── gXd75eQL5Yzcn1ba51nORAvb6f_surSnz3xcNlLAxEQ.json │ │ ├── gbYMogbLVx3rOmm7K-o3nfGPKauLMLkGMSXcKkXW13Q.json │ │ ├── gyG1bGFt7qkMyUCrKiEfMzMzc3_3PooewqNeJpy-3Xk.json │ │ ├── h0MlFXsvtNQlFwgTh6y7-gjXEj0CbGECgz77EwQsca0.json │ │ ├── h0sgGEeQQcmSxg8uyiCOigWtI_r2ex-58nk1xso004c.json │ │ ├── h37LQjpChpTPMquvaxpfFeKt_7oAB5ElDzsdbCQ61n0.json │ │ ├── h7qIFbn0LoexuVwBcjKW7v5A65iQDQFYZUQjuowfIbk.json │ │ ├── hRTkBAH0k74HlmlWXTWmetXcIFXvM_Zrz3i1JXULZSM.json │ │ ├── hX6nohfkKZ_9ajziHJ6g5V5cIe1EX9H9rg7eScK988s.json │ │ ├── i9xaFWy0avtyCCxQdmWfGNDgh-PaJgIHkNK1pcJzmV8.json │ │ ├── iPb5JLzNajAzUNByVeIGSEPR0rzGOV5iIYjWpi99APQ.json │ │ ├── iRF6OnneKHJLhLMdCXpo6LsxVyWIGyklFEpu1bN3cyE.json │ │ ├── 
ijroBK9n_uKCS97V7iege_5Av2E-tm6ujquAazT_sBI.json │ │ ├── iuTLZ3xxGpaBCggV5xfUkJ6hMdUQKHw6f_vEn6sbmPo.json │ │ ├── j2IiBCd5Vf2Q8ciTVxeHbN6JgrXUFiv0xtoMTA_VtqQ.json │ │ ├── j3l4tvphmVOyVyFkNdS7ulmexBqPqEvsSJrBsjAFJXc.json │ │ ├── k6UueT0FWSSUbAAH4Uc1Oz6BivunVR0nSMTEILnB_dQ.json │ │ ├── kXu3jTQwgYsphIUFbaVGg9rNiil96fNjw0RBa6oPRtU.json │ │ ├── kcb41aN752OE__qEKDQAsbpzCUXMdlzI3clCBuxdVts.json │ │ ├── lFqBd1sEhgw1e_adedkee2hXP9beiNYbF625KV0vObU.json │ │ ├── lq4SrnweWCHnEhw_AV69gMLyBrPxYOmOdVdRIXkHwOg.json │ │ ├── lsuH-ITPI--6KSzhIFclsEAWOSoRQu-8tlnOSxj_Er0.json │ │ ├── luQlV_58e9qjm7EZpoO6f5Y1j349Q34UwTW1Lx9J_vE.json │ │ ├── luyHFFFOvjKPqi6nVrxngcHaQ3RwbMDMqVTLqPagHy0.json │ │ ├── m1Vv28IVJIuYiToBhxFVp3dA47je3L8WkzSjggAWXAo.json │ │ ├── m5zFPHB-2VjCgTLStD9TLZwD1CHfLELPKkVXFJGIptM.json │ │ ├── mGAMsTqBzau-MjTkMS5Z3g2_nUD-qQWeLtq6qlzkVl0.json │ │ ├── mJUxc7XyUp1HV_VRoi_54geidr26I9PUaiNL4msSNxk.json │ │ ├── mcFln0_6FIuLwE9GtMRzmdQts4QALV3dxQkXdgSdO2s.json │ │ ├── mvGgGlFTDJ0ukM6Bssd8G8B5PrEppr4Sg1_NTvzzV1U.json │ │ ├── n6TKbsqmGl2m3yH15RAe405vYZQ7DStlvYsHCHp1D0U.json │ │ ├── nXGMduBKL3mpsnFNPctfjEa9Z9zlMpdxcRrdkK95D80.json │ │ ├── nh2sbgjxu6MmU8yGV00w7X4q4XCJETeYE3zVtcj2ldk.json │ │ ├── o0nw6fU4gPL7Ae45x1BEQr5GkXSzZUrWnZrdIWqgx6w.json │ │ ├── oMP40Kgd9MxLfksmW_HAlGe8Rn1Px8tpF-NOHBfe9oo.json │ │ ├── oRvFwVpHVeo0iysSg2jFOAZKE-hKwbm6mGeZ6VUZmxk.json │ │ ├── oWWJcAiBCxhtWkIqwir4-vTvD3JFpHgZRNIpS-Xjzp4.json │ │ ├── ocUISm-0ItAS-N3Ydwe1swo4JmoVpRzWzngFt-pDwfo.json │ │ ├── opfZTSNdqaxXZUmaKROD2sd4QkyNDnZE3u1A95eSw4E.json │ │ ├── p9PJG5GkKZAxLyPJyDYw4_1CmhodHGGGqB785duwVwM.json │ │ ├── pVZkxPK8F9VFM5lDp0oTBThaw1RvmwG64wIHFChYJKA.json │ │ ├── piTZgtn2oBsWKt09CV8LqH3I3JaVdRjFwjOAJmC-Xp4.json │ │ ├── puLpw8OIIYCOatImKjpV5s0JWyKFq6bXFMz_qSf6mUA.json │ │ ├── qU2Gu35-s9wMH1N4g_zMYKCqIStYzBZmRx0XlcIpjyk.json │ │ ├── qX9u_AprdhyXAPGfh3C94x9AbxwWx9nJSs7g8FSwITM.json │ │ ├── qyMWe-VUOzHXkQviMhNS0wJI_27nvCgDY9iiKANk-lI.json │ │ ├── r8Yq7Lvx0FjFYyXBLn29UM5Evv4AtGLZ00LCtE_hC60.json │ │ ├── 
rC7TOXwflo7w9Ky0ljTYlzdbR0A3g2GVRbRJbIIuBfY.json │ │ ├── rRoy9jsUZ-Y10NIBksSD3P4HcVDfZheloItTTnc8_ZQ.json │ │ ├── rTY6dpq4KEhZtB-5moP1mWN1CtrTKurv7QSY8wAN758.json │ │ ├── rvbM0iB1HJ1YadedIDWjJ95J2XBHWwPAJD4VfpdQpxQ.json │ │ ├── sB51Zz1HRjpwrWFhW6ZE2E-n5hl3joqxPQgnMCLX4ZM.json │ │ ├── sfAY_3fQ41LahxW45rXfndEzeHD1eeWJgI9ZaM3slFU.json │ │ ├── snWRgSI3vlTOy3RRkuNckM-ws-5lpFiPMpYlLx_zPyk.json │ │ ├── tOIFTqEef5fQYPzhlkC2Um7rddT6MyrHPzUWXDv_mJc.json │ │ ├── tVLYd_62zbU-VPzQPOMHUo9TJR1dvSZ_pAHrC5Ubs8Q.json │ │ ├── ud3zGJZA5tPRoitGG1c6HWm9W7iRS4ZF3u6PbZ-blns.json │ │ ├── un3O49lggBX9raJKb6yuql_QTgZYWakWw5ydwUgUuXY.json │ │ ├── utAoO_xht393CbJ_7P_ektVYeEpkySWLM-066yJ5HyI.json │ │ ├── v2UplxDprWwaIwbB6z3KNEj3GjloqM8SinvVahZ1Wpk.json │ │ ├── vQ4zTq--De8FHdVnE7sYCemwiaqoZDS4emR_y6o6ZFA.json │ │ ├── vWeY4yJSJF9LXogRZb3Qr6QyLtEIL_8IY4bzJ2e7O5I.json │ │ ├── vaJOh_TzVSoEgbgDyKz6ABzd_wt2-ouBTe0gA1F3oMY.json │ │ ├── wFjsB5Y9GV61NqjCeyPCdkfXKUJOYccq8Bl9aljvwGc.json │ │ ├── wUhEm861foyWdxy0SI7CvXRcWuohItlX6Ydqo2NvtY8.json │ │ ├── weff0Y0_3-H7Vy1HrbpIzUmbTM1rZ8Lw0wgDGYmlsrM.json │ │ ├── wmZTwziFc_VlvYJz_4nyxYd3WxznBmsn5QQyRKDcWXU.json │ │ ├── wnOghJX4aZlbm7SDDb4UUX8_6GZYpYYx3GireamHwAc.json │ │ ├── x8KM69OVm6lzslK6ccAE-3EX5sW6CUHBZB-1hbc-J0A.json │ │ ├── xC7ski_qpcrRwRkxxHwPZd2lOX6Q---2qdQ4Rr-wxAM.json │ │ ├── xSkMzFablxREj8H_RwoMseAFk-TCwaLVIZMHqXh5DHY.json │ │ ├── xYpSRRpO8ejUGeohlRutNt9qUMgvuZJGkPGCyu1kSas.json │ │ ├── xavUY4L0L0nLNVvHiYfBqGL5iqUvdwQ-iY_nLLMB6J4.json │ │ ├── xiQYsaUMtlIq9DvTyucB4gu0BFC-qnFRIDclLv8wUT8.json │ │ ├── y-k4KjdSmwYmIugoObrtx5JWYczlEZBzwBHGMLqNP-0.json │ │ ├── y0PrXtX7PonEbIG3uEdu-k-McGeLLAjzUriUTCMTGcw.json │ │ ├── y6WPKL6MHzZp2ktvb1cETmNMBJyCEPlxdisKlroEBtc.json │ │ ├── ydvI6weQPIRj2hcNg4RPqzDpFOhqiTc9iDqQ-fUUl4I.json │ │ ├── z7Xvravldr4BhTI4KPOEWtG325_1ORaLQ4aUPOAe_us.json │ │ ├── zCOtSnXKGGhXgrWld31Ak9qQA_SjpOqB6n-9sF74rhk.json │ │ ├── zUFRBcWpPAUyMlojffeTnPgsLo6YgU6JaJgOR0mpBuM.json │ │ └── zwl046ia6I5VWLRYPJzBI70ypBQN2VlvLH9a_ndNKxA.json │ ├── 
genesis_wallets.csv │ ├── hash_list_1_0 │ └── not_found.html ├── http_iface_docs.md ├── http_post_unsigned_tx_docs.md ├── localnet_snapshot/ │ ├── ar_tx_blacklist/ │ │ ├── ar_tx_blacklist │ │ ├── ar_tx_blacklist_offsets │ │ ├── ar_tx_blacklist_pending_data │ │ ├── ar_tx_blacklist_pending_headers │ │ └── ar_tx_blacklist_pending_restore_headers │ ├── data_sync_state │ ├── header_sync_state │ ├── mempool │ ├── peers │ ├── rocksdb/ │ │ ├── account_tree_db/ │ │ │ ├── 000009.sst │ │ │ ├── CURRENT │ │ │ ├── IDENTITY │ │ │ ├── LOCK │ │ │ ├── MANIFEST-000004 │ │ │ └── OPTIONS-000007 │ │ ├── ar_storage_block_db/ │ │ │ ├── CURRENT │ │ │ ├── IDENTITY │ │ │ ├── LOCK │ │ │ ├── MANIFEST-000004 │ │ │ └── OPTIONS-000007 │ │ ├── ar_storage_tx_confirmation_db/ │ │ │ ├── CURRENT │ │ │ ├── IDENTITY │ │ │ ├── LOCK │ │ │ ├── MANIFEST-000004 │ │ │ └── OPTIONS-000007 │ │ ├── ar_storage_tx_db/ │ │ │ ├── CURRENT │ │ │ ├── IDENTITY │ │ │ ├── LOCK │ │ │ ├── MANIFEST-000004 │ │ │ └── OPTIONS-000007 │ │ ├── block_index_db/ │ │ │ ├── 000009.sst │ │ │ ├── 000011.sst │ │ │ ├── 000013.sst │ │ │ ├── 000015.sst │ │ │ ├── 000017.sst │ │ │ ├── 000019.sst │ │ │ ├── CURRENT │ │ │ ├── IDENTITY │ │ │ ├── LOCK │ │ │ ├── MANIFEST-000004 │ │ │ └── OPTIONS-000007 │ │ ├── block_time_history_db/ │ │ │ ├── CURRENT │ │ │ ├── IDENTITY │ │ │ ├── LOCK │ │ │ ├── MANIFEST-000004 │ │ │ └── OPTIONS-000007 │ │ └── reward_history_db/ │ │ ├── CURRENT │ │ ├── IDENTITY │ │ ├── LOCK │ │ ├── MANIFEST-000004 │ │ └── OPTIONS-000007 │ └── seed_txs/ │ ├── -B7wF8TF5AodemKM2UjeFySwA_-Q12Ai8z9FSqgIEyA.json │ ├── 0KMeq830vwvxUUM7RLCwE0ve4i0h_XHugbUTCkPNH-M.json │ ├── 1QGjyW1AEFlrFAs6VtUcmwOVOEZJjxaBR_z61W9mftI.json │ ├── 1VknqhhAXRQ6hzeZL-IMVBznTFCdiWcwlXhzpLKS8Zk.json │ ├── 1fzKf0Ygc-z3ejpZ1ZLOiNBYDRzViGRdPLtUqRS1nKY.json │ ├── 2dxNaIAvkAuL_N2qpTGSl7d7rU3Hu7d4l4IkYb9jgDU.json │ ├── 35wYULjhQBiTFh9u-PJz6ki0v7Zi1whk_AhowUt99Ac.json │ ├── 3DSCNJ5H9Hpyy7auT9qG5vom9jHBrCgjs48w_R6iSJg.json │ ├── 
4QcodvSlgZnuz5uWGmBARsGUJ5XaYORIO5jYM1dTucI.json │ ├── 4tlIV1x4YRWtNMut11ox9SS-lWt3xIzcXnrBBbNxGYs.json │ ├── 5iK4mPnFqGdUxpiZmGtTbj7xoSC2una7sjsbUyZkOmM.json │ ├── 7M4KyVB4Wr-Le3Knb7JExgnsXTtG7718JIlhVBNstlE.json │ ├── 7fat_nqzDJCTfJMqyEpOcavt1cZNM-tfSzASJd0wrHo.json │ ├── 8CPVZq-zPdMQ2to1P91vl6XBXyL7sLH8-vNclnOCug8.json │ ├── 8TiSScQCv06oS9b8Tt5WBnf7sUVgzPAFGJ3Lq2bt8rY.json │ ├── 8qtH9T9jgYLHH-xi39w1OCNJykqew1O5qzrDkhAxN0U.json │ ├── 9hX3cS3Vjr6vAqJW3WtPN665NpLJegcxyaDZO2esElM.json │ ├── A5oMEDa7ZEm1kjPlXpwjuZd40rqP6eo3GobNGQY4HlY.json │ ├── Ace-njSprwHMwZaW5nuD0y1lKFoaafU3T8d7PLBeEIA.json │ ├── AoCuo7S7ewDIqhYheBX6AjShrbyTgIv6Fp1AwQgmGqg.json │ ├── BFfNP1eCeYIkLiWWAVvHNLzk1N2pxkOChFzQbdv1IiA.json │ ├── B_F4zIV1I5DXM-lR-Ko1tVUTTSmLCOYR7PoY8V8wFas.json │ ├── CCH2h2MzMP7WMh0Xf3GYL7zZDbU7E4CZPJWngp1qmDc.json │ ├── CQv9OVOCzntq2DRqNJ9j_WnWPcsniyGRXpt4i_a8Iy0.json │ ├── Cdcx7-UZJN324I9L47rrph9dIVy8RwfJa9mY7cJp9gk.json │ ├── D29DVKVYAe74sAj9NBQ351rI6SseWZ5MMsSedGtydS8.json │ ├── D_3jwPKLfcTpWcrDV1Q7k3D4sMtyfw7vd45D2C9pUNA.json │ ├── DlRct3GdPx7oYi3MSdmv16CgGWqhLJjbrKcIfU0E48I.json │ ├── EDt8sO0AWKJyNeUxd-U6ihy0rgRKUPjpfRGarEHlOCs.json │ ├── EayO1EsmOinnbi-NVa2V7cVraoI0TZ6xE5-sNU7fc94.json │ ├── F3c9tsVvmCiFNxK7hVEzROraVm477QdyQ8t6afBs5E4.json │ ├── FIrCkHY8jVkXcIkWYbMpuQSRYxavkOQ3wtUZPwMS1hM.json │ ├── FbeSRhJR00VPygimhm47VwirSeBATnlf240hv4a2G4E.json │ ├── G6JD1n-FXMSyTSryo0HoX7L3i7e4KEFK_ekDMEn9Bcg.json │ ├── HOMVwtocaJIRPdCeKgzorJZJq1jw_lVGz0pQ3POj7No.json │ ├── Hiu5cti9FefwcvT6xRCIoADUMkuDEm_6pZo92CK3fiw.json │ ├── HoEZ6sK46bzTg4Jzrfy1kHFzkFQgI2UMm9pm0qJS3as.json │ ├── Hs-Yj4ZE9ACfQIjzS8E-qvxSkQALsCIDHwcLEMnlz90.json │ ├── Hv0Q5APV6ARfDXDpxI-07R1YFSJAQpxTFh1Z8_nCk3U.json │ ├── I4ifBnOF6OQFautfisGFTVIn2NsrvqrdnQ-O7JOMouE.json │ ├── Ie-fxxzdBweiA0N1ZbzUqXhNI310uDUmaBc3ajlV6YY.json │ ├── IeEkQUBq3aE2CSbCF2Bk126lLaLZEYjUPJ_IO601tZg.json │ ├── IqJf6iISeiEj3oof9491-jQX4drDZ92VoFuZqNmoixk.json │ ├── JDG-HBsrHGDodot2clC3nNkRKV5cvuhRWZjCwVFHG_Q.json │ ├── 
JDS1sGkpC0ua7UGfpLEJSF-jXUnjAs2fa5V7y6rccdY.json │ ├── JNCYRy7XYR_20vvXEAwpT43ovKB23np9yE9cqQfsIJk.json │ ├── JUf6alhhrfuL22XuQ0yrZ6_xBFBIQqi85wRxv2nUCMs.json │ ├── JeP9HaxmjN-TcbCkhKDIQejkGdKTlOgp68O5cy_2GRc.json │ ├── JfTiLBj5Gxr1v7JwoNf9-7sRAiLOrg1AZ6kqwSkEpTc.json │ ├── Jo3rf0JPJR2kCHBqZG71xouWzuOSY-MXJufpfzFl7sE.json │ ├── K0w8hOO1oCu4sQipWDQGyEFvn6kAXO-M93neMZmRoUc.json │ ├── Lt7WJclVu4iYHqGHIYIBia3ABMnvmd5cW4ELIzUTfPE.json │ ├── MCCCpl9AGNAzy3WvM5lniJ88iC3-8NPiiWIsxcLZZxQ.json │ ├── MGDpPk3LsexVpFBF43-FIIvc0vyeEDroYcIONJ6abd0.json │ ├── MQD8-8yIZwNC4A006TC1FVZSyCDHeIAN6YpDbTiX2RU.json │ ├── MklsZ_cDz470C40UGZUJoVfMeVA89-r7SHxuomBeCPM.json │ ├── NBjbIMFIdd6jFhSZ20izEke9Ju8jMuvYl8O4bqe4wC4.json │ ├── NixeAD5Y_8sQfcrMBWkODQuoXgJouUBmQmQzwTzlaKU.json │ ├── OGA55Jyg2c-Jhkx5zDNyiDvbFZiRXF0S_JESMhWAWcs.json │ ├── P5KQo3QSWLzTLWkq3wgJlii11CEUSKMG_O2NMN6y_8c.json │ ├── PgxqlgdluUGnmGCal3dgB6PYCd5S7FtBpI0zKDc8-AY.json │ ├── QAQ-134At0mSPVrwBzTTUalyL_zqE_dMR_WggkZvF5E.json │ ├── QyQL1TYdwmguUIBjTV-shWqrwS6AwxhZ6lf7Rx-vxH0.json │ ├── R5utplMYRQsJwA9Y63cL3Na4mXtYzE4gWG6g6zwgEQE.json │ ├── RJzScDd1IYIVaVOMo8zV2sXaGE4ZtKxwO2ONPFK-ou4.json │ ├── Re-7lkSGlYP4SFddz0rrXIF0r4MVYZuagjkVpEm79bY.json │ ├── TMjINkrJIS3kbGu8bmcVt_34TaFN8lINFQPR_YGzHss.json │ ├── TNj-jk-KpKzz84xb1SRiKqyp8LNBnONxA9SIXs3XU7k.json │ ├── Tg9QZvUPJoAZKRkPhPgQrgnlTY6s9UxRSQaMw6shhOU.json │ ├── U2DZlRhnzhZrC7GsVNX0TxnXbHh03P3g-cU4fkHpiXA.json │ ├── U4o0STLxwOEf42F4DF22ooOoA5Ykdp5j_D1io-4w1lc.json │ ├── UH3C65dDo62rp5ciK3XzyhufE71xorL7r7MWVwdhavk.json │ ├── U_1PPd40n2grpuhkMJcMXPVuJhtaQoUWei63iN2rS7o.json │ ├── U_UF7e-hOd5uLIj10fYZVxQ5mXyZUxvMxhWWgAMaj0s.json │ ├── UbW68tRQtThl9ah8tJb-X_af5M8FHYARiGZFiPGk_90.json │ ├── VL10zUkfmLz5eRxQsZi0G5wsfo8mvyN3p82updP17D4.json │ ├── ViCjDXb4IEZcXBtlYvTm3HCB6cf4gDbrXCCdvVVgB1g.json │ ├── WJTACYoRG89VIpjzsIZLIy93U7HoC4OJyLy6WAlqv-0.json │ ├── WwgngUwH7mXX15tdbfcjG_9gX2t8N8wbbfW2N34b3dA.json │ ├── XrtNbxWFUGlP-SYqQm8aYawQJU6H7CSyHpRZM1iLdKg.json │ ├── 
YMnQwrWWVRmkMs0B41lz-VdixskatlPcY7j4r0iSLbQ.json │ ├── YcTBCg3mLRFByb1cnjrq9DzEBnnOT9jQtfYEE34QZ1M.json │ ├── Yk_dta-f75GShvyUvXq132pohaNpiQgerfIKJA0vdCw.json │ ├── Zu9CSLWidXEnbSAQVuXGk62eMrVAGQb4qHmrtQrOQIQ.json │ ├── _AiF52l4uqTkKOVpQw9hr6l6FCIdWs8PCFtFxEBOopU.json │ ├── _BN_07s59sawk5e9YcjHTX2qtYX9q7nCBYrlSWXoEsc.json │ ├── _KI9ocPARF5JjaDPIbtpqw2hj_qRonw-AERjWOs5ZYM.json │ ├── _gduN41u7Xxac_Gm3pBQI3icoKhOfiRV2TKhDnlyakU.json │ ├── bcbIZq2gy8ivQiUlEch7tjNoCcUMTTLhInMlj9P2P88.json │ ├── bhEMgsj4Yf5tdCDlwK9KpHmsgVLAsBDPOLtYeUDLw0M.json │ ├── cTmKy32Fbmlybl-WtbyuVFNhO11Efr4e_rGbzwAkPbs.json │ ├── clMyhm_qgwUJq68xb8Yf9EEaN3F7jgdqgKnKgjVRom8.json │ ├── d8CQoDBSrekoGZXqTatc7Y5JkHtNviX1D3JD-fxFDmU.json │ ├── dMTZgKHD-NkP3iM5RjFNhppiwfTlYd-Imi9aA6IK0So.json │ ├── eGYHUFl46laNa8v_WjdadvCkIErWqmx0hoia7PCSmSw.json │ ├── ehTWq16I6ixhFOVkpTKi7s4jgYjNzGJ5CoJW3xjHDTE.json │ ├── fAnOUj-jmlzPMtIN90ZvowG9VUmBtD36MZ8-tRP1Ut4.json │ ├── fr3nkF8AHXTcq9bT_b7x2X7Mun2A--Ssb7eyoKgQEwI.json │ ├── goAmthhGPdbYUqbAymyG_MjBUWVdS9OBm78mOoiITHo.json │ ├── hB1Hj0mfuh_x3ijhqkw1s3wdCh8qdPz_IMs0MPraVCk.json │ ├── hMfNPSlINViUDVnor18GgPs0Ut0i9XY7dwM9MVOL-2I.json │ ├── hPnpcoVcfRdkyUyhYSFNhsEcz7nQU0UU-fPSiRalDvw.json │ ├── hQvPcHPcBhyxv7GPx-E3bZWiNBhnCpFIDwWa3XBcYEU.json │ ├── hXNDNwQ6zA7aHAqvfBj_az9CovV37bJywdgPdb_ooIA.json │ ├── hxyn3yZ7-LCgKqfkCljyM7Hq7HJnmPnEKaXoybXJjHo.json │ ├── iWUFDucATDE8gjbsL-9KpOIW9l8Ipsh1wliv4e05xhg.json │ ├── ivWTdg5M9XqjP-Iu4C97r3qZQhotJgfF17g__7EH7VM.json │ ├── jOFeroI0Oz4TWcOx8mgv4iOZLv6ncbRXFRtJfqS4Pq0.json │ ├── jStDc8gP5lyHVSFIJiT_2RrXhT26GpAhNItDEje07_Y.json │ ├── kLP-8ILxdLSAQsrC6IwvfqQL6Loq2Q6lqOzwrnb6QoE.json │ ├── kVNsLH0kpIkFnBBGWxoIajVLSpvzmsKHpsATPAcR86Y.json │ ├── ks0ODNqrNY4CCDxJcrgRY324WykCeTiSH4Tmdi30I2E.json │ ├── lF7NSIz6CNf8WsMNQl8It8HbJem3MAllokozblLdU5A.json │ ├── ldoaD2NbG9VRhLOXddM1ypoAU3W5gR_zabUWZa4r6lM.json │ ├── lxtOUAEj-E1jb6J8uGCRlRgJDHJyFOu0O73jQHnAhpg.json │ ├── m1DnUoXf7wMtIGkkDZAALobw0GbGehfEMX_jNLvs3i8.json │ ├── 
n1GVITzrvCF95Vz7l6hH7fdYzebDDAJav5z4-9C7lB4.json │ ├── ntnx85KcYZ_ZhR6dL2A_p8foCmStgD-69ODoOUdipiA.json │ ├── o9ArU5IxydvpJo2iiPI-p4EGBwlpBlyFIfbnz8Qrg6c.json │ ├── oNZMr_dB-L40nSUj6Fc19-FGteHQu7ZaRZu9_mgM1BI.json │ ├── oO7raEVlJC6KhfK-UbNuppzbYPGdKWbh1e6rOymd_-o.json │ ├── oiYeEvWqOkaHzCSunznZ09U_tuHqP1UyZkRrKYHgNBw.json │ ├── ojgJyXT8qwRXj1hOVx2gbeJDT0xEOIye0o9EbfU2LRM.json │ ├── p0MVPvnv_lkWwfhSuSCgQ3NUj83shBffAx1NKPn4oy8.json │ ├── p4oyXU5C3T0ZycNhEwBZ0MbpV0j3voWV4mr__3fhOek.json │ ├── q8aw85uHTIPxuXcv2Awts4JVVHEMCl7J-61WfnvbYuQ.json │ ├── qDEFXj8hSgOuuqWM52y6pbUX1cyp7bS4qItfctgtVx8.json │ ├── qHvSpQXYh9RZmXIoIOexmDs0iQgjCubl6KSsgg7cDz8.json │ ├── rAARxLc7tOdjUXEdNmSpOtsJIAw0XS229YHO1KOeUqI.json │ ├── rTaanqa6Z5KxtBV4Kj2Fu2KKqAWlstE0JeUbZ3AuN3o.json │ ├── rY4cJeAtYkg3bnTdqk4Vb0ojEcfS76L4B-iqyvQZ2VA.json │ ├── rcc-B4OWqf0dbVY7Eq6q3pRDHLUjJ8tix8UeLQ4D68w.json │ ├── sEw-yqeADuF0n_M6jTPLrOgH3coalIQHYPLrwM87nmo.json │ ├── sMF6pWIkJFygBbR2IS10liEsjsLAMDja_E9_yUvUgeI.json │ ├── t81tluHdoePSxjq7qG-6TMqBKmQLYr5gupmfvW25Y_o.json │ ├── tn3FQGSVFt_TE5nyQNpuf_gnHdaWF85hZg1iE5hPQSE.json │ ├── uNiZ8TfAQ8GWjtbqhVi90qO3U5dl9afmKE1-KbHQYM4.json │ ├── uOqsnEjVGQCbtrKI7QbHYxbbLUdCKC-792SgZr5KUKM.json │ ├── ujON59jsellR3M8hq9unBPISOwRgEVUogdi3FG_pVMk.json │ ├── uoTzfoaN81h2_JyFkrvXTLFMnoSlWiuc9Yu1CmsFkH0.json │ ├── vDtQzZ9jl6r7yzczhoKhvzekCQYx-qskwYdzQO92eWo.json │ ├── vFP1U-4lk3GypDZFceLvRXjoadcB2FRKrcNQf9WjzpQ.json │ ├── vYnzbcbBQbPQB7GKrXzPlz1MuT9cfnNI_NBVajaTnPg.json │ ├── vvPtX1U0EZS9PMsQBVk3mjD9yS6EHIt0FXdKf2dOELw.json │ ├── wntmnG9yRP9aoioRDILKkmSZqdemR-XDCIKJS-wpRYw.json │ ├── xCUsF5aatMdiiUAkGjg29_TiQGKqXpbzoMsB0yI-Dd8.json │ ├── xK4fFG-PbnQx6EGmmj1A0JVWQ9Bg7q-FncaU7hHk9ds.json │ ├── xaB3eS6qbtKSrfFACMcYpgxWRtaJfT1kmOVpyaE45tI.json │ ├── y9wJkLq6Q0hKSDD67ilFqtMMatw9qpsKM9W2uy2Rfjc.json │ ├── ycjvsn3A9cUMjnbDaSUpf1HRQd4duP9AL1YVwSjwuAQ.json │ ├── yo8VtPVXWBpTqLbLL-ZeOmZTW2HTqTzsf9RPzgHM-bQ.json │ ├── zNae10gPNkFt5aRVaSL2eSgxZiRDG79B9oDIeYqyzDY.json │ └── 
zavm_CqSq0KuWfc-E0JccEyrrQzjigxt7yuW1ceYjE0.json ├── nix/ │ ├── README.md │ ├── arweave.nix │ ├── generate-config.nix │ ├── module.nix │ └── options.nix ├── notebooks/ │ ├── README.md │ ├── autoredenomination_localnet.ipynb │ ├── pricing_transition_localnet.ipynb │ └── test.ipynb ├── priv/ │ └── templates/ │ ├── README.md │ ├── extended_bin │ └── vm_args ├── rebar.config ├── rebar3 ├── release_notes/ │ ├── N.2.9.5/ │ │ └── README.md │ ├── N.2.9.5-alpha5/ │ │ └── README.md │ ├── N.2.9.5-alpha6/ │ │ └── README.md │ ├── N.2.9.5.1/ │ │ └── README.md │ └── README.md ├── scripts/ │ ├── full_test_modules.txt │ ├── github_workflow.sh │ ├── ierl_kernel.sh │ ├── list_test_modules.sh │ ├── run_notebook.sh │ ├── run_notebook_headless.sh │ ├── setup_notebook_env.sh │ ├── surefire_to_html.py │ ├── system_info.sh │ └── testnet/ │ └── benchmark └── testnet/ ├── assert_testnet.sh ├── backup_data.sh ├── clear_data.sh ├── config/ │ ├── testnet-1.json │ ├── testnet-2.json │ ├── testnet-3.json │ ├── testnet-4.json │ ├── testnet-5.json │ └── testnet-6.json ├── rebuild_mainnet.sh ├── rebuild_testnet.sh ├── restore_data.sh ├── start_mainnet.sh └── start_testnet.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .cursor/BUGBOT.md ================================================ Always suggest one or two ways to patch the issues you are highlighting. ================================================ FILE: .cursor/rules/build.mdc ================================================ --- name: build description: "Arweave build commands and structure" alwaysApply: true --- # Arweave Project Rules To better reason about the Arweave protocol, **read `.cursor/rules/protocol.mdc`** in this directory. 
## Build Commands - To compile the project: `./ar-rebar3 prod compile` ## Project Structure - This is an Erlang/OTP project using rebar3 - It contains multiple apps in the `apps` directory - The main app is `arweave` - Source files: `apps//src/` - Test files: `apps//test/` - Include files: `apps//include/` ## Running Tests - To run tests for a specific module: `./bin/test ` - Example: `./bin/test ar_mining_io_tests` - To run tests for a specific test: `./bin/test :` - Example: `./bin/test ar_mining_io_tests:read_recall_range_test_` - Multiple modules and/or tests can be run together - Example: `./bin/test ar_mining_io_tests ar_data_sync_root_tests:data_roots_syncs_from_peer_test_` ## Testing Notes - Test profile uses smaller values for constants like `?PARTITION_SIZE` and `?REPLICA_2_9_ENTROPY_COUNT` (defined in rebar.config) ================================================ FILE: .cursor/rules/protocol.mdc ================================================ # Arweave protocol technical details and caveats Recall bytes always point to chunks, not sub-chunks. We always consider all sub-chunks of every chunk during mining. Nonce rem SubChunkCount determines which sub-chunk goes into the proof. ================================================ FILE: .gitattributes ================================================ localnet_snapshot/** filter=lfs diff=lfs merge=lfs -text ================================================ FILE: .github/workflows/e2e-test.yml ================================================ name: "Arweave e2e Tests Suites" on: workflow_dispatch: schedule: - cron: "0 13 * * *" jobs: build: runs-on: [self-hosted, ubuntu, amd64, build-runner] steps: - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true # only arweave dependencies are being cached, # those are not updated every day and it is # unnecessary to fetch them every time.
- uses: actions/cache@v4 id: cache with: path: | _build/default/lib/accept _build/default/lib/b64fast _build/default/lib/cowboy _build/default/lib/cowlib _build/default/lib/gun _build/default/lib/jiffy _build/default/lib/prometheus _build/default/lib/prometheus_cowboy _build/default/lib/prometheus_httpd _build/default/lib/prometheus_process_collector _build/default/lib/quantile_estimator _build/default/lib/ranch _build/default/lib/.rebar3 _build/default/lib/recon _build/default/lib/rocksdb _build/default/plugins/ _build/default/plugins/aleppo _build/default/plugins/geas _build/default/plugins/geas_rebar3 _build/default/plugins/hex_core _build/default/plugins/katana_code _build/default/plugins/pc _build/default/plugins/.rebar3 _build/default/plugins/rebar3_archive_plugin _build/default/plugins/rebar3_elvis_plugin _build/default/plugins/rebar3_hex _build/default/plugins/samovar _build/default/plugins/verl _build/default/plugins/zipper key: deps-cache-${{ hashFiles('rebar.lock') }} restore-keys: | deps-cache-${{ hashFiles('rebar.lock') }} - name: Get dependencies if: steps.cache.outputs.cache-hit != 'true' run: ./ar-rebar3 test get-deps - uses: actions/cache@v4 if: steps.cache.outputs.cache-hit != 'true' with: path: | _build/default/lib/accept _build/default/lib/b64fast _build/default/lib/cowboy _build/default/lib/cowlib _build/default/lib/gun _build/default/lib/jiffy _build/default/lib/prometheus _build/default/lib/prometheus_cowboy _build/default/lib/prometheus_httpd _build/default/lib/prometheus_process_collector _build/default/lib/quantile_estimator _build/default/lib/ranch _build/default/lib/.rebar3 _build/default/lib/recon _build/default/lib/rocksdb _build/default/plugins/ _build/default/plugins/aleppo _build/default/plugins/geas _build/default/plugins/geas_rebar3 _build/default/plugins/hex_core _build/default/plugins/katana_code _build/default/plugins/pc _build/default/plugins/.rebar3 _build/default/plugins/rebar3_archive_plugin 
_build/default/plugins/rebar3_elvis_plugin _build/default/plugins/rebar3_hex _build/default/plugins/samovar _build/default/plugins/verl _build/default/plugins/zipper key: deps-cache-${{ hashFiles('rebar.lock') }} - name: Compile arweave release run: ./ar-rebar3 default release - name: Build arweave test sources run: ./ar-rebar3 test compile - name: Build arweave e2e test sources run: ./ar-rebar3 e2e compile # some artifacts are compiled and only available # in arweave directory (libraries) - name: Prepare artifacts run: | chmod -R u+w ./_build tar czfp _build.tar.gz ./_build ./bin/arweave tar czfp apps.tar.gz ./apps # to avoid reusing artifacts from someone else # and generating issues, a unique artifact is # produced using github checksum. - name: upload artifacts uses: actions/upload-artifact@v4 with: name: build-${{ github.sha }} if-no-files-found: error include-hidden-files: true retention-days: 7 overwrite: true path: | _build.tar.gz apps.tar.gz e2e-tests: needs: build runs-on: [self-hosted, ubuntu, amd64, build-runner] strategy: max-parallel: 4 matrix: core_test_mod: [ ar_sync_pack_mine_tests, ar_repack_mine_tests, ar_repack_in_place_mine_tests ] steps: - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }} # Both artifacts (_build and apps dir) are # required. - name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - name: ${{ matrix.core_test_mod }}.erl id: tests run: bash scripts/github_workflow.sh "e2e" "${{ matrix.core_test_mod }}" # this part of the job produces test artifacts from logs # generated by the tests.
It also collects dumps and the files # present in .tmp (temporary arweave data store) - name: upload artifacts in case of failure uses: actions/upload-artifact@v4 if: always() && failure() with: name: "logs-${{ matrix.core_test_mod }}-${{ github.run_attempt }}-${{ job.status }}-${{ runner.name }}-${{ github.sha }}" retention-days: 7 overwrite: true include-hidden-files: true path: | ./logs *.out *.dump ================================================ FILE: .github/workflows/gitstamp.yaml ================================================ # See: https://github.com/weavery/gitstamp-action --- name: Gitstamp on: push: branches: - 'master' - 'release/**' - 'releases/**' pull_request_target: types: [closed] jobs: gitstamp: runs-on: [self-hosted, ubuntu, amd64, build-runner] name: Timestamp commit with Gitstamp steps: - name: Clone repository uses: actions/checkout@v2 - name: Submit Gitstamp transaction uses: weavery/gitstamp-action@v1 with: wallet-key: ${{ secrets.GITSTAMP_KEYFILE }} commit-link: true ================================================ FILE: .github/workflows/release.yml ================================================ ###################################################################### # All releases are generated using this workflow. The goal is to # make our life easier when releasing a new version of Arweave. # Instead of doing the process manually, some builders will be used # to produce the necessary tarballs, create the checksums, pack them # together and finally create the release on github side with a # message from the repository (in release_notes directory).
###################################################################### name: Release on: push: tags: - N.** workflow_dispatch: inputs: tag: required: true type: string make_latest: required: false default: true type: boolean prerelease: required: false default: true type: boolean draft: required: false default: false type: boolean jobs: # prepare ubuntu 22.04 release (jammy) on amd64 arch ubuntu-jammy-release: uses: ./.github/workflows/x-release-linux.yml secrets: inherit with: os_arch: amd64 os_release: jammy os_name: ubuntu tag: ${{ github.ref_name || inputs.tag }} # prepare ubuntu 24.04 release (noble) on amd64 arch ubuntu-noble-release: uses: ./.github/workflows/x-release-linux.yml secrets: inherit with: os_arch: amd64 os_release: noble os_name: ubuntu tag: ${{ github.ref_name || inputs.tag }} # prepare rocky 9 release on amd64 arch rockylinux-9-release: uses: ./.github/workflows/x-release-linux.yml secrets: inherit with: os_arch: x86_64 os_release: 9 os_name: rockylinux tag: ${{ github.ref_name || inputs.tag }} # prepare macos release on arm64 arch macos-release: uses: ./.github/workflows/x-release-macos.yml secrets: inherit with: tag: ${{ github.ref_name || inputs.tag }} # craft the release using the previous builds release: needs: - ubuntu-jammy-release - ubuntu-noble-release - rockylinux-9-release - macos-release permissions: contents: write packages: write runs-on: - self-hosted - release-runner - amd64 steps: # a new checkout is required to have the release # message from releases_notes directory. 
- uses: actions/checkout@v4 with: submodules: "recursive" lfs: true # let fetch ubuntu jammy tarball - name: Download Ubuntu Jammy Release uses: actions/download-artifact@v5 with: name: arweave-ubuntu-jammy-amd64 path: ./arweave-ubuntu-jammy-amd64 # let fetch ubuntu noble tarball - name: Download Ubuntu Noble Release uses: actions/download-artifact@v5 with: name: arweave-ubuntu-noble-amd64 path: ./arweave-ubuntu-noble-amd64 # let fetch rocky 9 tarball - name: Download Rockylinux 9 Release uses: actions/download-artifact@v5 with: name: arweave-rockylinux-9-x86_64 path: ./arweave-rockylinux-9-x86_64 # let fetch macos tarball - name: Download MacOS Release uses: actions/download-artifact@v5 with: name: arweave-macos-26-arm64 path: ./arweave-macos-26-arm64 # now this part is a bit tricky. it will rename # all tarball using the tag pushed. To avoid # some weird behaviors, the name is sanitized # by removing few symbols and replacing them # with a dash (-). - name: Prepare Release Tarballs and Checksums run: | #!/bin/sh set -eux # define variables releasedir="$(pwd)/_releases" ref=${{ github.ref_name }} release_name=$(echo ${ref} | sed -Ee 's!(/|:|@|\[|\]|\(|\)|\~)!-!g' -e 's!^N\.!!') # prepare release directory mkdir -p "${releasedir}" # prepare a release using a directory and a postfix name _prepare_release() { local dir=${1} local name=${2} local checksum cd ${dir} # rename arweave tarball cp arweave*.tar.gz ${releasedir}/arweave-${release_name}.${name}.tar.gz # prepare checksum file checksum=$(cat arweave*.tar.gz.SHA256 | awk '{ print $1 }') echo "${checksum} arweave-${release_name}.${name}.tar.gz" \ > ${releasedir}/arweave-${release_name}.${name}.tar.gz.SHA256 cd .. 
} # prepare releases for each distribution _prepare_release arweave-ubuntu-jammy-amd64 ubuntu22.x86_64 _prepare_release arweave-ubuntu-noble-amd64 ubuntu24.x86_64 _prepare_release arweave-rockylinux-9-x86_64 rocky9.x86_64 _prepare_release arweave-macos-26-arm64 macos.arm64 # check if the checksums are correct cd ${releasedir} cat *.SHA256 > checksums.txt cat *.SHA256 > SHA256 sha256sum -c checksums.txt sha256sum -c SHA256 cd .. # Release the version based on the new tag - name: Release the new version on github uses: softprops/action-gh-release@v2 if: startsWith(github.ref, 'refs/tags/') with: name: Release ${{ github.ref_name }} body_path: release_notes/${{ github.ref_name }}/README.md files: | _releases/*.tar.gz _releases/checksums.txt _releases/SHA256 LICENSE.md make_latest: ${{ inputs.make_latest || true }} prerelease: ${{ inputs.prerelease || true }} draft: ${{ inputs.draft || false }} ================================================ FILE: .github/workflows/test-amd64-ubuntu-22.04.yml ================================================ ###################################################################### # Test suite for Ubuntu 22.04. This is the official OS supported by # arweave team. The complete test suite should be executed. 
###################################################################### name: "Test on arch:amd64 distribution:ubuntu release:22.04" on: push: branches: ["**"] workflow_dispatch: jobs: build: uses: ./.github/workflows/x-build.yml secrets: inherit test-canary: needs: [build] uses: ./.github/workflows/x-test-canary.yml secrets: inherit common-test: needs: [test-canary] uses: ./.github/workflows/x-common-test.yml secrets: inherit test: needs: [test-canary] uses: ./.github/workflows/x-test-full.yml secrets: inherit ================================================ FILE: .github/workflows/test-arm64-macos-26.yml ================================================ ###################################################################### # Test suite for MacOS. The support of MacOS is mainly for the VDF # part, it should not be required to do the full test suite. ###################################################################### name: "Test on arch:arm64 distribution:macos release:26" on: push: branches: ["**"] workflow_dispatch: jobs: build: uses: ./.github/workflows/x-build.yml secrets: inherit with: os_arch: arm64 os_name: macos os_release: 26 test-canary: needs: [build] uses: ./.github/workflows/x-test-canary.yml secrets: inherit with: os_arch: arm64 os_name: macos os_release: 26 common-test: needs: [test-canary] uses: ./.github/workflows/x-common-test.yml secrets: inherit with: os_arch: arm64 os_name: macos os_release: 26 test: needs: [test-canary] uses: ./.github/workflows/x-test-vdf.yml secrets: inherit with: os_arch: arm64 os_name: macos os_release: 26 ================================================ FILE: .github/workflows/x-build.yml ================================================ ###################################################################### # Common way to build arweave. This template should be compatible with # any kind of systems but it expects the images/vm/servers used to # compile already have every requirement installed. 
###################################################################### name: "arweave-build-template" on: workflow_call: inputs: os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" env: cache_key: ${{ inputs.os_arch }}-${{ inputs.os_name }}-${{ inputs.os_release }} jobs: build-template: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} steps: # On standalone runners, it is always required to cleanup # first. By default executed on all runners, to start with # clean environment. - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" # checkout arweave repository and extract it in # working directory. - name: checkout arweave repository uses: actions/checkout@v4 with: submodules: "recursive" lfs: true # before doing anything, we would like to know what kind of # software and libraries are present on the system for debugging # purpose - name: get software and libraries information run: | sh ./scripts/system_info.sh > _version.yaml cat _version.yaml # only arweave dependencies are being cached, # those are not updated everyday and this is # unecessary to fetch them everytime. 
- name: extract cache uses: actions/cache@v4 id: cache with: path: | _build/default/lib/accept _build/default/lib/b64fast _build/default/lib/cowboy _build/default/lib/cowlib _build/default/lib/gun _build/default/lib/jiffy _build/default/lib/prometheus _build/default/lib/prometheus_cowboy _build/default/lib/prometheus_httpd _build/default/lib/prometheus_process_collector _build/default/lib/quantile_estimator _build/default/lib/ranch _build/default/lib/.rebar3 _build/default/lib/recon _build/default/lib/rocksdb _build/default/plugins/ _build/default/plugins/aleppo _build/default/plugins/geas _build/default/plugins/geas_rebar3 _build/default/plugins/hex_core _build/default/plugins/katana_code _build/default/plugins/pc _build/default/plugins/.rebar3 _build/default/plugins/rebar3_archive_plugin _build/default/plugins/rebar3_elvis_plugin _build/default/plugins/rebar3_hex _build/default/plugins/samovar _build/default/plugins/verl _build/default/plugins/zipper key: deps-cache-${{ hashFiles('rebar.lock') }}-${{ env.cache_key }} restore-keys: | deps-cache-${{ hashFiles('rebar.lock') }}-${{ env.cache_key }} - name: Get dependencies if: steps.cache.outputs.cache-hit != 'true' run: ./ar-rebar3 test get-deps - uses: actions/cache@v4 if: steps.cache.outputs.cache-hit != 'true' with: path: | _build/default/lib/accept _build/default/lib/b64fast _build/default/lib/cowboy _build/default/lib/cowlib _build/default/lib/gun _build/default/lib/jiffy _build/default/lib/prometheus _build/default/lib/prometheus_cowboy _build/default/lib/prometheus_httpd _build/default/lib/prometheus_process_collector _build/default/lib/quantile_estimator _build/default/lib/ranch _build/default/lib/.rebar3 _build/default/lib/recon _build/default/lib/rocksdb _build/default/plugins/ _build/default/plugins/aleppo _build/default/plugins/geas _build/default/plugins/geas_rebar3 _build/default/plugins/hex_core _build/default/plugins/katana_code _build/default/plugins/pc _build/default/plugins/.rebar3 
_build/default/plugins/rebar3_archive_plugin _build/default/plugins/rebar3_elvis_plugin _build/default/plugins/rebar3_hex _build/default/plugins/samovar _build/default/plugins/verl _build/default/plugins/zipper key: deps-cache-${{ hashFiles('rebar.lock') }}-${{ env.cache_key }} - name: Compile arweave release run: ./ar-rebar3 default release - name: Build arweave test sources run: ./ar-rebar3 test release # some artifacts are compiled and only available # in arweave directory (libraries). - name: Prepare artifacts run: | chmod -R u+w ./_build # rebar is using a lot of absolute symlinks, # this can generate issues on standalone workers; # to avoid this problem, links must be # dereferenced with the -h flag. tar czfhp _build.tar.gz ./_build ./bin/arweave tar czfhp apps.tar.gz ./apps # to avoid reusing artifacts from someone else # and generating issues, a unique artifact is # produced using github checksum. - name: upload artifacts uses: actions/upload-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} if-no-files-found: error retention-days: 1 overwrite: true path: | _version.yaml _build.tar.gz apps.tar.gz ================================================ FILE: .github/workflows/x-common-test.yml ================================================ ###################################################################### # Full Arweave Test Suite. Mostly used on Linux-like systems.
###################################################################### name: "arweave-common-test-suite" on: workflow_call: inputs: os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" env: cache_key: ${{ inputs.os_arch }}-${{ inputs.os_name }}-${{ inputs.os_release }} #################################################################### # Test modules (note: that _tests are implicitly run by a matching # prefix name #################################################################### jobs: common-test-suite: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. - name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz # This is a temporary fix to prevent test failures when # executing the test suite on a system like MacOS. The libraries # must be rebuilt due to CMake full path usage, if CMake is # re-used on another user (then with a different full path), it # will fail. Here, we force to clean all external libraries # objects before executing the test. - name: temporary external libraries fix run: make -C apps/arweave/lib clean # execute common test suite with test profile and coverage # enabled. - name: run common test suite id: common-tests run: | ./rebar3 as test ct \ --cover \ --verbose \ --logdir _ct_logs # execute eunit in standalone on only a small subset of # application available. 
Because arweave main application is # using eunit for its test suite in a specific way, testing this # application will crash the test suite. - name: run eunit test suite id: eunit run: | ./rebar3 as test eunit \ --cover \ --application arweave_config # generate coverage report, it will be stored in # _build/test/cover directory - name: run cover id: cover run: | ./rebar3 as test cover \ --verbose # generate markdown/html like report to have a quick view of # what happened during the tests - name: generate workflow summary run: |- if test -f ./_build/test/cover/index.html then echo "# Coverage Summary" >> $GITHUB_STEP_SUMMARY echo >> $GITHUB_STEP_SUMMARY cat ./_build/test/cover/index.html >> $GITHUB_STEP_SUMMARY fi if test -d ./_build/test/surefire then echo "# Eunit Report" >> $GITHUB_STEP_SUMMARY echo >> $GITHUB_STEP_SUMMARY for f in ./_build/test/surefire/*.xml do ./scripts/surefire_to_html.py "${f}" >> $GITHUB_STEP_SUMMARY done fi # upload test coverage report as artifact. - name: upload coverage report uses: actions/upload-artifact@v4 with: name: "coverage-common_test-${{ github.run_attempt }}-${{ job.status }}-${{ runner.name }}-${{ github.sha }}" retention-days: 7 overwrite: true include-hidden-files: true path: | ./_build/test/cover ./_build/test/surefire # this part of the job produces test artifacts from logs # generated by the tests. 
It also collects dumps and the files # present in .tmp (temporary arweave data store) - name: upload artifacts in case of failure uses: actions/upload-artifact@v4 if: always() with: name: "logs-common_test-${{ github.run_attempt }}-${{ job.status }}-${{ runner.name }}-${{ github.sha }}" retention-days: 7 overwrite: true include-hidden-files: true path: | ./_ct_logs ./logs ================================================ FILE: .github/workflows/x-release-linux.yml ================================================ ###################################################################### # Release template for Linux based systems, including at this time # Ubuntu and Rockylinux. ###################################################################### name: "arweave-release-linux-template" on: workflow_call: inputs: tag: required: true type: string os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" jobs: linux-release: runs-on: - self-hosted - release-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Create Arweave Release run: | set -eux ./rebar3 as prod tar f=$(ls _build/prod/rel/arweave/arweave-*.tar.gz) sha256sum ${f} > ${f}.SHA256 - name: Prepare tarball tests run: | set -eux f=$(ls ${PWD}/_build/prod/rel/arweave/arweave-*.tar.gz) mkdir _extract cd _extract tar zxf "${f}" - name: Test vdf openssl run: | cd _extract ./bin/arweave benchmark vdf mode openssl verify true 2>&1 \ | grep -E "^VDF step computed in .* seconds." - name: Test vdf fused run: | cd _extract ./bin/arweave benchmark vdf mode fused verify true 2>&1 \ | grep -E "^VDF step computed in .* seconds."
- name: Test create rsa key run: | cd _extract mkdir _rsa set +e (./bin/arweave wallet create rsa _rsa 2>&1) >/dev/null set -e test 1 = $(ls _rsa/wallets/arweave_keyfile*.json | wc -l) - name: Test create ecdsa run: | cd _extract mkdir _ecdsa set +e (./bin/arweave wallet create ecdsa _ecdsa 2>&1) >/dev/null set -e test 1 = $(ls _ecdsa/wallets/arweave_keyfile*.json | wc -l) - name: Test arweave execution run: | cd _extract ./bin/arweave foreground 2>&1 \ | grep -Ee '^Usage: arweave-server' \ -e '^Compatible with network: arweave.N.1' - name: Cleanup tests if: always() run: | rm -rf _extract - name: Upload Arweave artifacts uses: actions/upload-artifact@v4 with: name: arweave-${{ inputs.os_name }}-${{ inputs.os_release }}-${{ inputs.os_arch }} if-no-files-found: error path: | _build/prod/rel/arweave/arweave-*.tar.gz _build/prod/rel/arweave/arweave-*.tar.gz.SHA256 ================================================ FILE: .github/workflows/x-release-macos.yml ================================================ ###################################################################### # release template for MacOS. 
###################################################################### name: "arweave-release-macos-template" on: workflow_call: inputs: tag: required: true type: string os_arch: description: "operating system architecture" default: "arm64" type: "string" os_name: description: "operating system name" default: "macos" type: "string" os_release: description: "operating system release" default: "26" type: "string" jobs: macos-release: runs-on: - self-hosted - release-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Create Arweave Release run: | set -eux ./rebar3 as prod tar f=$(ls _build/prod/rel/arweave/arweave-*.tar.gz) sha256sum ${f} > ${f}.SHA256 - name: Prepare tarball tests run: | set -eux f=$(ls ${PWD}/_build/prod/rel/arweave/arweave-*.tar.gz) mkdir _extract cd _extract tar zxf "${f}" - name: Test vdf openssl run: | cd _extract ./bin/arweave benchmark vdf mode openssl verify true 2>&1 \ | grep -E "^VDF step computed in .* seconds." - name: Test vdf fused run: | cd _extract ./bin/arweave benchmark vdf mode fused verify true 2>&1 \ | grep -E "^VDF step computed in .* seconds." - name: Test vdf hiopt_m4 run: | cd _extract ./bin/arweave benchmark vdf mode hiopt_m4 verify true 2>&1 \ | grep -E "^VDF step computed in .* seconds." 
- name: Test create rsa key run: | cd _extract mkdir _rsa set +e (./bin/arweave wallet create rsa _rsa 2>&1) >/dev/null set -e test 1 = $(ls _rsa/wallets/arweave_keyfile*.json | wc -l) - name: Test create ecdsa run: | cd _extract mkdir _ecdsa set +e (./bin/arweave wallet create ecdsa _ecdsa 2>&1) >/dev/null set -e test 1 = $(ls _ecdsa/wallets/arweave_keyfile*.json | wc -l) - name: Test arweave execution run: | cd _extract ./bin/arweave foreground 2>&1 \ | grep -Ee '^Usage: arweave-server' \ -e '^Compatible with network: arweave.N.1' - name: Cleanup tests if: always() run: | rm -rf _extract - name: Upload Arweave artifacts uses: actions/upload-artifact@v4 with: name: arweave-${{ inputs.os_name }}-${{ inputs.os_release }}-${{ inputs.os_arch }} if-no-files-found: error path: | _build/prod/rel/arweave/arweave-*.tar.gz _build/prod/rel/arweave/arweave-*.tar.gz.SHA256 ================================================ FILE: .github/workflows/x-test-canary.yml ================================================ ###################################################################### # A canary test suite, checking if the test suite is correctly # working. ###################################################################### name: "arweave-test-suite-canary-template" on: workflow_call: inputs: os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" env: cache_key: ${{ inputs.os_arch }}-${{ inputs.os_name }}-${{ inputs.os_release }} #################################################################### # Canary testing, should fail. 
#################################################################### jobs: canary: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. - name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - id: canary name: ar_canary.erl continue-on-error: true run: bash scripts/github_workflow.sh "tests" "ar_canary" - name: should fail run: | if test "${{ steps.canary.outcome }}" = "failure" then exit 0 else exit 1 fi ================================================ FILE: .github/workflows/x-test-full.yml ================================================ ###################################################################### # Full Arweave Test Suite. Mostly used on Linux like systems. 
###################################################################### name: "arweave-test-suite-full" on: workflow_call: inputs: os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" env: cache_key: ${{ inputs.os_arch }}-${{ inputs.os_name }}-${{ inputs.os_release }} #################################################################### # Test modules (note: that _tests are implicitly run by a matching # prefix name #################################################################### jobs: full-test-modules: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} outputs: core_test_mods: ${{ steps.core-test-mods.outputs.core_test_mods }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" - name: load full test modules id: core-test-mods run: | echo "core_test_mods=$(bash scripts/list_test_modules.sh json)" >> "$GITHUB_OUTPUT" eunit-tests-suite: needs: full-test-modules runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} strategy: fail-fast: false max-parallel: 12 matrix: core_test_mod: ${{ fromJson(needs.full-test-modules.outputs.core_test_mods) }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. 
- name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - name: ${{ matrix.core_test_mod }}.erl id: tests run: bash scripts/github_workflow.sh "tests" "${{ matrix.core_test_mod }}" # this part of the job produces test artifacts from logs # generated by the tests. It also collect dumps and the files # present in .tmp (temporary arweave data store) - name: upload artifacts in case of failure uses: actions/upload-artifact@v4 if: always() && failure() with: name: "logs-${{ matrix.core_test_mod }}-${{ github.run_attempt }}-${{ job.status }}-${{ runner.name }}-${{ github.sha }}" retention-days: 7 overwrite: true include-hidden-files: true path: | ./logs *.out *.dump notebook-pricing-localnet: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Fetch Git LFS objects run: | git lfs install --local git lfs pull git lfs checkout - name: Setup Python uses: actions/setup-python@v5 with: python-version: "3.11" - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. 
- name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - name: Setup notebook env run: scripts/setup_notebook_env.sh - name: Run pricing transition notebook headless run: scripts/run_notebook_headless.sh pricing_transition_localnet notebook-autoredenomination-localnet: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Fetch Git LFS objects run: | git lfs install --local git lfs pull git lfs checkout - name: Setup Python uses: actions/setup-python@v5 with: python-version: "3.11" - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. - name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - name: Setup notebook env run: scripts/setup_notebook_env.sh - name: Run autoredenomination notebook headless run: scripts/run_notebook_headless.sh autoredenomination_localnet ================================================ FILE: .github/workflows/x-test-on-demand.yml ================================================ ###################################################################### # Full Arweave Test Suite. On demand. 
###################################################################### name: "arweave-test-suite-on-demand" on: workflow_call: inputs: test: type: "string" required: true os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" env: cache_key: ${{ inputs.os_arch }}-${{ inputs.os_name }}-${{ inputs.os_release }} #################################################################### # Test modules (note: that _tests are implicitly run by a matching # prefix name #################################################################### jobs: eunit-tests-suite: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} strategy: fail-fast: false steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. - name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - name: ${{ inputs.test }}.erl id: tests run: bash scripts/github_workflow.sh "tests" "${{ inputs.test }}" # this part of the job produces test artifacts from logs # generated by the tests. 
It also collect dumps and the files # present in .tmp (temporary arweave data store) - name: upload artifacts in case of failure uses: actions/upload-artifact@v4 if: always() && failure() with: name: "logs-${{ inputs.test }}-${{ github.run_attempt }}-${{ job.status }}-${{ runner.name }}-${{ github.sha }}" retention-days: 7 overwrite: true include-hidden-files: true path: | ./logs *.out *.dump ================================================ FILE: .github/workflows/x-test-vdf.yml ================================================ ###################################################################### # Arweave Test Suite dedicated for VDF testing. ###################################################################### name: "arweave-test-suite-vdf" on: workflow_call: inputs: os_arch: description: "operating system architecture" default: "amd64" type: "string" os_name: description: "operating system name" default: "ubuntu" type: "string" os_release: description: "operating system release" default: "22.04" type: "string" env: cache_key: ${{ inputs.os_arch }}-${{ inputs.os_name }}-${{ inputs.os_release }} #################################################################### # Test modules (note: that _tests are implicitly run by a matching # prefix name #################################################################### jobs: eunit-tests-suite: runs-on: - self-hosted - build-runner - ${{ inputs.os_arch }} - ${{ inputs.os_name }} - ${{ inputs.os_release }} strategy: fail-fast: false max-parallel: 12 matrix: core_test_mod: [ # modules ar, ar_block, ar_block_cache, ar_chain_stats, ar_chunk_copy, ar_chunk_storage, ar_deep_hash, ar_device_lock, ar_diff_dag, ar_entropy_gen, ar_entropy_storage, ar_ets_intervals, ar_events, ar_footprint_record, ar_inflation, ar_intervals, ar_join, ar_kv, arweave_limiter_group, arweave_limiter_metrics_collector, ar_merkle, ar_node, ar_node_utils, ar_nonce_limiter, ar_patricia_tree, ar_peers, ar_peer_intervals, ar_pricing, ar_retarget, ar_serialize, 
ar_tx_db, ar_unbalanced_merkle, ar_util, ar_wallet, ar_webhook, ar_pool, # standard ar_base64_compatibility_tests, ar_config_tests, ar_difficulty_tests, ar_forced_validation_tests, ar_header_sync_tests, ar_http_iface_tests, ar_http_util_tests, ar_info_tests, ar_mempool_tests, ar_mine_vdf_tests, ar_semaphore_tests, ar_start_from_block_tests, ar_tx_blacklist_tests, ar_vdf_tests, # long running ar_vdf_block_validation_tests, ar_vdf_server_tests, ar_vdf_external_update_tests, ar_cli_parser ] steps: - name: cleanup if: always() run: | rm -rf "${GITHUB_WORKSPACE}" && mkdir -p "${GITHUB_WORKSPACE}" - uses: actions/checkout@v4 with: submodules: "recursive" lfs: true - name: Download artifact uses: actions/download-artifact@v4 with: name: build-${{ github.sha }}-${{ env.cache_key }} # Both artifacts (_build and apps dir) are # required. - name: Extract artifact run: | tar zxfp _build.tar.gz tar zxfp apps.tar.gz - name: ${{ matrix.core_test_mod }}.erl id: tests run: bash scripts/github_workflow.sh "tests" "${{ matrix.core_test_mod }}" # this part of the job produces test artifacts from logs # generated by the tests. 
It also collect dumps and the files # present in .tmp (temporary arweave data store) - name: upload artifacts in case of failure uses: actions/upload-artifact@v4 if: always() && failure() with: name: "logs-${{ matrix.core_test_mod }}-${{ github.run_attempt }}-${{ job.status }}-${{ runner.name }}-${{ github.sha }}" retention-days: 7 overwrite: true include-hidden-files: true path: | ./logs *.out *.dump ================================================ FILE: .gitignore ================================================ *.beam *.log *.dat .vscode *.out *.code-workspace .arweave.plt testlog debug_logs apps/arweave/priv/tls apps/arweave/priv/*.so _build releases lib /result .rebar3 apps/arweave/c_src/**/*.o apps/arweave/c_src/tests/tests data_test_* erl_crash.dump tags metrics blocks txs hash_lists wallet_lists /wallets logs* !bin/logs ebin metrics_* release/output .DS_Store .tmp node_modules screenlog.0 _* *.swp *~ localnet_snapshot/wallets/arweave_keyfile_* !**/_*.json .jupyter/migrated notebooks/.ipynb_checkpoints ================================================ FILE: .gitmodules ================================================ [submodule "lib/RandomX"] path = apps/arweave/lib/RandomX url = https://github.com/ArweaveTeam/RandomX.git [submodule "apps/arweave/lib/secp256k1"] path = apps/arweave/lib/secp256k1 url = https://github.com/bitcoin-core/secp256k1 [submodule "apps/arweave/lib/openssl-sha-lite"] path = apps/arweave/lib/openssl-sha-lite url = https://github.com/ArweaveTeam/openssl-sha-lite ================================================ FILE: .jupyter/jupyter_server_config.py ================================================ import os def _strip_outputs_pre_save(model, **kwargs): if os.getenv("NOTEBOOK_SAVE_OUTPUTS") == "1": return if model.get("type") != "notebook": return content = model.get("content") if not content: return for cell in content.get("cells", []): if cell.get("cell_type") != "code": continue cell["outputs"] = [] cell["execution_count"] = None 
metadata = cell.get("metadata") if isinstance(metadata, dict): metadata.pop("execution", None) c.FileContentsManager.pre_save_hook = _strip_outputs_pre_save ================================================ FILE: CANARY.md ================================================ # Arweave Team Warrant Canary - The Arweave Team has not been contacted by any law enforcement officials regarding the project. Last update: 22 March 2024. - The Arweave Team has not been asked to break the encryption of the system by any party. Last update: 22 March 2024. - The Arweave Team has not been asked to reveal the identities of any backers. Last update: 22 March 2024. - The Arweave Team is not in any way under duress. Last update: 22 March 2024. ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing This is a quick overview for what you should know when contributing to this Git repository. - There is a code style guide in `arweave_styleguide.md`. Please note that we're using tabs for indentation. - Make sure the tests pass (see [README](README.md) for how to run the tests). - You can discuss development and get help from the Arweave organization and community in the `#dev` channel on [our Discord server](https://discord.gg/3UTNZky). ## Workflow 1. Fork the main Git repo `https://github.com/ArweaveTeam/arweave.git` 2. Branch out from `master`. 3. Add your changes. 4. Run the tests (see above). 5. Rebase your branch on the upstream `master` if the upstream `master` has moved since you branched out. 6. Create a PR back to the upstream `master`. Happy hacking! 
:) ================================================ FILE: LICENSE.md ================================================ GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. {description} Copyright (C) {year} {fullname} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. {signature of Ty Coon}, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. ================================================ FILE: README.md ================================================ # Arweave Server This is the repository for the official Erlang implementation of the Arweave protocol and a gateway implementation. Arweave is a distributed, cryptographically verified permanent archive built on a cryptocurrency that aims to, for the first time, provide feasible data permanence. By leveraging our novel Blockweave datastructure, data is stored in a decentralised, peer-to-peer manner where miners are incentivised to store rare data. 
# Contributing

For instructions on how to build and update the source, please refer to the
[Development Docs](https://docs.arweave.org/developers/development/getting-started)

# Contact

If you have questions or comments about Arweave you can get in touch by finding us on
[Twitter](https://twitter.com/ArweaveTeam/), [Reddit](https://www.reddit.com/r/arweave),
[Discord](https://discord.gg/DjAFMJc) or by emailing us at team@arweave.org.

For more information about the Arweave project visit
[https://www.arweave.org](https://www.arweave.org/)
or have a look at our [yellow paper](https://yellow-paper.arweave.dev).

# License

The Arweave project is released under GNU General Public License v2.0.
See [LICENSE](LICENSE.md) for full license conditions.

================================================
FILE: apps/arweave/c_src/Makefile
================================================
# Based on c_src.mk from erlang.mk by Loic Hoguin

CURDIR := $(shell pwd)
BASEDIR := $(abspath $(CURDIR)/..)

PROJECT ?= $(notdir $(BASEDIR))
PROJECT := $(strip $(PROJECT))

ifeq ($(MODE), debug)
CFLAGS ?= -O0 -g
CXXFLAGS ?= -O0 -g
else
CFLAGS ?= -O3
CXXFLAGS ?= -O3
endif

UNAME_SYS := $(shell uname -s)

# Configure SHA external libraries, we are using OPENSSL_LITE
# by default, for all systems
RANDOMX_LDFLAGS = ../lib/openssl-sha-lite/libcrypto.a

# Set default libs path for secp256k1 implementation
SECP256K1_LDLIBS = -L /usr/lib -L /usr/local/lib

ifeq ($(UNAME_SYS), Linux)
# _mm_crc32_u32 support
CFLAGS += -msse4.2
CXXFLAGS += -msse4.2
endif

ERTS_INCLUDE_DIR ?= $(shell erl -noshell -eval 'io:format("~ts/erts-~ts/include/", [code:root_dir(), erlang:system_info(version)]).' -s init stop)
ERL_INTERFACE_INCLUDE_DIR ?= $(shell erl -noshell -eval 'io:format("~ts", [code:lib_dir(erl_interface, include)]).' -s init stop)
ERL_INTERFACE_LIB_DIR ?= $(shell erl -noshell -eval 'io:format("~ts", [code:lib_dir(erl_interface, lib)]).' -s init stop)

# System type and C compiler/flags.
ifeq ($(UNAME_SYS), Darwin)
OSX_CPU_ARCH ?= x86_64
# nix systems may not have sysctl where uname -m will return the correct arch
SYSCTL_EXISTS := $(shell which sysctl 2>/dev/null)
ifneq ($(shell uname -m | egrep "arm64"),)
OSX_CPU_ARCH = arm64
else
ifdef SYSCTL_EXISTS
ifneq ($(shell sysctl -n machdep.cpu.brand_string | egrep "M(1|2)"),)
OSX_CPU_ARCH = arm64
endif
endif
endif
CC ?= cc
CFLAGS += -std=c99 -arch $(OSX_CPU_ARCH) -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS += -arch $(OSX_CPU_ARCH) -finline-functions -Wall
LDFLAGS ?= -arch $(OSX_CPU_ARCH)
LDFLAGS += -undefined suppress
# on MacOS, some libs are also present in /opt/homebrew/lib
SECP256K1_LDLIBS += -L /opt/homebrew/lib
else ifeq ($(UNAME_SYS), FreeBSD)
CC ?= cc
CFLAGS += -std=c99 -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS += -finline-functions -Wall
else ifeq ($(UNAME_SYS), Linux)
CC ?= gcc
CFLAGS += -std=c99 -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS += -finline-functions -Wall
endif

ifneq (, $(shell which pkg-config))
CFLAGS += -I../lib/openssl-sha-lite/include
CXXFLAGS += -I../lib/openssl-sha-lite/include
endif

C_SRC_DIR = $(CURDIR)

SECP256K1_CFLAGS += $(CFLAGS)
SECP256K1_LDLIBS += $(LDFLAGS)

CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR) -I /usr/local/include -I ../lib/RandomX/src -I $(C_SRC_DIR)
CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR) -I ../lib/RandomX/src -std=c++11

LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -L /usr/local/lib -lei

RX512_OUTPUT ?= $(CURDIR)/../priv/rx512_arweave.so
RX4096_OUTPUT ?= $(CURDIR)/../priv/rx4096_arweave.so
RXSQUARED_OUTPUT ?= $(CURDIR)/../priv/rxsquared_arweave.so
VDF_OUTPUT ?= $(CURDIR)/../priv/vdf_arweave.so

COMMON_RANDOMX_SOURCES = $(wildcard $(C_SRC_DIR)/randomx/*.c $(C_SRC_DIR)/randomx/*.cpp)
RX512_SOURCES = $(COMMON_RANDOMX_SOURCES) $(wildcard $(C_SRC_DIR)/*.c $(C_SRC_DIR)/randomx/rx512/*.c)
RX4096_SOURCES = $(COMMON_RANDOMX_SOURCES) $(wildcard $(C_SRC_DIR)/*.c $(C_SRC_DIR)/randomx/rx4096/*.c)
RXSQUARED_SOURCES = $(COMMON_RANDOMX_SOURCES) $(wildcard $(C_SRC_DIR)/*.c $(C_SRC_DIR)/randomx/rxsquared/*.c)
VDF_SOURCES = $(wildcard $(C_SRC_DIR)/*.c $(C_SRC_DIR)/vdf/*.c $(C_SRC_DIR)/vdf/*.cpp)

RX512_OBJECTS = $(addsuffix .o, $(basename $(RX512_SOURCES)))
RX4096_OBJECTS = $(addsuffix .o, $(basename $(RX4096_SOURCES)))
RXSQUARED_OBJECTS = $(addsuffix .o, $(basename $(RXSQUARED_SOURCES)))
VDF_OBJECTS = $(addsuffix .o, $(basename $(VDF_SOURCES)))

# NOTE tabs here will cause build fail
ifeq ($(UNAME_SYS), Linux)
$(C_SRC_DIR)/vdf/vdf_fused_x86.o: CXXFLAGS += -msha
endif
ifeq ($(UNAME_SYS), Darwin)
$(C_SRC_DIR)/vdf/vdf_fused_arm.o: CXXFLAGS += -march=armv8-a+crypto
$(C_SRC_DIR)/vdf/vdf_hiopt_arm.o: CXXFLAGS += -march=armv8-a+crypto
endif

ifeq ($(UNAME_SYS), Darwin)
VDF_ARM_ASM_OBJ = $(C_SRC_DIR)/vdf/sha256-armv8.o
VDF_OBJECTS += $(VDF_ARM_ASM_OBJ)
$(VDF_ARM_ASM_OBJ): $(C_SRC_DIR)/vdf/sha256-armv8.S
	@echo "Assembling ARM64 specific file: $<"
	clang -O3 -arch arm64 -c $(C_SRC_DIR)/vdf/sha256-armv8.S -o $(VDF_ARM_ASM_OBJ)
endif

# Verbosity.
c_verbose_0 = @echo " C " $(?F);
c_verbose = $(c_verbose_$(V))

cpp_verbose_0 = @echo " CPP " $(?F);
cpp_verbose = $(cpp_verbose_$(V))

link_verbose_0 = @echo " LD " $(@F);
link_verbose = $(link_verbose_$(V))

COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c

$(RX512_OUTPUT): $(RX512_OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CXX) $(RX512_OBJECTS) $(RANDOMX_LDFLAGS) $(LDFLAGS) $(LDLIBS) ../lib/RandomX/build512/librandomx512.a -shared -o $(RX512_OUTPUT)

$(RX4096_OUTPUT): $(RX4096_OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CXX) $(RX4096_OBJECTS) $(RANDOMX_LDFLAGS) $(LDFLAGS) $(LDLIBS) ../lib/RandomX/build4096/librandomx4096.a -shared -o $(RX4096_OUTPUT)

$(RXSQUARED_OUTPUT): $(RXSQUARED_OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CXX) $(RXSQUARED_OBJECTS) $(RANDOMX_LDFLAGS) $(LDFLAGS) $(LDLIBS) ../lib/RandomX/buildsquared/librandomxsquared.a -shared -o $(RXSQUARED_OUTPUT)

$(VDF_OUTPUT): $(VDF_OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CXX) $(VDF_OBJECTS) $(RANDOMX_LDFLAGS) $(LDFLAGS) $(LDLIBS) -shared -o $(VDF_OUTPUT)

SECP256K1_SOURCES = $(wildcard $(C_SRC_DIR)/*.c $(C_SRC_DIR)/secp256k1/*.c)
SECP256K1_OBJECTS = $(addsuffix .o, $(basename $(SECP256K1_SOURCES)))

SECP256K1_CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR) -I /usr/local/include -I $(CURDIR)/../lib/secp256k1/src -I $(CURDIR)/../lib/secp256k1/include -I $(C_SRC_DIR)
SECP256K1_LDLIBS += -L $(ERL_INTERFACE_LIB_DIR)
SECP256K1_OUTPUT ?= $(CURDIR)/../priv/secp256k1_arweave.so

$(SECP256K1_OUTPUT): $(SECP256K1_OBJECTS)
	@mkdir -p $(BASEDIR)/priv/
	$(link_verbose) $(CXX) $(SECP256K1_OBJECTS) $(SECP256K1_LDLIBS) ../lib/secp256k1/build/lib/libsecp256k1.a -shared -o $(SECP256K1_OUTPUT)

%secp256k1_nif.o: %secp256k1_nif.c
	$(c_verbose) $(CC) $(SECP256K1_CFLAGS) -c $(OUTPUT_OPTION) $<

%.o: %.c
	$(COMPILE_C) $(OUTPUT_OPTION) $<

%.o: %.cc
	$(COMPILE_CPP) $(OUTPUT_OPTION) $<

%.o: %.C
	$(COMPILE_CPP)
$(OUTPUT_OPTION) $< %.o: %.cpp $(COMPILE_CPP) $(OUTPUT_OPTION) $< all: $(RX512_OUTPUT) $(RX4096_OUTPUT) $(RXSQUARED_OUTPUT) $(VDF_OUTPUT) $(SECP256K1_OUTPUT) clean: @rm -f $(RX512_OUTPUT) $(RX4096_OUTPUT) $(RXSQUARED_OUTPUT) $(VDF_OUTPUT) $(RX512_OBJECTS) $(RX4096_OBJECTS) $(RXSQUARED_OBJECTS) $(VDF_OBJECTS) $(SECP256K1_OUTPUT) $(SECP256K1_OBJECTS) ================================================ FILE: apps/arweave/c_src/ar_nif.c ================================================ #include "ar_nif.h" #include // Utility functions. ERL_NIF_TERM solution_tuple(ErlNifEnv* envPtr, ERL_NIF_TERM hashTerm) { return enif_make_tuple2(envPtr, enif_make_atom(envPtr, "true"), hashTerm); } ERL_NIF_TERM ok_tuple(ErlNifEnv* envPtr, ERL_NIF_TERM term) { return enif_make_tuple2(envPtr, enif_make_atom(envPtr, "ok"), term); } ERL_NIF_TERM ok_tuple2(ErlNifEnv* envPtr, ERL_NIF_TERM term1, ERL_NIF_TERM term2) { return enif_make_tuple3(envPtr, enif_make_atom(envPtr, "ok"), term1, term2); } ERL_NIF_TERM error_tuple(ErlNifEnv* envPtr, const char* reason) { ERL_NIF_TERM reasonTerm = enif_make_string(envPtr, reason, ERL_NIF_LATIN1); return enif_make_tuple2(envPtr, enif_make_atom(envPtr, "error"), reasonTerm); } ERL_NIF_TERM make_output_binary(ErlNifEnv* envPtr, unsigned char *dataPtr, size_t size) { ERL_NIF_TERM outputTerm; unsigned char *outputTermDataPtr; outputTermDataPtr = enif_make_new_binary(envPtr, size, &outputTerm); memcpy(outputTermDataPtr, dataPtr, size); return outputTerm; } ================================================ FILE: apps/arweave/c_src/ar_nif.h ================================================ #ifndef AR_NIF_H #define AR_NIF_H #include ERL_NIF_TERM solution_tuple(ErlNifEnv*, ERL_NIF_TERM); ERL_NIF_TERM ok_tuple(ErlNifEnv*, ERL_NIF_TERM); ERL_NIF_TERM ok_tuple2(ErlNifEnv*, ERL_NIF_TERM, ERL_NIF_TERM); ERL_NIF_TERM error_tuple(ErlNifEnv*, const char*); ERL_NIF_TERM make_output_binary(ErlNifEnv*, unsigned char*, size_t); #endif // AR_NIF_H 
================================================ FILE: apps/arweave/c_src/randomx/ar_randomx_impl.h ================================================ #ifndef AR_RANDOMX_IMPL_H #define AR_RANDOMX_IMPL_H // Thif file includes the full definitions of any function that is shared between the // rx512 and rx4096 shared libraries. Although ugly this was the only way I could get // everything to work without causing symbol conflicts or seg faults once the two .so's // are loaded into arweave and the NIFs registered. There may be a better way! #include #include // From RandomX/src/jit_compiler.hpp // needed for the JIT compiler to work on OpenBSD, NetBSD and Apple Silicon #if defined(__OpenBSD__) || defined(__NetBSD__) || (defined(__APPLE__) && defined(__aarch64__)) #define RANDOMX_FORCE_SECURE #endif typedef enum { FALSE, TRUE } boolean; struct workerThread { ErlNifTid threadId; ErlNifThreadOpts *optsPtr; randomx_cache *cachePtr; randomx_dataset *datasetPtr; unsigned long datasetInitStartItem; unsigned long datasetInitItemCount; }; typedef enum { HASHING_MODE_FAST = 0, HASHING_MODE_LIGHT = 1, } hashing_mode; struct state { ErlNifRWLock* lockPtr; int isRandomxReleased; hashing_mode mode; randomx_dataset* datasetPtr; randomx_cache* cachePtr; }; ErlNifResourceType* stateType; static ERL_NIF_TERM init_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM info_nif(const char* rxSize, ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM hash_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static int load(ErlNifEnv* envPtr, void** priv, ERL_NIF_TERM info); static void state_dtor(ErlNifEnv* envPtr, void* objPtr); static boolean init_dataset( randomx_dataset *datasetPtr, randomx_cache *cachePtr, unsigned int numWorkers ); static void *init_dataset_thread(void *objPtr); static ERL_NIF_TERM init_failed(ErlNifEnv *envPtr, struct state *statePtr, const char* reason); static randomx_vm* create_vm(struct state* statePtr, int 
fullMemEnabled, int jitEnabled, int largePagesEnabled, int hardwareAESEnabled, int* isRandomxReleased); static void destroy_vm(struct state* statePtr, randomx_vm* vmPtr); static ERL_NIF_TERM init_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary key; hashing_mode mode; struct state *statePtr; ERL_NIF_TERM resource; unsigned int numWorkers; int jitEnabled, largePagesEnabled; randomx_flags flags; if (!enif_inspect_binary(envPtr, argv[0], &key)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[1], &mode)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[2], &jitEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &largePagesEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_uint(envPtr, argv[4], &numWorkers)) { return enif_make_badarg(envPtr); } statePtr = enif_alloc_resource(stateType, sizeof(struct state)); statePtr->cachePtr = NULL; statePtr->datasetPtr = NULL; statePtr->isRandomxReleased = 0; statePtr->mode = mode; statePtr->lockPtr = enif_rwlock_create("state_rw_lock"); if (statePtr->lockPtr == NULL) { return init_failed(envPtr, statePtr, "enif_rwlock_create failed"); } flags = RANDOMX_FLAG_DEFAULT; if (jitEnabled) { flags |= RANDOMX_FLAG_JIT; #ifdef RANDOMX_FORCE_SECURE flags |= RANDOMX_FLAG_SECURE; #endif } if (largePagesEnabled) { flags |= RANDOMX_FLAG_LARGE_PAGES; } statePtr->cachePtr = randomx_alloc_cache(flags); if (statePtr->cachePtr == NULL) { return init_failed(envPtr, statePtr, "randomx_alloc_cache failed"); } randomx_init_cache( statePtr->cachePtr, key.data, key.size); if (mode == HASHING_MODE_FAST) { statePtr->datasetPtr = randomx_alloc_dataset(flags); if (statePtr->datasetPtr == NULL) { return init_failed(envPtr, statePtr, "randomx_alloc_dataset failed"); } if (!init_dataset(statePtr->datasetPtr, statePtr->cachePtr, numWorkers)) { return init_failed(envPtr, statePtr, "init_dataset failed"); } randomx_release_cache(statePtr->cachePtr); 
statePtr->cachePtr = NULL; } else { statePtr->datasetPtr = NULL; } resource = enif_make_resource(envPtr, statePtr); enif_release_resource(statePtr); return ok_tuple(envPtr, resource); } static ERL_NIF_TERM info_nif( const char* rxSize, ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { struct state* statePtr; unsigned int datasetSize; unsigned int scratchpadSize; hashing_mode hashingMode; ERL_NIF_TERM hashingModeTerm; if (argc != 1) { return enif_make_badarg(envPtr); } if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) { return error_tuple(envPtr, "failed to read state"); } hashingMode = statePtr->mode; if (hashingMode == HASHING_MODE_FAST) { if (statePtr->datasetPtr == NULL) { return error_tuple(envPtr, "dataset is not initialized for fast hashing mode"); } if (statePtr->cachePtr != NULL) { return error_tuple(envPtr, "cache is initialized for fast hashing mode"); } hashingModeTerm = enif_make_atom(envPtr, "fast"); datasetSize = randomx_dataset_item_count(); scratchpadSize = randomx_get_scratchpad_size(); } else if (hashingMode == HASHING_MODE_LIGHT) { if (statePtr->datasetPtr != NULL) { return error_tuple(envPtr, "dataset is initialized for light hashing mode"); } if (statePtr->cachePtr == NULL) { return error_tuple(envPtr, "cache is not initialized for light hashing mode"); } hashingModeTerm = enif_make_atom(envPtr, "light"); datasetSize = 0; scratchpadSize = randomx_get_scratchpad_size(); } else { return error_tuple(envPtr, "invalid hashing mode"); } ERL_NIF_TERM infoTerm = enif_make_tuple4(envPtr, enif_make_atom(envPtr, rxSize), hashingModeTerm, enif_make_uint(envPtr, datasetSize), enif_make_uint(envPtr, scratchpadSize)); return ok_tuple(envPtr, infoTerm); } static ERL_NIF_TERM hash_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[] ) { int jitEnabled, largePagesEnabled, hardwareAESEnabled; unsigned char hashPtr[RANDOMX_HASH_SIZE]; struct state* statePtr; ErlNifBinary inputData; if (argc != 5) { return 
enif_make_badarg(envPtr); } if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) { return error_tuple(envPtr, "failed to read state"); } if (!enif_inspect_binary(envPtr, argv[1], &inputData)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[2], &jitEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &largePagesEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &hardwareAESEnabled)) { return enif_make_badarg(envPtr); } int isRandomxReleased; randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST), jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased); if (vmPtr == NULL) { if (isRandomxReleased != 0) { return error_tuple(envPtr, "state has been released"); } return error_tuple(envPtr, "randomx_create_vm failed"); } randomx_calculate_hash(vmPtr, inputData.data, inputData.size, hashPtr); destroy_vm(statePtr, vmPtr); return ok_tuple(envPtr, make_output_binary(envPtr, hashPtr, RANDOMX_HASH_SIZE)); } static int load(ErlNifEnv* envPtr, void** priv, ERL_NIF_TERM info) { int flags = ERL_NIF_RT_CREATE; stateType = enif_open_resource_type(envPtr, NULL, "state", state_dtor, flags, NULL); if (stateType == NULL) { return 1; } return 0; } static void state_dtor(ErlNifEnv* envPtr, void* objPtr) { struct state *statePtr = (struct state*) objPtr; if (statePtr->datasetPtr != NULL) { randomx_release_dataset(statePtr->datasetPtr); statePtr->datasetPtr = NULL; } if (statePtr->cachePtr != NULL) { randomx_release_cache(statePtr->cachePtr); statePtr->cachePtr = NULL; } statePtr->isRandomxReleased = 1; if (statePtr->lockPtr != NULL) { enif_rwlock_destroy(statePtr->lockPtr); statePtr->lockPtr = NULL; } } static boolean init_dataset( randomx_dataset *datasetPtr, randomx_cache *cachePtr, unsigned int numWorkers ) { struct workerThread **workerPtrPtr; struct workerThread *workerPtr; unsigned long itemsPerThread; unsigned long itemsRemainder; unsigned long 
startItem; boolean anyThreadFailed; workerPtrPtr = enif_alloc(sizeof(struct workerThread *) * numWorkers); itemsPerThread = randomx_dataset_item_count() / numWorkers; itemsRemainder = randomx_dataset_item_count() % numWorkers; startItem = 0; for (int i = 0; i < numWorkers; i++) { workerPtrPtr[i] = enif_alloc(sizeof(struct workerThread)); workerPtr = workerPtrPtr[i]; workerPtr->cachePtr = cachePtr; workerPtr->datasetPtr = datasetPtr; workerPtr->datasetInitStartItem = startItem; if (i + 1 == numWorkers) { workerPtr->datasetInitItemCount = itemsPerThread + itemsRemainder; } else { workerPtr->datasetInitItemCount = itemsPerThread; } startItem += workerPtr->datasetInitItemCount; workerPtr->optsPtr = enif_thread_opts_create("init_fast_worker"); if (0 != enif_thread_create( "init_dataset_worker", &(workerPtr->threadId), &init_dataset_thread, workerPtr, workerPtr->optsPtr)) { enif_thread_opts_destroy(workerPtr->optsPtr); enif_free(workerPtrPtr[i]); workerPtrPtr[i] = NULL; } } anyThreadFailed = FALSE; for (int i = 0; i < numWorkers; i++) { workerPtr = workerPtrPtr[i]; if (workerPtr == NULL) { anyThreadFailed = TRUE; } else if (0 != enif_thread_join(workerPtr->threadId, NULL)) { anyThreadFailed = TRUE; } if (workerPtr != NULL) { enif_thread_opts_destroy(workerPtr->optsPtr); enif_free(workerPtr); } } enif_free(workerPtrPtr); return !anyThreadFailed; } static void *init_dataset_thread(void *objPtr) { struct workerThread *workerPtr = (struct workerThread*) objPtr; randomx_init_dataset( workerPtr->datasetPtr, workerPtr->cachePtr, workerPtr->datasetInitStartItem, workerPtr->datasetInitItemCount); return NULL; } static ERL_NIF_TERM init_failed(ErlNifEnv *envPtr, struct state *statePtr, const char* reason) { if (statePtr->lockPtr != NULL) { enif_rwlock_destroy(statePtr->lockPtr); statePtr->lockPtr = NULL; } if (statePtr->cachePtr != NULL) { randomx_release_cache(statePtr->cachePtr); statePtr->cachePtr = NULL; } if (statePtr->datasetPtr != NULL) { 
randomx_release_dataset(statePtr->datasetPtr); statePtr->datasetPtr = NULL; } enif_release_resource(statePtr); return error_tuple(envPtr, reason); } static randomx_vm* create_vm(struct state* statePtr, int fullMemEnabled, int jitEnabled, int largePagesEnabled, int hardwareAESEnabled, int* isRandomxReleased) { enif_rwlock_rlock(statePtr->lockPtr); *isRandomxReleased = statePtr->isRandomxReleased; if (statePtr->isRandomxReleased != 0) { enif_rwlock_runlock(statePtr->lockPtr); return NULL; } randomx_flags flags = RANDOMX_FLAG_DEFAULT; if (fullMemEnabled) { flags |= RANDOMX_FLAG_FULL_MEM; } if (hardwareAESEnabled) { flags |= RANDOMX_FLAG_HARD_AES; } if (jitEnabled) { flags |= RANDOMX_FLAG_JIT; #ifdef RANDOMX_FORCE_SECURE flags |= RANDOMX_FLAG_SECURE; #endif } if (largePagesEnabled) { flags |= RANDOMX_FLAG_LARGE_PAGES; } randomx_vm *vmPtr = randomx_create_vm(flags, statePtr->cachePtr, statePtr->datasetPtr); if (vmPtr == NULL) { enif_rwlock_runlock(statePtr->lockPtr); return NULL; } return vmPtr; } static void destroy_vm(struct state* statePtr, randomx_vm* vmPtr) { randomx_destroy_vm(vmPtr); enif_rwlock_runlock(statePtr->lockPtr); } #endif ================================================ FILE: apps/arweave/c_src/randomx/crc32.h ================================================ #ifndef CRC32_H #define CRC32_H #if defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86) #include #define crc32(a, b) _mm_crc32_u32(a, b) #elif defined(__aarch64__) || defined(__arm__) || defined(_M_ARM64) || defined(_M_ARM) #include #define crc32(a, b) __crc32cw(a, b) #else // TODO make support for soft crc32 #error "Unsupported architecture for CRC32 operations." 
#endif #endif // CRC32_H ================================================ FILE: apps/arweave/c_src/randomx/feistel_msgsize_key_cipher.cpp ================================================ #include #include "feistel_msgsize_key_cipher.h" // NOTE feistel_encrypt_block/feistel_decrypt_block with less than 2 blocks have no sense void feistel_hash(const unsigned char *in_r, const unsigned char *in_k, unsigned char *out) { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, in_r, 32); SHA256_Update(&sha256, in_k, 32); SHA256_Final(out, &sha256); } // size_t key_len, void feistel_encrypt_block(const unsigned char *in_left, const unsigned char *in_right, const unsigned char *in_key, unsigned char *out_left, unsigned char *out_right) { // size_t round_count = key_len / FEISTEL_BLOCK_LENGTH; // unsigned char temp; unsigned char key_hash[FEISTEL_BLOCK_LENGTH]; unsigned char left[FEISTEL_BLOCK_LENGTH]; unsigned char right[FEISTEL_BLOCK_LENGTH]; const unsigned char *key = in_key; feistel_hash(in_right, key, key_hash); key += FEISTEL_BLOCK_LENGTH; for(int j = 0; j < FEISTEL_BLOCK_LENGTH; j++) { // temp = in_left[j] ^ key_hash[j]; right[j] = in_left[j] ^ key_hash[j]; left[j] = in_right[j]; // right[j] = temp; } // NOTE will be unused by arweave // for (size_t i = 1; i < round_count - 1; i++) { // feistel_hash(right, key, key_hash); // key += FEISTEL_BLOCK_LENGTH; // for(int j = 0; j < FEISTEL_BLOCK_LENGTH; j++) { // temp = left[j] ^ key_hash[j]; // left[j] = right[j]; // right[j] = temp; // } // } feistel_hash(right, key, key_hash); for(int j = 0; j < FEISTEL_BLOCK_LENGTH; j++) { // temp = left[j] ^ key_hash[j]; out_right[j] = left[j] ^ key_hash[j]; out_left[j] = right[j]; // out_right[j] = temp; } } void feistel_decrypt_block(const unsigned char *in_left, const unsigned char *in_right, const unsigned char *in_key, unsigned char *out_left, unsigned char *out_right) { // size_t round_count = key_len / FEISTEL_BLOCK_LENGTH; // unsigned char temp; unsigned char 
key_hash[FEISTEL_BLOCK_LENGTH]; unsigned char left[FEISTEL_BLOCK_LENGTH]; unsigned char right[FEISTEL_BLOCK_LENGTH]; // const unsigned char *key = in_key + FEISTEL_BLOCK_LENGTH + 2*FEISTEL_BLOCK_LENGTH*(round_count - 1); const unsigned char *key = in_key + FEISTEL_BLOCK_LENGTH; feistel_hash(in_left, key, key_hash); key -= FEISTEL_BLOCK_LENGTH; for(int j = 0; j < FEISTEL_BLOCK_LENGTH; j++) { // temp = in_right[j] ^ key_hash[j]; left[j] = in_right[j] ^ key_hash[j]; right[j] = in_left[j]; // left[j] = temp; } // NOTE will be unused by arweave // for (size_t i = 1; i < round_count - 1; i++) { // feistel_hash(left, key, key_hash); // key -= FEISTEL_BLOCK_LENGTH; // for(int j = 0; j < FEISTEL_BLOCK_LENGTH; j++) { // temp = right[j] ^ key_hash[j]; // right[j] = left[j]; // left[j] = temp; // } // } feistel_hash(left, key, key_hash); for(int j = 0; j < FEISTEL_BLOCK_LENGTH; j++) { // temp = right[j] ^ key_hash[j]; out_left[j] = right[j] ^ key_hash[j]; out_right[j] = left[j]; // out_left[j] = temp; } } // feistel_encrypt accepts padded message with 2*FEISTEL_BLOCK_LENGTH = 64 bytes // in_key_length == plaintext_len // CBC void feistel_encrypt(const unsigned char *plaintext, const size_t plaintext_len, const unsigned char *in_key, unsigned char *ciphertext) { size_t block_count = plaintext_len / (2*FEISTEL_BLOCK_LENGTH); unsigned char feed_key[2*FEISTEL_BLOCK_LENGTH] = {0}; const unsigned char *in = plaintext; unsigned char *out = ciphertext; const unsigned char *key = in_key; feistel_encrypt_block(in, in + FEISTEL_BLOCK_LENGTH, key, out, out + FEISTEL_BLOCK_LENGTH); in += 2*FEISTEL_BLOCK_LENGTH; key += 2*FEISTEL_BLOCK_LENGTH; for(size_t i = 1; i < block_count; i++) { for(int j = 0; j < 2*FEISTEL_BLOCK_LENGTH; j++) { feed_key[j] = key[j] ^ out[j]; } out += 2*FEISTEL_BLOCK_LENGTH; feistel_encrypt_block(in, in + FEISTEL_BLOCK_LENGTH, feed_key, out, out + FEISTEL_BLOCK_LENGTH); in += 2*FEISTEL_BLOCK_LENGTH; key += 2*FEISTEL_BLOCK_LENGTH; } } void feistel_decrypt(const unsigned 
// feistel_decrypt: inverse of feistel_encrypt.
//
// Blocks are processed back to front. For every block except the first,
// the feed key is the block's key slice XORed with the PREVIOUS ciphertext
// block (reproducing the CBC-style feedback applied during encryption);
// block 0 is decrypted with its raw key slice in the final call.
//
// ciphertext     - input, ciphertext_len bytes
// ciphertext_len - multiple of 2*FEISTEL_BLOCK_LENGTH
// in_key         - key stream, ciphertext_len bytes (same layout as encrypt)
// plaintext      - output buffer, ciphertext_len bytes
//
// NOTE(review): with ciphertext_len < 2*FEISTEL_BLOCK_LENGTH, block_count
// is 0 and `block_count-1` underflows size_t — assumes callers always pass
// at least one full 64-byte block; confirm against NIF-level validation.
void feistel_decrypt(const unsigned char *ciphertext, const size_t ciphertext_len,
		const unsigned char *in_key, unsigned char *plaintext) {
	size_t block_count = ciphertext_len / (2*FEISTEL_BLOCK_LENGTH);
	unsigned char feed_key[2*FEISTEL_BLOCK_LENGTH] = {0};
	// Start all cursors at the LAST 64-byte block and walk backwards.
	const unsigned char *in = ciphertext + ciphertext_len - 2*FEISTEL_BLOCK_LENGTH;
	unsigned char *out = plaintext + ciphertext_len - 2*FEISTEL_BLOCK_LENGTH;
	const unsigned char *key = in_key + ciphertext_len - 2*FEISTEL_BLOCK_LENGTH;
	for(size_t i = 0; i < block_count-1; i++) {
		for(int j = 0; j < 2*FEISTEL_BLOCK_LENGTH; j++) {
			// in[j - 2*FEISTEL_BLOCK_LENGTH] addresses the ciphertext
			// block immediately BEFORE the one being decrypted.
			feed_key[j] = key[j] ^ in[j - 2*FEISTEL_BLOCK_LENGTH];
		}
		feistel_decrypt_block(in, in + FEISTEL_BLOCK_LENGTH, feed_key,
				out, out + FEISTEL_BLOCK_LENGTH);
		in -= 2*FEISTEL_BLOCK_LENGTH;
		key -= 2*FEISTEL_BLOCK_LENGTH;
		out -= 2*FEISTEL_BLOCK_LENGTH;
	}
	// First block: no preceding ciphertext, so use the raw key slice.
	feistel_decrypt_block(in, in + FEISTEL_BLOCK_LENGTH, key,
			out, out + FEISTEL_BLOCK_LENGTH);
}
// Runs the RandomX program chain (as in a regular hash calculation) and
// returns a pointer to the machine's scratchpad, which serves as the
// "entropy" used by the Feistel chunk cipher below.
//
// machine             - initialized RandomX VM (must not be NULL)
// input               - seed bytes; may be NULL only when inputSize == 0
// inputSize           - length of `input`
// randomxProgramCount - number of chained RandomX program executions
//
// Returns a pointer into `machine`'s internal scratchpad; the caller must
// consume it before reusing the VM.
const unsigned char *randomx_calculate_hash_long_with_entropy_get_entropy(
		randomx_vm *machine, const unsigned char *input, const size_t inputSize,
		const int randomxProgramCount) {
	assert(machine != nullptr);
	assert(inputSize == 0 || input != nullptr);
	alignas(16) uint64_t tempHash[8];
	// Seed hash for scratchpad initialization.
	int blakeResult = randomx_blake2b(tempHash, sizeof(tempHash), input, inputSize,
			nullptr, 0);
	assert(blakeResult == 0);
	machine->initScratchpad(&tempHash);
	machine->resetRoundingMode();
	// Chain randomxProgramCount program runs, re-hashing the register file
	// between runs, mirroring the standard RandomX hashing loop.
	for (int chain = 0; chain < randomxProgramCount - 1; ++chain) {
		machine->run(&tempHash);
		blakeResult = randomx_blake2b(tempHash, sizeof(tempHash),
				machine->getRegisterFile(), sizeof(randomx::RegisterFile), nullptr, 0);
		assert(blakeResult == 0);
	}
	machine->run(&tempHash);
	// The 64-byte final hash is computed but intentionally discarded:
	// only the scratchpad contents are needed as entropy here.
	unsigned char output[64];
	machine->getFinalResult(output, RANDOMX_HASH_SIZE);
	return (const unsigned char*)machine->getScratchpad();
}

// Encrypts a chunk with the Feistel cipher, keyed by RandomX-derived entropy.
// The chunk must be padded to a multiple of 2*FEISTEL_BLOCK_LENGTH = 64 bytes
// and must not exceed RANDOMX_ENTROPY_SIZE (the scratchpad supplies one key
// byte per chunk byte).
RANDOMX_EXPORT void randomx_encrypt_chunk(randomx_vm *machine,
		const unsigned char *input, const size_t inputSize,
		const unsigned char *inChunk, const size_t inChunkSize,
		unsigned char *outChunk, const int randomxProgramCount) {
	assert(inChunkSize <= RANDOMX_ENTROPY_SIZE);
	assert(inChunkSize % (2*FEISTEL_BLOCK_LENGTH) == 0);
	const unsigned char *outputEntropy =
			randomx_calculate_hash_long_with_entropy_get_entropy(machine, input,
					inputSize, randomxProgramCount);
	feistel_encrypt((const unsigned char*)inChunk, inChunkSize, outputEntropy,
			(unsigned char*)outChunk);
}
(2*FEISTEL_BLOCK_LENGTH) == 0); const unsigned char *outputEntropy = randomx_calculate_hash_long_with_entropy_get_entropy(machine, input, inputSize, randomxProgramCount); feistel_decrypt((const unsigned char*)inChunk, inChunkSize, outputEntropy, (unsigned char*)outChunk); } } ================================================ FILE: apps/arweave/c_src/randomx/randomx_long_with_entropy.h ================================================ #ifndef RANDOMX_LONG_WITH_ENTROPY_H #define RANDOMX_LONG_WITH_ENTROPY_H #include "randomx.h" #define RANDOMX_ENTROPY_SIZE (256*1024) #if defined(__cplusplus) extern "C" { #endif RANDOMX_EXPORT void randomx_encrypt_chunk(randomx_vm *machine, const unsigned char *input, const size_t inputSize, const unsigned char *inChunk, const size_t inChunkSize, unsigned char *outChunk, const int randomxProgramCount); RANDOMX_EXPORT void randomx_decrypt_chunk(randomx_vm *machine, const unsigned char *input, const size_t inputSize, const unsigned char *inChunk, const size_t outChunkSize, unsigned char *outChunk, const int randomxProgramCount); #if defined(__cplusplus) } #endif #endif // RANDOMX_LONG_WITH_ENTROPY_H ================================================ FILE: apps/arweave/c_src/randomx/randomx_squared.cpp ================================================ #include #include #include "crc32.h" #include "randomx_squared.h" #include "feistel_msgsize_key_cipher.h" // imports from randomx #include "vm_compiled.hpp" #include "blake2/blake2.h" extern "C" { void _rsp_mix_entropy_near( const unsigned char *inEntropy, unsigned char *outEntropy, const size_t entropySize ) { // NOTE we can't use _mm_crc32_u64, because it output only final 32-bit result // NOTE commented variant is more readable but unoptimized unsigned int state = ~0; // unsigned int state = 0; const unsigned int *inEntropyPtr = (const unsigned int*)inEntropy; unsigned int *outEntropyPtr = (unsigned int*)outEntropy; for(size_t i=0;iresetRoundingMode(); for (int chain = 0; chain < 
programCount-1; chain++) { machine->run(tempHash); int blakeResult = randomx_blake2b( tempHash, 64, machine->getRegisterFile(), sizeof(randomx::RegisterFile), nullptr, 0 ); assert(blakeResult == 0); } machine->run(tempHash); int blakeResult = randomx_blake2b( tempHash, 64, machine->getRegisterFile(), sizeof(randomx::RegisterFile), nullptr, 0 ); assert(blakeResult == 0); _rsp_mix_entropy_near( (const unsigned char*)machine->getScratchpad(), (unsigned char*)(void*)machine->getScratchpad(), scratchpadSize); } void _copy_chunk_cross_lane( randomx_vm** inSet, randomx_vm** outSet, size_t srcPos, size_t dstPos, size_t length, size_t scratchpadSize ) { while (length > 0) { int srcLane = (int)(srcPos / scratchpadSize); size_t offsetInSrcLane = srcPos % scratchpadSize; int dstLane = (int)(dstPos / scratchpadSize); size_t offsetInDstLane = dstPos % scratchpadSize; size_t srcLaneRemain = scratchpadSize - offsetInSrcLane; size_t dstLaneRemain = scratchpadSize - offsetInDstLane; size_t chunkSize = length; if (chunkSize > srcLaneRemain) { chunkSize = srcLaneRemain; } if (chunkSize > dstLaneRemain) { chunkSize = dstLaneRemain; } unsigned char* srcSp = (unsigned char*)(void*) inSet[srcLane]->getScratchpad(); unsigned char* dstSp = (unsigned char*)(void*) outSet[dstLane]->getScratchpad(); memcpy(dstSp + offsetInDstLane, srcSp + offsetInSrcLane, chunkSize); srcPos += chunkSize; dstPos += chunkSize; length -= chunkSize; } } void _rsp_mix_entropy_far( randomx_vm** inSet, randomx_vm** outSet, int count, size_t scratchpadSize, size_t jumpSize, size_t blockSize) { size_t totalSize = (size_t)count * scratchpadSize; size_t entropySize = totalSize; size_t numJumps = entropySize / jumpSize; size_t numBlocksPerJump = jumpSize / blockSize; size_t leftover = jumpSize % blockSize; size_t outOffset = 0; for (size_t offset = 0; offset < numBlocksPerJump; ++offset) { for (size_t i = 0; i < numJumps; ++i) { size_t srcPos = i * jumpSize + offset * blockSize; _copy_chunk_cross_lane(inSet, outSet, 
// rsp_fused_entropy: generates laneCount * scratchpadSize bytes of packing
// entropy by alternating per-lane RandomX execution with cross-lane mixing,
// ping-ponging between the two halves of vmList (lanes 0..laneCount-1 and
// laneCount..2*laneCount-1).
//
// vmList              - array of 2*laneCount RandomX VMs
// scratchpadSize      - bytes per VM scratchpad
// subChunkCount       - unused here; kept for interface stability
// subChunkSize        - unused here; kept for interface stability
// laneCount           - number of parallel lanes (half of vmList)
// rxDepth             - number of exec+mix rounds
// randomxProgramCount - RandomX programs per exec step
// blockSize           - mixing granularity passed to _rsp_mix_entropy_far
// keyData/keySize     - base key; each lane is seeded from
//                       SHA256(keyData || laneIndex+1) hashed with blake2b
// outEntropy          - output buffer, laneCount * scratchpadSize bytes
//
// Returns 1 on success, 0 on allocation/hash failure.
int rsp_fused_entropy(
	randomx_vm** vmList,
	size_t scratchpadSize,
	int subChunkCount,
	int subChunkSize,
	int laneCount,
	int rxDepth,
	int randomxProgramCount,
	int blockSize,
	const unsigned char* keyData,
	size_t keySize,
	unsigned char* outEntropy
) {
	struct vm_hash_t {
		alignas(16) uint64_t tempHash[8]; // 64 bytes
	};
	// nothrow so an allocation failure is reported as 0, not an exception
	// escaping into the NIF layer.
	vm_hash_t* vmHashes = new (std::nothrow) vm_hash_t[laneCount];
	if (!vmHashes) {
		return 0;
	}
	// Initialize the scratchaps for each lane
	for (int i = 0; i < laneCount; i++) {
		// laneSeed = sha256(<<keyData, laneIndex>>)
		// laneSeed should be unique - i.e. now two lanes across all entropies and all
		// replicas should have the same seed. Current key (as off 2025-01-01) is
		// <<partition_start, entropy_index>> where entropy index is unique within
		// a given partition.
		unsigned char laneSeed[32];
		{
			SHA256_CTX sha256;
			SHA256_Init(&sha256);
			SHA256_Update(&sha256, keyData, keySize);
			// Lane indices are 1-based in the seed to avoid a 0 byte.
			unsigned char laneIndex = (unsigned char)i + 1;
			SHA256_Update(&sha256, &laneIndex, 1);
			SHA256_Final(laneSeed, &sha256);
		}
		int blakeResult = randomx_blake2b(
			vmHashes[i].tempHash, sizeof(vmHashes[i].tempHash),
			laneSeed, 32,
			nullptr, 0
		);
		if (blakeResult != 0) {
			delete[] vmHashes;
			return 0;
		}
		vmList[i]->initScratchpad(&vmHashes[i].tempHash);
	}
	// Depth loop: each iteration performs one exec+mix on the first VM half
	// (writing into the second half); when another depth remains, `d` is
	// advanced IN-LOOP and a second exec+mix runs on the second half,
	// writing back into the first half. So two depths are consumed per
	// iteration whenever possible.
	for (int d = 0; d < rxDepth; d++) {
		for (int lane = 0; lane < laneCount; lane++) {
			_rsp_exec_inplace(
				vmList[lane], vmHashes[lane].tempHash,
				randomxProgramCount, scratchpadSize);
		}
		// jumpSize == scratchpadSize here: mixing strides lane-sized jumps.
		_rsp_mix_entropy_far(&vmList[0], &vmList[laneCount],
			laneCount, scratchpadSize, scratchpadSize, blockSize);
		if (d + 1 < rxDepth) {
			d++;
			for (int lane = 0; lane < laneCount; lane++) {
				_rsp_exec_inplace(
					vmList[lane+laneCount], vmHashes[lane].tempHash,
					randomxProgramCount, scratchpadSize);
			}
			_rsp_mix_entropy_far(&vmList[laneCount], &vmList[0],
				laneCount, scratchpadSize, scratchpadSize, blockSize);
		}
	}
	// NOTE still unoptimal. Last copy can be performed from scratchpad to output.
	// But requires +1 variation (set to buffer)
	// Parity of rxDepth decides which half holds the final data: an even
	// depth ends with the mix writing into the first half... each full
	// two-step iteration ends in lanes 0..laneCount-1; an odd rxDepth ends
	// after the first half-step, leaving the result in the second half.
	if ((rxDepth % 2) == 0) {
		unsigned char* outEntropyPtr = outEntropy;
		for (int i = 0; i < laneCount; i++) {
			void* sp = (void*)vmList[i]->getScratchpad();
			memcpy(outEntropyPtr, sp, scratchpadSize);
			outEntropyPtr += scratchpadSize;
		}
	} else {
		unsigned char* outEntropyPtr = outEntropy;
		for (int i = laneCount; i < 2*laneCount; i++) {
			void* sp = (void*)vmList[i]->getScratchpad();
			memcpy(outEntropyPtr, sp, scratchpadSize);
			outEntropyPtr += scratchpadSize;
		}
	}
	delete[] vmHashes;
	return 1;
}
// Thin wrappers dispatching to the shared implementation in ar_randomx_impl.h.
static int rx4096_load(ErlNifEnv* envPtr, void** priv, ERL_NIF_TERM info) {
	return load(envPtr, priv, info);
}

static ERL_NIF_TERM rx4096_info_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) {
	return info_nif("rx4096", envPtr, argc, argv);
}

static ERL_NIF_TERM rx4096_init_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) {
	return init_nif(envPtr, argc, argv);
}

static ERL_NIF_TERM rx4096_hash_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) {
	return hash_nif(envPtr, argc, argv);
}

// Encrypts a composite chunk: the input is zero-padded to MAX_CHUNK_SIZE,
// split into subChunkCount equal sub-chunks, and each sub-chunk is encrypted
// independently `iterations` times with randomx_encrypt_chunk. Returns the
// encrypted MAX_CHUNK_SIZE binary as an Erlang term owned by envPtr.
//
// NOTE(review): malloc results are not checked; an allocation failure here
// would crash the emulator rather than return an error tuple.
static ERL_NIF_TERM encrypt_composite_chunk(ErlNifEnv* envPtr, randomx_vm *vmPtr,
		ErlNifBinary *inputDataPtr, ErlNifBinary *inputChunkPtr, const int subChunkCount,
		const int iterations, const int randomxRoundCount, const int jitEnabled,
		const int largePagesEnabled, const int hardwareAESEnabled) {
	unsigned char *paddedChunk = (unsigned char*)malloc(MAX_CHUNK_SIZE);
	if (inputChunkPtr->size == MAX_CHUNK_SIZE) {
		// Full-size chunk: no zero-fill needed before the copy.
		memcpy(paddedChunk, inputChunkPtr->data, inputChunkPtr->size);
	} else {
		// Short chunk: zero-pad the tail up to MAX_CHUNK_SIZE.
		memset(paddedChunk, 0, MAX_CHUNK_SIZE);
		memcpy(paddedChunk, inputChunkPtr->data, inputChunkPtr->size);
	}
	ERL_NIF_TERM encryptedChunkTerm;
	unsigned char* encryptedChunk = enif_make_new_binary(envPtr, MAX_CHUNK_SIZE,
			&encryptedChunkTerm);
	// MAX_CHUNK_SIZE / subChunkCount is a multiple of 64 so all sub-chunks
	// are of the same size.
	uint32_t subChunkSize = MAX_CHUNK_SIZE / subChunkCount;
	uint32_t offset = 0;
	unsigned char key[PACKING_KEY_SIZE];
	// Encrypt each sub-chunk independently and then concatenate the encrypted sub-chunks
	// to yield encrypted composite chunk.
	for (int i = 0; i < subChunkCount; i++) {
		unsigned char* subChunk = paddedChunk + offset;
		unsigned char* encryptedSubChunk = (unsigned char*)malloc(subChunkSize);
		// 3 bytes is sufficient to represent offsets up to at most MAX_CHUNK_SIZE.
		int offsetByteSize = 3;
		unsigned char offsetBytes[offsetByteSize];
		// Big-endian byte encoding of (offset + subChunkSize), i.e. the
		// sub-chunk END offset ((i + 1) * subChunkSize) — not the start
		// offset. This encoding is consensus-critical and must not change.
		for (int k = 0; k < offsetByteSize; k++) {
			offsetBytes[k] = ((offset + subChunkSize) >> (8 * (offsetByteSize - 1 - k))) & 0xFF;
		}
		// Sub-chunk encryption key is the SHA256 hash of the concatenated
		// input data and the encoded sub-chunk offset.
		SHA256_CTX sha256;
		SHA256_Init(&sha256);
		SHA256_Update(&sha256, inputDataPtr->data, inputDataPtr->size);
		SHA256_Update(&sha256, offsetBytes, offsetByteSize);
		SHA256_Final(key, &sha256);
		// Sequentially encrypt each sub-chunk 'iterations' times.
		for (int j = 0; j < iterations; j++) {
			randomx_encrypt_chunk(
				vmPtr, key, PACKING_KEY_SIZE, subChunk, subChunkSize,
				encryptedSubChunk, randomxRoundCount);
			if (j < iterations - 1) {
				// Feed the ciphertext back as input for the next iteration.
				memcpy(subChunk, encryptedSubChunk, subChunkSize);
			}
		}
		memcpy(encryptedChunk + offset, encryptedSubChunk, subChunkSize);
		free(encryptedSubChunk);
		offset += subChunkSize;
	}
	free(paddedChunk);
	return encryptedChunkTerm;
}

// Decrypts a composite chunk: the mirror of encrypt_composite_chunk. The
// input (<= MAX_CHUNK_SIZE) is split into subChunkCount sub-chunks of
// outChunkLen / subChunkCount bytes each; each is decrypted `iterations`
// times with a key derived exactly as in encryption. Returns an
// outChunkLen-byte binary term.
//
// NOTE(review): malloc results are not checked (see encrypt_composite_chunk).
static ERL_NIF_TERM decrypt_composite_chunk(ErlNifEnv* envPtr, randomx_vm *vmPtr,
		ErlNifBinary *inputDataPtr, ErlNifBinary *inputChunkPtr, const int outChunkLen,
		const int subChunkCount, const int iterations, const int randomxRoundCount,
		const int jitEnabled, const int largePagesEnabled, const int hardwareAESEnabled) {
	unsigned char *chunk = (unsigned char*)malloc(MAX_CHUNK_SIZE);
	memcpy(chunk, inputChunkPtr->data, inputChunkPtr->size);
	ERL_NIF_TERM decryptedChunkTerm;
	unsigned char* decryptedChunk = enif_make_new_binary(envPtr, outChunkLen,
			&decryptedChunkTerm);
	unsigned char* decryptedSubChunk;
	// outChunkLen / subChunkCount is a multiple of 64 so all sub-chunks
	// are of the same size.
	uint32_t subChunkSize = outChunkLen / subChunkCount;
	uint32_t offset = 0;
	unsigned char key[PACKING_KEY_SIZE];
	// Decrypt each sub-chunk independently and then concatenate the decrypted sub-chunks
	// to yield the decrypted composite chunk.
	for (int i = 0; i < subChunkCount; i++) {
		unsigned char* subChunk = chunk + offset;
		decryptedSubChunk = (unsigned char*)malloc(subChunkSize);
		// 3 bytes is sufficient to represent offsets up to at most MAX_CHUNK_SIZE.
		int offsetByteSize = 3;
		unsigned char offsetBytes[offsetByteSize];
		// Big-endian encoding of (offset + subChunkSize) — the sub-chunk END
		// offset, matching the encryption-side key derivation.
		for (int k = 0; k < offsetByteSize; k++) {
			offsetBytes[k] = ((offset + subChunkSize) >> (8 * (offsetByteSize - 1 - k))) & 0xFF;
		}
		// Sub-chunk encryption key is the SHA256 hash of the concatenated
		// input data and the encoded sub-chunk offset.
		SHA256_CTX sha256;
		SHA256_Init(&sha256);
		SHA256_Update(&sha256, inputDataPtr->data, inputDataPtr->size);
		SHA256_Update(&sha256, offsetBytes, offsetByteSize);
		SHA256_Final(key, &sha256);
		// Sequentially decrypt each sub-chunk 'iterations' times.
		for (int j = 0; j < iterations; j++) {
			randomx_decrypt_chunk(
				vmPtr, key, PACKING_KEY_SIZE, subChunk, subChunkSize,
				decryptedSubChunk, randomxRoundCount);
			if (j < iterations - 1) {
				memcpy(subChunk, decryptedSubChunk, subChunkSize);
			}
		}
		memcpy(decryptedChunk + offset, decryptedSubChunk, subChunkSize);
		free(decryptedSubChunk);
		offset += subChunkSize;
	}
	free(chunk);
	return decryptedChunkTerm;
}
// NIF: decrypt a composite chunk.
// argv: [state, inputData, inputChunk, outChunkLen, jit, largePages, hwAES,
//        randomxRoundCount, iterations, subChunkCount]
// Validates that the chunk length equals outChunkLen and that the sub-chunk
// geometry divides evenly into 64-byte-aligned pieces, then delegates to
// decrypt_composite_chunk on a freshly created (and always destroyed) VM.
static ERL_NIF_TERM rx4096_decrypt_composite_chunk_nif(
	ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]
) {
	int outChunkLen;
	// RandomX rounds per sub-chunk.
	int randomxRoundCount;
	// RandomX iterations (randomxRoundCount each) per sub-chunk.
	int iterations;
	// The number of sub-chunks in the chunk.
	int subChunkCount;
	int jitEnabled, largePagesEnabled, hardwareAESEnabled;
	struct state* statePtr;
	ErlNifBinary inputData;
	ErlNifBinary inputChunk;
	if (argc != 10) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) {
		return error_tuple(envPtr, "failed to read state");
	}
	if (!enif_inspect_binary(envPtr, argv[1], &inputData)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_inspect_binary(envPtr, argv[2], &inputChunk) ||
			inputChunk.size == 0 ||
			inputChunk.size > MAX_CHUNK_SIZE) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[3], &outChunkLen) ||
			outChunkLen > MAX_CHUNK_SIZE ||
			outChunkLen < 64 ||
			inputChunk.size != outChunkLen) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[4], &jitEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[5], &largePagesEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[6], &hardwareAESEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[7], &randomxRoundCount)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[8], &iterations) || iterations < 1) {
		return enif_make_badarg(envPtr);
	}
	// Sub-chunks must be equally sized and 64-byte aligned.
	if (!enif_get_int(envPtr, argv[9], &subChunkCount) ||
			subChunkCount < 1 ||
			outChunkLen % subChunkCount != 0 ||
			(outChunkLen / subChunkCount) % 64 != 0 ||
			subChunkCount > (outChunkLen / 64)) {
		return enif_make_badarg(envPtr);
	}
	int isRandomxReleased;
	randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST),
			jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased);
	if (vmPtr == NULL) {
		if (isRandomxReleased != 0) {
			return error_tuple(envPtr, "state has been released");
		}
		return error_tuple(envPtr, "randomx_create_vm failed");
	}
	ERL_NIF_TERM decryptedChunkTerm = decrypt_composite_chunk(envPtr, vmPtr,
			&inputData, &inputChunk, outChunkLen, subChunkCount, iterations,
			randomxRoundCount, jitEnabled, largePagesEnabled, hardwareAESEnabled);
	destroy_vm(statePtr, vmPtr);
	return ok_tuple(envPtr, decryptedChunkTerm);
}

// NIF: decrypt a single sub-chunk.
// argv: [state, inputData, inputChunk, outChunkLen, jit, largePages, hwAES,
//        randomxRoundCount, iterations, offset]
// The whole input is treated as one sub-chunk (subChunkSize == outChunkLen);
// the key is SHA256(inputData || big-endian-3-bytes(offset + subChunkSize)),
// matching the composite-chunk key derivation.
static ERL_NIF_TERM rx4096_decrypt_composite_sub_chunk_nif(
	ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]
) {
	int outChunkLen;
	// RandomX rounds per sub-chunk.
	int randomxRoundCount;
	// RandomX iterations (randomxRoundCount each) per sub-chunk.
	int iterations;
	// The relative sub-chunk start offset. We add the chunk size to it, encode the result,
	// add it to the base packing key, and SHA256-hash it to get the packing key.
	uint32_t offset;
	int jitEnabled, largePagesEnabled, hardwareAESEnabled;
	struct state* statePtr;
	ErlNifBinary inputData;
	ErlNifBinary inputChunk;
	if (argc != 10) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) {
		return error_tuple(envPtr, "failed to read state");
	}
	if (!enif_inspect_binary(envPtr, argv[1], &inputData)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_inspect_binary(envPtr, argv[2], &inputChunk)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[3], &outChunkLen) ||
			outChunkLen > MAX_CHUNK_SIZE ||
			outChunkLen < 64 ||
			inputChunk.size != outChunkLen ) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[4], &jitEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[5], &largePagesEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[6], &hardwareAESEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[7], &randomxRoundCount)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[8], &iterations) || iterations < 1) {
		return enif_make_badarg(envPtr);
	}
	// NOTE(review): offset is unsigned, so `offset < 0` is always false —
	// the check is redundant but harmless.
	if (!enif_get_uint(envPtr, argv[9], &offset) ||
			offset < 0 || offset > MAX_CHUNK_SIZE) {
		return enif_make_badarg(envPtr);
	}
	int isRandomxReleased;
	ERL_NIF_TERM decryptedSubChunkTerm;
	unsigned char* decryptedSubChunk = enif_make_new_binary(envPtr, outChunkLen,
			&decryptedSubChunkTerm);
	uint32_t subChunkSize = outChunkLen;
	unsigned char key[PACKING_KEY_SIZE];
	randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST),
			jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased);
	if (vmPtr == NULL) {
		if (isRandomxReleased != 0) {
			return error_tuple(envPtr, "state has been released");
		}
		return error_tuple(envPtr, "randomx_create_vm failed");
	}
	// Work on a private copy so the caller's binary stays untouched.
	unsigned char* subChunk = (unsigned char*)malloc(inputChunk.size);
	memcpy(subChunk, inputChunk.data, inputChunk.size);
	// 3 bytes is sufficient to represent offsets up to at most MAX_CHUNK_SIZE.
	int offsetByteSize = 3;
	unsigned char offsetBytes[offsetByteSize];
	// Big-endian encoding of (offset + subChunkSize), as in the composite path.
	for (int k = 0; k < offsetByteSize; k++) {
		offsetBytes[k] = ((offset + subChunkSize) >> (8 * (offsetByteSize - 1 - k))) & 0xFF;
	}
	// Sub-chunk encryption key is the SHA256 hash of the concatenated
	// input data and the encoded sub-chunk offset.
	SHA256_CTX sha256;
	SHA256_Init(&sha256);
	SHA256_Update(&sha256, inputData.data, inputData.size);
	SHA256_Update(&sha256, offsetBytes, offsetByteSize);
	SHA256_Final(key, &sha256);
	// Sequentially decrypt the sub-chunk 'iterations' times.
	for (int j = 0; j < iterations; j++) {
		randomx_decrypt_chunk(vmPtr, key, PACKING_KEY_SIZE, subChunk, subChunkSize,
				decryptedSubChunk, randomxRoundCount);
		if (j < iterations - 1) {
			memcpy(subChunk, decryptedSubChunk, subChunkSize);
		}
	}
	free(subChunk);
	destroy_vm(statePtr, vmPtr);
	return ok_tuple(envPtr, decryptedSubChunkTerm);
}

// NIF: re-encrypt a composite chunk from one packing to another.
// argv: [state, decryptKey, encryptKey, inputChunk, jit, largePages, hwAES,
//        decRounds, encRounds, decIterations, encIterations,
//        decSubChunkCount, encSubChunkCount]
// Shortcut: when key, sub-chunk count and round count all match, the chunk is
// only further encrypted for (encIterations - decIterations) iterations
// (encIterations must exceed decIterations or badarg is returned). Otherwise
// the chunk is fully decrypted first, then encrypted with the new parameters.
// Returns {ok, ReencryptedChunk, DecryptedChunk}.
static ERL_NIF_TERM rx4096_reencrypt_composite_chunk_nif(
	ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]
) {
	int decryptRandomxRoundCount, encryptRandomxRoundCount;
	int jitEnabled, largePagesEnabled, hardwareAESEnabled;
	int decryptSubChunkCount, encryptSubChunkCount, decryptIterations, encryptIterations;
	struct state* statePtr;
	ErlNifBinary decryptKey;
	ErlNifBinary encryptKey;
	ErlNifBinary inputChunk;
	ERL_NIF_TERM inputChunkTerm;
	if (argc != 13) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) {
		return error_tuple(envPtr, "failed to read state");
	}
	if (!enif_inspect_binary(envPtr, argv[1], &decryptKey)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_inspect_binary(envPtr, argv[2], &encryptKey)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_inspect_binary(envPtr, argv[3], &inputChunk) ||
			inputChunk.size != MAX_CHUNK_SIZE) {
		return enif_make_badarg(envPtr);
	}
	inputChunkTerm = argv[3];
	if (!enif_get_int(envPtr, argv[4], &jitEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[5], &largePagesEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[6], &hardwareAESEnabled)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[7], &decryptRandomxRoundCount)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[8], &encryptRandomxRoundCount)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[9], &decryptIterations) || decryptIterations < 1) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[10], &encryptIterations) || encryptIterations < 1) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[11], &decryptSubChunkCount) ||
			decryptSubChunkCount < 1 ||
			MAX_CHUNK_SIZE % decryptSubChunkCount != 0 ||
			(MAX_CHUNK_SIZE / decryptSubChunkCount) % 64 != 0 ||
			decryptSubChunkCount > (MAX_CHUNK_SIZE / 64)) {
		return enif_make_badarg(envPtr);
	}
	if (!enif_get_int(envPtr, argv[12], &encryptSubChunkCount) ||
			encryptSubChunkCount < 1 ||
			MAX_CHUNK_SIZE % encryptSubChunkCount != 0 ||
			(MAX_CHUNK_SIZE / encryptSubChunkCount) % 64 != 0 ||
			encryptSubChunkCount > (MAX_CHUNK_SIZE / 64)) {
		return enif_make_badarg(envPtr);
	}
	int isRandomxReleased;
	randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST),
			jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased);
	if (vmPtr == NULL) {
		if (isRandomxReleased != 0) {
			return error_tuple(envPtr, "state has been released");
		}
		return error_tuple(envPtr, "randomx_create_vm failed");
	}
	int keysMatch = 0;
	if (decryptKey.size == encryptKey.size) {
		if (memcmp(decryptKey.data, encryptKey.data, decryptKey.size) == 0) {
			keysMatch = 1;
		}
	}
	// Same key + same geometry + same rounds means the input is a partially
	// encrypted version of the target; we can skip decryption entirely.
	int encryptionsMatch = 0;
	if (keysMatch && (decryptSubChunkCount == encryptSubChunkCount) &&
			(decryptRandomxRoundCount == encryptRandomxRoundCount)) {
		encryptionsMatch = 1;
	}
	if (encryptionsMatch && (encryptIterations <= decryptIterations)) {
		destroy_vm(statePtr, vmPtr);
		return enif_make_badarg(envPtr);
	}
	// NOTE(review): this 256 KiB stack array is never used — candidate for
	// removal; kept to preserve the code byte-for-byte here.
	unsigned char decryptedChunk[MAX_CHUNK_SIZE];
	ErlNifBinary *decryptedChunkBinPtr;
	ERL_NIF_TERM decryptedChunkTerm;
	if (!encryptionsMatch) {
		decryptedChunkTerm = decrypt_composite_chunk(envPtr, vmPtr, &decryptKey,
				&inputChunk, inputChunk.size, decryptSubChunkCount, decryptIterations,
				decryptRandomxRoundCount, jitEnabled, largePagesEnabled,
				hardwareAESEnabled);
		ErlNifBinary decryptedChunkBin;
		if (!enif_inspect_binary(envPtr, decryptedChunkTerm, &decryptedChunkBin)) {
			destroy_vm(statePtr, vmPtr);
			return enif_make_badarg(envPtr);
		}
		decryptedChunkBinPtr = &decryptedChunkBin;
	} else {
		// Shortcut path: the input chunk itself plays the "decrypted" role.
		decryptedChunkBinPtr = &inputChunk;
		decryptedChunkTerm = inputChunkTerm;
	}
	int iterations = encryptIterations;
	if (encryptionsMatch) {
		// Only apply the missing iterations on top of the existing packing.
		iterations = encryptIterations - decryptIterations;
	}
	ERL_NIF_TERM reencryptedChunkTerm = encrypt_composite_chunk(envPtr, vmPtr,
			&encryptKey, decryptedChunkBinPtr, encryptSubChunkCount, iterations,
			encryptRandomxRoundCount, jitEnabled, largePagesEnabled, hardwareAESEnabled);
	destroy_vm(statePtr, vmPtr);
	return ok_tuple2(envPtr, reencryptedChunkTerm, decryptedChunkTerm);
}

// All NIFs run on dirty CPU schedulers: RandomX work is far too long for a
// normal scheduler slice.
static ErlNifFunc rx4096_funcs[] = {
	{"rx4096_info_nif", 1, rx4096_info_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"rx4096_init_nif", 5, rx4096_init_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"rx4096_hash_nif", 5, rx4096_hash_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"rx4096_encrypt_composite_chunk_nif", 9, rx4096_encrypt_composite_chunk_nif,
		ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"rx4096_decrypt_composite_chunk_nif", 10, rx4096_decrypt_composite_chunk_nif,
		ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"rx4096_decrypt_composite_sub_chunk_nif", 10, rx4096_decrypt_composite_sub_chunk_nif,
		ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"rx4096_reencrypt_composite_chunk_nif", 13, rx4096_reencrypt_composite_chunk_nif,
		ERL_NIF_DIRTY_JOB_CPU_BOUND}
};

ERL_NIF_INIT(ar_rx4096_nif, rx4096_funcs, rx4096_load, NULL, NULL, NULL);
const ERL_NIF_TERM argv[]); static ERL_NIF_TERM rx512_decrypt_chunk_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM rx512_reencrypt_chunk_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static int rx512_load(ErlNifEnv* envPtr, void** priv, ERL_NIF_TERM info) { return load(envPtr, priv, info); } static ERL_NIF_TERM rx512_info_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { return info_nif("rx512", envPtr, argc, argv); } static ERL_NIF_TERM rx512_init_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { return init_nif(envPtr, argc, argv); } static ERL_NIF_TERM rx512_hash_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { return hash_nif(envPtr, argc, argv); } static ERL_NIF_TERM decrypt_chunk(ErlNifEnv* envPtr, randomx_vm *machine, const unsigned char *input, const size_t inputSize, const unsigned char *inChunk, const size_t inChunkSize, unsigned char* outChunk, const size_t outChunkSize, const int randomxProgramCount) { randomx_decrypt_chunk( machine, input, inputSize, inChunk, inChunkSize, outChunk, randomxProgramCount); return make_output_binary(envPtr, outChunk, outChunkSize); } static ERL_NIF_TERM encrypt_chunk(ErlNifEnv* envPtr, randomx_vm *machine, const unsigned char *input, const size_t inputSize, const unsigned char *inChunk, const size_t inChunkSize, const int randomxProgramCount) { ERL_NIF_TERM encryptedChunkTerm; unsigned char* encryptedChunk = enif_make_new_binary( envPtr, MAX_CHUNK_SIZE, &encryptedChunkTerm); if (inChunkSize < MAX_CHUNK_SIZE) { unsigned char *paddedInChunk = (unsigned char*)malloc(MAX_CHUNK_SIZE); memset(paddedInChunk, 0, MAX_CHUNK_SIZE); memcpy(paddedInChunk, inChunk, inChunkSize); randomx_encrypt_chunk( machine, input, inputSize, paddedInChunk, MAX_CHUNK_SIZE, encryptedChunk, randomxProgramCount); free(paddedInChunk); } else { randomx_encrypt_chunk( machine, input, inputSize, inChunk, inChunkSize, encryptedChunk, randomxProgramCount); } return 
encryptedChunkTerm; } static ERL_NIF_TERM rx512_encrypt_chunk_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[] ) { int randomxRoundCount, jitEnabled, largePagesEnabled, hardwareAESEnabled; struct state* statePtr; ErlNifBinary inputData; ErlNifBinary inputChunk; if (argc != 7) { return enif_make_badarg(envPtr); } if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) { return error_tuple(envPtr, "failed to read state"); } if (!enif_inspect_binary(envPtr, argv[1], &inputData)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[2], &inputChunk) || inputChunk.size == 0 || inputChunk.size > MAX_CHUNK_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &randomxRoundCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &jitEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[5], &largePagesEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[6], &hardwareAESEnabled)) { return enif_make_badarg(envPtr); } int isRandomxReleased; randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST), jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased); if (vmPtr == NULL) { if (isRandomxReleased != 0) { return error_tuple(envPtr, "state has been released"); } return error_tuple(envPtr, "randomx_create_vm failed"); } ERL_NIF_TERM outChunkTerm = encrypt_chunk(envPtr, vmPtr, inputData.data, inputData.size, inputChunk.data, inputChunk.size, randomxRoundCount); destroy_vm(statePtr, vmPtr); return ok_tuple(envPtr, outChunkTerm); } static ERL_NIF_TERM rx512_decrypt_chunk_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[] ) { int outChunkLen, randomxRoundCount, jitEnabled, largePagesEnabled, hardwareAESEnabled; struct state* statePtr; ErlNifBinary inputData; ErlNifBinary inputChunk; if (argc != 8) { return enif_make_badarg(envPtr); } if (!enif_get_resource(envPtr, argv[0], stateType, (void**) 
&statePtr)) { return error_tuple(envPtr, "failed to read state"); } if (!enif_inspect_binary(envPtr, argv[1], &inputData)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[2], &inputChunk) || inputChunk.size == 0 || inputChunk.size > MAX_CHUNK_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &outChunkLen)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &randomxRoundCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[5], &jitEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[6], &largePagesEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[7], &hardwareAESEnabled)) { return enif_make_badarg(envPtr); } int isRandomxReleased; randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST), jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased); if (vmPtr == NULL) { if (isRandomxReleased != 0) { return error_tuple(envPtr, "state has been released"); } return error_tuple(envPtr, "randomx_create_vm failed"); } // NOTE. Because randomx_decrypt_chunk will unpack padding too, decrypt always uses the // full 256KB chunk size. We'll then truncate the output to the correct feistel-padded // outChunkSize. 
unsigned char outChunk[MAX_CHUNK_SIZE]; ERL_NIF_TERM decryptedChunkTerm = decrypt_chunk(envPtr, vmPtr, inputData.data, inputData.size, inputChunk.data, inputChunk.size, outChunk, outChunkLen, randomxRoundCount); destroy_vm(statePtr, vmPtr); return ok_tuple(envPtr, decryptedChunkTerm); } static ERL_NIF_TERM rx512_reencrypt_chunk_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[] ) { int chunkSize, decryptRandomxRoundCount, encryptRandomxRoundCount; int jitEnabled, largePagesEnabled, hardwareAESEnabled; struct state* statePtr; ErlNifBinary decryptKey; ErlNifBinary encryptKey; ErlNifBinary inputChunk; if (argc != 10) { return enif_make_badarg(envPtr); } if (!enif_get_resource(envPtr, argv[0], stateType, (void**) &statePtr)) { return error_tuple(envPtr, "failed to read state"); } if (!enif_inspect_binary(envPtr, argv[1], &decryptKey)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[2], &encryptKey)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[3], &inputChunk) || inputChunk.size == 0) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &chunkSize) || chunkSize == 0 || chunkSize > MAX_CHUNK_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[5], &decryptRandomxRoundCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[6], &encryptRandomxRoundCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[7], &jitEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[8], &largePagesEnabled)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[9], &hardwareAESEnabled)) { return enif_make_badarg(envPtr); } int isRandomxReleased; randomx_vm *vmPtr = create_vm(statePtr, (statePtr->mode == HASHING_MODE_FAST), jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased); if (vmPtr == NULL) { if (isRandomxReleased != 0) { return error_tuple(envPtr, "state has been released"); } return 
error_tuple(envPtr, "randomx_create_vm failed"); } // NOTE. Because randomx_decrypt_chunk will unpack padding too, decrypt always uses the // full 256KB chunk size. We'll then truncate the output to the correct feistel-padded // outChunkSize. unsigned char decryptedChunk[MAX_CHUNK_SIZE]; ERL_NIF_TERM decryptedChunkTerm = decrypt_chunk(envPtr, vmPtr, decryptKey.data, decryptKey.size, inputChunk.data, inputChunk.size, decryptedChunk, chunkSize, decryptRandomxRoundCount); ERL_NIF_TERM reencryptedChunkTerm = encrypt_chunk(envPtr, vmPtr, encryptKey.data, encryptKey.size, decryptedChunk, chunkSize, encryptRandomxRoundCount); destroy_vm(statePtr, vmPtr); return ok_tuple2(envPtr, reencryptedChunkTerm, decryptedChunkTerm); } static ErlNifFunc rx512_funcs[] = { {"rx512_info_nif", 1, rx512_info_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rx512_init_nif", 5, rx512_init_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rx512_hash_nif", 5, rx512_hash_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rx512_encrypt_chunk_nif", 7, rx512_encrypt_chunk_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rx512_decrypt_chunk_nif", 8, rx512_decrypt_chunk_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rx512_reencrypt_chunk_nif", 10, rx512_reencrypt_chunk_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND} }; ERL_NIF_INIT(ar_rx512_nif, rx512_funcs, rx512_load, NULL, NULL, NULL); ================================================ FILE: apps/arweave/c_src/randomx/rxsquared/ar_rxsquared_nif.c ================================================ #include #include #include #include "../randomx_long_with_entropy.h" #include "../feistel_msgsize_key_cipher.h" #include "../randomx_squared.h" #include "../ar_randomx_impl.h" const int PACKING_KEY_SIZE = 32; const int MAX_CHUNK_SIZE = 256*1024; static int rxsquared_load(ErlNifEnv* envPtr, void** priv, ERL_NIF_TERM info); static ERL_NIF_TERM rxsquared_info_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM rxsquared_init_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM 
rxsquared_hash_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]); static int rxsquared_load(ErlNifEnv* envPtr, void** priv, ERL_NIF_TERM info) { return load(envPtr, priv, info); } static ERL_NIF_TERM rxsquared_info_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { return info_nif("rxsquared", envPtr, argc, argv); } static ERL_NIF_TERM rxsquared_init_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { return init_nif(envPtr, argc, argv); } static ERL_NIF_TERM rxsquared_hash_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { return hash_nif(envPtr, argc, argv); } static ERL_NIF_TERM rsp_feistel_encrypt_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary inMsgBin; ErlNifBinary inKeyBin; ERL_NIF_TERM outMsgTerm; unsigned char* outMsgData; if (argc != 2) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[0], &inMsgBin)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[1], &inKeyBin)) { return enif_make_badarg(envPtr); } size_t msgSize = inMsgBin.size; if (inKeyBin.size != msgSize) { return enif_make_badarg(envPtr); } if (msgSize % 64 != 0) { return enif_make_badarg(envPtr); } outMsgData = enif_make_new_binary(envPtr, msgSize, &outMsgTerm); if (outMsgData == NULL) { return enif_make_badarg(envPtr); } feistel_encrypt(inMsgBin.data, msgSize, inKeyBin.data, outMsgData); return ok_tuple(envPtr, outMsgTerm); } static ERL_NIF_TERM rsp_feistel_decrypt_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary inMsgBin; ErlNifBinary inKeyBin; ERL_NIF_TERM outMsgTerm; unsigned char* outMsgData; if (argc != 2) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[0], &inMsgBin)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[1], &inKeyBin)) { return enif_make_badarg(envPtr); } size_t msgSize = inMsgBin.size; if (inKeyBin.size != msgSize) { return enif_make_badarg(envPtr); } if (msgSize % 64 != 0) { 
return enif_make_badarg(envPtr); } outMsgData = enif_make_new_binary(envPtr, msgSize, &outMsgTerm); if (outMsgData == NULL) { return enif_make_badarg(envPtr); } feistel_decrypt(inMsgBin.data, msgSize, inKeyBin.data, outMsgData); return ok_tuple(envPtr, outMsgTerm); } static ERL_NIF_TERM rsp_fused_entropy_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { if (argc != 10) { return enif_make_badarg(envPtr); } // 1. Parse the state resource struct state* statePtr; if (!enif_get_resource(envPtr, argv[0], stateType, (void**)&statePtr)) { return error_tuple(envPtr, "failed_to_read_state"); } // 2. Parse each integer int subChunkCount; if (!enif_get_int(envPtr, argv[1], &subChunkCount)) { return enif_make_badarg(envPtr); } int subChunkSize; if (!enif_get_int(envPtr, argv[2], &subChunkSize)) { return enif_make_badarg(envPtr); } int laneCount; if (!enif_get_int(envPtr, argv[3], &laneCount)) { return enif_make_badarg(envPtr); } int rxDepth; if (!enif_get_int(envPtr, argv[4], &rxDepth)) { return enif_make_badarg(envPtr); } int jitEnabled; if (!enif_get_int(envPtr, argv[5], &jitEnabled)) { return enif_make_badarg(envPtr); } int largePagesEnabled; if (!enif_get_int(envPtr, argv[6], &largePagesEnabled)) { return enif_make_badarg(envPtr); } int hardwareAESEnabled; if (!enif_get_int(envPtr, argv[7], &hardwareAESEnabled)) { return enif_make_badarg(envPtr); } int randomxProgramCount; if (!enif_get_int(envPtr, argv[8], &randomxProgramCount)) { return enif_make_badarg(envPtr); } // 3. Parse key as a binary ErlNifBinary keyBin; if (!enif_inspect_binary(envPtr, argv[9], &keyBin)) { return enif_make_badarg(envPtr); } // 4. Create VMs int totalVMs = 2 * laneCount; randomx_vm** vmList = (randomx_vm**)calloc(totalVMs, sizeof(randomx_vm*)); if (!vmList) { return error_tuple(envPtr, "vmList_alloc_failed"); } size_t scratchpadSize = randomx_get_scratchpad_size(); // 5. 
Pre-allocate the final output binary to store all scratchpads size_t outEntropySize = scratchpadSize * laneCount; ERL_NIF_TERM outEntropyTerm; unsigned char* outEntropy = enif_make_new_binary(envPtr, outEntropySize, &outEntropyTerm); if (!outEntropy) { free(vmList); return enif_make_badarg(envPtr); } // 6. Create the randomx_vm objects int isRandomxReleased = 0; for (int i = 0; i < totalVMs; i++) { vmList[i] = create_vm( statePtr, (statePtr->mode == HASHING_MODE_FAST), jitEnabled, largePagesEnabled, hardwareAESEnabled, &isRandomxReleased ); if (!vmList[i]) { // Clean up partial for (int j = 0; j < i; j++) { destroy_vm(statePtr, vmList[j]); } free(vmList); if (isRandomxReleased != 0) { return error_tuple(envPtr, "state_has_been_released"); } return error_tuple(envPtr, "randomx_create_vm_failed"); } } // 7. Call the pure C++ function that does the heavy logic and returns bool int success = rsp_fused_entropy( vmList, scratchpadSize, subChunkCount, subChunkSize, laneCount, rxDepth, randomxProgramCount, 6, keyBin.data, keyBin.size, outEntropy // final buffer for the output entropy ); // 8. If the function returned false, we interpret that as an error if (!success) { // Cleanup for (int i = 0; i < totalVMs; i++) { if (vmList[i]) { destroy_vm(statePtr, vmList[i]); } } free(vmList); return error_tuple(envPtr, "cxx_fused_entropy_failed"); } // 9. 
If success, destroy VMs and return {ok, outEntropyTerm} for (int i = 0; i < totalVMs; i++) { destroy_vm(statePtr, vmList[i]); } free(vmList); return ok_tuple(envPtr, outEntropyTerm); } static ErlNifFunc rxsquared_funcs[] = { {"rxsquared_info_nif", 1, rxsquared_info_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rxsquared_init_nif", 5, rxsquared_init_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rxsquared_hash_nif", 5, rxsquared_hash_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rsp_fused_entropy_nif", 10, rsp_fused_entropy_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rsp_feistel_encrypt_nif", 2, rsp_feistel_encrypt_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"rsp_feistel_decrypt_nif", 2, rsp_feistel_decrypt_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND} }; ERL_NIF_INIT(ar_rxsquared_nif, rxsquared_funcs, rxsquared_load, NULL, NULL, NULL); ================================================ FILE: apps/arweave/c_src/secp256k1/secp256k1_nif.c ================================================ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #define SECP256K1_PUBKEY_UNCOMPRESSED_SIZE 65 #define SECP256K1_PUBKEY_COMPRESSED_SIZE 33 #define SECP256K1_SIGNATURE_COMPACT_SIZE 64 #define SECP256K1_SIGNATURE_RECOVERABLE_SIZE 65 #define SECP256K1_PRIVKEY_SIZE 32 #define SECP256K1_CONTEXT_SEED_SIZE 32 #define SECP256K1_DIGEST_SIZE 32 static int secp256k1_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) { return 0; } static int fill_devurandom(void* buffer, size_t size) { int fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC); if (fd == -1) { return 0; } size_t offset = 0; while (offset < size) { ssize_t result = read(fd, (char*)buffer + offset, size - offset); if (result == -1) { if (errno == EINTR) continue; goto error; } // EOF if (result == 0) { goto error; } offset += (size_t)result; } close(fd); return 1; error: close(fd); return 0; } static int fill_random(void* buffer, size_t size) { #if defined(__linux__) || defined(__FreeBSD__) size_t offset = 0; while (offset < 
size) { ssize_t result = getrandom((char*)buffer + offset, size - offset, 0); if (result == -1) { if (errno == EINTR) continue; if (errno == ENOSYS) return fill_devurandom(buffer, size); return 0; } offset += (size_t)result; } #elif defined(__APPLE__) size_t offset = 0; while (offset < size) { // max allowed length is 256 bytes size_t chunk = (size - offset > 256) ? 256 : (size - offset); if (getentropy((char*)buffer + offset, chunk) == -1) { if (errno == ENOSYS) return fill_devurandom(buffer, size); return 0; } offset += chunk; } #else // Unsupported platform return 0; #endif return 1; } /* Cleanses memory to prevent leaking sensitive info. Won't be optimized out. */ static void secure_erase(void *ptr, size_t len) { #if defined(__GNUC__) /* We use a memory barrier that scares the compiler away from optimizing out the memset. * * Quoting Adam Langley in commit ad1907fe73334d6c696c8539646c21b11178f20f * in BoringSSL (ISC License): * As best as we can tell, this is sufficient to break any optimisations that * might try to eliminate "superfluous" memsets. * This method used in memzero_explicit() the Linux kernel, too. Its advantage is that it is * pretty efficient, because the compiler can still implement the memset() efficiently, * just not remove it entirely. See "Dead Store Elimination (Still) Considered Harmful" by * Yang et al. (USENIX Security 2017) for more background. 
*/ memset(ptr, 0, len); __asm__ __volatile__("" : : "r"(ptr) : "memory"); #else void *(*volatile const volatile_memset)(void *, int, size_t) = memset; volatile_memset(ptr, 0, len); #endif } static ERL_NIF_TERM sign_recoverable(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { if (argc != 2) { return enif_make_badarg(env); } ErlNifBinary Digest, PrivateBytes; if (!enif_inspect_binary(env, argv[0], &Digest)) { return enif_make_badarg(env); } if (Digest.size != SECP256K1_DIGEST_SIZE) { return enif_make_badarg(env); } if (!enif_inspect_binary(env, argv[1], &PrivateBytes)) { return enif_make_badarg(env); } if (PrivateBytes.size != SECP256K1_PRIVKEY_SIZE) { return enif_make_badarg(env); } char *error = NULL; unsigned char seed[SECP256K1_CONTEXT_SEED_SIZE]; unsigned char digest[SECP256K1_DIGEST_SIZE]; unsigned char privbytes[SECP256K1_PRIVKEY_SIZE]; unsigned char signature_compact[SECP256K1_SIGNATURE_COMPACT_SIZE]; unsigned char signature_recoverable[SECP256K1_SIGNATURE_RECOVERABLE_SIZE]; int recid; secp256k1_ecdsa_recoverable_signature s; secp256k1_context* ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE); memcpy(digest, Digest.data, SECP256K1_DIGEST_SIZE); memcpy(privbytes, PrivateBytes.data, SECP256K1_PRIVKEY_SIZE); if (!secp256k1_ec_seckey_verify(ctx, privbytes)) { error = "secp256k1 key is invalid."; goto cleanup; } if (!fill_random(seed, sizeof(seed))) { error = "Failed to generate random seed for context."; goto cleanup; } if (!secp256k1_context_randomize(ctx, seed)) { error = "Failed to randomize context."; goto cleanup; } if(!secp256k1_ecdsa_sign_recoverable(ctx, &s, digest, privbytes, NULL, NULL)) { error = "Failed to create signature."; goto cleanup; } if(!secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, signature_compact, &recid, &s)) { error = "Failed to serialize signature."; goto cleanup; } memcpy(signature_recoverable, signature_compact, SECP256K1_SIGNATURE_COMPACT_SIZE); signature_recoverable[64] = (unsigned char)(recid); 
ERL_NIF_TERM signature_term = make_output_binary(env, signature_recoverable, SECP256K1_SIGNATURE_RECOVERABLE_SIZE); cleanup: secp256k1_context_destroy(ctx); secure_erase(seed, sizeof(seed)); secure_erase(privbytes, sizeof(privbytes)); memset(signature_compact, 0, SECP256K1_SIGNATURE_COMPACT_SIZE); memset(signature_recoverable, 0, SECP256K1_SIGNATURE_RECOVERABLE_SIZE); if (error) { return error_tuple(env, error); } return ok_tuple(env, signature_term); } static ERL_NIF_TERM recover_pk_and_verify(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { if (argc != 2) { return enif_make_badarg(env); } ErlNifBinary Digest, Signature; if (!enif_inspect_binary(env, argv[0], &Digest)) { return enif_make_badarg(env); } if (Digest.size != SECP256K1_DIGEST_SIZE) { return enif_make_badarg(env); } if (!enif_inspect_binary(env, argv[1], &Signature)) { return enif_make_badarg(env); } if (Signature.size != SECP256K1_SIGNATURE_RECOVERABLE_SIZE) { return enif_make_badarg(env); } char *error = NULL; unsigned char digest[SECP256K1_DIGEST_SIZE]; unsigned char signature_recoverable[SECP256K1_SIGNATURE_RECOVERABLE_SIZE]; unsigned char signature_compact[SECP256K1_SIGNATURE_COMPACT_SIZE]; unsigned char pubbytes[SECP256K1_PUBKEY_COMPRESSED_SIZE]; int recid; secp256k1_ecdsa_recoverable_signature rs; secp256k1_ecdsa_signature s; secp256k1_pubkey pubkey; memcpy(digest, Digest.data, SECP256K1_DIGEST_SIZE); memcpy(signature_recoverable, Signature.data, SECP256K1_SIGNATURE_RECOVERABLE_SIZE); memcpy(signature_compact, signature_recoverable, SECP256K1_SIGNATURE_COMPACT_SIZE); recid = (int)signature_recoverable[64]; if (recid < 0 || recid > 3) { error = "Invalid signature recid. 
recid >= 0 && recid <= 3."; goto cleanup; } if (!secp256k1_ecdsa_recoverable_signature_parse_compact(secp256k1_context_static, &rs, signature_compact, recid)) { error = "Failed to deserialize/parse recoverable signature."; goto cleanup; } if (!secp256k1_ecdsa_recover(secp256k1_context_static, &pubkey, &rs, digest)) { error = "Failed to recover public key."; goto cleanup; } size_t l = SECP256K1_PUBKEY_COMPRESSED_SIZE; if (!secp256k1_ec_pubkey_serialize(secp256k1_context_static, pubbytes, &l, &pubkey, SECP256K1_EC_COMPRESSED)) { error = "Failed to serialize the recovered public key."; goto cleanup; } if (!secp256k1_ecdsa_recoverable_signature_convert(secp256k1_context_static, &s, &rs)) { error = "Failed to convert recoverable signature to compact signature."; goto cleanup; } // NOTE. https://github.com/bitcoin-core/secp256k1/blob/f79f46c70386c693ff4e7aef0b9e7923ba284e56/src/secp256k1.c#L461 // Verify performs check for low-s int is_valid = secp256k1_ecdsa_verify(secp256k1_context_static, &s, digest, &pubkey); ERL_NIF_TERM pubkey_term = make_output_binary(env, pubbytes, SECP256K1_PUBKEY_COMPRESSED_SIZE); cleanup: memset(digest, 0, SECP256K1_DIGEST_SIZE); memset(pubbytes, 0, SECP256K1_PUBKEY_COMPRESSED_SIZE); memset(signature_compact, 0, SECP256K1_SIGNATURE_COMPACT_SIZE); memset(signature_recoverable, 0, SECP256K1_SIGNATURE_RECOVERABLE_SIZE); if (error) { return error_tuple(env, error); } if (is_valid) { return ok_tuple2(env, enif_make_atom(env, "true"), pubkey_term); } return ok_tuple2(env, enif_make_atom(env, "false"), pubkey_term); } static ErlNifFunc nif_funcs[] = { {"sign_recoverable", 2, sign_recoverable}, {"recover_pk_and_verify", 2, recover_pk_and_verify} }; ERL_NIF_INIT(secp256k1_nif, nif_funcs, secp256k1_load, NULL, NULL, NULL) ================================================ FILE: apps/arweave/c_src/vdf/ar_vdf_nif.c ================================================ #include #include #include #include #include "vdf.h" #if defined(__x86_64__) || 
defined(__amd64__) || defined(__i386__) #include #endif #if defined(__linux__) #include #endif #if defined(__APPLE__) #include #include #endif //////////////////////////////////////////////////////////////////////////////////////////////////// // SHA //////////////////////////////////////////////////////////////////////////////////////////////////// typedef void (*vdf_sha2_fn)( unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations ); static vdf_sha2_fn vdf_sha2_fused_ptr = NULL; static vdf_sha2_fn vdf_sha2_hiopt_ptr = NULL; static int vdf_load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) { #if defined(__x86_64__) || defined(__i386__) { unsigned int eax, ebx, ecx, edx; // leaf 7, subleaf 0 if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) && (ebx & (1u << 29))) { printf("VDF arch x86\n"); vdf_sha2_fused_ptr = vdf_sha2_fused_x86; vdf_sha2_hiopt_ptr = vdf_sha2_fused_x86; // fallback return 0; } } #endif #if defined(__aarch64__) || defined(__arm__) #if defined(__linux__) if (getauxval(AT_HWCAP) & HWCAP_SHA2) { printf("VDF arch ARM linux\n"); vdf_sha2_fused_ptr = vdf_sha2_fused_arm; vdf_sha2_hiopt_ptr = vdf_sha2_hiopt_arm; return 0; } #elif defined(__APPLE__) { int val = 0; size_t len = sizeof(val); if (sysctlbyname("hw.optional.arm.FEAT_SHA256", &val, &len, NULL, 0) == 0 && val != 0) { printf("VDF arch ARM macos\n"); vdf_sha2_fused_ptr = vdf_sha2_fused_arm; vdf_sha2_hiopt_ptr = vdf_sha2_hiopt_arm; return 0; } } #endif #endif printf("VDF arch unknown\n"); vdf_sha2_fused_ptr = vdf_sha2; vdf_sha2_hiopt_ptr = vdf_sha2; return 0; } static ERL_NIF_TERM vdf_sha2_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary Salt, Seed; int checkpointCount; int skipCheckpointCount; int hashingIterations; if (argc != 5) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[0], &Salt)) { return enif_make_badarg(envPtr); } if 
(Salt.size != SALT_SIZE) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[1], &Seed)) { return enif_make_badarg(envPtr); } if (Seed.size != VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[2], &checkpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &skipCheckpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &hashingIterations)) { return enif_make_badarg(envPtr); } unsigned char temp_result[VDF_SHA_HASH_SIZE]; size_t outCheckpointSize = VDF_SHA_HASH_SIZE*checkpointCount; ERL_NIF_TERM outputTermCheckpoint; unsigned char* outCheckpoint = enif_make_new_binary(envPtr, outCheckpointSize, &outputTermCheckpoint); vdf_sha2(Salt.data, Seed.data, temp_result, outCheckpoint, checkpointCount, skipCheckpointCount, hashingIterations); return ok_tuple2(envPtr, make_output_binary(envPtr, temp_result, VDF_SHA_HASH_SIZE), outputTermCheckpoint); } static ERL_NIF_TERM vdf_sha2_fused_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary Salt, Seed; int checkpointCount; int skipCheckpointCount; int hashingIterations; if (argc != 5) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[0], &Salt)) { return enif_make_badarg(envPtr); } if (Salt.size != SALT_SIZE) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[1], &Seed)) { return enif_make_badarg(envPtr); } if (Seed.size != VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[2], &checkpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &skipCheckpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &hashingIterations)) { return enif_make_badarg(envPtr); } unsigned char temp_result[VDF_SHA_HASH_SIZE]; size_t outCheckpointSize = VDF_SHA_HASH_SIZE*checkpointCount; ERL_NIF_TERM outputTermCheckpoint; unsigned char* outCheckpoint = 
enif_make_new_binary(envPtr, outCheckpointSize, &outputTermCheckpoint); vdf_sha2_fused_ptr(Salt.data, Seed.data, temp_result, outCheckpoint, checkpointCount, skipCheckpointCount, hashingIterations); return ok_tuple2(envPtr, make_output_binary(envPtr, temp_result, VDF_SHA_HASH_SIZE), outputTermCheckpoint); } static ERL_NIF_TERM vdf_sha2_hiopt_nif(ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[]) { ErlNifBinary Salt, Seed; int checkpointCount; int skipCheckpointCount; int hashingIterations; if (argc != 5) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[0], &Salt)) { return enif_make_badarg(envPtr); } if (Salt.size != SALT_SIZE) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[1], &Seed)) { return enif_make_badarg(envPtr); } if (Seed.size != VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[2], &checkpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &skipCheckpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &hashingIterations)) { return enif_make_badarg(envPtr); } unsigned char temp_result[VDF_SHA_HASH_SIZE]; size_t outCheckpointSize = VDF_SHA_HASH_SIZE*checkpointCount; ERL_NIF_TERM outputTermCheckpoint; unsigned char* outCheckpoint = enif_make_new_binary(envPtr, outCheckpointSize, &outputTermCheckpoint); vdf_sha2_hiopt_ptr(Salt.data, Seed.data, temp_result, outCheckpoint, checkpointCount, skipCheckpointCount, hashingIterations); return ok_tuple2(envPtr, make_output_binary(envPtr, temp_result, VDF_SHA_HASH_SIZE), outputTermCheckpoint); } static ERL_NIF_TERM vdf_parallel_sha_verify_with_reset_nif( ErlNifEnv* envPtr, int argc, const ERL_NIF_TERM argv[] ) { ErlNifBinary Salt, Seed, InCheckpoint, InRes, ResetSalt, ResetSeed; int checkpointCount; int skipCheckpointCount; int hashingIterations; int maxThreadCount; if (argc != 10) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[0], 
&Salt)) { return enif_make_badarg(envPtr); } if (Salt.size != SALT_SIZE) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[1], &Seed)) { return enif_make_badarg(envPtr); } if (Seed.size != VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[2], &checkpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[3], &skipCheckpointCount)) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[4], &hashingIterations)) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[5], &InCheckpoint)) { return enif_make_badarg(envPtr); } if (InCheckpoint.size != checkpointCount*VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[6], &InRes)) { return enif_make_badarg(envPtr); } if (InRes.size != VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[7], &ResetSalt)) { return enif_make_badarg(envPtr); } if (ResetSalt.size != 32) { return enif_make_badarg(envPtr); } if (!enif_inspect_binary(envPtr, argv[8], &ResetSeed)) { return enif_make_badarg(envPtr); } if (ResetSeed.size != VDF_SHA_HASH_SIZE) { return enif_make_badarg(envPtr); } if (!enif_get_int(envPtr, argv[9], &maxThreadCount)) { return enif_make_badarg(envPtr); } if (maxThreadCount < 1) { return enif_make_badarg(envPtr); } // NOTE last paramemter will be array later size_t outCheckpointSize = VDF_SHA_HASH_SIZE*(1+checkpointCount)*(1+skipCheckpointCount); ERL_NIF_TERM outputTermCheckpoint; unsigned char* outCheckpoint = enif_make_new_binary( envPtr, outCheckpointSize, &outputTermCheckpoint); bool res = vdf_parallel_sha_verify_with_reset( Salt.data, Seed.data, checkpointCount, skipCheckpointCount, hashingIterations, InRes.data, InCheckpoint.data, outCheckpoint, ResetSalt.data, ResetSeed.data, maxThreadCount); // TODO return all checkpoints if (!res) { return error_tuple(envPtr, "verification failed"); } return ok_tuple(envPtr, 
outputTermCheckpoint); }

// NIF dispatch table. Every VDF entry point performs a long CPU-bound hash
// chain, so each is flagged ERL_NIF_DIRTY_JOB_CPU_BOUND to keep it off the
// normal Erlang schedulers.
static ErlNifFunc nif_funcs[] = {
	{"vdf_sha2_nif", 5, vdf_sha2_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"vdf_sha2_fused_nif", 5, vdf_sha2_fused_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"vdf_sha2_hiopt_nif", 5, vdf_sha2_hiopt_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
	{"vdf_parallel_sha_verify_with_reset_nif", 10, vdf_parallel_sha_verify_with_reset_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
};
ERL_NIF_INIT(ar_vdf_nif, nif_funcs, vdf_load, NULL, NULL, NULL);


================================================
FILE: apps/arweave/c_src/vdf/sha256-armv8.S
================================================
.text

.globl _sha256_block_vdf_order
.align 6
// _sha256_block_vdf_order:
//   x0 - pointer to the 8-word SHA-256 state (loaded at entry, stored at exit)
//   x1 - pointer to the 64-byte message block (first 32 bytes re-read each
//        iteration; the remaining schedule is recomputed in-loop)
//   x2 - iteration count (decremented per Loop_hw pass)
// Uses the ARMv8 SHA-256 crypto instructions emitted as raw .long encodings
// (sha256h/sha256h2/sha256su0/sha256su1) so the file assembles even with
// toolchains that do not know the mnemonics.
_sha256_block_vdf_order:
	stp x29,x30,[sp,#-16]!
	add x29,sp,#0
	adr x3,K2564
	ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1]
	ld1 {v0.4s,v1.4s},[x0]
	// Rounds 1-8 are hoisted out of the loop: the first 32 message bytes
	// (the salt) do not change between iterations, so their two K-groups
	// are consumed once here; the in-loop rewind below skips them.
	ld1 {v16.4s},[x3],#16
	add v16.4s,v16.4s,v4.4s
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v17.4s},[x3],#16
	add v17.4s,v17.4s,v5.4s
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	// Snapshot the post-prefix state so each loop pass restarts from it.
	orr v20.16b,v0.16b,v0.16b
	orr v21.16b,v1.16b,v1.16b
	ld1 {v18.4s,v19.4s},[x0]
Loop_hw:
	sub x2,x2,#1
	// rev32 v4.16b,v4.16b
	// rev32 v5.16b,v5.16b
	// rev32 v6.16b,v6.16b
	// rev32 v7.16b,v7.16b
	ld1 {v4.16b,v5.16b},[x1]
	orr v0.16b,v20.16b,v20.16b
	orr v1.16b,v21.16b,v21.16b
	.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
	.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
	ld1 {v16.4s},[x3],#16
	.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
	.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v6.4s
	.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
	ld1 {v16.4s},[x3],#16
	add v17.4s,v17.4s,v7.4s
	.long 0x5e282887 //sha256su0 v7.16b,v4.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v4.4s
	.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
	ld1 {v16.4s},[x3],#16
	add v17.4s,v17.4s,v5.4s
	.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v6.4s
	.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
	ld1 {v16.4s},[x3],#16
	add v17.4s,v17.4s,v7.4s
	.long 0x5e282887 //sha256su0 v7.16b,v4.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v4.4s
	.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
	ld1 {v16.4s},[x3],#16
	add v17.4s,v17.4s,v5.4s
	.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v6.4s
	.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
	ld1 {v16.4s},[x3],#16
	add v17.4s,v17.4s,v7.4s
	.long 0x5e282887 //sha256su0 v7.16b,v4.16b
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
	// Final rounds of the first block: no more schedule expansion needed.
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v4.4s
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	add v17.4s,v17.4s,v5.4s
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	ld1 {v17.4s},[x3],#16
	add v16.4s,v16.4s,v6.4s
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	add v17.4s,v17.4s,v7.4s
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
	.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
	add v6.4s,v0.4s,v18.4s
	add v7.4s,v1.4s,v19.4s
	// Second compression (padding/length block): the message schedule is
	// constant, so its additions are pre-folded into the second half of
	// K2564 (the rows marked //64B) and only h/h2 rounds are issued.
	//64B
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v6.16b,v6.16b
	orr v0.16b,v6.16b,v6.16b
	orr v1.16b,v7.16b,v7.16b
	//.long 0x5e1040e0 //sha256h v0.16b,v7.16b,v16.4s
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	// 14 identical round groups follow (K rows 2..15 of the second half).
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	ld1 {v16.4s},[x3],#16
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	// Last K row, then rewind x3 to K row 2 for the next iteration
	// (rows 0 and 1 were consumed by the hoisted prefix above).
	ld1 {v16.4s},[x3]
	sub x3,x3,#128*4-48 // rewind
	orr v2.16b,v0.16b,v0.16b
	.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
	.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
	add v6.4s,v0.4s,v6.4s
	add v7.4s,v1.4s,v7.4s
	cbnz x2,Loop_hw
	st1 {v6.4s,v7.4s},[x0]
	ldr x29,[sp],#16
	ret

// Round-constant table: first 16 rows are the standard SHA-256 K values,
// followed by the pre-folded constants for the fixed second block.
K2564:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0xC28A2F98,0x71374491,0xB5C0FBCF,0xE9B5DBA5 //64B .long 0x3956C25B,0x59F111F1,0x923F82A4,0xAB1C5ED5 .long 0xD807AA98,0x12835B01,0x243185BE,0x550C7DC3 .long 0x72BE5D74,0x80DEB1FE,0x9BDC06A7,0xC19BF374 .long 0x649B69C1,0xF0FE4786,0x0FE1EDC6,0x240CF254 .long 0x4FE9346F,0x6CC984BE,0x61B9411E,0x16F988FA .long 0xF2C65152,0xA88E5A6D,0xB019FC65,0xB9D99EC7 .long 0x9A1231C3,0xE70EEAA0,0xFDB1232B,0xC7353EB0 .long 0x3069BAD5,0xCB976D5F,0x5A0F118F,0xDC1EEEFD .long 0x0A35B689,0xDE0B7A04,0x58F4CA9D,0xE15D5B16 .long 0x007F3E86,0x37088980,0xA507EA32,0x6FAB9537 .long 0x17406110,0x0D8CD6F1,0xCDAA3B6D,0xC0BBBE37 .long 0x83613BDA,0xDB48A363,0x0B02E931,0x6FD15CA7 .long 0x521AFACA,0x31338431,0x6ED41A95,0x6D437890 .long 0xC39C91F2,0x9ECCABBD,0xB5C9A0E6,0x532FB63C .long 0xD2C741C6,0x07237EA3,0xA4954B68,0x4C191D76 .long 0 //terminator ================================================ FILE: apps/arweave/c_src/vdf/vdf.cpp ================================================ #include #include #include #include #include #include #include #include "vdf.h" extern "C" { struct vdf_sha_thread_arg { unsigned char* saltBuffer; unsigned char* seed; unsigned char* outCheckpoint; int checkpointCount; int skipCheckpointCount; int hashingIterations; unsigned char* out; }; struct vdf_sha_verify_thread_arg; class vdf_verify_job { public: unsigned char* startSaltBuffer; unsigned char* seed; unsigned char* inCheckpointSha; unsigned char* outCheckpointSha; unsigned char* inCheckpointRandomx; unsigned char* resetStepNumberBin256; unsigned char* resetSeed; int checkpointCount; int skipCheckpointCount; int hashingIterationsSha; int hashingIterationsRandomx; std::vector _vdf_sha_verify_thread_arg_list; volatile bool 
verifyRes; std::mutex lock; }; struct vdf_sha_verify_thread_arg { std::thread* thread; volatile bool in_progress; vdf_verify_job* job; int checkpointIdx; }; //////////////////////////////////////////////////////////////////////////////////////////////////// // SHA //////////////////////////////////////////////////////////////////////////////////////////////////// // NOTE saltBuffer is mutable in progress void _vdf_sha2(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations) { unsigned char tempOut[VDF_SHA_HASH_SIZE]; // 2 different branches for different optimisation cases if (skipCheckpointCount == 0) { for(int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) { unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE*(checkpointIdx-1)); unsigned char* locOut = checkpointIdx == checkpointCount ? out : (outCheckpoint + VDF_SHA_HASH_SIZE*checkpointIdx); { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, locIn, VDF_SHA_HASH_SIZE); // -1 memcpy SHA256_Final(tempOut, &sha256); } for(int i = 2; i < hashingIterations; i++) { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, tempOut, VDF_SHA_HASH_SIZE); SHA256_Final(tempOut, &sha256); } { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, tempOut, VDF_SHA_HASH_SIZE); SHA256_Final(locOut, &sha256); } long_add(saltBuffer, 1); } } else { for(int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) { unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE*(checkpointIdx-1)); unsigned char* locOut = checkpointIdx == checkpointCount ? 
out : (outCheckpoint + VDF_SHA_HASH_SIZE*checkpointIdx); { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, locIn, VDF_SHA_HASH_SIZE); // -1 memcpy SHA256_Final(tempOut, &sha256); } // 1 skip on start for(int i = 1; i < hashingIterations; i++) { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, tempOut, VDF_SHA_HASH_SIZE); SHA256_Final(tempOut, &sha256); } long_add(saltBuffer, 1); for(int j = 1; j < skipCheckpointCount; j++) { // no skips for(int i = 0; i < hashingIterations; i++) { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, tempOut, VDF_SHA_HASH_SIZE); SHA256_Final(tempOut, &sha256); } long_add(saltBuffer, 1); } // 1 skip on end for(int i = 1; i < hashingIterations; i++) { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, tempOut, VDF_SHA_HASH_SIZE); SHA256_Final(tempOut, &sha256); } { SHA256_CTX sha256; SHA256_Init(&sha256); SHA256_Update(&sha256, saltBuffer, SALT_SIZE); SHA256_Update(&sha256, tempOut, VDF_SHA_HASH_SIZE); SHA256_Final(locOut, &sha256); } long_add(saltBuffer, 1); } } } // use // unsigned char out[VDF_SHA_HASH_SIZE]; // unsigned char* outCheckpoint = (unsigned char*)malloc(checkpointCount*VDF_SHA_HASH_SIZE); // free(outCheckpoint); // for call void vdf_sha2(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations) { unsigned char saltBufferStack[SALT_SIZE]; // ensure 1 L1 cache page used // no access to heap, except of 0-iteration memcpy(saltBufferStack, saltBuffer, SALT_SIZE); _vdf_sha2(saltBufferStack, seed, out, outCheckpoint, checkpointCount, skipCheckpointCount, hashingIterations); } //////////////////////////////////////////////////////////////////////////////////////////////////// // 
Verify SHA //////////////////////////////////////////////////////////////////////////////////////////////////// void _vdf_sha_verify_thread(vdf_sha_verify_thread_arg* _arg) { vdf_sha_verify_thread_arg* arg = _arg; while(true) { if (!arg->job->verifyRes) { return; } unsigned char expdOut[VDF_SHA_HASH_SIZE]; unsigned char* in = arg->checkpointIdx == 0 ? arg->job->seed : (arg->job->inCheckpointSha + (arg->checkpointIdx-1)*VDF_SHA_HASH_SIZE); unsigned char* out = arg->job->inCheckpointSha + arg->checkpointIdx*VDF_SHA_HASH_SIZE; unsigned char* outFullCheckpoint = arg->job->outCheckpointSha + arg->checkpointIdx*(1+arg->job->skipCheckpointCount)*VDF_SHA_HASH_SIZE; unsigned char saltBuffer[SALT_SIZE]; memcpy(saltBuffer, arg->job->startSaltBuffer, SALT_SIZE); long_add(saltBuffer, arg->checkpointIdx*(1+arg->job->skipCheckpointCount)); // unrolled for memcpy inject // _vdf_sha2(saltBuffer, in, expdOut, NULL, 0, arg->job->skipCheckpointCount, arg->job->hashingIterationsSha); // do not rewrite in unsigned char inCopy[VDF_SHA_HASH_SIZE]; memcpy(inCopy, in, VDF_SHA_HASH_SIZE); for(int i=0;i<=arg->job->skipCheckpointCount;i++) { _vdf_sha2(saltBuffer, inCopy, expdOut, NULL, 0, 0, arg->job->hashingIterationsSha); memcpy(outFullCheckpoint, expdOut, VDF_SHA_HASH_SIZE); outFullCheckpoint += VDF_SHA_HASH_SIZE; memcpy(inCopy, expdOut, VDF_SHA_HASH_SIZE); // NOTE long_add included } // 0 == equal if (0 != memcmp(expdOut, out, VDF_SHA_HASH_SIZE)) { arg->job->verifyRes = false; return; } { const std::lock_guard lock(arg->job->lock); bool found = false; for(int i=arg->checkpointIdx+1;ijob->checkpointCount;i++) { vdf_sha_verify_thread_arg* new_arg = &arg->job->_vdf_sha_verify_thread_arg_list[i]; if (!new_arg->in_progress) { new_arg->in_progress = true; arg = new_arg; found = true; break; } } if (!found) break; } } // TODO steal job from other hash function } void reset_mix(unsigned char* res, unsigned char* prevOutput, unsigned char* resetSeed) { SHA256_CTX sha256; SHA256_Init(&sha256); 
SHA256_Update(&sha256, prevOutput, VDF_SHA_HASH_SIZE); SHA256_Update(&sha256, resetSeed, VDF_SHA_HASH_SIZE); SHA256_Final(res, &sha256); } bool fast_rev_cmp256(unsigned char* a, unsigned char* b) { for(int i=31; i>=0; i--) { if (a[i] != b[i]) return false; } return true; } void _vdf_sha_verify_with_reset_thread(vdf_sha_verify_thread_arg* _arg) { vdf_sha_verify_thread_arg* arg = _arg; while(true) { if (!arg->job->verifyRes) { return; } unsigned char expdOut[VDF_SHA_HASH_SIZE]; unsigned char* in = arg->checkpointIdx == 0 ? arg->job->seed : (arg->job->inCheckpointSha + (arg->checkpointIdx-1)*VDF_SHA_HASH_SIZE); unsigned char* out = arg->job->inCheckpointSha + arg->checkpointIdx*VDF_SHA_HASH_SIZE; unsigned char* outFullCheckpoint = arg->job->outCheckpointSha + arg->checkpointIdx*(1+arg->job->skipCheckpointCount)*VDF_SHA_HASH_SIZE; unsigned char saltBuffer[SALT_SIZE]; memcpy(saltBuffer, arg->job->startSaltBuffer, SALT_SIZE); long_add(saltBuffer, arg->checkpointIdx*(1+arg->job->skipCheckpointCount)); // unrolled for memcpy inject and reset_mix // _vdf_sha2(saltBuffer, in, expdOut, NULL, 0, arg->job->skipCheckpointCount, arg->job->hashingIterationsSha); // do not rewrite in unsigned char inCopy[VDF_SHA_HASH_SIZE]; memcpy(inCopy, in, VDF_SHA_HASH_SIZE); if (fast_rev_cmp256(saltBuffer, arg->job->resetStepNumberBin256)) { reset_mix(inCopy, inCopy, arg->job->resetSeed); } for(int i=0;i<=arg->job->skipCheckpointCount;i++) { _vdf_sha2(saltBuffer, inCopy, expdOut, NULL, 0, 0, arg->job->hashingIterationsSha); memcpy(outFullCheckpoint, expdOut, VDF_SHA_HASH_SIZE); outFullCheckpoint += VDF_SHA_HASH_SIZE; memcpy(inCopy, expdOut, VDF_SHA_HASH_SIZE); if (fast_rev_cmp256(saltBuffer, arg->job->resetStepNumberBin256)) { reset_mix(inCopy, inCopy, arg->job->resetSeed); } // NOTE long_add included } // 0 == equal if (0 != memcmp(expdOut, out, VDF_SHA_HASH_SIZE)) { arg->job->verifyRes = false; return; } { const std::lock_guard lock(arg->job->lock); bool found = false; for(int 
i=arg->checkpointIdx+1;ijob->checkpointCount;i++) { vdf_sha_verify_thread_arg* new_arg = &arg->job->_vdf_sha_verify_thread_arg_list[i]; if (!new_arg->in_progress) { new_arg->in_progress = true; arg = new_arg; found = true; break; } } if (!found) break; } } // TODO steal job from other hash function } bool vdf_parallel_sha_verify_with_reset(unsigned char* startSaltBuffer, unsigned char* seed, int checkpointCount, int skipCheckpointCount, int hashingIterations, unsigned char* inRes, unsigned char* inCheckpoint, unsigned char* outCheckpoint, unsigned char* resetStepNumberBin256, unsigned char* resetSeed, int maxThreadCount) { int freeThreadCount = maxThreadCount; vdf_verify_job job; job.startSaltBuffer = startSaltBuffer; job.seed = seed; job.inCheckpointSha = inCheckpoint; job.outCheckpointSha = outCheckpoint; job.checkpointCount = checkpointCount; job.skipCheckpointCount = skipCheckpointCount; job.hashingIterationsSha = hashingIterations; job.resetStepNumberBin256 = resetStepNumberBin256; job.resetSeed = resetSeed; job.verifyRes = true; job._vdf_sha_verify_thread_arg_list .resize(checkpointCount); for (int checkpointIdx=0;checkpointIdxcheckpointIdx = checkpointIdx; _vdf_sha_verify_thread_arg ->thread = NULL; _vdf_sha_verify_thread_arg ->in_progress = false; _vdf_sha_verify_thread_arg ->job = &job; } for (int checkpointIdx=0;checkpointIdx 0) { freeThreadCount--; const std::lock_guard lock(job.lock); _vdf_sha_verify_thread_arg->in_progress = true; _vdf_sha_verify_thread_arg->thread = new std::thread(_vdf_sha_verify_with_reset_thread, _vdf_sha_verify_thread_arg); } if (freeThreadCount == 0) break; } if (job.verifyRes) { unsigned char expdOut[VDF_SHA_HASH_SIZE]; unsigned char* sha_temp_result; if (checkpointCount == 0) { sha_temp_result = seed; } else { sha_temp_result = inCheckpoint + (checkpointCount-1)*VDF_SHA_HASH_SIZE; } unsigned char finalSaltBuffer[SALT_SIZE]; memcpy(finalSaltBuffer, startSaltBuffer, SALT_SIZE); long_add(finalSaltBuffer, 
checkpointCount*(1+skipCheckpointCount)); unsigned char* outFullCheckpoint = outCheckpoint + checkpointCount*(1+skipCheckpointCount)*VDF_SHA_HASH_SIZE; // unrolled for memcpy inject // _vdf_sha2(finalSaltBuffer, sha_temp_result, expdOut, NULL, 0, skipCheckpointCount, hashingIterations); // do not rewrite in unsigned char inCopy[VDF_SHA_HASH_SIZE]; memcpy(inCopy, sha_temp_result, VDF_SHA_HASH_SIZE); if (fast_rev_cmp256(finalSaltBuffer, resetStepNumberBin256)) { reset_mix(inCopy, inCopy, resetSeed); } for(int i=0;i<=skipCheckpointCount;i++) { _vdf_sha2(finalSaltBuffer, inCopy, expdOut, NULL, 0, 0, hashingIterations); memcpy(outFullCheckpoint, expdOut, VDF_SHA_HASH_SIZE); outFullCheckpoint += VDF_SHA_HASH_SIZE; memcpy(inCopy, expdOut, VDF_SHA_HASH_SIZE); if (fast_rev_cmp256(finalSaltBuffer, resetStepNumberBin256)) { reset_mix(inCopy, inCopy, resetSeed); } // NOTE long_add included } if (0 != memcmp(expdOut, inRes, VDF_SHA_HASH_SIZE)) { job.verifyRes = false; } } for (int checkpointIdx=0;checkpointIdxthread) { _vdf_sha_verify_thread_arg->thread->join(); free(_vdf_sha_verify_thread_arg->thread); } } return job.verifyRes; } } ================================================ FILE: apps/arweave/c_src/vdf/vdf.h ================================================ #ifndef VDF_H #define VDF_H #include const int SALT_SIZE = 32; const int VDF_SHA_HASH_SIZE = 32; static inline void long_add(unsigned char* saltBuffer, int checkpointIdx) { unsigned int acc = checkpointIdx; // big endian from erlang for(int i=SALT_SIZE-1;i>=0;i--) { unsigned int value = saltBuffer[i]; value += acc; saltBuffer[i] = value & 0xFF; acc = value >> 8; if (acc == 0) break; } } #if defined(__cplusplus) extern "C" { #endif // out checkpoint should return all checkpoints including skipCheckpointCount void vdf_sha2(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations); void vdf_sha2_fused_x86(unsigned 
char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations); void vdf_sha2_fused_arm(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations); void vdf_sha2_hiopt_arm(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations); bool vdf_parallel_sha_verify_with_reset(unsigned char* startSaltBuffer, unsigned char* seed, int checkpointCount, int skipCheckpointCount, int hashingIterations, unsigned char* inRes, unsigned char* inCheckpoint, unsigned char* outCheckpoint, unsigned char* resetSalt, unsigned char* resetSeed, int maxThreadCount); #if defined(__cplusplus) } #endif #endif ================================================ FILE: apps/arweave/c_src/vdf/vdf_fused_arm.cpp ================================================ #include #include #include "vdf.h" #if defined(__aarch64__) || defined(__arm__) #include #include static const uint32_t K[] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; void sha2_p2_32_32_rev_norm (unsigned char *output, 
const unsigned char *input1, const unsigned char *input2) { uint32_t state[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE; uint32x4_t MSG0, MSG1, MSG2, MSG3; uint32x4_t TMP0, TMP2; // Load state STATE0 = vld1q_u32(&state[0]); STATE1 = vld1q_u32(&state[4]); // Save current state ABEF_SAVE = STATE0; CDGH_SAVE = STATE1; // Load input1 (32 bytes) and input2 (32 bytes) into two message blocks // These constitute our 64-byte block MSG0 = vld1q_u32((const uint32_t *)(input1 + 0)); MSG1 = vld1q_u32((const uint32_t *)(input1 + 16)); MSG2 = vld1q_u32((const uint32_t *)(input2 + 0)); MSG3 = vld1q_u32((const uint32_t *)(input2 + 16)); // Adjust endianness MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0))); MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1))); MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2))); MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3))); // Rounds 1-4 TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); // Rounds 5-8 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); // Rounds 9-12 TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); // Rounds 13-16 TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); 
// Rounds 17-20 TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); // Rounds 21-24 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); // Rounds 25-28 TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); // Rounds 29-32 TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); // Rounds 33-36 TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); // Rounds 37-40 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); // Rounds 41-44 TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); // Rounds 45-48 TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); // Rounds 49-52 TMP0 = vaddq_u32(MSG0, 
vld1q_u32(&K[48])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); // Rounds 53-56 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); // Rounds 57-60 TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); // Rounds 61-64 TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); // Update state STATE0 = vaddq_u32(STATE0, ABEF_SAVE); STATE1 = vaddq_u32(STATE1, CDGH_SAVE); // Now we need to process the padding block // For a 64-byte input (32+32), the padding block consists of 0x80 followed by zeros // and the 64-bit length (512 bits) // TODO merge with endian fixes uint8_t padding[64] = {0}; padding[0] = 0x80; // Padding start marker // Set the 64-bit length value (512 bits = 0x0200) padding[56] = 0x00; padding[57] = 0x00; padding[58] = 0x00; padding[59] = 0x00; padding[60] = 0x00; padding[61] = 0x00; padding[62] = 0x02; padding[63] = 0x00; // Save current state ABEF_SAVE = STATE0; CDGH_SAVE = STATE1; // Load padding block MSG0 = vld1q_u32((const uint32_t *)(padding + 0)); MSG1 = vld1q_u32((const uint32_t *)(padding + 16)); MSG2 = vld1q_u32((const uint32_t *)(padding + 32)); MSG3 = vld1q_u32((const uint32_t *)(padding + 48)); MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0))); MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1))); MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2))); MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3))); // Rounds 1-4 TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); 
// Rounds 5-8 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); // Rounds 9-12 TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); // Rounds 13-16 TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); // Rounds 17-20 TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); // Rounds 21-24 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0); // Rounds 25-28 TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1); // Rounds 29-32 TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2); // Rounds 33-36 TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3); // Rounds 37-40 TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); 
/* Tail of the preceding two-block SHA-256 function (definition starts above this
 * chunk): completion of rounds 37-40, rounds 41-64, feed-forward, digest store. */
TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
/* Rounds 41-44 */
TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
/* Rounds 45-48 */
TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
/* Rounds 49-52 (no more message-schedule updates from here on) */
TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
/* Rounds 53-56 */
TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
/* Rounds 57-60 */
TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
/* Rounds 61-64 */
TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
/* Update state (Davies-Meyer feed-forward) */
STATE0 = vaddq_u32(STATE0, ABEF_SAVE); STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
/* write 32-bit words little-endian */
vst1q_u32((uint32_t *)output, STATE0); vst1q_u32((uint32_t *)(output+16), STATE1); }

/*
 * sha2_p2_32_32_norm_loop
 * -----------------------
 * Iterated SHA-256 over a fixed 64-byte message using the ARMv8 SHA-256
 * crypto-extension intrinsics, fully unrolled (two compressions per
 * iteration: the data block plus the constant padding block for a 64-byte
 * message).  Each iteration's message is:
 *   - saltBuffer (32 raw bytes; byte-swapped into words via vrev32q), and
 *   - the previous iteration's 32-byte digest, carried in MSG2/MSG3 as hash
 *     words (seeded from tempOut, which already holds words as stored by
 *     the producer functions -- hence no byte swap for that half).
 * The final digest words are written back to tempOut in native word order
 * ("norm" layout), NOT canonical big-endian bytes.
 */
void sha2_p2_32_32_norm_loop(unsigned char *tempOut, const unsigned char *saltBuffer, int iterations) {
	/* SHA-256 initial hash values (FIPS 180-4) */
	uint32_t state[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
	uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
	uint32x4_t MSG0, MSG1, MSG2, MSG3;
	uint32x4_t TMP0, TMP2;
	/* Seed the digest-feedback half of the message from tempOut (word form). */
	MSG2 = vld1q_u32((const uint32_t *)(tempOut + 0));
	MSG3 = vld1q_u32((const uint32_t *)(tempOut + 16));
	for (int i = 0; i < iterations; ++i) {
		STATE0 = vld1q_u32(&state[0]);
		STATE1 = vld1q_u32(&state[4]);
		/* Save current state */
		ABEF_SAVE = STATE0;
		CDGH_SAVE = STATE1;
		/* Load input1 (32 bytes) and input2 (32 bytes) into two message blocks */
		/* These constitute our 64-byte block */
		MSG0 = vld1q_u32((const uint32_t *)(saltBuffer + 0));
		MSG1 = vld1q_u32((const uint32_t *)(saltBuffer + 16));
		/* Adjust endianness (only the raw-byte half; MSG2/MSG3 are already words) */
		MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
		MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
		/* Rounds 1-4 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
		/* Rounds 5-8 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
		/* Rounds 9-12 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
		/* Rounds 13-16 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
		/* Rounds 17-20 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
		/* Rounds 21-24 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
		/* Rounds 25-28 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
		/* Rounds 29-32 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
		/* Rounds 33-36 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
		/* Rounds 37-40 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
		/* Rounds 41-44 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
		/* Rounds 45-48 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
		/* Rounds 49-52 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Rounds 53-56 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Rounds 57-60 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Rounds 61-64 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Update state */
		STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
		STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
		/* Now we need to process the padding block */
		/* For a 64-byte input (32+32), the padding block consists of 0x80 followed by zeros */
		/* and the 64-bit length (512 bits) */
		/* TODO merge with endian fixes */
		uint8_t padding[64] = {0};
		padding[0] = 0x80; /* Padding start marker */
		/* Set the 64-bit length value (512 bits = 0x0200); bytes 56-61 and 63 are
		   already zero from the aggregate initializer -- stores kept for clarity */
		padding[56] = 0x00; padding[57] = 0x00; padding[58] = 0x00; padding[59] = 0x00; padding[60] = 0x00; padding[61] = 0x00; padding[62] = 0x02; padding[63] = 0x00;
		/* Save current state */
		ABEF_SAVE = STATE0;
		CDGH_SAVE = STATE1;
		/* Load padding block */
		MSG0 = vld1q_u32((const uint32_t *)(padding + 0));
		MSG1 = vld1q_u32((const uint32_t *)(padding + 16));
		MSG2 = vld1q_u32((const uint32_t *)(padding + 32));
		MSG3 = vld1q_u32((const uint32_t *)(padding + 48));
		MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
		MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
		MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
		MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
		/* Rounds 1-4 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
		/* Rounds 5-8 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
		/* Rounds 9-12 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
		/* Rounds 13-16 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
		/* Rounds 17-20 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
		/* Rounds 21-24 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
		/* Rounds 25-28 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
		/* Rounds 29-32 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
		/* Rounds 33-36 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
		/* Rounds 37-40 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
		/* Rounds 41-44 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
		/* Rounds 45-48 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
		/* Rounds 49-52 */
		TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Rounds 53-56 */
		TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Rounds 57-60 */
		TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Rounds 61-64 */
		TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
		/* Update state -- result lands in MSG2/MSG3 so it feeds the next
		   iteration's message directly (word form, no byte swap needed) */
		MSG2 = vaddq_u32(STATE0, ABEF_SAVE);
		MSG3 = vaddq_u32(STATE1, CDGH_SAVE);
	}
	/* Persist the final digest words back to tempOut ("norm" word layout). */
	vst1q_u32((uint32_t *)tempOut, MSG2);
	vst1q_u32((uint32_t *)(tempOut+16), MSG3);
}

/*
 * sha2_p2_32_32_norm_rev
 * ----------------------
 * One SHA-256 of a 64-byte message: input1 (32 raw bytes, byte-swapped into
 * words below) followed by input2 (32 bytes already in hash-word order --
 * note only MSG0/MSG1 get the vrev32 treatment).  Unlike *_norm_loop, the
 * digest is serialized to the canonical big-endian SHA-256 byte layout.
 */
void sha2_p2_32_32_norm_rev (unsigned char *output, const unsigned char *input1, const unsigned char *input2) {
	/* SHA-256 initial hash values (FIPS 180-4) */
	uint32_t state[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
	uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
	uint32x4_t MSG0, MSG1, MSG2, MSG3;
	uint32x4_t TMP0, TMP2;
	/* Load state */
	STATE0 = vld1q_u32(&state[0]);
	STATE1 = vld1q_u32(&state[4]);
	/* Save current state */
	ABEF_SAVE = STATE0;
	CDGH_SAVE = STATE1;
	/* Load input1 (32 bytes) and input2 (32 bytes) into two message blocks */
	/* These constitute our 64-byte block */
	MSG0 = vld1q_u32((const uint32_t *)(input1 + 0));
	MSG1 = vld1q_u32((const uint32_t *)(input1 + 16));
	MSG2 = vld1q_u32((const uint32_t *)(input2 + 0));
	MSG3 = vld1q_u32((const uint32_t *)(input2 + 16));
	/* Adjust endianness (input2 is already in word order, so only MSG0/MSG1) */
	MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
	MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
	/* Rounds 1-4 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
	/* Rounds 5-8 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
	/* Rounds 9-12 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
	/* Rounds 13-16 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
	/* Rounds 17-20 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
	/* Rounds 21-24 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
	/* Rounds 25-28 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
	/* Rounds 29-32 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
	/* Rounds 33-36 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
	/* Rounds 37-40 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
	/* Rounds 41-44 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
	/* Rounds 45-48 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
	/* Rounds 49-52 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Rounds 53-56 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Rounds 57-60 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Rounds 61-64 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Update state */
	STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
	STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
	/* Now we need to process the padding block */
	/* For a 64-byte input (32+32), the padding block consists of 0x80 followed by zeros */
	/* and the 64-bit length (512 bits) */
	/* TODO merge with endian fixes */
	uint8_t padding[64] = {0};
	padding[0] = 0x80; /* Padding start marker */
	/* Set the 64-bit length value (512 bits = 0x0200) */
	padding[56] = 0x00; padding[57] = 0x00; padding[58] = 0x00; padding[59] = 0x00; padding[60] = 0x00; padding[61] = 0x00; padding[62] = 0x02; padding[63] = 0x00;
	/* Save current state */
	ABEF_SAVE = STATE0;
	CDGH_SAVE = STATE1;
	/* Load padding block */
	MSG0 = vld1q_u32((const uint32_t *)(padding + 0));
	MSG1 = vld1q_u32((const uint32_t *)(padding + 16));
	MSG2 = vld1q_u32((const uint32_t *)(padding + 32));
	MSG3 = vld1q_u32((const uint32_t *)(padding + 48));
	MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
	MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
	MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
	MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
	/* Rounds 1-4 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
	/* Rounds 5-8 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[4])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
	/* Rounds 9-12 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[8])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
	/* Rounds 13-16 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[12])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
	/* Rounds 17-20 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[16])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
	/* Rounds 21-24 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[20])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
	/* Rounds 25-28 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[24])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
	/* Rounds 29-32 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[28])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
	/* Rounds 33-36 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[32])); TMP2 = STATE0; MSG0 = vsha256su0q_u32(MSG0, MSG1); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
	/* Rounds 37-40 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[36])); TMP2 = STATE0; MSG1 = vsha256su0q_u32(MSG1, MSG2); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
	/* Rounds 41-44 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[40])); TMP2 = STATE0; MSG2 = vsha256su0q_u32(MSG2, MSG3); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
	/* Rounds 45-48 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[44])); TMP2 = STATE0; MSG3 = vsha256su0q_u32(MSG3, MSG0); STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
	/* Rounds 49-52 */
	TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[48])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Rounds 53-56 */
	TMP0 = vaddq_u32(MSG1, vld1q_u32(&K[52])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Rounds 57-60 */
	TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[56])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Rounds 61-64 */
	TMP0 = vaddq_u32(MSG3, vld1q_u32(&K[60])); TMP2 = STATE0; STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
	/* Update state */
	STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
	STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
	/* big-endian, canonical SHA-256 layout */
	uint32_t tmp[8];
	vst1q_u32(&tmp[0], STATE0);
	vst1q_u32(&tmp[4], STATE1);
	for (int i = 0; i < 8; i++) { output[(i<<2)+0] = (uint8_t)(tmp[i] >> 24); output[(i<<2)+1] = (uint8_t)(tmp[i] >> 16); output[(i<<2)+2] = (uint8_t)(tmp[i] >> 8); output[(i<<2)+3] = (uint8_t)(tmp[i] ); } }

/*
 * _vdf_sha2_fused_arm
 * -------------------
 * VDF checkpoint chain.  For each checkpoint the previous digest (or seed)
 * is hashed repeatedly; intermediate results land in outCheckpoint and the
 * final digest in out.  The leading *_rev_norm call and the trailing
 * *_norm_rev call each perform one hash iteration, so the inner *_norm_loop
 * runs hashingIterations-2 (or -1 around skipped segments).  saltBuffer is
 * advanced with long_add after each hashing segment.
 */
void _vdf_sha2_fused_arm(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations) {
	unsigned char tempOut[VDF_SHA_HASH_SIZE];
	/* 2 different branches for different optimisation cases */
	if (skipCheckpointCount == 0) {
		for(int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) {
			unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE*(checkpointIdx-1));
			unsigned char* locOut = checkpointIdx == checkpointCount ? out : (outCheckpoint + VDF_SHA_HASH_SIZE*checkpointIdx);
			sha2_p2_32_32_rev_norm(tempOut, saltBuffer, locIn);
			sha2_p2_32_32_norm_loop(tempOut, saltBuffer, hashingIterations-2);
			sha2_p2_32_32_norm_rev(locOut, saltBuffer, tempOut);
			long_add(saltBuffer, 1);
		}
	} else {
		for(int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) {
			unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE*(checkpointIdx-1));
			unsigned char* locOut = checkpointIdx == checkpointCount ?
out : (outCheckpoint + VDF_SHA_HASH_SIZE*checkpointIdx); sha2_p2_32_32_rev_norm(tempOut, saltBuffer, locIn); // 1 skip on start sha2_p2_32_32_norm_loop(tempOut, saltBuffer, hashingIterations-1); long_add(saltBuffer, 1); for(int j = 1; j < skipCheckpointCount; j++) { // no skips sha2_p2_32_32_norm_loop(tempOut, saltBuffer, hashingIterations); long_add(saltBuffer, 1); } // 1 skip on end sha2_p2_32_32_norm_loop(tempOut, saltBuffer, hashingIterations-1); sha2_p2_32_32_norm_rev(locOut, saltBuffer, tempOut); long_add(saltBuffer, 1); } } } void vdf_sha2_fused_arm(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations) { unsigned char saltBufferStack[SALT_SIZE]; // ensure 1 L1 cache page used // no access to heap, except of 0-iteration memcpy(saltBufferStack, saltBuffer, SALT_SIZE); _vdf_sha2_fused_arm(saltBufferStack, seed, out, outCheckpoint, checkpointCount, skipCheckpointCount, hashingIterations); } #endif ================================================ FILE: apps/arweave/c_src/vdf/vdf_fused_x86.cpp ================================================ #include #include #include "vdf.h" #if defined(__x86_64__) || defined(__i386__) #include // NOTE spaces here, because tabs are much more difficult for backslash alignment #define sha256_compress1(state, data) { \ __m128i STATE0, STATE1, ABEF_SAVE, CDGH_SAVE; \ __m128i MSG, TMP, MASK; \ __m128i TMSG0, TMSG1, TMSG2, TMSG3; \ \ \ TMP = _mm_loadu_si128((const __m128i*) &state[0]); \ STATE1= _mm_loadu_si128((const __m128i*) &state[4]); \ \ MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL); \ \ \ TMP = _mm_shuffle_epi32(TMP, 0xB1); \ STATE1= _mm_shuffle_epi32(STATE1, 0x1B); \ STATE0= _mm_alignr_epi8(TMP, STATE1, 8); \ STATE1= _mm_blend_epi16(STATE1, TMP, 0xF0); \ \ \ { \ \ ABEF_SAVE = STATE0; \ CDGH_SAVE = STATE1; \ \ \ MSG = _mm_loadu_si128((const __m128i*) (data + 0)); \ TMSG0 = _mm_shuffle_epi8(MSG, 
MASK); \
    \
    /* Rounds 0-3 (K constants embedded as 64-bit immediates) */ \
    MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, \
        0x71374491428A2F98ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    \
    /* Rounds 4-7 */ \
    TMSG1 = _mm_loadu_si128((const __m128i*) (data + 16)); \
    TMSG1 = _mm_shuffle_epi8(TMSG1, MASK); \
    MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, \
        0x59F111F13956C25BULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG0= _mm_sha256msg1_epu32(TMSG0, TMSG1); \
    \
    /* Rounds 8-11 */ \
    TMSG2 = _mm_loadu_si128((const __m128i*) (data + 32)); \
    TMSG2 = _mm_shuffle_epi8(TMSG2, MASK); \
    MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, \
        0x12835B01D807AA98ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG1= _mm_sha256msg1_epu32(TMSG1, TMSG2); \
    \
    /* Rounds 12-15 (message schedule begins: alignr + msg2) */ \
    TMSG3 = _mm_loadu_si128((const __m128i*) (data + 48)); \
    TMSG3 = _mm_shuffle_epi8(TMSG3, MASK); \
    MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, \
        0x80DEB1FE72BE5D74ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); \
    TMSG0= _mm_add_epi32(TMSG0, TMP); \
    TMSG0= _mm_sha256msg2_epu32(TMSG0, TMSG3); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG2= _mm_sha256msg1_epu32(TMSG2, TMSG3); \
    \
    /* Rounds 16-19 */ \
    MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, \
        0xEFBE4786E49B69C1ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); \
    TMSG1= _mm_add_epi32(TMSG1, TMP); \
    TMSG1= _mm_sha256msg2_epu32(TMSG1, TMSG0); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG3= _mm_sha256msg1_epu32(TMSG3, TMSG0); \
    \
    /* Rounds 20-23 */ \
    MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, \
        0x4A7484AA2DE92C6FULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); \
    TMSG2= _mm_add_epi32(TMSG2, TMP); \
    TMSG2= _mm_sha256msg2_epu32(TMSG2, TMSG1); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG0= _mm_sha256msg1_epu32(TMSG0, TMSG1); \
    \
    /* Rounds 24-27 */ \
    MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, \
        0xA831C66D983E5152ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); \
    TMSG3= _mm_add_epi32(TMSG3, TMP); \
    TMSG3= _mm_sha256msg2_epu32(TMSG3, TMSG2); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG1= _mm_sha256msg1_epu32(TMSG1, TMSG2); \
    \
    /* Rounds 28-31 */ \
    MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x1429296706CA6351ULL, \
        0xD5A79147C6E00BF3ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); \
    TMSG0= _mm_add_epi32(TMSG0, TMP); \
    TMSG0= _mm_sha256msg2_epu32(TMSG0, TMSG3); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG2= _mm_sha256msg1_epu32(TMSG2, TMSG3); \
    \
    /* Rounds 32-35 */ \
    MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, \
        0x2E1B213827B70A85ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); \
    TMSG1= _mm_add_epi32(TMSG1, TMP); \
    TMSG1= _mm_sha256msg2_epu32(TMSG1, TMSG0); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG3= _mm_sha256msg1_epu32(TMSG3, TMSG0); \
    \
    /* Rounds 36-39 */ \
    MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, \
        0x766A0ABB650A7354ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); \
    TMSG2= _mm_add_epi32(TMSG2, TMP); \
    TMSG2= _mm_sha256msg2_epu32(TMSG2, TMSG1); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG0= _mm_sha256msg1_epu32(TMSG0, TMSG1); \
    \
    /* Rounds 40-43 */ \
    MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, \
        0xA81A664BA2BFE8A1ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); \
    TMSG3= _mm_add_epi32(TMSG3, TMP); \
    TMSG3= _mm_sha256msg2_epu32(TMSG3, TMSG2); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG1= _mm_sha256msg1_epu32(TMSG1, TMSG2); \
    \
    /* Rounds 44-47 */ \
    MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, \
        0xD6990624D192E819ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); \
    TMSG0= _mm_add_epi32(TMSG0, TMP); \
    TMSG0= _mm_sha256msg2_epu32(TMSG0, TMSG3); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG2= _mm_sha256msg1_epu32(TMSG2, TMSG3); \
    \
    /* Rounds 48-51 */ \
    MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, \
        0x1E376C0819A4C116ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); \
    TMSG1= _mm_add_epi32(TMSG1, TMP); \
    TMSG1= _mm_sha256msg2_epu32(TMSG1, TMSG0); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG3= _mm_sha256msg1_epu32(TMSG3, TMSG0); \
    \
    /* Rounds 52-55 */ \
    MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, \
        0x4ED8AA4A391C0CB3ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); \
    TMSG2= _mm_add_epi32(TMSG2, TMP); \
    TMSG2= _mm_sha256msg2_epu32(TMSG2, TMSG1); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    \
    /* Rounds 56-59 */ \
    MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, \
        0x78A5636F748F82EEULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); \
    TMSG3= _mm_add_epi32(TMSG3, TMP); \
    TMSG3= _mm_sha256msg2_epu32(TMSG3, TMSG2); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    \
    /* Rounds 60-63 */ \
    MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, \
        0xA4506CEB90BEFFFAULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    \
    /* Feed-forward */ \
    STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE); \
    STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE); \
    } \
    \
    /* Undo the ABEF/CDGH register layout and store the state words */ \
    TMP = _mm_shuffle_epi32(STATE0, 0x1B); \
    STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); \
    STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); \
    STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); \
    \
    \
    _mm_storeu_si128((__m128i*) &state[0], STATE0); \
    _mm_storeu_si128((__m128i*) &state[4], STATE1); \
}

/* sha256_compress1_2(state, data1, data2): same SHA-NI compression, but the
 * 64-byte block is supplied as two 32-byte halves (data1 then data2).
 * (Definition continues past this chunk.) */
#define sha256_compress1_2(state, data1, data2) { \
    __m128i STATE0, STATE1, ABEF_SAVE, CDGH_SAVE; \
    __m128i MSG, TMP, MASK; \
    __m128i TMSG0, TMSG1, TMSG2, TMSG3; \
    \
    \
    TMP = _mm_loadu_si128((const __m128i*) &state[0]); \
    STATE1= _mm_loadu_si128((const __m128i*) &state[4]); \
    \
    MASK = _mm_set_epi64x(0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL); \
    \
    \
    TMP = _mm_shuffle_epi32(TMP, 0xB1); \
    STATE1= _mm_shuffle_epi32(STATE1, 0x1B); \
    STATE0= _mm_alignr_epi8(TMP, STATE1, 8); \
    STATE1= _mm_blend_epi16(STATE1, TMP, 0xF0); \
    \
    \
    { \
    \
    ABEF_SAVE = STATE0; \
    CDGH_SAVE = STATE1; \
    \
    \
    MSG = _mm_loadu_si128((const __m128i*) (data1 + 0)); \
    TMSG0 = _mm_shuffle_epi8(MSG, MASK); \
    \
    MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0xE9B5DBA5B5C0FBCFULL, \
        0x71374491428A2F98ULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    \
    \
    TMSG1 = _mm_loadu_si128((const __m128i*) (data1 + 16)); \
    TMSG1 = _mm_shuffle_epi8(TMSG1, MASK); \
    MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0xAB1C5ED5923F82A4ULL, \
        0x59F111F13956C25BULL)); \
    STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \
    MSG = _mm_shuffle_epi32(MSG, 0x0E); \
    STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \
    TMSG0= _mm_sha256msg1_epu32(TMSG0, TMSG1); \
    \
    \
    TMSG2 = _mm_loadu_si128((const __m128i*) (data2 + 0));
\ TMSG2 = _mm_shuffle_epi8(TMSG2, MASK); \ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x550C7DC3243185BEULL, \ 0x12835B01D807AA98ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG1= _mm_sha256msg1_epu32(TMSG1, TMSG2); \ \ \ TMSG3 = _mm_loadu_si128((const __m128i*) (data2 + 16)); \ TMSG3 = _mm_shuffle_epi8(TMSG3, MASK); \ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC19BF1749BDC06A7ULL, \ 0x80DEB1FE72BE5D74ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); \ TMSG0= _mm_add_epi32(TMSG0, TMP); \ TMSG0= _mm_sha256msg2_epu32(TMSG0, TMSG3); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG2= _mm_sha256msg1_epu32(TMSG2, TMSG3); \ \ \ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x240CA1CC0FC19DC6ULL, \ 0xEFBE4786E49B69C1ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); \ TMSG1= _mm_add_epi32(TMSG1, TMP); \ TMSG1= _mm_sha256msg2_epu32(TMSG1, TMSG0); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG3= _mm_sha256msg1_epu32(TMSG3, TMSG0); \ \ \ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x76F988DA5CB0A9DCULL, \ 0x4A7484AA2DE92C6FULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); \ TMSG2= _mm_add_epi32(TMSG2, TMP); \ TMSG2= _mm_sha256msg2_epu32(TMSG2, TMSG1); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG0= _mm_sha256msg1_epu32(TMSG0, TMSG1); \ \ \ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xBF597FC7B00327C8ULL, \ 0xA831C66D983E5152ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); \ TMSG3= _mm_add_epi32(TMSG3, TMP); \ TMSG3= _mm_sha256msg2_epu32(TMSG3, TMSG2); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= 
_mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG1= _mm_sha256msg1_epu32(TMSG1, TMSG2); \ \ \ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x1429296706CA6351ULL, \ 0xD5A79147C6E00BF3ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); \ TMSG0= _mm_add_epi32(TMSG0, TMP); \ TMSG0= _mm_sha256msg2_epu32(TMSG0, TMSG3); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG2= _mm_sha256msg1_epu32(TMSG2, TMSG3); \ \ \ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x53380D134D2C6DFCULL, \ 0x2E1B213827B70A85ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); \ TMSG1= _mm_add_epi32(TMSG1, TMP); \ TMSG1= _mm_sha256msg2_epu32(TMSG1, TMSG0); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG3= _mm_sha256msg1_epu32(TMSG3, TMSG0); \ \ \ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x92722C8581C2C92EULL, \ 0x766A0ABB650A7354ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); \ TMSG2= _mm_add_epi32(TMSG2, TMP); \ TMSG2= _mm_sha256msg2_epu32(TMSG2, TMSG1); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG0= _mm_sha256msg1_epu32(TMSG0, TMSG1); \ \ \ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0xC76C51A3C24B8B70ULL, \ 0xA81A664BA2BFE8A1ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); \ TMSG3= _mm_add_epi32(TMSG3, TMP); \ TMSG3= _mm_sha256msg2_epu32(TMSG3, TMSG2); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG1= _mm_sha256msg1_epu32(TMSG1, TMSG2); \ \ \ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0x106AA070F40E3585ULL, \ 0xD6990624D192E819ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); \ TMSG0= _mm_add_epi32(TMSG0, TMP); \ TMSG0= 
_mm_sha256msg2_epu32(TMSG0, TMSG3); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG2= _mm_sha256msg1_epu32(TMSG2, TMSG3); \ \ \ MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(0x34B0BCB52748774CULL, \ 0x1E376C0819A4C116ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); \ TMSG1= _mm_add_epi32(TMSG1, TMP); \ TMSG1= _mm_sha256msg2_epu32(TMSG1, TMSG0); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ TMSG3= _mm_sha256msg1_epu32(TMSG3, TMSG0); \ \ \ MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(0x682E6FF35B9CCA4FULL, \ 0x4ED8AA4A391C0CB3ULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); \ TMSG2= _mm_add_epi32(TMSG2, TMP); \ TMSG2= _mm_sha256msg2_epu32(TMSG2, TMSG1); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ \ \ MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(0x8CC7020884C87814ULL, \ 0x78A5636F748F82EEULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); \ TMSG3= _mm_add_epi32(TMSG3, TMP); \ TMSG3= _mm_sha256msg2_epu32(TMSG3, TMSG2); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ \ \ MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(0xC67178F2BEF9A3F7ULL, \ 0xA4506CEB90BEFFFAULL)); \ STATE1= _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); \ MSG = _mm_shuffle_epi32(MSG, 0x0E); \ STATE0= _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); \ \ \ STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE); \ STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE); \ } \ \ \ TMP = _mm_shuffle_epi32(STATE0, 0x1B); \ STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); \ STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); \ STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); \ \ \ _mm_storeu_si128((__m128i*) &state[0], STATE0); \ _mm_storeu_si128((__m128i*) &state[4], STATE1); \ } // Optimized sha2_p2_32_32: Computes SHA-256 on 
the concatenation of two 32-byte inputs. #define sha2_p2_32_32(output, input1, input2) { \ \ uint32_t state[8]; \ \ state[0] = 0x6a09e667U; \ state[1] = 0xbb67ae85U; \ state[2] = 0x3c6ef372U; \ state[3] = 0xa54ff53aU; \ state[4] = 0x510e527fU; \ state[5] = 0x9b05688cU; \ state[6] = 0x1f83d9abU; \ state[7] = 0x5be0cd19U; \ \ sha256_compress1_2(state, input1, input2); \ \ uint8_t block2[64] = { 0 }; \ block2[0] = 0x80; \ \ uint64_t bit_len = 64ULL * 8ULL; \ block2[56] = (uint8_t)(bit_len >> 56); \ block2[57] = (uint8_t)(bit_len >> 48); \ block2[58] = (uint8_t)(bit_len >> 40); \ block2[59] = (uint8_t)(bit_len >> 32); \ block2[60] = (uint8_t)(bit_len >> 24); \ block2[61] = (uint8_t)(bit_len >> 16); \ block2[62] = (uint8_t)(bit_len >> 8); \ block2[63] = (uint8_t)(bit_len); \ \ \ sha256_compress1(state, block2); \ \ \ output[ 0] = (uint8_t)(state[0] >> 24); \ output[ 1] = (uint8_t)(state[0] >> 16); \ output[ 2] = (uint8_t)(state[0] >> 8); \ output[ 3] = (uint8_t)(state[0]); \ \ output[ 4] = (uint8_t)(state[1] >> 24); \ output[ 5] = (uint8_t)(state[1] >> 16); \ output[ 6] = (uint8_t)(state[1] >> 8); \ output[ 7] = (uint8_t)(state[1]); \ \ output[ 8] = (uint8_t)(state[2] >> 24); \ output[ 9] = (uint8_t)(state[2] >> 16); \ output[10] = (uint8_t)(state[2] >> 8); \ output[11] = (uint8_t)(state[2]); \ \ output[12] = (uint8_t)(state[3] >> 24); \ output[13] = (uint8_t)(state[3] >> 16); \ output[14] = (uint8_t)(state[3] >> 8); \ output[15] = (uint8_t)(state[3]); \ \ output[16] = (uint8_t)(state[4] >> 24); \ output[17] = (uint8_t)(state[4] >> 16); \ output[18] = (uint8_t)(state[4] >> 8); \ output[19] = (uint8_t)(state[4]); \ \ output[20] = (uint8_t)(state[5] >> 24); \ output[21] = (uint8_t)(state[5] >> 16); \ output[22] = (uint8_t)(state[5] >> 8); \ output[23] = (uint8_t)(state[5]); \ \ output[24] = (uint8_t)(state[6] >> 24); \ output[25] = (uint8_t)(state[6] >> 16); \ output[26] = (uint8_t)(state[6] >> 8); \ output[27] = (uint8_t)(state[6]); \ \ output[28] = (uint8_t)(state[7] >> 
24); \ output[29] = (uint8_t)(state[7] >> 16); \ output[30] = (uint8_t)(state[7] >> 8); \ output[31] = (uint8_t)(state[7]); \ \ } // TODO make even better impl with ideas from ARM impl void _vdf_sha2_fused_x86(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations) { // 2 different branches for different optimisation cases if (skipCheckpointCount == 0) { for(int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) { unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE*(checkpointIdx-1)); unsigned char* locOut = checkpointIdx == checkpointCount ? out : (outCheckpoint + VDF_SHA_HASH_SIZE*checkpointIdx); memcpy(locOut, locIn, VDF_SHA_HASH_SIZE); for(int i = 0; i < hashingIterations; i++) { sha2_p2_32_32(locOut, saltBuffer, locOut); } long_add(saltBuffer, 1); } } else { for(int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) { unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE*(checkpointIdx-1)); unsigned char* locOut = checkpointIdx == checkpointCount ? 
out : (outCheckpoint + VDF_SHA_HASH_SIZE*checkpointIdx); memcpy(locOut, locIn, VDF_SHA_HASH_SIZE); // 1 skip on start for(int i = 0; i < hashingIterations; i++) { sha2_p2_32_32(locOut, saltBuffer, locOut); } long_add(saltBuffer, 1); for(int j = 1; j < skipCheckpointCount; j++) { // no skips for(int i = 0; i < hashingIterations; i++) { sha2_p2_32_32(locOut, saltBuffer, locOut); } long_add(saltBuffer, 1); } // 1 skip on end for(int i = 0; i < hashingIterations; i++) { sha2_p2_32_32(locOut, saltBuffer, locOut); } long_add(saltBuffer, 1); } } } void vdf_sha2_fused_x86(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out, unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount, int hashingIterations) { unsigned char saltBufferStack[SALT_SIZE]; // ensure 1 L1 cache page used // no access to heap, except of 0-iteration memcpy(saltBufferStack, saltBuffer, SALT_SIZE); _vdf_sha2_fused_x86(saltBufferStack, seed, out, outCheckpoint, checkpointCount, skipCheckpointCount, hashingIterations); } #endif ================================================ FILE: apps/arweave/c_src/vdf/vdf_hiopt_arm.cpp ================================================ #include #include #include "vdf.h" #if defined(__aarch64__) || defined(__arm__) extern "C" { void sha256_block_vdf_order(unsigned int* cth, const void* in, size_t diff_num); void reverse_endianness_asm(const uint32_t h[8], uint8_t* md) { uint64_t h1, h2; __asm__ volatile ( "LDP %[t1], %[t2], [%[in]], #16\n\t" "REV32 %[t1], %[t1]\n\t" "REV32 %[t2], %[t2]\n\t" "STP %[t1], %[t2], [%[out]], #16\n\t" "LDP %[t1], %[t2], [%[in]]\n\t" "REV32 %[t1], %[t1]\n\t" "REV32 %[t2], %[t2]\n\t" "STP %[t1], %[t2], [%[out]]\n\t" // : [t1] "+r" (h1), [t2] "+r" (h2), [in] "+r" (h), [out] "+r" (md) : : "memory" ); } } //sha256 h0-h7 unsigned int H07[8] = { 0x6a09e667U,0xbb67ae85U,0x3c6ef372U,0xa54ff53aU, 0x510e527fU,0x9b05688cU,0x1f83d9abU,0x5be0cd19U }; 
////////////////////////////////////////////////////////////////////////////////////////////////////
// SHA
////////////////////////////////////////////////////////////////////////////////////////////////////

// Computes the VDF SHA-256 hash chain using the assembly core sha256_block_vdf_order.
// The 64-byte message block (salt || previous output) is kept in word-byte-swapped
// form in inBuffer so the asm core can consume it directly between iterations.
// NOTE saltBuffer is mutable in progress
void _vdf_sha2_hiopt_arm(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out,
	unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount,
	int hashingIterations) {
	//unsigned char tempOut[VDF_SHA_HASH_SIZE];
	// 2 different branches for different optimisation cases
	unsigned int sha256[8]; //one sha256 block
	unsigned char inBuffer[64];
	if (skipCheckpointCount == 0) {
		for (int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) {
			// Segment 0 chains from the seed; later segments from the previous checkpoint.
			unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE * (checkpointIdx - 1));
			// The last segment writes the final output; earlier ones record a checkpoint.
			unsigned char* locOut = checkpointIdx == checkpointCount ? out : (outCheckpoint + VDF_SHA_HASH_SIZE * checkpointIdx);
			// Reset the state to the SHA-256 initial constants.
			memcpy(sha256, H07, 32);
			// Build the block: byte-swapped salt in [0..31], byte-swapped input in [32..63].
			reverse_endianness_asm((uint32_t*)saltBuffer, inBuffer);
			reverse_endianness_asm((uint32_t*)locIn, &inBuffer[32]);
			sha256_block_vdf_order(sha256, inBuffer, hashingIterations);
			// Swap the state words back to byte order for the stored output.
			reverse_endianness_asm(sha256, locOut);
			long_add(saltBuffer, 1);
		}
	} else {
		for (int checkpointIdx = 0; checkpointIdx <= checkpointCount; checkpointIdx++) {
			unsigned char* locIn = checkpointIdx == 0 ? seed : (outCheckpoint + VDF_SHA_HASH_SIZE * (checkpointIdx - 1));
			unsigned char* locOut = checkpointIdx == checkpointCount ? out : (outCheckpoint + VDF_SHA_HASH_SIZE * checkpointIdx);
			// 1 skip on start
			memcpy(sha256, H07, 32);
			reverse_endianness_asm((uint32_t*)saltBuffer, inBuffer);
			reverse_endianness_asm((uint32_t*)locIn, &inBuffer[32]);
			sha256_block_vdf_order(sha256, inBuffer, hashingIterations);
			// Chain: the raw state words (already in the core's word order) become
			// the second half of the next block; refresh the salt half after bumping it.
			memcpy(&inBuffer[32], sha256, 32);
			long_add(saltBuffer, 1);
			reverse_endianness_asm((uint32_t*)saltBuffer, inBuffer);
			for (int j = 1; j < skipCheckpointCount; j++) {
				// no skips
				memcpy(sha256, H07, 32);
				sha256_block_vdf_order(sha256, inBuffer, hashingIterations);
				memcpy(&inBuffer[32], sha256, 32);
				long_add(saltBuffer, 1);
				reverse_endianness_asm((uint32_t*)saltBuffer, inBuffer);
			}
			// 1 skip on end
			memcpy(sha256, H07, 32);
			sha256_block_vdf_order(sha256, inBuffer, hashingIterations);
			// Only this final segment's output is byte-swapped out and recorded.
			reverse_endianness_asm(sha256, locOut);
			long_add(saltBuffer, 1);
		}
	}
}

// use
// unsigned char out[VDF_SHA_HASH_SIZE];
// unsigned char* outCheckpoint = (unsigned char*)malloc(checkpointCount*VDF_SHA_HASH_SIZE);
// free(outCheckpoint);
// for call

// Public entry point: copies the salt onto the stack so the hot loop reads a
// single cache-resident buffer, then runs the chain.
void vdf_sha2_hiopt_arm(unsigned char* saltBuffer, unsigned char* seed, unsigned char* out,
	unsigned char* outCheckpoint, int checkpointCount, int skipCheckpointCount,
	int hashingIterations) {
	unsigned char saltBufferStack[SALT_SIZE];
	// ensure 1 L1 cache page used
	// no access to heap, except of 0-iteration
	memcpy(saltBufferStack, saltBuffer, SALT_SIZE);
	_vdf_sha2_hiopt_arm(saltBufferStack, seed, out, outCheckpoint, checkpointCount,
		skipCheckpointCount, hashingIterations);
}

#endif


================================================
FILE: apps/arweave/include/ar.hrl
================================================
-ifndef(AR_HRL).
-define(AR_HRL, true).

%%% A collection of record structures used throughout the Arweave server.

%% True if arweave was launched with -setcookie=test
%% (e.g. bin/test or bin/shell)
-define(IS_TEST, erlang:get_cookie() == test).

%% Default gen_server:call timeout.
%% Is used to safely replace deprecated `infinity` timeout, that was used in
%% multiple places, with a more reasonable value.
%% Is a subject for future changes.
-define(DEFAULT_CALL_TIMEOUT, 600000).

%% The mainnet name. Does not change at the hard forks.
-ifndef(NETWORK_NAME).
-ifdef(AR_TEST).
-define(NETWORK_NAME, "arweave.localtest").
-else.
-define(NETWORK_NAME, "arweave.N.1").
-endif.
-endif.

%% When a request is received without specifying the X-Network header, this network name
%% is assumed.
-ifndef(DEFAULT_NETWORK_NAME).
-define(DEFAULT_NETWORK_NAME, "arweave.N.1").
-endif.

%% The current release number of the arweave client software.
%% @deprecated Not used apart from being included in the /info response.
-define(CLIENT_VERSION, 5).

%% The current build number -- incremented for every release.
-define(RELEASE_NUMBER, 91).

-define(DEFAULT_REQUEST_HEADERS, [
	{<<"X-Network">>, ?NETWORK_NAME},
	{<<"X-Version">>, <<"8">>},
	{<<"X-Block-Format">>, <<"3">>}
]).

-define(CORS_HEADERS, #{<<"access-control-allow-origin">> => <<"*">>}).

-ifdef(FORKS_RESET).
-define(FORK_1_6, 0).
-else.
%%% FORK INDEX
%%% @deprecated Fork heights from 1.7 on are defined in the ar_fork module.
-define(FORK_1_6, 95000).
-endif.

%% The hashing algorithm used to calculate wallet addresses.
-define(HASH_ALG, sha256).

-define(DEEP_HASH_ALG, sha384).

-define(MERKLE_HASH_ALG, sha384).

-define(RSA_SIGN_ALG, rsa).
-define(RSA_PRIV_KEY_SZ, 4096).
-define(ECDSA_SIGN_ALG, ecdsa).
-define(ECDSA_TYPE_BYTE, <<2>>).
-define(EDDSA_SIGN_ALG, eddsa).
-define(EDDSA_TYPE_BYTE, <<3>>).

%% The default key type used by transactions that do not specify a signature type.
-define(DEFAULT_KEY_TYPE, {?RSA_SIGN_ALG, 65537}).
-define(RSA_KEY_TYPE, {?RSA_SIGN_ALG, 65537}).
-define(ECDSA_KEY_TYPE, {?ECDSA_SIGN_ALG, secp256k1}).

-define(RSA_BLOCK_SIG_SIZE, 512).
-define(ECDSA_PUB_KEY_SIZE, 33).
-define(ECDSA_SIG_SIZE, 65).

%% The difficulty a new weave is started with.
-define(DEFAULT_DIFF, 6).
-ifndef(TARGET_BLOCK_TIME). -define(TARGET_BLOCK_TIME, 120). -endif. -ifndef(RETARGET_BLOCKS). -define(RETARGET_BLOCKS, 10). -endif. %% We only do retarget if the time it took to mine ?RETARGET_BLOCKS is more than %% 1.1 times bigger or smaller than ?TARGET_BLOCK_TIME * ?RETARGET_BLOCKS. Was used before %% the fork 2.5 where we got rid of the floating point calculations. -define(RETARGET_TOLERANCE, 0.1). -define(JOIN_CLOCK_TOLERANCE, 15). -define(MAX_BLOCK_PROPAGATION_TIME, 60). -define(CLOCK_DRIFT_MAX, 5). %% The total supply of tokens in the Genesis block. -define(GENESIS_TOKENS, 55000000). %% Winstons per AR. -define(WINSTON_PER_AR, 1000000000000). %% The number of bytes in a gibibyte. -define(KiB, (1024)). -define(MiB, (1024 * ?KiB)). -define(GiB, (1024 * ?MiB)). -define(TiB, (1024 * ?GiB)). %% How far into the past or future the block can be in order to be accepted for %% processing. -ifdef(AR_TEST). -define(STORE_BLOCKS_BEHIND_CURRENT, 10). -else. -define(STORE_BLOCKS_BEHIND_CURRENT, 50). -endif. %% The maximum lag when fork recovery (chain reorganisation) is performed. -ifdef(AR_TEST). -define(CHECKPOINT_DEPTH, 4). -else. -define(CHECKPOINT_DEPTH, 18). -endif. %% The recommended depth of the block to use as an anchor for transactions. %% The corresponding block hash is returned by the GET /tx_anchor endpoint. -ifdef(AR_TEST). -define(SUGGESTED_TX_ANCHOR_DEPTH, 5). -else. -define(SUGGESTED_TX_ANCHOR_DEPTH, 6). -endif. %% The number of blocks returned in the /info 'recent' field -ifdef(AR_TEST). -define(RECENT_BLOCKS_WITHOUT_TIMESTAMP, 2). -else. -define(RECENT_BLOCKS_WITHOUT_TIMESTAMP, 5). -endif. %% How long to wait before giving up on unit test(s). -define(TEST_SUITE_TIMEOUT, 90 * 60). %% 90 minutes %% How long to wait before giving up on e2e test(s). -define(E2E_TEST_SUITE_TIMEOUT, 6 * 60 * 60). %% 6 hours %% Default test timeout to use if a test starts a node. 
We keep having test failures due to %% the timeout elapsing, and I think it may be that sometimes on the runner it just takes a %% while to launch a test node. -define(TEST_NODE_TIMEOUT, 300). %% 5 minutes %% The maximum byte size of a single POST body. -define(MAX_BODY_SIZE, 15 * ?MiB). %% The maximum allowed size in bytes for the data field of %% a format=1 transaction. -define(TX_DATA_SIZE_LIMIT, 10 * ?MiB). %% The maximum allowed size in bytes for the combined data fields of %% the format=1 transactions included in a block. Must be greater than %% or equal to ?TX_DATA_SIZE_LIMIT. -define(BLOCK_TX_DATA_SIZE_LIMIT, ?TX_DATA_SIZE_LIMIT). %% The maximum number of transactions (both format=1 and format=2) in a block. -ifdef(AR_TEST). -define(BLOCK_TX_COUNT_LIMIT, 10). -else. -define(BLOCK_TX_COUNT_LIMIT, 1000). -endif. %% The base transaction size the transaction fee must pay for. -define(TX_SIZE_BASE, 3210). %% Mempool Limits. %% %% The reason we have two different mempool limits has to do with the way %% format=2 transactions are distributed. To achieve faster consensus and %% reduce the network congestion, the miner does not gossip data of format=2 %% transactions, but serves it later to those who request it after the %% corresponding transaction is included into a block. A single mempool limit %% would therefore be reached much quicker by a peer accepting format=2 %% transactions with data. This would prevent this miner from accepting any %% further transactions. Having a separate limit for data allows the miner %% to continue accepting transaction headers. %% The maximum allowed size of transaction headers stored in mempool. %% The data field of a format=1 transaction is considered to belong to %% its headers. -ifdef(AR_TEST). -define(MEMPOOL_HEADER_SIZE_LIMIT, 50 * ?MiB). -else. -define(MEMPOOL_HEADER_SIZE_LIMIT, 250 * ?MiB). -endif. %% The maximum allowed size of transaction data stored in mempool. 
%% The format=1 transactions are not counted as their data is considered %% to be part of the header. -ifdef(AR_TEST). -define(MEMPOOL_DATA_SIZE_LIMIT, 50 * ?MiB). -else. -define(MEMPOOL_DATA_SIZE_LIMIT, 500 * ?MiB). -endif. %% Default timeout for establishing an HTTP connection. -define(HTTP_REQUEST_CONNECT_TIMEOUT, 10 * 1000). %% Default timeout used when sending to and receiving from a TCP socket %% when making an HTTP request. -define(HTTP_REQUEST_SEND_TIMEOUT, 60 * 1000). %% The time in milliseconds to wait before retrying %% a failed join (block index download) attempt. -define(REJOIN_TIMEOUT, 10 * 1000). %% How many times to retry fetching the block index from each of %% the peers before giving up. -define(REJOIN_RETRIES, 3). %% Maximum allowed number of accepted requests per minute per IP. -ifdef(AR_TEST). -define(DEFAULT_REQUESTS_PER_MINUTE_LIMIT, 100_000). -else. -define(DEFAULT_REQUESTS_PER_MINUTE_LIMIT, 900). -endif. %% Number of seconds an IP address should be completely banned from doing %% HTTP requests after posting an invalid block. -define(BAD_BLOCK_BAN_TIME, 24 * 60 * 60). %% A part of transaction propagation delay independent from the size, in seconds. -ifdef(AR_TEST). -define(BASE_TX_PROPAGATION_DELAY, 0). -else. -ifndef(BASE_TX_PROPAGATION_DELAY). -define(BASE_TX_PROPAGATION_DELAY, 30). -endif. -endif. %% A conservative assumption of the network speed used to %% estimate the transaction propagation delay. It does not include %% the base delay, the time the transaction spends in the priority %% queue, and the time it takes to propagate the transaction to peers. -ifdef(AR_TEST). -define(TX_PROPAGATION_BITS_PER_SECOND, 1000000000). -else. -define(TX_PROPAGATION_BITS_PER_SECOND, 3000000). % 3 mbps -endif. %% The number of peers to send new blocks to in parallel. -define(BLOCK_PROPAGATION_PARALLELIZATION, 20). %% The maximum number of peers to propagate txs to, by default. -define(DEFAULT_MAX_PROPAGATION_PEERS, 16). 
%% The maximum number of peers to propagate blocks to, by default.
-define(DEFAULT_MAX_BLOCK_PROPAGATION_PEERS, 1000).

%% When the transaction data size is smaller than this number of bytes,
%% the transaction is gossiped to the peer without a prior check if the peer
%% already has this transaction.
-define(TX_SEND_WITHOUT_ASKING_SIZE_LIMIT, 1000).

%% Block headers directory, relative to the data dir.
-define(BLOCK_DIR, "blocks").

%% Transaction headers directory, relative to the data dir.
-define(TX_DIR, "txs").

%% Disk cache directory, relative to the data dir.
-define(DISK_CACHE_DIR, "disk_cache").

%% Block headers directory, relative to the disk cache directory.
-define(DISK_CACHE_BLOCK_DIR, "blocks").

%% Transaction headers directory, relative to the disk cache directory.
-define(DISK_CACHE_TX_DIR, "txs").

%% Backup block hash list storage directory, relative to the data dir.
-define(HASH_LIST_DIR, "hash_lists").

%% Directory for storing miner wallets, relative to the data dir.
-define(WALLET_DIR, "wallets").

%% Directory for storing unique wallet lists, relative to the data dir.
-define(WALLET_LIST_DIR, "wallet_lists").

%% Directory for storing data chunks, relative to the data dir.
-define(DATA_CHUNK_DIR, "data_chunks").

%% Directory for RocksDB key-value storages, relative to the data dir.
-define(ROCKS_DB_DIR, "rocksdb").

%% Log output directory, NOT relative to the data dir.
-define(LOG_DIR, "logs").

%% The directory for persisted metrics, NOT relative to the data dir.
-define(METRICS_DIR, "metrics").

%% The ID and module for the default storage module.
-define(DEFAULT_MODULE, "default").

%% Default TCP port.
-define(DEFAULT_HTTP_IFACE_PORT, 1984).

%% Number of transaction propagation processes to spawn.
%% Each emitter picks the most valued transaction from the queue
%% and propagates it to the chosen peers.
%% Can be overridden by a command line argument.
-define(NUM_EMITTER_PROCESSES, 16).

%% The adjustment of difficulty going from SHA-384 to RandomX.
-define(RANDOMX_DIFF_ADJUSTMENT, (-14)). %% Max allowed difficulty multiplication and division factors, before the fork 2.4. -define(DIFF_ADJUSTMENT_DOWN_LIMIT, 2). -define(DIFF_ADJUSTMENT_UP_LIMIT, 4). %% Maximum size of a single data chunk, in bytes. -define(DATA_CHUNK_SIZE, (256 * 1024)). %% The maximum allowed packing difficulty. -define(MAX_PACKING_DIFFICULTY, 32). %% The number of sub-chunks in a compositely packed chunk. %% The composite packing with the packing difficulty 1 matches approximately the non-composite %% 2.6 packing in terms of computational costs. -define(COMPOSITE_PACKING_SUB_CHUNK_COUNT, 32). %% The size of a unit sub-chunk in the compositely packed chunk. -define(COMPOSITE_PACKING_SUB_CHUNK_SIZE, (?DATA_CHUNK_SIZE div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT)). %% The number of RandomX rounds used for a single iteration of packing of a single sub-chunk %% during the composite packing. -define(COMPOSITE_PACKING_ROUND_COUNT, 10). %% Maximum size of a `data_path`, in bytes. -define(MAX_PATH_SIZE, (256 * 1024)). %% The size of data chunk hashes, in bytes. -define(CHUNK_ID_HASH_SIZE, 32). -define(NOTE_SIZE, 32). %% Disk cache size in MB -ifdef(AR_TEST). -define(DISK_CACHE_SIZE, 1). -define(DISK_CACHE_CLEAN_PERCENT_MAX, 20). -else. -define(DISK_CACHE_SIZE, 5120). -define(DISK_CACHE_CLEAN_PERCENT_MAX, 20). -endif. %% The speed in chunks/s of moving the fork 2.5 packing threshold. -ifdef(AR_TEST). -define(PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND, 1). -else. -define(PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND, 10). -endif. %% The data_root of the system "padding" nodes inserted in the transaction Merkle trees %% since the 2.5 fork block. User transactions cannot set <<>> for data_root unless %% data_size == 0. The motivation is to place all chunks including those %% smaller than 256 KiB into the 256 KiB buckets on the weave, to even out their chances to be %% picked as recall chunks and therefore equally incentivize the storage. 
-define(PADDING_NODE_DATA_ROOT, <<>>).

-ifndef(INITIAL_VDF_DIFFICULTY).
-define(INITIAL_VDF_DIFFICULTY, 600_000).
-endif.

%% @doc A chunk with the proofs of its presence in the weave at a particular offset.
-record(poa, {
	%% DEPRECATED. Not used since the fork 2.4.
	option = 1,
	%% The path through the Merkle tree of transactions' "data_root"s.
	%% Proves the inclusion of the "data_root" in the corresponding "tx_root"
	%% under the particular offset.
	tx_path = <<>>,
	%% The path through the Merkle tree of the identifiers of the chunks
	%% of the corresponding transaction. Proves the inclusion of the chunk
	%% in the corresponding "data_root" under a particular offset.
	data_path = <<>>,
	%% When packing difficulty is 0 chunk stores a full ?DATA_CHUNK_SIZE-sized packed chunk.
	%% When packing difficulty >= 1, chunk stores a ?COMPOSITE_PACKING_SUB_CHUNK_SIZE-sized
	%% packed sub-chunk.
	chunk = <<>>,
	%% When packing difficulty is 0 unpacked_chunk is <<>>.
	%% When packing difficulty >= 1, unpacked_chunk stores a full 0-padded
	%% ?DATA_CHUNK_SIZE-sized unpacked chunk.
	unpacked_chunk = <<>>
}).

%% @doc The information which simplifies validation of the nonce limiting procedures.
-record(nonce_limiter_info, {
	%% The output of the latest step - the source of the entropy for the mining nonces.
	output = <<>>,
	%% The output of the latest step of the previous block.
	prev_output = <<>>,
	%% The hash of the latest block mined below the current reset line.
	seed = <<>>,
	%% The hash of the latest block mined below the future reset line.
	next_seed = <<>>,
	%% The weave size of the latest block mined below the current reset line.
	partition_upper_bound = 0,
	%% The weave size of the latest block mined below the future reset line.
	next_partition_upper_bound = 0,
	%% The global sequence number of the nonce limiter step at which the block was found.
	global_step_number = 1,
	%% ?VDF_CHECKPOINT_COUNT_IN_STEP checkpoints from the most recent step in the nonce
	%% limiter process.
	last_step_checkpoints = [],
	%% A list of the output of each step of the nonce limiting process. Note: each step
	%% has ?VDF_CHECKPOINT_COUNT_IN_STEP checkpoints, the last of which is that step's output.
	steps = [],
	%% The fields added at the fork 2.7
	%% The number of SHA2-256 iterations in a single VDF checkpoint. The protocol aims to keep the
	%% checkpoint calculation time to around 40ms by varying this parameter. Note: there are
	%% 25 checkpoints in a single VDF step - so the protocol aims to keep the step calculation at
	%% 1 second by varying this parameter.
	vdf_difficulty = ?INITIAL_VDF_DIFFICULTY,
	%% The VDF difficulty scheduled to be applied after the next VDF reset line.
	next_vdf_difficulty = ?INITIAL_VDF_DIFFICULTY
}).

%% @doc A VDF session.
-record(vdf_session, {
	%% The global step number of the latest step in the session.
	step_number,
	%% The entropy seed of the session.
	seed,
	%% Map of step number => the checkpoints computed for that step.
	step_checkpoints_map = #{},
	%% The step outputs of the session.
	steps,
	%% The key of the preceding session, if any.
	prev_session_key,
	upper_bound,
	next_upper_bound,
	vdf_difficulty,
	next_vdf_difficulty
}).

%% @doc The format of the nonce limiter update provided by the configured trusted peer.
-record(nonce_limiter_update, {
	session_key,
	session,
	is_partial = true
}).

%% @doc The format of the response to nonce limiter updates by configured trusted peers.
-record(nonce_limiter_update_response, {
	session_found = true,
	step_number,
	postpone = 0,
	format = 2
}).

%% @doc A compact announcement of a new block gossiped to peers. Peers
%% who have not received this block yet and decide to receive it from us,
%% should reply with a #block_announcement_response.
-record(block_announcement, {
	indep_hash,
	previous_block,
	recall_byte,
	tx_prefixes = [], % 8 byte prefixes of transaction identifiers.
	recall_byte2,
	solution_hash
}).

%% @doc A reply to a block announcement when we are willing to receive this
%% block from the announcing peer.
-record(block_announcement_response, {
	missing_chunk = false,
	missing_tx_indices = [], % Missing transactions' indices, 0 =<, =< 999.
	missing_chunk2
}).
%% @doc A block (txs is a list of tx records) or a block shadow (txs is a list of %% transaction identifiers). -record(block, { %% The nonce chosen to solve the mining problem. nonce, %% `indep_hash` of the previous block in the weave. previous_block = <<>>, %% POSIX time of block discovery. timestamp, %% POSIX time of the last difficulty retarget. last_retarget, %% Mining difficulty, the number `hash` must be greater than. diff, height = 0, %% Mining solution hash. hash = <<>>, %% The block identifier. indep_hash, %% The list of transaction identifiers or transactions (tx records). txs = [], %% The Merkle root of the tree of Merkle roots of block's transactions' data. tx_root = <<>>, %% The Merkle tree of Merkle roots of block's transactions' data. Used internally, %% not gossiped. tx_tree = [], %% Deprecated. Not used, not gossiped. hash_list = unset, %% The Merkle root of the block index - the list of %% {`indep_hash`, `weave_size`, `tx_root`} triplets describing the past blocks %% excluding this one. hash_list_merkle = <<>>, %% The root hash of the Merkle Patricia Tree containing all wallet (account) balances and %% the identifiers of the last transactions posted by them, if any wallet_list, %% The mining address. Before the fork 2.6, either the atom 'unclaimed' or %% a SHA2-256 hash of the RSA PSS public key. In 2.6, 'unclaimed' is not supported. reward_addr = unclaimed, %% Miner-specified tags (a list of strings) to store with the block. tags = [], %% The number of Winston in the endowment pool. reward_pool, %% The total number of bytes whose storage is incentivized. weave_size, %% The total number of bytes added to the storage incentivization by this block. block_size, %% The sum of the average number of hashes computed by the network to produce the past %% blocks including this one. cumulative_diff, %% The list of {{`tx_id`, `data_root`}, `offset`} pairs. Used internally, not gossiped. size_tagged_txs = unset, %% The first proof of access. 
	poa = #poa{},
	%% The estimated USD to AR conversion rate used in the pricing calculations.
	%% A tuple {Dividend, Divisor}.
	%% Used until the transition to the new fee calculation method is complete.
	usd_to_ar_rate,
	%% The estimated USD to AR conversion rate scheduled to be used a bit later, used to
	%% compute the necessary fee for the currently signed txs. A tuple {Dividend, Divisor}.
	%% Used until the transition to the new fee calculation method is complete.
	scheduled_usd_to_ar_rate,
	%% The offset on the weave separating the data which has to be packed for mining after the
	%% fork 2.5 from the data which does not have to be packed yet. It is set to the
	%% weave_size of the 50th previous block at the hard fork block and moves down at a speed
	%% of ?PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND chunks/s. The motivation behind the
	%% threshold is a smooth transition to the new algorithm - big miners who might not want
	%% to adopt the new algorithm are still incentivized to upgrade and stay in the network
	%% for some time.
	packing_2_5_threshold,
	%% The offset on the weave separating the data which has to be split according to the
	%% stricter rules introduced in the fork 2.5 from the historical data. The new rules
	%% require all chunk sizes to be 256 KiB excluding the last or the only chunks of the
	%% corresponding transactions and the second last chunks of their transactions where they
	%% exceed 256 KiB in size when combined with the following (last) chunk. Furthermore, the
	%% new chunks may not be smaller than their Merkle proofs unless they are the last chunks.
	%% The motivation is to be able to put all chunks into 256 KiB buckets. It makes all
	%% chunks equally attractive because they have equal chances of being chosen as recall
	%% chunks. Moreover, every chunk costs the same in terms of storage and computation
	%% expenditure when packed (smaller chunks are simply padded before packing).
	strict_data_split_threshold,
	%% Used internally by tests.
account_tree, %% %% The fields below were added at the fork 2.6. %% %% A part of the solution hash preimage. Used for the initial solution validation %% without a data chunk. hash_preimage = <<>>, %% The absolute recall offset. recall_byte, %% The total amount of winston the miner receives for this block. reward = 0, %% The solution hash of the previous block. previous_solution_hash = <<>>, %% The sequence number of the mining partition where the block was found. partition_number, %% The nonce limiter information. nonce_limiter_info = #nonce_limiter_info{}, %% The second proof of access (empty when the solution was found with only one chunk). poa2 = #poa{}, %% The absolute second recall offset. recall_byte2, %% The block signature. signature = <<>>, %% {KeyType, PubKey} - the public key the block was signed with. %% The only supported KeyType is currently {rsa, 65537}. reward_key, %% The estimated number of Winstons it costs the network to store one gibibyte %% for one minute. price_per_gib_minute = 0, %% The updated estimation of the number of Winstons it costs the network to store %% one gibibyte for one minute. scheduled_price_per_gib_minute = 0, %% The recursive hash of the network hash rates, block rewards, mining addresses, %% and denominations. %% Note that the length of the reward history has increased from %% ?LEGACY_REWARD_HISTORY_BLOCKS to ?REWARD_HISTORY_BLOCKS in 2.8. %% Before 2.8 every new hash was computed over the latest ?REWARD_HISTORY_BLOCKS. %% After 2.8 the new hash is computed from the new history element and the previous hash. reward_history_hash, %% The network hash rates, block rewards, and mining addresses from the latest %% ?REWARD_HISTORY_BLOCKS + ar_block:get_consensus_window_size() blocks. Used internally, not gossiped. reward_history = [], %% The total number of Winston emitted when the endowment was not sufficient %% to compensate mining. 
	debt_supply = 0,
	%% An additional multiplier for the transaction fees doubled every time the
	%% endowment pool becomes empty.
	kryder_plus_rate_multiplier = 1,
	%% A lock controlling the updates of kryder_plus_rate_multiplier. It is set to 1
	%% after the update and back to 0 when the endowment pool is bigger than
	%% ?RESET_KRYDER_PLUS_LATCH_THRESHOLD (redenominated according to the denomination
	%% used at the time).
	kryder_plus_rate_multiplier_latch = 0,
	%% The code for the denomination of AR in base units.
	%% 1 is the default which corresponds to the original denomination of 10^12 base units.
	%% Every time the available supply falls below ?REDENOMINATION_THRESHOLD,
	%% the denomination is multiplied by 1000, the code is incremented.
	%% Transaction denomination code must not exceed the block's denomination code.
	denomination = 1,
	%% The biggest known redenomination height (0 means there were no redenominations yet).
	redenomination_height = 0,
	%% The proof of signing the same block several times or extending two equal forks.
	double_signing_proof,
	%% The cumulative difficulty of the previous block.
	previous_cumulative_diff = 0,
	%%
	%% The fields below were added at the fork 2.7 (note that 2.6.8 was a hard fork too).
	%%
	%% The merkle trees of the data written after this weave offset may be constructed
	%% in a way where some subtrees are "rebased", i.e., their offsets start from 0 as if
	%% they were the leftmost subtree of the entire tree. The merkle paths for the chunks
	%% belonging to the subtrees will include a 32-byte 0-sequence preceding the pivot to
	%% the corresponding subtree. The rebases allow for flexible combination of data before
	%% registering it on the weave, extremely useful e.g., for the bundling services.
	merkle_rebase_support_threshold,
	%% The SHA2-256 of the packed chunk.
	chunk_hash,
	%% The SHA2-256 of the packed chunk2, when present.
	chunk2_hash,
	%% The hashes of the history of block times (in seconds), VDF times (in steps),
	%% and solution types (one-chunk vs two-chunk) of the latest
	%% ?BLOCK_TIME_HISTORY_BLOCKS blocks.
	block_time_history_hash,
	%% The block times (in seconds), VDF times (in steps), and solution types (one-chunk vs
	%% two-chunk) of the latest ?BLOCK_TIME_HISTORY_BLOCKS blocks.
	%% Used internally, not gossiped.
	block_time_history = [], % {block_interval, vdf_interval, chunk_count}
	%%
	%% The fields below were added at the fork 2.8.
	%%
	%% The packing difficulty of the replica the block was mined with.
	%% Applies to both poa1 and poa2.
	%%
	%% Packing difficulty 0 denotes the usual pre-2.8 packing scheme.
	%% Packing difficulty 1 refers to the new composite packing of approximately the same
	%% computational cost as the difficulty 0 packing. Packing difficulty 2 is the composite
	%% packing where each sub-chunk is hashed twice as many times. The maximum allowed
	%% value is 32.
	%%
	%% When packing_difficulty >= 1, both poa1 and poa2 contain the unpacked chunks.
	%% The values of the "chunk" fields are now 8192-byte packed sub-chunks.
	%%
	%% If the block is associated with the new replication format (replica_format=1),
	%% the packing difficulty is constant and determines the number of nonces
	%% (also, sub-chunks) in the recall range and their mining difficulty, in line with
	%% the chosen computational difficulty of the entropy computation.
	packing_difficulty = 0,
	%% The SHA2-256 of the unpacked 0-padded (if less than 256 KiB) chunk.
	%% undefined when packing_difficulty == 0, has a value otherwise.
	unpacked_chunk_hash,
	%% The SHA2-256 of the unpacked 0-padded (if less than 256 KiB) chunk2.
	%% undefined when packing_difficulty == 0 or recall_byte2 == undefined,
	%% has a value otherwise.
	unpacked_chunk2_hash,
	%% The replica format 0 is the inefficient "packing" where every chunk is packed
	%% independently. The replica format 1 is the new blazing fast replication format.
	replica_format = 0,
	%% Used internally, not gossiped. Convenient for validating potentially non-unique
	%% merkle proofs assigned to the different signatures of the same solution
	%% (see validate_poa_against_cached_poa in ar_block_pre_validator.erl).
	poa_cache,
	%% Used internally, not gossiped. Convenient for validating potentially non-unique
	%% merkle proofs assigned to the different signatures of the same solution
	%% (see validate_poa_against_cached_poa in ar_block_pre_validator.erl).
	poa2_cache,
	%% Used internally, not gossiped.
	receive_timestamp
}).

%% @doc A transaction.
-record(tx, {
	%% 1 or 2.
	format = 1,
	%% The transaction identifier.
	id = <<>>,
	%% Either the identifier of the previous transaction from
	%% the same wallet or the identifier of one of the
	%% last ar_block:get_max_tx_anchor_depth() blocks.
	last_tx = <<>>,
	%% The public key the transaction is signed with.
	owner = <<>>,
	%% The owner address. Used as a cache to avoid recomputing it, not serialized.
	owner_address = not_set,
	%% A list of arbitrary key-value pairs. Keys and values are binaries.
	tags = [],
	%% The address of the recipient, if any. The SHA2-256 hash of the public key.
	target = <<>>,
	%% The amount of Winstons to send to the recipient, if any.
	quantity = 0,
	%% The data to upload, if any. For v2 transactions, the field is optional - a fee
	%% is charged based on the "data_size" field, data itself may be uploaded any time
	%% later in chunks.
	data = <<>>,
	%% Size in bytes of the transaction data.
	data_size = 0,
	%% Deprecated. Not used, not gossiped.
	data_tree = [],
	%% The Merkle root of the Merkle tree of data chunks.
	data_root = <<>>,
	%% The signature.
	signature = <<>>,
	%% The fee in Winstons.
	reward = 0,
	%% The code for the denomination of AR in base units.
	%%
	%% 1 corresponds to the original denomination of 10^12 base units.
	%% Every time the available supply falls below ?REDENOMINATION_THRESHOLD,
	%% the denomination is multiplied by 1000, the code is incremented.
%% %% 0 is the default denomination code. It is treated as the denomination code of the %% current block. We do NOT default to 1 because we want to distinguish between the %% transactions with the explicitly assigned denomination (the denomination then becomes %% a part of the signature preimage) and transactions signed the way they were signed %% before the upgrade. The motivation is to keep supporting legacy client libraries after %% redenominations and at the same time protect users from an attack where %% a post-redenomination transaction is included in a pre-redenomination block. The attack %% is prevented by forbidding inclusion of transactions with denomination=0 in the 100 %% blocks preceding the redenomination block. %% %% Transaction denomination code must not exceed the block's denomination code. denomination = 0, %% The type of signature this transaction was signed with. A system field, %% not used by the protocol yet. signature_type = ?DEFAULT_KEY_TYPE }). %% @doc The data_path field will only be not_found if the chunk record is corrupt/invalid. %% This can happen if the chunk entry exists in the chunks_index but not in the chunk_data_db. %% In this case: %% - not_set means that a field has not been queried yet. %% - not_found means that the field has been queried but could not be found. -record(chunk_metadata, { chunk_data_key = not_set :: not_set | binary(), tx_root = not_set :: not_set | binary(), tx_path = not_set :: not_set | binary(), data_root = not_set :: not_set | binary(), data_path = not_set :: not_set | not_found | binary(), chunk_size = not_set :: not_set | non_neg_integer() }). -record(chunk_offsets, { absolute_offset = not_set :: not_set | non_neg_integer(), bucket_end_offset = not_set :: not_set | non_neg_integer(), padded_end_offset = not_set :: not_set | non_neg_integer(), relative_offset = not_set :: not_set | non_neg_integer() }). %% A macro to convert AR into Winstons. -define(AR(AR), (?WINSTON_PER_AR * AR)). 
%% A macro to return whether a term is a block record. -define(IS_BLOCK(X), (is_record(X, block))). %% Convert a v2.0 block index into an old style block hash list. -define(BI_TO_BHL(BI), ([BH || {BH, _, _} <- BI])). %% Pattern matches on ok-tuple and returns the value. -define(OK(Tuple), begin (case (Tuple) of {ok, SuccessValue} -> (SuccessValue) end) end). %% The messages to be stored inside the genesis block. -define(GENESIS_BLOCK_MESSAGES, []). %% Minimum number of characters for internal API secret. Used in the optional HTTP API %% for signing transactions. -define(INTERNAL_API_SECRET_MIN_LEN, 16). %% The frequency of issuing a reminder to the console and the logfile %% about the insufficient disk space, in milliseconds. -define(DISK_SPACE_WARNING_FREQUENCY, 24 * 60 * 60 * 1000). %% Use a standard way of logging. %% For more details see https://erlang.org/doc/man/logger.html#macros. -include_lib("kernel/include/logger.hrl"). -endif. ================================================ FILE: apps/arweave/include/ar_blacklist_middleware.hrl ================================================ -define(THROTTLE_PERIOD, 30000). -define(BAN_CLEANUP_INTERVAL, 60000). -define(RPM_BY_PATH(Path), fun() -> ?RPM_BY_PATH(Path, #{})() end). -define(RPM_BY_PATH(Path, LimitByIP), fun() -> {ok, Config} = arweave_config:get_env(), ?RPM_BY_PATH(Path, LimitByIP, Config#config.requests_per_minute_limit)() end). -ifdef(AR_TEST). -define(RPM_BY_PATH(Path, LimitByIP, DefaultPathLimit), fun() -> case Path of [<<"chunk">> | _] -> {chunk, maps:get(chunk, LimitByIP, DefaultPathLimit)}; % ~50 MB/s. [<<"chunk2">> | _] -> {chunk, maps:get(chunk, LimitByIP, DefaultPathLimit)}; % ~50 MB/s. 
[<<"data_sync_record">> | _] -> {data_sync_record, maps:get(data_sync_record, LimitByIP, DefaultPathLimit)}; [<<"recent_hash_list_diff">> | _] -> {recent_hash_list_diff, maps:get(recent_hash_list_diff, LimitByIP, DefaultPathLimit)}; [<<"hash_list">>] -> {block_index, maps:get(block_index, LimitByIP, DefaultPathLimit)}; [<<"hash_list2">>] -> {block_index, maps:get(block_index, LimitByIP, DefaultPathLimit)}; [<<"block_index">>] -> {block_index, maps:get(block_index, LimitByIP, DefaultPathLimit)}; [<<"block_index2">>] -> {block_index, maps:get(block_index, LimitByIP, DefaultPathLimit)}; [<<"wallet_list">>] -> {wallet_list, maps:get(wallet_list, LimitByIP, DefaultPathLimit)}; [<<"block">>, _Type, _ID, <<"wallet_list">>] -> {wallet_list, maps:get(wallet_list, LimitByIP, DefaultPathLimit)}; [<<"block">>, _Type, _ID, <<"hash_list">>] -> {block_index, maps:get(block_index, LimitByIP, DefaultPathLimit)}; [<<"vdf">>] -> {get_vdf, maps:get(get_vdf, LimitByIP, DefaultPathLimit)}; [<<"vdf">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, DefaultPathLimit)}; [<<"vdf2">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, DefaultPathLimit)}; [<<"vdf3">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, DefaultPathLimit)}; [<<"vdf4">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, DefaultPathLimit)}; [<<"vdf">>, <<"previous_session">>] -> {get_previous_vdf_session, maps:get(get_previous_vdf_session, LimitByIP, DefaultPathLimit)}; [<<"vdf2">>, <<"previous_session">>] -> {get_previous_vdf_session, maps:get(get_previous_vdf_session, LimitByIP, DefaultPathLimit)}; [<<"vdf4">>, <<"previous_session">>] -> {get_previous_vdf_session, maps:get(get_previous_vdf_session, LimitByIP, DefaultPathLimit)}; _ -> {default, maps:get(default, LimitByIP, DefaultPathLimit)} end end). -else. 
-define(RPM_BY_PATH(Path, LimitByIP, DefaultPathLimit), fun() -> case Path of [<<"chunk">> | _] -> {chunk, maps:get(chunk, LimitByIP, 12000)}; % ~50 MB/s. [<<"chunk2">> | _] -> {chunk, maps:get(chunk, LimitByIP, 12000)}; % ~50 MB/s. [<<"data_sync_record">> | _] -> {data_sync_record, maps:get(data_sync_record, LimitByIP, 40)}; [<<"footprints">> | _] -> %% 262144 * 1024 (chunks per footprint) * 200 (rpm) / 60 (seconds) =~ 800 MB/s {footprints, maps:get(footprints, LimitByIP, 200)}; [<<"recent_hash_list_diff">> | _] -> {recent_hash_list_diff, maps:get(recent_hash_list_diff, LimitByIP, 240)}; [<<"hash_list">>] -> {block_index, maps:get(block_index, LimitByIP, 2)}; [<<"hash_list2">>] -> {block_index, maps:get(block_index, LimitByIP, 2)}; [<<"block_index">>] -> {block_index, maps:get(block_index, LimitByIP, 2)}; [<<"block_index2">>] -> {block_index, maps:get(block_index, LimitByIP, 2)}; [<<"wallet_list">>] -> {wallet_list, maps:get(wallet_list, LimitByIP, 2)}; [<<"block">>, _Type, _ID, <<"wallet_list">>] -> {wallet_list, maps:get(wallet_list, LimitByIP, 2)}; [<<"block">>, _Type, _ID, <<"hash_list">>] -> {block_index, maps:get(block_index, LimitByIP, 2)}; [<<"vdf">>] -> {get_vdf, maps:get(get_vdf, LimitByIP, 180)}; [<<"vdf">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, 60)}; [<<"vdf2">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, 60)}; [<<"vdf3">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, 60)}; [<<"vdf4">>, <<"session">>] -> {get_vdf_session, maps:get(get_vdf_session, LimitByIP, 60)}; [<<"vdf">>, <<"previous_session">>] -> {get_previous_vdf_session, maps:get(get_previous_vdf_session, LimitByIP, 60)}; [<<"vdf2">>, <<"previous_session">>] -> {get_previous_vdf_session, maps:get(get_previous_vdf_session, LimitByIP, 60)}; [<<"vdf4">>, <<"previous_session">>] -> {get_previous_vdf_session, maps:get(get_previous_vdf_session, LimitByIP, 60)}; _ -> {default, maps:get(default, LimitByIP, 
DefaultPathLimit)} end end). -endif. ================================================ FILE: apps/arweave/include/ar_block.hrl ================================================ %% Size in bytes of the timestamp and last_retarget block fields. -define(TIMESTAMP_FIELD_SIZE_LIMIT, 12). ================================================ FILE: apps/arweave/include/ar_chain_stats.hrl ================================================ -ifndef(AR_CHAIN_STATS_HRL). -define(AR_CHAIN_STATS_HRL, true). -define(RECENT_FORKS_AGE, 60 * 60 * 24 * 30). %% last 30 days of forks -ifdef(AR_TEST). -define(RECENT_FORKS_LENGTH, 5). -else. -define(RECENT_FORKS_LENGTH, 20). %% only return the last 20 forks -endif. -record(fork, { id, height, timestamp, block_ids }). -endif. ================================================ FILE: apps/arweave/include/ar_chunk_storage.hrl ================================================ -define(OFFSET_SIZE, 3). % Sufficient to represent a number up to 256 * 1024 (?DATA_CHUNK_SIZE). -define(OFFSET_BIT_SIZE, (?OFFSET_SIZE * 8)). -define(CHUNK_DIR, "chunk_storage"). ================================================ FILE: apps/arweave/include/ar_consensus.hrl ================================================ %% The number of RandomX hashes to compute to pack a chunk. -define(PACKING_DIFFICULTY, 20). %% The number of RandomX hashes to compute to pack a chunk after the fork 2.6. %% we want packing x30 longer than regular one %% 8 iterations - 2 ms %% 360 iterations - 59 ms %% 360/8 = 45 -define(PACKING_DIFFICULTY_2_6, 45). -define(RANDOMX_PACKING_ROUNDS, 8 * (?PACKING_DIFFICULTY)). -define(RANDOMX_PACKING_ROUNDS_2_6, 8 * (?PACKING_DIFFICULTY_2_6)). %% Stop supporting the legacy non-composite packing after this number of blocks %% passed since the fork 2.8. 365 * 24 * 60 * 60 / 128 = 246375. -define(SPORA_PACKING_EXPIRATION_PERIOD_BLOCKS, (246375 * 4)). %% Stop supporting the composite packing ~60 days have passed since 2.9 fork. 
%% 30 days = 30 * 24 * 60 * 60 / 128 = 20250.
-ifndef(COMPOSITE_PACKING_EXPIRATION_PERIOD_BLOCKS).
-define(COMPOSITE_PACKING_EXPIRATION_PERIOD_BLOCKS, (20250 * 2)).
-endif.

%% The number of times we apply an RX hash in each RX2 lane in-between every pair
%% of mixings.
-define(REPLICA_2_9_RANDOMX_PROGRAM_COUNT, 6).

%% The number of RX2 lanes.
-define(REPLICA_2_9_RANDOMX_LANE_COUNT, 4).

%% The RX2 depth: the number of RX2 rounds. A round of RX2 has:
%% 1. REPLICA_2_9_RANDOMX_LANE_COUNT lanes
%% 2. Each lane evaluates REPLICA_2_9_RANDOMX_PROGRAM_COUNT RandomX programs
%% 3. The output entropy of each lane is then mixed with crc32 (aka "near mix")
%% 4. The mixed output from all lanes is then shuffled (aka "far mix")
-define(REPLICA_2_9_RANDOMX_DEPTH, 3).

%% The size in bytes of the component RX2 scratchpad (aka the output from each RX2 lane). This
%% is NOT the total output entropy (that size is defined in REPLICA_2_9_ENTROPY_SIZE).
-define(RANDOMX_SCRATCHPAD_SIZE, 2097152).

%% The size in bytes of the total RX2 entropy (# of lanes * scratchpad size).
-ifdef(AR_TEST).
%% 32_768 bytes worth of entropy.
-define(REPLICA_2_9_ENTROPY_SIZE, (4 * ?COMPOSITE_PACKING_SUB_CHUNK_SIZE)).
-else.
%% 8_388_608 bytes worth of entropy.
-define(REPLICA_2_9_ENTROPY_SIZE, (
	?REPLICA_2_9_RANDOMX_LANE_COUNT * ?RANDOMX_SCRATCHPAD_SIZE
)).
-endif.

%% The number of entropies generated per partition.
%% The value is chosen depending on the PARTITION_SIZE and REPLICA_2_9_ENTROPY_SIZE constants
%% such that
%% 1. Entropy Partition Size =
%%    REPLICA_2_9_ENTROPY_COUNT * REPLICA_2_9_ENTROPY_SIZE >= PARTITION_SIZE
%% 2. Sector Size =
%%    REPLICA_2_9_ENTROPY_COUNT * COMPOSITE_PACKING_SUB_CHUNK_SIZE and
%%    is divisible by DATA_CHUNK_SIZE
%% This proves very convenient for chunk-by-chunk syncing.
%%
%% Equation to solve for REPLICA_2_9_ENTROPY_COUNT:
%% round(PARTITION_SIZE / REPLICA_2_9_ENTROPY_SIZE) to nearest multiple of 32
%%
%% e.g.
%% 3_600_000_000_000 / 8_388_608 = 429153.4423828125 %% (429_153 + 31) = 429_184 (nearest multiple of 32) %% %% Entropy Partition Size is 429_184 * 8_388_608 = 3_600_256_335_872 %% Sector Size is 429_184 * 8192 = 3_515_875_328 %% %% Each slice of an entropy is distributed to a different sector such that consecutive slices %% map to chunks that are as far as possible from each other within a partition. With %% an entropy size of 8_388_608 bytes and a slice size of 8192 bytes, there are 1024 slices per %% entropy, which yields 1024 sectors per partition. -ifndef(REPLICA_2_9_ENTROPY_COUNT). -define(REPLICA_2_9_ENTROPY_COUNT, 429_184). -endif. %% The effective packing difficulty of the new replication format (replica_format=1.) %% Determines the recall range size and the mining difficulty of the mining nonces. -ifndef(REPLICA_2_9_PACKING_DIFFICULTY). -define(REPLICA_2_9_PACKING_DIFFICULTY, 10). -endif. %% The size of the mining partition. The weave is broken down into partitions %% of equal size. A miner can search for a solution in each of the partitions %% in parallel, per mining address. -ifndef(PARTITION_SIZE). -define(PARTITION_SIZE, 3_600_000_000_000). % 90% of 4 TB. -endif. %% The size of a recall range. The first range is randomly chosen from the given %% mining partition. The second range is chosen from the entire weave. -ifdef(AR_TEST). -define(RECALL_RANGE_SIZE, (128 * 1024)). -else. -define(RECALL_RANGE_SIZE, 26_214_400). % == 25 * 1024 * 1024 -endif. %% The size of a recall range before the fork 2.8. -ifdef(AR_TEST). -define(LEGACY_RECALL_RANGE_SIZE, (512 * 1024)). -else. -define(LEGACY_RECALL_RANGE_SIZE, 104_857_600). % == 100 * 1024 * 1024 -endif. -ifndef(STRICT_DATA_SPLIT_THRESHOLD). %% The threshold was determined on the mainnet at the 2.5 fork block. The chunks %% submitted after the threshold must adhere to stricter validation rules. %% This offset is about half way through partition 8 -define(STRICT_DATA_SPLIT_THRESHOLD, 30_607_159_107_830). -endif. 
-ifdef(FORKS_RESET). -ifdef(AR_TEST). -define(MERKLE_REBASE_SUPPORT_THRESHOLD, (ar_block:strict_data_split_threshold() * 2)). -else. -define(MERKLE_REBASE_SUPPORT_THRESHOLD, 0). -endif. -else. %% The threshold was determined on the mainnet at the 2.7 fork block. The chunks %% submitted after the threshold must adhere to a different set of validation rules. -define(MERKLE_REBASE_SUPPORT_THRESHOLD, 151066495197430). -endif. %% Recall bytes are only picked from the subspace up to the size %% of the weave at the block of the depth defined by this constant. -ifdef(AR_TEST). -define(SEARCH_SPACE_UPPER_BOUND_DEPTH, 3). -else. -define(SEARCH_SPACE_UPPER_BOUND_DEPTH, 50). -endif. %% The maximum mining difficulty. 2 ^ 256. The network difficulty %% may theoretically be at most ?MAX_DIFF - 1. -define(MAX_DIFF, ( 115792089237316195423570985008687907853269984665640564039457584007913129639936 )). %% Increase the difficulty of PoA1 solutions by this multiplier (e.g. 100x). -ifndef(POA1_DIFF_MULTIPLIER). -define(POA1_DIFF_MULTIPLIER, 100). -endif. %% The number of nonce limiter steps sharing the entropy. We add the entropy %% from a past block every so often. If we did not add any entropy at all, even %% a slight speedup of the nonce limiting function (considering its low cost) allows %% one to eventually pre-compute a very significant amount of nonces opening up %% the possibility of mining without the speed limitation. On the other hand, %% adding the entropy at certain blocks (rather than nonce limiter steps) allows %% miners to use extra bandwidth (bearing almost no additional costs) to compute %% nonces on the short forks with different-entropy nonce limiting chains. -ifndef(NONCE_LIMITER_RESET_FREQUENCY). -define(NONCE_LIMITER_RESET_FREQUENCY, (10 * 120)). -endif. %% The maximum number of one-step checkpoints the block header may include. -ifndef(NONCE_LIMITER_MAX_CHECKPOINTS_COUNT). -define(NONCE_LIMITER_MAX_CHECKPOINTS_COUNT, 10800). -endif. 
%% The minimum difficulty allowed. -ifndef(SPORA_MIN_DIFFICULTY). -define(SPORA_MIN_DIFFICULTY(Height), fun() -> Forks = { ar_fork:height_2_4(), ar_fork:height_2_6() }, case Forks of {_Fork_2_4, Fork_2_6} when Height >= Fork_2_6 -> 2; {Fork_2_4, _Fork_2_6} when Height >= Fork_2_4 -> 21 end end()). -else. -define(SPORA_MIN_DIFFICULTY(_Height), ?SPORA_MIN_DIFFICULTY). -endif. %%%=================================================================== %%% Pre-fork 2.6 constants. %%%=================================================================== %% The size of the search space - a share of the weave randomly sampled %% at every block. The solution must belong to the search space. -define(SPORA_SEARCH_SPACE_SIZE(SearchSpaceUpperBound), fun() -> %% The divisor must be equal to SPORA_SEARCH_SPACE_SHARE %% defined in c_src/ar_mine_randomx.h. SearchSpaceUpperBound div 10 % 10% of the weave. end()). %% The number of contiguous subspaces of the search space, a roughly equal %% share of the search space is sampled from each of the subspaces. %% Must be equal to SPORA_SUBSPACES_COUNT defined in c_src/ar_mine_randomx.h. -define(SPORA_SEARCH_SPACE_SUBSPACES_COUNT, 1024). %% The key to initialize the RandomX state from, for RandomX packing. -define(RANDOMX_PACKING_KEY, <<"default arweave 2.5 pack key">>). -define(RANDOMX_HASHING_MODE_FAST, 0). -define(RANDOMX_HASHING_MODE_LIGHT, 1). %% The original plan was to cap the proof at 262144 (also the maximum chunk size). %% The maximum tree depth is then (262144 - 64) / (32 + 32 + 32) = 2730. %% Later we added support for offset rebases by recognizing the extra 32 bytes, %% possibly at every branching point, as indicating a rebase. To preserve the depth maximum, %% we now cap the size at 2730 * (96 + 32) + 65 = 349504. -define(MAX_DATA_PATH_SIZE, 349504). %% We may have at most 1000 transactions + 1000 padding nodes => depth=11 %% => at most 11 * 96 + 64 bytes worth of the proof. 
Due to its small size, we %% extend it somewhat for better future-compatibility. -define(MAX_TX_PATH_SIZE, 2176). ================================================ FILE: apps/arweave/include/ar_data_discovery.hrl ================================================ %% The size in bytes of a bucket used to group peers' sync records. When we want to sync %% an interval, we process it bucket by bucket: for every bucket, a few peers who are known to %% to have some data there are asked for the intervals they have and check which of them %% cross the desired interval. -ifdef(AR_TEST). -define(NETWORK_DATA_BUCKET_SIZE, 10_000_000). % 10 MB -else. -define(NETWORK_DATA_BUCKET_SIZE, 10_000_000_000). % 10 GB -endif. %% Similar to ?NETWORK_DATA_BUCKET_SIZE, except for a footprint bucket %% contains several "footprints" - sets of chunks spread out across the partition. -ifdef(AR_TEST). -define(NETWORK_FOOTPRINT_BUCKET_SIZE, 36). % 12 (footprints) * 3 (chunks); ~10 MB -else. -define(NETWORK_FOOTPRINT_BUCKET_SIZE, 37888). % 37 (footprints) * 1024 (chunks); ~10 GB -endif. %% The maximum number of synced intervals shared with peers. -ifdef(AR_TEST). -define(MAX_SHARED_SYNCED_INTERVALS_COUNT, 20). -else. -define(MAX_SHARED_SYNCED_INTERVALS_COUNT, 10_000). -endif. %% The upper limit for the size of a sync record serialized using Erlang Term Format. -define(MAX_ETF_SYNC_RECORD_SIZE, 80 * ?MAX_SHARED_SYNCED_INTERVALS_COUNT). %% byte_size(ar_serialize:jsonify(jiffy:encode(#{ packing => "replica_2_9_" ++ binary_to_list(crypto:strong_rand_bytes(32)), intervals => [[integer_to_list(trunc(math:pow(2, 256) - 1)), integer_to_list(trunc(math:pow(2, 256) - 1))] || _ <- lists:seq(1, 512)] }))). %% 243238 -define(MAX_FOOTPRINT_PAYLOAD_SIZE, 250_000). %% The upper limit for the size of the serialized (in Erlang Term Format) sync buckets. -define(MAX_SYNC_BUCKETS_SIZE, 100_000). %% How many peers with the biggest synced shares in the given bucket to query per bucket %% per sync job iteration. 
-define(QUERY_BEST_PEERS_COUNT, 15). %% The number of the release adding support for the %% GET /data_sync_record/[start]/[end]/[limit] endpoint. -define(GET_SYNC_RECORD_RIGHT_BOUND_SUPPORT_RELEASE, 83). %% The number of the release adding support for endpoints: %% GET /footprints/[partition]/[footprint] %% GET /footprint_buckets -define(GET_FOOTPRINT_SUPPORT_RELEASE, 91). ================================================ FILE: apps/arweave/include/ar_data_sync.hrl ================================================ %% The size in bits of the offset key in kv databases. -define(OFFSET_KEY_BITSIZE, 256). %% The size in bits of the key prefix used in prefix bloom filter %% when looking up chunks by offsets from kv database. %% 29 bytes of the prefix correspond to the 16777216 (16 Mib) max distance %% between the keys with the same prefix. The prefix should be bigger than %% max chunk size (256 KiB) so that the chunk in question is likely to be %% found in the filter and smaller than an SST table (200 MiB) so that the %% filter lookup can narrow the search down to a single table. -define(OFFSET_KEY_PREFIX_BITSIZE, 232). %% The upper size limit for a serialized chunk with its proof %% as it travels around the network. %% %% It is computed as ?MAX_PATH_SIZE (data_path) + DATA_CHUNK_SIZE (chunk) + %% 32 * 1000 (tx_path, considering the 1000 txs per block limit), %% multiplied by 1.34 (Base64), rounded to the nearest 50000 - %% the difference is sufficient to fit an offset, a data_root, %% and special JSON chars. -define(MAX_SERIALIZED_CHUNK_PROOF_SIZE, 750000). %% Transaction data bigger than this limit is not served in %% GET /tx//data endpoint. Clients interested in downloading %% such data should fetch it chunk by chunk. -define(MAX_SERVED_TX_DATA_SIZE, 12 * 1024 * 1024). %% The time to wait until the next full disk pool scan. -ifdef(AR_TEST). -define(DISK_POOL_SCAN_DELAY_MS, 2000). -else. -define(DISK_POOL_SCAN_DELAY_MS, 10000). -endif. 
%% How often to measure the number of chunks in the disk pool index. -ifdef(AR_TEST). -define(RECORD_DISK_POOL_CHUNKS_COUNT_FREQUENCY_MS, 1000). -else. -define(RECORD_DISK_POOL_CHUNKS_COUNT_FREQUENCY_MS, 5000). -endif. %% How long to keep the offsets of the recently processed "matured" chunks in a cache. %% We use the cache to quickly skip matured chunks when scanning the disk pool. -define(CACHE_RECENTLY_PROCESSED_DISK_POOL_OFFSET_LIFETIME_MS, 30 * 60 * 1000). %% The frequency of removing expired data roots from the disk pool. -define(REMOVE_EXPIRED_DATA_ROOTS_FREQUENCY_MS, 60000). %% The frequency of storing the server state on disk. -define(STORE_STATE_FREQUENCY_MS, 30000). %% The maximum number of chunks currently being downloaded or processed. -ifdef(AR_TEST). -define(SYNC_BUFFER_SIZE, 100). -else. -define(SYNC_BUFFER_SIZE, 1000). -endif. %% Defines how long we keep the interval excluded from syncing. %% If we cannot find an interval by peers, we temporarily exclude %% it from the sought ranges to prevent the syncing process from slowing down. -ifdef(AR_TEST). -define(EXCLUDE_MISSING_INTERVAL_TIMEOUT_MS, 5000). -else. -define(EXCLUDE_MISSING_INTERVAL_TIMEOUT_MS, 10 * 60 * 1000). -endif. %% Let at least this many chunks stack up, per storage module, then write them on disk in the %% ascending order, to reduce out-of-order disk writes causing fragmentation. -ifdef(AR_TEST). -define(STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD, 2). -else. -define(STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD, 100). % ~ 25 MB worth of chunks. -endif. %% If a chunk spends longer than this in the store queue, write it on disk without waiting %% for ?STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD chunks to stack up. -ifdef(AR_TEST). -define(STORE_CHUNK_QUEUE_FLUSH_TIME_THRESHOLD, 1000). -else. -define(STORE_CHUNK_QUEUE_FLUSH_TIME_THRESHOLD, 2_000). % 2 seconds. -endif. %% @doc The state of the server managing data synchronization. -record(sync_data_state, { %% The last entries of the block index. 
%% Used to determine orphaned data upon startup or chain reorg. block_index, %% The current weave size. The upper limit for the absolute chunk end offsets. weave_size, %% A reference to the on-disk key-value storage mapping %% AbsoluteChunkEndOffset %% => {ChunkDataKey, TXRoot, DataRoot, TXPath, ChunkOffset, ChunkSize} %% %% Chunks themselves and their DataPaths are stored separately (in chunk_data_db) %% because the offsets may change after a reorg. However, after the offset falls below %% DiskPoolThreshold, the chunk is packed for mining and recorded in the fast storage %% under the offset key. %% %% The index is used to look up the chunk by a random offset when a peer %% asks for it and to look up chunks of a transaction. chunks_index, %% A reference to the on-disk key-value storage mapping %% << DataRoot/binary, TXSize/binary, AbsoluteTXStartOffset/binary >> => TXPath. %% %% The index is used to look up tx_root for a submitted chunk and compute %% AbsoluteChunkEndOffset for the accepted chunk. %% %% We need the index because users should be able to submit their data without %% monitoring the chain, otherwise chain reorganisations might make the experience %% very unnerving. The index is NOT consulted when serving random chunks therefore %% it is possible to develop a lightweight client which would sync and serve random %% portions of the weave without maintaining this index. data_root_index, data_root_index_old, %% A reference to the on-disk key-value storage mapping %% AbsoluteBlockStartOffset => {TXRoot, BlockSize, DataRootIndexKeySet}. %% Each key in DataRootIndexKeySet is a << DataRoot/binary, TXSize:256 >> binary. %% Used to remove orphaned entries from DataRootIndex. data_root_offset_index, %% A reference to the on-disk key value storage mapping %% << DataRootTimestamp:256, ChunkDataIndexKey/binary >> => %% {RelativeChunkEndOffset, ChunkSize, DataRoot, TXSize, ChunkDataKey, IsStrictSplit}. 
%% %% The index is used to keep track of pending, orphaned, and recent chunks. %% A periodic process iterates over chunks from earliest to latest, consults %% DiskPoolDataRoots and data_root_index to decide whether each chunk needs to %% be removed from disk as orphaned, reincluded into the weave (by updating chunks_index), %% or removed from disk_pool_chunks_index by expiration. disk_pool_chunks_index, disk_pool_chunks_index_old, %% One of the keys from disk_pool_chunks_index or the atom "first". %% The disk pool is processed chunk by chunk going from the oldest entry to the newest, %% trying not to block the syncing process if the disk pool accumulates a lot of orphaned %% and pending chunks. The cursor remembers the key after the last processed on the %% previous iteration. After reaching the last key in the storage, we go back to %% the first one. Not stored. disk_pool_cursor, %% The weave offset for the disk pool - chunks above this offset are stored there. disk_pool_threshold = 0, %% A reference to the on-disk key value storage mapping %% TXID => {AbsoluteTXEndOffset, TXSize}. %% Is used to serve transaction data by TXID. tx_index, %% A reference to the on-disk key value storage mapping %% AbsoluteTXStartOffset => TXID. Is used to cleanup orphaned transactions from tx_index. tx_offset_index, %% A reference to the on-disk key value storage mapping %% << Timestamp:256, DataPathHash/binary >> to raw chunk data (possibly packed). %% %% Is used to store disk pool chunks (their global offsets cannot be determined with %% certainty yet). %% %% The timestamp prefix is used to make the written entries sorted from the start, %% to minimize the LSM-tree compaction overhead. chunk_data_db, %% A reference to the on-disk key value storage mapping migration names to their stages. migrations_index, %% A flag indicating the process has started collecting the intervals for syncing. %% We consult the other storage modules first, then search among the network peers. 
sync_status = undefined, %% The offsets of the chunks currently scheduled for (re-)packing (keys) and %% some chunk metadata needed for storing the chunk once it is packed. packing_map = #{}, %% The queue with unique {Start, End, Peer} triplets. Sync jobs are taking intervals %% from this queue and syncing them. sync_intervals_queue = gb_sets:new(), %% A compact set of non-overlapping intervals containing all the intervals from the %% sync intervals queue. We use it to quickly check which intervals have been queued %% already and avoid syncing the same interval twice. sync_intervals_queue_intervals = ar_intervals:new(), %% A key marking the beginning of a full disk pool scan. disk_pool_full_scan_start_key = none, %% The timestamp of the beginning of a full disk pool scan. Used to measure %% the time it takes to scan the current disk pool - if it is too short, we postpone %% the next scan to save some disk IO. disk_pool_full_scan_start_timestamp, %% A cache of the offsets of the recently "matured" chunks. We use it to quickly %% skip matured chunks when scanning the disk pool. The reason the chunk is still %% in the disk pool is some of its offsets have not matured yet (the same data can be %% submitted several times). recently_processed_disk_pool_offsets = #{}, %% A registry of the currently processed disk pool chunks consulted by different %% disk pool jobs to avoid double-processing. currently_processed_disk_pool_keys = sets:new(), %% A flag used to temporarily pause all disk pool jobs. disk_pool_scan_pause = false, %% The mining address the chunks are packed with in 2.6. mining_address, %% The identifier of the storage module the process is responsible for. store_id, %% The start offset of the range the module is responsible for. range_start = -1, %% The end offset of the range the module is responsible for. 
range_end = -1, %% The list of {StoreID, {Start, End}} - the ranges we want to copy %% from the other storage modules (possibly, (re)packing the data in the process). unsynced_intervals_from_other_storage_modules = [], %% The list of identifiers of the non-default storage modules intersecting with the given %% storage module to be searched for missing data before attempting to sync the data %% from the network. other_storage_modules_with_unsynced_intervals = [], %% The priority queue of chunks sorted by offset. The motivation is to have chunks %% stack up, per storage module, before writing them on disk so that we can write %% them in the ascending order and reduce out-of-order disk writes causing fragmentation. store_chunk_queue = gb_sets:new(), %% The length of the store chunk queue. store_chunk_queue_len = 0, %% The threshold controlling the brief accumulation of the chunks in the queue before %% the actual disk dump, to reduce the chance of out-of-order write causing disk %% fragmentation. store_chunk_queue_threshold = ?STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD, %% The phase of the syncing process. %% The phases are: %% - normal: normal left-to-right syncing (normally, of the unpacked data). %% - footprint: footprint-based syncing of replica 2.9 data. sync_phase = undefined }). ================================================ FILE: apps/arweave/include/ar_header_sync.hrl ================================================ %% The frequency of processing items in the queue. -ifdef(AR_TEST). -define(PROCESS_ITEM_INTERVAL_MS, 1000). -else. -define(PROCESS_ITEM_INTERVAL_MS, 100). -endif. %% The frequency of checking if there are headers to sync after everything %% is synced. Also applies to a fresh node without any data waiting for a block index. %% Another case is when the process misses a few blocks (e.g. blocks were sent while the %% supervisor was restarting it after a crash). -ifdef(AR_TEST). -define(CHECK_AFTER_SYNCED_INTERVAL_MS, 500). -else. 
-define(CHECK_AFTER_SYNCED_INTERVAL_MS, 5000). -endif. %% The initial value for the exponential backoff for failing requests. -ifdef(AR_TEST). -define(INITIAL_BACKOFF_INTERVAL_S, 1). -else. -define(INITIAL_BACKOFF_INTERVAL_S, 30). -endif. %% The maximum exponential backoff interval for failing requests. -ifdef(AR_TEST). -define(MAX_BACKOFF_INTERVAL_S, 2). -else. -define(MAX_BACKOFF_INTERVAL_S, 2 * 60 * 60). -endif. %% The frequency of storing the server state on disk. -define(STORE_HEADER_STATE_FREQUENCY_MS, 30000). ================================================ FILE: apps/arweave/include/ar_inflation.hrl ================================================ -ifndef(AR_INFLATION_HRL). -define(AR_INFLATION_HRL, true). -include_lib("arweave/include/ar.hrl"). %% An approximation of the natural logarithm of 2, %% expressed as a decimal fraction, with the precision of math:log. -define(LN2, {6931471805599453, 10000000000000000}). %% The precision of computing the natural exponent as a decimal fraction, %% expressed as the maximal power of the argument in the Taylor series. -define(INFLATION_NATURAL_EXPONENT_DECIMAL_FRACTION_PRECISION, 24). %% The tolerance used in the inflation schedule tests. -define(DEFAULT_TOLERANCE_PERCENT, 0.001). %% Height at which the 1.5.0.0 fork takes effect. -ifdef(AR_TEST). %% The inflation tests serve as a documentation of how rewards are computed. %% Therefore, we keep the mainnet value in these tests. Other tests have %% FORK 1.6 height set to zero from now on. -define(FORK_15_HEIGHT, 95000). -else. -define(FORK_15_HEIGHT, ?FORK_1_6). -endif. %% Blocks per year prior to 1.5.0.0 release. -define(PRE_15_BLOCKS_PER_YEAR, 525600 / (120 / 60)). %% Blocks per year prior to 2.5.0.0 release. -define(PRE_25_BLOCKS_PER_YEAR, (525600 / (120 / 60))). %% The number of extra tokens to grant for blocks between the 1.5.0.0 release %% and the end of year one. 
%% %% calculate_post_15_y1_extra() -> %% Pre15 = erlang:trunc(sum_rewards(fun calculate/1, 0, ?FORK_15_HEIGHT)), %% Base = erlang:trunc(sum_rewards(fun calculate_base/1, 0, ?FORK_15_HEIGHT)), %% Post15Diff = Base - Pre15, %% erlang:trunc(Post15Diff / (?BLOCKS_PER_YEAR - ?FORK_15_HEIGHT)). -define(POST_15_Y1_EXTRA, 13275279633337). -endif. ================================================ FILE: apps/arweave/include/ar_mining.hrl ================================================ -ifndef(AR_MINING_HRL). -define(AR_MINING_HRL, true). -define(GC_LOG_THRESHOLD, 1000). %% fields prefixed with cm_ are only set when a solution is distributed across miners as part %% of a coordinated mining set. -record(mining_candidate, { cache_ref = not_set, %% not serialized chunk1 = not_set, %% not serialized chunk2 = not_set, %% not serialized cm_diff = not_set, %% serialized. set to the difficulty used by the H1 miner cm_h1_list = [], %% serialized. list of {h1, nonce} pairs cm_lead_peer = not_set, %% not serialized. if set, this candidate came from another peer h0 = not_set, %% serialized h1 = not_set, %% serialized h2 = not_set, %% serialized mining_address = not_set, %% serialized next_seed = not_set, %% serialized next_vdf_difficulty = not_set, %% serialized nonce = not_set, %% serialized nonce_limiter_output = not_set, %% serialized partition_number = not_set, %% serialized partition_number2 = not_set, %% serialized partition_upper_bound = not_set, %% serialized poa2 = not_set, %% serialized preimage = not_set, %% serialized. this can be either the h1 or h2 preimage seed = not_set, %% serialized session_key = not_set, %% serialized start_interval_number = not_set, %% serialized step_number = not_set, %% serialized packing_difficulty = 0, %% serialized replica_format = 0, %% serialized label = <<"not_set">> %% not atom, for prevent atom table pollution DoS }). 
-record(mining_solution, { last_step_checkpoints = [], merkle_rebase_threshold = 0, next_seed = << 0:(8 * 48) >>, next_vdf_difficulty = 0, nonce = 0, nonce_limiter_output = << 0:256 >>, partition_number = 0, poa1 = #poa{}, poa2 = #poa{}, preimage = << 0:256 >>, recall_byte1 = 0, recall_byte2 = undefined, solution_hash = << 0:256 >>, start_interval_number = 0, step_number = 0, steps = [], seed = << 0:(8 * 48) >>, mining_address = << 0:256 >>, partition_upper_bound = 0, packing_difficulty = 0, replica_format = 0 }). -endif. ================================================ FILE: apps/arweave/include/ar_mining_cache.hrl ================================================ -ifndef(AR_MINING_CACHE_HRL). -define(AR_MINING_CACHE_HRL, true). -record(ar_mining_cache_value, { chunk1 :: binary() | undefined, chunk2 :: binary() | undefined, chunk1_failed = false :: boolean(), chunk2_failed = false :: boolean(), h1 :: binary() | undefined, h1_passes_diff_checks = false :: boolean(), h2 :: binary() | undefined }). -record(ar_mining_cache_session, { mining_cache = #{} :: #{term() => #ar_mining_cache_value{}}, mining_cache_size_bytes = 0 :: non_neg_integer(), reserved_mining_cache_bytes = 0 :: non_neg_integer() }). -record(ar_mining_cache, { name = not_set :: term(), mining_cache_sessions = #{} :: #{term() => #ar_mining_cache_session{}}, mining_cache_sessions_queue = queue:new() :: queue:queue(), mining_cache_limit_bytes = 0 :: non_neg_integer() }). -endif. ================================================ FILE: apps/arweave/include/ar_peers.hrl ================================================ -ifndef(AR_PEERS_HRL). -define(AR_PEERS_HRL, true). -include_lib("ar.hrl"). 
-record(performance, { version = 3, release = -1, total_bytes = 0, total_throughput = 0.0, %% bytes per millisecond total_transfers = 0, average_latency = 0.0, %% milliseconds average_throughput = 0.0, %% bytes per millisecond average_success = 1.0, lifetime_rating = 0, %% longer time window current_rating = 0 %% shorter time window }). -endif. ================================================ FILE: apps/arweave/include/ar_poa.hrl ================================================ -ifndef(AR_POA_HRL). -define(AR_POA_HRL, true). -include("ar.hrl"). -record(chunk_proof, { metadata :: #chunk_metadata{}, seek_byte :: non_neg_integer(), tx_start_offset :: non_neg_integer(), tx_end_offset :: non_neg_integer(), block_start_offset :: non_neg_integer(), block_end_offset :: non_neg_integer(), chunk_id :: binary(), chunk_start_offset :: non_neg_integer(), chunk_end_offset :: non_neg_integer(), validate_data_path_ruleset :: 'offset_rebase_support_ruleset' | 'strict_data_split_ruleset' | 'strict_borders_ruleset', tx_path_is_valid = not_validated :: 'not_validated' | 'valid' | 'invalid', data_path_is_valid = not_validated :: 'not_validated' | 'valid' | 'invalid', chunk_is_valid = not_validated :: 'not_validated' | 'valid' | 'invalid' }). -endif. ================================================ FILE: apps/arweave/include/ar_pool.hrl ================================================ %% The number of VDF steps ("jobs") the pool server serves at a time. -define(GET_JOBS_COUNT, 10). %% The time in seconds the pool server waits before giving up on replying with %% new jobs when the client already has the newest job. -define(GET_JOBS_TIMEOUT_S, 2). %% The frequency in milliseconds of asking the pool or CM exit node about new jobs. -define(FETCH_JOBS_FREQUENCY_MS, 500). %% The time in milliseconds we wait before retrying a failed fetch jobs request. -define(FETCH_JOBS_RETRY_MS, 2000). %% The frequency in milliseconds of asking the pool or CM exit node about new CM jobs. 
-define(FETCH_CM_JOBS_FREQUENCY_MS, 1000). %% The time in milliseconds we wait before retrying a failed fetch CM jobs request. -define(FETCH_CM_JOBS_RETRY_MS, 2000). %% @doc A collection of mining jobs. -record(jobs, { jobs = [], %% The information about a single VDF output (a "job"). partial_diff = {0, 0}, %% Partial difficulty. seed = <<>>, next_seed = <<>>, interval_number = 0, next_vdf_difficulty = 0 }). %% @doc A mining job. -record(job, { output = <<>>, global_step_number = 0, partition_upper_bound = 0 }). %% @doc Partial solution validation response. -record(partial_solution_response, { indep_hash = <<>>, status = <<>> }). %% @doc A set of coordinated mining jobs provided by the pool. %% %% Miners fetch and submit pool CM jobs via the same POST /pool_cm_jobs endpoint. %% When miners fetch jobs, they specify the partitions and leave the job fields empty. %% When miners submit jobs, they leave the partitions field empty. -record(pool_cm_jobs, { h1_to_h2_jobs = [], % [#mining_candidate{}] h1_read_jobs = [], % [#mining_candidate{}] %% A list of {[{bucket, ...}, {bucketsize, ...}, {addr, ...}]} or %% {[{bucket, ...}, {bucketsize, ...}, {addr, ...}, {pdiff, ...}]} JSON structs. partitions = [] }). ================================================ FILE: apps/arweave/include/ar_pricing.hrl ================================================ %% @doc Pricing macros. %% For a new account, we charge the fee equal to the price of uploading %% this number of bytes. The fee is about 0.1$ at the time. -define(NEW_ACCOUNT_FEE_DATA_SIZE_EQUIVALENT, 20_000_000). %% The target number of replications. -ifdef(AR_TEST). -define(N_REPLICATIONS, fun(_MACRO_Height) -> 200 end). -else. -define(N_REPLICATIONS, fun(MACRO_Height) -> MACRO_Forks = { ar_fork:height_2_5(), ar_fork:height_2_6() }, case MACRO_Forks of {_MACRO_Fork_2_5, MACRO_Fork_2_6} when MACRO_Height >= MACRO_Fork_2_6 -> 20; {MACRO_Fork_2_5, _MACRO_Fork_2_6} when MACRO_Height >= MACRO_Fork_2_5 -> 45; _ -> 10 end end). -endif. 
%% The miners always receive ?MINER_FEE_SHARE of the transaction fees, even %% when the fees are bigger than the required minimum. -define(MINER_FEE_SHARE, {1, 21}). %% When a double-signing proof is provided, we reward the prover with the %% ?DOUBLE_SIGNING_PROVER_REWARD_SHARE of the minimum reward among the preceding %% ?DOUBLE_SIGNING_REWARD_SAMPLE_SIZE blocks. -ifdef(AR_TEST). -define(DOUBLE_SIGNING_REWARD_SAMPLE_SIZE, 2). -else. -define(DOUBLE_SIGNING_REWARD_SAMPLE_SIZE, 100). -endif. %% When a double-signing proof is provided, we reward the prover with the %% ?DOUBLE_SIGNING_PROVER_REWARD_SHARE of the minimum reward among the preceding %% ?DOUBLE_SIGNING_REWARD_SAMPLE_SIZE blocks. -define(DOUBLE_SIGNING_PROVER_REWARD_SHARE, {1, 2}). %% Every transaction fee has to be at least %% X + X * ?MINER_MINIMUM_ENDOWMENT_CONTRIBUTION_SHARE %% where X is the amount sent to the endowment pool. -define(MINER_MINIMUM_ENDOWMENT_CONTRIBUTION_SHARE, {1, 20}). %% The fixed USD to AR rate used after the fork 2.6 until the automatic transition to the new %% pricing scheme is complete. We fix the rate because the network difficulty is expected to %% fluctuate a lot around the fork. -define(FORK_2_6_PRE_TRANSITION_USD_TO_AR_RATE, {1, 10}). %% The number of recent blocks with the reserved (temporarily locked) mining rewards. -ifdef(AR_TEST). % testnet value should have same ratio 30:1 to VDF_DIFFICULTY_RETARGET % BUT. For tests we are using old value -define(LOCKED_REWARDS_BLOCKS, 3). -else. -ifndef(LOCKED_REWARDS_BLOCKS). -define(LOCKED_REWARDS_BLOCKS, (30 * 24 * 30)). -endif. -endif. %% The number of recent blocks contributing data points to the continuous estimation %% of the average price of storing a gibibyte for a minute. A recent subset of the %% reward history is used for tracking the reserved mining rewards. -ifdef(AR_TEST). -define(REWARD_HISTORY_BLOCKS, 3). -else. -ifndef(REWARD_HISTORY_BLOCKS). -define(REWARD_HISTORY_BLOCKS, (3 * 30 * 24 * 30)). -endif. -endif. 
%% The REWARD_HISTORY_BLOCKS before 2.8. -ifdef(AR_TEST). -define(LEGACY_REWARD_HISTORY_BLOCKS, 3). -else. -ifndef(LEGACY_REWARD_HISTORY_BLOCKS). -define(LEGACY_REWARD_HISTORY_BLOCKS, (30 * 24 * 30)). -endif. -endif. %% The prices are re-estimated every so many blocks. -ifdef(AR_TEST). -define(PRICE_ADJUSTMENT_FREQUENCY, 2). -else. -ifndef(PRICE_ADJUSTMENT_FREQUENCY). -define(PRICE_ADJUSTMENT_FREQUENCY, 50). -endif. -endif. %% An approximation of the natural logarithm of ?PRICE_DECAY_ANNUAL (0.995), %% expressed as a decimal fraction, with the precision of math:log. -define(LN_PRICE_DECAY_ANNUAL, {-5012541823544286, 1000000000000000000}). %% The assumed annual decay rate of the Arweave prices, expressed as a decimal fraction. -define(PRICE_DECAY_ANNUAL, {995, 1000}). % 0.995, i.e., 0.5% annual decay rate. %% The precision of computing the natural exponent as a decimal fraction, %% expressed as the maximal power of the argument in the Taylor series. -define(TX_PRICE_NATURAL_EXPONENT_DECIMAL_FRACTION_PRECISION, 9). %% When/if the endowment fund runs empty, we increase storage fees and lock a "Kryder+ rate %% multiplier latch" to make sure we do not increase the fees several times while the %% endowment size remains low. Once the endowment is bigger than this constant again, %% we open the latch (and will increase the fees again when/if the endowment is empty). %% The value is redenominated according to the denomination used at the time. -ifdef(AR_TEST). -define(RESET_KRYDER_PLUS_LATCH_THRESHOLD, 100_000_000_000). -else. -define(RESET_KRYDER_PLUS_LATCH_THRESHOLD, 10_000_000_000_000_000). -endif. %% The total supply, in Winston (the sum of genesis balances + the total emission). %% Does NOT include the additional emission which may start in the far future if and when %% the endowment pool runs empty. -ifdef(AR_TEST). %% The debug constant is not always actually equal to the sum of genesis balances plus 
We just set a relatively low value so that we can reproduce %% autoredenomination in tests. -define(TOTAL_SUPPLY, 1500000000000). -else. -ifdef(FORKS_RESET). %% This value should be ideally adjusted if the genesis balances %% of a new weave differ from those in mainnet. -define(TOTAL_SUPPLY, 66000015859279336957). -else. -define(TOTAL_SUPPLY, 66_000_015_859_279_336_957). -endif. -endif. %% Re-denominate AR (multiply by 1000) when the available supply falls below this %% number of units. -ifdef(AR_TEST). -define(REDENOMINATION_THRESHOLD, 1350000000000). -else. -define(REDENOMINATION_THRESHOLD, 1000_000_000_000_000_000). -endif. %% The number of blocks which has to pass after we assign the redenomination height before %% the redenomination occurs. Transactions without an explicitly assigned denomination are %% not allowed in these blocks. The motivation is to protect the legacy libraries' users from %% an attack where a post-redenomination transaction is included in a pre-redenomination %% block, potentially charging the user a thousand times the intended fee or transfer amount. -ifdef(AR_TEST). -define(REDENOMINATION_DELAY_BLOCKS, 2). -else. -define(REDENOMINATION_DELAY_BLOCKS, 100). -endif. %% USD to AR exchange rates by height defined together with INITIAL_USD_TO_AR_HEIGHT %% and INITIAL_USD_TO_AR_DIFF. The protocol uses these constants to estimate the %% USD to AR rate at any block based on the change in the network difficulty and inflation %% rewards. -define(INITIAL_USD_TO_AR(Height), fun() -> Forks = { ar_fork:height_2_4(), ar_fork:height_2_5() }, case Forks of {_Fork_2_4, Fork_2_5} when Height >= Fork_2_5 -> {1, 65}; {Fork_2_4, _Fork_2_5} when Height >= Fork_2_4 -> ?INITIAL_USD_TO_AR_PRE_FORK_2_5 end end). %% The original USD to AR conversion rate, defined as a fraction. Set up at fork 2.4. %% Used until the fork 2.5. -define(INITIAL_USD_TO_AR_PRE_FORK_2_5, {1, 5}). 
%% The network difficulty at the time when the USD to AR exchange rate was %% ?INITIAL_USD_TO_AR(Height). Used to account for the change in the network %% difficulty when estimating the new USD to AR rate. -define(INITIAL_USD_TO_AR_DIFF(Height), fun() -> Forks = { ar_fork:height_1_9(), ar_fork:height_2_2(), ar_fork:height_2_5() }, case Forks of {_Fork_1_9, _Fork_2_2, Fork_2_5} when Height >= Fork_2_5 -> 32; {_Fork_1_9, Fork_2_2, _Fork_2_5} when Height >= Fork_2_2 -> 34; {Fork_1_9, _Fork_2_2, _Fork_2_5} when Height < Fork_1_9 -> 28; _ -> 29 end end). %% The network height at the time when the USD to AR exchange rate was %% ?INITIAL_USD_TO_AR(Height). Used to account for the change in inflation %% rewards when estimating the new USD to AR rate. -define(INITIAL_USD_TO_AR_HEIGHT(Height), fun() -> Forks = { ar_fork:height_1_9(), ar_fork:height_2_2(), ar_fork:height_2_5(), ar_fork:height_2_6() }, %% In case the fork heights are reset to 0 (e.g. on testnets), %% set the initial height to 1 - the height where the inflation %% emission essentially begins. case Forks of {_Fork_1_9, _Fork_2_2, _Fork_2_5, Fork_2_6} when Height >= Fork_2_6 -> max(Fork_2_6, 1); {_Fork_1_9, _Fork_2_2, Fork_2_5, _Fork_2_6} when Height >= Fork_2_5 -> max(Fork_2_5, 1); {_Fork_1_9, Fork_2_2, _Fork_2_5, _Fork_2_6} when Height >= Fork_2_2 -> max(Fork_2_2, 1); {Fork_1_9, _Fork_2_2, _Fork_2_5, _Fork_2_6} when Height < Fork_1_9 -> max(ar_fork:height_1_8(), 1); {Fork_1_9, _Fork_2_2, _Fork_2_5, _Fork_2_6} -> max(Fork_1_9, 1) end end). %% The base wallet generation fee in USD, defined as a fraction. %% The amount in AR depends on the current difficulty and height. %% Used until the transition to the new fee calculation method is complete. -define(WALLET_GEN_FEE_USD, {1, 10}). %% The estimated historical price of storing 1 GB of data for the year 2018, %% expressed as a decimal fraction. %% Used until the transition to the new fee calculation method is complete. -define(USD_PER_GBY_2018, {1045, 1000000}). 
% 0.001045 %% The estimated historical price of storing 1 GB of data for the year 2019, %% expressed as a decimal fraction. %% Used until the transition to the new fee calculation method is complete. -define(USD_PER_GBY_2019, {925, 1000000}). % 0.000925 -define(STATIC_2_6_8_FEE_WINSTON, 858_000_000_000). %% The largest possible multiplier for a one-step increase of the USD to AR Rate. -define(USD_TO_AR_MAX_ADJUSTMENT_UP_MULTIPLIER, {1005, 1000}). %% The largest possible multiplier for a one-step decrease of the USD to AR Rate. -define(USD_TO_AR_MAX_ADJUSTMENT_DOWN_MULTIPLIER, {995, 1000}). %% Reduce the USD to AR fraction if both the dividend and the divisor become bigger than this. -ifdef(AR_TEST). -define(USD_TO_AR_FRACTION_REDUCTION_LIMIT, 100). -else. -define(USD_TO_AR_FRACTION_REDUCTION_LIMIT, 1000000). -endif. %% Every transaction fee has to be at least X + X * ?MINING_REWARD_MULTIPLIER %% where X is the amount sent to the endowment pool. %% Used until the transition to the new fee calculation method is complete. -ifdef(AR_TEST). -define(MINING_REWARD_MULTIPLIER, {2, 10000}). -else. -define(MINING_REWARD_MULTIPLIER, {2, 10}). -endif. %% The USD to AR exchange rate for a new chain, e.g. a testnet. -define(NEW_WEAVE_USD_TO_AR_RATE, ?INITIAL_USD_TO_AR_PRE_FORK_2_5). %% Initial $/AR exchange rate. Used until the fork 2.4. -define(INITIAL_USD_PER_AR(Height), fun() -> Forks = { ar_fork:height_1_9(), ar_fork:height_2_2() }, case Forks of {Fork_1_9, _Fork_2_2} when Height < Fork_1_9 -> 1.5; {_Fork_1_9, Fork_2_2} when Height >= Fork_2_2 -> 4; _ -> 1.2 end end). %% Base wallet generation fee. Used until fork 2.2. -define(WALLET_GEN_FEE, 250000000000). ================================================ FILE: apps/arweave/include/ar_repack.hrl ================================================ -ifndef(AR_REPACK_HRL). -define(AR_REPACK_HRL, true). 
-record(repack_chunk, { state = needs_chunk :: needs_chunk | invalid | entropy_only | already_repacked | needs_data_path | needs_repack | needs_entropy | needs_encipher | needs_write | error, metadata = not_set :: not_set | not_found | #chunk_metadata{}, offsets = not_set :: not_set | not_found | #chunk_offsets{}, %% source_packing is used to track the current packing format of the chunk. It starts %% set to the original format of the chunk, but will be updated as the chunk is %% repacked (sometimes through intermediate formats) and ultimately will be set equal %% to target_packing. source_packing = not_set :: not_set | not_found | ar_packing:packing(), target_packing = not_set :: not_set | not_found | ar_packing:packing(), chunk = not_set :: not_set | not_found | binary(), source_entropy = not_set :: not_set | binary(), target_entropy = not_set :: not_set | binary() }). -endif. ================================================ FILE: apps/arweave/include/ar_sup.hrl ================================================ %% The number of milliseconds the supervisor gives every process for shutdown. -ifdef(AR_TEST). -define(SHUTDOWN_TIMEOUT, 30_000). -else. -define(SHUTDOWN_TIMEOUT, 300_000). -endif. -define(CHILD(I, Type), #{ id => I, start => {I, start_link, []}, restart => permanent, shutdown => ?SHUTDOWN_TIMEOUT, type => Type, modules => [I] }). -define(CHILD_WITH_ARGS(I, Type, Name, Args), #{ id => Name, start => {I, start_link, Args}, restart => permanent, shutdown => ?SHUTDOWN_TIMEOUT, type => Type, modules => [Name] }). %% From the Erlang docs: %% %% An integer time-out value means that the supervisor tells the child process to terminate %% by calling exit(Child,shutdown) and then wait for an exit signal with reason shutdown back %% from the child process. If no exit signal is received within the specified number of %% milliseconds, the child process is unconditionally terminated using exit(Child,kill). 
%% If the child process is another supervisor, the shutdown time must be set to infinity to %% give the subtree ample time to shut down. -define(CHILD_SUP(I, Type), #{ id => I, start => {I, start_link, []}, restart => permanent, shutdown => infinity, type => Type, modules => [I] }). ================================================ FILE: apps/arweave/include/ar_sync_buckets.hrl ================================================ %% The size in bytes of a bucket in sync buckets. The bigger the bucket, %% the more compact the structure is, but also the higher the number of "misses" %% encountered when asking peers about the presence of particular chunks. %% If the serialized buckets do not fit in ?MAX_SYNC_BUCKETS_SIZE, the bucket %% size is doubled until they fit. -ifdef(AR_TEST). -define(DEFAULT_SYNC_BUCKET_SIZE, 10000000). -else. -define(DEFAULT_SYNC_BUCKET_SIZE, 10_000_000_000). % 10 GB -endif. %% The maximum ratio between a peer's reported bucket size and the expected bucket size. -define(MAX_SYNC_BUCKET_SIZE_RATIO, 4096). ================================================ FILE: apps/arweave/include/ar_vdf.hrl ================================================ % 25 checkpoints 40 ms each = 1000 ms -define(VDF_CHECKPOINT_COUNT_IN_STEP, 25). -define(VDF_BYTE_SIZE, 32). %% Typical ryzen 5900X iterations for 1 sec -define(VDF_SHA_1S, 15_000_000). -ifndef(VDF_DIFFICULTY). -define(VDF_DIFFICULTY, ?VDF_SHA_1S div ?VDF_CHECKPOINT_COUNT_IN_STEP). -endif. -ifdef(AR_TEST). % NOTE. VDF_DIFFICULTY_RETARGET should be > 10 because it's > 10 in mainnet % So VDF difficulty should change slower than difficulty -define(VDF_DIFFICULTY_RETARGET, 20). -define(VDF_HISTORY_CUT, 2). -else. -ifndef(VDF_DIFFICULTY_RETARGET). -define(VDF_DIFFICULTY_RETARGET, 720). -endif. -ifndef(VDF_HISTORY_CUT). -define(VDF_HISTORY_CUT, 50). -endif. -endif. 
================================================ FILE: apps/arweave/include/ar_verify_chunks.hrl ================================================ -ifndef(AR_VERIFY_CHUNKS_HRL). -define(AR_VERIFY_CHUNKS_HRL, true). -record(verify_report, { start_time :: non_neg_integer(), total_error_bytes = 0 :: non_neg_integer(), total_error_chunks = 0 :: non_neg_integer(), error_bytes = #{} :: #{atom() => non_neg_integer()}, error_chunks = #{} :: #{atom() => non_neg_integer()}, bytes_processed = 0 :: non_neg_integer(), progress = 0 :: non_neg_integer(), status = not_ready :: not_ready | running| done }). -record(sample_report, { samples = 0 :: non_neg_integer(), total = 0 :: non_neg_integer(), success = 0 :: non_neg_integer(), failure = 0 :: non_neg_integer() }). -define(SAMPLE_CHUNK_COUNT, 1000). -endif. ================================================ FILE: apps/arweave/include/ar_wallets.hrl ================================================ %% @doc The maximum number of wallets served via /wallet_list/[root_hash]/[cursor]. -ifdef(AR_TEST). -define(WALLET_LIST_CHUNK_SIZE, 2). -else. -define(WALLET_LIST_CHUNK_SIZE, 2500). -endif. %% @doc The upper limit for the size of the response fetched from %% /wallet_list/[root_hash]/[cursor], when serialized using Erlang Term Format. %% The actual size of the binary for so many wallets is a few kilobytes smaller, %% so the response may contain some metadata. -define(MAX_SERIALIZED_WALLET_LIST_CHUNK_SIZE, ?WALLET_LIST_CHUNK_SIZE * 202). % = 505000 ================================================ FILE: apps/arweave/include/user_default.hrl ================================================ %% %% This file is only intended to be included into user_default.erl file. %% The reason to include these headers into user_default module is to %% enable records to be rendered properly in the REPL. %% It might be a good idea to also include some third-party libraries headers %% here as well (e.g. cowboy's request, etc.) %% -include_lib("arweave/include/ar.hrl"). 
-include_lib("arweave/include/ar_blacklist_middleware.hrl"). -include_lib("arweave/include/ar_block.hrl"). -include_lib("arweave/include/ar_chain_stats.hrl"). -include_lib("arweave/include/ar_chunk_storage.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("arweave/include/ar_consensus.hrl"). -include_lib("arweave/include/ar_data_discovery.hrl"). -include_lib("arweave/include/ar_data_sync.hrl"). -include_lib("arweave/include/ar_header_sync.hrl"). -include_lib("arweave/include/ar_inflation.hrl"). -include_lib("arweave/include/ar_mining.hrl"). -include_lib("arweave/include/ar_peers.hrl"). -include_lib("arweave/include/ar_poa.hrl"). -include_lib("arweave/include/ar_pool.hrl"). -include_lib("arweave/include/ar_pricing.hrl"). -include_lib("arweave/include/ar_sup.hrl"). -include_lib("arweave/include/ar_sync_buckets.hrl"). -include_lib("arweave/include/ar_vdf.hrl"). -include_lib("arweave/include/ar_verify_chunks.hrl"). -include_lib("arweave/include/ar_wallets.hrl"). ================================================ FILE: apps/arweave/src/ar.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @doc Arweave server entrypoint and basic utilities. %%% @end %%%=================================================================== -module(ar). -behaviour(application). -compile(warnings_as_errors). 
-export([
	benchmark_hash/0, benchmark_hash/1, benchmark_packing/0, benchmark_packing/1,
	benchmark_vdf/0, benchmark_vdf/1, console/1, console/2,
	create_ecdsa_wallet/0, create_ecdsa_wallet/1, create_wallet/0, create_wallet/1,
	docs/0, e2e/0, e2e/1, main/1, prep_stop/1, shell/0, shell_e2e/0,
	shell_localnet/0, shell_localnet/1, shutdown/1, start/1, start/2,
	start_dependencies/0, stop/1, stop_dependencies/0, stop_shell/0,
	stop_shell_e2e/0, stop_shell_localnet/0, tests/0, tests/1
]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include("ar_verify_chunks.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Supported feature flags (default behaviour)
% http_logging (false)
% disk_logging (false)
% miner_logging (true)
% subfield_queries (false)
% blacklist (true)
% time_syncing (true)

%%--------------------------------------------------------------------
%% @doc Command line program entrypoint. Takes a list of arguments.
%% @end
%%--------------------------------------------------------------------
main("") ->
	%% No arguments: print usage and exit with a non-zero status.
	ar_cli_parser:show_help(),
	init:stop(1);
main(Args) ->
	% arweave_config must be the first application started, it
	% will keep the configuration for all other arweave
	% applications or processes.
	arweave_config:start(),
	% let parse the arguments and initialize arweave_config. The
	% idea here is to let full control over the configuration to
	% arweave_config and then return the correct configuration
	% file. In case of error, the application is stopped.
	case arweave_config_bootstrap:start(Args) of
		{ok, Config} ->
			start(Config);
		Else ->
			ar_cli_parser:show_help(),
			init:stop(1),
			{error, Else}
	end.

%%--------------------------------------------------------------------
%% @doc Start an Arweave node on this BEAM.
%% @end
%%--------------------------------------------------------------------
start(Port) when is_integer(Port) ->
	%% Convenience clause: a bare port number becomes a default config.
	start(#config{ port = Port });
start(Config) ->
	%% Start the logging system.
	case os:getenv("TERM") of
		"dumb" ->
			% Set logger to output all levels of logs to the console
			% when running in a dumb terminal.
			logger:add_handler(console, logger_std_h, #{level => all});
		_->
			ok
	end,
	%% Validate the configuration before anything else; sleep briefly so
	%% the validation errors reach the console before the VM stops.
	case ar_config:validate_config(Config) of
		true ->
			ok;
		false ->
			timer:sleep(2000),
			init:stop(1)
	end,
	Config2 = ar_config:set_dependent_flags(Config),
	ok = arweave_config:set_env(Config2),
	filelib:ensure_dir(Config2#config.log_dir ++ "/"),
	warn_if_single_scheduler(),
	%% Benchmark local VDF speed unless trusted VDF servers are configured.
	case Config2#config.nonce_limiter_server_trusted_peers of
		[] ->
			VDFSpeed = ar_bench_vdf:run_benchmark(),
			?LOG_INFO([{event, vdf_benchmark}, {vdf_s, VDFSpeed / 1000000}]);
		_ ->
			ok
	end,
	start_dependencies().

%%--------------------------------------------------------------------
%% @hidden
%% @doc application `start/2' callback. function used to start arweave
%% application using `application:start/1' or while using OTP.
%% @end
%%--------------------------------------------------------------------
start(normal, _Args) ->
	% Load configuration from environment variable, it will
	% impact only feature supporting arweave_config.
	arweave_config_environment:load(),
	% Load the old configuration from arweave_config.
	{ok, Config} = arweave_config:get_env(),
	% arweave_config can now switch in runtime mode. Setting
	% parameters without "runtime" flag set to true will fail now.
	arweave_config:runtime(),
	%% Set erlang socket backend
	persistent_term:put({kernel, inet_backend}, Config#config.'socket.backend'),
	%% Configure logger
	ar_logger:init(Config),
	?LOG_INFO("========== Starting Arweave Node =========="),
	ar_config:log_config(Config),
	%% Start the Prometheus metrics subsystem.
	prometheus_registry:register_collector(prometheus_process_collector),
	prometheus_registry:register_collector(ar_metrics_collector),
	%% Register custom metrics.
	ar_metrics:register(),
	%% Start other apps which we depend on.
	set_mining_address(Config),
	ar_chunk_storage:run_defragmentation(),
	%% Start Arweave.
	ar_sup:start_link().
%% Resolve the mining address at startup:
%% - mining_addr = not_set: create (or load) an RSA wallet, store its address
%%   in the config, then re-enter to run the key-file check below;
%% - mine = false: nothing to do;
%% - otherwise: verify the key file for the configured address exists, unless
%%   this node is a coordinated-mining client (cm_exit_peer set) or pool client.
set_mining_address(#config{ mining_addr = not_set } = C) ->
	case ar_wallet:get_or_create_wallet([{?RSA_SIGN_ALG, 65537}]) of
		{error, Reason} ->
			ar:console("~nFailed to create a wallet, reason: ~p.~n",
				[io_lib:format("~p", [Reason])]),
			%% Give the console message a moment to flush before stopping.
			timer:sleep(500),
			init:stop(1);
		W ->
			Addr = ar_wallet:to_address(W),
			ar:console("~nSetting the mining address to ~s.~n", [ar_util:encode(Addr)]),
			C2 = C#config{ mining_addr = Addr },
			arweave_config:set_env(C2),
			set_mining_address(C2)
	end;
set_mining_address(#config{ mine = false }) ->
	ok;
set_mining_address(#config{ mining_addr = Addr, cm_exit_peer = CmExitPeer,
		is_pool_client = PoolClient }) ->
	case ar_wallet:load_key(Addr) of
		not_found ->
			case {CmExitPeer, PoolClient} of
				{not_set, false} ->
					ar:console("~nThe mining key for the address ~s was not found."
						" Make sure you placed the file in [data_dir]/~s (the node is looking for"
						" [data_dir]/~s/[mining_addr].json or "
						"[data_dir]/~s/arweave_keyfile_[mining_addr].json file)."
						" Do not specify \"mining_addr\" if you want one to be generated.~n~n",
						[ar_util:encode(Addr), ?WALLET_DIR, ?WALLET_DIR, ?WALLET_DIR]),
					init:stop(1);
				_ ->
					ok
			end;
		_Key ->
			ok
	end.

%% CLI entrypoint: create an RSA wallet under DataDir; print usage otherwise.
create_wallet([DataDir]) ->
	create_wallet(DataDir, ?RSA_KEY_TYPE);
create_wallet(_) ->
	create_wallet_fail(?RSA_KEY_TYPE).

%% CLI entrypoints: create an ECDSA wallet under DataDir; print usage otherwise.
create_ecdsa_wallet() ->
	create_wallet_fail(?ECDSA_KEY_TYPE).

create_ecdsa_wallet([DataDir]) ->
	create_wallet(DataDir, ?ECDSA_KEY_TYPE);
create_ecdsa_wallet(_) ->
	create_wallet_fail(?ECDSA_KEY_TYPE).

%% Create a key file of the given type under DataDir, print the resulting
%% address, then stop the VM. Prints usage when DataDir is not a directory.
create_wallet(DataDir, KeyType) ->
	case filelib:is_dir(DataDir) of
		false ->
			create_wallet_fail(KeyType);
		true ->
			ok = arweave_config:set_env(#config{ data_dir = DataDir }),
			case ar_wallet:new_keyfile(KeyType) of
				{error, Reason} ->
					ar:console("Failed to create a wallet, reason: ~p.~n~n",
						[io_lib:format("~p", [Reason])]),
					timer:sleep(500),
					init:stop(1);
				W ->
					Addr = ar_wallet:to_address(W),
					ar:console("Created a wallet with address ~s.~n",
						[ar_util:encode(Addr)]),
					init:stop(1)
			end
	end.
%% No data dir given: print usage.
create_wallet() ->
	create_wallet_fail(?RSA_KEY_TYPE).

%% Print the usage line for the matching wallet-creation script and exit.
create_wallet_fail(?RSA_KEY_TYPE) ->
	io:format("Usage: ./bin/create-wallet [data_dir]~n"),
	init:stop(1);
create_wallet_fail(?ECDSA_KEY_TYPE) ->
	io:format("Usage: ./bin/create-ecdsa-wallet [data_dir]~n"),
	init:stop(1).

%% CLI entrypoints for the VDF benchmark.
benchmark_vdf() ->
	benchmark_vdf([]).

benchmark_vdf(Args) ->
	ok = arweave_config:set_env(#config{}),
	ar_bench_vdf:run_benchmark_from_cli(Args),
	init:stop(1).

%% CLI entrypoints for the hashing benchmark.
benchmark_hash() ->
	benchmark_hash([]).

benchmark_hash(Args) ->
	ar_bench_hash:run_benchmark_from_cli(Args),
	init:stop(1).

%% CLI entrypoints for the packing benchmark.
benchmark_packing() ->
	benchmark_packing([]).

benchmark_packing(Args) ->
	ar_bench_packing:run_benchmark_from_cli(Args),
	init:stop(1).

%% Ask a remote node (by Erlang node name) to stop its VM.
shutdown([NodeName]) ->
	rpc:cast(NodeName, init, stop, []).

%% application `prep_stop/1' callback: notify the shutdown manager, stop
%% accepting HTTP connections and cancel timers before the supervision
%% tree is torn down.
prep_stop(State) ->
	% the service will be stopped, ar_shutdown_manager
	% must be noticed and its state modified.
	_ = ar_shutdown_manager:shutdown(),
	% When arweave is stopped, the first step is to stop
	% accepting connections from other peers, and then
	% start the shutdown procedure.
	ok = ranch:suspend_listener(ar_http_iface_listener),
	% all timers/intervals must be stopped.
	ar_timer:terminate_timers(),
	State.

%% application `stop/1' callback.
stop(_State) ->
	?LOG_INFO([{stop, ?MODULE}]).

%% Stop the applications arweave depends on, in reverse start order.
%% The first four entries (kernel, stdlib, sasl, os_mon) are skipped.
stop_dependencies() ->
	?LOG_INFO("========== Stopping Arweave Node =========="),
	application:stop(arweave_limiter),
	{ok, [_Kernel, _Stdlib, _SASL, _OSMon | Deps]} =
		application:get_key(arweave, applications),
	lists:foreach(fun(Dep) -> application:stop(Dep) end, lists:reverse(Deps)).

%% Start the arweave application together with everything it depends on.
start_dependencies() ->
	ok = arweave_limiter:start(),
	{ok, _} = application:ensure_all_started(arweave, permanent),
	ok.

%% One scheduler => one dirty scheduler => Calculating a RandomX hash, e.g.
%% for validating a block, will be blocked on initializing a RandomX dataset,
%% which takes minutes.
warn_if_single_scheduler() ->
	case erlang:system_info(schedulers_online) of
		1 ->
			?LOG_WARNING(
				"WARNING: Running only one CPU core / Erlang scheduler may cause issues.");
		_ ->
			ok
	end.
%% Start an interactive shell prepared for running unit tests.
shell() ->
	ar_test_runner:start_shell(test).

%% Start an interactive shell prepared for running e2e tests.
shell_e2e() ->
	ar_test_runner:start_shell(e2e).

stop_shell() ->
	ar_test_runner:stop_shell(test).

stop_shell_e2e() ->
	ar_test_runner:stop_shell(e2e).

%% @doc Run unit tests.
%% Usage: ./bin/test [module | module:test ...]
tests() ->
	ar_test_runner:run(test).

tests(Args) ->
	ar_test_runner:run(test, Args).

%% Start a local development network shell, optionally from a snapshot dir.
shell_localnet() ->
	shell_localnet([]).

shell_localnet(Args) ->
	try
		case Args of
			[] ->
				ar_localnet:start(),
				io:format("Shell is ready.~n");
			[SnapshotDir] ->
				ar_localnet:start(SnapshotDir),
				io:format("Shell is ready.~n");
			_ ->
				io:format("Usage: ./bin/localnet_shell [snapshot_dir]~n"),
				erlang:error({invalid_args, Args})
		end
	catch Type:Reason:S ->
		io:format("Failed to start localnet due to ~p:~p:~p~n", [Type, Reason, S]),
		init:stop(1)
	end.

stop_shell_localnet() ->
	ar_test_node:stop(),
	init:stop().

%% @doc Run e2e tests.
%% Usage: ./bin/e2e [module | module:test ...]
e2e() ->
	ar_test_runner:run(e2e).

e2e(Args) ->
	ar_test_runner:run(e2e, Args).

%% @doc Generate the project documentation.
docs() ->
	Mods = lists:filter(
		fun(File) -> filename:extension(File) == ".erl" end,
		element(2, file:list_dir("apps/arweave/src"))
	),
	edoc:files(
		["apps/arweave/src/" ++ Mod || Mod <- Mods],
		[
			{dir, "source_code_docs"},
			{hidden, true},
			{private, true}
		]
	).

%% In test builds console output is redirected to the log; otherwise it
%% goes straight to stdout.
-ifdef(AR_TEST).
console(Format) ->
	?LOG_INFO(io_lib:format(Format, [])).

console(Format, Params) ->
	?LOG_INFO(io_lib:format(Format, Params)).
-else.
console(Format) ->
	io:format(Format).

console(Format, Params) ->
	io:format(Format, Params).
-endif.
================================================
FILE: apps/arweave/src/ar_base32.erl
================================================
%% @doc This module is very strongly inspired by OTP's base64 source code.
%% See https://github.com/erlang/otp/blob/93ec8bb2dbba9456395a54551fe9f1e0f86184b1/lib/stdlib/src/base64.erl#L66-L80
-module(ar_base32).

-export([encode/1]).
%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Encode data into a lowercase unpadded RFC 4648 base32 alphabet
encode(Bin) when is_binary(Bin) ->
	encode_binary(Bin, <<>>).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Encode 40 bits (5 bytes -> 8 base32 characters) at a time, accumulating
%% the output in A. The short clauses handle 1..4 trailing bytes without
%% emitting any '=' padding: 1 byte -> 2 chars, 2 -> 4, 3 -> 5, 4 -> 7.
%% NOTE: the binary patterns below were reconstructed — the extracted copy
%% of this file had the `<<...>>` patterns stripped; the accumulator
%% expressions (BB = ... bsl ... bor ...) are the originals.
encode_binary(<<>>, A) ->
	A;
encode_binary(<<B1:8>>, A) ->
	%% 8 bits -> one 5-bit group and one 3-bit group left-padded with zeros.
	<<A/bits, (b32e(B1 bsr 3)):8, (b32e((B1 band 7) bsl 2)):8>>;
encode_binary(<<B1:8, B2:8>>, A) ->
	BB = (B1 bsl 8) bor B2,
	<<A/bits, (b32e(BB bsr 11)):8, (b32e((BB bsr 6) band 31)):8,
		(b32e((BB bsr 1) band 31)):8, (b32e((BB band 1) bsl 4)):8>>;
encode_binary(<<B1:8, B2:8, B3:8>>, A) ->
	BB = (B1 bsl 16) bor (B2 bsl 8) bor B3,
	<<A/bits, (b32e(BB bsr 19)):8, (b32e((BB bsr 14) band 31)):8,
		(b32e((BB bsr 9) band 31)):8, (b32e((BB bsr 4) band 31)):8,
		(b32e((BB band 15) bsl 1)):8>>;
encode_binary(<<B1:8, B2:8, B3:8, B4:8>>, A) ->
	BB = (B1 bsl 24) bor (B2 bsl 16) bor (B3 bsl 8) bor B4,
	<<A/bits, (b32e(BB bsr 27)):8, (b32e((BB bsr 22) band 31)):8,
		(b32e((BB bsr 17) band 31)):8, (b32e((BB bsr 12) band 31)):8,
		(b32e((BB bsr 7) band 31)):8, (b32e((BB bsr 2) band 31)):8,
		(b32e((BB band 3) bsl 3)):8>>;
encode_binary(<<B1:8, B2:8, B3:8, B4:8, B5:8, Ls/bits>>, A) ->
	BB = (B1 bsl 32) bor (B2 bsl 24) bor (B3 bsl 16) bor (B4 bsl 8) bor B5,
	encode_binary(
		Ls,
		<<A/bits, (b32e(BB bsr 35)):8, (b32e((BB bsr 30) band 31)):8,
			(b32e((BB bsr 25) band 31)):8, (b32e((BB bsr 20) band 31)):8,
			(b32e((BB bsr 15) band 31)):8, (b32e((BB bsr 10) band 31)):8,
			(b32e((BB bsr 5) band 31)):8, (b32e(BB band 31)):8>>
	).

-compile({inline, [{b32e, 1}]}).
%% Map a 5-bit value (0..31) to the lowercase base32 alphabet a-z, 2-7.
%% The trailing $8 and $9 entries are unreachable (indices stop at 31)
%% and kept only for symmetry with the original table.
b32e(X) ->
	element(X+1, {
		$a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m,
		$n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z,
		$2, $3, $4, $5, $6, $7, $8, $9
	}).
================================================
FILE: apps/arweave/src/ar_bench_hash.erl
================================================
-module(ar_bench_hash).

-export([run_benchmark_from_cli/1, run_benchmark/1]).

-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
%% Parse CLI flags, build the RandomX state and run the hashing benchmark,
%% printing the average H0 and H1/H2 times in milliseconds.
run_benchmark_from_cli(Args) ->
	RandomX = get_flag_value(Args, "randomx", "512"),
	JIT = list_to_integer(get_flag_value(Args, "jit", "1")),
	LargePages = list_to_integer(get_flag_value(Args, "large_pages", "1")),
	HardwareAES = list_to_integer(get_flag_value(Args, "hw_aes", "1")),
	RandomXMode = case RandomX of
		"512" -> rx512;
		"4096" -> rx4096;
		"squared" -> rxsquared;
		%% NOTE(review): show_help() calls init:stop(1), which is
		%% asynchronous — execution may continue briefly with its return
		%% value bound to RandomXMode; confirm this is acceptable.
		_ -> show_help()
	end,
	Schedulers = erlang:system_info(dirty_cpu_schedulers_online),
	RandomXState = ar_mine_randomx:init_fast2(
		RandomXMode, ?RANDOMX_PACKING_KEY, JIT, LargePages, Schedulers),
	{H0, H1} = run_benchmark(RandomXState, JIT, LargePages, HardwareAES),
	H0String = io_lib:format("~.3f", [H0 / 1000]),
	H1String = io_lib:format("~.3f", [H1 / 1000]),
	ar:console("Hashing benchmark~nH0: ~s ms~nH1/H2: ~s ms~n", [H0String, H1String]).

%% Return the value following TargetFlag in the flat argument list,
%% or DefaultValue when the flag is absent.
get_flag_value([], _, DefaultValue) ->
	DefaultValue;
get_flag_value([Flag | [Value | _Tail]], TargetFlag, _DefaultValue) when Flag == TargetFlag ->
	Value;
get_flag_value([_ | Tail], TargetFlag, DefaultValue) ->
	get_flag_value(Tail, TargetFlag, DefaultValue).

%% Print usage for the hashing benchmark and stop the VM.
show_help() ->
	io:format("~nUsage: benchmark-hash [options]~n"),
	io:format("Options:~n"),
	io:format(" randomx <512|4096> (default: 512)~n"),
	io:format(" jit <0|1> (default: 1)~n"),
	io:format(" large_pages <0|1> (default: 1)~n"),
	io:format(" hw_aes <0|1> (default: 1)~n"),
	init:stop(1).

%% Run the benchmark with the node's default JIT/large-pages/AES settings.
run_benchmark(RandomXState) ->
	run_benchmark(RandomXState, ar_mine_randomx:jit(), ar_mine_randomx:large_pages(),
		ar_mine_randomx:hardware_aes()).
%% Measure the average time of the H0 and H1/H2 hashes over 1000 iterations.
%% Returns {H0Microseconds, H1Microseconds} (averages per hash).
run_benchmark(RandomXState, JIT, LargePages, HardwareAES) ->
	NonceLimiterOutput = crypto:strong_rand_bytes(32),
	Seed = crypto:strong_rand_bytes(32),
	MiningAddr = crypto:strong_rand_bytes(32),
	Iterations = 1000,
	%% H0: one RandomX hash per iteration over the mining input layout.
	{H0Time, _} = timer:tc(fun() ->
		lists:foreach(
			fun(I) ->
				Data = << NonceLimiterOutput:32/binary, I:256, Seed:32/binary,
					MiningAddr/binary >>,
				ar_mine_randomx:hash(RandomXState, Data, JIT, LargePages, HardwareAES)
			end,
			lists:seq(1, Iterations))
	end),
	H0Microseconds = H0Time / Iterations,
	H0 = crypto:strong_rand_bytes(32),
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	%% H1/H2: two chained SHA-256 hashes per iteration over a chunk preimage.
	{H1Time, _} = timer:tc(fun() ->
		lists:foreach(
			fun(_) ->
				Nonce = rand:uniform(1000),
				Preimage = crypto:hash(sha256,
					<< H0:32/binary, Nonce:64, Chunk/binary >>),
				crypto:hash(sha256, << H0:32/binary, Preimage/binary >>)
			end,
			lists:seq(1, Iterations))
	end),
	H1Microseconds = H1Time / Iterations,
	{H0Microseconds, H1Microseconds}.
================================================
FILE: apps/arweave/src/ar_bench_packing.erl
================================================
-module(ar_bench_packing).

-export([show_help/0, run_benchmark_from_cli/1, run_benchmark/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave/include/ar_chunk_storage.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("kernel/include/file.hrl").

%% 1 TB in decimal bytes, used for partition-size reporting.
-define(TB, 1_000_000_000_000).
%% Number of entropy footprints generated per benchmark iteration.
-define(FOOTPRINTS_PER_ITERATION, 1).

%%%===================================================================
%%% CLI and Entry Points
%%%===================================================================

%% CLI entrypoint: parse flags, validate the directory, run the benchmark.
run_benchmark_from_cli(Args) ->
	Config = parse_cli_args(Args),
	validate_config(Config),
	run_benchmark(Config).
%% Parse the benchmark CLI flags into the config tuple
%% {Dir, Threads, Samples, LargePages, RatedSpeedMB, ReadLoadThreads, ReadFileGB}.
parse_cli_args(Args) ->
	Threads = list_to_integer(get_flag_value(Args, "threads",
		integer_to_list(erlang:system_info(dirty_cpu_schedulers_online)))),
	Samples = list_to_integer(get_flag_value(Args, "samples", "20")),
	LargePages = list_to_integer(get_flag_value(Args, "large_pages", "1")),
	RatedSpeedMB = list_to_integer(get_flag_value(Args, "rated_speed", "250")),
	ReadLoadThreads = list_to_integer(get_flag_value(Args, "read_load", "2")),
	ReadFileGB = list_to_integer(get_flag_value(Args, "read_file_gb", "4")),
	Dir = get_flag_value(Args, "dir", undefined),
	{Dir, Threads, Samples, LargePages, RatedSpeedMB, ReadLoadThreads, ReadFileGB}.

%% Ensure the target directory exists when one was given; halt on failure.
validate_config({Dir, _Threads, _Samples, _LargePages, _RatedSpeedMB, _ReadLoadThreads,
		_ReadFileGB}) ->
	case Dir of
		undefined ->
			io:format("~nNo directory specified - will benchmark entropy generation only.~n"),
			io:format("For disk I/O benchmark, specify: dir /path/to/storage~n~n");
		_ ->
			case filelib:ensure_dir(filename:join(Dir, "dummy")) of
				ok ->
					ok;
				{error, Reason} ->
					io:format("Error: Could not ensure directory ~p exists: ~p~n",
						[Dir, Reason]),
					erlang:halt(1)
			end
	end.

%% Return the value following TargetFlag, or DefaultValue when absent.
get_flag_value([], _, DefaultValue) ->
	DefaultValue;
get_flag_value([Flag, Value | _Tail], TargetFlag, _DefaultValue) when Flag == TargetFlag ->
	Value;
get_flag_value([_ | Tail], TargetFlag, DefaultValue) ->
	get_flag_value(Tail, TargetFlag, DefaultValue).

%% Print usage for the packing benchmark and stop the VM.
show_help() ->
	io:format("~nUsage: benchmark packing [options]~n~n"),
	io:format("Options:~n"),
	io:format(" threads Number of threads. Default: number of CPU cores.~n"),
	io:format(" samples Number of samples to average. Default: 20.~n"),
	io:format(" large_pages Use large pages for RandomX (0=off, 1=on). Default: 1.~n"),
	io:format(" rated_speed Expected disk write speed in MB/s. Benchmark will exclude samples\n"),
	io:format(" that are too fast as they are likely cached. Default: 250.~n"),
	io:format(" read_load Background read threads (simulates other disk activity). 
Default: 2.~n"),
	io:format(" read_file_gb Size of read load file in GB (larger = less caching). Default: 4.~n"),
	io:format(" dir Directory to write to (optional).~n~n"),
	io:format("Examples:~n"),
	io:format(" benchmark packing threads 8 dir /mnt/storage1~n"),
	io:format(" benchmark packing rated_speed 246 dir /tmp/bench~n~n"),
	io:format("For more information, see the Benchmarking section at docs.arweave.org~n~n"),
	init:stop(1).

%%%===================================================================
%%% Main Benchmark Orchestration
%%%===================================================================

%% Run the two-phase benchmark: phase 1 measures entropy generation and
%% disk writes; phase 2 (only when a directory is configured) measures
%% the full pack pipeline.
run_benchmark({Dir, Threads, TargetSamples, LargePages, RatedSpeedMB, ReadLoadThreads,
		ReadFileGB}) ->
	configure_randomx(LargePages),
	print_header(Threads, TargetSamples, RatedSpeedMB, ReadLoadThreads, ReadFileGB, Dir),
	ar:console("~nInitializing...~n"),
	RandomXState = init_randomx_state(Threads),
	RewardAddress = crypto:strong_rand_bytes(32),
	ChunkDir = init_chunk_dir(Dir),
	ReadPids = start_read_load(ReadLoadThreads, ReadFileGB, Dir),
	%% Phase 1: Entropy Preparation
	print_cache_fill_start(ChunkDir),
	MinDiskMs = calculate_min_disk_ms(RatedSpeedMB),
	{AllEntropyResults, ValidEntropyResults} = collect_entropy_samples(
		TargetSamples, MinDiskMs, Threads, RandomXState, RewardAddress, ChunkDir),
	print_entropy_results(AllEntropyResults, ValidEntropyResults, ChunkDir, Threads),
	%% Phase 2: Packing (only if disk I/O enabled)
	case ChunkDir of
		undefined ->
			ok;
		_ ->
			%% Sync and drop page cache so reads hit disk
			sync_and_drop_cache(),
			close_file_handles(),
			PackingResults = collect_packing_samples(
				TargetSamples, Threads, RandomXState, ChunkDir),
			print_packing_results(PackingResults, Threads)
	end,
	stop_read_load(ReadPids),
	close_file_handles(),
	ar:console("~n").

%% Toggle the randomx_large_pages feature flag in the config.
configure_randomx(LargePages) ->
	case LargePages of
		1 -> arweave_config:set_env(#config{disable = [], enable = [randomx_large_pages]});
		0 -> arweave_config:set_env(#config{disable = [randomx_large_pages], enable = []})
	end.
%% MiB of entropy produced per iteration
%% (?FOOTPRINTS_PER_ITERATION footprints of the replica 2.9 footprint size).
calculate_mib_per_iteration() ->
	BytesPerIteration = ?FOOTPRINTS_PER_ITERATION * ar_block:get_replica_2_9_footprint_size(),
	BytesPerIteration div ?MiB.

%% Minimum milliseconds an iteration's disk write must take to be counted
%% as a real (uncached) write, derived from the user-supplied rated speed.
calculate_min_disk_ms(RatedSpeedMB) ->
	%% Convert MB/s (decimal, marketing) to MiB/s (binary)
	%% 1 MiB = 1.048576 MB, so MiB/s = MB/s / 1.048576
	RatedSpeedMiB = RatedSpeedMB / 1.048576,
	%% Add 10% margin - writes up to 10% faster than rated still count as valid
	EffectiveSpeed = RatedSpeedMiB * 1.1,
	%% Calculate minimum expected time for a "real" disk write
	calculate_mib_per_iteration() / EffectiveSpeed * 1000.

%% Create an empty benchmark_chunk_storage directory under Dir;
%% undefined means CPU-only benchmarking (no disk I/O).
init_chunk_dir(undefined) ->
	undefined;
init_chunk_dir(Dir) ->
	ChunkDir = filename:join(Dir, "benchmark_chunk_storage"),
	filelib:ensure_dir(filename:join(ChunkDir, "dummy")),
	clear_dir(ChunkDir),
	ChunkDir.

%% Initialize the rxsquared RandomX dataset; halt with a hint when
%% initialization fails (commonly caused by unsupported large pages).
init_randomx_state(Threads) ->
	try
		ar_mine_randomx:init_fast(rxsquared, ?RANDOMX_PACKING_KEY, Threads)
	catch error:{badmatch, {error, Reason}} ->
		ar:console("~nError: Failed to initialize RandomX: ~p~n", [Reason]),
		ar:console("~nTry running with large_pages 0 if large pages are not supported.~n~n"),
		erlang:halt(1),
		undefined
	end.

%% Collect samples until TargetSamples valid (uncached) ones are seen.
%% Returns {AllResults, ValidResults}.
collect_entropy_samples(TargetSamples, MinDiskMs, Threads, RandomXState, RewardAddress,
		ChunkDir) ->
	collect_entropy_samples_loop(
		0, TargetSamples, MinDiskMs, Threads, RandomXState, RewardAddress, ChunkDir,
		[], [], 0, []).
%% Main phase-1 sampling loop. Terminates when enough valid samples were
%% collected, or aborts after 100 consecutive cached (too-fast) iterations
%% without a single valid sample.
collect_entropy_samples_loop(_Iteration, TargetSamples, _MinDiskMs, _Threads, _RandomXState,
		_RewardAddr, _ChunkDir, AllResults, ValidResults, _CachedCount, _AllDiskMs)
		when length(ValidResults) >= TargetSamples ->
	ar:console("~n"),
	{lists:reverse(AllResults), lists:reverse(ValidResults)};
collect_entropy_samples_loop(_Iteration, _TargetSamples, _MinDiskMs, _Threads, _RandomXState,
		_RewardAddr, ChunkDir, _AllResults, [], CachedCount, AllDiskMs)
		when CachedCount >= 100, ChunkDir /= undefined ->
	%% 100 consecutive cached iterations without finding a valid sample
	print_rated_speed_too_low_error(AllDiskMs),
	erlang:halt(1);
collect_entropy_samples_loop(Iteration, TargetSamples, MinDiskMs, Threads, RandomXState,
		RewardAddr, ChunkDir, AllResults, ValidResults, CachedCount, AllDiskMs) ->
	{_EntropyMs, DiskMs} = Result =
		run_entropy_iteration(Iteration, Threads, RandomXState, RewardAddr, ChunkDir),
	{NewValidResults, NewCachedCount} =
		process_entropy_sample(Result, MinDiskMs, ChunkDir, ValidResults, CachedCount),
	%% Keep every positive disk time for the abort diagnostics.
	NewAllDiskMs = case DiskMs > 0 of
		true -> [DiskMs | AllDiskMs];
		false -> AllDiskMs
	end,
	collect_entropy_samples_loop(
		Iteration + 1, TargetSamples, MinDiskMs, Threads, RandomXState, RewardAddr,
		ChunkDir, [Result | AllResults], NewValidResults, NewCachedCount, NewAllDiskMs).

%% Classify one sample as valid (slow enough to be a real disk write) or
%% cached. Returns {NewValidResults, NewConsecutiveCachedCount}; the cached
%% counter resets on every valid sample.
process_entropy_sample({EntropyMs, DiskMs} = Result, MinDiskMs, ChunkDir, ValidResults,
		CachedCount) ->
	case ChunkDir of
		undefined ->
			%% CPU-only: all samples count
			print_entropy_cpu_sample(length(ValidResults) + 1, EntropyMs),
			{[Result | ValidResults], 0};
		_ ->
			case DiskMs >= MinDiskMs of
				true ->
					print_entropy_valid_sample(length(ValidResults) + 1, EntropyMs, DiskMs),
					{[Result | ValidResults], 0};
				false ->
					print_cached_sample(),
					{ValidResults, CachedCount + 1}
			end
	end.
%%%===================================================================
%%% Phase 1: Entropy Preparation - Iteration Execution
%%%===================================================================

%% Execute a single phase-1 iteration: generate the entropy footprints,
%% then (when a directory is configured) write them out.
%% Returns {EntropyMs, DiskMs}.
run_entropy_iteration(Iteration, _Threads, RandomXState, RewardAddr, ChunkDir) ->
	{GenMs, Footprints} = time_entropy_generation(Iteration, RandomXState, RewardAddr),
	{GenMs, time_entropy_disk_write(Iteration, ChunkDir, Footprints)}.

%% Time the footprint generation; returns {ElapsedMs, Entropies}.
time_entropy_generation(Iteration, RandomXState, RewardAddr) ->
	Before = erlang:monotonic_time(microsecond),
	Generated = generate_all_footprints(Iteration, RandomXState, RewardAddr),
	ElapsedUs = erlang:monotonic_time(microsecond) - Before,
	{ElapsedUs / 1000, Generated}.

%% Time the disk write of the generated entropies, in milliseconds.
%% Returns 0 when no directory was configured (CPU-only benchmark).
time_entropy_disk_write(_Iteration, undefined, _Entropies) ->
	0;
time_entropy_disk_write(Iteration, ChunkDir, Entropies) ->
	Offset = Iteration * ?FOOTPRINTS_PER_ITERATION * ?DATA_CHUNK_SIZE,
	Before = erlang:monotonic_time(microsecond),
	write_all_entropies(ChunkDir, Entropies, Offset),
	(erlang:monotonic_time(microsecond) - Before) / 1000.

%%%===================================================================
%%% Phase 2: Packing - Sample Collection
%%%===================================================================

%% Print the phase-2 banner and collect TargetSamples packing samples.
collect_packing_samples(TargetSamples, Threads, RandomXState, ChunkDir) ->
	ar:console("~n=== Phase 2: Packing Benchmark ===~n"),
	ar:console("~nRunning packing benchmark:"),
	%% Generate a new reward address for unpacking entropy
	collect_packing_samples_loop(0, TargetSamples, Threads, RandomXState,
		crypto:strong_rand_bytes(32), ChunkDir, []).
%% Loop until TargetSamples packing samples have been collected.
collect_packing_samples_loop(SampleNum, TargetSamples, _Threads, _RandomXState,
		_UnpackRewardAddr, _ChunkDir, Results) when SampleNum >= TargetSamples ->
	ar:console("~n"),
	lists:reverse(Results);
collect_packing_samples_loop(SampleNum, TargetSamples, Threads, RandomXState,
		UnpackRewardAddr, ChunkDir, Results) ->
	Result = run_packing_iteration(SampleNum, Threads, RandomXState, UnpackRewardAddr,
		ChunkDir),
	print_packing_sample(SampleNum + 1, Result, Threads),
	collect_packing_samples_loop(SampleNum + 1, TargetSamples, Threads, RandomXState,
		UnpackRewardAddr, ChunkDir, [Result | Results]).

%% One packing iteration. Returns the five timing components
%% {UnpackEntropyMs, DecipherMs, ReadMs, EncipherMs, WriteMs}.
run_packing_iteration(Iteration, _Threads, RandomXState, UnpackRewardAddr, ChunkDir) ->
	BaseOffset = Iteration * ?FOOTPRINTS_PER_ITERATION * ?DATA_CHUNK_SIZE,
	%% Step 1: Generate unpack entropy (parallelized across threads)
	{UnpackEntropyMs, UnpackEntropies} =
		time_entropy_generation(Iteration, RandomXState, UnpackRewardAddr),
	%% Step 2-5: Walk through entropy using map_entropies (same pattern as phase 1)
	{DecipherMs, ReadMs, EncipherMs, WriteMs} =
		pack_all_chunks(ChunkDir, UnpackEntropies, BaseOffset),
	{UnpackEntropyMs, DecipherMs, ReadMs, EncipherMs, WriteMs}.

%% Fold pack_chunk_callback over every footprint, summing the four timers.
pack_all_chunks(_ChunkDir, [], _BaseOffset) ->
	{0, 0, 0, 0};
pack_all_chunks(ChunkDir, [Footprint | Rest], BaseOffset) ->
	Offsets = ar_entropy_gen:entropy_offsets(BaseOffset + ?DATA_CHUNK_SIZE, ?PARTITION_SIZE),
	{D1, R1, E1, W1} = ar_entropy_gen:map_entropies(
		Footprint, Offsets, 0, [], <<>>, fun pack_chunk_callback/5,
		[ChunkDir], {0, 0, 0, 0}),
	{D2, R2, E2, W2} = pack_all_chunks(ChunkDir, Rest, BaseOffset + ?DATA_CHUNK_SIZE),
	{D1 + D2, R1 + R2, E1 + E2, W1 + W2}.

%% map_entropies callback: pack one chunk and accumulate its timings.
pack_chunk_callback(UnpackEntropy, BucketEndOffset, _RewardAddr, ChunkDir,
		{DAcc, RAcc, EAcc, WAcc}) ->
	{DecipherMs, ReadMs, EncipherMs, WriteMs} =
		pack_single_chunk(ChunkDir, BucketEndOffset, UnpackEntropy),
	{DAcc + DecipherMs, RAcc + ReadMs, EAcc + EncipherMs, WAcc + WriteMs}.
%% Pack one chunk end-to-end, timing each step: decipher a random "packed"
%% chunk with the unpack entropy, read the pack entropy from disk, encipher
%% with it, and write the packed chunk back.
%% Returns {DecipherMs, ReadMs, EncipherMs, WriteMs}.
pack_single_chunk(ChunkDir, PaddedEndOffset, UnpackEntropy) ->
	%% Step 2: Generate random packed chunk and decipher it
	RandomPackedChunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	DecipherStart = erlang:monotonic_time(microsecond),
	UnpackedChunk = ar_packing_server:exor_replica_2_9_chunk(RandomPackedChunk,
		UnpackEntropy),
	DecipherEnd = erlang:monotonic_time(microsecond),
	DecipherMs = (DecipherEnd - DecipherStart) / 1000,
	%% Step 3: Read pack entropy from disk
	ChunkFileStart = ar_chunk_storage:get_chunk_file_start(PaddedEndOffset),
	{Position, _ChunkOffset} = ar_chunk_storage:get_position_and_relative_chunk_offset(
		ChunkFileStart, PaddedEndOffset),
	Filepath = filename:join(ChunkDir, integer_to_list(ChunkFileStart)),
	FH = get_file_handle(Filepath),
	ReadStart = erlang:monotonic_time(microsecond),
	%% The stored record is an ?OFFSET_BIT_SIZE-bit offset header followed
	%% by the entropy bytes; strip the header after reading.
	{ok, EntropyWithHeader} = file:pread(FH, Position,
		?OFFSET_BIT_SIZE div 8 + ?DATA_CHUNK_SIZE),
	<< _StoredChunkOffset:?OFFSET_BIT_SIZE, PackEntropy/binary >> = EntropyWithHeader,
	ReadEnd = erlang:monotonic_time(microsecond),
	ReadMs = (ReadEnd - ReadStart) / 1000,
	%% Step 4: Encipher the unpacked chunk with pack entropy
	EncipherStart = erlang:monotonic_time(microsecond),
	PackedChunk = ar_packing_server:exor_replica_2_9_chunk(UnpackedChunk, PackEntropy),
	EncipherEnd = erlang:monotonic_time(microsecond),
	EncipherMs = (EncipherEnd - EncipherStart) / 1000,
	%% Step 5: Write the packed chunk
	WriteStart = erlang:monotonic_time(microsecond),
	ok = file:pwrite(FH, Position + (?OFFSET_BIT_SIZE div 8), PackedChunk),
	WriteEnd = erlang:monotonic_time(microsecond),
	WriteMs = (WriteEnd - WriteStart) / 1000,
	{DecipherMs, ReadMs, EncipherMs, WriteMs}.
%%%===================================================================
%%% Entropy Generation
%%%===================================================================

%% Generate ?FOOTPRINTS_PER_ITERATION entropy footprints in parallel.
generate_all_footprints(Iteration, RandomXState, RewardAddr) ->
	FootprintIds = lists:seq(0, ?FOOTPRINTS_PER_ITERATION - 1),
	ar_util:pmap(
		fun(FootprintId) ->
			%% Globally unique id so successive iterations use fresh keys.
			UniqueId = Iteration * ?FOOTPRINTS_PER_ITERATION + FootprintId,
			generate_footprint(RandomXState, RewardAddr, UniqueId)
		end,
		FootprintIds, infinity).

%% Generate one footprint: one replica 2.9 entropy per sub-chunk, in parallel.
generate_footprint(RandomXState, RewardAddr, UniqueId) ->
	SubChunkIndices = lists:seq(0, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT - 1),
	ar_util:pmap(
		fun(SubChunkIndex) ->
			AbsoluteOffset = (UniqueId + 1) * ?DATA_CHUNK_SIZE,
			SubChunkOffset = SubChunkIndex * ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
			Key = ar_replica_2_9:get_entropy_key(RewardAddr, AbsoluteOffset,
				SubChunkOffset),
			ar_mine_randomx:randomx_generate_replica_2_9_entropy(RandomXState, Key)
		end,
		SubChunkIndices, infinity).

%%%===================================================================
%%% Disk I/O - Chunk Storage
%%%===================================================================

%% Write every footprint's entropies to the chunk files under ChunkDir.
write_all_entropies(_ChunkDir, [], _BaseOffset) ->
	ok;
write_all_entropies(ChunkDir, [Footprint | Rest], BaseOffset) ->
	Offsets = ar_entropy_gen:entropy_offsets(BaseOffset + ?DATA_CHUNK_SIZE, ?PARTITION_SIZE),
	ar_entropy_gen:map_entropies(
		Footprint, Offsets, 0, [], <<>>, fun write_chunk_callback/5,
		[ChunkDir], ok),
	write_all_entropies(ChunkDir, Rest, BaseOffset + ?DATA_CHUNK_SIZE).

%% map_entropies callback: write one chunk of entropy to disk.
write_chunk_callback(ChunkEntropy, BucketEndOffset, _RewardAddr, ChunkDir, ok) ->
	write_chunk(ChunkDir, BucketEndOffset, ChunkEntropy),
	ok.
%% Write Chunk at its storage position within the chunk file, prefixed
%% with its ?OFFSET_BIT_SIZE-bit relative offset header.
write_chunk(ChunkDir, PaddedEndOffset, Chunk) ->
	ChunkFileStart = ar_chunk_storage:get_chunk_file_start(PaddedEndOffset),
	{Position, ChunkOffset} = ar_chunk_storage:get_position_and_relative_chunk_offset(
		ChunkFileStart, PaddedEndOffset),
	Filepath = filename:join(ChunkDir, integer_to_list(ChunkFileStart)),
	FH = get_file_handle(Filepath),
	ok = file:pwrite(FH, Position, [<< ChunkOffset:?OFFSET_BIT_SIZE >> | Chunk]).

%% Cache raw file handles in the process dictionary, keyed by file path.
get_file_handle(Filepath) ->
	case erlang:get({write_handle, Filepath}) of
		undefined ->
			{ok, FH} = file:open(Filepath, [read, write, raw, binary]),
			erlang:put({write_handle, Filepath}, FH),
			FH;
		FH ->
			FH
	end.

%% Close and forget every cached file handle.
close_file_handles() ->
	lists:foreach(
		fun({write_handle, _} = Key) ->
			file:close(erlang:get(Key)),
			erlang:erase(Key);
		(_) ->
			ok
		end, erlang:get_keys()).

%% fsync every cached handle and advise the kernel to drop its pages, so
%% that phase-2 reads hit the disk instead of the page cache.
sync_and_drop_cache() ->
	ar:console("~nSyncing and dropping page cache", []),
	lists:foreach(
		fun({write_handle, _} = Key) ->
			FH = erlang:get(Key),
			%% Sync to disk
			file:sync(FH),
			%% Get file size for advise
			case file:position(FH, eof) of
				{ok, Size} ->
					%% Tell kernel we don't need these pages cached
					file:advise(FH, 0, Size, dont_need);
				_ ->
					ok
			end,
			ar:console(".", []);
		(_) ->
			ok
		end, erlang:get_keys()),
	ar:console("~n", []).

%% Delete all files in Dir; a missing directory is fine.
clear_dir(Dir) ->
	case file:list_dir(Dir) of
		{ok, Files} ->
			lists:foreach(fun(File) -> file:delete(filename:join(Dir, File)) end, Files);
		{error, enoent} ->
			ok
	end.

%%%===================================================================
%%% Disk I/O - Read Load Simulation
%%%===================================================================

%% Start background read processes that simulate other disk activity;
%% no-op when disabled (0 threads) or when no directory was configured.
start_read_load(0, _ReadFileGB, _Dir) ->
	[];
start_read_load(_N, _ReadFileGB, undefined) ->
	[];
start_read_load(NumThreads, ReadFileSizeGB, Dir) ->
	ReadFile = create_read_load_file(Dir, ReadFileSizeGB),
	spawn_read_load_threads(NumThreads, ReadFile).
%% (Re)create the read-load file filled with SizeGB * 1024 MiB of random data.
create_read_load_file(Dir, SizeGB) ->
	ReadFile = filename:join(Dir, "benchmark_read_load.bin"),
	SizeMB = SizeGB * 1024,
	file:delete(ReadFile),
	ar:console("Creating read load file...~n"),
	{ok, FH} = file:open(ReadFile, [write, raw, binary]),
	lists:foreach(
		fun(_) -> file:write(FH, crypto:strong_rand_bytes(?MiB)) end,
		lists:seq(1, SizeMB)),
	file:close(FH),
	ReadFile.

%% Spawn NumThreads linked processes that loop random reads over ReadFile.
spawn_read_load_threads(NumThreads, ReadFile) ->
	[spawn_link(fun() -> read_load_loop(ReadFile) end) || _ <- lists:seq(1, NumThreads)].

%% Open the read-load file and enter the random-read loop (never returns;
%% the process is killed by stop_read_load/1).
read_load_loop(ReadFile) ->
	{ok, FH} = file:open(ReadFile, [read, raw, binary, {read_ahead, 0}]),
	{ok, FileInfo} = file:read_file_info(ReadFile),
	FileSize = FileInfo#file_info.size,
	read_load_loop(FH, FileSize).

read_load_loop(FH, FileSize) ->
	%% Random read of 4-64KB (typical RocksDB read sizes)
	ReadSize = 4096 + rand:uniform(60 * 1024),
	MaxOffset = max(0, FileSize - ReadSize),
	Offset = rand:uniform(MaxOffset + 1) - 1,
	file:pread(FH, Offset, ReadSize),
	read_load_loop(FH, FileSize).

%% Kill the background read processes started by start_read_load/3.
stop_read_load(Pids) ->
	lists:foreach(fun(Pid) -> exit(Pid, kill) end, Pids).

%%%===================================================================
%%% Output - Progress and Results
%%%===================================================================

%% Print the benchmark configuration banner.
print_header(Threads, TargetSamples, RatedSpeedMB, ReadLoadThreads, ReadFileGB, Dir) ->
	ar:console("~n=== Replica 2.9 Preparation Benchmark ===~n"),
	ar:console("See docs.arweave.org for more information.~n~n"),
	ar:console("Configuration:~n"),
	ar:console(" Threads: ~p~n", [Threads]),
	ar:console(" Samples: ~p~n", [TargetSamples]),
	ar:console(" Data per iteration: ~p MiB~n", [calculate_mib_per_iteration()]),
	ar:console(" Rated disk speed: ~p MB/s~n", [RatedSpeedMB]),
	ar:console(" Read load threads: ~p~n", [ReadLoadThreads]),
	ar:console(" Read file size: ~p GB~n", [ReadFileGB]),
	case Dir of
		undefined -> ar:console(" Directory: (none - CPU benchmark only)~n");
		_ -> ar:console(" Directory: ~p~n", [Dir])
	end.
%% Print the banner shown before the write-cache fill phase begins.
%% Nothing is printed when no chunk directory was configured.
print_cache_fill_start(undefined) ->
	ok;
print_cache_fill_start(_ChunkDir) ->
	ar:console("~n=== Phase 1: Entropy Preparation ===~n"),
	ar:console("~nFilling write cache", []).

%% Report the entropy-generation rate for a CPU-only sample.
print_entropy_cpu_sample(SampleNum, EntropyMs) ->
	Rate = round(calculate_mib_per_iteration() / (EntropyMs / 1000)),
	ar:console("~nSample ~p: Entropy: ~p MiB/s", [SampleNum, Rate]).

%% Report entropy and disk-write rates for a valid (non-cached) sample.
%% The benchmark header is emitted before the first sample only.
print_entropy_valid_sample(SampleNum, EntropyMs, DiskMs) ->
	maybe_print_entropy_header(SampleNum),
	PerIteration = calculate_mib_per_iteration(),
	ar:console("~nSample ~p: Entropy: ~p MiB/s, Write: ~p MiB/s",
		[SampleNum,
			round(PerIteration / (EntropyMs / 1000)),
			round(PerIteration / (DiskMs / 1000))]).

%% Emit the benchmark header exactly once, before sample #1.
maybe_print_entropy_header(1) ->
	ar:console("~n~nRunning entropy benchmark:");
maybe_print_entropy_header(_) ->
	ok.

%% A dot marks an iteration that was excluded as a write-cache hit.
print_cached_sample() ->
	ar:console(".", []).

%% Explain why the benchmark stopped: report the slowest observed write so
%% the operator can pick a realistic rated_speed and re-run.
print_rated_speed_too_low_error(AllDiskMs) ->
	SlowestMs = lists:min(AllDiskMs),
	SlowestMiBps = calculate_mib_per_iteration() / (SlowestMs / 1000),
	ar:console("~n~n=== Benchmark Stopped ===~n~n"),
	ar:console("Benchmark is unable to proceed as the configured rated_speed is too low.~n"),
	ar:console("The slowest write speed observed was ~p MiB/s (~p MB/s).~n~n",
		[round(SlowestMiBps), round(SlowestMiBps * 1.048576)]),
	ar:console("Please re-run the benchmark with a rated_speed that more correctly~n"),
	ar:console("reflects the rated speed of the disk being written to.~n~n").
%% @doc Print the entropy-preparation summary: iteration counts (including
%% how many were excluded as cache hits), entropy vs. disk-write rates, the
%% bottleneck, and a full-partition time extrapolation.
print_entropy_results(AllResults, ValidResults, ChunkDir, Threads) ->
	MiBPerIteration = calculate_mib_per_iteration(),
	{AllEntropyTimes, _} = lists:unzip(AllResults),
	{ValidEntropyTimes, ValidDiskTimes} = lists:unzip(ValidResults),
	TotalIterations = length(AllResults),
	ValidCount = length(ValidResults),
	CachedCount = TotalIterations - ValidCount,
	AvgEntropyMs = lists:sum(AllEntropyTimes) / TotalIterations,
	AvgDiskMs = safe_average(ValidDiskTimes),
	%% Prefer the average over valid (non-cached) samples; fall back to the
	%% all-iterations average when every iteration was a cache hit.
	AvgValidEntropyMs = case ValidCount > 0 of
		true -> lists:sum(ValidEntropyTimes) / ValidCount;
		false -> AvgEntropyMs
	end,
	EntropyRate = MiBPerIteration / (AvgValidEntropyMs / 1000),
	ar:console("~n--- Entropy Preparation Results ---~n~n"),
	ar:console("Data per iteration: ~p MiB (~p chunks)~n",
		[MiBPerIteration, MiBPerIteration * 4]),
	ar:console("Total iterations: ~p (~p excluded, ~p samples)~n",
		[TotalIterations, CachedCount, ValidCount]),
	ar:console("~n"),
	ar:console("Entropy generation: ~.2f MiB/s (~p threads)~n", [EntropyRate, Threads]),
	case ChunkDir of
		undefined ->
			%% CPU-only benchmark: extrapolate from the entropy rate alone.
			ar:console("~nNo disk I/O measured.~n"),
			print_preparation_extrapolation(EntropyRate);
		_ ->
			DiskRate = MiBPerIteration / (AvgDiskMs / 1000),
			ar:console("Disk write: ~.2f MiB/s~n", [DiskRate]),
			%% The slower of the two stages determines the effective rate.
			{EffectiveRate, Bottleneck} = case EntropyRate < DiskRate of
				true -> {EntropyRate, "CPU (entropy generation)"};
				false -> {DiskRate, "Disk Write"}
			end,
			ar:console("~nBottleneck: ~s~n", [Bottleneck]),
			ar:console("Effective rate: ~.2f MiB/s~n", [EffectiveRate]),
			print_preparation_extrapolation(EffectiveRate)
	end.

%% Extrapolate preparing a full ?PARTITION_SIZE partition at EffectiveRate.
print_preparation_extrapolation(EffectiveRate) ->
	PartitionSizeTB = ?PARTITION_SIZE / ?TB,
	TotalSeconds = ?PARTITION_SIZE / (EffectiveRate * ?MiB),
	ar:console("~nEstimated preparation time for ~.1f TB partition: ~s~n",
		[PartitionSizeTB, format_duration(TotalSeconds)]).
%% Print the per-sample rates for the five packing pipeline stages.
print_packing_sample(SampleNum, {UnpackEntropyMs, DecipherMs, ReadMs, EncipherMs, WriteMs},
		_Threads) ->
	MiBPerIteration = calculate_mib_per_iteration(),
	UnpackEntropyRate = MiBPerIteration / (UnpackEntropyMs / 1000),
	DecipherRate = MiBPerIteration / (DecipherMs / 1000),
	ReadRate = MiBPerIteration / (ReadMs / 1000),
	EncipherRate = MiBPerIteration / (EncipherMs / 1000),
	WriteRate = MiBPerIteration / (WriteMs / 1000),
	ar:console("~nSample ~p: Entropy: ~p MiB/s, Decipher: ~p MiB/s, Read: ~p MiB/s, Encipher: ~p MiB/s, Write: ~p MiB/s",
		[SampleNum, round(UnpackEntropyRate), round(DecipherRate), round(ReadRate),
			round(EncipherRate), round(WriteRate)]).

%% @doc Print the packing summary: per-stage average rates, the bottleneck
%% stage (lowest rate), and a full-partition packing time extrapolation.
print_packing_results(Results, Threads) ->
	MiBPerIteration = calculate_mib_per_iteration(),
	SampleCount = length(Results),
	%% Sum the five stage timings across all samples in one pass.
	{UnpackEntropyTimes, DecipherTimes, ReadTimes, EncipherTimes, WriteTimes} =
		lists:foldl(
			fun({UE, D, R, E, W}, {UEAcc, DAcc, RAcc, EAcc, WAcc}) ->
				{UEAcc + UE, DAcc + D, RAcc + R, EAcc + E, WAcc + W}
			end,
			{0, 0, 0, 0, 0}, Results),
	AvgUnpackEntropyMs = UnpackEntropyTimes / SampleCount,
	AvgDecipherMs = DecipherTimes / SampleCount,
	AvgReadMs = ReadTimes / SampleCount,
	AvgEncipherMs = EncipherTimes / SampleCount,
	AvgWriteMs = WriteTimes / SampleCount,
	UnpackEntropyRate = MiBPerIteration / (AvgUnpackEntropyMs / 1000),
	DecipherRate = MiBPerIteration / (AvgDecipherMs / 1000),
	ReadRate = MiBPerIteration / (AvgReadMs / 1000),
	EncipherRate = MiBPerIteration / (AvgEncipherMs / 1000),
	WriteRate = MiBPerIteration / (AvgWriteMs / 1000),
	%% Identify bottleneck (slowest operation = lowest rate)
	{EffectiveRate, Bottleneck} = find_bottleneck([
		{UnpackEntropyRate, io_lib:format("CPU (unpack entropy, ~p threads)", [Threads])},
		{DecipherRate, "CPU (decipher)"},
		{ReadRate, "Disk read"},
		{EncipherRate, "CPU (encipher)"},
		{WriteRate, "Disk write"}
	]),
	ar:console("~n--- Packing Results ---~n~n"),
	ar:console("Samples: ~p~n", [SampleCount]),
	ar:console("~n"),
	ar:console("Unpack entropy: ~.2f MiB/s (~p threads)~n",
		[UnpackEntropyRate, Threads]),
	ar:console("Decipher: ~.2f MiB/s~n", [DecipherRate]),
	ar:console("Read: ~.2f MiB/s~n", [ReadRate]),
	ar:console("Encipher: ~.2f MiB/s~n", [EncipherRate]),
	ar:console("Write: ~.2f MiB/s~n", [WriteRate]),
	ar:console("~nBottleneck: ~s~n", [Bottleneck]),
	ar:console("Effective rate: ~.2f MiB/s~n", [EffectiveRate]),
	print_packing_extrapolation(EffectiveRate).

%% Return the {Rate, Name} pair with the lowest rate from a non-empty list.
find_bottleneck([{Rate, Name} | Rest]) ->
	find_bottleneck(Rest, Rate, Name).

find_bottleneck([], MinRate, MinName) ->
	{MinRate, MinName};
find_bottleneck([{Rate, Name} | Rest], MinRate, MinName) ->
	case Rate < MinRate of
		true -> find_bottleneck(Rest, Rate, Name);
		false -> find_bottleneck(Rest, MinRate, MinName)
	end.

%% Extrapolate packing a full ?PARTITION_SIZE partition at EffectiveRate.
print_packing_extrapolation(EffectiveRate) ->
	PartitionSizeTB = ?PARTITION_SIZE / ?TB,
	TotalSeconds = ?PARTITION_SIZE / (EffectiveRate * ?MiB),
	ar:console("Estimated packing time for ~.1f TB partition: ~s~n",
		[PartitionSizeTB, format_duration(TotalSeconds)]).

%%%===================================================================
%%% Utilities
%%%===================================================================

%% Average of a list of numbers; 0 for the empty list.
safe_average([]) ->
	0;
safe_average(List) ->
	lists:sum(List) / length(List).

%% Render a duration in seconds as a human-friendly iolist.
format_duration(Seconds) when Seconds < 60 ->
	io_lib:format("~.1f seconds", [Seconds]);
format_duration(Seconds) when Seconds < 3600 ->
	io_lib:format("~.1f minutes", [Seconds / 60]);
format_duration(Seconds) when Seconds < 86400 ->
	io_lib:format("~.1f hours", [Seconds / 3600]);
format_duration(Seconds) ->
	Days = Seconds / 86400,
	io_lib:format("~.1f days (~p hours)", [Days, trunc(Days * 24)]).

================================================
FILE: apps/arweave/src/ar_bench_timer.erl
================================================
-module(ar_bench_timer).

-export([initialize/0, reset/0, record/3, start/1, stop/1, get_timing_data/0,
	print_timing_data/0, get_total/1, get_max/1, get_min/1, get_avg/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_vdf.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%% @doc Time Fun(Args) with timer:tc/2, accumulate the elapsed microseconds
%% under Key for the calling process, and return Fun's result.
record(Key, Fun, Args) ->
	{Time, Result} = timer:tc(Fun, Args),
	update_total(Key, Time),
	Result.

%% @doc Mark the start of a timed section identified by Key. Timings are
%% tracked per calling process (see real_key/1).
start(Key) ->
	StartTime = erlang:timestamp(),
	ets:insert(start_time, {real_key(Key), StartTime}).

%% @doc Stop the timed section started with start/1, accumulate and return
%% the elapsed microseconds. Returns {error, {not_started, Key}} when no
%% matching start/1 was recorded for this process.
stop(Key) ->
	case ets:lookup(start_time, real_key(Key)) of
		[{_, StartTime}] ->
			EndTime = erlang:timestamp(),
			ElapsedTime = timer:now_diff(EndTime, StartTime),
			update_total(Key, ElapsedTime),
			ElapsedTime;
		[] ->
			%% Key not found: return an error tuple rather than crashing.
			{error, {not_started, Key}}
	end.

%% Accumulate ElapsedTime (microseconds) under the per-process key,
%% initializing the counter to 0 on first use.
update_total(Key, ElapsedTime) ->
	ets:update_counter(total_time, real_key(Key), {2, ElapsedTime}, {real_key(Key), 0}).

%% @doc Sum of recorded times. Accepts either a list of times or a key.
get_total([]) -> 0;
get_total(Times) when is_list(Times) -> lists:sum(Times);
get_total(Key) -> get_total(get_times(Key)).

%% @doc Maximum recorded time. Accepts either a list of times or a key.
get_max([]) -> 0;
get_max(Times) when is_list(Times) -> lists:max(Times);
get_max(Key) -> get_max(get_times(Key)).

%% @doc Minimum recorded time. Accepts either a list of times or a key.
get_min([]) -> 0;
get_min(Times) when is_list(Times) -> lists:min(Times);
get_min(Key) -> get_min(get_times(Key)).

%% @doc Average recorded time. Accepts either a list of times or a key.
%% The empty list is handled by the first clause, so the list clause cannot
%% divide by zero (the former inner `case length(Times) of 0` was dead code).
get_avg([]) -> 0;
get_avg(Times) when is_list(Times) -> lists:sum(Times) / length(Times);
get_avg(Key) -> get_avg(get_times(Key)).

%% Collect all recorded times for Key across every process.
get_times(Key) ->
	[Match || [Match] <- ets:match(total_time, {{Key, '_'}, '$1'})].

%% Unique keys present in the timing table, ignoring the per-process part.
get_timing_keys() ->
	Keys = [Key || {{Key, _PID}, _Value} <- get_timing_data()],
	UniqueKeys = sets:to_list(sets:from_list(Keys)),
	UniqueKeys.

get_timing_data() ->
	ets:tab2list(total_time).

%% @doc Log the total seconds accumulated under every key.
print_timing_data() ->
	lists:foreach(
		fun(Key) ->
			Seconds = get_total(Key) / 1000000,
			?LOG_ERROR("~p: ~p", [Key, Seconds])
		end, get_timing_keys()).

%% @doc Clear all recorded timings; the tables themselves are kept.
reset() ->
	ets:delete_all_objects(total_time),
	ets:delete_all_objects(start_time).

%% @doc Create the named, public ets tables; call once before any other API.
initialize() ->
	ets:new(total_time, [set, named_table, public]),
	ets:new(start_time, [set, named_table, public]).

%% Timings are tracked per {Key, Pid} so concurrent processes do not clobber
%% each other's start times or totals.
real_key(Key) -> {Key, self()}.
================================================
FILE: apps/arweave/src/ar_bench_vdf.erl
================================================
-module(ar_bench_vdf).

-export([run_benchmark/0, run_benchmark_from_cli/1]).

-include_lib("arweave/include/ar_vdf.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% @doc CLI entry point: parse the mode/difficulty/verify flags (falling back
%% to defaults) and run the benchmark.
run_benchmark_from_cli(Args) ->
	Mode = list_to_atom(get_flag_value(Args, "mode", "default")),
	Difficulty = list_to_integer(
		get_flag_value(Args, "difficulty", integer_to_list(?VDF_DIFFICULTY))),
	Verify = list_to_atom(get_flag_value(Args, "verify", "false")),
	run_benchmark(Mode, Difficulty, Verify).

%% Return the value following TargetFlag in the flat [Flag, Value, ...]
%% argument list, or DefaultValue when the flag is absent.
get_flag_value([], _, DefaultValue) ->
	DefaultValue;
get_flag_value([Flag | [Value | _Tail]], TargetFlag, _DefaultValue) when Flag == TargetFlag ->
	Value;
get_flag_value([_ | Tail], TargetFlag, DefaultValue) ->
	get_flag_value(Tail, TargetFlag, DefaultValue).

%% Print usage and stop the node with a non-zero status.
%% NOTE(review): not referenced by the code visible here - presumably invoked
%% from the CLI dispatcher elsewhere; verify before removing.
show_help() ->
	io:format("~nUsage: benchmark vdf [options]~n"),
	io:format("Options:~n"),
	io:format(" mode (default: default)~n"),
	io:format(" difficulty (default: ~p)~n", [?VDF_DIFFICULTY]),
	io:format(" verify (default: false)~n"),
	init:stop(1).

%% @doc Run with the current config, the default difficulty and no verification.
run_benchmark() ->
	run_benchmark(none, ?VDF_DIFFICULTY, false).
%% @doc Compute one VDF step on random input and report the wall-clock time
%% in seconds. Mode selects the VDF implementation via the application config;
%% `none' keeps whatever the config already says. Returns the time in
%% microseconds.
run_benchmark(Mode, Difficulty, Verify) ->
	case Mode of
		none ->
			%% Run as part of startup, use whatever is set in the config
			ok;
		openssl ->
			ok = arweave_config:set_env(#config{ vdf = openssl });
		fused ->
			ok = arweave_config:set_env(#config{ vdf = fused });
		hiopt_m4 ->
			ok = arweave_config:set_env(#config{ vdf = hiopt_m4 });
		default ->
			ok = arweave_config:set_env(#config{})
	end,
	Input = crypto:strong_rand_bytes(32),
	{Time, {ok, Output, Checkpoints}} =
		timer:tc(fun() -> ar_vdf:compute2(1, Input, Difficulty) end),
	io:format("~n~n"),
	maybe_verify(Verify, Input, Difficulty, Output, Checkpoints),
	io:format("VDF step computed in ~.2f seconds.~n~n", [Time / 1000000]),
	%% Warn when one step takes noticeably longer than a second.
	case Time > 1150000 of
		true ->
			io:format("WARNING: your VDF computation speed is low - consider fetching "
					"VDF outputs from an external source (see vdf_server_trusted_peer "
					"and vdf_client_peer command line parameters).~n~n");
		false ->
			ok
	end,
	Time.

%% Cross-check the configured VDF implementation against the reference SHA-2
%% implementation (ar_vdf:debug_sha2/3) when Verify is true.
maybe_verify(true, Input, Difficulty, Output, Checkpoints) ->
	{ok, VerifyOutput, VerifyCheckpoints} = ar_vdf:debug_sha2(1, Input, Difficulty),
	case Output == VerifyOutput of
		true ->
			io:format("Output matches.~n");
		false ->
			io:format("Output mismatch. Expected: ~p, Got: ~p~n",
				[ar_util:encode(Output), ar_util:encode(VerifyOutput)])
	end,
	case Checkpoints == VerifyCheckpoints of
		true ->
			io:format("Checkpoints match.~n");
		false ->
			io:format("Checkpoints mismatch. Expected: ~p, Got: ~p~n",
				[Checkpoints, VerifyCheckpoints])
	end;
maybe_verify(false, _Input, _Difficulty, _Output, _Checkpoints) ->
	ok.

================================================
FILE: apps/arweave/src/ar_blacklist_middleware.erl
================================================
-module(ar_blacklist_middleware).

-export([start/0, ban_peer/2, is_peer_banned/1, cleanup_ban/1]).
-export([start_link/0]).

-ifdef(AR_TEST).
-export([reset/0]).
-endif.

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_blacklist_middleware.hrl").
-include_lib("eunit/include/eunit.hrl").

start_link() ->
	{ok, spawn_link(fun() -> start() end)}.

%% @doc Schedule the first periodic cleanup of expired bans. The current ets
%% table id is passed along so a cleanup firing after the table owner died
%% (and the named table was re-created) becomes a no-op.
start() ->
	?LOG_INFO([{start, ?MODULE}, {pid, self()}]),
	{ok, _} = ar_timer:apply_after(
		?BAN_CLEANUP_INTERVAL,
		?MODULE,
		cleanup_ban,
		[ets:whereis(?MODULE)],
		#{ skip_on_shutdown => false }
	).

reset() ->
	true = ets:delete_all_objects(?MODULE),
	ok.

%% Ban a peer completely for TTLSeconds seconds. Since we cannot trust the port,
%% we ban the whole IP address.
ban_peer(Peer, TTLSeconds) ->
	?LOG_DEBUG([{event, ban_peer}, {peer, ar_util:format_peer(Peer)},
			{seconds, TTLSeconds}]),
	Key = {ban, peer_to_ip_addr(Peer)},
	Expires = os:system_time(seconds) + TTLSeconds,
	ets:insert(?MODULE, {Key, Expires}).

%% @doc Report whether the peer's IP is currently in the ban table.
%% NOTE(review): the stored expiry is not checked here - a ban only ends when
%% cleanup_ban/1 removes the row, so bans may outlive their TTL by up to one
%% cleanup interval. Confirm this is the intended trade-off.
is_peer_banned(Peer) ->
	Key = {ban, peer_to_ip_addr(Peer)},
	case ets:lookup(?MODULE, Key) of
		[] -> not_banned;
		[_] -> banned
	end.

%% @doc Remove expired bans and reschedule itself. Stops silently when the
%% named table id no longer matches TableID (the original owner died).
cleanup_ban(TableID) ->
	case ets:whereis(?MODULE) of
		TableID ->
			Now = os:system_time(seconds),
			Folder = fun
				({{ban, _} = Key, Expires}, Acc) when Expires < Now ->
					[Key | Acc];
				(_, Acc) ->
					Acc
			end,
			RemoveKeys = ets:foldl(Folder, [], ?MODULE),
			Delete = fun(Key) -> ets:delete(?MODULE, Key) end,
			lists:foreach(Delete, RemoveKeys),
			_ = ar_timer:apply_after(
				?BAN_CLEANUP_INTERVAL,
				?MODULE,
				cleanup_ban,
				[TableID],
				#{ skip_on_shutdown => true }
			);
		_ ->
			table_owner_died
	end.

%private functions

%% Drop the untrusted port, keeping only the IPv4 address components.
peer_to_ip_addr({A, B, C, D, _}) -> {A, B, C, D}.

================================================
FILE: apps/arweave/src/ar_block.erl
================================================
-module(ar_block).
-export([get_consensus_window_size/0, get_max_tx_anchor_depth/0, partition_size/0,
	get_replica_2_9_entropy_sector_size/0, get_replica_2_9_entropy_partition_size/0,
	get_sub_chunks_per_replica_2_9_entropy/0, get_replica_2_9_entropy_count/0,
	get_replica_2_9_footprint_size/0, strict_data_split_threshold/0,
	block_field_size_limit/1, verify_timestamp/2, get_max_timestamp_deviation/0,
	verify_last_retarget/2, verify_weave_size/3, verify_cumulative_diff/2,
	verify_block_hash_list_merkle/2, compute_hash_list_merkle/1,
	compute_h0/2, compute_h0/5, compute_h0/6, compute_h1/3, compute_h2/3,
	compute_solution_h/2, indep_hash/1, indep_hash/2, indep_hash2/2,
	get_block_signature_preimage/4, generate_signed_hash/1, verify_signature/3,
	get_reward_key/2, generate_block_data_segment/1, generate_block_data_segment/2,
	generate_block_data_segment_base/1, get_recall_range/3, get_recall_range/5,
	verify_tx_root/1, hash_wallet_list/1, generate_hash_list_for_block/2,
	generate_tx_root_for_block/1, generate_tx_root_for_block/2,
	generate_size_tagged_list_from_txs/2, generate_tx_tree/1, generate_tx_tree/2,
	test_wallet_list_performance/0, test_wallet_list_performance/1,
	test_wallet_list_performance/2, test_wallet_list_performance/3,
	poa_to_list/1, shift_packing_2_5_threshold/1, get_packing_threshold/2,
	compute_next_vdf_difficulty/1, validate_proof_size/1, vdf_step_number/1,
	get_packing/3, validate_replica_format/3, get_max_nonce/1,
	get_recall_range_size/1, get_recall_byte/3, get_sub_chunk_size/1,
	get_nonces_per_chunk/1, get_nonces_per_recall_range/1, get_sub_chunk_index/2,
	get_chunk_padded_offset/1, get_double_signing_condition/4]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include("ar_block.hrl").
-include("ar_vdf.hrl").

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the number of blocks we track during consensus. The node
%% does not accept new blocks originating from blocks older than the oldest
%% block in this window.
get_consensus_window_size() ->
	?STORE_BLOCKS_BEHIND_CURRENT.

%% @doc Return the maximum allowed block depth of the transaction block anchor.
get_max_tx_anchor_depth() ->
	ar_block:get_consensus_window_size().

%% @doc Expose constants through a function to allow mocking/injection in tests.
partition_size() ->
	?PARTITION_SIZE.

strict_data_split_threshold() ->
	?STRICT_DATA_SPLIT_THRESHOLD.

%% @doc Return the 2.9 entropy sector size - the largest total size in bytes of the contiguous
%% area where the 2.9 entropy of every chunk is unique.
-spec get_replica_2_9_entropy_sector_size() -> pos_integer().
get_replica_2_9_entropy_sector_size() ->
	?REPLICA_2_9_ENTROPY_COUNT * ?COMPOSITE_PACKING_SUB_CHUNK_SIZE.

%% @doc Return the size of the 2.9 entropy partition.
-spec get_replica_2_9_entropy_partition_size() -> pos_integer().
get_replica_2_9_entropy_partition_size() ->
	?REPLICA_2_9_ENTROPY_COUNT * ?REPLICA_2_9_ENTROPY_SIZE.

%% @doc Return the number of sub-chunks per entropy. We'll generally create 32x entropies
%% in order to fully encipher this many chunks.
-spec get_sub_chunks_per_replica_2_9_entropy() -> pos_integer().
get_sub_chunks_per_replica_2_9_entropy() ->
	?REPLICA_2_9_ENTROPY_SIZE div ?COMPOSITE_PACKING_SUB_CHUNK_SIZE.

%% @doc Return the total size in bytes for a full footprint of entropy.
-spec get_replica_2_9_footprint_size() -> pos_integer().
get_replica_2_9_footprint_size() ->
	?REPLICA_2_9_ENTROPY_SIZE * ?COMPOSITE_PACKING_SUB_CHUNK_COUNT.

%% @doc Return the number of entropies per partition.
-spec get_replica_2_9_entropy_count() -> pos_integer().
get_replica_2_9_entropy_count() ->
	?REPLICA_2_9_ENTROPY_COUNT div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT.

%% @doc Check whether the block fields conform to the specified size limits.
%% An unclaimed reward address is treated as the empty binary for size checks.
block_field_size_limit(B = #block{ reward_addr = unclaimed }) ->
	block_field_size_limit(B#block{ reward_addr = <<>> });
block_field_size_limit(B) ->
	%% From fork 1.8 the difficulty may be printed with up to 78 decimal
	%% digits (enough for a 256-bit number); before that, 10.
	DiffBytesLimit = case ar_fork:height_1_8() of
		Height when B#block.height >= Height ->
			78;
		_ ->
			10
	end,
	{ChunkSize, DataPathSize} =
		case B#block.poa of
			POA when is_record(POA, poa) ->
				{
					byte_size((B#block.poa)#poa.chunk),
					byte_size((B#block.poa)#poa.data_path)
				};
			_ ->
				{0, 0}
		end,
	RewardAddrCheck = byte_size(B#block.reward_addr) =< 32,
	Check = (byte_size(B#block.nonce) =< 512) and
		(byte_size(B#block.previous_block) =< 48) and
		(byte_size(integer_to_binary(B#block.timestamp)) =< ?TIMESTAMP_FIELD_SIZE_LIMIT) and
		(byte_size(integer_to_binary(B#block.last_retarget)) =< ?TIMESTAMP_FIELD_SIZE_LIMIT) and
		(byte_size(integer_to_binary(B#block.diff)) =< DiffBytesLimit) and
		(byte_size(integer_to_binary(B#block.height)) =< 20) and
		(byte_size(B#block.hash) =< 48) and
		(byte_size(B#block.indep_hash) =< 48) and
		RewardAddrCheck and
		validate_tags_size(B) and
		(byte_size(integer_to_binary(B#block.weave_size)) =< 64) and
		(byte_size(integer_to_binary(B#block.block_size)) =< 64) and
		(ChunkSize =< ?DATA_CHUNK_SIZE) and
		(DataPathSize =< ?MAX_PATH_SIZE),
	case Check of
		false ->
			%% Log every field size so the offending field is easy to spot.
			?LOG_INFO(
				[
					{event, received_block_with_invalid_field_size},
					{nonce, byte_size(B#block.nonce)},
					{previous_block, byte_size(B#block.previous_block)},
					{timestamp, byte_size(integer_to_binary(B#block.timestamp))},
					{last_retarget, byte_size(integer_to_binary(B#block.last_retarget))},
					{diff, byte_size(integer_to_binary(B#block.diff))},
					{height, byte_size(integer_to_binary(B#block.height))},
					{hash, byte_size(B#block.hash)},
					{indep_hash, byte_size(B#block.indep_hash)},
					{reward_addr, byte_size(B#block.reward_addr)},
					{tags, byte_size(list_to_binary(B#block.tags))},
					{weave_size, byte_size(integer_to_binary(B#block.weave_size))},
					{block_size, byte_size(integer_to_binary(B#block.block_size))}
				]
			);
		_ ->
			ok
	end,
	Check.
%% @doc Verify the block timestamp is not too far in the future nor too far in
%% the past. We calculate the maximum reasonable clock difference between any
%% two nodes. This is a simplification since there is a chaining effect in the
%% network which we don't take into account. Instead, we assume two nodes can
%% deviate JOIN_CLOCK_TOLERANCE seconds in the opposite direction from each
%% other.
verify_timestamp(#block{ timestamp = Timestamp }, #block{ timestamp = PrevTimestamp }) ->
	MaxNodesClockDeviation = get_max_timestamp_deviation(),
	case Timestamp >= PrevTimestamp - MaxNodesClockDeviation of
		false ->
			false;
		true ->
			CurrentTime = os:system_time(seconds),
			Timestamp =< CurrentTime + MaxNodesClockDeviation
	end.

%% @doc Return the largest possible value by which the previous block's timestamp
%% may exceed the next block's timestamp.
get_max_timestamp_deviation() ->
	?JOIN_CLOCK_TOLERANCE * 2 + ?CLOCK_DRIFT_MAX.

%% @doc Verify the retarget timestamp on NewB is correct: at a retarget height
%% last_retarget must equal the block's own timestamp, otherwise it must be
%% carried over unchanged from the previous block.
verify_last_retarget(NewB, OldB) ->
	case ar_retarget:is_retarget_height(NewB#block.height) of
		true ->
			NewB#block.last_retarget == NewB#block.timestamp;
		false ->
			NewB#block.last_retarget == OldB#block.last_retarget
	end.

%% @doc Verify the new weave size is computed correctly given the previous block
%% and the list of transactions of the new block.
verify_weave_size(NewB, OldB, TXs) ->
	BlockSize = lists:foldl(
		fun(TX, Acc) ->
			Acc + ar_tx:get_weave_size_increase(TX, NewB#block.height)
		end,
		0,
		TXs
	),
	%% block_size itself is only validated from fork 2.6 onwards.
	(NewB#block.height < ar_fork:height_2_6() orelse BlockSize == NewB#block.block_size)
		andalso NewB#block.weave_size == OldB#block.weave_size + BlockSize.

%% @doc Verify the new cumulative difficulty is computed correctly.
verify_cumulative_diff(NewB, OldB) ->
	NewB#block.cumulative_diff ==
		ar_difficulty:next_cumulative_diff(
			OldB#block.cumulative_diff,
			NewB#block.diff,
			NewB#block.height
		).

%% @doc Verify the root of the new block tree is computed correctly.
%% Only defined for post-2.0 blocks (asserted below).
verify_block_hash_list_merkle(NewB, CurrentB) ->
	true = NewB#block.height > ar_fork:height_2_0(),
	NewB#block.hash_list_merkle ==
		ar_unbalanced_merkle:root(CurrentB#block.hash_list_merkle,
			{CurrentB#block.indep_hash, CurrentB#block.weave_size,
				CurrentB#block.tx_root},
			fun ar_unbalanced_merkle:hash_block_index_entry/1).

%% @doc Compute the root of the new block tree given the previous block.
compute_hash_list_merkle(B) ->
	ar_unbalanced_merkle:root(
		B#block.hash_list_merkle,
		{B#block.indep_hash, B#block.weave_size, B#block.tx_root},
		fun ar_unbalanced_merkle:hash_block_index_entry/1
	).

%% @doc Compute "h0" - a cryptographic hash used as a source of entropy when choosing
%% two recall ranges on the weave as unlocked by the given nonce limiter output.
compute_h0(B, PrevB) ->
	#block{ nonce_limiter_info = NonceLimiterInfo,
			partition_number = PartitionNumber,
			reward_addr = MiningAddr,
			packing_difficulty = PackingDifficulty } = B,
	%% The entropy seed comes from the previous block's nonce limiter info.
	PrevNonceLimiterInfo = PrevB#block.nonce_limiter_info,
	Seed = PrevNonceLimiterInfo#nonce_limiter_info.seed,
	NonceLimiterOutput = NonceLimiterInfo#nonce_limiter_info.output,
	compute_h0(NonceLimiterOutput, PartitionNumber, Seed, MiningAddr, PackingDifficulty).

%% Convenience wrapper supplying the current packing state.
compute_h0(NonceLimiterOutput, PartitionNumber, Seed, MiningAddr, PackingDifficulty) ->
	compute_h0(NonceLimiterOutput, PartitionNumber, Seed, MiningAddr, PackingDifficulty,
		ar_packing_server:get_packing_state()).

%% @doc Compute "h0" - a cryptographic hash used as a source of entropy when choosing
%% two recall ranges on the weave as unlocked by the given nonce limiter output.
compute_h0(NonceLimiterOutput, PartitionNumber, Seed, MiningAddr, PackingDifficulty,
		PackingState) ->
	%% PackingDifficulty 0 keeps the original preimage layout; otherwise the
	%% difficulty byte is appended to the preimage.
	Preimage = case PackingDifficulty of
		0 ->
			<< NonceLimiterOutput:32/binary, PartitionNumber:256, Seed:32/binary,
					MiningAddr/binary >>;
		_ ->
			<< NonceLimiterOutput:32/binary, PartitionNumber:256, Seed:32/binary,
					MiningAddr/binary, PackingDifficulty:8 >>
	end,
	RandomXState = ar_packing_server:get_randomx_state_for_h0(PackingDifficulty,
		PackingState),
	ar_mine_randomx:hash(RandomXState, Preimage).

%% @doc Compute "h1" - a cryptographic hash which is either the hash of a solution not
%% involving the second chunk or a carrier of the information about the first chunk
%% used when computing the solution hash off the second chunk.
compute_h1(H0, Nonce, Chunk) ->
	Preimage = crypto:hash(sha256, << H0:32/binary, Nonce:64, Chunk/binary >>),
	{compute_solution_h(H0, Preimage), Preimage}.

%% @doc Compute "h2" - the hash of a solution involving the second chunk.
compute_h2(H1, Chunk, H0) ->
	Preimage = crypto:hash(sha256, << H1:32/binary, Chunk/binary >>),
	{compute_solution_h(H0, Preimage), Preimage}.

%% @doc Compute the solution hash from the preimage and H0.
compute_solution_h(H0, Preimage) ->
	crypto:hash(sha256, << H0:32/binary, Preimage/binary >>).
%% @doc Compute the VDF difficulty for the next block. A retarget happens
%% every ?VDF_DIFFICULTY_RETARGET blocks, and only once the previous retarget
%% has been fully adopted (vdf_difficulty == next_vdf_difficulty); otherwise
%% the already-scheduled next_vdf_difficulty is returned unchanged. From fork
%% 2.7.1 the history window is bounded and an EMA (1/10 weight on the new
%% estimate) smooths the adjustment.
compute_next_vdf_difficulty(PrevB) ->
	Height = PrevB#block.height + 1,
	#nonce_limiter_info{ vdf_difficulty = VDFDifficulty,
			next_vdf_difficulty = NextVDFDifficulty } = PrevB#block.nonce_limiter_info,
	case ar_block_time_history:has_history(Height) of
		true ->
			case (Height rem ?VDF_DIFFICULTY_RETARGET == 0)
					andalso (VDFDifficulty == NextVDFDifficulty) of
				false ->
					NextVDFDifficulty;
				true ->
					case Height < ar_fork:height_2_7_1() of
						true ->
							%% Pre-2.7.1: use the whole history past the cut.
							HistoryPart = lists:nthtail(?VDF_HISTORY_CUT,
									ar_block_time_history:get_history(PrevB)),
							{IntervalTotal, VDFIntervalTotal} = lists:foldl(
								fun({BlockInterval, VDFInterval, _ChunkCount},
										{Acc1, Acc2}) ->
									{
										Acc1 + BlockInterval,
										Acc2 + VDFInterval
									}
								end,
								{0, 0},
								HistoryPart
							),
							%% Scale by observed VDF-interval / block-interval.
							NewVDFDifficulty =
								(VDFIntervalTotal * VDFDifficulty) div IntervalTotal,
							?LOG_DEBUG([{event, vdf_difficulty_retarget},
									{height, Height},
									{old_vdf_difficulty, VDFDifficulty},
									{new_vdf_difficulty, NewVDFDifficulty},
									{interval_total, IntervalTotal},
									{vdf_interval_total, VDFIntervalTotal}]),
							NewVDFDifficulty;
						false ->
							%% Post-2.7.1: bound the window to the retarget
							%% length and smooth with an EMA.
							HistoryPartCut1 = lists:nthtail(?VDF_HISTORY_CUT,
									ar_block_time_history:get_history(PrevB)),
							HistoryPart = lists:sublist(HistoryPartCut1,
									?VDF_DIFFICULTY_RETARGET),
							{IntervalTotal, VDFIntervalTotal} = lists:foldl(
								fun({BlockInterval, VDFInterval, _ChunkCount},
										{Acc1, Acc2}) ->
									{
										Acc1 + BlockInterval,
										Acc2 + VDFInterval
									}
								end,
								{0, 0},
								HistoryPart
							),
							NewVDFDifficulty =
								(VDFIntervalTotal * VDFDifficulty) div IntervalTotal,
							%% Move only 1/10 of the way to the new estimate.
							EMAVDFDifficulty =
								(9*VDFDifficulty + NewVDFDifficulty) div 10,
							?LOG_DEBUG([{event, vdf_difficulty_retarget},
									{height, Height},
									{old_vdf_difficulty, VDFDifficulty},
									{new_vdf_difficulty, NewVDFDifficulty},
									{ema_vdf_difficulty, EMAVDFDifficulty},
									{interval_total, IntervalTotal},
									{vdf_interval_total, VDFIntervalTotal}]),
							EMAVDFDifficulty
					end
			end;
		false ->
			%% No history yet: fall back to the protocol default.
			?VDF_DIFFICULTY
	end.
%% @doc Check the proof-of-access path and chunk sizes are within protocol limits.
validate_proof_size(PoA) ->
	byte_size(PoA#poa.tx_path) =< ?MAX_TX_PATH_SIZE
		andalso byte_size(PoA#poa.data_path) =< ?MAX_DATA_PATH_SIZE
		andalso byte_size(PoA#poa.chunk) =< ?DATA_CHUNK_SIZE
		andalso byte_size(PoA#poa.unpacked_chunk) =< ?DATA_CHUNK_SIZE.

%% @doc Compute the block identifier (also referred to as "independent hash").
%% From fork 2.6 it is derived from the signed hash and the signature;
%% earlier blocks hash the block data segment.
indep_hash(B) ->
	case B#block.height >= ar_fork:height_2_6() of
		true ->
			H = ar_block:generate_signed_hash(B),
			indep_hash2(H, B#block.signature);
		false ->
			BDS = ar_block:generate_block_data_segment(B),
			indep_hash(BDS, B)
	end.

%% @doc Compute the hash signed by the block producer: a SHA-256 over the
%% canonical serialization of every header field. Fields introduced by forks
%% 2.7/2.8/2.9 are only included from the respective heights, so older blocks
%% keep their historical preimage.
generate_signed_hash(#block{ previous_block = PrevH, timestamp = TS,
		nonce = Nonce, height = Height, diff = Diff, cumulative_diff = CDiff,
		last_retarget = LastRetarget, hash = Hash, block_size = BlockSize,
		weave_size = WeaveSize, tx_root = TXRoot, wallet_list = WalletList,
		hash_list_merkle = HashListMerkle, reward_pool = RewardPool,
		packing_2_5_threshold = Packing_2_5_Threshold,
		reward_addr = Addr, reward_key = RewardKey,
		strict_data_split_threshold = StrictChunkThreshold,
		usd_to_ar_rate = {RateDividend, RateDivisor},
		scheduled_usd_to_ar_rate = {ScheduledRateDividend, ScheduledRateDivisor},
		tags = Tags, txs = TXs, reward = Reward,
		hash_preimage = HashPreimage, recall_byte = RecallByte,
		partition_number = PartitionNumber, recall_byte2 = RecallByte2,
		nonce_limiter_info = NonceLimiterInfo,
		previous_solution_hash = PreviousSolutionHash,
		price_per_gib_minute = PricePerGiBMinute,
		scheduled_price_per_gib_minute = ScheduledPricePerGiBMinute,
		reward_history_hash = RewardHistoryHash,
		block_time_history_hash = BlockTimeHistoryHash,
		debt_supply = DebtSupply,
		kryder_plus_rate_multiplier = KryderPlusRateMultiplier,
		kryder_plus_rate_multiplier_latch = KryderPlusRateMultiplierLatch,
		denomination = Denomination, redenomination_height = RedenominationHeight,
		double_signing_proof = DoubleSigningProof,
		previous_cumulative_diff = PrevCDiff,
		merkle_rebase_support_threshold = RebaseThreshold,
		poa = #poa{ data_path = DataPath, tx_path = TXPath },
		poa2 = #poa{ data_path = DataPath2, tx_path = TXPath2 },
		chunk_hash = ChunkHash, chunk2_hash = Chunk2Hash,
		packing_difficulty = PackingDifficulty,
		unpacked_chunk_hash = UnpackedChunkHash,
		unpacked_chunk2_hash = UnpackedChunk2Hash,
		replica_format = ReplicaFormat }) ->
	%% TXs may be either full #tx records or bare transaction identifiers.
	GetTXID = fun(TXID) when is_binary(TXID) -> TXID; (TX) -> TX#tx.id end,
	Nonce2 = binary:encode_unsigned(Nonce),
	%% The only block where reward_address may be unclaimed
	%% is the genesis block of a new weave.
	Addr2 = case Addr of unclaimed -> <<>>; _ -> Addr end,
	RewardKey2 = case RewardKey of
		undefined -> undefined;
		{_Type, Pub} -> Pub
	end,
	#nonce_limiter_info{ output = Output, global_step_number = N, seed = Seed,
			next_seed = NextSeed, partition_upper_bound = PartitionUpperBound,
			next_partition_upper_bound = NextPartitionUpperBound, steps = Steps,
			prev_output = PrevOutput,
			last_step_checkpoints = LastStepCheckpoints,
			vdf_difficulty = VDFDifficulty,
			next_vdf_difficulty = NextVDFDifficulty } = NonceLimiterInfo,
	%% Fields introduced by fork 2.7 are serialized only from that height on.
	{RebaseThresholdBin, DataPathBin, TXPathBin, DataPath2Bin, TXPath2Bin,
			ChunkHashBin, Chunk2HashBin, BlockTimeHistoryHashBin,
			VDFDifficultyBin, NextVDFDifficultyBin} =
		case Height >= ar_fork:height_2_7() of
			true ->
				{encode_int(RebaseThreshold, 16),
					ar_serialize:encode_bin(DataPath, 24),
					ar_serialize:encode_bin(TXPath, 24),
					ar_serialize:encode_bin(DataPath2, 24),
					ar_serialize:encode_bin(TXPath2, 24),
					<< ChunkHash:32/binary >>,
					ar_serialize:encode_bin(Chunk2Hash, 8),
					<< BlockTimeHistoryHash:32/binary >>,
					ar_serialize:encode_int(VDFDifficulty, 8),
					ar_serialize:encode_int(NextVDFDifficulty, 8)};
			false ->
				{<<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>, <<>>}
		end,
	%% Fields introduced by fork 2.8.
	{PackingDifficultyBin, UnpackedChunkHashBin, UnpackedChunk2HashBin} =
		case Height >= ar_fork:height_2_8() of
			true ->
				{<< PackingDifficulty:8 >>,
					ar_serialize:encode_bin(UnpackedChunkHash, 8),
					ar_serialize:encode_bin(UnpackedChunk2Hash, 8)};
			false ->
				{<<>>, <<>>, <<>>}
		end,
	%% Field introduced by fork 2.9.
	ReplicaFormatBin =
		case Height >= ar_fork:height_2_9() of
			true ->
				<< ReplicaFormat:8 >>;
			false ->
				<<>>
		end,
	%% The elements must be either fixed-size or separated by the size separators (
	%% the ar_serialize:encode_* functions).
	Segment = << (encode_bin(PrevH, 8))/binary, (encode_int(TS, 8))/binary,
			(encode_bin(Nonce2, 16))/binary, (encode_int(Height, 8))/binary,
			(encode_int(Diff, 16))/binary, (encode_int(CDiff, 16))/binary,
			(encode_int(LastRetarget, 8))/binary, (encode_bin(Hash, 8))/binary,
			(encode_int(BlockSize, 16))/binary, (encode_int(WeaveSize, 16))/binary,
			(encode_bin(Addr2, 8))/binary, (encode_bin(TXRoot, 8))/binary,
			(encode_bin(WalletList, 8))/binary,
			(encode_bin(HashListMerkle, 8))/binary,
			(encode_int(RewardPool, 8))/binary,
			(encode_int(Packing_2_5_Threshold, 8))/binary,
			(encode_int(StrictChunkThreshold, 8))/binary,
			(encode_int(RateDividend, 8))/binary,
			(encode_int(RateDivisor, 8))/binary,
			(encode_int(ScheduledRateDividend, 8))/binary,
			(encode_int(ScheduledRateDivisor, 8))/binary,
			(encode_bin_list(Tags, 16, 16))/binary,
			(encode_bin_list([GetTXID(TX) || TX <- TXs], 16, 8))/binary,
			(encode_int(Reward, 8))/binary,
			(encode_int(RecallByte, 16))/binary,
			(encode_bin(HashPreimage, 8))/binary,
			(encode_int(RecallByte2, 16))/binary,
			(encode_bin(RewardKey2, 16))/binary,
			(encode_int(PartitionNumber, 8))/binary,
			Output:32/binary, N:64, Seed:48/binary, NextSeed:48/binary,
			PartitionUpperBound:256, NextPartitionUpperBound:256,
			(encode_bin(PrevOutput, 8))/binary,
			(length(Steps)):16, (iolist_to_binary(Steps))/binary,
			(length(LastStepCheckpoints)):16,
			(iolist_to_binary(LastStepCheckpoints))/binary,
			(encode_bin(PreviousSolutionHash, 8))/binary,
			(encode_int(PricePerGiBMinute, 8))/binary,
			(encode_int(ScheduledPricePerGiBMinute, 8))/binary,
			RewardHistoryHash:32/binary,
			(encode_int(DebtSupply, 8))/binary,
			KryderPlusRateMultiplier:24, KryderPlusRateMultiplierLatch:8,
			Denomination:24,
			(encode_int(RedenominationHeight, 8))/binary,
			(ar_serialize:encode_double_signing_proof(DoubleSigningProof,
					Height))/binary,
			(encode_int(PrevCDiff, 16))/binary,
			RebaseThresholdBin/binary, DataPathBin/binary, TXPathBin/binary,
			DataPath2Bin/binary, TXPath2Bin/binary, ChunkHashBin/binary,
			Chunk2HashBin/binary, BlockTimeHistoryHashBin/binary,
			VDFDifficultyBin/binary, NextVDFDifficultyBin/binary,
			PackingDifficultyBin/binary, UnpackedChunkHashBin/binary,
			UnpackedChunk2HashBin/binary, ReplicaFormatBin/binary >>,
	crypto:hash(sha256, Segment).

%% @doc Compute the block identifier from the signed hash and block signature.
indep_hash2(SignedH, Signature) ->
	crypto:hash(sha384, << SignedH:32/binary, Signature/binary >>).

%% @doc Compute the block identifier of a pre-2.6 block.
indep_hash(BDS, B) ->
	case B#block.height >= ar_fork:height_2_4() of
		true ->
			ar_deep_hash:hash([BDS, B#block.hash, B#block.nonce,
					ar_block:poa_to_list(B#block.poa)]);
		false ->
			ar_deep_hash:hash([BDS, B#block.hash, B#block.nonce])
	end.

%% @doc Return the signed block signature preimage.
get_block_signature_preimage(CDiff, PrevCDiff, Preimage, Height) ->
	EncodedCDiff = ar_serialize:encode_int(CDiff, 16),
	EncodedPrevCDiff = ar_serialize:encode_int(PrevCDiff, 16),
	SignaturePreimage = << EncodedCDiff/binary, EncodedPrevCDiff/binary,
			Preimage/binary >>,
	case Height >= ar_fork:height_2_9() of
		false ->
			SignaturePreimage;
		true ->
			%% From fork 2.9, 32 zero bytes are prepended to the preimage.
			<< 0:(32 * 8), SignaturePreimage/binary >>
	end.

%% @doc Verify the block signature.
%% RSA-signed block: both the signature and the public key must have the exact
%% RSA size, and the reward address must be derived from the signing key.
verify_signature(BlockPreimage, PrevCDiff, #block{
			signature = Signature,
			reward_key = {?RSA_KEY_TYPE, Pub} = RewardKey,
			reward_addr = RewardAddr,
			previous_solution_hash = PrevSolutionH,
			cumulative_diff = CDiff,
			height = Height })
		when byte_size(Signature) == ?RSA_BLOCK_SIG_SIZE,
				byte_size(Pub) == ?RSA_BLOCK_SIG_SIZE ->
	%% The signed preimage commits to the previous solution hash, the block
	%% preimage, and (via get_block_signature_preimage/4) both cumulative
	%% difficulties.
	SignaturePreimage = get_block_signature_preimage(CDiff, PrevCDiff,
			<< PrevSolutionH/binary, BlockPreimage/binary >>, Height),
	ar_wallet:to_address(RewardKey) == RewardAddr
			andalso ar_wallet:verify(RewardKey, SignaturePreimage, Signature);
%% ECDSA-signed block: only accepted from the 2.9 fork onwards.
verify_signature(BlockPreimage, PrevCDiff, #block{
			signature = Signature,
			reward_key = {?ECDSA_KEY_TYPE, Pub} = RewardKey,
			reward_addr = RewardAddr,
			previous_solution_hash = PrevSolutionH,
			cumulative_diff = CDiff,
			height = Height })
		when byte_size(Signature) == ?ECDSA_SIG_SIZE,
				byte_size(Pub) == ?ECDSA_PUB_KEY_SIZE ->
	SignaturePreimage = get_block_signature_preimage(CDiff, PrevCDiff,
			<< PrevSolutionH/binary, BlockPreimage/binary >>, Height),
	case Height >= ar_fork:height_2_9() of
		true ->
			ar_wallet:to_address(RewardKey) == RewardAddr
					andalso ar_wallet:verify(RewardKey, SignaturePreimage, Signature);
		false ->
			%% ECDSA keys are invalid before the 2.9 fork.
			false
	end;
%% Any other key type or malformed signature/key sizes: invalid.
verify_signature(_BlockPreimage, _PrevCDiff, _B) ->
	false.

%% @doc Return the key suitable for ar_wallet:sign/3 from the given public key.
%% Before the 2.9 fork only the default key type is produced; afterwards the
%% type is inferred from the public key size.
get_reward_key(Pub, Height) ->
	case Height >= ar_fork:height_2_9() of
		false ->
			{?DEFAULT_KEY_TYPE, Pub};
		true ->
			case byte_size(Pub) of
				?ECDSA_PUB_KEY_SIZE ->
					{?ECDSA_KEY_TYPE, Pub};
				_ ->
					{?RSA_KEY_TYPE, Pub}
			end
	end.

%% @doc Generate a block data segment for a pre-2.6 block. It is combined with a nonce
%% when computing a solution candidate.
generate_block_data_segment(B) ->
	generate_block_data_segment(generate_block_data_segment_base(B), B).

%% @doc Generate a pre-2.6 block data segment given the computed "base".
generate_block_data_segment(BDSBase, B) ->
	%% The time-dependent fields are appended to the precomputed base so that
	%% miners can refresh the segment cheaply while mining.
	Props = [
		BDSBase,
		integer_to_binary(B#block.timestamp),
		integer_to_binary(B#block.last_retarget),
		integer_to_binary(B#block.diff),
		integer_to_binary(B#block.cumulative_diff),
		integer_to_binary(B#block.reward_pool),
		B#block.wallet_list,
		B#block.hash_list_merkle
	],
	ar_deep_hash:hash(Props).

%% @doc Generate a hash, which is used to produce a block data segment
%% when combined with the time-dependent parameters, which frequently
%% change during mining - timestamp, last retarget timestamp, difficulty,
%% cumulative difficulty, (before the fork 2.4, also miner's wallet, reward pool).
%% Also excludes the merkle root of the block index, which is hashed with the rest
%% as the last step - it was used before the fork 2.4 to allow verifiers to quickly
%% validate PoW against the current state. After the fork 2.4, the hash of the
%% previous block prefixes the solution hash preimage of the new block.
generate_block_data_segment_base(B) ->
	%% TXs may be given either as identifiers or as full records.
	GetTXID = fun(TXID) when is_binary(TXID) -> TXID; (TX) -> TX#tx.id end,
	case B#block.height >= ar_fork:height_2_4() of
		true ->
			Props = [
				integer_to_binary(B#block.height),
				B#block.previous_block,
				B#block.tx_root,
				lists:map(GetTXID, B#block.txs),
				integer_to_binary(B#block.block_size),
				integer_to_binary(B#block.weave_size),
				%% reward_addr may be the atom unclaimed (genesis of a new weave).
				case B#block.reward_addr of
					unclaimed ->
						<<"unclaimed">>;
					_ ->
						B#block.reward_addr
				end,
				encode_tags(B)
			],
			Props2 =
				case B#block.height >= ar_fork:height_2_5() of
					true ->
						{RateDividend, RateDivisor} = B#block.usd_to_ar_rate,
						{ScheduledRateDividend, ScheduledRateDivisor} =
								B#block.scheduled_usd_to_ar_rate,
						%% The 2.5 pricing and packing fields are prepended.
						[
							integer_to_binary(RateDividend),
							integer_to_binary(RateDivisor),
							integer_to_binary(ScheduledRateDividend),
							integer_to_binary(ScheduledRateDivisor),
							integer_to_binary(B#block.packing_2_5_threshold),
							integer_to_binary(B#block.strict_data_split_threshold)
							| Props
						];
					false ->
						Props
				end,
			ar_deep_hash:hash(Props2);
		false ->
			%% Pre-2.4 blocks also include the proof of access.
			ar_deep_hash:hash([
				integer_to_binary(B#block.height),
				B#block.previous_block,
				B#block.tx_root,
				lists:map(GetTXID, B#block.txs),
				integer_to_binary(B#block.block_size),
				integer_to_binary(B#block.weave_size),
				case B#block.reward_addr of
					unclaimed ->
						<<"unclaimed">>;
					_ ->
						B#block.reward_addr
				end,
				encode_tags(B),
				poa_to_list(B#block.poa)
			])
	end.

%% @doc Return {RecallRange1Start, RecallRange2Start} - the start offsets
%% of the two recall ranges.
-ifdef(LOCALNET).
get_recall_range(H0, PartitionNumber, PartitionUpperBound, not_set, not_set) ->
	%% The first eight bytes of H0 pick the offset of recall range 1 inside the
	%% partition; the full H0 picks the start of recall range 2 within the weave.
	RecallRange1Offset = binary:decode_unsigned(binary:part(H0, 0, 8), big),
	%% Note: rem binds tighter than +, so only the offset is reduced modulo
	%% the (possibly truncated) partition size.
	RecallRange1Start = PartitionNumber * ar_block:partition_size()
			+ RecallRange1Offset rem min(ar_block:partition_size(), PartitionUpperBound),
	RecallRange2Start = binary:decode_unsigned(H0, big) rem PartitionUpperBound,
	{RecallRange1Start, RecallRange2Start};
%% In LOCALNET mode, RecallRange1 and RecallRange2 are passed through directly.
%% In normal mode, they are computed from H0 and PartitionNumber.
get_recall_range(_H0, _PartitionNumber, _PartitionUpperBound, RecallRange1, RecallRange2) ->
	{RecallRange1, RecallRange2}.
-else.
get_recall_range(H0, PartitionNumber, PartitionUpperBound, _RecallRange1, _RecallRange2) ->
	RecallRange1Offset = binary:decode_unsigned(binary:part(H0, 0, 8), big),
	%% Note: rem binds tighter than +, so only the offset is reduced modulo
	%% the (possibly truncated) partition size.
	RecallRange1Start = PartitionNumber * ar_block:partition_size()
			+ RecallRange1Offset rem min(ar_block:partition_size(), PartitionUpperBound),
	RecallRange2Start = binary:decode_unsigned(H0, big) rem PartitionUpperBound,
	{RecallRange1Start, RecallRange2Start}.
-endif.

%% @doc Compatibility version for 3 arguments.
get_recall_range(H0, PartitionNumber, PartitionUpperBound) ->
	get_recall_range(H0, PartitionNumber, PartitionUpperBound, not_set, not_set).

%% @doc Return the global VDF step number recorded in the block's nonce limiter info.
vdf_step_number(#block{ nonce_limiter_info = Info }) ->
	Info#nonce_limiter_info.global_step_number.
%% @doc Map a packing difficulty and replica format to the packing scheme
%% for the given mining address.
get_packing(PackingDifficulty, MiningAddress, 0) when PackingDifficulty >= 1 ->
	{composite, MiningAddress, PackingDifficulty};
get_packing(_PackingDifficulty, MiningAddress, 0) ->
	{spora_2_6, MiningAddress};
get_packing(_PackingDifficulty, MiningAddress, 1) ->
	{replica_2_9, MiningAddress}.

%% @doc Return true if the given packing difficulty and replica format pair
%% is acceptable at the given height.
validate_replica_format(Height, PackingDifficulty, 1) ->
	%% The 2.9 replica format requires its dedicated packing difficulty.
	Height >= ar_fork:height_2_9()
			andalso PackingDifficulty == ?REPLICA_2_9_PACKING_DIFFICULTY;
validate_replica_format(Height, 0, 0) ->
	%% Support for spora_2_6 discontinued at
	%% ar_fork:height_2_8() + ?SPORA_PACKING_EXPIRATION_PERIOD_BLOCKS.
	Height - ?SPORA_PACKING_EXPIRATION_PERIOD_BLOCKS < ar_fork:height_2_8();
validate_replica_format(Height, CompositePackingDifficulty, 0) ->
	%% Composite packing (difficulty 1 through ?MAX_PACKING_DIFFICULTY) is
	%% accepted from the 2.8 fork until its post-2.9 expiration.
	Height - ?COMPOSITE_PACKING_EXPIRATION_PERIOD_BLOCKS < ar_fork:height_2_9()
			andalso Height >= ar_fork:height_2_8()
			andalso CompositePackingDifficulty =< ?MAX_PACKING_DIFFICULTY;
validate_replica_format(_Height, _PackingDifficulty, _ReplicaFormat) ->
	false.

%% @doc Return the recall range size for the given packing difficulty.
get_recall_range_size(0) ->
	?LEGACY_RECALL_RANGE_SIZE;
get_recall_range_size(PackingDifficulty) ->
	?RECALL_RANGE_SIZE div PackingDifficulty.

%% @doc Return the absolute recall byte addressed by the given nonce.
get_recall_byte(RecallRangeStart, Nonce, 0) ->
	%% Legacy packing: one nonce per chunk.
	RecallRangeStart + Nonce * ?DATA_CHUNK_SIZE;
get_recall_byte(RecallRangeStart, Nonce, _PackingDifficulty) ->
	%% Several consecutive nonces address sub-chunks of the same chunk.
	RecallRangeStart + (Nonce div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT) * ?DATA_CHUNK_SIZE.

%% @doc Return the number of bytes per sub-chunk. This also drives how far each mining nonce
%% increments the recall byte.
get_sub_chunk_size(0) ->
	?DATA_CHUNK_SIZE;
get_sub_chunk_size(_PackingDifficulty) ->
	?COMPOSITE_PACKING_SUB_CHUNK_SIZE.

%% @doc Return the number of mining nonces contained in each data chunk.
get_nonces_per_chunk(0) ->
	1;
get_nonces_per_chunk(_PackingDifficulty) ->
	?COMPOSITE_PACKING_SUB_CHUNK_COUNT.
%% @doc Return how many nonces fit into one recall range (at least one, to
%% accommodate tests where the recall range is smaller than a sub-chunk).
get_nonces_per_recall_range(PackingDifficulty) ->
	RangeSize = get_recall_range_size(PackingDifficulty),
	SubChunkSize = get_sub_chunk_size(PackingDifficulty),
	max(1, RangeSize div SubChunkSize).

%% @doc For packing difficulty 0 (aka spora_2_6 packing), there is one nonce per chunk, so
%% the max nonce is the same as the max chunk number. For packing difficulty >= 1 (aka
%% composite packing and the 2.9 replication), there are ?COMPOSITE_PACKING_SUB_CHUNK_COUNT
%% nonces per chunk.
get_max_nonce(PackingDifficulty) ->
	%% The max(...) is included mostly for testing, where the recall range can be
	%% less than a chunk.
	PerChunk = get_nonces_per_chunk(PackingDifficulty),
	PerRange = get_nonces_per_recall_range(PackingDifficulty),
	max(PerChunk - 1, PerRange - 1).

%% @doc Return the 0-based sub-chunk index the mining nonce is pointing to.
%% -1 signals "no sub-chunks" for the legacy (difficulty 0) packing.
get_sub_chunk_index(0, _Nonce) ->
	-1;
get_sub_chunk_index(_PackingDifficulty, Nonce) ->
	Nonce rem ?COMPOSITE_PACKING_SUB_CHUNK_COUNT.

%% @doc Return Offset if it is smaller than or equal to ar_block:strict_data_split_threshold().
%% Otherwise, return the offset of the last byte of the chunk + the size of the padding.
-spec get_chunk_padded_offset(Offset :: non_neg_integer()) -> non_neg_integer().
get_chunk_padded_offset(Offset) ->
	case Offset =< ar_block:strict_data_split_threshold() of
		true ->
			Offset;
		false ->
			ar_poa:get_padded_offset(Offset, ar_block:strict_data_split_threshold())
	end.

%% @doc Return true if the given cumulative difficulty - previous cumulative difficulty
%% pairs satisfy the double signing condition.
-spec get_double_signing_condition(
	CDiff1 :: non_neg_integer(),
	PrevCDiff1 :: non_neg_integer(),
	CDiff2 :: non_neg_integer(),
	PrevCDiff2 :: non_neg_integer()
) -> boolean().
get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) ->
	%% Either both blocks claim the same cumulative difficulty, or each block's
	%% cumulative difficulty exceeds the other block's previous one.
	SameCDiff = CDiff1 == CDiff2,
	Overlapping = CDiff1 > PrevCDiff2 andalso CDiff2 > PrevCDiff1,
	SameCDiff orelse Overlapping.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Return true if the block's tags are within the protocol limits:
%% after the 2.5 fork, at most 2048 tags and at most 2048 encoded bytes;
%% before it, only the byte-size limit applies.
validate_tags_size(B) ->
	case B#block.height >= ar_fork:height_2_5() of
		true ->
			Tags = B#block.tags,
			validate_tags_length(Tags, 0) andalso byte_size(list_to_binary(Tags)) =< 2048;
		false ->
			byte_size(list_to_binary(B#block.tags)) =< 2048
	end.

%% Count the tags, bailing out as soon as more than 2048 are seen.
validate_tags_length(_, N) when N > 2048 ->
	false;
validate_tags_length([_ | Tags], N) ->
	validate_tags_length(Tags, N + 1);
validate_tags_length([], _) ->
	true.

%% Thin local aliases for the ar_serialize encoders.
encode_int(N, S) -> ar_serialize:encode_int(N, S).
encode_bin(N, S) -> ar_serialize:encode_bin(N, S).
encode_bin_list(L, LS, ES) -> ar_serialize:encode_bin_list(L, LS, ES).

%% @doc Compute the Merkle root of the given wallet tree. Two account layouts
%% are supported: the legacy {Balance, LastTX} pair hashed with ar_deep_hash,
%% and the {Balance, LastTX, Denomination, MiningPermission} tuple hashed with
%% SHA-384 over a length-prefixed preimage.
hash_wallet_list(WalletList) ->
	ar_patricia_tree:compute_hash(WalletList,
		fun	(Addr, {Balance, LastTX}) ->
				EncodedBalance = binary:encode_unsigned(Balance),
				ar_deep_hash:hash([Addr, EncodedBalance, LastTX]);
			(Addr, {Balance, LastTX, Denomination, MiningPermission}) ->
				MiningPermissionBin =
					case MiningPermission of
						true ->
							<<1>>;
						false ->
							<<0>>
					end,
				Preimage = << (ar_serialize:encode_bin(Addr, 8))/binary,
						(ar_serialize:encode_int(Balance, 8))/binary,
						(ar_serialize:encode_bin(LastTX, 8))/binary,
						(ar_serialize:encode_int(Denomination, 8))/binary,
						MiningPermissionBin/binary >>,
				crypto:hash(sha384, Preimage)
		end
	).

%% @doc Generate the TX tree and set the TX root for a block.
generate_tx_tree(B) ->
	SizeTaggedTXs = generate_size_tagged_list_from_txs(B#block.txs, B#block.height),
	SizeTaggedDataRoots = [{Root, Offset} || {{_, Root}, Offset} <- SizeTaggedTXs],
	generate_tx_tree(B, SizeTaggedDataRoots).

generate_tx_tree(B, SizeTaggedDataRoots) ->
	{Root, Tree} = ar_merkle:generate_tree(SizeTaggedDataRoots),
	B#block{ tx_tree = Tree, tx_root = Root }.
%% @doc Return the sorted transactions tagged with their end offsets.
%% After the 2.5 fork, when a transaction's data does not fill its last weave
%% bucket, a synthetic padding entry is appended so the Merkle tree encodes
%% the padded end offset.
generate_size_tagged_list_from_txs(TXs, Height) ->
	lists:reverse(
		element(2,
			lists:foldl(
				fun(TX, {Pos, List}) ->
					DataSize = TX#tx.data_size,
					End = Pos + DataSize,
					case Height >= ar_fork:height_2_5() of
						true ->
							Padding = ar_tx:get_weave_size_increase(DataSize, Height)
									- DataSize,
							%% Encode the padding information in the Merkle tree.
							case Padding > 0 of
								true ->
									PaddingRoot = ?PADDING_NODE_DATA_ROOT,
									{End + Padding, [
											{{padding, PaddingRoot}, End + Padding},
											{{TX#tx.id, get_tx_data_root(TX)}, End}
											| List]};
								false ->
									{End, [{{TX#tx.id, get_tx_data_root(TX)}, End}
											| List]}
							end;
						false ->
							{End, [{{TX#tx.id, get_tx_data_root(TX)}, End} | List]}
					end
				end,
				{0, []},
				lists:sort(TXs)
			)
		)
	).

%% @doc Find the appropriate block hash list for a block, from a block index.
generate_hash_list_for_block(_BlockOrHash, []) ->
	[];
generate_hash_list_for_block(B, BI) when ?IS_BLOCK(B) ->
	generate_hash_list_for_block(B#block.indep_hash, BI);
generate_hash_list_for_block(Hash, BI) ->
	do_generate_hash_list_for_block(Hash, BI).

%% Scan the block index for the given hash; the hash list is everything below it.
do_generate_hash_list_for_block(_, []) ->
	error(cannot_generate_hash_list);
do_generate_hash_list_for_block(IndepHash, [{IndepHash, _, _} | BI]) ->
	?BI_TO_BHL(BI);
do_generate_hash_list_for_block(IndepHash, [_ | Rest]) ->
	do_generate_hash_list_for_block(IndepHash, Rest).

%% Before the 2.5 fork, tags were serialized via ar_tx:tags_to_list/1;
%% afterwards they are hashed as-is.
encode_tags(B) ->
	case B#block.height >= ar_fork:height_2_5() of
		true ->
			B#block.tags;
		false ->
			ar_tx:tags_to_list(B#block.tags)
	end.

%% Flatten a proof of access record into the list shape used by ar_deep_hash.
poa_to_list(POA) ->
	[
		integer_to_binary(POA#poa.option),
		POA#poa.tx_path,
		POA#poa.data_path,
		POA#poa.chunk
	].

%% @doc Compute the 2.5 packing threshold.
get_packing_threshold(B, SearchSpaceUpperBound) ->
	#block{ height = Height, packing_2_5_threshold = PrevPackingThreshold } = B,
	Fork_2_5 = ar_fork:height_2_5(),
	case Height + 1 == Fork_2_5 of
		true ->
			%% At the fork block the threshold starts at the full search space
			%% upper bound and then shifts down every block.
			SearchSpaceUpperBound;
		false ->
			case Height + 1 > Fork_2_5 of
				true ->
					ar_block:shift_packing_2_5_threshold(PrevPackingThreshold);
				false ->
					%% The threshold is not defined before the fork.
					undefined
			end
	end.

%% @doc Move the fork 2.5 packing threshold.
%% The threshold moves down by ?PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND chunks
%% per second of target block time, never going below zero.
shift_packing_2_5_threshold(0) ->
	0;
shift_packing_2_5_threshold(Threshold) ->
	TargetTime = ar_testnet:target_block_time(ar_fork:height_2_5()),
	Shift = (?DATA_CHUNK_SIZE) * (?PACKING_2_5_THRESHOLD_CHUNKS_PER_SECOND) * TargetTime,
	max(0, Threshold - Shift).

%% @doc Return true if the block's tx_root matches the root recomputed from its TXs.
verify_tx_root(B) ->
	B#block.tx_root == generate_tx_root_for_block(B).

%% @doc Given a list of TXs in various formats, or a block, generate the
%% correct TX merkle tree root.
generate_tx_root_for_block(B) when is_record(B, block) ->
	generate_tx_root_for_block(B#block.txs, B#block.height).

generate_tx_root_for_block(TXIDs = [TXID | _], Height) when is_binary(TXID) ->
	%% Only identifiers were given - read the full records from storage first.
	generate_tx_root_for_block(ar_storage:read_tx(TXIDs), Height);
generate_tx_root_for_block([], _Height) ->
	<<>>;
generate_tx_root_for_block(TXs = [TX | _], Height) when is_record(TX, tx) ->
	SizeTaggedTXs = generate_size_tagged_list_from_txs(TXs, Height),
	SizeTaggedDataRoots = [{Root, Offset} || {{_, Root}, Offset} <- SizeTaggedTXs],
	{Root, _Tree} = ar_merkle:generate_tree(SizeTaggedDataRoots),
	Root.

%% Format-2 transactions carry their data root explicitly; for other formats it
%% is derived by building the chunk tree over the transaction data.
get_tx_data_root(#tx{ format = 2, data_root = DataRoot }) ->
	DataRoot;
get_tx_data_root(TX) ->
	(ar_tx:generate_chunk_tree(TX))#tx.data_root.

%%%===================================================================
%%% Tests.
%%%===================================================================

hash_list_gen_test_() ->
	{timeout, 120, fun test_hash_list_gen/0}.

%% Mine two blocks on a fresh weave and check the hash lists derived from the
%% block index match the expected ancestor hashes.
test_hash_list_gen() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:mine(),
	BI1 = ar_test_node:wait_until_height(main, 1),
	B1 = ar_storage:read_block(hd(BI1)),
	ar_test_node:mine(),
	BI2 = ar_test_node:wait_until_height(main, 2),
	B2 = ar_storage:read_block(hd(BI2)),
	?assertEqual([B0#block.indep_hash], generate_hash_list_for_block(B1, BI2)),
	?assertEqual([H || {H, _, _} <- BI1],
			generate_hash_list_for_block(B2#block.indep_hash, BI2)).
generate_size_tagged_list_from_txs_test() ->
	Fork_2_5 = ar_fork:height_2_5(),
	%% Empty TX lists produce empty size-tagged lists both before and after 2.5.
	?assertEqual([], generate_size_tagged_list_from_txs([], Fork_2_5)),
	?assertEqual([], generate_size_tagged_list_from_txs([], Fork_2_5 - 1)),
	EmptyV1Root = (ar_tx:generate_chunk_tree(#tx{}))#tx.data_root,
	?assertEqual([{{<<>>, EmptyV1Root}, 0}],
			generate_size_tagged_list_from_txs([#tx{}], Fork_2_5)),
	?assertEqual([{{<<>>, <<>>}, 0}],
			generate_size_tagged_list_from_txs([#tx{ format = 2 }], Fork_2_5)),
	?assertEqual([{{<<>>, <<>>}, 0}],
			generate_size_tagged_list_from_txs([#tx{ format = 2}], Fork_2_5 - 1)),
	%% After 2.5 a 1-byte TX is padded up to the 256 KiB bucket boundary.
	?assertEqual([{{<<>>, <<"r">>}, 1}, {{padding, <<>>}, 262144}],
			generate_size_tagged_list_from_txs(
				[#tx{ format = 2, data_root = <<"r">>, data_size = 1 }], Fork_2_5)),
	%% Several TXs: zero-size TXs share the end offset of the preceding bucket.
	?assertEqual([
			{{<<"1">>, <<"r">>}, 1}, {{padding, <<>>}, 262144},
			{{<<"2">>, <<>>}, 262144},
			{{<<"3">>, <<>>}, 262144 * 5},
			{{<<"4">>, <<>>}, 262144 * 5},
			{{<<"5">>, <<>>}, 262144 * 5},
			{{<<"6">>, <<>>}, 262144 * 6}],
			generate_size_tagged_list_from_txs([
				#tx{ id = <<"1">>, format = 2, data_root = <<"r">>, data_size = 1 },
				#tx{ id = <<"2">>, format = 2 },
				#tx{ id = <<"3">>, format = 2, data_size = 262144 * 4 },
				#tx{ id = <<"4">>, format = 2 },
				#tx{ id = <<"5">>, format = 2 },
				#tx{ id = <<"6">>, format = 2, data_size = 262144 }], Fork_2_5)).

%% Manual benchmark entry points (not run by the automated test suite).
test_wallet_list_performance() ->
	test_wallet_list_performance(250_000, ar_deep_hash, mixed).

test_wallet_list_performance(Length) ->
	test_wallet_list_performance(Length, ar_deep_hash, mixed).

test_wallet_list_performance(Length, Algo) ->
	test_wallet_list_performance(Length, Algo, mixed).
%% @doc Validate the benchmark arguments, then delegate to
%% test_wallet_list_performance2/3.
%% Algo: ar_deep_hash | no_ar_deep_hash_sha384 | sha256 - the account hashing scheme.
%% Denominations: old | new | mixed - which account-record layout(s) to insert.
test_wallet_list_performance(Length, Algo, Denominations) ->
	SupportedAlgos = [ar_deep_hash, no_ar_deep_hash_sha384, sha256],
	case lists:member(Algo, SupportedAlgos) of
		false ->
			io:format("Supported Algo: ~p~n", [SupportedAlgos]);
		true ->
			SupportedDenominations = [old, new, mixed],
			case lists:member(Denominations, SupportedDenominations) of
				false ->
					%% Fixed: this message previously said "Supported Algo"
					%% while listing the supported denominations.
					io:format("Supported Denominations: ~p~n", [SupportedDenominations]);
				true ->
					test_wallet_list_performance2(Length, Algo, Denominations)
			end
	end.

%% @doc Run the wallet-list benchmark: build a patricia tree of Length random
%% wallets, serialize it to JSON, compute the root hash from scratch, then
%% measure incremental hash recomputation after 2000 inserts and after a
%% single insert. Prints timings to standard output.
test_wallet_list_performance2(Length, Algo, Denominations) ->
	io:format("# ~B wallets, denominations: ~p, algo: ~p~n", [Length, Denominations, Algo]),
	io:format("============~n"),
	WL = [random_wallet() || _ <- lists:seq(1, Length)],
	{Time1, T1} = timer:tc(
		fun() ->
			lists:foldl(
				fun({A, B, LastTX}, Acc) ->
					case Denominations of
						old ->
							ar_patricia_tree:insert(A, {B, LastTX}, Acc);
						new ->
							ar_patricia_tree:insert(A,
									{B, LastTX, 1 + rand:uniform(10), true}, Acc);
						mixed ->
							%% Insert a random mix of the two account layouts.
							case rand:uniform(2) == 1 of
								true ->
									ar_patricia_tree:insert(A, {B, LastTX}, Acc);
								false ->
									ar_patricia_tree:insert(A,
											{B, LastTX, 1 + rand:uniform(10), true},
											Acc)
							end
					end
				end,
				ar_patricia_tree:new(),
				WL
			)
		end
	),
	io:format("tree buildup | ~f seconds~n", [Time1 / 1000000]),
	{Time2, Binary} = timer:tc(
		fun() ->
			ar_serialize:jsonify(
				ar_serialize:wallet_list_to_json_struct(unclaimed, false, T1)
			)
		end
	),
	io:format("serialization | ~f seconds~n", [Time2 / 1000000]),
	io:format(" | ~B bytes~n", [byte_size(Binary)]),
	%% The hash function mirrors hash_wallet_list/1, parameterized by Algo so the
	%% legacy and preimage-based schemes can be compared.
	ComputeHashFun = fun
		(Addr, {Balance, LastTX}) ->
			case Algo of
				ar_deep_hash ->
					EncodedBalance = binary:encode_unsigned(Balance),
					ar_deep_hash:hash([Addr, EncodedBalance, LastTX]);
				_ ->
					Denomination = 0,
					MiningPermissionBin = <<1>>,
					Preimage = << (ar_serialize:encode_bin(Addr, 8))/binary,
							(ar_serialize:encode_int(Balance, 8))/binary,
							(ar_serialize:encode_bin(LastTX, 8))/binary,
							(ar_serialize:encode_int(Denomination, 8))/binary,
							MiningPermissionBin/binary >>,
					case Algo of
						no_ar_deep_hash_sha384 ->
							crypto:hash(sha384, Preimage);
						sha256 ->
							crypto:hash(sha256, Preimage)
					end
			end;
		(Addr, {Balance, LastTX, Denomination, MiningPermission}) ->
			MiningPermissionBin =
				case MiningPermission of
					true ->
						<<1>>;
					false ->
						<<0>>
				end,
			Preimage = << (ar_serialize:encode_bin(Addr, 8))/binary,
					(ar_serialize:encode_int(Balance, 8))/binary,
					(ar_serialize:encode_bin(LastTX, 8))/binary,
					(ar_serialize:encode_int(Denomination, 8))/binary,
					MiningPermissionBin/binary >>,
			case Algo of
				sha256 ->
					crypto:hash(sha256, Preimage);
				_ ->
					crypto:hash(sha384, Preimage)
			end
	end,
	{Time3, {_, T2, _}} = timer:tc(fun() ->
			ar_patricia_tree:compute_hash(T1, ComputeHashFun) end),
	io:format("root hash from scratch | ~f seconds~n", [Time3 / 1000000]),
	{Time4, T3} = timer:tc(
		fun() ->
			lists:foldl(
				fun({A, B, LastTX}, Acc) ->
					ar_patricia_tree:insert(A, {B, LastTX}, Acc)
				end,
				T2,
				[random_wallet() || _ <- lists:seq(1, 2000)]
			)
		end
	),
	io:format("2000 inserts | ~f seconds~n", [Time4 / 1000000]),
	{Time5, _} = timer:tc(fun() -> ar_patricia_tree:compute_hash(T3, ComputeHashFun) end),
	io:format("recompute hash after 2k inserts | ~f seconds~n", [Time5 / 1000000]),
	{Time6, T4} = timer:tc(
		fun() ->
			{A, B, LastTX} = random_wallet(),
			ar_patricia_tree:insert(A, {B, LastTX}, T2)
		end
	),
	io:format("1 insert | ~f seconds~n", [Time6 / 1000000]),
	{Time7, _} = timer:tc(fun() -> ar_patricia_tree:compute_hash(T4, ComputeHashFun) end),
	io:format("recompute hash after 1 insert | ~f seconds~n", [Time7 / 1000000]).

%% @doc Return a random {Address, Balance, LastTX} triple for benchmarking.
random_wallet() ->
	{
		crypto:strong_rand_bytes(32),
		rand:uniform(1000000000000000000),
		crypto:strong_rand_bytes(32)
	}.

%% Run test_validate_replica_format/0 with deterministic mocked fork heights.
validate_replica_format_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
				{ar_fork, height_2_8, fun() -> 10 end},
				{ar_fork, height_2_9, fun() -> 20 end}
			],
			fun test_validate_replica_format/0, 30)
	].
%% Exercise validate_replica_format/3 across all fork eras, with the 2.8 fork
%% mocked at height 10 and the 2.9 fork at height 20 (see
%% validate_replica_format_test_/0).
test_validate_replica_format() ->
	%% pre 2.8, only spora_2_6 is supported
	?assertEqual(true, validate_replica_format(0, 0, 0)),
	?assertEqual(false, validate_replica_format(0, 1, 0)),
	?assertEqual(false, validate_replica_format(0, 33, 0)),
	?assertEqual(false, validate_replica_format(0, 25, 0)),
	?assertEqual(false, validate_replica_format(0, 0, 1)),
	?assertEqual(false, validate_replica_format(0, 1, 1)),
	?assertEqual(false, validate_replica_format(0, 33, 1)),
	?assertEqual(false, validate_replica_format(0, 25, 1)),
	%% post-2.8, pre-2.9, spora_2_6 and composite are supported
	?assertEqual(true, validate_replica_format(15, 0, 0)),
	?assertEqual(true, validate_replica_format(15, 1, 0)),
	?assertEqual(false, validate_replica_format(15, 33, 0)),
	?assertEqual(false, validate_replica_format(15, 100, 0)),
	?assertEqual(false, validate_replica_format(15, 0, 1)),
	?assertEqual(false, validate_replica_format(15, 1, 1)),
	?assertEqual(false, validate_replica_format(15, 33, 1)),
	?assertEqual(false, validate_replica_format(15, 25, 1)),
	%% post-2.9, pre-composite expiration
	?assertEqual(true, validate_replica_format(25, 0, 0)),
	?assertEqual(true, validate_replica_format(25, 1, 0)),
	?assertEqual(false, validate_replica_format(25, 33, 0)),
	?assertEqual(false, validate_replica_format(25, 100, 0)),
	?assertEqual(false, validate_replica_format(25, 0, 1)),
	?assertEqual(false, validate_replica_format(25, 1, 1)),
	?assertEqual(false, validate_replica_format(25, 33, 1)),
	?assertEqual(true, validate_replica_format(25, 2, 1)), %% 2 in tests.
	%% post-2.9, post-composite expiration
	CompositeExpiration = ar_fork:height_2_9() + ?COMPOSITE_PACKING_EXPIRATION_PERIOD_BLOCKS,
	?assertEqual(true, validate_replica_format(CompositeExpiration, 0, 0)),
	?assertEqual(false, validate_replica_format(CompositeExpiration, 1, 0)),
	?assertEqual(false, validate_replica_format(CompositeExpiration, 33, 0)),
	?assertEqual(false, validate_replica_format(CompositeExpiration, 25, 0)),
	?assertEqual(false, validate_replica_format(CompositeExpiration, 0, 1)),
	?assertEqual(false, validate_replica_format(CompositeExpiration, 1, 1)),
	?assertEqual(false, validate_replica_format(CompositeExpiration, 33, 1)),
	?assertEqual(true, validate_replica_format(CompositeExpiration, 2, 1)),
	%% post-2.9, post-spora expiration
	SporaExpiration = ar_fork:height_2_8() + ?SPORA_PACKING_EXPIRATION_PERIOD_BLOCKS,
	?assertEqual(false, validate_replica_format(SporaExpiration, 0, 0)),
	?assertEqual(false, validate_replica_format(SporaExpiration, 1, 0)),
	?assertEqual(false, validate_replica_format(SporaExpiration, 33, 0)),
	?assertEqual(false, validate_replica_format(SporaExpiration, 25, 0)),
	?assertEqual(false, validate_replica_format(SporaExpiration, 0, 1)),
	?assertEqual(false, validate_replica_format(SporaExpiration, 1, 1)),
	?assertEqual(false, validate_replica_format(SporaExpiration, 33, 1)),
	?assertEqual(true, validate_replica_format(SporaExpiration, 2, 1)).



================================================
FILE: apps/arweave/src/ar_block_cache.erl
================================================
%%% @doc The module maintains a DAG of blocks that have passed the PoW validation, in ETS.
%%% NOTE It is not safe to call functions which modify the state from different processes.
-module(ar_block_cache).
-export([new/2, initialize_from_list/2, add/2, mark_nonce_limiter_validated/2,
		add_validated/2, mark_tip/2, get/2, get_earliest_not_validated_from_longest_chain/1,
		get_longest_chain_cache/1, get_block_and_status/2, remove/2, get_checkpoint_block/1,
		prune/2, get_by_solution_hash/5, is_known_solution_hash/2, get_siblings/2,
		get_fork_blocks/2, update_timestamp/3, get_blocks_by_miner/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%% The expiration time in seconds for every "alternative" block (a block with non-unique
%% solution).
-define(ALTERNATIVE_BLOCK_EXPIRATION_TIME_SECONDS, 10).

%% @doc Block validation status
%% on_chain: block is validated and belongs to the tip fork
%% validated: block is validated but does not belong to the tip fork
%% not_validated: block is not validated yet
%% none: null status

%% @doc ETS table: block_cache
%% {block, BlockHash} => {#block{}, block_status(), Timestamp, set(Children)}
%% - Children is a set of all blocks that have this block as their previous block. Children is
%% used to track branches in the chain that fork off this block (i.e. they are DAG children)
%% max_cdiff => {CDiff, BlockHash}
%% - maximum cumulative difficulty encountered and its BlockHash. This is used to determine
%% whether we need to switch from the current tip to a fork tip.
%% {solution, SolutionHash} => set(BlockHash)
%% - all blocks with the same solution hash
%% longest_chain => [{BlockHash, [TXIDs]}]
%% - the top ar_block:get_consensus_window_size() blocks of the longest chain
%% tip -> BlockHash
%% - current block chain tip
%% links -> gb_set({Height, BlockHash})
%% - all blocks in the cache sorted by height. This is used when pruning the cache and
%% discarding all blocks below a certain height (and all off-chain children of those blocks
%% regardless of their height)

%%%===================================================================
%%% Public API.
%%%===================================================================

%% @doc Create a cache, initialize it with the given block. The block is marked as on-chain
%% and as a tip block.
new(Tab, B) ->
	#block{ indep_hash = H, hash = SolutionH, cumulative_diff = CDiff, height = Height } = B,
	%% Drop any previous state - the cache is rebuilt from scratch.
	ets:delete_all_objects(Tab),
	ar_ignore_registry:add(H),
	insert(Tab, [
		{max_cdiff, {CDiff, H}},
		{links, gb_sets:from_list([{Height, H}])},
		{{solution, SolutionH}, sets:from_list([H])},
		{tip, H},
		{{block, H}, {B, on_chain, erlang:timestamp(), sets:new()}}
	]).

%% @doc Initialize a cache from the given list of validated blocks. Mark the latest
%% block as the tip block. The given blocks must be sorted from newest to oldest.
initialize_from_list(Tab, [B]) ->
	new(Tab, B);
initialize_from_list(Tab, [#block{ indep_hash = H } = B | Blocks]) ->
	%% Recurse down to the oldest block first, then layer the newer blocks on top.
	initialize_from_list(Tab, Blocks),
	add_validated(Tab, B),
	mark_tip(Tab, H).

%% @doc Add a block to the cache. The block is marked as not validated yet.
%% If the block is already present in the cache and has not been yet validated, it is
%% overwritten. If the block is validated, we do nothing and issue a warning.
add(Tab, #block{ indep_hash = H, hash = SolutionH, previous_block = PrevH,
		height = Height } = B) ->
	case ets:lookup(Tab, {block, H}) of
		[] ->
			ar_ignore_registry:add(H),
			%% Expire stale alternative blocks sharing this solution before
			%% registering the new one.
			RemainingHs = remove_expired_alternative_blocks(Tab, SolutionH),
			SolutionSet = sets:from_list([H | RemainingHs]),
			[{_, Set}] = ets:lookup(Tab, links),
			[{_, {PrevB, PrevStatus, PrevTimestamp, Children}}] =
					ets:lookup(Tab, {block, PrevH}),
			Set2 = gb_sets:insert({Height, H}, Set),
			Status = {not_validated, awaiting_nonce_limiter_validation},
			%% If CDiff > MaxCDiff it means this block belongs to the heaviest fork we're aware
			%% of. If our current tip is not on this fork, ar_node_worker may switch to this fork.
			insert(Tab, [
				{max_cdiff, maybe_increase_max_cdiff(Tab, B, Status)},
				{links, Set2},
				{{solution, SolutionH}, SolutionSet},
				{{block, H}, {B, Status, erlang:timestamp(), sets:new()}},
				{{block, PrevH}, {PrevB, PrevStatus, PrevTimestamp,
						sets:add_element(H, Children)}}
			]);
		[{_, {_B, {not_validated, _} = CurrentStatus, CurrentTimestamp, Children}}] ->
			%% Overwrite a not-yet-validated duplicate, keeping its status,
			%% timestamp and children.
			insert(Tab, {{block, H}, {B, CurrentStatus, CurrentTimestamp, Children}});
		_ ->
			?LOG_WARNING([{event, attempt_to_update_already_validated_cached_block},
					{h, ar_util:encode(H)}, {height, Height},
					{previous_block, ar_util:encode(PrevH)}]),
			ok
	end.

%% @doc Check all blocks that share the same solution and remove those that expired.
remove_expired_alternative_blocks(Tab, SolutionH) ->
	SolutionSet =
		case ets:lookup(Tab, {solution, SolutionH}) of
			[] ->
				sets:new();
			[{_, SolutionSet2}] ->
				SolutionSet2
		end,
	remove_expired_alternative_blocks2(Tab, sets:to_list(SolutionSet)).

%% Return the block hashes that survive expiration. On-chain blocks never expire;
%% off-chain blocks live longer the taller the fork built on top of them.
remove_expired_alternative_blocks2(_Tab, []) ->
	[];
remove_expired_alternative_blocks2(Tab, [H | Hs]) ->
	[{_, {_B, Status, Timestamp, Children}}] = ets:lookup(Tab, {block, H}),
	case Status of
		on_chain ->
			[H | remove_expired_alternative_blocks2(Tab, Hs)];
		_ ->
			LifetimeSeconds = get_alternative_block_lifetime(Tab, Children),
			{MegaSecs, Secs, MicroSecs} = Timestamp,
			ExpirationTimestamp = {MegaSecs, Secs + LifetimeSeconds, MicroSecs},
			case timer:now_diff(erlang:timestamp(), ExpirationTimestamp) >= 0 of
				true ->
					?LOG_INFO([{event, removing_expired_alternative_block_from_cache},
							{block, ar_util:encode(H)}, {status, Status}]),
					remove(Tab, H),
					remove_expired_alternative_blocks2(Tab, Hs);
				false ->
					[H | remove_expired_alternative_blocks2(Tab, Hs)]
			end
	end.

%% The lifetime grows with the length of the fork built on top of the block.
get_alternative_block_lifetime(Tab, Children) ->
	ForkLen = get_fork_length(Tab, sets:to_list(Children)),
	(?ALTERNATIVE_BLOCK_EXPIRATION_TIME_SECONDS) * ForkLen.
%% Return 1 plus the length of the longest descendant branch among Branches
%% (1 when Branches is empty).
get_fork_length(Tab, Branches) when is_list(Branches) ->
	1 + lists:max([0 | [get_fork_length(Tab, Branch) || Branch <- Branches]]);
get_fork_length(Tab, Branch) ->
	[{_, {_B, _Status, _Timestamp, Children}}] = ets:lookup(Tab, {block, Branch}),
	case sets:size(Children) == 0 of
		true ->
			1;
		false ->
			1 + get_fork_length(Tab, sets:to_list(Children))
	end.

%% @doc Update the status of the given block to 'nonce_limiter_validated'.
%% Do nothing if the block is not found in cache or if its status is
%% not 'awaiting_nonce_limiter_validation'.
mark_nonce_limiter_validated(Tab, H) ->
	case ets:lookup(Tab, {block, H}) of
		[{_, {B, {not_validated, awaiting_nonce_limiter_validation}, Timestamp, Children}}] ->
			insert(Tab, {{block, H}, {B, {not_validated, nonce_limiter_validated},
					Timestamp, Children}});
		_ ->
			ok
	end.

%% @doc Add a validated block to the cache. If the block is already in the cache, it
%% is overwritten. However, the function assumes the height, hash, previous hash, and
%% the cumulative difficulty do not change.
%% Raises previous_block_not_found if the previous block is not in the cache.
%% Raises previous_block_not_validated if the previous block is not validated.
add_validated(Tab, B) ->
	#block{ indep_hash = H, hash = SolutionH, previous_block = PrevH, height = Height } = B,
	case ets:lookup(Tab, {block, PrevH}) of
		[] ->
			error(previous_block_not_found);
		[{_, {_PrevB, {not_validated, _}, _Timestamp, _Children}}] ->
			error(previous_block_not_validated);
		[{_, {PrevB, PrevStatus, PrevTimestamp, PrevChildren}}] ->
			case ets:lookup(Tab, {block, H}) of
				[] ->
					%% A brand new validated block: register it under its
					%% solution, link it to its parent and to the height index.
					RemainingHs = remove_expired_alternative_blocks(Tab, SolutionH),
					SolutionSet = sets:from_list([H | RemainingHs]),
					[{_, Set}] = ets:lookup(Tab, links),
					Status = validated,
					insert(Tab, [
						{{block, PrevH}, {PrevB, PrevStatus, PrevTimestamp,
								sets:add_element(H, PrevChildren)}},
						{{block, H}, {B, Status, erlang:timestamp(), sets:new()}},
						{max_cdiff, maybe_increase_max_cdiff(Tab, B, Status)},
						{links, gb_sets:insert({Height, H}, Set)},
						{{solution, SolutionH}, SolutionSet}
					]);
				[{_, {_B, on_chain, Timestamp, Children}}] ->
					%% Already on chain - keep the on_chain status.
					insert(Tab, [
						{{block, PrevH}, {PrevB, PrevStatus, PrevTimestamp,
								sets:add_element(H, PrevChildren)}},
						{{block, H}, {B, on_chain, Timestamp, Children}}
					]);
				[{_, {_B, _Status, Timestamp, Children}}] ->
					%% Overwrite, promoting the status to validated.
					insert(Tab, [
						{{block, PrevH}, {PrevB, PrevStatus, PrevTimestamp,
								sets:add_element(H, PrevChildren)}},
						{{block, H}, {B, validated, Timestamp, Children}}
					])
			end
	end.

%% @doc Get the block from cache. Returns not_found if the block is not in cache.
get(Tab, H) ->
	case ets:lookup(Tab, {block, H}) of
		[] ->
			not_found;
		[{_, {B, _Status, _Timestamp, _Children}}] ->
			B
	end.

%% @doc Get the block and its status from cache.
%% Returns not_found if the block is not in cache.
get_block_and_status(Tab, H) ->
	case ets:lookup(Tab, {block, H}) of
		[] ->
			not_found;
		[{_, {B, Status, Timestamp, _Children}}] ->
			{B, {Status, Timestamp}}
	end.

%% @doc Get a {block, previous blocks, status} tuple for the earliest block from
%% the longest chain, which has not been validated yet. The previous blocks are
%% sorted from newest to oldest. The last one is a block from the current fork.
%% status is a tuple that indicates where in the validation process block is.
get_earliest_not_validated_from_longest_chain(Tab) ->
	[{_, Tip}] = ets:lookup(Tab, tip),
	[{_, {CDiff, H}}] = ets:lookup(Tab, max_cdiff),
	[{_, {#block{ cumulative_diff = TipCDiff }, _, _, _}}] = ets:lookup(Tab, {block, Tip}),
	case TipCDiff >= CDiff of
		true ->
			%% Current Tip is tip of the longest chain
			not_found;
		false ->
			[{_, {B, Status, Timestamp, _Children}}] = ets:lookup(Tab, {block, H}),
			case Status of
				{not_validated, _} ->
					%% Walk back to the earliest not-yet-validated ancestor.
					get_earliest_not_validated(Tab, B, Status, Timestamp);
				_ ->
					not_found
			end
	end.

%% @doc Return the list of {BH, TXIDs} pairs corresponding to the top up to the
%% ar_block:get_consensus_window_size() blocks of the longest chain and the number of blocks
%% in this list that are not on chain yet.
%%
%% The cache is updated via update_longest_chain_cache/1 which calls
%% get_longest_chain_block_txs_pairs/7
get_longest_chain_cache(Tab) ->
	[{longest_chain, LongestChain}] = ets:lookup(Tab, longest_chain),
	LongestChain.

%% Walk the chain downward from H collecting up to N {hash, txids} pairs.
%% PrevStatus/PrevH describe the block visited in the previous step; they are
%% used to detect concurrent reorgs and pruning, in which case the previously
%% cached value is returned instead (via get_longest_chain_cache/1).
get_longest_chain_block_txs_pairs(_Tab, _H, 0, _PrevStatus, _PrevH, Pairs, NotOnChainCount) ->
	{lists:reverse(Pairs), NotOnChainCount};
get_longest_chain_block_txs_pairs(Tab, H, N, PrevStatus, PrevH, Pairs, NotOnChainCount) ->
	case ets:lookup(Tab, {block, H}) of
		[{_, {B, {not_validated, awaiting_nonce_limiter_validation}, _Timestamp, _Children}}] ->
			%% Skip blocks still awaiting nonce limiter validation: restart
			%% the collection from the previous block with a full window.
			get_longest_chain_block_txs_pairs(Tab, B#block.previous_block,
					ar_block:get_consensus_window_size(), none, none, [], 0);
		[{_, {B, Status, _Timestamp, _Children}}] ->
			case PrevStatus == on_chain andalso Status /= on_chain of
				true ->
					%% A reorg should have happened in the meantime - an unlikely
					%% event, retry.
					get_longest_chain_cache(Tab);
				false ->
					NotOnChainCount2 =
						case Status of
							on_chain ->
								NotOnChainCount;
							_ ->
								NotOnChainCount + 1
						end,
					Pairs2 = [{B#block.indep_hash, [tx_id(TX) || TX <- B#block.txs]} | Pairs],
					get_longest_chain_block_txs_pairs(Tab, B#block.previous_block,
							N - 1, Status, H, Pairs2, NotOnChainCount2)
			end;
		[] ->
			case PrevStatus of
				on_chain ->
					case ets:lookup(Tab, {block, PrevH}) of
						[] ->
							%% The block has been pruned -
							%% an unlikely race condition so we retry.
							get_longest_chain_cache(Tab);
						[_] ->
							%% Pairs already contains the deepest block of the cache.
							{lists:reverse(Pairs), NotOnChainCount}
					end;
				_ ->
					%% The block has been invalidated -
					%% an unlikely race condition so we retry.
					get_longest_chain_cache(Tab)
			end
	end.

%% Accept either a #tx{} record or a bare TXID binary.
tx_id(#tx{ id = ID }) ->
	ID;
tx_id(TXID) ->
	TXID.

%% @doc Mark the given block as the tip block. Mark the previous blocks as on-chain.
%% Mark the on-chain blocks from other forks as validated. Raises invalid_tip if
%% one of the preceding blocks is not validated. Raises not_found if the block
%% is not found.
%%
%% Setting a new tip can cause some branches to be invalidated by the checkpoint, so we need
%% to recalculate max_cdiff.
mark_tip(Tab, H) ->
	case ets:lookup(Tab, {block, H}) of
		[{_, {B, Status, Timestamp, Children}}] ->
			case is_valid_fork(Tab, B, Status) of
				true ->
					insert(Tab, [
						{tip, H},
						{{block, H}, {B, on_chain, Timestamp, Children}} |
						mark_on_chain(Tab, B)
					]),
					%% We would only update max_cdiff if somehow the old max_cdiff was on a branch
					%% that has been invalidated due to the new tip causing the checkpoint to move.
					%% In practice we would not expect this to happen.
					[{_, {_CDiff, CDiffH}}] = ets:lookup(Tab, max_cdiff),
					case is_valid_fork(Tab, CDiffH) of
						true ->
							ok;
						false ->
							insert(Tab, {max_cdiff, find_max_cdiff(Tab, B#block.height)})
					end;
				false ->
					error(invalid_tip)
			end;
		[] ->
			error(not_found)
	end.

%% @doc Remove the block and all the blocks on top from the cache.
remove(Tab, H) ->
	case ets:lookup(Tab, {block, H}) of
		[] ->
			ok;
		[{_, {#block{ previous_block = PrevH }, _Status, _Timestamp, _Children}}] ->
			[{_, C = {_, H2}}] = ets:lookup(Tab, max_cdiff),
			[{_, {PrevB, PrevBStatus, PrevTimestamp, PrevBChildren}}] =
					ets:lookup(Tab, {block, PrevH}),
			%% Recursively delete H and everything built on top of it.
			remove2(Tab, H),
			insert(Tab, [
				%% If the removal took out the max_cdiff holder, recompute it.
				{max_cdiff, case ets:lookup(Tab, {block, H2}) of
						[] ->
							find_max_cdiff(Tab, get_tip_height(Tab));
						_ ->
							C
					end},
				%% Unlink H from its parent's children set.
				{{block, PrevH}, {PrevB, PrevBStatus, PrevTimestamp,
						sets:del_element(H, PrevBChildren)}}
			]),
			ar_ignore_registry:remove(H),
			ok
	end.

%% @doc Return the checkpoint block for the given (most recent) block index,
%% i.e. the block ?CHECKPOINT_DEPTH entries deep (see get_checkpoint_block2/3,4).
get_checkpoint_block(RecentBI) ->
	get_checkpoint_block2(RecentBI, 1, ?CHECKPOINT_DEPTH).

%% @doc Prune the cache. Discard all blocks deeper than Depth from the tip and
%% all of their children that are not on_chain.
%%
%% Height 99   A   B'  C
%%              \ /    |
%%          98   D'    E
%%                \   /
%%          97     F'
%%
%% B' is the Tip. prune(Tab, 1) will remove F', E, and C from the cache.
prune(Tab, Depth) ->
	prune2(Tab, Depth, get_tip_height(Tab)).

%% @doc Return true if there is at least one block in the cache with the given solution hash.
is_known_solution_hash(Tab, SolutionH) ->
	case ets:lookup(Tab, {solution, SolutionH}) of
		[] ->
			false;
		[{_, _Set}] ->
			true
	end.

%% @doc Return a block from the block cache meeting the following requirements:
%% - hash == SolutionH;
%% - indep_hash /= H.
%%
%% If there are several blocks, choose one with the same cumulative difficulty
%% or CDiff > PrevCDiff2 and CDiff2 > PrevCDiff (double-signing). If there are no
%% such blocks, return any other block matching the conditions above. Return not_found
%% if there are no blocks matching those conditions.
get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff) ->
	case ets:lookup(Tab, {solution, SolutionH}) of
		[] ->
			not_found;
		[{_, Set}] ->
			get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff, sets:to_list(Set), none)
	end.
%% Scan the candidate hashes; B accumulates the best fallback seen so far.
get_by_solution_hash(_Tab, _SolutionH, _H, _CDiff, _PrevCDiff, [], B) ->
	case B of
		none ->
			not_found;
		_ ->
			B
	end;
get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff, [H | L], B) ->
	%% Skip the excluded hash H itself.
	get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff, L, B);
get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff, [H2 | L], _B) ->
	case get(Tab, H2) of
		not_found ->
			%% An extremely unlikely race condition - simply retry.
			get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff);
		#block{ cumulative_diff = CDiff } = B2 ->
			%% Exact cumulative-difficulty match - return immediately.
			B2;
		#block{ cumulative_diff = CDiff2, previous_cumulative_diff = PrevCDiff2 } = B2
				when CDiff2 > PrevCDiff, CDiff > PrevCDiff2 ->
			%% Double-signing evidence - return immediately.
			B2;
		B2 ->
			%% Keep B2 as a fallback and continue scanning.
			get_by_solution_hash(Tab, SolutionH, H, CDiff, PrevCDiff, L, B2)
	end.

%% @doc Return the list of siblings of the given block, if any.
get_siblings(Tab, B) ->
	H = B#block.indep_hash,
	PrevH = B#block.previous_block,
	case ets:lookup(Tab, {block, PrevH}) of
		[] ->
			[];
		[{_, {_B, _Status, _CurrentTimestamp, Children}}] ->
			sets:fold(
				fun(SibH, Acc) ->
					case SibH of
						H ->
							%% Exclude the block itself.
							Acc;
						_ ->
							case ets:lookup(Tab, {block, SibH}) of
								[] ->
									Acc;
								[{_, {Sib, _, _, _}}] ->
									[Sib | Acc]
							end
					end
				end,
				[],
				Children
			)
	end.

%% @doc Set the receive timestamp of the given block, but only if it has not
%% been set yet (receive_timestamp == undefined). Returns not_found if the
%% block is not in the cache. Does not refresh the longest-chain cache.
update_timestamp(Tab, H, ReceiveTimestamp) ->
	case ets:lookup(Tab, {block, H}) of
		[] ->
			not_found;
		[{_, {B, Status, Timestamp, Children}}] ->
			case B#block.receive_timestamp of
				undefined ->
					insert(Tab, {
						{block, H},
						{
							B#block{receive_timestamp = ReceiveTimestamp},
							Status,
							Timestamp,
							Children
						}
					}, false);
				_ ->
					ok
			end
	end.

%% @doc Return all blocks from the cache mined by the given address.
get_blocks_by_miner(Tab, MinerAddr) ->
	case ets:lookup(Tab, links) of
		[{links, Set}] ->
			gb_sets:fold(
				fun({_Height, H}, Acc) ->
					case ets:lookup(Tab, {block, H}) of
						[{_, {B, _Status, _Timestamp, _Children}}]
								when B#block.reward_addr == MinerAddr ->
							[B | Acc];
						_ ->
							Acc
					end
				end,
				[],
				Set
			);
		_ ->
			[]
	end.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Insert Args into the ETS table and refresh the longest-chain cache.
insert(Tab, Args) ->
	insert(Tab, Args, true).

%% @doc Insert Args; skip the longest-chain cache refresh when UpdateCache
%% is false (used for intermediate writes within a compound operation).
insert(Tab, Args, UpdateCache) ->
	ets:insert(Tab, Args),
	case UpdateCache of
		true ->
			update_longest_chain_cache(Tab);
		false ->
			ok
	end.

%% @doc Delete the key Args from the ETS table and refresh the longest-chain cache.
delete(Tab, Args) ->
	delete(Tab, Args, true).

%% @doc Delete Args; skip the longest-chain cache refresh when UpdateCache is false.
delete(Tab, Args, UpdateCache) ->
	ets:delete(Tab, Args),
	case UpdateCache of
		true ->
			update_longest_chain_cache(Tab);
		false ->
			ok
	end.

%% @doc Walk backwards from B while the previous block is also not validated;
%% return {EarliestNotValidatedBlock, ForkBlocks, {Status, Timestamp}}.
get_earliest_not_validated(Tab, #block{ previous_block = PrevH } = B, Status, Timestamp) ->
	[{_, {PrevB, PrevStatus, PrevTimestamp, _Children}}] = ets:lookup(Tab, {block, PrevH}),
	case PrevStatus of
		{not_validated, _} ->
			get_earliest_not_validated(Tab, PrevB, PrevStatus, PrevTimestamp);
		_ ->
			{B, get_fork_blocks(Tab, B), {Status, Timestamp}}
	end.

%% @doc Collect the ancestors of B, newest first, up to and including the first
%% on_chain block.
get_fork_blocks(Tab, #block{ previous_block = PrevH }) ->
	[{_, {PrevB, Status, _Timestamp, _Children}}] = ets:lookup(Tab, {block, PrevH}),
	case Status of
		on_chain ->
			[PrevB];
		_ ->
			[PrevB | get_fork_blocks(Tab, PrevB)]
	end.

%% @doc Produce the list of ETS updates that mark B's validated ancestors as
%% on_chain, and demote the displaced fork (children of the fork point other
%% than B) from on_chain back to validated. Raises invalid_tip when an ancestor
%% is not validated.
mark_on_chain(Tab, #block{ previous_block = PrevH, indep_hash = H }) ->
	case ets:lookup(Tab, {block, PrevH}) of
		[{_, {_PrevB, {not_validated, _}, _Timestamp, _Children}}] ->
			error(invalid_tip);
		[{_, {_PrevB, on_chain, _Timestamp, Children}}] ->
			%% Mark the blocks from the previous main fork as validated, not on-chain.
			mark_off_chain(Tab, sets:del_element(H, Children));
		[{_, {PrevB, validated, Timestamp, Children}}] ->
			[{{block, PrevH}, {PrevB, on_chain, Timestamp, Children}}
					| mark_on_chain(Tab, PrevB)]
	end.

%% @doc Produce ETS updates demoting every on_chain descendant in Set to
%% validated (recursing through their children).
mark_off_chain(Tab, Set) ->
	sets:fold(
		fun(H, Acc) ->
			case ets:lookup(Tab, {block, H}) of
				[{_, {B, on_chain, Timestamp, Children}}] ->
					[{{block, H}, {B, validated, Timestamp, Children}}
							| mark_off_chain(Tab, Children)];
				_ ->
					Acc
			end
		end,
		[],
		Set
	).

%% @doc Delete the block H and, recursively, all of its children from the
%% cache (block entry, links entry, solution set entry, ignore registry).
%% Returns ok. The longest-chain cache is deliberately NOT refreshed here;
%% remove/2 refreshes it once after the whole subtree is gone.
remove2(Tab, H) ->
	[{_, Set}] = ets:lookup(Tab, links),
	case ets:lookup(Tab, {block, H}) of
		%% ets:lookup/2 returns a list; [] means the key is absent. The
		%% original code matched the atom not_found here, which ets:lookup/2
		%% never returns, so a missing block crashed with case_clause instead
		%% of being tolerated.
		[] ->
			ok;
		[{_, {#block{ hash = SolutionH, height = Height }, _Status, _Timestamp, Children}}] ->
			%% Don't update the cache here. remove/2 will do it.
			delete(Tab, {block, H}, false),
			ar_ignore_registry:remove(H),
			remove_solution(Tab, H, SolutionH),
			insert(Tab, {links, gb_sets:del_element({Height, H}, Set)}, false),
			sets:fold(
				fun(Child, ok) ->
					remove2(Tab, Child)
				end,
				ok,
				Children
			)
	end.

%% @doc Drop H from the solution set of SolutionH; delete the set entirely
%% when H was its only member. Does not refresh the longest-chain cache.
remove_solution(Tab, H, SolutionH) ->
	[{_, SolutionSet}] = ets:lookup(Tab, {solution, SolutionH}),
	case sets:size(SolutionSet) of
		1 ->
			delete(Tab, {solution, SolutionH}, false);
		_ ->
			SolutionSet2 = sets:del_element(H, SolutionSet),
			insert(Tab, {{solution, SolutionH}, SolutionSet2}, false)
	end.

%% @doc Return the height of the current tip block.
get_tip_height(Tab) ->
	[{_, Tip}] = ets:lookup(Tab, tip),
	[{_, {#block{ height = Height }, _Status, _Timestamp, _Children}}] =
			ets:lookup(Tab, {block, Tip}),
	Height.

%% @doc Height of the checkpoint: ?CHECKPOINT_DEPTH - 1 blocks below the tip.
get_checkpoint_height(TipHeight) ->
	TipHeight - ?CHECKPOINT_DEPTH + 1.

get_checkpoint_block2([{H, _, _}], _N, _CheckpointDepth) ->
	%% The genesis block.
	ar_block_cache:get(block_cache, H);
get_checkpoint_block2([{H, _, _} | BI], N, CheckpointDepth) ->
	B = ar_block_cache:get(block_cache, H),
	get_checkpoint_block2(BI, N + 1, B, CheckpointDepth).

%% Walk the block index keeping the last block found in the cache (B) as a
%% fallback when a deeper entry is missing.
get_checkpoint_block2([{H, _, _}], _N, B, _CheckpointDepth) ->
	%% The genesis block.
	case ar_block_cache:get(block_cache, H) of
		not_found ->
			B;
		B2 ->
			B2
	end;
get_checkpoint_block2([{H, _, _} | _], N, B, CheckpointDepth) when N == CheckpointDepth ->
	%% Reached the requested depth.
	case ar_block_cache:get(block_cache, H) of
		not_found ->
			B;
		B2 ->
			B2
	end;
get_checkpoint_block2([{H, _, _} | BI], N, B, CheckpointDepth) ->
	case ar_block_cache:get(block_cache, H) of
		not_found ->
			B;
		B2 ->
			get_checkpoint_block2(BI, N + 1, B2, CheckpointDepth)
	end.

%% @doc Return true if B is either on the main fork or on a fork which branched off at the
%% checkpoint height or higher.
is_valid_fork(Tab, H) ->
	[{_, {B, Status, _Timestamp, _Children}}] = ets:lookup(Tab, {block, H}),
	is_valid_fork(Tab, B, Status).

is_valid_fork(Tab, B, Status) ->
	CheckpointHeight = get_checkpoint_height(get_tip_height(Tab)),
	is_valid_fork(Tab, B, Status, CheckpointHeight).
%% Recursive core: a fork is valid if, walking backwards, we reach an on_chain
%% block before dropping below the checkpoint height.
is_valid_fork(_Tab, #block{ height = Height, indep_hash = H }, _Status, CheckpointHeight)
		when Height < CheckpointHeight ->
	%% Fell below the checkpoint without hitting the main chain - invalid.
	?LOG_WARNING([{event, found_invalid_heavy_fork},
			{hash, ar_util:encode(H)},
			{height, Height},
			{checkpoint_height, CheckpointHeight}]),
	false;
is_valid_fork(_Tab, _B, on_chain, _CheckpointHeight) ->
	true;
is_valid_fork(Tab, B, _Status, CheckpointHeight) ->
	[{_, {PrevB, PrevStatus, _, _}}] = ets:lookup(Tab, {block, B#block.previous_block}),
	is_valid_fork(Tab, PrevB, PrevStatus, CheckpointHeight).

%% @doc Return the new max_cdiff entry: {CDiff, H} of B if B is heavier than
%% the current maximum and sits on a valid fork; otherwise the current entry.
maybe_increase_max_cdiff(Tab, B, Status) ->
	[{_, C}] = ets:lookup(Tab, max_cdiff),
	maybe_increase_max_cdiff(Tab, B, Status, C).

maybe_increase_max_cdiff(Tab, B, Status, {MaxCDiff, _H} = C) ->
	case B#block.cumulative_diff > MaxCDiff andalso is_valid_fork(Tab, B, Status) of
		true ->
			{B#block.cumulative_diff, B#block.indep_hash};
		false ->
			C
	end.

%% @doc Recompute the {CDiff, H} maximum over all cached blocks at or above the
%% checkpoint height, considering only blocks on valid forks.
find_max_cdiff(Tab, TipHeight) ->
	CheckpointHeight = get_checkpoint_height(TipHeight),
	[{_, Set}] = ets:lookup(Tab, links),
	gb_sets:fold(
		fun	({Height, _H}, Acc) when Height < CheckpointHeight ->
				%% Below the checkpoint - never a candidate.
				Acc;
			({_Height, H}, not_set) ->
				%% First candidate seeds the accumulator.
				[{_, {#block{ cumulative_diff = CDiff }, _, _, _}}] =
						ets:lookup(Tab, {block, H}),
				{CDiff, H};
			({_Height, H}, Acc) ->
				[{_, {B, Status, _, _}}] = ets:lookup(Tab, {block, H}),
				maybe_increase_max_cdiff(Tab, B, Status, Acc)
		end,
		not_set,
		Set
	).

%% Repeatedly take the lowest block from the links set and drop it (together
%% with its non-on_chain children) while it is deeper than Depth below the tip.
prune2(Tab, Depth, TipHeight) ->
	[{_, Set}] = ets:lookup(Tab, links),
	case gb_sets:is_empty(Set) of
		true ->
			ok;
		false ->
			{{Height, H}, Set2} = gb_sets:take_smallest(Set),
			case Height >= TipHeight - Depth of
				true ->
					%% Everything remaining is shallow enough - done.
					ok;
				false ->
					insert(Tab, {links, Set2}, false),
					%% The lowest block must be on-chain by construction.
					[{_, {B, on_chain, _Timestamp, Children}}] = ets:lookup(Tab, {block, H}),
					#block{ hash = SolutionH } = B,
					sets:fold(
						fun(Child, ok) ->
							[{_, {_, Status, _, _}}] = ets:lookup(Tab, {block, Child}),
							case Status of
								on_chain ->
									%% The on_chain child stays; it becomes the
									%% new lowest block.
									ok;
								_ ->
									%% Drop off-chain children and their subtrees.
									remove(Tab, Child)
							end
						end,
						ok,
						Children
					),
					remove_solution(Tab, H, SolutionH),
					delete(Tab, {block, H}),
					ar_ignore_registry:remove(H),
					prune2(Tab, Depth, TipHeight)
			end
	end.

%% @doc Recompute the longest-chain cache from the current max_cdiff head and
%% store it under the longest_chain key. Returns the computed value.
update_longest_chain_cache(Tab) ->
	[{_, {_CDiff, H}}] = ets:lookup(Tab, max_cdiff),
	Result = get_longest_chain_block_txs_pairs(Tab, H,
			ar_block:get_consensus_window_size(), none, none, [], 0),
	case ets:update_element(Tab, longest_chain, {2, Result}) of
		true ->
			ok;
		false ->
			%% update_element returns false when the longest_chain key does not
			%% exist yet. insert_new may in turn return false if another process
			%% created the key in between - extremely unlikely, really only
			%% possible when the node first starts up, and ultimately not super
			%% relevant since the cache will likely be refreshed again shortly.
			%% So we'll ignore that outcome.
			ets:insert_new(Tab, {longest_chain, Result})
	end,
	Result.

%%%===================================================================
%%% Tests.
%%%=================================================================== checkpoint_test() -> ets:new(bcache_test, [set, named_table]), %% Height Block/Status %% %% 3 B3/on_chain %% | %% 2 B2/on_chain B2B/not_validated %% | | %% 1 B1/on_chain B1B/not_validated %% \ / %% 0 B0/on_chain new(bcache_test, B0 = random_block(0)), add(bcache_test, B1 = on_top(random_block(1), B0)), add(bcache_test, B1B = on_top(random_block(1), B0)), add(bcache_test, B2 = on_top(random_block(2), B1)), add(bcache_test, B2B = on_top(random_block(2), B1B)), add(bcache_test, B3 = on_top(random_block(3), B2)), mark_tip(bcache_test, block_id(B1)), mark_tip(bcache_test, block_id(B2)), mark_tip(bcache_test, block_id(B3)), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B3, B2, B1, B0], 0), assert_tip(block_id(B3)), assert_max_cdiff({3, block_id(B3)}), assert_is_valid_fork(true, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), %% Add B4 as not_validated. No blocks are pushed beneath the checkpoint height since B4 %% has not been validated. 
%% %% Height Block/Status %% %% 4 B4/not_validated %% | %% 3 B3/on_chain %% | %% 2 B2/on_chain B2B/not_validated/invalid_fork %% | | %% 1 B1/on_chain B1B/not_validated/invalid_fork %% \ / %% 0 B0/on_chain/invalid_fork add(bcache_test, B4 = on_top(random_block(4), B3)), ?assertMatch({B4, [B3], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B3, B2, B1, B0], 0), assert_tip(block_id(B3)), assert_max_cdiff({4, block_id(B4)}), assert_is_valid_fork(true, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, not_validated, B4), %% Mark B4 as the tip, this pushes B0 below the checkpoint height and invalidates B1B and B2B. %% %% Height Block/Status %% %% 4 B4/on_chain %% | %% 3 B3/on_chain %% | %% 2 B2/on_chain B2B/not_validated/invalid_fork %% | | %% 1 B1/on_chain B1B/not_validated/invalid_fork %% \ / %% 0 B0/on_chain/invalid_fork mark_tip(bcache_test, block_id(B4)), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B4, B3, B2, B1, B0], 0), assert_tip(block_id(B4)), assert_max_cdiff({4, block_id(B4)}), assert_is_valid_fork(false, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(false, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(false, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, on_chain, B4), %% Add B3B with cdiff 5 to the invalid fork. Nothing should change. 
%% %% Height Block/Status %% %% 4 B4/on_chain %% | %% 3 B3/on_chain B3B/not_validated/invalid_fork %% | | %% 2 B2/on_chain B2B/not_validated/invalid_fork %% | | %% 1 B1/on_chain B1B/not_validated/invalid_fork %% \ / %% 0 B0/on_chain/invalid_fork add(bcache_test, B3B = on_top(random_block(5), B2B)), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B4, B3, B2, B1, B0], 0), assert_tip(block_id(B4)), assert_max_cdiff({4, block_id(B4)}), assert_is_valid_fork(false, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(false, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(false, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, on_chain, B4), assert_is_valid_fork(false, not_validated, B3B), %% Remove B4, this should revalidate the fork and make it the max_cdiff %% %% Height Block/Status %% %% 3 B3/on_chain B3B/not_validated %% | | %% 2 B2/on_chain B2B/not_validated %% | | %% 1 B1/on_chain B1B/not_validated %% \ / %% 0 B0/on_chain mark_tip(bcache_test, block_id(B3)), remove(bcache_test, block_id(B4)), ?assertMatch({B1B, [B0], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B0], 0), assert_tip(block_id(B3)), assert_max_cdiff({5, block_id(B3B)}), assert_is_valid_fork(true, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, not_validated, B3B), %% We should not be able to mark blocks on the invalid fork as the tip. 
%% %% Height Block/Status %% %% 4 B4/on_chain %% | %% 3 B3/on_chain B3B/not_validated/invalid_fork %% | | %% 2 B2/on_chain B2B/not_validated/invalid_fork %% | | %% 1 B1/on_chain B1B/not_validated/invalid_fork %% \ / %% 0 B0/on_chain/invalid_fork add(bcache_test, B4), mark_tip(bcache_test, block_id(B4)), ?assertException(error, invalid_tip, mark_tip(bcache_test, block_id(B1B))), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B4, B3, B2, B1, B0], 0), assert_tip(block_id(B4)), assert_max_cdiff({4, block_id(B4)}), assert_is_valid_fork(false, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(false, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(false, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(false, not_validated, B3B), ets:delete(bcache_test). checkpoint_invalidate_max_cdiff_test() -> ets:new(bcache_test, [set, named_table]), %% B2B is heaviest. 
%% %% Height Block/Status %% %% 4 B4/not_validated %% | %% 3 B3/on_chain %% | %% 2 B2/on_chain B2B/not_validated %% | | %% 1 B1/on_chain B1B/not_validated %% \ / %% 0 B0/on_chain new(bcache_test, B0 = random_block(0)), add(bcache_test, B1 = on_top(random_block(1), B0)), add(bcache_test, B2 = on_top(random_block(2), B1)), add(bcache_test, B3 = on_top(random_block(3), B2)), add(bcache_test, B4 = on_top(random_block(4), B3)), add(bcache_test, B1B = on_top(random_block(1), B0)), add(bcache_test, B2B = on_top(random_block(5), B1B)), mark_tip(bcache_test, block_id(B1)), mark_tip(bcache_test, block_id(B2)), mark_tip(bcache_test, block_id(B3)), ?assertMatch({B1B, [B0], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B0], 0), assert_tip(block_id(B3)), assert_max_cdiff({5, block_id(B2B)}), assert_is_valid_fork(true, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, not_validated, B4), %% B2B is still heaviest, but since B4 is the new tip, B2B's branch has been pushed below %% the checkpoint. 
%% %% Height Block/Status %% %% 4 B4/on_chain %% | %% 3 B3/on_chain %% | %% 2 B2/on_chain B2B/not_validated/invalid_fork %% | | %% 1 B1/on_chain B1B/not_validated/invalid_fork %% \ / %% 0 B0/on_chain/invalid_fork mark_tip(bcache_test, block_id(B4)), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B4, B3, B2, B1, B0], 0), assert_tip(block_id(B4)), assert_max_cdiff({4, block_id(B4)}), assert_is_valid_fork(false, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(false, not_validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(false, not_validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, on_chain, B4), ets:delete(bcache_test). checkpoint_invalidate_tip_test() -> ets:new(bcache_test, [set, named_table]), %% B2B is the tip %% %% Height Block/Status %% %% 4 B4/not_validated %% | %% 3 B3/validated %% | %% 2 B2/validated B2B/on_chain %% | | %% 1 B1/validated B1B/on_chain %% \ / %% 0 B0/on_chain new(bcache_test, B0 = random_block(0)), add(bcache_test, B1 = on_top(random_block(1), B0)), add(bcache_test, B2 = on_top(random_block(2), B1)), add(bcache_test, B3 = on_top(random_block(3), B2)), add(bcache_test, B4 = on_top(random_block(4), B3)), add(bcache_test, B1B = on_top(random_block(1), B0)), add(bcache_test, B2B = on_top(random_block(5), B1B)), mark_tip(bcache_test, block_id(B1)), mark_tip(bcache_test, block_id(B2)), mark_tip(bcache_test, block_id(B3)), mark_tip(bcache_test, block_id(B1B)), mark_tip(bcache_test, block_id(B2B)), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B2B, B1B, B0], 0), assert_tip(block_id(B2B)), assert_max_cdiff({5, block_id(B2B)}), assert_is_valid_fork(true, on_chain, B0), assert_is_valid_fork(true, validated, B1), assert_is_valid_fork(true, on_chain, B1B), assert_is_valid_fork(true, validated, B2), assert_is_valid_fork(true, on_chain, B2B), 
assert_is_valid_fork(true, validated, B3), assert_is_valid_fork(true, not_validated, B4), %% When we mark B4 as the tip it will also invalidate the B2B branch. %% %% Height Block/Status %% %% 4 B4/on_chain %% | %% 3 B3/on_chain %% | %% 2 B2/on_chain B2B/not_validated/invalid_fork %% | | %% 1 B1/on_chain B1B/not_validated/invalid_fork %% \ / %% 0 B0/on_chain/invalid_fork mark_tip(bcache_test, block_id(B4)), ?assertMatch(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B4, B3, B2, B1, B0], 0), assert_tip(block_id(B4)), assert_max_cdiff({4, block_id(B4)}), assert_is_valid_fork(false, on_chain, B0), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(false, validated, B1B), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(false, validated, B2B), assert_is_valid_fork(true, on_chain, B3), assert_is_valid_fork(true, on_chain, B4), ets:delete(bcache_test). block_cache_test() -> ets:new(bcache_test, [set, named_table]), %% Initialize block_cache from B1 %% %% Height Block/Status %% %% 0 B1/on_chain new(bcache_test, B1 = random_block(0)), ?assertEqual(not_found, get(bcache_test, crypto:strong_rand_bytes(48))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(32), 1, 1)), ?assertEqual(B1, get(bcache_test, block_id(B1))), ?assertEqual(B1, get_by_solution_hash(bcache_test, B1#block.hash, crypto:strong_rand_bytes(32), 1, 1)), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B1#block.hash, block_id(B1), 1, 1)), assert_longest_chain([B1], 0), assert_tip(block_id(B1)), assert_max_cdiff({0, block_id(B1)}), assert_is_valid_fork(true, on_chain, B1), ?assertEqual([], get_siblings(bcache_test, B1)), %% Re-adding B1 shouldn't change anything - i.e. 
nothing should be updated because the %% block is already on chain %% %% Height Block/Status %% %% 0 B1/on_chain add(bcache_test, B1#block{ txs = [crypto:strong_rand_bytes(32)] }), ?assertEqual(B1#block{ txs = [] }, get(bcache_test, block_id(B1))), ?assertEqual(B1#block{ txs = [] }, get_by_solution_hash(bcache_test, B1#block.hash, crypto:strong_rand_bytes(32), 1, 1)), assert_longest_chain([B1], 0), assert_max_cdiff({0, block_id(B1)}), assert_is_valid_fork(true, on_chain, B1), %% Same as above. %% %% Height Block/Status %% %% 0 B1/on_chain add(bcache_test, B1), ?assertEqual(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B1], 0), assert_tip(block_id(B1)), assert_max_cdiff({0, block_id(B1)}), assert_is_valid_fork(true, on_chain, B1), %% Add B2 as not_validated %% %% Height Block/Status %% %% 1 B2/not_validated %% | %% 0 B1/on_chain add(bcache_test, B2 = on_top(random_block(1), B1)), ExpectedStatus = awaiting_nonce_limiter_validation, ?assertMatch({B2, [B1], {{not_validated, ExpectedStatus}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B1], 0), assert_tip(block_id(B1)), assert_max_cdiff({1, block_id(B2)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, not_validated, B2), ?assertEqual([], get_siblings(bcache_test, B2)), %% Add a TXID to B2, but still don't mark as validated %% %% Height Block/Status %% %% 1 B2/not_validated %% | %% 0 B1/on_chain TXID = crypto:strong_rand_bytes(32), add(bcache_test, B2#block{ txs = [TXID] }), ?assertEqual(B2#block{ txs = [TXID] }, get(bcache_test, block_id(B2))), ?assertEqual(B2#block{ txs = [TXID] }, get_by_solution_hash(bcache_test, B2#block.hash, crypto:strong_rand_bytes(32), 1, 1)), ?assertEqual(B2#block{ txs = [TXID] }, get_by_solution_hash(bcache_test, B2#block.hash, block_id(B1), 1, 1)), assert_longest_chain([B1], 0), assert_tip(block_id(B1)), assert_max_cdiff({1, block_id(B2)}), assert_is_valid_fork(true, on_chain, 
B1), assert_is_valid_fork(true, not_validated, B2), %% Remove B2 %% %% Height Block/Status %% %% 0 B1/on_chain remove(bcache_test, block_id(B2)), ?assertEqual(not_found, get(bcache_test, block_id(B2))), ?assertEqual(B1, get(bcache_test, block_id(B1))), assert_longest_chain([B1], 0), assert_tip(block_id(B1)), assert_max_cdiff({0, block_id(B1)}), assert_is_valid_fork(true, on_chain, B1), %% Add B and B1_2 creating a fork, with B1_2 at a higher difficulty. Nether are validated. %% %% Height Block/Status %% %% 1 B2/not_validated B1_2/not_validated %% \ / %% 0 B1/on_chain add(bcache_test, B2), add(bcache_test, B1_2 = (on_top(random_block(2), B1))#block{ hash = B1#block.hash }), ?assertEqual(B1, get_by_solution_hash(bcache_test, B1#block.hash, block_id(B1_2), 1, 1)), ?assertEqual(B1, get_by_solution_hash(bcache_test, B1#block.hash, crypto:strong_rand_bytes(32), B1#block.cumulative_diff, 1)), ?assertEqual(B1_2, get_by_solution_hash(bcache_test, B1#block.hash, crypto:strong_rand_bytes(32), B1_2#block.cumulative_diff, 1)), ?assert(lists:member(get_by_solution_hash(bcache_test, B1#block.hash, <<>>, 1, 1), [B1, B1_2])), assert_longest_chain([B1], 0), assert_tip(block_id(B1)), assert_max_cdiff({2, block_id(B1_2)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, not_validated, B2), assert_is_valid_fork(true, not_validated, B1_2), ?assertEqual([B2], get_siblings(bcache_test, B1_2)), ?assertEqual([B1_2], get_siblings(bcache_test, B2)), %% Even though B2 is marked as a tip, it is still lower difficulty than B1_2 so will %% not be included in the longest chain %% %% Height Block/Status %% %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain mark_tip(bcache_test, block_id(B2)), ?assertEqual(B1_2, get(bcache_test, block_id(B1_2))), assert_longest_chain([B1], 0), assert_tip(block_id(B2)), assert_max_cdiff({2, block_id(B1_2)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, 
B1_2), %% Remove B1_2, causing B2 to now be the tip of the heaviest chain %% %% Height Block/Status %% %% 1 B2/on_chain %% \ %% 0 B1/on_chain remove(bcache_test, block_id(B1_2)), ?assertEqual(not_found, get(bcache_test, block_id(B1_2))), ?assertEqual(B1, get_by_solution_hash(bcache_test, B1#block.hash, crypto:strong_rand_bytes(32), 0, 0)), assert_longest_chain([B2, B1], 0), assert_tip(block_id(B2)), assert_max_cdiff({1, block_id(B2)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), %% Height Block/Status %% %% 1 B2/on_chain %% \ %% 0 B1/on_chain prune(bcache_test, 1), ?assertEqual(B1, get(bcache_test, block_id(B1))), ?assertEqual(B1, get_by_solution_hash(bcache_test, B1#block.hash, crypto:strong_rand_bytes(32), 0, 0)), assert_longest_chain([B2, B1], 0), assert_tip(block_id(B2)), assert_max_cdiff({1, block_id(B2)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), %% Height Block/Status %% %% 1 B2/on_chain prune(bcache_test, 0), ?assertEqual(not_found, get(bcache_test, block_id(B1))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B1#block.hash, <<>>, 0, 0)), assert_longest_chain([B2], 0), assert_tip(block_id(B2)), assert_max_cdiff({1, block_id(B2)}), assert_is_valid_fork(true, on_chain, B2), prune(bcache_test, 0), ?assertEqual(not_found, get(bcache_test, block_id(B1_2))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B1_2#block.hash, <<>>, 0, 0)), assert_longest_chain([B2], 0), assert_tip(block_id(B2)), assert_max_cdiff({1, block_id(B2)}), assert_is_valid_fork(true, on_chain, B2), %% B1_2->B1 fork is the heaviest, but only B1 is validated. B2_2->B2->B1 is longer but %% has a lower cdiff. 
%% %% Height Block/Status %% %% 2 B2_2/not_validated %% | %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain new(bcache_test, B1), add(bcache_test, B1_2), add(bcache_test, B2), mark_tip(bcache_test, block_id(B2)), add(bcache_test, B2_2 = on_top(random_block(1), B2)), ?assertMatch({B1_2, [B1], {{not_validated, ExpectedStatus}, _Timestamp}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B1], 0), assert_tip(block_id(B2)), assert_max_cdiff({2, block_id(B1_2)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B1_2), assert_is_valid_fork(true, not_validated, B2_2), %% B2_3->B2_2->B2->B1 is no longer and heavier but only B2->B1 are validated. %% %% Height Block/Status %% %% 3 B2_3/not_validated %% | %% 2 B2_2/not_validated %% | %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain add(bcache_test, B2_3 = on_top(random_block(3), B2_2)), ?assertMatch({B2_2, [B2], {{not_validated, ExpectedStatus}, _Timestamp}}, get_earliest_not_validated_from_longest_chain(bcache_test)), ?assertException(error, invalid_tip, mark_tip(bcache_test, block_id(B2_3))), assert_longest_chain([B2, B1], 0), assert_tip(block_id(B2)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B1_2), assert_is_valid_fork(true, not_validated, B2_2), assert_is_valid_fork(true, not_validated, B2_3), %% Now B2_2->B2->B1 are validated. 
%% %% Height Block/Status %% %% 3 B2_3/not_validated %% | %% 2 B2_2/validated %% | %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain add_validated(bcache_test, B2_2), ?assertMatch({B2_2, {validated, _}}, get_block_and_status(bcache_test, B2_2#block.indep_hash)), ?assertMatch({B2_3, [B2_2, B2], {{not_validated, ExpectedStatus}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B2_2, B2, B1], 1), assert_tip(block_id(B2)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B1_2), assert_is_valid_fork(true, validated, B2_2), assert_is_valid_fork(true, not_validated, B2_3), %% Now the B3->B2->B1 fork is heaviest %% %% Height Block/Status %% %% 3 B2_3/not_validated %% | %% 2 B2_2/validated B3/on_chain %% \ / %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain B3 = on_top(random_block(4), B2), B3ID = block_id(B3), add(bcache_test, B3), add_validated(bcache_test, B3), mark_tip(bcache_test, B3ID), ?assertEqual(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B3, B2, B1], 0), assert_tip(block_id(B3)), assert_max_cdiff({4, block_id(B3)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B1_2), assert_is_valid_fork(true, validated, B2_2), assert_is_valid_fork(true, not_validated, B2_3), assert_is_valid_fork(true, on_chain, B3), ?assertEqual([B2_2], get_siblings(bcache_test, B3)), ?assertEqual([B3], get_siblings(bcache_test, B2_2)), ?assertEqual([B2], get_siblings(bcache_test, B1_2)), %% B3->B2->B1 fork is still heaviest %% %% Height Block/Status %% %% 3 B2_3/not_validated %% | %% 2 B2_2/on_chain B3/validated %% \ / %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain mark_tip(bcache_test, block_id(B2_2)), ?assertEqual(not_found, 
get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B3, B2, B1], 1), assert_tip(block_id(B2_2)), assert_max_cdiff({4, block_id(B3)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B1_2), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, not_validated, B2_3), assert_is_valid_fork(true, validated, B3), %% Height Block/Status %% %% 3 B2_3/not_validated B4/not_validated %% | | %% 2 B2_2/on_chain B3/validated %% \ / %% 1 B2/on_chain B1_2/not_validated %% \ / %% 0 B1/on_chain add(bcache_test, B4 = on_top(random_block(5), B3)), ?assertMatch({B4, [B3, B2], {{not_validated, ExpectedStatus}, _Timestamp}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B3, B2, B1], 1), assert_tip(block_id(B2_2)), assert_max_cdiff({5, block_id(B4)}), assert_is_valid_fork(true, on_chain, B1), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, not_validated, B1_2), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, not_validated, B2_3), assert_is_valid_fork(true, validated, B3), assert_is_valid_fork(true, not_validated, B4), %% Height Block/Status %% %% 3 B2_3/not_validated B4/not_validated %% | | %% 2 B2_2/on_chain B3/validated %% \ / %% 1 B2/on_chain prune(bcache_test, 1), ?assertEqual(not_found, get(bcache_test, block_id(B1))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B1#block.hash, <<>>, 0, 0)), assert_longest_chain([B3, B2], 1), assert_tip(block_id(B2_2)), assert_max_cdiff({5, block_id(B4)}), assert_is_valid_fork(true, on_chain, B2), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, not_validated, B2_3), assert_is_valid_fork(true, validated, B3), assert_is_valid_fork(true, not_validated, B4), ?assertEqual([], get_siblings(bcache_test, B2_3)), ?assertEqual([], get_siblings(bcache_test, B4)), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 
B2_2/on_chain mark_tip(bcache_test, block_id(B2_3)), prune(bcache_test, 1), ?assertEqual(not_found, get(bcache_test, block_id(B2))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B2#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 B2_2/on_chain prune(bcache_test, 1), ?assertEqual(not_found, get(bcache_test, block_id(B3))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B3#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 B2_2/on_chain prune(bcache_test, 1), ?assertEqual(not_found, get(bcache_test, block_id(B4))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B4#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 B2_2/on_chain prune(bcache_test, 1), ?assertEqual(B2_2, get(bcache_test, block_id(B2_2))), ?assertEqual(B2_2, get_by_solution_hash(bcache_test, B2_2#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 B2_2/on_chain prune(bcache_test, 1), ?assertEqual(B2_3, get(bcache_test, block_id(B2_3))), ?assertEqual(B2_3, get_by_solution_hash(bcache_test, B2_3#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), 
assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 B2_2/on_chain remove(bcache_test, block_id(B3)), ?assertEqual(not_found, get(bcache_test, block_id(B3))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B3#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 3 B2_3/on_chain %% | %% 2 B2_2/on_chain remove(bcache_test, block_id(B3)), ?assertEqual(not_found, get(bcache_test, block_id(B4))), ?assertEqual(not_found, get_by_solution_hash(bcache_test, B4#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), assert_tip(block_id(B2_3)), assert_max_cdiff({3, block_id(B2_3)}), assert_is_valid_fork(true, on_chain, B2_2), assert_is_valid_fork(true, on_chain, B2_3), %% Height Block/Status %% %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain new(bcache_test, B11 = random_block(0)), add(bcache_test, B12 = on_top(random_block(1), B11)), add_validated(bcache_test, B13 = on_top(random_block(1), B11)), mark_tip(bcache_test, block_id(B13)), %% Although the first block at height 1 was the one added in B12, B13 then %% became the tip so we should not reorganize. ?assertEqual(not_found, get_earliest_not_validated_from_longest_chain(bcache_test)), %% The longest chain starts a the max_cdiff block which in this case is B12 since B13 %% was added second and has the same cdiff. 
So the longest chain stays as just [B11] assert_longest_chain([B11], 0), assert_tip(block_id(B13)), assert_max_cdiff({1, block_id(B12)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), %% Height Block/Status %% %% 2 B14/not_validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain add(bcache_test, B14 = on_top(random_block_after_repacking(2), B13)), ?assertMatch({B14, [B13], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B13, B11], 0), assert_tip(block_id(B13)), assert_max_cdiff({2, block_id(B14)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, not_validated, B14), %% Height Block/Status %% %% 2 B14/not_validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain mark_nonce_limiter_validated(bcache_test, crypto:strong_rand_bytes(48)), mark_nonce_limiter_validated(bcache_test, block_id(B13)), ?assertMatch({B13, {on_chain, _}}, get_block_and_status(bcache_test, block_id(B13))), ?assertMatch({B14, {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_block_and_status(bcache_test, block_id(B14))), assert_longest_chain([B13, B11], 0), assert_tip(block_id(B13)), assert_max_cdiff({2, block_id(B14)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, not_validated, B14), %% Height Block/Status %% %% 2 B14/not_validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain ?assertMatch({B14, {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_block_and_status(bcache_test, block_id(B14))), ?assertMatch({B14, [B13], {{not_validated, awaiting_nonce_limiter_validation}, _}}, 
get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B13, B11], 0), assert_tip(block_id(B13)), assert_max_cdiff({2, block_id(B14)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, not_validated, B14), %% Height Block/Status %% %% 2 B14/nonce_limiter_validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain mark_nonce_limiter_validated(bcache_test, block_id(B14)), ?assertMatch({B14, {{not_validated, nonce_limiter_validated}, _}}, get_block_and_status(bcache_test, block_id(B14))), ?assertMatch({B14, [B13], {{not_validated, nonce_limiter_validated}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), %% Longest chain now includes B14 because its status changed to nonce_limiter_validated assert_longest_chain([B14, B13, B11], 1), assert_tip(block_id(B13)), assert_max_cdiff({2, block_id(B14)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, not_validated, B14), %% Height Block/Status %% %% 3 B15/not_validated %% | %% 2 B14/nonce_limiter_validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain add(bcache_test, B15 = on_top(random_block_after_repacking(3), B14)), ?assertMatch({B14, [B13], {{not_validated, nonce_limiter_validated}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B14, B13, B11], 1), assert_tip(block_id(B13)), assert_max_cdiff({3, block_id(B15)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, not_validated, B14), assert_is_valid_fork(true, not_validated, B15), %% Height Block/Status %% %% 3 B15/not_validated %% | %% 2 B14/validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain 
add_validated(bcache_test, B14), ?assertMatch({B15, [B14, B13], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), ?assertMatch({B14, {validated, _}}, get_block_and_status(bcache_test, block_id(B14))), assert_longest_chain([B14, B13, B11], 1), assert_tip(block_id(B13)), assert_max_cdiff({3, block_id(B15)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, validated, B14), assert_is_valid_fork(true, not_validated, B15), %% Height Block/Status %% %% 3 B16/not_validated %% | %% 3 B15/not_validated %% | %% 2 B14/validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain add(bcache_test, B16 = on_top(random_block_after_repacking(4), B15)), ?assertMatch({B15, [B14, B13], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B14, B13, B11], 1), assert_tip(block_id(B13)), assert_max_cdiff({4, block_id(B16)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, validated, B14), assert_is_valid_fork(true, not_validated, B15), assert_is_valid_fork(true, not_validated, B16), %% Height Block/Status %% %% 3 B16/nonce_limiter_validated %% | %% 3 B15/not_validated %% | %% 2 B14/validated %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain mark_nonce_limiter_validated(bcache_test, block_id(B16)), ?assertMatch({B15, [B14, B13], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), ?assertMatch({B16, {{not_validated, nonce_limiter_validated}, _}}, get_block_and_status(bcache_test, block_id(B16))), assert_longest_chain([B14, B13, B11], 1), assert_tip(block_id(B13)), assert_max_cdiff({4, block_id(B16)}), 
assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, validated, B14), assert_is_valid_fork(true, not_validated, B15), assert_is_valid_fork(true, not_validated, B16), %% Height Block/Status %% %% 3 B16/nonce_limiter_validated %% | %% 3 B15/not_validated %% | %% 2 B14/on_chain %% | %% 1 B12/not_validated B13/on_chain %% \ / %% 0 B11/on_chain mark_tip(bcache_test, block_id(B14)), ?assertMatch({B14, {on_chain, _}}, get_block_and_status(bcache_test, block_id(B14))), ?assertMatch({B15, [B14], {{not_validated, awaiting_nonce_limiter_validation}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B14, B13, B11], 0), assert_tip(block_id(B14)), assert_max_cdiff({4, block_id(B16)}), assert_is_valid_fork(true, on_chain, B11), assert_is_valid_fork(true, not_validated, B12), assert_is_valid_fork(true, on_chain, B13), assert_is_valid_fork(true, on_chain, B14), assert_is_valid_fork(true, not_validated, B15), assert_is_valid_fork(true, not_validated, B16), ets:delete(bcache_test). assert_longest_chain(Chain, NotOnChainCount) -> ExpectedPairs = [{B#block.indep_hash, []} || B <- Chain], ?assertEqual({ExpectedPairs, NotOnChainCount}, get_longest_chain_cache(bcache_test)). assert_max_cdiff(ExpectedMaxCDiff) -> [{_, MaxCDiff}] = ets:lookup(bcache_test, max_cdiff), ?assertEqual(ExpectedMaxCDiff, MaxCDiff). assert_is_valid_fork(ExpectedFork, ExpectedStatus, B) -> [{_, {_, Status, _, _}}] = ets:lookup(bcache_test, {block, block_id(B)}), case ExpectedStatus of not_validated -> ?assertMatch({not_validated, _}, Status); _ -> ?assertEqual(ExpectedStatus, Status) end, ?assertEqual(ExpectedFork, is_valid_fork(bcache_test, B, Status)). assert_tip(ExpectedTip) -> [{_, Tip}] = ets:lookup(bcache_test, tip), ?assertEqual(ExpectedTip,Tip). 
%% @doc Create a block at height 0 with the given cumulative difficulty and
%% fresh random identifiers (48-byte independent hash, 32-byte solution hash).
random_block(CDiff) ->
	#block{
		indep_hash = crypto:strong_rand_bytes(48),
		height = 0,
		cumulative_diff = CDiff,
		hash = crypto:strong_rand_bytes(32)
	}.

%% @doc Identical in shape to random_block/1; the separate name keeps the
%% repacking-related test scenarios readable.
random_block_after_repacking(CDiff) ->
	#block{
		indep_hash = crypto:strong_rand_bytes(48),
		height = 0,
		cumulative_diff = CDiff,
		hash = crypto:strong_rand_bytes(32)
	}.

%% @doc The cache identifier of a block is its independent hash.
block_id(#block{ indep_hash = H }) ->
	H.

%% @doc Chain B on top of PrevB: set the previous-block link, bump the height,
%% and record the parent's cumulative difficulty.
on_top(B, PrevB) ->
	B#block{
		previous_block = PrevB#block.indep_hash,
		height = PrevB#block.height + 1,
		previous_cumulative_diff = PrevB#block.cumulative_diff
	}.

%% @doc Test that get_blocks_by_miner returns the correct blocks for a given miner.
get_blocks_by_miner_test() ->
	Tab = bcache_test,
	ets:new(Tab, [set, named_table]),
	new(Tab, B0 = random_block(0)),
	?assertEqual([], get_blocks_by_miner(Tab, <<"miner1">>)),
	%% Two blocks rewarded to miner1 and one to miner2, all placed on top of B0.
	B1 = #block{ indep_hash = <<"hash1">>, reward_addr = <<"miner1">> },
	B2 = #block{ indep_hash = <<"hash2">>, reward_addr = <<"miner2">> },
	B3 = #block{ indep_hash = <<"hash3">>, reward_addr = <<"miner1">> },
	Lifted = [on_top(B, B0) || B <- [B1, B2, B3]],
	lists:foreach(fun(Child) -> add(Tab, Child) end, Lifted),
	[B1_1, B2_1, B3_1] = Lifted,
	%% get_blocks_by_miner gives no ordering guarantee for miner1's two blocks,
	%% so sort by hash before comparing.
	SortByHash = fun(A, B) -> A#block.indep_hash < B#block.indep_hash end,
	?assertEqual([B1_1, B3_1],
			lists:sort(SortByHash, get_blocks_by_miner(Tab, <<"miner1">>))),
	?assertEqual([B2_1], get_blocks_by_miner(Tab, <<"miner2">>)),
	?assertEqual([], get_blocks_by_miner(Tab, <<"miner3">>)),
	ets:delete(Tab).



================================================
FILE: apps/arweave/src/ar_block_index.erl
================================================
-module(ar_block_index).
-export([init/1, update/2, member/1, get_list/1, get_list_by_hash/1,
		get_element_by_height/1, get_block_bounds/1, get_block_bounds_with_height/1,
		get_intersection/2, get_intersection/1, get_range/2, get_last/0]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Store the given block index in ETS. The index arrives newest-first;
%% reverse it so heights are assigned from 0 upwards.
init(BI) ->
	init(lists:reverse(BI), 0).

%% @doc Insert the new block index elements from BI and remove the N orphaned ones.
update([], 0) ->
	ok;
update(BI, 0) ->
	%% All orphans are gone; read the current tip height and append the rest.
	{_WeaveSize, TipHeight, _H, _TXRoot} = ets:last(block_index),
	update2(BI, TipHeight + 1);
update(BI, N) ->
	%% Drop the highest entry and repeat until the N orphans are removed.
	ets:delete(block_index, ets:last(block_index)),
	update(BI, N - 1).

%% @doc Return true if the given block hash is found in the index.
member(H) ->
	member(H, ets:last(block_index)).

%% @doc Return the list of {H, WeaveSize, TXRoot} triplets up to the given
%% Height (including) sorted from latest to earliest.
get_list(Height) ->
	get_list([], ets:first(block_index), -1, Height).

%% @doc Return the list of {H, WeaveSize, TXRoot} triplets up to the block with
%% the given hash H (including) sorted from latest to earliest.
get_list_by_hash(H) ->
	get_list_by_hash([], ets:first(block_index), -1, H).

%% @doc Return the {H, WeaveSize, TXRoot} triplet for the given Height or not_found.
get_element_by_height(Height) ->
	try ets:slot(block_index, Height) of
		'$end_of_table' ->
			not_found;
		[{{WeaveSize, Height, H, TXRoot}}] ->
			{H, WeaveSize, TXRoot}
	catch
		%% ets:slot/2 raises badarg for out-of-range slots.
		error:_ ->
			not_found
	end.

%% @doc Return {BlockStartOffset, BlockEndOffset, TXRoot} where
%% Offset >= BlockStartOffset, Offset < BlockEndOffset.
get_block_bounds(Offset) ->
	{BlockStart, BlockEnd, TXRoot, _Height} = get_block_bounds_with_height(Offset),
	{BlockStart, BlockEnd, TXRoot}.

%% @doc Return {BlockStartOffset, BlockEndOffset, TXRoot, Height} where
%% Offset >= BlockStartOffset, Offset < BlockEndOffset.
get_block_bounds_with_height(Offset) ->
	%% Table keys are {WeaveSize, Height, H, TXRoot}. The probe {Offset, n, n, n}
	%% sorts after every key whose weave size equals Offset (atoms compare
	%% greater than integers), so ets:next/2 lands on the first block whose
	%% end offset exceeds Offset - i.e. the block containing Offset.
	{WeaveSize, Height, _H, TXRoot} = Key = ets:next(block_index, {Offset, n, n, n}),
	case Height of
		0 ->
			{0, WeaveSize, TXRoot, 0};
		_ ->
			%% The previous entry's weave size is this block's start offset.
			{PrevWeaveSize, _, _, _} = ets:prev(block_index, Key),
			{PrevWeaveSize, WeaveSize, TXRoot, Height}
	end.

%% @doc Return {Height, {H, WeaveSize, TXRoot}} with the triplet present in both
%% the cached block index and the given BI or no_intersection.
get_intersection(Height, _BI) when Height < 0 ->
	no_intersection;
get_intersection(_Height, []) ->
	no_intersection;
get_intersection(Height, BI) ->
	%% Walk the given BI oldest-first alongside the table starting at Height;
	%% the helper returns the last element both sides agree on.
	ReverseBI = lists:reverse(BI),
	[{H, _, _} = Elem | ReverseBI2] = ReverseBI,
	case catch ets:slot(block_index, Height) of
		[{{_, Height, H, _} = Entry}] ->
			get_intersection(Height + 1, Elem, ReverseBI2, ets:next(block_index, Entry));
		_ ->
			no_intersection
	end.

%% @doc Return the {H, WeaveSize, TXRoot} triplet present in both
%% the cached block index and the given BI or no_intersection.
get_intersection([]) ->
	no_intersection;
get_intersection(BI) ->
	%% Probe with WeaveSize - 1 so ets:next/2 returns the first table entry
	%% whose weave size is >= that of the oldest element of BI.
	{H, WeaveSize, _TXRoot} = lists:last(BI),
	get_intersection2({H, WeaveSize}, tl(lists:reverse(BI)),
			ets:next(block_index, {WeaveSize - 1, n, n, n})).

%% @doc Return the list of {H, WeaveSize, TXRoot} for blocks with Height >= Start, =< End,
%% sorted from the largest height to the smallest.
get_range(Start, End) when Start > End ->
	[];
get_range(Start, End) ->
	case catch ets:slot(block_index, Start) of
		[{{WeaveSize, _Height, H, TXRoot} = Entry}] ->
			%% get_range2/3 accumulates ascending by height; reverse at the end.
			lists:reverse([{H, WeaveSize, TXRoot}
					| get_range2(Start + 1, End, ets:next(block_index, Entry))]);
		_ ->
			{error, invalid_start}
	end.

%% @doc Return the last element in the block index.
get_last() ->
	ets:last(block_index).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Insert the elements oldest-first, assigning consecutive heights.
init([], _Height) ->
	ok;
init([{H, WeaveSize, TXRoot} | BI], Height) ->
	ets:insert(block_index, {{WeaveSize, Height, H, TXRoot}}),
	init(BI, Height + 1).

%% Append the elements oldest-first starting from the given height.
update2([], _Height) ->
	ok;
update2([{H, WeaveSize, TXRoot} | BI], Height) ->
	ets:insert(block_index, {{WeaveSize, Height, H, TXRoot}}),
	update2(BI, Height + 1).

%% Scan the table from the given key towards the oldest entry looking for H.
member(H, {_, _, H, _}) ->
	true;
member(_H, '$end_of_table') ->
	false;
member(H, Key) ->
	member(H, ets:prev(block_index, Key)).

%% Accumulate triplets while each table key's height is exactly one above the
%% previous one; any gap means the table changed underneath us.
get_list(BI, '$end_of_table', _Height, _MaxHeight) ->
	BI;
get_list(BI, _Elem, Height, MaxHeight) when Height >= MaxHeight ->
	BI;
get_list(BI, {WeaveSize, NextHeight, H, TXRoot} = Key, Height, MaxHeight)
		when NextHeight == Height + 1 ->
	get_list([{H, WeaveSize, TXRoot} | BI], ets:next(block_index, Key), Height + 1,
			MaxHeight);
get_list(_BI, _Key, _Height, MaxHeight) ->
	%% An extremely unlikely race condition should have occured where some blocks were
	%% orphaned right after we passed some of them here, and new blocks have been added
	%% right before we reached the end of the table. Start over.
	get_list(MaxHeight).

%% Same as get_list/4 but stops (inclusively) at the entry with hash H.
get_list_by_hash(BI, '$end_of_table', _Height, _H) ->
	BI;
get_list_by_hash(BI, {WeaveSize, NextHeight, H, TXRoot}, Height, H)
		when NextHeight == Height + 1 ->
	%% The target hash is reached - include it and stop.
	[{H, WeaveSize, TXRoot} | BI];
get_list_by_hash(BI, {WeaveSize, NextHeight, H, TXRoot} = Key, Height, H2)
		when NextHeight == Height + 1 ->
	get_list_by_hash([{H, WeaveSize, TXRoot} | BI], ets:next(block_index, Key),
			Height + 1, H2);
get_list_by_hash(_BI, _Key, _Height, H) ->
	%% An extremely unlikely race condition should have occured where some blocks were
	%% orphaned right after we passed some of them here, and new blocks have been added
	%% right before we reached the end of the table. Start over.
	get_list_by_hash(H).
%% Walk the table and the reversed BI in lockstep; stop at the first
%% disagreement and return the last matching element together with its height.
get_intersection(Height, Entry, _ReverseBI, '$end_of_table') ->
	{Height - 1, Entry};
get_intersection(Height, Entry, [], _Entry) ->
	{Height - 1, Entry};
get_intersection(Height, _Entry, [{H, _, _} = Elem | ReverseBI], {_, Height, H, _} = Entry) ->
	get_intersection(Height + 1, Elem, ReverseBI, ets:next(block_index, Entry));
get_intersection(Height, Entry, _ReverseBI, _TableEntry) ->
	{Height - 1, Entry}.

%% Skip over table entries sharing the target weave size until the hash
%% matches; a larger weave size means the target is absent.
get_intersection2(_, _, '$end_of_table') ->
	no_intersection;
get_intersection2({_, WeaveSize}, _, {WeaveSize2, _, _, _}) when WeaveSize2 > WeaveSize ->
	no_intersection;
get_intersection2({H, WeaveSize}, BI, {WeaveSize, _, H, TXRoot} = Elem) ->
	get_intersection3(ets:next(block_index, Elem), BI, {H, WeaveSize, TXRoot});
get_intersection2({H, WeaveSize}, BI, {WeaveSize, _, _, _} = Elem) ->
	get_intersection2({H, WeaveSize}, BI, ets:next(block_index, Elem)).

%% Follow the run of matching entries upwards; return the highest match.
get_intersection3({WeaveSize, _, H, TXRoot} = Key, [{H, WeaveSize, TXRoot} | BI], _Elem) ->
	get_intersection3(ets:next(block_index, Key), BI, {H, WeaveSize, TXRoot});
get_intersection3(_, _, {H, WeaveSize, TXRoot}) ->
	{H, WeaveSize, TXRoot}.

%% Collect consecutive entries (ascending by height) until End or table end.
get_range2(Start, End, _Elem) when Start > End ->
	[];
get_range2(_Start, _End, '$end_of_table') ->
	[];
get_range2(Start, End, {WeaveSize, _Height, H, TXRoot} = Elem) ->
	[{H, WeaveSize, TXRoot} | get_range2(Start + 1, End, ets:next(block_index, Elem))].



================================================
FILE: apps/arweave/src/ar_block_pre_validator.erl
================================================
-module(ar_block_pre_validator).

-behaviour(gen_server).

-export([start_link/0, pre_validate/3]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-record(state, {
	%% The priority queue storing the validation requests.
	pqueue = gb_sets:new(),
	%% The total size in bytes of the priority queue.
	size = 0,
	%% The map IP => the timestamp of the last block from this IP.
	ip_timestamps = #{},
	%% Minimum milliseconds between processed blocks from the same IP.
	throttle_by_ip_interval,
	%% The map SolutionHash => the timestamp of the last block with this solution hash.
	hash_timestamps = #{},
	%% Minimum milliseconds between processed blocks with the same solution hash.
	throttle_by_solution_interval
}).

%% The maximum size in bytes the blocks enqueued for pre-validation can occupy.
-define(MAX_PRE_VALIDATION_QUEUE_SIZE, (200 * ?MiB)).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Partially validate the received block. The validation consists of multiple
%% stages. The process is aiming to increase resistance against DDoS attacks.
%% The first stage is the quickest and performed synchronously when this function
%% is called. Afterwards, the block is put in a limited-size priority queue.
%% Bigger-height blocks from better-rated peers have higher priority. Additionally,
%% the processing is throttled by IP and solution hash.
%% Returns: ok, invalid, skipped
pre_validate(B, Peer, ReceiveTimestamp) ->
	#block{ indep_hash = H } = B,
	case ar_ignore_registry:member(H) of
		true ->
			%% The block is already being handled (or was handled) - drop it.
			skipped;
		false ->
			%% Register the hash so concurrent deliveries of the same block
			%% are ignored while this one goes through validation.
			Ref = make_ref(),
			ar_ignore_registry:add_ref(H, Ref),
			erlang:put(ignore_registry_ref, Ref),
			B2 = B#block{ receive_timestamp = ReceiveTimestamp },
			case pre_validate_is_peer_banned(B2, Peer) of
				enqueued ->
					%% The synchronous stages passed; the block now waits in
					%% the priority queue and keeps its registry entry.
					?LOG_DEBUG([{event, enqueued_block}, {hash, ar_util:encode(H)},
							{peer, ar_util:format_peer(Peer)}]),
					ok;
				Other ->
					%% Validation stopped early - release the registry entry.
					ar_ignore_registry:remove_ref(H, Ref),
					Other
			end
	end.

%%%===================================================================
%%% gen_server callbacks.
%%%===================================================================

%% Subscribe to block events, read the throttling intervals from the
%% configuration, and schedule the first pass over the validation queue.
init([]) ->
	gen_server:cast(?MODULE, pre_validate),
	ok = ar_events:subscribe(block),
	{ok, Config} = arweave_config:get_env(),
	ThrottleBySolutionInterval = Config#config.block_throttle_by_solution_interval,
	ThrottleByIPInterval = Config#config.block_throttle_by_ip_interval,
	{ok, #state{ throttle_by_ip_interval = ThrottleByIPInterval,
			throttle_by_solution_interval = ThrottleBySolutionInterval }}.

%% Take the highest-priority request off the queue and, unless the block is
%% throttled or already processed, run the deeper pre-validation stages.
handle_cast(pre_validate, #state{ pqueue = Q, size = Size,
		ip_timestamps = IPTimestamps, hash_timestamps = HashTimestamps,
		throttle_by_ip_interval = ThrottleByIPInterval,
		throttle_by_solution_interval = ThrottleBySolutionInterval } = State) ->
	case gb_sets:is_empty(Q) of
		true ->
			%% Nothing queued - poll again shortly.
			ar_util:cast_after(50, ?MODULE, pre_validate),
			{noreply, State};
		false ->
			{{_, {B, PrevB, SolutionResigned, Peer, Ref}}, Q2} = gb_sets:take_largest(Q),
			BlockSize = byte_size(term_to_binary(B)),
			Size2 = Size - BlockSize,
			BH = B#block.indep_hash,
			case ar_ignore_registry:permanent_member(BH) of
				true ->
					%% The block was fully processed while it sat in the queue.
					?LOG_DEBUG([{event, indep_hash_already_processed2},
							{hash, ar_util:encode(BH)}]),
					ar_ignore_registry:remove_ref(BH, Ref),
					gen_server:cast(?MODULE, pre_validate),
					{noreply, State#state{ pqueue = Q2, size = Size2 }};
				false ->
					ThrottleByIPResult = throttle_by_ip(Peer, IPTimestamps,
							ThrottleByIPInterval),
					{IPTimestamps3, HashTimestamps3} =
						case ThrottleByIPResult of
							false ->
								%% Too soon after the last block from this IP.
								?LOG_DEBUG([{event, dropping_block},
										{reason, throttle_by_ip},
										{hash, ar_util:encode(BH)},
										{peer, ar_util:format_peer(Peer)}]),
								ar_ignore_registry:remove_ref(BH, Ref),
								{IPTimestamps, HashTimestamps};
							{true, IPTimestamps2} ->
								case throttle_by_solution_hash(B#block.hash,
										HashTimestamps, ThrottleBySolutionInterval) of
									{true, HashTimestamps2} ->
										?LOG_INFO([{event, processing_block},
												{peer, ar_util:format_peer(Peer)},
												{height, B#block.height},
												{step_number, ar_block:vdf_step_number(B)},
												{block, ar_util:encode(BH)},
												{miner_address,
													ar_util:encode(B#block.reward_addr)},
												{previous_block,
													ar_util:encode(PrevB#block.indep_hash)},
												{solution_hash,
													ar_util:encode(B#block.hash)},
												{cdiff, B#block.cumulative_diff},
												{prev_cdiff,
													PrevB#block.cumulative_diff}]),
										pre_validate_nonce_limiter_seed_data(B, PrevB,
												SolutionResigned, Peer),
										ar_ignore_registry:remove_ref(BH, Ref),
										record_block_pre_validation_time(
												B#block.receive_timestamp),
										{IPTimestamps2, HashTimestamps2};
									false ->
										%% A block with this solution hash was
										%% processed recently.
										?LOG_DEBUG([{event, dropping_block},
												{reason, throttle_by_solution_hash},
												{hash, ar_util:encode(BH)},
												{peer, ar_util:format_peer(Peer)}]),
										ar_ignore_registry:remove_ref(BH, Ref),
										{IPTimestamps2, HashTimestamps}
								end
						end,
					gen_server:cast(?MODULE, pre_validate),
					{noreply, State#state{ pqueue = Q2, size = Size2,
							ip_timestamps = IPTimestamps3,
							hash_timestamps = HashTimestamps3 }}
			end
	end;
%% Add a pre-validation request to the priority queue, dropping the lowest
%% priority entries when the queue exceeds its size budget.
handle_cast({enqueue, {B, PrevB, SolutionResigned, Peer, Ref}}, State) ->
	#state{ pqueue = Q, size = Size } = State,
	Priority = priority(B, Peer),
	BlockSize = byte_size(term_to_binary(B)),
	Size2 = Size + BlockSize,
	Q2 = gb_sets:add_element({Priority, {B, PrevB, SolutionResigned, Peer, Ref}}, Q),
	{Q3, Size3} =
		case Size2 > ?MAX_PRE_VALIDATION_QUEUE_SIZE of
			true ->
				drop_tail(Q2, Size2);
			false ->
				{Q2, Size2}
		end,
	{noreply, State#state{ pqueue = Q3, size = Size3 }};
%% Expire the per-IP throttling record once its interval has elapsed.
handle_cast({may_be_remove_ip_timestamp, IP}, #state{ ip_timestamps = Timestamps,
		throttle_by_ip_interval = ThrottleInterval } = State) ->
	Now = os:system_time(millisecond),
	case maps:get(IP, Timestamps, not_set) of
		not_set ->
			{noreply, State};
		Timestamp when Timestamp < Now - ThrottleInterval ->
			{noreply, State#state{ ip_timestamps = maps:remove(IP, Timestamps) }};
		_ ->
			{noreply, State}
	end;
%% Expire the per-solution-hash throttling record once its interval has elapsed.
handle_cast({may_be_remove_h_timestamp, H}, #state{ hash_timestamps = Timestamps,
		throttle_by_solution_interval = ThrottleInterval } = State) ->
	Now = os:system_time(millisecond),
	case maps:get(H, Timestamps, not_set) of
		not_set ->
			{noreply, State};
		Timestamp when Timestamp < Now - ThrottleInterval ->
			{noreply, State#state{ hash_timestamps = maps:remove(H, Timestamps) }};
		_ ->
			{noreply, State}
	end;
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Block events are subscribed to in init/1; they require no action here.
handle_info({event, block, _}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% First pre-validation stage: refuse blocks from banned peers.
pre_validate_is_peer_banned(B, Peer) ->
	case ar_blacklist_middleware:is_peer_banned(Peer) of
		not_banned ->
			pre_validate_previous_block(B, Peer);
		banned ->
			?LOG_DEBUG([{event, peer_banned},
					{hash, ar_util:encode(B#block.indep_hash)}]),
			skipped
	end.

%% Check the link to the previous block: it must be cached, and the height
%% and the declared previous cumulative difficulty must line up with it.
pre_validate_previous_block(B, Peer) ->
	PrevH = B#block.previous_block,
	case ar_node:get_block_shadow_from_cache(PrevH) of
		not_found ->
			%% We have not seen the previous block yet - might happen if two
			%% successive blocks are distributed at the same time. Do not
			%% ban the peer as the block might be valid. If the network adopts
			%% this block, ar_poller will catch up.
			?LOG_DEBUG([{event, previous_block_not_found},
					{hash, ar_util:encode(B#block.indep_hash)},
					{prev_hash, ar_util:encode(PrevH)}]),
			skipped;
		#block{ height = PrevHeight } = PrevB ->
			case B#block.height == PrevHeight + 1 of
				false ->
					?LOG_DEBUG([{event, previous_block_height_mismatch},
							{hash, ar_util:encode(B#block.indep_hash)},
							{prev_hash, ar_util:encode(PrevH)},
							{height, B#block.height},
							{prev_height, PrevHeight}]),
					invalid;
				true ->
					%% Assert: blocks below the 2.6 fork height are not
					%% expected to reach this point.
					true = B#block.height >= ar_fork:height_2_6(),
					PrevCDiff = B#block.previous_cumulative_diff,
					case PrevB#block.cumulative_diff == PrevCDiff of
						true ->
							pre_validate_proof_sizes(B, PrevB, Peer);
						false ->
							?LOG_DEBUG([{event,
									previous_block_cumulative_diff_mismatch},
									{hash, ar_util:encode(B#block.indep_hash)},
									{prev_hash, ar_util:encode(PrevH)},
									{cumulative_diff, PrevCDiff},
									{prev_cumulative_diff,
										PrevB#block.cumulative_diff}]),
							invalid
					end
			end
	end.

%% Reject blocks whose proofs of access fail the size validation.
pre_validate_proof_sizes(B, PrevB, Peer) ->
	case ar_block:validate_proof_size(B#block.poa)
			andalso ar_block:validate_proof_size(B#block.poa2) of
		true ->
			may_be_pre_validate_first_chunk_hash(B, PrevB, Peer);
		false ->
			post_block_reject_warn(B, check_proof_size, Peer),
			ar_events:send(block, {rejected, invalid_proof_size,
					B#block.indep_hash, Peer}),
			invalid
	end.

%% Verify the SHA-256 of the first proof-of-access chunk against the header.
may_be_pre_validate_first_chunk_hash(B, PrevB, Peer) ->
	case crypto:hash(sha256, (B#block.poa)#poa.chunk) == B#block.chunk_hash of
		false ->
			post_block_reject_warn(B, check_first_chunk, Peer),
			ar_events:send(block, {rejected, invalid_first_chunk,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			may_be_pre_validate_second_chunk_hash(B, PrevB, Peer)
	end.

%% Without a second recall byte, poa2 must be empty (only enforced from the
%% 2.7.2 fork height on); otherwise verify the second chunk's hash.
may_be_pre_validate_second_chunk_hash(#block{ recall_byte2 = undefined } = B,
		PrevB, Peer) ->
	case B#block.height < ar_fork:height_2_7_2() orelse B#block.poa2 == #poa{} of
		false ->
			post_block_reject_warn(B, check_second_chunk, Peer),
			ar_events:send(block, {rejected, invalid_poa2_recall_byte2_undefined,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			%% The block is not supposed to have the second chunk.
			may_be_pre_validate_first_unpacked_chunk_hash(B, PrevB, Peer)
	end;
may_be_pre_validate_second_chunk_hash(B, PrevB, Peer) ->
	case crypto:hash(sha256, (B#block.poa2)#poa.chunk) == B#block.chunk2_hash of
		false ->
			post_block_reject_warn(B, check_second_chunk, Peer),
			ar_events:send(block, {rejected, invalid_second_chunk,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			may_be_pre_validate_first_unpacked_chunk_hash(B, PrevB, Peer)
	end.

%% For packing difficulty >= 1 the first unpacked chunk must hash to the
%% declared value and be exactly ?DATA_CHUNK_SIZE bytes; otherwise the block
%% must declare no unpacked chunk hashes at all.
may_be_pre_validate_first_unpacked_chunk_hash(
		#block{ packing_difficulty = PackingDifficulty } = B, PrevB, Peer)
		when PackingDifficulty >= 1 ->
	PoA = B#block.poa,
	case crypto:hash(sha256, PoA#poa.unpacked_chunk) == B#block.unpacked_chunk_hash
			%% The unpacked chunk is expected to be 0-padded when smaller than
			%% ?DATA_CHUNK_SIZE.
			andalso byte_size(PoA#poa.unpacked_chunk) == ?DATA_CHUNK_SIZE of
		false ->
			post_block_reject_warn(B, check_first_unpacked_chunk, Peer),
			ar_events:send(block, {rejected, invalid_first_unpacked_chunk,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			may_be_pre_validate_second_unpacked_chunk_hash(B, PrevB, Peer)
	end;
may_be_pre_validate_first_unpacked_chunk_hash(B, PrevB, Peer) ->
	#block{ poa = PoA, poa2 = PoA2 } = B,
	#block{ unpacked_chunk_hash = UnpackedChunkHash,
			unpacked_chunk2_hash = UnpackedChunk2Hash } = B,
	case {UnpackedChunkHash, UnpackedChunk2Hash} == {undefined, undefined} of
		false ->
			post_block_reject_warn(B, check_first_unpacked_chunk_hash, Peer),
			ar_events:send(block, {rejected, invalid_first_unpacked_chunk_hash,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			%% Strip the unpacked chunks before the further validation stages.
			pre_validate_indep_hash(B#block{ poa = PoA#poa{ unpacked_chunk = <<>> },
					poa2 = PoA2#poa{ unpacked_chunk = <<>> } }, PrevB, Peer)
	end.

%% Same check for the second unpacked chunk when a second recall byte is set;
%% without one, the second unpacked chunk hash must be undefined.
may_be_pre_validate_second_unpacked_chunk_hash(
		#block{ recall_byte2 = RecallByte2 } = B, PrevB, Peer)
		when RecallByte2 /= undefined ->
	PoA2 = B#block.poa2,
	case crypto:hash(sha256, PoA2#poa.unpacked_chunk) == B#block.unpacked_chunk2_hash
			%% The unpacked chunk is expected to be 0-padded when smaller than
			%% ?DATA_CHUNK_SIZE.
			andalso byte_size(PoA2#poa.unpacked_chunk) == ?DATA_CHUNK_SIZE of
		false ->
			post_block_reject_warn(B, check_second_unpacked_chunk, Peer),
			ar_events:send(block, {rejected, invalid_second_unpacked_chunk,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			pre_validate_indep_hash(B, PrevB, Peer)
	end;
may_be_pre_validate_second_unpacked_chunk_hash(B, PrevB, Peer) ->
	#block{ poa2 = PoA2 } = B,
	case B#block.unpacked_chunk2_hash == undefined of
		false ->
			post_block_reject_warn(B, check_second_unpacked_chunk_hash, Peer),
			ar_events:send(block, {rejected, invalid_second_unpacked_chunk_hash,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			pre_validate_indep_hash(B#block{ poa2 = PoA2#poa{ unpacked_chunk = <<>> } },
					PrevB, Peer)
	end.

%% Recompute the independent hash (compute_hash may report an invalid
%% signature) and require it to match the hash the block was announced under.
pre_validate_indep_hash(#block{ indep_hash = H } = B, PrevB, Peer) ->
	case catch compute_hash(B, PrevB#block.cumulative_diff) of
		{ok, H} ->
			case ar_ignore_registry:permanent_member(H) of
				true ->
					?LOG_DEBUG([{event, indep_hash_already_processed},
							{hash, ar_util:encode(H)}]),
					skipped;
				false ->
					pre_validate_timestamp(B, PrevB, Peer)
			end;
		{error, invalid_signature} ->
			post_block_reject_warn(B, check_signature, Peer),
			ar_events:send(block, {rejected, invalid_signature,
					B#block.indep_hash, Peer}),
			invalid;
		{ok, _DifferentH} ->
			post_block_reject_warn(B, check_indep_hash, Peer),
			ar_events:send(block, {rejected, invalid_hash,
					B#block.indep_hash, Peer}),
			invalid
	end.

%% Verify the block timestamp via ar_block:verify_timestamp/2.
pre_validate_timestamp(B, PrevB, Peer) ->
	#block{ indep_hash = H } = B,
	case ar_block:verify_timestamp(B, PrevB) of
		true ->
			pre_validate_existing_solution_hash(B, PrevB, Peer);
		false ->
			post_block_reject_warn(B, check_timestamp, Peer,
					[{block_time, B#block.timestamp},
					{current_time, os:system_time(seconds)}]),
			ar_events:send(block, {rejected, invalid_timestamp, H, Peer}),
			invalid
	end.
%% If a block with the same solution hash is already cached, the new block must
%% reuse the exact same mining solution (same nonce, recall bytes, VDF data,
%% chunk hashes, etc. - asserted by the repeated bound variables in the record
%% pattern below). In that case we can cheaply re-validate the PoA against the
%% cached proofs and skip the expensive full solution validation; the block is
%% then enqueued with SolutionResigned = true. If no cached block matches, we
%% fall through to the full validation path with SolutionResigned = false.
pre_validate_existing_solution_hash(B, PrevB, Peer) ->
	Height = B#block.height,
	SolutionH = B#block.hash,
	#block{ hash = SolutionH, nonce = Nonce, reward_addr = RewardAddr,
			hash_preimage = HashPreimage, recall_byte = RecallByte,
			partition_number = PartitionNumber, recall_byte2 = RecallByte2,
			nonce_limiter_info = #nonce_limiter_info{ output = Output,
					global_step_number = StepNumber, seed = Seed,
					partition_upper_bound = UpperBound,
					last_step_checkpoints = LastStepCheckpoints },
			chunk_hash = ChunkHash, chunk2_hash = Chunk2Hash,
			unpacked_chunk_hash = UnpackedChunkHash,
			unpacked_chunk2_hash = UnpackedChunk2Hash,
			packing_difficulty = PackingDifficulty,
			replica_format = ReplicaFormat } = B,
	H = B#block.indep_hash,
	CDiff = B#block.cumulative_diff,
	PrevCDiff = PrevB#block.cumulative_diff,
	GetCachedSolution =
		case ar_block_cache:get_by_solution_hash(block_cache, SolutionH, H,
				CDiff, PrevCDiff) of
			not_found ->
				not_found;
			%% All variables below are already bound, so this clause only
			%% matches a cached block sharing the complete solution.
			#block{ hash = SolutionH, nonce = Nonce, reward_addr = RewardAddr,
					hash_preimage = HashPreimage, recall_byte = RecallByte,
					partition_number = PartitionNumber,
					nonce_limiter_info = #nonce_limiter_info{ output = Output,
							last_step_checkpoints = LastStepCheckpoints,
							seed = Seed, partition_upper_bound = UpperBound,
							global_step_number = StepNumber },
					chunk_hash = ChunkHash, chunk2_hash = Chunk2Hash,
					unpacked_chunk_hash = UnpackedChunkHash,
					unpacked_chunk2_hash = UnpackedChunk2Hash,
					poa = #poa{ chunk = Chunk }, poa2 = #poa{ chunk = Chunk2 },
					recall_byte2 = RecallByte2,
					packing_difficulty = PackingDifficulty2,
					replica_format = ReplicaFormat } = CacheB ->
				LastStepPrevOutput = get_last_step_prev_output(B),
				LastStepPrevOutput2 = get_last_step_prev_output(CacheB),
				%% From 2.9 on, the packing difficulty must also match.
				case LastStepPrevOutput == LastStepPrevOutput2
						andalso (Height < ar_fork:height_2_9()
								orelse PackingDifficulty == PackingDifficulty2) of
					true ->
						%% Borrow the cached chunks before re-validating the
						%% PoA against the cached proof arguments.
						B2 = B#block{
								poa = (B#block.poa)#poa{ chunk = Chunk },
								poa2 = (B#block.poa2)#poa{ chunk = Chunk2 } },
						case validate_poa_against_cached_poa(B2, CacheB) of
							{true, B3} ->
								{valid, B3};
							false ->
								{invalid, #{
									code => check_resigned_solution_hash_poa_mismatch,
									b2 => B2, cache_b => CacheB,
									prev_b => PrevB }}
						end;
					false ->
						{invalid, #{
							code => check_resigned_solution_hash_last_step_prev_output_mismatch,
							packing_difficulty => PackingDifficulty,
							packing_difficulty2 => PackingDifficulty2,
							last_step_prev_output => LastStepPrevOutput,
							last_step_prev_output2 => LastStepPrevOutput2,
							b => B, cache_b => CacheB, prev_b => PrevB }}
				end;
			CacheB2 ->
				%% A cached block with the same solution hash but a different
				%% solution - reject.
				{invalid, #{ code => check_resigned_solution_hash_block_mismatch,
						cache_b => CacheB2, b => B, prev_b => PrevB }}
		end,
	%% The re-signed block must still pass the difficulty check on its own.
	ValidatedCachedSolutionDiff =
		case GetCachedSolution of
			not_found ->
				not_found;
			{invalid, ExtraData} ->
				{invalid, ExtraData};
			{valid, B4} ->
				case ar_node_utils:block_passes_diff_check(B) of
					true ->
						{valid, B4};
					false ->
						{invalid, #{
							code => check_resigned_solution_hash_diff_mismatch,
							b => B }}
				end
		end,
	case ValidatedCachedSolutionDiff of
		not_found ->
			pre_validate_nonce_limiter_global_step_number(B, PrevB, false, Peer);
		{invalid, ExtraData2} ->
			Code = maps:get(code, ExtraData2, check_resigned_solution_hash),
			{ok, Config} = arweave_config:get_env(),
			%% Dump the full mismatch context only when the operator enabled
			%% the extended trace; it can be large.
			case lists:member(extended_block_validation_trace,
					Config#config.enable) of
				true ->
					post_block_reject_warn_and_error_dump(B, Code, Peer,
							ExtraData2);
				false ->
					post_block_reject_warn(B, Code, Peer)
			end,
			ar_events:send(block, {rejected, invalid_resigned_solution_hash,
					B#block.indep_hash, Peer}),
			invalid;
		{valid, B5} ->
			pre_validate_nonce_limiter_global_step_number(B5, PrevB, true, Peer)
	end.

%% Return the VDF output preceding the block's last step: the second element
%% of the steps list when present, the declared prev_output otherwise.
get_last_step_prev_output(B) ->
	#block{ nonce_limiter_info = Info } = B,
	#nonce_limiter_info{ steps = Steps, prev_output = PrevOutput } = Info,
	case Steps of
		[_, PrevStepOutput | _] ->
			PrevStepOutput;
		_ ->
			PrevOutput
	end.
%% Re-validate the new block's proofs of access against the argument caches
%% stored with the cached block sharing the same solution. The new block's
%% poa/poa2 records are inserted as the 5th tuple element of the cached
%% argument tuple, and the expected chunk identifier is appended, so
%% ar_poa:validate/1 can confirm the proof without re-unpacking the chunk.
%% Returns {true, UpdatedB} with the proof caches attached, or false.
validate_poa_against_cached_poa(B, CacheB) ->
	#block{ poa_cache = {ArgCache, ChunkID}, poa2_cache = Cache2 } = CacheB,
	Args = erlang:append_element(
			erlang:insert_element(5, ArgCache, B#block.poa), ChunkID),
	case ar_poa:validate(Args) of
		{true, ChunkID} ->
			B2 = B#block{ poa_cache = {ArgCache, ChunkID} },
			case B#block.recall_byte2 of
				undefined ->
					%% No second recall chunk - done.
					{true, B2};
				_ ->
					{ArgCache2, Chunk2ID} = Cache2,
					Args2 = erlang:append_element(
							erlang:insert_element(5, ArgCache2, B#block.poa2),
							Chunk2ID),
					case ar_poa:validate(Args2) of
						{true, Chunk2ID} ->
							{true, B2#block{ poa2_cache = Cache2 }};
						_ ->
							false
					end
			end;
		_ ->
			false
	end.

%% Validate the VDF (nonce limiter) step number: the block must be ahead of
%% the previous block on the VDF timeline, not too far ahead of our current
%% step, carry exactly the expected number of steps, and chain its prev_output
%% to the previous block's output.
pre_validate_nonce_limiter_global_step_number(B, PrevB, SolutionResigned, Peer) ->
	BlockInfo = B#block.nonce_limiter_info,
	StepNumber = ar_block:vdf_step_number(B),
	PrevBlockInfo = PrevB#block.nonce_limiter_info,
	PrevStepNumber = ar_block:vdf_step_number(PrevB),
	CurrentStepNumber =
		case ar_nonce_limiter:get_current_step_number(PrevB) of
			not_found ->
				%% Not necessarily computed already, but will be after we
				%% validate the previous block's chain.
				PrevStepNumber;
			N ->
				N
		end,
	IsAhead = ar_nonce_limiter:is_ahead_on_the_timeline(
			BlockInfo, PrevBlockInfo),
	MaxDistance = ?NONCE_LIMITER_MAX_CHECKPOINTS_COUNT,
	Steps = BlockInfo#nonce_limiter_info.steps,
	ExpectedStepCount = get_expected_step_count(StepNumber, PrevStepNumber,
			MaxDistance, Steps),
	PrevOutput = BlockInfo#nonce_limiter_info.prev_output,
	case IsAhead andalso StepNumber - CurrentStepNumber =< MaxDistance
			andalso length(Steps) == ExpectedStepCount
			andalso PrevOutput == PrevBlockInfo#nonce_limiter_info.output of
		false ->
			post_block_reject_warn(B, check_nonce_limiter_step_number, Peer,
					[{block_step_number, StepNumber},
					{current_step_number, CurrentStepNumber}]),
			H = B#block.indep_hash,
			ar_events:send(block,
					{rejected, invalid_nonce_limiter_global_step_number, H, Peer}),
			invalid;
		true ->
			%% Track how far ahead of our local VDF the block is.
			prometheus_gauge:set(block_vdf_advance, StepNumber - CurrentStepNumber),
			pre_validate_previous_solution_hash(B, PrevB, SolutionResigned, Peer)
	end.

-ifdef(LOCALNET).
%% In localnet we allow same-step blocks for faster block production. Consequent
%% blocks on the same steps have the same "steps" and "expected step count" values.
get_expected_step_count(StepNumber, PrevStepNumber, _MaxDistance, Steps) ->
	case StepNumber - PrevStepNumber > 0 of
		true ->
			StepNumber - PrevStepNumber;
		false ->
			length(Steps)
	end.
-else.
%% The block carries one VDF step per step advanced since the previous block,
%% capped at MaxDistance.
get_expected_step_count(StepNumber, PrevStepNumber, MaxDistance, _Steps) ->
	min(MaxDistance, StepNumber - PrevStepNumber).
-endif.

%% The block must reference the previous block's solution hash.
pre_validate_previous_solution_hash(B, PrevB, SolutionResigned, Peer) ->
	case B#block.previous_solution_hash == PrevB#block.hash of
		false ->
			post_block_reject_warn_and_error_dump(B, check_previous_solution_hash,
					Peer),
			ar_events:send(block, {rejected, invalid_previous_solution_hash,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			pre_validate_last_retarget(B, PrevB, SolutionResigned, Peer)
	end.
%% Validate the last retarget data against the previous block, then fall
%% through to the difficulty checks. Only 2.6+ blocks reach this point.
pre_validate_last_retarget(B, PrevB, SolutionResigned, Peer) ->
	true = B#block.height >= ar_fork:height_2_6(),
	case ar_block:verify_last_retarget(B, PrevB) of
		false ->
			reject_with_dump(B, check_last_retarget, invalid_last_retarget, Peer);
		true ->
			pre_validate_difficulty(B, PrevB, SolutionResigned, Peer)
	end.

%% Validate the declared difficulty against the retarget schedule.
pre_validate_difficulty(B, PrevB, SolutionResigned, Peer) ->
	true = B#block.height >= ar_fork:height_2_6(),
	case ar_retarget:validate_difficulty(B, PrevB) of
		true ->
			pre_validate_cumulative_difficulty(B, PrevB, SolutionResigned, Peer);
		_ ->
			reject_with_dump(B, check_difficulty, invalid_difficulty, Peer)
	end.

%% Validate the declared cumulative difficulty.
pre_validate_cumulative_difficulty(B, PrevB, SolutionResigned, Peer) ->
	true = B#block.height >= ar_fork:height_2_6(),
	case ar_block:verify_cumulative_diff(B, PrevB) of
		true ->
			pre_validate_packing_difficulty(B, PrevB, SolutionResigned, Peer);
		false ->
			reject_with_dump(B, check_cumulative_difficulty,
					invalid_cumulative_difficulty, Peer)
	end.

%% Validate the packing difficulty / replica format combination. A block whose
%% solution was already validated via the cache (SolutionResigned == true) is
%% enqueued for the remaining checks right away; otherwise the quick PoW check
%% runs first.
pre_validate_packing_difficulty(B, PrevB, SolutionResigned, Peer) ->
	ReplicaFormatOk = ar_block:validate_replica_format(B#block.height,
			B#block.packing_difficulty, B#block.replica_format),
	case ReplicaFormatOk of
		false ->
			reject_with_dump(B, check_packing_difficulty,
					invalid_packing_difficulty, Peer);
		true when SolutionResigned ->
			Ref = erlang:get(ignore_registry_ref),
			gen_server:cast(?MODULE, {enqueue, {B, PrevB, true, Peer, Ref}}),
			enqueued;
		true ->
			pre_validate_quick_pow(B, PrevB, false, Peer)
	end.

%% Shared rejection path: warn + dump, emit the rejection event, return invalid.
reject_with_dump(B, Step, EventReason, Peer) ->
	post_block_reject_warn_and_error_dump(B, Step, Peer),
	ar_events:send(block, {rejected, EventReason, B#block.indep_hash, Peer}),
	invalid.
%% Cheap proof-of-work check: recompute the solution hash from H0 and the
%% declared hash preimage and verify it passes the difficulty check, before
%% the expensive full validation happens in the queue.
pre_validate_quick_pow(B, PrevB, SolutionResigned, Peer) ->
	#block{ hash_preimage = HashPreimage } = B,
	H0 = ar_block:compute_h0(B, PrevB),
	SolutionHash = ar_block:compute_solution_h(H0, HashPreimage),
	case ar_node_utils:block_passes_diff_check(SolutionHash, B) of
		false ->
			post_block_reject_warn_and_error_dump(B, check_hash_preimage, Peer),
			ar_events:send(block, {rejected, invalid_hash_preimage,
					B#block.indep_hash, Peer}),
			invalid;
		true ->
			Ref = erlang:get(ignore_registry_ref),
			gen_server:cast(?MODULE, {enqueue,
					{B, PrevB, SolutionResigned, Peer, Ref}}),
			enqueued
	end.

%% Validate the VDF seed data: seeds, partition upper bounds, and VDF
%% difficulty must equal what ar_nonce_limiter derives from the previous block.
pre_validate_nonce_limiter_seed_data(B, PrevB, SolutionResigned, Peer) ->
	Info = B#block.nonce_limiter_info,
	#nonce_limiter_info{ global_step_number = StepNumber, seed = Seed,
			next_seed = NextSeed, partition_upper_bound = PartitionUpperBound,
			vdf_difficulty = VDFDifficulty,
			next_partition_upper_bound = NextPartitionUpperBound } = Info,
	%% StepNumber is already bound: this is an assertion that the derived step
	%% number equals the one declared in the nonce limiter info.
	StepNumber = ar_block:vdf_step_number(B),
	ExpectedSeedData = ar_nonce_limiter:get_seed_data(StepNumber, PrevB),
	case ExpectedSeedData == {Seed, NextSeed, PartitionUpperBound,
			NextPartitionUpperBound, VDFDifficulty} of
		true ->
			pre_validate_partition_number(B, PrevB, PartitionUpperBound,
					SolutionResigned, Peer);
		false ->
			post_block_reject_warn_and_error_dump(B, check_nonce_limiter_seed_data,
					Peer),
			ar_events:send(block, {rejected, invalid_nonce_limiter_seed_data,
					B#block.indep_hash, Peer}),
			invalid
	end.

%% The mined partition number may not exceed the maximum implied by the
%% partition upper bound.
pre_validate_partition_number(B, PrevB, PartitionUpperBound, SolutionResigned,
		Peer) ->
	Max = ar_node:get_max_partition_number(PartitionUpperBound),
	case B#block.partition_number > Max of
		true ->
			post_block_reject_warn_and_error_dump(B, check_partition_number, Peer),
			ar_events:send(block, {rejected, invalid_partition_number,
					B#block.indep_hash, Peer}),
			invalid;
		false ->
			pre_validate_nonce(B, PrevB, PartitionUpperBound, SolutionResigned,
					Peer)
	end.
%% The nonce may not exceed the maximum implied by the packing difficulty.
%% Resigned solutions were already fully validated and can be accepted here.
pre_validate_nonce(B, PrevB, PartitionUpperBound, SolutionResigned, Peer) ->
	Max = ar_block:get_max_nonce(B#block.packing_difficulty),
	case B#block.nonce > Max of
		true ->
			post_block_reject_warn_and_error_dump(B, check_nonce, Peer),
			ar_events:send(block, {rejected, invalid_nonce,
					B#block.indep_hash, Peer}),
			invalid;
		false ->
			case SolutionResigned of
				true ->
					accept_block(B, Peer, false);
				false ->
					pre_validate_pow_2_6(B, PrevB, PartitionUpperBound, Peer)
			end
	end.

%% Full proof-of-work check: either the one-chunk solution (H1) or the
%% two-chunk solution (H2) must match the declared solution hash and preimage
%% and pass the corresponding difficulty check.
pre_validate_pow_2_6(B, PrevB, PartitionUpperBound, Peer) ->
	H0 = ar_block:compute_h0(B, PrevB),
	Chunk1 = (B#block.poa)#poa.chunk,
	{H1, Preimage1} = ar_block:compute_h1(H0, B#block.nonce, Chunk1),
	DiffPair = ar_difficulty:diff_pair(B),
	case H1 == B#block.hash
			andalso ar_node_utils:h1_passes_diff_check(H1, DiffPair,
					B#block.packing_difficulty)
			andalso Preimage1 == B#block.hash_preimage
			%% A one-chunk solution must not carry second-chunk data.
			andalso B#block.recall_byte2 == undefined
			andalso B#block.chunk2_hash == undefined of
		true ->
			pre_validate_poa(B, PrevB, PartitionUpperBound, H0, H1, Peer);
		false ->
			Chunk2 = (B#block.poa2)#poa.chunk,
			{H2, Preimage2} = ar_block:compute_h2(H1, Chunk2, H0),
			case H2 == B#block.hash
					andalso ar_node_utils:h2_passes_diff_check(H2, DiffPair,
							B#block.packing_difficulty)
					andalso Preimage2 == B#block.hash_preimage of
				true ->
					pre_validate_poa(B, PrevB, PartitionUpperBound, H0, H1, Peer);
				false ->
					post_block_reject_warn_and_error_dump(B, check_pow, Peer),
					ar_events:send(block, {rejected, invalid_pow,
							B#block.indep_hash, Peer}),
					invalid
			end
	end.

-ifdef(LOCALNET).
%% On localnet we want to freely choose chunks, so we derive the recall range
%% from the chosen chunk (recall_byte) rather than the other way around.
get_precalculated_recall_range(B) ->
	case B#block.packing_difficulty of
		0 ->
			{B#block.recall_byte - B#block.nonce * ?DATA_CHUNK_SIZE,
				case B#block.recall_byte2 of
					undefined ->
						not_set;
					_ ->
						B#block.recall_byte2 - B#block.nonce * ?DATA_CHUNK_SIZE
				end};
		_ ->
			%% With composite packing, several nonces address sub-chunks of
			%% the same chunk.
			ChunkNumber = B#block.nonce div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT,
			{B#block.recall_byte - ChunkNumber * ?DATA_CHUNK_SIZE,
				case B#block.recall_byte2 of
					undefined ->
						not_set;
					_ ->
						B#block.recall_byte2 - ChunkNumber * ?DATA_CHUNK_SIZE
				end}
	end.
-else.
get_precalculated_recall_range(_B) ->
	{not_set, not_set}.
-endif.

%% Validate the proof(s) of access: derive the recall byte(s) from H0, look up
%% the containing block bounds in the block index, and check the Merkle proofs
%% via ar_poa:validate/1. Successful proofs are cached on the block record.
pre_validate_poa(B, PrevB, PartitionUpperBound, H0, H1, Peer) ->
	{PrecalculatedRecallRange1, PrecalculatedRecallRange2} =
			get_precalculated_recall_range(B),
	{RecallRange1Start, RecallRange2Start} = ar_block:get_recall_range(H0,
			B#block.partition_number, PartitionUpperBound,
			PrecalculatedRecallRange1, PrecalculatedRecallRange2),
	RecallByte1 = ar_block:get_recall_byte(RecallRange1Start, B#block.nonce,
			B#block.packing_difficulty),
	{BlockStart1, BlockEnd1, TXRoot1} =
			ar_block_index:get_block_bounds(RecallByte1),
	BlockSize1 = BlockEnd1 - BlockStart1,
	PackingDifficulty = B#block.packing_difficulty,
	Nonce = B#block.nonce,
	%% The packing difficulty >0 is only allowed after the 2.8 hard fork (validated earlier
	%% here), and the composite packing is only possible for packing difficulty >= 1.
	%% The new shared entropy format is supported starting from 2.9.
	Packing = ar_block:get_packing(PackingDifficulty, B#block.reward_addr,
			B#block.replica_format),
	SubChunkIndex = ar_block:get_sub_chunk_index(PackingDifficulty, Nonce),
	ArgCache = {BlockStart1, RecallByte1, TXRoot1, BlockSize1, Packing,
			SubChunkIndex},
	case RecallByte1 == B#block.recall_byte andalso ar_poa:validate({BlockStart1,
			RecallByte1, TXRoot1, BlockSize1, B#block.poa, Packing,
			SubChunkIndex, not_set}) of
		error ->
			?LOG_ERROR([{event, failed_to_validate_proof_of_access},
					{block, ar_util:encode(B#block.indep_hash)}]),
			invalid;
		false ->
			post_block_reject_warn_and_error_dump(B, check_poa, Peer),
			ar_events:send(block, {rejected, invalid_poa,
					B#block.indep_hash, Peer}),
			invalid;
		{true, ChunkID} ->
			%% Cache the proof so that in case the miner signs additional blocks
			%% using the same solution, we can re-validate the potentially new
			%% proofs quickly, without re-validating the solution and
			%% re-unpacking the chunk.
			B2 = B#block{ poa_cache = {ArgCache, ChunkID} },
			%% A one-chunk (H1) solution is done; a two-chunk solution must
			%% also prove access to the second recall byte.
			case B#block.hash == H1 of
				true ->
					pre_validate_nonce_limiter(B2, PrevB, Peer);
				false ->
					RecallByte2 = ar_block:get_recall_byte(RecallRange2Start,
							B#block.nonce, B#block.packing_difficulty),
					{BlockStart2, BlockEnd2, TXRoot2} =
							ar_block_index:get_block_bounds(RecallByte2),
					BlockSize2 = BlockEnd2 - BlockStart2,
					ArgCache2 = {BlockStart2, RecallByte2, TXRoot2, BlockSize2,
							Packing, SubChunkIndex},
					case RecallByte2 == B#block.recall_byte2 andalso
							ar_poa:validate({BlockStart2, RecallByte2, TXRoot2,
									BlockSize2, B#block.poa2, Packing,
									SubChunkIndex, not_set}) of
						error ->
							?LOG_ERROR([{event,
									failed_to_validate_proof_of_access},
									{block, ar_util:encode(B#block.indep_hash)}]),
							invalid;
						false ->
							post_block_reject_warn_and_error_dump(B, check_poa2,
									Peer),
							ar_events:send(block, {rejected, invalid_poa2,
									B#block.indep_hash, Peer}),
							invalid;
						{true, Chunk2ID} ->
							%% Cache the proof so that in case the miner signs
							%% additional blocks using the same solution, we can
							%% re-validate the potentially new proofs quickly,
							%% without re-validating the solution and
							%% re-unpacking the chunk.
							B3 = B2#block{ poa2_cache = {ArgCache2, Chunk2ID} },
							pre_validate_nonce_limiter(B3, PrevB, Peer)
					end
			end
	end.

%% Validate the last-step VDF checkpoints. A cache match means the VDF data is
%% already known good and the block can be gossiped further right away.
pre_validate_nonce_limiter(B, PrevB, Peer) ->
	PrevOutput = get_last_step_prev_output(B),
	case ar_nonce_limiter:validate_last_step_checkpoints(B, PrevB, PrevOutput) of
		{false, cache_mismatch, CachedSteps} ->
			ar_ignore_registry:add(B#block.indep_hash),
			post_block_reject_warn_and_error_dump(B,
					check_nonce_limiter_cache_mismatch, Peer,
					#{ prev_b => PrevB, cached_steps => CachedSteps }),
			ar_events:send(block, {rejected, invalid_nonce_limiter_cache_mismatch,
					B#block.indep_hash, Peer}),
			invalid;
		false ->
			post_block_reject_warn_and_error_dump(B, check_nonce_limiter, Peer),
			ar_events:send(block, {rejected, invalid_nonce_limiter,
					B#block.indep_hash, Peer}),
			invalid;
		{true, cache_match} ->
			accept_block(B, Peer, true);
		true ->
			accept_block(B, Peer, false)
	end.

%% Mark the block as seen and emit the "new block" event; Gossip indicates
%% whether it should be propagated to peers immediately.
accept_block(B, Peer, Gossip) ->
	ar_ignore_registry:add(B#block.indep_hash),
	ar_events:send(block, {new, B, #{ source => {peer, Peer},
			gossip => Gossip }}),
	?LOG_INFO([{event, accepted_block}, {height, B#block.height},
			{indep_hash, ar_util:encode(B#block.indep_hash)}]),
	ok.

%% Verify the block signature and recompute the independent hash.
%% Returns {ok, IndepHash} or {error, invalid_signature}.
compute_hash(B, PrevCDiff) ->
	true = B#block.height >= ar_fork:height_2_6(),
	SignedH = ar_block:generate_signed_hash(B),
	case ar_block:verify_signature(SignedH, PrevCDiff, B) of
		false ->
			{error, invalid_signature};
		true ->
			{ok, ar_block:indep_hash2(SignedH, B#block.signature)}
	end.

post_block_reject_warn_and_error_dump(B, Step, Peer) ->
	post_block_reject_warn_and_error_dump(B, Step, Peer, #{}).
%% Log the rejection and dump {Block, ExtraData} to a randomly-named file
%% under the data directory for offline debugging.
post_block_reject_warn_and_error_dump(B, Step, Peer, ExtraData) ->
	{ok, Config} = arweave_config:get_env(),
	DumpName = binary_to_list(ar_util:encode(crypto:strong_rand_bytes(16))),
	DumpPath = filename:join(Config#config.data_dir,
			"invalid_block_dump_" ++ DumpName),
	file:write_file(DumpPath, term_to_binary({B, ExtraData})),
	post_block_reject_warn(B, Step, Peer),
	?LOG_WARNING([{event, post_block_rejected},
			{hash, ar_util:encode(B#block.indep_hash)}, {step, Step},
			{peer, ar_util:format_peer(Peer)}, {error_dump, DumpPath}]).

%% Warn about a rejected block: which validation step failed and who sent it.
post_block_reject_warn(B, Step, Peer) ->
	?LOG_WARNING([{event, post_block_rejected},
			{hash, ar_util:encode(B#block.indep_hash)}, {step, Step},
			{peer, ar_util:format_peer(Peer)}]).

%% Same as post_block_reject_warn/3, with extra step-specific parameters.
post_block_reject_warn(B, Step, Peer, Params) ->
	?LOG_WARNING([{event, post_block_rejected},
			{hash, ar_util:encode(B#block.indep_hash)}, {step, Step},
			{params, Params}, {peer, ar_util:format_peer(Peer)}]).

%% Observe the elapsed time (in milliseconds) since the block was received.
record_block_pre_validation_time(ReceiveTimestamp) ->
	ElapsedUs = timer:now_diff(erlang:timestamp(), ReceiveTimestamp),
	prometheus_histogram:observe(block_pre_validation_time, ElapsedUs / 1000).

%% Queue priority: higher blocks and better-ranked peers are validated first.
priority(B, Peer) ->
	Score = get_peer_score(Peer),
	{B#block.height, Score}.

get_peer_score(Peer) ->
	Lifetime = ar_peers:get_peers(lifetime),
	get_peer_score(Peer, Lifetime, 0).

%% The peer's score is minus its position in the lifetime ranking (0 for the
%% top peer); unknown peers get a random penalty on top of that.
get_peer_score(Peer, Peers, Score) ->
	case Peers of
		[Peer | _] ->
			Score;
		[_Other | Rest] ->
			get_peer_score(Peer, Rest, Score - 1);
		[] ->
			Score - rand:uniform(100)
	end.

%% Evict the lowest-priority queue entries until the total size of the queued
%% blocks fits the limit, releasing their ignore-registry references.
drop_tail(Queue, TotalSize) ->
	case TotalSize =< ?MAX_PRE_VALIDATION_QUEUE_SIZE of
		true ->
			{Queue, TotalSize};
		false ->
			{{_Priority, {B, _PrevB, _SolutionResigned, _Peer, Ref}}, Rest} =
					gb_sets:take_smallest(Queue),
			ar_ignore_registry:remove_ref(B#block.indep_hash, Ref),
			drop_tail(Rest, TotalSize - byte_size(term_to_binary(B)))
	end.
%% Rate-limit block processing per source IP: accept at most one block per
%% ThrottleInterval milliseconds from the same /32 address. Returns
%% {true, UpdatedTimestamps} or false. A delayed cleanup cast is scheduled
%% every call so stale entries are eventually removed.
throttle_by_ip(Peer, Timestamps, ThrottleInterval) ->
	IP = get_ip(Peer),
	Now = os:system_time(millisecond),
	ar_util:cast_after(ThrottleInterval * 2, ?MODULE,
			{may_be_remove_ip_timestamp, IP}),
	case maps:get(IP, Timestamps, not_set) of
		not_set ->
			{true, maps:put(IP, Now, Timestamps)};
		Timestamp when Timestamp < Now - ThrottleInterval ->
			{true, maps:put(IP, Now, Timestamps)};
		_ ->
			false
	end.

%% Strip the port from a {A, B, C, D, Port} peer tuple.
get_ip({A, B, C, D, _Port}) ->
	{A, B, C, D}.

%% Rate-limit block processing per solution hash, mirroring throttle_by_ip/3.
throttle_by_solution_hash(H, Timestamps, ThrottleInterval) ->
	Now = os:system_time(millisecond),
	ar_util:cast_after(ThrottleInterval * 2, ?MODULE,
			{may_be_remove_h_timestamp, H}),
	case maps:get(H, Timestamps, not_set) of
		not_set ->
			{true, maps:put(H, Now, Timestamps)};
		Timestamp when Timestamp < Now - ThrottleInterval ->
			{true, maps:put(H, Now, Timestamps)};
		_ ->
			false
	end.


================================================
FILE: apps/arweave/src/ar_block_pre_validator_sup.erl
================================================
%% Supervisor for the block pre-validator worker.
-module(ar_block_pre_validator_sup).

-behaviour(supervisor).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-export([start_link/0]).
-export([init/1]).

%%%===================================================================
%%% Public API.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%%%===================================================================
%%% Supervisor callbacks.
%%%===================================================================

%% One-for-one supervision of the single ar_block_pre_validator worker:
%% up to 5 restarts within 10 seconds.
init([]) ->
	Children = [?CHILD(ar_block_pre_validator, worker)],
	{ok, {{one_for_one, 5, 10}, Children}}.


================================================
FILE: apps/arweave/src/ar_block_propagation_worker.erl
================================================
%% A worker process that delivers a block to a single peer, with retries.
-module(ar_block_propagation_worker).

-behaviour(gen_server).

-export([start_link/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
-include_lib("arweave/include/ar.hrl").

%% The worker keeps no state; retry bookkeeping travels in the messages.
-record(state, {}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link(Name) ->
	gen_server:start_link({local, Name}, ?MODULE, [], []).

%%%===================================================================
%%% gen_server callbacks.
%%%===================================================================

init([]) ->
	{ok, #state{}}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Plain block send: on HTTP 412 (peer lacks the previous block) retry after
%% 2 seconds, up to RetryCount times; otherwise report completion to From.
handle_cast({send_block, SendFun, RetryCount, From}, State) ->
	case SendFun() of
		{ok, {{<<"412">>, _}, _, _, _, _}} when RetryCount > 0 ->
			ar_util:cast_after(2000, self(), {send_block, SendFun, RetryCount - 1, From}),
			{noreply, State};
		_ ->
			From ! {worker_sent_block, self()},
			{noreply, State}
	end;
%% Two-phase send: first announce the block; if the peer wants it (200), send
%% the block with only the transactions/chunks the peer reported missing.
%% 418 means the peer wants specific transactions (ids in the body); 419 means
%% it wants both chunks.
handle_cast({send_block2, Peer, SendAnnouncementFun, SendFun, RetryCount, From}, State) ->
	case SendAnnouncementFun() of
		{ok, {{<<"412">>, _}, _, _, _, _}} when RetryCount > 0 ->
			ar_util:cast_after(2000, self(),
					{send_block2, Peer, SendAnnouncementFun, SendFun, RetryCount - 1, From});
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			case catch ar_serialize:binary_to_block_announcement_response(Body) of
				{'EXIT', Reason} ->
					?LOG_INFO([{event, send_announcement_response},
							{peer, ar_util:format_peer(Peer)}, {exit, Reason}]),
					ar_peers:issue_warning(Peer, block_announcement, Reason),
					From ! {worker_sent_block, self()};
				{error, Reason} ->
					?LOG_INFO([{event, send_announcement_response},
							{peer, ar_util:format_peer(Peer)}, {error, Reason}]),
					ar_peers:issue_warning(Peer, block_announcement, Reason),
					From ! {worker_sent_block, self()};
				{ok, #block_announcement_response{ missing_tx_indices = L,
						missing_chunk = MissingChunk,
						missing_chunk2 = MissingChunk2 }} ->
					case SendFun(MissingChunk, MissingChunk2, L) of
						{ok, {{<<"418">>, _}, _, Bin, _, _}} when RetryCount > 0 ->
							case parse_txids(Bin) of
								error ->
									ok;
								{ok, TXIDs} ->
									SendFun(MissingChunk, MissingChunk2, TXIDs)
							end;
						{ok, {{<<"419">>, _}, _, _, _, _}} when RetryCount > 0 ->
							SendFun(true, true, L);
						_ ->
							ok
					end,
					From ! {worker_sent_block, self()}
			end;
		_ ->
			%% 208 (the peer has already received this block) or
			%% an unexpected response.
			From ! {worker_sent_block, self()}
	end,
	{noreply, State};
handle_cast(Msg, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

%% HTTP connections closing underneath us is expected; ignore quietly.
handle_info({gun_down, _PID, http, closed, _, _}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, terminate}, {module, ?MODULE}, {reason, Reason}]),
	ok.

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% Parse a body consisting of concatenated 32-byte transaction ids.
%% Returns {ok, TXIDs} or error when the size is not a multiple of 32.
parse_txids(<< TXID:32/binary, Rest/binary >>) ->
	case parse_txids(Rest) of
		error ->
			error;
		{ok, TXIDs} ->
			{ok, [TXID | TXIDs]}
	end;
parse_txids(<<>>) ->
	{ok, []}.


================================================
FILE: apps/arweave/src/ar_block_time_history.erl
================================================
%% Maintains the rolling history of {block interval, VDF interval, chunk count}
%% triplets used for pricing and difficulty computations.
-module(ar_block_time_history).

-export([history_length/0, has_history/1, get_history/1, get_history_from_blocks/2,
		set_history/2, get_hashes/1, sum_history/1, compute_block_interval/1,
		validate_hashes/2, hash/1, update_history/2]).

-include_lib("arweave/include/ar.hrl").

-ifdef(AR_TEST).
-define(BLOCK_TIME_HISTORY_BLOCKS, 3).
-else.
-ifndef(BLOCK_TIME_HISTORY_BLOCKS).
%% Roughly a month of blocks at a 2-minute block time.
-define(BLOCK_TIME_HISTORY_BLOCKS, (30 * 24 * 30)).
-endif.
-endif.
%% The maximum number of history elements kept/considered.
history_length() ->
	?BLOCK_TIME_HISTORY_BLOCKS.

%% True once enough post-2.7 blocks exist to fill the whole history window.
has_history(Height) ->
	Height - history_length() > ar_fork:height_2_7().

%% The block's history capped at history_length() elements.
get_history(B) ->
	lists:sublist(B#block.block_time_history, history_length()).

%% Compute {IndepHash, HistoryElement} pairs for every post-2.7 block in the
%% list; each block's element is computed against the preceding PrevB.
get_history_from_blocks([], _PrevB) ->
	[];
get_history_from_blocks([B | Blocks], PrevB) ->
	case B#block.height >= ar_fork:height_2_7() of
		false ->
			get_history_from_blocks(Blocks, B);
		true ->
			[{B#block.indep_hash, get_history_element(B, PrevB)}
					| get_history_from_blocks(Blocks, B)]
	end.

%% Attach the given history to the blocks, each successive block receiving the
%% history with one more leading element dropped.
set_history([], _History) ->
	[];
set_history(Blocks, []) ->
	Blocks;
set_history([B | Blocks], History) ->
	[B#block{ block_time_history = History } | set_history(Blocks, tl(History))].

%% Collect the history hashes of the blocks inside the consensus window.
get_hashes(Blocks) ->
	TipB = hd(Blocks),
	Len = min(TipB#block.height - ar_fork:height_2_7() + 1,
			ar_block:get_consensus_window_size()),
	[B#block.block_time_history_hash || B <- lists:sublist(Blocks, Len)].

%% Fold the history into totals: summed block intervals, summed VDF intervals,
%% and the counts of one-chunk versus two-chunk solutions.
sum_history(B) ->
	{IntervalTotal, VDFIntervalTotal, OneChunkCount, TwoChunkCount} =
		lists:foldl(
			fun({BlockInterval, VDFInterval, ChunkCount}, {Acc1, Acc2, Acc3, Acc4}) ->
				{
					Acc1 + BlockInterval,
					Acc2 + VDFInterval,
					case ChunkCount of 1 -> Acc3 + 1; _ -> Acc3 end,
					case ChunkCount of 1 -> Acc4; _ -> Acc4 + 1 end
				}
			end,
			{0, 0, 0, 0},
			get_history(B)
		),
	{IntervalTotal, VDFIntervalTotal, OneChunkCount, TwoChunkCount}.

%% Average block interval over the history window; 120 seconds is used as the
%% default before a full history is accumulated.
compute_block_interval(B) ->
	Height = B#block.height + 1,
	case has_history(Height) of
		true ->
			IntervalTotal = lists:foldl(
				fun({BlockInterval, _VDFInterval, _ChunkCount}, Acc) ->
					Acc + BlockInterval
				end,
				0,
				get_history(B)
			),
			IntervalTotal div history_length();
		false ->
			120
	end.

%% Check each hash against the history suffix starting at its position:
%% the N-th hash must equal hash/1 of the history with N elements dropped.
validate_hashes(_History, []) ->
	true;
validate_hashes(History, [H | Hashes]) ->
	case validate_hash(H, History) of
		true ->
			validate_hashes(tl(History), Hashes);
		false ->
			false
	end.

validate_hash(H, History) ->
	H == hash(History).

%% SHA-256 over the encoded length followed by the encoded history triplets.
hash(History) ->
	History2 = lists:sublist(History, history_length()),
	hash(History2, [ar_serialize:encode_int(length(History2), 8)]).
hash([], IOList) -> crypto:hash(sha256, iolist_to_binary(IOList)); hash([{BlockInterval, VDFInterval, ChunkCount} | History], IOList) -> BlockIntervalBin = ar_serialize:encode_int(BlockInterval, 8), VDFIntervalBin = ar_serialize:encode_int(VDFInterval, 8), ChunkCountBin = ar_serialize:encode_int(ChunkCount, 8), hash(History, [BlockIntervalBin, VDFIntervalBin, ChunkCountBin | IOList]). update_history(B, PrevB) -> case B#block.height >= ar_fork:height_2_7() of false -> PrevB#block.block_time_history; true -> [get_history_element(B, PrevB) | PrevB#block.block_time_history] end. get_history_element(B, PrevB) -> BlockInterval = max(1, B#block.timestamp - PrevB#block.timestamp), VDFInterval = ar_block:vdf_step_number(B) - ar_block:vdf_step_number(PrevB), ChunkCount = case B#block.recall_byte2 of undefined -> 1; _ -> 2 end, {BlockInterval, VDFInterval, ChunkCount}. ================================================ FILE: apps/arweave/src/ar_bridge.erl ================================================ %% This Source Code Form is subject to the terms of the GNU General %% Public License, v. 2.0. If a copy of the GPLv2 was not distributed %% with this file, You can obtain one at %% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html %%% @doc The module gossips blocks to peers. -module(ar_bridge). -behaviour(gen_server). -export([start_link/2, start_gossip/0, stop_gossip/0]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). -export([block_propagation_parallelization/0]). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -record(state, { block_propagation_queue = gb_sets:new(), workers, gossip = true }). %%%=================================================================== %%% API %%%=================================================================== block_propagation_parallelization() -> ?BLOCK_PROPAGATION_PARALLELIZATION. 
%%--------------------------------------------------------------------
%% @doc
%% Starts the server
%%
%% @spec start_link(Name, Workers) -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
start_link(Name, Workers) ->
	gen_server:start_link({local, Name}, ?MODULE, Workers, []).

%% @doc Resume queuing new blocks for propagation.
start_gossip() ->
	gen_server:call(?MODULE, start_gossip).

%% @doc Stop queuing new blocks for propagation; also drops the current queue.
stop_gossip() ->
	gen_server:call(?MODULE, stop_gossip).

%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server. Subscribes to block events and marks every
%% propagation worker as free.
%%
%% @spec init(Args) -> {ok, State} |
%%                     {ok, State, Timeout} |
%%                     ignore |
%%                     {stop, Reason}
%% @end
%%--------------------------------------------------------------------
init(Workers) ->
	ar_events:subscribe(block),
	WorkerMap = lists:foldl(fun(W, Acc) -> maps:put(W, free, Acc) end, #{}, Workers),
	State = #state{ workers = WorkerMap },
	{ok, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%%
%% @spec handle_call(Request, From, State) ->
%%                                   {reply, Reply, State} |
%%                                   {reply, Reply, State, Timeout} |
%%                                   {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, Reply, State} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
handle_call(start_gossip, _From, State) ->
	{reply, ok, State#state{ gossip = true }};
handle_call(stop_gossip, _From, State) ->
	%% Stopping gossip also clears any blocks already queued.
	{reply, ok, State#state{ gossip = false, block_propagation_queue = gb_sets:new() }};
handle_call(Request, _From, State) ->
	?LOG_WARNING("unhandled call: ~p", [Request]),
	{reply, ok, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling cast messages
%%
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%%                                  {noreply, State, Timeout} |
%%                                  {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% Try to hand the highest-priority queued block to worker W, if W is free.
handle_cast({may_be_send_block, W}, State) ->
	#state{ workers = Workers, block_propagation_queue = Q } = State,
	case dequeue(Q) of
		empty ->
			{noreply, State};
		{{_Priority, Peer, BlockData}, Q2} ->
			case maps:get(W, Workers) of
				free ->
					send_to_worker(Peer, BlockData, W),
					{noreply, State#state{ block_propagation_queue = Q2,
							workers = maps:put(W, busy, Workers) }};
				busy ->
					%% Leave the queue untouched; another worker will pick it up.
					{noreply, State}
			end
	end;
handle_cast(Msg, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling all non call/cast messages
%%
%% @spec handle_info(Info, State) -> {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% Ignore blocks explicitly tagged as not-for-gossip.
handle_info({event, block, {new, _B, #{ gossip := false }}}, State) ->
	{noreply, State};
%% Ignore new blocks while gossip is switched off.
handle_info({event, block, {new, _B, _}}, State = #state{ gossip = false }) ->
	{noreply, State};
handle_info({event, block, {new, B, _}}, State) ->
	#state{ block_propagation_queue = Q, workers = Workers } = State,
	case ar_block_cache:get(block_cache, B#block.previous_block) of
		not_found ->
			%% The cache should have been just pruned and this block is old.
			{noreply, State};
		_ ->
			{ok, Config} = arweave_config:get_env(),
			TrustedPeers = ar_peers:get_trusted_peers(),
			SpecialPeers = Config#config.block_gossip_peers,
			%% Put the trusted peers at the end of the list, deduplicated.
			Peers = ((SpecialPeers ++ ar_peers:get_peers(current)) -- TrustedPeers)
					++ TrustedPeers,
			%% The JSON form is only needed for pre-2.6 peers.
			JSON = case B#block.height >= ar_fork:height_2_6() of
				true -> none;
				false -> block_to_json(B)
			end,
			Q2 = enqueue_block(Peers, B#block.height, {JSON, B}, Q),
			%% Nudge every worker; free ones will pick tasks off the queue.
			[gen_server:cast(?MODULE, {may_be_send_block, W}) || W <- maps:keys(Workers)],
			{noreply, State#state{ block_propagation_queue = Q2 }}
	end;
handle_info({event, block, _}, State) ->
	{noreply, State};
%% A worker finished sending; give it the next queued block or mark it free.
handle_info({worker_sent_block, W},
		#state{ workers = Workers, block_propagation_queue = Q } = State) ->
	case dequeue(Q) of
		empty ->
			{noreply, State#state{ workers = maps:put(W, free, Workers) }};
		{{_Priority, Peer, BlockData}, Q2} ->
			send_to_worker(Peer, BlockData, W),
			{noreply, State#state{ block_propagation_queue = Q2,
					workers = maps:put(W, busy, Workers) }}
	end;
handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%%
%% @spec terminate(Reason, State) -> void()
%% @end
%%--------------------------------------------------------------------
terminate(_Reason, _State) ->
	?LOG_INFO([{event, ar_bridge_terminated}, {module, ?MODULE}]),
	ok.

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% @doc Queue the block for every peer, prioritized first by the peer's
%% position in the Peers list, then by block height.
enqueue_block(Peers, Height, BlockData, Q) ->
	enqueue_block(Peers, Height, BlockData, Q, 0).
enqueue_block([], _Height, _BlockData, Q, _N) ->
	Q;
enqueue_block([Peer | Peers], Height, BlockData, Q, N) ->
	%% N is the peer's rank in the list; smaller tuples sort first in the
	%% gb_sets-based priority queue.
	Priority = {N, Height},
	enqueue_block(Peers, Height, BlockData,
			gb_sets:add_element({Priority, Peer, BlockData}, Q), N + 1).

%% @doc Pop the smallest (highest-priority) element, or return 'empty'.
dequeue(Q) ->
	case gb_sets:is_empty(Q) of
		true ->
			empty;
		false ->
			gb_sets:take_smallest(Q)
	end.

%% @doc Dispatch the block to the given propagation worker. Peers on a recent
%% release (or any peer after the 2.6 fork) get the announcement + binary
%% /block2 flow; older peers get the legacy JSON /block flow.
send_to_worker(Peer, {JSON, B}, W) ->
	#block{ height = Height, indep_hash = H, previous_block = PrevH, txs = TXs,
			hash = SolutionH } = B,
	Release = ar_peers:get_peer_release(Peer),
	Fork_2_6 = ar_fork:height_2_6(),
	%% The solution hash is only part of announcements from 2.6 onwards.
	SolutionH2 = case Height >= ar_fork:height_2_6() of
		true -> SolutionH;
		_ -> undefined
	end,
	case Release >= 52 orelse Height >= Fork_2_6 of
		true ->
			SendAnnouncementFun = fun() ->
				Announcement = #block_announcement{ indep_hash = H,
						previous_block = PrevH,
						recall_byte = B#block.recall_byte,
						recall_byte2 = B#block.recall_byte2,
						solution_hash = SolutionH2,
						tx_prefixes = [ar_node_worker:tx_id_prefix(ID)
								|| #tx{ id = ID } <- TXs] },
				ar_http_iface_client:send_block_announcement(Peer, Announcement)
			end,
			SendFun = fun(MissingChunk, MissingChunk2, MissingTXs) ->
				%% Some transactions might be absent from our mempool. We still gossip
				%% this block further and search for the missing transactions afterwards
				%% (the process is initiated by ar_node_worker). We are gradually moving
				%% to the new process where blocks are sent over POST /block2 along with
				%% all the missing transactions specified in the preceding
				%% POST /block_announcement reply. Once the network adopts the new release,
				%% we will turn off POST /block and remove the missing transactions search
				%% in ar_node_worker.
				case determine_included_transactions(TXs, MissingTXs) of
					missing ->
						case Height >= ar_fork:height_2_6() of
							true ->
								%% POST /block is not supported after 2.6.
								%% The recipient would have to download this block
								%% along with its transactions via ar_poller (which
								%% we made trustless in the 2.6 release).
								ok;
							false ->
								send_and_log(Peer, H, Height, json, JSON, n)
						end;
					TXs2 ->
						%% Strip the proof chunks the peer did not ask for.
						PoA = case MissingChunk of
							true -> B#block.poa;
							false -> (B#block.poa)#poa{ chunk = <<>> }
						end,
						PoA2 = case MissingChunk2 of
							false -> (B#block.poa2)#poa{ chunk = <<>> };
							_ -> B#block.poa2
						end,
						Bin = ar_serialize:block_to_binary(B#block{ txs = TXs2,
								poa = PoA, poa2 = PoA2 }),
						send_and_log(Peer, H, Height, binary, Bin, B#block.recall_byte)
				end
			end,
			gen_server:cast(W, {send_block2, Peer, SendAnnouncementFun, SendFun, 1, self()});
		false ->
			SendFun = fun() -> send_and_log(Peer, H, Height, json, JSON, n) end,
			gen_server:cast(W, {send_block, SendFun, 1, self()})
	end.

%% @doc Send the block in the given format and, when the peer is one of the
%% configured block_gossip_peers, log the outcome.
send_and_log(Peer, H, Height, Format, Bin, RecallByte) ->
	{ok, Config} = arweave_config:get_env(),
	Reply = case Format of
		json ->
			ar_http_iface_client:send_block_json(Peer, H, Bin);
		binary ->
			ar_http_iface_client:send_block_binary(Peer, H, Bin, RecallByte)
	end,
	case lists:member(Peer, Config#config.block_gossip_peers) of
		true ->
			?LOG_INFO([{event, sent_block_to_block_gossip_peer},
					{format, Format}, {height, Height},
					{block, ar_util:encode(H)},
					{peer, ar_util:format_peer(Peer)},
					{reply, ar_metrics:get_status_class(Reply)}]);
		false ->
			ok
	end.

%% @doc Serialize the block for the legacy JSON POST /block endpoint.
block_to_json(B) ->
	BDS = ar_block:generate_block_data_segment(B),
	{BlockProps} = ar_serialize:block_to_json_struct(B),
	PostProps = [
		{<<"new_block">>, {BlockProps}},
		%% Add the P2P port field to be backwards compatible with nodes
		%% running the old version of the P2P port feature.
		{<<"port">>, ?DEFAULT_HTTP_IFACE_PORT},
		{<<"block_data_segment">>, ar_util:encode(BDS)}
	],
	ar_serialize:jsonify({PostProps}).

%% @doc Return the list of transactions to gossip or 'missing'. TXs is a list of possibly
%% both tx records and transaction identifiers - whatever is found in the gossiped block.
%% MissingTXs is a list of possibly both 0-based indices and tx identifiers. The items
%% in the new list are in the same order they occur in TXs. Identifiers are simply placed
%% as-is in the new list. The tx records might be converted to their identifiers (to avoid
%% sending the entire transactions to peers who already know them) if either their 0-based
%% indices or identifiers are found in MissingTXs. Elements in MissingTXs are assumed sorted
%% in the order of their appearance in TXs. Return 'missing' if TXs contains an identifier (
%% not a tx record) which (or its index) is found in MissingTXs.
determine_included_transactions(TXs, MissingTXs) ->
	determine_included_transactions(TXs, MissingTXs, [], 0).

determine_included_transactions([], _MissingTXs, Included, _N) ->
	lists:reverse(Included);
determine_included_transactions([TXIDOrTX | TXs], [], Included, N) ->
	%% Nothing more is missing: the peer knows the rest - send identifiers only.
	determine_included_transactions(TXs, [], [tx_id(TXIDOrTX) | Included], N);
determine_included_transactions([TXIDOrTX | TXs], [TXIDOrIndex | MissingTXs], Included, N) ->
	TXID = tx_id(TXIDOrTX),
	case TXIDOrIndex == N orelse TXIDOrIndex == TXID of
		true ->
			case TXID == TXIDOrTX of
				true ->
					%% We only have the identifier ourselves - cannot serve the peer.
					missing;
				false ->
					determine_included_transactions(TXs, MissingTXs,
							[strip_v2_data(TXIDOrTX) | Included], N + 1)
			end;
		false ->
			determine_included_transactions(TXs, [TXIDOrIndex | MissingTXs],
					[TXID | Included], N + 1)
	end.

%% @doc Extract the identifier from a tx record, or pass an identifier through.
tx_id(#tx{ id = TXID }) ->
	TXID;
tx_id(TXID) ->
	TXID.

%% @doc Drop the data payload from format-2 transactions before gossip.
strip_v2_data(#tx{ format = 2 } = TX) ->
	TX#tx{ data = <<>> };
strip_v2_data(TX) ->
	TX.



================================================
FILE: apps/arweave/src/ar_bridge_sup.erl
================================================

-module(ar_bridge_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% @doc Start block_propagation_parallelization() ar_block_propagation_worker
%% processes plus the ar_bridge server, which receives the worker names.
init([]) ->
	Children = lists:map(
		fun(Num) ->
			Name = list_to_atom("ar_block_propagation_worker"
					++ integer_to_list(Num)),
			{Name, {ar_block_propagation_worker, start_link, [Name]},
					permanent, ?SHUTDOWN_TIMEOUT, worker,
					[ar_block_propagation_worker]}
		end,
		lists:seq(1, ar_bridge:block_propagation_parallelization())
	),
	Workers = [element(1, El) || El <- Children],
	Children2 = [?CHILD_WITH_ARGS(ar_bridge, worker, ar_bridge, [ar_bridge, Workers])
			| Children],
	{ok, {{one_for_one, 5, 10}, Children2}}.



================================================
FILE: apps/arweave/src/ar_chain_stats.erl
================================================

-module(ar_chain_stats).

-behaviour(gen_server).

-include("ar.hrl").
-include("ar_chain_stats.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

-export([log_fork/2, log_fork/3, get_forks/1]).

-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
		terminate/2]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Record a fork of orphaned blocks rooted at ForkRootB, stamped with
%% the current system time in milliseconds.
log_fork(Orphans, ForkRootB) ->
	log_fork(Orphans, ForkRootB, os:system_time(millisecond)).

log_fork([], _ForkRootB, _ForkTime) ->
	%% No fork to log
	ok;
log_fork(Orphans, ForkRootB, ForkTime) ->
	gen_server:cast(?MODULE, {log_fork, Orphans, ForkRootB, ForkTime}).

%% @doc Returns all forks that have been logged since the given start time
%% (system time in seconds)
get_forks(StartTime) ->
	case catch gen_server:call(?MODULE, {get_forks, StartTime}) of
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			{error, timeout};
		Reply ->
			Reply
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	ok = ar_kv:open(#{
		path => filename:join([Config#config.data_dir, ?ROCKS_DB_DIR, "forks_db"]),
		name => forks_db}),
	{ok, #{}}.

%% @doc Return every fork recorded at or after StartTime (seconds). Keys are
%% 64-bit big-endian millisecond timestamps, so the range scan starts at
%% StartTime * 1000.
handle_call({get_forks, StartTime}, _From, State) ->
	{ok, ForksMap} = ar_kv:get_range(forks_db, <<(StartTime * 1000):64>>),
	%% Sort forks by their key (the timestamp when they were detected) - sorts in
	%% chronological / ascending order (i.e. first element of the list is the oldest fork)
	SortedForks = lists:sort(maps:to_list(ForksMap)),
	Forks = [binary_to_term(Fork, [safe]) || {_Timestamp, Fork} <- SortedForks],
	{reply, Forks, State};
handle_call(_Request, _From, State) ->
	{reply, ok, State}.

handle_cast({log_fork, Orphans, ForkRootB, ForkTime}, State) ->
	do_log_fork(Orphans, ForkRootB, ForkTime),
	{noreply, State};
handle_cast(_Msg, State) ->
	{noreply, State}.

handle_info(_Info, State) ->
	{noreply, State}.

terminate(Reason, _state) ->
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate},
			{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Persist the fork under its detection timestamp and log the orphaned
%% blocks. The key must be the 64-bit big-endian ForkTime so that
%% handle_call({get_forks, ...}) can range-scan from <<(StartTime * 1000):64>>;
%% it was previously written as an empty binary (`<>`), which made every fork
%% overwrite the same key and fall outside every requested range.
do_log_fork(Orphans, ForkRootB, ForkTime) ->
	Fork = create_fork(Orphans, ForkRootB, ForkTime),
	ar_kv:put(forks_db, <<ForkTime:64>>, term_to_binary(Fork)),
	record_fork_depth(Orphans, ForkRootB),
	ok.

%% @doc Build the #fork record; the fork id is the SHA-256 of the concatenated
%% orphaned block ids, and the height is one above the fork root.
create_fork(Orphans, ForkRootB, ForkTime) ->
	ForkID = crypto:hash(sha256, list_to_binary(Orphans)),
	#fork{
		id = ForkID,
		height = ForkRootB#block.height + 1,
		timestamp = ForkTime,
		block_ids = Orphans
	}.

record_fork_depth(Orphans, ForkRootB) ->
	record_fork_depth(Orphans, ForkRootB, 0).
record_fork_depth([], _ForkRootB, 0) ->
	ok;
%% Renamed the unused counter to _N to avoid a compiler warning.
record_fork_depth([], _ForkRootB, _N) ->
	ok;
%% @doc Log each orphaned block with its depth below the fork tip.
record_fork_depth([H | Orphans], ForkRootB, N) ->
	SolutionHashInfo =
		case ar_block_cache:get(block_cache, H) of
			not_found ->
				%% Should never happen, by construction.
				?LOG_ERROR([{event, block_not_found_in_cache},
						{h, ar_util:encode(H)}]),
				[];
			#block{ hash = SolutionH } ->
				[{solution_hash, ar_util:encode(SolutionH)}]
		end,
	LogInfo = [
		{event, orphaning_block},
		{block, ar_util:encode(H)},
		{depth, N},
		{fork_root, ar_util:encode(ForkRootB#block.indep_hash)},
		{fork_height, ForkRootB#block.height + 1}
		| SolutionHashInfo],
	?LOG_INFO(LogInfo),
	record_fork_depth(Orphans, ForkRootB, N + 1).

%%%===================================================================
%%% Tests.
%%%===================================================================

forks_test_() ->
	[
		{timeout, 30, fun test_forks/0}
	].

test_forks() ->
	clear_forks_db(),
	StartTimeSeconds = 60,
	ForkRootB1 = #block{ indep_hash = <<"1">>, height = 1 },
	ForkRootB2 = #block{ indep_hash = <<"2">>, height = 2 },

	Orphans1 = [<<"a">>],
	Time1 = (StartTimeSeconds * 1000) + 5,
	log_fork(Orphans1, ForkRootB1, Time1),
	ExpectedFork1 = #fork{ id = crypto:hash(sha256, list_to_binary(Orphans1)),
			height = 2, block_ids = Orphans1, timestamp = Time1 },
	assert_forks_equal([ExpectedFork1], get_forks(StartTimeSeconds)),

	Orphans2 = [<<"b">>, <<"c">>],
	Time2 = (StartTimeSeconds * 1000) + 10,
	log_fork(Orphans2, ForkRootB1, Time2),
	ExpectedFork2 = #fork{ id = crypto:hash(sha256, list_to_binary(Orphans2)),
			height = 2, block_ids = Orphans2, timestamp = Time2 },
	assert_forks_equal([ExpectedFork1, ExpectedFork2], get_forks(StartTimeSeconds)),

	Orphans3 = [<<"b">>, <<"c">>, <<"d">>],
	Time3 = (StartTimeSeconds * 1000) + 15,
	log_fork(Orphans3, ForkRootB1, Time3),
	ExpectedFork3 = #fork{ id = crypto:hash(sha256, list_to_binary(Orphans3)),
			height = 2, block_ids = Orphans3, timestamp = Time3 },
	assert_forks_equal(
		[ExpectedFork1, ExpectedFork2, ExpectedFork3],
		get_forks(StartTimeSeconds)),

	Orphans4 = [<<"e">>, <<"f">>, <<"g">>],
	Time4 = (StartTimeSeconds * 1000) + 1000,
	log_fork(Orphans4, ForkRootB2, Time4),
	ExpectedFork4 = #fork{ id = crypto:hash(sha256, list_to_binary(Orphans4)),
			height = 3, block_ids = Orphans4, timestamp = Time4 },
	assert_forks_equal(
		[ExpectedFork1, ExpectedFork2, ExpectedFork3, ExpectedFork4],
		get_forks(StartTimeSeconds)),

	%% Same fork seen again - not sure this is possible, but since we're just tracking
	%% forks based on when they occur, it should be handled.
	Time5 = (StartTimeSeconds * 1000) + 1005,
	log_fork(Orphans3, ForkRootB1, Time5),
	ExpectedFork5 = ExpectedFork3#fork{timestamp = Time5},
	assert_forks_equal(
		[ExpectedFork1, ExpectedFork2, ExpectedFork3, ExpectedFork4, ExpectedFork5],
		get_forks(StartTimeSeconds)),

	%% If the fork is empty, ignore it.
	Time6 = (StartTimeSeconds * 1000) + 1010,
	log_fork([], ForkRootB2, Time6),
	assert_forks_equal(
		[ExpectedFork1, ExpectedFork2, ExpectedFork3, ExpectedFork4, ExpectedFork5],
		get_forks(StartTimeSeconds)),

	%% Check that the cutoff time is handled correctly
	assert_forks_equal(
		[ExpectedFork4, ExpectedFork5],
		get_forks(StartTimeSeconds + 1)),
	ok.

assert_forks_equal(ExpectedForks, ActualForks) ->
	?assertEqual(ExpectedForks, ActualForks).

%% @doc Delete every fork recorded up to now. The stored keys are 64-bit
%% big-endian millisecond timestamps, so the deletion range must use the same
%% encoding; the previous integer_to_binary/1 keys (ASCII digits) sorted after
%% every binary-encoded timestamp and therefore deleted nothing.
clear_forks_db() ->
	Time = os:system_time(millisecond),
	ar_kv:delete_range(forks_db, <<0:64>>, <<(Time + 1):64>>).



================================================
FILE: apps/arweave/src/ar_chunk_copy.erl
================================================

%%% @doc The module maintains a queue of processes fetching data from the network
%%% and from the local storage modules.
-module(ar_chunk_copy).

-behaviour(gen_server).

-export([start_link/1, register_workers/0, read_range/4]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Maximum number of chunks covered by a single read_range task.
-define(READ_RANGE_CHUNKS, 400).
%% Maximum number of tasks dispatched to a worker at any time.
-define(MAX_ACTIVE_TASKS, 10).
%% Maximum number of tasks waiting in a worker's queue.
-define(MAX_QUEUED_TASKS, 50).

%% Per-store-ID worker bookkeeping.
-record(worker_tasks, {
	%% Registered name of the ar_data_sync_worker process.
	worker,
	%% Queue of pending {Start, End, OriginStoreID, TargetStoreID} tasks.
	task_queue = queue:new(),
	%% Number of tasks currently dispatched to the worker.
	active_count = 0
}).

-record(state, {
	%% Map of StoreID => #worker_tasks{}.
	workers = #{}
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(WorkerMap) ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, WorkerMap, []).

%% @doc Return the child specs for the read workers plus the ar_chunk_copy
%% server itself (which receives the StoreID => worker-name map).
register_workers() ->
	{Workers, WorkerMap} = register_read_workers(),
	ChunkCopy = ?CHILD_WITH_ARGS(ar_chunk_copy, worker, ar_chunk_copy, [WorkerMap]),
	Workers ++ [ChunkCopy].

%% @doc Create one read-mode ar_data_sync_worker per configured storage module
%% (plus the default module); return {ChildSpecs, StoreID => Name map}.
register_read_workers() ->
	{ok, Config} = arweave_config:get_env(),
	StoreIDs = [ ar_storage_module:id(StorageModule)
			|| StorageModule <- Config#config.storage_modules ] ++ [?DEFAULT_MODULE],
	{Workers, WorkerMap} = lists:foldl(
		fun(StoreID, {AccWorkers, AccWorkerMap}) ->
			Label = ar_storage_module:label(StoreID),
			Name = list_to_atom("ar_data_sync_worker_" ++ Label),
			Worker = ?CHILD_WITH_ARGS(ar_data_sync_worker, worker, Name, [Name, read]),
			{[ Worker | AccWorkers], AccWorkerMap#{StoreID => Name}}
		end,
		{[], #{}},
		StoreIDs
	),
	{Workers, WorkerMap}.

%% @doc Returns true if we can accept new tasks. Will always return false if syncing is
%% disabled (i.e. sync_jobs = 0).
%% Also returns false when the server does not answer within one second.
ready_for_work(StoreID) ->
	try
		gen_server:call(?MODULE, {ready_for_work, StoreID}, 1000)
	catch
		exit:{timeout,_} ->
			false
	end.

%% @doc Queue a read-range task for the origin store's worker; returns false
%% (without queuing) when the worker's queue is full.
read_range(Start, End, OriginStoreID, TargetStoreID) ->
	case ready_for_work(OriginStoreID) of
		true ->
			Args = {Start, End, OriginStoreID, TargetStoreID},
			gen_server:cast(?MODULE, {read_range, Args}),
			true;
		false ->
			false
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init(WorkerMap) ->
	?LOG_DEBUG([{event, init}, {module, ?MODULE}, {worker_map, WorkerMap}]),
	Workers = maps:fold(
		fun(StoreID, Name, Acc) ->
			Acc#{StoreID => #worker_tasks{worker = Name}}
		end,
		#{},
		WorkerMap
	),
	%% Kick off the periodic queue-draining loop.
	ar_util:cast_after(1000, self(), process_queues),
	{ok, #state{ workers = Workers }}.

handle_call({ready_for_work, StoreID}, _From, State) ->
	{reply, do_ready_for_work(StoreID, State), State};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast({read_range, Args}, State) ->
	{noreply, enqueue_read_range(Args, State)};
handle_cast(process_queues, State) ->
	%% Re-arm the periodic drain before processing.
	ar_util:cast_after(1000, self(), process_queues),
	{noreply, process_queues(State)};
%% The worker name was previously bound as `Worker` but never used,
%% triggering a compiler warning; it is now explicitly ignored.
handle_cast({task_completed, {read_range, {_Worker, _, Args}}}, State) ->
	{noreply, task_completed(Args, State)};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_DEBUG([{event, terminate}, {module, ?MODULE},
			{reason, io_lib:format("~p", [Reason])}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Return true when the worker for StoreID exists and its queue has room.
do_ready_for_work(StoreID, State) ->
	Worker = maps:get(StoreID, State#state.workers, undefined),
	case Worker of
		undefined ->
			?LOG_ERROR([{event, worker_not_found}, {module, ?MODULE},
					{call, ready_for_work}, {store_id, StoreID}]),
			false;
		_ ->
			queue:len(Worker#worker_tasks.task_queue) < ?MAX_QUEUED_TASKS
	end.
%% @doc Split the requested range into ?READ_RANGE_CHUNKS-sized tasks and
%% queue them on the origin store's worker. Unknown store IDs are logged and
%% the state is returned unchanged.
enqueue_read_range(Args, State) ->
	{_Start, _End, OriginStoreID, _TargetStoreID} = Args,
	Worker = maps:get(OriginStoreID, State#state.workers, undefined),
	case Worker of
		undefined ->
			?LOG_ERROR([{event, worker_not_found}, {module, ?MODULE},
					{call, enqueue_read_range}, {store_id, OriginStoreID}]),
			State;
		_ ->
			Worker2 = do_enqueue_read_range(Args, Worker),
			State#state{
				workers = maps:put(OriginStoreID, Worker2, State#state.workers)
			}
	end.

%% @doc Recursively cut {Start, End} into segments of at most
%% ?READ_RANGE_CHUNKS * ?DATA_CHUNK_SIZE bytes and append each segment as a
%% task to the worker's queue.
do_enqueue_read_range(Args, Worker) ->
	{Start, End, OriginStoreID, TargetStoreID} = Args,
	End2 = min(Start + (?READ_RANGE_CHUNKS * ?DATA_CHUNK_SIZE), End),
	Args2 = {Start, End2, OriginStoreID, TargetStoreID},
	TaskQueue = queue:in(Args2, Worker#worker_tasks.task_queue),
	Worker2 = Worker#worker_tasks{task_queue = TaskQueue},
	case End2 == End of
		true ->
			Worker2;
		false ->
			Args3 = {End2, End, OriginStoreID, TargetStoreID},
			do_enqueue_read_range(Args3, Worker2)
	end.

%% @doc Drain every worker's queue up to its active-task limit.
process_queues(State) ->
	Workers = State#state.workers,
	UpdatedWorkers = maps:map(
		fun(_Key, Worker) ->
			process_queue(Worker)
		end,
		Workers
	),
	State#state{workers = UpdatedWorkers}.

%% @doc Dispatch queued tasks to the worker until either the queue is empty
%% or ?MAX_ACTIVE_TASKS tasks are in flight.
process_queue(Worker) ->
	case Worker#worker_tasks.active_count < ?MAX_ACTIVE_TASKS of
		true ->
			case queue:out(Worker#worker_tasks.task_queue) of
				{empty, _} ->
					Worker;
				{{value, Args}, Q2} ->
					gen_server:cast(Worker#worker_tasks.worker, {read_range, Args}),
					Worker2 = Worker#worker_tasks{
						task_queue = Q2,
						active_count = Worker#worker_tasks.active_count + 1
					},
					process_queue(Worker2)
			end;
		false ->
			Worker
	end.
%% @doc Account for a finished task: decrement the origin worker's active
%% count and immediately try to dispatch more of its queued tasks.
task_completed(Args, State) ->
	{_Start, _End, OriginStoreID, _TargetStoreID} = Args,
	Worker = maps:get(OriginStoreID, State#state.workers, undefined),
	case Worker of
		undefined ->
			?LOG_ERROR([{event, worker_not_found}, {module, ?MODULE},
					{call, task_completed}, {store_id, OriginStoreID}]),
			State;
		_ ->
			ActiveCount = Worker#worker_tasks.active_count - 1,
			Worker2 = Worker#worker_tasks{active_count = ActiveCount},
			Worker3 = process_queue(Worker2),
			State2 = State#state{
				workers = maps:put(OriginStoreID, Worker3, State#state.workers)
			},
			State2
	end.

%%%===================================================================
%%% Tests. Included in the module so they can reference private
%%% functions.
%%%===================================================================

helpers_test_() ->
	[
		{timeout, 30, fun test_ready_for_work/0},
		{timeout, 30, fun test_enqueue_read_range/0},
		{timeout, 30, fun test_process_queue/0},
		{timeout, 30, fun test_register_workers/0}
	].

test_ready_for_work() ->
	%% "store1" has one free slot; "store2" is at the queue limit.
	State = #state{
		workers = #{
			"store1" => #worker_tasks{
				task_queue = queue:from_list(lists:seq(1, ?MAX_QUEUED_TASKS - 1))},
			"store2" => #worker_tasks{
				task_queue = queue:from_list(lists:seq(1, ?MAX_QUEUED_TASKS))}
		}
	},
	?assertEqual(true, do_ready_for_work("store1", State)),
	?assertEqual(false, do_ready_for_work("store2", State)).
%% A 3 * ?READ_RANGE_CHUNKS range must be split into exactly three
%% ?READ_RANGE_CHUNKS-sized tasks.
test_enqueue_read_range() ->
	ExpectedWorker = #worker_tasks{
		task_queue = queue:from_list(
			[{
				floor(2.5 * ?DATA_CHUNK_SIZE),
				floor((2.5 + ?READ_RANGE_CHUNKS) * ?DATA_CHUNK_SIZE),
				"store1", "store2"
			},
			{
				floor((2.5 + ?READ_RANGE_CHUNKS) * ?DATA_CHUNK_SIZE),
				floor((2.5 + 2 * ?READ_RANGE_CHUNKS) * ?DATA_CHUNK_SIZE),
				"store1", "store2"
			},
			{
				floor((2.5 + 2 * ?READ_RANGE_CHUNKS) * ?DATA_CHUNK_SIZE),
				floor((2.5 + 3 * ?READ_RANGE_CHUNKS) * ?DATA_CHUNK_SIZE),
				"store1", "store2"
			}]
		)
	},
	Worker = do_enqueue_read_range(
		{
			floor(2.5 * ?DATA_CHUNK_SIZE),
			floor((2.5 + 3 * ?READ_RANGE_CHUNKS) * ?DATA_CHUNK_SIZE),
			"store1", "store2"
		},
		#worker_tasks{task_queue = queue:new()}
	),
	?assertEqual(
		queue:to_list(ExpectedWorker#worker_tasks.task_queue),
		queue:to_list(Worker#worker_tasks.task_queue)).

%% process_queue/1 must not dispatch past ?MAX_ACTIVE_TASKS and must leave
%% the undispatched remainder queued.
test_process_queue() ->
	Worker1 = #worker_tasks{ active_count = ?MAX_ACTIVE_TASKS },
	?assertEqual(Worker1, process_queue(Worker1)),

	Worker2 = #worker_tasks{ active_count = ?MAX_ACTIVE_TASKS + 1 },
	?assertEqual(Worker2, process_queue(Worker2)),

	Worker3 = process_queue(
		#worker_tasks{
			active_count = ?MAX_ACTIVE_TASKS - 2,
			task_queue = queue:from_list(
				[{floor(2.5 * ?DATA_CHUNK_SIZE), floor(12.5 * ?DATA_CHUNK_SIZE),
						"store1", "store2"},
				{floor(12.5 * ?DATA_CHUNK_SIZE), floor(22.5 * ?DATA_CHUNK_SIZE),
						"store1", "store2"},
				{floor(22.5 * ?DATA_CHUNK_SIZE), floor(30 * ?DATA_CHUNK_SIZE),
						"store1", "store2"}])
		}
	),
	ExpectedWorker3 = #worker_tasks{
		active_count = ?MAX_ACTIVE_TASKS,
		task_queue = queue:from_list(
			[{floor(22.5 * ?DATA_CHUNK_SIZE), floor(30 * ?DATA_CHUNK_SIZE),
					"store1", "store2"}]
		)
	},
	?assertEqual(
		ExpectedWorker3#worker_tasks.active_count,
		Worker3#worker_tasks.active_count),
	?assertEqual(
		queue:to_list(ExpectedWorker3#worker_tasks.task_queue),
		queue:to_list(Worker3#worker_tasks.task_queue)).
%% Every configured store (plus the default one) must have a registered,
%% responsive worker.
test_register_workers() ->
	{ok, Config} = arweave_config:get_env(),
	StoreIDs = [ ar_storage_module:id(StorageModule)
			|| StorageModule <- Config#config.storage_modules],
	lists:foreach(
		fun(StoreID) ->
			?assertEqual(true, ready_for_work(StoreID))
		end,
		StoreIDs ++ [?DEFAULT_MODULE]
	).



================================================
FILE: apps/arweave/src/ar_chunk_storage.erl
================================================

%% The blob storage optimized for fast reads.
-module(ar_chunk_storage).

-behaviour(gen_server).

-export([start_link/2, name/1, register_workers/0, is_storage_supported/3,
		put/4, open_files/1, get/2, get/3, locate_chunk_on_disk/2,
		get_range/2, get_range/3, cut/2, delete/1, delete/2,
		set_entropy_complete/1,
		get_filepath/2, get_handle_by_filepath/1, close_file/2, close_files/1,
		list_files/2, run_defragmentation/0,
		get_position_and_relative_chunk_offset/2,
		get_storage_module_path/2, get_chunk_storage_path/2,
		get_chunk_bucket_start/1, get_chunk_bucket_end/1,
		get_chunk_byte_from_bucket_end/1,
		get_chunk_seek_offset/1, get_chunk_file_start/1, sync_record_id/1,
		write_chunk/4, record_chunk/5, read_offset/2]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

%% Used in tests.
-export([delete_chunk/2]).

-include("ar.hrl").
-include("ar_sup.hrl").
-include("ar_consensus.hrl").
-include("ar_chunk_storage.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/file.hrl").

%% Server state.
-record(state, {
	file_index,
	store_id,
	entropy_context = none, %% some data we need pass to ar_entropy_storage
	range_start,
	range_end
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, StoreID) ->
	gen_server:start_link({local, Name}, ?MODULE, StoreID, []).

%% @doc Return the name of the server serving the given StoreID.
name(StoreID) ->
	list_to_atom("ar_chunk_storage_" ++ ar_storage_module:label(StoreID)).

%% @doc Return child specs for one ar_chunk_storage server per configured
%% storage module, per repack-in-place module, plus the default module.
register_workers() ->
	{ok, Config} = arweave_config:get_env(),
	ConfiguredWorkers = lists:map(
		fun(StorageModule) ->
			StoreID = ar_storage_module:id(StorageModule),
			ChunkStorageName = ar_chunk_storage:name(StoreID),
			?CHILD_WITH_ARGS(ar_chunk_storage, worker,
					ChunkStorageName, [ChunkStorageName, StoreID])
		end,
		Config#config.storage_modules
	),
	DefaultChunkStorageWorker = ?CHILD_WITH_ARGS(ar_chunk_storage, worker,
			ar_chunk_storage_default, [ar_chunk_storage_default, ?DEFAULT_MODULE]),
	RepackInPlaceWorkers = lists:map(
		fun({StorageModule, _Packing}) ->
			StoreID = ar_storage_module:id(StorageModule),
			%% Note: the config validation will prevent a StoreID from being used in both
			%% `storage_modules` and `repack_in_place_storage_modules`, so there's
			%% no risk of a `Name` clash with the workers spawned above.
			ChunkStorageName = ar_chunk_storage:name(StoreID),
			?CHILD_WITH_ARGS(ar_chunk_storage, worker,
					ChunkStorageName, [ChunkStorageName, StoreID])
		end,
		Config#config.repack_in_place_storage_modules
	),
	ConfiguredWorkers ++ RepackInPlaceWorkers ++ [DefaultChunkStorageWorker].

%% @doc Return true if we can accept the chunk for storage.
%% 256 KiB chunks are stored on disk in chunk_storage optimized for read speed.
%% Unpacked chunks smaller than 256 KiB cannot be stored here currently,
%% because the module does not keep track of the chunk sizes - all chunks
%% are assumed to be 256 KiB.
%%
%% Put another way:
%% 1. Small chunks from before the strict data split threshold are never packed and
%%    never mined, so we store them as unpacked chunks in the rocksdb only.
%% 2. Small chunks after the strict data split threshold are:
%%    - stored in the rocksdb when they are unpacked
%%    - stored in chunk_storage as normal when they are packed
-spec is_storage_supported(
		Offset :: non_neg_integer(),
		ChunkSize :: non_neg_integer(),
		Packing :: term()
) -> true | false.
is_storage_supported(Offset, ChunkSize, Packing) ->
	case Offset > ar_block:strict_data_split_threshold() of
		true ->
			%% All chunks above ar_block:strict_data_split_threshold() are placed in 256 KiB
			%% buckets so technically can be stored in ar_chunk_storage. However, to avoid
			%% managing padding in ar_chunk_storage for unpacked chunks smaller than 256 KiB
			%% (we do not need fast random access to unpacked chunks after
			%% ar_block:strict_data_split_threshold() anyways), we put them to RocksDB.
			Packing /= unpacked orelse ChunkSize == (?DATA_CHUNK_SIZE);
		false ->
			ChunkSize == (?DATA_CHUNK_SIZE)
	end.

%% @doc Store the chunk under the given end offset,
%% bytes Offset - ?DATA_CHUNK_SIZE, Offset - ?DATA_CHUNK_SIZE + 1, .., Offset - 1.
%% Calls the store's server with a 3-minute timeout; returns {error, shutdown}
%% or {error, timeout} on the corresponding call failures.
put(PaddedOffset, Chunk, Packing, StoreID) ->
	GenServerID = name(StoreID),
	case catch gen_server:call(GenServerID, {put, PaddedOffset, Chunk, Packing},
			180_000) of
		{'EXIT', {shutdown, {gen_server, call, _}}} ->
			%% Handle to avoid the large badmatch log on shutdown.
			{error, shutdown};
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			?LOG_ERROR([{event, gen_server_timeout_putting_chunk},
				{padded_offset, PaddedOffset},
				{store_id, StoreID}
			]),
			{error, timeout};
		Reply ->
			Reply
	end.

%% @doc Open all the storage files. The subsequent calls to get/1 in the
%% caller process will use the opened file descriptors.
%% File handles are cached in the calling process's dictionary under
%% {cfile, {Key, StoreID}}.
open_files(StoreID) ->
	ets:foldl(
		fun ({{Key, ID}, Filepath}, _) when ID == StoreID ->
				case erlang:get({cfile, {Key, ID}}) of
					undefined ->
						case file:open(Filepath, [read, raw, binary]) of
							{ok, F} ->
								erlang:put({cfile, {Key, ID}}, F);
							_ ->
								ok
						end;
					_ ->
						ok
				end;
			(_, _) ->
				ok
		end,
		ok,
		chunk_storage_file_index
	).

%% @doc Return {PaddedEndOffset, Chunk} for the chunk containing the given byte.
get(Byte, StoreID) ->
	case ar_sync_record:get_interval(Byte + 1, ar_chunk_storage, StoreID) of
		not_found ->
			not_found;
		{_End, IntervalStart} ->
			get(Byte, IntervalStart, StoreID)
	end.
%% @doc Resolve the single chunk containing Byte within the synced interval
%% starting at IntervalStart. Returns {PaddedEndOffset, Chunk} or not_found.
get(Byte, IntervalStart, StoreID) ->
	%% The synced ranges begin at IntervalStart => the chunk
	%% should begin at a multiple of ?DATA_CHUNK_SIZE to the right of IntervalStart.
	ChunkStart = Byte - (Byte - IntervalStart) rem ?DATA_CHUNK_SIZE,
	ChunkFileStart = get_chunk_file_start_by_start_offset(ChunkStart),
	case get(Byte, ChunkStart, ChunkFileStart, StoreID, 1) of
		[] ->
			not_found;
		[{PaddedEndOffset, Chunk}] ->
			{PaddedEndOffset, Chunk}
	end.

%% @doc Same as locate_chunk_on_disk/3 with an empty file index (the filepath
%% is then derived from the chunk file start offset).
locate_chunk_on_disk(PaddedEndOffset, StoreID) ->
	locate_chunk_on_disk(PaddedEndOffset, StoreID, #{}).

%% @doc Return {ChunkFileStart, Filepath, Position, ChunkOffset} describing
%% where the chunk with the given padded end offset lives: the chunk file,
%% the byte position of its slot inside that file, and the intra-bucket offset
%% recorded alongside the chunk data.
locate_chunk_on_disk(PaddedEndOffset, StoreID, FileIndex) ->
	ChunkFileStart = get_chunk_file_start(PaddedEndOffset),
	Filepath = filepath(ChunkFileStart, FileIndex, StoreID),
	{Position, ChunkOffset} =
		get_position_and_relative_chunk_offset(ChunkFileStart, PaddedEndOffset),
	{ChunkFileStart, Filepath, Position, ChunkOffset}.

%% @doc Return a list of {PaddedEndOffset, Chunk} pairs for the stored chunks
%% inside the given range. The given interval does not have to cover every chunk
%% completely - we return all chunks at the intersection with the range.
get_range(Start, Size) ->
	get_range(Start, Size, ?DEFAULT_MODULE).

%% @doc Return a list of {PaddedEndOffset, Chunk} pairs for the stored chunks
%% inside the given range. The given interval does not have to cover every chunk
%% completely - we return all chunks at the intersection with the range. The
%% very last chunk might be outside of the interval - its start offset is
%% at most Start + Size + ?DATA_CHUNK_SIZE - 1.
get_range(Start, Size, StoreID) ->
	%% Ranges wider than a chunk file are not supported.
	?assert(Size < get_chunk_group_size()),
	case ar_sync_record:get_next_synced_interval(Start, infinity, ar_chunk_storage, StoreID) of
		{_End, IntervalStart} when Start + Size > IntervalStart ->
			%% Clamp the requested range to the synced interval and align the
			%% first and the last chunk starts to the interval's bucket grid.
			Start2 = max(Start, IntervalStart),
			Size2 = Start + Size - Start2,
			ChunkStart = Start2 - (Start2 - IntervalStart) rem ?DATA_CHUNK_SIZE,
			ChunkFileStart = get_chunk_file_start_by_start_offset(ChunkStart),
			End = Start2 + Size2,
			LastChunkStart = (End - 1) - ((End - 1) - IntervalStart) rem ?DATA_CHUNK_SIZE,
			LastChunkFileStart = get_chunk_file_start_by_start_offset(LastChunkStart),
			ChunkCount = (LastChunkStart - ChunkStart) div ?DATA_CHUNK_SIZE + 1,
			case ChunkFileStart /= LastChunkFileStart of
				false ->
					%% All chunks are from the same chunk file.
					get(Start2, ChunkStart, ChunkFileStart, StoreID, ChunkCount);
				true ->
					%% The range spans a chunk file border: read the chunks up to
					%% the border, then recurse for the remainder.
					SizeBeforeBorder = ChunkFileStart + get_chunk_group_size() - ChunkStart,
					ChunkCountBeforeBorder =
						max(SizeBeforeBorder, ?DATA_CHUNK_SIZE) div ?DATA_CHUNK_SIZE,
					StartAfterBorder = ChunkStart + ChunkCountBeforeBorder * ?DATA_CHUNK_SIZE,
					SizeAfterBorder = Size2 - ChunkCountBeforeBorder * ?DATA_CHUNK_SIZE
							+ (Start2 - ChunkStart),
					get(Start2, ChunkStart, ChunkFileStart, StoreID, ChunkCountBeforeBorder)
							++ get_range(StartAfterBorder, SizeAfterBorder, StoreID)
			end;
		_ ->
			[]
	end.

%% @doc Close the file with the given Key.
close_file(Key, StoreID) ->
	case erlang:erase({cfile, {Key, StoreID}}) of
		undefined ->
			ok;
		F ->
			file:close(F)
	end.

%% @doc Close the files opened by open_files/1.
close_files(StoreID) ->
	close_files(erlang:get_keys(), StoreID).

%% @doc Soft-delete everything above the given end offset.
cut(Offset, StoreID) ->
	ar_sync_record:cut(Offset, ar_chunk_storage, StoreID).

%% @doc Remove the chunk with the given end offset.
delete(Offset) ->
	delete(Offset, ?DEFAULT_MODULE).

%% @doc Remove the chunk with the given end offset.
delete(PaddedOffset, StoreID) ->
	GenServerID = name(StoreID),
	case catch gen_server:call(GenServerID, {delete, PaddedOffset}, 20000) of
		{'EXIT', {shutdown, {gen_server, call, _}}} ->
			%% Handle to avoid the large badmatch log on shutdown.
			{error, shutdown};
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			{error, timeout};
		Reply ->
			Reply
	end.

%% @doc Run defragmentation of chunk files if enabled.
%% Selects the chunk files that have grown past the configured threshold,
%% rewrites them sparsely, and records their new sizes.
run_defragmentation() ->
	{ok, Config} = arweave_config:get_env(),
	case Config#config.run_defragmentation of
		false ->
			ok;
		true ->
			ar:console("Defragmentation threshold: ~B bytes.~n",
					[Config#config.defragmentation_trigger_threshold]),
			DefragModules = modules_to_defrag(Config),
			Sizes = read_chunks_sizes(Config#config.data_dir),
			Files = files_to_defrag(DefragModules,
					Config#config.data_dir,
					Config#config.defragmentation_trigger_threshold,
					Sizes),
			ok = defrag_files(Files),
			ok = update_sizes_file(Files, #{})
	end.

%% @doc Return the root directory of the given storage module.
%% The default module stores directly under the data directory.
get_storage_module_path(DataDir, ?DEFAULT_MODULE) ->
	DataDir;
get_storage_module_path(DataDir, StoreID) ->
	filename:join([DataDir, "storage_modules", StoreID]).

%% @doc Return the chunk storage directory of the given storage module.
get_chunk_storage_path(DataDir, StoreID) ->
	filename:join([get_storage_module_path(DataDir, StoreID), ?CHUNK_DIR]).

%% @doc Return the start and end offset of the bucket containing the given offset.
%% A chunk bucket is a 0-based, 256-KiB wide, 256-KiB aligned range that
%% ar_chunk_storage uses to index chunks. The bucket start does NOT necessarily
%% match the chunk's start offset.
-spec get_chunk_bucket_start(Offset :: non_neg_integer()) -> non_neg_integer().
get_chunk_bucket_start(Offset) ->
	PaddedEndOffset = ar_block:get_chunk_padded_offset(Offset),
	ar_util:floor_int(max(0, PaddedEndOffset - ?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE).

%% @doc Return the end offset of the bucket containing the given offset.
-spec get_chunk_bucket_end(Offset :: non_neg_integer()) -> non_neg_integer().
get_chunk_bucket_end(Offset) ->
	get_chunk_bucket_start(Offset) + ?DATA_CHUNK_SIZE.
%% @doc Return the byte (>= ChunkStartOffset, < ChunkEndOffset)
%% that necessarily belongs to the chunk stored in the bucket with the given bucket end
%% offset. For buckets above the strict data split threshold, the byte is the first byte
%% of the chunk that is mapped to the bucket. For buckets below the strict data split
%% threshold, the byte is just guaranteed to belong to the chunk but is not necessarily the
%% chunk's first byte.
-spec get_chunk_byte_from_bucket_end(non_neg_integer()) -> non_neg_integer().
get_chunk_byte_from_bucket_end(BucketEndOffset) ->
	%% sanity checks: the argument must already be a bucket end offset.
	BucketEndOffset = get_chunk_bucket_end(BucketEndOffset),
	%% end sanity checks
	get_chunk_seek_offset(BucketEndOffset) - 1.

%% @doc Returns a byte that is guaranteed to be in the unpadded portion of the chunk
%% identified by Offset. Offset can be any byte within the chunk - in either the unpadded
%% part or the pad. This typically equates to the first byte of the chunk plus one.
%%
%% If Offset is before the ar_block:strict_data_split_threshold() we just return it because we don't
%% have any information about where chunks start or end.
-spec get_chunk_seek_offset(non_neg_integer()) -> non_neg_integer().
get_chunk_seek_offset(Offset) ->
	case Offset > ar_block:strict_data_split_threshold() of
		true ->
			ar_poa:get_padded_offset(Offset, ar_block:strict_data_split_threshold())
					- (?DATA_CHUNK_SIZE)
					+ 1;
		false ->
			Offset
	end.

%% @doc Notify the store's gen_server that entropy generation has completed.
set_entropy_complete(StoreID) ->
	gen_server:cast(name(StoreID), entropy_complete).

%% @doc Read the raw ?OFFSET_SIZE bytes stored in front of the chunk with the
%% given padded end offset. Returns the result of file:pread/3 or an open error.
read_offset(PaddedOffset, StoreID) ->
	{_ChunkFileStart, Filepath, Position, _ChunkOffset} =
		ar_chunk_storage:locate_chunk_on_disk(PaddedOffset, StoreID),
	case file:open(Filepath, [read, raw, binary]) of
		{ok, F} ->
			Result = file:pread(F, Position, ?OFFSET_SIZE),
			file:close(F),
			Result;
		Error ->
			Error
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc gen_server init. The default module keeps its chunk files directly
%% under DataDir/?CHUNK_DIR; every other store gets its own range and an
%% entropy context.
init(?DEFAULT_MODULE = StoreID) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	Dir = get_storage_module_path(DataDir, StoreID),
	ok = filelib:ensure_dir(Dir ++ "/"),
	ok = filelib:ensure_dir(filename:join(Dir, ?CHUNK_DIR) ++ "/"),
	FileIndex = read_file_index(Dir),
	%% The on-disk index stores file names relative to the chunk directory;
	%% expand them to absolute paths and publish them in the ETS index.
	FileIndex2 = maps:map(
		fun(Key, Filepath) ->
			Filepath2 = filename:join([DataDir, ?CHUNK_DIR, Filepath]),
			ets:insert(chunk_storage_file_index, {{Key, StoreID}, Filepath2}),
			Filepath2
		end,
		FileIndex
	),
	warn_custom_chunk_group_size(StoreID),
	{ok, #state{
		file_index = FileIndex2,
		store_id = StoreID
	}};
init(StoreID) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	Dir = get_storage_module_path(DataDir, StoreID),
	ok = filelib:ensure_dir(Dir ++ "/"),
	ok = filelib:ensure_dir(filename:join(Dir, ?CHUNK_DIR) ++ "/"),
	FileIndex = read_file_index(Dir),
	FileIndex2 = maps:map(
		fun(Key, Filepath) ->
			ets:insert(chunk_storage_file_index, {{Key, StoreID}, Filepath}),
			Filepath
		end,
		FileIndex
	),
	warn_custom_chunk_group_size(StoreID),
	{RangeStart, RangeEnd} = ar_storage_module:get_range(StoreID),
	State = #state{
		file_index = FileIndex2,
		store_id = StoreID,
		range_start = RangeStart,
		range_end = RangeEnd
	},
	EntropyContext = ar_entropy_gen:initialize_context(
		StoreID, ar_storage_module:get_packing(StoreID)),
	State2 = State#state{ entropy_context = EntropyContext },
	{ok, State2}.

%% @doc Print a one-time warning when a non-default chunk_storage_file_size
%% is configured.
warn_custom_chunk_group_size(StoreID) ->
	case StoreID == ?DEFAULT_MODULE andalso get_chunk_group_size() /= ?CHUNK_GROUP_SIZE of
		true ->
			%% This warning applies to all store ids, but we will only print it when loading
			%% the default StoreID to ensure it is only printed once.
			WarningMessage = "WARNING: changing chunk_storage_file_size is not "
				"recommended and may cause errors if different sizes are used for the same "
				"chunk storage files.",
			ar:console(WarningMessage),
			?LOG_WARNING(WarningMessage);
		false ->
			ok
	end.

handle_cast(entropy_complete, State) ->
	%% Mark the entropy context as complete, keeping the reward address.
	#state{ entropy_context = {_, RewardAddr} } = State,
	State2 = State#state{ entropy_context = {true, RewardAddr} },
	{noreply, State2};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% Only full 256 KiB chunks are accepted (see is_storage_supported/3).
handle_call({put, PaddedEndOffset, Chunk, Packing}, _From, State)
		when byte_size(Chunk) == ?DATA_CHUNK_SIZE ->
	#state{ store_id = StoreID, entropy_context = EntropyContext,
			file_index = FileIndex } = State,
	Result = store_chunk(
		PaddedEndOffset, Chunk, Packing, StoreID, FileIndex, EntropyContext),
	case Result of
		{ok, FileIndex2, NewPacking} ->
			{reply, {ok, NewPacking}, State#state{ file_index = FileIndex2 }};
		Error ->
			{reply, Error, State}
	end;
handle_call({delete, PaddedEndOffset}, _From, State) ->
	#state{ store_id = StoreID } = State,
	StartOffset = PaddedEndOffset - ?DATA_CHUNK_SIZE,
	%% Remove the sync record entry first, then the entropy record, and only
	%% then zero out the chunk on disk.
	case ar_sync_record:delete(PaddedEndOffset, StartOffset, ar_chunk_storage, StoreID) of
		ok ->
			case ar_entropy_storage:delete_record(PaddedEndOffset, StoreID) of
				ok ->
					case delete_chunk(PaddedEndOffset, StoreID) of
						ok ->
							{reply, ok, State};
						Error ->
							{reply, Error, State}
					end;
				Error2 ->
					{reply, Error2, State}
			end;
		Error3 ->
			{reply, Error3, State}
	end;
handle_call(reset, _, #state{ store_id = StoreID, file_index = FileIndex } = State) ->
	%% Delete every chunk file, reset the sync record and drop the cached
	%% file handles from the process dictionary.
	maps:map(
		fun(_Key, Filepath) ->
			file:delete(Filepath)
		end,
		FileIndex
	),
	ok = ar_sync_record:cut(0, ar_chunk_storage, StoreID),
	erlang:erase(),
	{reply, ok, State#state{ file_index = #{} }};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
handle_info({Ref, _Reply}, State) when is_reference(Ref) ->
	?LOG_ERROR([{event, stale_gen_server_call_reply}, {ref, Ref}, {reply, _Reply}]),
	%% A stale gen_server:call reply.
	{noreply, State};
handle_info({'EXIT', _PID, normal}, State) ->
	{noreply, State};
handle_info({entropy_generated, _Ref, _Entropy}, State) ->
	%% The request that waited for this entropy already gave up.
	?LOG_WARNING([{event, entropy_generation_timed_out}]),
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {info, io_lib:format("~p", [Info])}]),
	{noreply, State}.

%% @doc Flush and close all cached write handles before terminating.
terminate(Reason, _State) ->
	sync_and_close_files(),
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Return the configured chunk file size (bytes per chunk file).
get_chunk_group_size() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.chunk_storage_file_size.

%% @doc Return the full path of the chunk file with the given name for the store.
get_filepath(Name, StoreID) ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	ChunkDir = get_chunk_storage_path(DataDir, StoreID),
	filename:join([ChunkDir, Name]).

%% @doc Dispatch the chunk write: unpacked_padded chunks go through the
%% entropy storage path, everything else is recorded directly.
store_chunk(PaddedEndOffset, Chunk, Packing, StoreID, FileIndex, EntropyContext) ->
	case Packing == unpacked_padded of
		true ->
			ar_entropy_storage:record_chunk(
				PaddedEndOffset, Chunk, StoreID, FileIndex, EntropyContext);
		false ->
			record_chunk(
				PaddedEndOffset, Chunk, Packing, StoreID, FileIndex)
	end.
%% @doc Write the chunk to disk and, on success, register it in the sync
%% record and the file index. Returns {ok, FileIndex2, Packing} or an error.
record_chunk(PaddedEndOffset, Chunk, Packing, StoreID, FileIndex) ->
	case write_chunk(PaddedEndOffset, Chunk, FileIndex, StoreID) of
		{ok, Filepath} ->
			prometheus_counter:inc(chunks_stored,
				[ar_storage_module:packing_label(Packing),
					ar_storage_module:label(StoreID)]),
			case ar_sync_record:add(
					PaddedEndOffset, PaddedEndOffset - ?DATA_CHUNK_SIZE,
					sync_record_id(Packing), StoreID) of
				ok ->
					ChunkFileStart = get_chunk_file_start(PaddedEndOffset),
					ets:insert(chunk_storage_file_index,
						{{ChunkFileStart, StoreID}, Filepath}),
					{ok, maps:put(ChunkFileStart, Filepath, FileIndex), Packing};
				Error ->
					Error
			end;
		Error2 ->
			Error2
	end.

%% @doc Return the sync record id under which chunks with the given packing
%% are registered.
sync_record_id(unpacked_padded) ->
	%% Entropy indexing changed between 2.9.0 and 2.9.1. So we'll use a new
	%% sync_record id (ar_chunk_storage_replica_2_9_1_unpacked) going forward.
	%% The old id (ar_chunk_storage_replica_2_9_unpacked) should not be used.
	ar_chunk_storage_replica_2_9_1_unpacked;
sync_record_id(_Packing) ->
	ar_chunk_storage.

%% @doc Return the start offset of the chunk file holding the chunk with the
%% given end offset.
get_chunk_file_start(EndOffset) ->
	StartOffset = EndOffset - ?DATA_CHUNK_SIZE,
	get_chunk_file_start_by_start_offset(StartOffset).

%% @doc Floor the start offset to the chunk file grid.
get_chunk_file_start_by_start_offset(StartOffset) ->
	ar_util:floor_int(StartOffset, get_chunk_group_size()).

%% @doc Locate the chunk's slot and write it via a cached file handle.
write_chunk(PaddedOffset, Chunk, FileIndex, StoreID) ->
	{_ChunkFileStart, Filepath, Position, ChunkOffset} =
		locate_chunk_on_disk(PaddedOffset, StoreID, FileIndex),
	case get_handle_by_filepath(Filepath) of
		{error, _} = Error ->
			Error;
		F ->
			write_chunk2(PaddedOffset, ChunkOffset, Chunk, Filepath, F, Position)
	end.

%% @doc Look up the chunk file path in the in-memory index, falling back to
%% the conventional path derived from the chunk file start offset.
filepath(ChunkFileStart, FileIndex, StoreID) ->
	case maps:get(ChunkFileStart, FileIndex, not_found) of
		not_found ->
			filepath(ChunkFileStart, StoreID);
		Filepath ->
			Filepath
	end.

%% @doc Chunk files are named after their start offset.
filepath(ChunkFileStart, StoreID) ->
	get_filepath(integer_to_binary(ChunkFileStart), StoreID).
%% @doc Return a cached read-write handle for the given chunk file, opening
%% (and caching in the process dictionary) a new one if needed.
get_handle_by_filepath(Filepath) ->
	case erlang:get({write_handle, Filepath}) of
		undefined ->
			case file:open(Filepath, [read, write, raw]) of
				{error, Reason} = Error ->
					?LOG_ERROR([
						{event, failed_to_open_chunk_file},
						{file, Filepath},
						{reason, io_lib:format("~p", [Reason])}
					]),
					Error;
				{ok, F} ->
					erlang:put({write_handle, Filepath}, F),
					F
			end;
		F ->
			F
	end.

%% @doc Write the ?OFFSET_SIZE-byte intra-bucket offset followed by the chunk
%% at the given position. Returns {ok, Filepath} or the pwrite error.
write_chunk2(_PaddedOffset, ChunkOffset, Chunk, Filepath, F, Position) ->
	Result = file:pwrite(F, Position, [<< ChunkOffset:?OFFSET_BIT_SIZE >> | Chunk]),
	case Result of
		{error, _Reason} = Error ->
			Error;
		ok ->
			{ok, Filepath}
	end.

%% @doc The on-disk encoding of a zero intra-bucket offset: zero is reserved
%% for "not written yet", so an actual zero offset is stored as ?DATA_CHUNK_SIZE.
get_special_zero_offset() ->
	?DATA_CHUNK_SIZE.

%% @doc Compute the file position and the encoded intra-bucket offset for the
%% chunk with the given padded end offset.
get_position_and_relative_chunk_offset(ChunkFileStart, Offset) ->
	BucketPickOffset = Offset - ?DATA_CHUNK_SIZE,
	get_position_and_relative_chunk_offset_by_start_offset(ChunkFileStart, BucketPickOffset).

get_position_and_relative_chunk_offset_by_start_offset(ChunkFileStart, BucketPickOffset) ->
	BucketStart = ar_util:floor_int(BucketPickOffset, ?DATA_CHUNK_SIZE),
	ChunkOffset =
		case BucketPickOffset - BucketStart of
			0 ->
				%% Represent 0 as the largest possible offset plus one,
				%% to distinguish zero offset from not yet written data.
				get_special_zero_offset();
			Offset ->
				Offset
		end,
	RelativeOffset = BucketStart - ChunkFileStart,
	%% Each bucket slot in the file holds ?OFFSET_SIZE bytes of offset
	%% followed by ?DATA_CHUNK_SIZE bytes of chunk data.
	Position = RelativeOffset + ?OFFSET_SIZE * (RelativeOffset div ?DATA_CHUNK_SIZE),
	{Position, ChunkOffset}.

%% @doc Overwrite the chunk's slot with a zeroed record ("not written").
%% Missing chunk files count as success.
delete_chunk(PaddedOffset, StoreID) ->
	{_ChunkFileStart, Filepath, Position, _ChunkOffset} =
		locate_chunk_on_disk(PaddedOffset, StoreID),
	case file:open(Filepath, [read, write, raw]) of
		{ok, F} ->
			ZeroChunk =
				case erlang:get(zero_chunk) of
					undefined ->
						OffsetBytes = << 0:?OFFSET_BIT_SIZE >>,
						ZeroBytes = << <<0>> || _ <- lists:seq(1, ?DATA_CHUNK_SIZE) >>,
						Chunk = << OffsetBytes/binary, ZeroBytes/binary >>,
						%% Cache the zero chunk in the process memory, constructing
						%% it is expensive.
						erlang:put(zero_chunk, Chunk),
						Chunk;
					Chunk ->
						Chunk
				end,
			%% Serialize with concurrent entropy writes to the same file.
			ar_entropy_storage:acquire_semaphore(Filepath),
			Result = file:pwrite(F, Position, ZeroChunk),
			ar_entropy_storage:release_semaphore(Filepath),
			Result;
		{error, enoent} ->
			ok;
		Error ->
			Error
	end.

%% @doc Read ChunkCount chunks from the chunk file starting at ChunkFileStart,
%% using the process-dictionary handle cache when available, then filter the
%% result by the sync record (unless the store is repacking in place).
get(Byte, Start, ChunkFileStart, StoreID, ChunkCount) ->
	ReadChunks =
		case erlang:get({cfile, {ChunkFileStart, StoreID}}) of
			undefined ->
				case ets:lookup(chunk_storage_file_index, {ChunkFileStart, StoreID}) of
					[] ->
						[];
					[{_, Filepath}] ->
						read_chunk(Byte, Start, ChunkFileStart, Filepath, ChunkCount, StoreID)
				end;
			File ->
				read_chunk2(Byte, Start, ChunkFileStart, File, ChunkCount, StoreID)
		end,
	case ar_storage_module:is_repack_in_place(StoreID) of
		true ->
			ReadChunks;
		false ->
			filter_by_sync_record(ReadChunks, Byte, Start, ChunkFileStart, StoreID, ChunkCount)
	end.

%% @doc Open the chunk file and delegate to read_chunk2/6. A missing file
%% yields an empty result.
read_chunk(Byte, Start, ChunkFileStart, Filepath, ChunkCount, StoreID) ->
	case file:open(Filepath, [read, raw, binary]) of
		{error, enoent} ->
			[];
		{error, Reason} ->
			?LOG_ERROR([
				{event, failed_to_open_chunk_file},
				{byte, Byte},
				{reason, io_lib:format("~p", [Reason])}
			]),
			[];
		{ok, File} ->
			Result = read_chunk2(Byte, Start, ChunkFileStart, File, ChunkCount, StoreID),
			file:close(File),
			Result
	end.

read_chunk2(Byte, Start, ChunkFileStart, File, ChunkCount, StoreID) ->
	{Position, _ChunkOffset} =
		get_position_and_relative_chunk_offset_by_start_offset(ChunkFileStart, Start),
	BucketStart = ar_util:floor_int(Start, ?DATA_CHUNK_SIZE),
	read_chunk3(Byte, Position, BucketStart, File, ChunkCount, StoreID).
%% @doc Read ChunkCount (offset, chunk) records in one pread and extract the
%% {PaddedEndOffset, Chunk} pairs; records observed metrics along the way.
read_chunk3(Byte, Position, BucketStart, File, ChunkCount, StoreID) ->
	StartTime = erlang:monotonic_time(),
	case file:pread(File, Position, (?DATA_CHUNK_SIZE + ?OFFSET_SIZE) * ChunkCount) of
		{ok, << ChunkOffset:?OFFSET_BIT_SIZE, _Chunk/binary >> = Bin} ->
			StoreIDLabel = ar_storage_module:label(StoreID),
			ar_metrics:record_rate_metric(
				StartTime, byte_size(Bin),
				chunk_read_rate_bytes_per_second, [StoreIDLabel, raw]),
			prometheus_counter:inc(chunks_read, [StoreIDLabel], ChunkCount),
			%% Only the first record's offset is validated against Byte; zeroed
			%% records within the batch are skipped during extraction.
			case is_offset_valid(Byte, BucketStart, ChunkOffset) of
				true ->
					extract_end_offset_chunk_pairs(Bin, BucketStart, 1);
				false ->
					[]
			end;
		{error, Reason} ->
			?LOG_ERROR([
				{event, failed_to_read_chunk},
				{byte, Byte},
				{position, Position},
				{reason, io_lib:format("~p", [Reason])}
			]),
			[];
		eof ->
			[]
	end.

%% @doc Walk the concatenated (offset, chunk) records and build the
%% {PaddedEndOffset, Chunk} list. A zero offset marks an unwritten slot.
extract_end_offset_chunk_pairs(
		<< 0:?OFFSET_BIT_SIZE, _ZeroChunk:?DATA_CHUNK_SIZE/binary, Rest/binary >>,
		BucketStart,
		Shift
) ->
	extract_end_offset_chunk_pairs(Rest, BucketStart, Shift + 1);
extract_end_offset_chunk_pairs(
		<< ChunkOffset:?OFFSET_BIT_SIZE, Chunk:?DATA_CHUNK_SIZE/binary, Rest/binary >>,
		BucketStart,
		Shift
) ->
	%% ChunkOffset == ?DATA_CHUNK_SIZE encodes a zero intra-bucket offset
	%% (see get_special_zero_offset/0), hence the rem below.
	ChunkOffsetLimit = ?DATA_CHUNK_SIZE,
	EndOffset =
		BucketStart
			+ (ChunkOffset rem ChunkOffsetLimit)
			+ (?DATA_CHUNK_SIZE * Shift),
	[{EndOffset, Chunk} | extract_end_offset_chunk_pairs(Rest, BucketStart, Shift + 1)];
extract_end_offset_chunk_pairs(<<>>, _BucketStart, _Shift) ->
	[];
extract_end_offset_chunk_pairs(<< ChunkOffset:?OFFSET_BIT_SIZE, Chunk/binary >>,
		BucketStart, Shift) ->
	%% A trailing partial record indicates a corrupt or truncated file.
	?LOG_ERROR([{event, unexpected_chunk_data},
		{chunk_offset, ChunkOffset},
		{bucket_start, BucketStart},
		{shift, Shift},
		{chunk_size, byte_size(Chunk)}]),
	[].

is_offset_valid(_Byte, _BucketStart, 0) ->
	%% 0 is interpreted as "data has not been written yet".
	false;
is_offset_valid(Byte, BucketStart, ChunkOffset) ->
	%% Byte must fall within the chunk that starts at BucketStart plus the
	%% decoded intra-bucket offset.
	Delta = Byte - (BucketStart + ChunkOffset rem ?DATA_CHUNK_SIZE),
	Delta >= 0 andalso Delta < ?DATA_CHUNK_SIZE.
%% @doc Collect the synced intervals covering the read range so the chunks
%% can be filtered in one pass.
get_sync_record_intervals(Start, ChunkCount, StoreID) ->
	%% One extra chunk of slack: the last read chunk may end past
	%% Start + ChunkCount * ?DATA_CHUNK_SIZE.
	End = Start + (ChunkCount + 1) * ?DATA_CHUNK_SIZE,
	get_sync_record_intervals(Start, End, StoreID, ar_intervals:new()).

get_sync_record_intervals(Start, End, _StoreID, Intervals) when Start >= End ->
	Intervals;
get_sync_record_intervals(Start, End, StoreID, Intervals) ->
	case ar_sync_record:get_next_synced_interval(Start, End, ar_chunk_storage, StoreID) of
		not_found ->
			Intervals;
		{End2, Start2} ->
			get_sync_record_intervals(End2, End, StoreID,
					ar_intervals:add(Intervals, min(End, End2), Start2))
	end.

%% @doc Drop the chunks that are not registered in the sync record (the holes
%% between chunks may be filled with entropy). Timed via a histogram.
filter_by_sync_record(ReadChunks, Byte, Start, ChunkFileStart, StoreID, ChunkCount) ->
	prometheus_histogram:observe_duration(
		chunk_storage_sync_record_check_duration_milliseconds, [ChunkCount],
		fun() ->
			Intervals = get_sync_record_intervals(Start, ChunkCount, StoreID),
			filter_by_sync_record(
				ReadChunks, Intervals, Byte, Start, ChunkFileStart, StoreID, ChunkCount)
		end).

filter_by_sync_record(Chunks, _Intervals, _Byte, _Start, _ChunkFileStart, _StoreID, 1) ->
	%% The code paths which query a single chunk have already implicitly checked that
	%% the chunk belongs to the sync_record. E.g. ar_chunk_storage:get/2
	Chunks;
filter_by_sync_record([], _Intervals, _Byte, _Start, _ChunkFileStart, _StoreID, _ChunkCount) ->
	[];
filter_by_sync_record([{PaddedEndOffset, Chunk} | Rest], Intervals, Byte, Start,
		ChunkFileStart, StoreID, ChunkCount) ->
	case ar_intervals:is_inside(Intervals, PaddedEndOffset) of
		false ->
			%% The holes between chunks may be filled with entropy.
			filter_by_sync_record(Rest, Intervals, Byte, Start, ChunkFileStart,
					StoreID, ChunkCount);
		_ ->
			[{PaddedEndOffset, Chunk}
				| filter_by_sync_record(Rest, Intervals, Byte, Start, ChunkFileStart,
						StoreID, ChunkCount)]
	end.

%% @doc Close every cached read handle belonging to StoreID; see close_files/1.
close_files([{cfile, {_, StoreID} = Key} | Keys], StoreID) ->
	file:close(erlang:get({cfile, Key})),
	close_files(Keys, StoreID);
close_files([_ | Keys], StoreID) ->
	close_files(Keys, StoreID);
close_files([], _StoreID) ->
	ok.
%% @doc Scan the chunk directory and build the chunk-file-start => filepath map.
%% File names that are not integers are ignored.
read_file_index(Dir) ->
	ChunkDir = filename:join(Dir, ?CHUNK_DIR),
	{ok, Filenames} = file:list_dir(ChunkDir),
	lists:foldl(
		fun(Filename, Acc) ->
			case catch list_to_integer(Filename) of
				Key when is_integer(Key) ->
					maps:put(Key, filename:join(ChunkDir, Filename), Acc);
				_ ->
					Acc
			end
		end,
		#{},
		Filenames
	).

%% @doc Fsync and close every write handle cached in the process dictionary.
sync_and_close_files() ->
	sync_and_close_files(erlang:get_keys()).

sync_and_close_files([{write_handle, _} = Key | Keys]) ->
	F = erlang:get(Key),
	ok = file:sync(F),
	file:close(F),
	sync_and_close_files(Keys);
sync_and_close_files([_ | Keys]) ->
	sync_and_close_files(Keys);
sync_and_close_files([]) ->
	ok.

%% @doc Return the paths of all chunk files of the given store,
%% creating the store's directories when missing.
list_files(DataDir, StoreID) ->
	Dir = get_storage_module_path(DataDir, StoreID),
	ok = filelib:ensure_dir(Dir ++ "/"),
	ok = filelib:ensure_dir(filename:join(Dir, ?CHUNK_DIR) ++ "/"),
	StorageIndex = read_file_index(Dir),
	maps:values(StorageIndex).

%% @doc Select the chunk files that exceed the size threshold AND have grown
%% more than 10% since the last recorded size.
files_to_defrag(StorageModules, DataDir, ByteSizeThreshold, Sizes) ->
	AllFiles = lists:flatmap(
		fun(StorageModule) ->
			list_files(DataDir, ar_storage_module:id(StorageModule))
		end, StorageModules),
	lists:filter(
		fun(Filepath) ->
			case file:read_file_info(Filepath) of
				{ok, #file_info{ size = Size }} ->
					LastSize = maps:get(Filepath, Sizes, 1),
					Growth = (Size - LastSize) / LastSize,
					Size >= ByteSizeThreshold andalso Growth > 0.1;
				{error, Reason} ->
					?LOG_ERROR([
						{event, failed_to_read_chunk_file_info},
						{file, Filepath},
						{reason, io_lib:format("~p", [Reason])}
					]),
					false
			end
		end, AllFiles).

%% @doc Rewrite each file sparsely via rsync, then move it back in place.
%% NOTE(review): the file paths are interpolated into a shell command without
%% quoting - paths containing spaces or shell metacharacters would break the
%% command. Paths come from the node's own data_dir config, so this is assumed
%% safe; confirm if data_dir may contain spaces.
defrag_files([]) ->
	ok;
defrag_files([Filepath | Rest]) ->
	?LOG_DEBUG([{event, defragmenting_file}, {file, Filepath}]),
	ar:console("Defragmenting ~s...~n", [Filepath]),
	TmpFilepath = Filepath ++ ".tmp",
	DefragCmd = io_lib:format("rsync --sparse --quiet ~ts ~ts", [Filepath, TmpFilepath]),
	MoveDefragCmd = io_lib:format("mv ~ts ~ts", [TmpFilepath, Filepath]),
	%% We expect nothing to be returned on successful calls.
	[] = os:cmd(DefragCmd),
	[] = os:cmd(MoveDefragCmd),
	ar:console("Defragmented ~s...~n", [Filepath]),
	defrag_files(Rest).
%% @doc Persist the recorded chunk file sizes to DataDir/chunks_sizes.
%% The first clause (empty list) writes the accumulated map; the second
%% accumulates the current size of each file.
update_sizes_file([], Sizes) ->
	{ok, Config} = arweave_config:get_env(),
	SizesFile = filename:join(Config#config.data_dir, "chunks_sizes"),
	case file:open(SizesFile, [write, raw]) of
		{error, Reason} ->
			?LOG_ERROR([
				{event, failed_to_open_chunk_sizes_file},
				{file, SizesFile},
				{reason, io_lib:format("~p", [Reason])}
			]),
			error;
		{ok, F} ->
			SizesBinary = erlang:term_to_binary(Sizes),
			ok = file:write(F, SizesBinary),
			file:close(F)
	end;
update_sizes_file([Filepath | Rest], Sizes) ->
	case file:read_file_info(Filepath) of
		{ok, #file_info{ size = Size }} ->
			update_sizes_file(Rest, Sizes#{ Filepath => Size });
		{error, Reason} ->
			?LOG_ERROR([
				{event, failed_to_read_chunk_file_info},
				{file, Filepath},
				{reason, io_lib:format("~p", [Reason])}
			]),
			error
	end.

%% @doc Load the chunk file sizes recorded by update_sizes_file/2.
%% A missing file yields an empty map.
read_chunks_sizes(DataDir) ->
	SizesFile = filename:join(DataDir, "chunks_sizes"),
	case file:read_file(SizesFile) of
		{ok, Content} ->
			erlang:binary_to_term(Content, [safe]);
		{error, enoent} ->
			#{};
		{error, Reason} ->
			?LOG_ERROR([
				{event, failed_to_read_chunk_sizes_file},
				{file, SizesFile},
				{reason, io_lib:format("~p", [Reason])}
			]),
			error
	end.

%% @doc The explicitly configured defragmentation modules take precedence
%% over the regular storage modules.
modules_to_defrag(#config{defragmentation_modules = [_ | _] = Modules}) -> Modules;
modules_to_defrag(#config{storage_modules = Modules}) -> Modules.

%%%===================================================================
%%% Tests.
%%%===================================================================

chunk_bucket_test() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
		],
		fun test_chunk_bucket/0, 30).

test_chunk_bucket() ->
	case ar_block:strict_data_split_threshold() of
		700_000 -> ok;
		_ -> throw(unexpected_strict_data_split_threshold)
	end,
	%% get_chunk_bucket_end pads the provided offset
	%% get_chunk_bucket_start does not pad the provided offset

	%% At and before the STRICT_DATA_SPLIT_THRESHOLD, offsets are not padded.
	?assertEqual(262144, get_chunk_bucket_end(0)),
	?assertEqual(0, get_chunk_bucket_start(0)),

	?assertEqual(262144, get_chunk_bucket_end(1)),
	?assertEqual(0, get_chunk_bucket_start(1)),

	?assertEqual(262144, get_chunk_bucket_end(?DATA_CHUNK_SIZE - 1)),
	?assertEqual(0, get_chunk_bucket_start(?DATA_CHUNK_SIZE - 1)),

	?assertEqual(262144, get_chunk_bucket_end(?DATA_CHUNK_SIZE)),
	?assertEqual(0, get_chunk_bucket_start(?DATA_CHUNK_SIZE)),

	?assertEqual(262144, get_chunk_bucket_end(?DATA_CHUNK_SIZE + 1)),
	?assertEqual(0, get_chunk_bucket_start(?DATA_CHUNK_SIZE + 1)),

	?assertEqual(524288, get_chunk_bucket_end(2 * ?DATA_CHUNK_SIZE)),
	?assertEqual(262144, get_chunk_bucket_start(2 * ?DATA_CHUNK_SIZE)),

	?assertEqual(524288, get_chunk_bucket_end(2 * ?DATA_CHUNK_SIZE + 1)),
	?assertEqual(262144, get_chunk_bucket_start(2 * ?DATA_CHUNK_SIZE + 1)),

	?assertEqual(524288, get_chunk_bucket_end(ar_block:strict_data_split_threshold() - 1)),
	?assertEqual(262144, get_chunk_bucket_start(ar_block:strict_data_split_threshold() - 1)),

	?assertEqual(524288, get_chunk_bucket_end(ar_block:strict_data_split_threshold())),
	?assertEqual(262144, get_chunk_bucket_start(ar_block:strict_data_split_threshold())),

	%% After the STRICT_DATA_SPLIT_THRESHOLD, offsets are padded.
	?assertEqual(786432, get_chunk_bucket_end(ar_block:strict_data_split_threshold() + 1)),
	?assertEqual(524288, get_chunk_bucket_start(ar_block:strict_data_split_threshold() + 1)),

	?assertEqual(786432, get_chunk_bucket_end(3 * ?DATA_CHUNK_SIZE - 1)),
	?assertEqual(524288, get_chunk_bucket_start(3 * ?DATA_CHUNK_SIZE - 1)),

	?assertEqual(786432, get_chunk_bucket_end(3 * ?DATA_CHUNK_SIZE)),
	?assertEqual(524288, get_chunk_bucket_start(3 * ?DATA_CHUNK_SIZE)),

	?assertEqual(786432, get_chunk_bucket_end(3 * ?DATA_CHUNK_SIZE + 1)),
	?assertEqual(524288, get_chunk_bucket_start(3 * ?DATA_CHUNK_SIZE + 1)),

	?assertEqual(1048576, get_chunk_bucket_end(4 * ?DATA_CHUNK_SIZE - 1)),
	?assertEqual(786432, get_chunk_bucket_start(4 * ?DATA_CHUNK_SIZE - 1)),

	?assertEqual(1048576, get_chunk_bucket_end(4 * ?DATA_CHUNK_SIZE)),
	?assertEqual(786432, get_chunk_bucket_start(4 * ?DATA_CHUNK_SIZE)),

	?assertEqual(1048576, get_chunk_bucket_end(4 * ?DATA_CHUNK_SIZE + 1)),
	?assertEqual(786432, get_chunk_bucket_start(4 * ?DATA_CHUNK_SIZE + 1)),

	?assertEqual(1310720, get_chunk_bucket_end(5 * ?DATA_CHUNK_SIZE - 1)),
	?assertEqual(1048576, get_chunk_bucket_start(5 * ?DATA_CHUNK_SIZE - 1)),

	?assertEqual(1310720, get_chunk_bucket_end(5 * ?DATA_CHUNK_SIZE)),
	?assertEqual(1048576, get_chunk_bucket_start(5 * ?DATA_CHUNK_SIZE)),

	?assertEqual(1310720, get_chunk_bucket_end(5 * ?DATA_CHUNK_SIZE + 1)),
	?assertEqual(1048576, get_chunk_bucket_start(5 * ?DATA_CHUNK_SIZE + 1)).

get_chunk_byte_from_bucket_end_test() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
		],
		fun test_get_chunk_byte_from_bucket_end/0, 30).
%% Buckets at or below the mocked 700_000 threshold map to their last byte;
%% buckets above it map to the first byte of the padded chunk.
test_get_chunk_byte_from_bucket_end() ->
	?assertEqual(262143, get_chunk_byte_from_bucket_end(262144)),
	?assertEqual(524287, get_chunk_byte_from_bucket_end(524288)),
	?assertEqual(700000, get_chunk_byte_from_bucket_end(786432)),
	?assertEqual(962144, get_chunk_byte_from_bucket_end(1048576)),
	?assertEqual(1224288, get_chunk_byte_from_bucket_end(1310720)),
	?assertEqual(1486432, get_chunk_byte_from_bucket_end(1572864)),
	?assertEqual(1748576, get_chunk_byte_from_bucket_end(1835008)),
	?assertEqual(2010720, get_chunk_byte_from_bucket_end(2097152)),
	?assertEqual(2272864, get_chunk_byte_from_bucket_end(2359296)).

well_aligned_test_() ->
	{timeout, 20, fun test_well_aligned/0}.

%% Exercise put/get/delete/get_range with chunk-aligned end offsets.
test_well_aligned() ->
	clear(?DEFAULT_MODULE),
	Packing = ar_storage_module:get_packing(?DEFAULT_MODULE),
	C1 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C2 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C3 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	{ok, unpacked} = ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE, C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, 2 * ?DATA_CHUNK_SIZE),
	?assertEqual(not_found, ar_chunk_storage:get(2 * ?DATA_CHUNK_SIZE, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(2 * ?DATA_CHUNK_SIZE + 1, ?DEFAULT_MODULE)),
	ar_chunk_storage:delete(2 * ?DATA_CHUNK_SIZE),
	assert_get(not_found, 2 * ?DATA_CHUNK_SIZE),
	ar_chunk_storage:put(?DATA_CHUNK_SIZE, C2, Packing, ?DEFAULT_MODULE),
	assert_get(C2, ?DATA_CHUNK_SIZE),
	assert_get(not_found, 2 * ?DATA_CHUNK_SIZE),
	ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE, C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, 2 * ?DATA_CHUNK_SIZE),
	assert_get(C2, ?DATA_CHUNK_SIZE),
	?assertEqual([{?DATA_CHUNK_SIZE, C2}, {2 * ?DATA_CHUNK_SIZE, C1}],
			ar_chunk_storage:get_range(0, 2 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{?DATA_CHUNK_SIZE, C2}, {2 * ?DATA_CHUNK_SIZE, C1}],
			ar_chunk_storage:get_range(1, 2 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{?DATA_CHUNK_SIZE, C2}, {2 * ?DATA_CHUNK_SIZE, C1}],
			ar_chunk_storage:get_range(1, 2 * ?DATA_CHUNK_SIZE - 1)),
	?assertEqual([{?DATA_CHUNK_SIZE, C2}, {2 * ?DATA_CHUNK_SIZE, C1}],
			ar_chunk_storage:get_range(0, 3 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{?DATA_CHUNK_SIZE, C2}, {2 * ?DATA_CHUNK_SIZE, C1}],
			ar_chunk_storage:get_range(0, ?DATA_CHUNK_SIZE + 1)),
	ar_chunk_storage:put(3 * ?DATA_CHUNK_SIZE, C3, Packing, ?DEFAULT_MODULE),
	assert_get(C2, ?DATA_CHUNK_SIZE),
	assert_get(C1, 2 * ?DATA_CHUNK_SIZE),
	assert_get(C3, 3 * ?DATA_CHUNK_SIZE),
	?assertEqual(not_found, ar_chunk_storage:get(3 * ?DATA_CHUNK_SIZE, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(3 * ?DATA_CHUNK_SIZE + 1, ?DEFAULT_MODULE)),
	ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE, C2, Packing, ?DEFAULT_MODULE),
	assert_get(C2, ?DATA_CHUNK_SIZE),
	assert_get(C2, 2 * ?DATA_CHUNK_SIZE),
	assert_get(C3, 3 * ?DATA_CHUNK_SIZE),
	ar_chunk_storage:delete(?DATA_CHUNK_SIZE),
	assert_get(not_found, ?DATA_CHUNK_SIZE),
	?assertEqual([], ar_chunk_storage:get_range(0, ?DATA_CHUNK_SIZE)),
	assert_get(C2, 2 * ?DATA_CHUNK_SIZE),
	assert_get(C3, 3 * ?DATA_CHUNK_SIZE),
	?assertEqual([{2 * ?DATA_CHUNK_SIZE, C2}, {3 * ?DATA_CHUNK_SIZE, C3}],
			ar_chunk_storage:get_range(0, 4 * ?DATA_CHUNK_SIZE)),
	?assertEqual([], ar_chunk_storage:get_range(7 * ?DATA_CHUNK_SIZE, 13 * ?DATA_CHUNK_SIZE)).

not_aligned_test_() ->
	{timeout, 20, fun test_not_aligned/0}.
%% Exercise put/get/delete/get_range with end offsets that are not multiples
%% of ?DATA_CHUNK_SIZE (pre-threshold, unaligned buckets).
test_not_aligned() ->
	clear(?DEFAULT_MODULE),
	Packing = ar_storage_module:get_packing(?DEFAULT_MODULE),
	C1 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C2 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C3 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE + 7, C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, 2 * ?DATA_CHUNK_SIZE + 7),
	ar_chunk_storage:delete(2 * ?DATA_CHUNK_SIZE + 7),
	assert_get(not_found, 2 * ?DATA_CHUNK_SIZE + 7),
	ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE + 7, C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, 2 * ?DATA_CHUNK_SIZE + 7),
	?assertEqual(not_found, ar_chunk_storage:get(2 * ?DATA_CHUNK_SIZE + 7, ?DEFAULT_MODULE)),
	?assertEqual(not_found,
			ar_chunk_storage:get(?DATA_CHUNK_SIZE + 7 - 1, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(?DATA_CHUNK_SIZE, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(?DATA_CHUNK_SIZE - 1, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(0, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(1, ?DEFAULT_MODULE)),
	ar_chunk_storage:put(?DATA_CHUNK_SIZE + 3, C2, Packing, ?DEFAULT_MODULE),
	assert_get(C2, ?DATA_CHUNK_SIZE + 3),
	?assertEqual(not_found, ar_chunk_storage:get(0, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(1, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(2, ?DEFAULT_MODULE)),
	ar_chunk_storage:delete(2 * ?DATA_CHUNK_SIZE + 7),
	assert_get(C2, ?DATA_CHUNK_SIZE + 3),
	assert_get(not_found, 2 * ?DATA_CHUNK_SIZE + 7),
	ar_chunk_storage:put(3 * ?DATA_CHUNK_SIZE + 7, C3, Packing, ?DEFAULT_MODULE),
	assert_get(C3, 3 * ?DATA_CHUNK_SIZE + 7),
	ar_chunk_storage:put(3 * ?DATA_CHUNK_SIZE + 7, C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, 3 * ?DATA_CHUNK_SIZE + 7),
	ar_chunk_storage:put(4 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2, C2, Packing,
			?DEFAULT_MODULE),
	assert_get(C2, 4 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2),
	?assertEqual(
		not_found,
		ar_chunk_storage:get(4 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2, ?DEFAULT_MODULE)
	),
	?assertEqual(not_found,
			ar_chunk_storage:get(3 * ?DATA_CHUNK_SIZE + 7, ?DEFAULT_MODULE)),
	?assertEqual(not_found,
			ar_chunk_storage:get(3 * ?DATA_CHUNK_SIZE + 8, ?DEFAULT_MODULE)),
	ar_chunk_storage:put(5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1, C2, Packing,
			?DEFAULT_MODULE),
	assert_get(C2, 5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1),
	assert_get(not_found, 2 * ?DATA_CHUNK_SIZE + 7),
	ar_chunk_storage:delete(4 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2),
	assert_get(not_found, 4 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2),
	assert_get(C2, 5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1),
	assert_get(C1, 3 * ?DATA_CHUNK_SIZE + 7),
	?assertEqual([{3 * ?DATA_CHUNK_SIZE + 7, C1}],
			ar_chunk_storage:get_range(2 * ?DATA_CHUNK_SIZE + 7, 2 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{3 * ?DATA_CHUNK_SIZE + 7, C1}],
			ar_chunk_storage:get_range(2 * ?DATA_CHUNK_SIZE + 6, 2 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{3 * ?DATA_CHUNK_SIZE + 7, C1},
			{5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1, C2}],
			%% The end offset of the second chunk is bigger than Start + Size but
			%% it is included because Start + Size is bigger than the start offset
			%% of the bucket where the last chunk is placed.
			ar_chunk_storage:get_range(2 * ?DATA_CHUNK_SIZE + 7, 2 * ?DATA_CHUNK_SIZE + 1)),
	?assertEqual([{3 * ?DATA_CHUNK_SIZE + 7, C1},
			{5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1, C2}],
			ar_chunk_storage:get_range(2 * ?DATA_CHUNK_SIZE + 7, 3 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{3 * ?DATA_CHUNK_SIZE + 7, C1},
			{5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1, C2}],
			ar_chunk_storage:get_range(2 * ?DATA_CHUNK_SIZE + 7 - 1, 3 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{3 * ?DATA_CHUNK_SIZE + 7, C1},
			{5 * ?DATA_CHUNK_SIZE + ?DATA_CHUNK_SIZE div 2 + 1, C2}],
			ar_chunk_storage:get_range(2 * ?DATA_CHUNK_SIZE, 4 * ?DATA_CHUNK_SIZE)).

cross_file_aligned_test_() ->
	{timeout, 20, fun test_cross_file_aligned/0}.
%% @doc Exercise chunk storage around a chunk-group (file) boundary with
%% aligned offsets: the chunk ending exactly at get_chunk_group_size() lands
%% at the edge of a storage file, and the next chunk starts the following file.
test_cross_file_aligned() ->
	clear(?DEFAULT_MODULE),
	Packing = ar_storage_module:get_packing(?DEFAULT_MODULE),
	C1 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C2 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	%% The chunk ending at the group boundary is readable from every byte
	%% inside it, but not at or past its end offset.
	ar_chunk_storage:put(get_chunk_group_size(), C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, get_chunk_group_size()),
	?assertEqual(not_found, ar_chunk_storage:get(get_chunk_group_size(), ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(get_chunk_group_size() + 1, ?DEFAULT_MODULE)),
	?assertEqual(not_found, ar_chunk_storage:get(0, ?DEFAULT_MODULE)),
	?assertEqual(not_found,
		ar_chunk_storage:get(get_chunk_group_size() - ?DATA_CHUNK_SIZE - 1, ?DEFAULT_MODULE)),
	%% The next chunk lives in the following file; both sides of the boundary
	%% are visible to get_range/2.
	ar_chunk_storage:put(get_chunk_group_size() + ?DATA_CHUNK_SIZE, C2, Packing,
		?DEFAULT_MODULE),
	assert_get(C2, get_chunk_group_size() + ?DATA_CHUNK_SIZE),
	assert_get(C1, get_chunk_group_size()),
	?assertEqual([{get_chunk_group_size(), C1},
			{get_chunk_group_size() + ?DATA_CHUNK_SIZE, C2}],
		ar_chunk_storage:get_range(get_chunk_group_size() - ?DATA_CHUNK_SIZE,
			2 * ?DATA_CHUNK_SIZE)),
	?assertEqual([{get_chunk_group_size(), C1},
			{get_chunk_group_size() + ?DATA_CHUNK_SIZE, C2}],
		ar_chunk_storage:get_range(get_chunk_group_size() - 2 * ?DATA_CHUNK_SIZE - 1,
			4 * ?DATA_CHUNK_SIZE)),
	?assertEqual(not_found, ar_chunk_storage:get(0, ?DEFAULT_MODULE)),
	?assertEqual(not_found,
		ar_chunk_storage:get(get_chunk_group_size() - ?DATA_CHUNK_SIZE - 1, ?DEFAULT_MODULE)),
	%% Deleting the boundary chunk leaves the one in the next file intact,
	%% and the slot can be re-filled afterwards.
	ar_chunk_storage:delete(get_chunk_group_size(), ?DEFAULT_MODULE),
	assert_get(not_found, get_chunk_group_size(), ?DEFAULT_MODULE),
	assert_get(C2, get_chunk_group_size() + ?DATA_CHUNK_SIZE),
	ar_chunk_storage:put(get_chunk_group_size(), C2, Packing, ?DEFAULT_MODULE),
	assert_get(C2, get_chunk_group_size()).

%% EUnit generator: run test_cross_file_not_aligned/0 with a 20 second timeout.
cross_file_not_aligned_test_() ->
	{timeout, 20, fun test_cross_file_not_aligned/0}.
%% @doc Exercise chunk storage around chunk-group (file) boundaries with
%% misaligned offsets: chunks straddle the boundary between two storage files,
%% and deletes of absent or straddling chunks must not corrupt neighbors.
test_cross_file_not_aligned() ->
	clear(?DEFAULT_MODULE),
	Packing = ar_storage_module:get_packing(?DEFAULT_MODULE),
	C1 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C2 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C3 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C4 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	C5 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	%% A chunk ending one byte past the first group boundary.
	ar_chunk_storage:put(get_chunk_group_size() + 1, C1, Packing, ?DEFAULT_MODULE),
	assert_get(C1, get_chunk_group_size() + 1),
	?assertEqual(not_found, ar_chunk_storage:get(get_chunk_group_size() + 1, ?DEFAULT_MODULE)),
	?assertEqual(not_found,
		ar_chunk_storage:get(get_chunk_group_size() - ?DATA_CHUNK_SIZE, ?DEFAULT_MODULE)),
	%% Chunks on either side of the second group boundary.
	ar_chunk_storage:put(2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2, C2, Packing,
		?DEFAULT_MODULE),
	assert_get(C2, 2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2),
	?assertEqual(not_found, ar_chunk_storage:get(get_chunk_group_size() + 1, ?DEFAULT_MODULE)),
	ar_chunk_storage:put(2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2, C3, Packing,
		?DEFAULT_MODULE),
	assert_get(C2, 2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2),
	assert_get(C3, 2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2),
	ar_chunk_storage:put(2 * get_chunk_group_size() + 3 * ?DATA_CHUNK_SIZE div 2, C4, Packing,
		?DEFAULT_MODULE),
	ar_chunk_storage:put(2 * get_chunk_group_size() + 5 * ?DATA_CHUNK_SIZE div 2, C5, Packing,
		?DEFAULT_MODULE),
	%% get_range/2 across the boundary picks up chunks by bucket intersection.
	?assertEqual([{2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2, C2},
			{2 * get_chunk_group_size() + 3 * ?DATA_CHUNK_SIZE div 2, C4}],
		ar_chunk_storage:get_range(2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2,
			?DATA_CHUNK_SIZE * 2)),
	?assertEqual([{2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2, C2},
			{2 * get_chunk_group_size() + 3 * ?DATA_CHUNK_SIZE div 2, C4},
			{2 * get_chunk_group_size() + 5 * ?DATA_CHUNK_SIZE div 2, C5}],
		ar_chunk_storage:get_range(2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2 + 10,
			?DATA_CHUNK_SIZE * 2)),
	?assertEqual([{2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2, C3},
			{2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2, C2}],
		ar_chunk_storage:get_range(
			2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2 - ?DATA_CHUNK_SIZE,
			?DATA_CHUNK_SIZE * 2)),
	?assertEqual([{2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2, C3},
			{2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2, C2},
			{2 * get_chunk_group_size() + 3 * ?DATA_CHUNK_SIZE div 2, C4}],
		ar_chunk_storage:get_range(
			2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2 - ?DATA_CHUNK_SIZE + 10,
			?DATA_CHUNK_SIZE * 2)),
	?assertEqual(not_found, ar_chunk_storage:get(get_chunk_group_size() + 1, ?DEFAULT_MODULE)),
	?assertEqual(
		not_found,
		ar_chunk_storage:get(get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2 - 1,
			?DEFAULT_MODULE)
	),
	%% Delete the straddling chunk; its neighbors must survive.
	ar_chunk_storage:delete(2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2),
	assert_get(not_found, 2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2),
	assert_get(C2, 2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2),
	assert_get(C1, get_chunk_group_size() + 1),
	ar_chunk_storage:delete(get_chunk_group_size() + 1),
	assert_get(not_found, get_chunk_group_size() + 1),
	assert_get(not_found, 2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2),
	assert_get(C2, 2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2),
	ar_chunk_storage:delete(2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2),
	assert_get(not_found, 2 * get_chunk_group_size() + ?DATA_CHUNK_SIZE div 2),
	%% Deleting already-deleted or never-written offsets is a no-op.
	ar_chunk_storage:delete(get_chunk_group_size() + 1),
	ar_chunk_storage:delete(100 * get_chunk_group_size() + 1),
	%% The straddling slot can be re-filled after deletion.
	ar_chunk_storage:put(2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2, C1, Packing,
		?DEFAULT_MODULE),
	assert_get(C1, 2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2),
	?assertEqual(not_found,
		ar_chunk_storage:get(2 * get_chunk_group_size() - ?DATA_CHUNK_SIZE div 2,
			?DEFAULT_MODULE)).

%% @doc Reset the chunk storage server for the given store ID before a test.
clear(StoreID) ->
	ok = gen_server:call(name(StoreID), reset).

%% @doc Convenience wrapper: assert against the default storage module.
assert_get(Expected, Offset) ->
	assert_get(Expected, Offset, ?DEFAULT_MODULE).
%% @doc Assert that the chunk ending at Offset is returned for every queried
%% byte strictly inside it: just below the end offset, at several points near
%% the start, and around the middle. Expected is either the chunk binary or
%% the atom not_found.
assert_get(Expected, Offset, StoreID) ->
	ExpectedResult =
		case Expected of
			not_found -> not_found;
			_ -> {Offset, Expected}
		end,
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - 1, StoreID)),
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - 2, StoreID)),
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE, StoreID)),
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE + 1, StoreID)),
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE + 2, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 2, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 2 + 1, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 2 - 1, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 3, StoreID)).

%% @doc Verify defragmentation of a sparse chunk file: 24-bit offset headers
%% followed by 262144-byte chunks are written at scattered positions, the file
%% is defragmented, and every record (and the sparse zero regions between
%% them) must still read back at its original position.
defrag_command_test() ->
	RandomID = crypto:strong_rand_bytes(16),
	Filepath = "test_defrag_" ++ binary_to_list(ar_util:encode(RandomID)),
	{ok, F} = file:open(Filepath, [binary, write]),
	%% Each record is a 24-bit offset header followed by a 262144-byte chunk.
	{O1, C1} = {236, crypto:strong_rand_bytes(262144)},
	{O2, C2} = {262144, crypto:strong_rand_bytes(262144)},
	{O3, C3} = {262143, crypto:strong_rand_bytes(262144)},
	file:pwrite(F, 1, <<"a">>),
	file:pwrite(F, 1000, <<"b">>),
	file:pwrite(F, 1000000, <<"cde">>),
	file:pwrite(F, 10000001, << O1:24, C1/binary, O2:24, C2/binary >>),
	file:pwrite(F, 30000001, << O3:24, C3/binary >>),
	file:close(F),
	defrag_files([Filepath]),
	{ok, F2} = file:open(Filepath, [binary, read]),
	%% Sparse gaps must read back as zero bytes and every marker byte must
	%% remain at its original position after defragmentation.
	?assertEqual({ok, <<0>>}, file:pread(F2, 0, 1)),
	?assertEqual({ok, <<"a">>}, file:pread(F2, 1, 1)),
	?assertEqual({ok, <<0>>}, file:pread(F2, 2, 1)),
	?assertEqual({ok, <<"b">>}, file:pread(F2, 1000, 1)),
	?assertEqual({ok, <<"c">>}, file:pread(F2, 1000000, 1)),
	?assertEqual({ok, <<"cde">>}, file:pread(F2, 1000000, 3)),
	?assertEqual({ok, C1}, file:pread(F2, 10000001 + 3, 262144)),
	?assertMatch({ok, << O1:24, _/binary >>}, file:pread(F2, 10000001, 10)),
	?assertMatch({ok, << O1:24, C1:262144/binary, O2:24, C2:262144/binary,
			0:((262144 + 3) * 2 * 8) >>},
		file:pread(F2, 10000001, (262144 + 3) * 4)),
	?assertMatch({ok, << O3:24, C3:262144/binary >>},
		file:pread(F2, 30000001, 262144 + 3 + 100)). % End of file => +100 is ignored.



================================================
FILE: apps/arweave/src/ar_chunk_storage_sup.erl
================================================
-module(ar_chunk_storage_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the chunk storage supervisor, registered locally as ?MODULE.
start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% @doc Create the shared chunk_storage_file_index ETS table, then collect
%% the child specs of the chunk storage, repack, and entropy workers and
%% supervise them one_for_one (max 5 restarts in 10 seconds).
init([]) ->
	ets:new(chunk_storage_file_index,
		[set, public, named_table, {read_concurrency, true}]),
	Workers = ar_chunk_storage:register_workers()
		++ ar_repack:register_workers()
		++ ar_entropy_gen:register_workers(ar_entropy_gen)
		++ ar_entropy_gen:register_workers(ar_entropy_storage),
	{ok, {{one_for_one, 5, 10}, Workers}}.



================================================
FILE: apps/arweave/src/ar_chunk_visualization.erl
================================================
-module(ar_chunk_visualization).

-export([get_chunk_packings/3, get_chunk_packings/4,
	generate_bitmap/1, bitmap_to_binary/1, print_chunk_stats/1]).

-include_lib("ar.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Build a list of lists where each inner list represents a sector worth of packing
%% formats. Each sector row will form a row in the bitmap.
get_chunk_packings(ModuleStart, ModuleEnd, StoreID) ->
	get_chunk_packings(ModuleStart, ModuleEnd, StoreID, false).

%% @doc Same as get_chunk_packings/3, but optionally prints scan progress to
%% the console. Returns one list per entropy sector; each entry is a packing
%% descriptor, or 'missing' (no chunk in the bucket), 'none' (bucket outside
%% the module range), 'error', or 'too_small'.
get_chunk_packings(ModuleStart, ModuleEnd, StoreID, PrintProgress) ->
	Partition = ar_node:get_partition_number(ModuleStart),
	PartitionStart = ar_chunk_storage:get_chunk_bucket_start(ModuleStart),
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	BucketsPerSector = SectorSize div ?DATA_CHUNK_SIZE,
	NumSectors = ar_block:get_replica_2_9_entropy_partition_size() div SectorSize,
	case PrintProgress of
		true ->
			ar:console("Partition ~p~n", [Partition]),
			ar:console("PartitionStart: ~p~n", [PartitionStart]),
			ar:console("SectorSize: ~p~n", [SectorSize]),
			ar:console("BucketsPerSector: ~p~n", [BucketsPerSector]),
			ar:console("NumSectors: ~p~n", [NumSectors]);
		_ ->
			ok
	end,
	lists:map(
		fun(SectorIndex) ->
			SectorStart = PartitionStart + SectorIndex * SectorSize,
			SectorEnd = SectorStart + SectorSize,
			%% Chunk Range will be a bit larger than the sector range to make sure we don't
			%% miss any chunks.
			ChunkRangeStart = ar_chunk_storage:get_chunk_byte_from_bucket_end(SectorStart),
			ChunkRangeEnd = ar_chunk_storage:get_chunk_byte_from_bucket_end(SectorEnd)
				+ ?DATA_CHUNK_SIZE,
			case PrintProgress of
				true ->
					ar:console(
						"Partition ~p sector ~4B. Bucket Offsets ~p to ~p. "
						"Chunk Range ~p to ~p.~n",
						[Partition, SectorIndex, SectorStart, SectorEnd,
							ChunkRangeStart, ChunkRangeEnd]);
				false ->
					ok
			end,
			{ok, MetadataRange} = ar_data_sync:get_chunk_metadata_range(
				ChunkRangeStart, ChunkRangeEnd, StoreID),
			%% Initialize map with all bucket end offsets set to 'missing'.
			BucketMap = lists:foldl(
				fun(J, Acc) ->
					BucketEndOffset = SectorStart + J * ?DATA_CHUNK_SIZE,
					case BucketEndOffset < ModuleStart
							orelse BucketEndOffset > ModuleEnd of
						true ->
							Acc;
						false ->
							maps:put(BucketEndOffset, missing, Acc)
					end
				end,
				#{},
				lists:seq(1, BucketsPerSector)),
			%% Process metadata to update the map.
			UpdatedMap = maps:fold(
				fun(AbsoluteEndOffset, Metadata, Acc) ->
					BucketEndOffset =
						ar_chunk_storage:get_chunk_bucket_end(AbsoluteEndOffset),
					case maps:is_key(BucketEndOffset, Acc) of
						true ->
							IsRecorded = ar_sync_record:is_recorded(
								AbsoluteEndOffset, ar_data_sync, StoreID),
							maps:put(BucketEndOffset,
								normalize_sync_record(IsRecorded, AbsoluteEndOffset,
									Metadata),
								Acc);
						false ->
							Acc
					end
				end,
				BucketMap,
				MetadataRange),
			%% Convert map to list in order of bucket end offsets.
			lists:map(
				fun(J) ->
					BucketEndOffset = SectorStart + J * ?DATA_CHUNK_SIZE,
					case BucketEndOffset < ModuleStart
							orelse BucketEndOffset > ModuleEnd of
						true ->
							none;
						false ->
							maps:get(BucketEndOffset, UpdatedMap)
					end
				end,
				lists:seq(1, BucketsPerSector))
		end,
		lists:seq(0, NumSectors - 1)).

%% @doc Convert packing formats to RGB pixels.
generate_bitmap(PackingRows) ->
	lists:map(
		fun(Row) ->
			lists:map(fun packing_color/1, Row)
		end,
		PackingRows).

%% @doc Convert a bitmap (list of rows; each row a list of {R, G, B} tuples)
%% into a binary PPM image (P6 format: text header, then 3 raw bytes per
%% pixel in row-major order).
bitmap_to_binary(BitmapRows) ->
	Height = length(BitmapRows),
	Width =
		case BitmapRows of
			[Row | _] -> length(Row);
			[] -> 0
		end,
	Header = io_lib:format("P6\n~w ~w\n255\n", [Width, Height]),
	%% Build pixel binary data (each pixel is 3 bytes: R,G,B).
	%% Note: the extracted source read "[<> || ...]", which is not valid
	%% Erlang; the binary constructor must carry the three color bytes.
	PixelData = [<<R, G, B>> || Row <- BitmapRows, {R, G, B} <- Row],
	list_to_binary([Header, PixelData]).
%% @doc Print, via ar:console, the total number of chunks and a per-packing
%% breakdown (count and percentage, plus the bitmap color of each packing),
%% sorted by packing term.
print_chunk_stats(ChunkPackings) ->
	CountsByPacking = chunk_statistics(ChunkPackings),
	Total = lists:sum(maps:values(CountsByPacking)),
	ar:console("Total chunks: ~p~n", [Total]),
	ar:console("Chunk counts by packing format:~n"),
	PrintOne =
		fun({Packing, Count}) ->
			Percentage =
				if
					Total =:= 0 -> 0.0;
					true -> Count * 100 / Total
				end,
			ar:console("~p (~p): ~p chunks (~.2f%)~n",
				[ar_serialize:encode_packing(Packing, false),
					packing_color(Packing), Count, Percentage])
		end,
	lists:foreach(PrintOne, lists:sort(maps:to_list(CountsByPacking))).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Collapse a sync-record lookup plus chunk metadata into a single
%% packing descriptor for the bitmap: 'missing' when the offset is not
%% recorded, 'error' when the metadata is absent or the record is malformed,
%% 'too_small' when chunk storage cannot hold the chunk, otherwise the
%% packing term itself.
normalize_sync_record(false, _Offset, _Metadata) ->
	missing;
normalize_sync_record(_IsRecorded, _Offset, not_found) ->
	error;
normalize_sync_record({true, Packing}, PaddedEndOffset, ChunkMetadata) ->
	%% The sixth element of the metadata tuple is the chunk size.
	{_, _, _, _, _, ChunkSize} = ChunkMetadata,
	Supported = ar_chunk_storage:is_storage_supported(
		PaddedEndOffset, ChunkSize, Packing),
	case Supported of
		true -> Packing;
		false -> too_small
	end;
normalize_sync_record(_IsRecorded, _Offset, _Metadata) ->
	error.

%% @doc Returns a unique color (as an {R,G,B} tuple) for each recognized packing format.
%% Status pseudo-packings get fixed colors.
packing_color(missing) -> {0, 0, 0};
packing_color(error) -> {255, 0, 0};
packing_color(too_small) -> {255, 0, 255};
packing_color(unpacked) -> {255, 255, 255};
packing_color(unpacked_padded) -> {128, 128, 128};
packing_color(none) -> {0, 255, 255};
%% Addressed packings: drop the packing difficulty, then perturb the format's
%% base color with a hash of the mining address so different addresses get
%% visually distinct colors.
packing_color({Format, Addr, _PackingDifficulty}) ->
	packing_color({Format, Addr});
packing_color({Format, Addr}) ->
	BaseColor = packing_color(Format),
	%% Compute a hash from Addr and extract offsets.
	Hash = erlang:phash2(Addr, 16777216),
	Roffset = Hash band 255,
	Goffset = (Hash bsr 8) band 255,
	Boffset = (Hash bsr 16) band 255,
	{(element(1, BaseColor) + Roffset) rem 256,
		(element(2, BaseColor) + Goffset) rem 256,
		(element(3, BaseColor) + Boffset) rem 256};
%% Base colors for known packing formats
packing_color(replica_2_9) -> {0, 0, 255}; %% blue
packing_color(spora_2_6) -> {0, 255, 0}; %% green
packing_color(composite) -> {255, 255, 0}; %% yellow
packing_color(_) -> {255, 0, 0}. %% red for unknown packings

%% @doc Count occurrences of each packing descriptor across all rows,
%% returning a map of packing term to count.
chunk_statistics(ChunkPackings) ->
	lists:foldl(
		fun(Row, AccCounts) ->
			lists:foldl(
				fun(Packing, RowAccCounts) ->
					maps:update_with(Packing, fun(N) -> N + 1 end, 1, RowAccCounts)
				end,
				AccCounts,
				Row)
		end,
		#{},
		ChunkPackings).



================================================
FILE: apps/arweave/src/ar_cli_parser.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @doc `ar_cli_parser' module legacy command line argument parser.
%%%
%%% This module has been created from `ar' module. The code is mostly
%%% the same, except the exported interfaces have been renamed.
%%%
%%% @end
%%%===================================================================
-module(ar_cli_parser).
-compile(warnings_as_errors). -export([ eval/2, parse/2, show_help/0 ]). -include("ar.hrl"). -include("ar_consensus.hrl"). -include("ar_verify_chunks.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("eunit/include/eunit.hrl"). %%-------------------------------------------------------------------- %% @doc show help and stop the node. %% @end %%-------------------------------------------------------------------- show_help() -> io:format("Usage: arweave-server [options]~n"), io:format("Compatible with network: ~s~n", [?NETWORK_NAME]), io:format("Options:~n"), lists:foreach( fun({Opt, Desc}) -> io:format("\t~s~s~n", [ string:pad(Opt, 40, trailing, $ ), Desc ] ) end, [ {"config_file (path)", io_lib:format("Load the configuration from the " "specified JSON file.~n~n" "The configuration file is currently the only place where you may configure " "webhooks and tune semaphores.~n~n" "An example:~n~n" "{~n" " \"webhooks\": [~n" " {~n" " \"events\": [\"transaction\", \"block\"],~n" " \"url\": \"https://example.com/block_or_tx\",~n" " \"headers\": {~n" " \"Authorization\": \"Bearer 123\"~n" " }~n" " },~n" " {~n" " \"events\": [\"transaction_data\"],~n" " \"url\": \"http://127.0.0.1:1985/tx_data\"~n" " },~n" " {~n" " \"events\": [\"solution\"],~n" " \"url\": \"http://127.0.0.1:1985/solution\"~n" " },~n" " \"semaphores\": {\"post_tx\": 100}~n" "}~n~n" "100 means the node will validate up to 100 incoming transactions in " "parallel.~n" "The supported semaphore keys are get_chunk, get_and_pack_chunk, get_tx_data, " "post_chunk, post_tx, get_block_index, get_wallet_list, get_sync_record.~n~n", [])}, {"peer (IP:port)", "Join a network on a peer (or set of peers)."}, {"block_gossip_peer (IP:port)", "Optionally specify peer(s) to always" " send blocks to."}, {"local_peer (IP:port)", "The local network peer. 
Local peers do not rate limit " "each other so we recommend you connect all your nodes from the same " "network via this configuration parameter."}, {"sync_from_local_peers_only", "If set, the data (not headers) is only synced " "from the local network peers specified via the local_peer parameter."}, {"start_from_latest_state", "Start the node from the latest stored state."}, {"start_from_state (folder)", "Start the node from the state stored in the " "specified folder. This folder must be different from data_dir. " "Implicitly sets start_from_latest_state to true."}, {"start_from_block (block hash)", "Start the node from the state corresponding " "to the given block hash."}, {"start_from_block_index", "The legacy name for start_from_latest_state."}, {"mine", "Automatically start mining once the netwok has been joined."}, {"port", "The local port to use for mining. " "This port must be accessible by remote peers."}, {"data_dir", "The directory for storing the weave and the wallets (when generated)."}, {"log_dir", "The directory for logs. If the \"debug\" flag is set, the debug logs " "are written to logs/debug_logs/. The RocksDB logs are written to " "logs/rocksdb/."}, {"storage_module", "A storage module is responsible for syncronizing and storing " "a particular data range. The data and metadata related to the module " "are stored in a dedicated folder " "([data_dir]/storage_modules/storage_module_[partition_number]_[replica_type]/" ") where replica_type is either {mining_address} or" " {mining address}.{composite packing difficulty} or" " {mining address}.replica.2.9 or \"unpacked\"." " Example: storage_module 0,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI.1. " "To configure a module of a custom size, set " "storage_module {number},{size_in_bytes},{replica_type}. For instance, " "storage_module " "22,1000000000000,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI.1 will be " "syncing the weave data between the offsets 22 TB and 23 TB. 
Make sure " "the corresponding disk contains some extra space for the proofs and " "other metadata, about 10% of the configured size." "You may repack a storage module in-place. To do that, specify " "storage_module " "{partition_number},{packing},repack_in_place,{target_packing}. " "For example, if you want to repack a storage module " "22,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI.1 to the new address " "Q5EfKawrRazp11HEDf_NJpxjYMV385j21nlQNjR8_pY, specify " "storage_module " "22,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI.1,repack_in_place," "Q5EfKawrRazp11HEDf_NJpxjYMV385j21nlQNjR8_pY.replica.2.9. This storage module " "will only do the repacking - it won't be used for mining and won't " "serve any data to peers. Once the repacking is complete, a message will " "be logged to the file and written to the console. We suggest you rename " "the storage module folder according to the new packing then. " "Note: as of 2.9.1 you can only repack in place to the replica_2_9 " "format." }, {"repack_batch_size", io_lib:format("The number of batches to process at a time " "during in-place repacking. For each partition being repacked, a batch " "requires about 512 MiB of memory. Default: ~B.", [?DEFAULT_REPACK_BATCH_SIZE])}, {"repack_cache_size_mb", io_lib:format("The size (in MiB) of the cache for " "in-place repacking. The node will restrict the cache size to this amount for " "each partition being repacked. Default: ~B.", [?DEFAULT_REPACK_CACHE_SIZE_MB])}, {"polling (num)", lists:flatten( io_lib:format( "Ask some peers about new blocks every N seconds. Default is ~p.", [?DEFAULT_POLLING_INTERVAL] ) )}, {"block_pollers (num)", io_lib:format( "How many peer polling jobs to run. Default is ~p.", [?DEFAULT_BLOCK_POLLERS])}, {"no_auto_join", "Do not automatically join the network of your peers."}, {"join_workers (num)", io_lib:format("The number of workers fetching the recent " "blocks and transactions simultaneously when joining the network. " "Default: ~B. 
", [?DEFAULT_JOIN_WORKERS])}, {"mining_addr (addr)", io_lib:format( "The address mining rewards should be credited to. If the \"mine\" flag" " is set but no mining_addr is specified, an RSA PSS key is created" " and stored in the [data_dir]/~s directory. If the directory already" " contains such keys, the one written later is picked, no new files are" " created. After the fork 2.6, the specified address is also a replication key, " "so it is used to prepare synced data for mining even if the \"mine\" flag is not " "specified. The data already packed with different addresses is not repacked.", [?WALLET_DIR])}, {"hashing_threads (num)", io_lib:format("The number of hashing processes to spawn." " Takes effect starting from the fork 2.6 block." " Default is ~B.", [?NUM_HASHING_PROCESSES])}, {"data_cache_size_limit (num)", "The approximate maximum number of data chunks " "kept in memory by the syncing processes."}, {"packing_cache_size_limit (num)", "The approximate maximum number of data chunks " "kept in memory by the packing process."}, {"mining_cache_size_mb (num)", "The total amount of cache " "(in MiB) allocated to store unprocessed chunks while mining. The mining " "server will only read new data when there is room in the cache to store " "more chunks. This cache is subdivided into sub-caches for each mined " "partition. When omitted, it is determined based on the number of " "mining partitions."}, {"max_emitters (num)", io_lib:format("The number of transaction propagation " "processes to spawn. Must be at least 1. Default is ~B.", [?NUM_EMITTER_PROCESSES])}, {"post_tx_timeout", io_lib:format("The time in seconds to wait for the available" " tx validation process before dropping the POST /tx request. Default is ~B." " By default ~B validation processes are running. 
You can override it by" " setting a different value for the post_tx key in the semaphores object" " in the configuration file.", [?DEFAULT_POST_TX_TIMEOUT, ?MAX_PARALLEL_POST_TX_REQUESTS])}, {"max_propagation_peers", io_lib:format( "The maximum number of peers to propagate transactions to. " "Default is ~B.", [?DEFAULT_MAX_PROPAGATION_PEERS])}, {"max_block_propagation_peers", io_lib:format( "The maximum number of best peers to propagate blocks to. " "Default is ~B.", [?DEFAULT_MAX_BLOCK_PROPAGATION_PEERS])}, {"sync_jobs (num)", io_lib:format( "The number of data syncing jobs to run. Default: ~B." " Each job periodically picks a range and downloads it from peers.", [?DEFAULT_SYNC_JOBS] )}, {"header_sync_jobs (num)", io_lib:format( "The number of header syncing jobs to run. Default: ~B." " Each job periodically picks the latest not synced block header" " and downloads it from peers.", [?DEFAULT_HEADER_SYNC_JOBS] )}, {"enable_data_roots_syncing [true|false]", "Enable or disable background data roots syncing. Default: true."}, {"data_sync_request_packed_chunks", "Enables requesting the packed chunks from peers."}, {"disk_pool_jobs (num)", io_lib:format( "The number of disk pool jobs to run. Default: ~B." " Disk pool jobs scan the disk pool to index no longer pending or" " orphaned chunks, schedule packing for chunks with a sufficient" " number of confirmations and remove abandoned chunks.", [?DEFAULT_DISK_POOL_JOBS] )}, {"load_mining_key (file)", "DEPRECATED. Does not take effect anymore."}, {"transaction_blacklist (file)", "A file containing blacklisted transactions. " "One Base64 encoded transaction ID per line."}, {"transaction_blacklist_url", "An HTTP endpoint serving a transaction blacklist."}, {"transaction_whitelist (file)", "A file containing whitelisted transactions. " "One Base64 encoded transaction ID per line. 
" "If a transaction is in both lists, it is " "considered whitelisted."}, {"transaction_whitelist_url", "An HTTP endpoint serving a transaction whitelist."}, {"disk_space_check_frequency (num)", io_lib:format( "The frequency in seconds of requesting the information " "about the available disk space from the operating system, " "used to decide on whether to continue syncing the historical " "data or clean up some space. Default is ~B.", [?DISK_SPACE_CHECK_FREQUENCY_MS div 1000] )}, {"init", "Start a new weave."}, {"internal_api_secret (secret)", lists:flatten(io_lib:format( "Enables the internal API endpoints, only accessible with this secret." " Min. ~B chars.", [?INTERNAL_API_SECRET_MIN_LEN]))}, {"enable (feature)", "Enable a specific (normally disabled) feature. For example, " "subfield_queries."}, {"disable (feature)", "Disable a specific (normally enabled) feature."}, {"requests_per_minute_limit (number)", "Limit the maximum allowed number of HTTP " "requests per IP address per minute. Default is 900."}, {"max_connections", io:format( "The number of connections to be handled concurrently. " "Its purpose is to prevent your system from being overloaded and " "ensuring all the connections are handled optimally. " "Default is ~p.", [?DEFAULT_COWBOY_TCP_MAX_CONNECTIONS] )}, {"disk_pool_data_root_expiration_time", "The time in seconds of how long a pending or orphaned data root is kept in " "the disk pool. The default is 2 * 60 * 60 (2 hours)."}, {"max_disk_pool_buffer_mb", "The max total size (in MiB)) of the pending chunks in the disk pool." "The default is 2000 (2 GiB)."}, {"max_disk_pool_data_root_buffer_mb", "The max size (in MiB) per data root of the pending chunks in the disk" " pool. The default is 50."}, {"max_duplicate_data_roots", io_lib:format( "The maximum number of duplicate data roots to inspect when " "checking whether a posted chunk is already synced. 
Default is ~B.", [?DEFAULT_MAX_DUPLICATE_DATA_ROOTS] )}, {"disk_cache_size_mb", lists:flatten(io_lib:format( "The maximum size (in MiB) of the disk space allocated for" " storing recent block and transaction headers. Default is ~B.", [?DISK_CACHE_SIZE] ) )}, {"packing_workers (num)", "The number of packing workers to spawn. The default is the number of " "logical CPU cores."}, {"replica_2_9_workers (num)", io_lib:format( "The number of replica 2.9 workers to spawn. Replica 2.9 workers are used " "to generate entropy for the replica.2.9 format. By default, at most one " "worker will be active per physical disk at a time. Default: ~B", [?DEFAULT_REPLICA_2_9_WORKERS] )}, {"disable_replica_2_9_device_limit", "Disable the device limit for the replica.2.9 format. By default, at most " "one worker will be active per physical disk at a time, setting this flag " "removes this limit allowing multiple workers to be active on a given " "physical disk." }, {"replica_2_9_entropy_cache_size_mb (num)", io_lib:format( "The maximum cache size (in MiB) to allocate for for replica.2.9 entropy. " "Each cached entropy is 256 MiB. The bigger the cache, the more replica.2.9 data " "can be synced concurrently. Default: ~B", [?DEFAULT_REPLICA_2_9_ENTROPY_CACHE_SIZE_MB] )}, {"max_vdf_validation_thread_count", io_lib:format("\tThe maximum number " "of threads used for VDF validation. Default: ~B", [?DEFAULT_MAX_NONCE_LIMITER_VALIDATION_THREAD_COUNT])}, {"max_vdf_last_step_validation_thread_count", io_lib:format( "\tThe maximum number of threads used for VDF last step " "validation. Default: ~B", [?DEFAULT_MAX_NONCE_LIMITER_LAST_STEP_VALIDATION_THREAD_COUNT])}, {"vdf_server_trusted_peer", "If the option is set, we expect the given " "peer(s) to push VDF updates to us; we will thus not compute VDF outputs " "ourselves. Recommended on CPUs without hardware extensions for computing" " SHA-2. We will nevertheless validate VDF chains in blocks. 
Also, we " "recommend you specify at least two trusted peers to aim for shorter " "mining downtime."}, {"vdf_client_peer", "If the option is set, the node will push VDF updates " "to this peer. You can specify several vdf_client_peer options."}, {"debug", "Enable extended logging."}, {"run_defragmentation", "Run defragmentation of chunk storage files."}, {"defragmentation_trigger_threshold", "File size threshold in bytes for it to be considered for defragmentation."}, {"block_throttle_by_ip_interval (number)", io_lib:format("The number of milliseconds that have to pass before " "we accept another block from the same IP address. Default: ~B.", [?DEFAULT_BLOCK_THROTTLE_BY_IP_INTERVAL_MS])}, {"block_throttle_by_solution_interval (number)", io_lib:format("The number of milliseconds that have to pass before " "we accept another block with the same solution hash. " "Default: ~B.", [?DEFAULT_BLOCK_THROTTLE_BY_SOLUTION_INTERVAL_MS])}, {"defragment_module", "Run defragmentation of the chunk storage files from the given storage module." " Assumes the run_defragmentation flag is provided."}, {"tls_cert_file", "Optional path to the TLS certificate file for TLS support, " "depends on 'tls_key_file' being set as well."}, {"tls_key_file", "The path to the TLS key file for TLS support, depends " "on 'tls_cert_file' being set as well."}, {"coordinated_mining", "Enable coordinated mining. If you are a solo pool miner " "coordinating on a replica with other pool miners, set this flag too. " "To connect the internal nodes, set cm_api_secret, cm_peer, " "and cm_exit_peer. Make sure every node specifies every other node in the " "cluster via cm_peer or cm_exit_peer. The same peer may be both cm_peer " "and cm_exit_peer. Also, set the mine flag on every CM peer. You may or " "may not set the mine flag on the exit peer."}, {"cm_api_secret", "Coordinated mining secret for authenticated " "requests between private peers. 
You need to also set coordinated_mining, " "cm_peer, and cm_exit_peer."}, {"cm_poll_interval", io_lib:format("The frequency in milliseconds of asking the " "other nodes in the coordinated mining setup about their partition " "tables. Default is ~B.", [?DEFAULT_CM_POLL_INTERVAL_MS])}, {"cm_out_batch_timeout (num)", io_lib:format("The frequency in milliseconds of " "sending other nodes in the coordinated mining setup a batch of H1 " "values to hash. A higher value reduces network traffic, a lower value " "reduces hashing latency. Default is ~B.", [?DEFAULT_CM_BATCH_TIMEOUT_MS])}, {"cm_peer (IP:port)", "The peer(s) to mine in coordination with. You need to also " "set coordinated_mining, cm_api_secret, and cm_exit_peer. The same " "peer may be specified as cm_peer and cm_exit_peer. If we are an exit " "peer, make sure to also set cm_peer for every miner we work with."}, {"cm_exit_peer (IP:port)", "The peer to send mining solutions to in the " "coordinated mining mode. You need to also set coordinated_mining, " "cm_api_secret, and cm_peer. If cm_exit_peer is not set, we are the " "exit peer. When is_pool_client is set, the exit peer " "is a proxy through which we communicate with the pool."}, {"is_pool_server", "Configure the node as a pool server. The pool node may not " "participate in the coordinated mining."}, {"is_pool_client", "Configure the node as a pool client. The node may be an " "exit peer in the coordinated mining setup or a standalone node."}, {"pool_api_key", "API key for the requests to the pool."}, {"pool_server_address", "The pool address"}, {"pool_worker_name", "(optional) The pool worker name. " "Useful if you have multiple machines (or replicas) " "and you want to monitor them separately on pool"}, {"rocksdb_flush_interval", "RocksDB flush interval in seconds"}, {"rocksdb_wal_sync_interval", "RocksDB WAL sync interval in seconds"}, {"verify", "Run in verify. There are two valid values 'purge' or 'log'. 
" "The node will run several checks on all listed storage_modules, and flag any " "errors. In 'log' mode the error are just logged, in 'purge' node the chunks " "are invalidated so that they have to be repacked. After completing a full " "verification cycle, you can restart the node in normal mode to have it " "resync and/or repack any flagged chunks. When running in verify mode several " "flags are disallowed. See the node output for details."}, {"verify_samples (num)", io_lib:format("Number of chunks to sample and unpack " "during 'verify'. Default is ~B.", [?SAMPLE_CHUNK_COUNT])}, {"vdf (mode)", io_lib:format("VDF implementation (openssl (default), openssllite," " fused, hiopt_m4). Default is openssl.", [])}, % Shutdown management {"network.tcp.shutdown.connection_timeout", io_lib:format( "Configure shutdown TCP connection timeout (seconds). " "Default is '~p'.", [?SHUTDOWN_TCP_CONNECTION_TIMEOUT] )}, {"network.tcp.shutdown.mode", io_lib:format( "Configure shutdown TCP mode (shutdown or close). " "Default is '~p'.", [?SHUTDOWN_TCP_MODE] )}, % Global socket configuration {"network.socket.backend", io_lib:format( "Configure Erlang default socket backend (inet or socket). " "Default is '~p'.", [?DEFAULT_SOCKET_BACKEND] )}, % Gun HTTP Client Tuning {"http_client.http.closing_timeout", io_lib:format( "Configure HTTP Client closing timeout parameter (milliseconds). " "Default is '~p'.", [?DEFAULT_GUN_HTTP_CLOSING_TIMEOUT] )}, {"http_client.http.keepalive", io_lib:format( "Configure HTTP Client keep alive parameter (seconds or infinity). " "Default is '~p'.", [?DEFAULT_GUN_HTTP_KEEPALIVE] )}, {"http_client.tcp.delay_send", io_lib:format( "Configure HTTP Client TCP delay send parameter (boolean). " "Default is '~p'.", [?DEFAULT_GUN_TCP_DELAY_SEND] )}, {"http_client.tcp.keepalive", io_lib:format( "Configure HTTP Client TCP keepalive parameter (boolean). 
" "Default is '~p'.", [?DEFAULT_GUN_TCP_KEEPALIVE] )}, {"http_client.tcp.linger", io_lib:format( "Configure HTTP Client TCP linger parameter (boolean). " "Default is '~p'.", [?DEFAULT_GUN_TCP_LINGER] )}, {"http_client.tcp.linger_timeout", io_lib:format( "Configure HTTP Client TCP linger timeout parameter (seconds). " "Default is '~p'.", [?DEFAULT_GUN_TCP_LINGER_TIMEOUT] )}, {"http_client.tcp.nodelay", io_lib:format( "Configure HTTP Client TCP nodelay parameter (boolean). " "Default is '~p'.", [?DEFAULT_GUN_TCP_NODELAY] )}, {"http_client.tcp.send_timeout_close", io_lib:format( "Configure HTTP Client TCP send timeout close parameter (boolean). " "Default is '~p'.", [?DEFAULT_GUN_TCP_SEND_TIMEOUT_CLOSE] )}, {"http_client.tcp.send_timeout", io_lib:format( "Configure HTTP Client TCP send timeout parameter (milliseconds). " "Default is '~p'.", [?DEFAULT_GUN_TCP_SEND_TIMEOUT] )}, % Cowboy HTTP Server Tuning {"http_api.http.active_n", io_lib:format( "Configure HTTP Server number of packets requested per sockets (integer). " "Default is '~p'.", [?DEFAULT_COWBOY_HTTP_ACTIVE_N] )}, {"http_api.tcp.idle_timeout_seconds", io_lib:format( "The number of seconds allowed for incoming API client connections to be idle " "before closing them. Default is '~p' seconds. " "Please, do not set this value too low " "as it will negatively affect the performance of the node.", [?DEFAULT_COWBOY_TCP_IDLE_TIMEOUT_SECOND] )}, {"http_api.http.inactivity_timeout", io_lib:format( "Configure HTTP Server inactivity timeout (milliseconds). " "Default is '~p'.", [?DEFAULT_COWBOY_HTTP_INACTIVITY_TIMEOUT] )}, {"http_api.http.linger_timeout", io_lib:format( "Configure HTTP Server linger timeout (milliseconds). " "Default is '~p'.", [?DEFAULT_COWBOY_HTTP_LINGER_TIMEOUT] )}, {"http_api.http.request_timeout", io_lib:format( "Configure HTTP Server request timeout (milliseconds). 
" "Default is '~p'.", [?DEFAULT_COWBOY_HTTP_REQUEST_TIMEOUT] )}, {"http_api.tcp.backlog", io_lib:format( "Configure HTTP Server TCP backlog parameter (integer). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_BACKLOG] )}, {"http_api.tcp.delay_send", io_lib:format( "Configure HTTP Server TCP delay send parameter (boolean). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_DELAY_SEND] )}, {"http_api.tcp.keepalive", io_lib:format( "Configure HTTP Server TCP keepalive parameter (boolean). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_KEEPALIVE] )}, {"http_api.tcp.linger", io_lib:format( "Configure HTTP Server TCP linger parameter (boolean). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_LINGER] )}, {"http_api.tcp.linger_timeout", io_lib:format( "Configure HTTP Server TCP linger timeout parameter (seconds). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_LINGER_TIMEOUT] )}, {"http_api.tcp.listener_shutdown", io_lib:format( "Configure HTTP Server listener shutdown (seconds)." "Default is '~p'.", [?DEFAULT_COWBOY_TCP_LISTENER_SHUTDOWN] )}, {"http_api.tcp.nodelay", io_lib:format( "Configure HTTP Server TCP nodelay parameter (boolean). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_NODELAY] )}, {"http_api.tcp.num_acceptors", io_lib:format( "Configure HTTP Server TCP acceptors (integer). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_NUM_ACCEPTORS] )}, {"http_api.tcp.send_timeout_close", io_lib:format( "Configure HTTP Server TCP send timeout close parameter (boolean). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_SEND_TIMEOUT_CLOSE] )}, {"http_api.tcp.send_timeout", io_lib:format( "Configure HTTP Server TCP send timeout parameter (milliseconds). " "Default is '~p'.", [?DEFAULT_COWBOY_TCP_SEND_TIMEOUT] )} ] ). %%-------------------------------------------------------------------- %% @doc evaluate `parse_cli_args/2' function and execute actions if %% required (returned by `parse/2' function). 
%% @end
%%--------------------------------------------------------------------
-spec eval(Args, Config) -> Return when
	Args :: [string()],
	Config :: #config{},
	Return :: Config | [term()].
eval(Args, Config) ->
	case parse(Args, Config) of
		{ok, C} ->
			C;
		{error, Actions, _C} ->
			%% Execute the requested actions (e.g. show help, stop the
			%% node) and return each MFA together with its result.
			[ {M, F, A, erlang:apply(M, F, A)} || {M, F, A} <- Actions ]
	end.

%%--------------------------------------------------------------------
%% @doc Legacy argument parser. This function will return the
%% configuration as `#config{}' record in case of success, or returns
%% an error with a list of actions to execute as MFA.
%% @end
%%--------------------------------------------------------------------
-spec parse(Args, Config) -> Return when
	Args :: [string()],
	Config :: #config{},
	Return :: {ok, Config} | {error, Actions, Config},
	Actions :: {M, F, A},
	M :: atom(),
	F :: atom(),
	A :: [term()].
parse([], C) ->
	{ok, C};
parse(["config_file", _ | Rest], C) ->
	% ignore config_file parameter when using arguments parser.
	parse(Rest, C);
parse(["mine" | Rest], C) ->
	parse(Rest, C#config{ mine = true });
parse(["verify", "purge" | Rest], C) ->
	parse(Rest, C#config{ verify = purge });
parse(["verify", "log" | Rest], C) ->
	parse(Rest, C#config{ verify = log });
parse(["verify", _ | _], C) ->
	io:format("Invalid verify mode. Valid modes are 'purge' or 'log'.~n"),
	{error, [ {timer, sleep, [1000]}, {init, stop, [1]} ], C};
parse(["verify_samples", "all" | Rest], C) ->
	parse(Rest, C#config{ verify_samples = all });
parse(["verify_samples", N | Rest], C) ->
	parse(Rest, C#config{ verify_samples = list_to_integer(N) });
parse(["vdf", Mode | Rest], C) ->
	%% Unknown VDF implementations fall back to openssl (with a warning)
	%% instead of aborting startup.
	ParsedMode = case Mode of
		"openssl" -> openssl;
		"openssllite" -> openssllite;
		"fused" -> fused;
		"hiopt_m4" -> hiopt_m4;
		_ ->
			io:format("VDF ~p is invalid.~n", [Mode]),
			openssl
	end,
	parse(Rest, C#config{ vdf = ParsedMode });
parse(["peer", Peer | Rest], C = #config{ peers = Ps }) ->
	case ar_util:safe_parse_peer(Peer) of
		{ok, ValidPeers} when is_list(ValidPeers) ->
			NewConfig = C#config{ peers = ValidPeers ++ Ps },
			parse(Rest, NewConfig);
		{error, _} ->
			io:format("Peer ~p is invalid.~n", [Peer]),
			parse(Rest, C)
	end;
parse(["block_gossip_peer", Peer | Rest], C = #config{ block_gossip_peers = Peers }) ->
	case ar_util:safe_parse_peer(Peer) of
		{ok, ValidPeer} when is_list(ValidPeer) ->
			parse(Rest, C#config{ block_gossip_peers = ValidPeer ++ Peers });
		{error, _} ->
			io:format("Peer ~p invalid ~n", [Peer]),
			parse(Rest, C)
	end;
parse(["local_peer", Peer | Rest], C = #config{ local_peers = Peers }) ->
	case ar_util:safe_parse_peer(Peer) of
		{ok, ValidPeer} when is_list(ValidPeer) ->
			parse(Rest, C#config{ local_peers = ValidPeer ++ Peers });
		{error, _} ->
			io:format("Peer ~p is invalid.~n", [Peer]),
			parse(Rest, C)
	end;
parse(["sync_from_local_peers_only" | Rest], C) ->
	parse(Rest, C#config{ sync_from_local_peers_only = true });
parse(["transaction_blacklist", File | Rest],
		C = #config{ transaction_blacklist_files = Files }) ->
	parse(Rest, C#config{ transaction_blacklist_files = [File | Files] });
parse(["transaction_blacklist_url", URL | Rest],
		C = #config{ transaction_blacklist_urls = URLs }) ->
	parse(Rest, C#config{ transaction_blacklist_urls = [URL | URLs] });
parse(["transaction_whitelist", File | Rest],
		C = #config{ transaction_whitelist_files = Files }) ->
	parse(Rest, C#config{ transaction_whitelist_files = [File | Files] });
parse(["transaction_whitelist_url", URL | Rest],
		C = #config{ transaction_whitelist_urls = URLs }) ->
	parse(Rest, C#config{ transaction_whitelist_urls = [URL | URLs] });
parse(["port", Port | Rest], C) ->
	parse(Rest, C#config{ port = list_to_integer(Port) });
parse(["data_dir", DataDir | Rest], C) ->
	parse(Rest, C#config{ data_dir = DataDir });
parse(["log_dir", Dir | Rest], C) ->
	parse(Rest, C#config{ log_dir = Dir });
parse(["storage_module", StorageModuleString | Rest], C) ->
	try
		case ar_config:parse_storage_module(StorageModuleString) of
			{ok, StorageModule} ->
				StorageModules = C#config.storage_modules,
				parse(Rest, C#config{
					storage_modules = [StorageModule | StorageModules] });
			{repack_in_place, StorageModule} ->
				StorageModules = C#config.repack_in_place_storage_modules,
				parse(Rest, C#config{
					repack_in_place_storage_modules =
						[StorageModule | StorageModules] })
		end
	catch _:_ ->
		io:format("~nstorage_module value must be "
				"in the {number},{address}[,repack_in_place,{to_packing}] format.~n~n"),
		{error, [ {init, stop, [1]} ], C}
	end;
parse(["repack_batch_size", N | Rest], C) ->
	parse(Rest, C#config{ repack_batch_size = list_to_integer(N) });
parse(["repack_cache_size_mb", N | Rest], C) ->
	parse(Rest, C#config{ repack_cache_size_mb = list_to_integer(N) });
parse(["polling", Frequency | Rest], C) ->
	parse(Rest, C#config{ polling = list_to_integer(Frequency) });
parse(["block_pollers", N | Rest], C) ->
	parse(Rest, C#config{ block_pollers = list_to_integer(N) });
parse(["no_auto_join" | Rest], C) ->
	parse(Rest, C#config{ auto_join = false });
parse(["join_workers", N | Rest], C) ->
	parse(Rest, C#config{ join_workers = list_to_integer(N) });
parse(["diff", N | Rest], C) ->
	parse(Rest, C#config{ diff = list_to_integer(N) });
parse(["mining_addr", Addr | Rest], C) ->
	%% At most one mining address may be given; a 32-byte decoded value
	%% (43 Base64Url characters) is required.
	case C#config.mining_addr of
		not_set ->
			case ar_util:safe_decode(Addr) of
				{ok, DecodedAddr} when byte_size(DecodedAddr) == 32 ->
					parse(Rest, C#config{ mining_addr = DecodedAddr });
				_ ->
					io:format("~nmining_addr must be a valid Base64Url string, 43"
							" characters long.~n~n"),
					{error, [{init, stop, [1]}], C}
			end;
		_ ->
			io:format("~nYou may specify at most one mining_addr.~n~n"),
			{error, [{init, stop, [1]}], C}
	end;
parse(["hashing_threads", Num | Rest], C) ->
	parse(Rest, C#config{ hashing_threads = list_to_integer(Num) });
parse(["data_cache_size_limit", Num | Rest], C) ->
	parse(Rest, C#config{ data_cache_size_limit = list_to_integer(Num) });
parse(["packing_cache_size_limit", Num | Rest], C) ->
	parse(Rest, C#config{ packing_cache_size_limit = list_to_integer(Num) });
parse(["mining_cache_size_mb", Num | Rest], C) ->
	parse(Rest, C#config{ mining_cache_size_mb = list_to_integer(Num) });
parse(["max_emitters", Num | Rest], C) ->
	parse(Rest, C#config{ max_emitters = list_to_integer(Num) });
parse(["disk_space_check_frequency", Frequency | Rest], C) ->
	%% CLI value is in seconds; stored in milliseconds.
	parse(Rest, C#config{
		disk_space_check_frequency = list_to_integer(Frequency) * 1000 });
parse(["start_from_block_index" | Rest], C) ->
	parse(Rest, C#config{ start_from_latest_state = true });
parse(["start_from_state", Folder | Rest], C) ->
	parse(Rest, C#config{ start_from_state = Folder });
parse(["start_from_block", H | Rest], C) ->
	case ar_util:safe_decode(H) of
		{ok, Decoded} when byte_size(Decoded) == 48 ->
			parse(Rest, C#config{ start_from_block = Decoded });
		_ ->
			io:format("Invalid start_from_block.~n", []),
			{error, [ {timer, sleep, [1000]}, {init, stop, [1]} ], C}
	end;
parse(["start_from_latest_state" | Rest], C) ->
	parse(Rest, C#config{ start_from_latest_state = true });
parse(["init" | Rest], C) ->
	parse(Rest, C#config{ init = true });
parse(["internal_api_secret", Secret | Rest], C)
		when length(Secret) >= ?INTERNAL_API_SECRET_MIN_LEN ->
	parse(Rest, C#config{ internal_api_secret = list_to_binary(Secret) });
parse(["internal_api_secret", _ | _], C) ->
	io:format("~nThe internal_api_secret must be at least ~B characters long.~n~n",
			[?INTERNAL_API_SECRET_MIN_LEN]),
	{error, [ {init, stop, [1]} ], C};
parse(["enable", Feature | Rest], C = #config{ enable = Enabled }) ->
	parse(Rest, C#config{ enable = [ list_to_atom(Feature) | Enabled ] });
parse(["disable", Feature | Rest], C = #config{ disable = Disabled }) ->
	parse(Rest, C#config{ disable = [ list_to_atom(Feature) | Disabled ] });
parse(["custom_domain", _ | Rest], C = #config{ }) ->
	%% Deprecated; consumed (with its argument) and ignored.
	?LOG_WARNING("Deprecated option found 'custom_domain': "
		" this option has been removed and is a no-op.", []),
	parse(Rest, C#config{ });
parse(["requests_per_minute_limit", Num | Rest], C) ->
	parse(Rest, C#config{ requests_per_minute_limit = list_to_integer(Num) });
parse(["max_propagation_peers", Num | Rest], C) ->
	parse(Rest, C#config{ max_propagation_peers = list_to_integer(Num) });
parse(["max_block_propagation_peers", Num | Rest], C) ->
	parse(Rest, C#config{ max_block_propagation_peers = list_to_integer(Num) });
parse(["sync_jobs", Num | Rest], C) ->
	parse(Rest, C#config{ sync_jobs = list_to_integer(Num) });
parse(["header_sync_jobs", Num | Rest], C) ->
	parse(Rest, C#config{ header_sync_jobs = list_to_integer(Num) });
parse(["enable_data_roots_syncing", "true" | Rest], C) ->
	parse(Rest, C#config{ enable_data_roots_syncing = true });
parse(["enable_data_roots_syncing", "false" | Rest], C) ->
	parse(Rest, C#config{ enable_data_roots_syncing = false });
parse(["data_sync_request_packed_chunks" | Rest], C) ->
	parse(Rest, C#config{ data_sync_request_packed_chunks = true });
parse(["post_tx_timeout", Num | Rest], C) ->
	parse(Rest, C#config{ post_tx_timeout = list_to_integer(Num) });
parse(["max_connections", Num | Rest], C) ->
	try list_to_integer(Num) of
		N when N >= 1 -> parse(Rest, C#config{ 'http_api.tcp.max_connections' = N });
		_ ->
			io:format("Invalid max_connections ~p", [Num]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid max_connections ~p", [Num]),
		parse(Rest, C)
	end;
parse(["disk_pool_data_root_expiration_time", Num | Rest], C) ->
	parse(Rest, C#config{
		disk_pool_data_root_expiration_time = list_to_integer(Num) });
parse(["max_disk_pool_buffer_mb", Num | Rest], C) ->
	parse(Rest, C#config{ max_disk_pool_buffer_mb = list_to_integer(Num) });
parse(["max_disk_pool_data_root_buffer_mb", Num | Rest], C) ->
	parse(Rest, C#config{ max_disk_pool_data_root_buffer_mb = list_to_integer(Num) });
parse(["max_duplicate_data_roots", Num | Rest], C) ->
	parse(Rest, C#config{ max_duplicate_data_roots = list_to_integer(Num) });
parse(["disk_cache_size_mb", Num | Rest], C) ->
	parse(Rest, C#config{ disk_cache_size = list_to_integer(Num) });
parse(["packing_workers", Num | Rest], C) ->
	parse(Rest, C#config{ packing_workers = list_to_integer(Num) });
parse(["replica_2_9_workers", Num | Rest], C) ->
	parse(Rest, C#config{ replica_2_9_workers = list_to_integer(Num) });
parse(["disable_replica_2_9_device_limit" | Rest], C) ->
	parse(Rest, C#config{ disable_replica_2_9_device_limit = true });
parse(["replica_2_9_entropy_cache_size_mb", Num | Rest], C) ->
	parse(Rest, C#config{
		replica_2_9_entropy_cache_size_mb = list_to_integer(Num) });
parse(["max_vdf_validation_thread_count", Num | Rest], C) ->
	parse(Rest, C#config{
		max_nonce_limiter_validation_thread_count = list_to_integer(Num) });
parse(["max_vdf_last_step_validation_thread_count", Num | Rest], C) ->
	parse(Rest, C#config{
		max_nonce_limiter_last_step_validation_thread_count = list_to_integer(Num) });
parse(["vdf_server_trusted_peer", Peer | Rest], C) ->
	#config{ nonce_limiter_server_trusted_peers = Peers } = C,
	parse(Rest, C#config{ nonce_limiter_server_trusted_peers = [Peer | Peers] });
parse(["vdf_client_peer", RawPeer | Rest],
		C = #config{ nonce_limiter_client_peers = Peers }) ->
	parse(Rest, C#config{ nonce_limiter_client_peers = [RawPeer | Peers] });
parse(["debug" | Rest], C) ->
	parse(Rest, C#config{ debug = true });
parse(["run_defragmentation" | Rest], C) ->
	parse(Rest, C#config{ run_defragmentation = true });
parse(["defragmentation_trigger_threshold", Num | Rest], C) ->
	parse(Rest,
		C#config{ defragmentation_trigger_threshold = list_to_integer(Num) });
parse(["block_throttle_by_ip_interval", Num | Rest], C) ->
	parse(Rest, C#config{ block_throttle_by_ip_interval = list_to_integer(Num) });
parse(["block_throttle_by_solution_interval", Num | Rest], C) ->
	parse(Rest, C#config{
		block_throttle_by_solution_interval = list_to_integer(Num) });
parse(["defragment_module", DefragModuleString | Rest], C) ->
	DefragModules = C#config.defragmentation_modules,
	try
		{ok, DefragModule} = ar_config:parse_storage_module(DefragModuleString),
		DefragModules2 = [DefragModule | DefragModules],
		parse(Rest, C#config{ defragmentation_modules = DefragModules2 })
	catch _:_ ->
		io:format("~ndefragment_module value must be in the {number},{address} format.~n~n"),
		{error, [ {init, stop, [1]} ], C}
	end;
parse(["tls_cert_file", CertFilePath | Rest], C) ->
	AbsCertFilePath = filename:absname(CertFilePath),
	ar_util:assert_file_exists_and_readable(AbsCertFilePath),
	parse(Rest, C#config{ tls_cert_file = AbsCertFilePath });
parse(["tls_key_file", KeyFilePath | Rest], C) ->
	AbsKeyFilePath = filename:absname(KeyFilePath),
	ar_util:assert_file_exists_and_readable(AbsKeyFilePath),
	parse(Rest, C#config{ tls_key_file = AbsKeyFilePath });
parse(["http_api.tcp.idle_timeout_seconds", Num | Rest], C) ->
	%% CLI value is in seconds; stored in milliseconds.
	parse(Rest, C#config{
		http_api_transport_idle_timeout = list_to_integer(Num) * 1000 });
parse(["coordinated_mining" | Rest], C) ->
	parse(Rest, C#config{ coordinated_mining = true });
parse(["cm_api_secret", CMSecret | Rest], C)
		when length(CMSecret) >= ?INTERNAL_API_SECRET_MIN_LEN ->
	parse(Rest, C#config{ cm_api_secret = list_to_binary(CMSecret) });
parse(["cm_api_secret", _ | _], C) ->
	io:format("~nThe cm_api_secret must be at least ~B characters long.~n~n",
			[?INTERNAL_API_SECRET_MIN_LEN]),
	{error, [ {init, stop, [1]} ], C};
parse(["cm_poll_interval", Num | Rest], C) ->
	parse(Rest, C#config{ cm_poll_interval = list_to_integer(Num) });
parse(["cm_peer", Peer | Rest], C = #config{ cm_peers = Ps }) ->
	case ar_util:safe_parse_peer(Peer) of
		{ok, ValidPeer} when is_list(ValidPeer) ->
			parse(Rest, C#config{ cm_peers = ValidPeer ++ Ps });
		{error, _} ->
			io:format("Peer ~p is invalid.~n", [Peer]),
			parse(Rest, C)
	end;
parse(["cm_exit_peer", Peer | Rest], C) ->
	case ar_util:safe_parse_peer(Peer) of
		{ok, [ValidPeer | _]} ->
			parse(Rest, C#config{ cm_exit_peer = ValidPeer });
		{error, _} ->
			io:format("Peer ~p is invalid.~n", [Peer]),
			parse(Rest, C)
	end;
parse(["cm_out_batch_timeout", Num | Rest], C) ->
	parse(Rest, C#config{ cm_out_batch_timeout = list_to_integer(Num) });
parse(["is_pool_server" | Rest], C) ->
	parse(Rest, C#config{ is_pool_server = true });
parse(["is_pool_client" | Rest], C) ->
	parse(Rest, C#config{ is_pool_client = true });
parse(["pool_api_key", Key | Rest], C) ->
	parse(Rest, C#config{ pool_api_key = list_to_binary(Key) });
parse(["pool_server_address", Host | Rest], C) ->
	parse(Rest, C#config{ pool_server_address = list_to_binary(Host) });
parse(["pool_worker_name", Host | Rest], C) ->
	parse(Rest, C#config{ pool_worker_name = list_to_binary(Host) });
parse(["rocksdb_flush_interval", Seconds | Rest], C) ->
	parse(Rest, C#config{ rocksdb_flush_interval_s = list_to_integer(Seconds) });
parse(["rocksdb_wal_sync_interval", Seconds | Rest], C) ->
	parse(Rest, C#config{ rocksdb_wal_sync_interval_s = list_to_integer(Seconds) });
%% tcp shutdown procedure
parse(["network.tcp.shutdown.connection_timeout", Delay | Rest], C) ->
	%% This is the option name documented in the help text.
	parse(Rest, C#config{ shutdown_tcp_connection_timeout = list_to_integer(Delay) });
parse(["network.tcp.connection_timeout", Delay | Rest], C) ->
	%% Backward-compatible alias: previously only this (undocumented)
	%% spelling was accepted, while the help text advertised
	%% network.tcp.shutdown.connection_timeout.
	parse(Rest, C#config{ shutdown_tcp_connection_timeout = list_to_integer(Delay) });
parse(["network.tcp.shutdown.mode", RawMode | Rest], C) ->
	case RawMode of
		"shutdown" -> parse(Rest, C#config{ shutdown_tcp_mode = shutdown });
		"close" -> parse(Rest, C#config{ shutdown_tcp_mode = close });
		Mode ->
			io:format("Mode ~p is invalid.~n", [Mode]),
			parse(Rest, C)
	end;
%% global socket configuration
parse(["network.socket.backend", Backend | Rest], C) ->
	case Backend of
		"inet" -> parse(Rest, C#config{ 'socket.backend' = inet });
		"socket" -> parse(Rest, C#config{ 'socket.backend' = socket });
		_ ->
			io:format("Invalid socket.backend ~p.", [Backend]),
			parse(Rest, C)
	end;
%% gun http client configuration
parse(["http_client.http.keepalive", "infinity" | Rest], C) ->
	parse(Rest, C#config{ 'http_client.http.keepalive' = infinity });
parse(["http_client.http.keepalive", Keepalive | Rest], C) ->
	try list_to_integer(Keepalive) of
		K when K >= 0 -> parse(Rest, C#config{ 'http_client.http.keepalive' = K });
		_ ->
			io:format("Invalid http_client.http.keepalive ~p.", [Keepalive]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_client.http.keepalive ~p.", [Keepalive]),
		parse(Rest, C)
	end;
parse(["http_client.tcp.delay_send", DelaySend | Rest], C) ->
	case DelaySend of
		"true" -> parse(Rest, C#config{ 'http_client.tcp.delay_send' = true });
		"false" -> parse(Rest, C#config{ 'http_client.tcp.delay_send' = false });
		_ ->
			io:format("Invalid http_client.tcp.delay_send ~p.", [DelaySend]),
			parse(Rest, C)
	end;
parse(["http_client.tcp.keepalive", Keepalive | Rest], C) ->
	case Keepalive of
		"true" -> parse(Rest, C#config{ 'http_client.tcp.keepalive' = true });
		"false" -> parse(Rest, C#config{ 'http_client.tcp.keepalive' = false });
		_ ->
			io:format("Invalid http_client.tcp.keepalive ~p.", [Keepalive]),
			parse(Rest, C)
	end;
parse(["http_client.tcp.linger", Linger | Rest], C) ->
	case Linger of
		"true" -> parse(Rest, C#config{ 'http_client.tcp.linger' = true });
		"false" -> parse(Rest, C#config{ 'http_client.tcp.linger' = false });
		_ ->
			io:format("Invalid http_client.tcp.linger ~p.", [Linger]),
			parse(Rest, C)
	end;
parse(["http_client.tcp.linger_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_client.tcp.linger_timeout' = T });
		_ ->
			io:format("Invalid http_client.tcp.linger_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		%% (Fixed a duplicated "timeout" word in this error message.)
		io:format("Invalid http_client.tcp.linger_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
parse(["http_client.tcp.nodelay", Nodelay | Rest], C) ->
	case Nodelay of
		"true" -> parse(Rest, C#config{ 'http_client.tcp.nodelay' = true });
		"false" -> parse(Rest, C#config{ 'http_client.tcp.nodelay' = false });
		_ ->
			io:format("Invalid http_client.tcp.nodelay ~p.", [Nodelay]),
			parse(Rest, C)
	end;
parse(["http_client.tcp.send_timeout_close", Value | Rest], C) ->
	case Value of
		"true" -> parse(Rest, C#config{ 'http_client.tcp.send_timeout_close' = true });
		"false" -> parse(Rest, C#config{ 'http_client.tcp.send_timeout_close' = false });
		_ ->
			io:format("Invalid http_client.tcp.send_timeout_close ~p.", [Value]),
			parse(Rest, C)
	end;
parse(["http_client.tcp.send_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_client.tcp.send_timeout' = T });
		_ ->
			io:format("Invalid http_client.tcp.send_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_client.tcp.send_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
%% cowboy http server configuration
parse(["http_api.http.active_n", Active | Rest], C) ->
	try list_to_integer(Active) of
		N when N >= 1 -> parse(Rest, C#config{ 'http_api.http.active_n' = N });
		_ ->
			io:format("Invalid http_api.http.active_n ~p.", [Active]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.http.active_n ~p.", [Active]),
		parse(Rest, C)
	end;
parse(["http_api.http.inactivity_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_api.http.inactivity_timeout' = T });
		_ ->
			io:format("Invalid http_api.http.inactivity_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.http.inactivity_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
parse(["http_api.http.linger_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_api.http.linger_timeout' = T });
		_ ->
			io:format("Invalid http_api.http.linger_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.http.linger_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
parse(["http_api.http.request_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_api.http.request_timeout' = T });
		_ ->
			io:format("Invalid http_api.http.request_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.http.request_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
parse(["http_api.tcp.backlog", Backlog | Rest], C) ->
	try list_to_integer(Backlog) of
		B when B >= 1 -> parse(Rest, C#config{ 'http_api.tcp.backlog' = B });
		_ ->
			io:format("Invalid http_api.tcp.backlog ~p.", [Backlog]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.tcp.backlog ~p.", [Backlog]),
		parse(Rest, C)
	end;
parse(["http_api.tcp.delay_send", DelaySend | Rest], C) ->
	case DelaySend of
		"true" -> parse(Rest, C#config{ 'http_api.tcp.delay_send' = true });
		"false" -> parse(Rest, C#config{ 'http_api.tcp.delay_send' = false });
		_ ->
			io:format("Invalid http_api.tcp.delay_send ~p.", [DelaySend]),
			parse(Rest, C)
	end;
parse(["http_api.tcp.keepalive", "true" | Rest], C) ->
	parse(Rest, C#config{ 'http_api.tcp.keepalive' = true });
parse(["http_api.tcp.keepalive", "false" | Rest], C) ->
	parse(Rest, C#config{ 'http_api.tcp.keepalive' = false });
parse(["http_api.tcp.keepalive", Keepalive | Rest], C) ->
	io:format("Invalid http_api.tcp.keepalive ~p.", [Keepalive]),
	parse(Rest, C);
parse(["http_api.tcp.linger", Linger | Rest], C) ->
	case Linger of
		"true" -> parse(Rest, C#config{ 'http_api.tcp.linger' = true });
		"false" -> parse(Rest, C#config{ 'http_api.tcp.linger' = false });
		_ ->
			io:format("Invalid http_api.tcp.linger ~p.", [Linger]),
			parse(Rest, C)
	end;
parse(["http_api.tcp.linger_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_api.tcp.linger_timeout' = T });
		_ ->
			io:format("Invalid http_api.tcp.linger_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.tcp.linger_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
parse(["http_api.tcp.listener_shutdown", "brutal_kill" | Rest], C) ->
	parse(Rest, C#config{ 'http_api.tcp.listener_shutdown' = brutal_kill });
parse(["http_api.tcp.listener_shutdown", "infinity" | Rest], C) ->
	parse(Rest, C#config{ 'http_api.tcp.listener_shutdown' = infinity });
parse(["http_api.tcp.listener_shutdown", Shutdown | Rest], C) ->
	try list_to_integer(Shutdown) of
		S when S >= 0 -> parse(Rest, C#config{ 'http_api.tcp.listener_shutdown' = S });
		_ ->
			io:format("Invalid http_api.tcp.listener_shutdown ~p.", [Shutdown]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.tcp.listener_shutdown ~p.", [Shutdown]),
		parse(Rest, C)
	end;
parse(["http_api.tcp.nodelay", Nodelay | Rest], C) ->
	case Nodelay of
		"true" -> parse(Rest, C#config{ 'http_api.tcp.nodelay' = true });
		"false" -> parse(Rest, C#config{ 'http_api.tcp.nodelay' = false });
		_ ->
			io:format("Invalid http_api.tcp.nodelay ~p.", [Nodelay]),
			parse(Rest, C)
	end;
parse(["http_api.tcp.num_acceptors", Acceptors | Rest], C) ->
	try list_to_integer(Acceptors) of
		N when N >= 0 -> parse(Rest, C#config{ 'http_api.tcp.num_acceptors' = N });
		_ ->
			io:format("Invalid http_api.tcp.num_acceptors ~p.", [Acceptors]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.tcp.num_acceptors ~p.", [Acceptors]),
		parse(Rest, C)
	end;
parse(["http_api.tcp.send_timeout_close", Value | Rest], C) ->
	case Value of
		"true" -> parse(Rest, C#config{ 'http_api.tcp.send_timeout_close' = true });
		"false" -> parse(Rest, C#config{ 'http_api.tcp.send_timeout_close' = false });
		_ ->
			io:format("Invalid http_api.tcp.send_timeout_close ~p.", [Value]),
			parse(Rest, C)
	end;
parse(["http_api.tcp.send_timeout", Timeout | Rest], C) ->
	try list_to_integer(Timeout) of
		T when T >= 0 -> parse(Rest, C#config{ 'http_api.tcp.send_timeout' = T });
		_ ->
			io:format("Invalid http_api.tcp.send_timeout ~p.", [Timeout]),
			parse(Rest, C)
	catch _:_ ->
		io:format("Invalid http_api.tcp.send_timeout ~p.", [Timeout]),
		parse(Rest, C)
	end;
%% Undocumented/unsupported options
parse(["chunk_storage_file_size", Num | Rest], C) ->
	parse(Rest, C#config{ chunk_storage_file_size = list_to_integer(Num) });
parse([Arg | _Rest], C) ->
	io:format("~nUnknown argument: ~s.~n", [Arg]),
	{error, [ {?MODULE, show_help, []} ], C}.

%% @doc Ensure that parsing of core command line options functions correctly.
commandline_parser_test_() ->
	{timeout, 60, fun() ->
		Addr = crypto:strong_rand_bytes(32),
		Tests = [
			{"peer 1.2.3.4 peer 5.6.7.8:9", #config.peers, [{5,6,7,8,9},{1,2,3,4,1984}]},
			{"mine", #config.mine, true},
			{"port 22", #config.port, 22},
			{"mining_addr " ++ binary_to_list(ar_util:encode(Addr)), #config.mining_addr, Addr}
		],
		X = string:split(string:join([ L || {L, _, _} <- Tests ], " "), " ", all),
		C = eval(X, #config{}),
		lists:foreach(
			fun({_, Index, Value}) ->
				?assertEqual(element(Index, C), Value)
			end,
			Tests
		)
	end}.



================================================
FILE: apps/arweave/src/ar_config.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @doc arweave legacy configuration parser module.
%%% @end
%%%===================================================================
-module(ar_config).
-export([
	compute_own_vdf/0,
	is_public_vdf_server/0,
	is_vdf_server/0,
	log_config/1,
	parse/1,
	parse_config_file/1,
	parse_config_file/2,
	parse_storage_module/1,
	pull_from_remote_vdf_server/0,
	set_dependent_flags/1,
	use_remote_vdf_server/0,
	validate_config/1
]).
-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%%--------------------------------------------------------------------
%% @doc Parse command line arguments starting from a default
%% `#config{}' record.
%% @see parse_config_file/2
%% @end
%%--------------------------------------------------------------------
-spec parse_config_file(Args) -> Return when
      Args :: [string()],
      Return :: {ok, #config{}}
              | {error, term(), term()}
              | {error, term()}.

parse_config_file(Args) ->
    parse_config_file(Args, [], #config{}).

%%--------------------------------------------------------------------
%% @doc Take legacy command line argument and look for config_file
%% parameter, then read and parse the file.
%% @end
%%--------------------------------------------------------------------
-spec parse_config_file(Args, Config) -> Return when
      Args :: [string()],
      Config :: #config{},
      Return :: {ok, #config{}}
              | {error, term(), term()}
              | {error, term()}.

parse_config_file(Args, Config) ->
    parse_config_file(Args, [], Config).

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec parse_config_file(Args, Skipped, Config) -> Return when
      Args :: [string()],
      Skipped :: [string()],
      Config :: #config{},
      Return :: {ok, #config{}}
              | {error, term(), term()}
              | {error, term()}.

parse_config_file([], _Skipped, Config) ->
    {ok, Config};
parse_config_file(["config_file", Path | Rest], Skipped, _Current) ->
    %% A "config_file" argument replaces any previously accumulated
    %% configuration with the parsed file contents.
    case read_config_from_file(Path) of
        {ok, Parsed} ->
            parse_config_file(Rest, Skipped, Parsed);
        {error, Reason, Item} = Error ->
            io:format("Failed to parse config: ~p: ~p.~n", [Reason, Item]),
            ar_cli_parser:show_help(),
            Error;
        {error, Reason} = Error ->
            io:format("Failed to parse config: ~p.~n", [Reason]),
            ar_cli_parser:show_help(),
            Error
    end;
parse_config_file([Other | Rest], Skipped, Config) ->
    %% Any other argument is collected into the skipped list and the
    %% scan continues.
    parse_config_file(Rest, [Other | Skipped], Config).
%%--------------------------------------------------------------------
%% @doc Read the content of a configuration file and then parse it
%% with `ar_config:parse/1'.
%% @end
%%--------------------------------------------------------------------
%% NOTE(review): the previous spec declared `{ok, binary()}', but the
%% function returns the result of `ar_config:parse/1' — i.e. a parsed
%% `{ok, #config{}}' or one of the parse error tuples — not the raw
%% file binary. The spec below matches the implementation.
-spec read_config_from_file(Path) -> Return when
      Path :: string(),
      Return :: {ok, #config{}}
              | {error, term(), term()}
              | {error, term()}.
read_config_from_file(Path) ->
    case file:read_file(Path) of
        {ok, FileData} ->
            %% Propagate {ok, #config{}} or a parse error tuple as-is.
            ar_config:parse(FileData);
        {error, _} ->
            {error, file_unreadable, Path}
    end.

%%--------------------------------------------------------------------
%% @doc Validate legacy configuration file as `#config{}' record.
%% Runs each validation stage in order and short-circuits on the
%% first failing stage.
%% @end
%%--------------------------------------------------------------------
-spec validate_config(Config :: #config{}) -> boolean().
validate_config(Config) ->
    validate_init(Config)
        andalso validate_storage_modules(Config)
        andalso validate_repack_in_place(Config)
        andalso validate_cm_pool(Config)
        andalso validate_cm(Config)
        andalso validate_unique_replication_type(Config)
        andalso validate_verify(Config)
        andalso validate_start_from_state(Config).

%%--------------------------------------------------------------------
%% @doc Some flags force other flags to be set.
%% @end
%%--------------------------------------------------------------------
-spec set_dependent_flags(Config :: #config{}) -> #config{}.
set_dependent_flags(Config) ->
    Config1 = set_start_from_state_flags(Config),
    Config2 = set_verify_flags(Config1),
    Config2.

%% Starting from an explicit state folder implies starting from the
%% latest state; an unset start_from_state leaves the config untouched.
set_start_from_state_flags(#config{ start_from_state = not_set } = Config) ->
    Config;
set_start_from_state_flags(Config) ->
    Config#config{ start_from_latest_state = true }.

%% @doc Return true when at least one trusted VDF server peer is
%% configured, i.e. the node acts as a VDF client.
use_remote_vdf_server() ->
    {ok, Config} = arweave_config:get_env(),
    case Config#config.nonce_limiter_server_trusted_peers of
        [] -> false;
        _ -> true
    end.

%% @doc Return true unless pulling from the remote VDF server was
%% explicitly disabled via the vdf_server_pull entry in "disable".
pull_from_remote_vdf_server() ->
    {ok, Config} = arweave_config:get_env(),
    not lists:member(vdf_server_pull, Config#config.disable).
%% @doc Decide whether this node should compute its own VDF chain.
%% Without trusted VDF server peers the node computes VDF unless
%% compute_own_vdf appears in "disable"; with trusted peers (a VDF
%% client) it only computes VDF when compute_own_vdf appears in
%% "enable".
compute_own_vdf() ->
    {ok, #config{ nonce_limiter_server_trusted_peers = Trusted,
            enable = Enabled, disable = Disabled }} = arweave_config:get_env(),
    case Trusted of
        [] ->
            %% Not a VDF client - compute VDF unless explicitly disabled.
            not lists:member(compute_own_vdf, Disabled);
        _ ->
            %% Computing your own VDF needs to be explicitly enabled on
            %% a VDF client.
            lists:member(compute_own_vdf, Enabled)
    end.

%% @doc Return true when the node serves VDF updates: either VDF
%% client peers are configured, or public_vdf_server is enabled.
is_vdf_server() ->
    {ok, #config{ nonce_limiter_client_peers = Clients,
            enable = Enabled }} = arweave_config:get_env(),
    Clients =/= [] orelse lists:member(public_vdf_server, Enabled).

%% @doc Return true when public_vdf_server is listed under "enable".
is_public_vdf_server() ->
    {ok, #config{ enable = Enabled }} = arweave_config:get_env(),
    lists:member(public_vdf_server, Enabled).

%% @doc Decode a JSON configuration binary; on success hand the
%% decoded structure to parse_options/1.
parse(Bin) when is_binary(Bin) ->
    case ar_serialize:json_decode(Bin) of
        {ok, Decoded} ->
            parse_options(Decoded);
        {error, _} ->
            {error, bad_json, Bin}
    end.

%% @doc Parse a comma-separated storage module specification of the
%% form "{number}[,{size}],{packing}[,repack_in_place,{to_packing}]".
%% Negative numbers crash with badmatch; an unrecognized shape crashes
%% with case_clause (callers catch and report a format error).
parse_storage_module(IOList) ->
    Fields = binary:split(iolist_to_binary(IOList), <<",">>, [global]),
    case Fields of
        [PartitionBin, Packing, <<"repack_in_place">>, ToPacking] ->
            Partition = binary_to_integer(PartitionBin),
            true = Partition >= 0,
            parse_storage_module(Partition, ar_block:partition_size(),
                    Packing, ToPacking);
        [RangeBin, SizeBin, Packing, <<"repack_in_place">>, ToPacking] ->
            Range = binary_to_integer(RangeBin),
            true = Range >= 0,
            Size = binary_to_integer(SizeBin),
            true = Size >= 0,
            parse_storage_module(Range, Size, Packing, ToPacking);
        [PartitionBin, Packing] ->
            Partition = binary_to_integer(PartitionBin),
            true = Partition >= 0,
            parse_storage_module(Partition, ar_block:partition_size(), Packing);
        [RangeBin, SizeBin, Packing] ->
            Range = binary_to_integer(RangeBin),
            true = Range >= 0,
            Size = binary_to_integer(SizeBin),
            true = Size >= 0,
            parse_storage_module(Range, Size, Packing)
    end.
%%%=================================================================== %%% Private functions. %%%=================================================================== %% ------------------------------------------------------------------- %% @doc Parse the configuration options. %% ------------------------------------------------------------------- parse_options({KVPairs}) when is_list(KVPairs) -> parse_options(KVPairs, #config{}); parse_options(JsonValue) -> {error, root_not_object, JsonValue}. parse_options([{_, null} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"config_file">>, _} | _], _) -> {error, config_file_set}; parse_options([{<<"peers">>, Peers} | Rest], Config) when is_list(Peers) -> case parse_peers(Peers, []) of {ok, ParsedPeers} -> parse_options(Rest, Config#config{ peers = ParsedPeers }); error -> {error, bad_peers, Peers} end; parse_options([{<<"peers">>, Peers} | _], _) -> {error, {bad_type, peers, array}, Peers}; parse_options([{<<"block_gossip_peers">>, Peers} | Rest], Config) when is_list(Peers) -> case parse_peers(Peers, []) of {ok, ParsedPeers} -> parse_options(Rest, Config#config{ block_gossip_peers = ParsedPeers }); error -> {error, bad_peers, Peers} end; parse_options([{<<"block_gossip_peers">>, Peers} | _], _) -> {error, {bad_type, peers, array}, Peers}; parse_options([{<<"local_peers">>, Peers} | Rest], Config) when is_list(Peers) -> case parse_peers(Peers, []) of {ok, ParsedPeers} -> parse_options(Rest, Config#config{ local_peers = ParsedPeers }); error -> {error, bad_local_peers, Peers} end; parse_options([{<<"local_peers">>, Peers} | _], _) -> {error, {bad_type, local_peers, array}, Peers}; parse_options([{<<"sync_from_local_peers_only">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ sync_from_local_peers_only = true }); parse_options([{<<"sync_from_local_peers_only">>, false} | Rest], Config) -> parse_options(Rest, Config#config{ sync_from_local_peers_only = false }); 
parse_options([{<<"sync_from_local_peers_only">>, Opt} | _], _) -> {error, {bad_type, sync_from_local_peers_only, boolean}, Opt}; parse_options([{<<"start_from_latest_state">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ start_from_latest_state = true }); parse_options([{<<"start_from_latest_state">>, false} | Rest], Config) -> parse_options(Rest, Config#config{ start_from_latest_state = false }); parse_options([{<<"start_from_latest_state">>, Opt} | _], _) -> {error, {bad_type, start_from_latest_state, boolean}, Opt}; parse_options([{<<"start_from_state">>, Folder} | Rest], Config) when is_binary(Folder) -> parse_options(Rest, Config#config{ start_from_state = binary_to_list(Folder) }); parse_options([{<<"start_from_state">>, Folder} | _], _) -> {error, {bad_type, start_from_state, string}, Folder}; parse_options([{<<"start_from_block">>, H} | Rest], Config) when is_binary(H) -> case ar_util:safe_decode(H) of {ok, Decoded} when byte_size(Decoded) == 48 -> parse_options(Rest, Config#config{ start_from_block = Decoded }); _ -> {error, bad_block, H} end; parse_options([{<<"start_from_block">>, Opt} | _], _) -> {error, {bad_type, start_from_block, string}, Opt}; parse_options([{<<"start_from_block_index">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ start_from_latest_state = true }); parse_options([{<<"start_from_block_index">>, false} | Rest], Config) -> parse_options(Rest, Config#config{ start_from_latest_state = false }); parse_options([{<<"start_from_block_index">>, Opt} | _], _) -> {error, {bad_type, start_from_block_index, boolean}, Opt}; parse_options([{<<"mine">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ mine = true }); parse_options([{<<"mine">>, false} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"mine">>, Opt} | _], _) -> {error, {bad_type, mine, boolean}, Opt}; parse_options([{<<"verify">>, <<"purge">>} | Rest], Config) -> parse_options(Rest, Config#config{ verify = purge }); 
parse_options([{<<"verify">>, <<"log">>} | Rest], Config) -> parse_options(Rest, Config#config{ verify = log }); parse_options([{<<"verify">>, Opt} | _], _) -> {error, bad_verify_mode, Opt}; parse_options([{<<"verify_samples">>, N} | Rest], Config) when is_integer(N) -> parse_options(Rest, Config#config{ verify_samples = N }); parse_options([{<<"verify_samples">>, <<"all">>} | Rest], Config) -> parse_options(Rest, Config#config{ verify_samples = all }); parse_options([{<<"verify_samples">>, Opt} | _], _) -> {error, {bad_type, verify_samples, number}, Opt}; parse_options([{<<"vdf">>, Mode} | Rest], Config) -> ParsedMode = case Mode of <<"openssl">> -> openssl; <<"fused">> -> fused; <<"hiopt_m4">> -> hiopt_m4; _ -> io:format("VDF ~p is invalid.~n", [Mode]), openssl end, parse_options(Rest, Config#config{ vdf = ParsedMode }); parse_options([{<<"port">>, Port} | Rest], Config) when is_integer(Port) -> parse_options(Rest, Config#config{ port = Port }); parse_options([{<<"port">>, Port} | _], _) -> {error, {bad_type, port, number}, Port}; parse_options([{<<"data_dir">>, DataDir} | Rest], Config) when is_binary(DataDir) -> parse_options(Rest, Config#config{ data_dir = binary_to_list(DataDir) }); parse_options([{<<"data_dir">>, DataDir} | _], _) -> {error, {bad_type, data_dir, string}, DataDir}; parse_options([{<<"log_dir">>, Dir} | Rest], Config) when is_binary(Dir) -> parse_options(Rest, Config#config{ log_dir = binary_to_list(Dir) }); parse_options([{<<"log_dir">>, Dir} | _], _) -> {error, {bad_type, log_dir, string}, Dir}; parse_options([{<<"storage_modules">>, L} | Rest], Config) when is_list(L) -> try {StorageModules, RepackInPlaceStorageModules} = lists:foldr( fun(Bin, {Acc1, Acc2}) -> case parse_storage_module(Bin) of {ok, Module} -> {[Module | Acc1], Acc2}; {repack_in_place, Module} -> {Acc1, [Module | Acc2]} end end, {[], []}, L ), parse_options(Rest, Config#config{ storage_modules = StorageModules, repack_in_place_storage_modules = RepackInPlaceStorageModules }) 
catch Error:Reason -> ?LOG_ERROR([{event, parse_failure}, {option, storage_modules}, {error, Error}, {reason, Reason}]), {error, {bad_format, storage_modules, "an array of " "\"{number},{address}[,repack_in_place,{to_packing}]\""}, L} end; parse_options([{<<"storage_modules">>, Bin} | _], _) -> {error, {bad_type, storage_modules, array}, Bin}; parse_options([{<<"repack_batch_size">>, N} | Rest], Config) when is_integer(N) -> parse_options(Rest, Config#config{ repack_batch_size = N }); parse_options([{<<"repack_batch_size">>, Opt} | _], _) -> {error, {bad_type, repack_batch_size, number}, Opt}; parse_options([{<<"repack_cache_size_mb">>, N} | Rest], Config) when is_integer(N) -> parse_options(Rest, Config#config{ repack_cache_size_mb = N }); parse_options([{<<"repack_cache_size_mb">>, Opt} | _], _) -> {error, {bad_type, repack_cache_size_mb, number}, Opt}; parse_options([{<<"polling">>, Frequency} | Rest], Config) when is_integer(Frequency) -> parse_options(Rest, Config#config{ polling = Frequency }); parse_options([{<<"polling">>, Opt} | _], _) -> {error, {bad_type, polling, number}, Opt}; parse_options([{<<"block_pollers">>, N} | Rest], Config) when is_integer(N) -> parse_options(Rest, Config#config{ block_pollers = N }); parse_options([{<<"block_pollers">>, Opt} | _], _) -> {error, {bad_type, block_pollers, number}, Opt}; parse_options([{<<"no_auto_join">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ auto_join = false }); parse_options([{<<"no_auto_join">>, false} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"no_auto_join">>, Opt} | _], _) -> {error, {bad_type, no_auto_join, boolean}, Opt}; parse_options([{<<"join_workers">>, N} | Rest], Config) when is_integer(N)-> parse_options(Rest, Config#config{ join_workers = N }); parse_options([{<<"join_workers">>, Opt} | _], _) -> {error, {bad_type, join_workers, number}, Opt}; parse_options([{<<"packing_workers">>, N} | Rest], Config) when is_integer(N)-> parse_options(Rest, 
Config#config{ packing_workers = N }); parse_options([{<<"packing_workers">>, Opt} | _], _) -> {error, {bad_type, packing_workers, number}, Opt}; parse_options([{<<"replica_2_9_workers">>, N} | Rest], Config) when is_integer(N)-> parse_options(Rest, Config#config{ replica_2_9_workers = N }); parse_options([{<<"replica_2_9_workers">>, Opt} | _], _) -> {error, {bad_type, replica_2_9_workers, number}, Opt}; parse_options([{<<"disable_replica_2_9_device_limit">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ disable_replica_2_9_device_limit = true }); parse_options([{<<"disable_replica_2_9_device_limit">>, false} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"disable_replica_2_9_device_limit">>, Opt} | _], _) -> {error, {bad_type, disable_replica_2_9_device_limit, boolean}, Opt}; parse_options([{<<"replica_2_9_entropy_cache_size_mb">>, N} | Rest], Config) when is_integer(N)-> parse_options(Rest, Config#config{ replica_2_9_entropy_cache_size_mb = N }); parse_options([{<<"replica_2_9_entropy_cache_size_mb">>, Opt} | _], _) -> {error, {bad_type, replica_2_9_entropy_cache_size_mb, number}, Opt}; parse_options([{<<"diff">>, Diff} | Rest], Config) when is_integer(Diff) -> parse_options(Rest, Config#config{ diff = Diff }); parse_options([{<<"diff">>, Diff} | _], _) -> {error, {bad_type, diff, number}, Diff}; parse_options([{<<"mining_addr">>, Addr} | Rest], Config) when is_binary(Addr) -> case Config#config.mining_addr of not_set -> case ar_util:safe_decode(Addr) of {ok, D} when byte_size(D) == 32 -> parse_options(Rest, Config#config{ mining_addr = D }); _ -> {error, bad_mining_addr, Addr} end; _ -> {error, at_most_one_mining_addr_is_supported, Addr} end; parse_options([{<<"mining_addr">>, Addr} | _], _) -> {error, {bad_type, mining_addr, string}, Addr}; parse_options([{<<"hashing_threads">>, Threads} | Rest], Config) when is_integer(Threads) -> parse_options(Rest, Config#config{ hashing_threads = Threads }); 
parse_options([{<<"hashing_threads">>, Threads} | _], _) -> {error, {bad_type, hashing_threads, number}, Threads}; parse_options([{<<"data_cache_size_limit">>, Limit} | Rest], Config) when is_integer(Limit) -> parse_options(Rest, Config#config{ data_cache_size_limit = Limit }); parse_options([{<<"data_cache_size_limit">>, Limit} | _], _) -> {error, {bad_type, data_cache_size_limit, number}, Limit}; parse_options([{<<"packing_cache_size_limit">>, Limit} | Rest], Config) when is_integer(Limit) -> parse_options(Rest, Config#config{ packing_cache_size_limit = Limit }); parse_options([{<<"packing_cache_size_limit">>, Limit} | _], _) -> {error, {bad_type, packing_cache_size_limit, number}, Limit}; parse_options([{<<"mining_cache_size_mb">>, Limit} | Rest], Config) when is_integer(Limit) -> parse_options(Rest, Config#config{ mining_cache_size_mb = Limit }); parse_options([{<<"mining_cache_size_mb">>, Limit} | _], _) -> {error, {bad_type, mining_cache_size_mb, number}, Limit}; parse_options([{<<"max_emitters">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ max_emitters = Value }); parse_options([{<<"max_emitters">>, Value} | _], _) -> {error, {bad_type, max_emitters, number}, Value}; parse_options([{<<"post_tx_timeout">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ post_tx_timeout = Value }); parse_options([{<<"post_tx_timeout">>, Value} | _], _) -> {error, {bad_type, post_tx_timeout, number}, Value}; parse_options([{<<"max_propagation_peers">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ max_propagation_peers = Value }); parse_options([{<<"max_propagation_peers">>, Value} | _], _) -> {error, {bad_type, max_propagation_peers, number}, Value}; parse_options([{<<"max_block_propagation_peers">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ max_block_propagation_peers = Value }); 
parse_options([{<<"max_block_propagation_peers">>, Value} | _], _) -> {error, {bad_type, max_block_propagation_peers, number}, Value}; parse_options([{<<"sync_jobs">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ sync_jobs = Value }); parse_options([{<<"sync_jobs">>, Value} | _], _) -> {error, {bad_type, sync_jobs, number}, Value}; parse_options([{<<"header_sync_jobs">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ header_sync_jobs = Value }); parse_options([{<<"header_sync_jobs">>, Value} | _], _) -> {error, {bad_type, header_sync_jobs, number}, Value}; parse_options([{<<"enable_data_roots_syncing">>, Value} | Rest], Config) when is_boolean(Value) -> parse_options(Rest, Config#config{ enable_data_roots_syncing = Value }); parse_options([{<<"enable_data_roots_syncing">>, Value} | _], _) -> {error, {bad_type, enable_data_roots_syncing, boolean}, Value}; parse_options([{<<"disk_pool_jobs">>, Value} | Rest], Config) when is_integer(Value) -> parse_options(Rest, Config#config{ disk_pool_jobs = Value }); parse_options([{<<"disk_pool_jobs">>, Value} | _], _) -> {error, {bad_type, disk_pool_jobs, number}, Value}; parse_options([{<<"requests_per_minute_limit">>, L} | Rest], Config) when is_integer(L) -> parse_options(Rest, Config#config{ requests_per_minute_limit = L }); parse_options([{<<"requests_per_minute_limit">>, L} | _], _) -> {error, {bad_type, requests_per_minute_limit, number}, L}; parse_options([{<<"requests_per_minute_limit_by_ip">>, Object} | Rest], Config) when is_tuple(Object) -> case parse_requests_per_minute_limit_by_ip(Object) of {ok, ParsedMap} -> parse_options(Rest, Config#config{ requests_per_minute_limit_by_ip = ParsedMap }); error -> {error, bad_requests_per_minute_limit_by_ip, Object} end; parse_options([{<<"requests_per_minute_limit_by_ip">>, Object} | _], _) -> {error, {bad_type, requests_per_minute_limit_by_ip, object}, Object}; 
parse_options([{<<"transaction_blacklists">>, TransactionBlacklists} | Rest], Config) when is_list(TransactionBlacklists) -> case safe_map(fun binary_to_list/1, TransactionBlacklists) of {ok, TransactionBlacklistStrings} -> parse_options(Rest, Config#config{ transaction_blacklist_files = TransactionBlacklistStrings }); error -> {error, bad_transaction_blacklists} end; parse_options([{<<"transaction_blacklists">>, TransactionBlacklists} | _], _) -> {error, {bad_type, transaction_blacklists, array}, TransactionBlacklists}; parse_options([{<<"transaction_blacklist_urls">>, TransactionBlacklistURLs} | Rest], Config) when is_list(TransactionBlacklistURLs) -> case safe_map(fun binary_to_list/1, TransactionBlacklistURLs) of {ok, TransactionBlacklistURLStrings} -> parse_options(Rest, Config#config{ transaction_blacklist_urls = TransactionBlacklistURLStrings }); error -> {error, bad_transaction_blacklist_urls} end; parse_options([{<<"transaction_blacklist_urls">>, TransactionBlacklistURLs} | _], _) -> {error, {bad_type, transaction_blacklist_urls, array}, TransactionBlacklistURLs}; parse_options([{<<"transaction_whitelists">>, TransactionWhitelists} | Rest], Config) when is_list(TransactionWhitelists) -> case safe_map(fun binary_to_list/1, TransactionWhitelists) of {ok, TransactionWhitelistStrings} -> parse_options(Rest, Config#config{ transaction_whitelist_files = TransactionWhitelistStrings }); error -> {error, bad_transaction_whitelists} end; parse_options([{<<"transaction_whitelists">>, TransactionWhitelists} | _], _) -> {error, {bad_type, transaction_whitelists, array}, TransactionWhitelists}; parse_options([{<<"transaction_whitelist_urls">>, TransactionWhitelistURLs} | Rest], Config) when is_list(TransactionWhitelistURLs) -> case safe_map(fun binary_to_list/1, TransactionWhitelistURLs) of {ok, TransactionWhitelistURLStrings} -> parse_options(Rest, Config#config{ transaction_whitelist_urls = TransactionWhitelistURLStrings }); error -> {error, 
bad_transaction_whitelist_urls} end; parse_options([{<<"transaction_whitelist_urls">>, TransactionWhitelistURLs} | _], _) -> {error, {bad_type, transaction_whitelist_urls, array}, TransactionWhitelistURLs}; parse_options([{<<"disk_space_check_frequency">>, Frequency} | Rest], Config) when is_integer(Frequency) -> parse_options(Rest, Config#config{ disk_space_check_frequency = Frequency * 1000 }); parse_options([{<<"disk_space_check_frequency">>, Frequency} | _], _) -> {error, {bad_type, disk_space_check_frequency, number}, Frequency}; parse_options([{<<"init">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ init = true }); parse_options([{<<"init">>, false} | Rest], Config) -> parse_options(Rest, Config#config{ init = false }); parse_options([{<<"init">>, Opt} | _], _) -> {error, {bad_type, init, boolean}, Opt}; parse_options([{<<"internal_api_secret">>, Secret} | Rest], Config) when is_binary(Secret), byte_size(Secret) >= ?INTERNAL_API_SECRET_MIN_LEN -> parse_options(Rest, Config#config{ internal_api_secret = Secret }); parse_options([{<<"internal_api_secret">>, Secret} | _], _) -> {error, bad_secret, Secret}; parse_options([{<<"enable">>, Features} | Rest], Config) when is_list(Features) -> case safe_map(fun(Feature) -> binary_to_atom(Feature, latin1) end, Features) of {ok, FeatureAtoms} -> parse_options(Rest, Config#config{ enable = FeatureAtoms }); error -> {error, bad_enable} end; parse_options([{<<"enable">>, Features} | _], _) -> {error, {bad_type, enable, array}, Features}; parse_options([{<<"disable">>, Features} | Rest], Config) when is_list(Features) -> case safe_map(fun(Feature) -> binary_to_atom(Feature, latin1) end, Features) of {ok, FeatureAtoms} -> parse_options(Rest, Config#config{ disable = FeatureAtoms }); error -> {error, bad_disable} end; parse_options([{<<"disable">>, Features} | _], _) -> {error, {bad_type, disable, array}, Features}; parse_options([{<<"webhooks">>, WebhookConfigs} | Rest], Config) when is_list(WebhookConfigs) 
-> case parse_webhooks(WebhookConfigs, []) of {ok, ParsedWebhooks} -> parse_options(Rest, Config#config{ webhooks = ParsedWebhooks }); error -> {error, bad_webhooks, WebhookConfigs} end; parse_options([{<<"webhooks">>, Webhooks} | _], _) -> {error, {bad_type, webhooks, array}, Webhooks}; parse_options([{<<"semaphores">>, Semaphores} | Rest], Config) when is_tuple(Semaphores) -> case parse_atom_number_map(Semaphores, Config#config.semaphores) of {ok, ParsedSemaphores} -> parse_options(Rest, Config#config{ semaphores = ParsedSemaphores }); error -> {error, bad_semaphores, Semaphores} end; parse_options([{<<"semaphores">>, Semaphores} | _], _) -> {error, {bad_type, semaphores, object}, Semaphores}; parse_options([{<<"max_connections">>, MaxConnections} | Rest], Config) when is_integer(MaxConnections), MaxConnections >= 1 -> parse_options(Rest, Config#config{ 'http_api.tcp.max_connections' = MaxConnections }); parse_options([{<<"disk_pool_data_root_expiration_time">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ disk_pool_data_root_expiration_time = D }); parse_options([{<<"max_disk_pool_buffer_mb">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ max_disk_pool_buffer_mb= D }); parse_options([{<<"max_disk_pool_data_root_buffer_mb">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ max_disk_pool_data_root_buffer_mb = D }); parse_options([{<<"max_duplicate_data_roots">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ max_duplicate_data_roots = D }); parse_options([{<<"disk_cache_size_mb">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ disk_cache_size = D }); parse_options([{<<"max_nonce_limiter_validation_thread_count">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ max_nonce_limiter_validation_thread_count = D }); parse_options([{<<"max_nonce_limiter_last_step_validation_thread_count">>, 
D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ max_nonce_limiter_last_step_validation_thread_count = D }); parse_options([{<<"vdf_server_trusted_peer">>, <<>>} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"vdf_server_trusted_peer">>, Peer} | Rest], Config) -> parse_options(Rest, parse_vdf_server_trusted_peer(Peer, Config)); parse_options([{<<"vdf_server_trusted_peers">>, Peers} | Rest], Config) when is_list(Peers) -> parse_options(Rest, parse_vdf_server_trusted_peers(Peers, Config)); parse_options([{<<"vdf_server_trusted_peers">>, Peers} | _], _) -> {error, {bad_type, vdf_server_trusted_peers, array}, Peers}; parse_options([{<<"vdf_client_peers">>, Peers} | Rest], Config) when is_list(Peers) -> parse_options(Rest, Config#config{ nonce_limiter_client_peers = Peers }); parse_options([{<<"vdf_client_peers">>, Peers} | _], _) -> {error, {bad_type, vdf_client_peers, array}, Peers}; parse_options([{<<"debug">>, B} | Rest], Config) when is_boolean(B) -> parse_options(Rest, Config#config{ debug = B }); parse_options([{<<"run_defragmentation">>, B} | Rest], Config) when is_boolean(B) -> parse_options(Rest, Config#config{ run_defragmentation = B }); parse_options([{<<"defragmentation_trigger_threshold">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ defragmentation_trigger_threshold = D }); parse_options([{<<"block_throttle_by_ip_interval">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ block_throttle_by_ip_interval = D }); parse_options([{<<"block_throttle_by_solution_interval">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ block_throttle_by_solution_interval = D }); parse_options([{<<"defragment_modules">>, L} | Rest], Config) when is_list(L) -> try DefragModules = lists:foldr( fun(Bin, Acc) -> {ok, M} = parse_storage_module(Bin), [M | Acc] end, [], L ), parse_options(Rest, Config#config{ defragmentation_modules = 
DefragModules }) catch _:_ -> {error, {bad_format, defragment_modules, "an array of \"{number},{address}\""}, L} end; parse_options([{<<"defragment_modules">>, Bin} | _], _) -> {error, {bad_type, defragment_modules, array}, Bin}; parse_options([{<<"http_api.tcp.idle_timeout_seconds">>, D} | Rest], Config) when is_integer(D) -> parse_options(Rest, Config#config{ http_api_transport_idle_timeout = D * 1000 }); parse_options([{<<"coordinated_mining">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ coordinated_mining = true }); parse_options([{<<"coordinated_mining">>, false} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"coordinated_mining">>, Opt} | _], _) -> {error, {bad_type, coordinated_mining, boolean}, Opt}; parse_options([{<<"cm_api_secret">>, CMSecret} | Rest], Config) when is_binary(CMSecret), byte_size(CMSecret) >= ?INTERNAL_API_SECRET_MIN_LEN -> parse_options(Rest, Config#config{ cm_api_secret = CMSecret }); parse_options([{<<"cm_api_secret">>, CMSecret} | _], _) -> {error, {bad_type, cm_api_secret, string}, CMSecret}; parse_options([{<<"cm_poll_interval">>, CMPollInterval} | Rest], Config) when is_integer(CMPollInterval) -> parse_options(Rest, Config#config{ cm_poll_interval = CMPollInterval }); parse_options([{<<"cm_poll_interval">>, CMPollInterval} | _], _) -> {error, {bad_type, cm_poll_interval, number}, CMPollInterval}; parse_options([{<<"cm_peers">>, Peers} | Rest], Config) when is_list(Peers) -> case parse_peers(Peers, []) of {ok, ParsedPeers} -> parse_options(Rest, Config#config{ cm_peers = ParsedPeers }); error -> {error, bad_peers, Peers} end; parse_options([{<<"cm_exit_peer">>, Peer} | Rest], Config) -> case ar_util:safe_parse_peer(Peer) of {ok, [ParsedPeer|_]} -> parse_options(Rest, Config#config{ cm_exit_peer = ParsedPeer }); {error, _} -> {error, bad_cm_exit_peer, Peer} end; parse_options([{<<"cm_out_batch_timeout">>, CMBatchTimeout} | Rest], Config) when is_integer(CMBatchTimeout) -> parse_options(Rest, 
Config#config{ cm_out_batch_timeout = CMBatchTimeout }); parse_options([{<<"cm_out_batch_timeout">>, CMBatchTimeout} | _], _) -> {error, {bad_type, cm_out_batch_timeout, number}, CMBatchTimeout}; parse_options([{<<"is_pool_server">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ is_pool_server = true }); parse_options([{<<"is_pool_server">>, false} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"is_pool_server">>, Opt} | _], _) -> {error, {bad_type, is_pool_server, boolean}, Opt}; parse_options([{<<"is_pool_client">>, true} | Rest], Config) -> parse_options(Rest, Config#config{ is_pool_client = true }); parse_options([{<<"is_pool_client">>, false} | Rest], Config) -> parse_options(Rest, Config); parse_options([{<<"is_pool_client">>, Opt} | _], _) -> {error, {bad_type, is_pool_client, boolean}, Opt}; parse_options([{<<"pool_api_key">>, Key} | Rest], Config) when is_binary(Key) -> parse_options(Rest, Config#config{ pool_api_key = Key }); parse_options([{<<"pool_api_key">>, Key} | _], _) -> {error, {bad_type, pool_api_key, string}, Key}; parse_options([{<<"pool_server_address">>, Host} | Rest], Config) when is_binary(Host) -> parse_options(Rest, Config#config{ pool_server_address = Host }); parse_options([{<<"pool_server_address">>, Host} | _], _) -> {error, {bad_type, pool_server_address, string}, Host}; %% Undocumented/unsupported options parse_options([{<<"chunk_storage_file_size">>, ChunkGroupSize} | Rest], Config) when is_integer(ChunkGroupSize) -> parse_options(Rest, Config#config{ chunk_storage_file_size = ChunkGroupSize }); parse_options([{<<"chunk_storage_file_size">>, ChunkGroupSize} | _], _) -> {error, {bad_type, chunk_storage_file_size, number}, ChunkGroupSize}; parse_options([{<<"rocksdb_flush_interval">>, IntervalS} | Rest], Config) when is_integer(IntervalS) -> parse_options(Rest, Config#config{ rocksdb_flush_interval_s = IntervalS }); parse_options([{<<"rocksdb_flush_interval">>, IntervalS} | _], _) -> {error, 
{bad_type, rocksdb_flush_interval, number}, IntervalS}; parse_options([{<<"rocksdb_wal_sync_interval">>, IntervalS} | Rest], Config) when is_integer(IntervalS) -> parse_options(Rest, Config#config{ rocksdb_wal_sync_interval_s = IntervalS }); parse_options([{<<"rocksdb_wal_sync_interval">>, IntervalS} | _], _) -> {error, {bad_type, rocksdb_wal_sync_interval, number}, IntervalS}; parse_options([{<<"data_sync_request_packed_chunks">>, Bool} | Rest], Config) when is_boolean(Bool) -> parse_options(Rest, Config#config{ data_sync_request_packed_chunks = Bool }); parse_options([{<<"data_sync_request_packed_chunks">>, InvalidValue} | _Rest], _Config) -> {error, {bad_type, data_sync_request_packed_chunks, boolean}, InvalidValue}; %% shutdown procedure parse_options([{<<"network.tcp.shutdown.connection_timeout">>, Delay} | Rest], Config) when is_integer(Delay) andalso Delay > 0 -> NewConfig = Config#config{ shutdown_tcp_connection_timeout = Delay }, parse_options(Rest, NewConfig); parse_options([{<<"network.tcp.shutdown.connection_timeout">>, InvalidValue} | _Rest], _Config) -> {error, {bad_type, shutdown_tcp_connection_timeout, integer}, InvalidValue}; parse_options([{<<"network.tcp.shutdown.mode">>, Mode}|Rest], Config) -> case Mode of <<"shutdown">> -> NewConfig = Config#config{ shutdown_tcp_mode = shutdown }, parse_options(Rest, NewConfig); <<"close">> -> NewConfig = Config#config{ shutdown_tcp_mode = close }, parse_options(Rest, NewConfig); Mode -> {error, {bad_value, shutdown_tcp_mode}, Mode} end; %% Global socket configuration parse_options([{<<"network.socket.backend">>, Backend}|Rest], Config) -> case Backend of <<"inet">> -> parse_options(Rest, Config#config{ 'socket.backend' = inet }); <<"socket">> -> parse_options(Rest, Config#config{ 'socket.backend' = socket }); _ -> {error, {bad_value, 'socket.backend'}, Backend} end; %% Gun client parameters parse_options([{<<"http_client.http.closing_timeout">>, Timeout}|Rest], Config) -> case Timeout of _ when 
is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_client.http.closing_timeout' = Timeout }); _ -> {error, {bad_value, 'http_client.http.closing_timeout'}, Timeout} end; parse_options([{<<"http_client.http.keepalive">>, Timeout}|Rest], Config) -> case Timeout of <<"infinity">> -> parse_options(Rest, Config#config{ 'http_client.http.keepalive' = infinity }); _ when is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_client.http.keepalive' = Timeout }); _ -> {error, {bad_value, 'http_client.http.keepalive'}, Timeout} end; parse_options([{<<"http_client.tcp.delay_send">>, Delay}|Rest], Config) -> case Delay of _ when is_boolean(Delay) -> parse_options(Rest, Config#config{ 'http_client.tcp.delay_send' = Delay }); _ -> {error, {bad_value, 'http_client.tcp.delay_send'}, Delay} end; parse_options([{<<"http_client.tcp.keepalive">>, Keepalive}|Rest], Config) -> case Keepalive of _ when is_boolean(Keepalive) -> parse_options(Rest, Config#config{ 'http_client.tcp.keepalive' = Keepalive }); _ -> {error, {bad_value, 'http_client.tcp.keepalive'}, Keepalive} end; parse_options([{<<"http_client.tcp.linger">>, Linger}|Rest], Config) -> case Linger of _ when is_boolean(Linger) -> parse_options(Rest, Config#config{ 'http_client.tcp.linger' = Linger }); _ -> {error, {bad_value, 'http_client.tcp.linger'}, Linger} end; parse_options([{<<"http_client.tcp.linger_timeout">>, Timeout}|Rest], Config) -> case Timeout of _ when is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_client.tcp.linger_timeout' = Timeout }); _ -> {error, {bad_value, 'http_client.tcp.linger_timeout'}, Timeout} end; parse_options([{<<"http_client.tcp.nodelay">>, Nodelay}|Rest], Config) -> case Nodelay of _ when is_boolean(Nodelay) -> parse_options(Rest, Config#config{ 'http_client.tcp.nodelay' = Nodelay }); _ -> {error, {bad_value, 'http_client.tcp.nodelay'}, Nodelay } end; parse_options([{<<"http_client.tcp.send_timeout_close">>, 
Value}|Rest], Config) -> case Value of _ when is_boolean(Value) -> parse_options(Rest, Config#config{ 'http_client.tcp.send_timeout_close' = Value }); _ -> {error, {bad_value, 'http_client.tcp.send_timeout_close'}, Value} end; parse_options([{<<"http_client.tcp.send_timeout">>, Timeout}|Rest], Config) -> case Timeout of _ when is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_client.tcp.send_timeout' = Timeout }); _ -> {error, {bad_value, 'http_client.tcp.send_timeout'}, Timeout} end; %% Cowboy server parameters parse_options([{<<"http_api.http.active_n">>, Active}|Rest], Config) -> case Active of _ when is_integer(Active), Active >= 1 -> parse_options(Rest, Config#config{ 'http_api.http.active_n' = Active }); _ -> {error, {bad_value, 'http_api.http.active_n'}, Active} end; parse_options([{<<"http_api.http.inactivity_timeout">>, Timeout}|Rest], Config) -> case Timeout of _ when is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_api.http.inactivity_timeout' = Timeout }); _ -> {error, {bad_value, 'http_api.http.inactivity_timeout'}, Timeout} end; parse_options([{<<"http_api.http.linger_timeout">>, Timeout}|Rest], Config) -> case Timeout of _ when is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_api.http.linger_timeout' = Timeout }); _ -> {error, {bad_value, 'http_api.http.linger_timeout'}, Timeout} end; parse_options([{<<"http_api.http.request_timeout">>, Timeout}|Rest], Config) -> case Timeout of _ when is_integer(Timeout), Timeout >= 0 -> parse_options(Rest, Config#config{ 'http_api.http.request_timeout' = Timeout }); _ -> {error, {bad_value, 'http_api.http.request_timeout'}, Timeout} end; parse_options([{<<"http_api.tcp.backlog">>, Backlog}|Rest], Config) -> case Backlog of _ when is_integer(Backlog), Backlog >= 1 -> parse_options(Rest, Config#config{ 'http_api.tcp.backlog' = Backlog }); _ -> {error, {bad_value, 'http_api.tcp.backlog'}, Backlog} end; 
parse_options([{<<"http_api.tcp.delay_send">>, Delay}|Rest], Config) ->
	case Delay of
		_ when is_boolean(Delay) ->
			parse_options(Rest, Config#config{ 'http_api.tcp.delay_send' = Delay });
		_ ->
			{error, {bad_value, 'http_api.tcp.delay_send'}, Delay}
	end;
parse_options([{<<"http_api.tcp.keepalive">>, Keepalive}|Rest], Config) ->
	case Keepalive of
		_ when is_boolean(Keepalive) ->
			parse_options(Rest, Config#config{ 'http_api.tcp.keepalive' = Keepalive });
		_ ->
			{error, {bad_value, 'http_api.tcp.keepalive'}, Keepalive}
	end;
parse_options([{<<"http_api.tcp.linger">>, Linger}|Rest], Config) ->
	case Linger of
		_ when is_boolean(Linger) ->
			parse_options(Rest, Config#config{ 'http_api.tcp.linger' = Linger });
		_ ->
			{error, {bad_value, 'http_api.tcp.linger'}, Linger}
	end;
parse_options([{<<"http_api.tcp.linger_timeout">>, Timeout}|Rest], Config) ->
	case Timeout of
		_ when is_integer(Timeout), Timeout >= 0 ->
			parse_options(Rest, Config#config{ 'http_api.tcp.linger_timeout' = Timeout });
		_ ->
			{error, {bad_value, 'http_api.tcp.linger_timeout'}, Timeout}
	end;
%% The JSON decoder produces binaries for strings, so match the binary forms
%% <<"brutal_kill">>/<<"infinity">> (consistent with the other string-valued
%% options in this file, e.g. network.tcp.shutdown.mode). The old list
%% patterns are kept for backwards compatibility with callers passing
%% Erlang strings directly.
parse_options([{<<"http_api.tcp.listener_shutdown">>, Shutdown}|Rest], Config) ->
	case Shutdown of
		<<"brutal_kill">> ->
			parse_options(Rest, Config#config{ 'http_api.tcp.listener_shutdown' = brutal_kill });
		"brutal_kill" ->
			parse_options(Rest, Config#config{ 'http_api.tcp.listener_shutdown' = brutal_kill });
		<<"infinity">> ->
			parse_options(Rest, Config#config{ 'http_api.tcp.listener_shutdown' = infinity });
		"infinity" ->
			parse_options(Rest, Config#config{ 'http_api.tcp.listener_shutdown' = infinity });
		_ when is_integer(Shutdown), Shutdown >= 0 ->
			parse_options(Rest, Config#config{ 'http_api.tcp.listener_shutdown' = Shutdown });
		_ ->
			{error, {bad_value, 'http_api.tcp.listener_shutdown'}, Shutdown}
	end;
parse_options([{<<"http_api.tcp.nodelay">>, Nodelay}|Rest], Config) ->
	case Nodelay of
		_ when is_boolean(Nodelay) ->
			parse_options(Rest, Config#config{ 'http_api.tcp.nodelay' = Nodelay });
		_ ->
			{error, {bad_value, 'http_api.tcp.nodelay'}, Nodelay}
	end;
%% Fixed: the error tag was misspelled bad_valud, which broke callers
%% pattern-matching on {bad_value, _} like every sibling clause produces.
parse_options([{<<"http_api.tcp.num_acceptors">>, Acceptors}|Rest], Config) ->
	case Acceptors of
		_ when is_integer(Acceptors), Acceptors >= 1 ->
			parse_options(Rest, Config#config{ 'http_api.tcp.num_acceptors' = Acceptors });
		_ ->
			{error, {bad_value, 'http_api.tcp.num_acceptors'}, Acceptors}
	end;
parse_options([{<<"http_api.tcp.send_timeout_close">>, Value}|Rest], Config) ->
	case Value of
		_ when is_boolean(Value) ->
			parse_options(Rest, Config#config{ 'http_api.tcp.send_timeout_close' = Value });
		_ ->
			{error, {bad_value, 'http_api.tcp.send_timeout_close'}, Value}
	end;
parse_options([{<<"http_api.tcp.send_timeout">>, Timeout}|Rest], Config) ->
	case Timeout of
		_ when is_integer(Timeout), Timeout >= 0 ->
			parse_options(Rest, Config#config{ 'http_api.tcp.send_timeout' = Timeout });
		_ ->
			{error, {bad_value, 'http_api.tcp.send_timeout'}, Timeout}
	end;
%% RATE LIMITER GENERAL
parse_options([{<<"http_api.limiter.general.sliding_window_limit">>, Limit}|Rest], Config) ->
	case Limit of
		Limit when is_integer(Limit), Limit >= 0 ->
			parse_options(Rest,
				Config#config{'http_api.limiter.general.sliding_window_limit' = Limit });
		_ ->
			{error, {bad_value, 'http_api.limiter.general.sliding_window_limit'}, Limit}
	end;
parse_options([{<<"http_api.limiter.general.sliding_window_duration">>, Duration}|Rest], Config) ->
	case Duration of
		Duration when is_integer(Duration), Duration > 0 ->
			parse_options(Rest,
				Config#config{'http_api.limiter.general.sliding_window_duration' = Duration });
		_ ->
			{error, {bad_value, 'http_api.limiter.general.sliding_window_duration'}, Duration}
	end;
parse_options([{<<"http_api.limiter.general.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) ->
	case Duration of
		Duration when is_integer(Duration), Duration > 0 ->
			parse_options(
				Rest,
				Config#config{'http_api.limiter.general.sliding_window_timestamp_cleanup_interval' = Duration });
		_ ->
			{error, {bad_value,
				'http_api.limiter.general.sliding_window_timestamp_cleanup_interval'}, Duration}
	end;
parse_options([{<<"http_api.limiter.general.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) ->
	case Duration of
		Duration when is_integer(Duration), Duration > 0 ->
parse_options( Rest, Config#config{'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.general.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.general.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.general.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.general.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.general.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.general.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.general.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.general.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.general.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.general.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.general.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.general.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.general.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.general.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.general.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER CHUNK 
parse_options([{<<"http_api.limiter.chunk.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.chunk.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.chunk.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.chunk.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.chunk.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.chunk.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.chunk.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, 
Config#config{'http_api.limiter.chunk.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.chunk.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.chunk.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.chunk.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.chunk.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.chunk.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.chunk.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.chunk.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.chunk.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER DATA_SYNC_RECORD parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_duration'}, Duration} end; 
parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.data_sync_record.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.data_sync_record.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.data_sync_record.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.data_sync_record.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_tick_reduction'}, Reduction} end; 
parse_options([{<<"http_api.limiter.data_sync_record.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.data_sync_record.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER RECENT_HASH_LIST_DIFF parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 
'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.concurrency_limit' = Limit }); _ -> {error, {bad_value, 
'http_api.limiter.recent_hash_list_diff.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER BLOCK_INDEX parse_options([{<<"http_api.limiter.block_index.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.block_index.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.block_index.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.block_index.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, 
{bad_value, 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.block_index.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.block_index.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.block_index.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.block_index.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.block_index.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.block_index.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.block_index.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.block_index.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.block_index.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.block_index.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.block_index.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.block_index.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER WALLET_LIST parse_options([{<<"http_api.limiter.wallet_list.sliding_window_limit">>, Limit}|Rest], Config) 
-> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.wallet_list.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.wallet_list.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.wallet_list.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, 
Config#config{'http_api.limiter.wallet_list.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.wallet_list.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.wallet_list.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.wallet_list.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER GET_VDF parse_options([{<<"http_api.limiter.get_vdf.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_vdf.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_duration'}, Duration} end; 
parse_options([{<<"http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_vdf.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_vdf.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.get_vdf.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> 
parse_options(Rest, Config#config{'http_api.limiter.get_vdf.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_vdf.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER GET_VDF_SESSION parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, 
Config#config{'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf_session.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_vdf_session.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.get_vdf_session.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.get_vdf_session.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_vdf_session.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 
'http_api.limiter.get_vdf_session.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER GET_PREVIOUS_VDF_SESSION parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), 
Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_tick_interval'}, Duration} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_tick_reduction">>, Reduction}|Rest], Config) -> case Reduction of Reduction when is_integer(Reduction), Reduction > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction' = Reduction }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction'}, Reduction} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.concurrency_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit > 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.concurrency_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.concurrency_limit'}, Limit} end; parse_options([{<<"http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> case IsDisabled of IsDisabled when is_boolean(IsDisabled) -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled' = IsDisabled }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled'}, IsDisabled} end; %% RATE LIMITER METRICS parse_options([{<<"http_api.limiter.metrics.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> 
parse_options(Rest, Config#config{'http_api.limiter.metrics.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.metrics.sliding_window_limit'}, Limit} end; parse_options([{<<"http_api.limiter.metrics.sliding_window_duration">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options(Rest, Config#config{'http_api.limiter.metrics.sliding_window_duration' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.metrics.sliding_window_duration'}, Duration} end; parse_options([{<<"http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval'}, Duration} end; parse_options([{<<"http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry' = Duration }); _ -> {error, {bad_value, 'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry'}, Duration} end; parse_options([{<<"http_api.limiter.metrics.leaky_limit">>, Limit}|Rest], Config) -> case Limit of Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.metrics.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.metrics.leaky_limit'}, Limit} end; parse_options([{<<"http_api.limiter.metrics.leaky_tick_interval">>, Duration}|Rest], Config) -> case Duration of Duration when is_integer(Duration), Duration > 0 -> parse_options( Rest, Config#config{'http_api.limiter.metrics.leaky_tick_interval' = Duration }); _ -> {error, {bad_value, 
'http_api.limiter.metrics.leaky_tick_interval'}, Duration}
	end;
%% Metrics endpoint limiter: tokens drained from the leaky bucket per tick.
%% Must be a strictly positive integer.
parse_options([{<<"http_api.limiter.metrics.leaky_tick_reduction">>, Reduction}|Rest], Config) ->
	case Reduction of
		Reduction when is_integer(Reduction), Reduction > 0 ->
			parse_options(Rest,
				Config#config{'http_api.limiter.metrics.leaky_tick_reduction' = Reduction });
		_ ->
			{error, {bad_value, 'http_api.limiter.metrics.leaky_tick_reduction'}, Reduction}
	end;
%% Metrics endpoint limiter: maximum number of concurrent in-flight requests.
%% Must be a strictly positive integer.
parse_options([{<<"http_api.limiter.metrics.concurrency_limit">>, Limit}|Rest], Config) ->
	case Limit of
		Limit when is_integer(Limit), Limit > 0 ->
			parse_options(Rest,
				Config#config{'http_api.limiter.metrics.concurrency_limit' = Limit });
		_ ->
			{error, {bad_value, 'http_api.limiter.metrics.concurrency_limit'}, Limit}
	end;
%% Metrics endpoint limiter: boolean switching off the manual reduction mechanism.
parse_options([{<<"http_api.limiter.metrics.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) ->
	case IsDisabled of
		IsDisabled when is_boolean(IsDisabled) ->
			parse_options(Rest,
				Config#config{'http_api.limiter.metrics.is_manual_reduction_disabled' = IsDisabled });
		_ ->
			{error, {bad_value, 'http_api.limiter.metrics.is_manual_reduction_disabled'}, IsDisabled}
	end;
%% Any option not matched by a clause above is unknown: stop and report it.
parse_options([Opt | _], _) ->
	{error, unknown, Opt};
%% All options consumed without error: return the accumulated configuration.
parse_options([], Config) ->
	{ok, Config}.

%% @doc Build a storage module term {ok, {RangeSize, RangeNumber, Packing}}
%% from a packing specification binary. Accepted forms of PackingBin:
%%   - <<"unpacked">>                          -> unpacked
%%   - <<Addr:43/binary, ".replica.2.9">>      -> {replica_2_9, DecodedAddr}
%%   - <<Addr:43/binary, ".", DifficultyBin>>  -> {composite, DecodedAddr, Difficulty}
%%   - <<Addr:43/binary>>                      -> {spora_2_6, DecodedAddr}
%% The 43-byte prefix is a base64url-encoded mining address (decoded with
%% ar_util:decode/1). Crashes (badmatch) when a composite difficulty is out of
%% range or aliases the reserved replica.2.9 difficulty; crashes (case_clause)
%% on any other input.
parse_storage_module(RangeNumber, RangeSize, PackingBin) ->
	Packing = case PackingBin of
		<<"unpacked">> ->
			unpacked;
		<< MiningAddr:43/binary, ".replica.2.9" >> ->
			{replica_2_9, ar_util:decode(MiningAddr)};
		<< MiningAddr:43/binary, ".", PackingDifficultyBin/binary >> ->
			PackingDifficulty = binary_to_integer(PackingDifficultyBin),
			%% Difficulty must be in [1, ?MAX_PACKING_DIFFICULTY] and must not
			%% collide with the reserved replica.2.9 difficulty.
			true = PackingDifficulty >= 1
					andalso PackingDifficulty =< ?MAX_PACKING_DIFFICULTY
					andalso PackingDifficulty /= ?REPLICA_2_9_PACKING_DIFFICULTY,
			{composite, ar_util:decode(MiningAddr), PackingDifficulty};
		MiningAddr when byte_size(MiningAddr) == 43 ->
			{spora_2_6, ar_util:decode(MiningAddr)}
	end,
	{ok, {RangeSize, RangeNumber, Packing}}.
%% @doc Build a repack-in-place specification
%% {repack_in_place, {{RangeSize, RangeNumber, Packing}, ToPacking}}.
%% Source (PackingBin) and target (ToPackingBin) packings each accept:
%%   - <<"unpacked">>                      -> unpacked
%%   - <<Addr:43/binary, ".replica.2.9">>  -> {replica_2_9, DecodedAddr}
%%   - <<Addr:43/binary>>                  -> {spora_2_6, DecodedAddr}
%% NOTE(review): unlike parse_storage_module/3, composite packing
%% (<<Addr, ".", Difficulty>>) is not accepted here - presumably repacking in
%% place to/from composite is unsupported; confirm before extending.
parse_storage_module(RangeNumber, RangeSize, PackingBin, ToPackingBin) ->
	Packing = case PackingBin of
		<<"unpacked">> ->
			unpacked;
		<< MiningAddr:43/binary, ".replica.2.9" >> ->
			{replica_2_9, ar_util:decode(MiningAddr)};
		MiningAddr when byte_size(MiningAddr) == 43 ->
			{spora_2_6, ar_util:decode(MiningAddr)}
	end,
	ToPacking = case ToPackingBin of
		<<"unpacked">> ->
			unpacked;
		<< ToMiningAddr:43/binary, ".replica.2.9" >> ->
			{replica_2_9, ar_util:decode(ToMiningAddr)};
		ToMiningAddr when byte_size(ToMiningAddr) == 43 ->
			{spora_2_6, ar_util:decode(ToMiningAddr)}
	end,
	{repack_in_place, {{RangeSize, RangeNumber, Packing}, ToPacking}}.

%% @doc Apply Fun to every element of List. Returns {ok, Results}, or the
%% atom error if Fun crashes on any element.
safe_map(Fun, List) ->
	try
		{ok, lists:map(Fun, List)}
	catch _:_ ->
		error
	end.

%% @doc Parse a list of peer specifications. A single entry may expand to
%% several peers (ar_util:safe_parse_peer/1 returns a list inside {ok, _}),
%% which are accumulated, flattened and reversed so the final list preserves
%% the input order. Returns error on the first unparsable entry.
parse_peers([Peer | Rest], ParsedPeers) ->
	case ar_util:safe_parse_peer(Peer) of
		{ok, ParsedPeer} ->
			parse_peers(Rest, ParsedPeer ++ ParsedPeers);
		{error, _} ->
			error
	end;
parse_peers([], ParsedPeers) ->
	Flatten = lists:flatten(ParsedPeers),
	Reverse = lists:reverse(Flatten),
	{ok, Reverse}.

%% @doc Parse the list of webhook configurations. Every entry must be a JSON
%% object (a {Proplist} tuple); any malformed entry invalidates the whole
%% list. Returns {ok, [#config_webhook{}]} in input order, or error.
parse_webhooks([{WebhookConfig} | Rest], ParsedWebhookConfigs) when is_list(WebhookConfig) ->
	case parse_webhook(WebhookConfig, #config_webhook{}) of
		{ok, ParsedWebhook} ->
			parse_webhooks(Rest, [ParsedWebhook | ParsedWebhookConfigs]);
		error ->
			error
	end;
parse_webhooks([_ | _], _) ->
	error;
parse_webhooks([], ParsedWebhookConfigs) ->
	{ok, lists:reverse(ParsedWebhookConfigs)}.
%% @doc Parse a single webhook configuration (a proplist of JSON pairs) into
%% a #config_webhook{} record. A wrongly-typed "events", "url" or "headers"
%% value makes the whole webhook invalid (returns error); keys other than
%% these three are not matched and would crash - the callers only pass
%% objects with these keys.
parse_webhook([{<<"events">>, Events} | Rest], Webhook) when is_list(Events) ->
	case parse_webhook_events(Events, []) of
		{ok, ParsedEvents} ->
			parse_webhook(Rest, Webhook#config_webhook{ events = ParsedEvents });
		error ->
			error
	end;
parse_webhook([{<<"events">>, _} | _], _) ->
	error;
parse_webhook([{<<"url">>, Url} | Rest], Webhook) when is_binary(Url) ->
	parse_webhook(Rest, Webhook#config_webhook{ url = Url });
parse_webhook([{<<"url">>, _} | _], _) ->
	error;
parse_webhook([{<<"headers">>, {Headers}} | Rest], Webhook) when is_list(Headers) ->
	parse_webhook(Rest, Webhook#config_webhook{ headers = Headers });
parse_webhook([{<<"headers">>, _} | _], _) ->
	error;
parse_webhook([], Webhook) ->
	{ok, Webhook}.

%% @doc Translate JSON event names into internal atoms. Any unrecognized
%% event name invalidates the whole list. The result preserves input order.
parse_webhook_events([Event | Rest], Events) ->
	case Event of
		<<"transaction">> -> parse_webhook_events(Rest, [transaction | Events]);
		<<"transaction_data">> -> parse_webhook_events(Rest, [transaction_data | Events]);
		<<"block">> -> parse_webhook_events(Rest, [block | Events]);
		<<"solution">> -> parse_webhook_events(Rest, [solution | Events]);
		_ -> error
	end;
parse_webhook_events([], Events) ->
	{ok, lists:reverse(Events)}.

%% @doc Fold a JSON object ({[{Name, Number}, ...]}) into a map of atom keys
%% to numbers. Ill-typed pairs are logged and skipped (see parse_atom_number/2);
%% anything that is not an object of tuples returns error.
parse_atom_number_map({[Pair | Pairs]}, Parsed) when is_tuple(Pair) ->
	parse_atom_number_map({Pairs}, parse_atom_number(Pair, Parsed));
parse_atom_number_map({[]}, Parsed) ->
	{ok, Parsed};
parse_atom_number_map(_, _) ->
	error.

%% @doc Insert a single {Name, Number} pair into the accumulator map, turning
%% the binary Name into an atom. A pair with a non-binary key or non-numeric
%% value is logged with a warning and ignored rather than failing the parse.
parse_atom_number({Name, Number}, Parsed) when is_binary(Name), is_number(Number) ->
	maps:put(binary_to_atom(Name), Number, Parsed);
parse_atom_number({Key, Value}, Parsed) ->
	?LOG_WARNING([{event, parse_config_bad_type}, {key, io_lib:format("~p", [Key])},
			{value, io_lib:format("~p", [Value])}]),
	Parsed.

%% @doc Entry point: parse the requests_per_minute_limit_by_ip JSON object
%% into a map keyed by IPv4 address tuples.
parse_requests_per_minute_limit_by_ip(Input) ->
	parse_requests_per_minute_limit_by_ip(Input, #{}).
%% @doc Parse each {IP, Object} pair: the key must parse as a peer (its port
%% is dropped - limits are keyed by the bare {A, B, C, D} IPv4 tuple) and the
%% value must be an object of endpoint-name -> number limits.
%% NOTE(review): only {error, invalid} is matched from safe_parse_peer/1; if
%% that function can return other error reasons this clause would crash with
%% case_clause - confirm against ar_util.
parse_requests_per_minute_limit_by_ip({[{IP, Object} | Pairs]}, Parsed) ->
	case ar_util:safe_parse_peer(IP) of
		{error, invalid} ->
			error;
		{ok, [{A, B, C, D, _Port}]} ->
			case parse_atom_number_map(Object, #{}) of
				error ->
					error;
				{ok, ParsedMap} ->
					parse_requests_per_minute_limit_by_ip({Pairs},
							maps:put({A, B, C, D}, ParsedMap, Parsed))
			end
	end;
parse_requests_per_minute_limit_by_ip({[]}, Parsed) ->
	{ok, Parsed};
parse_requests_per_minute_limit_by_ip(_, _) ->
	error.

%% @doc Append every listed VDF server to the trusted-peer list, preserving
%% the order given in the configuration.
parse_vdf_server_trusted_peers([Peer | Rest], Config) ->
	Config2 = parse_vdf_server_trusted_peer(Peer, Config),
	parse_vdf_server_trusted_peers(Rest, Config2);
parse_vdf_server_trusted_peers([], Config) ->
	Config.

%% @doc Normalize a single VDF server entry (binaries are converted to
%% character lists) and append it to nonce_limiter_server_trusted_peers.
parse_vdf_server_trusted_peer(Peer, Config) when is_binary(Peer) ->
	parse_vdf_server_trusted_peer(binary_to_list(Peer), Config);
parse_vdf_server_trusted_peer(Peer, Config) ->
	#config{ nonce_limiter_server_trusted_peers = Peers } = Config,
	Config#config{ nonce_limiter_server_trusted_peers = Peers ++ [Peer] }.

%% @doc Log every field of the #config{} record, one log line per field,
%% wrapped between start/end markers.
log_config(Config) ->
	Fields = record_info(fields, config),
	?LOG_INFO("=============== Start Config ==============="),
	log_config(Config, Fields, 2, []),
	?LOG_INFO("=============== End Config ===============").

%% Iterate over the record fields. Index starts at 2 in the caller because
%% element 1 of a record tuple is the record tag.
log_config(_Config, [], _Index, _Acc) ->
	ok;
log_config(Config, [Field | Rest], Index, Acc) ->
	FieldValue = erlang:element(Index, Config),
	%% Wrap formatting in a try/catch just in case - we don't want any issues in formatting
	%% to cause a crash.
	FormattedValue =
		try
			log_config_value(Field, FieldValue)
		catch _:_ ->
			FieldValue
		end,
	%% NOTE(review): the accumulator is never read by any caller; Line/Acc
	%% look vestigial.
	Line = ?LOG_INFO("~s: ~tp", [atom_to_list(Field), FormattedValue]),
	log_config(Config, Rest, Index+1, [Line | Acc]).
%% @doc Pretty-print selected config fields for logging: peer lists are
%% rendered via ar_util:format_peer/1, binary addresses are base64url-encoded,
%% and storage modules have their embedded mining addresses encoded. Any other
%% field is logged verbatim.
log_config_value(peers, FieldValue) -> format_peers(FieldValue);
log_config_value(block_gossip_peers, FieldValue) -> format_peers(FieldValue);
log_config_value(local_peers, FieldValue) -> format_peers(FieldValue);
log_config_value(mining_addr, FieldValue) -> format_binary(FieldValue);
log_config_value(start_from_state, FieldValue) -> FieldValue;
log_config_value(start_from_block, FieldValue) -> format_binary(FieldValue);
log_config_value(storage_modules, FieldValue) ->
	[format_storage_module(StorageModule) || StorageModule <- FieldValue];
log_config_value(repack_in_place_storage_modules, FieldValue) ->
	[{format_storage_module(StorageModule), ar_serialize:encode_packing(ToPacking, false)}
			|| {StorageModule, ToPacking} <- FieldValue];
log_config_value(_, FieldValue) -> FieldValue.

%% Render each peer as a human-readable string.
format_peers(Peers) ->
	[ar_util:format_peer(Peer) || Peer <- Peers].

%% Base64url-encode a binary (e.g. a mining address) for display.
format_binary(Address) ->
	ar_util:encode(Address).

%% Replace the raw mining address inside a storage module tuple with its
%% encoded form; unrecognized shapes pass through unchanged.
format_storage_module({RangeSize, RangeNumber, {spora_2_6, MiningAddress}}) ->
	{RangeSize, RangeNumber, {spora_2_6, format_binary(MiningAddress)}};
format_storage_module({RangeSize, RangeNumber, {composite, MiningAddress, PackingDiff}}) ->
	{RangeSize, RangeNumber, {composite, format_binary(MiningAddress), PackingDiff}};
format_storage_module({RangeSize, RangeNumber, {replica_2_9, MiningAddress}}) ->
	{RangeSize, RangeNumber, {replica_2_9, format_binary(MiningAddress)}};
format_storage_module(StorageModule) ->
	StorageModule.

%% -------------------------------------------------------------------
%% @doc Validate the configuration options.
%% -------------------------------------------------------------------

%% Refuse to bootstrap a brand new network (init = true) under the mainnet
%% network name; a localnet build must be used instead.
validate_init(Config) ->
	case Config#config.init of
		true ->
			case ?NETWORK_NAME of
				"arweave.N.1" ->
					io:format("~nCannot start a new network with the mainnet name! "
							"Use ./bin/start-localnet ... when running from sources "
							"or compile via ./rebar3 as localnet tar and use "
							"./bin/start ... as usual.~n~n"),
					false;
				_ ->
					true
			end;
		false ->
			true
	end.
%% Reject configurations that list the same storage module more than once.
validate_storage_modules(#config{ storage_modules = StorageModules }) ->
	case length(StorageModules) =:= length(lists:usort(StorageModules)) of
		true ->
			true;
		false ->
			io:format("~nDuplicate value detected in the storage_modules option.~n~n"),
			false
	end.

%% A storage module may not be both actively used (storage_modules) and
%% repacked in place at the same time. Comparison is by module ID.
validate_repack_in_place(Config) ->
	Modules = [ar_storage_module:id(M) || M <- Config#config.storage_modules],
	validate_repack_in_place(Config#config.repack_in_place_storage_modules, Modules).

validate_repack_in_place([], _Modules) ->
	true;
validate_repack_in_place([{Module, _ToPacking} | L], Modules) ->
	ID = ar_storage_module:id(Module),
	ModuleInUse = lists:member(ID, Modules),
	case ModuleInUse of
		true ->
			io:format("~nCannot use the storage module ~s "
					"while it is being repacked in place.~n~n", [ID]),
			false;
		false ->
			validate_repack_in_place(L, Modules)
	end.

%% Enforce the mutual-exclusion rules between coordinated mining and pool
%% roles: a pool server cannot CM-mine, a node cannot be pool server and pool
%% client at once, and a pool client must also have mining enabled. All three
%% checks run (printing their own messages) before the combined verdict.
validate_cm_pool(Config) ->
	A = case {Config#config.coordinated_mining, Config#config.is_pool_server} of
		{true, true} ->
			io:format("~nThe pool server node cannot participate "
					"in the coordinated mining.~n~n"),
			false;
		_ ->
			true
	end,
	B = case {Config#config.is_pool_server, Config#config.is_pool_client} of
		{true, true} ->
			io:format("~nThe node cannot be a pool server and a pool client "
					"at the same time.~n~n"),
			false;
		_ ->
			true
	end,
	C = case {Config#config.is_pool_client, Config#config.mine} of
		{true, false} ->
			io:format("~nThe mine flag must be set along with the is_pool_client flag.~n~n"),
			false;
		_ ->
			true
	end,
	A andalso B andalso C.

%% Coordinated mining requires both a cm_api_secret and the mine flag; clause
%% order matters - the coordinated_mining = false clause short-circuits first.
validate_cm(#config{ coordinated_mining = false }) ->
	true;
validate_cm(#config{ cm_api_secret = not_set }) ->
	io:format("~nThe cm_api_secret must be set when coordinated_mining is set.~n~n"),
	false;
validate_cm(#config{ mine = false }) ->
	io:format("~nThe mine flag must be set when coordinated_mining is set.~n~n"),
	false;
validate_cm(_Config) ->
	true.
%% Refuse to mine more than one replication (packing) type for the same
%% mining address. Only storage modules packed for the configured mining
%% address are counted; modules for other addresses are ignored.
validate_unique_replication_type(#config{ mine = false }) ->
	true;
validate_unique_replication_type(Config) ->
	MiningAddr = Config#config.mining_addr,
	UniquePackingDifficulties = lists:foldl(
		fun({_, _, {composite, Addr, Difficulty}}, Acc) when Addr =:= MiningAddr ->
				sets:add_element({composite, Difficulty}, Acc);
			({_, _, {spora_2_6, Addr}}, Acc) when Addr =:= MiningAddr ->
				sets:add_element(spora_2_6, Acc);
			({_, _, {replica_2_9, Addr}}, Acc) when Addr =:= MiningAddr ->
				sets:add_element(replica_2_9, Acc);
			(_, Acc) ->
				Acc
		end,
		sets:new(),
		Config#config.storage_modules
	),
	case sets:size(UniquePackingDifficulties) =< 1 of
		true ->
			true;
		false ->
			io:format("~nThe node cannot mine multiple replication types "
					"for the same mining address.~n~n"),
			false
	end.

%% The verify mode is incompatible with mining and with repack-in-place.
validate_verify(#config{ verify = false }) ->
	true;
validate_verify(#config{ mine = true }) ->
	io:format("~nThe verify flag cannot be set together with the mine flag.~n~n"),
	false;
validate_verify(#config{ repack_in_place_storage_modules = RepackInPlaceStorageModules })
		when RepackInPlaceStorageModules =/= [] ->
	io:format("~nThe verify flag cannot be set together with the repack_in_place flag.~n~n"),
	false;
validate_verify(_Config) ->
	true.

%% start_from_state must point somewhere other than the data directory.
validate_start_from_state(#config{ start_from_state = not_set }) ->
	true;
validate_start_from_state(#config{ start_from_state = Folder, data_dir = DataDir }) ->
	case filename:absname(Folder) == filename:absname(DataDir) of
		true ->
			io:format("~nstart_from_state folder cannot be the same as data_dir.~n~n"),
			false;
		false ->
			true
	end.

%% @doc Turn off every VDF-related feature: drop client/server trusted peers,
%% remove public_vdf_server from the enable list and add compute_own_vdf to
%% the disable list.
disable_vdf(Config) ->
	RemovePublicVDFServer = lists:filter(fun(Item) -> Item =/= public_vdf_server end,
			Config#config.enable),
	Config#config{
		nonce_limiter_client_peers = [],
		nonce_limiter_server_trusted_peers = [],
		enable = RemovePublicVDFServer,
		disable = [compute_own_vdf | Config#config.disable]
	}.

%% @doc When the verify flag is set, force a read-only, non-mining
%% configuration: no joining, syncing, polling, propagation, coordinated
%% mining, or VDF activity. Prints a warning listing every forced option.
set_verify_flags(#config{ verify = false } = Config) ->
	Config;
set_verify_flags(Config) ->
	io:format("~n~nWARNING: The verify flag is set. "
			"Forcing the following options:"),
	io:format("~n - auto_join false"),
	io:format("~n - start_from_latest_state true"),
	io:format("~n - sync_jobs 0"),
	io:format("~n - block_pollers 0"),
	io:format("~n - header_sync_jobs 0"),
	io:format("~n - disable tx_poller"),
	io:format("~n - replica_2_9_workers 0"),
	io:format("~n - max_propagation_peers 0"),
	io:format("~n - max_block_propagation_peers 0"),
	io:format("~n - coordinated_mining false"),
	io:format("~n - cm_peers []"),
	io:format("~n - cm_exit_peer not_set"),
	io:format("~n - all VDF features disabled"),
	Config2 = disable_vdf(Config),
	Config2#config{
		auto_join = false,
		start_from_latest_state = true,
		sync_jobs = 0,
		block_pollers = 0,
		header_sync_jobs = 0,
		%% Bug fix: prepend tx_poller to Config2's disable list - disable_vdf/1
		%% has already added compute_own_vdf to it. The previous code built the
		%% list from the original Config, silently dropping compute_own_vdf and
		%% contradicting the "all VDF features disabled" message above.
		disable = [tx_poller | Config2#config.disable],
		replica_2_9_workers = 0,
		coordinated_mining = false,
		cm_peers = [],
		cm_exit_peer = not_set,
		max_propagation_peers = 0,
		max_block_propagation_peers = 0
	}.


================================================
FILE: apps/arweave/src/ar_coordination.erl
================================================
-module(ar_coordination).

-behaviour(gen_server).

-export([
	start_link/0, computed_h1/2, compute_h2_for_peer/2, computed_h2_for_peer/1,
	get_public_state/0, send_h1_batch_to_peer/0, stat_loop/0, get_peers/1,
	get_peer/1, update_peer/2, remove_peer/1, garbage_collect/0, is_exit_peer/0,
	get_unique_partitions_list/0, get_self_plus_external_partitions_list/0,
	get_cluster_partitions_list/0, is_coordinated_miner/0
]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave/include/ar_mining.hrl").

-record(state, {
	last_peer_response = #{},
	peers_by_partition = #{},
	out_batches = #{},
	out_batch_timeout = ?DEFAULT_CM_BATCH_TIMEOUT_MS
}).

-define(START_DELAY, 1000).

-ifdef(AR_TEST).
-define(BATCH_SIZE_LIMIT, 2).
-else.
-define(BATCH_SIZE_LIMIT, 400).
-endif.
%% How often (ms) the server polls its outgoing batches for expired timeouts.
-define(BATCH_POLL_INTERVAL_MS, 20).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the gen_server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% Helper function to see state while testing and later for monitoring API
get_public_state() ->
	gen_server:call(?MODULE, get_public_state).

%% @doc An H1 has been generated. Store it to send it later to a
%% coordinated mining peer
computed_h1(Candidate, DiffPair) ->
	#mining_candidate{ h1 = H1, nonce = Nonce } = Candidate,
	%% Prepare Candidate to be shared with a remote miner.
	%% 1. Add the current difficulty (the remote peer will use this instead of
	%% its local difficulty).
	%% 2. Remove any data that's not needed by the peer. This cuts down on the volume of data
	%% shared.
	%% 3. The peer field will be set to this peer's address by the remote miner.
	ShareableCandidate = Candidate#mining_candidate{
		chunk1 = not_set,
		chunk2 = not_set,
		cm_diff = DiffPair,
		cm_lead_peer = not_set,
		h1 = not_set,
		h2 = not_set,
		nonce = not_set,
		poa2 = not_set,
		preimage = not_set
	},
	gen_server:cast(?MODULE, {computed_h1, ShareableCandidate, H1, Nonce}).

%% @doc Ask the server (asynchronously) to send the accumulated H1 batch.
send_h1_batch_to_peer() ->
	gen_server:cast(?MODULE, send_h1_batch_to_peer).

%% @doc Compute h2 for a remote peer
compute_h2_for_peer(Peer, Candidate) ->
	gen_server:cast(?MODULE, {compute_h2_for_peer,
			Candidate#mining_candidate{ cm_lead_peer = Peer }}).

%% @doc An H2 was computed on behalf of a remote lead peer; hand it to the
%% server so it can be delivered back to that peer.
computed_h2_for_peer(Candidate) ->
	gen_server:cast(?MODULE, {computed_h2_for_peer, Candidate}).

stat_loop() ->
	gen_server:call(?MODULE, stat_loop).

%% @doc Look up a peer registered for the given partition.
get_peer(PartitionNumber) ->
	gen_server:call(?MODULE, {get_peer, PartitionNumber}).

%% @doc Look up all peers registered for the given partition.
get_peers(PartitionNumber) ->
	gen_server:call(?MODULE, {get_peers, PartitionNumber}).

%% @doc Record (or refresh) the list of partitions served by Peer.
update_peer(Peer, PartitionList) ->
	gen_server:cast(?MODULE, {update_peer, {Peer, PartitionList}}).

%% @doc Mark Peer as unavailable for coordinated mining.
remove_peer(Peer) ->
	gen_server:cast(?MODULE, {remove_peer, Peer}).
%% @doc Trigger an asynchronous garbage collection of the server process; the
%% result arrives later as a garbage_collect info message (see handle_info/2).
garbage_collect() ->
	gen_server:cast(?MODULE, garbage_collect).

%% Return true if we are an exit peer in the coordinated mining setup,
%% i.e. coordinated mining is enabled and no cm_exit_peer is configured
%% (this node itself acts as the exit).
is_exit_peer() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.coordinated_mining == true andalso
			Config#config.cm_exit_peer == not_set.

%% Return true if we are a CM miner in the coordinated mining setup.
%% A CM miner may be but does not have to be an exit node.
is_coordinated_miner() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.coordinated_mining == true.

%% @doc Return a list of unique partitions including local partitions and all of
%% external (relevant pool peers') partitions.
%%
%% A single partition in the following format:
%% {[
%%	{bucket, PartitionID},
%%	{bucketsize, ar_block:partition_size()},
%%	{addr, EncodedMiningAddress}
%% ]}
%%
%% A single partition with the composite packing is in the following format:
%% {[
%%	{bucket, PartitionID},
%%	{bucketsize, ar_block:partition_size()},
%%	{addr, EncodedMiningAddress},
%%	{pdiff, PackingDifficulty}
%% ]}
get_self_plus_external_partitions_list() ->
	PoolPeer = ar_pool:pool_peer(),
	PoolPartitions = get_peer_partitions(PoolPeer),
	LocalPartitions = get_unique_partitions_set(),
	lists:sort(sets:to_list(get_unique_partitions_set(PoolPartitions, LocalPartitions))).

%% @doc Return a list of unique partitions including local partitions and all
%% of CM peers' partitions. Each partition uses the same JSON structure as
%% described for get_self_plus_external_partitions_list/0.
get_cluster_partitions_list() ->
	gen_server:call(?MODULE, get_cluster_partitions_list, ?DEFAULT_CALL_TIMEOUT).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Initialize: schedule the periodic batch check and, when coordinated
%% mining is enabled, the first peer-partition refetch. Prints a console
%% notice when no cm_exit_peer is configured (this node then acts as the
%% exit node).
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	ar_util:cast_after(?BATCH_POLL_INTERVAL_MS, ?MODULE, check_batches),
	State = #state{ last_peer_response = #{} },
	State2 = case Config#config.coordinated_mining of
		false ->
			State;
		true ->
			case Config#config.cm_exit_peer of
				not_set ->
					ar:console(
						"This node is configured as a Coordinated Mining Exit Node. If this is "
						"not correct, set 'cm_exit_peer' and relaunch.~n");
				_ ->
					ok
			end,
			ar_util:cast_after(?START_DELAY, ?MODULE, refetch_peer_partitions),
			%% NOTE(review): this update is redundant - last_peer_response is
			%% already #{} in State.
			State#state{ last_peer_response = #{} }
	end,
	{ok, State2#state{ out_batch_timeout = Config#config.cm_out_batch_timeout }}.

%% Helper function to see state while testing and later for monitoring API
handle_call(get_public_state, _From, State) ->
	PublicState = {State#state.last_peer_response},
	{reply, {ok, PublicState}, State};
handle_call({get_peer, PartitionNumber}, _From, State) ->
	{reply, get_peer(PartitionNumber, State), State};
handle_call({get_peers, PartitionNumber}, _From, State) ->
	{reply, get_peers(PartitionNumber, State), State};
%% NOTE(review): values stored by the remove_peer cast have the shape
%% {false, Partitions}, which neither branch below matches - querying a
%% removed peer here would crash with case_clause; confirm that removed
%% peers are never queried.
handle_call({get_peer_partitions, Peer}, _From, State) ->
	#state{ last_peer_response = Map } = State,
	case maps:get(Peer, Map, []) of
		[] ->
			{reply, [], State};
		{true, Partitions} ->
			{reply, Partitions, State}
	end;
%% Merge local partitions with every CM peer's partitions into one sorted,
%% de-duplicated list of JSON partition structs. Pool pseudo-peers are
%% excluded from the peer contribution.
handle_call(get_cluster_partitions_list, _From, State) ->
	PeerPartitions = maps:fold(
		fun(PartitionID, Items, Acc) ->
			lists:foldl(
				fun ({{pool, _}, _, _}, Acc2) ->
						Acc2;
					({_Peer, PackingAddr, PackingDifficulty}, Acc2) ->
						sets:add_element(ar_serialize:partition_to_json_struct(
								PartitionID, ar_block:partition_size(),
								PackingAddr, PackingDifficulty), Acc2)
				end,
				Acc,
				Items
			)
		end,
		sets:new(),
		State#state.peers_by_partition
	),
	Set = get_unique_partitions_set(ar_mining_io:get_partitions(), PeerPartitions),
	{reply, lists:sort(sets:to_list(Set)), State};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Periodic tick: flush any out-batches whose timeout expired, then re-arm.
handle_cast(check_batches, State) ->
	ar_util:cast_after(?BATCH_POLL_INTERVAL_MS, ?MODULE, check_batches),
	OutBatches = check_out_batches(State),
	{noreply, State#state{ out_batches = OutBatches }};
%% Accumulate {H1, Nonce} pairs per cache_ref; once a batch reaches
%% ?BATCH_SIZE_LIMIT entries it is sent immediately, otherwise it stays in
%% out_batches (with its creation timestamp) until check_batches flushes it.
handle_cast({computed_h1, ShareableCandidate, H1, Nonce}, State) ->
	#state{ out_batches = OutBatches } = State,
	#mining_candidate{ cache_ref = CacheRef } = ShareableCandidate,
	Now = os:system_time(millisecond),
	{Start, ShareableCandidate2} = maps:get(CacheRef, OutBatches,
			{Now, ShareableCandidate}),
	H1List = [{H1, Nonce} | ShareableCandidate2#mining_candidate.cm_h1_list],
	ShareableCandidate3 = ShareableCandidate2#mining_candidate{ cm_h1_list = H1List },
	OutBatches2 = case length(H1List) >= ?BATCH_SIZE_LIMIT of
		true ->
			send_h1(ShareableCandidate3, State),
			maps:remove(CacheRef, OutBatches);
		false ->
			maps:put(CacheRef, {Start, ShareableCandidate3}, OutBatches)
	end,
	{noreply, State#state{ out_batches = OutBatches2 }};
handle_cast({compute_h2_for_peer, Candidate}, State) ->
	%% No need to batch inbound requests since ar_mining_io will cache the recall
	%% range for a short period greatly lowering the cost of processing the same
	%% multiple times across several batches.
	ar_mining_server:compute_h2_for_peer(Candidate),
	{noreply, State};
%% Build the PoA2 proof for an H2 computed on behalf of a lead peer, then
%% send the result back to that peer.
handle_cast({computed_h2_for_peer, Candidate}, State) ->
	#mining_candidate{ cm_lead_peer = Peer, chunk2 = Chunk2 } = Candidate,
	PoA2 = case ar_mining_server:prepare_poa(poa2, Candidate, #poa{}) of
		{ok, PoA} ->
			PoA;
		{error, _Error} ->
			%% Fallback. This will probably fail later, but prepare_poa/3 should
			%% have already printed several errors so we'll continue just in case.
			%% df: Is this the right fallback?..
			#poa{ chunk = Chunk2 }
	end,
	send_h2(Peer, Candidate#mining_candidate{ poa2 = PoA2 }),
	{noreply, State};
%% Periodically poll every CM peer (plus the exit peer, if it is not already
%% in the peer list) for the partitions it serves.
handle_cast(refetch_peer_partitions, State) ->
	{ok, Config} = arweave_config:get_env(),
	Peers = Config#config.cm_peers,
	Peers2 = case Config#config.cm_exit_peer == not_set
			orelse lists:member(Config#config.cm_exit_peer, Peers) of
		true ->
			%% Either we are the exit node or the exit node
			%% is already configured as yet another mining peer.
			Peers;
		false ->
			[Config#config.cm_exit_peer | Peers]
	end,
	ar_util:cast_after(Config#config.cm_poll_interval, ?MODULE, refetch_peer_partitions),
	refetch_peer_partitions(Peers2),
	{noreply, State};
%% Record a fresh partition list for Peer: mark it responsive and rebuild its
%% entries in the peers_by_partition index.
handle_cast({update_peer, {Peer, PartitionList}}, State) ->
	SetValue = {true, PartitionList},
	State2 = State#state{
		last_peer_response = maps:put(Peer, SetValue, State#state.last_peer_response)
	},
	State3 = remove_mining_peer(Peer, State2),
	State4 = add_mining_peer({Peer, PartitionList}, State3),
	{noreply, State4};
%% Mark Peer unresponsive and drop it from the mining index. Unknown peers
%% are ignored.
handle_cast({remove_peer, Peer}, State) ->
	State3 = case maps:get(Peer, State#state.last_peer_response, none) of
		none ->
			State;
		{_, OldPartitionList} ->
			SetValue = {false, OldPartitionList},
			% NOTE. We keep OldPartitionList because we don't want blinky stat
			State2 = State#state{
				last_peer_response = maps:put(Peer, SetValue,
						State#state.last_peer_response)
			},
			?LOG_INFO([{event, cm_peer_removed}, {peer, ar_util:format_peer(Peer)}]),
			remove_mining_peer(Peer, State2)
	end,
	{noreply, State3};
handle_cast(refetch_pool_peer_partitions, State) ->
	%% Casted when we are a CM exit peer and a pool client. We collect our local peer
	%% partitions and push them to the pool getting the pool's complementary partitions
	%% in response.
	UniquePeerPartitions = maps:fold(
		fun ({pool, _}, _Value, Acc) ->
				Acc;
			(_Peer, {_, Partitions}, Acc) ->
				get_unique_partitions_set(Partitions, Acc)
		end,
		sets:new(),
		State#state.last_peer_response
	),
	refetch_pool_peer_partitions(UniquePeerPartitions),
	{noreply, State};
%% Run an async GC on this process; completion is reported via handle_info.
handle_cast(garbage_collect, State) ->
	erlang:garbage_collect(self(),
			[{async, {ar_coordination, self(), erlang:monotonic_time()}}]),
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% Log the async GC outcome, but only when it failed or took longer than the
%% configured threshold.
handle_info({garbage_collect, {Name, Pid, StartTime}, GCResult}, State) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime-StartTime, native, millisecond),
	case GCResult == false orelse ElapsedTime > ?GC_LOG_THRESHOLD of
		true ->
			?LOG_DEBUG([
				{event, mining_debug_garbage_collect},
				{process, Name},
				{pid, Pid},
				{gc_time, ElapsedTime},
				{gc_result, GCResult}]);
		false ->
			ok
	end,
	{noreply, State};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Return the list of the partitions of the given Peer, to the best
%% of our knowledge.
get_peer_partitions(Peer) ->
	gen_server:call(?MODULE, {get_peer_partitions, Peer}, ?DEFAULT_CALL_TIMEOUT).
%% @doc Fast path: nothing pending, nothing to scan.
check_out_batches(#state{out_batches = OutBatches}) when map_size(OutBatches) == 0 ->
	OutBatches;
%% @doc Send every outbound H1 batch older than out_batch_timeout and
%% return the map of batches that are still accumulating.
check_out_batches(State) ->
	#state{ out_batches = OutBatches, out_batch_timeout = BatchTimeout } = State,
	Now = os:system_time(millisecond),
	maps:filter(
		fun (_CacheRef, {Start, Candidate}) ->
			case Now - Start >= BatchTimeout of
				true ->
					%% send this batch, and remove it from the map
					send_h1(Candidate, State),
					false;
				false ->
					%% not yet time to send this batch, keep it in the map
					true
			end
		end,
		OutBatches
	).

%% @doc Return all peers known to store the given partition.
%% Each stored item is {Peer, PackingAddr, PackingDifficulty}; only the
%% peer element is returned.
get_peers(PartitionNumber, State) ->
	[element(1, El) || El <- maps:get(PartitionNumber, State#state.peers_by_partition, [])].

%% @doc Return a single peer serving the given partition, or 'none'.
%% Picks the last element of the list; since add_mining_peer prepends,
%% this is the earliest-added entry still present.
get_peer(PartitionNumber, State) ->
	case get_peers(PartitionNumber, State) of
		[] ->
			none;
		Peers ->
			lists:last(Peers)
	end.

%% @doc Send the candidate's accumulated H1 list to a peer serving the
%% second recall partition. No-op when no such peer is known. The HTTP
%% request is spawned so this never blocks the gen_server.
send_h1(Candidate, State) ->
	#mining_candidate{ partition_number2 = PartitionNumber2, cm_h1_list = H1List } = Candidate,
	case get_peer(PartitionNumber2, State) of
		none ->
			ok;
		Peer ->
			Candidate2 = Candidate#mining_candidate{ label = <<"cm">> },
			spawn(fun() -> ar_http_iface_client:cm_h1_send(Peer, Candidate2) end),
			%% The pool pseudo-peer is tracked under the 'pool' stats key.
			case Peer of
				{pool, _} ->
					ar_mining_stats:h1_sent_to_peer(pool, length(H1List));
				_ ->
					ar_mining_stats:h1_sent_to_peer(Peer, length(H1List))
			end
	end.

%% @doc Send the H2 solution candidate back to the lead peer
%% (asynchronously) and record the stat.
send_h2(Peer, Candidate) ->
	spawn(fun() -> ar_http_iface_client:cm_h2_send(Peer, Candidate) end),
	case Peer of
		{pool, _} ->
			ar_mining_stats:h2_sent_to_peer(pool);
		_ ->
			ar_mining_stats:h2_sent_to_peer(Peer)
	end.
%% @doc Register the partitions served by Peer. StorageModules is a list
%% of {PartitionID, PartitionSize, PackingAddr, PackingDifficulty}; each
%% partition entry is prepended to peers_by_partition.
add_mining_peer({Peer, StorageModules}, State) ->
	Partitions = lists:map(
		fun({PartitionID, _PartitionSize, PackingAddr, PackingDifficulty}) ->
			{PartitionID, PackingAddr, PackingDifficulty}
		end, StorageModules),
	?LOG_INFO([{event, cm_peer_updated}, {peer, ar_util:format_peer(Peer)},
		{partitions, io_lib:format("~p", [[{ID, ar_util:encode(Addr), PackingDifficulty}
			|| {ID, Addr, PackingDifficulty} <- Partitions]])}]),
	PeersByPartition = lists:foldl(
		fun({PartitionID, PackingAddr, PackingDifficulty}, Acc) ->
			Items = maps:get(PartitionID, Acc, []),
			maps:put(PartitionID, [{Peer, PackingAddr, PackingDifficulty} | Items], Acc)
		end,
		State#state.peers_by_partition,
		Partitions
	),
	State#state{ peers_by_partition = PeersByPartition }.

%% @doc Remove every entry of the given Peer from peers_by_partition.
%% Partitions whose peer list becomes empty are dropped from the map
%% entirely; the original kept the empty keys forever, leaking one map
%% entry per partition ever served by a departed peer. Readers are
%% unaffected: get_peers/2 and add_mining_peer/2 default missing keys
%% to [], and get_cluster_partitions_list folds over the item lists.
remove_mining_peer(Peer, State) ->
	PeersByPartition = maps:fold(
		fun(PartitionID, Peers, Acc) ->
			Peers2 = [{Peer2, Addr, PackingDifficulty}
				|| {Peer2, Addr, PackingDifficulty} <- Peers, Peer2 /= Peer],
			case Peers2 of
				[] ->
					Acc;
				_ ->
					maps:put(PartitionID, Peers2, Acc)
			end
		end,
		#{},
		State#state.peers_by_partition
	),
	State#state{ peers_by_partition = PeersByPartition }.

%% @doc Fetch the partition tables of the given peers in parallel (in a
%% separate process) and cast the results back via update_peer/2.
refetch_peer_partitions(Peers) ->
	spawn(fun() ->
		try
			MapFun = fun(Peer) ->
				case ar_http_iface_client:get_cm_partition_table(Peer) of
					{ok, PartitionList} ->
						ar_coordination:update_peer(Peer, PartitionList);
					_ ->
						ok
				end
			end,
			ar_util:pmap(MapFun, Peers)
		catch
			throw:{pmap_timeout, _} ->
				?LOG_WARNING([{event, pmap_timeout}, {module, ?MODULE},
					{peers, Peers}]);
			ErrT:Other ->
				?LOG_ERROR([{event, pmap_error}, {module, ?MODULE},
					{peers, Peers}, {ErrT, Other}])
		end,
		%% ar_util:pmap ensures we fetch all the local up-to-date CM peer partitions first,
		%% then share them with the Pool to fetch the complementary pool CM peer partitions.
		case {ar_pool:is_client(), ar_coordination:is_exit_peer()} of
			{true, true} ->
				refetch_pool_peer_partitions();
			_ ->
				ok
		end
	end).

%% @doc Ask the server process to push our peer partitions to the pool.
refetch_pool_peer_partitions() ->
	gen_server:cast(?MODULE, refetch_pool_peer_partitions).
%% @doc Return our local partitions as a sorted list of JSON structs.
get_unique_partitions_list() ->
	Set = get_unique_partitions_set(ar_mining_io:get_partitions(), sets:new()),
	lists:sort(sets:to_list(Set)).

%% @doc Return our local partitions as a set of JSON structs.
get_unique_partitions_set() ->
	get_unique_partitions_set(ar_mining_io:get_partitions(), sets:new()).

%% @doc Fold partition tuples into a set of JSON structs. Accepts both
%% 3-tuples (bucket size implied by ar_block:partition_size()) and
%% 4-tuples carrying an explicit bucket size.
get_unique_partitions_set([], UniquePartitions) ->
	UniquePartitions;
get_unique_partitions_set([{PartitionID, MiningAddress, PackingDifficulty} | Partitions],
		UniquePartitions) ->
	get_unique_partitions_set(
		Partitions,
		sets:add_element(ar_serialize:partition_to_json_struct(PartitionID,
			ar_block:partition_size(), MiningAddress, PackingDifficulty),
			UniquePartitions)
	);
get_unique_partitions_set([{PartitionID, BucketSize, MiningAddress, PackingDifficulty}
		| Partitions], UniquePartitions) ->
	get_unique_partitions_set(
		Partitions,
		sets:add_element(
			ar_serialize:partition_to_json_struct(PartitionID, BucketSize,
				MiningAddress, PackingDifficulty),
			UniquePartitions)
	).

%% @doc Post the given set of partitions to the pool (in a separate
%% process); the pool's complementary partition list is recorded via
%% ar_coordination:update_peer/2.
refetch_pool_peer_partitions(UniquePeerPartitions) ->
	spawn(fun() ->
		JSON = ar_serialize:jsonify(lists:sort(sets:to_list(UniquePeerPartitions))),
		PoolPeer = ar_pool:pool_peer(),
		case ar_http_iface_client:post_cm_partition_table_to_pool(PoolPeer, JSON) of
			{ok, PartitionList} ->
				ar_coordination:update_peer(PoolPeer, PartitionList);
			_ ->
				ok
		end
	end).



================================================
FILE: apps/arweave/src/ar_data_discovery.erl
================================================
-module(ar_data_discovery).

-behaviour(gen_server).

-export([start_link/0, get_bucket_peers/1, get_footprint_bucket_peers/1,
		collect_peers/0, pick_peers/2, report_bucket_stats/0]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_data_discovery.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% Server state:
%%   peer_queue     - queue of peers waiting to have their buckets fetched
%%   peers_pending  - number of in-flight fetch processes
%%   network_map    - Peer => sync buckets
%%   footprint_map  - Peer => footprint buckets
%%   expiration_map - Peer => expiration timer reference
-record(state, {
	peer_queue,
	peers_pending,
	network_map,
	footprint_map,
	expiration_map
}).

%% The frequency of asking peers about their data.
-ifdef(AR_TEST).
-define(DATA_DISCOVERY_COLLECT_PEERS_FREQUENCY_MS, 2 * 1000).
-else.
-define(DATA_DISCOVERY_COLLECT_PEERS_FREQUENCY_MS, 4 * 60 * 1000).
-endif.

%% The frequency of logging bucket stats.
-ifdef(AR_TEST).
-define(REPORT_BUCKET_STATS_FREQUENCY_MS, 10 * 1000).
-else.
-define(REPORT_BUCKET_STATS_FREQUENCY_MS, 60 * 1000).
-endif.

%% The expiration time of peer's buckets. If a peer is found in the list of
%% the first best ?DATA_DISCOVERY_COLLECT_PEERS_COUNT peers (checked every
%% ?DATA_DISCOVERY_COLLECT_PEERS_FREQUENCY_MS milliseconds), the timer is refreshed.
-define(PEER_EXPIRATION_TIME_MS, 60 * 60 * 1000).

%% The maximum number of requests running at any time.
-define(DATA_DISCOVERY_PARALLEL_PEER_REQUESTS, 10).

%% The number of peers from the top of the rating to schedule for inclusion
%% into the peer map every DATA_DISCOVERY_COLLECT_PEERS_FREQUENCY_MS milliseconds.
-define(DATA_DISCOVERY_COLLECT_PEERS_COUNT, 1000).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Return the list of ?QUERY_BEST_PEERS_COUNT peers who have at least one byte of
%% data synced in the given Bucket of size ?NETWORK_DATA_BUCKET_SIZE. 80% of the peers
%% are chosen from the 20% of peers with the biggest share in the given bucket.
get_bucket_peers(Bucket) ->
	case ets:member(ar_peers, block_connections) of
		true ->
			[];
		false ->
			get_bucket_peers(Bucket, {Bucket, 0, no_peer}, [])
	end.

%% Walk the ordered ETS table starting from the synthetic cursor
%% {Bucket, 0, no_peer}, collecting peers while the key still belongs to
%% the requested bucket.
get_bucket_peers(Bucket, Cursor, Peers) ->
	case ets:next(?MODULE, Cursor) of
		{Bucket, _Share, Peer} = Key ->
			get_bucket_peers(Bucket, Key, [Peer | Peers]);
		_ ->
			% matches `end_of_table` or an unexpected value
			ar_util:unique(Peers)
	end.

%% @doc Return the list of ?QUERY_BEST_PEERS_COUNT peers who have at least one byte of
%% data synced in the given footprint bucket of size ?NETWORK_FOOTPRINT_BUCKET_SIZE.
%% 80% of the peers are chosen from the 20% of peers with the biggest share
%% in the given bucket.
get_footprint_bucket_peers(Bucket) ->
	case ets:member(ar_peers, block_connections) of
		true ->
			[];
		false ->
			get_footprint_bucket_peers(Bucket, {Bucket, 0, no_peer}, [])
	end.

%% Same traversal as get_bucket_peers/3, over the footprint buckets table.
get_footprint_bucket_peers(Bucket, Cursor, Peers) ->
	case ets:next(ar_data_discovery_footprint_buckets, Cursor) of
		{Bucket, _Share, Peer} = Key ->
			get_footprint_bucket_peers(Bucket, Key, [Peer | Peers]);
		_ ->
			ar_util:unique(Peers)
	end.

%% @doc Return a list of peers where 80% of the peers are randomly chosen
%% from the first 20% of Peers and the other 20% of the peers are randomly
%% chosen from the other 80% of Peers.
pick_peers(Peers, N) ->
	pick_peers(Peers, length(Peers), N).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	{ok, _} = ar_timer:apply_interval(
		?DATA_DISCOVERY_COLLECT_PEERS_FREQUENCY_MS,
		?MODULE,
		collect_peers,
		[],
		#{ skip_on_shutdown => false }
	),
	{ok, _} = ar_timer:apply_interval(
		?REPORT_BUCKET_STATS_FREQUENCY_MS,
		?MODULE,
		report_bucket_stats,
		[],
		#{ skip_on_shutdown => true }
	),
	gen_server:cast(?MODULE, update_network_data_map),
	ok = ar_events:subscribe(peer),
	{ok, #state{
		peer_queue = queue:new(),
		peers_pending = 0,
		network_map = #{},
		footprint_map = #{},
		expiration_map = #{}
	}}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {request, Request}]),
	{reply, ok, State}.
%% Queue a peer for bucket discovery.
handle_cast({add_peer, Peer}, #state{ peer_queue = Queue } = State) ->
	{noreply, State#state{ peer_queue = queue:in(Peer, Queue) }};

%% Pump the peer queue while fewer than
%% ?DATA_DISCOVERY_PARALLEL_PEER_REQUESTS fetches are in flight. Each
%% dequeued peer is handled in a monitored process; the 'DOWN' message
%% decrements peers_pending.
handle_cast(update_network_data_map, #state{ peers_pending = N } = State)
		when N < ?DATA_DISCOVERY_PARALLEL_PEER_REQUESTS ->
	case queue:out(State#state.peer_queue) of
		{empty, _} ->
			ar_util:cast_after(200, ?MODULE, update_network_data_map),
			{noreply, State};
		{{value, Peer}, Queue} ->
			monitor(process, spawn_link(
				fun() ->
					fetch_sync_buckets(Peer),
					fetch_footprint_buckets(Peer)
				end
			)),
			gen_server:cast(?MODULE, update_network_data_map),
			{noreply, State#state{ peers_pending = N + 1, peer_queue = Queue }}
	end;

%% At the concurrency cap: back off and retry shortly.
handle_cast(update_network_data_map, State) ->
	ar_util:cast_after(200, ?MODULE, update_network_data_map),
	{noreply, State};

%% Record the peer's sync buckets in the state map and mirror each
%% {Bucket, Share, Peer} into the ordered ETS table used by
%% get_bucket_peers/1; also refresh the peer's expiration timer.
handle_cast({add_peer_sync_buckets, Peer, SyncBuckets}, State) ->
	#state{ network_map = Map } = State,
	State2 = refresh_expiration_timer(Peer, State),
	Map2 = maps:put(Peer, SyncBuckets, Map),
	ar_sync_buckets:foreach(
		fun(Bucket, Share) ->
			ets:insert(?MODULE, {{Bucket, Share, Peer}})
		end,
		?NETWORK_DATA_BUCKET_SIZE,
		SyncBuckets
	),
	{noreply, State2#state{ network_map = Map2 }};

%% Same as add_peer_sync_buckets, for the footprint buckets table.
handle_cast({add_peer_footprint_buckets, Peer, FootprintBuckets}, State) ->
	#state{ footprint_map = Map } = State,
	State2 = refresh_expiration_timer(Peer, State),
	Map2 = maps:put(Peer, FootprintBuckets, Map),
	ar_sync_buckets:foreach(
		fun(Bucket, Share) ->
			ets:insert(ar_data_discovery_footprint_buckets, {{Bucket, Share, Peer}})
		end,
		?NETWORK_FOOTPRINT_BUCKET_SIZE,
		FootprintBuckets
	),
	{noreply, State2#state{ footprint_map = Map2 }};

%% Drop the peer from both bucket maps, delete its ETS entries, and
%% discard its expiration timer reference.
handle_cast({remove_peer, Peer}, State) ->
	#state{ network_map = Map, footprint_map = FootprintMap, expiration_map = E } = State,
	Map2 = case maps:take(Peer, Map) of
		error ->
			Map;
		{SyncBuckets, Map3} ->
			ar_sync_buckets:foreach(
				fun(Bucket, Share) ->
					ets:delete(?MODULE, {Bucket, Share, Peer})
				end,
				?NETWORK_DATA_BUCKET_SIZE,
				SyncBuckets
			),
			Map3
	end,
	FootprintMap2 = case maps:take(Peer, FootprintMap) of
		error ->
			FootprintMap;
		{FootprintBuckets, Map4} ->
			ar_sync_buckets:foreach(
				fun(Bucket, Share) ->
					ets:delete(ar_data_discovery_footprint_buckets,
						{Bucket, Share, Peer})
				end,
				?NETWORK_FOOTPRINT_BUCKET_SIZE,
				FootprintBuckets
			),
			Map4
	end,
	E2 = maps:remove(Peer, E),
	{noreply, State#state{ network_map = Map2, footprint_map = FootprintMap2,
			expiration_map = E2 }};

handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {cast, Cast}]),
	{noreply, State}.

%% A monitored fetch process exited (normally or not): free a slot.
handle_info({'DOWN', _, process, _, _}, #state{ peers_pending = N } = State) ->
	{noreply, State#state{ peers_pending = N - 1 }};

handle_info({event, peer, {removed, Peer}}, State) ->
	gen_server:cast(?MODULE, {remove_peer, Peer}),
	{noreply, State};

handle_info({event, peer, _}, State) ->
	{noreply, State};

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Fewer peers than requested: return them all.
pick_peers(Peers, PeerLen, N) when N >= PeerLen ->
	Peers;
pick_peers([], _PeerLen, _N) ->
	[];
pick_peers(_Peers, _PeerLen, N) when N =< 0 ->
	[];
pick_peers(Peers, PeerLen, N) ->
	%% N: the target number of peers to pick
	%% Best: top 20% of the Peers list
	%% Other: the rest of the Peers list
	{Best, Other} = lists:split(max(PeerLen div 5, 1), Peers),
	%% TakeBest: Select 80% of N worth of Best - or all of Best if Best is short.
	TakeBest = max((8 * N) div 10, 1),
	Part1 = ar_util:pick_random(Best, min(length(Best), TakeBest)),
	%% TakeOther: rather than strictly take 20% of N, take enough to ensure we're
	%% getting the full N of picked peers.
	TakeOther = N - length(Part1),
	Part2 = ar_util:pick_random(Other, min(length(Other), TakeOther)),
	Part1 ++ Part2.
%% @doc Select up to ?DATA_DISCOVERY_COLLECT_PEERS_COUNT peers and queue
%% each of them for bucket discovery via an {add_peer, Peer} cast.
collect_peers() ->
	{ok, Config} = arweave_config:get_env(),
	Candidates =
		case Config#config.sync_from_local_peers_only of
			true ->
				Config#config.local_peers;
			false ->
				%% rank peers by current rating since we care about their
				%% recent throughput performance
				ar_peers:get_peers(current)
		end,
	Limit = ?DATA_DISCOVERY_COLLECT_PEERS_COUNT,
	collect_peers(lists:sublist(Candidates, Limit)).

%% @doc Queue every peer in the list for discovery.
collect_peers(Peers) ->
	lists:foreach(
		fun(Peer) -> gen_server:cast(?MODULE, {add_peer, Peer}) end,
		Peers
	).

%% @doc Log bucket statistics for each configured storage module.
report_bucket_stats() ->
	StartTime = erlang:monotonic_time(millisecond),
	{ok, Config} = arweave_config:get_env(),
	lists:foreach(
		fun(Module) ->
			StoreID = ar_storage_module:id(Module),
			{RangeStart, RangeEnd} = ar_storage_module:get_range(StoreID),
			report_bucket_stats(StoreID, RangeStart, RangeEnd, normal),
			report_bucket_stats(StoreID, RangeStart, RangeEnd, footprint)
		end,
		Config#config.storage_modules
	),
	ElapsedMs = erlang:monotonic_time(millisecond) - StartTime,
	?LOG_DEBUG([{event, bucket_stats_complete}, {elapsed_ms, ElapsedMs}]).
%% @doc Compute and publish peer-coverage stats for the store's range,
%% bucketed by ?NETWORK_DATA_BUCKET_SIZE (normal) or by footprint
%% buckets (footprint).
report_bucket_stats(StoreID, RangeStart, RangeEnd, normal) ->
	StartBucket = RangeStart div ?NETWORK_DATA_BUCKET_SIZE,
	EndBucket = (RangeEnd - 1) div ?NETWORK_DATA_BUCKET_SIZE,
	TotalBuckets = EndBucket - StartBucket + 1,
	{AllPeers, ZeroCount, HealthyCount} =
		bucket_stats(StartBucket, EndBucket, ?MODULE, sets:new()),
	set_bucket_stats_metrics(StoreID, normal, AllPeers, TotalBuckets, ZeroCount,
		HealthyCount);
report_bucket_stats(StoreID, RangeStart, RangeEnd, footprint) ->
	StartBucket = ar_footprint_record:get_footprint_bucket(RangeStart + ?DATA_CHUNK_SIZE),
	EndBucket = ar_footprint_record:get_footprint_bucket(RangeEnd),
	TotalBuckets = max(0, EndBucket - StartBucket + 1),
	{AllPeers, ZeroCount, HealthyCount} =
		bucket_stats(StartBucket, EndBucket, ar_data_discovery_footprint_buckets,
			sets:new()),
	set_bucket_stats_metrics(StoreID, footprint, AllPeers, TotalBuckets, ZeroCount,
		HealthyCount).

%% @doc Scan buckets StartBucket..EndBucket in the given ETS table.
%% Returns {AllPeers, ZeroCount, HealthyCount}: the set of every peer
%% seen, the number of buckets with no peers, and the number of buckets
%% with at least 3 peers.
bucket_stats(StartBucket, EndBucket, _Table, AllPeers) when StartBucket > EndBucket ->
	%% Empty range: nothing to scan.
	{AllPeers, 0, 0};
bucket_stats(StartBucket, EndBucket, Table, AllPeers) ->
	bucket_stats(StartBucket, EndBucket, Table, AllPeers, 0, 0).

bucket_stats(Bucket, EndBucket, _Table, AllPeers, ZeroCount, HealthyCount)
		when Bucket > EndBucket ->
	{AllPeers, ZeroCount, HealthyCount};
bucket_stats(Bucket, EndBucket, Table, AllPeers, ZeroCount, HealthyCount) ->
	{BucketPeers, AllPeers2} = get_bucket_peers_and_collect(Bucket, Table, AllPeers),
	PeerCount = length(BucketPeers),
	{ZeroCount2, HealthyCount2} =
		case PeerCount of
			0 ->
				{ZeroCount + 1, HealthyCount};
			%% "Healthy" means at least 3 peers serve the bucket.
			N when N >= 3 ->
				{ZeroCount, HealthyCount + 1};
			_ ->
				{ZeroCount, HealthyCount}
		end,
	bucket_stats(Bucket + 1, EndBucket, Table, AllPeers2, ZeroCount2, HealthyCount2).

%% @doc Start the per-bucket ETS traversal from the synthetic cursor
%% {Bucket, 0, no_peer}.
get_bucket_peers_and_collect(Bucket, Table, AllPeers) ->
	get_bucket_peers_and_collect(Bucket, Table, {Bucket, 0, no_peer}, [], AllPeers).
%% @doc Walk the ordered ETS table while the key still belongs to Bucket,
%% accumulating the bucket's peers and folding each into the AllPeers set.
get_bucket_peers_and_collect(Bucket, Table, Cursor, BucketPeers, AllPeers) ->
	case ets:next(Table, Cursor) of
		{Bucket, _Share, Peer} = Key ->
			get_bucket_peers_and_collect(Bucket, Table, Key,
				[Peer | BucketPeers], sets:add_element(Peer, AllPeers));
		_ ->
			%% end_of_table or a key from another bucket.
			{ar_util:unique(BucketPeers), AllPeers}
	end.

%% @doc Publish the computed stats as prometheus gauges, labelled by
%% bucket type and storage module.
set_bucket_stats_metrics(StoreID, Type, AllPeers, TotalBuckets, ZeroCount, HealthyCount) ->
	NumPeers = sets:size(AllPeers),
	StoreIDLabel = ar_storage_module:label(StoreID),
	prometheus_gauge:set(data_discovery, [Type, StoreIDLabel, num_peers], NumPeers),
	prometheus_gauge:set(data_discovery, [Type, StoreIDLabel, total_buckets],
		TotalBuckets),
	prometheus_gauge:set(data_discovery, [Type, StoreIDLabel, zero_peer_count],
		ZeroCount),
	prometheus_gauge:set(data_discovery, [Type, StoreIDLabel, healthy_peer_count],
		HealthyCount).

%% @doc Fetch the peer's sync buckets over HTTP and hand them to the
%% server; failures are only logged.
fetch_sync_buckets(Peer) ->
	case ar_http_iface_client:get_sync_buckets(Peer) of
		{ok, SyncBuckets} ->
			gen_server:cast(?MODULE, {add_peer_sync_buckets, Peer, SyncBuckets});
		{error, request_type_not_found} ->
			?LOG_DEBUG([{event, sync_buckets_request_type_not_found},
				{peer, ar_util:format_peer(Peer)}]);
		{error, Reason} ->
			ar_http_iface_client:log_failed_request(Reason,
				[{event, failed_to_fetch_sync_buckets},
				{peer, ar_util:format_peer(Peer)},
				{reason, io_lib:format("~p", [Reason])}]);
		Error ->
			?LOG_DEBUG([{event, failed_to_fetch_sync_buckets},
				{peer, ar_util:format_peer(Peer)},
				{reason, io_lib:format("~p", [Error])}])
	end.

%% @doc Fetch footprint buckets only from peers whose release supports
%% the GET footprint endpoint.
fetch_footprint_buckets(Peer) ->
	case ar_peers:get_peer_release(Peer) >= ?GET_FOOTPRINT_SUPPORT_RELEASE of
		true ->
			fetch_footprint_buckets2(Peer);
		false ->
			ok
	end.
%% @doc Fetch the peer's footprint buckets over HTTP and hand them to the
%% server; every failure mode is only logged.
fetch_footprint_buckets2(Peer) ->
	Result = ar_http_iface_client:get_footprint_buckets(Peer),
	case Result of
		{ok, SyncBuckets} ->
			gen_server:cast(?MODULE, {add_peer_footprint_buckets, Peer, SyncBuckets});
		{error, request_type_not_found} ->
			?LOG_DEBUG([{event, footprint_buckets_request_type_not_found},
				{peer, ar_util:format_peer(Peer)}]);
		{error, Reason} ->
			ar_http_iface_client:log_failed_request(Reason,
				[{event, failed_to_fetch_footprint_buckets},
				{peer, ar_util:format_peer(Peer)},
				{reason, io_lib:format("~p", [Reason])}]);
		Error ->
			?LOG_DEBUG([{event, failed_to_fetch_footprint_buckets},
				{peer, ar_util:format_peer(Peer)},
				{reason, io_lib:format("~p", [Error])}])
	end.

%% @doc Cancel the peer's previous expiration timer (if any) and arm a
%% fresh one that will cast {remove_peer, Peer} after
%% ?PEER_EXPIRATION_TIME_MS.
refresh_expiration_timer(Peer, State) ->
	#state{ expiration_map = Map } = State,
	case maps:find(Peer, Map) of
		error ->
			ok;
		{ok, OldTimer} ->
			timer:cancel(OldTimer)
	end,
	NewTimer = ar_util:cast_after(?PEER_EXPIRATION_TIME_MS, ?MODULE,
		{remove_peer, Peer}),
	State#state{ expiration_map = maps:put(Peer, NewTimer, Map) }.



================================================
FILE: apps/arweave/src/ar_data_doctor.erl
================================================
-module(ar_data_doctor).

-export([main/0, main/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_chunk_storage.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%% @doc Entry point without arguments: print usage and exit non-zero.
main() ->
	main([]).

main([]) ->
	help(),
	init:stop(1);
%% @doc Dispatch the first argument as a sub-command; the remaining
%% arguments are passed through to the sub-command module.
main([Command | CommandArgs]) ->
	logger:set_handler_config(default, level, error),
	Success =
		case Command of
			"merge" ->
				ar_doctor_merge:main(CommandArgs);
			"bench" ->
				ar_doctor_bench:main(CommandArgs);
			"dump" ->
				ar_doctor_dump:main(CommandArgs);
			"inspect" ->
				ar_doctor_inspect:main(CommandArgs);
			_ ->
				false
		end,
	case Success of
		true ->
			init:stop(0);
		_ ->
			help(),
			init:stop(1)
	end.

%% @doc Print the usage of every sub-command.
help() ->
	ar:console("~n"),
	ar_doctor_merge:help(),
	ar:console("~n"),
	ar_doctor_bench:help(),
	ar:console("~n"),
	ar_doctor_dump:help(),
	ar:console("~n"),
	ar_doctor_inspect:help(),
	ar:console("~n").
================================================
FILE: apps/arweave/src/ar_data_root_sync.erl
================================================
-module(ar_data_root_sync).

-behaviour(gen_server).

-export([start_link/1, name/1, store_data_roots/4, store_data_roots_sync/4,
		validate_data_roots/4]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_data_sync.hrl").
-include("ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% Per-worker state: the storage module ID, its byte range, and the
%% current scan position within that range.
-record(state, {
	store_id,
	range_start,
	range_end,
	scan_cursor
}).

%% Minimum peer release that serves the data-roots endpoint.
-define(DATA_ROOTS_SYNC_RELEASE_NUMBER, 91).

%% How long we wait before (re-)scanning our range for unsynced data roots.
-ifdef(AR_TEST).
-define(DATA_ROOTS_SYNC_SCAN_INTERVAL_MS, 2000).
-else.
-define(DATA_ROOTS_SYNC_SCAN_INTERVAL_MS, 600_000). % 10 minutes.
-endif.

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link(StoreID) ->
	Name = name(StoreID),
	gen_server:start_link({local, Name}, ?MODULE, [StoreID], []).

%% @doc The registered name of the worker for the given storage module.
name(StoreID) ->
	list_to_atom("ar_data_root_sync_" ++ ar_storage_module:label(StoreID)).

%% @doc Store the given data roots.
%% NOTE(review): both store functions address ar_data_sync_default (the
%% default ar_data_sync worker), not this module's own servers — the
%% actual storage is handled there; confirm against ar_data_sync.
store_data_roots(BlockStart, BlockEnd, TXRoot, Entries) ->
	gen_server:cast(ar_data_sync_default,
		{store_data_roots, BlockStart, BlockEnd, TXRoot, Entries}).

%% @doc Store the given data roots synchronously.
store_data_roots_sync(BlockStart, BlockEnd, TXRoot, Entries) ->
	gen_server:call(ar_data_sync_default,
		{store_data_roots_sync, BlockStart, BlockEnd, TXRoot, Entries}, 120000).

%% @doc Validate the given data roots against the local block index.
%% Also recompute the TXRoot from entries and verify Merkle paths.
%% Runs a fixed pipeline: block bounds -> block size -> entry pairs ->
%% tx root match -> per-entry tx path verification; the first failure
%% short-circuits as {error, Reason}.
validate_data_roots(TXRoot, BlockSize, Entries, Offset) ->
	{BlockStart, BlockEnd, ExpectedTXRoot} = ar_block_index:get_block_bounds(Offset),
	CheckBlockBounds =
		case Offset >= BlockStart andalso Offset < BlockEnd of
			false ->
				{error, invalid_block_bounds};
			true ->
				ok
		end,
	CheckBlockSize =
		case CheckBlockBounds of
			ok ->
				case BlockSize == BlockEnd - BlockStart of
					false ->
						{error, invalid_block_size};
					true ->
						ok
				end;
			Error ->
				Error
		end,
	PrepareDataRootPairs =
		case CheckBlockSize of
			ok ->
				prepare_data_root_pairs(Entries, BlockStart, BlockSize);
			Error2 ->
				Error2
		end,
	ValidateTXRoot =
		case PrepareDataRootPairs of
			{ok, Triplets} ->
				case TXRoot == ExpectedTXRoot of
					false ->
						{error, invalid_tx_root};
					true ->
						{ok, Triplets}
				end;
			{error, _} = Error3 ->
				Error3
		end,
	case ValidateTXRoot of
		{ok, Triplets2} ->
			case verify_tx_paths(Triplets2, TXRoot, BlockStart, BlockEnd, 0) of
				ok ->
					{ok, {TXRoot, BlockSize, Entries}};
				Error4 ->
					Error4
			end;
		Error5 ->
			Error5
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([StoreID]) ->
	{RangeStart, RangeEnd} = ar_storage_module:get_range(StoreID),
	gen_server:cast(self(), sync),
	{ok, #state{ store_id = StoreID, range_start = RangeStart,
			range_end = RangeEnd, scan_cursor = RangeStart }}.

%% Re-arm itself: while the node has not joined, retry every 500 ms;
%% afterwards, scan (when data-root syncing is enabled) and reschedule
%% with the delay returned by sync_block_data_roots/1.
handle_cast(sync, State) ->
	case ar_node:is_joined() of
		false ->
			ar_util:cast_after(500, self(), sync),
			{noreply, State};
		true ->
			{ok, Config} = arweave_config:get_env(),
			{Delay, State2} =
				case Config#config.enable_data_roots_syncing of
					true ->
						sync_block_data_roots(State);
					false ->
						{?DATA_ROOTS_SYNC_SCAN_INTERVAL_MS, State}
				end,
			ar_util:cast_after(Delay, self(), sync),
			{noreply, State2}
	end;

handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ignored, State}.

terminate(_Reason, _State) ->
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Run one scan pass from the cursor up to the smaller of the range
%% end and the disk pool threshold. Returns {Delay, State2}: a full pass
%% resets the cursor to range_start and waits the scan interval; a
%% partial pass continues immediately from where it stopped.
sync_block_data_roots(#state{ store_id = StoreID, range_start = RangeStart,
		range_end = RangeEnd, scan_cursor = Cursor } = State) ->
	End = min(RangeEnd, ar_data_sync:get_disk_pool_threshold()),
	{ok, Cursor2} = sync_block_data_roots(StoreID, Cursor, End),
	{Delay, Cursor3} =
		case Cursor2 >= End of
			true ->
				{?DATA_ROOTS_SYNC_SCAN_INTERVAL_MS, RangeStart};
			false ->
				{0, Cursor2}
		end,
	{Delay, State#state{ scan_cursor = Cursor3 }}.

%% @doc Walk the block index block by block from Cursor to RangeEnd,
%% fetching the data roots of every block whose roots are not yet synced.
sync_block_data_roots(_StoreID, Cursor, RangeEnd) when Cursor >= RangeEnd ->
	{ok, Cursor};
sync_block_data_roots(StoreID, Cursor, RangeEnd) ->
	{BlockStart, BlockEnd, TXRoot} = ar_block_index:get_block_bounds(Cursor),
	Cursor2 =
		case BlockStart >= RangeEnd of
			true ->
				RangeEnd;
			false ->
				case ar_data_sync:are_data_roots_synced(BlockStart, BlockEnd,
						TXRoot) of
					true ->
						BlockEnd;
					false ->
						maybe_fetch_and_store(BlockStart, BlockEnd),
						BlockEnd
				end
		end,
	sync_block_data_roots(StoreID, Cursor2, RangeEnd).

%% @doc Fetch the data roots covering BlockStart from a peer that supports
%% the endpoint and store them. A response whose BlockSize does not match
%% the block bounds is logged and skipped: the original code asserted it
%% with a bare match (BlockSize = BlockEnd - BlockStart), so a peer
%% returning a mismatched size crashed this worker with a badmatch.
maybe_fetch_and_store(BlockStart, BlockEnd) ->
	Peers = ar_peers:get_peers(current),
	Peers2 = lists:filter(
		fun(Peer) ->
			ar_peers:get_peer_release(Peer) >= ?DATA_ROOTS_SYNC_RELEASE_NUMBER
		end,
		Peers
	),
	case fetch_data_roots_from_peers(Peers2, BlockStart) of
		{ok, {TXRoot, BlockSize, Entries}} when BlockSize =:= BlockEnd - BlockStart ->
			store_data_roots(BlockStart, BlockEnd, TXRoot, Entries);
		{ok, {_TXRoot, BlockSize, _Entries}} ->
			?LOG_WARNING([{event, fetched_data_roots_block_size_mismatch},
				{block_start, BlockStart}, {block_end, BlockEnd},
				{block_size, BlockSize}]),
			ok;
		_ ->
			ok
	end.
%% @doc Try each peer in order until one returns the data roots for the
%% given offset; failures are logged at debug level.
fetch_data_roots_from_peers([], _Offset) ->
	{error, not_found};
fetch_data_roots_from_peers([Peer | Rest], Offset) ->
	case ar_http_iface_client:get_data_roots(Peer, Offset) of
		{ok, _} = Reply ->
			Reply;
		{error, Error} ->
			?LOG_DEBUG([{event, fetch_data_roots_from_peers_error},
				{peer, Peer}, {offset, Offset},
				{error, io_lib:format("~p", [Error])}]),
			fetch_data_roots_from_peers(Rest, Offset)
	end.

%% @doc Turn the {DataRoot, TXSize, TXStartOffset, TXPath} entries into
%% {DataRoot, MerkleLabel, TXPath} triplets and check that the padded
%% transaction sizes sum to the block size. Zero-size transactions and
%% negative merkle labels are rejected.
prepare_data_root_pairs(Entries, BlockStart, BlockSize) ->
	Result = lists:foldr(
		fun	(_, {error, _} = Error) ->
				Error;
			({_DataRoot, 0, _TXStartOffset, _TXPath}, _Acc) ->
				{error, invalid_zero_tx_size};
			({DataRoot, TXSize, TXStartOffset, TXPath}, {ok, {Total, Acc}}) ->
				%% The label is the tx end offset relative to the block start.
				MerkleLabel = TXStartOffset + TXSize - BlockStart,
				case MerkleLabel >= 0 of
					true ->
						PaddedSize = get_padded_size(TXSize, BlockStart),
						{ok, {Total + PaddedSize,
							[{DataRoot, MerkleLabel, TXPath} | Acc]}};
					false ->
						{error, invalid_entry_merkle_label}
				end
		end,
		{ok, {0,[]}},
		Entries
	),
	case Result of
		{ok, {Total, Entries2}} ->
			case Total == BlockSize of
				true ->
					{ok, Entries2};
				false ->
					{error, invalid_total_tx_size}
			end;
		Error6 ->
			Error6
	end.

%% @doc Pad the size up to the chunk boundary when the block is past the
%% strict data split threshold; otherwise return it unchanged.
get_padded_size(TXSize, BlockStart) ->
	case BlockStart >= ar_block:strict_data_split_threshold() of
		true ->
			ar_poa:get_padded_offset(TXSize, 0);
		false ->
			TXSize
	end.

%% @doc Verify each entry's TXPath against TXRoot. The expected start
%% offset of each transaction is the (padded) end offset of the previous
%% one, beginning at 0.
verify_tx_paths([], _TXRoot, _BlockStart, _BlockEnd, _TXStartOffset) ->
	ok;
verify_tx_paths([Entry | Entries], TXRoot, BlockStart, BlockEnd, TXStartOffset) ->
	{DataRoot, TXEndOffset, TXPath} = Entry,
	BlockSize = BlockEnd - BlockStart,
	case ar_merkle:validate_path(TXRoot, TXEndOffset - 1, BlockSize, TXPath) of
		false ->
			{error, invalid_tx_path};
		{DataRoot, TXStartOffset, TXEndOffset} ->
			PaddedEndOffset = get_padded_size(TXEndOffset, BlockStart),
			verify_tx_paths(Entries, TXRoot, BlockStart, BlockEnd, PaddedEndOffset);
		_ ->
			{error, invalid_tx_path}
	end.
================================================
FILE: apps/arweave/src/ar_data_root_sync_sup.erl
================================================
-module(ar_data_root_sync_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).

%% internal
-export([register_workers/0]).

-include("ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

init([]) ->
	Workers = register_workers(),
	{ok, {{one_for_one, 5, 10}, Workers}}.

%% @doc Build one ar_data_root_sync worker child spec per configured
%% storage module.
register_workers() ->
	{ok, Config} = arweave_config:get_env(),
	lists:map(
		fun(StorageModule) ->
			StoreID = ar_storage_module:id(StorageModule),
			Name = ar_data_root_sync:name(StoreID),
			?CHILD_WITH_ARGS(ar_data_root_sync, worker, Name, [StoreID])
		end,
		Config#config.storage_modules
	).



================================================
FILE: apps/arweave/src/ar_data_sync.erl
================================================
-module(ar_data_sync).

-behaviour(gen_server).
-export([name/1, start_link/2, register_workers/0, join/1, add_tip_block/2, add_block/2,
		invalidate_bad_data_record/4, is_chunk_proof_ratio_attractive/3,
		add_data_root_to_disk_pool/3, maybe_drop_data_root_from_disk_pool/3,
		get_chunk/2, get_chunk_data/2, get_chunk_proof/2, get_tx_data/1, get_tx_data/2,
		get_tx_offset/1, get_tx_offset_data_in_range/2, has_data_root/2,
		request_tx_data_removal/3, request_data_removal/4,
		record_disk_pool_chunks_count/0, record_chunk_cache_size_metric/0,
		is_chunk_cache_full/0, is_disk_space_sufficient/1, get_chunk_by_byte/2,
		advance_chunks_index_cursor/1, read_chunk/3, write_chunk/5, read_data_path/2,
		increment_chunk_cache_size/0, decrement_chunk_cache_size/0,
		get_chunk_metadata_range/3, get_merkle_rebase_threshold/0,
		is_footprint_record_supported/3, get_data_roots_for_offset/1,
		are_data_roots_synced/3, get_disk_pool_threshold/0]).

-export([add_chunk_to_disk_pool/5]).

-export([debug_get_disk_pool_chunks/0]).

%% For data-doctor tools
-export([init_kv/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-export([enqueue_intervals/3, remove_expired_disk_pool_data_roots/0]).

-include("ar.hrl").
-include("ar_sup.hrl").
-include("ar_consensus.hrl").
-include("ar_poa.hrl").
-include("ar_data_discovery.hrl").
-include("ar_data_sync.hrl").
-include("ar_sync_buckets.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-ifdef(AR_TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.

%% The key for storing migration cursor in the migrations_index database.
-define(FOOTPRINT_MIGRATION_CURSOR_KEY, <<"footprint_migration_cursor">>).

-ifdef(AR_TEST).
-define(COLLECT_SYNC_INTERVALS_FREQUENCY_MS, 1_000).
-else.
-define(COLLECT_SYNC_INTERVALS_FREQUENCY_MS, 10_000).
-endif.

-ifdef(AR_TEST).
-define(DEVICE_LOCK_WAIT, 100).
-else.
-define(DEVICE_LOCK_WAIT, 5_000).
-endif.

%% The number of chunks to migrate per batch during footprint migration.
-ifdef(AR_TEST).
-define(FOOTPRINT_MIGRATION_BATCH_SIZE, 10).
-else.
-define(FOOTPRINT_MIGRATION_BATCH_SIZE, 200).
-endif.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the registered name of the ar_data_sync gen_server serving
%% the given storage module (StoreID).
name(StoreID) ->
	list_to_atom("ar_data_sync_" ++ ar_storage_module:label(StoreID)).

start_link(Name, Args) ->
	gen_server:start_link({local, Name}, ?MODULE, Args, []).

%% @doc Register the workers that will be monitored by ar_data_sync_sup.erl.
%% One worker per configured storage module, one for the default module
%% (the disk pool), and one per repack-in-place storage module.
register_workers() ->
	{ok, Config} = arweave_config:get_env(),
	StorageModuleWorkers = lists:map(
		fun(StorageModule) ->
			StoreID = ar_storage_module:id(StorageModule),
			%% Derive the worker name via name/1 so the naming scheme lives
			%% in exactly one place (previously duplicated inline).
			Name = name(StoreID),
			?CHILD_WITH_ARGS(ar_data_sync, worker, Name, [Name, {StoreID, none}])
		end,
		Config#config.storage_modules
	),
	DefaultStorageModuleWorker = ?CHILD_WITH_ARGS(ar_data_sync, worker,
			ar_data_sync_default, [ar_data_sync_default, {?DEFAULT_MODULE, none}]),
	RepackInPlaceWorkers = lists:map(
		fun({StorageModule, TargetPacking}) ->
			StoreID = ar_storage_module:id(StorageModule),
			Name = name(StoreID),
			?CHILD_WITH_ARGS(ar_data_sync, worker, Name,
					[Name, {StoreID, TargetPacking}])
		end,
		Config#config.repack_in_place_storage_modules
	),
	StorageModuleWorkers ++ [DefaultStorageModuleWorker] ++ RepackInPlaceWorkers.

%% @doc Notify the server the node has joined the network on the given block index.
join(RecentBI) ->
	gen_server:cast(ar_data_sync_default, {join, RecentBI}).

%% @doc Notify the server about the new tip block.
add_tip_block(BlockTXPairs, RecentBI) ->
	gen_server:cast(ar_data_sync_default, {add_tip_block, BlockTXPairs, RecentBI}).

%% @doc Ask the worker serving StoreID to drop the given (bad) data record.
invalidate_bad_data_record(AbsoluteEndOffset, ChunkSize, StoreID, Case) ->
	gen_server:cast(name(StoreID), {invalidate_bad_data_record,
			{AbsoluteEndOffset, ChunkSize, StoreID, Case}}).

%% @doc The condition which is true if the chunk is too small compared to the proof.
%% Small chunks make syncing slower and increase space amplification. A small chunk
%% is accepted if it is the last chunk of the corresponding transaction - such chunks
%% may be produced by ar_tx:chunk_binary/1, the legacy splitting method used to split
%% v1 data or determine the data root of a v2 tx when data is uploaded via the data field.
%% Due to the block limit we can only get up to 1k such chunks per block.
is_chunk_proof_ratio_attractive(ChunkSize, TXSize, DataPath) ->
	DataPathSize = byte_size(DataPath),
	case DataPathSize of
		0 ->
			false;
		_ ->
			%% extract_note may exit on a malformed path; treat that as unattractive.
			case catch ar_merkle:extract_note(DataPath) of
				{'EXIT', _} ->
					false;
				Offset ->
					Offset == TXSize orelse DataPathSize =< ChunkSize
			end
	end.

%% @doc Store the given chunk if the proof is valid.
%% Called when a chunk is pushed to the node via POST /chunk.
%% The chunk is placed in the disk pool. The periodic process
%% scanning the disk pool will later record it as synced.
%% The item is removed from the disk pool when the chunk's offset
%% drops below the disk pool threshold.
add_chunk_to_disk_pool(DataRoot, DataPath, Chunk, Offset, TXSize) ->
	%% Disk-pool chunks are always kept in the default storage module's stores.
	DataRootIndex = {data_root_index, ?DEFAULT_MODULE},
	%% Current total byte size of the disk pool, maintained in ETS.
	[{_, DiskPoolSize}] = ets:lookup(ar_data_sync_state, disk_pool_size),
	DiskPoolChunksIndex = {disk_pool_chunks_index, ?DEFAULT_MODULE},
	DataRootKey = << DataRoot/binary, TXSize:?OFFSET_KEY_BITSIZE >>,
	DataRootOffsetReply = get_data_root_offset(DataRootKey, ?DEFAULT_MODULE),
	DataRootInDiskPool = ets:lookup(ar_disk_pool_data_roots, DataRootKey),
	ChunkSize = byte_size(Chunk),
	{ok, Config} = arweave_config:get_env(),
	DataRootLimit = Config#config.max_disk_pool_data_root_buffer_mb * ?MiB,
	DiskPoolLimit = Config#config.max_disk_pool_buffer_mb * ?MiB,
	%% Stage 1: admission control. Reject the chunk if its data root is unknown
	%% (neither confirmed nor pending) or if accepting it would exceed either
	%% the per-data-root or the global disk pool size limit. On success, yield
	%% the updated {Size, Timestamp, TXIDSet} value for ar_disk_pool_data_roots.
	CheckDiskPool =
		case {DataRootOffsetReply, DataRootInDiskPool} of
			{not_found, []} ->
				?LOG_INFO([{event, failed_to_add_chunk_to_disk_pool},
						{reason, data_root_not_found},
						{offset, Offset},
						{data_root, ar_util:encode(DataRoot)}]),
				{error, data_root_not_found};
			{not_found, [{_, {Size, Timestamp, TXIDSet}}]} ->
				%% Pending (unconfirmed) data root: enforce both limits.
				case Size + ChunkSize > DataRootLimit
						orelse DiskPoolSize + ChunkSize > DiskPoolLimit of
					true ->
						?LOG_INFO([{event, failed_to_add_chunk_to_disk_pool},
								{reason, exceeds_disk_pool_size_limit1},
								{offset, Offset},
								{data_root_size, Size},
								{chunk_size, ChunkSize},
								{data_root_limit, DataRootLimit},
								{disk_pool_size, DiskPoolSize},
								{disk_pool_limit, DiskPoolLimit}]),
						{error, exceeds_disk_pool_size_limit};
					false ->
						{ok, {Size + ChunkSize, Timestamp, TXIDSet}}
				end;
			_ ->
				%% Confirmed data root: only the global disk pool limit applies.
				case DiskPoolSize + ChunkSize > DiskPoolLimit of
					true ->
						?LOG_INFO([{event, failed_to_add_chunk_to_disk_pool},
								{reason, exceeds_disk_pool_size_limit2},
								{offset, Offset},
								{chunk_size, ChunkSize},
								{disk_pool_size, DiskPoolSize},
								{disk_pool_limit, DiskPoolLimit}]),
						{error, exceeds_disk_pool_size_limit};
					false ->
						%% Reuse the existing timestamp when the data root is
						%% already tracked so its expiration is not extended.
						Timestamp =
							case DataRootInDiskPool of
								[] ->
									os:system_time(microsecond);
								[{_, {_, Timestamp2, _}}] ->
									Timestamp2
							end,
						{ok, {ChunkSize, Timestamp, not_set}}
				end
		end,
	%% Stage 2: validate the merkle proof (data path) against the data root.
	ValidateProof =
		case CheckDiskPool of
			{error, _} = Error ->
				Error;
			{ok, DiskPoolDataRootValue} ->
				case validate_data_path(DataRoot, Offset, TXSize, DataPath, Chunk) of
					false ->
						?LOG_INFO([{event, failed_to_add_chunk_to_disk_pool},
								{reason, invalid_proof},
								{offset, Offset}]),
						{error, invalid_proof};
					{true, PassesBase, PassesStrict, PassesRebase, EndOffset} ->
						{ok, {EndOffset, PassesBase, PassesStrict, PassesRebase,
								DiskPoolDataRootValue}}
				end
		end,
	%% Stage 3: deduplicate. Skip the write when the chunk is already in the
	%% disk pool or already synced at every relevant absolute offset.
	CheckSynced =
		case ValidateProof of
			{error, _} = Error2 ->
				Error2;
			{ok, {EndOffset2, _PassesBase2, _PassesStrict2, _PassesRebase2,
					{_, Timestamp3, _}} = PassedState2} ->
				DataPathHash = crypto:hash(sha256, DataPath),
				%% The disk pool key orders entries by data-root timestamp.
				DiskPoolChunkKey = << Timestamp3:256, DataPathHash/binary >>,
				case ar_kv:get(DiskPoolChunksIndex, DiskPoolChunkKey) of
					{ok, _DiskPoolChunk} ->
						%% The chunk is already in disk pool.
						{synced_disk_pool, EndOffset2};
					not_found ->
						case DataRootOffsetReply of
							not_found ->
								{ok, {DataPathHash, DiskPoolChunkKey, PassedState2}};
							{ok, {TXStartOffset, _}} ->
								%% NOTE(review): Config is already bound above, so
								%% this re-match only asserts the configuration has
								%% not changed in between; it could be removed.
								{ok, Config} = arweave_config:get_env(),
								case chunk_offsets_synced(DataRootIndex, DataRootKey,
										%% The same data may be uploaded several times.
										%% Here we only accept the chunk if any of the
										%% last configured number of instances of this
										%% data is not filled in yet.
										EndOffset2, TXStartOffset,
										Config#config.max_duplicate_data_roots) of
									true ->
										synced;
									false ->
										{ok, {DataPathHash, DiskPoolChunkKey,
												PassedState2}}
								end
						end;
					{error, Reason} ->
						?LOG_WARNING([{event, failed_to_read_chunk_from_disk_pool},
								{reason, io_lib:format("~p", [Reason])},
								{data_path_hash, ar_util:encode(DataPathHash)},
								{data_root, ar_util:encode(DataRoot)},
								{relative_offset, EndOffset2}]),
						{error, failed_to_store_chunk}
				end
		end,
	%% Stage 4: persist the chunk data and record it in the disk pool indices,
	%% then bump the in-memory disk pool accounting and metrics.
	case CheckSynced of
		synced ->
			ok;
		{synced_disk_pool, EndOffset4} ->
			%% Already stored; report `temporary` when we do not expect the
			%% chunk to end up in a configured storage module.
			case is_estimated_long_term_chunk(DataRootOffsetReply, EndOffset4) of
				false ->
					temporary;
				true ->
					ok
			end;
		{error, _} = Error4 ->
			Error4;
		{ok, {DataPathHash2, DiskPoolChunkKey2, {EndOffset3, PassesBase3, PassesStrict3,
				PassesRebase3, DiskPoolDataRootValue2}}} ->
			ChunkDataKey = get_chunk_data_key(DataPathHash2),
			case put_chunk_data(ChunkDataKey, ?DEFAULT_MODULE, {Chunk, DataPath}) of
				{error, Reason2} ->
					?LOG_WARNING([{event, failed_to_store_chunk_in_disk_pool},
							{reason, io_lib:format("~p", [Reason2])},
							{data_path_hash, ar_util:encode(DataPathHash2)},
							{data_root, ar_util:encode(DataRoot)},
							{relative_offset, EndOffset3}]),
					{error, failed_to_store_chunk};
				ok ->
					DiskPoolChunkValue = term_to_binary({EndOffset3, ChunkSize,
							DataRoot, TXSize, ChunkDataKey, PassesBase3,
							PassesStrict3, PassesRebase3}),
					case ar_kv:put(DiskPoolChunksIndex, DiskPoolChunkKey2,
							DiskPoolChunkValue) of
						{error, Reason3} ->
							?LOG_WARNING([{event, failed_to_record_chunk_in_disk_pool},
									{reason, io_lib:format("~p", [Reason3])},
									{data_path_hash, ar_util:encode(DataPathHash2)},
									{data_root, ar_util:encode(DataRoot)},
									{relative_offset, EndOffset3}]),
							{error, failed_to_store_chunk};
						ok ->
							ets:insert(ar_disk_pool_data_roots,
									{DataRootKey, DiskPoolDataRootValue2}),
							ets:update_counter(ar_data_sync_state, disk_pool_size,
									{2, ChunkSize}),
							prometheus_gauge:inc(pending_chunks_size, ChunkSize),
							case is_estimated_long_term_chunk(DataRootOffsetReply,
									EndOffset3) of
								false ->
									temporary;
								true ->
									ok
							end
					end
			end
	end.
%% @doc Store the given value in the chunk data DB.
-spec put_chunk_data(
		ChunkDataKey :: binary(),
		StoreID :: term(),
		Value :: DataPath :: binary() | {Chunk :: binary(), DataPath :: binary()})
	-> ok | {error, term()}.
put_chunk_data(ChunkDataKey, StoreID, Value) ->
	%% The value is either the data path alone or {Chunk, DataPath}.
	ar_kv:put({chunk_data_db, StoreID}, ChunkDataKey, term_to_binary(Value)).

%% @doc Fetch the serialized chunk data by key from the given store.
get_chunk_data(ChunkDataKey, StoreID) ->
	ar_kv:get({chunk_data_db, StoreID}, ChunkDataKey).

%% @doc Remove the chunk data stored under the given key.
delete_chunk_data(ChunkDataKey, StoreID) ->
	ar_kv:delete({chunk_data_db, StoreID}, ChunkDataKey).

%% @doc Record the metadata of the chunk ending at the given absolute offset.
-spec put_chunk_metadata(
		AbsoluteEndOffset :: non_neg_integer(),
		StoreID :: term(),
		Metadata :: term()) -> ok | {error, term()}.
put_chunk_metadata(AbsoluteEndOffset, StoreID,
		{_ChunkDataKey, _TXRoot, _DataRoot, _TXPath, _Offset, _ChunkSize} = Metadata) ->
	EncodedOffset = << AbsoluteEndOffset:?OFFSET_KEY_BITSIZE >>,
	ar_kv:put({chunks_index, StoreID}, EncodedOffset, term_to_binary(Metadata)).

%% @doc Fetch and deserialize the metadata of the chunk ending at the
%% given absolute offset. Return not_found when no such chunk is indexed.
get_chunk_metadata(AbsoluteEndOffset, StoreID) ->
	EncodedOffset = << AbsoluteEndOffset:?OFFSET_KEY_BITSIZE >>,
	case ar_kv:get({chunks_index, StoreID}, EncodedOffset) of
		not_found ->
			not_found;
		{ok, Serialized} ->
			{ok, binary_to_term(Serialized, [safe])}
	end.

%% @doc Remove the chunk metadata recorded at the given absolute end offset.
delete_chunk_metadata(AbsoluteEndOffset, StoreID) ->
	ar_kv:delete({chunks_index, StoreID}, << AbsoluteEndOffset:?OFFSET_KEY_BITSIZE >>).

%% @doc Return {ok, Map} | {error, Error} where
%% Map is
%% AbsoluteEndOffset => {ChunkDataKey, TXRoot, DataRoot, TXPath, RelativeOffset, ChunkSize}
%% map with all the chunk metadata found within the given range AbsoluteEndOffset >= Start,
%% AbsoluteEndOffset =< End. Return the empty map if no metadata is found.
get_chunk_metadata_range(Start, End, StoreID) ->
	case ar_kv:get_range({chunks_index, StoreID},
			<< Start:?OFFSET_KEY_BITSIZE >>, << End:?OFFSET_KEY_BITSIZE >>) of
		{ok, Range} ->
			%% Decode every key back into an integer offset and every value
			%% back into the metadata tuple.
			Decoded = [begin
				<< AbsoluteEndOffset:?OFFSET_KEY_BITSIZE >> = EncodedKey,
				{AbsoluteEndOffset, binary_to_term(EncodedValue, [safe])}
			end || {EncodedKey, EncodedValue} <- maps:to_list(Range)],
			{ok, maps:from_list(Decoded)};
		Error ->
			Error
	end.
%% @doc Delete the chunk metadata for all chunks with absolute end offsets in
%% (Start, End] (ar_kv:delete_range/3 treats the upper bound as exclusive,
%% hence the +1 on both bounds).
delete_chunk_metadata_range(Start, End, State) ->
	#sync_data_state{ chunks_index = ChunksIndex } = State,
	ar_kv:delete_range(ChunksIndex, << (Start + 1):?OFFSET_KEY_BITSIZE >>,
			<< (End + 1):?OFFSET_KEY_BITSIZE >>).

%% @doc Return true if we expect the chunk with the given data root index value and
%% relative end offset to end up in one of the configured storage modules.
is_estimated_long_term_chunk(DataRootOffsetReply, EndOffset) ->
	WeaveSize = ar_node:get_current_weave_size(),
	case DataRootOffsetReply of
		not_found ->
			%% A chunk from a pending transaction.
			is_offset_vicinity_covered(WeaveSize);
		{ok, {TXStartOffset, _}} ->
			%% Note: previously the weave size was fetched a second time here and
			%% re-matched against the already-bound WeaveSize, which would crash
			%% with badmatch if the weave grew between the two calls. The single
			%% fetch above is reused instead.
			Size = ar_node:get_recent_max_block_size(),
			AbsoluteEndOffset = TXStartOffset + EndOffset,
			case AbsoluteEndOffset > WeaveSize - Size * 4 of
				true ->
					%% A relatively recent offset - do not expect this chunk to be
					%% persisted unless we have some storage modules configured for
					%% the space ahead (the data may be rearranged after a reorg).
					is_offset_vicinity_covered(AbsoluteEndOffset);
				false ->
					ar_storage_module:has_any(AbsoluteEndOffset)
			end
	end.

%% @doc Return true if some configured storage module covers the area of
%% two recent-maximum-block-sizes on either side of Offset.
is_offset_vicinity_covered(Offset) ->
	Size = ar_node:get_recent_max_block_size(),
	ar_storage_module:has_range(max(0, Offset - Size * 2), Offset + Size * 2).

%% @doc Notify the server about the new pending data root (added to mempool).
%% The server may accept pending chunks and store them in the disk pool.
%% Zero-size data roots carry no chunks - nothing to track.
add_data_root_to_disk_pool(_, 0, _) ->
	ok;
%% Data roots shorter than 32 bytes cannot be keyed - ignore them.
add_data_root_to_disk_pool(DataRoot, _, _) when byte_size(DataRoot) < 32 ->
	ok;
add_data_root_to_disk_pool(DataRoot, TXSize, TXID) ->
	Key = << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>,
	case ets:lookup(ar_disk_pool_data_roots, Key) of
		[] ->
			%% First sighting: start tracking with zero accumulated size.
			ets:insert(ar_disk_pool_data_roots, {Key,
					{0, os:system_time(microsecond), sets:from_list([TXID])}});
		[{_, {_, _, not_set}}] ->
			%% TXIDSet == not_set marks a confirmed data root; it no longer
			%% tracks pending transactions, so there is nothing to add.
			ok;
		[{_, {Size, Timestamp, TXIDSet}}] ->
			%% Already tracked: just record one more pending transaction.
			ets:insert(ar_disk_pool_data_roots,
					{Key, {Size, Timestamp, sets:add_element(TXID, TXIDSet)}})
	end,
	ok.

%% @doc Notify the server the given data root has been removed from the mempool.
maybe_drop_data_root_from_disk_pool(_, 0, _) ->
	ok;
maybe_drop_data_root_from_disk_pool(DataRoot, _, _) when byte_size(DataRoot) < 32 ->
	ok;
maybe_drop_data_root_from_disk_pool(DataRoot, TXSize, TXID) ->
	Key = << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>,
	case ets:lookup(ar_disk_pool_data_roots, Key) of
		[] ->
			ok;
		[{_, {_, _, not_set}}] ->
			%% Confirmed data root - only expiration removes it now.
			ok;
		[{_, {Size, Timestamp, TXIDs}}] ->
			case sets:subtract(TXIDs, sets:from_list([TXID])) of
				%% NOTE(review): this clause matches on *term* equality of the
				%% subtract result with the original set, not set equality; a
				%% structurally different but equal set would fall through to
				%% the next clause, causing a harmless redundant insert.
				TXIDs ->
					ok;
				TXIDs2 ->
					case sets:size(TXIDs2) of
						0 ->
							%% Last pending transaction gone - stop tracking.
							ets:delete(ar_disk_pool_data_roots, Key);
						_ ->
							ets:insert(ar_disk_pool_data_roots,
									{Key, {Size, Timestamp, TXIDs2}})
					end
			end
	end,
	ok.

%% @doc Fetch the chunk corresponding to Offset. When Offset is less than or equal to
%% the strict split data threshold, the chunk returned contains the byte with the given
%% Offset (the indexing is 1-based). Otherwise, the chunk returned ends in the same 256 KiB
%% bucket as Offset counting from the first 256 KiB after the strict split data threshold.
%% The strict split data threshold is weave_size of the block preceding the fork 2.5 block.
%%
%% Options:
%% _________________________________________________________________________________________
%% packing             | required; spora_2_5 or unpacked or {spora_2_6, Addr}
%%                     | or {composite, Addr, Difficulty} or {replica_2_9, Addr}
%%                     | (tuple arguments reconstructed - TODO confirm against callers)
%% _________________________________________________________________________________________
%% pack                | if false and a packed chunk is requested but stored unpacked or
%%                     | an unpacked chunk is requested but stored packed, return
%%                     | {error, chunk_not_found} instead of packing/unpacking;
%%                     | true by default;
%% _________________________________________________________________________________________
%% bucket_based_offset | does not play a role for the offsets before
%%                     | strict_data_split_threshold (weave_size of the block preceding
%%                     | the fork 2.5 block); if true, return the chunk which ends in
%%                     | the same 256 KiB bucket starting from
%%                     | strict_data_split_threshold where borders belong to the
%%                     | buckets on the left; true by default.
get_chunk(Offset, #{ packing := Packing } = Options) ->
	Pack = maps:get(pack, Options, true),
	RequestOrigin = maps:get(origin, Options, unknown),
	%% Determine whether (and in which store, with which packing) the offset
	%% is recorded as synced. Miners search every module covering the offset;
	%% other callers consult the global record, restricted to the requested
	%% packing when repacking-on-read is disallowed.
	IsRecorded =
		case {RequestOrigin, Pack} of
			{miner, _} ->
				StorageModules = ar_storage_module:get_all(Offset),
				ar_sync_record:is_recorded_any(Offset, ar_data_sync, StorageModules);
			{_, false} ->
				ar_sync_record:is_recorded(Offset, {ar_data_sync, Packing});
			{_, true} ->
				ar_sync_record:is_recorded(Offset, ar_data_sync)
		end,
	SeekOffset =
		case maps:get(bucket_based_offset, Options, true) of
			true ->
				ar_chunk_storage:get_chunk_seek_offset(Offset);
			false ->
				Offset
		end,
	case IsRecorded of
		{{true, StoredPacking}, StoreID} ->
			%% Known store and packing - delegate to the internal get_chunk/7.
			get_chunk(Offset, SeekOffset, Pack, Packing, StoredPacking, StoreID,
					RequestOrigin);
		{true, StoreID} ->
			%% Recorded but without packing information - treat as missing.
			UnpackedReply = ar_sync_record:is_recorded(Offset, {ar_data_sync, unpacked}),
			log_chunk_error(RequestOrigin, chunk_record_not_associated_with_packing,
					[{store_id, StoreID},
					{seek_offset, SeekOffset},
					{is_recorded_unpacked, io_lib:format("~p", [UnpackedReply])}]),
			{error, chunk_not_found};
		Reply ->
			%% Not recorded. Only log the details for miner requests - the
			%% extra diagnostics below are gathered to debug mining misses.
			UnpackedReply = ar_sync_record:is_recorded(Offset, {ar_data_sync, unpacked}),
			Modules = ar_storage_module:get_all(Offset),
			ModuleIDs = [ar_storage_module:id(Module) || Module <- Modules],
			RootRecords = [ets:lookup(sync_records, {ar_data_sync, ID})
					|| ID <- ModuleIDs],
			case RequestOrigin of
				miner ->
					log_chunk_error(RequestOrigin, chunk_record_not_found,
							[{modules_covering_offset, ModuleIDs},
							{root_sync_records, RootRecords},
							{seek_offset, SeekOffset},
							{reply, io_lib:format("~p", [Reply])},
							{is_recorded_unpacked,
									io_lib:format("~p", [UnpackedReply])}]);
				_ ->
					ok
			end,
			{error, chunk_not_found}
	end.

%% @doc Fetch the merkle proofs for the chunk corresponding to Offset.
get_chunk_proof(Offset, Options) ->
	RequestOrigin = maps:get(origin, Options, unknown),
	IsRecorded = ar_sync_record:is_recorded(Offset, ar_data_sync),
	SeekOffset =
		case maps:get(bucket_based_offset, Options, true) of
			true ->
				ar_chunk_storage:get_chunk_seek_offset(Offset);
			false ->
				Offset
		end,
	case IsRecorded of
		{{true, StoredPacking}, StoreID} ->
			get_chunk_proof(Offset, SeekOffset, StoredPacking, StoreID, RequestOrigin);
		_ ->
			{error, chunk_not_found}
	end.

%% @doc Fetch the transaction data. Return {error, tx_data_too_big} if
%% the size is bigger than ?MAX_SERVED_TX_DATA_SIZE, unless the limitation
%% is disabled in the configuration.
get_tx_data(TXID) ->
	{ok, Config} = arweave_config:get_env(),
	SizeLimit =
		case lists:member(serve_tx_data_without_limits, Config#config.enable) of
			true ->
				infinity;
			false ->
				?MAX_SERVED_TX_DATA_SIZE
		end,
	get_tx_data(TXID, SizeLimit).

%% @doc Fetch the transaction data. Return {error, tx_data_too_big} if
%% the size is bigger than SizeLimit.
get_tx_data(TXID, SizeLimit) ->
	case get_tx_offset(TXID) of
		{error, not_found} ->
			{error, not_found};
		{error, failed_to_read_tx_offset} ->
			{error, failed_to_read_tx_data};
		{ok, {_EndOffset, Size}} when Size > SizeLimit ->
			{error, tx_data_too_big};
		{ok, {EndOffset, Size}} ->
			{ok, Config} = arweave_config:get_env(),
			ShouldPack = lists:member(pack_served_chunks, Config#config.enable),
			get_tx_data(EndOffset - Size, EndOffset, [], ShouldPack)
	end.

%% @doc Return the global end offset and size for the given transaction.
get_tx_offset(TXID) ->
	get_tx_offset({tx_index, ?DEFAULT_MODULE}, TXID).

%% @doc Return {ok, [{TXID, AbsoluteStartOffset, AbsoluteEndOffset}, ...]}
%% where AbsoluteStartOffset, AbsoluteEndOffset are transaction borders
%% (not clipped by the given range) for all TXIDs intersecting the given range.
get_tx_offset_data_in_range(Start, End) ->
	get_tx_offset_data_in_range({tx_offset_index, ?DEFAULT_MODULE},
			{tx_index, ?DEFAULT_MODULE}, Start, End).

%% @doc Return true if the given {DataRoot, DataSize} is in the mempool
%% or in the index.
has_data_root(DataRoot, DataSize) ->
	DataRootKey = << DataRoot:32/binary, DataSize:256 >>,
	%% Check the in-memory disk pool set first, then the persistent index.
	ets:member(ar_disk_pool_data_roots, DataRootKey)
		orelse case get_data_root_offset(DataRootKey, ?DEFAULT_MODULE) of
			{ok, _} ->
				true;
			_ ->
				false
		end.

%% @doc Record the metadata of the given block.
add_block(B, SizeTaggedTXs) ->
	gen_server:call(ar_data_sync_default, {add_block, B, SizeTaggedTXs},
			?DEFAULT_CALL_TIMEOUT).

%% @doc Request the removal of the transaction data.
request_tx_data_removal(TXID, Ref, ReplyTo) ->
	TXIndex = {tx_index, ?DEFAULT_MODULE},
	case ar_kv:get(TXIndex, TXID) of
		{ok, Serialized} ->
			%% The index stores the tx's global end offset and size.
			{EndOffset, Size} = binary_to_term(Serialized, [safe]),
			remove_range(EndOffset - Size, EndOffset, Ref, ReplyTo);
		not_found ->
			?LOG_WARNING([{event, tx_offset_not_found}, {tx, ar_util:encode(TXID)}]),
			ok;
		{error, Reason} ->
			?LOG_ERROR([{event, failed_to_fetch_blacklisted_tx_offset},
					{tx, ar_util:encode(TXID)}, {reason, Reason}]),
			ok
	end.

%% @doc Request the removal of the given byte range.
request_data_removal(Start, End, Ref, ReplyTo) ->
	remove_range(Start, End, Ref, ReplyTo).

%% @doc Return true if the in-memory data chunk cache is full. Return not_initialized
%% if there is no information yet.
is_chunk_cache_full() ->
	case {ets:lookup(ar_data_sync_state, chunk_cache_size_limit),
			ets:lookup(ar_data_sync_state, chunk_cache_size)} of
		{[{_, Limit}], [{_, Size}]} when Size > Limit ->
			true;
		{[{_, _}], _} ->
			false;
		_ ->
			not_initialized
	end.

-ifdef(AR_TEST).
%% @doc In tests, disk space is treated as always sufficient unless the
%% storage module has not been initialized yet.
is_disk_space_sufficient(StoreID) ->
	case is_disk_space_sufficient2(StoreID) of
		not_initialized ->
			not_initialized;
		_ ->
			true
	end.
-else.
%% @doc Return true if we have sufficient disk space to write new data for the
%% given StoreID. Return not_initialized if there is no information yet.
is_disk_space_sufficient(StoreID) ->
	is_disk_space_sufficient2(StoreID).
-endif.

%% @doc Read the per-store disk space flag maintained in ETS by the
%% disksup event handler; not_initialized before the first measurement.
is_disk_space_sufficient2(StoreID) ->
	case ets:lookup(ar_data_sync_state, {is_disk_space_sufficient, StoreID}) of
		[{_, Flag}] when is_boolean(Flag) ->
			Flag;
		_ ->
			not_initialized
	end.
%% @doc Look up the chunk whose 256-KiB key bucket contains (or follows) the
%% given byte. Return {ok, Key, FullMetaData} where FullMetaData additionally
%% carries the decoded absolute end offset, or {error, Reason}.
get_chunk_by_byte(Byte, StoreID) ->
	Result = ar_kv:get_next_by_prefix({chunks_index, StoreID}, ?OFFSET_KEY_PREFIX_BITSIZE,
			?OFFSET_KEY_BITSIZE, << Byte:?OFFSET_KEY_BITSIZE >>),
	case Result of
		{error, Reason} ->
			{error, Reason};
		{ok, Key, Metadata} ->
			<< AbsoluteEndOffset:?OFFSET_KEY_BITSIZE >> = Key,
			{
				ChunkDataKey, TXRoot, DataRoot, TXPath,
				RelativeOffset, ChunkSize
			} = binary_to_term(Metadata, [safe]),
			FullMetaData = {AbsoluteEndOffset, ChunkDataKey, TXRoot, DataRoot, TXPath,
					RelativeOffset, ChunkSize},
			{ok, Key, FullMetaData}
	end.

%% @doc: handle situation where get_chunks_by_byte returns invalid_iterator, so we can't
%% use the chunk's end offset to advance the cursor.
%%
%% get_chunk_by_byte looks for a key with the same prefix or the next prefix.
%% Therefore, if there is no such key, it does not make sense to look for any
%% key smaller than the prefix + 2 in the next iteration.
advance_chunks_index_cursor(Cursor) ->
	%% Use an exact integer shift; the previous trunc(math:pow(2, N)) went
	%% through IEEE doubles and silently loses precision for N >= 53.
	PrefixSpaceSize = 1 bsl (?OFFSET_KEY_BITSIZE - ?OFFSET_KEY_PREFIX_BITSIZE),
	((Cursor div PrefixSpaceSize) + 2) * PrefixSpaceSize.

%% @doc Read the chunk stored under ChunkDataKey. The chunk data DB holds either
%% {Chunk, DataPath} or just DataPath - in the latter case the chunk itself
%% lives in ar_chunk_storage and is fetched from there.
read_chunk(Offset, ChunkDataKey, StoreID) ->
	case get_chunk_data(ChunkDataKey, StoreID) of
		not_found ->
			not_found;
		{ok, Value} ->
			case binary_to_term(Value, [safe]) of
				{Chunk, DataPath} ->
					{ok, {Chunk, DataPath}};
				DataPath ->
					case ar_chunk_storage:get(Offset - 1, StoreID) of
						not_found ->
							not_found;
						{_EndOffset, Chunk} ->
							{ok, {Chunk, DataPath}}
					end
			end;
		Error ->
			Error
	end.

%% @doc Convenience wrapper unpacking #chunk_metadata{} into write_chunk/7.
write_chunk(Offset, ChunkMetadata, Chunk, Packing, StoreID) ->
	#chunk_metadata{
		chunk_data_key = ChunkDataKey,
		chunk_size = ChunkSize,
		data_path = DataPath
	} = ChunkMetadata,
	write_chunk(Offset, ChunkDataKey, Chunk, ChunkSize, DataPath, Packing, StoreID).

read_data_path(ChunkDataKey, StoreID) ->
	read_data_path(undefined, ChunkDataKey, StoreID).

%% The first argument is introduced to match the read_chunk/3 signature.
read_data_path(_Offset, ChunkDataKey, StoreID) ->
	case get_chunk_data(ChunkDataKey, StoreID) of
		{ok, Serialized} ->
			%% The stored term is either {Chunk, DataPath} or a bare DataPath.
			DataPath = case binary_to_term(Serialized, [safe]) of
				{_Chunk, Path} ->
					Path;
				Path ->
					Path
			end,
			{ok, DataPath};
		not_found ->
			not_found;
		Error ->
			Error
	end.

%% @doc Decrease the cached-chunk counter, never going below zero on init.
decrement_chunk_cache_size() ->
	ets:update_counter(ar_data_sync_state, chunk_cache_size, {2, -1},
			{chunk_cache_size, 0}).

%% @doc Increase the cached-chunk counter, creating it at 1 if absent.
increment_chunk_cache_size() ->
	ets:update_counter(ar_data_sync_state, chunk_cache_size, {2, 1},
			{chunk_cache_size, 1}).

debug_get_disk_pool_chunks() ->
	debug_get_disk_pool_chunks(first).

%% @doc Iterate the whole disk pool chunks index, collecting {Key, Value} pairs.
debug_get_disk_pool_chunks(Cursor) ->
	case ar_kv:get_next({disk_pool_chunks_index, ?DEFAULT_MODULE}, Cursor) of
		none ->
			[];
		{ok, Key, Value} ->
			%% Seek strictly past Key by appending one byte to it.
			NextCursor = << Key/binary, "a" >>,
			[{Key, Value} | debug_get_disk_pool_chunks(NextCursor)]
	end.

%% @doc Check if the footprint record should be updated for the given chunk.
%% We maintain the footprint record for all chunks so that footprint-based syncing
%% correctly identifies already-synced chunks. Note: in the early weave (before the
%% strict data split threshold), multiple small chunks may map to the same bucket,
%% making the footprint record imprecise. This is acceptable since footprint syncing
%% is primarily for replica 2.9 data and small chunks can use "normal" syncing.
is_footprint_record_supported(_AbsoluteOffset, _ChunkSize, _Packing) ->
	true.

%% @doc Return the disk pool threshold, a byte offset where
%% the disk pool begins - the data above this offset is considered
%% to belong to the disk pool. For example, we do not store the
%% disk pool data in the storage modules due to the risk of orphans.
get_disk_pool_threshold() ->
	case ets:lookup(ar_data_sync_state, disk_pool_threshold) of
		[{_, Threshold}] ->
			Threshold;
		[] ->
			0
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% The default-module worker owns the disk pool, the global indices and the
%% shared ETS accounting; storage-module workers are started by the second clause.
init({?DEFAULT_MODULE = StoreID, _}) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	[ok, ok] = ar_events:subscribe([node_state, disksup]),
	State = init_kv(StoreID),
	%% One-off migrations of legacy on-disk index layouts.
	move_disk_pool_index(State),
	move_data_root_index(State),
	{ok, _} = ar_timer:apply_interval(
		?RECORD_DISK_POOL_CHUNKS_COUNT_FREQUENCY_MS,
		ar_data_sync,
		record_disk_pool_chunks_count,
		[],
		#{ skip_on_shutdown => false }
	),
	StateMap = read_data_sync_state(),
	CurrentBI = maps:get(block_index, StateMap),
	%% Maintain a map of pending, recently uploaded, and orphaned data roots.
	%% << DataRoot:32/binary, TXSize:256 >> => {Size, Timestamp, TXIDSet}.
	%%
	%% Unconfirmed chunks can be accepted only after their data roots end up in this set.
	%% New chunks for these data roots are accepted until the corresponding size reaches
	%% #config.max_disk_pool_data_root_buffer_mb or the total size of added pending and
	%% seeded chunks reaches #config.max_disk_pool_buffer_mb. When a data root is orphaned,
	%% its timestamp is refreshed so that the chunks have chance to be reincluded later.
	%% After a data root expires, the corresponding chunks are removed from
	%% disk_pool_chunks_index and if they do not belong to any storage module - from storage.
	%% TXIDSet keeps track of pending transaction identifiers - if all pending transactions
	%% with the << DataRoot:32/binary, TXSize:256 >> key are dropped from the mempool,
	%% the corresponding entry is removed from DiskPoolDataRoots. When a data root is
	%% confirmed, TXIDSet is set to not_set - from this point on, the key is only dropped
	%% after expiration.
	DiskPoolDataRoots = maps:get(disk_pool_data_roots, StateMap),
	recalculate_disk_pool_size(DiskPoolDataRoots, State),
	DiskPoolThreshold = maps:get(disk_pool_threshold, StateMap),
	ets:insert(ar_data_sync_state, {disk_pool_threshold, DiskPoolThreshold}),
	State2 = State#sync_data_state{
		block_index = CurrentBI,
		weave_size = maps:get(weave_size, StateMap),
		disk_pool_cursor = first,
		disk_pool_threshold = DiskPoolThreshold,
		store_id = StoreID,
		sync_status = init_sync_status(StoreID)
	},
	?LOG_INFO([{event, ar_data_sync_start}, {store_id, StoreID},
			{range_start, State2#sync_data_state.range_start},
			{range_end, State2#sync_data_state.range_end}]),
	{ok, _} = ar_timer:apply_interval(
		?REMOVE_EXPIRED_DATA_ROOTS_FREQUENCY_MS,
		?MODULE,
		remove_expired_disk_pool_data_roots,
		[],
		#{ skip_on_shutdown => false }
	),
	%% Kick off the configured number of concurrent disk pool scan jobs.
	lists:foreach(
		fun(_DiskPoolJobNumber) ->
			gen_server:cast(self(), process_disk_pool_item)
		end,
		lists:seq(1, Config#config.disk_pool_jobs)
	),
	gen_server:cast(self(), store_sync_state),
	%% NOTE(review): Config is already bound above; this re-match only asserts
	%% the configuration has not changed since and could be removed.
	{ok, Config} = arweave_config:get_env(),
	%% Derive the chunk cache limit from free RAM when not configured:
	%% ~30% of free memory divided by the 256 KiB chunk size, capped at 1000
	%% and rounded up to a multiple of 100.
	Limit =
		case Config#config.data_cache_size_limit of
			undefined ->
				Free = proplists:get_value(free_memory,
						memsup:get_system_memory_data(), 2000000000),
				Limit2 = min(1000, erlang:ceil(Free * 0.9 / 3 / 262144)),
				Limit3 = ar_util:ceil_int(Limit2, 100),
				Limit3;
			Limit2 ->
				Limit2
		end,
	ar:console("~nSetting the data chunk cache size limit to ~B chunks.~n", [Limit]),
	ets:insert(ar_data_sync_state, {chunk_cache_size_limit, Limit}),
	ets:insert(ar_data_sync_state, {chunk_cache_size, 0}),
	{ok, _} = ar_timer:apply_interval(
		200,
		?MODULE,
		record_chunk_cache_size_metric,
		[],
		#{ skip_on_shutdown => false }
	),
	gen_server:cast(self(), process_store_chunk_queue),
	{ok, State2};
%% Storage-module worker; RepackInPlacePacking is none for regular syncing or
%% the target packing when the module is being repacked in place.
init({StoreID, RepackInPlacePacking}) ->
	?LOG_INFO([{event, ar_data_sync_start}, {store_id, StoreID}]),
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	[ok, ok] = ar_events:subscribe([node_state, disksup]),
	State = init_kv(StoreID),
	{RangeStart, RangeEnd} = ar_storage_module:get_range(StoreID),
	%% Pad the range to chunk borders; start one chunk early to cover the
	%% chunk straddling the module's left border.
	RangeStart2 = max(0, ar_block:get_chunk_padded_offset(RangeStart) - ?DATA_CHUNK_SIZE),
	RangeEnd2 = ar_block:get_chunk_padded_offset(RangeEnd),
	State2 = State#sync_data_state{
		store_id = StoreID,
		range_start = RangeStart2,
		range_end = RangeEnd2,
		%% weave_size and disk_pool_threshold will be set on join
		weave_size = 0,
		disk_pool_threshold = 0
	},
	case RepackInPlacePacking of
		none ->
			gen_server:cast(self(), process_store_chunk_queue),
			State3 = State2#sync_data_state{
				sync_status = init_sync_status(StoreID)
			},
			%% Start syncing immediately. For replica_2_9 packing, chunks will be
			%% written as unpacked_padded first and upgraded once entropy arrives.
			gen_server:cast(self(), sync_intervals),
			gen_server:cast(self(), sync_data),
			maybe_run_footprint_record_initialization(State3),
			{ok, State3};
		_ ->
			%% Repack-in-place modules do not sync; the repack process drives them.
			State3 = State2#sync_data_state{ sync_status = off },
			ar_device_lock:set_device_lock_metric(StoreID, sync, off),
			{ok, State3}
	end.
%% Continue a paused data_root_index migration from the given cursor.
handle_cast({move_data_root_index, Cursor, N}, State) ->
	move_data_root_index(Cursor, N, State),
	{noreply, State};
handle_cast({store_data_roots, BlockStart, BlockEnd, TXRoot, Entries}, State) ->
	{_, State2} = handle_store_data_roots(BlockStart, BlockEnd, TXRoot, Entries, State),
	{noreply, State2};
%% Drain the store-chunk queue and re-schedule ourselves every 200 ms.
handle_cast(process_store_chunk_queue, State) ->
	ar_util:cast_after(200, self(), process_store_chunk_queue),
	{noreply, process_store_chunk_queue(State)};
handle_cast({initialize_footprint_record, Cursor, Packing}, State) ->
	State2 = initialize_footprint_record(Cursor, Packing, State),
	{noreply, State2};
%% The node (re)joined the network: reconcile the stored block index with the
%% new one, cutting off any data above the intersection point (a reorg).
handle_cast({join, RecentBI}, State) ->
	#sync_data_state{ block_index = CurrentBI, store_id = StoreID } = State,
	[{_, WeaveSize, _} | _] = RecentBI,
	case {CurrentBI, ar_block_index:get_intersection(CurrentBI)} of
		{[], _} ->
			%% Fresh state - nothing stored, nothing to reconcile.
			ok;
		{_, no_intersection} ->
			io:format("~nWARNING: the stored block index of the data syncing module "
					"has no intersection with the new one "
					"in the most recent blocks. If you have just started a new weave using "
					"the init option, restart from the local state "
					"or specify some peers.~n~n"),
			init:stop(1);
		{_, {_H, Offset, _TXRoot}} ->
			%% Everything above Offset was orphaned by the reorg: drop it from
			%% the indices, every storage module, chunk storage and sync records.
			PreviousWeaveSize = element(2, hd(CurrentBI)),
			{ok, OrphanedDataRoots} = remove_orphaned_data(State, Offset,
					PreviousWeaveSize),
			{ok, Config} = arweave_config:get_env(),
			[gen_server:cast(name(ar_storage_module:id(Module)), {cut, Offset})
					|| Module <- Config#config.storage_modules],
			ok = ar_chunk_storage:cut(Offset, StoreID),
			ok = ar_sync_record:cut(Offset, ar_data_sync, StoreID),
			ar_events:send(sync_record, {global_cut, Offset}),
			%% Refresh timestamps so the orphaned chunks may be re-included.
			reset_orphaned_data_roots_disk_pool_timestamps(OrphanedDataRoots)
	end,
	BI = ar_block_index:get_list_by_hash(element(1, lists:last(RecentBI))),
	repair_data_root_offset_index(BI, State),
	DiskPoolThreshold = get_disk_pool_threshold(RecentBI),
	ets:insert(ar_data_sync_state, {disk_pool_threshold, DiskPoolThreshold}),
	State2 = store_sync_state(
		State#sync_data_state{
			weave_size = WeaveSize,
			block_index = RecentBI,
			disk_pool_threshold = DiskPoolThreshold }),
	{noreply, State2};
%% Drop this storage module's data above the new weave offset Start - but only
%% when explicitly enabled, since orphaned data may also indicate joining
%% through a malicious/out-of-sync peer.
handle_cast({cut, Start}, #sync_data_state{ store_id = StoreID,
		range_end = End } = State) ->
	case ar_sync_record:get_next_synced_interval(Start, End, ar_data_sync, StoreID) of
		not_found ->
			%% Nothing synced above Start - nothing to remove.
			ok;
		_Interval ->
			{ok, Config} = arweave_config:get_env(),
			case lists:member(remove_orphaned_storage_module_data,
					Config#config.enable) of
				false ->
					ar:console("The storage module ~s contains some orphaned data above the "
							"weave offset ~B. Make sure you are joining the network through "
							"trusted in-sync peers and restart with "
							"`enable remove_orphaned_storage_module_data`.~n",
							[StoreID, Start]),
					timer:sleep(2000),
					init:stop(1);
				true ->
					ok = delete_chunk_metadata_range(Start, End, State),
					ok = ar_chunk_storage:cut(Start, StoreID),
					ok = ar_sync_record:cut(Start, ar_data_sync, StoreID)
			end
	end,
	{noreply, State};
%% A new tip block arrived: remove data orphaned by the (possible) fork,
%% index the data roots and transactions of the new blocks, and move the
%% disk pool threshold forward.
handle_cast({add_tip_block, BlockTXPairs, BI}, State) ->
	#sync_data_state{ store_id = StoreID, weave_size = CurrentWeaveSize,
			block_index = CurrentBI } = State,
	{BlockStartOffset, Blocks} = pick_missing_blocks(CurrentBI, BlockTXPairs),
	{ok, OrphanedDataRoots} = remove_orphaned_data(State, BlockStartOffset,
			CurrentWeaveSize),
	%% Fold over the new blocks accumulating the advancing weave offset and
	%% the set of data roots they add.
	{WeaveSize, AddedDataRoots} = lists:foldl(
		fun ({_BH, []}, Acc) ->
				Acc;
			({_BH, SizeTaggedTXs}, {StartOffset, CurrentAddedDataRoots}) ->
				{ok, DataRoots} = add_block_data_roots(SizeTaggedTXs, StartOffset,
						StoreID),
				ok = update_tx_index(SizeTaggedTXs, StartOffset, StoreID),
				{StartOffset + element(2, lists:last(SizeTaggedTXs)),
					sets:union(CurrentAddedDataRoots, DataRoots)}
		end,
		{BlockStartOffset, sets:new()},
		Blocks
	),
	add_block_data_roots_to_disk_pool(AddedDataRoots),
	reset_orphaned_data_roots_disk_pool_timestamps(OrphanedDataRoots),
	ok = ar_chunk_storage:cut(BlockStartOffset, StoreID),
	ok = ar_sync_record:cut(BlockStartOffset, ar_data_sync, StoreID),
	ar_events:send(sync_record, {global_cut, BlockStartOffset}),
	DiskPoolThreshold = get_disk_pool_threshold(BI),
	ets:insert(ar_data_sync_state, {disk_pool_threshold, DiskPoolThreshold}),
	State2 = store_sync_state(
		State#sync_data_state{
			weave_size = WeaveSize,
			block_index = BI,
			disk_pool_threshold = DiskPoolThreshold }),
	{noreply, State2};
%% Run a syncing step if this store holds the device lock; otherwise retry later.
handle_cast(sync_data, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	Status = ar_device_lock:acquire_lock(sync, StoreID,
			State#sync_data_state.sync_status),
	State2 = State#sync_data_state{ sync_status = Status },
	State3 = case Status of
		active ->
			do_sync_data(State2);
		paused ->
			ar_util:cast_after(?DEVICE_LOCK_WAIT, self(), sync_data),
			State2;
		_ ->
			State2
	end,
	{noreply, State3};
%% Same as sync_data, for the second phase of the syncing loop.
handle_cast(sync_data2, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	Status = ar_device_lock:acquire_lock(sync, StoreID,
			State#sync_data_state.sync_status),
	State2 = State#sync_data_state{ sync_status = Status },
	State3 = case Status of
		active ->
			do_sync_data2(State2);
		paused ->
			ar_util:cast_after(?DEVICE_LOCK_WAIT, self(), sync_data2),
			State2;
		_ ->
			State2
	end,
	{noreply, State3};
%% Schedule syncing of the unsynced intervals. Choose a peer for each of the intervals.
%% There are two message payloads:
%% 1. collect_peer_intervals
%%    Start the collection process over the full storage_module range.
%% 2. {collect_peer_intervals, Start, End}
%%    Collect intervals for the specified range. This interface is used to pick up where
%%    we left off after a pause. There are 2 main conditions that can trigger a pause:
%%    a. Insufficient disk space. Will pause until disk space frees up
%%    b. Sync queue is busy. Will pause until previously queued intervals are scheduled to the
%%       ar_data_sync_coordinator for syncing.
%% Entry point of interval collection: checks preconditions (node joined, footprint
%% record migration complete), decides the sync phase, and kicks off the ranged
%% {collect_peer_intervals, Offset, Start, End, Type} loop.
handle_cast(collect_peer_intervals, State) ->
	#sync_data_state{ range_start = Start, range_end = End,
			disk_pool_threshold = DiskPoolThreshold, sync_phase = SyncPhase,
			migrations_index = MI, store_id = StoreID,
			sync_intervals_queue = Q } = State,
	CheckIsJoined = case ar_node:is_joined() of
		false ->
			%% Not joined yet - retry shortly.
			ar_util:cast_after(1000, self(), collect_peer_intervals),
			false;
		true ->
			true
	end,
	IsFootprintRecordMigrated = case ar_kv:get(MI, ?FOOTPRINT_MIGRATION_CURSOR_KEY) of
		{ok, <<"complete">>} ->
			true;
		_ ->
			%% Migration still in progress - retry later.
			ar_util:cast_after(5_000, self(), collect_peer_intervals),
			false
	end,
	%% noop when a precondition failed; otherwise whether our range extends beyond
	%% the disk pool threshold.
	IntersectsDiskPool = case CheckIsJoined andalso IsFootprintRecordMigrated of
		false ->
			noop;
		true ->
			End > DiskPoolThreshold
	end,
	%% Alternate between "normal" and footprint-based syncing.
	%% Footprint-based syncing downloads replica 2.9 chunks footprint by footprint
	%% to avoid redundant entropy generations for unpacking. "Normal" syncing ignores
	%% replica 2.9 data and mostly downloads unpacked data from peers storing it.
	SyncPhase2 = case SyncPhase of
		undefined ->
			%% Start with normal syncing.
			normal;
		normal ->
			footprint;
		footprint ->
			normal
	end,
	?LOG_DEBUG([{event, collect_peer_intervals_start},
			{function, collect_peer_intervals}, {store_id, StoreID}, {s, Start},
			{e, End}, {queue_size, gb_sets:size(Q)}, {is_joined, CheckIsJoined},
			{is_footprint_record_migrated, IsFootprintRecordMigrated},
			{intersects_disk_pool, IntersectsDiskPool},
			{sync_phase, SyncPhase2}]),
	State2 = case IntersectsDiskPool of
		noop ->
			%% A precondition failed; a retry has already been scheduled above.
			State;
		true ->
			case SyncPhase2 of
				footprint ->
					%% Footprint syncing stops at the disk pool threshold.
					End2 = min(End, DiskPoolThreshold),
					gen_server:cast(self(),
							{collect_peer_intervals, Start, Start, End2, footprint}),
					State#sync_data_state{ sync_phase = footprint };
				_ ->
					%% The disk pool is only synced during the "normal" phase.
					gen_server:cast(self(),
							{collect_peer_intervals, Start, Start, End, normal}),
					State#sync_data_state{ sync_phase = normal }
			end;
		false ->
			gen_server:cast(self(),
					{collect_peer_intervals, Start, Start, End, SyncPhase2}),
			State#sync_data_state{ sync_phase = SyncPhase2 }
	end,
	{noreply, State2};
handle_cast({collect_peer_intervals, Offset, Start, End, Type}, State)
		when Offset >= End ->
	%% We've finished collecting intervals for the whole storage_module range. Schedule
	%% the collection process to restart in ?COLLECT_SYNC_INTERVALS_FREQUENCY_MS.
	?LOG_DEBUG([{event, collect_peer_intervals_done},
			{function, collect_peer_intervals},
			{store_id, State#sync_data_state.store_id}, {offset, Offset},
			{s, Start}, {e, End}, {type, Type}]),
	ar_util:cast_after(?COLLECT_SYNC_INTERVALS_FREQUENCY_MS, self(),
			collect_peer_intervals),
	{noreply, State};
%% One step of the collection loop: gate on disk space and queue length, then ask
%% ar_peer_intervals to fetch a sync bucket worth of peer intervals at Offset.
handle_cast({collect_peer_intervals, Offset, Start, End, Type}, State) ->
	#sync_data_state{ sync_intervals_queue = Q, store_id = StoreID,
			weave_size = WeaveSize } = State,
	IsDiskSpaceSufficient = case is_disk_space_sufficient(StoreID) of
		true ->
			true;
		IsSufficient ->
			%% Retry the same step later; not_initialized retries much sooner.
			Delay = case IsSufficient of
				false -> 30_000;
				not_initialized -> 1000
			end,
			ar_util:cast_after(Delay, self(),
					{collect_peer_intervals, Offset, Start, End, Type}),
			false
	end,
	IsSyncQueueBusy = case IsDiskSpaceSufficient of
		false ->
			true;
		true ->
			%% Q contains chunks we've already queued for syncing. We need
			%% to manage the queue length.
			%% 1. Periodically sync_intervals will pull from Q and send work to
			%%    ar_data_sync_coordinator. We need to make sure Q is long enough so
			%%    that we never starve ar_data_sync_coordinator of work.
			%% 2. On the flip side we don't want Q to get so long as to trigger an
			%%    out-of-memory condition. In the extreme case we could collect and
			%%    enqueue all chunks in the entire storage module (usually 3.6 TB).
			%%    A Q of this length would have a roughly 500 MB memory footprint per
			%%    storage module. For a node that is syncing multiple storage modules,
			%%    this can add up fast.
			%% 3. We also want to make sure we are using the most up to date information
			%%    we can. Every time we add a task to the Q we're locking in a specific
			%%    view of Peer data availability. If that peer goes offline before we
			%%    get to the task it can result in wasted work or syncing stalls. A
			%%    shorter queue helps ensure we're always syncing from the "best" peers
			%%    at any point in time.
			%%
			%% With all that in mind, we'll pause collection once the Q hits roughly
			%% a bucket size worth of chunks. This number is slightly arbitrary and we
			%% should feel free to adjust as necessary.
			IntervalsQueueSize = gb_sets:size(Q),
			StoreIDLabel = ar_storage_module:label(StoreID),
			prometheus_gauge:set(sync_intervals_queue_size, [StoreIDLabel],
					IntervalsQueueSize),
			case IntervalsQueueSize > (?NETWORK_DATA_BUCKET_SIZE / ?DATA_CHUNK_SIZE) of
				true ->
					ar_util:cast_after(500, self(),
							{collect_peer_intervals, Offset, Start, End, Type}),
					true;
				false ->
					false
			end
	end,
	case IsSyncQueueBusy of
		true ->
			?LOG_DEBUG([{event, collect_peer_intervals_skipped},
					{function, collect_peer_intervals}, {store_id, StoreID},
					{offset, Offset}, {s, Start}, {e, End},
					{weave_size, WeaveSize},
					{is_disk_space_sufficient, IsDiskSpaceSufficient},
					{is_sync_queue_busy, IsSyncQueueBusy}]),
			ok;
		false ->
			End2 = min(End, WeaveSize),
			case Offset >= End2 of
				true ->
					%% Offset is beyond the current weave - poll until the weave grows.
					ar_util:cast_after(500, self(),
							{collect_peer_intervals, Offset, Start, End, Type});
				false ->
					%% All checks have passed, find and enqueue intervals for one
					%% sync bucket worth of chunks starting at offset Start.
					ar_peer_intervals:fetch(Offset, Start, End2, StoreID, Type)
			end
	end,
	{noreply, State};
handle_cast({enqueue_intervals, []}, State) ->
	{noreply, State};
handle_cast({enqueue_intervals, Intervals}, State) ->
	#sync_data_state{ sync_intervals_queue = Q,
			sync_intervals_queue_intervals = QIntervals } = State,
	%% When enqueuing intervals, we want to distribute the intervals among many peers,
	%% so that:
	%% 1. We can better saturate our network bandwidth without overwhelming any one peer.
	%% 2. So that we limit the risk of blocking on one particularly slow peer.
	%%
	%% We do a probabilistic distribution:
	%% 1. We shuffle the peers list so that the ordering differs from call to call
	%% 2. We cap the number of chunks to enqueue per peer - at roughly 50% more than
	%%    their "fair" share (i.e. ?DEFAULT_SYNC_BUCKET_SIZE / NumPeers).
	%%
	%% The compute overhead of these 2 steps is minimal and results in a pretty good
	%% distribution of sync requests among peers.
	%% This is an approximation. The intent is to enqueue one sync_bucket at a time - but
	%% due to the selection of each peer's intervals, the total number of bytes may be
	%% less than a full sync_bucket. But for the purposes of distributing requests among
	%% many peers - the approximation is fine (and much cheaper to calculate than taking
	%% the sum of all the peer intervals).
	TotalChunksToEnqueue = ?DEFAULT_SYNC_BUCKET_SIZE div ?DATA_CHUNK_SIZE,
	NumPeers = length(Intervals),
	%% Allow each Peer to sync slightly more chunks than their strict share - this allows
	%% us to more reliably sync the full set of requested intervals.
	ScalingFactor = 1.5,
	ChunksPerPeer = trunc(((TotalChunksToEnqueue + NumPeers - 1) div NumPeers)
			* ScalingFactor),
	{Q2, QIntervals2} = enqueue_intervals(
			ar_util:shuffle_list(Intervals), ChunksPerPeer, {Q, QIntervals}),
	?LOG_DEBUG([{event, enqueue_intervals}, {pid, self()},
			{queue_before, gb_sets:size(Q)}, {queue_after, gb_sets:size(Q2)},
			{num_peers, NumPeers}, {chunks_per_peer, ChunksPerPeer},
			{q_intervals_before, ar_intervals:sum(QIntervals)},
			{q_intervals_after, ar_intervals:sum(QIntervals2)}]),
	{noreply, State#sync_data_state{ sync_intervals_queue = Q2,
			sync_intervals_queue_intervals = QIntervals2 }};
%% Pull work off the sync-intervals queue; gated by the per-device sync lock.
handle_cast(sync_intervals, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	Status = ar_device_lock:acquire_lock(sync, StoreID,
			State#sync_data_state.sync_status),
	State2 = State#sync_data_state{ sync_status = Status },
	State3 = case Status of
		active ->
			do_sync_intervals(State2);
		paused ->
			ar_util:cast_after(?DEVICE_LOCK_WAIT, self(), sync_intervals),
			State2;
		_ ->
			State2
	end,
	{noreply, State3};
handle_cast({invalidate_bad_data_record, Args}, State) ->
	invalidate_bad_data_record(Args),
	{noreply, State};
%% Pack and store a chunk, or retry later if disk space is short.
handle_cast({pack_and_store_chunk, Args} = Cast,
		#sync_data_state{ store_id = StoreID } = State) ->
	case is_disk_space_sufficient(StoreID) of
		true ->
			pack_and_store_chunk(Args, State);
		_ ->
			ar_util:cast_after(30000, self(), Cast),
			{noreply, State}
	end;
%% Validate a chunk proof fetched from a peer and either store the chunk,
%% schedule its unpacking, or penalize the peer on an invalid proof.
handle_cast({store_fetched_chunk, Peer, Byte, Proof} = Cast, State) ->
	%% NOTE(review): this re-match of Cast against the clause pattern is a no-op.
	{store_fetched_chunk, Peer, Byte, Proof} = Cast,
	#{ data_path := DataPath, tx_path := TXPath, chunk := Chunk,
			packing := Packing } = Proof,
	SeekByte = ar_chunk_storage:get_chunk_seek_offset(Byte + 1) - 1,
	case validate_proof(SeekByte, Proof) of
		{need_unpacking, AbsoluteEndOffset, ChunkProof2} ->
			#chunk_proof{
				block_start_offset = BlockStartOffset,
				tx_start_offset = TXStartOffset,
				tx_end_offset = TXEndOffset,
				chunk_end_offset = ChunkEndOffset,
				chunk_id = ChunkID,
				metadata = #chunk_metadata{
					tx_root = TXRoot,
					data_root = DataRoot,
					chunk_size = ChunkSize
				}
			} = ChunkProof2,
			TXSize = TXEndOffset - TXStartOffset,
			AbsoluteTXStartOffset = BlockStartOffset + TXStartOffset,
			ChunkArgs = {Packing, Chunk, AbsoluteEndOffset, TXRoot, ChunkSize},
			Args = {AbsoluteTXStartOffset, TXSize, DataPath, TXPath, DataRoot,
					Chunk, ChunkID, ChunkEndOffset, Peer, Byte},
			unpack_fetched_chunk(Cast, AbsoluteEndOffset, ChunkArgs, Args, State);
		false ->
			decrement_chunk_cache_size(),
			process_invalid_fetched_chunk(Peer, Byte, State);
		{true, ChunkProof2} ->
			#chunk_proof{
				block_start_offset = BlockStartOffset,
				tx_start_offset = TXStartOffset,
				tx_end_offset = TXEndOffset,
				chunk_end_offset = ChunkEndOffset,
				chunk_id = ChunkID,
				metadata = #chunk_metadata{
					tx_root = TXRoot,
					data_root = DataRoot,
					chunk_size = ChunkSize
				}
			} = ChunkProof2,
			TXSize = TXEndOffset - TXStartOffset,
			AbsoluteTXStartOffset = BlockStartOffset + TXStartOffset,
			AbsoluteEndOffset = AbsoluteTXStartOffset + ChunkEndOffset,
			ChunkArgs = {unpacked, Chunk, AbsoluteEndOffset, TXRoot, ChunkSize},
			Args = {AbsoluteTXStartOffset, TXSize, DataPath, TXPath, DataRoot,
					Chunk, ChunkID, ChunkEndOffset, Peer, Byte},
			process_valid_fetched_chunk(ChunkArgs, Args, State)
	end;
%% Disk pool scanning is paused - just reschedule.
handle_cast(process_disk_pool_item,
		#sync_data_state{ disk_pool_scan_pause = true } = State) ->
	ar_util:cast_after(?DISK_POOL_SCAN_DELAY_MS, self(), process_disk_pool_item),
	{noreply, State};
%% Advance the disk pool scan cursor. Skips keys currently being processed and
%% pauses the scan after completing a full pass (or returning to the start key
%% too quickly).
handle_cast(process_disk_pool_item, State) ->
	#sync_data_state{ disk_pool_cursor = Cursor,
			disk_pool_chunks_index = DiskPoolChunksIndex,
			disk_pool_full_scan_start_key = FullScanStartKey,
			disk_pool_full_scan_start_timestamp = Timestamp,
			currently_processed_disk_pool_keys = CurrentlyProcessedDiskPoolKeys
			} = State,
	NextKey = case ar_kv:get_next(DiskPoolChunksIndex, Cursor) of
		{ok, Key1, Value1} ->
			case sets:is_element(Key1, CurrentlyProcessedDiskPoolKeys) of
				true ->
					none;
				false ->
					{ok, Key1, Value1}
			end;
		none ->
			%% Reached the end of the index - wrap around to the first key.
			case ar_kv:get_next(DiskPoolChunksIndex, first) of
				none ->
					none;
				{ok, Key2, Value2} ->
					case sets:is_element(Key2, CurrentlyProcessedDiskPoolKeys) of
						true ->
							none;
						false ->
							{ok, Key2, Value2}
					end
			end
	end,
	case NextKey of
		none ->
			%% Nothing processable right now - pause the scan and restart later.
			ar_util:cast_after(?DISK_POOL_SCAN_DELAY_MS, self(),
					resume_disk_pool_scan),
			ar_util:cast_after(?DISK_POOL_SCAN_DELAY_MS, self(),
					process_disk_pool_item),
			{noreply, State#sync_data_state{ disk_pool_cursor = first,
					disk_pool_full_scan_start_key = none,
					disk_pool_scan_pause = true }};
		{ok, Key3, Value3} ->
			case FullScanStartKey of
				none ->
					%% First item of a new full pass - remember where we started.
					process_disk_pool_item(State#sync_data_state{
							disk_pool_full_scan_start_key = Key3,
							disk_pool_full_scan_start_timestamp =
									erlang:timestamp() },
							Key3, Value3);
				Key3 ->
					%% We are back at the start key: a full pass completed. If it
					%% was too fast, pause before scanning again.
					TimePassed = timer:now_diff(erlang:timestamp(), Timestamp),
					case TimePassed < (?DISK_POOL_SCAN_DELAY_MS) * 1000 of
						true ->
							ar_util:cast_after(?DISK_POOL_SCAN_DELAY_MS, self(),
									resume_disk_pool_scan),
							ar_util:cast_after(?DISK_POOL_SCAN_DELAY_MS, self(),
									process_disk_pool_item),
							{noreply, State#sync_data_state{
									disk_pool_cursor = first,
									disk_pool_full_scan_start_key = none,
									disk_pool_scan_pause = true }};
						false ->
							process_disk_pool_item(State, Key3, Value3)
					end;
				_ ->
					process_disk_pool_item(State, Key3, Value3)
			end
	end;
handle_cast(resume_disk_pool_scan, State) ->
	{noreply, State#sync_data_state{ disk_pool_scan_pause = false }};
%% Iterate over the weave offsets a disk pool chunk belongs to and store it under
%% each of them; conclude (and possibly delete the disk pool copy) when done.
handle_cast({process_disk_pool_chunk_offsets, Iterator, MayConclude, Args}, State) ->
	{Offset, _, _, _, _, _, Key, _, _, _} = Args,
	%% Place the chunk under its last 10 offsets in the weave (the same data
	%% may be uploaded several times).
	case data_root_index_next_v2(Iterator, 10) of
		none ->
			State2 = case MayConclude of
				true ->
					Iterator2 = data_root_index_reset(Iterator),
					delete_disk_pool_chunk(Iterator2, Args, State),
					maybe_reset_disk_pool_full_scan_key(Key, State);
				false ->
					State
			end,
			gen_server:cast(self(), process_disk_pool_item),
			{noreply, deregister_currently_processed_disk_pool_key(Key, State2)};
		{TXArgs, Iterator2} ->
			State2 = register_currently_processed_disk_pool_key(Key, State),
			{TXStartOffset, TXRoot, TXPath} = TXArgs,
			AbsoluteEndOffset = TXStartOffset + Offset,
			process_disk_pool_chunk_offset(Iterator2, TXRoot, TXPath,
					AbsoluteEndOffset, MayConclude, Args, State2)
	end;
%% Range removal finished - notify the requester.
handle_cast({remove_range, End, Cursor, Ref, PID}, State) when Cursor > End ->
	PID ! {removed_range, Ref},
	{noreply, State};
%% Remove one chunk at a time from [Cursor, End], recursing via self-cast.
handle_cast({remove_range, End, Cursor, Ref, PID}, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	case get_chunk_by_byte(Cursor, StoreID) of
		{ok, _Key, {AbsoluteEndOffset, _, _, _, _, _, _}}
				when AbsoluteEndOffset > End ->
			%% The next stored chunk is beyond the range - done.
			PID ! {removed_range, Ref},
			{noreply, State};
		{ok, _Key, {AbsoluteEndOffset, _, _, _, _, _, ChunkSize}} ->
			PaddedStartOffset = ar_block:get_chunk_padded_offset(
					AbsoluteEndOffset - ChunkSize),
			PaddedOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset),
			%% 1) store updated sync record
			%% 2) remove chunk
			%% 3) update chunks_index
			%%
			%% The order is important - in case the VM crashes,
			%% we will not report false positives to peers,
			%% and the chunk can still be removed upon retry.
			RemoveFromFootprint = ar_footprint_record:delete(PaddedOffset, StoreID),
			RemoveFromSyncRecord = case RemoveFromFootprint of
				ok ->
					ar_sync_record:delete(PaddedOffset, PaddedStartOffset,
							ar_data_sync, StoreID);
				Error ->
					Error
			end,
			RemoveFromChunkStorage = case RemoveFromSyncRecord of
				ok ->
					ar_chunk_storage:delete(PaddedOffset, StoreID);
				Error2 ->
					Error2
			end,
			RemoveFromChunksIndex = case RemoveFromChunkStorage of
				ok ->
					delete_chunk_metadata(AbsoluteEndOffset, StoreID);
				Error3 ->
					Error3
			end,
			case RemoveFromChunksIndex of
				ok ->
					NextCursor = AbsoluteEndOffset + 1,
					gen_server:cast(self(),
							{remove_range, End, NextCursor, Ref, PID});
				{error, Reason} ->
					?LOG_ERROR([{event,
							data_removal_aborted_since_failed_to_remove_chunk},
							{offset, Cursor},
							{reason, io_lib:format("~p", [Reason])}])
			end,
			{noreply, State};
		{error, invalid_iterator} ->
			%% No chunk at this byte - skip forward and continue.
			NextCursor = advance_chunks_index_cursor(Cursor),
			gen_server:cast(self(), {remove_range, End, NextCursor, Ref, PID}),
			{noreply, State};
		{error, Reason} ->
			?LOG_ERROR([{event, data_removal_aborted_since_failed_to_query_chunk},
					{offset, Cursor}, {reason, io_lib:format("~p", [Reason])}]),
			{noreply, State}
	end;
%% A pending repack request timed out - drop it and release its cache slot.
handle_cast({expire_repack_request, Key}, State) ->
	#sync_data_state{ packing_map = PackingMap } = State,
	case maps:get(Key, PackingMap, not_found) of
		{pack_chunk, {_, DataPath, Offset, DataRoot, _, _, _}} ->
			decrement_chunk_cache_size(),
			DataPathHash = crypto:hash(sha256, DataPath),
			?LOG_DEBUG([{event, expired_repack_chunk_request},
					{data_path_hash, ar_util:encode(DataPathHash)},
					{data_root, ar_util:encode(DataRoot)},
					{relative_offset, Offset}]),
			State2 = State#sync_data_state{
					packing_map = maps:remove(Key, PackingMap) },
			{noreply, State2};
		_ ->
			{noreply, State}
	end;
%% A pending unpack request timed out - drop it and release its cache slot.
handle_cast({expire_unpack_request, Key}, State) ->
	#sync_data_state{ packing_map = PackingMap } = State,
	case maps:get(Key, PackingMap, not_found) of
		{unpack_fetched_chunk, _Args} ->
			decrement_chunk_cache_size(),
			State2 = State#sync_data_state{
					packing_map = maps:remove(Key, PackingMap) },
			{noreply, State2};
		_ ->
			{noreply, State}
	end;
%% Periodically persist the sync state.
handle_cast(store_sync_state, State) ->
	store_sync_state(State),
	ar_util:cast_after(?STORE_STATE_FREQUENCY_MS, self(), store_sync_state),
	{noreply, State};
handle_cast({remove_recently_processed_disk_pool_offset, Offset, ChunkDataKey},
		State) ->
	{noreply, remove_recently_processed_disk_pool_offset(Offset, ChunkDataKey,
			State)};
%% Catch-all: log and ignore unknown casts.
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {cast, Cast}]),
	{noreply, State}.

handle_call({add_block, B, SizeTaggedTXs}, _From, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	{reply, add_block(B, SizeTaggedTXs, StoreID), State};
%% Synchronous variant of the store_data_roots cast; returns the handler's reply.
handle_call({store_data_roots_sync, BlockStart, BlockEnd, TXRoot, Entries}, _From,
		State) ->
	{Reply, State2} = handle_store_data_roots(BlockStart, BlockEnd, TXRoot, Entries,
			State),
	{reply, Reply, State2};
%% Catch-all: log and ignore unknown calls.
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {request, Request}]),
	{reply, ok, State}.

%% Record the data roots of a block: update the per-data-root index and write the
%% block's {TXRoot, BlockSize, DataRootIndexKeySet} entry keyed by the block start
%% offset. Only the default storage module handles this (asserted by the match).
handle_store_data_roots(BlockStart, BlockEnd, TXRoot, Entries, State) ->
	#sync_data_state{ store_id = ?DEFAULT_MODULE } = State,
	DataRootIndexKeySet = sets:from_list([
		<< DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>
		|| {DataRoot, TXSize, _TXStart, _TXPath} <- Entries
	]),
	BlockSize = BlockEnd - BlockStart,
	lists:foreach(
		fun({DataRoot, TXSize, TXStartOffset, TXPath}) ->
			ok = update_data_root_index(DataRoot, TXSize, TXStartOffset, TXPath,
					?DEFAULT_MODULE)
		end,
		Entries
	),
	ok = ar_kv:put({data_root_offset_index, ?DEFAULT_MODULE},
			<< BlockStart:?OFFSET_KEY_BITSIZE >>,
			term_to_binary({TXRoot, BlockSize, DataRootIndexKeySet})),
	{ok, State}.
handle_info({event, node_state, {initialized, B}}, State) -> {noreply, State#sync_data_state{ weave_size = B#block.weave_size }}; handle_info({event, node_state, {new_tip, B, _PrevB}}, State) -> {noreply, State#sync_data_state{ weave_size = B#block.weave_size }}; handle_info({event, node_state, {search_space_upper_bound, Bound}}, State) -> {noreply, State#sync_data_state{ disk_pool_threshold = Bound }}; handle_info({event, node_state, _}, State) -> {noreply, State}; handle_info({chunk, {unpacked, Key, ChunkArgs}}, State) -> #sync_data_state{ packing_map = PackingMap } = State, case maps:get(Key, PackingMap, not_found) of {unpack_fetched_chunk, Args} -> State2 = State#sync_data_state{ packing_map = maps:remove(Key, PackingMap) }, process_unpacked_chunk(ChunkArgs, Args, State2); Result -> {Packing, _U, AbsoluteEndOffset, _TXRoot, ChunkSize} = ChunkArgs, Reason = missing_unpacked_chunk, prometheus_counter:inc(sync_chunks_skipped, [Reason]), ?LOG_DEBUG([{event, skipping_synced_chunk}, {reason, Reason}, {key, Key}, {packing, ar_serialize:encode_packing(Packing, true)}, {absolute_offset, AbsoluteEndOffset}, {chunk_size, ChunkSize}, {result, Result}]), {noreply, State} end; handle_info({chunk, {unpack_error, Key, ChunkArgs, Error}}, State) -> #sync_data_state{ packing_map = PackingMap } = State, case maps:get(Key, PackingMap, not_found) of {unpack_fetched_chunk, Args} -> {Packing, _Chunk1, AbsoluteEndOffset, _TXRoot, ChunkSize} = ChunkArgs, {_AbsoluteTXStartOffset, _TXSize, _DataPath, _TXPath, _DataRoot, _Chunk2, _ChunkID, _ChunkEndOffset, Peer, _Byte} = Args, ?LOG_WARNING([{event, got_invalid_packed_chunk}, {peer, ar_util:format_peer(Peer)}, {absolute_end_offset, AbsoluteEndOffset}, {packing, ar_serialize:encode_packing(Packing, true)}, {chunk_size, ChunkSize}, {error, io_lib:format("~p", [Error])}]), State2 = State#sync_data_state{ packing_map = maps:remove(Key, PackingMap) }, ar_peers:issue_warning(Peer, chunk, Error), {noreply, State2}; _ -> {noreply, State} end; 
%% A repacked chunk arrived; store it if a matching pack_chunk request (same target
%% packing) is still tracked in the packing map.
handle_info({chunk, {packed, Key, ChunkArgs}}, State) ->
	#sync_data_state{ packing_map = PackingMap } = State,
	Packing = element(1, ChunkArgs),
	case maps:get(Key, PackingMap, not_found) of
		{pack_chunk, Args} when element(1, Args) == Packing ->
			State2 = State#sync_data_state{
					packing_map = maps:remove(Key, PackingMap) },
			{noreply, store_chunk(ChunkArgs, Args, State2)};
		_ ->
			{noreply, State}
	end;
handle_info({chunk, _}, State) ->
	{noreply, State};
%% Disk space report for this storage module (third element false: percentage-based).
%% Below 1% free: mark insufficient; above 5%: mark sufficient (the gap provides
%% hysteresis on the "sufficient" log message).
handle_info({event, disksup, {remaining_disk_space, StoreID, false, Percentage,
		_Bytes}}, #sync_data_state{ store_id = StoreID } = State) ->
	case Percentage < 0.01 of
		true ->
			case is_disk_space_sufficient(StoreID) of
				false ->
					ok;
				_ ->
					%% Only log on the transition to insufficient.
					log_insufficient_disk_space(StoreID)
			end,
			ets:insert(ar_data_sync_state,
					{{is_disk_space_sufficient, StoreID}, false});
		false ->
			case Percentage > 0.05 of
				true ->
					case is_disk_space_sufficient(StoreID) of
						false ->
							log_sufficient_disk_space(StoreID);
						_ ->
							ok
					end;
				false ->
					ok
			end,
			ets:insert(ar_data_sync_state,
					{{is_disk_space_sufficient, StoreID}, true})
	end,
	{noreply, State};
%% Disk space report (third element true: absolute bytes). Compares free bytes
%% against the disk pool + disk cache + buffer budget.
handle_info({event, disksup, {remaining_disk_space, StoreID, true, _Percentage,
		Bytes}}, #sync_data_state{ store_id = StoreID } = State) ->
	{ok, Config} = arweave_config:get_env(),
	%% Default values:
	%% max_disk_pool_buffer_mb = ?DEFAULT_MAX_DISK_POOL_BUFFER_MB = 100_000
	%% disk_cache_size = ?DISK_CACHE_SIZE = 5_120
	%% DiskPoolSize = ~100GB
	%% DisckCacheSize = ~5GB
	%% BufferSize = ~10GB
	DiskPoolSize = Config#config.max_disk_pool_buffer_mb * ?MiB,
	DiskCacheSize = Config#config.disk_cache_size * ?MiB,
	BufferSize = 10_000_000_000,
	case Bytes < DiskPoolSize + DiskCacheSize + (BufferSize div 2) of
		true ->
			ar:console("error: Not enough disk space left on 'data_dir' disk for "
					"the requested 'max_disk_pool_buffer_mb' ~Bmb and 'disk_cache_size_mb' ~Bmb "
					"either lower these values or add more disk space.~n",
					[Config#config.max_disk_pool_buffer_mb,
							Config#config.disk_cache_size]),
			case is_disk_space_sufficient(StoreID) of
				false ->
					ok;
				_ ->
					log_insufficient_disk_space(StoreID)
			end,
			ets:insert(ar_data_sync_state,
					{{is_disk_space_sufficient, StoreID}, false});
		false ->
			%% Only consider space sufficient again once a full buffer of headroom
			%% exists (hysteresis between div 2 and the full BufferSize).
			case Bytes > DiskPoolSize + DiskCacheSize + BufferSize of
				true ->
					case is_disk_space_sufficient(StoreID) of
						false ->
							log_sufficient_disk_space(StoreID);
						_ ->
							ok
					end;
				false ->
					ok
			end,
			ets:insert(ar_data_sync_state,
					{{is_disk_space_sufficient, StoreID}, true})
	end,
	{noreply, State};
handle_info({event, disksup, _}, State) ->
	{noreply, State};
%% Ignore normal exits and expected monitor downs.
handle_info({'EXIT', _PID, normal}, State) ->
	{noreply, State};
handle_info({'DOWN', _, process, _, normal}, State) ->
	{noreply, State};
handle_info({'DOWN', _, process, _, noproc}, State) ->
	{noreply, State};
%% A monitored interval-collection job crashed - restart collection.
handle_info({'DOWN', _, process, _, Reason},
		#sync_data_state{ store_id = StoreID } = State) ->
	?LOG_WARNING([{event, collect_intervals_job_failed},
			{reason, io_lib:format("~p", [Reason])},
			{action, spawning_another_one}, {store_id, StoreID}]),
	gen_server:cast(self(), collect_peer_intervals),
	{noreply, State};
%% Catch-all: log and ignore unknown messages.
handle_info(Message, #sync_data_state{ store_id = StoreID } = State) ->
	?LOG_WARNING([{event, unhandled_info}, {store_id, StoreID},
			{message, Message}]),
	{noreply, State}.

%% Persist the sync state before shutdown.
terminate(Reason, #sync_data_state{ store_id = StoreID } = State) ->
	store_sync_state(State),
	?LOG_INFO([{event, terminate}, {store_id, StoreID},
			{reason, io_lib:format("~p", [Reason])}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Determine the initial sync status (paused if syncing is enabled, off otherwise)
%% and publish it to the device-lock metric.
init_sync_status(StoreID) ->
	SyncStatus = case ar_data_sync_coordinator:is_syncing_enabled() of
		true -> paused;
		false -> off
	end,
	ar_device_lock:set_device_lock_metric(StoreID, sync, SyncStatus),
	SyncStatus.

%% Emit a chunk-related log entry at the requested level, tagged with
%% solution_proofs for filtering.
do_log_chunk_error(LogType, Event, ExtraLogData) ->
	LogData = [{event, Event}, {tags, [solution_proofs]} | ExtraLogData],
	case LogType of
		error -> ?LOG_ERROR(LogData);
		info -> ?LOG_INFO(LogData)
	end.
log_chunk_error(http, _, _) -> ok; log_chunk_error(tx_data, _, _) -> ok; log_chunk_error(verify, Event, ExtraLogData) -> do_log_chunk_error(info, Event, [{request_origin, verify} | ExtraLogData]); log_chunk_error(RequestOrigin, Event, ExtraLogData) -> do_log_chunk_error(error, Event, [{request_origin, RequestOrigin} | ExtraLogData]). do_sync_intervals(State) -> #sync_data_state{ sync_intervals_queue = Q, sync_intervals_queue_intervals = QIntervals, store_id = StoreID } = State, IsQueueEmpty = case gb_sets:is_empty(Q) of true -> ar_util:cast_after(500, self(), sync_intervals), true; false -> false end, IsDiskSpaceSufficient = case IsQueueEmpty of true -> false; false -> case is_disk_space_sufficient(StoreID) of true -> true; _ -> ar_util:cast_after(30000, self(), sync_intervals), false end end, IsChunkCacheFull = case IsDiskSpaceSufficient of false -> true; true -> case is_chunk_cache_full() of true -> ar_util:cast_after(1000, self(), sync_intervals), true; false -> false end end, AreSyncWorkersBusy = case IsChunkCacheFull of true -> true; false -> case ar_data_sync_coordinator:ready_for_work() of false -> ar_util:cast_after(200, self(), sync_intervals), true; true -> false end end, case AreSyncWorkersBusy of true -> State; false -> gen_server:cast(self(), sync_intervals), {{FootprintKey, Start, End, Peer}, Q2} = gb_sets:take_smallest(Q), I2 = ar_intervals:delete(QIntervals, End, Start), gen_server:cast(ar_data_sync_coordinator, {sync_range, {Start, End, Peer, StoreID, FootprintKey}}), State#sync_data_state{ sync_intervals_queue = Q2, sync_intervals_queue_intervals = I2 } end. 
do_sync_data(State) -> #sync_data_state{ store_id = StoreID, range_start = RangeStart, range_end = RangeEnd, disk_pool_threshold = DiskPoolThreshold } = State, %% See if any of StoreID's unsynced intervals can be found in the "default" %% storage_module Intervals = get_unsynced_intervals_from_other_storage_modules( StoreID, ?DEFAULT_MODULE, RangeStart, min(RangeEnd, DiskPoolThreshold)), gen_server:cast(self(), sync_data2), %% Find all storage_modules that might include the target chunks (e.g. neighboring %% storage_modules with an overlap, or unpacked copies used for packing, etc...) OtherStorageModules = [ar_storage_module:id(Module) || Module <- ar_storage_module:get_all(RangeStart, RangeEnd), ar_storage_module:id(Module) /= StoreID], ?LOG_INFO([{event, sync_data}, {stage, copy_from_default_storage_module}, {store_id, StoreID}, {range_start, RangeStart}, {range_end, RangeEnd}, {range_end, RangeEnd}, {disk_pool_threshold, DiskPoolThreshold}, {default_intervals, length(Intervals)}, {other_storage_modules, length(OtherStorageModules)}]), State#sync_data_state{ unsynced_intervals_from_other_storage_modules = Intervals, other_storage_modules_with_unsynced_intervals = OtherStorageModules }. 
%% @doc No unsynced overlap intervals, proceed with syncing do_sync_data2(#sync_data_state{ unsynced_intervals_from_other_storage_modules = [], other_storage_modules_with_unsynced_intervals = [] } = State) -> #sync_data_state{ store_id = StoreID, range_start = RangeStart, range_end = RangeEnd } = State, ?LOG_INFO([{event, sync_data}, {stage, complete}, {store_id, StoreID}, {range_start, RangeStart}, {range_end, RangeEnd}]), ar_util:cast_after(2000, self(), collect_peer_intervals), State; %% @doc Check to see if a neighboring storage_module may have already synced one of our %% unsynced intervals do_sync_data2(#sync_data_state{ store_id = StoreID, range_start = RangeStart, range_end = RangeEnd, unsynced_intervals_from_other_storage_modules = [], other_storage_modules_with_unsynced_intervals = [OtherStoreID | OtherStoreIDs] } = State) -> Packing = ar_storage_module:get_packing(StoreID), OtherPacking = ar_storage_module:get_packing(OtherStoreID), Intervals = get_unsynced_intervals_from_other_storage_modules(StoreID, OtherStoreID, RangeStart, RangeEnd), ?LOG_INFO([{event, sync_data}, {stage, copy_from_other_storage_modules}, {store_id, StoreID}, {other_store_id, OtherStoreID}, {range_start, RangeStart}, {range_end, RangeEnd}, {found_intervals, length(Intervals)}]), gen_server:cast(self(), sync_data2), State#sync_data_state{ unsynced_intervals_from_other_storage_modules = Intervals, other_storage_modules_with_unsynced_intervals = OtherStoreIDs }; %% @doc Read an unsynced interval from the disk of a neighboring storage_module do_sync_data2(#sync_data_state{ store_id = StoreID, unsynced_intervals_from_other_storage_modules = [{OtherStoreID, {Start, End}} | Intervals] } = State) -> State2 = case ar_chunk_copy:read_range(Start, End, OtherStoreID, StoreID) of true -> State#sync_data_state{ unsynced_intervals_from_other_storage_modules = Intervals }; false -> State end, ar_util:cast_after(50, self(), sync_data2), State2. 
%% Delete disk pool data roots whose timestamp is older than the configured
%% expiration time (config value is in seconds; timestamps in microseconds).
remove_expired_disk_pool_data_roots() ->
	Now = os:system_time(microsecond),
	{ok, Config} = arweave_config:get_env(),
	ExpirationTime = Config#config.disk_pool_data_root_expiration_time * 1000000,
	ets:foldl(
		fun({Key, {_Size, Timestamp, _TXIDSet}}, _Acc) ->
			case Timestamp + ExpirationTime > Now of
				true ->
					ok;
				false ->
					ets:delete(ar_disk_pool_data_roots, Key),
					ok
			end
		end,
		ok,
		ar_disk_pool_data_roots
	).

%% Read a chunk together with its proofs, validate it, and repack it to the
%% requested Packing if it differs from StoredPacking and Pack is allowed.
%% Returns {ok, Proof} with a map of tx_root/chunk/data_path/tx_path/
%% absolute_end_offset/chunk_size (plus unpacked_chunk when available and its
%% chunk id matches), or {error, Reason}.
get_chunk(Offset, SeekOffset, Pack, Packing, StoredPacking, StoreID,
		RequestOrigin) ->
	case read_chunk_with_metadata(Offset, SeekOffset, StoredPacking, StoreID, true,
			RequestOrigin) of
		{error, Reason} ->
			{error, Reason};
		{ok, {Chunk, DataPath}, AbsoluteEndOffset, TXRoot, ChunkSize, TXPath} ->
			%% ChunkID is the validated chunk id, or the atom error on failure.
			ChunkID = case validate_fetched_chunk({AbsoluteEndOffset, DataPath,
					TXPath, TXRoot, ChunkSize, StoreID, RequestOrigin}) of
				{true, ID} ->
					ID;
				false ->
					error
			end,
			PackResult = case {ChunkID, Packing == StoredPacking, Pack} of
				{error, _, _} ->
					%% Chunk was read but could not be validated.
					{error, chunk_failed_validation};
				{_, false, false} ->
					%% Requested and stored chunk are in different formats,
					%% and repacking is disabled.
					{error, chunk_stored_in_different_packing_only};
				_ ->
					ar_packing_server:repack(Packing, StoredPacking,
							AbsoluteEndOffset, TXRoot, Chunk, ChunkSize)
			end,
			case {PackResult, ChunkID} of
				{{error, Reason}, _} ->
					log_chunk_error(RequestOrigin, failed_to_repack_chunk,
							[{packing, ar_serialize:encode_packing(Packing, true)},
							{stored_packing,
									ar_serialize:encode_packing(StoredPacking, true)},
							{absolute_end_offset, AbsoluteEndOffset},
							{store_id, StoreID},
							{error, io_lib:format("~p", [Reason])}]),
					{error, Reason};
				{{ok, PackedChunk, none}, _} ->
					%% PackedChunk is the requested format.
					Proof = #{ tx_root => TXRoot, chunk => PackedChunk,
							data_path => DataPath, tx_path => TXPath,
							absolute_end_offset => AbsoluteEndOffset,
							chunk_size => ChunkSize },
					{ok, Proof};
				{{ok, PackedChunk, MaybeUnpackedChunk}, none} ->
					%% PackedChunk is the requested format, but the ChunkID could
					%% not be determined
					Proof = #{ tx_root => TXRoot, chunk => PackedChunk,
							data_path => DataPath, tx_path => TXPath,
							absolute_end_offset => AbsoluteEndOffset,
							chunk_size => ChunkSize },
					case MaybeUnpackedChunk of
						none ->
							{ok, Proof};
						_ ->
							{ok, Proof#{ unpacked_chunk => MaybeUnpackedChunk }}
					end;
				{{ok, PackedChunk, MaybeUnpackedChunk}, _} ->
					Proof = #{ tx_root => TXRoot, chunk => PackedChunk,
							data_path => DataPath, tx_path => TXPath,
							absolute_end_offset => AbsoluteEndOffset,
							chunk_size => ChunkSize },
					case MaybeUnpackedChunk of
						none ->
							{ok, Proof};
						_ ->
							%% Cross-check the unpacked chunk against the validated
							%% chunk id; a mismatch invalidates the stored record.
							ComputedChunkID =
									ar_tx:generate_chunk_id(MaybeUnpackedChunk),
							case ComputedChunkID == ChunkID of
								true ->
									{ok, Proof#{
											unpacked_chunk => MaybeUnpackedChunk }};
								false ->
									log_chunk_error(RequestOrigin,
											get_chunk_invalid_id,
											[{chunk_size, ChunkSize},
											{actual_chunk_size,
												byte_size(MaybeUnpackedChunk)},
											{requested_packing,
												ar_serialize:encode_packing(
														Packing, true)},
											{stored_packing,
												ar_serialize:encode_packing(
														StoredPacking, true)},
											{absolute_end_offset,
												AbsoluteEndOffset},
											{offset, Offset},
											{seek_offset, SeekOffset},
											{store_id, StoreID},
											{expected_chunk_id,
												ar_util:encode(ChunkID)},
											{chunk_id,
												ar_util:encode(ComputedChunkID)},
											{actual_chunk,
												binary:part(MaybeUnpackedChunk,
														0, 32)}]),
									invalidate_bad_data_record({AbsoluteEndOffset,
											ChunkSize, StoreID,
											get_chunk_invalid_id}),
									{error, chunk_not_found}
							end
					end
			end
	end.
%% @doc Read and validate the merkle proofs (but not the chunk itself) for the
%% chunk covering SeekOffset. Returns {ok, #{ data_path, tx_path }} or
%% {error, Reason}.
get_chunk_proof(Offset, SeekOffset, StoredPacking, StoreID, RequestOrigin) ->
	case read_chunk_with_metadata(
			Offset, SeekOffset, StoredPacking, StoreID, false, RequestOrigin) of
		{error, Reason} ->
			{error, Reason};
		{ok, DataPath, AbsoluteEndOffset, TXRoot, ChunkSize, TXPath} ->
			CheckProof =
				case validate_fetched_chunk({AbsoluteEndOffset, DataPath, TXPath,
						TXRoot, ChunkSize, StoreID, false}) of
					{true, ID} ->
						ID;
					false ->
						error
				end,
			case CheckProof of
				error ->
					%% Proof was read but could not be validated.
					log_chunk_error(RequestOrigin, chunk_proof_failed_validation,
							[{offset, Offset},
							{seek_offset, SeekOffset},
							{stored_packing,
								ar_serialize:encode_packing(StoredPacking, true)},
							{store_id, StoreID}]),
					{error, chunk_not_found};
				_ ->
					Proof = #{ data_path => DataPath, tx_path => TXPath },
					{ok, Proof}
			end
	end.

%% @doc Read the chunk metadata and optionally the chunk itself.
%%
%% When ReadChunk=true, the response is of the format:
%% {ok, {Chunk, DataPath}, AbsoluteEndOffset, TXRoot, ChunkSize, TXPath}
%%
%% Otherwise, the format is
%% {ok, DataPath, AbsoluteEndOffset, TXRoot, ChunkSize, TXPath}
read_chunk_with_metadata(
		Offset, SeekOffset, unpacked_padded, StoreID, _ReadChunk, RequestOrigin) ->
	%% unpacked_padded is an intermediate format and should not be read. Since not all
	%% the records and indices have been fully setup, trying to read the chunk can cause
	%% its offset to be invalidated.
	log_chunk_error(RequestOrigin, read_unpacked_padded_chunk,
			[{seek_offset, SeekOffset},
			{offset, Offset},
			{store_id, StoreID},
			{stored_packing, unpacked_padded}]),
	{error, chunk_not_found};
read_chunk_with_metadata(
		Offset, SeekOffset, StoredPacking, StoreID, ReadChunk, RequestOrigin) ->
	case get_chunk_by_byte(SeekOffset, StoreID) of
		{error, invalid_iterator} ->
			%% No error log needed since this is expected behavior when the chunk simply
			%% isn't stored.
			{error, chunk_not_found};
		{error, Err} ->
			Modules = ar_storage_module:get_all(SeekOffset),
			ModuleIDs = [ar_storage_module:id(Module) || Module <- Modules],
			log_chunk_error(RequestOrigin, failed_to_fetch_chunk_metadata,
					[{seek_offset, SeekOffset},
					{store_id, StoreID},
					{stored_packing, ar_serialize:encode_packing(StoredPacking, true)},
					{modules_covering_seek_offset, ModuleIDs},
					{error, io_lib:format("~p", [Err])}]),
			{error, chunk_not_found};
		{ok, _, {AbsoluteEndOffset, _, _, _, _, _, ChunkSize}}
				when AbsoluteEndOffset - SeekOffset >= ChunkSize ->
			%% The chunk the iterator found does not actually cover SeekOffset.
			log_chunk_error(RequestOrigin, chunk_offset_mismatch,
					[{absolute_offset, AbsoluteEndOffset},
					{seek_offset, SeekOffset},
					{store_id, StoreID},
					{stored_packing,
						ar_serialize:encode_packing(StoredPacking, true)}]),
			{error, chunk_not_found};
		{ok, _, {AbsoluteEndOffset, ChunkDataKey, TXRoot, _, TXPath, _, ChunkSize}} ->
			ReadFun =
				case ReadChunk of
					true ->
						fun read_chunk/3;
					_ ->
						fun read_data_path/3
				end,
			case ReadFun(AbsoluteEndOffset, ChunkDataKey, StoreID) of
				not_found ->
					Modules = ar_storage_module:get_all(SeekOffset),
					ModuleIDs = [ar_storage_module:id(Module) || Module <- Modules],
					log_chunk_error(RequestOrigin, failed_to_read_chunk_data_path,
							[{seek_offset, SeekOffset},
							{absolute_offset, AbsoluteEndOffset},
							{store_id, StoreID},
							{stored_packing,
								ar_serialize:encode_packing(StoredPacking, true)},
							{modules_covering_seek_offset, ModuleIDs},
							{chunk_data_key, ar_util:encode(ChunkDataKey)},
							{read_fun, ReadFun}]),
					invalidate_bad_data_record({AbsoluteEndOffset, ChunkSize, StoreID,
							failed_to_read_chunk_data_path}),
					{error, chunk_not_found};
				{error, Error} ->
					log_chunk_error(RequestOrigin, failed_to_read_chunk,
							[{reason, io_lib:format("~p", [Error])},
							{chunk_data_key, ar_util:encode(ChunkDataKey)},
							{absolute_end_offset, Offset}]),
					{error, failed_to_read_chunk};
				{ok, {Chunk, DataPath}} ->
					case ar_sync_record:is_recorded(Offset, StoredPacking, ar_data_sync,
							StoreID) of
						false ->
							Modules = ar_storage_module:get_all(SeekOffset),
							ModuleIDs =
								[ar_storage_module:id(Module) || Module <- Modules],
							RootRecords = [ets:lookup(sync_records, {ar_data_sync, ID})
									|| ID <- ModuleIDs],
							log_chunk_error(RequestOrigin,
									chunk_metadata_read_sync_record_race_condition,
									[{seek_offset, SeekOffset},
									{storeID, StoreID},
									{modules_covering_seek_offset, ModuleIDs},
									{root_sync_records, RootRecords},
									{stored_packing,
										ar_serialize:encode_packing(StoredPacking,
											true)}]),
							%% The chunk should have been re-packed
							%% in the meantime - very unlucky timing.
							{error, chunk_not_found};
						true ->
							{ok, {Chunk, DataPath}, AbsoluteEndOffset, TXRoot, ChunkSize,
									TXPath}
					end;
				{ok, DataPath} ->
					{ok, DataPath, AbsoluteEndOffset, TXRoot, ChunkSize, TXPath}
			end
	end.

%% @doc Invalidate the sync records and metadata for the chunk ending at
%% AbsoluteEndOffset, unless the offset is above the disk pool threshold.
%%
%% NOTE: the previous implementation looked the threshold up a second time
%% inside the `true` branch and repeated the comparison; the second
%% `[{_, T}] = ets:lookup(...)` pattern-matched against the already-bound T
%% and could crash with badmatch if the threshold advanced between the two
%% reads. A single lookup is sufficient and race-free, and the behavior is
%% otherwise unchanged.
invalidate_bad_data_record({AbsoluteEndOffset, ChunkSize, StoreID, Type}) ->
	[{_, T}] = ets:lookup(ar_data_sync_state, disk_pool_threshold),
	case AbsoluteEndOffset > T of
		true ->
			%% Do not invalidate fresh records - a reorg may be in progress.
			ok;
		false ->
			invalidate_bad_data_record2({AbsoluteEndOffset, ChunkSize, StoreID, Type})
	end.
%% @doc Remove the sync records, mark the range as invalid_chunks, and delete
%% the chunk data and metadata for the chunk ending at AbsoluteEndOffset.
%% Failures are logged but not propagated.
invalidate_bad_data_record2({AbsoluteEndOffset, ChunkSize, StoreID, Type}) ->
	PaddedEndOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset),
	StartOffset = AbsoluteEndOffset - ChunkSize,
	?LOG_WARNING([{event, invalidating_bad_data_record}, {type, Type},
			{range_start, StartOffset}, {range_end, PaddedEndOffset},
			{store_id, StoreID}]),
	case remove_invalid_sync_records(PaddedEndOffset, StartOffset, StoreID) of
		ok ->
			%% Record the range as invalid so it can be re-synced later.
			ar_sync_record:add(PaddedEndOffset, StartOffset, invalid_chunks, StoreID),
			case delete_invalid_metadata(AbsoluteEndOffset, StoreID) of
				ok ->
					ok;
				Error2 ->
					?LOG_WARNING([{event, failed_to_remove_chunks_index_key},
							{absolute_end_offset, AbsoluteEndOffset},
							{error, io_lib:format("~p", [Error2])}])
			end;
		Error ->
			?LOG_WARNING([{event, failed_to_remove_sync_record_range},
					{range_end, PaddedEndOffset},
					{range_start, StartOffset},
					{error, io_lib:format("~p", [Error])}])
	end.

%% @doc Delete the range from every sync record that may cover it, stopping at
%% the first failure. The footprint record is deleted first; chunk-storage and
%% entropy records are only touched for full-sized (padded) chunks.
remove_invalid_sync_records(PaddedEndOffset, StartOffset, StoreID) ->
	Remove1 = ar_footprint_record:delete(PaddedEndOffset, StoreID),
	Remove2 =
		case Remove1 of
			ok ->
				ar_sync_record:delete(PaddedEndOffset, StartOffset, ar_data_sync,
						StoreID);
			Error ->
				Error
		end,
	%% Small chunks (less than a full data chunk after padding) are not stored
	%% in ar_chunk_storage, so the records below do not apply to them.
	IsSmallChunkBeforeThreshold = PaddedEndOffset - StartOffset < ?DATA_CHUNK_SIZE,
	Remove3 =
		case {Remove2, IsSmallChunkBeforeThreshold} of
			{ok, false} ->
				ar_sync_record:delete(PaddedEndOffset, StartOffset, ar_chunk_storage,
						StoreID);
			_ ->
				Remove2
		end,
	Remove4 =
		case {Remove3, IsSmallChunkBeforeThreshold} of
			{ok, false} ->
				ar_entropy_storage:delete_record(PaddedEndOffset, StartOffset, StoreID);
			_ ->
				Remove3
		end,
	case {Remove4, IsSmallChunkBeforeThreshold} of
		{ok, false} ->
			ar_sync_record:delete(PaddedEndOffset, StartOffset,
					ar_chunk_storage_replica_2_9_1_unpacked, StoreID);
		_ ->
			Remove4
	end.
%% @doc Delete the chunk data and the chunks-index entry for the chunk ending
%% at AbsoluteEndOffset, if any metadata exists.
delete_invalid_metadata(AbsoluteEndOffset, StoreID) ->
	case get_chunk_metadata(AbsoluteEndOffset, StoreID) of
		not_found ->
			ok;
		{ok, Metadata} ->
			{ChunkDataKey, _, _, _, _, _} = Metadata,
			delete_chunk_data(ChunkDataKey, StoreID),
			delete_chunk_metadata(AbsoluteEndOffset, StoreID)
	end.

%% @doc Validate the merkle proofs of a chunk read from local storage.
%% Returns {true, ChunkID} on success, {true, none} when validation is skipped
%% (offset above the disk pool threshold, or the node has not joined yet), or
%% false after logging and invalidating the bad record.
validate_fetched_chunk(Args) ->
	{Offset, DataPath, TXPath, TXRoot, ChunkSize, StoreID, RequestOrigin} = Args,
	[{_, T}] = ets:lookup(ar_data_sync_state, disk_pool_threshold),
	case Offset > T orelse not ar_node:is_joined() of
		true ->
			%% Miners should not be served disk-pool chunks; log when it happens.
			case RequestOrigin of
				miner ->
					log_chunk_error(RequestOrigin, miner_requested_disk_pool_chunk,
							[{disk_pool_threshold, T}, {end_offset, Offset}]);
				_ ->
					ok
			end,
			{true, none};
		false ->
			case ar_block_index:get_block_bounds(Offset - 1) of
				{BlockStart, BlockEnd, TXRoot} ->
					%% The stored TXRoot matches the block index; validate the
					%% tx and data paths against the block bounds.
					ChunkOffset = Offset - BlockStart - 1,
					case validate_proof2(TXRoot, TXPath, DataPath, BlockStart, BlockEnd,
							ChunkOffset, ChunkSize, RequestOrigin) of
						{true, ChunkID} ->
							{true, ChunkID};
						false ->
							log_chunk_error(RequestOrigin,
									failed_to_validate_chunk_proofs,
									[{absolute_end_offset, Offset},
									{store_id, StoreID}]),
							invalidate_bad_data_record({Offset, ChunkSize, StoreID,
									failed_to_validate_chunk_proofs}),
							false
					end;
				{_BlockStart, _BlockEnd, TXRoot2} ->
					%% The stored TXRoot disagrees with the block index entry.
					log_chunk_error(RequestOrigin, stored_chunk_invalid_tx_root,
							[{end_offset, Offset},
							{tx_root, ar_util:encode(TXRoot2)},
							{stored_tx_root, ar_util:encode(TXRoot)},
							{store_id, StoreID}]),
					invalidate_bad_data_record({Offset, ChunkSize, StoreID,
							stored_chunk_invalid_tx_root}),
					false
			end
	end.

%% @doc Look up the {AbsoluteEndOffset, TXSize} pair recorded for TXID.
get_tx_offset(TXIndex, TXID) ->
	case ar_kv:get(TXIndex, TXID) of
		{ok, Value} ->
			{ok, binary_to_term(Value, [safe])};
		not_found ->
			{error, not_found};
		{error, Reason} ->
			?LOG_ERROR([{event, failed_to_read_tx_offset},
					{reason, io_lib:format("~p", [Reason])},
					{tx, ar_util:encode(TXID)}]),
			{error, failed_to_read_offset}
	end.
%% @doc List the transactions whose data overlaps the [Start, End) range as
%% {TXID, StartOffset, EndOffset} triples. First rewinds Start to the beginning
%% of the transaction covering it, if any.
get_tx_offset_data_in_range(TXOffsetIndex, TXIndex, Start, End) ->
	case ar_kv:get_prev(TXOffsetIndex, << Start:?OFFSET_KEY_BITSIZE >>) of
		none ->
			get_tx_offset_data_in_range2(TXOffsetIndex, TXIndex, Start, End);
		{ok, << Start2:?OFFSET_KEY_BITSIZE >>, _} ->
			get_tx_offset_data_in_range2(TXOffsetIndex, TXIndex, Start2, End);
		Error ->
			Error
	end.

%% @doc Fold over the tx-offset index entries in [Start, End) and resolve each
%% TXID to its end offset and size, skipping transactions that end before the
%% original Start.
get_tx_offset_data_in_range2(TXOffsetIndex, TXIndex, Start, End) ->
	case ar_kv:get_range(TXOffsetIndex, << Start:?OFFSET_KEY_BITSIZE >>,
			<< (End - 1):?OFFSET_KEY_BITSIZE >>) of
		{ok, EmptyMap} when map_size(EmptyMap) == 0 ->
			{ok, []};
		{ok, Map} ->
			case maps:fold(
					fun (_, _Value, {error, _} = Error) ->
							%% Propagate the first error; skip further work.
							Error;
						(_, TXID, Acc) ->
							case get_tx_offset(TXIndex, TXID) of
								{ok, {EndOffset, Size}} ->
									case EndOffset =< Start of
										true ->
											Acc;
										false ->
											[{TXID, EndOffset - Size, EndOffset} | Acc]
									end;
								not_found ->
									Acc;
								Error ->
									Error
							end
					end,
					[],
					Map
				) of
				{error, _} = Error ->
					Error;
				List ->
					{ok, lists:reverse(List)}
			end;
		Error ->
			Error
	end.

%% @doc Assemble the transaction data in (Start, End] by fetching one chunk at
%% a time. Chunks accumulates as an iolist; a binary tail is a valid iolist.
get_tx_data(Start, End, Chunks, _Pack) when Start >= End ->
	{ok, iolist_to_binary(Chunks)};
get_tx_data(Start, End, Chunks, Pack) ->
	case get_chunk(Start + 1, #{ pack => Pack, packing => unpacked,
			bucket_based_offset => false, origin => tx_data }) of
		{ok, #{ chunk := Chunk }} ->
			get_tx_data(Start + byte_size(Chunk), End, [Chunks | Chunk], Pack);
		{error, chunk_not_found} ->
			{error, not_found};
		{error, Reason} ->
			?LOG_ERROR([{event, failed_to_get_tx_data},
					{reason, io_lib:format("~p", [Reason])}]),
			{error, failed_to_get_tx_data}
	end.
%% @doc Find the highest-offset data_root_index entry for the given
%% DataRoot/TXSize pair. The seek key appends <<"a">> so get_prev lands on the
%% largest key with the same data root and size prefix.
get_data_root_offset(DataRootKey, StoreID) ->
	<< DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >> = DataRootKey,
	DataRootIndex = {data_root_index, StoreID},
	case ar_kv:get_prev(DataRootIndex, << DataRoot:32/binary,
			(ar_serialize:encode_int(TXSize, 8))/binary, <<"a">>/binary >>) of
		none ->
			not_found;
		{ok, << DataRoot:32/binary, TXSizeSize:8, TXSize:(TXSizeSize * 8),
				OffsetSize:8, Offset:(OffsetSize * 8) >>, TXPath} ->
			{ok, {Offset, TXPath}};
		{ok, _, _} ->
			%% The previous key belongs to a different data root - no entry.
			not_found;
		{error, _} = Error ->
			Error
	end.

%% @doc Ask every storage module overlapping [Start, End] (plus the default
%% module) to remove the range, and reply {removed_range, Ref} to ReplyTo once
%% all of them have confirmed. A helper process collects the confirmations.
remove_range(Start, End, Ref, ReplyTo) ->
	ReplyFun =
		fun(Fun, StorageRefs) ->
			case sets:is_empty(StorageRefs) of
				true ->
					ReplyTo ! {removed_range, Ref},
					ar_events:send(sync_record, {global_remove_range, Start, End});
				false ->
					receive
						{removed_range, StorageRef} ->
							Fun(Fun, sets:del_element(StorageRef, StorageRefs))
					after 10000 ->
						?LOG_DEBUG([{event,
								waiting_for_data_range_removal_longer_than_ten_seconds}]),
						Fun(Fun, StorageRefs)
					end
			end
		end,
	StorageModules = ar_storage_module:get_all(Start, End),
	StoreIDs = [?DEFAULT_MODULE | [ar_storage_module:id(M) || M <- StorageModules]],
	RefL = [make_ref() || _ <- StoreIDs],
	PID = spawn(fun() -> ReplyFun(ReplyFun, sets:from_list(RefL)) end),
	lists:foreach(
		fun({StoreID, R}) ->
			gen_server:cast(name(StoreID), {remove_range, End, Start + 1, R, PID})
		end,
		lists:zip(StoreIDs, RefL)
	).

%% @doc Open the RocksDB databases used by the module for the given StoreID and
%% return the initial #sync_data_state{} with the database handles.
init_kv(StoreID) ->
	BasicOpts = [{max_open_files, 10000}],
	BloomFilterOpts = [
		{block_based_table_options, [
			{cache_index_and_filter_blocks, true}, % Keep bloom filters in memory.
			{bloom_filter_policy, 10} % ~1% false positive probability.
		]},
		{optimize_filters_for_hits, true}
	],
	PrefixBloomFilterOpts = BloomFilterOpts ++ [
		{prefix_extractor, {capped_prefix_transform,
				?OFFSET_KEY_PREFIX_BITSIZE div 8}}],
	ColumnFamilyDescriptors = [
		{"default", BasicOpts},
		{"chunks_index", BasicOpts ++ PrefixBloomFilterOpts},
		{"data_root_index", BasicOpts ++ BloomFilterOpts},
		{"data_root_offset_index", BasicOpts},
		{"tx_index", BasicOpts ++ BloomFilterOpts},
		{"tx_offset_index", BasicOpts},
		{"disk_pool_chunks_index", BasicOpts ++ BloomFilterOpts},
		{"migrations_index", BasicOpts}
	],
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	Dir =
		case StoreID of
			?DEFAULT_MODULE ->
				filename:join(DataDir, ?ROCKS_DB_DIR);
			_ ->
				filename:join([DataDir, "storage_modules", StoreID, ?ROCKS_DB_DIR])
		end,
	ok = ar_kv:open(#{ path => filename:join(Dir, "ar_data_sync_db"),
			cf_descriptors => ColumnFamilyDescriptors,
			cf_names => [{ar_data_sync, StoreID}, {chunks_index, StoreID},
					{data_root_index_old, StoreID}, {data_root_offset_index, StoreID},
					{tx_index, StoreID}, {tx_offset_index, StoreID},
					{disk_pool_chunks_index_old, StoreID},
					{migrations_index, StoreID}]}),
	ok = ar_kv:open(#{ path => filename:join(Dir, "ar_data_sync_chunk_db"),
			name => {chunk_data_db, StoreID},
			options => [{max_open_files, 10000}, {max_background_compactions, 8},
					{write_buffer_size, 256 * ?MiB}, % 256 MiB per memtable.
					{target_file_size_base, 256 * ?MiB}, % 256 MiB per SST file.
					%% 10 files in L1 to make L1 == L0 as recommended by the
					%% RocksDB guide https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide.
					{max_bytes_for_level_base, 10 * 256 * ?MiB}]}),
	ok = ar_kv:open(#{
			path => filename:join(Dir, "ar_data_sync_disk_pool_chunks_index_db"),
			name => {disk_pool_chunks_index, StoreID},
			options => [{max_open_files, 1000}, {max_background_compactions, 8},
					{write_buffer_size, 256 * ?MiB}, % 256 MiB per memtable.
					{target_file_size_base, 256 * ?MiB}, % 256 MiB per SST file.
					%% 10 files in L1 to make L1 == L0 as recommended by the
					%% RocksDB guide https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide.
					{max_bytes_for_level_base, 10 * 256 * ?MiB}] ++ BloomFilterOpts}),
	ok = ar_kv:open(#{
			path => filename:join(Dir, "ar_data_sync_data_root_index_db"),
			name => {data_root_index, StoreID},
			options => [{max_open_files, 100}, {max_background_compactions, 8},
					{write_buffer_size, 256 * ?MiB}, % 256 MiB per memtable.
					{target_file_size_base, 256 * ?MiB}, % 256 MiB per SST file.
					%% 10 files in L1 to make L1 == L0 as recommended by the
					%% RocksDB guide https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide.
					{max_bytes_for_level_base, 10 * 256 * ?MiB}] ++ BloomFilterOpts}),
	#sync_data_state{
		chunks_index = {chunks_index, StoreID},
		data_root_index = {data_root_index, StoreID},
		data_root_index_old = {data_root_index_old, StoreID},
		data_root_offset_index = {data_root_offset_index, StoreID},
		chunk_data_db = {chunk_data_db, StoreID},
		tx_index = {tx_index, StoreID},
		tx_offset_index = {tx_offset_index, StoreID},
		disk_pool_chunks_index = {disk_pool_chunks_index, StoreID},
		disk_pool_chunks_index_old = {disk_pool_chunks_index_old, StoreID},
		migrations_index = {migrations_index, StoreID}
	}.

%% @doc Migrate every entry from the old disk pool chunks index to the new one.
move_disk_pool_index(State) ->
	move_disk_pool_index(first, State).

%% @doc Copy-then-delete each key starting at Cursor; recurses until the old
%% index is empty.
move_disk_pool_index(Cursor, State) ->
	#sync_data_state{ disk_pool_chunks_index_old = Old,
			disk_pool_chunks_index = New } = State,
	case ar_kv:get_next(Old, Cursor) of
		none ->
			ok;
		{ok, Key, Value} ->
			ok = ar_kv:put(New, Key, Value),
			ok = ar_kv:delete(Old, Key),
			move_disk_pool_index(Key, State)
	end.
%% @doc Migrate the old data root index to the v2 key format. Resumes from the
%% cursor persisted in the migrations index, or starts from the last key when
%% no migration state exists.
move_data_root_index(#sync_data_state{ migrations_index = MI,
		data_root_index_old = DI } = State) ->
	case ar_kv:get(MI, <<"move_data_root_index">>) of
		{ok, <<"complete">>} ->
			ets:insert(ar_data_sync_state, {move_data_root_index_migration_complete}),
			ok;
		{ok, Cursor} ->
			move_data_root_index(Cursor, 1, State);
		not_found ->
			case ar_kv:get_next(DI, last) of
				none ->
					ets:insert(ar_data_sync_state,
							{move_data_root_index_migration_complete}),
					ok;
				{ok, Key, _} ->
					move_data_root_index(Key, 1, State)
			end
	end.

%% @doc Walk the old index backwards from Cursor, re-keying each entry. Every
%% 50000 keys the cursor is persisted and the work is re-scheduled via a cast
%% so the gen_server can process other messages.
move_data_root_index(Cursor, N, State) ->
	#sync_data_state{ migrations_index = MI, data_root_index_old = Old,
			data_root_index = New } = State,
	case N rem 50000 of
		0 ->
			?LOG_DEBUG([{event, moving_data_root_index}, {moved_keys, N}]),
			ok = ar_kv:put(MI, <<"move_data_root_index">>, Cursor),
			gen_server:cast(self(), {move_data_root_index, Cursor, N + 1});
		_ ->
			case ar_kv:get_prev(Old, Cursor) of
				none ->
					ok = ar_kv:put(MI, <<"move_data_root_index">>, <<"complete">>),
					ets:insert(ar_data_sync_state,
							{move_data_root_index_migration_complete}),
					ok;
				{ok, << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>, Value} ->
					M = binary_to_term(Value, [safe]),
					move_data_root_index(DataRoot, TXSize, data_root_index_iterator(M),
							New),
					PrevKey = << DataRoot:32/binary,
							(TXSize - 1):?OFFSET_KEY_BITSIZE >>,
					move_data_root_index(PrevKey, N + 1, State);
				{ok, Key, _} ->
					%% The empty data root key (from transactions without data) was
					%% unnecessarily recorded in the index.
					PrevKey = binary:part(Key, 0, byte_size(Key) - 1),
					move_data_root_index(PrevKey, N + 1, State)
			end
	end.

%% @doc Write one v2 key per {Offset, TXRoot, TXPath} entry produced by the
%% old-format iterator.
move_data_root_index(DataRoot, TXSize, Iterator, DB) ->
	case data_root_index_next(Iterator, infinity) of
		none ->
			ok;
		{{Offset, _TXRoot, TXPath}, Iterator2} ->
			Key = data_root_key_v2(DataRoot, TXSize, Offset),
			ok = ar_kv:put(DB, Key, TXPath),
			move_data_root_index(DataRoot, TXSize, Iterator2, DB)
	end.
%% @doc Build the v2 data root index key: the 32-byte data root followed by the
%% length-prefixed encodings of the transaction size and the offset.
data_root_key_v2(DataRoot, TXSize, Offset) ->
	EncodedTXSize = ar_serialize:encode_int(TXSize, 8),
	EncodedOffset = ar_serialize:encode_int(Offset, 8),
	<< DataRoot:32/binary, EncodedTXSize/binary, EncodedOffset/binary >>.

%% @doc Publish the number of disk pool chunks to the Prometheus gauge;
%% log a warning when the count cannot be read.
record_disk_pool_chunks_count() ->
	Result = ar_kv:count({disk_pool_chunks_index, ?DEFAULT_MODULE}),
	case is_integer(Result) of
		true ->
			prometheus_gauge:set(disk_pool_chunks_count, Result);
		false ->
			?LOG_WARNING([{event, failed_to_read_disk_pool_chunks_count},
					{error, io_lib:format("~p", [Result])}])
	end.

%% @doc Load the persisted sync state, deriving weave_size from the most recent
%% block index entry and falling back to a computed disk pool threshold when
%% none was stored. Returns a fresh default state when nothing is on disk.
read_data_sync_state() ->
	case ar_storage:read_term(data_sync_state) of
		{ok, #{ block_index := RecentBI } = StoredState} ->
			WeaveSize =
				case RecentBI of
					[] ->
						0;
					[Head | _] ->
						element(2, Head)
				end,
			Threshold = maps:get(disk_pool_threshold, StoredState,
					get_disk_pool_threshold(RecentBI)),
			StoredState#{ weave_size => WeaveSize, disk_pool_threshold => Threshold };
		not_found ->
			#{ block_index => [],
					disk_pool_data_roots => #{},
					disk_pool_size => 0,
					weave_size => 0,
					packing_2_5_threshold => infinity,
					disk_pool_threshold => 0 }
	end.

%% @doc Reset every data root's accumulated size to zero and recompute the
%% total disk pool size by scanning the disk pool chunks index.
recalculate_disk_pool_size(DataRootMap, State) ->
	#sync_data_state{ disk_pool_chunks_index = Index } = State,
	ResetSize = fun(_DataRootKey, {_Size, Timestamp, TXIDSet}) ->
		{0, Timestamp, TXIDSet}
	end,
	recalculate_disk_pool_size(Index, maps:map(ResetSize, DataRootMap), first, 0).
%% @doc Scan the disk pool chunks index from Cursor, accumulating the total
%% size and per-data-root sizes. On completion, publish the total to the
%% Prometheus gauge and write the results to the ETS tables.
recalculate_disk_pool_size(Index, DataRootMap, Cursor, Sum) ->
	case ar_kv:get_next(Index, Cursor) of
		none ->
			prometheus_gauge:set(pending_chunks_size, Sum),
			maps:map(fun(DataRootKey, V) ->
					ets:insert(ar_disk_pool_data_roots, {DataRootKey, V}) end,
					DataRootMap),
			ets:insert(ar_data_sync_state, {disk_pool_size, Sum});
		{ok, Key, Value} ->
			DecodedValue = binary_to_term(Value, [safe]),
			ChunkSize = element(2, DecodedValue),
			DataRoot = element(3, DecodedValue),
			TXSize = element(4, DecodedValue),
			DataRootKey = << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>,
			DataRootMap2 =
				case maps:get(DataRootKey, DataRootMap, not_found) of
					not_found ->
						DataRootMap;
					{Size, Timestamp, TXIDSet} ->
						maps:put(DataRootKey, {Size + ChunkSize, Timestamp, TXIDSet},
								DataRootMap)
				end,
			%% Append <<"a">> so the next get_next starts strictly after Key.
			Cursor2 = << Key/binary, <<"a">>/binary >>,
			recalculate_disk_pool_size(Index, DataRootMap2, Cursor2, Sum + ChunkSize)
	end.

%% @doc Compute the disk pool threshold from the block index; 0 for an empty
%% index.
get_disk_pool_threshold([]) ->
	0;
get_disk_pool_threshold(BI) ->
	ar_node:get_partition_upper_bound(BI).

%% @doc Remove all orphaned index data in [BlockStartOffset, WeaveSize) after a
%% reorg. Returns {ok, OrphanedDataRoots} so the caller can reset their disk
%% pool timestamps.
remove_orphaned_data(State, BlockStartOffset, WeaveSize) ->
	ok = remove_tx_index_range(BlockStartOffset, WeaveSize, State),
	{ok, OrphanedDataRoots} = remove_data_root_index_range(BlockStartOffset, WeaveSize,
			State),
	ok = remove_data_root_offset_index_range(BlockStartOffset, WeaveSize, State),
	ok = delete_chunk_metadata_range(BlockStartOffset, WeaveSize, State),
	{ok, OrphanedDataRoots}.

%% @doc Delete the tx index entries for every transaction in [Start, End) and
%% notify the blacklist about the orphaned transactions, then drop the
%% corresponding tx offset index range.
remove_tx_index_range(Start, End, State) ->
	#sync_data_state{ tx_offset_index = TXOffsetIndex, tx_index = TXIndex } = State,
	ok = case ar_kv:get_range(TXOffsetIndex, << Start:?OFFSET_KEY_BITSIZE >>,
			<< (End - 1):?OFFSET_KEY_BITSIZE >>) of
		{ok, EmptyMap} when map_size(EmptyMap) == 0 ->
			ok;
		{ok, Map} ->
			maps:fold(
				fun (_, _Value, {error, _} = Error) ->
						Error;
					(_, TXID, ok) ->
						ar_kv:delete(TXIndex, TXID),
						%% Fixed typo: was norify_about_orphaned_tx, which is not
						%% a function exported by ar_tx_blacklist and would crash
						%% with undef at runtime.
						ar_tx_blacklist:notify_about_orphaned_tx(TXID)
				end,
				ok,
				Map
			);
		Error ->
			Error
	end,
	ar_kv:delete_range(TXOffsetIndex, << Start:?OFFSET_KEY_BITSIZE >>,
			<< End:?OFFSET_KEY_BITSIZE >>).
%% @doc Remove the data root index entries referenced by the data root offset
%% index in [Start, End). Returns {ok, RemovedDataRoots} - the set of data root
%% keys that no longer have any entries left - or the first error encountered.
remove_data_root_index_range(Start, End, State) ->
	#sync_data_state{ data_root_offset_index = DataRootOffsetIndex,
			data_root_index = DataRootIndex } = State,
	case ar_kv:get_range(DataRootOffsetIndex, << Start:?OFFSET_KEY_BITSIZE >>,
			<< (End - 1):?OFFSET_KEY_BITSIZE >>) of
		{ok, EmptyMap} when map_size(EmptyMap) == 0 ->
			{ok, sets:new()};
		{ok, Map} ->
			maps:fold(
				fun (_, _Value, {error, _} = Error) ->
						Error;
					(_, Value, {ok, RemovedDataRoots}) ->
						{_TXRoot, _BlockSize, DataRootIndexKeySet} =
								binary_to_term(Value, [safe]),
						sets:fold(
							fun (_Key, {error, _} = Error) ->
									Error;
								(<< _DataRoot:32/binary,
										_TXSize:?OFFSET_KEY_BITSIZE >> = Key,
										{ok, Removed}) ->
									case remove_data_root(DataRootIndex, Key, Start,
											End) of
										removed ->
											{ok, sets:add_element(Key, Removed)};
										ok ->
											%% Entries for this root remain
											%% outside the removed range.
											{ok, Removed};
										Error ->
											Error
									end;
								(_, Acc) ->
									%% Skip malformed keys.
									Acc
							end,
							{ok, RemovedDataRoots},
							DataRootIndexKeySet
						)
				end,
				{ok, sets:new()},
				Map
			);
		Error ->
			Error
	end.

%% @doc Delete the data root index entries for the given root in [Start, End).
%% Returns removed when no entries for the root remain afterwards, ok when
%% some do, or an error.
remove_data_root(DataRootIndex, DataRootKey, Start, End) ->
	<< DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >> = DataRootKey,
	StartKey = data_root_key_v2(DataRoot, TXSize, Start),
	EndKey = data_root_key_v2(DataRoot, TXSize, End),
	case ar_kv:delete_range(DataRootIndex, StartKey, EndKey) of
		ok ->
			%% Check whether any key with the same root/size prefix survives.
			case ar_kv:get_prev(DataRootIndex, StartKey) of
				{ok, << DataRoot:32/binary, TXSizeSize:8, TXSize:(TXSizeSize * 8),
						_Rest/binary >>, _} ->
					ok;
				{ok, _, _} ->
					removed;
				none ->
					removed;
				{error, _} = Error ->
					Error
			end;
		Error ->
			Error
	end.

%% @doc Drop the data root offset index entries in [Start, End).
remove_data_root_offset_index_range(Start, End, State) ->
	#sync_data_state{ data_root_offset_index = DataRootOffsetIndex } = State,
	ar_kv:delete_range(DataRootOffsetIndex, << Start:?OFFSET_KEY_BITSIZE >>,
			<< End:?OFFSET_KEY_BITSIZE >>).
%% @doc One-time repair: scan the data root offset index against the block
%% index, remove ranges that do not match, and ask ar_header_sync to re-fetch
%% the affected blocks. A marker in the migrations index makes this run once.
repair_data_root_offset_index(BI, State) ->
	#sync_data_state{ migrations_index = DB } = State,
	case ar_kv:get(DB, <<"repair_data_root_offset_index">>) of
		not_found ->
			?LOG_INFO([{event, starting_data_root_offset_index_scan}]),
			ReverseBI = lists:reverse(BI),
			ResyncBlocks = repair_data_root_offset_index(ReverseBI, <<>>, 0, [], State),
			[ar_header_sync:remove_block(Height) || Height <- ResyncBlocks],
			ok = ar_kv:put(DB, <<"repair_data_root_offset_index">>, <<>>),
			?LOG_INFO([{event, data_root_offset_index_scan_complete}]);
		_ ->
			ok
	end.

%% @doc Walk the data root offset index from Cursor, advancing through the
%% (oldest-first) block index in lockstep. Mismatched entries have their tx,
%% data root, and offset index ranges removed; the heights to re-sync are
%% accumulated and returned.
repair_data_root_offset_index(BI, Cursor, Height, ResyncBlocks, State) ->
	#sync_data_state{ data_root_offset_index = DRI } = State,
	case ar_kv:get_next(DRI, Cursor) of
		none ->
			ResyncBlocks;
		{ok, Key, Value} ->
			<< BlockStart:?OFFSET_KEY_BITSIZE >> = Key,
			{TXRoot, BlockSize, _DataRootKeys} = binary_to_term(Value, [safe]),
			BlockEnd = BlockStart + BlockSize,
			case shift_block_index(TXRoot, BlockStart, BlockEnd, Height, ResyncBlocks,
					BI) of
				{ok, {Height2, BI2}} ->
					%% Entry matches the block index; continue past it.
					Cursor2 = << (BlockStart + 1):?OFFSET_KEY_BITSIZE >>,
					repair_data_root_offset_index(BI2, Cursor2, Height2, ResyncBlocks,
							State);
				{bad_key, []} ->
					ResyncBlocks;
				{bad_key, ResyncBlocks2} ->
					?LOG_INFO([{event, removing_data_root_index_range},
							{range_start, BlockStart}, {range_end, BlockEnd}]),
					ok = remove_tx_index_range(BlockStart, BlockEnd, State),
					{ok, _} = remove_data_root_index_range(BlockStart, BlockEnd, State),
					ok = remove_data_root_offset_index_range(BlockStart, BlockEnd,
							State),
					%% Re-examine the same cursor with the updated resync list.
					repair_data_root_offset_index(BI, Cursor, Height, ResyncBlocks2,
							State)
			end
	end.
%% @doc Advance through the (oldest-first) block index until the entry whose
%% weave size equals BlockEnd is found. Returns {ok, {NextHeight, RestOfBI}}
%% when the entry also carries the expected TXRoot, or {bad_key, HeightsToResync}
%% when the index disagrees or is exhausted.
shift_block_index(_TXRoot, _BlockStart, _BlockEnd, _Height, Resync, []) ->
	{bad_key, Resync};
shift_block_index(TXRoot, BlockStart, BlockEnd, Height, Resync,
		[{_Hash, WeaveSize, _Root} | Rest]) when BlockEnd > WeaveSize ->
	%% The current block index entry ends before BlockEnd; if the scanned
	%% range overlaps it, mark its height for re-sync, then keep advancing.
	Resync2 =
		if
			BlockStart < WeaveSize -> [Height | Resync];
			true -> Resync
		end,
	shift_block_index(TXRoot, BlockStart, BlockEnd, Height + 1, Resync2, Rest);
shift_block_index(TXRoot, _BlockStart, WeaveSize, Height, _Resync,
		[{_Hash, WeaveSize, TXRoot} | Rest]) ->
	%% Both the weave size and the tx root match - the entry is valid.
	{ok, {Height + 1, Rest}};
shift_block_index(_TXRoot, _BlockStart, _WeaveSize, Height, Resync, _BI) ->
	{bad_key, [Height | Resync]}.

%% @doc Record the data roots and transaction offsets of a block, provided the
%% block matches the block index entry at its height and has not been recorded
%% for this StoreID yet.
add_block(B, SizeTaggedTXs, StoreID) ->
	#block{ indep_hash = H, weave_size = WeaveSize, tx_root = TXRoot,
			height = Height, block_size = BlockSize } = B,
	case ar_block_index:get_element_by_height(Height) of
		{H, WeaveSize, TXRoot} ->
			BlockStart = WeaveSize - BlockSize,
			Key = << BlockStart:?OFFSET_KEY_BITSIZE >>,
			case ar_kv:get({data_root_offset_index, StoreID}, Key) of
				not_found ->
					{ok, _} = add_block_data_roots(SizeTaggedTXs, BlockStart, StoreID),
					ok = update_tx_index(SizeTaggedTXs, BlockStart, StoreID),
					ok;
				_ ->
					%% Already recorded for this store.
					ok
			end;
		_ ->
			%% The block is not part of the current index - nothing to do.
			ok
	end.
%% @doc Record the absolute offset of every transaction in the block in the
%% tx_offset_index and tx_index, emit registered_offset events, and notify the
%% blacklist. Put failures are logged but do not abort the fold. Always
%% returns ok.
update_tx_index([], _BlockStartOffset, _StoreID) ->
	ok;
update_tx_index(SizeTaggedTXs, BlockStartOffset, StoreID) ->
	lists:foldl(
		fun ({_, Offset}, Offset) ->
				%% Zero-size entry (end offset equals the running offset) - skip.
				Offset;
			({{padding, _}, Offset}, _) ->
				%% Padding carries no transaction - just advance the offset.
				Offset;
			({{TXID, _}, TXEndOffset}, PreviousOffset) ->
				AbsoluteEndOffset = BlockStartOffset + TXEndOffset,
				TXSize = TXEndOffset - PreviousOffset,
				AbsoluteStartOffset = AbsoluteEndOffset - TXSize,
				case ar_kv:put({tx_offset_index, StoreID},
						<< AbsoluteStartOffset:?OFFSET_KEY_BITSIZE >>, TXID) of
					ok ->
						case ar_kv:put({tx_index, StoreID}, TXID,
								term_to_binary({AbsoluteEndOffset, TXSize})) of
							ok ->
								ar_events:send(tx, {registered_offset, TXID,
										AbsoluteEndOffset, TXSize}),
								ar_tx_blacklist:notify_about_added_tx(TXID,
										AbsoluteEndOffset, AbsoluteStartOffset),
								TXEndOffset;
							{error, Reason} ->
								?LOG_ERROR([{event, failed_to_update_tx_index},
										{reason, io_lib:format("~p", [Reason])},
										{tx, ar_util:encode(TXID)}]),
								TXEndOffset
						end;
					{error, Reason} ->
						?LOG_ERROR([{event, failed_to_update_tx_offset_index},
								{reason, io_lib:format("~p", [Reason])},
								{tx, ar_util:encode(TXID)}]),
						TXEndOffset
				end
		end,
		0,
		SizeTaggedTXs
	),
	ok.
%% @doc Record the data roots of a block starting at CurrentWeaveSize: write a
%% data_root_offset_index entry for the block and a data_root_index entry per
%% transaction with data. Returns {ok, DataRootIndexKeySet}.
add_block_data_roots([], _CurrentWeaveSize, _StoreID) ->
	{ok, sets:new()};
add_block_data_roots(SizeTaggedTXs, CurrentWeaveSize, StoreID) ->
	SizeTaggedDataRoots = [{Root, Offset} || {{_, Root}, Offset} <- SizeTaggedTXs],
	{TXRoot, TXTree} = ar_merkle:generate_tree(SizeTaggedDataRoots),
	%% Fold threads the previous end offset to derive each transaction's size;
	%% padding entries and short (non-32-byte) data roots only advance it.
	{BlockSize, DataRootIndexKeySet, Args} = lists:foldl(
		fun ({_, Offset}, {Offset, _, _} = Acc) ->
				Acc;
			({{padding, _}, Offset}, {_, Acc1, Acc2}) ->
				{Offset, Acc1, Acc2};
			({{_, DataRoot}, Offset}, {_, Acc1, Acc2}) when byte_size(DataRoot) < 32 ->
				{Offset, Acc1, Acc2};
			({{_, DataRoot}, TXEndOffset},
					{PrevOffset, CurrentDataRootSet, CurrentArgs}) ->
				TXPath = ar_merkle:generate_path(TXRoot, TXEndOffset - 1, TXTree),
				TXOffset = CurrentWeaveSize + PrevOffset,
				TXSize = TXEndOffset - PrevOffset,
				DataRootKey = << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>,
				{TXEndOffset, sets:add_element(DataRootKey, CurrentDataRootSet),
						[{DataRoot, TXSize, TXOffset, TXPath} | CurrentArgs]}
		end,
		{0, sets:new(), []},
		SizeTaggedTXs
	),
	case BlockSize > 0 of
		true ->
			ok = ar_kv:put({data_root_offset_index, StoreID},
					<< CurrentWeaveSize:?OFFSET_KEY_BITSIZE >>,
					term_to_binary({TXRoot, BlockSize, DataRootIndexKeySet})),
			lists:foreach(
				fun({DataRoot, TXSize, TXOffset, TXPath}) ->
					ok = update_data_root_index(DataRoot, TXSize, TXOffset, TXPath,
							StoreID)
				end,
				Args
			);
		false ->
			do_not_update_data_root_offset_index
	end,
	{ok, DataRootIndexKeySet}.

%% @doc Store the TXPath under the v2 data root index key.
update_data_root_index(DataRoot, TXSize, AbsoluteTXStartOffset, TXPath, StoreID) ->
	ar_kv:put({data_root_index, StoreID},
			data_root_key_v2(DataRoot, TXSize, AbsoluteTXStartOffset), TXPath).

%% @doc Insert the data roots into the disk pool table, clearing any recorded
%% TXID set but keeping an existing entry's size and timeout. The fold
%% accumulator is a microsecond timestamp incremented per key so new entries
%% get distinct timestamps.
add_block_data_roots_to_disk_pool(DataRootKeySet) ->
	sets:fold(
		fun(R, T) ->
			case ets:lookup(ar_disk_pool_data_roots, R) of
				[] ->
					ets:insert(ar_disk_pool_data_roots, {R, {0, T, not_set}});
				[{_, {Size, Timeout, _}}] ->
					ets:insert(ar_disk_pool_data_roots, {R, {Size, Timeout, not_set}})
			end,
			T + 1
		end,
		os:system_time(microsecond),
		DataRootKeySet
	).
%% @doc Refresh the disk pool timestamps of the given (orphaned) data roots so
%% they do not expire prematurely; entries keep their size and TXID set. The
%% fold accumulator is a microsecond timestamp incremented per key.
reset_orphaned_data_roots_disk_pool_timestamps(DataRootKeySet) ->
	sets:fold(
		fun(R, T) ->
			case ets:lookup(ar_disk_pool_data_roots, R) of
				[] ->
					ets:insert(ar_disk_pool_data_roots, {R, {0, T, not_set}});
				[{_, {Size, _, TXIDSet}}] ->
					ets:insert(ar_disk_pool_data_roots, {R, {Size, T, TXIDSet}})
			end,
			T + 1
		end,
		os:system_time(microsecond),
		DataRootKeySet
	).

%% @doc Persist the sync state (block index plus the current disk pool data
%% roots) to disk. Only the default store performs the dump; other stores are
%% a no-op. A full disk is logged and tolerated. Returns State unchanged.
store_sync_state(#sync_data_state{ store_id = ?DEFAULT_MODULE } = State) ->
	#sync_data_state{ block_index = BI } = State,
	DiskPoolDataRoots = ets:foldl(
			fun({DataRootKey, V}, Acc) -> maps:put(DataRootKey, V, Acc) end, #{},
			ar_disk_pool_data_roots),
	StoredState = #{ block_index => BI, disk_pool_data_roots => DiskPoolDataRoots,
			%% Storing it for backwards-compatibility.
			strict_data_split_threshold => ar_block:strict_data_split_threshold() },
	case ar_storage:write_term(data_sync_state, StoredState) of
		{error, enospc} ->
			?LOG_WARNING([{event, failed_to_dump_state}, {reason, disk_full},
					{store_id, ?DEFAULT_MODULE}]),
			ok;
		ok ->
			ok
	end,
	State;
store_sync_state(State) ->
	State.

%% @doc Look to StoreID to find data that TargetStoreID is missing.
%% Args:
%%   StoreID - The ID of the storage module to sync to (this module is missing data)
%%   OtherStoreID - The ID of the storage module to sync from (this module might have the data)
%%   RangeStart - The start offset of the range to check
%%   RangeEnd - The end offset of the range to check
get_unsynced_intervals_from_other_storage_modules(StoreID, OtherStoreID, RangeStart,
		RangeEnd) ->
	get_unsynced_intervals_from_other_storage_modules(StoreID, OtherStoreID, RangeStart,
			RangeEnd, []).
%% @doc Walk [RangeStart, RangeEnd), collecting the sub-ranges that StoreID has
%% not synced but OtherStoreID has, as {OtherStoreID, {Start, End}} tuples.
get_unsynced_intervals_from_other_storage_modules(_StoreID, _OtherStoreID, RangeStart,
		RangeEnd, Intervals) when RangeStart >= RangeEnd ->
	Intervals;
get_unsynced_intervals_from_other_storage_modules(StoreID, OtherStoreID, RangeStart,
		RangeEnd, Intervals) ->
	%% Determine the next gap in StoreID's own sync record.
	FindNextMissing =
		case ar_sync_record:get_next_synced_interval(RangeStart, RangeEnd, ar_data_sync,
				StoreID) of
			not_found ->
				{request, {RangeStart, RangeEnd}};
			{End, Start} when Start =< RangeStart ->
				%% Already synced up to End - skip past it.
				{skip, End};
			{_End, Start} ->
				{request, {RangeStart, Start}}
		end,
	case FindNextMissing of
		{skip, End2} ->
			get_unsynced_intervals_from_other_storage_modules(StoreID, OtherStoreID,
					End2, RangeEnd, Intervals);
		{request, {Cursor, RightBound}} ->
			%% See which part of the gap OtherStoreID can provide.
			case ar_sync_record:get_next_synced_interval(Cursor, RightBound,
					ar_data_sync, OtherStoreID) of
				not_found ->
					get_unsynced_intervals_from_other_storage_modules(StoreID,
							OtherStoreID, RightBound, RangeEnd, Intervals);
				{End2, Start2} ->
					Start3 = max(Start2, Cursor),
					Intervals2 = [{OtherStoreID, {Start3, End2}} | Intervals],
					get_unsynced_intervals_from_other_storage_modules(StoreID,
							OtherStoreID, End2, RangeEnd, Intervals2)
			end
	end.

%% @doc Enqueue up to ChunksToEnqueue chunks per peer entry into the sync
%% queue Q, tracking the covered ranges in QIntervals.
enqueue_intervals([], _ChunksToEnqueue, {Q, QIntervals}) ->
	{Q, QIntervals};
enqueue_intervals([{Peer, Intervals, FootprintKey} | Rest], ChunksToEnqueue,
		{Q, QIntervals}) ->
	{Q2, QIntervals2} = enqueue_peer_intervals(Peer, Intervals, FootprintKey,
			ChunksToEnqueue, {Q, QIntervals}),
	enqueue_intervals(Rest, ChunksToEnqueue, {Q2, QIntervals2}).

%% @doc Enqueue chunk offsets from Peer's intervals that are not already
%% queued, up to the ChunksToEnqueue budget.
enqueue_peer_intervals(Peer, Intervals, FootprintKey, ChunksToEnqueue, {Q, QIntervals}) ->
	%% Only keep unique intervals. We may get some duplicates for two
	%% reasons:
	%% 1) find_peer_intervals might choose the same interval several
	%%    times in a row even when there are other unsynced intervals
	%%    to pick because it is probabilistic.
	%% 2) We ask many peers simultaneously about the same interval
	%%    to make finding of the relatively rare intervals quicker.
	OuterJoin = ar_intervals:outerjoin(QIntervals, Intervals),
	{_, {Q2, QIntervals2}} = ar_intervals:fold(
		fun (_, {0, {QAcc, QIAcc}}) ->
				%% Budget exhausted.
				{0, {QAcc, QIAcc}};
			({End, Start}, {ChunksToEnqueue2, {QAcc, QIAcc}}) ->
				RangeEnd = min(End, Start + (ChunksToEnqueue2 * ?DATA_CHUNK_SIZE)),
				ChunkOffsets = lists:seq(Start, RangeEnd - 1, ?DATA_CHUNK_SIZE),
				ChunksEnqueued = length(ChunkOffsets),
				{ChunksToEnqueue2 - ChunksEnqueued,
						enqueue_peer_range(Peer, FootprintKey, Start, RangeEnd,
								ChunkOffsets, {QAcc, QIAcc})}
		end,
		{ChunksToEnqueue, {Q, QIntervals}},
		OuterJoin
	),
	{Q2, QIntervals2}.

%% @doc Add each chunk of the range to the queue (a gb_set) and record the
%% whole range in QIntervals.
enqueue_peer_range(Peer, FootprintKey, RangeStart, RangeEnd, ChunkOffsets,
		{Q, QIntervals}) ->
	Q2 = lists:foldl(
		fun(ChunkStart, QAcc) ->
			gb_sets:add_element(
					{FootprintKey, ChunkStart,
							min(ChunkStart + ?DATA_CHUNK_SIZE, RangeEnd), Peer},
					QAcc)
		end,
		Q,
		ChunkOffsets
	),
	QIntervals2 = ar_intervals:add(QIntervals, RangeEnd, RangeStart),
	{Q2, QIntervals2}.

%% @doc Schedule unpacking of a chunk fetched from a peer via the packing
%% server, unless one is already pending for this offset or the packing
%% buffer is full (in which case the cast is retried after one second).
unpack_fetched_chunk(Cast, AbsoluteEndOffset, ChunkArgs, Args, State) ->
	#sync_data_state{ packing_map = PackingMap } = State,
	case maps:is_key({AbsoluteEndOffset, unpacked}, PackingMap) of
		true ->
			decrement_chunk_cache_size(),
			{noreply, State};
		false ->
			case ar_packing_server:is_buffer_full() of
				true ->
					ar_util:cast_after(1000, self(), Cast),
					{noreply, State};
				false ->
					ar_packing_server:request_unpack({AbsoluteEndOffset, unpacked},
							ChunkArgs),
					{noreply, State#sync_data_state{
							packing_map = PackingMap#{
								{AbsoluteEndOffset, unpacked} =>
										{unpack_fetched_chunk, Args} } }}
			end
	end.
%% @doc Validate the chunk proof fetched from a peer against the given seek byte.
%% Returns {true, ChunkProof} for a valid unpacked chunk, false when invalid, or
%% {need_unpacking, AbsoluteEndOffset, ChunkProof} when the chunk must be unpacked
%% before its identifier can be checked.
validate_proof(SeekByte, Proof) ->
	#{ data_path := DataPath, tx_path := TXPath, chunk := Chunk,
			packing := Packing } = Proof,
	ChunkMetadata = #chunk_metadata{
		tx_path = TXPath,
		data_path = DataPath
	},
	ChunkProof = ar_poa:chunk_proof(ChunkMetadata, SeekByte,
			get_merkle_rebase_threshold()),
	case ar_poa:validate_paths(ChunkProof) of
		{false, _} ->
			false;
		{true, ChunkProof2} ->
			#chunk_proof{
				metadata = Metadata,
				chunk_id = ChunkID,
				block_start_offset = BlockStartOffset,
				chunk_end_offset = ChunkEndOffset,
				tx_start_offset = TXStartOffset
			} = ChunkProof2,
			#chunk_metadata{ chunk_size = ChunkSize } = Metadata,
			AbsoluteEndOffset = BlockStartOffset + TXStartOffset + ChunkEndOffset,
			case Packing of
				unpacked ->
					%% The chunk is unpacked so its identifier and size can be
					%% verified right away.
					case ar_tx:generate_chunk_id(Chunk) == ChunkID of
						false ->
							false;
						true ->
							case ChunkSize == byte_size(Chunk) of
								true ->
									{true, ChunkProof2};
								false ->
									false
							end
					end;
				_ ->
					%% The chunk id can only be checked after unpacking.
					{need_unpacking, AbsoluteEndOffset, ChunkProof2}
			end
	end.

%% @doc Validate a chunk proof given explicit block offsets. Returns {true, ChunkID}
%% on success, false otherwise; failures are logged with the RequestOrigin tag.
validate_proof2(TXRoot, TXPath, DataPath, BlockStartOffset, BlockEndOffset,
		BlockRelativeOffset, ExpectedChunkSize, RequestOrigin) ->
	ChunkMetadata = #chunk_metadata{
		tx_root = TXRoot,
		tx_path = TXPath,
		data_path = DataPath
	},
	ValidateDataPathRuleset = ar_poa:get_data_path_validation_ruleset(
			BlockStartOffset, get_merkle_rebase_threshold()),
	AbsoluteEndOffset = BlockStartOffset + BlockRelativeOffset,
	ChunkProof = ar_poa:chunk_proof(ChunkMetadata, BlockStartOffset, BlockEndOffset,
			AbsoluteEndOffset, ValidateDataPathRuleset),
	{IsValid, ChunkProof2} = ar_poa:validate_paths(ChunkProof),
	case IsValid of
		true ->
			#chunk_proof{
				chunk_id = ChunkID,
				chunk_start_offset = ChunkStartOffset,
				chunk_end_offset = ChunkEndOffset
			} = ChunkProof2,
			%% The proved chunk size must match the size recorded in the index.
			case ChunkEndOffset - ChunkStartOffset == ExpectedChunkSize of
				false ->
					log_chunk_error(RequestOrigin, failed_to_validate_data_path_offset,
							[{chunk_end_offset, ChunkEndOffset},
							{chunk_start_offset, ChunkStartOffset},
							{chunk_size, ExpectedChunkSize}]),
					false;
				true ->
					{true, ChunkID}
			end;
		false ->
			#chunk_proof{
				tx_path_is_valid = TXPathIsValid,
				data_path_is_valid = DataPathIsValid
			} = ChunkProof2,
			%% NOTE(review): when IsValid is false, at least one of the two paths is
			%% expected to be marked invalid by ar_poa:validate_paths - confirm; an
			%% all-valid result here would crash with a case_clause.
			case {TXPathIsValid, DataPathIsValid} of
				{invalid, _} ->
					log_chunk_error(RequestOrigin, failed_to_validate_tx_path,
							[{block_start_offset, BlockStartOffset},
							{block_end_offset, BlockEndOffset},
							{block_relative_offset, BlockRelativeOffset}]),
					false;
				{_, invalid} ->
					log_chunk_error(RequestOrigin, failed_to_validate_data_path,
							[{block_start_offset, BlockStartOffset},
							{block_end_offset, BlockEndOffset},
							{block_relative_offset, BlockRelativeOffset}]),
					false
			end
	end.

%% @doc Validate DataPath against DataRoot under all three merkle rulesets and
%% verify the chunk matches the proved chunk id and size. Returns
%% {true, PassesBase, PassesStrict, PassesRebase, EndOffset} or false.
%% The per-ruleset pass flags are kept so a disk pool chunk can be re-evaluated
%% later once its final weave offset is known.
validate_data_path(DataRoot, Offset, TXSize, DataPath, Chunk) ->
	Base = ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath,
			strict_borders_ruleset),
	Strict = ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath,
			strict_data_split_ruleset),
	Rebase = ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath,
			offset_rebase_support_ruleset),
	%% Prefer the strict result, then the rebase result, then the base result.
	Result =
		case {Base, Strict, Rebase} of
			{false, false, false} ->
				false;
			{_, {_, _, _} = StrictResult, _} ->
				StrictResult;
			{_, _, {_, _, _} = RebaseResult} ->
				RebaseResult;
			{{_, _, _} = BaseResult, _, _} ->
				BaseResult
		end,
	case Result of
		false ->
			false;
		{ChunkID, StartOffset, EndOffset} ->
			case ar_tx:generate_chunk_id(Chunk) == ChunkID of
				false ->
					false;
				true ->
					case EndOffset - StartOffset == byte_size(Chunk) of
						true ->
							PassesBase = not (Base == false),
							PassesStrict = not (Strict == false),
							PassesRebase = not (Rebase == false),
							{true, PassesBase, PassesStrict, PassesRebase, EndOffset};
						false ->
							false
					end
			end
	end.
%% @doc Check that up to N chunk offsets of the data root (walking the data root
%% index backwards from TXStartOffset) are recorded as synced.
chunk_offsets_synced(_, _, _, _, N) when N == 0 ->
	true;
chunk_offsets_synced(DataRootIndex, DataRootKey, ChunkOffset, TXStartOffset, N) ->
	case ar_sync_record:is_recorded(TXStartOffset + ChunkOffset, ar_data_sync) of
		{{true, _}, _StoreID} ->
			case TXStartOffset of
				0 ->
					true;
				_ ->
					<< DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >> = DataRootKey,
					Key = data_root_key_v2(DataRoot, TXSize, TXStartOffset - 1),
					case ar_kv:get_prev(DataRootIndex, Key) of
						none ->
							true;
						%% DataRoot and TXSize are already bound: this clause only
						%% matches an earlier entry of the same data root and size.
						{ok, << DataRoot:32/binary, TXSizeSize:8, TXSize:(TXSizeSize * 8),
								TXStartOffset2Size:8,
								TXStartOffset2:(TXStartOffset2Size * 8) >>, _} ->
							chunk_offsets_synced(DataRootIndex, DataRootKey, ChunkOffset,
									TXStartOffset2, N - 1);
						{ok, _, _} ->
							%% The previous key belongs to a different data root.
							true;
						_ ->
							false
					end
			end;
		false ->
			false
	end.

%% @doc Return a storage reference to the chunk proof (and possibly the chunk itself).
%% The microsecond timestamp prefix keeps keys unique and roughly time-ordered.
get_chunk_data_key(DataPathHash) ->
	Timestamp = os:system_time(microsecond),
	<< Timestamp:256, DataPathHash/binary >>.

%% @doc Store the chunk unless its byte range is blacklisted. Returns {ok, Packing}
%% (blacklisted bytes are treated as successfully "stored") or an error.
write_chunk(Offset, ChunkDataKey, Chunk, ChunkSize, DataPath, Packing, StoreID) ->
	case ar_tx_blacklist:is_byte_blacklisted(Offset) of
		true ->
			{ok, Packing};
		false ->
			write_not_blacklisted_chunk(Offset, ChunkDataKey, Chunk, ChunkSize, DataPath,
					Packing, StoreID)
	end.

%% @doc Write the chunk to ar_chunk_storage and/or chunk_data_db depending on
%% whether chunk storage supports this offset/size/packing and whether a DataPath
%% was provided.
write_not_blacklisted_chunk(Offset, ChunkDataKey, Chunk, ChunkSize, DataPath, Packing,
		StoreID) ->
	ShouldStoreInChunkStorage =
			ar_chunk_storage:is_storage_supported(Offset, ChunkSize, Packing),
	case {ShouldStoreInChunkStorage, is_binary(DataPath)} of
		{true, true} ->
			%% Chunk goes to chunk storage; only the proof goes to chunk_data_db.
			PaddedOffset = ar_block:get_chunk_padded_offset(Offset),
			case ar_chunk_storage:put(PaddedOffset, Chunk, Packing, StoreID) of
				{ok, NewPacking} ->
					case put_chunk_data(ChunkDataKey, StoreID, DataPath) of
						ok ->
							{ok, NewPacking};
						Error ->
							Error
					end;
				Other ->
					Other
			end;
		{true, false} ->
			%% If ar_data_sync:write_chunk/7 is called directly without a DataPath, we
			%% should just update chunk storage without modifying chunk_data_db. This
			%% can happen, for example, during repack in place.
			PaddedOffset = ar_block:get_chunk_padded_offset(Offset),
			ar_chunk_storage:put(PaddedOffset, Chunk, Packing, StoreID);
		{false, true} ->
			%% Chunk storage does not support this chunk - keep both the chunk and
			%% the proof together in chunk_data_db.
			case put_chunk_data(ChunkDataKey, StoreID, {Chunk, DataPath}) of
				ok ->
					prometheus_counter:inc(chunks_stored,
							[ar_storage_module:packing_label(Packing),
							ar_storage_module:label(StoreID)]),
					{ok, Packing};
				Error ->
					Error
			end;
		{false, false} ->
			%% For chunks which are only stored in chunk_data_db, we currently require
			%% that both the Chunk and the DataPath are present.
			{error, invalid_data_path}
	end.

%% @doc Update the chunks index for the chunk unless its offset is blacklisted.
update_chunks_index(Args, UpdateFootprint, State) ->
	AbsoluteChunkOffset = element(1, Args),
	case ar_tx_blacklist:is_byte_blacklisted(AbsoluteChunkOffset) of
		true ->
			ok;
		false ->
			update_chunks_index2(Args, UpdateFootprint, State)
	end.

%% @doc Write the chunk metadata and register the padded range in the sync record
%% (and optionally the footprint record). Returns ok or {error, Reason}.
update_chunks_index2(Args, UpdateFootprint, State) ->
	{AbsoluteEndOffset, Offset, ChunkDataKey, TXRoot, DataRoot, TXPath, ChunkSize,
			Packing} = Args,
	#sync_data_state{ store_id = StoreID } = State,
	Metadata = {ChunkDataKey, TXRoot, DataRoot, TXPath, Offset, ChunkSize},
	case put_chunk_metadata(AbsoluteEndOffset, StoreID, Metadata) of
		ok ->
			StartOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset - ChunkSize),
			PaddedOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset),
			case ar_sync_record:add(PaddedOffset, StartOffset, Packing, ar_data_sync,
					StoreID) of
				ok ->
					case UpdateFootprint of
						true ->
							case ar_footprint_record:add(PaddedOffset, Packing, StoreID) of
								ok ->
									ok;
								{error, Reason} ->
									{error, Reason}
							end;
						false ->
							ok
					end;
				{error, Reason} ->
					{error, Reason}
			end;
		{error, Reason} ->
			{error, Reason}
	end.

%% @doc Find the first block of CurrentBI present in BlockTXPairs and return the
%% weave size at that block together with the (chronologically ordered) block/tx
%% pairs above it.
pick_missing_blocks([{H, WeaveSize, _} | CurrentBI], BlockTXPairs) ->
	{After, Before} = lists:splitwith(fun({BH, _}) -> BH /= H end, BlockTXPairs),
	case Before of
		[] ->
			%% H is not in BlockTXPairs - try the next block in the index.
			pick_missing_blocks(CurrentBI, BlockTXPairs);
		_ ->
			{WeaveSize, lists:reverse(After)}
	end.
%% @doc Record and skip a chunk whose proof failed validation.
process_invalid_fetched_chunk(Peer, Byte, State) ->
	%% Not necessarily a malicious peer, it might happen
	%% if the chunk is recent and from a different fork.
	process_invalid_fetched_chunk(Peer, Byte, State, got_invalid_proof_from_peer, []).

%% @doc Log and count a skipped chunk with the given Event reason and extra log fields.
process_invalid_fetched_chunk(Peer, Byte, State, Event, ExtraLogs) ->
	#sync_data_state{ weave_size = WeaveSize } = State,
	prometheus_counter:inc(sync_chunks_skipped, [Event]),
	?LOG_WARNING([{event, skipping_synced_chunk}, {reason, Event},
			{peer, ar_util:format_peer(Peer)}, {byte, Byte},
			{weave_size, WeaveSize} | ExtraLogs]),
	{noreply, State}.

%% @doc Handle a chunk whose proof validated: either put it in the disk pool (when
%% above the disk pool threshold) or pack and store it, skipping chunks that are
%% already synced or whose proofs are unattractively large.
process_valid_fetched_chunk(ChunkArgs, Args, State) ->
	#sync_data_state{ store_id = StoreID, disk_pool_threshold = DiskPoolThreshold } = State,
	{Packing, UnpackedChunk, AbsoluteEndOffset, TXRoot, ChunkSize} = ChunkArgs,
	{AbsoluteTXStartOffset, TXSize, DataPath, TXPath, DataRoot, Chunk, _ChunkID,
			ChunkEndOffset, Peer, Byte} = Args,
	case is_chunk_proof_ratio_attractive(ChunkSize, TXSize, DataPath) of
		false ->
			%% The proof is too big relative to the chunk - do not store it.
			Reason = got_too_big_proof_from_peer,
			prometheus_counter:inc(sync_chunks_skipped, [Reason]),
			?LOG_WARNING([{event, skipping_synced_chunk}, {reason, Reason},
					{peer, ar_util:format_peer(Peer)},
					{absolute_end_offset, AbsoluteEndOffset},
					{store_id, StoreID}]),
			decrement_chunk_cache_size(),
			{noreply, State};
		true ->
			case ar_sync_record:is_recorded(Byte + 1, ar_data_sync, StoreID) of
				{true, _} ->
					Reason = chunk_already_synced,
					prometheus_counter:inc(sync_chunks_skipped, [Reason]),
					?LOG_DEBUG([{event, skipping_synced_chunk}, {reason, Reason},
							{peer, ar_util:format_peer(Peer)},
							{absolute_end_offset, AbsoluteEndOffset},
							{store_id, StoreID}]),
					%% The chunk has been synced by another job already.
					decrement_chunk_cache_size(),
					{noreply, State};
				false ->
					%% Sanity check: the absolute offset must agree with the proof.
					true = AbsoluteEndOffset == AbsoluteTXStartOffset + ChunkEndOffset,
					case AbsoluteEndOffset >= DiskPoolThreshold of
						true ->
							add_chunk_to_disk_pool(DataRoot, DataPath, UnpackedChunk,
									ChunkEndOffset - 1, TXSize),
							decrement_chunk_cache_size(),
							{noreply, State};
						false ->
							pack_and_store_chunk({DataRoot, AbsoluteEndOffset, TXPath,
									TXRoot, DataPath, Packing, ChunkEndOffset, ChunkSize,
									Chunk, UnpackedChunk, none, none}, State)
					end
			end
	end.

%% @doc Pack (if necessary) and store a chunk. Chunks above the disk pool threshold
%% are skipped - they are not confirmed enough for storage modules.
pack_and_store_chunk({_, AbsoluteEndOffset, _, _, _, _, _, _, _, _, _, _},
		#sync_data_state{ store_id = StoreID,
				disk_pool_threshold = DiskPoolThreshold } = State)
		when AbsoluteEndOffset > DiskPoolThreshold ->
	%% We do not put data into storage modules unless it is well confirmed.
	Reason = chunk_is_above_disk_pool_threshold,
	prometheus_counter:inc(sync_chunks_skipped, [Reason]),
	?LOG_DEBUG([{event, skipping_synced_chunk}, {reason, Reason},
			{absolute_end_offset, AbsoluteEndOffset},
			{store_id, StoreID}]),
	decrement_chunk_cache_size(),
	{noreply, State};
pack_and_store_chunk(Args, State) ->
	{DataRoot, AbsoluteEndOffset, TXPath, TXRoot, DataPath, Packing, Offset, ChunkSize,
			Chunk, UnpackedChunk, OriginStoreID, OriginChunkDataKey} = Args,
	#sync_data_state{ store_id = StoreID, packing_map = PackingMap } = State,
	RequiredPacking = get_required_chunk_packing(AbsoluteEndOffset, ChunkSize, State),
	%% The first clause matches only when RequiredPacking equals Packing
	%% (Packing is bound, so {Packing, Packing} is an equality check).
	PackingStatus =
		case {RequiredPacking, Packing} of
			{Packing, Packing} ->
				{ready, {Packing, Chunk}};
			{DifferentPacking, _} ->
				{need_packing, DifferentPacking}
		end,
	case PackingStatus of
		{ready, {StoredPacking, StoredChunk}} ->
			ChunkArgs = {StoredPacking, StoredChunk, AbsoluteEndOffset, TXRoot, ChunkSize},
			{noreply, store_chunk(ChunkArgs, {StoredPacking, DataPath, Offset, DataRoot,
					TXPath, OriginStoreID, OriginChunkDataKey}, State)};
		{need_packing, RequiredPacking} ->
			case maps:is_key({AbsoluteEndOffset, RequiredPacking}, PackingMap) of
				true ->
					%% A repack request for this offset/packing is already pending.
					Reason = chunk_already_being_packed,
					prometheus_counter:inc(sync_chunks_skipped, [Reason]),
					?LOG_DEBUG([{event, skipping_synced_chunk}, {reason, Reason},
							{absolute_end_offset, AbsoluteEndOffset},
							{store_id, StoreID}]),
					decrement_chunk_cache_size(),
					{noreply, State};
				false ->
					case ar_packing_server:is_buffer_full() of
						true ->
							%% Back off and retry the whole call later.
							ar_util:cast_after(1000, self(),
									{pack_and_store_chunk, Args}),
							{noreply, State};
						false ->
							%% Prefer repacking from the unpacked form when available.
							{Packing2, Chunk2} =
								case UnpackedChunk of
									none ->
										{Packing, Chunk};
									_ ->
										{unpacked, UnpackedChunk}
								end,
							ar_packing_server:request_repack(
									{AbsoluteEndOffset, RequiredPacking},
									{RequiredPacking, Packing2, Chunk2,
											AbsoluteEndOffset, TXRoot, ChunkSize}),
							PackingArgs = {pack_chunk, {RequiredPacking, DataPath,
									Offset, DataRoot, TXPath, OriginStoreID,
									OriginChunkDataKey}},
							{noreply, State#sync_data_state{
									packing_map = PackingMap#{
										{AbsoluteEndOffset, RequiredPacking}
												=> PackingArgs }}}
					end
			end
	end.

%% @doc Flush the store-chunk queue starting from its current length.
process_store_chunk_queue(#sync_data_state{ store_chunk_queue_len = StartLen } = State) ->
	process_store_chunk_queue(State, StartLen).
process_store_chunk_queue(#sync_data_state{ store_chunk_queue_len = 0 } = State, _StartLen) -> State; process_store_chunk_queue(State, StartLen) -> #sync_data_state{ store_chunk_queue = Q, store_chunk_queue_len = Len, store_chunk_queue_threshold = Threshold } = State, Timestamp = element(2, gb_sets:smallest(Q)), Now = os:system_time(millisecond), Threshold2 = case Threshold < ?STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD of true -> Threshold; false -> case Len > Threshold of true -> 0; false -> Threshold end end, case Len > Threshold2 orelse Now - Timestamp > ?STORE_CHUNK_QUEUE_FLUSH_TIME_THRESHOLD of true -> {{_Offset, _Timestamp, _Ref, ChunkArgs, Args}, Q2} = gb_sets:take_smallest(Q), store_chunk2(ChunkArgs, Args, State), decrement_chunk_cache_size(), State2 = State#sync_data_state{ store_chunk_queue = Q2, store_chunk_queue_len = Len - 1, store_chunk_queue_threshold = min(Threshold2 + 1, ?STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD) }, process_store_chunk_queue(State2, StartLen); false -> State end. store_chunk(ChunkArgs, Args, State) -> %% Let at least N chunks stack up, then write them in the ascending order, %% to reduce out-of-order disk writes causing fragmentation. #sync_data_state{ store_chunk_queue = Q, store_chunk_queue_len = Len } = State, Now = os:system_time(millisecond), Offset = element(3, ChunkArgs), Q2 = gb_sets:add_element({Offset, Now, make_ref(), ChunkArgs, Args}, Q), State2 = State#sync_data_state{ store_chunk_queue = Q2, store_chunk_queue_len = Len + 1 }, process_store_chunk_queue(State2). 
%% @doc Actually persist a chunk: clean any stale sync/footprint records (unless
%% the target is write-once 2.9 storage), write the chunk, and update the index.
%% Errors are logged via log_failed_to_store_chunk/6.
store_chunk2(ChunkArgs, Args, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	{Packing, Chunk, AbsoluteEndOffset, TXRoot, ChunkSize} = ChunkArgs,
	{_Packing, DataPath, Offset, DataRoot, TXPath, OriginStoreID,
			OriginChunkDataKey} = Args,
	PaddedOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset),
	StartOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset - ChunkSize),
	%% This will fail if DataPath is not valid hash input (a binary/iodata) - which
	%% is fine as it serves as a sanity check that store_chunk2 is called with valid
	%% arguments.
	DataPathHash = crypto:hash(sha256, DataPath),
	ShouldStoreInChunkStorage =
			ar_chunk_storage:is_storage_supported(AbsoluteEndOffset, ChunkSize, Packing),
	CleanRecord =
		case {ShouldStoreInChunkStorage, ar_storage_module:get_packing(StoreID)} of
			{true, {replica_2_9, _}} ->
				%% The 2.9 chunk storage is write-once.
				ok;
			_ ->
				%% Remove any previous record for the range before rewriting it.
				case ar_footprint_record:delete(PaddedOffset, StoreID) of
					ok ->
						ar_sync_record:delete(PaddedOffset, StartOffset, ar_data_sync,
								StoreID);
					Error ->
						Error
				end
		end,
	case CleanRecord of
		{error, Reason} ->
			log_failed_to_store_chunk(Reason, AbsoluteEndOffset, Offset, DataRoot,
					DataPathHash, StoreID),
			{error, Reason};
		ok ->
			%% Reuse the origin key when the chunk stays in the same store, so the
			%% existing chunk_data_db entry remains addressable.
			ChunkDataKey =
				case StoreID == OriginStoreID of
					true ->
						OriginChunkDataKey;
					_ ->
						get_chunk_data_key(DataPathHash)
				end,
			StoreIndex =
				case write_chunk(AbsoluteEndOffset, ChunkDataKey, Chunk, ChunkSize,
						DataPath, Packing, StoreID) of
					{ok, NewPacking} ->
						{true, NewPacking};
					Error2 ->
						Error2
				end,
			ProcessAlreadyStored =
				case StoreIndex of
					already_stored ->
						%% The chunk bytes are in place; verify the records agree and
						%% repair them if not.
						case ar_sync_record:is_recorded(PaddedOffset, Packing,
								ar_data_sync, StoreID) of
							false ->
								invalidate_bad_data_record({AbsoluteEndOffset, ChunkSize,
										StoreID,
										chunk_already_stored_but_not_in_sync_record});
							true ->
								case ar_footprint_record:is_recorded(PaddedOffset,
										StoreID) of
									false ->
										%% Repair the broken footprint record.
										ar_footprint_record:add(PaddedOffset, Packing,
												StoreID);
									true ->
										ok
								end
						end,
						already_stored;
					Else ->
						Else
				end,
			case ProcessAlreadyStored of
				{true, Packing2} ->
					UpdateFootprintRecord = is_footprint_record_supported(
							AbsoluteEndOffset, ChunkSize, Packing2),
					case update_chunks_index({AbsoluteEndOffset, Offset, ChunkDataKey,
							TXRoot, DataRoot, TXPath, ChunkSize, Packing2},
							UpdateFootprintRecord, State) of
						ok ->
							ok;
						{error, Reason} ->
							log_failed_to_store_chunk(Reason, AbsoluteEndOffset, Offset,
									DataRoot, DataPathHash, StoreID),
							{error, Reason}
					end;
				{error, Reason} ->
					log_failed_to_store_chunk(Reason, AbsoluteEndOffset, Offset,
							DataRoot, DataPathHash, StoreID),
					{error, Reason}
			end
	end.

%% @doc Log a failed chunk store at a severity appropriate to the reason:
%% already_stored is informational, not_prepared_yet is a warning, anything
%% else is an error.
log_failed_to_store_chunk(already_stored, AbsoluteEndOffset, Offset, DataRoot,
		DataPathHash, StoreID) ->
	?LOG_INFO([{event, chunk_already_stored},
			{absolute_end_offset, AbsoluteEndOffset},
			{relative_offset, Offset},
			{data_path_hash, ar_util:safe_encode(DataPathHash)},
			{data_root, ar_util:safe_encode(DataRoot)},
			{store_id, StoreID}]);
log_failed_to_store_chunk(not_prepared_yet, AbsoluteEndOffset, Offset, DataRoot,
		DataPathHash, StoreID) ->
	?LOG_WARNING([{event, chunk_not_prepared_yet},
			{absolute_end_offset, AbsoluteEndOffset},
			{relative_offset, Offset},
			{data_path_hash, ar_util:safe_encode(DataPathHash)},
			{data_root, ar_util:safe_encode(DataRoot)},
			{store_id, StoreID}]);
log_failed_to_store_chunk(Reason, AbsoluteEndOffset, Offset, DataRoot, DataPathHash,
		StoreID) ->
	?LOG_ERROR([{event, failed_to_store_chunk},
			{reason, io_lib:format("~p", [Reason])},
			{absolute_end_offset, AbsoluteEndOffset},
			{relative_offset, Offset},
			{data_path_hash, ar_util:safe_encode(DataPathHash)},
			{data_root, ar_util:safe_encode(DataRoot)},
			{store_id, StoreID}]).
%% @doc Determine the packing a chunk must have to be stored by this module:
%% unpacked for the default module and for small pre-strict-threshold chunks,
%% unpacked_padded for replica_2_9 modules, otherwise the module's own packing.
get_required_chunk_packing(_Offset, _ChunkSize,
		#sync_data_state{ store_id = ?DEFAULT_MODULE }) ->
	unpacked;
get_required_chunk_packing(Offset, ChunkSize, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	IsEarlySmallChunk = Offset =< ar_block:strict_data_split_threshold()
			andalso ChunkSize < ?DATA_CHUNK_SIZE,
	case IsEarlySmallChunk of
		true ->
			unpacked;
		false ->
			case ar_storage_module:get_packing(StoreID) of
				{replica_2_9, _Addr} ->
					%% replica_2_9 entropy is applied later; store unpacked_padded.
					unpacked_padded;
				Packing ->
					Packing
			end
	end.

%% @doc Process one disk pool entry: skip entries whose data root is not on chain
%% yet, delete orphaned entries, and schedule offset processing for confirmed ones.
%% Always advances the disk pool cursor and re-casts process_disk_pool_item.
process_disk_pool_item(State, Key, Value) ->
	#sync_data_state{ disk_pool_chunks_index = DiskPoolChunksIndex,
			data_root_index = DataRootIndex, store_id = StoreID } = State,
	prometheus_counter:inc(disk_pool_processed_chunks),
	<< Timestamp:256, DataPathHash/binary >> = Key,
	DiskPoolChunk = parse_disk_pool_chunk(Value),
	{Offset, ChunkSize, DataRoot, TXSize, ChunkDataKey, PassedBaseValidation,
			PassedStrictValidation, PassedRebaseValidation} = DiskPoolChunk,
	DataRootKey = << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>,
	InDataRootIndex = get_data_root_offset(DataRootKey, StoreID),
	InDiskPool = ets:member(ar_disk_pool_data_roots, DataRootKey),
	case {InDataRootIndex, InDiskPool} of
		{not_found, true} ->
			%% Increment the timestamp by one (microsecond), so that the new cursor is
			%% a prefix of the first key of the next data root. We want to quickly skip
			%% all chunks belonging to the same data root because the data root is not
			%% yet on chain.
			NextCursor = {seek, << (Timestamp + 1):256 >>},
			gen_server:cast(self(), process_disk_pool_item),
			{noreply, State#sync_data_state{ disk_pool_cursor = NextCursor }};
		{not_found, false} ->
			%% The chunk was either orphaned or never made it to the chain.
			case ets:member(ar_data_sync_state,
					move_data_root_index_migration_complete) of
				true ->
					ok = ar_kv:delete(DiskPoolChunksIndex, Key),
					ok = delete_chunk_data(ChunkDataKey, StoreID),
					decrease_occupied_disk_pool_size(ChunkSize, DataRootKey);
				false ->
					%% Do not remove the chunk from the disk pool until the data root
					%% index migration is complete, because the data root might still
					%% exist in the old index.
					ok
			end,
			%% Append a byte so the cursor is strictly after the current key.
			NextCursor = << Key/binary, <<"a">>/binary >>,
			gen_server:cast(self(), process_disk_pool_item),
			State2 = maybe_reset_disk_pool_full_scan_key(Key, State),
			{noreply, State2#sync_data_state{ disk_pool_cursor = NextCursor }};
		{{ok, {TXStartOffset, _TXPath}}, _} ->
			%% The data root is on chain: process every weave offset of the chunk.
			DataRootIndexIterator = data_root_index_iterator_v2(DataRootKey,
					TXStartOffset + 1, DataRootIndex),
			NextCursor = << Key/binary, <<"a">>/binary >>,
			State2 = State#sync_data_state{ disk_pool_cursor = NextCursor },
			Args = {Offset, InDiskPool, ChunkSize, DataRoot, DataPathHash, ChunkDataKey,
					Key, PassedBaseValidation, PassedStrictValidation,
					PassedRebaseValidation},
			gen_server:cast(self(),
					{process_disk_pool_chunk_offsets, DataRootIndexIterator, true, Args}),
			{noreply, State2}
	end.

%% @doc Decrease the global disk pool size counters and the per-data-root size.
decrease_occupied_disk_pool_size(Size, DataRootKey) ->
	ets:update_counter(ar_data_sync_state, disk_pool_size, {2, -Size}),
	prometheus_gauge:dec(pending_chunks_size, Size),
	case ets:lookup(ar_disk_pool_data_roots, DataRootKey) of
		[] ->
			ok;
		[{_, {Size2, Timestamp, TXIDSet}}] ->
			ets:insert(ar_disk_pool_data_roots,
					{DataRootKey, {Size2 - Size, Timestamp, TXIDSet}}),
			ok
	end.

%% @doc Clear the full-scan start key when the scan reaches it again (wrap-around).
maybe_reset_disk_pool_full_scan_key(Key,
		#sync_data_state{ disk_pool_full_scan_start_key = Key } = State) ->
	State#sync_data_state{ disk_pool_full_scan_start_key = none };
maybe_reset_disk_pool_full_scan_key(_Key, State) ->
	State.
parse_disk_pool_chunk(Bin) -> case binary_to_term(Bin, [safe]) of {Offset, ChunkSize, DataRoot, TXSize, ChunkDataKey} -> {Offset, ChunkSize, DataRoot, TXSize, ChunkDataKey, true, false, false}; {Offset, ChunkSize, DataRoot, TXSize, ChunkDataKey, PassesStrict} -> {Offset, ChunkSize, DataRoot, TXSize, ChunkDataKey, true, PassesStrict, false}; R -> R end. delete_disk_pool_chunk(Iterator, Args, State) -> #sync_data_state{ disk_pool_chunks_index = DiskPoolChunksIndex, store_id = StoreID } = State, {Offset, _, ChunkSize, _, _, ChunkDataKey, DiskPoolKey, _, _, _} = Args, case data_root_index_next_v2(Iterator, 10) of none -> ok = ar_kv:delete(DiskPoolChunksIndex, DiskPoolKey), ok = delete_chunk_data(ChunkDataKey, StoreID), DataRootKey = data_root_index_get_key(Iterator), decrease_occupied_disk_pool_size(ChunkSize, DataRootKey); {TXArgs, Iterator2} -> {TXStartOffset, _TXRoot, _TXPath} = TXArgs, AbsoluteEndOffset = TXStartOffset + Offset, case get_chunk_metadata(AbsoluteEndOffset, StoreID) of not_found -> ok; {ok, ChunkArgs} -> case element(1, ChunkArgs) of ChunkDataKey -> PaddedOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset), StartOffset = ar_block:get_chunk_padded_offset( AbsoluteEndOffset - ChunkSize), ok = ar_footprint_record:delete(PaddedOffset, StoreID), ok = ar_sync_record:delete(PaddedOffset, StartOffset, ar_data_sync, StoreID), case ar_sync_record:is_recorded(PaddedOffset, ar_data_sync) of false -> ar_events:send(sync_record, {global_remove_range, StartOffset, PaddedOffset}); {{true, {replica_2_9, _}}, _StoreID} -> %% Replica 2.9 data is recorded in the footprint record. ar_events:send(sync_record, {global_remove_range, StartOffset, PaddedOffset}); _ -> ok end, ok = delete_chunk_metadata(AbsoluteEndOffset, StoreID); _ -> %% The entry has been written by the 2.5 version thus has %% a different key. We do not want to remove chunks from %% the existing 2.5 dataset. ok end end, delete_disk_pool_chunk(Iterator2, Args, State) end. 
%% @doc Remember that the disk pool key Key is currently being processed.
register_currently_processed_disk_pool_key(Key, State) ->
	#sync_data_state{ currently_processed_disk_pool_keys = Keys } = State,
	Keys2 = sets:add_element(Key, Keys),
	State#sync_data_state{ currently_processed_disk_pool_keys = Keys2 }.

%% @doc Forget that the disk pool key Key is being processed.
deregister_currently_processed_disk_pool_key(Key, State) ->
	#sync_data_state{ currently_processed_disk_pool_keys = Keys } = State,
	Keys2 = sets:del_element(Key, Keys),
	State#sync_data_state{ currently_processed_disk_pool_keys = Keys2 }.

%% @doc Read the merkle rebase support threshold from the node_state ETS cache;
%% returns infinity when not yet set.
get_merkle_rebase_threshold() ->
	case ets:lookup(node_state, merkle_rebase_support_threshold) of
		[] ->
			infinity;
		[{_, Threshold}] ->
			Threshold
	end.

%% @doc Re-validate a disk pool chunk at a concrete weave offset using the flags
%% recorded at ingestion time, then dispatch to the immature (above the disk pool
%% threshold) or matured handler.
process_disk_pool_chunk_offset(Iterator, TXRoot, TXPath, AbsoluteEndOffset, MayConclude,
		Args, State) ->
	#sync_data_state{ disk_pool_threshold = DiskPoolThreshold } = State,
	{Offset, _, _, DataRoot, DataPathHash, _, _, PassedBase, PassedStrictValidation,
			PassedRebaseValidation} = Args,
	PassedValidation =
		case {AbsoluteEndOffset >= get_merkle_rebase_threshold(),
				AbsoluteEndOffset >= ar_block:strict_data_split_threshold(),
				PassedBase, PassedStrictValidation, PassedRebaseValidation} of
			%% At the rebase threshold we relax some of the validation rules so the
			%% strict validation may fail.
			{true, true, _, _, true} ->
				true;
			%% Between the "strict" and "rebase" thresholds the "base" and "strict
			%% split" rules must be followed.
			{false, true, true, true, _} ->
				true;
			%% Before the strict threshold only the base (most relaxed) validation
			%% must pass.
			{false, false, true, _, _} ->
				true;
			_ ->
				false
		end,
	case PassedValidation of
		false ->
			%% When we accept chunks into the disk pool, we do not know where they will
			%% end up on the weave. Therefore, we cannot require all Merkle proofs pass
			%% the strict validation rules taking effect only after
			%% ar_block:strict_data_split_threshold() or allow the merkle tree offset
			%% rebases supported after the yet another special weave threshold.
			%% Instead we note down whether the chunk passes the strict and rebase
			%% validations and take it into account here where the chunk is associated
			%% with a global weave offset.
			?LOG_INFO([{event, disk_pool_chunk_from_bad_split},
					{absolute_end_offset, AbsoluteEndOffset},
					{merkle_rebase_threshold, get_merkle_rebase_threshold()},
					{strict_data_split_threshold, ar_block:strict_data_split_threshold()},
					{passed_base, PassedBase}, {passed_strict, PassedStrictValidation},
					{passed_rebase, PassedRebaseValidation},
					{relative_offset, Offset},
					{data_path_hash, ar_util:encode(DataPathHash)},
					{data_root, ar_util:encode(DataRoot)}]),
			gen_server:cast(self(), {process_disk_pool_chunk_offsets, Iterator,
					MayConclude, Args}),
			{noreply, State};
		true ->
			case AbsoluteEndOffset > DiskPoolThreshold of
				true ->
					process_disk_pool_immature_chunk_offset(Iterator, TXRoot, TXPath,
							AbsoluteEndOffset, Args, State);
				false ->
					process_disk_pool_matured_chunk_offset(Iterator, TXRoot, TXPath,
							AbsoluteEndOffset, MayConclude, Args, State)
			end
	end.

%% @doc Handle a disk pool chunk offset above the disk pool threshold: keep the
%% chunk in the disk pool and make sure it is indexed as unpacked.
process_disk_pool_immature_chunk_offset(Iterator, TXRoot, TXPath, AbsoluteEndOffset,
		Args, State) ->
	#sync_data_state{ store_id = StoreID } = State,
	case ar_sync_record:is_recorded(AbsoluteEndOffset, ar_data_sync, StoreID) of
		{true, unpacked} ->
			%% Pass MayConclude as false because we have encountered an offset
			%% above the disk pool threshold => we need to keep the chunk in the
			%% disk pool for now and not pack and move to the offset-based storage.
			%% The motivation is to keep chain reorganisations cheap.
			gen_server:cast(self(),
					{process_disk_pool_chunk_offsets, Iterator, false, Args}),
			{noreply, State};
		false ->
			{Offset, _, ChunkSize, DataRoot, DataPathHash, ChunkDataKey, Key,
					_, _, _} = Args,
			case update_chunks_index({AbsoluteEndOffset, Offset, ChunkDataKey, TXRoot,
					DataRoot, TXPath, ChunkSize, unpacked}, false, State) of
				ok ->
					gen_server:cast(self(),
							{process_disk_pool_chunk_offsets, Iterator, false, Args}),
					{noreply, State};
				{error, Reason} ->
					?LOG_WARNING([{event, failed_to_index_disk_pool_chunk},
							{reason, io_lib:format("~p", [Reason])},
							{data_path_hash, ar_util:encode(DataPathHash)},
							{data_root, ar_util:encode(DataRoot)},
							{absolute_end_offset, AbsoluteEndOffset},
							{relative_offset, Offset},
							{chunk_data_key, ar_util:encode(element(5, Args))}]),
					%% Abandon this key and move on to the next disk pool item.
					gen_server:cast(self(), process_disk_pool_item),
					{noreply, deregister_currently_processed_disk_pool_key(Key, State)}
			end
	end.

process_disk_pool_matured_chunk_offset(Iterator, TXRoot, TXPath, AbsoluteEndOffset,
		MayConclude, Args, State) ->
	%% The chunk has received a decent number of confirmations so we put it in storage
	%% module(s). If we have no storage modules configured covering this offset, proceed
	%% to the next offset. If there are several suitable storage modules, send the chunk
	%% to those modules that do not have it synced yet.
	%%
	%% The stages below form a short-circuiting pipeline: each Is* binding is either
	%% a {noreply, State} reply (final) or the surviving list of store ids.
	#sync_data_state{ store_id = DefaultStoreID } = State,
	{Offset, _, ChunkSize, DataRoot, DataPathHash, ChunkDataKey, Key,
			_PassedBaseValidation, _PassedStrictValidation,
			_PassedRebaseValidation} = Args,
	FindStorageModules =
		case ar_storage_module:get_all(AbsoluteEndOffset - ChunkSize,
				AbsoluteEndOffset) of
			[] ->
				%% No configured module covers this offset - skip to the next one.
				gen_server:cast(self(), {process_disk_pool_chunk_offsets, Iterator,
						MayConclude, Args}),
				{noreply, State};
			Modules ->
				[ar_storage_module:id(Module) || Module <- Modules]
		end,
	IsBlacklisted =
		case FindStorageModules of
			{noreply, State2} ->
				{noreply, State2};
			StoreIDs ->
				case ar_tx_blacklist:is_byte_blacklisted(AbsoluteEndOffset) of
					true ->
						gen_server:cast(self(), {process_disk_pool_chunk_offsets,
								Iterator, MayConclude, Args}),
						{noreply, remove_recently_processed_disk_pool_offset(
								AbsoluteEndOffset, ChunkDataKey, State)};
					false ->
						StoreIDs
				end
		end,
	IsSynced =
		case IsBlacklisted of
			{noreply, State3} ->
				{noreply, State3};
			StoreIDs2 ->
				case filter_storage_modules_by_synced_offset(AbsoluteEndOffset,
						StoreIDs2) of
					[] ->
						%% Every module already has the chunk.
						gen_server:cast(self(), {process_disk_pool_chunk_offsets,
								Iterator, MayConclude, Args}),
						{noreply, remove_recently_processed_disk_pool_offset(
								AbsoluteEndOffset, ChunkDataKey, State)};
					StoreIDs3 ->
						StoreIDs3
				end
		end,
	IsProcessed =
		case IsSynced of
			{noreply, State4} ->
				{noreply, State4};
			StoreIDs4 ->
				case is_recently_processed_offset(AbsoluteEndOffset, ChunkDataKey,
						State) of
					true ->
						gen_server:cast(self(), {process_disk_pool_chunk_offsets,
								Iterator, false, Args}),
						{noreply, State};
					false ->
						StoreIDs4
				end
		end,
	IsChunkCacheFull =
		case IsProcessed of
			{noreply, State5} ->
				{noreply, State5};
			StoreIDs5 ->
				case is_chunk_cache_full() of
					true ->
						gen_server:cast(self(), {process_disk_pool_chunk_offsets,
								Iterator, false, Args}),
						{noreply, State};
					false ->
						StoreIDs5
				end
		end,
	case IsChunkCacheFull of
		{noreply, State6} ->
			{noreply, State6};
		StoreIDs6 ->
			case read_chunk(AbsoluteEndOffset, ChunkDataKey, DefaultStoreID) of
				not_found ->
					?LOG_ERROR([{event, disk_pool_chunk_not_found},
							{data_path_hash, ar_util:encode(DataPathHash)},
							{data_root, ar_util:encode(DataRoot)},
							{absolute_end_offset, AbsoluteEndOffset},
							{relative_offset, Offset},
							{chunk_data_key, ar_util:encode(element(5, Args))}]),
					gen_server:cast(self(), {process_disk_pool_chunk_offsets, Iterator,
							MayConclude, Args}),
					{noreply, State};
				{error, Reason2} ->
					?LOG_ERROR([{event, failed_to_read_disk_pool_chunk},
							{reason, io_lib:format("~p", [Reason2])},
							{data_path_hash, ar_util:encode(DataPathHash)},
							{data_root, ar_util:encode(DataRoot)},
							{absolute_end_offset, AbsoluteEndOffset},
							{relative_offset, Offset},
							{chunk_data_key, ar_util:encode(element(5, Args))}]),
					gen_server:cast(self(), process_disk_pool_item),
					{noreply, deregister_currently_processed_disk_pool_key(Key, State)};
				{ok, {Chunk, DataPath}} ->
					increment_chunk_cache_size(),
					Args2 = {DataRoot, AbsoluteEndOffset, TXPath, TXRoot, DataPath,
							unpacked, Offset, ChunkSize, Chunk, Chunk, none, none},
					%% Hand the chunk off to every storage module that misses it.
					[gen_server:cast(name(StoreID6), {pack_and_store_chunk, Args2})
							|| StoreID6 <- StoreIDs6],
					gen_server:cast(self(), {process_disk_pool_chunk_offsets, Iterator,
							false, Args}),
					{noreply, cache_recently_processed_offset(AbsoluteEndOffset,
							ChunkDataKey, State)}
			end
	end.

%% @doc Drop the chunk data key from the recently-processed cache for the offset.
remove_recently_processed_disk_pool_offset(Offset, ChunkDataKey, State) ->
	#sync_data_state{ recently_processed_disk_pool_offsets = Map } = State,
	case maps:get(Offset, Map, not_found) of
		not_found ->
			State;
		Set ->
			Set2 = sets:del_element(ChunkDataKey, Set),
			Map2 =
				case sets:is_empty(Set2) of
					true ->
						maps:remove(Offset, Map);
					false ->
						maps:put(Offset, Set2, Map)
				end,
			State#sync_data_state{ recently_processed_disk_pool_offsets = Map2 }
	end.

%% @doc Check whether the chunk data key was recently processed for the offset.
is_recently_processed_offset(Offset, ChunkDataKey, State) ->
	#sync_data_state{ recently_processed_disk_pool_offsets = Map } = State,
	Set = maps:get(Offset, Map, sets:new()),
	sets:is_element(ChunkDataKey, Set).
cache_recently_processed_offset(Offset, ChunkDataKey, State) -> #sync_data_state{ recently_processed_disk_pool_offsets = Map } = State, Set = maps:get(Offset, Map, sets:new()), Map2 = case sets:is_element(ChunkDataKey, Set) of false -> ar_util:cast_after(?CACHE_RECENTLY_PROCESSED_DISK_POOL_OFFSET_LIFETIME_MS, self(), {remove_recently_processed_disk_pool_offset, Offset, ChunkDataKey}), maps:put(Offset, sets:add_element(ChunkDataKey, Set), Map); true -> Map end, State#sync_data_state{ recently_processed_disk_pool_offsets = Map2 }. filter_storage_modules_by_synced_offset(AbsoluteEndOffset, [StoreID | StoreIDs]) -> case ar_sync_record:is_recorded(AbsoluteEndOffset, ar_data_sync, StoreID) of {true, _Packing} -> filter_storage_modules_by_synced_offset(AbsoluteEndOffset, StoreIDs); false -> [StoreID | filter_storage_modules_by_synced_offset(AbsoluteEndOffset, StoreIDs)] end; filter_storage_modules_by_synced_offset(_, []) -> []. process_unpacked_chunk(ChunkArgs, Args, State) -> {_AbsoluteTXStartOffset, _TXSize, _DataPath, _TXPath, _DataRoot, _Chunk, ChunkID, _ChunkEndOffset, Peer, Byte} = Args, {_Packing, Chunk, _AbsoluteEndOffset, _TXRoot, ChunkSize} = ChunkArgs, case validate_chunk_id_size(Chunk, ChunkID, ChunkSize) of false -> decrement_chunk_cache_size(), process_invalid_fetched_chunk(Peer, Byte, State); true -> process_valid_fetched_chunk(ChunkArgs, Args, State) end. validate_chunk_id_size(Chunk, ChunkID, ChunkSize) -> case ar_tx:generate_chunk_id(Chunk) == ChunkID of false -> false; true -> ChunkSize == byte_size(Chunk) end. log_sufficient_disk_space(StoreID) -> ar:console("~nThe node has detected available disk space and resumed syncing data " "into the storage module ~s.~n", [StoreID]), ?LOG_INFO([{event, storage_module_resumed_syncing}, {storage_module, StoreID}]). 
log_insufficient_disk_space(StoreID) -> ar:console("~nThe node has stopped syncing data into the storage module ~s due to " "the insufficient disk space.~n", [StoreID]), ?LOG_INFO([{event, storage_module_stopped_syncing}, {reason, insufficient_disk_space}, {storage_module, StoreID}]). data_root_index_iterator_v2(DataRootKey, TXStartOffset, DataRootIndex) -> {DataRootKey, TXStartOffset, TXStartOffset, DataRootIndex, 1}. data_root_index_next_v2({_, _, _, _, Count}, Limit) when Count > Limit -> none; data_root_index_next_v2({_, 0, _, _, _}, _Limit) -> none; data_root_index_next_v2(Args, _Limit) -> {DataRootKey, TXStartOffset, LatestTXStartOffset, DataRootIndex, Count} = Args, << DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >> = DataRootKey, Key = data_root_key_v2(DataRoot, TXSize, TXStartOffset - 1), case ar_kv:get_prev(DataRootIndex, Key) of none -> none; {ok, << DataRoot:32/binary, TXSizeSize:8, TXSize:(TXSizeSize * 8), TXStartOffset2Size:8, TXStartOffset2:(TXStartOffset2Size * 8) >>, TXPath} -> {ok, TXRoot} = ar_merkle:extract_root(TXPath), {{TXStartOffset2, TXRoot, TXPath}, {DataRootKey, TXStartOffset2, LatestTXStartOffset, DataRootIndex, Count + 1}}; {ok, _, _} -> none end. data_root_index_reset({DataRootKey, _, TXStartOffset, DataRootIndex, _}) -> {DataRootKey, TXStartOffset, TXStartOffset, DataRootIndex, 1}. data_root_index_get_key(Iterator) -> element(1, Iterator). data_root_index_iterator(TXRootMap) -> {maps:fold( fun(TXRoot, Map, Acc) -> maps:fold( fun(Offset, TXPath, Acc2) -> gb_sets:insert({Offset, TXRoot, TXPath}, Acc2) end, Acc, Map ) end, gb_sets:new(), TXRootMap ), 0}. data_root_index_next({_Index, Count}, Limit) when Count >= Limit -> none; data_root_index_next({Index, Count}, _Limit) -> case gb_sets:is_empty(Index) of true -> none; false -> {Element, Index2} = gb_sets:take_largest(Index), {Element, {Index2, Count + 1}} end. 
record_chunk_cache_size_metric() -> case ets:lookup(ar_data_sync_state, chunk_cache_size) of [{_, Size}] -> prometheus_gauge:set(chunk_cache_size, Size); _ -> ok end. %% @doc Get data roots for a given offset (>= BlockStartOffset, < BlockEndOffset) from local indices. %% Return only entries corresponding to non-empty transactions. %% Return the complete list of entries in the order they appear in the data root index, %% which corresponds to sorted #tx records in the block. %% Return {ok, {TXRoot, BlockSize, [{DataRoot, TXSize, TXStartOffset, TXPath}, ...]}} %% or {error, Reason}. get_data_roots_for_offset(Offset) -> case Offset >= get_disk_pool_threshold() of true -> {error, not_found}; false -> {BlockStart, BlockEnd, TXRoot} = ar_block_index:get_block_bounds(Offset), true = Offset >= BlockStart andalso Offset < BlockEnd, StoreID = ?DEFAULT_MODULE, DB = {data_root_offset_index, StoreID}, case ar_kv:get(DB, << BlockStart:?OFFSET_KEY_BITSIZE >>) of not_found -> {error, not_found}; {ok, Bin} -> {TXRoot2, BlockSize, DataRootIndexKeySet} = binary_to_term(Bin), true = TXRoot2 == TXRoot, {ok, {TXRoot, BlockSize, lists:sort( fun({_DataRoot1, _TXSize1, TXStart1, _TXPath1}, {_DataRoot2, _TXSize2, TXStart2, _TXPath2}) -> TXStart1 < TXStart2 end, sets:fold( fun(<< DataRoot:32/binary, TXSize:?OFFSET_KEY_BITSIZE >>, Acc) -> read_data_root_entries(DataRoot, TXSize, BlockStart, BlockEnd, StoreID, Acc) end, [], DataRootIndexKeySet ))}} end end. %% @doc Return true if the data roots for the given block range are synced, false otherwise. %% Assert the given BlockEnd and TXRoot match the stored values. are_data_roots_synced(BlockStart, BlockEnd, TXRoot) -> DB = {data_root_offset_index, ?DEFAULT_MODULE}, case ar_kv:get(DB, << BlockStart:?OFFSET_KEY_BITSIZE >>) of not_found -> false; {ok, Bin} -> {TXRoot2, BlockSize, _DataRootIndexKeySet} = binary_to_term(Bin), true = TXRoot2 == TXRoot, true = BlockSize == BlockEnd - BlockStart, true end. 
read_data_root_entries(_DataRoot, _TXSize, _BlockStart, 0, _StoreID, Acc) -> Acc; read_data_root_entries(DataRoot, TXSize, BlockStart, Cursor, StoreID, Acc) -> Key = data_root_key_v2(DataRoot, TXSize, Cursor - 1), case ar_kv:get_prev({data_root_index, StoreID}, Key) of {ok, << DataRoot:32/binary, TXSizeSize:8, TXSize:(TXSizeSize * 8), TXStartSize:8, TXStart:(TXStartSize * 8) >>, TXPath} when TXStart >= BlockStart -> [{DataRoot, TXSize, TXStart, TXPath} | read_data_root_entries(DataRoot, TXSize, BlockStart, TXStart, StoreID, Acc)]; {ok, _, _} -> Acc; none -> Acc end. maybe_run_footprint_record_initialization(State) -> #sync_data_state{ store_id = StoreID } = State, Packing = ar_storage_module:get_packing(StoreID), {FootprintRecordCursor, InitializationComplete} = get_footprint_record_initialization_state(State), case InitializationComplete of true -> ok; false -> ?LOG_INFO([{event, initializing_footprint_record}, {cursor, FootprintRecordCursor}, {store_id, StoreID}, {packing, ar_serialize:encode_packing(Packing, false)}]), gen_server:cast(self(), {initialize_footprint_record, FootprintRecordCursor, Packing}) end. get_footprint_record_initialization_state(State) -> #sync_data_state{ migrations_index = MI } = State, case ar_kv:get(MI, ?FOOTPRINT_MIGRATION_CURSOR_KEY) of not_found -> {0, false}; {ok, <<"complete">>} -> {complete, true}; {ok, CursorBin} -> Cursor = binary:decode_unsigned(CursorBin), {Cursor, false} end. %% @doc Initialize the footprint record from the ar_data_sync record. %% We don't filter by packing to ensure all synced intervals are migrated. 
initialize_footprint_record(complete, _Packing, State) -> State; initialize_footprint_record(Cursor, Packing, State) -> #sync_data_state{ store_id = StoreID, range_end = RangeEnd, migrations_index = MI } = State, BatchSize = ?FOOTPRINT_MIGRATION_BATCH_SIZE, case ar_sync_record:get_next_synced_interval(Cursor, RangeEnd, ar_data_sync, StoreID) of not_found -> ok = ar_kv:put(MI, ?FOOTPRINT_MIGRATION_CURSOR_KEY, <<"complete">>), ?LOG_INFO([{event, footprint_record_initialized}, {store_id, StoreID}]), State; {IntervalEnd, IntervalStart} -> Cursor2 = max(Cursor, IntervalStart), EndPosition = min(Cursor2 + (BatchSize * ?DATA_CHUNK_SIZE), IntervalEnd), initialize_footprint_range(Cursor2, EndPosition, Packing, StoreID), NewCursor = EndPosition, ok = ar_kv:put(MI, ?FOOTPRINT_MIGRATION_CURSOR_KEY, binary:encode_unsigned(NewCursor)), ar_util:cast_after(1_000, self(), {initialize_footprint_record, NewCursor, Packing}), State end. %% @doc Migrate chunks in the given range to footprint records. initialize_footprint_range(Start, End, _Packing, _StoreID) when Start >= End -> ok; initialize_footprint_range(Start, End, Packing, StoreID) -> ar_footprint_record:add(Start + 1, Packing, StoreID), initialize_footprint_range(Start + ?DATA_CHUNK_SIZE, End, Packing, StoreID). ================================================ FILE: apps/arweave/src/ar_data_sync_coordinator.erl ================================================ %%% @doc Coordinates data sync tasks between worker processes and peer workers. 
%%%
%%% This module acts as a coordinator that:
%%% - Dispatches sync tasks to ar_data_sync_worker processes
%%% - Coordinates with ar_peer_worker processes (one per peer) that manage:
%%%   - Peer task queues and dispatch limits
%%%   - Footprint management (grouping tasks to limit entropy cache usage)
%%%   - Peer performance tracking
%%% - Performs periodic rebalancing based on peer performance metrics
%%%
%%% Architecture:
%%% - Each peer has its own ar_peer_worker process that manages peer-specific state
%%%   (queues, footprints, dispatch limits, performance metrics)
%%% - This coordinator manages the pool of ar_data_sync_worker processes and
%%%   dispatches tasks from peer queues to available workers
%%% - Worker selection uses round-robin with load balancing
%%%
%%% Task Flow:
%%% 1. Tasks are enqueued to the appropriate ar_peer_worker
%%% 2. Peer workers store those tasks in either
%%%    - their task_queue (ready for dispatch) if they belong to an active footprint
%%%    - or in a waiting queue (not ready for dispatch) if they don't belong to an
%%%      active footprint
%%% 3. Periodically the coordinator pulls tasks from peer queues and dispatches to
%%%    workers. This is event based and happens in response to one of these events:
%%%    - a new task is sent to the coordinator
%%%    - a task is completed by an ar_data_sync_worker
%%% 4. On task completion, peer workers update metrics and notify coordinator.
%%% 5. When a footprint completes, a new footprint is activated. Footprint activation
%%%    is handled both by the ar_peer_worker (if it has waiting tasks) or by the
%%%    coordinator (if the ar_peer_worker does not have waiting tasks, coordinator
%%%    will find another peer that does). Note: footprint activation does not
%%%    immediately dispatch tasks.
%%%
%%% Tasks can be in one of three states:
%%% - waiting:    the task belongs to an inactive footprint and is stored in a
%%%               "waiting" queue on the ar_peer_worker. A task in the "waiting"
%%%               state contributes to the total_queued_count, but can not be
%%%               dispatched until its footprint becomes active.
%%% - queued:     the task belongs to an active footprint and is stored in the
%%%               ar_peer_worker's task queue. It will be dispatched as soon as
%%%               an ar_data_sync_worker becomes available. A task in the "queued"
%%%               state contributes to the total_queued_count.
%%% - dispatched: the task has been dispatched to an ar_data_sync_worker and is
%%%               being processed. A task in the "dispatched" state contributes to
%%%               the total_dispatched_count.
%%%
%%% Footprints can be in one of two states:
%%% - active:   All tasks belonging to an active footprint are moved to the
%%%             ar_peer_worker's task queue and are eligible to be dispatched.
%%% - inactive: All tasks belonging to an inactive footprint are stored in the
%%%             ar_peer_worker's "waiting" queue. They are not eligible to be
%%%             dispatched until their footprint becomes active.
%%%
-module(ar_data_sync_coordinator).

-behaviour(gen_server).

-export([start_link/1, register_workers/0, is_syncing_enabled/0, ready_for_work/0]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_peers.hrl").

%% How often the periodic peer rebalancing pass runs.
-define(REBALANCE_FREQUENCY_MS, 10*1000).

-record(state, {
	total_queued_count = 0, %% total count of non-dispatched tasks across all peers
	total_dispatched_count = 0, %% total count of tasks currently assigned to a worker
	workers = queue:new(), %% round-robin queue of ar_data_sync_worker names
	dispatched_count_per_worker = #{}, %% #{Worker => CurrentlyDispatchedCount}
	known_peers = #{}, %% #{Peer => Pid} - cached peer worker Pids
	%% Global footprint tracking
	total_active_footprints = 0, %% count of active footprints across all peers
	max_footprints = 0 %% global max footprints limit
}).

%%%===================================================================
%%% Public interface.
%%%=================================================================== %% @doc Start the server. start_link(Workers) -> gen_server:start_link({local, ?MODULE}, ?MODULE, Workers, []). register_workers() -> case is_syncing_enabled() of true -> {Workers, WorkerNames} = register_sync_workers(), WorkerMaster = ?CHILD_WITH_ARGS( ar_data_sync_coordinator, worker, ar_data_sync_coordinator, [WorkerNames]), [WorkerMaster] ++ Workers; false -> [] end. register_sync_workers() -> {ok, Config} = arweave_config:get_env(), {Workers, WorkerNames} = lists:foldl( fun(Number, {AccWorkers, AccWorkerNames}) -> Name = list_to_atom("ar_data_sync_worker_" ++ integer_to_list(Number)), Worker = ?CHILD_WITH_ARGS(ar_data_sync_worker, worker, Name, [Name, sync]), {[Worker | AccWorkers], [Name | AccWorkerNames]} end, {[], []}, lists:seq(1, Config#config.sync_jobs) ), {Workers, WorkerNames}. %% @doc Returns true if syncing is enabled (i.e. sync_jobs > 0). is_syncing_enabled() -> {ok, Config} = arweave_config:get_env(), Config#config.sync_jobs > 0. %% @doc Returns true if we can accept new tasks. Will always return false if syncing is %% disabled (i.e. sync_jobs = 0). ready_for_work() -> try gen_server:call(?MODULE, ready_for_work, 1000) catch exit:{timeout,_} -> false end. %%%=================================================================== %%% Generic server callbacks. %%%=================================================================== init(Workers) -> ar_util:cast_after(?REBALANCE_FREQUENCY_MS, ?MODULE, rebalance_peers), MaxFootprints = calculate_max_footprints(), ?LOG_INFO([{event, init}, {module, ?MODULE}, {workers, Workers}, {max_footprints, MaxFootprints}]), {ok, #state{ workers = queue:from_list(Workers), max_footprints = MaxFootprints }}. 
calculate_max_footprints() -> {ok, Config} = arweave_config:get_env(), %% Calculate global max footprints based on entropy cache size FootprintSize = ar_block:get_replica_2_9_footprint_size(), max(1, (Config#config.replica_2_9_entropy_cache_size_mb * ?MiB) div FootprintSize). handle_call(ready_for_work, _From, State) -> WorkerCount = queue:len(State#state.workers), TotalTaskCount = State#state.total_dispatched_count + State#state.total_queued_count, ReadyForWork = TotalTaskCount < max_tasks(WorkerCount), {reply, ReadyForWork, State}; handle_call({reset_worker, Worker}, _From, State) -> ActiveCount = maps:get(Worker, State#state.dispatched_count_per_worker, 0), State2 = State#state{ total_dispatched_count = State#state.total_dispatched_count - ActiveCount, dispatched_count_per_worker = maps:put(Worker, 0, State#state.dispatched_count_per_worker) }, {reply, ok, State2}; handle_call(Request, _From, State) -> ?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]), {reply, ok, State}. 
%% sync_range: enqueue a new sync task with the appropriate peer worker and
%% immediately dispatch whatever that peer has ready. Dropped silently when no
%% workers are configured or the peer worker cannot be started.
handle_cast({sync_range, Args}, State) ->
	case queue:is_empty(State#state.workers) of
		true ->
			{noreply, State};
		false ->
			{_Start, _End, Peer, _TargetStoreID, FootprintKey} = Args,
			%% Track this peer and get cached Pid
			{Pid, State1} = maybe_add_peer(Peer, State),
			case Pid of
				undefined ->
					{noreply, State1};
				_ ->
					%% Check if there's global capacity for new footprints
					HasCapacity = State1#state.total_active_footprints
							< State1#state.max_footprints,
					WorkerCount = queue:len(State1#state.workers),
					{WasActivated, TasksToDispatch} = ar_peer_worker:enqueue_task(
						Pid, FootprintKey, Args, HasCapacity, WorkerCount),
					%% The new task always counts as queued; the footprint counter
					%% only moves when the peer worker activated a footprint.
					State2 =
						case WasActivated of
							true ->
								State1#state{
									total_active_footprints =
										State1#state.total_active_footprints + 1,
									total_queued_count =
										State1#state.total_queued_count + 1
								};
							false ->
								State1#state{
									total_queued_count =
										State1#state.total_queued_count + 1
								}
						end,
					%% Dispatch tasks to workers
					State3 = dispatch_tasks(Pid, TasksToDispatch, State2),
					{noreply, State3}
			end
	end;
%% task_completed: account for the freed worker slot, tell the peer worker
%% (footprint completion, performance rating), then refill workers from all
%% peer queues, starting with the peer that just completed.
handle_cast({task_completed, {sync_range, {Worker, Result, Args, ElapsedNative}}},
		State) ->
	{Start, End, Peer, _, _, FootprintKey} = Args,
	DataSize = End - Start,
	State2 = increment_dispatched_task_count(Worker, -1, State),
	%% Notify peer worker (handles footprint completion, performance rating)
	case maps:find(Peer, State2#state.known_peers) of
		{ok, Pid} ->
			ar_peer_worker:task_completed(Pid, FootprintKey, Result, ElapsedNative,
					DataSize);
		error ->
			%% Peer not in cache (shouldn't happen normally)
			?LOG_WARNING([{event, task_completed_unknown_peer},
					{peer, ar_util:format_peer(Peer)}])
	end,
	%% Process all peer queues, starting with the peer that just completed
	State3 = process_all_peer_queues(Peer, State2),
	{noreply, State3};
%% rebalance_peers: periodic self-rescheduling pass that recomputes targets and
%% asks every peer worker to rebalance (possibly shutting idle ones down).
handle_cast(rebalance_peers, State) ->
	ar_util:cast_after(?REBALANCE_FREQUENCY_MS, ?MODULE, rebalance_peers),
	%% TODO: Add logic to purge empty peer workers (no queued tasks, no dispatched tasks).
	PeerPids = State#state.known_peers, %% #{Peer => Pid}
	Peers = maps:keys(PeerPids),
	AllPeerPerformances = ar_peers:get_peer_performances(Peers),
	Targets = calculate_targets(PeerPids, AllPeerPerformances, State),
	State2 = rebalance_peers(PeerPids, AllPeerPerformances, Targets, State),
	{noreply, State2};
%% footprint_deactivated: free one footprint slot (never going below zero) and,
%% if capacity opened up, let some peer activate a waiting footprint.
handle_cast({footprint_deactivated, _Peer}, State) ->
	NewCount = max(0, State#state.total_active_footprints - 1),
	State2 = State#state{ total_active_footprints = NewCount },
	%% Notify all peers that capacity is available so they can activate waiting footprints
	State3 =
		case NewCount < State2#state.max_footprints of
			true ->
				notify_peers_capacity_available(State2);
			false ->
				State2
		end,
	{noreply, State3};
handle_cast({peer_worker_started, Peer, Pid}, State) ->
	%% Peer worker (re)started - update cached PID
	State2 = State#state{ known_peers = maps:put(Peer, Pid, State#state.known_peers) },
	{noreply, State2};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, terminate}, {module, ?MODULE},
			{reason, io_lib:format("~p", [Reason])}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%%--------------------------------------------------------------------
%% Peer queue management
%%--------------------------------------------------------------------

%% @doc If a peer has capacity, take tasks from its queue and dispatch them.
%% Note: a new footprint will not be activated while dispatching tasks as only tasks
%% belonging to an already active footprint will be in the ar_peer_worker's task queue
process_peer_queue(Pid, State) ->
	WorkerCount = queue:len(State#state.workers),
	TasksToDispatch = ar_peer_worker:process_queue(Pid, WorkerCount),
	dispatch_tasks(Pid, TasksToDispatch, State).

%% @doc Process all peer queues, with the priority peer processed first.
process_all_peer_queues(PriorityPeer, State) ->
	case queue:is_empty(State#state.workers) of
		true ->
			State;
		false ->
			PeerPids = State#state.known_peers,
			PriorityPid = maps:get(PriorityPeer, PeerPids, undefined),
			OtherPids = maps:values(maps:remove(PriorityPeer, PeerPids)),
			AllPids =
				case PriorityPid of
					undefined ->
						OtherPids;
					_ ->
						[PriorityPid | OtherPids]
				end,
			process_all_peer_queues2(AllPids, State)
	end.

%% Fold process_peer_queue/2 over the given peer worker Pids.
process_all_peer_queues2([], State) ->
	State;
process_all_peer_queues2([Pid | Rest], State) ->
	State2 = process_peer_queue(Pid, State),
	process_all_peer_queues2(Rest, State2).

%% @doc the maximum number of tasks we can have in process.
max_tasks(WorkerCount) ->
	WorkerCount * 50.

%%--------------------------------------------------------------------
%% Dispatch tasks to be run on workers
%%--------------------------------------------------------------------

%% @doc Dispatch tasks to workers.
%% Caller must ensure workers are available before calling.
dispatch_tasks(_Pid, [], State) ->
	State;
dispatch_tasks(Pid, [Args | Rest], State) ->
	{Worker, State2} = get_worker(State),
	{Start, End, Peer, TargetStoreID, FootprintKey} = Args,
	%% Pass FootprintKey as 6th element so it comes back in task_completed
	gen_server:cast(Worker,
			{sync_range, {Start, End, Peer, TargetStoreID, 3, FootprintKey}}),
	%% When a task is dispatched it's removed from the ar_peer_worker's task queue and
	%% sent to an ar_data_sync_worker.
	State3 = increment_dispatched_task_count(Worker, 1, State2),
	State4 = State3#state{
		total_queued_count = max(0, State3#state.total_queued_count - 1)
	},
	dispatch_tasks(Pid, Rest, State4).

%%--------------------------------------------------------------------
%% Rebalancing
%%--------------------------------------------------------------------

%% @doc Calculate rebalance parameters.
%% PeerPids is #{Peer => Pid}
%% Returns {WorkerCount, TargetLatency, TotalThroughput, TotalMaxDispatched}
calculate_targets(PeerPids, AllPeerPerformances, State) ->
	WorkerCount = queue:len(State#state.workers),
	Peers = maps:keys(PeerPids),
	%% Sum of the peers' current throughput ratings.
	TotalThroughput = lists:foldl(
		fun(Peer, Acc) ->
			Performance = maps:get(Peer, AllPeerPerformances, #performance{}),
			Acc + Performance#performance.current_rating
		end, 0.0, Peers),
	TotalLatency = lists:foldl(
		fun(Peer, Acc) ->
			Performance = maps:get(Peer, AllPeerPerformances, #performance{}),
			Acc + Performance#performance.average_latency
		end, 0.0, Peers),
	%% Target latency is the mean over all known peers (0.0 when there are none).
	TargetLatency =
		case length(Peers) > 0 of
			true ->
				TotalLatency / length(Peers);
			false ->
				0.0
		end,
	%% Sum of each peer worker's dispatch limit; peers that fail to answer are
	%% skipped rather than aborting the whole pass.
	TotalMaxDispatched = maps:fold(
		fun(_Peer, Pid, Acc) ->
			case ar_peer_worker:get_max_dispatched(Pid) of
				{error, _} ->
					Acc;
				MaxDispatched ->
					MaxDispatched + Acc
			end
		end, 0, PeerPids),
	?LOG_DEBUG([{event, sync_performance_targets},
			{worker_count, WorkerCount},
			{target_latency, TargetLatency},
			{total_throughput, TotalThroughput},
			{total_max_dispatched, TotalMaxDispatched}]),
	{WorkerCount, TargetLatency, TotalThroughput, TotalMaxDispatched}.

%% PeerPidsList is [{Peer, Pid}]
rebalance_peers(PeerPids, AllPeerPerformances, Targets, State) ->
	rebalance_peers2(maps:to_list(PeerPids), AllPeerPerformances, Targets, State).
rebalance_peers2([], _AllPeerPerformances, _Targets, State) -> State; rebalance_peers2([{Peer, Pid} | Rest], AllPeerPerformances, Targets, State) -> {WorkerCount, TargetLatency, TotalThroughput, TotalMaxDispatched} = Targets, Performance = maps:get(Peer, AllPeerPerformances, #performance{}), %% Calculate rebalance params (peer calculates FasterThanTarget from Performance) QueueScalingFactor = queue_scaling_factor(TotalThroughput, WorkerCount), WorkersStarved = TotalMaxDispatched < WorkerCount, RebalanceParams = {QueueScalingFactor, TargetLatency, WorkersStarved}, Result = ar_peer_worker:rebalance(Pid, Performance, RebalanceParams), State2 = case Result of {shutdown, RemovedCount} -> %% Peer worker is idle and should be shutdown ?LOG_INFO([{event, shutdown_idle_peer_worker}, {peer, ar_util:format_peer(Peer)}]), ar_peer_worker:stop(Pid), State#state{ total_queued_count = max(0, State#state.total_queued_count - RemovedCount), known_peers = maps:remove(Peer, State#state.known_peers) }; {ok, RemovedCount} -> State#state{ total_queued_count = max(0, State#state.total_queued_count - RemovedCount) }; {error, timeout} -> %% Peer worker timed out, skip it State end, rebalance_peers2(Rest, AllPeerPerformances, Targets, State2). %% @doc Scaling factor for calculating per-peer max queue size. %% Peer worker calculates: MaxQueue = max(PeerThroughput * ScalingFactor, MIN_PEER_QUEUE) queue_scaling_factor(0, _WorkerCount) -> infinity; queue_scaling_factor(0.0, _WorkerCount) -> infinity; queue_scaling_factor(TotalThroughput, WorkerCount) -> max_tasks(WorkerCount) / TotalThroughput. 
%%-------------------------------------------------------------------- %% Helpers %%-------------------------------------------------------------------- increment_dispatched_task_count(Worker, N, State) -> ActiveCount = maps:get(Worker, State#state.dispatched_count_per_worker, 0) + N, State#state{ total_dispatched_count = State#state.total_dispatched_count + N, dispatched_count_per_worker = maps:put(Worker, ActiveCount, State#state.dispatched_count_per_worker) }. %% @doc Add a peer to known_peers if not already present. Returns {Pid, State}. %% The Pid is cached so we don't have to do whereis + atom lookup on every call. maybe_add_peer(Peer, State) -> case State#state.known_peers of #{Peer := Pid} -> {Pid, State}; _ -> case ar_peer_worker:get_or_start(Peer) of {ok, Pid} -> {Pid, State#state{ known_peers = maps:put(Peer, Pid, State#state.known_peers) }}; {error, _} -> {undefined, State} end end. get_worker(State) -> WorkerCount = queue:len(State#state.workers), AverageLoad = State#state.total_dispatched_count / WorkerCount, cycle_workers(AverageLoad, State). cycle_workers(AverageLoad, State) -> #state{ workers = Workers } = State, {{value, Worker}, Workers2} = queue:out(Workers), State2 = State#state{ workers = queue:in(Worker, Workers2) }, ActiveCount = maps:get(Worker, State2#state.dispatched_count_per_worker, 0), case ActiveCount =< AverageLoad of true -> {Worker, State2}; false -> cycle_workers(AverageLoad, State2) end. %% Notify all known peers that global footprint capacity is available. notify_peers_capacity_available(#state{ known_peers = KnownPeers } = State) -> %% Iterate through peers, stop when one activates a footprint to avoid over-activation PeerList = maps:to_list(KnownPeers), case try_activate_footprint(PeerList) of true -> State#state{ total_active_footprints = State#state.total_active_footprints + 1 }; false -> State end. 
try_activate_footprint([]) -> false; try_activate_footprint([{_Peer, Pid} | Rest]) -> case ar_peer_worker:try_activate_footprint(Pid) of true -> %% A footprint was activated, stop here true; false -> %% No footprint activated, try next peer try_activate_footprint(Rest) end. %%%=================================================================== %%% Tests. %%%=================================================================== -ifdef(AR_TEST). -include_lib("eunit/include/eunit.hrl"). coordinator_test_() -> [ {timeout, 30, fun test_get_worker/0}, {timeout, 30, fun test_max_tasks/0}, {timeout, 30, fun test_increment_dispatched_task_count/0}, {timeout, 30, fun test_queue_scaling_factor/0}, {timeout, 30, fun test_footprint_deactivated/0}, {timeout, 30, fun test_peer_worker_started_updates_cache/0}, {timeout, 30, fun test_reset_worker/0}, {timeout, 30, fun test_dispatch_tasks_updates_counts/0}, {timeout, 30, fun test_try_activate_footprint_stops_on_success/0}, {timeout, 30, fun test_try_activate_footprint_tries_all/0}, {timeout, 30, fun test_calculate_targets/0} ]. test_get_worker() -> State0 = #state{ workers = queue:from_list([worker1, worker2, worker3]), total_dispatched_count = 6, dispatched_count_per_worker = #{worker1 => 3, worker2 => 2, worker3 => 1} }, {worker2, State1} = get_worker(State0), State2 = increment_dispatched_task_count(worker2, 1, State1), {worker3, State3} = get_worker(State2), State4 = increment_dispatched_task_count(worker3, 1, State3), {worker3, State5} = get_worker(State4), State6 = increment_dispatched_task_count(worker3, 1, State5), {worker1, _} = get_worker(State6). test_max_tasks() -> ?assertEqual(50, max_tasks(1)), ?assertEqual(100, max_tasks(2)), ?assertEqual(500, max_tasks(10)), ?assertEqual(5000, max_tasks(100)). 
test_increment_dispatched_task_count() -> State0 = #state{ total_dispatched_count = 5, dispatched_count_per_worker = #{worker1 => 3, worker2 => 2} }, %% Increment worker1 State1 = increment_dispatched_task_count(worker1, 2, State0), ?assertEqual(7, State1#state.total_dispatched_count), ?assertEqual(5, maps:get(worker1, State1#state.dispatched_count_per_worker)), %% Decrement worker2 State2 = increment_dispatched_task_count(worker2, -1, State1), ?assertEqual(6, State2#state.total_dispatched_count), ?assertEqual(1, maps:get(worker2, State2#state.dispatched_count_per_worker)), %% Add new worker State3 = increment_dispatched_task_count(worker3, 1, State2), ?assertEqual(7, State3#state.total_dispatched_count), ?assertEqual(1, maps:get(worker3, State3#state.dispatched_count_per_worker)). test_queue_scaling_factor() -> %% Zero throughput returns infinity ?assertEqual(infinity, queue_scaling_factor(0, 10)), ?assertEqual(infinity, queue_scaling_factor(0.0, 10)), %% Normal calculation: max_tasks(WorkerCount) / TotalThroughput %% max_tasks(10) = 500 ?assertEqual(5.0, queue_scaling_factor(100.0, 10)), ?assertEqual(2.5, queue_scaling_factor(200.0, 10)), ?assertEqual(50.0, queue_scaling_factor(10.0, 10)). test_footprint_deactivated() -> State0 = #state{ total_active_footprints = 5, max_footprints = 10, known_peers = #{} }, %% Simulate footprint_deactivated cast {noreply, State1} = handle_cast({footprint_deactivated, {1,2,3,4,1984}}, State0), ?assertEqual(4, State1#state.total_active_footprints), %% Should not go below 0 State2 = State0#state{ total_active_footprints = 0 }, {noreply, State3} = handle_cast({footprint_deactivated, {1,2,3,4,1984}}, State2), ?assertEqual(0, State3#state.total_active_footprints). 
%% The peer_worker_started cast inserts (or replaces, e.g. after a worker
%% restart) the Peer -> Pid mapping in known_peers.
test_peer_worker_started_updates_cache() ->
	Peer1 = {1,2,3,4,1984},
	Peer2 = {5,6,7,8,1985},
	Pid1 = self(), %% Use self() as a fake Pid for testing
	Pid2 = self(),
	State0 = #state{ known_peers = #{} },
	%% Add first peer
	{noreply, State1} = handle_cast({peer_worker_started, Peer1, Pid1}, State0),
	?assertEqual(Pid1, maps:get(Peer1, State1#state.known_peers)),
	%% Add second peer
	{noreply, State2} = handle_cast({peer_worker_started, Peer2, Pid2}, State1),
	?assertEqual(Pid1, maps:get(Peer1, State2#state.known_peers)),
	?assertEqual(Pid2, maps:get(Peer2, State2#state.known_peers)),
	%% Update existing peer (simulating restart)
	NewPid = spawn(fun() -> ok end),
	{noreply, State3} = handle_cast({peer_worker_started, Peer1, NewPid}, State2),
	?assertEqual(NewPid, maps:get(Peer1, State3#state.known_peers)).

%% The reset_worker call zeroes a worker's dispatched count and subtracts
%% its previous count from the global total; unknown workers are a no-op
%% for the total.
test_reset_worker() ->
	State0 = #state{
		total_dispatched_count = 10,
		dispatched_count_per_worker = #{worker1 => 5, worker2 => 3, worker3 => 2}
	},
	%% Reset worker1 (had 5 tasks)
	{reply, ok, State1} = handle_call({reset_worker, worker1}, self(), State0),
	?assertEqual(5, State1#state.total_dispatched_count), %% 10 - 5 = 5
	?assertEqual(0, maps:get(worker1, State1#state.dispatched_count_per_worker)),
	%% Reset worker2 (had 3 tasks)
	{reply, ok, State2} = handle_call({reset_worker, worker2}, self(), State1),
	?assertEqual(2, State2#state.total_dispatched_count), %% 5 - 3 = 2
	?assertEqual(0, maps:get(worker2, State2#state.dispatched_count_per_worker)),
	%% Reset unknown worker (should handle gracefully - count is 0)
	{reply, ok, State3} = handle_call({reset_worker, unknown_worker}, self(), State2),
	?assertEqual(2, State3#state.total_dispatched_count).
%% dispatch_tasks/3 with an empty task list must leave all counters
%% untouched. Full dispatch behavior is not exercised here (it would
%% require live worker processes).
test_dispatch_tasks_updates_counts() ->
	State0 = #state{
		workers = queue:from_list([worker1, worker2]),
		total_dispatched_count = 0,
		total_queued_count = 5,
		dispatched_count_per_worker = #{}
	},
	%% Dispatch empty list - no changes
	State1 = dispatch_tasks(self(), [], State0),
	?assertEqual(0, State1#state.total_dispatched_count),
	?assertEqual(5, State1#state.total_queued_count),
	%% Note: We can't fully test dispatch_tasks without mocking workers,
	%% but we can verify the state changes for the helper functions.
	%% Verify that dispatching would decrement total_queued_count
	%% by manually simulating what dispatch_tasks does
	State2 = State1#state{
		total_queued_count = max(0, State1#state.total_queued_count - 1)
	},
	?assertEqual(4, State2#state.total_queued_count).

%% try_activate_footprint/1 must stop iterating at the first peer worker
%% that activates a footprint: peer3 must never be called.
test_try_activate_footprint_stops_on_success() ->
	%% Create mock peer processes that track if they were called
	Parent = self(),
	%% Peer1 returns false (no waiting footprint)
	Pid1 = spawn(fun() -> mock_peer_worker(Parent, peer1, false) end),
	%% Peer2 returns true (activates a footprint)
	Pid2 = spawn(fun() -> mock_peer_worker(Parent, peer2, true) end),
	%% Peer3 should NOT be called (iteration should stop at Peer2)
	Pid3 = spawn(fun() -> mock_peer_worker(Parent, peer3, false) end),
	%% Register mock processes so ar_peer_worker:try_activate_footprint can find them
	%% We need to call try_activate_footprint directly with our mock list
	PeerList = [{peer1, Pid1}, {peer2, Pid2}, {peer3, Pid3}],
	%% Call the function directly
	true = try_activate_footprint(PeerList),
	%% Wait a bit for messages
	timer:sleep(50),
	%% Check which peers were called
	?assertEqual([peer1, peer2], collect_called_peers([])),
	%% Cleanup
	exit(Pid1, kill),
	exit(Pid2, kill),
	exit(Pid3, kill).
%% When no peer worker activates a footprint, try_activate_footprint/1
%% must visit every peer in order and return false.
test_try_activate_footprint_tries_all() ->
	%% Create mock peer processes that all return false
	Parent = self(),
	Pid1 = spawn(fun() -> mock_peer_worker(Parent, peer1, false) end),
	Pid2 = spawn(fun() -> mock_peer_worker(Parent, peer2, false) end),
	Pid3 = spawn(fun() -> mock_peer_worker(Parent, peer3, false) end),
	PeerList = [{peer1, Pid1}, {peer2, Pid2}, {peer3, Pid3}],
	%% Call the function directly
	false = try_activate_footprint(PeerList),
	%% Wait a bit for messages
	timer:sleep(50),
	%% All three peers should have been called
	?assertEqual([peer1, peer2, peer3], collect_called_peers([])),
	%% Cleanup
	exit(Pid1, kill),
	exit(Pid2, kill),
	exit(Pid3, kill).

%% Mock peer worker that responds to try_activate_footprint calls.
%% Speaks the raw gen_server call protocol ({'$gen_call', From, Req}) so the
%% real ar_peer_worker:try_activate_footprint/1 (a gen_server:call) works
%% against it. Reports each call back to Parent as {called, PeerName}.
mock_peer_worker(Parent, PeerName, ReturnValue) ->
	receive
		{'$gen_call', From, try_activate_footprint} ->
			Parent ! {called, PeerName},
			gen_server:reply(From, ReturnValue)
	after 5000 ->
		ok
	end.

%% Drain all {called, PeerName} messages from the test process mailbox,
%% preserving arrival order; returns once the mailbox is quiet for 10 ms.
collect_called_peers(Acc) ->
	receive
		{called, PeerName} ->
			collect_called_peers(Acc ++ [PeerName])
	after 10 ->
		Acc
	end.
%% calculate_targets/3 must aggregate across all known peers: worker count
%% from the queue, summed throughput (current_rating), mean latency, and
%% summed max_dispatched obtained by calling each peer worker.
test_calculate_targets() ->
	%% Create mock peer workers that respond to get_max_dispatched
	Pid1 = spawn(fun() -> mock_peer_worker_max_dispatched(10) end),
	Pid2 = spawn(fun() -> mock_peer_worker_max_dispatched(15) end),
	Pid3 = spawn(fun() -> mock_peer_worker_max_dispatched(20) end),
	Peer1 = {1,2,3,4,1984},
	Peer2 = {5,6,7,8,1985},
	Peer3 = {9,10,11,12,1986},
	PeerPids = #{Peer1 => Pid1, Peer2 => Pid2, Peer3 => Pid3},
	%% Create performance records
	AllPeerPerformances = #{
		Peer1 => #performance{ current_rating = 100.0, average_latency = 50.0 },
		Peer2 => #performance{ current_rating = 200.0, average_latency = 100.0 },
		Peer3 => #performance{ current_rating = 300.0, average_latency = 150.0 }
	},
	State = #state{ workers = queue:from_list([w1, w2, w3, w4, w5]) },
	{WorkerCount, TargetLatency, TotalThroughput, TotalMaxDispatched} =
		calculate_targets(PeerPids, AllPeerPerformances, State),
	%% WorkerCount = 5 (number of workers in queue)
	?assertEqual(5, WorkerCount),
	%% TotalThroughput = 100 + 200 + 300 = 600
	?assertEqual(600.0, TotalThroughput),
	%% TargetLatency = (50 + 100 + 150) / 3 = 100
	?assertEqual(100.0, TargetLatency),
	%% TotalMaxDispatched = 10 + 15 + 20 = 45
	?assertEqual(45, TotalMaxDispatched),
	%% Cleanup
	exit(Pid1, kill),
	exit(Pid2, kill),
	exit(Pid3, kill).

%% Mock peer worker for get_max_dispatched. Loops so it can serve several
%% calls; answers the raw gen_server call protocol.
mock_peer_worker_max_dispatched(MaxDispatched) ->
	receive
		{'$gen_call', From, get_max_dispatched} ->
			gen_server:reply(From, MaxDispatched),
			mock_peer_worker_max_dispatched(MaxDispatched)
	after 5000 ->
		ok
	end.

-endif.


================================================
FILE: apps/arweave/src/ar_data_sync_sup.erl
================================================
-module(ar_data_sync_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

-include_lib("ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

init([]) ->
	%% Peer worker supervisor must start before worker master
	PeerWorkerSup = #{
		id => ar_peer_worker_sup,
		start => {ar_peer_worker_sup, start_link, []},
		restart => permanent,
		shutdown => infinity,
		type => supervisor,
		modules => [ar_peer_worker_sup]
	},
	%% Child order matters: one_for_one starts children left to right.
	Children = [PeerWorkerSup]
		++ ar_data_sync_coordinator:register_workers()
		++ ar_chunk_copy:register_workers()
		++ ar_data_sync:register_workers(),
	{ok, {{one_for_one, 5, 10}, Children}}.


================================================
FILE: apps/arweave/src/ar_data_sync_worker.erl
================================================
%%% @doc A process fetching the weave data from the network and from the local
%%% storage modules, one chunk (or a range of chunks) at a time. The workers
%%% are coordinated by ar_data_sync_coordinator. The workers do not update the
%%% storage - updates are handled by ar_data_sync_* processes.
-module(ar_data_sync_worker).

-behaviour(gen_server).

-export([start_link/2]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_data_sync.hrl").

-record(state, {
	name = undefined,
	request_packed_chunks = false
}).

%% # of messages to cast to ar_data_sync at once. Each message carries at least 1 chunk worth
%% of data (256 KiB). Since there are dozens or hundreds of workers, if each one posts too
%% many messages at once it can overload the available memory.
-define(READ_RANGE_MESSAGES_PER_BATCH, 40).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link(Name, Mode) ->
	gen_server:start_link({local, Name}, ?MODULE, {Name, Mode}, []).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init({Name, Mode}) ->
	?LOG_INFO([{event, init}, {module, ?MODULE}, {name, Name}]),
	{ok, Config} = arweave_config:get_env(),
	%% In case there has been a restart we need to tell
	%% ar_data_sync_coordinator to erase pending worker tasks.
	%% We only want to do this for sync workers, not read workers.
	case Mode of
		sync ->
			gen_server:call(ar_data_sync_coordinator, {reset_worker, Name}, 30_000);
		_ ->
			ok
	end,
	{ok, #state{
		name = Name,
		request_packed_chunks = Config#config.data_sync_request_packed_chunks
	}}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast({read_range, Args}, State) ->
	case read_range(Args) of
		recast ->
			%% The task re-enqueued itself (cache full / disk short / batch
			%% budget spent); completion will be reported by a later run.
			ok;
		ReadResult ->
			gen_server:cast(ar_chunk_copy, {task_completed,
					{read_range, {State#state.name, ReadResult, Args}}})
	end,
	{noreply, State};
handle_cast({sync_range, Args}, State) ->
	{_, _, Peer, _, RetryCount, FootprintKey} = Args,
	StartTime = erlang:monotonic_time(),
	SyncResult = sync_range(Args, State),
	EndTime = erlang:monotonic_time(),
	case SyncResult of
		recast ->
			%% Only log at WARNING when retries are exhausted (RetryCount <= 1),
			%% otherwise log at DEBUG to reduce noise
			case RetryCount =< 1 of
				true ->
					?LOG_WARNING([{event, sync_range_recast_exhausted},
							{peer, ar_util:format_peer(Peer)},
							{footprint_key, FootprintKey},
							{retry_count, RetryCount},
							{worker, State#state.name}]);
				false ->
					ok
			end,
			ok;
		_ ->
			gen_server:cast(ar_data_sync_coordinator, {task_completed,
					{sync_range,
						{State#state.name, SyncResult, Args, EndTime-StartTime}}})
	end,
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(_Message, State) ->
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, terminate}, {module, ?MODULE},
			{reason, io_lib:format("~p", [Reason])}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Read the [Start, End) range from the OriginStoreID storage module and
%% forward the chunks to the TargetStoreID ar_data_sync process. Returns ok
%% when done, or recast after re-scheduling itself (chunk cache full or disk
%% space insufficient).
read_range({Start, End, _OriginStoreID, _TargetStoreID}) when Start >= End ->
	ok;
read_range({Start, End, _OriginStoreID, TargetStoreID} = Args) ->
	case ar_data_sync:is_chunk_cache_full() of
		false ->
			case ar_data_sync:is_disk_space_sufficient(TargetStoreID) of
				true ->
					?LOG_DEBUG([{event, read_range}, {pid, self()},
							{size_mb, (End - Start) / ?MiB}, {args, Args}]),
					read_range2(?READ_RANGE_MESSAGES_PER_BATCH, Args);
				_ ->
					ar_util:cast_after(30000, self(), {read_range, Args}),
					recast
			end;
		_ ->
			ar_util:cast_after(200, self(), {read_range, Args}),
			recast
	end.

%% @doc Read at most MessagesRemaining chunks of the range, casting each to
%% the target ar_data_sync process. When the per-batch budget is spent, the
%% remainder is re-scheduled (returns recast) so a single worker cannot flood
%% the node with pack_and_store_chunk messages.
read_range2(0, Args) ->
	ar_util:cast_after(1000, self(), {read_range, Args}),
	recast;
read_range2(_MessagesRemaining, {Start, End, _OriginStoreID, _TargetStoreID})
		when Start >= End ->
	ok;
read_range2(MessagesRemaining, {Start, End, OriginStoreID, TargetStoreID}) ->
	%% Skip over the prefix of the range already present in the target.
	CheckIsRecordedAlready =
		case ar_sync_record:is_recorded(Start + 1, ar_data_sync, TargetStoreID) of
			{true, _} ->
				case ar_sync_record:get_next_unsynced_interval(Start, End,
						ar_data_sync, TargetStoreID) of
					not_found ->
						ok;
					{_, Start2} ->
						read_range2(MessagesRemaining,
								{Start2, End, OriginStoreID, TargetStoreID})
				end;
			_ ->
				false
		end,
	IsRecordedInTheSource =
		case CheckIsRecordedAlready of
			ok ->
				ok;
			recast ->
				ok;
			false ->
				case ar_sync_record:is_recorded(Start + 1, ar_data_sync,
						OriginStoreID) of
					{true, Packing} ->
						{true, Packing};
					SyncRecordReply ->
						?LOG_ERROR([{event, cannot_read_requested_range},
								{origin_store_id, OriginStoreID},
								{missing_start_offset, Start + 1},
								{end_offset, End},
								{target_store_id, TargetStoreID},
								{sync_record_reply,
									io_lib:format("~p", [SyncRecordReply])}])
				end
		end,
	ReadChunkMetadata =
		case IsRecordedInTheSource of
			ok ->
				ok;
			{true, Packing2} ->
				{Packing2, ar_data_sync:get_chunk_by_byte(Start + 1, OriginStoreID)}
		end,
	PaddedEnd = ar_block:get_chunk_padded_offset(End),
	case ReadChunkMetadata of
		ok ->
			ok;
		{_, {error, invalid_iterator}} ->
			%% get_chunk_by_byte looks for a key with the same prefix or the next
			%% prefix. Therefore, if there is no such key, it does not make sense to
			%% look for any key smaller than the prefix + 2 in the next iteration.
			PrefixSpaceSize = trunc(math:pow(2,
					?OFFSET_KEY_BITSIZE - ?OFFSET_KEY_PREFIX_BITSIZE)),
			Start3 = ((Start div PrefixSpaceSize) + 2) * PrefixSpaceSize,
			read_range2(MessagesRemaining,
					{Start3, End, OriginStoreID, TargetStoreID});
		{_, {error, Reason}} ->
			?LOG_ERROR([{event, failed_to_query_chunk_metadata}, {offset, Start + 1},
					{reason, io_lib:format("~p", [Reason])}]);
		{_, {ok, _Key, {AbsoluteOffset, _, _, _, _, _, _}}}
				when AbsoluteOffset > PaddedEnd ->
			ok;
		{Packing3, {ok, _Key, {AbsoluteOffset, ChunkDataKey, TXRoot, DataRoot,
				TXPath, RelativeOffset, ChunkSize}}} ->
			ReadChunk = ar_data_sync:read_chunk(AbsoluteOffset, ChunkDataKey,
					OriginStoreID),
			case ReadChunk of
				not_found ->
					%% Metadata present but chunk data gone - drop the record.
					ar_data_sync:invalidate_bad_data_record(
						AbsoluteOffset, ChunkSize, OriginStoreID,
						read_range_chunk_not_found),
					read_range2(MessagesRemaining-1,
							{Start + ChunkSize, End, OriginStoreID, TargetStoreID});
				{error, Error} ->
					?LOG_ERROR([{event, failed_to_read_chunk},
							{absolute_end_offset, AbsoluteOffset},
							{chunk_data_key, ar_util:encode(ChunkDataKey)},
							{reason, io_lib:format("~p", [Error])}]),
					read_range2(MessagesRemaining,
							{Start + ChunkSize, End, OriginStoreID, TargetStoreID});
				{ok, {Chunk, DataPath}} ->
					case ar_sync_record:is_recorded(AbsoluteOffset, ar_data_sync,
							OriginStoreID) of
						{true, Packing3} ->
							ar_data_sync:increment_chunk_cache_size(),
							UnpackedChunk =
								case Packing3 of
									unpacked -> Chunk;
									_ -> none
								end,
							Args = {DataRoot, AbsoluteOffset, TXPath, TXRoot,
									DataPath, Packing3, RelativeOffset, ChunkSize,
									Chunk, UnpackedChunk, TargetStoreID,
									ChunkDataKey},
							gen_server:cast(ar_data_sync:name(TargetStoreID),
									{pack_and_store_chunk, Args}),
							read_range2(MessagesRemaining-1,
									{Start + ChunkSize, End, OriginStoreID,
										TargetStoreID});
						{true, _DifferentPacking} ->
							%% Unlucky timing - the chunk should have been repacked
							%% in the meantime.
							read_range2(MessagesRemaining,
									{Start, End, OriginStoreID, TargetStoreID});
						Reply ->
							?LOG_ERROR([{event, chunk_record_not_found},
									{absolute_end_offset, AbsoluteOffset},
									{ar_sync_record_reply,
										io_lib:format("~p", [Reply])}]),
							read_range2(MessagesRemaining,
									{Start + ChunkSize, End, OriginStoreID,
										TargetStoreID})
					end
			end
	end.

%% @doc Fetch the [Start, End) range from Peer chunk by chunk and cast each
%% proof to the target ar_data_sync process. Returns ok on success,
%% {error, Reason} on a terminal failure, {error, timeout} when retries are
%% exhausted, or recast after re-scheduling itself.
%% Note: the unused head variables of the terminal clauses are underscored to
%% avoid compiler warnings (no behavior change).
sync_range({Start, End, _Peer, _TargetStoreID, _RetryCount, _FootprintKey}, _State)
		when Start >= End ->
	ok;
sync_range({Start, End, Peer, _TargetStoreID, 0, _FootprintKey}, _State) ->
	?LOG_DEBUG([{event, sync_range_retries_exhausted},
			{peer, ar_util:format_peer(Peer)},
			{start_offset, Start}, {end_offset, End}]),
	{error, timeout};
sync_range({Start, End, Peer, TargetStoreID, RetryCount, FootprintKey} = Args,
		State) ->
	IsChunkCacheFull =
		case ar_data_sync:is_chunk_cache_full() of
			true ->
				ar_util:cast_after(500, self(), {sync_range, Args}),
				true;
			false ->
				false
		end,
	IsDiskSpaceSufficient =
		case IsChunkCacheFull of
			false ->
				case ar_data_sync:is_disk_space_sufficient(TargetStoreID) of
					true ->
						true;
					_ ->
						ar_util:cast_after(30000, self(), {sync_range, Args}),
						false
				end;
			true ->
				false
		end,
	case IsDiskSpaceSufficient of
		false ->
			recast;
		true ->
			Start2 = ar_tx_blacklist:get_next_not_blacklisted_byte(Start + 1),
			Byte = Start2 - 1,
			IsRecorded = ar_sync_record:is_recorded(Byte + 1, ar_data_sync,
					TargetStoreID),
			case {Byte >= End, IsRecorded} of
				{true, _} ->
					ok;
				{_, {true, _}} ->
					%% Already synced locally - nothing to fetch.
					ok;
				_ ->
					Packing = get_target_packing(TargetStoreID,
							State#state.request_packed_chunks),
					case ar_http_iface_client:get_chunk_binary(Peer, Start2,
							Packing) of
						{ok, #{ chunk := Chunk } = Proof, _Time, _TransferSize} ->
							%% In case we fetched a packed small chunk,
							%% we may potentially skip some chunks by
							%% continuing with Start2 + byte_size(Chunk) - the skip
							%% chunks will be then requested later.
							Start3 = ar_block:get_chunk_padded_offset(
									Start2 + byte_size(Chunk)) + 1,
							gen_server:cast(ar_data_sync:name(TargetStoreID),
									{store_fetched_chunk, Peer, Byte, Proof}),
							ar_data_sync:increment_chunk_cache_size(),
							sync_range({Start3, End, Peer, TargetStoreID,
									RetryCount, FootprintKey}, State);
						{error, timeout} ->
							?LOG_DEBUG([{event, timeout_fetching_chunk},
									{peer, ar_util:format_peer(Peer)},
									{start_offset, Start2}, {end_offset, End}]),
							Args2 = {Start, End, Peer, TargetStoreID,
									RetryCount - 1, FootprintKey},
							ar_util:cast_after(1000, self(), {sync_range, Args2}),
							recast;
						{error, {ok, {{<<"404">>, _}, _, _, _, _}} = Reason} ->
							%% Peer does not have the chunk - terminal for this task.
							{error, Reason};
						{error, Reason} ->
							ar_http_iface_client:log_failed_request({error, Reason}, [
									{event, failed_to_fetch_chunk},
									{peer, ar_util:format_peer(Peer)},
									{start_offset, Start2}, {end_offset, End},
									{reason, io_lib:format("~p", [Reason])}]),
							{error, Reason}
					end
			end
	end.

%% @doc Return the packing to request from peers: the storage module's own
%% packing when request_packed_chunks is enabled, otherwise any.
get_target_packing(StoreID, true) ->
	ar_storage_module:get_packing(StoreID);
get_target_packing(_StoreID, false) ->
	any.


================================================
FILE: apps/arweave/src/ar_deep_hash.erl
================================================
-module(ar_deep_hash).

-export([hash/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

hash(List) when is_list(List) -> hash_bin_or_list(List).

%%% INTERNAL

hash_bin_or_list(Bin) when is_binary(Bin) ->
	Tag = <<"blob", (integer_to_binary(byte_size(Bin)))/binary>>,
	hash_bin(<<(hash_bin(Tag))/binary, (hash_bin(Bin))/binary>>);
hash_bin_or_list(List) when is_list(List) ->
	Tag = <<"list", (integer_to_binary(length(List)))/binary>>,
	hash_list(List, hash_bin(Tag)).

hash_list([], Acc) ->
	Acc;
hash_list([Head | List], Acc) ->
	%% Fold each element's deep hash into the accumulator:
	%% Acc' = H(Acc || H(element)).
	HashPair = <<Acc/binary, (hash_bin_or_list(Head))/binary>>,
	NewAcc = hash_bin(HashPair),
	hash_list(List, NewAcc).
%% Hash a binary with the algorithm the protocol uses for deep hashing.
hash_bin(Bin) when is_binary(Bin) ->
	crypto:hash(?DEEP_HASH_ALG, Bin).

%%% TESTS

%% Recompute the deep hash of a nested list by hand, per the algorithm:
%% the accumulator starts at H("list" ++ Length) and each element folds in
%% as H(Acc || DeepHash(Element)).
hash_test() ->
	V1 = crypto:strong_rand_bytes(32),
	V2 = crypto:strong_rand_bytes(32),
	V3 = crypto:strong_rand_bytes(32),
	V4 = crypto:strong_rand_bytes(32),
	DeepList = [V1, [V2, V3], V4],
	H1 = hash_bin(<<(hash_bin(<<"blob", "32">>))/binary, (hash_bin(V1))/binary>>),
	H2 = hash_bin(<<(hash_bin(<<"blob", "32">>))/binary, (hash_bin(V2))/binary>>),
	H3 = hash_bin(<<(hash_bin(<<"blob", "32">>))/binary, (hash_bin(V3))/binary>>),
	H4 = hash_bin(<<(hash_bin(<<"blob", "32">>))/binary, (hash_bin(V4))/binary>>),
	HSublistTag = hash_bin(<<"list", "2">>),
	HSublistHead = hash_bin(<<HSublistTag/binary, H2/binary>>),
	HSublist = hash_bin(<<HSublistHead/binary, H3/binary>>),
	HListTag = hash_bin(<<"list", "3">>),
	HHead = hash_bin(<<HListTag/binary, H1/binary>>),
	HWithSublist = hash_bin(<<HHead/binary, HSublist/binary>>),
	H = hash_bin(<<HWithSublist/binary, H4/binary>>),
	?assertEqual(H, hash(DeepList)).

hash_empty_list_test() ->
	?assertEqual(hash_bin(<<"list", "0">>), hash([])).

%% Structurally different inputs must never collide.
hash_uniqueness_test() ->
	?assertNotEqual(
		hash([<<"a">>]),
		hash([[<<"a">>]])
	),
	?assertNotEqual(
		hash([<<"a">>, <<"b">>]),
		hash([<<"b">>, <<"a">>])
	),
	?assertNotEqual(
		hash([<<"a">>, <<>>]),
		hash([<<"a">>])
	),
	?assertNotEqual(
		hash([<<"a">>, <<"b">>]),
		hash([[<<"a">>], <<"b">>])
	),
	?assertNotEqual(
		hash([<<"a">>, [<<"b">>, <<"c">>]]),
		hash([<<"a">>, <<"b">>, <<"c">>])
	),
	?assertNotEqual(
		hash([<<"a">>, [<<"b">>, <<"c">>], [<<"d">>, <<"e">>]]),
		hash([<<"a">>, [<<"b">>, <<"c">>, <<"d">>, <<"e">>]])
	),
	?assertNotEqual(
		hash([<<"a">>, [<<"b">>], <<"c">>, <<"d">>]),
		hash([<<"a">>, [<<"b">>, <<"c">>], <<"d">>])
	).


================================================
FILE: apps/arweave/src/ar_device_lock.erl
================================================
-module(ar_device_lock).

-behaviour(gen_server).

-export([get_store_id_to_device_map/0, is_ready/0, acquire_lock/3, release_lock/2,
		set_device_lock_metric/3]).
-export([start_link/0, init/1, handle_call/3, handle_info/2, handle_cast/2,
		terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

-record(state, {
	%% StoreID -> system device, built once at initialization.
	store_id_to_device = #{},
	device_locks = #{}, %% used when device_limit is true
	store_id_locks = #{}, %% used when device_limit is false
	initialized = false,
	%% Upper bound on concurrent prepare/repack locks.
	num_replica_2_9_workers = 0,
	device_limit = true
}).

-type device_mode() :: prepare | sync | repack.

-ifdef(AR_TEST).
-define(LOCK_LOG_INTERVAL_MS, 10_000). %% 10 seconds
-else.
-define(LOCK_LOG_INTERVAL_MS, 600_000). %% 10 minutes
-endif.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the StoreID -> device map, or {error, Reason} if the server
%% is unreachable.
get_store_id_to_device_map() ->
	case catch gen_server:call(?MODULE, get_state) of
		{'EXIT', {Reason, {gen_server, call, _}}} ->
			{error, Reason};
		State ->
			State#state.store_id_to_device
	end.

%% @doc True once the server has built its StoreID -> device map; false when
%% not yet initialized or the server is unreachable.
is_ready() ->
	case catch gen_server:call(?MODULE, get_state) of
		{'EXIT', {Reason, {gen_server, call, _}}} ->
			?LOG_WARNING([{event, error_getting_device_lock_state},
					{module, ?MODULE}, {reason, Reason}]),
			false;
		State ->
			State#state.initialized
	end.

%% @doc Helper function to wrap common logic around acquiring a device lock.
%% Returns the new status (active | paused | CurrentStatus); emits a metric
%% and log entry only on status transitions.
-spec acquire_lock(device_mode(), string(), atom()) -> atom().
acquire_lock(Mode, StoreID, CurrentStatus) ->
	NewStatus = case CurrentStatus of
		_ when CurrentStatus == complete; CurrentStatus == off ->
			% No change needed when we're done or off.
			CurrentStatus;
		_ ->
			case catch gen_server:call(?MODULE, {acquire_lock, Mode, StoreID}) of
				{'EXIT', {Reason, {gen_server, call, _}}} ->
					%% Server unreachable - keep the current status.
					?LOG_WARNING([{event, error_acquiring_device_lock},
							{module, ?MODULE}, {reason, Reason}]),
					CurrentStatus;
				true ->
					active;
				false ->
					paused
			end
	end,
	case NewStatus == CurrentStatus of
		true ->
			ok;
		false ->
			set_device_lock_metric(StoreID, Mode, NewStatus),
			?LOG_INFO([{event, acquire_device_lock}, {mode, Mode},
					{store_id, StoreID},
					{old_status, CurrentStatus}, {new_status, NewStatus}])
	end,
	NewStatus.
%% @doc Asynchronously release the Mode lock held by StoreID.
release_lock(Mode, StoreID) ->
	gen_server:cast(?MODULE, {release_lock, Mode, StoreID}).

%% @doc Export the lock status for a storage module as a prometheus gauge.
set_device_lock_metric(StoreID, Mode, Status) ->
	StatusCode = case Status of
		off -> -1;
		paused -> 0;
		active -> 1;
		complete -> 2;
		_ -> -2
	end,
	StoreIDLabel = ar_storage_module:label(StoreID),
	prometheus_gauge:set(device_lock_status, [StoreIDLabel, Mode], StatusCode).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

init([]) ->
	%% Defer device discovery to a cast so init returns quickly.
	gen_server:cast(self(), initialize_state),
	{ok, Config} = arweave_config:get_env(),
	State = #state{
		num_replica_2_9_workers = Config#config.replica_2_9_workers,
		device_limit = not Config#config.disable_replica_2_9_device_limit
	},
	?LOG_INFO([{event, starting_device_lock_server},
			{num_replica_2_9_workers, State#state.num_replica_2_9_workers},
			{device_limit, State#state.device_limit}]),
	{ok, State}.

handle_call(get_state, _From, State) ->
	{reply, State, State};
handle_call({acquire_lock, Mode, StoreID}, _From, State) ->
	case State#state.initialized of
		false ->
			% Not yet initialized.
			{reply, false, State};
		_ ->
			{Acquired, State2} = do_acquire_lock(Mode, StoreID, State),
			{reply, Acquired, State2}
	end;
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast(initialize_state, State) ->
	%% Wait until the node has joined the network before probing devices.
	State2 = case ar_node:is_joined() of
		false ->
			ar_util:cast_after(1000, self(), initialize_state),
			State;
		true ->
			initialize_state(State)
	end,
	{noreply, State2};
handle_cast({release_lock, Mode, StoreID}, State) ->
	case State#state.initialized of
		false ->
			% Not yet initialized.
			{noreply, State};
		_ ->
			State2 = do_release_lock(Mode, StoreID, State),
			?LOG_INFO([{event, release_device_lock}, {mode, Mode},
					{store_id, StoreID}]),
			{noreply, State2}
	end;
handle_cast(log_locks, State) ->
	%% Periodic lock-status logging; re-arms itself.
	log_locks(State),
	ar_util:cast_after(?LOCK_LOG_INTERVAL_MS, ?MODULE, log_locks),
	{noreply, State};
handle_cast(Request, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {request, Request}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},
			{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Map every configured storage module (including repack-in-place ones)
%% to its system device and mark the server initialized.
initialize_state(State) ->
	{ok, Config} = arweave_config:get_env(),
	StorageModules = Config#config.storage_modules,
	RepackInPlaceModules = [element(1, El)
			|| El <- Config#config.repack_in_place_storage_modules],
	StoreIDToDevice = lists:foldl(
		fun(Module, Acc) ->
			StoreID = ar_storage_module:id(Module),
			Device = get_system_device(Module),
			?LOG_INFO([
				{event, storage_module_device},
				{store_id, StoreID},
				{device, Device}]),
			maps:put(StoreID, Device, Acc)
		end,
		#{},
		StorageModules ++ RepackInPlaceModules
	),
	State2 = State#state{
		store_id_to_device = StoreIDToDevice,
		initialized = true
	},
	log_locks(State2),
	ar_util:cast_after(?LOCK_LOG_INTERVAL_MS, ?MODULE, log_locks),
	State2.

%% @doc Resolve the system device backing a storage module's chunk storage
%% path. Falls back to the StoreID itself when detection fails, so every
%% module still gets a distinct lock key.
get_system_device(StorageModule) ->
	{ok, Config} = arweave_config:get_env(),
	StoreID = ar_storage_module:id(StorageModule),
	Path = ar_chunk_storage:get_chunk_storage_path(Config#config.data_dir, StoreID),
	Device = ar_util:get_system_device(Path),
	case Device of
		"" -> StoreID; % If the command fails or returns an empty string, return StoreID
		_ -> Device
	end.

%% @doc Core lock-acquisition state machine. Returns {Acquired, State2}.
do_acquire_lock(Mode, ?DEFAULT_MODULE, State) ->
	%% "default" storage module is a special case. It can only be in sync mode.
	case Mode of
		sync -> {true, State};
		_ -> {false, State}
	end;
do_acquire_lock(Mode, StoreID, State) ->
	MaxPrepareLocks = State#state.num_replica_2_9_workers,
	Lock = query_lock(StoreID, State),
	PrepareLocks = count_prepare_locks(State),
	{Acquired, NewLock} = case Mode of
		sync ->
			%% Can only acquire a sync lock if the device is in sync mode
			case Lock of
				sync -> {true, sync};
				_ -> {false, Lock}
			end;
		prepare ->
			%% Can only acquire a prepare lock if the device is in sync mode or this
			%% StoreID already has the prepare lock
			case {Lock, PrepareLocks} of
				{sync, _} when PrepareLocks < MaxPrepareLocks -> {true, prepare};
				{prepare, _} -> {true, Lock};
				_ -> {false, Lock}
			end;
		repack ->
			%% Can only acquire a repack lock if the device is in sync mode or this
			%% StoreID already has the repack lock
			case {Lock, PrepareLocks} of
				{sync, _} when PrepareLocks < MaxPrepareLocks -> {true, repack};
				{repack, _} -> {true, Lock},
				_ -> {false, Lock}
			end
	end,
	{Acquired, update_lock(StoreID, NewLock, State)}.

%% @doc Releasing prepare/repack puts the device (or StoreID) back in sync
%% mode; releasing sync is a no-op. Releasing a lock we do not hold is
%% logged as a warning and leaves the state untouched.
do_release_lock(Mode, StoreID, State) ->
	Lock = query_lock(StoreID, State),
	NewLock = case Mode of
		sync ->
			%% Releasing a sync lock does nothing.
			Lock;
		prepare ->
			case Lock of
				prepare ->
					%% This StoreID had a prepare lock on this device, so now we can
					%% put the device back in sync mode so it's ready to be locked again
					%% if needed.
					sync;
				_ ->
					%% We should only be able to release a prepare lock if we previously
					%% held it. If we hit this branch something is wrong.
					?LOG_WARNING([{event, invalid_release_lock},
							{module, ?MODULE}, {mode, Mode}, {store_id, StoreID},
							{current_lock, Lock}]),
					Lock
			end;
		repack ->
			case Lock of
				repack ->
					%% This StoreID had a repack lock on this device, so now we can
					%% put the device back in sync mode so it's ready to be locked again
					%% if needed.
					sync;
				_ ->
					%% We should only be able to release a repack lock if we previously
					%% held it. If we hit this branch something is wrong.
					?LOG_WARNING([{event, invalid_release_lock},
							{module, ?MODULE}, {mode, Mode}, {store_id, StoreID},
							{current_lock, Lock}]),
					Lock
			end
	end,
	update_lock(StoreID, NewLock, State).

%% Count outstanding prepare locks against num_replica_2_9_workers.
count_prepare_locks(#state{ device_limit = true } = State) ->
	maps:fold(
		fun(_Device, {prepare, _}, Acc) -> Acc + 1;
			(_Device, _, Acc) -> Acc
		end,
		0,
		State#state.device_locks
	);
count_prepare_locks(#state{ device_limit = false } = State) ->
	maps:fold(
		fun(_Device, prepare, Acc) -> Acc + 1;
			(_Device, _, Acc) -> Acc
		end,
		0,
		State#state.store_id_locks
	).

%% @doc Lock state as seen by StoreID. With device_limit, a device held by a
%% *different* StoreID reads as paused (note: {prepare, StoreID} matches the
%% already-bound StoreID, i.e. only this module's own lock reads as prepare).
query_lock(StoreID, #state{ device_limit = true } = State) ->
	Device = maps:get(StoreID, State#state.store_id_to_device),
	DeviceLock = maps:get(Device, State#state.device_locks, sync),
	case DeviceLock of
		sync -> sync;
		{prepare, StoreID} -> prepare;
		{repack, StoreID} -> repack;
		_ -> paused
	end;
query_lock(StoreID, #state{ device_limit = false } = State) ->
	maps:get(StoreID, State#state.store_id_locks, sync).

%% @doc Write the new lock back; a paused result never overwrites another
%% StoreID's device lock.
update_lock(StoreID, Lock, #state{ device_limit = true } = State) ->
	Device = maps:get(StoreID, State#state.store_id_to_device),
	DeviceLocks = case Lock of
		paused -> State#state.device_locks;
		sync -> maps:put(Device, sync, State#state.device_locks);
		prepare -> maps:put(Device, {prepare, StoreID}, State#state.device_locks);
		repack -> maps:put(Device, {repack, StoreID}, State#state.device_locks)
	end,
	State#state{device_locks = DeviceLocks};
update_lock(StoreID, Lock, #state{ device_limit = false } = State) ->
	StoreIDLocks = maps:put(StoreID, Lock, State#state.store_id_locks),
	State#state{store_id_locks = StoreIDLocks}.

log_locks(State) ->
	Logs = do_log_locks(State),
	lists:foreach(fun(Log) -> ?LOG_INFO(Log) end, Logs).
%% @doc Build one log proplist per storage module, sorted by device then
%% StoreID. Note the {prepare, StoreID} / {repack, StoreID} case patterns
%% match against the fold's already-bound StoreID: only the module actually
%% holding the device lock is reported as prepare/repack, others as paused.
do_log_locks(#state{ device_limit = true } = State) ->
	StoreIDToDevice = State#state.store_id_to_device,
	DeviceLocks = State#state.device_locks,
	SortedStoreIDList = lists:sort(
		fun({StoreID1, Device1}, {StoreID2, Device2}) ->
			case Device1 =:= Device2 of
				true -> StoreID1 =< StoreID2;
				false -> Device1 < Device2
			end
		end,
		maps:to_list(StoreIDToDevice)),
	lists:foldr(
		fun({StoreID, Device}, Acc) ->
			DeviceLock = maps:get(Device, DeviceLocks, sync),
			Status = case DeviceLock of
				sync -> sync;
				{prepare, StoreID} -> prepare;
				{repack, StoreID} -> repack;
				_ -> paused
			end,
			[ [{event, device_lock_status}, {device, Device}, {store_id, StoreID},
				{status, Status}] | Acc ]
		end,
		[],
		SortedStoreIDList
	);
do_log_locks(#state{ device_limit = false } = State) ->
	StoreIDToDevice = State#state.store_id_to_device,
	StoreIDLocks = State#state.store_id_locks,
	SortedStoreIDList = lists:sort(
		fun({StoreID1, Device1}, {StoreID2, Device2}) ->
			case Device1 =:= Device2 of
				true -> StoreID1 =< StoreID2;
				false -> Device1 < Device2
			end
		end,
		maps:to_list(StoreIDToDevice)),
	lists:foldr(
		fun({StoreID, Device}, Acc) ->
			%% Without the device limit, each StoreID has its own lock.
			Status = maps:get(StoreID, StoreIDLocks, sync),
			[ [{event, device_lock_status}, {device, Device}, {store_id, StoreID},
				{status, Status}] | Acc ]
		end,
		[],
		SortedStoreIDList
	).

%%%===================================================================
%%% Tests.
%%%===================================================================

device_locks_test_() ->
	[
		{timeout, 30, fun test_acquire_lock/0},
		{timeout, 30, fun test_acquire_lock_without_device_limit/0},
		{timeout, 30, fun test_release_lock/0},
		{timeout, 30, fun test_release_lock_without_device_limit/0},
		{timeout, 30, fun test_count_prepare_locks/0},
		{timeout, 30, fun test_log_locks/0}
	].
%% Exercises do_acquire_lock/3 with the per-device lock map: sync locks only
%% succeed on devices in sync mode; prepare/repack are re-entrant for the
%% holding StoreID and bounded by num_replica_2_9_workers.
test_acquire_lock() ->
	State = #state{
		store_id_to_device = #{
			"storage_module_0_unpacked" => "device1",
			"storage_module_1_unpacked" => "device1",
			"storage_module_2_unpacked" => "device2",
			"storage_module_3_unpacked" => "device2",
			"storage_module_4_unpacked" => "device3",
			"storage_module_5_unpacked" => "device3"
		},
		device_locks = #{
			"device1" => sync,
			"device2" => {prepare, "storage_module_2_unpacked"},
			"device3" => {repack, "storage_module_4_unpacked"}
		},
		num_replica_2_9_workers = 2
	},
	?assertEqual(
		{true, State},
		do_acquire_lock(sync, "storage_module_0_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(sync, "storage_module_2_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(sync, "storage_module_3_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(sync, "storage_module_4_unpacked", State)),
	%% Prepare-lock budget exhausted when only 1 worker is configured.
	?assertEqual(
		{false, State#state{num_replica_2_9_workers = 1}},
		do_acquire_lock(prepare, "storage_module_0_unpacked",
			State#state{num_replica_2_9_workers = 1})),
	?assertEqual(
		{true, State#state{device_locks = #{
			"device1" => {prepare, "storage_module_0_unpacked"},
			"device2" => {prepare, "storage_module_2_unpacked"},
			"device3" => {repack, "storage_module_4_unpacked"}
		}}},
		do_acquire_lock(prepare, "storage_module_0_unpacked", State)),
	%% Re-entrant: the current holder can re-acquire its own lock.
	?assertEqual(
		{true, State},
		do_acquire_lock(prepare, "storage_module_2_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(prepare, "storage_module_3_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(prepare, "storage_module_4_unpacked", State)),
	?assertEqual(
		{true, State#state{device_locks = #{
			"device1" => {repack, "storage_module_0_unpacked"},
			"device2" => {prepare, "storage_module_2_unpacked"},
			"device3" => {repack, "storage_module_4_unpacked"}
		}}},
		do_acquire_lock(repack, "storage_module_0_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(repack, "storage_module_2_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(repack, "storage_module_3_unpacked", State)),
	?assertEqual(
		{true, State},
		do_acquire_lock(repack, "storage_module_4_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(repack, "storage_module_5_unpacked", State)).

%% Same scenarios with device_limit = false: locks are tracked per StoreID,
%% so two modules on the same device no longer exclude each other.
test_acquire_lock_without_device_limit() ->
	State = #state{
		store_id_to_device = #{
			"storage_module_0_unpacked" => "device1",
			"storage_module_1_unpacked" => "device1",
			"storage_module_2_unpacked" => "device2",
			"storage_module_3_unpacked" => "device2",
			"storage_module_4_unpacked" => "device3",
			"storage_module_5_unpacked" => "device3"
		},
		store_id_locks = #{
			"storage_module_0_unpacked" => sync,
			"storage_module_1_unpacked" => sync,
			"storage_module_2_unpacked" => prepare,
			"storage_module_3_unpacked" => prepare,
			"storage_module_4_unpacked" => repack,
			"storage_module_5_unpacked" => repack
		},
		num_replica_2_9_workers = 3,
		device_limit = false
	},
	?assertEqual(
		{true, State},
		do_acquire_lock(sync, "storage_module_0_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(sync, "storage_module_2_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(sync, "storage_module_3_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(sync, "storage_module_4_unpacked", State)),
	?assertEqual(
		{false, State#state{num_replica_2_9_workers = 2}},
		do_acquire_lock(prepare, "storage_module_0_unpacked",
			State#state{num_replica_2_9_workers = 2})),
	?assertEqual(
		{true, State#state{store_id_locks = #{
			"storage_module_0_unpacked" => prepare,
			"storage_module_1_unpacked" => sync,
			"storage_module_2_unpacked" => prepare,
			"storage_module_3_unpacked" => prepare,
			"storage_module_4_unpacked" => repack,
			"storage_module_5_unpacked" => repack
		}}},
		do_acquire_lock(prepare, "storage_module_0_unpacked", State)),
	?assertEqual(
		{true, State},
		do_acquire_lock(prepare, "storage_module_2_unpacked", State)),
	?assertEqual(
		{true, State},
		do_acquire_lock(prepare, "storage_module_3_unpacked", State)),
	?assertEqual(
		{false, State},
		do_acquire_lock(prepare,
"storage_module_4_unpacked", State)), ?assertEqual( {true, State#state{store_id_locks = #{ "storage_module_0_unpacked" => repack, "storage_module_1_unpacked" => sync, "storage_module_2_unpacked" => prepare, "storage_module_3_unpacked" => prepare, "storage_module_4_unpacked" => repack, "storage_module_5_unpacked" => repack }}}, do_acquire_lock(repack, "storage_module_0_unpacked", State)), ?assertEqual( {false, State}, do_acquire_lock(repack, "storage_module_2_unpacked", State)), ?assertEqual( {false, State}, do_acquire_lock(repack, "storage_module_3_unpacked", State)), ?assertEqual( {true, State}, do_acquire_lock(repack, "storage_module_4_unpacked", State)), ?assertEqual( {true, State}, do_acquire_lock(repack, "storage_module_5_unpacked", State)). test_release_lock() -> State = #state{ store_id_to_device = #{ "storage_module_0_unpacked" => "device1", "storage_module_1_unpacked" => "device1", "storage_module_2_unpacked" => "device2", "storage_module_3_unpacked" => "device2", "storage_module_4_unpacked" => "device3", "storage_module_5_unpacked" => "device3", "storage_module_6_unpacked" => "device4" }, device_locks = DeviceLocks = #{ "device1" => sync, "device2" => {prepare, "storage_module_2_unpacked"}, "device3" => {repack, "storage_module_4_unpacked"} } }, ?assertEqual( State, do_release_lock(sync, "storage_module_0_unpacked", State)), ?assertEqual( State, do_release_lock(sync, "storage_module_2_unpacked", State)), ?assertEqual( State, do_release_lock(sync, "storage_module_3_unpacked", State)), ?assertEqual( State, do_release_lock(sync, "storage_module_4_unpacked", State)), ?assertEqual( State#state{ device_locks = DeviceLocks#{ "device4" => sync }}, do_release_lock(sync, "storage_module_6_unpacked", State)), ?assertEqual( State, do_release_lock(prepare, "storage_module_0_unpacked", State)), ?assertEqual( State#state{device_locks = #{ "device1" => sync, "device2" => sync, "device3" => {repack, "storage_module_4_unpacked"} }}, do_release_lock(prepare, 
"storage_module_2_unpacked", State)), ?assertEqual( State, do_release_lock(prepare, "storage_module_3_unpacked", State)), ?assertEqual( State, do_release_lock(prepare, "storage_module_4_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_0_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_2_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_3_unpacked", State)), ?assertEqual( State#state{device_locks = #{ "device1" => sync, "device2" => {prepare, "storage_module_2_unpacked"}, "device3" => sync }}, do_release_lock(repack, "storage_module_4_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_5_unpacked", State)). test_release_lock_without_device_limit() -> State = #state{ store_id_to_device = #{ "storage_module_0_unpacked" => "device1", "storage_module_1_unpacked" => "device1", "storage_module_2_unpacked" => "device2", "storage_module_3_unpacked" => "device2", "storage_module_4_unpacked" => "device3", "storage_module_5_unpacked" => "device3", "storage_module_6_unpacked" => "device4" }, store_id_locks = StoreIDLocks = #{ "storage_module_0_unpacked" => sync, "storage_module_1_unpacked" => sync, "storage_module_2_unpacked" => prepare, "storage_module_3_unpacked" => prepare, "storage_module_4_unpacked" => repack, "storage_module_5_unpacked" => repack }, device_limit = false }, ?assertEqual( State, do_release_lock(sync, "storage_module_0_unpacked", State)), ?assertEqual( State, do_release_lock(sync, "storage_module_2_unpacked", State)), ?assertEqual( State, do_release_lock(sync, "storage_module_3_unpacked", State)), ?assertEqual( State, do_release_lock(sync, "storage_module_4_unpacked", State)), ?assertEqual( State#state{ store_id_locks = StoreIDLocks#{ "storage_module_6_unpacked" => sync }}, do_release_lock(sync, "storage_module_6_unpacked", State)), ?assertEqual( State, do_release_lock(prepare, "storage_module_0_unpacked", State)), ?assertEqual( 
State#state{store_id_locks = #{ "storage_module_0_unpacked" => sync, "storage_module_1_unpacked" => sync, "storage_module_2_unpacked" => sync, "storage_module_3_unpacked" => prepare, "storage_module_4_unpacked" => repack, "storage_module_5_unpacked" => repack }}, do_release_lock(prepare, "storage_module_2_unpacked", State)), ?assertEqual( State#state{store_id_locks = #{ "storage_module_0_unpacked" => sync, "storage_module_1_unpacked" => sync, "storage_module_2_unpacked" => prepare, "storage_module_3_unpacked" => sync, "storage_module_4_unpacked" => repack, "storage_module_5_unpacked" => repack }}, do_release_lock(prepare, "storage_module_3_unpacked", State)), ?assertEqual( State, do_release_lock(prepare, "storage_module_4_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_0_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_2_unpacked", State)), ?assertEqual( State, do_release_lock(repack, "storage_module_3_unpacked", State)), ?assertEqual( State#state{store_id_locks = #{ "storage_module_0_unpacked" => sync, "storage_module_1_unpacked" => sync, "storage_module_2_unpacked" => prepare, "storage_module_3_unpacked" => prepare, "storage_module_4_unpacked" => sync, "storage_module_5_unpacked" => repack }}, do_release_lock(repack, "storage_module_4_unpacked", State)), ?assertEqual( State#state{store_id_locks = #{ "storage_module_0_unpacked" => sync, "storage_module_1_unpacked" => sync, "storage_module_2_unpacked" => prepare, "storage_module_3_unpacked" => prepare, "storage_module_4_unpacked" => repack, "storage_module_5_unpacked" => sync }}, do_release_lock(repack, "storage_module_5_unpacked", State)). 
%% With device_limit = true prepare locks are counted per device (2 here);
%% with device_limit = false they are counted per store ID (3 here).
test_count_prepare_locks() ->
	State = #state{
		store_id_to_device = #{
			"storage_module_0_unpacked" => "device1",
			"storage_module_1_unpacked" => "device1",
			"storage_module_2_unpacked" => "device2",
			"storage_module_3_unpacked" => "device2",
			"storage_module_4_unpacked" => "device3",
			"storage_module_5_unpacked" => "device3"
		},
		device_locks = #{
			"device1" => {prepare, "storage_module_0_unpacked"},
			"device2" => {prepare, "storage_module_2_unpacked"},
			"device3" => {repack, "storage_module_4_unpacked"}
		},
		store_id_locks = #{
			"storage_module_0_unpacked" => sync,
			"storage_module_1_unpacked" => sync,
			"storage_module_2_unpacked" => prepare,
			"storage_module_3_unpacked" => prepare,
			"storage_module_4_unpacked" => prepare,
			"storage_module_5_unpacked" => repack
		}
	},
	?assertEqual(2, count_prepare_locks(State#state{ device_limit = true })),
	?assertEqual(3, count_prepare_locks(State#state{ device_limit = false })).

%% do_log_locks/1 output: with device_limit = true, store IDs sharing a device
%% with a prepare/repack holder are reported as paused; without it, every
%% store ID reports its own lock.
test_log_locks() ->
	State = #state{
		store_id_to_device = #{
			"storage_module_0_unpacked" => "device1",
			"storage_module_1_unpacked" => "device1",
			"storage_module_2_unpacked" => "device2",
			"storage_module_3_unpacked" => "device2",
			"storage_module_4_unpacked" => "device3",
			"storage_module_5_unpacked" => "device3"
		},
		device_locks = #{
			"device1" => sync,
			"device2" => {prepare, "storage_module_2_unpacked"},
			"device3" => {repack, "storage_module_4_unpacked"}
		},
		store_id_locks = #{
			"storage_module_0_unpacked" => sync,
			"storage_module_1_unpacked" => sync,
			"storage_module_2_unpacked" => prepare,
			"storage_module_3_unpacked" => prepare,
			"storage_module_4_unpacked" => repack,
			"storage_module_5_unpacked" => repack
		}
	},
	?assertEqual(
		[
			[{event, device_lock_status}, {device, "device1"},
				{store_id, "storage_module_0_unpacked"}, {status, sync}],
			[{event, device_lock_status}, {device, "device1"},
				{store_id, "storage_module_1_unpacked"}, {status, sync}],
			[{event, device_lock_status}, {device, "device2"},
				{store_id, "storage_module_2_unpacked"}, {status, prepare}],
			[{event, device_lock_status}, {device, "device2"},
				{store_id, "storage_module_3_unpacked"}, {status, paused}],
			[{event, device_lock_status}, {device, "device3"},
				{store_id, "storage_module_4_unpacked"}, {status, repack}],
			[{event, device_lock_status}, {device, "device3"},
				{store_id, "storage_module_5_unpacked"}, {status, paused}]
		],
		do_log_locks(State#state{device_limit = true})),
	?assertEqual(
		[
			[{event, device_lock_status}, {device, "device1"},
				{store_id, "storage_module_0_unpacked"}, {status, sync}],
			[{event, device_lock_status}, {device, "device1"},
				{store_id, "storage_module_1_unpacked"}, {status, sync}],
			[{event, device_lock_status}, {device, "device2"},
				{store_id, "storage_module_2_unpacked"}, {status, prepare}],
			[{event, device_lock_status}, {device, "device2"},
				{store_id, "storage_module_3_unpacked"}, {status, prepare}],
			[{event, device_lock_status}, {device, "device3"},
				{store_id, "storage_module_4_unpacked"}, {status, repack}],
			[{event, device_lock_status}, {device, "device3"},
				{store_id, "storage_module_5_unpacked"}, {status, repack}]
		],
		do_log_locks(State#state{device_limit = false})).



================================================
FILE: apps/arweave/src/ar_diff_dag.erl
================================================
%%% @doc A directed acyclic graph with a single sink node. The sink node is supposed
%%% to store some big expensive to replicate entity (e.g., a wallet tree). Edges store
%%% diffs. To compute a representation of the entity corresponding to a particular vertex,
%%% one needs to walk from this vertex down to the sink node, collect all the diffs, and
%%% apply them in the reverse order.
-module(ar_diff_dag).

-export([new/3, get_sink/1, is_sink/2, is_node/2, add_node/5, update_leaf_source/3,
		update_sink/3, get_metadata/2, get_sink_metadata/1, reconstruct/3, move_sink/4,
		filter/2]).

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Create a new DAG consisting of a single sink node with the given
%% identifier, storing the given entity and metadata. The DAG is represented
%% as {Sinks, SinkID, Sources}: Sinks maps a node ID to its outgoing edge
%% (or the sink entity), Sources maps a node ID to the set of IDs pointing
%% at it.
new(ID, Entity, Metadata) ->
	Sinks = #{ ID => {sink, Entity, {0, Metadata}} },
	Sources = #{ ID => sets:new() },
	{Sinks, ID, Sources}.

%% @doc Return the entity stored in the sink node.
get_sink({Sinks, SinkID, _Sources}) ->
	{_Tag, Entity, _Meta} = maps:get(SinkID, Sinks),
	Entity.

%% @doc Return true if the given identifier is the identifier of the sink node.
is_sink({_Sinks, ID, _Sources}, ID) ->
	true;
is_sink(_DAG, _ID) ->
	false.

%% @doc Return true if the node with the given identifier exists.
is_node(DAG, ID) ->
	{Sinks, _SinkID, _Sources} = DAG,
	maps:is_key(ID, Sinks).

%% @doc Create a node with an edge connecting the given source and sink identifiers,
%% directed towards the given sink identifier.
%% If the node with the given sink identifier does not exist or the node with the given source
%% identifier already exists, the call fails with a badkey exception.
add_node(DAG, SourceID, SinkID, Diff, Metadata) when SourceID /= SinkID ->
	assert_exists(SinkID, DAG),
	assert_not_exists(SourceID, DAG),
	{Sinks, TopSinkID, Sources} = DAG,
	%% Register the new node as a source of SinkID and give it an empty
	%% source set of its own.
	SinkSourceSet = maps:get(SinkID, Sources, sets:new()),
	Sources2 = Sources#{
		SinkID => sets:add_element(SourceID, SinkSourceSet),
		SourceID => sets:new()
	},
	%% The counter tracks the distance from the sink; the new node sits one
	%% step further away than the node it points at.
	{_Pointee, _Entity, {Counter, _SinkMetadata}} = maps:get(SinkID, Sinks),
	Sinks2 = Sinks#{ SourceID => {SinkID, Diff, {Counter + 1, Metadata}} },
	{Sinks2, TopSinkID, Sources2}.

%% @doc Update the given node via the given function of a diff and a metadata, which
%% returns a "new node identifier, new diff, new metadata" triplet. The node must be
%% a source (must have a sink) and a leaf (must be a sink for no node).
%% If the node does not exist or is not a leaf source, the call fails with a badkey exception.
update_leaf_source(DAG, ID, UpdateFun) ->
	assert_exists(ID, DAG),
	assert_not_sink(ID, DAG),
	{#{ ID := {SinkID, Diff, {Counter, Metadata}} } = Sinks, Sink, Sources} = DAG,
	%% A leaf has no sources of its own; otherwise refuse the update.
	case sets:is_empty(maps:get(ID, Sources, sets:new())) of
		false ->
			error({badkey, ID});
		true ->
			{NewID, UpdatedDiff, UpdatedMetadata} = UpdateFun(Diff, Metadata),
			%% Drop the old entry and re-register the node under NewID,
			%% keeping its distance counter and its sink edge.
			Sinks2 = maps:remove(ID, Sinks),
			Sources2 = maps:remove(ID, Sources),
			Set = sets:add_element(NewID, sets:del_element(ID, maps:get(SinkID, Sources))),
			Sources3 = Sources2#{ SinkID => Set },
			{Sinks2#{ NewID => {SinkID, UpdatedDiff, {Counter, UpdatedMetadata}} }, Sink,
					Sources3}
	end.

%% @doc Update the sink via the given function of an entity and a metadata, which
%% returns a "new node identifier, new entity, new metadata" triplet.
%% If the node does not exist or is not a sink, the call fails with a badkey exception.
update_sink({Sinks, ID, Sources}, ID, UpdateFun) ->
	#{ ID := {sink, Entity, {Counter, Metadata}} } = Sinks,
	{NewID, NewEntity, NewMetadata} = UpdateFun(Entity, Metadata),
	Sinks2 = maps:remove(ID, Sinks),
	Sinks3 = Sinks2#{ NewID => {sink, NewEntity, {Counter, NewMetadata}} },
	{Set, Sources2} = case maps:take(ID, Sources) of
		error ->
			{sets:new(), Sources};
		Update ->
			Update
	end,
	%% Re-point every node that referenced the old sink ID at NewID. Note
	%% that ID is already bound here, so the match on the first tuple
	%% element asserts the source indeed pointed at the old sink.
	Sinks4 = sets:fold(
		fun(SourceID, Acc) ->
			{ID, Diff, Meta} = maps:get(SourceID, Acc),
			Acc#{ SourceID => {NewID, Diff, Meta} }
		end,
		Sinks3,
		Set
	),
	Sources3 = Sources2#{ NewID => Set },
	{Sinks4, NewID, Sources3};
update_sink(_DAG, ID, _Fun) ->
	error({badkey, ID}).

%% @doc Return metadata stored at the given node. If the node with the given identifier
%% does not exist, the call fails with a badkey exception.
get_metadata(DAG, ID) ->
	element(2, element(3, maps:get(ID, element(1, DAG)))).

%% @doc Return metadata stored at the sink node. If the node with the given identifier
%% does not exist, the call fails with a badkey exception.
get_sink_metadata(DAG) ->
	ID = element(2, DAG),
	get_metadata(DAG, ID).
%% @doc Reconstruct the entity corresponding to the given node using
%% the given diff application function - a function of a diff and an entity.
%% If the node with the given identifier does not exist, returns {error, not_found}.
reconstruct(DAG, ID, ApplyDiffFun) ->
	Sinks = element(1, DAG),
	case maps:is_key(ID, Sinks) of
		true ->
			reconstruct(DAG, ID, ApplyDiffFun, []);
		false ->
			{error, not_found}
	end.

%% @doc Make the given node the sink node. The diffs are reversed
%% according to the given function of a diff and an entity.
%% The new entity is constructed by applying the diffs on the path from the previous
%% sink to the new one using the given diff application function of a diff and an entity.
%% If the node with the given identifier does not exist, the call fails with a badkey exception.
move_sink(DAG, ID, ApplyDiffFun, ReverseDiffFun) ->
	assert_exists(ID, DAG),
	move_sink(DAG, ID, ApplyDiffFun, ReverseDiffFun, []).

%% @doc Remove the nodes further away from the sink than the given distance.
filter({Sinks, ID, Sources}, Depth) ->
	{sink, _Entity, {SinkCounter, _Metadata}} = maps:get(ID, Sinks),
	%% First pass: collect the IDs to drop (and prune their source sets).
	{ToRemove, Sources2} =
		filter(maps:iterator(Sinks), SinkCounter, Depth, Sources, sets:new()),
	%% Second pass: remove each collected node, detaching it from any
	%% surviving node it pointed at.
	{UpdatedSinks, UpdatedSources} = sets:fold(
		fun(RemoveID, {CurrentSinks, CurrentSources}) ->
			#{ RemoveID := {SinkID, _CurrentEntity, _CurrentMetadata} } = CurrentSinks,
			CurrentSources2 =
				case sets:is_element(SinkID, ToRemove) of
					false ->
						Set = maps:get(SinkID, CurrentSources, sets:new()),
						maps:put(
							SinkID,
							sets:del_element(RemoveID, Set),
							CurrentSources
						);
					true ->
						CurrentSources
				end,
			{maps:remove(RemoveID, CurrentSinks), CurrentSources2}
		end,
		{Sinks, Sources2},
		ToRemove
	),
	{UpdatedSinks, ID, UpdatedSources}.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Fail with badkey unless the node exists.
assert_exists(ID, {Sinks, _, _}) ->
	case maps:is_key(ID, Sinks) of
		true ->
			ok;
		false ->
			error({badkey, ID})
	end.

%% Fail with badkey if the node already exists.
assert_not_exists(ID, {Sinks, _, _}) ->
	case maps:is_key(ID, Sinks) of
		true ->
			error({badkey, ID});
		false ->
			ok
	end.

%% Fail with badkey if the node is the sink.
assert_not_sink(ID, {_, ID, _}) ->
	error({badkey, ID});
assert_not_sink(_ID, _DAG) ->
	ok.

%% Walk from ID towards the sink accumulating diffs, then apply them to the
%% sink entity in reverse (path) order.
reconstruct(DAG, ID, ApplyDiffFun, Diffs) ->
	case DAG of
		{#{ ID := {sink, Entity, _Meta} }, ID, _} ->
			lists:foldl(ApplyDiffFun, Entity, Diffs);
		{#{ ID := {SinkID, Diff, _Meta} }, _Sink, _Sinks} ->
			reconstruct(DAG, SinkID, ApplyDiffFun, [Diff | Diffs])
	end.

%% Walk from ID down to the current sink collecting the path, then fold back
%% along it: each edge is flipped (its diff reversed) and the entity is
%% advanced by applying the original diff, until ID becomes the new sink.
move_sink(DAG, ID, ApplyDiffFun, ReverseDiffFun, Diffs) ->
	case DAG of
		{#{ ID := {sink, Entity, Metadata} }, ID, _Sources} ->
			{UpdatedSinkID, UpdatedEntity, UpdatedMetadata, UpdatedDAG} =
				lists:foldl(
					fun({SinkID, Diff, Meta},
							{SourceID, CurrentEntity, CurrentMeta, CurrentDAG}) ->
						%% Flip the SinkID -> SourceID edge: the old sink now
						%% points at its former source with the reversed diff.
						ReversedDiff = ReverseDiffFun(Diff, CurrentEntity),
						{Sinks, _Sink, Sources} = CurrentDAG,
						Sinks2 = Sinks#{ SourceID => {SinkID, ReversedDiff, CurrentMeta} },
						SourceIDSet2 = sets:del_element(SinkID,
								maps:get(SourceID, Sources)),
						SinkIDSet2 = sets:add_element(SourceID,
								maps:get(SinkID, Sources, sets:new())),
						Sources2 = Sources#{
							SinkID => SinkIDSet2,
							SourceID => SourceIDSet2
						},
						{SinkID, ApplyDiffFun(Diff, CurrentEntity), Meta,
								{Sinks2, SinkID, Sources2}}
					end,
					{ID, Entity, Metadata, DAG},
					Diffs
				),
			{UpdatedSinks, UpdatedSinkID, UpdatedSources} = UpdatedDAG,
			UpdatedSinks2 = UpdatedSinks#{
				UpdatedSinkID => {sink, UpdatedEntity, UpdatedMetadata} },
			{UpdatedSinks2, UpdatedSinkID, UpdatedSources};
		{#{ ID := {SinkID, Diff, Metadata} }, _Sink, _Sinks} ->
			move_sink(DAG, SinkID, ApplyDiffFun, ReverseDiffFun,
					[{ID, Diff, Metadata} | Diffs])
	end.
%% Iterate over all nodes; any node whose distance counter differs from the
%% sink's by more than Depth is scheduled for removal together with its
%% whole source subtree.
filter(SinkIterator, SinkCounter, Depth, Sources, ToRemove) ->
	case maps:next(SinkIterator) of
		none ->
			{ToRemove, Sources};
		{ID, {_ID, _Entity, {Counter, _Metadata}}, NextIterator} ->
			case sets:is_element(ID, ToRemove) of
				true ->
					filter(NextIterator, SinkCounter, Depth, Sources, ToRemove);
				false ->
					case abs(Counter - SinkCounter) =< Depth of
						true ->
							filter(NextIterator, SinkCounter, Depth, Sources, ToRemove);
						false ->
							{Sources2, ToRemove2} =
								extend_with_subtree_identifiers(ID, {Sources, ToRemove}),
							filter(NextIterator, SinkCounter, Depth, Sources2, ToRemove2)
					end
			end
	end.

%% Recursively add ID and every node in its source subtree to the removal
%% set, dropping their source-set entries along the way.
extend_with_subtree_identifiers(ID, {Sources, ToRemove}) ->
	sets:fold(
		fun(RemoveID, Acc) ->
			extend_with_subtree_identifiers(RemoveID, Acc)
		end,
		{maps:remove(ID, Sources), sets:add_element(ID, ToRemove)},
		maps:get(ID, Sources, sets:new())
	).

%%%===================================================================
%%% Tests.
%%%===================================================================

%% End-to-end scenario covering new/add_node/update_leaf_source/update_sink/
%% reconstruct/move_sink/filter. The diagrams show node: {diff-or-entity, meta}.
diff_dag_test() ->
	%% node-1: {0, meta_1}
	DAG1 = new("node-1", 0, meta_1),
	?assertEqual(0, get_sink(DAG1)),
	?assertEqual(DAG1, filter(DAG1, 0)),
	?assertEqual(DAG1, filter(DAG1, 1)),
	?assertEqual(DAG1, filter(DAG1, 2)),
	?assertEqual(0, reconstruct(DAG1, "node-1", fun(_Diff, _E) -> not_called end)),
	?assertEqual(
		{error, not_found},
		reconstruct(DAG1, "node-2", fun(_Diff, _E) -> not_called end)
	),
	?assertEqual(meta_1, get_metadata(DAG1, "node-1")),
	%% node-1: {0, meta_1} <- node-2-1: {1, meta_2_1}
	DAG2 = add_node(DAG1, "node-2-1", "node-1", 1, meta_2_1),
	?assertEqual(0, get_sink(DAG2)),
	?assertEqual(DAG2, filter(DAG2, 1)),
	?assertEqual(DAG1, filter(DAG2, 0)),
	?assertEqual(DAG2, filter(DAG2, 2)),
	?assertEqual(DAG2, filter(DAG2, 3)),
	?assertEqual(1, reconstruct(DAG2, "node-2-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(-1, reconstruct(DAG2, "node-2-1", fun(Diff, E) -> E - Diff end)),
	?assertEqual(meta_1, get_metadata(DAG2, "node-1")),
	?assertEqual(meta_2_1, get_metadata(DAG2, "node-2-1")),
	%% node-1: {0, meta_1} <- node-2-1: {2, meta_2_2}
	DAG3 = update_leaf_source(DAG2, "node-2-1",
			fun(D, _M) -> {"node-2-1", D + 1, meta_2_2} end),
	?assertEqual(0, get_sink(DAG3)),
	?assertEqual(DAG1, filter(DAG3, 0)),
	?assertEqual(DAG3, filter(DAG3, 1)),
	?assertEqual(2, reconstruct(DAG3, "node-2-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_2, get_metadata(DAG3, "node-2-1")),
	?assertException(error, {badkey, "node-1"},
			update_leaf_source(DAG2, "node-1", no_function)),
	%% node-1: {0, meta_1} <- node-2-2: {1, meta_2_2}
	DAG4 = update_leaf_source(DAG3, "node-2-1", fun(D, M) -> {"node-2-2", D - 1, M} end),
	?assertEqual(0, get_sink(DAG4)),
	?assertEqual(1, reconstruct(DAG4, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_2, get_metadata(DAG4, "node-2-2")),
	?assertException(error, {badkey, "node-2-1"}, get_metadata(DAG4, "node-2-1")),
	%% node-1: {0, meta_1} <- node-2-2: {1, meta_2_2}
	%%                     <- node-2-3: {2, meta_2_3}
	DAG5 = add_node(DAG4, "node-2-3", "node-1", 2, meta_2_3),
	?assertEqual(1, reconstruct(DAG5, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(2, reconstruct(DAG5, "node-2-3", fun(Diff, E) -> E + Diff end)),
	%% node-1: {0, meta_1} <- node-2-2: {1, meta_2_2}
	%%                     <- node-2-3: {2, meta_2_3} <- node-3-1: {-3, meta_3_1}
	DAG6 = add_node(DAG5, "node-3-1", "node-2-3", -3, meta_3_1),
	?assertEqual(1, reconstruct(DAG6, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(-1, reconstruct(DAG6, "node-3-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(DAG5, filter(DAG6, 1)),
	?assertEqual(DAG1, filter(DAG6, 0)),
	%% node-1: {-2, meta_1} <- node-2-2: {1, meta_2_2}
	%%   -> node-2-3: {3, meta_2_3} -> node-3-1: {-1, meta_3_1}
	DAG7 = move_sink(DAG6, "node-3-1", fun(Diff, E) -> E + Diff end,
			fun(Diff, _E) -> -Diff end),
	?assertEqual(-1, get_sink(DAG7)),
	?assertEqual(1, reconstruct(DAG7, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_2, get_metadata(DAG7, "node-2-2")),
	?assertEqual(0, reconstruct(DAG7, "node-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_1, get_metadata(DAG7, "node-1")),
	?assertEqual(2, reconstruct(DAG7, "node-2-3", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_3, get_metadata(DAG7, "node-2-3")),
	?assertEqual(-1, reconstruct(DAG7, "node-3-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_3_1, get_metadata(DAG7, "node-3-1")),
	?assert(not is_node(filter(DAG7, 0), "node-2-3")),
	?assert(not is_node(filter(DAG7, 0), "node-1")),
	?assert(not is_node(filter(DAG7, 0), "node-2-2")),
	?assert(is_node(filter(DAG7, 0), "node-3-1")),
	?assert(is_node(filter(DAG7, 1), "node-3-1")),
	?assert(is_node(filter(DAG7, 1), "node-2-3")),
	?assert(not is_node(filter(DAG7, 1), "node-1")),
	?assert(is_node(filter(DAG7, 1), "node-3-1")),
	?assert(not is_node(filter(DAG7, 1), "node-2-2")),
	?assertEqual(DAG7, filter(DAG7, 2)),
	?assertEqual(DAG7, filter(DAG7, 3)),
	%% node-1: {-1, meta_1} -> node-2-2: {1, meta_2_2}
	%%   <- node-2-3: {2, meta_2_3} <- node-3-1: {-3, meta_3_1}
	DAG9 = move_sink(DAG7, "node-2-2", fun(Diff, E) -> E + Diff end,
			fun(Diff, _E) -> -Diff end),
	?assertEqual(1, get_sink(DAG9)),
	?assertEqual(0, reconstruct(DAG9, "node-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_1, get_metadata(DAG9, "node-1")),
	?assertEqual(2, reconstruct(DAG9, "node-2-3", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_3, get_metadata(DAG9, "node-2-3")),
	?assertEqual(-1, reconstruct(DAG9, "node-3-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_3_1, get_metadata(DAG9, "node-3-1")),
	%% node-1: {-1, meta_1} -> node-2-2: {1, meta_2_2} <- node-3-2: {10, meta_3_2}
	%%   <- node-2-3: {2, meta_2_3} <- node-3-1: {-3, meta_3_1}
	DAG10 = add_node(DAG9, "node-3-2", "node-2-2", 10, meta_3_2),
	%% node-1: {-1, meta_1} -> node-2-2: {-10, meta_2_2} -> node-3-2: {11, meta_3_2}
	%%   <- node-2-3: {2, meta_2_3} <- node-3-1: {-3, meta_3_1}
	DAG11 = move_sink(DAG10, "node-3-2", fun(Diff, E) -> E + Diff end,
			fun(Diff, _E) -> -Diff end),
	?assertEqual(11, get_sink(DAG11)),
	?assertEqual(1, reconstruct(DAG11, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_2, get_metadata(DAG11, "node-2-2")),
	?assertEqual(0, reconstruct(DAG11, "node-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_1, get_metadata(DAG11, "node-1")),
	?assertEqual(2, reconstruct(DAG11, "node-2-3", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_3, get_metadata(DAG11, "node-2-3")),
	?assertEqual(-1, reconstruct(DAG11, "node-3-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_3_1, get_metadata(DAG11, "node-3-1")),
	?assertException(
		error,
		{badkey, "node-2-2"},
		update_leaf_source(DAG11, "node-2-2", no_function)
	),
	%% node-1: {-1, meta_1} -> node-2-2: {-10, meta_2_2} -> node-3-2: {12, meta_3_2}
	%%   <- node-2-3: {2, meta_2_3} <- node-3-1: {-3, meta_3_1}
	DAG12 = update_sink(DAG11, "node-3-2",
			fun(11, meta_3_2) -> {"node-3-2", 12, meta_3_2} end),
	?assertEqual(12, get_sink(DAG12)),
	?assertEqual(meta_3_2, get_metadata(DAG12, "node-3-2")),
	?assertEqual(2, reconstruct(DAG12, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_2, get_metadata(DAG12, "node-2-2")),
	?assertEqual(1, reconstruct(DAG12, "node-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_1, get_metadata(DAG12, "node-1")),
	?assertEqual(3, reconstruct(DAG12, "node-2-3", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_3, get_metadata(DAG12, "node-2-3")),
	?assertEqual(0, reconstruct(DAG12, "node-3-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_3_1, get_metadata(DAG12, "node-3-1")),
	?assertException(error, {badkey, "node-2-2"},
			update_sink(DAG11, "node-2-2", no_function)),
	?assertException(error, {badkey, "node-3-1"},
			update_sink(DAG11, "node-3-1", no_function)),
	%% node-1: {-1, meta_1} -> node-2-2: {-10, meta_2_2} -> new-node-3-2: {13, meta_3_2}
	%%   <- node-2-3: {2, meta_2_3} <- node-3-1: {-3, meta_3_1}
	DAG13 = update_sink(DAG12, "node-3-2",
			fun(12, meta_3_2) -> {"new-node-3-2", 13, meta_3_2} end),
	?assertEqual(13, get_sink(DAG13)),
	?assertEqual(meta_3_2, get_metadata(DAG13, "new-node-3-2")),
	?assertException(error, {badkey, "node-3-2"}, get_metadata(DAG13, "node-3-2")),
	?assertEqual(3, reconstruct(DAG13, "node-2-2", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_2, get_metadata(DAG13, "node-2-2")),
	?assertEqual(2, reconstruct(DAG13, "node-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_1, get_metadata(DAG13, "node-1")),
	?assertEqual(4, reconstruct(DAG13, "node-2-3", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_2_3, get_metadata(DAG13, "node-2-3")),
	?assertEqual(1, reconstruct(DAG13, "node-3-1", fun(Diff, E) -> E + Diff end)),
	?assertEqual(meta_3_1, get_metadata(DAG13, "node-3-1")),
	%% node-1: {0, meta_1} <- node-2: {1, meta_2}
	DAG14 = add_node(new("node-1", 0, meta_1), "node-2", "node-1", 1, meta_2),
	?assertEqual(0, get_sink(DAG14)),
	?assertEqual(1, reconstruct(DAG14, "node-2", fun(Diff, E) -> E + Diff end)),
	%% node-1: {-1, meta_1} -> node-2: {1, meta_2}
	DAG15 = move_sink(DAG14, "node-2", fun(Diff, E) -> E + Diff end,
			fun(Diff, _E) -> -Diff end),
	?assertEqual(1, get_sink(DAG15)),
	?assertEqual(0, reconstruct(DAG15, "node-1", fun(Diff, E) -> E + Diff end)),
	?assertException(error, {badkey, "node-2"},
			add_node(DAG15, "node-2", "node-1", 1, meta_1)),
	?assertException(error, {badkey, "node-1"},
			add_node(DAG15, "node-1", "node-2", 1, meta_2)).



================================================
FILE: apps/arweave/src/ar_difficulty.erl
================================================
-module(ar_difficulty).

-export([get_hash_rate_fixed_ratio/1, next_cumulative_diff/3, multiply_diff_pre_fork_2_5/2,
		diff_pair/1, poa1_diff_multiplier/1, poa1_diff/2, scale_diff/3, min_difficulty/1,
		switch_to_randomx_fork_diff/1, sub_diff/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the block time hash rate for the given difficulty.
get_hash_rate_fixed_ratio(B) ->
	%% The expected number of hash attempts per solution at this difficulty.
	HashRate = ?MAX_DIFF div (?MAX_DIFF - B#block.diff),
	case B#block.height >= ar_fork:height_2_8() of
		true ->
			HashRate;
		false ->
			%% Adjusting the hash rate by
			%% (TwoChunkCount + OneChunkCount) / TwoChunkCount counts
			%% the number of all the hashing attempts. In other words,
			%% the adjusted value is useful when we want to see the total
			%% amount of CPU work put into mining a block. This is not what
			%% we use it for. We use it as a denominator when computing
			%% a share contributed by a single partition - see
			%% ar_pricing:get_v2_price_per_gib_minute. Therefore, the hash
			%% rate computed here needs to have the same "units" as
			%% the hash rate we estimate for the partition -
			%% the "normalized" hash rate where a recall range only
			%% produces 4 nonces from one recall range (chunk-1)
			%% plus up to 400 nonces (chunk-2).
			%%
			%% Note that we did not even adjust it
			%% by (TwoChunkCount + OneChunkCount) / TwoChunkCount but by
			%% 100 div (100 + 1), what is wrong.
			Multiplier = poa1_diff_multiplier(B#block.height),
			case Multiplier > 1 of
				true ->
					HashRate * Multiplier div (Multiplier + 1);
				false ->
					HashRate
			end
	end.

%% @doc Calculate the cumulative difficulty for the next block.
%% Before fork 1.6 the cumulative difficulty was not tracked (0).
next_cumulative_diff(OldCDiff, NewDiff, Height) ->
	case Height >= ar_fork:height_1_6() of
		true ->
			next_cumulative_diff2(OldCDiff, NewDiff, Height);
		false ->
			0
	end.

%% @doc Get a difficulty that makes it harder to mine by Multiplier number of times.
%% The function was used up to the fork 2.4 and must be reimplemented without the
%% floating point numbers in case the need arises after the fork 2.5.
%% @end
multiply_diff_pre_fork_2_5(Diff, Multiplier) ->
	?MAX_DIFF - erlang:trunc(1 / Multiplier * (?MAX_DIFF - Diff)).

%% @doc Return the {PoA1 difficulty, difficulty} pair for the given block.
diff_pair(Block) ->
	Diff = Block#block.diff,
	Height = Block#block.height,
	{poa1_diff(Diff, Height), Diff}.
%% @doc Return the difficulty multiplier applied to PoA1 (one-chunk) solutions:
%% ?POA1_DIFF_MULTIPLIER from fork 2.7.2 on, 1 before it.
poa1_diff_multiplier(Height) ->
	case Height >= ar_fork:height_2_7_2() of
		true ->
			?POA1_DIFF_MULTIPLIER;
		false ->
			1
	end.

%% @doc Return the difficulty PoA1 (one-chunk) solutions must satisfy at the
%% given height.
poa1_diff(Diff, Height) ->
	Multiplier = poa1_diff_multiplier(Height),
	scale_diff(Diff, {Multiplier, 1}, Height).

%% @doc Scale the difficulty by ScaleDividend/ScaleDivisor.
%% Example: scale_diff(Diff, {100, 1}, Height) will scale the difficulty by 100, increasing it
%% Example: scale_diff(Diff, {3, 10}, Height) will scale the difficulty by 3/10, decreasing it
scale_diff(infinity, _Coeff, _Height) ->
	infinity;
scale_diff(Diff, {1, 1}, _Height) ->
	%% Identity scale: nothing to do.
	Diff;
scale_diff(Diff, {ScaleDividend, ScaleDivisor}, Height) ->
	%% The "inverse" difficulty (?MAX_DIFF - Diff) is inversely proportional
	%% to the difficulty, so it is scaled by the reciprocal ratio:
	%% {100, 1} shrinks the inverse by 100 and thereby *increases* the
	%% difficulty. The result is clamped to [min_difficulty, ?MAX_DIFF - 1].
	ScaledInverse = (?MAX_DIFF - Diff) * ScaleDivisor div ScaleDividend,
	ar_util:between(
		?MAX_DIFF - ScaledInverse,
		min_difficulty(Height),
		?MAX_DIFF - 1
	).

%% @doc Return the new difficulty computed such that N candidates have approximately the same
%% chances with the new difficulty as a single candidate with the Diff difficulty.
%%
%% Let the probability a candidate satisfies the new difficulty be x.
%% Let the probability a candidate satisfies the old difficulty be p.
%% Then, the probability at least one of the N candidates satisfies
%% the new difficulty is 1 - (1 - x) ^ N. We want it to be equal to p.
%% So, (1 - x) ^ N = 1 - p => x = 1 - 32th root of (1 - p).
%% The first three terms of the infinite series of (1 - p) ^ (1 / 32) are
%% 1 - (1 / 32) * p - (31 * p ^ 2)/(2 * 32 ^ 2).
%% Therefore, x is approximately p/32 + 31 * (p ^ 2) / (2 * 32 ^ 2).
%% x = NewReverseDiff / MaxDiff, p = ReverseDiff / MaxDiff.
sub_diff(infinity, _N) ->
	infinity;
sub_diff(Diff, N) ->
	Max = ?MAX_DIFF,
	Reverse = Max - Diff,
	%% Dividend / Divisor approximates x = p/N + (N - 1) * p^2 / (2 * N^2),
	%% with p = Reverse / Max (see the derivation above).
	Dividend = 2 * N * Reverse * Max + (N - 1) * Reverse * Reverse,
	Divisor = 2 * N * N * Max * Max,
	%% New difficulty = Max - x * Max, computed with exact integer arithmetic
	%% by factoring Max out of the numerator.
	Max * (Divisor - Dividend) div Divisor.

-ifdef(AR_TEST).
min_difficulty(_Height) ->
	1.

switch_to_randomx_fork_diff(_) ->
	1.
-else.
min_spora_difficulty(Height) ->
	?SPORA_MIN_DIFFICULTY(Height).

min_randomx_difficulty() ->
	min_sha384_difficulty() + ?RANDOMX_DIFF_ADJUSTMENT.

min_sha384_difficulty() ->
	31.

%% @doc Return the minimum difficulty valid at the given height, taking the
%% 1.7, 1.8, 2.4, and 2.5 hard forks into account.
min_difficulty(Height) ->
	Base =
		case Height >= ar_fork:height_1_7() of
			true ->
				case Height >= ar_fork:height_2_4() of
					true -> min_spora_difficulty(Height);
					false -> min_randomx_difficulty()
				end;
			false ->
				min_sha384_difficulty()
		end,
	case Height >= ar_fork:height_1_8() of
		true ->
			case Height >= ar_fork:height_2_5() of
				true -> ar_retarget:switch_to_linear_diff(Base);
				false -> ar_retarget:switch_to_linear_diff_pre_fork_2_5(Base)
			end;
		false ->
			Base
	end.

sha384_diff_to_randomx_diff(Sha384Diff) ->
	max(Sha384Diff + ?RANDOMX_DIFF_ADJUSTMENT, min_randomx_difficulty()).

switch_to_randomx_fork_diff(OldDiff) ->
	sha384_diff_to_randomx_diff(OldDiff) - 2.
-endif.

%%%===================================================================
%%% Private functions.
%%%===================================================================

next_cumulative_diff2(OldCDiff, NewDiff, Height) ->
	Delta =
		case Height >= ar_fork:height_1_8() of
			false ->
				NewDiff * NewDiff;
			true ->
				%% The average number of hashes one needs to try to find a solution.
				case Height >= ar_fork:height_2_5() of
					false -> erlang:trunc(?MAX_DIFF / (?MAX_DIFF - NewDiff));
					true -> ?MAX_DIFF div (?MAX_DIFF - NewDiff)
				end
		end,
	OldCDiff + Delta.
================================================
FILE: apps/arweave/src/ar_disk_cache.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

-module(ar_disk_cache).

-behaviour(gen_server).

-export([lookup_block_filename/1, lookup_block_filename/2, lookup_tx_filename/1,
		lookup_tx_filename/2, write_block/1, write_block_shadow/1, reset/0, write_tx/1]).

-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
		code_change/3]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_wallets.hrl").

%% Internal state definition.
-record(state, {
	limit_max,
	limit_min,
	size = 0,
	path
}).

%%%===================================================================
%%% API
%%%===================================================================

lookup_block_filename(H) ->
	lookup_block_filename(H, not_set).

%% @doc Return {ok, {Path, json | binary}} for the cached block with the given
%% hash, or unavailable. JSON files take precedence over binary files.
lookup_block_filename(H, CustomDir) when is_binary(H) ->
	PathBlock = get_cache_subdir(?DISK_CACHE_BLOCK_DIR, CustomDir),
	FileName = binary_to_list(ar_util:encode(H)),
	FilePath = filename:join(PathBlock, FileName),
	FilePathJSON = iolist_to_binary([FilePath, ".json"]),
	case ar_storage:is_file(FilePathJSON) of
		true ->
			{ok, {FilePathJSON, json}};
		_ ->
			FilePathBin = iolist_to_binary([FilePath, ".bin"]),
			case ar_storage:is_file(FilePathBin) of
				true -> {ok, {FilePathBin, binary}};
				_ -> unavailable
			end
	end.

lookup_tx_filename(Hash) ->
	lookup_tx_filename(Hash, not_set).

%% @doc Return {ok, Path} for the cached transaction with the given hash, or
%% unavailable.
lookup_tx_filename(Hash, CustomDir) when is_binary(Hash) ->
	PathTX = get_cache_subdir(?DISK_CACHE_TX_DIR, CustomDir),
	FileName = binary_to_list(ar_util:encode(Hash)) ++ ".json",
	File = filename:join(PathTX, FileName),
	case ar_storage:is_file(File) of
		true -> {ok, File};
		_ -> unavailable
	end.

%% Resolve the given cache subdirectory. When no custom dir is given, the base
%% cache path is derived from the configuration and memoized in the process
%% dictionary to avoid repeated configuration lookups. (This helper replaces
%% the identical inline logic previously duplicated in both lookup functions.)
get_cache_subdir(SubDir, not_set) ->
	Base =
		case get(ar_disk_cache_path) of
			undefined ->
				{ok, Config} = arweave_config:get_env(),
				Path = filename:join(Config#config.data_dir, ?DISK_CACHE_DIR),
				put(ar_disk_cache_path, Path),
				Path;
			Path ->
				Path
		end,
	filename:join(Base, SubDir);
get_cache_subdir(SubDir, CustomDir) ->
	filename:join([CustomDir, ?DISK_CACHE_DIR, SubDir]).
%% @doc Persist the block shadow to the cache as &lt;hash&gt;.bin and account for the
%% written bytes in the server state.
write_block_shadow(B) ->
	Filename = binary_to_list(ar_util:encode(B#block.indep_hash)) ++ ".bin",
	Filepath = filename:join(get_block_path(), Filename),
	Bin = ar_serialize:block_to_binary(B),
	Size = byte_size(Bin),
	?LOG_DEBUG([{event, write_block_shadow},
			{hash, ar_util:encode(B#block.indep_hash)}, {size, Size}]),
	gen_server:cast(?MODULE, {record_written_data, Size}),
	case ar_storage:write_file_atomic(Filepath, Bin) of
		ok ->
			ok;
		{error, Reason} = Error ->
			?LOG_ERROR([{event, failed_to_store_block_in_disk_cache},
					{reason, io_lib:format("~p", [Reason])}]),
			Error
	end.

%% @doc Cache the given block: its shadow (transaction identifiers instead of
%% full transactions) plus the headers of its transactions.
write_block(B) ->
	Shadow = B#block{ txs = [TX#tx.id || TX <- B#block.txs] },
	case write_block_shadow(Shadow) of
		ok -> write_txs(B#block.txs);
		Error -> Error
	end.

%% Write the given transactions one by one, stopping at the first failure.
write_txs([]) ->
	ok;
write_txs([TX | Rest]) ->
	case write_tx(TX) of
		ok -> write_txs(Rest);
		Error -> Error
	end.

%% @doc Remove everything from the disk cache.
reset() ->
	gen_server:call(?MODULE, reset).

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%% @private
%% @doc Initialize the server: make sure the cache directories exist, compute
%% the current cache size on disk, and derive the upper/lower cleanup
%% watermarks from the configured cache size.
init([]) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	Path = filename:join(Config#config.data_dir, ?DISK_CACHE_DIR),
	BlockPath = filename:join(Path, ?DISK_CACHE_BLOCK_DIR),
	TXPath = filename:join(Path, ?DISK_CACHE_TX_DIR),
	ok = filelib:ensure_dir(BlockPath ++ "/"),
	ok = filelib:ensure_dir(TXPath ++ "/"),
	%% Sum the sizes of all cached .json and .bin files, recursively.
	Size = filelib:fold_files(
		Path,
		"(.*\\.json$)|(.*\\.bin$)",
		true,
		fun(F, Acc) -> filelib:file_size(F) + Acc end,
		0
	),
	LimitMax = Config#config.disk_cache_size * 1048576, % MB to Bytes.
	LimitMin = trunc(LimitMax * (100 - ?DISK_CACHE_CLEAN_PERCENT_MAX) / 100),
	State = #state{
		limit_max = LimitMax,
		limit_min = LimitMin,
		size = Size,
		path = Path
	},
	erlang:garbage_collect(),
	{ok, State}.

%% @private
%% @doc Handle synchronous calls. reset wipes the cache directory contents and
%% recreates the block and transaction subdirectories.
handle_call(reset, _From, State) ->
	Path = State#state.path,
	?LOG_DEBUG([{event, reset_disk_cache}, {path, Path}]),
	%% Path is taken from the node configuration; recursive removal is done via
	%% the shell.
	os:cmd("rm -r " ++ Path ++ "/*"),
	BlockPath = filename:join(Path, ?DISK_CACHE_BLOCK_DIR),
	TXPath = filename:join(Path, ?DISK_CACHE_TX_DIR),
	ok = filelib:ensure_dir(BlockPath ++ "/"),
	ok = filelib:ensure_dir(TXPath ++ "/"),
	{reply, ok, State#state{ size = 0 }};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% @private
%% @doc Handle async messages. record_written_data grows the cache size counter
%% and schedules a cleanup check; may_be_clean_up removes the oldest cached
%% files when the cache exceeds its upper limit, until it is back under the
%% lower watermark.
handle_cast({record_written_data, Size}, State) ->
	NewSize = State#state.size + Size,
	gen_server:cast(?MODULE, may_be_clean_up),
	{noreply, State#state{ size = NewSize }};
handle_cast(may_be_clean_up, State) when State#state.size > State#state.limit_max ->
	?LOG_DEBUG([{event, disk_cache_exceeds_limit}, {limit, State#state.limit_max},
			{cache_size, State#state.size}]),
	%% Sort by {modification time, size, name} so the oldest files go first.
	Files = lists:sort(filelib:fold_files(
		State#state.path,
		"(.*\\.json$)|(.*\\.bin$)",
		true,
		fun(F, Acc) -> [{filelib:last_modified(F), filelib:file_size(F), F} | Acc] end,
		[])
	),
	%% How much space should be cleaned up.
	ToRemove = State#state.size - State#state.limit_min,
	Removed = delete_file(Files, ToRemove, 0),
	NewSize = State#state.size - Removed,
	erlang:garbage_collect(),
	{noreply, State#state{ size = NewSize }};
handle_cast(may_be_clean_up, State) ->
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% @private
%% @doc Handle all non call/cast messages; none are expected.
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

%% @private
%% @doc Called when the server is about to terminate; nothing to clean up.
%% The return value is ignored.
terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate},
			{reason, Reason}]),
	ok.

%% @private
%% @doc Convert process state when code is changed.
code_change(_OldVsn, State, _Extra) ->
	{ok, State}.

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% Return the directory the cached blocks are written to.
get_block_path() ->
	{ok, Config} = arweave_config:get_env(),
	Path = filename:join(Config#config.data_dir, ?DISK_CACHE_DIR),
	filename:join(Path, ?DISK_CACHE_BLOCK_DIR).

%% Return the directory the cached transactions are written to.
get_tx_path() ->
	{ok, Config} = arweave_config:get_env(),
	Path = filename:join(Config#config.data_dir, ?DISK_CACHE_DIR),
	filename:join(Path, ?DISK_CACHE_TX_DIR).

%% Serialize the transaction header as JSON (the data field of format-2
%% transactions is dropped) and write it to the cache.
write_tx(TX) ->
	Filename = binary_to_list(ar_util:encode(TX#tx.id)) ++ ".json",
	Filepath = filename:join(get_tx_path(), Filename),
	Header =
		case TX#tx.format of
			1 -> TX;
			2 -> TX#tx{ data = <<>> }
		end,
	Data = ar_serialize:jsonify(ar_serialize:tx_to_json_struct(Header)),
	Size = byte_size(Data),
	?LOG_DEBUG([{event, write_tx}, {txid, ar_util:encode(TX#tx.id)}, {size, Size}]),
	gen_server:cast(?MODULE, {record_written_data, Size}),
	case ar_storage:write_file_atomic(Filepath, Data) of
		ok ->
			ok;
		{error, Reason} = Error ->
			?LOG_ERROR([{event, failed_to_store_transaction_in_disk_cache},
					{reason, io_lib:format("~p", [Reason])}]),
			Error
	end.
%% Delete files in list order until ToRemove bytes have been reclaimed or the
%% list is exhausted; files that fail to delete are skipped and not counted.
%% Returns the total number of bytes actually freed.
delete_file([], _ToRemove, Removed) ->
	Removed;
delete_file(_Files, ToRemove, Removed) when ToRemove < 0 ->
	Removed;
delete_file([{_DateTime, Size, Filename} | Rest], ToRemove, Removed) ->
	case file:delete(Filename) of
		ok ->
			?LOG_DEBUG([{event, cleaned_disk_cache}, {removed_file, Filename},
					{cleaned_size, Size}]),
			delete_file(Rest, ToRemove - Size, Removed + Size);
		{error, Reason} ->
			?LOG_ERROR([{event, failed_to_remove_disk_cache_file}, {file, Filename},
					{reason, io_lib:format("~p", [Reason])}]),
			delete_file(Rest, ToRemove, Removed)
	end.


================================================
FILE: apps/arweave/src/ar_disksup.erl
================================================
%% Erlang OTP disksup copyright note:
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%

%%% @doc The server is a modified version of disksup from Erlang OTP - it periodically
%%% checks for available disk space and returns it in bytes (disksup only serves it in %).
%%% @end
-module(ar_disksup).

-behaviour(gen_server).

-export([start_link/0, get_disk_space_check_frequency/0, get_disk_data/0, pause/0,
		resume/0]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-record(state, {
	timeout,
	os,
	diskdata = [],
	port,
	paused = false
}).
%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Return the configured interval between disk space checks.
get_disk_space_check_frequency() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.disk_space_check_frequency.

%% @doc Return the most recently collected disk data.
get_disk_data() ->
	gen_server:call(?MODULE, get_disk_data, ?DEFAULT_CALL_TIMEOUT).

%% @doc Suspend the periodic disk space checks.
pause() ->
	gen_server:call(?MODULE, pause, ?DEFAULT_CALL_TIMEOUT).

%% @doc Resume the periodic disk space checks.
resume() ->
	gen_server:call(?MODULE, resume, ?DEFAULT_CALL_TIMEOUT).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	process_flag(trap_exit, true),
	process_flag(priority, low),
	OS = get_os(),
	SupportedUnix = [sunos4, solaris, freebsd, dragonfly, darwin, linux, posix,
			openbsd, netbsd],
	Port =
		case OS of
			{unix, Flavor} ->
				case lists:member(Flavor, SupportedUnix) of
					true -> start_portprogram();
					false -> exit({unsupported_os, OS})
				end;
			{win32, _OSname} ->
				not_used;
			_ ->
				exit({unsupported_os, OS})
		end,
	%% Initiate the first check.
	self() ! timeout,
	Timeout = get_disk_space_check_frequency(),
	?LOG_INFO([{event, disksup_init}, {os, OS}, {port, Port}, {timeout, Timeout}]),
	{ok, #state{ port = Port, os = OS, timeout = Timeout }}.

handle_call(get_disk_data, _From, State) ->
	{reply, State#state.diskdata, State};
handle_call(pause, _From, State) ->
	?LOG_INFO([{event, pausing_disksup}]),
	{reply, ok, State#state{ paused = true }};
handle_call(resume, _From, State) ->
	?LOG_INFO([{event, resuming_disksup}]),
	{reply, ok, State#state{ paused = false }}.

handle_cast(_Msg, State) ->
	{noreply, State}.
%% While paused, keep the timer ticking but skip the actual checks.
handle_info(timeout, #state{ paused = true } = State) ->
	?LOG_INFO([{event, disksup_paused}]),
	{ok, _} = ar_timer:send_after(
		State#state.timeout,
		self(),
		timeout,
		#{ skip_on_shutdown => false }
	),
	{noreply, State};
handle_info(timeout, State) ->
	NewDiskData = check_disk_space(State#state.os, State#state.port),
	ensure_storage_modules_paths(),
	broadcast_disk_free(State#state.os, State#state.port),
	{ok, _} = ar_timer:send_after(
		State#state.timeout,
		self(),
		timeout,
		#{ skip_on_shutdown => false }
	),
	{noreply, State#state{ diskdata = NewDiskData }};
handle_info({'EXIT', _Port, Reason}, State) ->
	{stop, {port_died, Reason}, State#state{ port = not_used }};
handle_info(_Info, State) ->
	{noreply, State}.

terminate(Reason, State) ->
	case State#state.port of
		not_used -> ok;
		Port -> port_close(Port)
	end,
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate},
			{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

get_os() ->
	case os:type() of
		{unix, sunos} ->
			case os:version() of
				{5, _, _} -> {unix, solaris};
				{4, _, _} -> {unix, sunos4};
				V -> exit({unknown_os_version, V})
			end;
		OS ->
			OS
	end.

start_portprogram() ->
	open_port({spawn, "sh -s disksup 2>&1"}, [stream]).

%% Run the given shell command through the port shell and collect its output.
%% Restored per OTP's os_mon disksup: the extracted source had lost the port
%% write and the get_reply/2 function head between my_cmd/2 and its receive
%% loop. The trailing echoed ^M (carriage return, 13) marks the end of the
%% reply - see newline/2.
my_cmd(Cmd0, Port) ->
	%% Insert a new line after the command, in case the command
	%% contains a comment character.
	Cmd = io_lib:format("(~s\n) </dev/null; echo  \"\^M\"\n", [Cmd0]),
	Port ! {self(), {command, [Cmd, 10]}},
	get_reply(Port, []).

%% Accumulate port output until the carriage-return marker arrives.
get_reply(Port, O) ->
	receive
		{Port, {data, N}} ->
			case newline(N, O) of
				{ok, Str} -> Str;
				{more, Acc} -> get_reply(Port, Acc)
			end;
		{'EXIT', Port, Reason} ->
			exit({port_died, Reason})
	end.

%% Reverse-accumulate characters; a carriage return (13) terminates the reply.
newline([13 | _], B) -> {ok, lists:reverse(B)};
newline([H | T], B) -> newline(T, [H | B]);
newline([], B) -> {more, B}.

find_cmd(Cmd) ->
	os:find_executable(Cmd).

find_cmd(Cmd, Path) ->
	%% Try to find it at the specific location.
	case os:find_executable(Cmd, Path) of
		false -> find_cmd(Cmd);
		Found -> Found
	end.
%% We use as many absolute paths as possible below as there may be stale
%% NFS handles in the PATH which cause these commands to hang.
check_disk_space({win32, _}, not_used) ->
	Result = os_mon_sysinfo:get_disk_info(),
	check_disks_win32(Result);
check_disk_space({unix, solaris}, Port) ->
	Result = my_cmd("/usr/bin/df -k", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, linux}, Port) ->
	Df = find_cmd("df", "/bin"),
	Result = my_cmd(Df ++ " -k -x squashfs", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, posix}, Port) ->
	Result = my_cmd("df -k -P", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, dragonfly}, Port) ->
	Result = my_cmd("/bin/df -k -t ufs,hammer", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, freebsd}, Port) ->
	Result = my_cmd("/bin/df -k", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, openbsd}, Port) ->
	Result = my_cmd("/bin/df -k", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, netbsd}, Port) ->
	Result = my_cmd("/bin/df -k -t ffs", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, sunos4}, Port) ->
	Result = my_cmd("df", Port),
	check_disks_solaris(skip_to_eol(Result));
check_disk_space({unix, darwin}, Port) ->
	Result = my_cmd("/bin/df -i -k -t ufs,hfs,apfs", Port),
	check_disks_susv3(skip_to_eol(Result)).

%% Run df for the given path. macOS df has no -B flag, so bytes are reported
%% in the default block size there.
disk_free_cmd({unix, darwin}, Df, DataDirPath, Port) ->
	my_cmd(Df ++ " -Pa " ++ DataDirPath ++ "/", Port);
disk_free_cmd({unix, _}, Df, DataDirPath, Port) ->
	my_cmd(Df ++ " -Pa -B1 " ++ DataDirPath ++ "/", Port).

%% Check for hardware errors in df output. Returns true when an error is
%% found (unless ThrowOnError is true, in which case it raises), false
%% otherwise.
%%
%% Fix: the previous implementation used
%% lists:member("Input/output error", DfOutput), which tests whether the whole
%% string is an *element* of the list. Since DfOutput is a flat character list
%% (the output of my_cmd/2), that membership test could never be true, so I/O
%% errors were silently missed. A substring search is required.
check_for_hardware_error(DfOutput, ThrowOnError) ->
	case string:find(DfOutput, "Input/output error") of
		nomatch ->
			false;
		_ ->
			ar:console("~nERROR: one or more of your disks are in corrupt/failing state.~n~p~n",
					[DfOutput]),
			case ThrowOnError of
				true -> erlang:error({input_output_error_detected, DfOutput});
				_ -> true
			end
	end.
%% Iterate through the data dir and every storage module path, broadcasting
%% the remaining disk space for each. A hardware error on the data dir drive
%% is fatal; a failing storage module drive is reported and skipped.
broadcast_disk_free({unix, _} = Os, Port) ->
	Df = find_cmd("df"),
	[{DataDirID, DataDirPath} | StorageModulePaths] = get_storage_modules_paths(),
	DataDirDfResult = disk_free_cmd(Os, Df, DataDirPath, Port),
	check_for_hardware_error(DataDirDfResult, true),
	[DataDirFs, DataDirBytes, DataDirPercentage] = parse_df_2(DataDirDfResult),
	ar_events:send(disksup, {
		remaining_disk_space,
		DataDirID, true, DataDirPercentage, DataDirBytes
	}),
	ReportStorageModule =
		fun({StoreID, StorageModulePath}) ->
			Result = disk_free_cmd(Os, Df, StorageModulePath, Port),
			case check_for_hardware_error(Result, false) of
				true ->
					ar:console("~nERROR: storage module ~p is offline.~n",
							[StorageModulePath]),
					ok;
				false ->
					[StorageModuleFs, Bytes, Percentage] = parse_df_2(Result),
					%% Same filesystem as the data dir drive?
					IsDataDirDrive = string:equal(DataDirFs, StorageModuleFs),
					ar_events:send(disksup, {
						remaining_disk_space,
						StoreID, IsDataDirDrive, Percentage, Bytes
					})
			end
		end,
	lists:foreach(ReportStorageModule, StorageModulePaths);
broadcast_disk_free(_, _) ->
	ar:console("~nWARNING: disk space checks are not supported on your platform. The node "
			"may stop working if it runs out of space.~n", []).

%% This code works for Linux and FreeBSD as well.
check_disks_solaris("") ->
	[];
check_disks_solaris("\n") ->
	[];
check_disks_solaris(Str) ->
	case parse_df(Str, posix) of
		{ok, {KB, CapKB, MntOn}, Rest} ->
			[{MntOn, KB, CapKB} | check_disks_solaris(Rest)];
		_ ->
			check_disks_solaris(skip_to_eol(Str))
	end.

%% @private
%% @doc Character class test: a word ends at a space or a percent sign (the
%% Capacity field is immediately followed by '%').
parse_df_is_not_space($ ) -> false;
parse_df_is_not_space($%) -> false;
parse_df_is_not_space(_) -> true.

%% @private
%% @doc Character class test: true only for the space character.
parse_df_is_space($ ) -> true;
parse_df_is_space(_) -> false.
%% @private
%% @doc Character class test: true until a carriage return or a line feed.
parse_df_is_not_eol($\r) -> false;
parse_df_is_not_eol($\n) -> false;
parse_df_is_not_eol(_) -> true.

%% @private
%% @doc Drop the leading word, then the spaces that follow it.
parse_df_skip_word(Input) ->
	AfterWord = lists:dropwhile(fun parse_df_is_not_space/1, Input),
	lists:dropwhile(fun parse_df_is_space/1, AfterWord).

%% @private
%% @doc Take the leading word; drop the spaces that follow it.
parse_df_take_word(Input) ->
	{Word, AfterWord} = lists:splitwith(fun parse_df_is_not_space/1, Input),
	{Word, lists:dropwhile(fun parse_df_is_space/1, AfterWord)}.

%% @private
%% @doc Take the leading word; drop a '%' directly after it (if any) and then
%% the spaces that follow.
parse_df_take_word_percent(Input) ->
	{Word, AfterWord} = lists:splitwith(fun parse_df_is_not_space/1, Input),
	AfterPercent =
		case AfterWord of
			[$% | Rest] -> Rest;
			_ -> AfterWord % Might be no % or even an empty list.
		end,
	{Word, lists:dropwhile(fun parse_df_is_space/1, AfterPercent)}.

%% @private
%% @doc Given a line of 'df' POSIX/SUSv3 output split it into fields:
%% a string (mounted device), 4 integers (kilobytes, used, available
%% and capacity), skip % sign, (optionally for susv3 can also skip IUsed, IFree
%% and ICap% fields) then take remaining characters as the mount path.
-spec parse_df(string(), posix | susv3) ->
	{error, parse_df} | {ok, {integer(), integer(), list()}, string()}.
parse_df(Line, Flavor) ->
	%% A POSIX/Linux df line looks like (after the header):
	%% Filesystem 1024-blocks Used Available Capacity Mounted on
	%% udev 2467108 0 2467108 0% /dev
	Rest1 = parse_df_skip_word(Line), % Skip the device path field.
	{KBStr, Rest2} = parse_df_take_word(Rest1), % Take the KB field.
	Rest3 = parse_df_skip_word(Rest2), % Skip the Used field.
	{AvailKBStr, Rest4} = parse_df_take_word(Rest3), % Take the Avail field.
	{_, Rest5} = parse_df_take_word_percent(Rest4), % Skip the Capacity% field.
	%% OS X/SUSv3 df output is similar to POSIX but carries 3 extra columns:
	%% Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted
	%% /dev/disk1 243949060 2380 86690680 65% 2029724 37555 0% /
	Rest6 =
		case Flavor of
			posix ->
				Rest5;
			susv3 ->
				%% Skip the three extra integer columns.
				Rest5a = parse_df_skip_word(Rest5), % Skip the IUsed field.
				Rest5b = parse_df_skip_word(Rest5a), % Skip the IFree field.
				{_, Rest5c} = parse_df_take_word_percent(Rest5b), % Skip ICap%.
				Rest5c
		end,
	%% The mount path is whatever remains until the end of the line.
	{MountPath, Rest7} = lists:splitwith(fun parse_df_is_not_eol/1, Rest6),
	%% Trim the newlines.
	Remaining = lists:dropwhile(fun(C) -> not parse_df_is_not_eol(C) end, Rest7),
	try
		KB = erlang:list_to_integer(KBStr),
		AvailKB = erlang:list_to_integer(AvailKBStr),
		{ok, {KB, AvailKB, MountPath}, Remaining}
	catch error:badarg ->
		{error, parse_df}
	end.

%% Parse per SUSv3 specification, notably recent OS X.
check_disks_susv3("") ->
	[];
check_disks_susv3("\n") ->
	[];
check_disks_susv3(Str) ->
	case parse_df(Str, susv3) of
		{ok, {KB, CapKB, MntOn}, Rest} ->
			[{MntOn, KB, CapKB} | check_disks_susv3(Rest)];
		_ ->
			check_disks_susv3(skip_to_eol(Str))
	end.

%% Parse the output of os_mon_sysinfo:get_disk_info/0 (Windows); keep only
%% fixed drives, converting bytes to kilobytes.
check_disks_win32([]) ->
	[];
check_disks_win32([H | T]) ->
	case io_lib:fread("~s~s~d~d~d", H) of
		{ok, [Drive, "DRIVE_FIXED", BAvail, BTot, _TotFree], _Rest} ->
			[{Drive, BTot div 1024, BAvail div 1024} | check_disks_win32(T)];
		{ok, _, _Rest} ->
			check_disks_win32(T);
		_ ->
			[]
	end.

%% Drop characters up to and including the first line feed.
skip_to_eol([]) -> [];
skip_to_eol([$\n | T]) -> T;
skip_to_eol([_ | T]) -> skip_to_eol(T).
%% Return [{StoreID, Path}] for the data dir (first entry) and every
%% configured storage module.
get_storage_modules_paths() ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	ToEntry =
		fun(StorageModule) ->
			StoreID = ar_storage_module:id(StorageModule),
			{StoreID, filename:join([DataDir, "storage_modules", StoreID])}
		end,
	[{?DEFAULT_MODULE, DataDir} | lists:map(ToEntry, Config#config.storage_modules)].

%% Make sure every storage module directory exists.
ensure_storage_modules_paths() ->
	lists:foreach(
		fun({_, StorageModulePath}) -> filelib:ensure_dir(StorageModulePath ++ "/") end,
		get_storage_modules_paths()
	).

%% Parse a two-line `df -P` style output (header plus one entry) into
%% [Filesystem, BytesAvailable, FractionAvailable]. The block size is taken
%% from the "N-blocks" header token; 1 when the header does not carry one.
parse_df_2(Input) ->
	[Header, Info] = string:tokens(Input, "\n"),
	[_, BlocksInfo | _] = string:tokens(Header, " \t"),
	BlockSize =
		case string:tokens(BlocksInfo, "-") of
			[Num, _] -> erlang:list_to_integer(Num);
			_ -> 1
		end,
	[Filesystem, Total, _, Available, _, _] = string:tokens(Info, " \t"),
	BytesAvailable = erlang:list_to_integer(Available),
	TotalCapacity = erlang:list_to_integer(Total),
	[Filesystem, BytesAvailable * BlockSize, BytesAvailable / TotalCapacity].


================================================
FILE: apps/arweave/src/ar_doctor_bench.erl
================================================
-module(ar_doctor_bench).

-export([main/1, help/0]).

-include_lib("kernel/include/file.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

-define(NUM_ITERATIONS, 5).
-define(NUM_FILES, 15).
-define(OUTPUT_FILENAME, ".benchmark.csv").
-define(FILE_FORMAT, "timestamp,bytes_read,elapsed_time_ms,throughput_bps").

%% Entry point of the data-doctor bench tool.
main(Args) ->
	bench_read(Args).
%% @doc Print the usage of the data-doctor bench tool to the console.
help() ->
	ar:console("data-doctor bench [ ...]~n"),
	ar:console(" duration: How long, in seconds, to run the benchmark for.~n"),
	ar:console(" data_dir: Full path to your data_dir.~n"),
	ar:console(" storage_module: List of storage modules in same format used for Arweave ~n"),
	ar:console(" configuration (e.g. 0,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI).~n"),
	ar:console(" It's recommended that you specify all configured storage_modules ~n"),
	ar:console(" in order to benchmark the overall system performance including ~n"),
	ar:console(" any data busses that are shared across disks.~n"),
	ar:console("~n"),
	ar:console("Example:~n"),
	ar:console("data-doctor bench 60 /mnt/arweave-data 0,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI \\~n"),
	ar:console(" 1,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI \\~n"),
	ar:console(" 2,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI \\~n"),
	ar:console(" 3,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI~n"),
	ar:console("~n"),
	ar:console("Note: During the run data will be logged to ~p in the format:~n",
			[?OUTPUT_FILENAME]),
	ar:console(" '~s'~n", [?FILE_FORMAT]).
%% Run the read benchmark. Requires at least three arguments: duration,
%% data dir, and one or more storage module configurations; returns false
%% (print usage) otherwise.
bench_read(Args) when length(Args) < 3 ->
	false;
bench_read(Args) ->
	[DurationArg, DataDir | StorageModuleConfigs] = Args,
	Duration = list_to_integer(DurationArg),
	{StorageModules, Address} = parse_storage_modules(StorageModuleConfigs, [], undefined),
	ar:console("Assuming mining address: ~p~n", [ar_util:safe_encode(Address)]),
	Config = #config{ data_dir = DataDir, storage_modules = StorageModules,
			mining_addr = Address },
	arweave_config:set_env(Config),
	%% Start the minimal set of supervisors needed to read chunk storage.
	ar_kv_sup:start_link(),
	ar_storage_sup:start_link(),
	ar_sync_record_sup:start_link(),
	ar_chunk_storage_sup:start_link(),
	ar_mining_io:start_link(standalone),
	ar:console("~n~nDisk read benchmark will run for ~B seconds.~n", [Duration]),
	ar:console("Data will be logged continuously to ~p in the format:~n",
			[?OUTPUT_FILENAME]),
	ar:console("'~s'~n~n", [?FILE_FORMAT]),
	StopTime = erlang:monotonic_time()
			+ erlang:convert_time_unit(Duration, second, native),
	%% Benchmark all storage modules in parallel.
	Results = ar_util:pmap(
		fun(StorageModule) -> read_storage_module(DataDir, StorageModule, StopTime) end,
		StorageModules
	),
	lists:foreach(
		fun({StoreID, SumChunks, SumElapsedTime}) ->
			ReadRate = (SumChunks * 1000 div 4) div SumElapsedTime,
			ar:console("~s read ~B chunks in ~B ms (~B MiB/s)~n",
					[StoreID, SumChunks, SumElapsedTime, ReadRate])
		end,
		Results),
	ar:console("~n"),
	true.

%% Parse the storage module CLI arguments, accumulating modules in order and
%% tracking the mining address; warn when different addresses are mixed.
parse_storage_modules([], StorageModules, Address) ->
	{StorageModules, Address};
parse_storage_modules([ConfigString | Rest], StorageModules, Address) ->
	{ok, StorageModule} = ar_config:parse_storage_module(ConfigString),
	Address2 = ar_storage_module:module_address(StorageModule),
	case Address2 == Address orelse Address == undefined of
		true ->
			ok;
		false ->
			ar:console("Warning: multiple mining addresses specified in storage_modules:~n")
	end,
	parse_storage_modules(Rest, StorageModules ++ [StorageModule], Address2).
%% Benchmark a single storage module by reading random recall ranges until
%% StopTime, appending one CSV line per iteration to the output file.
read_storage_module(_DataDir, StorageModule, StopTime) ->
	StoreID = ar_storage_module:id(StorageModule),
	ar_chunk_storage:open_files(StoreID),
	{StartOffset, EndOffset} = ar_storage_module:module_range(StorageModule),
	%% NOTE(review): the search pattern here is the empty string; the call
	%% looks like it lost a placeholder token (e.g. "<store_id>") during
	%% extraction - confirm against the upstream source.
	OutputFileName = string:replace(?OUTPUT_FILENAME, "", StoreID),
	random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName).
	% random_chunk_pread(DataDir, StoreID),
	% random_dev_pread(DataDir, StoreID),
	% dd_chunk_files_read(DataDir, StoreID),
	% dd_chunk_file_read(DataDir, StoreID),
	% dd_devs_read(DataDir, StoreID),
	% dd_dev_read(DataDir, StoreID),

random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName) ->
	random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName, 0, 0).

%% Loop until StopTime, accumulating the number of chunks read and the time
%% spent; returns {StoreID, SumChunks, SumElapsedTime}.
random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName,
		SumChunks, SumElapsedTime) ->
	StartTime = erlang:monotonic_time(),
	case StartTime < StopTime of
		true ->
			Chunks = read(StorageModule, StartOffset, EndOffset, ?RECALL_RANGE_SIZE,
					?NUM_FILES),
			EndTime = erlang:monotonic_time(),
			ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native,
					millisecond),
			%% timestamp,bytes_read,elapsed_time_ms,throughput_bps
			Timestamp = os:system_time(second),
			BytesRead = Chunks * ?DATA_CHUNK_SIZE,
			Line = io_lib:format("~B,~B,~B,~B~n",
					[Timestamp, BytesRead, ElapsedTime,
						BytesRead * 1000 div ElapsedTime]),
			file:write_file(OutputFileName, Line, [append]),
			random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName,
					SumChunks + Chunks, SumElapsedTime + ElapsedTime);
		false ->
			StoreID = ar_storage_module:id(StorageModule),
			{StoreID, SumChunks, SumElapsedTime}
	end.

read(StorageModule, StartOffset, EndOffset, Size, NumReads) ->
	read(StorageModule, StartOffset, EndOffset, Size, 0, NumReads).
%% Issue NumReads recall-range reads at random offsets within the module
%% range; returns the total number of chunks read.
read(_StorageModule, _StartOffset, _EndOffset, _Size, NumChunks, 0) ->
	NumChunks;
read(StorageModule, StartOffset, EndOffset, Size, NumChunks, NumReads) ->
	Offset = rand:uniform(EndOffset - Size - StartOffset + 1) + StartOffset,
	Candidate = #mining_candidate{
		mining_address = ar_storage_module:module_address(StorageModule),
		packing_difficulty = ar_storage_module:module_packing_difficulty(StorageModule)
	},
	case ar_mining_io:read_recall_range(chunk1, self(), Candidate, Offset) of
		true ->
			receive
				{chunks_read, _WhichChunk, _Candidate, _RecallRangeStart, ChunkOffsets} ->
					read(StorageModule, StartOffset, EndOffset, Size,
							NumChunks + length(ChunkOffsets), NumReads - 1)
			end;
		false ->
			%% Try again with a new random offset.
			read(StorageModule, StartOffset, EndOffset, Size, NumChunks, NumReads)
	end.

%% XXX: the following functions are not used, but may be useful in the future to benchmark
%% different read strategies. They can be deleted when they are no longer useful.

random_chunk_pread(DataDir, StoreID) ->
	random_chunk_pread(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).

%% Repeatedly pread random recall ranges from the chunk files, reporting the
%% aggregate throughput when done.
random_chunk_pread(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	ReadRate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*Random* chunk pread ~B MiB in ~B ms (~B MiB/s)~n",
			[SumBytes div ?MiB, SumElapsedTime, ReadRate]);
random_chunk_pread(DataDir, StoreID, Count, SumBytes, SumElapsedTime) ->
	Files = open_files(DataDir, StoreID),
	StartTime = erlang:monotonic_time(),
	Bytes = pread(Files, ?RECALL_RANGE_SIZE, 0),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond),
	random_chunk_pread(DataDir, StoreID, Count - 1, SumBytes + Bytes,
			SumElapsedTime + ElapsedTime).

random_dev_pread(DataDir, StoreID) ->
	random_dev_pread(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).
%% Terminal clause: print aggregate raw-device pread throughput.
%% NOTE(review): divides by SumElapsedTime; crashes if the total is 0 ms.
random_dev_pread(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	ReadRate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*Random* device pread ~B MiB in ~B ms (~B MiB/s)~n",
		[SumBytes div ?MiB, SumElapsedTime, ReadRate]);
%% One pass: open the device backing the first chunk file and pread
%% ?NUM_FILES random recall ranges from it.
%% NOTE(review): the device handle is opened every pass and never closed.
random_dev_pread(DataDir, StoreID, Count, SumBytes, SumElapsedTime) ->
	Filepath = hd(ar_chunk_storage:list_files(DataDir, StoreID)),
	Device = get_mounted_device(Filepath),
	{ok, File} = file:open(Device, [read, raw, binary]),
	%% The same device handle is reused for every simulated "file".
	Files = [{Device, File, ar_block:partition_size()} || _ <- lists:seq(1, ?NUM_FILES)],
	StartTime = erlang:monotonic_time(),
	Bytes = pread(Files, ?RECALL_RANGE_SIZE, 0),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond),
	random_dev_pread(DataDir, StoreID, Count - 1, SumBytes + Bytes,
		SumElapsedTime + ElapsedTime).

%% @doc Benchmark: shell out to dd over several randomly chosen chunk files
%% per pass.
dd_chunk_files_read(DataDir, StoreID) ->
	dd_chunk_files_read(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).

%% Terminal clause: print aggregate throughput.
dd_chunk_files_read(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	ReadRate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*dd* multi chunk files read ~B MiB in ~B ms (~B MiB/s)~n",
		[SumBytes div ?MiB, SumElapsedTime, ReadRate]);
dd_chunk_files_read(DataDir, StoreID, Count, SumBytes, SumElapsedTime) ->
	Files = open_files(DataDir, StoreID),
	StartTime = erlang:monotonic_time(),
	Bytes = dd_files(Files, ?RECALL_RANGE_SIZE, 0),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond),
	dd_chunk_files_read(DataDir, StoreID, Count - 1, SumBytes + Bytes,
		SumElapsedTime + ElapsedTime).

%% @doc Benchmark: dd over a single chunk file per pass.
dd_chunk_file_read(DataDir, StoreID) ->
	dd_chunk_file_read(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).
%% Terminal clause: print aggregate single-chunk-file dd throughput.
dd_chunk_file_read(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	ReadRate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*dd* single chunk file read ~B MiB in ~B ms (~B MiB/s)~n",
		[SumBytes div ?MiB, SumElapsedTime, ReadRate]);
%% One pass: dd ?NUM_FILES recall ranges out of one randomly chosen chunk file.
dd_chunk_file_read(DataDir, StoreID, Count, SumBytes, SumElapsedTime) ->
	Files = open_files(DataDir, StoreID),
	{Filepath, _File, FileSize} = hd(Files),
	StartTime = erlang:monotonic_time(),
	dd(Filepath, FileSize, ?RECALL_RANGE_SIZE, ?NUM_FILES),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond),
	Bytes = ?RECALL_RANGE_SIZE * ?NUM_FILES,
	dd_chunk_file_read(DataDir, StoreID, Count - 1, SumBytes + Bytes,
		SumElapsedTime + ElapsedTime).

%% @doc Benchmark: dd reads from a pre-created test file on the device.
dd_dev_file_read(DataDir, StoreID) ->
	dd_dev_file_read(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).

%% Terminal clause: print aggregate throughput.
dd_dev_file_read(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	ReadRate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*dd* multi dev file read ~B MiB in ~B ms (~B MiB/s)~n",
		[SumBytes div ?MiB, SumElapsedTime, ReadRate]);
dd_dev_file_read(DataDir, StoreID, Count, SumBytes, SumElapsedTime) ->
	%% NOTE(review): hard-coded absolute path to a pre-made 10 GiB test file;
	%% this only works on the machine it was written for (the XXX comment
	%% above marks this whole section as unused/experimental).
	Filepath = "/opt/prod/data/storage_modules/storage_module_19_cLGt682uYLJCl47QsRHfdTzMhSPTHPsUnUOzuvTm1HQ/dd.10GB",
	StartTime = erlang:monotonic_time(),
	dd(Filepath, 10*?GiB, ?RECALL_RANGE_SIZE, ?NUM_FILES),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond),
	Bytes = ?RECALL_RANGE_SIZE * ?NUM_FILES,
	dd_dev_file_read(DataDir, StoreID, Count - 1, SumBytes + Bytes,
		SumElapsedTime + ElapsedTime).

%% @doc Benchmark: dd issued directly against the mounted device.
dd_devs_read(DataDir, StoreID) ->
	dd_devs_read(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).
%% Final clause of the multi-device dd benchmark: report aggregate throughput.
dd_devs_read(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	Rate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*dd* multi devs read ~B MiB in ~B ms (~B MiB/s)~n",
		[SumBytes div ?MiB, SumElapsedTime, Rate]);
%% Run one timed pass of ?NUM_FILES dd reads against the mounted device, then
%% recurse with the updated byte and time accumulators.
dd_devs_read(DataDir, StoreID, Remaining, BytesAcc, ElapsedAcc) ->
	FirstFile = hd(ar_chunk_storage:list_files(DataDir, StoreID)),
	Device = get_mounted_device(FirstFile),
	Targets = lists:map(
		fun(_) -> {Device, not_set, ar_block:partition_size()} end,
		lists:seq(1, ?NUM_FILES)),
	T0 = erlang:monotonic_time(),
	PassBytes = dd_files(Targets, ?RECALL_RANGE_SIZE, 0),
	T1 = erlang:monotonic_time(),
	PassMs = erlang:convert_time_unit(T1 - T0, native, millisecond),
	dd_devs_read(DataDir, StoreID, Remaining - 1, BytesAcc + PassBytes,
		ElapsedAcc + PassMs).

%% Kick off the single-device dd benchmark with fresh accumulators.
dd_dev_read(DataDir, StoreID) ->
	dd_dev_read(DataDir, StoreID, ?NUM_ITERATIONS, 0, 0).

%% Final clause of the single-device dd benchmark: report aggregate throughput.
dd_dev_read(_DataDir, _StoreID, 0, SumBytes, SumElapsedTime) ->
	Rate = (SumBytes * 1000 div ?MiB) div SumElapsedTime,
	ar:console("*dd* single dev read ~B MiB in ~B ms (~B MiB/s)~n",
		[SumBytes div ?MiB, SumElapsedTime, Rate]);
%% Time one dd invocation covering ?NUM_FILES recall ranges on the device
%% backing the first chunk file of the store.
dd_dev_read(DataDir, StoreID, Remaining, BytesAcc, ElapsedAcc) ->
	FirstFile = hd(ar_chunk_storage:list_files(DataDir, StoreID)),
	Device = get_mounted_device(FirstFile),
	T0 = erlang:monotonic_time(),
	dd(Device, ar_block:partition_size(), ?RECALL_RANGE_SIZE, ?NUM_FILES),
	T1 = erlang:monotonic_time(),
	PassMs = erlang:convert_time_unit(T1 - T0, native, millisecond),
	PassBytes = ?RECALL_RANGE_SIZE * ?NUM_FILES,
	dd_dev_read(DataDir, StoreID, Remaining - 1, BytesAcc + PassBytes,
		ElapsedAcc + PassMs).

%% Resolve the filesystem device a path lives on via `df` + `awk`, trimming
%% the trailing newline from the shell output.
get_mounted_device(FilePath) ->
	string:trim(os:cmd("df " ++ FilePath ++ " | awk 'NR==2 {print $1}'"), both, "\n").
%% @doc Open up to ?NUM_FILES randomly chosen chunk files of the store and
%% return [{Filepath, IoDevice, FileSize}].
open_files(DataDir, StoreID) ->
	AllFilepaths = ar_chunk_storage:list_files(DataDir, StoreID),
	Filepaths = lists:sublist(ar_util:shuffle_list(AllFilepaths), ?NUM_FILES),
	lists:foldl(
		fun(Filepath, Acc) ->
			{ok, FileInfo} = file:read_file_info(Filepath),
			{ok, File} = file:open(Filepath, [read, raw, binary]),
			[{Filepath, File, FileInfo#file_info.size} | Acc]
		end,
		[],
		Filepaths).

%% @doc pread Size bytes at a random position from each file; return the
%% total number of bytes actually read.
pread([], _Size, NumBytes) ->
	NumBytes;
pread([{Filepath, File, FileSize} | Files], Size, NumBytes) ->
	%% NOTE(review): rand:uniform/1 requires a positive argument, so this
	%% crashes when FileSize =< Size -- presumably chunk files are always
	%% larger than ?RECALL_RANGE_SIZE; confirm.
	Position = max(0, rand:uniform(FileSize - Size)),
	% ar:console("pread: ~p ~B ~B ~B ~B~n", [Filepath, FileSize, Position, Size, NumBytes]),
	{ok, Bin} = file:pread(File, Position, Size),
	pread(Files, Size, NumBytes + byte_size(Bin)).

%% @doc Run one dd read of Size bytes against each file; returns total bytes
%% requested (dd output itself is discarded).
dd_files([], _Size, NumBytes) ->
	NumBytes;
dd_files([{Filepath, _File, FileSize} | Files], Size, NumBytes) ->
	dd(Filepath, FileSize, Size, 1),
	dd_files(Files, Size, NumBytes + Size).

%% @doc Shell out to dd with O_DIRECT, reading Count * Size bytes in
%% ?RECALL_RANGE_SIZE blocks starting at a random position.
dd(Filepath, FileSize, Size, Count) ->
	BlockSize = ?RECALL_RANGE_SIZE,
	Bytes = Size * Count,
	Blocks = Bytes div BlockSize,
	MaxOffset = max(1, FileSize - Bytes),
	%% dd's skip= is expressed in blocks, hence the division by BlockSize.
	Position = rand:uniform(MaxOffset) div BlockSize,
	Command = io_lib:format("dd iflag=direct if=~s skip=~B of=/dev/null bs=~B count=~B",
		[Filepath, Position, BlockSize, Blocks]),
	% ar:console("~s~n", [Command]),
	os:cmd(Command).

================================================
FILE: apps/arweave/src/ar_doctor_dump.erl
================================================

-module(ar_doctor_dump).

-export([main/1, help/0]).

-include_lib("kernel/include/file.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% @doc CLI entry point: delegates to dump/1.
main(Args) ->
	dump(Args).
%% @doc Print usage for the data-doctor dump subcommand.
%% NOTE(review): the first line reads "data-doctor dump ~n" with no argument
%% list; the angle-bracketed placeholders were presumably stripped by markup
%% processing. The runtime strings are left untouched here.
help() ->
	ar:console("data-doctor dump ~n"),
	ar:console(" include_txs: Whether to include transactions in the dump (true/false).~n"),
	ar:console(" block_id: The block ID to start the dump from.~n"),
	ar:console(" min_height: The minimum height of the blocks to dump.~n"),
	ar:console(" data_dir: Full path to your data_dir.~n"),
	ar:console(" output_dir: Full path to a directory where the dumped data will be written.~n"),
	ar:console("~nExample:~n"),
	ar:console("data-doctor dump true ZR7zbobdw55a....pRpUabEkLD0V 100000 /mnt/arweave-data /mnt/output~n").

%% @doc Walk the chain backwards from block H down to MinHeight, writing each
%% block (and optionally its transactions) as JSON under OutputDir.
%% Returns true for a well-formed argument list, false otherwise.
dump([IncludeTXs, H, MinHeight, DataDir, OutputDir]) ->
	%% ensure_dir/1 creates the parent directories of its argument, hence
	%% the "dummy" leaf name.
	ok = filelib:ensure_dir(filename:join([OutputDir, "blocks", "dummy"])),
	ok = filelib:ensure_dir(filename:join([OutputDir, "txs", "dummy"])),
	Config = #config{data_dir = DataDir},
	arweave_config:set_env(Config),
	ar_kv_sup:start_link(),
	ar_storage_sup:start_link(),
	dump_blocks(ar_util:decode(H), list_to_integer(MinHeight), OutputDir,
		list_to_boolean(IncludeTXs)),
	true;
dump(_) ->
	false.

%% Parse a CLI boolean; anything other than "true" maps to false.
list_to_boolean("true") -> true;
list_to_boolean("false") -> false;
list_to_boolean(_) -> false.

%% @doc Dump block BH to OutputDir/blocks (skipping files already on disk),
%% then recurse into its previous block. Stops below MinHeight, at the first
%% block missing from block_db, or at the first deserialization failure.
dump_blocks(BH, MinHeight, OutputDir, IncludeTXs) ->
	H = ar_util:encode(BH),
	case ar_kv:get(block_db, BH) of
		{ok, Bin} ->
			try
				case ar_serialize:binary_to_block(Bin) of
					{ok, B} ->
						case B#block.height >= MinHeight of
							true ->
								io:format("Block: ~p / ~p", [B#block.height, H]),
								JsonFilename = io_lib:format("~s.json",
									[ar_util:encode(B#block.indep_hash)]),
								OutputFilePath = filename:join(
									[OutputDir, "blocks", JsonFilename]),
								case file:read_file_info(OutputFilePath) of
									{ok, _FileInfo} ->
										io:format(" ... skipping~n"),
										ok; % File exists, do nothing
									{error, enoent} ->
										io:format(" ... writing~n"),
										% File does not exist, proceed with processing
										case IncludeTXs of
											true ->
												dump_txs(B#block.txs, OutputDir);
											false ->
												ok
										end,
										Json = ar_serialize:block_to_json_struct(B),
										JsonString = ar_serialize:jsonify(Json),
										file:write_file(OutputFilePath, JsonString)
								end,
								PrevBH = B#block.previous_block,
								dump_blocks(PrevBH, MinHeight, OutputDir, IncludeTXs);
							false ->
								io:format("Done.~n")
						end;
					_ ->
						ok
				end
			catch Type:Reason ->
				io:format("Error processing block ~p: ~p:~p~n", [H, Type, Reason])
			end;
		not_found ->
			io:format("Block ~p not found.~n", [H])
	end.

%% @doc Write each transaction in the list to OutputDir/txs as JSON;
%% transactions missing from tx_db are silently skipped.
dump_txs([], _OutputDir) ->
	ok;
dump_txs([TXID | TXIDs], OutputDir) ->
	case ar_kv:get(tx_db, TXID) of
		{ok, Bin} ->
			{ok, TX} = ar_serialize:binary_to_tx(Bin),
			Json = ar_serialize:tx_to_json_struct(TX),
			JsonString = ar_serialize:jsonify(Json),
			JsonFilename = io_lib:format("~s.json", [ar_util:encode(TXID)]),
			OutputFilePath = filename:join([OutputDir, "txs", JsonFilename]),
			file:write_file(OutputFilePath, JsonString);
		_ ->
			ok
	end,
	dump_txs(TXIDs, OutputDir).

================================================
FILE: apps/arweave/src/ar_doctor_inspect.erl
================================================

-module(ar_doctor_inspect).

-export([main/1, help/0]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_chunk_storage.hrl").

%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------

%% main/1 expects either:
%% 1. [Dir, StartStr, EndStr, Address1, Address2, ...] for traditional inspection
%% 2. ["bitmap", DataDir, StorageModule] for generating a bitmap of chunk states
main(Args) ->
	case Args of
		["bitmap", DataDir, StorageModuleConfig] ->
			bitmap(DataDir, StorageModuleConfig),
			true;
		["chunks", Dir, StartStr, EndStr | AddrListStr] when length(AddrListStr) >= 1 ->
			Addresses = [ar_util:decode(AddrStr) || AddrStr <- AddrListStr],
			arweave_config:set_env(#config{ disable = [], enable = [randomx_large_pages] }),
			ar_metrics:register(),
			ar_packing_sup:start_link(),
			%% Normalize the CLI offsets to padded chunk end offsets.
			Start = ar_block:get_chunk_padded_offset(list_to_integer(StartStr)),
			End = ar_block:get_chunk_padded_offset(list_to_integer(EndStr)),
			ar:console("~nInspecting chunks from padded offset ~p to ~p~n", [Start, End]),
			EncodedAddresses = [ar_util:encode(Address) || Address <- Addresses],
			ar:console("~nChecking chunks against unpacked and all addresses: ~p~n",
				[EncodedAddresses]),
			inspect_range(Dir, Start, End, Addresses),
			true;
		_ ->
			false
	end.

%% NOTE(review): the usage strings appear to have lost their angle-bracketed
%% argument placeholders to markup stripping; left untouched here.
help() ->
	ar:console("Usage: inspect chunks [address2 ...]~n"),
	ar:console(" inspect bitmap ~n").

%%--------------------------------------------------------------------
%% Inspect Chunks
%%--------------------------------------------------------------------

%% iterate from Padded (chunk end offset) = Start to End (inclusive)
inspect_range(_Dir, Start, End, _Addresses) when Start > End ->
	ok;
inspect_range(Dir, Start, End, Addresses) ->
	inspect_chunk(Dir, Start, Addresses),
	Next = Start + ?DATA_CHUNK_SIZE,
	inspect_range(Dir, Next, End, Addresses).

%% inspect_chunk/2 locates the chunk file and reads the local chunk,
%% then queries the remote chunk and prints their generated ids.
%% Inspect one chunk: locate it in the store's chunk file, fetch the expected
%% chunk from arweave.net, print both IDs and report which (if any) local
%% interpretation matches.
inspect_chunk(Dir, PaddedEndOffset, Addresses) ->
	ar:console("~n~n--- Inspecting padded offset: ~p ---~n", [PaddedEndOffset]),
	ChunkFileStart = ar_chunk_storage:get_chunk_file_start(PaddedEndOffset),
	Filepath = filename:join([Dir, integer_to_binary(ChunkFileStart)]),
	{Position, ChunkOffset} = ar_chunk_storage:get_position_and_relative_chunk_offset(
		ChunkFileStart, PaddedEndOffset),
	ar:console("File path: ~p~n", [Filepath]),
	ar:console("Position: ~p~n", [Position]),
	ar:console("Chunk offset: ~p~n", [ChunkOffset]),
	%% Fetch the expected chunk from arweave.net
	%% NOTE(review): the {ok, _} match crashes the inspection on any HTTP or
	%% deserialization failure.
	{ok, Proof} = fetch_remote_chunk(PaddedEndOffset),
	ExpectedChunk = maps:get(chunk, Proof),
	TXPath = maps:get(tx_path, Proof),
	{ok, TXRoot} = ar_merkle:extract_root(TXPath),
	ChunkSize = byte_size(ExpectedChunk),
	ExpectedChunkID = ar_tx:generate_chunk_id(ExpectedChunk),
	ar:console("~nExpected chunk size: ~p~n", [byte_size(ExpectedChunk)]),
	ar:console("Expected chunk ID: ~p~n", [ar_util:encode(ExpectedChunkID)]),
	%% Read local chunk from disk.
	{RawChunkOffset, RawChunk} = read_local_chunk(Filepath, Position),
	ar:console("~nRaw chunk: ~p~n", [byte_size(RawChunk)]),
	ar:console("Raw chunk offset: ~p~n", [RawChunkOffset]),
	RawChunkID = ar_tx:generate_chunk_id(RawChunk),
	ar:console("Raw chunk ID: ~p~n", [ar_util:encode(RawChunkID)]),
	%% Try unpacking the local chunk a number of different ways to see if any match the
	%% expected chunk ID.
	Result = check_all(
		ExpectedChunkID, RawChunk, PaddedEndOffset, Addresses, TXRoot, ChunkSize),
	print_match(Result).

%% New functions for checking unpacked chunks without printing per test;
%% only the first matching test is reported.
%% @doc Walk the candidate mining addresses; return {match, Packing} for the
%% first packing whose unpacked chunk matches ExpectedChunkID, else no_match.
check_unpacked([], _PaddedEndOffset, _TXRoot, _LocalChunk, _ChunkSize, _ExpectedChunkID) ->
	no_match;
check_unpacked([Address | Rest], PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
		ExpectedChunkID) ->
	case check_packings_for_address(Address, PaddedEndOffset, TXRoot, LocalChunk,
			ChunkSize, ExpectedChunkID) of
		{match, Packing} ->
			{match, Packing};
		no_match ->
			check_unpacked(Rest, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
				ExpectedChunkID)
	end.

%% @doc Try every known packing format for a single address.
check_packings_for_address(Address, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
		ExpectedChunkID) ->
	Packings = [
		{replica_2_9, Address},
		{spora_2_6, Address},
		{composite, Address, 1},
		{composite, Address, 2}
	],
	check_packings(Packings, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
		ExpectedChunkID).

%% @doc Return the first packing in the list that unpacks LocalChunk to the
%% expected chunk ID, or no_match.
check_packings([], _PaddedEndOffset, _TXRoot, _LocalChunk, _ChunkSize, _ExpectedChunkID) ->
	no_match;
check_packings([Packing | Rest], PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
		ExpectedChunkID) ->
	case check_packing(Packing, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
			ExpectedChunkID) of
		{match, _} = Match ->
			Match;
		no_match ->
			check_packings(Rest, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize,
				ExpectedChunkID)
	end.

%% @doc Unpack LocalChunk assuming Packing and compare the resulting chunk ID
%% against the expected one. Unpacking errors count as no_match.
check_packing(Packing, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize, ExpectedChunkID) ->
	case ar_packing_server:unpack(Packing, PaddedEndOffset, TXRoot, LocalChunk, ChunkSize) of
		{ok, Unpacked} ->
			UnpackedID = ar_tx:generate_chunk_id(Unpacked),
			if
				UnpackedID =:= ExpectedChunkID -> {match, Packing};
				true -> no_match
			end;
		{error, _Reason} ->
			no_match
	end.

%% read_local_chunk/2 opens the file, reads ?OFFSET_SIZE+?DATA_CHUNK_SIZE bytes
%% starting at Position and closes the file. Returns {ChunkOffset, Chunk}, or
%% {0, <<>>} on any error (which is logged to the console).
read_local_chunk(Filepath, Position) ->
	case file:open(Filepath, [read, binary, raw]) of
		{ok, F} ->
			%% Read header + chunk data.
			Length = ?OFFSET_SIZE + ?DATA_CHUNK_SIZE,
			case file:pread(F, Position, Length) of
				{ok, << ChunkOffset:?OFFSET_BIT_SIZE, Chunk:?DATA_CHUNK_SIZE/binary,
						_Rest/binary >>} ->
					file:close(F),
					{ChunkOffset, Chunk};
				Error ->
					%% Includes short reads near the end of the file: the
					%% pattern above requires a full-size chunk.
					file:close(F),
					ar:console("Error reading file ~s at position ~p: ~p~n",
						[Filepath, Position, Error]),
					{0, <<>>}
			end;
		{error, Reason} ->
			ar:console("Error opening file ~s: ~p~n", [Filepath, Reason]),
			{0, <<>>}
	end.

%% fetch_remote_chunk/1 uses httpc (in inets application) to query the remote URL.
%% Returns ar_serialize:binary_to_poa/1's result on HTTP 200, {error, _} otherwise.
fetch_remote_chunk(PaddedOffset) ->
	%% Build URL e.g. "http://arweave.net/chunk2/123456"
	URL = lists:concat(["https://arweave.net/chunk2/", integer_to_list(PaddedOffset)]),
	ar:console("Fetching remote chunk from ~s~n", [URL]),
	%% Ensure inets is started.
	application:ensure_all_started(inets),
	%% Fix: {body_format, binary} is an httpc Option (4th argument), not an
	%% HTTPOption (3rd argument) where the original passed it. With the option
	%% in the right place the body arrives as a binary, so the original
	%% list_to_binary(Body) call (badarg on a binary) is no longer needed.
	case httpc:request(get, {URL, []}, [], [{body_format, binary}]) of
		{ok, {{_, 200, _}, _Headers, Body}} ->
			ar_serialize:binary_to_poa(Body);
		{ok, Response} ->
			ar:console("Unexpected response for ~s: ~p~n", [URL, Response]),
			{error, Response};
		{error, Reason} ->
			ar:console("HTTP request error for ~s: ~p~n", [URL, Reason]),
			{error, Reason}
	end.

%% check_all/6 performs the raw, entropy, and unpacking checks sequentially.
%% The entropy check only uses the first address in the list.
check_all(ExpectedChunkID, LocalChunk, PaddedEndOffset, Addresses, TXRoot, ChunkSize) ->
	LocalID = ar_tx:generate_chunk_id(LocalChunk),
	case LocalID =:= ExpectedChunkID of
		true ->
			{match, "Raw chunk"};
		false ->
			Entropy = ar_entropy_storage:generate_missing_entropy(
				PaddedEndOffset, hd(Addresses)),
			EntropyID = ar_tx:generate_chunk_id(Entropy),
			case EntropyID =:= ExpectedChunkID of
				true ->
					{match, "Entropy"};
				false ->
					check_unpacked(Addresses, PaddedEndOffset, TXRoot, LocalChunk,
						ChunkSize, ExpectedChunkID)
			end
	end.

%% print_match/1 prints the match result.
%% Print the match result: strings are the special "Raw chunk"/"Entropy"
%% labels; any other match term is a packing and is encoded for display.
print_match({match, Type}) when is_list(Type) ->
	ar:console("~nMATCH: ~s~n", [Type]);
print_match({match, Packing}) ->
	ar:console("~nMATCH: ~p~n", [ar_serialize:encode_packing(Packing, true)]);
print_match(no_match) ->
	ar:console("~nNO MATCH~n").

%%--------------------------------------------------------------------
%% Inspect Bitmap
%%--------------------------------------------------------------------

%% @doc Generates a bitmap of the provided storage module. Each pixel is a chunk where
%% the color is determined by the packing format of the chunk. Each row of the bitmap
%% is a replica.2.9 sector (so the bitmap is 1024 rows high).
bitmap(DataDir, StorageModuleConfig) ->
	{ok, StorageModule} = ar_config:parse_storage_module(StorageModuleConfig),
	Config = #config{ data_dir = DataDir, storage_modules = [StorageModule]},
	arweave_config:set_env(Config),
	StoreID = ar_storage_module:id(StorageModule),
	%% Bring up just enough of the storage stack to read the sync records.
	ar_kv_sup:start_link(),
	ar_storage_sup:start_link(),
	ar_sync_record_sup:start_link(),
	ar_data_sync:init_kv(StoreID),
	{ModuleStart, ModuleEnd} = ar_storage_module:module_range(StorageModule),
	ChunkPackings = ar_chunk_visualization:get_chunk_packings(
		ModuleStart, ModuleEnd, StoreID, true),
	ar_chunk_visualization:print_chunk_stats(ChunkPackings),
	Bitmap = ar_chunk_visualization:generate_bitmap(ChunkPackings),
	Filename = "bitmap_" ++ StoreID ++ ".ppm",
	file:write_file(Filename, ar_chunk_visualization:bitmap_to_binary(Bitmap)),
	ar:console("Bitmap written to ~s~n", [Filename]).

================================================
FILE: apps/arweave/src/ar_doctor_merge.erl
================================================

-module(ar_doctor_merge).

-export([main/1, help/0]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_chunk_storage.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%% @doc CLI entry point: delegates to merge/1.
main(Args) ->
	merge(Args).

help() ->
	ar:console("data-doctor merge data_dir storage_module src_directories~n").
%% @doc CLI handler: merge one or more source directories into the given
%% storage module under DataDir. Returns false on too few arguments, true on
%% completion.
merge(Args) when length(Args) < 3 ->
	false;
merge(Args) ->
	[DataDir, StorageModuleConfig | SrcDirs] = Args,
	%% parse_storage_module/1 returns {ok, StorageModule} -- see the identical
	%% call in ar_doctor_inspect:bitmap/2. The original bound the bare return
	%% value, feeding the {ok, _} tuple to ar_storage_module:id/1.
	{ok, StorageModule} = ar_config:parse_storage_module(StorageModuleConfig),
	StoreID = ar_storage_module:id(StorageModule),
	ok = merge(DataDir, StorageModule, StoreID, SrcDirs),
	true.

%% @doc Fold over the source directories: for each one, move its chunk files
%% into the destination module, copy its RocksDB databases, and union its
%% sync records into the destination.
merge(_DataDir, _StorageModule, _StoreID, []) ->
	ok;
merge(DataDir, StorageModule, StoreID, [SrcDir | SrcDirs]) ->
	DstDir = filename:join([DataDir, "storage_modules", StoreID]),
	ar:console("~n~nMerge data from ~p into ~p~n~n", [SrcDir, DstDir]),
	move_chunk_storage(SrcDir, DstDir),
	copy_db("ar_data_sync_db", SrcDir, DstDir),
	copy_db("ar_data_sync_chunk_db", SrcDir, DstDir),
	copy_db("ar_data_sync_disk_pool_chunks_index_db", SrcDir, DstDir),
	copy_db("ar_data_sync_data_root_index_db", SrcDir, DstDir),
	copy_sync_records(SrcDir, DstDir),
	merge(DataDir, StorageModule, StoreID, SrcDirs).

%% @doc Shell out to move the chunk files under SrcDir into DstDir.
%% NOTE(review): paths are interpolated unquoted into shell commands; paths
%% containing spaces or shell metacharacters will break or do the wrong thing.
move_chunk_storage(SrcDir, DstDir) ->
	MkDir = io_lib:format("mkdir -p ~s/chunk_storage ~s/rocksdb~n", [DstDir, DstDir]),
	Mv = io_lib:format("mv ~s/chunk_storage/* ~s/chunk_storage~n", [SrcDir, DstDir]),
	ar:console(MkDir),
	os:cmd(MkDir),
	ar:console(Mv),
	os:cmd(Mv).
% Function to copy all key/value pairs from one DB to another.
% Opens the source read-only (create_if_missing false) and the destination
% with creation enabled, then copies every column family key by key.
copy_db(DB, SrcDir, DstDir) ->
	ar:console("~nCopying DB ~p~n", [DB]),
	SrcPath = filename:join([SrcDir, "rocksdb", DB]),
	DstPath = filename:join([DstDir, "rocksdb", DB]),
	% List all column families in the source database
	{ok, ColumnFamilies} = rocksdb:list_column_families(SrcPath, [{create_if_missing, false}]),
	% Keep descriptor order aligned with ColumnFamilies: the handle lists
	% returned by rocksdb:open/3 follow descriptor order and are zipped
	% against ColumnFamilies below. The original built this list with a
	% prepending foldl, reversing it and mislabeling every per-family log
	% line (the copy itself was unaffected since both opens were reversed
	% identically).
	CFDescriptors = [{CF, []} || CF <- ColumnFamilies],
	% Open Source Database with all column families
	{ok, SrcDB, SrcCFs} = rocksdb:open(SrcPath, [{create_if_missing, false}], CFDescriptors),
	% Open Destination Database with all column families, creating them if necessary
	{ok, DstDB, DstCFs} = rocksdb:open(DstPath,
		[{create_if_missing, true}, {create_missing_column_families, true}],
		CFDescriptors),
	% Iterate and copy for each column family
	lists:zipwith(
		fun({SrcCF, DstCF}, ColumnFamily) ->
			ar:console("Copying family ~p~n", [ColumnFamily]),
			copy_column_family(SrcDB, DstDB, SrcCF, DstCF)
		end,
		lists:zip(SrcCFs, DstCFs),
		ColumnFamilies),
	% Close databases
	rocksdb:close(SrcDB),
	rocksdb:close(DstDB).

% Function to copy a specific column family.
copy_column_family(SrcDB, DstDB, SrcCF, DstCF) ->
	% Create an Iterator for this column family in Source Database
	{ok, Itr} = rocksdb:iterator(SrcDB, SrcCF, []),
	copy_from_iterator(Itr, rocksdb:iterator_move(Itr, first), DstDB, DstCF),
	rocksdb:iterator_close(Itr).

% Helper function to copy key/value pairs from iterator to destination DB.
% Recurses until the iterator reports invalid_iterator (end of data).
copy_from_iterator(Itr, Res, DstDB, DstCF) ->
	case Res of
		{ok, Key, Value} ->
			ok = rocksdb:put(DstDB, DstCF, Key, Value, []),
			copy_from_iterator(Itr, rocksdb:iterator_move(Itr, next), DstDB, DstCF);
		{error, invalid_iterator} ->
			% End of iteration
			ok
	end.
%% @doc Union the source module's sync records into the destination module's
%% ar_sync_record_db.
copy_sync_records(SrcDir, DstDir) ->
	ar:console("Copying sync records~n", []),
	SrcPath = filename:join([SrcDir, "rocksdb", "ar_sync_record_db"]),
	DstPath = filename:join([DstDir, "rocksdb", "ar_sync_record_db"]),
	{ok, SrcDB} = rocksdb:open(SrcPath, [{create_if_missing, false}]),
	{ok, DstDB} = rocksdb:open(DstPath, [{create_if_missing, true}]),
	SrcSyncRecords = get_sync_records(SrcDB),
	DstSyncRecords = get_sync_records(DstDB),
	Union = merge_sync_records(SrcSyncRecords, DstSyncRecords),
	put_sync_records(DstDB, Union),
	rocksdb:close(SrcDB),
	rocksdb:close(DstDB).

%% @doc Read and decode the sync records term; default to two empty maps when
%% the key is absent.
get_sync_records(DB) ->
	Record = rocksdb:get(DB, <<"sync_records">>, []),
	case Record of
		{ok, Bin} -> binary_to_term(Bin, [safe]);
		_ -> {#{}, #{}}
	end.

%% @doc Serialize and store the sync records term.
put_sync_records(DB, Intervals) ->
	rocksdb:put(DB, <<"sync_records">>, term_to_binary(Intervals), []).

%% @doc Union the two {ByID, ByIDType} interval-map pairs key by key.
merge_sync_records({SrcSyncRecordByID, SrcSyncRecordByIDType},
		{DstSyncRecordByID, DstSyncRecordByIDType}) ->
	UnionSyncRecordByID = maps:merge_with(
		fun(_Key, Src, Dst) -> ar_intervals:union(Src, Dst) end,
		SrcSyncRecordByID, DstSyncRecordByID),
	UnionRecordByIDType = maps:merge_with(
		fun(_Key, Src, Dst) -> ar_intervals:union(Src, Dst) end,
		SrcSyncRecordByIDType, DstSyncRecordByIDType),
	{UnionSyncRecordByID, UnionRecordByIDType}.

================================================
FILE: apps/arweave/src/ar_domain.erl
================================================

-module(ar_domain).

-export([get_labeling/3, lookup_arweave_txt_record/1, derive_tx_label/2]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Classify Hostname relative to ApexDomain: apex itself, a labeled
%% (subdomain) host, or a configured custom domain; unknown otherwise.
get_labeling(ApexDomain, CustomDomains, Hostname) ->
	Size = byte_size(ApexDomain),
	case binary:match(Hostname, ApexDomain) of
		{0, Size} ->
			apex;
		{N, Size} ->
			%% Strip the trailing dot separating the label from the apex.
			Label = binary:part(Hostname, {0, N - 1}),
			{labeled, Label};
		nomatch ->
			get_labeling_1(CustomDomains, Hostname)
	end.

%% @doc Look up the TXT record at _arweave.<Domain>; concatenate its chunks.
lookup_arweave_txt_record(Domain) ->
	case inet_res:lookup("_arweave." ++ binary_to_list(Domain), in, txt) of
		[] ->
			not_found;
		[RecordChunks | _] ->
			list_to_binary(lists:concat(RecordChunks))
	end.

%% @doc Derive a 12-character base32 label from the TXID and block hash.
derive_tx_label(TXID, BH) ->
	%% NOTE(review): the original expression was garbled to `<>` (invalid
	%% Erlang) by markup stripping; reconstructed as the concatenation of the
	%% two binary inputs that the surrounding code hashes.
	Data = <<TXID/binary, BH/binary>>,
	Digest = crypto:hash(sha256, Data),
	binary:part(ar_base32:encode(Digest), {0, 12}).

%%%===================================================================
%%% Private functions.
%%%===================================================================

get_labeling_1(CustomDomains, Hostname) ->
	case lists:member(Hostname, CustomDomains) of
		true ->
			{custom, Hostname};
		false ->
			unknown
	end.

================================================
FILE: apps/arweave/src/ar_entropy_cache.erl
================================================

-module(ar_entropy_cache).

-export([get/1, clean_up_space/2, put/3, total_size/0]).

-include("ar.hrl").

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the stored value, if any, for the given Key.
-spec get(Key :: string()) -> {ok, term()} | not_found.
get(Key) ->
	get(Key, ar_entropy_cache).

%% @doc Make sure the cache has enough space (i.e., clean up the oldest records, if any)
%% to store Size worth of elements such that the total size does not exceed MaxSize.
%% In other words, if you want to store new elements with the total size Size,
%% call clean_up_space(Size, MaxSize) then call put/3 to store new elements.
-spec clean_up_space(
		Size :: non_neg_integer(),
		MaxSize :: non_neg_integer()
) -> ok.
clean_up_space(Size, MaxSize) ->
	Table = ar_entropy_cache,
	OrderedKeyTable = ar_entropy_cache_ordered_keys,
	clean_up_space(Size, MaxSize, Table, OrderedKeyTable).

%% @doc Store the given Value in the cache. Associate it with the given Size and
%% increase the total cache size accordingly.
-spec put(
		Key :: string(),
		Value :: term(),
		Size :: non_neg_integer()
) -> ok.
put(Key, Value, Size) ->
	Table = ar_entropy_cache,
	OrderedKeyTable = ar_entropy_cache_ordered_keys,
	put(Key, Value, Size, Table, OrderedKeyTable).

%% @doc Return the size of the cache.
-spec total_size() -> non_neg_integer().
total_size() ->
	Table = ar_entropy_cache,
	total_size(Table).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Read the running total-size counter; 0 when the table has no entry yet.
total_size(Table) ->
	case ets:lookup(Table, total_size) of
		[] -> 0;
		[{_, Value}] -> Value
	end.

%% Fetch a cached value and bump its per-key fetch counter.
get(Key, Table) ->
	case ets:lookup(Table, {key, Key}) of
		[] -> not_found;
		[{_, Value}] ->
			%% Track the number of used keys per entropy to estimate the efficiency
			%% of the cache.
			ets:update_counter(Table, {fetched_key_count, Key}, 1,
				{{fetched_key_count, Key}, 0}),
			{ok, Value}
	end.

%% Evict oldest-first (the ordered_set is keyed by insertion timestamp) until
%% Size more bytes fit under MaxSize, then publish the projected total to the
%% prometheus gauge.
clean_up_space(Size, MaxSize, Table, OrderedKeyTable) ->
	TotalSize = total_size(Table),
	case TotalSize + Size > MaxSize of
		true ->
			case ets:first(OrderedKeyTable) of
				'$end_of_table' ->
					ok;
				{_Timestamp, Key, ElementSize} = EarliestKey ->
					%% Remove the value, its size contribution, its ordering
					%% entry, and its fetch counter, then re-check the budget.
					ets:delete(Table, {key, Key}),
					ets:update_counter(Table, total_size, -ElementSize, {total_size, 0}),
					ets:delete(OrderedKeyTable, EarliestKey),
					ets:delete(Table, {fetched_key_count, Key}),
					clean_up_space(Size, MaxSize, Table, OrderedKeyTable)
			end;
		false ->
			prometheus_gauge:set(replica_2_9_entropy_cache, TotalSize + Size),
			ok
	end.

%% Read a key's fetch counter; 0 when the key was never fetched (or evicted).
get_fetched_key_count(Table, Key) ->
	case ets:lookup(Table, {fetched_key_count, Key}) of
		[] -> 0;
		[{_, Count}] -> Count
	end.

%% Insert the value, record its timestamped ordering entry, and grow the
%% total-size counter. Does not enforce MaxSize -- see clean_up_space/4.
put(Key, Value, Size, Table, OrderedKeyTable) ->
	ets:insert(Table, {{key, Key}, Value}),
	Timestamp = os:system_time(microsecond),
	ets:insert(OrderedKeyTable, {{Timestamp, Key, Size}}),
	ets:update_counter(Table, total_size, Size, {total_size, 0}).

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Exercise the private cache API end to end: fetch counting, eviction on
%% over-budget clean-up, and oldest-first trimming across multiple keys.
cache_test() ->
	Table = 'test_entropy_cache_table',
	OrderedKeyTable = 'test_entropy_cache_ordered_key_table',
	ets:new(Table, [set, public, named_table]),
	ets:new(OrderedKeyTable, [ordered_set, public, named_table]),
	?assertEqual(0, get_fetched_key_count(Table, some_key)),
	?assertEqual(not_found, get(some_key, Table)),
	%% A miss does not bump the fetch counter.
	?assertEqual(0, get_fetched_key_count(Table, some_key)),
	clean_up_space(64, 128, Table, OrderedKeyTable),
	put(some_key, some_value, 64, Table, OrderedKeyTable),
	?assertEqual({ok, some_value}, get(some_key, Table)),
	?assertEqual(1, get_fetched_key_count(Table, some_key)),
	?assertEqual({ok, some_value}, get(some_key, Table)),
	?assertEqual(2, get_fetched_key_count(Table, some_key)),
	%% 64 + 64 =< 128: nothing is evicted.
	clean_up_space(64, 128, Table, OrderedKeyTable),
	?assertEqual({ok, some_value}, get(some_key, Table)),
	?assertEqual(3, get_fetched_key_count(Table, some_key)),
	clean_up_space(64, 128, Table, OrderedKeyTable),
	?assertEqual({ok, some_value}, get(some_key, Table)),
	?assertEqual(4, get_fetched_key_count(Table, some_key)),
	clean_up_space(128, 128, Table, OrderedKeyTable),
	%% We requested an allocation of > MaxSize so the old key needs to be removed.
	?assertEqual(not_found, get(some_key, Table)),
	%% Eviction also clears the fetch counter.
	?assertEqual(0, get_fetched_key_count(Table, some_key)),
	%% The put itself does not clean up the cache.
	put(some_key, some_value, 64, Table, OrderedKeyTable),
	put(some_other_key, some_other_value, 64, Table, OrderedKeyTable),
	put(yet_another_key, yet_another_value, 64, Table, OrderedKeyTable),
	?assertEqual(0, get_fetched_key_count(Table, some_key)),
	?assertEqual({ok, some_value}, get(some_key, Table)),
	?assertEqual({ok, some_other_value}, get(some_other_key, Table)),
	?assertEqual({ok, yet_another_value}, get(yet_another_key, Table)),
	?assertEqual(1, get_fetched_key_count(Table, some_key)),
	?assertEqual(1, get_fetched_key_count(Table, some_other_key)),
	?assertEqual(1, get_fetched_key_count(Table, yet_another_key)),
	%% Basically, we are simply reducing the cache 192 -> 128.
	clean_up_space(0, 128, Table, OrderedKeyTable),
	%% The oldest key goes first.
	?assertEqual(not_found, get(some_key, Table)),
	?assertEqual({ok, some_other_value}, get(some_other_key, Table)),
	?assertEqual({ok, yet_another_value}, get(yet_another_key, Table)),
	clean_up_space(64, 128, Table, OrderedKeyTable),
	?assertEqual(not_found, get(some_other_key, Table)),
	?assertEqual({ok, yet_another_value}, get(yet_another_key, Table)).

================================================
FILE: apps/arweave/src/ar_entropy_gen.erl
================================================

-module(ar_entropy_gen).

-behaviour(gen_server).

-export([name/1, register_workers/1, initialize_context/2, map_entropies/8,
	entropy_offsets/2, generate_entropies/2, generate_entropies/4,
	generate_entropy_keys/2, shift_entropy_offset/2]).

-export([start_link/2, init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_sup.hrl").
-include("ar_consensus.hrl").

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Per-worker gen_server state. Field semantics as used by this module:
%% store_id/packing identify the storage module served and its target
%% packing; module_start/module_end/cursor presumably track the offset range
%% and preparation progress -- confirm against init/1 (outside this chunk).
-record(state, {
	store_id,
	packing,
	module_start,
	module_end,
	cursor,
	prepare_status = undefined
}).

%% Shorter wait in tests to keep the suite fast.
-ifdef(AR_TEST).
-define(DEVICE_LOCK_WAIT, 100).
-else.
-define(DEVICE_LOCK_WAIT, 5_000).
-endif.
%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, {StoreID, Packing}) ->
	gen_server:start_link({local, Name}, ?MODULE, {StoreID, Packing}, []).

%% @doc Return the name of the server serving the given StoreID.
name(StoreID) ->
	list_to_atom("ar_entropy_gen_" ++ ar_storage_module:label(StoreID)).

%% @doc Build supervisor child specs for Module's entropy workers: one per
%% configured storage module whose packing needs entropy, plus one per
%% repack-in-place module where either the current or the target packing
%% needs entropy.
register_workers(Module) ->
	{ok, Config} = arweave_config:get_env(),
	ConfiguredWorkers = lists:filtermap(
		fun(StorageModule) ->
			StoreID = ar_storage_module:id(StorageModule),
			Packing = ar_storage_module:get_packing(StoreID),
			case is_entropy_packing(Packing) of
				true ->
					Worker = ?CHILD_WITH_ARGS(
						Module, worker, Module:name(StoreID),
						[Module:name(StoreID), {StoreID, Packing}]),
					{true, Worker};
				false ->
					false
			end
		end,
		Config#config.storage_modules
	),
	RepackInPlaceWorkers = lists:filtermap(
		fun({StorageModule, ToPacking}) ->
			StoreID = ar_storage_module:id(StorageModule),
			%% NOTE(review): get_packing/1 is called with the StorageModule
			%% here but with the StoreID in the branch above -- confirm the
			%% function accepts both forms.
			ConfiguredPacking = ar_storage_module:get_packing(StorageModule),
			%% Note: the config validation will prevent a StoreID from being used in both
			%% `storage_modules` and `repack_in_place_storage_modules`, so there's
			%% no risk of a `Name` clash with the workers spawned above.
			IsEntropyPacking = (
				is_entropy_packing(ConfiguredPacking)
					orelse is_entropy_packing(ToPacking)
			),
			case IsEntropyPacking of
				true ->
					Worker = ?CHILD_WITH_ARGS(
						Module, worker, Module:name(StoreID),
						[Module:name(StoreID), {StoreID, ToPacking}]),
					{true, Worker};
				false ->
					false
			end
		end,
		Config#config.repack_in_place_storage_modules
	),
	ConfiguredWorkers ++ RepackInPlaceWorkers.

-spec initialize_context(ar_storage_module:store_id(), ar_chunk_storage:packing()) ->
	{IsPrepared :: boolean(), RewardAddr :: none | ar_wallet:address()}.
%% @doc Report whether the module's entropy preparation has completed and the
%% reward address the entropy is keyed to. Non-replica_2_9 packings need no
%% preparation and report {true, none}.
initialize_context(StoreID, Packing) ->
	case Packing of
		{replica_2_9, Addr} ->
			{ModuleStart, ModuleEnd} = ar_storage_module:get_range(StoreID),
			Cursor = read_cursor(StoreID, ModuleStart),
			%% The cursor advances past ModuleEnd once preparation is finished.
			case Cursor =< ModuleEnd of
				true ->
					{false, Addr};
				false ->
					{true, Addr}
			end;
		_ ->
			{true, none}
	end.

%% @doc Return true if the given packing relies on 2.9 entropy.
-spec is_entropy_packing(ar_chunk_storage:packing()) -> boolean().
is_entropy_packing(unpacked_padded) ->
	true;
is_entropy_packing({replica_2_9, _}) ->
	true;
is_entropy_packing(_) ->
	false.

%% @doc Return a list of all BucketEndOffsets covered by the entropy needed to encipher
%% the chunk at the given offset. The list returned may include offsets that occur before
%% the provided offset. This is expected if Offset does not refer to a sector 0 chunk.
-spec entropy_offsets(non_neg_integer(), non_neg_integer()) -> [non_neg_integer()].
entropy_offsets(Offset, ModuleEnd) ->
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(Offset),
	%% Rewind to the sector-0 offset of this entropy footprint.
	BucketEndOffset2 = reset_entropy_offset(BucketEndOffset),
	Partition = ar_replica_2_9:get_entropy_partition(BucketEndOffset),
	{_, EntropyPartitionEnd} = ar_replica_2_9:get_entropy_partition_range(Partition),
	%% Never emit offsets past the module end or the entropy partition end.
	End = min(EntropyPartitionEnd, ModuleEnd),
	entropy_offsets2(BucketEndOffset2, End).

%% Walk forward one sector at a time, collecting every bucket end offset up to End.
entropy_offsets2(BucketEndOffset, PaddedPartitionEnd)
		when BucketEndOffset > PaddedPartitionEnd ->
	[];
entropy_offsets2(BucketEndOffset, PaddedPartitionEnd) ->
	NextOffset = shift_entropy_offset(BucketEndOffset, 1),
	[BucketEndOffset | entropy_offsets2(NextOffset, PaddedPartitionEnd)].

%% @doc If we are not at the beginning of the entropy, shift the offset to
%% the left. store_entropy_footprint will traverse the entire 2.9 partition shifting
%% the offset by sector size.
reset_entropy_offset(BucketEndOffset) ->
	%% Sanity checks
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(BucketEndOffset),
	%% End sanity checks
	SliceIndex = ar_replica_2_9:get_slice_index(BucketEndOffset),
	shift_entropy_offset(BucketEndOffset, -SliceIndex).
%% @doc Shift Offset by SectorCount sectors (negative to go left), snapping the
%% result to a chunk bucket end.
shift_entropy_offset(Offset, SectorCount) ->
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	ar_chunk_storage:get_chunk_bucket_end(Offset + SectorSize * SectorCount).

%% @doc Returns a list of 32x 8 MiB entropies. These entropies will need to be sliced
%% and recombined before they can be used. When properly recombined they contain enough
%% entropy to cover 1024 chunks. The chunks covered (aka the "footprint") are distributed
%% throughout the partition
-spec generate_entropies(StoreID :: ar_storage_module:store_id(),
		RewardAddr :: ar_wallet:address(),
		BucketEndOffset :: non_neg_integer(),
		ReplyTo :: pid()) -> ok.
generate_entropies(StoreID, RewardAddr, BucketEndOffset, ReplyTo) ->
	%% Asynchronous variant: the result is sent to ReplyTo as
	%% {entropy, BucketEndOffset, RewardAddr, Entropies}.
	gen_server:cast(name(StoreID),
		{generate_entropies, RewardAddr, BucketEndOffset, ReplyTo}).

-spec generate_entropies(RewardAddr :: ar_wallet:address(),
		BucketEndOffset :: non_neg_integer()) -> [binary()] | {error, term()}.
generate_entropies(RewardAddr, BucketEndOffset) ->
	generate_entropies(RewardAddr, BucketEndOffset, true).

-spec generate_entropies(RewardAddr :: ar_wallet:address(),
		BucketEndOffset :: non_neg_integer(),
		CacheEntropy :: boolean()) -> [binary()] | {error, term()}.
generate_entropies(RewardAddr, BucketEndOffset, CacheEntropy) ->
	%% Wrap the generation in a histogram observation for monitoring.
	prometheus_histogram:observe_duration(replica_2_9_entropy_duration_milliseconds, [],
		fun() ->
			do_generate_entropies(RewardAddr, BucketEndOffset, CacheEntropy)
		end).

%% @doc Fold Fun over the footprint offsets, feeding each one the per-chunk entropy
%% sliced off the front of Entropies.
map_entropies(_Entropies, [], _RangeStart, _Keys, _RewardAddr, _Fun, _Args, Acc) ->
	%% The amount of entropy generated per partition is slightly more than the amount needed.
	%% So at the end of a partition we will have finished processing chunks, but still have
	%% some entropy left. In this case we stop the recursion early and wait for the writes
	%% to complete.
	Acc;
map_entropies(Entropies, [BucketEndOffset | EntropyOffsets],
		RangeStart, Keys, RewardAddr, Fun, Args, Acc) ->
	case take_and_combine_entropy_slices(Entropies) of
		{ChunkEntropy, Rest} ->
			%% Sanity checks
			sanity_check_replica_2_9_entropy_keys(BucketEndOffset, RewardAddr, Keys),
			%% End sanity checks
			Acc2 = case BucketEndOffset > RangeStart of
				true ->
					erlang:apply(Fun,
						[ChunkEntropy, BucketEndOffset, RewardAddr] ++ Args ++ [Acc]);
				false ->
					%% Don't write entropy before the start of the range.
					Acc
			end,
			%% Jump to the next sector covered by this entropy.
			map_entropies(Rest, EntropyOffsets, RangeStart, Keys, RewardAddr,
				Fun, Args, Acc2)
	end.

init({StoreID, Packing}) ->
	?LOG_INFO([{event, ar_entropy_gen_init}, {name, name(StoreID)},
		{store_id, StoreID}, {packing, ar_serialize:encode_packing(Packing, true)}]),
	ConfiguredPacking = ar_storage_module:get_packing(StoreID),
	%% Sanity checks
	true = is_entropy_packing(ConfiguredPacking) orelse is_entropy_packing(Packing),
	%% End sanity checks
	{ModuleStart, ModuleEnd} = ar_storage_module:get_range(StoreID),
	PaddedRangeEnd = ar_chunk_storage:get_chunk_bucket_end(ModuleEnd),
	%% Provided Packing will only differ from the StoreID packing when this
	%% module is configured to repack in place.
	IsRepackInPlace = Packing /= ConfiguredPacking,
	State = case IsRepackInPlace of
		true ->
			#state{};
		false ->
			%% Only kick off the prepare entropy process if we're not repacking in place.
			Cursor = read_cursor(StoreID, ModuleStart),
			?LOG_INFO([{event, read_prepare_replica_2_9_cursor}, {store_id, StoreID},
				{cursor, Cursor}, {module_start, ModuleStart}, {module_end, ModuleEnd},
				{padded_range_end, PaddedRangeEnd}]),
			PrepareStatus =
				case initialize_context(StoreID, Packing) of
					{_IsPrepared, none} ->
						%% ar_entropy_gen is only used for replica_2_9 packing
						?LOG_ERROR([{event, invalid_packing_for_entropy},
							{module, ?MODULE}, {store_id, StoreID},
							{packing, ar_serialize:encode_packing(Packing, true)}]),
						off;
					{false, _} ->
						gen_server:cast(self(), prepare_entropy),
						paused;
					{true, _} ->
						%% Entropy generation is complete
						complete
				end,
			ar_device_lock:set_device_lock_metric(StoreID, prepare, PrepareStatus),
			#state{ cursor = Cursor, prepare_status = PrepareStatus }
	end,
	State2 = State#state{
		store_id = StoreID,
		packing = Packing,
		module_start = ModuleStart,
		module_end = PaddedRangeEnd
	},
	{ok, State2}.

handle_cast(prepare_entropy, State) ->
	#state{ store_id = StoreID } = State,
	%% Only one worker per device may prepare entropy at a time.
	NewStatus = ar_device_lock:acquire_lock(prepare, StoreID, State#state.prepare_status),
	State2 = State#state{ prepare_status = NewStatus },
	State3 = case NewStatus of
		active ->
			do_prepare_entropy(State2);
		paused ->
			%% Retry once the device lock may have been released.
			ar_util:cast_after(?DEVICE_LOCK_WAIT, self(), prepare_entropy),
			State2;
		_ ->
			State2
	end,
	{noreply, State3};
handle_cast({generate_entropies, RewardAddr, BucketEndOffset, ReplyTo}, State) ->
	Entropies = generate_entropies(RewardAddr, BucketEndOffset),
	ReplyTo ! {entropy, BucketEndOffset, RewardAddr, Entropies},
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_call(Call, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {call, Call}]),
	{reply, {error, unhandled_call}, State}.
handle_info({entropy_generated, _Ref, _Entropy}, State) ->
	%% A reply that arrived after collect_entropies/2 already timed out on its Ref.
	?LOG_WARNING([{event, entropy_generation_timed_out}]),
	{noreply, State};
handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, State) ->
	?LOG_INFO([{event, terminate}, {module, ?MODULE}, {reason, Reason},
		{name, name(State#state.store_id)}, {store_id, State#state.store_id}]),
	ok.

%% @doc Generate and store one entropy footprint at the current cursor, then advance
%% the cursor and re-schedule, until the cursor passes the module end.
do_prepare_entropy(State) ->
	#state{ cursor = Start, module_start = ModuleStart, module_end = ModuleEnd,
		packing = Packing, store_id = StoreID } = State,
	{replica_2_9, RewardAddr} = Packing,
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(Start),
	%% Sanity checks:
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(BucketEndOffset),
	true = (
		ar_chunk_storage:get_chunk_bucket_start(Start) ==
			ar_chunk_storage:get_chunk_bucket_start(BucketEndOffset)
	),
	true = (
		max(0, BucketEndOffset - ?DATA_CHUNK_SIZE) ==
			ar_chunk_storage:get_chunk_bucket_start(BucketEndOffset)
	),
	%% End of sanity checks.
	%% Make sure all prior entropy writes are complete.
	ar_entropy_storage:is_ready(StoreID),
	%% Step 1: have we walked past the module end?
	CheckRangeEnd = case BucketEndOffset > ModuleEnd of
		true ->
			ar_device_lock:release_lock(prepare, StoreID),
			?LOG_INFO([{event, storage_module_entropy_preparation_complete},
				{store_id, StoreID}]),
			ar:console("The storage module ~s is prepared for 2.9 replication.~n",
				[StoreID]),
			ar_chunk_storage:set_entropy_complete(StoreID),
			complete;
		false ->
			false
	end,
	%% Step 2: skip offsets whose entropy was already recorded.
	CheckIsRecorded = case CheckRangeEnd of
		complete ->
			complete;
		false ->
			ar_entropy_storage:is_entropy_recorded(BucketEndOffset, Packing, StoreID)
	end,
	%% Step 3: generate and hand the footprint off to ar_entropy_storage.
	StoreEntropy = case CheckIsRecorded of
		complete ->
			complete;
		true ->
			is_recorded;
		false ->
			%% Get all the entropies needed to encipher the chunk at BucketEndOffset.
			Entropies = generate_entropies(RewardAddr, BucketEndOffset, false),
			case Entropies of
				{error, Reason} ->
					{error, Reason};
				_ ->
					EntropyKeys = generate_entropy_keys(RewardAddr, BucketEndOffset),
					EntropyOffsets = entropy_offsets(BucketEndOffset, ModuleEnd),
					ar_entropy_storage:store_entropy_footprint(
						StoreID, Entropies, EntropyOffsets, ModuleStart,
						EntropyKeys, RewardAddr)
			end
	end,
	NextCursor = advance_entropy_offset(BucketEndOffset, Packing, StoreID),
	case StoreEntropy of
		complete ->
			ar_device_lock:set_device_lock_metric(StoreID, prepare, complete),
			State#state{ prepare_status = complete };
		is_recorded ->
			gen_server:cast(self(), prepare_entropy),
			State#state{ cursor = NextCursor };
		{error, Error} ->
			%% Keep the cursor where it is and retry shortly.
			?LOG_WARNING([{event, failed_to_store_entropy}, {cursor, Start},
				{store_id, StoreID}, {reason, io_lib:format("~p", [Error])}]),
			ar_util:cast_after(500, self(), prepare_entropy),
			State;
		ok ->
			gen_server:cast(self(), prepare_entropy),
			%% Persist the cursor so preparation resumes here after a restart;
			%% a failed write is logged but not fatal.
			case store_cursor(NextCursor, StoreID) of
				ok ->
					ok;
				{error, Error} ->
					?LOG_WARNING([{event, failed_to_store_prepare_entropy_cursor},
						{chunk_cursor, NextCursor}, {store_id, StoreID},
						{reason, io_lib:format("~p", [Error])}])
			end,
			State#state{ cursor = NextCursor }
	end.

%% @doc Fan out one entropy-generation request per sub-chunk to ar_packing_server
%% and collect the replies in order.
do_generate_entropies(RewardAddr, BucketEndOffset, CacheEntropy) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	EntropyTasks = lists:map(
		fun(Offset) ->
			Ref = make_ref(),
			ar_packing_server:request_entropy_generation(
				Ref, self(), {RewardAddr, BucketEndOffset, Offset, CacheEntropy}),
			Ref
		end,
		lists:seq(0, ?DATA_CHUNK_SIZE - SubChunkSize, SubChunkSize)),
	Entropies = collect_entropies(EntropyTasks, []),
	case Entropies of
		{error, _Reason} ->
			%% Drop any replies that arrive after a timeout so they don't pile up.
			flush_entropy_messages();
		_ ->
			ok
	end,
	Entropies.

%% @doc Take the first slice of each entropy and combine into a single binary. This binary
%% can be used to encipher a single chunk.
-spec take_and_combine_entropy_slices(Entropies :: [binary()]) ->
	{ChunkEntropy :: binary(), RemainingSlicesOfEachEntropy :: [binary()]}.
take_and_combine_entropy_slices(Entropies) ->
	%% Sanity check: one entropy per sub-chunk of a chunk.
	true = ?COMPOSITE_PACKING_SUB_CHUNK_COUNT == length(Entropies),
	take_and_combine_entropy_slices(Entropies, [], []).

%% Accumulate one sub-chunk-sized slice from the head of each entropy (Acc) and
%% the remainders (RestAcc), preserving order.
take_and_combine_entropy_slices([], Acc, RestAcc) ->
	{iolist_to_binary(Acc), lists:reverse(RestAcc)};
take_and_combine_entropy_slices([<<>> | Entropies], _Acc, _RestAcc) ->
	%% All entropies are expected to run out simultaneously; assert that and stop.
	true = lists:all(fun(Entropy) -> Entropy == <<>> end, Entropies),
	{<<>>, []};
%% NOTE: this clause's binary pattern was garbled in the source (`[<> | Entropies]`,
%% which is not valid Erlang and leaves EntropySlice/Rest unbound); restored to split
%% one ?COMPOSITE_PACKING_SUB_CHUNK_SIZE slice off the head entropy.
take_and_combine_entropy_slices([<< EntropySlice:(?COMPOSITE_PACKING_SUB_CHUNK_SIZE)/binary,
		Rest/binary >> | Entropies], Acc, RestAcc) ->
	take_and_combine_entropy_slices(Entropies, [Acc, EntropySlice], [Rest | RestAcc]).

%% @doc Assert that the provided Keys match the entropy keys derived for
%% PaddedEndOffset, one per sub-chunk.
sanity_check_replica_2_9_entropy_keys(PaddedEndOffset, RewardAddr, Keys) ->
	sanity_check_replica_2_9_entropy_keys(PaddedEndOffset, RewardAddr, 0, Keys).

sanity_check_replica_2_9_entropy_keys(
		_PaddedEndOffset, _RewardAddr, _SubChunkStartOffset, []) ->
	ok;
sanity_check_replica_2_9_entropy_keys(
		PaddedEndOffset, RewardAddr, SubChunkStartOffset, [Key | Keys]) ->
	%% Match assertion: crashes if the key disagrees with the derived key.
	Key = ar_replica_2_9:get_entropy_key(RewardAddr, PaddedEndOffset, SubChunkStartOffset),
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	sanity_check_replica_2_9_entropy_keys(PaddedEndOffset, RewardAddr,
		SubChunkStartOffset + SubChunkSize, Keys).

%% @doc Return the next cursor: the bucket end one chunk past the next offset whose
%% entropy is not yet recorded (or simply one chunk forward if everything ahead is
%% unsynced).
advance_entropy_offset(BucketEndOffset, Packing, StoreID) ->
	case ar_entropy_storage:get_next_unsynced_interval(BucketEndOffset, Packing, StoreID) of
		not_found ->
			BucketEndOffset + ?DATA_CHUNK_SIZE;
		{_, Start} ->
			Start + ?DATA_CHUNK_SIZE
	end.

%% @doc Derive the list of entropy keys for Offset, one per sub-chunk.
generate_entropy_keys(RewardAddr, Offset) ->
	generate_entropy_keys(RewardAddr, Offset, 0).

generate_entropy_keys(_RewardAddr, _Offset, SubChunkStart)
		when SubChunkStart == ?DATA_CHUNK_SIZE ->
	[];
generate_entropy_keys(RewardAddr, Offset, SubChunkStart) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	[ar_replica_2_9:get_entropy_key(RewardAddr, Offset, SubChunkStart)
		| generate_entropy_keys(RewardAddr, Offset, SubChunkStart + SubChunkSize)].
%% @doc Await the {entropy_generated, Ref, Entropy} reply for each pending Ref, in
%% order; returns the entropies or {error, timeout} after 10 minutes per reply.
collect_entropies([], Acc) ->
	lists:reverse(Acc);
collect_entropies([Ref | Rest], Acc) ->
	receive
		{entropy_generated, Ref, Entropy} ->
			collect_entropies(Rest, [Entropy | Acc])
	after 600_000 ->
		?LOG_ERROR([{event, entropy_generation_timeout}, {ref, Ref}]),
		{error, timeout}
	end.

%% @doc Discard any entropy replies still sitting in the mailbox.
flush_entropy_messages() ->
	?LOG_INFO([{event, flush_entropy_messages}]),
	receive
		{entropy_generated, _, _} ->
			flush_entropy_messages()
	after 0 ->
		ok
	end.

%% @doc Read the persisted preparation cursor; falls back to ModuleStart + 1 when the
%% file is missing or unreadable.
read_cursor(StoreID, ModuleStart) ->
	Filepath = ar_chunk_storage:get_filepath("prepare_replica_2_9_cursor", StoreID),
	Default = ModuleStart + 1,
	case file:read_file(Filepath) of
		{ok, Bin} ->
			%% [safe] rejects terms that could create new atoms or funs; any decode
			%% failure or non-integer value falls back to the default.
			case catch binary_to_term(Bin, [safe]) of
				Cursor when is_integer(Cursor) ->
					Cursor;
				_ ->
					Default
			end;
		_ ->
			Default
	end.

%% @doc Persist the preparation cursor for this StoreID.
store_cursor(Cursor, StoreID) ->
	Filepath = ar_chunk_storage:get_filepath("prepare_replica_2_9_cursor", StoreID),
	file:write_file(Filepath, term_to_binary(Cursor)).

%%%===================================================================
%%% Tests.
%%%===================================================================

entropy_offsets_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
		],
		fun test_entropy_offsets/0, 30).
test_entropy_offsets() ->
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	%% With the test consensus constants a sector spans two chunks.
	?assertEqual(2 * ?DATA_CHUNK_SIZE, SectorSize),
	Module0 = {ar_block:partition_size(), 0, unpacked},
	Module1 = {ar_block:partition_size(), 1, unpacked},
	{_ModuleStart0, ModuleEnd0} = ar_storage_module:module_range(Module0),
	{_ModuleStart1, ModuleEnd1} = ar_storage_module:module_range(Module1),
	PaddedModuleEnd0 = ar_chunk_storage:get_chunk_bucket_end(ModuleEnd0),
	PaddedModuleEnd1 = ar_chunk_storage:get_chunk_bucket_end(ModuleEnd1),
	?assertEqual(2097152, PaddedModuleEnd0, "1"),
	?assertEqual(4194304, PaddedModuleEnd1, "2"),
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(0, PaddedModuleEnd0), "3"), %% bucket end: 262144
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(1000, PaddedModuleEnd0), "4"), %% bucket end: 262144
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(262144, PaddedModuleEnd0), "5"), %% bucket end: 262144
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(524288, PaddedModuleEnd0), "6"), %% bucket end: 524288
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(699999, PaddedModuleEnd0), "7"), %% bucket end: 524288
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(700000, PaddedModuleEnd0), "8"), %% bucket end: 524288
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(700001, PaddedModuleEnd0), "9"), %% bucket end: 786432
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(786432, PaddedModuleEnd0), "10"), %% bucket end: 786432
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(786433, PaddedModuleEnd0), "11"), %% bucket end: 786432
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(1048576, PaddedModuleEnd0), "12"), %% bucket end: 1048576
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(1835007, PaddedModuleEnd0), "13"), %% bucket end: 1835008
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(1835008, PaddedModuleEnd0), "14"), %% bucket end: 1835008
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(1835009, PaddedModuleEnd0), "15"), %% bucket end: 1835008
	%% entropy partition is determined by the bucket *start* offset. So offsets that are in
	%% recall partition 1 may still be in entropy partition 0 (e.g. 2000001, 2097152)
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(1999999, PaddedModuleEnd0), "16"), %% bucket end: 1835008
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(2000000, PaddedModuleEnd0), "17"), %% bucket end: 1835008
	?assertEqual([262144, 786432, 1310720, 1835008],
		entropy_offsets(2000001, PaddedModuleEnd0), "18"), %% bucket end: 1835008
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(2097152, PaddedModuleEnd0), "19"), %% bucket end: 2097152
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(2097153, PaddedModuleEnd0), "20"), %% bucket end: 2097152
	%% Even when ModuleEnd is high, we should limit entropy to the current entropy partition.
	?assertEqual([524288, 1048576, 1572864, 2097152],
		entropy_offsets(2097152, PaddedModuleEnd1), "21"), %% bucket end: 2097152
	%% Restrict offsets to module end.
	?assertEqual([524288, 1048576, 1572864],
		entropy_offsets(2097152, 2_000_000), "22"), %% bucket end: 2097152
	%% Entropy partition 1
	?assertEqual([2359296, 2883584, 3407872, 3932160],
		entropy_offsets(2359297, PaddedModuleEnd1), "23"), %% bucket end: 2359296
	?assertEqual([2621440, 3145728, 3670016, 4194304],
		entropy_offsets(2621441, PaddedModuleEnd1), "24"), %% bucket end: 2621440
	ok.


================================================
FILE: apps/arweave/src/ar_entropy_storage.erl
================================================
-module(ar_entropy_storage).

-behaviour(gen_server).
-export([name/1, acquire_semaphore/1, release_semaphore/1, is_ready/1,
	sync_record_id/0, is_entropy_recorded/3, get_next_unsynced_interval/3,
	add_record/3, add_record_async/4, delete_record/2, delete_record/3,
	store_entropy_footprint/6, store_entropy/4, record_chunk/5]).

-export([start_link/2, init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%% store_id: the ID of the storage module this server writes entropy for.
-record(state, {
	store_id
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, {StoreID, _}) ->
	gen_server:start_link({local, Name}, ?MODULE, StoreID, []).

%% @doc Return the name of the server serving the given StoreID.
name(StoreID) ->
	list_to_atom("ar_entropy_storage_" ++ ar_storage_module:label(StoreID)).

init(StoreID) ->
	?LOG_INFO([{event, ar_entropy_storage_init}, {name, name(StoreID)},
		{store_id, StoreID}]),
	{ok, #state{ store_id = StoreID }}.

%% @doc The sync-record identifier under which 2.9.5 entropy intervals are tracked.
sync_record_id() ->
	ar_chunk_storage_replica_2_9_5_entropy.

%% @doc Write all of the entropies in a full 256 MiB entropy footprint to disk.
-spec store_entropy_footprint(
		StoreID :: ar_storage_module:store_id(),
		Entropies :: [binary()],
		EntropyOffsets :: [non_neg_integer()],
		RangeStart :: non_neg_integer(),
		Keys :: [binary()],
		RewardAddr :: ar_wallet:address()) -> ok.
store_entropy_footprint(StoreID, Entropies, EntropyOffsets, RangeStart, Keys, RewardAddr) ->
	%% Asynchronous: the write happens in the server process serving StoreID.
	gen_server:cast(name(StoreID),
		{store_entropy_footprint, Entropies, EntropyOffsets, RangeStart, Keys, RewardAddr}).
%% @doc Synchronously store the entropy for a single chunk; returns false when the
%% call fails (e.g. times out or the server is down).
store_entropy(ChunkEntropy, BucketEndOffset, StoreID, RewardAddr) ->
	case catch gen_server:call(name(StoreID),
			{store_entropy, ChunkEntropy, BucketEndOffset, StoreID, RewardAddr},
			?DEFAULT_CALL_TIMEOUT) of
		{'EXIT', {Reason, {gen_server, call, _}}} ->
			?LOG_WARNING([{event, store_entropy}, {module, ?MODULE},
				{name, name(StoreID)}, {store_id, StoreID},
				{bucket_end_offset, BucketEndOffset}, {reason, Reason}]),
			false;
		Reply ->
			Reply
	end.

%% @doc Block until the server has drained its queue (any previously cast footprint
%% writes are done); returns false if the call fails.
is_ready(StoreID) ->
	case catch gen_server:call(name(StoreID), is_ready, ?DEFAULT_CALL_TIMEOUT) of
		{'EXIT', {Reason, {gen_server, call, _}}} ->
			?LOG_WARNING([{event, is_ready_error}, {module, ?MODULE},
				{name, name(StoreID)}, {store_id, StoreID}, {reason, Reason}]),
			false;
		Reply ->
			Reply
	end.

handle_cast({store_entropy_footprint,
		Entropies, EntropyOffsets, RangeStart, Keys, RewardAddr}, State) ->
	#state{ store_id = StoreID } = State,
	Start = erlang:monotonic_time(millisecond),
	%% Write one chunk's worth of entropy at each footprint offset.
	ar_entropy_gen:map_entropies(Entropies, EntropyOffsets, RangeStart, Keys,
		RewardAddr, fun do_store_entropy/5, [StoreID], ok),
	End = erlang:monotonic_time(millisecond),
	?LOG_DEBUG([{event, store_entropy_footprint}, {module, ?MODULE},
		{name, name(StoreID)}, {offset, hd(EntropyOffsets)},
		{num_entropies, length(Entropies)}, {duration, End - Start}]),
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_call(is_ready, _From, State) ->
	{reply, true, State};
handle_call({store_entropy, ChunkEntropy, BucketEndOffset, StoreID, RewardAddr},
		_From, State) ->
	%% StoreID in the request must match this server's store_id (asserted by the match).
	#state{ store_id = StoreID } = State,
	do_store_entropy(ChunkEntropy, BucketEndOffset, RewardAddr, StoreID),
	{reply, ok, State};
handle_call(Call, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {call, Call}]),
	{reply, {error, unhandled_call}, State}.
%% Log the shutdown reason together with the server identity.
terminate(Reason, State) ->
	StoreID = State#state.store_id,
	?LOG_INFO([{event, terminate}, {module, ?MODULE}, {reason, Reason},
		{name, name(StoreID)}, {store_id, StoreID}]),
	ok.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Message}]),
	{noreply, State}.

%% @doc Return true if the 2.9 entropy with the given offset is recorded.
%% Only replica_2_9 packings can have recorded entropy; anything else is false.
is_entropy_recorded(PaddedEndOffset, {replica_2_9, _} = Packing, StoreID) ->
	Byte = ar_chunk_storage:get_chunk_bucket_start(PaddedEndOffset) + 1,
	case ar_sync_record:is_recorded(Byte, Packing, sync_record_id(), StoreID) of
		false ->
			%% Included for backwards compatibility with entropy written prior to 2.9.5.
			ar_sync_record:is_recorded(Byte,
				ar_chunk_storage_replica_2_9_1_entropy, StoreID);
		_ ->
			true
	end;
is_entropy_recorded(_PaddedEndOffset, _Packing, _StoreID) ->
	false.

%% @doc Return the next interval at or after Offset that has no recorded entropy,
%% checking the current record first and the pre-2.9.5 record as a fallback.
get_next_unsynced_interval(Offset, Packing, StoreID) ->
	Current = ar_sync_record:get_next_unsynced_interval(
		Offset, infinity, Packing, sync_record_id(), StoreID),
	case Current of
		not_found ->
			%% Included for backwards compatibility with entropy written prior to 2.9.5.
			ar_sync_record:get_next_unsynced_interval(
				Offset, infinity, ar_chunk_storage_replica_2_9_1_entropy, StoreID);
		Interval ->
			Interval
	end.
%% @doc Register a freshly written entropy chunk in the sync records. When IsComplete
%% is true the chunk was enciphered with data too, so it is also recorded as a stored
%% packed chunk in ar_chunk_storage / ar_data_sync (and the footprint record when
%% supported).
update_sync_records(IsComplete, PaddedEndOffset, StoreID, RewardAddr) ->
	BucketEnd = ar_chunk_storage:get_chunk_bucket_end(PaddedEndOffset),
	add_record_async(replica_2_9_entropy, BucketEnd, {replica_2_9, RewardAddr}, StoreID),
	prometheus_counter:inc(replica_2_9_entropy_stored,
		[ar_storage_module:label(StoreID)], ?DATA_CHUNK_SIZE),
	StartOffset = PaddedEndOffset - ?DATA_CHUNK_SIZE,
	case IsComplete of
		true ->
			Packing = {replica_2_9, RewardAddr},
			prometheus_counter:inc(chunks_stored,
				[ar_storage_module:packing_label(Packing),
					ar_storage_module:label(StoreID)]),
			ar_sync_record:add_async(replica_2_9_entropy_with_chunk,
				PaddedEndOffset, StartOffset, ar_chunk_storage, StoreID),
			ar_sync_record:add_async(replica_2_9_entropy_with_chunk,
				PaddedEndOffset, StartOffset, {replica_2_9, RewardAddr},
				ar_data_sync, StoreID),
			%% Here we assume we do not store unpadded small chunks (small chunks
			%% before the strict data split threshold), thus ?DATA_CHUNK_SIZE.
			case ar_data_sync:is_footprint_record_supported(PaddedEndOffset,
					?DATA_CHUNK_SIZE, Packing) of
				true ->
					ar_footprint_record:add_async(replica_2_9_entropy_with_chunk,
						PaddedEndOffset, Packing, StoreID);
				false ->
					ok
			end;
		false ->
			ok
	end.

%% @doc Synchronously record one chunk-sized entropy interval ending at BucketEndOffset.
add_record(BucketEndOffset, {replica_2_9, _} = Packing, StoreID) ->
	BucketStartOffset = BucketEndOffset - ?DATA_CHUNK_SIZE,
	ar_sync_record:add(BucketEndOffset, BucketStartOffset, Packing,
		sync_record_id(), StoreID).

%% @doc Asynchronous variant of add_record/3; Event tags the operation for logging.
add_record_async(Event, BucketEndOffset, {replica_2_9, _} = Packing, StoreID) ->
	BucketStartOffset = BucketEndOffset - ?DATA_CHUNK_SIZE,
	ar_sync_record:add_async(Event, BucketEndOffset, BucketStartOffset,
		Packing, sync_record_id(), StoreID).

%% @doc Remove the entropy record for the bucket containing PaddedEndOffset.
delete_record(PaddedEndOffset, StoreID) ->
	BucketStart = ar_chunk_storage:get_chunk_bucket_start(PaddedEndOffset),
	delete_record(BucketStart + ?DATA_CHUNK_SIZE, BucketStart, StoreID).
%% @doc Remove the entropy record for the given interval from both the current and the
%% pre-2.9.5 sync records.
delete_record(EndOffset, StartOffset, StoreID) ->
	case ar_sync_record:delete(EndOffset, StartOffset, sync_record_id(), StoreID) of
		ok ->
			%% Included for backwards compatibility with entropy written prior to 2.9.5.
			ar_sync_record:delete(
				EndOffset, StartOffset, ar_chunk_storage_replica_2_9_1_entropy, StoreID);
		Error ->
			Error
	end.

%% @doc Regenerate (on demand) the per-chunk entropy for PaddedEndOffset by generating
%% the full entropy set and extracting this chunk's slice.
generate_missing_entropy(PaddedEndOffset, RewardAddr) ->
	Entropies = ar_entropy_gen:generate_entropies(RewardAddr, PaddedEndOffset),
	case Entropies of
		{error, Reason} ->
			{error, Reason};
		_ ->
			EntropyIndex = ar_replica_2_9:get_slice_index(PaddedEndOffset),
			take_combined_entropy_by_index(Entropies, EntropyIndex)
	end.

%% @doc Store an unpacked_padded chunk: if this offset's entropy is already on disk,
%% read it, encipher the chunk and store it as replica_2_9; otherwise either store the
%% chunk as unpacked_padded to be enciphered later (preparation still running) or
%% regenerate the missing entropy on the spot (preparation claims to be done).
record_chunk(PaddedEndOffset, Chunk, StoreID, FileIndex, {IsPrepared, RewardAddr}) ->
	%% Sanity checks
	PaddedEndOffset = ar_block:get_chunk_padded_offset(PaddedEndOffset),
	%% End sanity checks
	Packing = {replica_2_9, RewardAddr},
	StartOffset = ar_chunk_storage:get_chunk_bucket_start(PaddedEndOffset),
	{_ChunkFileStart, Filepath, _Position, _ChunkOffset} =
		ar_chunk_storage:locate_chunk_on_disk(PaddedEndOffset, StoreID),
	%% Serialize access to this chunk file against concurrent entropy writes.
	acquire_semaphore(Filepath),
	CheckIsChunkStoredAlready =
		ar_sync_record:is_recorded(PaddedEndOffset, ar_chunk_storage, StoreID),
	CheckIsEntropyRecorded = case CheckIsChunkStoredAlready of
		true ->
			{error, already_stored};
		false ->
			is_entropy_recorded(PaddedEndOffset, Packing, StoreID)
	end,
	ReadEntropy = case CheckIsEntropyRecorded of
		{error, _} = Error ->
			Error;
		false ->
			case IsPrepared of
				false ->
					no_entropy_yet;
				true ->
					missing_entropy
			end;
		true ->
			ar_chunk_storage:get(StartOffset, StartOffset, StoreID)
	end,
	RecordChunk = case ReadEntropy of
		{error, _} = Error2 ->
			Error2;
		not_found ->
			%% The record claimed entropy was present but the read failed; drop the
			%% stale record so preparation revisits the offset.
			delete_record(PaddedEndOffset, StoreID),
			{error, not_prepared_yet};
		missing_entropy ->
			?LOG_WARNING([{event, missing_entropy}, {padded_end_offset, PaddedEndOffset},
				{store_id, StoreID},
				{packing, ar_serialize:encode_packing(Packing, true)}]),
			Entropy = generate_missing_entropy(PaddedEndOffset, RewardAddr),
			case Entropy of
				{error, Reason} ->
					{error, Reason};
				_ ->
					PackedChunk =
						ar_packing_server:encipher_replica_2_9_chunk(Chunk, Entropy),
					ar_chunk_storage:record_chunk(
						PaddedEndOffset, PackedChunk, Packing, StoreID, FileIndex)
			end;
		no_entropy_yet ->
			%% Keep the chunk unpacked; do_store_entropy/4 will encipher it later.
			ar_chunk_storage:record_chunk(
				PaddedEndOffset, Chunk, unpacked_padded, StoreID, FileIndex);
		{_EndOffset, Entropy} ->
			PackedChunk = ar_packing_server:encipher_replica_2_9_chunk(Chunk, Entropy),
			ar_chunk_storage:record_chunk(
				PaddedEndOffset, PackedChunk, Packing, StoreID, FileIndex)
	end,
	release_semaphore(Filepath),
	RecordChunk.

%% Arity-5 adapter used as the fold fun by ar_entropy_gen:map_entropies/8; the
%% trailing `ok` is the (unused) accumulator.
do_store_entropy(ChunkEntropy, BucketEndOffset, RewardAddr, StoreID, ok) ->
	do_store_entropy(ChunkEntropy, BucketEndOffset, RewardAddr, StoreID).

%% @doc Write one chunk's entropy. If an unpacked_padded chunk is already stored in
%% this bucket, encipher it with the entropy and store the packed result; otherwise
%% store the raw entropy to be combined with a chunk later.
do_store_entropy(ChunkEntropy, BucketEndOffset, RewardAddr, StoreID) ->
	%% Sanity checks
	true = byte_size(ChunkEntropy) == ?DATA_CHUNK_SIZE,
	%% End sanity checks
	Byte = ar_chunk_storage:get_chunk_byte_from_bucket_end(BucketEndOffset),
	CheckUnpackedChunkRecorded = ar_sync_record:get_interval(
		Byte + 1, ar_chunk_storage:sync_record_id(unpacked_padded), StoreID),
	{IsUnpackedChunkRecorded, PaddedEndOffset} =
		case CheckUnpackedChunkRecorded of
			not_found ->
				{false, BucketEndOffset};
			{_IntervalEnd, IntervalStart} ->
				EndOffset2 = IntervalStart
					+ ar_util:floor_int(Byte - IntervalStart, ?DATA_CHUNK_SIZE)
					+ ?DATA_CHUNK_SIZE,
				case ar_chunk_storage:get_chunk_bucket_end(EndOffset2) of
					BucketEndOffset ->
						{true, EndOffset2};
					_ ->
						%% This chunk is from a different bucket. It may happen near the
						%% strict data split threshold where there is no single byte
						%% unambiguously determining the bucket the chunk will be
						%% routed to.
						?LOG_INFO([{event,
								record_entropy_read_chunk_from_another_bucket},
							{bucket_end_offset, BucketEndOffset},
							{chunk_end_offset, EndOffset2}]),
						{false, BucketEndOffset}
				end
		end,
	{ChunkFileStart, Filepath, _Position, _ChunkOffset} =
		ar_chunk_storage:locate_chunk_on_disk(PaddedEndOffset, StoreID),
	%% We allow generating and filling in the 2.9 entropy and storing unpacked chunks (to
	%% be enciphered later) asynchronously. Whatever comes first, is stored.
	%% If the other counterpart is stored already, we read it, encipher and store the
	%% packed chunk.
	acquire_semaphore(Filepath),
	Chunk = case IsUnpackedChunkRecorded of
		true ->
			StartOffset = PaddedEndOffset - ?DATA_CHUNK_SIZE,
			case ar_chunk_storage:get(Byte, StartOffset, StoreID) of
				not_found ->
					{error, not_found};
				{error, _} = Error ->
					Error;
				{_, UnpackedChunk} ->
					%% The chunk is about to change packing; drop its unpacked records.
					ar_sync_record:delete(PaddedEndOffset, StartOffset,
						ar_data_sync, StoreID),
					ar_footprint_record:delete(PaddedEndOffset, StoreID),
					ar_packing_server:encipher_replica_2_9_chunk(
						UnpackedChunk, ChunkEntropy)
			end;
		false ->
			%% The entropy for the first sub-chunk of the chunk.
			%% The zero-offset does not have a real meaning, it is set
			%% to make sure we pass offset validation on read.
			ChunkEntropy
	end,
	Result = case Chunk of
		{error, _} = Error2 ->
			Error2;
		_ ->
			WriteChunkResult = ar_chunk_storage:write_chunk(
				PaddedEndOffset, Chunk, #{}, StoreID),
			case WriteChunkResult of
				{ok, Filepath} ->
					ets:insert(chunk_storage_file_index,
						{{ChunkFileStart, StoreID}, Filepath}),
					update_sync_records(
						IsUnpackedChunkRecorded, PaddedEndOffset, StoreID, RewardAddr);
				Error2 ->
					Error2
			end
	end,
	case Result of
		{error, Reason} ->
			?LOG_ERROR([{event, failed_to_store_replica_2_9_chunk_entropy},
				{filepath, Filepath}, {byte, Byte},
				{padded_end_offset, PaddedEndOffset},
				{bucket_end_offset, BucketEndOffset}, {store_id, StoreID},
				{reason, io_lib:format("~p", [Reason])}]);
		_ ->
			ok
	end,
	release_semaphore(Filepath),
	ok.
%% @doc Concatenate the Index-th sub-chunk-sized slice of each entropy into the
%% per-chunk entropy binary.
take_combined_entropy_by_index(Entropies, Index) ->
	take_combined_entropy_by_index(Entropies, Index, []).

take_combined_entropy_by_index([], _Index, Acc) ->
	iolist_to_binary(Acc);
take_combined_entropy_by_index([Entropy | Entropies], Index, Acc) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	take_combined_entropy_by_index(Entropies, Index,
		[Acc, binary:part(Entropy, Index * SubChunkSize, SubChunkSize)]).

%% @doc Spin (20 ms backoff) until this process wins the per-file ETS semaphore.
acquire_semaphore(Filepath) ->
	case ets:insert_new(ar_entropy_storage, {{semaphore, Filepath}}) of
		false ->
			?LOG_DEBUG([
				{event, details_store_chunk}, {section, waiting_on_semaphore},
				{filepath, Filepath}]),
			timer:sleep(20),
			acquire_semaphore(Filepath);
		true ->
			ok
	end.

%% @doc Release the per-file semaphore taken by acquire_semaphore/1.
release_semaphore(Filepath) ->
	ets:delete(ar_entropy_storage, {semaphore, Filepath}).

%%%===================================================================
%%% Tests.
%%%===================================================================

replica_2_9_test_() ->
	{timeout, 60, fun test_replica_2_9/0}.

test_replica_2_9() ->
	%% The assertions below hardcode offsets relative to this threshold.
	case ar_block:strict_data_split_threshold() of
		786432 ->
			ok;
		_ ->
			throw(unexpected_strict_data_split_threshold)
	end,
	RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	Packing = {replica_2_9, RewardAddr},
	StorageModules = [
		{ar_block:partition_size(), 0, Packing},
		{ar_block:partition_size(), 1, Packing}
	],
	{ok, Config} = arweave_config:get_env(),
	try
		ar_test_node:start(#{
			reward_addr => RewardAddr,
			storage_modules => StorageModules
		}),
		StoreID1 = ar_storage_module:id(lists:nth(1, StorageModules)),
		StoreID2 = ar_storage_module:id(lists:nth(2, StorageModules)),
		C1 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
		%% ar_chunk_storage does not allow overwriting a chunk
		%% with an unpacked_padded chunk.
		?assertEqual({error, already_stored},
			ar_chunk_storage:put(?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID1)),
		?assertEqual({error, already_stored},
			ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID1)),
		?assertEqual({error, already_stored},
			ar_chunk_storage:put(3 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID1)),
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(?DATA_CHUNK_SIZE, C1, Packing, StoreID1)),
		assert_get(C1, ?DATA_CHUNK_SIZE, StoreID1),
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(2 * ?DATA_CHUNK_SIZE, C1, Packing, StoreID1)),
		assert_get(C1, 2 * ?DATA_CHUNK_SIZE, StoreID1),
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(3 * ?DATA_CHUNK_SIZE, C1, Packing, StoreID1)),
		assert_get(C1, 3 * ?DATA_CHUNK_SIZE, StoreID1),
		%% Store the new unpacked_padded chunk. Expect it to be enciphered with
		%% its entropy.
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(4 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID1)),
		{ok, P1, _Entropy} = ar_packing_server:pack_replica_2_9_chunk(RewardAddr,
			4 * ?DATA_CHUNK_SIZE, C1),
		assert_get(P1, 4 * ?DATA_CHUNK_SIZE, StoreID1),
		assert_get(not_found, 8 * ?DATA_CHUNK_SIZE, StoreID1),
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(8 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID1)),
		{ok, P2, _} = ar_packing_server:pack_replica_2_9_chunk(RewardAddr,
			8 * ?DATA_CHUNK_SIZE, C1),
		assert_get(P2, 8 * ?DATA_CHUNK_SIZE, StoreID1),
		%% Store chunks in the second partition.
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(12 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID2)),
		{ok, P3, Entropy3} = ar_packing_server:pack_replica_2_9_chunk(RewardAddr,
			12 * ?DATA_CHUNK_SIZE, C1),
		assert_get(P3, 12 * ?DATA_CHUNK_SIZE, StoreID2),
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(15 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID2)),
		{ok, P4, Entropy4} = ar_packing_server:pack_replica_2_9_chunk(RewardAddr,
			15 * ?DATA_CHUNK_SIZE, C1),
		assert_get(P4, 15 * ?DATA_CHUNK_SIZE, StoreID2),
		?assertNotEqual(P3, P4),
		?assertNotEqual(Entropy3, Entropy4),
		?assertEqual({ok, Packing},
			ar_chunk_storage:put(16 * ?DATA_CHUNK_SIZE, C1, unpacked_padded, StoreID2)),
		{ok, P5, Entropy5} = ar_packing_server:pack_replica_2_9_chunk(RewardAddr,
			16 * ?DATA_CHUNK_SIZE, C1),
		assert_get(P5, 16 * ?DATA_CHUNK_SIZE, StoreID2),
		?assertNotEqual(Entropy4, Entropy5)
	after
		ok = arweave_config:set_env(Config)
	end.

%% @doc Assert that reading any byte inside the chunk ending at Offset returns the
%% expected {Offset, Chunk} (or not_found).
assert_get(Expected, Offset, StoreID) ->
	ExpectedResult = case Expected of
		not_found ->
			not_found;
		_ ->
			{Offset, Expected}
	end,
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - 1, StoreID)),
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - 2, StoreID)),
	?assertEqual(ExpectedResult, ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE + 1, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE + 2, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 2, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 2 + 1, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 2 - 1, StoreID)),
	?assertEqual(ExpectedResult,
		ar_chunk_storage:get(Offset - ?DATA_CHUNK_SIZE div 3, StoreID)).
================================================ FILE: apps/arweave/src/ar_ets_intervals.erl ================================================
%%% @doc The utilities for managing sets of non-overlapping intervals stored in an ETS table.
%%% The API is similar to the one of the ar_intervals module. Keeping the intervals in ETS
%%% is a convenient way to share them between processes, e.g. the mining module can quickly
%%% check whether the given recall byte is synced. ar_intervals, in turn, is helpful
%%% for manipulating multiple sets of intervals, e.g. the syncing process uses it to look for
%%% the intersections between our data and peers' data.
%%%
%%% Storage layout: each interval (Start, End] is stored as the object {End, Start}
%%% keyed by End, so ets:next(Table, Offset) finds the first interval that may
%%% contain or follow Offset. Readers tolerate concurrent writers: an empty
%%% ets:lookup/2 after a successful ets:next/2 means the key was deleted in
%%% between, and the operation simply retries.
%%% @end
-module(ar_ets_intervals).

-export([init_from_gb_set/2, add/3, delete/3, cut/2, is_inside/2,
		get_interval_with_byte/2, get_next_interval_outside/3,
		get_next_interval/3, get_intersection_size/3]).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Record intervals from the given gb_sets set.
init_from_gb_set(Table, Set) ->
	init_from_gb_set_iterator(Table, gb_sets:iterator(Set)).

%% @doc Record an interval, bytes Start + 1, Start + 2 ... End.
%% Intervals that touch or overlap the new one are merged into a single
%% interval; the absorbed intervals' keys are then removed.
add(Table, End, Start) when End > Start ->
	{End2, Start2, InnerEnds} = find_largest_continuous_interval(Table, End, Start),
	ets:insert(Table, [{End2, Start2}]),
	remove_inner_intervals(Table, InnerEnds, End2).

%% @doc Remove the given interval, bytes Start + 1, Start + 2 ... End.
%% Intervals partially covered by [Start, End) are trimmed; fully covered
%% ones are deleted. May recurse to process several overlapping intervals.
delete(Table, End, Start) when End > Start ->
	case ets:next(Table, Start) of
		'$end_of_table' ->
			ok;
		End2 ->
			case ets:lookup(Table, End2) of
				[] ->
					%% The key has just been removed, very unlucky timing.
					delete(Table, End, Start);
				[{_End2, Start2}] when Start2 >= End ->
					%% The next interval starts at or after End: nothing to remove.
					ok;
				[{End2, Start2}] ->
					%% Keep the left remainder (Start, Start2] if any.
					Insert = case Start2 < Start of
						true -> [{Start, Start2}];
						false -> []
					end,
					%% Keep the right remainder (End, End2] if any.
					Insert2 = case End2 > End of
						true -> [{End2, End} | Insert];
						false -> Insert
					end,
					ets:insert(Table, Insert2),
					case End2 > End of
						true ->
							%% We have already inserted {End2, End} above.
							ok;
						false ->
							ets:delete(Table, End2),
							case End2 < End of
								true ->
									%% More intervals may overlap (End2, End].
									delete(Table, End, End2);
								false ->
									ok
							end
					end
			end
	end.

%% @doc Cut the set by removing all the intervals and interval's parts above Offset.
cut(Table, Offset) ->
	case ets:next(Table, Offset) of
		'$end_of_table' ->
			ok;
		End ->
			case ets:lookup(Table, End) of
				[] ->
					%% The key has just been removed, very unlucky timing.
					cut(Table, Offset);
				[{End, Start}] when Start < Offset ->
					%% The interval straddles Offset: truncate it to Offset.
					ets:insert(Table, [{Offset, Start}]),
					ets:delete(Table, End),
					cut(Table, Offset);
				[{End, _Start}] ->
					%% The interval lies entirely above Offset: drop it.
					ets:delete(Table, End),
					cut(Table, Offset)
			end
	end.

%% @doc Return true if the given offset is inside one of the intervals, including
%% the right bound, excluding the left bound.
%%
%% E.g. a table with byte 1:
%% add(table, 1, 0),
%% is_inside(table, 1) == true,
%% is_inside(table, 0) == false
%% for a table with bytes 3, 4, and 5:
%% add(table, 5, 2),
%% is_inside(table, 5) == true,
%% is_inside(table, 4) == true,
%% is_inside(table, 3) == true,
%% is_inside(table, 2) == false.
%% @end
is_inside(Table, Offset) ->
	case ets:next(Table, Offset - 1) of
		'$end_of_table' ->
			false;
		NextOffset ->
			case ets:lookup(Table, NextOffset) of
				[{NextOffset, Start}] ->
					Offset > Start;
				[] ->
					%% The key should have been just removed, unlucky timing.
					is_inside(Table, Offset)
			end
	end.

%% @doc Return the interval containing the given offset, including the right bound,
%% excluding the left bound, or not_found.
%% @end
get_interval_with_byte(Table, Offset) ->
	case ets:next(Table, Offset - 1) of
		'$end_of_table' ->
			not_found;
		NextOffset ->
			case ets:lookup(Table, NextOffset) of
				[{NextOffset, Start}] ->
					%% The interval contains Offset only when Offset > Start;
					%% otherwise the interval lies strictly above it.
					case Offset > Start of
						true ->
							{NextOffset, Start};
						false ->
							not_found
					end;
				[] ->
					%% The key should have been just removed, unlucky timing.
					get_interval_with_byte(Table, Offset)
			end
	end.

%% @doc Return the lowest interval outside the recorded set of intervals,
%% strictly above the given Offset, and with the end offset at most EndOffsetUpperBound.
%% Return not_found if there are no such intervals.
get_next_interval_outside(_Table, Offset, EndOffsetUpperBound)
		when Offset >= EndOffsetUpperBound ->
	not_found;
get_next_interval_outside(Table, Offset, EndOffsetUpperBound) ->
	case ets:next(Table, Offset) of
		'$end_of_table' ->
			%% No recorded intervals above Offset: everything up to the bound is a gap.
			{EndOffsetUpperBound, Offset};
		NextOffset ->
			case ets:lookup(Table, NextOffset) of
				[{NextOffset, Start}] when Start > Offset ->
					%% The gap between Offset and the next interval's start.
					{min(EndOffsetUpperBound, Start), Offset};
				_ ->
					%% Offset falls inside the interval ending at NextOffset
					%% (or the key vanished): continue past it.
					get_next_interval_outside(Table, NextOffset, EndOffsetUpperBound)
			end
	end.

%% @doc Return the lowest interval inside the recorded set of intervals with the
%% end offset strictly above the given offset, and with the end offset
%% at most EndOffsetUpperBound.
%% Return not_found if there are no such intervals.
get_next_interval(_Table, Offset, EndOffsetUpperBound)
		when Offset >= EndOffsetUpperBound ->
	not_found;
get_next_interval(Table, Offset, EndOffsetUpperBound) ->
	case ets:next(Table, Offset) of
		'$end_of_table' ->
			not_found;
		NextOffset ->
			case ets:lookup(Table, NextOffset) of
				[{_NextOffset, Start}] when Start >= EndOffsetUpperBound ->
					not_found;
				[{NextOffset, Start}] ->
					%% Clamp the right edge to the bound.
					{min(NextOffset, EndOffsetUpperBound), Start};
				[] ->
					%% The key should have been just removed, unlucky timing.
					get_next_interval(Table, Offset, EndOffsetUpperBound)
			end
	end.

%% @doc Return the size of the intersection between the stored intervals and the given range.
get_intersection_size(Table, End, Start) when End > Start ->
	case ets:next(Table, Start) of
		'$end_of_table' ->
			0;
		Offset when Offset >= End ->
			%% The last candidate interval; its intersection with (Start, End].
			case ets:lookup(Table, Offset) of
				[] ->
					%% An extremely unlikely race condition: just retry.
					get_intersection_size(Table, End, Start);
				[{_, Start2}] when Start2 >= End ->
					0;
				[{_, Start2}] ->
					End - max(Start, Start2)
			end;
		Offset ->
			case ets:lookup(Table, Offset) of
				[] ->
					%% An extremely unlikely race condition: just retry.
					get_intersection_size(Table, End, Start);
				[{_, Start2}] ->
					%% Add this interval's overlap and keep scanning from its end.
					Offset - max(Start, Start2)
							+ get_intersection_size(Table, End, Offset)
			end
	end.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Fold every {End, Start} interval from the gb_sets iterator into the table.
init_from_gb_set_iterator(Table, Iterator) ->
	case gb_sets:next(Iterator) of
		none ->
			ok;
		{{End, Start}, Iterator2} ->
			add(Table, End, Start),
			init_from_gb_set_iterator(Table, Iterator2)
	end.

%% Compute the union of [Start, End] with every stored interval it touches or
%% overlaps. Returns {UnionEnd, UnionStart, InnerEnds} where InnerEnds are the
%% keys of the absorbed intervals.
find_largest_continuous_interval(Table, End, Start) ->
	find_largest_continuous_interval(Table, End, Start, End, Start, []).

find_largest_continuous_interval(Table, End, Start, End2, Start2, InnerEnds) ->
	%% Start - 1 makes an interval ending exactly at Start count as adjacent.
	case ets:next(Table, Start - 1) of
		'$end_of_table' ->
			{End2, Start2, InnerEnds};
		End3 ->
			case ets:lookup(Table, End3) of
				[] ->
					%% The key has just been removed, very unlucky timing.
					find_largest_continuous_interval(Table, End, Start, End2, Start2,
							InnerEnds);
				[{_End3, Start3}] when Start3 > End ->
					%% The next interval starts beyond End: no more merging.
					{End2, Start2, InnerEnds};
				[{End3, Start3}] ->
					find_largest_continuous_interval(
						Table,
						End,
						End3 + 1,
						max(End2, End3),
						min(Start2, Start3),
						[End3 | InnerEnds]
					)
			end
	end.

%% Delete the keys of the absorbed intervals, keeping the merged interval's key.
remove_inner_intervals(_Table, [], _End) ->
	ok;
remove_inner_intervals(Table, [End | InnerEnds], End) ->
	%% This key is the merged interval itself: keep it.
	remove_inner_intervals(Table, InnerEnds, End);
remove_inner_intervals(Table, [InnerEnd | InnerEnds], End) ->
	ets:delete(Table, InnerEnd),
	remove_inner_intervals(Table, InnerEnds, End).
%%%===================================================================
%%% Tests.
%%%===================================================================

%% Walks add/delete/cut through a scripted sequence; the %% N,M comments
%% describe the expected set of {End, Start} intervals after each step.
ets_intervals_test() ->
	ets:new(ets_intervals_test, [named_table, ordered_set]),
	assert_is_not_inside(100, 0),
	?assertEqual(ok, cut(ets_intervals_test, 10)),
	?assertEqual(ok, delete(ets_intervals_test, 10, 5)),
	Set = gb_sets:from_list([{1, 0}, {5, 3}, {16, 10}]),
	init_from_gb_set(ets_intervals_test, Set),
	assert_is_inside(1, 0),
	assert_is_not_inside(3, 1),
	assert_is_inside(5, 3),
	assert_is_not_inside(10, 5),
	assert_is_inside(16, 10),
	assert_is_not_inside(20, 16),
	%% 1,0 16,3
	add(ets_intervals_test, 11, 4),
	assert_is_inside(16, 3),
	assert_is_inside(1, 0),
	assert_is_not_inside(3, 1),
	assert_is_not_inside(20, 16),
	%% back to 1,0 5,3 16,10
	delete(ets_intervals_test, 10, 5),
	assert_is_inside(5, 3),
	assert_is_inside(1, 0),
	assert_is_inside(16, 10),
	assert_is_not_inside(3, 1),
	assert_is_not_inside(10, 5),
	assert_is_not_inside(20, 16),
	%% 1,0 5,3 16,10 20,18
	add(ets_intervals_test, 20, 18),
	assert_is_inside(5, 3),
	assert_is_inside(1, 0),
	assert_is_inside(16, 10),
	assert_is_inside(20, 18),
	assert_is_not_inside(3, 1),
	assert_is_not_inside(10, 5),
	assert_is_not_inside(18, 16),
	assert_is_not_inside(22, 20),
	%% 1,0 5,3 8,7 16,10 20,18
	add(ets_intervals_test, 8, 7),
	assert_is_inside(5, 3),
	assert_is_inside(1, 0),
	assert_is_inside(16, 10),
	assert_is_inside(20, 18),
	assert_is_inside(8, 7),
	assert_is_not_inside(3, 1),
	assert_is_not_inside(7, 5),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(18, 16),
	assert_is_not_inside(22, 20),
	%% 5,0 8,7 16,10 20,18
	add(ets_intervals_test, 3, 1),
	assert_is_inside(5, 0),
	assert_is_inside(8, 7),
	assert_is_inside(16, 10),
	assert_is_inside(20, 18),
	assert_is_not_inside(7, 5),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(18, 16),
	assert_is_not_inside(22, 20),
	%% 5,0 8,7 16,10 20,18
	cut(ets_intervals_test, 22),
	assert_is_inside(5, 0),
	assert_is_inside(8, 7),
	assert_is_inside(16, 10),
	assert_is_inside(20, 18),
	assert_is_not_inside(7, 5),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(18, 16),
	assert_is_not_inside(22, 20),
	%% 5,0 8,7 16,10 20,18
	cut(ets_intervals_test, 20),
	assert_is_inside(5, 0),
	assert_is_inside(8, 7),
	assert_is_inside(16, 10),
	assert_is_inside(20, 18),
	assert_is_not_inside(7, 5),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(18, 16),
	assert_is_not_inside(22, 20),
	%% 5,0 8,7 16,10 19,18
	cut(ets_intervals_test, 19),
	assert_is_inside(5, 0),
	assert_is_inside(8, 7),
	assert_is_inside(16, 10),
	assert_is_inside(19, 18),
	assert_is_not_inside(7, 5),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(18, 16),
	assert_is_not_inside(22, 19),
	%% 5,0 8,7 14,10
	cut(ets_intervals_test, 14),
	assert_is_inside(5, 0),
	assert_is_inside(8, 7),
	assert_is_inside(14, 10),
	assert_is_not_inside(7, 5),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(20, 14),
	%% 1,0 8,7 14,10
	delete(ets_intervals_test, 5, 1),
	assert_is_inside(1, 0),
	assert_is_inside(8, 7),
	assert_is_inside(14, 10),
	assert_is_not_inside(7, 1),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(20, 14),
	%% 8,7 14,10
	delete(ets_intervals_test, 5, 0),
	assert_is_inside(8, 7),
	assert_is_inside(14, 10),
	assert_is_not_inside(7, 0),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(20, 14),
	%% 8,7 15,10 30,20
	add(ets_intervals_test, 15, 14),
	add(ets_intervals_test, 30, 20),
	assert_is_inside(8, 7),
	assert_is_inside(15, 10),
	assert_is_inside(30, 20),
	assert_is_not_inside(7, 0),
	assert_is_not_inside(10, 8),
	assert_is_not_inside(20, 15),
	assert_is_not_inside(40, 30),
	%% 8,7 30,25
	delete(ets_intervals_test, 25, 8),
	assert_is_inside(8, 7),
	assert_is_inside(30, 25),
	assert_is_not_inside(25, 8),
	assert_is_not_inside(7, 0),
	assert_is_not_inside(40, 30),
	%% 30,7
	add(ets_intervals_test, 25, 8),
	assert_is_inside(30, 7),
	assert_is_not_inside(7, 0),
	assert_is_not_inside(40, 30),
	%% 12,7 18,16 30,25
	delete(ets_intervals_test, 16, 12),
	delete(ets_intervals_test, 25, 18),
	assert_is_inside(12, 7),
	assert_is_inside(18, 16),
	assert_is_inside(30, 25),
	assert_is_not_inside(16, 12),
	assert_is_not_inside(25, 18),
	assert_is_not_inside(40, 30),
	%% 12,7 21,16 30,25
	add(ets_intervals_test, 21, 18),
	assert_is_inside(12, 7),
	assert_is_inside(21, 16),
	assert_is_inside(30, 25),
	assert_is_not_inside(16, 12),
	assert_is_not_inside(25, 21),
	assert_is_not_inside(40, 30),
	%% 12,7 33,13
	add(ets_intervals_test, 33, 13),
	assert_is_inside(33, 13),
	assert_is_inside(12, 7),
	assert_is_not_inside(13, 12),
	assert_is_not_inside(40, 33),
	%% 12,7 34,13
	add(ets_intervals_test, 34, 13),
	assert_is_inside(34, 13),
	assert_is_inside(12, 7),
	assert_is_not_inside(13, 12),
	assert_is_not_inside(40, 34),
	%% 12,7 35,13
	add(ets_intervals_test, 35, 22),
	assert_is_inside(35, 13),
	assert_is_inside(12, 7),
	assert_is_not_inside(13, 12),
	assert_is_not_inside(40, 35).

%% Assert every byte in (Start, End] is reported inside.
assert_is_inside(End, End) ->
	ok;
assert_is_inside(End, Start) ->
	?assertEqual(true, is_inside(ets_intervals_test, Start + 1)),
	assert_is_inside(End, Start + 1).

%% Assert every byte in (Start, End] is reported outside.
assert_is_not_inside(End, End) ->
	ok;
assert_is_not_inside(End, Start) ->
	?assertEqual(false, is_inside(ets_intervals_test, Start + 1)),
	assert_is_not_inside(End, Start + 1).

================================================ FILE: apps/arweave/src/ar_events.erl ================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

%%% @doc A per-event-type publish/subscribe dispatcher. One gen_server is
%%% started per event name (see ar_events_sup); subscribers are monitored
%%% and removed automatically when they terminate.
-module(ar_events).

-behaviour(gen_server).

-export([
	event_to_process/1,
	subscribe/1,
	cancel/1,
	send/2
]).

-export([
	start_link/1,
	init/1,
	handle_call/3,
	handle_cast/2,
	handle_info/2,
	terminate/2,
	code_change/3
]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Internal state definition.
-record(state, {
	%% The event name this dispatcher serves (an atom).
	name,
	%% Map of subscriber Pid => monitor reference.
	subscribers = #{}
}).
%%%===================================================================
%%% API
%%%===================================================================

%% @doc Map an event name to the registered name of its dispatcher process,
%% e.g. tx -> ar_event_tx.
event_to_process(Event) when is_atom(Event) ->
	list_to_atom("ar_event_" ++ atom_to_list(Event)).

%% @doc Subscribe the calling process to the given event, or to each event in
%% a list of events. Returns ok (or already_subscribed) per event.
subscribe(Event) when is_atom(Event) ->
	gen_server:call(event_to_process(Event), subscribe);
subscribe([]) ->
	[];
subscribe([Event | Rest]) ->
	[subscribe(Event) | subscribe(Rest)].

%% @doc Remove the calling process from the event's subscriber list.
cancel(Event) ->
	gen_server:call(event_to_process(Event), cancel).

%% @doc Broadcast Value to every subscriber of Event except the caller.
%% Returns error when no dispatcher is registered for the event.
send(Event, Value) ->
	Dispatcher = event_to_process(Event),
	case whereis(Dispatcher) of
		undefined ->
			error;
		_Pid ->
			gen_server:cast(Dispatcher, {send, self(), Value})
	end.

%% @doc Start a dispatcher for the given event name, registered locally
%% under event_to_process(Name).
start_link(Name) ->
	gen_server:start_link({local, event_to_process(Name)}, ?MODULE, Name, []).

%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%% @doc Initialize the dispatcher with an empty subscriber map.
init(Name) ->
	{ok, #state{ name = Name }}.
%%--------------------------------------------------------------------
%% @private
%% @doc Handle synchronous requests.
%%
%% subscribe: register and monitor the caller unless it is already in the
%% subscriber map; replies ok or already_subscribed.
%% cancel: drop the caller from the map and demonitor it; replies ok, or
%% unknown when the caller was not subscribed.
%% Anything else is logged and acknowledged with ok.
%% @end
%%--------------------------------------------------------------------
handle_call(subscribe, {From, _Tag}, #state{ subscribers = Subscribers } = State) ->
	case maps:is_key(From, Subscribers) of
		true ->
			{reply, already_subscribed, State};
		false ->
			MonitorRef = erlang:monitor(process, From),
			Subscribers2 = Subscribers#{ From => MonitorRef },
			{reply, ok, State#state{ subscribers = Subscribers2 }}
	end;
handle_call(cancel, {From, _Tag}, #state{ subscribers = Subscribers } = State) ->
	case maps:take(From, Subscribers) of
		error ->
			{reply, unknown, State};
		{MonitorRef, Subscribers2} ->
			erlang:demonitor(MonitorRef),
			{reply, ok, State#state{ subscribers = Subscribers2 }}
	end;
handle_call(Request, _From, State) ->
	?LOG_ERROR([{event, unhandled_call}, {message, Request}]),
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc Handle asynchronous requests.
%%
%% {send, From, Value}: deliver {event, Name, Value} to every subscriber
%% except the sender itself. Unknown casts are logged and ignored.
%% @end
%%--------------------------------------------------------------------
handle_cast({send, From, Value}, #state{ name = Name, subscribers = Subscribers } = State) ->
	Message = {event, Name, Value},
	lists:foreach(
		fun(Pid) when Pid /= From ->
				Pid ! Message;
			(_Sender) ->
				ok
		end,
		maps:keys(Subscribers)),
	{noreply, State};
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {message, Msg}]),
	{noreply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling all non call/cast messages.
%% A 'DOWN' message means a monitored subscriber terminated; it is removed
%% by reusing the cancel logic in handle_call/3.
%% @end
%%--------------------------------------------------------------------
handle_info({'DOWN', _, process, From, _}, State) ->
	%% Reuse the cancel branch; the From tag is irrelevant here.
	{_, _, State1} = handle_call(cancel, {From, x}, State),
	{noreply, State1};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%% @end
%%--------------------------------------------------------------------
terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Convert process state when code is changed.
%% @end
%%--------------------------------------------------------------------
code_change(_OldVsn, State, _Extra) ->
	{ok, State}.

%%%===================================================================
%%% Tests.
%%%===================================================================

subscribe_send_cancel_test() ->
	%% Check whether all the "event"-processes are alive.
	%% This list should be aligned with the total number
	%% of running gen_server's by ar_events_sup.
	Processes = [tx, block, testing],
	true = lists:all(
		fun(P) -> whereis(ar_events:event_to_process(P)) /= undefined end,
		Processes),
	EventNetworkStateOnStart = sys:get_state(ar_events:event_to_process(testing)),
	ok = ar_events:subscribe(testing),
	already_subscribed = ar_events:subscribe(testing),
	[ok, already_subscribed] = ar_events:subscribe([tx, testing]),
	%% Sender shouldn't receive its own event.
	ok = ar_events:send(testing, 12345),
	receive
		{event, testing, 12345} ->
			?assert(false, "Received an unexpected event.")
	after 200 ->
		ok
	end,
	%% Sender should receive an event triggered by another process.
	spawn(fun() -> ar_events:send(testing, 12345) end),
	receive
		{event, testing, 12345} ->
			ok
	after 200 ->
		?assert(false, "Did not receive an expected event within 200 milliseconds.")
	end,
	ok = ar_events:cancel(testing),
	%% After cancel, the server state must match the state before subscribing.
	EventNetworkStateOnStart = sys:get_state(ar_events:event_to_process(testing)).

process_terminated_test() ->
	%% If a subscriber has been terminated without an explicit "cancel" call
	%% it should be cleaned up from the subscription list.
	EventNetworkStateOnStart = sys:get_state(ar_events:event_to_process(testing)),
	spawn(fun() -> ar_events:subscribe(testing) end),
	timer:sleep(200),
	%% The 'DOWN' handler must have removed the dead subscriber by now.
	EventNetworkStateOnStart = sys:get_state(ar_events:event_to_process(testing)).

================================================ FILE: apps/arweave/src/ar_events_sup.erl ================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

%%% @doc Supervisor starting one ar_events dispatcher per event type.
-module(ar_events_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

-include("ar_sup.hrl").

%% Helper macro for declaring children of supervisor.
%% Each child is an ar_events gen_server started with the event name I.
-define(CHILD(Mod, I, Type), {I, {Mod, start_link, [I]}, permanent, ?SHUTDOWN_TIMEOUT,
		Type, [Mod]}).
%% ===================================================================
%% API functions
%% ===================================================================

%% @doc Start the supervisor, registered locally as ?MODULE.
start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

%% @doc One-for-one supervision (max 5 restarts in 10 seconds). Each child is
%% an ar_events dispatcher for one event type; the comments list the values
%% emitted on each event. Children start in list order.
init([]) ->
	{ok, {{one_for_one, 5, 10}, [
		%% Events: remaining_disk_space.
		?CHILD(ar_events, disksup, worker),
		%% Events: new, ready_for_mining, orphaned, emitting_scheduled,
		%% preparing_unblacklisting, ready_for_unblacklisting, registered_offset.
		?CHILD(ar_events, tx, worker),
		%% Events: discovered, rejected, new, mined_block_received.
		?CHILD(ar_events, block, worker),
		%% Events: unpacked, packed.
		?CHILD(ar_events, chunk, worker),
		%% Events: removed
		?CHILD(ar_events, peer, worker),
		%% Events: account_tree_initialized, initialized,
		%% new_tip, checkpoint_block, search_space_upper_bound.
		?CHILD(ar_events, node_state, worker),
		%% Events: initialized, valid, invalid, validation_error, refuse_validation,
		%% computed_output.
		?CHILD(ar_events, nonce_limiter, worker),
		%% Events: removed_file.
		?CHILD(ar_events, chunk_storage, worker),
		%% Events: add_range, remove_range, global_remove_range, cut, global_cut.
		?CHILD(ar_events, sync_record, worker),
		%% Events: rejected, stale, partial, accepted, confirmed, orphaned.
		?CHILD(ar_events, solution, worker),
		%% Used for the testing purposes.
		?CHILD(ar_events, testing, worker)
	]}}.

================================================ FILE: apps/arweave/src/ar_footprint_record.erl ================================================
-module(ar_footprint_record).
-export([add/3, add_async/4, delete/2, get_offset/1,
		get_padded_offset_from_footprint_offset/1, get_footprint/1,
		get_footprint_bucket/1, get_intervals/3, get_intervals/4,
		get_unsynced_intervals/3, get_intervals_from_footprint_intervals/1,
		get_footprint_size/0, get_footprints_per_partition/0, is_recorded/2]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include("ar_data_discovery.hrl").

-include_lib("eunit/include/eunit.hrl").

-moduledoc """
	This module exports functions for maintaining a replica 2.9 entropy-aligned record
	of the synced chunks. It differs from the normal record (ar_data_sync) in that it
	only registers the bucket numbers of the synced chunks and records chunks with the
	same footprint next to each other.

	For example, a record may contain intervals 0-10, 1000-1024, 1028-2048. This means
	the node has the first 10 chunks of the first entropy footprint, the last 24 chunks
	of the first entropy footprint and chunks 4-44 of the second entropy footprint.
	These chunks are from the first partition. The offset of the chunks from the second
	partition is shifted by the number of chunks in the replica 2.9 entropy generated
	per partition (which is slightly bigger than the number of chunks that can fit in
	the 3.6 TB partition).

	Note that Packing does not have to be replica_2_9. We maintain this record for any
	packing so that it is convenient to serve the data to any client.
""".

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Add a chunk to the footprint record.
%% Each chunk occupies exactly one unit interval
%% (FootprintOffset - 1, FootprintOffset] in the record.
-spec add(Offset :: non_neg_integer(), Packing :: term(), StoreID :: string()) -> ok.
add(Offset, Packing, StoreID) ->
	FootprintOffset = get_offset(Offset),
	ar_sync_record:add(FootprintOffset, FootprintOffset - 1, Packing,
			ar_data_sync_footprints, StoreID).

%% @doc Add a chunk to the footprint record asynchronously.
-spec add_async(Tag :: term(), Offset :: non_neg_integer(), Packing :: term(),
		StoreID :: string()) -> ok.
add_async(Tag, Offset, Packing, StoreID) ->
	FootprintOffset = get_offset(Offset),
	ar_sync_record:add_async(Tag, FootprintOffset, FootprintOffset - 1, Packing,
			ar_data_sync_footprints, StoreID).

%% @doc Get the offset of a chunk in the footprint record.
%% Chunks of the same footprint map to consecutive footprint offsets: the
%% record orders chunks first by partition, then by footprint number, then by
%% position within the footprint.
-spec get_offset(Offset :: non_neg_integer()) -> non_neg_integer().
get_offset(Offset) ->
	PaddedOffset = ar_block:get_chunk_padded_offset(Offset),
	FootprintSize = get_footprint_size(),
	FootprintsPerPartition = get_footprints_per_partition(),
	ChunksPerPartition = get_chunks_per_partition(),
	Partition = ar_replica_2_9:get_entropy_partition(PaddedOffset),
	%% Zero-based chunk index within the partition.
	PartitionOffset = (PaddedOffset - Partition * ?PARTITION_SIZE) div ?DATA_CHUNK_SIZE - 1,
	%% Which footprint within the partition: consecutive chunks belong to
	%% consecutive footprints (round-robin).
	Footprint = PartitionOffset rem FootprintsPerPartition,
	%% Position within the footprint.
	FootprintOffset = PartitionOffset div FootprintsPerPartition,
	%% One-based record offset.
	Partition * ChunksPerPartition + Footprint * FootprintSize + FootprintOffset + 1.

%% @doc Return the largest end offset of the chunk that maps to the given footprint offset.
%% This is the inverse of get_offset/1: decompose the record offset into
%% (partition, footprint, position), then rebuild the weave byte offset.
get_padded_offset_from_footprint_offset(FootprintOffset) ->
	Start = FootprintOffset - 1,
	FootprintSize = get_footprint_size(),
	ChunksPerPartition = get_chunks_per_partition(),
	Partition = Start div ChunksPerPartition,
	FootprintsPerPartition = get_footprints_per_partition(),
	PartitionStart = Partition * ChunksPerPartition,
	Footprint = (Start - PartitionStart) div FootprintSize,
	InFootprintOffset = (Start - PartitionStart) rem FootprintSize,
	%% Undo the round-robin interleaving applied by get_offset/1.
	EndOffset = Partition * ?PARTITION_SIZE
			+ (InFootprintOffset * FootprintsPerPartition + (Footprint + 1)) * ?DATA_CHUNK_SIZE,
	ar_block:get_chunk_padded_offset(EndOffset).

%% @doc Get the chunk's footprint's number, >= 0, < the maximum number of footprints
%% in a partition.
-spec get_footprint(Offset :: non_neg_integer()) -> non_neg_integer().
get_footprint(Offset) ->
	EntropyIndex = ar_replica_2_9:get_entropy_index(Offset, 0),
	EntropyIndex div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT.

%% @doc Get the footprint bucket number of a chunk.
-spec get_footprint_bucket(Offset :: non_neg_integer()) -> non_neg_integer().
get_footprint_bucket(Offset) ->
	get_offset(Offset) div ?NETWORK_FOOTPRINT_BUCKET_SIZE.

%% @doc Get the synced footprint intervals of a chunk, regardless of packing.
-spec get_intervals(
		Partition :: non_neg_integer(),
		Footprint :: non_neg_integer(),
		StoreID :: string()
) -> term().
get_intervals(Partition, Footprint, StoreID) ->
	get_intervals(Partition, Footprint, any, StoreID).

%% @doc Get the synced footprint intervals of a chunk with the given packing.
-spec get_intervals(
		Partition :: non_neg_integer(),
		Footprint :: non_neg_integer(),
		Packing :: term(),
		StoreID :: string()
) -> term().
get_intervals(Partition, Footprint, Packing, StoreID) ->
	{FootprintStart, End} = get_footprint_bounds(Partition, Footprint),
	collect_intervals(FootprintStart, End, Packing, StoreID).

%% @doc Get the unsynced footprint intervals of a chunk.
-spec get_unsynced_intervals(
		Partition :: non_neg_integer(),
		Footprint :: non_neg_integer(),
		StoreID :: string()
) -> term().
get_unsynced_intervals(Partition, Footprint, StoreID) ->
	{FootprintStart, End} = get_footprint_bounds(Partition, Footprint),
	collect_unsynced_intervals(FootprintStart, End, StoreID).

%% Compute the footprint-record range {Start, End} covered by the given
%% footprint of the given partition. The end is clamped to the partition
%% boundary because the last footprint of a partition may be truncated.
%% Shared by get_intervals/4 and get_unsynced_intervals/3 (previously
%% duplicated in both).
get_footprint_bounds(Partition, Footprint) ->
	FootprintSize = get_footprint_size(),
	ChunksPerPartition = get_chunks_per_partition(),
	PartitionStartOffset = Partition * ChunksPerPartition,
	FootprintStart = PartitionStartOffset + Footprint * FootprintSize,
	End = min(FootprintStart + FootprintSize, PartitionStartOffset + ChunksPerPartition),
	{FootprintStart, End}.

%% @doc Delete a chunk from the footprint record.
-spec delete(Offset :: non_neg_integer(), StoreID :: string()) -> ok.
delete(Offset, StoreID) ->
	FootprintOffset = get_offset(Offset),
	%% Remove the single unit interval registered by add/3 for this chunk.
	ar_sync_record:delete(FootprintOffset, FootprintOffset - 1,
			ar_data_sync_footprints, StoreID).

%% @doc Convert a list of footprint intervals to a list of intervals.
-spec get_intervals_from_footprint_intervals(FootprintIntervals :: term()) -> term().
get_intervals_from_footprint_intervals(FootprintIntervals) ->
	get_intervals_from_footprint_intervals(ar_intervals:to_list(FootprintIntervals),
			ar_intervals:new()).

%% @doc Get the number of footprints contained in a partition.
-spec get_footprints_per_partition() -> non_neg_integer().
get_footprints_per_partition() ->
	?REPLICA_2_9_ENTROPY_COUNT div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT.

%% @doc Return true if a chunk containing the given Offset (=< EndOffset, > StartOffset)
%% is found in the footprint record.
-spec is_recorded(Offset :: non_neg_integer(), StoreID :: string()) -> boolean().
is_recorded(Offset, StoreID) ->
	FootprintOffset = get_offset(Offset),
	ar_sync_record:is_recorded(FootprintOffset, ar_data_sync_footprints, StoreID).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Number of chunks covered by one entropy footprint.
get_footprint_size() ->
	?REPLICA_2_9_ENTROPY_SIZE div ?COMPOSITE_PACKING_SUB_CHUNK_SIZE.

%% Number of footprint-record slots per partition: the partition size padded
%% up to a whole number of footprints, expressed in chunks.
get_chunks_per_partition() ->
	FootprintSize = get_footprint_size(),
	ar_util:pad_to_closest_multiple_equal_or_above(?PARTITION_SIZE,
			?DATA_CHUNK_SIZE * FootprintSize) div ?DATA_CHUNK_SIZE.

%% Collect the synced intervals within [Start, End) into a fresh interval set.
collect_intervals(Start, End, Packing, StoreID) ->
	collect_intervals(Start, End, Packing, StoreID, ar_intervals:new()).
collect_intervals(Start, End, _Packing, _StoreID, Intervals) when Start >= End ->
	Intervals;
collect_intervals(Start, End, Packing, StoreID, Intervals) ->
	%% 'any' queries the record regardless of packing; otherwise restrict
	%% the query to the given packing.
	Query =
		case Packing of
			any ->
				ar_sync_record:get_next_synced_interval(Start, End,
						ar_data_sync_footprints, StoreID);
			Packing ->
				ar_sync_record:get_next_synced_interval(Start, End, Packing,
						ar_data_sync_footprints, StoreID)
		end,
	case Query of
		not_found ->
			Intervals;
		{End2, Start2} ->
			%% Clamp the returned interval to [Start, End) and continue
			%% scanning from its end.
			End3 = min(End2, End),
			Start3 = max(Start2, Start),
			collect_intervals(End3, End, Packing, StoreID,
					ar_intervals:add(Intervals, End3, Start3))
	end.

%% Collect the unsynced intervals within [Start, End) into a fresh interval set.
collect_unsynced_intervals(Start, End, StoreID) ->
	collect_unsynced_intervals(Start, End, StoreID, ar_intervals:new()).

collect_unsynced_intervals(Start, End, _StoreID, Intervals) when Start >= End ->
	Intervals;
collect_unsynced_intervals(Start, End, StoreID, Intervals) ->
	Query = ar_sync_record:get_next_unsynced_interval(Start, End,
			ar_data_sync_footprints, StoreID),
	case Query of
		not_found ->
			Intervals;
		{End2, Start2} ->
			%% Clamp to [Start, End) and continue from the interval's end.
			End3 = min(End2, End),
			Start3 = max(Start2, Start),
			collect_unsynced_intervals(End3, End, StoreID,
					ar_intervals:add(Intervals, End3, Start3))
	end.

%% Fold each footprint interval into weave-byte intervals.
get_intervals_from_footprint_intervals([], Intervals) ->
	Intervals;
get_intervals_from_footprint_intervals([{End, Start} | Rest], Intervals) ->
	Intervals2 = get_intervals_from_footprint_intervals(Start, End, Intervals),
	get_intervals_from_footprint_intervals(Rest, Intervals2).

%% Map every footprint offset in (Start, End] back to the padded weave offset
%% of its chunk and record the chunk-sized byte interval. Consecutive footprint
%% offsets generally map to non-adjacent byte offsets, hence one interval per
%% chunk.
get_intervals_from_footprint_intervals(Start, End, Intervals) when Start >= End ->
	Intervals;
get_intervals_from_footprint_intervals(Start, End, Intervals) ->
	Offset = get_padded_offset_from_footprint_offset(Start + 1),
	Intervals2 = ar_intervals:add(Intervals, Offset, Offset - ?DATA_CHUNK_SIZE),
	get_intervals_from_footprint_intervals(Start + 1, End, Intervals2).

%%%===================================================================
%%% Tests.
%%%===================================================================

-ifdef(AR_TEST).
%% @doc Exercise the byte-offset -> footprint-offset mapping. Chunks belonging
%% to the same footprint get consecutive footprint offsets even though their
%% byte offsets are interleaved across footprints within a partition.
get_offset_test() ->
	%% The first chunk of the first footprint.
	?assertEqual(1, get_offset(?DATA_CHUNK_SIZE)),
	%% The first chunk of the second footprint.
	?assertEqual(5, get_offset(?DATA_CHUNK_SIZE * 2)),
	%% The second chunk of the first footprint.
	?assertEqual(2, get_offset(?DATA_CHUNK_SIZE * 3)),
	%% The second chunk of the second footprint.
	?assertEqual(6, get_offset(?DATA_CHUNK_SIZE * 4)),
	%% The third chunk of the first footprint.
	?assertEqual(3, get_offset(?DATA_CHUNK_SIZE * 5)),
	%% The third chunk of the second footprint.
	?assertEqual(7, get_offset(?DATA_CHUNK_SIZE * 6)),
	%% The fourth chunk of the first footprint.
	?assertEqual(4, get_offset(?DATA_CHUNK_SIZE * 7)),
	%% The fourth chunk of the second footprint.
	?assertEqual(8, get_offset(?DATA_CHUNK_SIZE * 8)),
	%% The first chunk of the first footprint of the second partition.
	?assertEqual(9, get_offset(?DATA_CHUNK_SIZE * 9)),
	%% The first chunk of the second footprint of the second partition.
	?assertEqual(13, get_offset(?DATA_CHUNK_SIZE * 10)),
	%% The second chunk of the first footprint of the second partition.
	?assertEqual(10, get_offset(?DATA_CHUNK_SIZE * 11)),
	%% The second chunk of the second footprint of the second partition.
	?assertEqual(14, get_offset(?DATA_CHUNK_SIZE * 12)),
	%% The third chunk of the first footprint of the second partition.
	?assertEqual(11, get_offset(?DATA_CHUNK_SIZE * 13)),
	%% The third chunk of the second footprint of the second partition.
	?assertEqual(15, get_offset(?DATA_CHUNK_SIZE * 14)),
	%% The fourth chunk of the first footprint of the second partition.
	?assertEqual(12, get_offset(?DATA_CHUNK_SIZE * 15)),
	%% The fourth chunk of the second footprint of the second partition.
	?assertEqual(16, get_offset(?DATA_CHUNK_SIZE * 16)),
	%% The first chunk of the first footprint of the third partition.
	?assertEqual(17, get_offset(?DATA_CHUNK_SIZE * 17)),
	%% The first chunk of the second footprint of the third partition.
	?assertEqual(21, get_offset(?DATA_CHUNK_SIZE * 18)),
	%% The second chunk of the first footprint of the third partition.
	?assertEqual(18, get_offset(?DATA_CHUNK_SIZE * 19)).

%% @doc Exercise the inverse mapping: footprint offset -> padded byte end
%% offset, including a large real-weave offset that round-trips exactly.
get_padded_offset_from_footprint_offset_test() ->
	?assertEqual(262144, get_padded_offset_from_footprint_offset(1)),
	?assertEqual(786432, get_padded_offset_from_footprint_offset(2)),
	?assertEqual(1310720, get_padded_offset_from_footprint_offset(3)),
	?assertEqual(1835008, get_padded_offset_from_footprint_offset(4)),
	?assertEqual(524288, get_padded_offset_from_footprint_offset(5)),
	?assertEqual(1048576, get_padded_offset_from_footprint_offset(6)),
	?assertEqual(1572864, get_padded_offset_from_footprint_offset(7)),
	?assertEqual(2097152, get_padded_offset_from_footprint_offset(8)),
	?assertEqual(2359296, get_padded_offset_from_footprint_offset(9)),
	?assertEqual(2883584, get_padded_offset_from_footprint_offset(10)),
	%% Sanity-check that the large offset is already chunk-padded, then
	%% round-trip it through the footprint offset space.
	79280870522880 = ar_block:get_chunk_padded_offset(79280870522880),
	?assertEqual(317123481, ar_footprint_record:get_offset(79280870522880)),
	?assertEqual(79280870522880,
			ar_footprint_record:get_padded_offset_from_footprint_offset(317123481)).

%% @doc For each sample byte offset, verify that mapping it into the footprint
%% space and expanding that single footprint entry back into byte intervals
%% reproduces exactly the one chunk interval ending at the byte offset.
get_offset_get_intervals_from_footprint_intervals_reversal_test() ->
	Offsets = [?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE * 2, ?DATA_CHUNK_SIZE * 3,
			?DATA_CHUNK_SIZE * 4, ?DATA_CHUNK_SIZE * 8, ?DATA_CHUNK_SIZE * 9],
	[get_offset_get_intervals_from_footprint_intervals_reversal(Offset) || Offset <- Offsets].

%% Helper: assert the reversal property for one byte offset.
get_offset_get_intervals_from_footprint_intervals_reversal(ByteOffset) ->
	FootprintOffset = get_offset(ByteOffset),
	FootprintInterval = ar_intervals:from_list([{FootprintOffset, FootprintOffset - 1}]),
	ResultingByteIntervals = get_intervals_from_footprint_intervals(FootprintInterval),
	[{GotEnd, GotStart}] = ar_intervals:to_list(ResultingByteIntervals),
	?assertEqual(ByteOffset, GotEnd),
	?assertEqual(ByteOffset - ?DATA_CHUNK_SIZE, GotStart).
%% @doc Start a real ar_sync_record server against a mocked storage module and
%% check get_unsynced_intervals/3 before and after recording one chunk.
get_unsynced_intervals_test_() ->
	ar_test_node:test_with_mocked_functions(
		[{ar_storage_module, get_by_id, fun(test_unsynced_store) -> test_unsynced_store end}],
		fun() ->
			%% Set up a test sync record server.
			TestStoreID = test_unsynced_store,
			TestProcessName = list_to_atom("ar_sync_record_" ++ atom_to_list(TestStoreID)),
			%% Initialize sync_records ETS table if it does not exist.
			case ets:info(sync_records) of
				undefined ->
					ets:new(sync_records, [named_table, public, {read_concurrency, true}]);
				_ ->
					%% Clear existing data from previous tests.
					ets:delete_all_objects(sync_records)
			end,
			%% Start the sync record process.
			case whereis(TestProcessName) of
				undefined ->
					{ok, _Pid} = ar_sync_record:start_link(TestProcessName, TestStoreID);
				_ ->
					ok
			end,
			Partition = 0,
			Footprint = 0,
			%% Get unsynced intervals before adding any data.
			UnsyncedBefore = get_unsynced_intervals(Partition, Footprint, TestStoreID),
			UnsyncedBeforeList = ar_intervals:to_list(UnsyncedBefore),
			?assertEqual([{4, 0}], UnsyncedBeforeList),
			%% Add some data to the footprint.
			%% This should map to partition 0, footprint 0.
			PaddedOffset = ?DATA_CHUNK_SIZE,
			Packing = unpacked,
			ok = add(PaddedOffset, Packing, TestStoreID),
			UnsyncedAfter = get_unsynced_intervals(Partition, Footprint, TestStoreID),
			UnsyncedAfterList = ar_intervals:to_list(UnsyncedAfter),
			?assertEqual([{4, 1}], UnsyncedAfterList)
		end).

%% @doc Record footprint offsets 0..32 directly in the sync record and verify
%% that get_intervals/3 slices them correctly per (partition, footprint) pair.
get_intervals_test_() ->
	ar_test_node:test_with_mocked_functions(
		[{ar_storage_module, get_by_id, fun(test_intervals_store) -> test_intervals_store end}],
		fun() ->
			%% Set up a test sync record server.
			TestStoreID = test_intervals_store,
			TestProcessName = list_to_atom("ar_sync_record_" ++ atom_to_list(TestStoreID)),
			%% Initialize sync_records ETS table if it does not exist.
			case ets:info(sync_records) of
				undefined ->
					ets:new(sync_records, [named_table, public, {read_concurrency, true}]);
				_ ->
					%% Clear existing data from previous tests.
					ets:delete_all_objects(sync_records)
			end,
			%% Start the sync record process.
			case whereis(TestProcessName) of
				undefined ->
					{ok, _Pid} = ar_sync_record:start_link(TestProcessName, TestStoreID);
				_ ->
					ok
			end,
			Packing = unpacked,
			ar_sync_record:add(32, 0, Packing, ar_data_sync_footprints, TestStoreID),
			Partition = 0,
			Footprint = 0,
			SyncedIntervals = get_intervals(Partition, Footprint, TestStoreID),
			SyncedIntervalsList = ar_intervals:to_list(SyncedIntervals),
			?assertEqual([{4, 0}], SyncedIntervalsList),
			Partition2 = 0,
			Footprint2 = 1,
			SyncedIntervals2 = get_intervals(Partition2, Footprint2, TestStoreID),
			SyncedIntervalsList2 = ar_intervals:to_list(SyncedIntervals2),
			?assertEqual([{8, 4}], SyncedIntervalsList2),
			%% Footprint 2 does not exist in the test configuration - empty result.
			Partition3 = 0,
			Footprint3 = 2,
			SyncedIntervals3 = get_intervals(Partition3, Footprint3, TestStoreID),
			SyncedIntervalsList3 = ar_intervals:to_list(SyncedIntervals3),
			?assertEqual([], SyncedIntervalsList3),
			Partition4 = 1,
			Footprint4 = 0,
			SyncedIntervals4 = get_intervals(Partition4, Footprint4, TestStoreID),
			SyncedIntervalsList4 = ar_intervals:to_list(SyncedIntervals4),
			?assertEqual([{12, 8}], SyncedIntervalsList4),
			Partition5 = 1,
			Footprint5 = 1,
			SyncedIntervals5 = get_intervals(Partition5, Footprint5, TestStoreID),
			SyncedIntervalsList5 = ar_intervals:to_list(SyncedIntervals5),
			?assertEqual([{16, 12}], SyncedIntervalsList5),
			Partition6 = 1,
			Footprint6 = 2,
			SyncedIntervals6 = get_intervals(Partition6, Footprint6, TestStoreID),
			SyncedIntervalsList6 = ar_intervals:to_list(SyncedIntervals6),
			?assertEqual([], SyncedIntervalsList6),
			Partition7 = 2,
			Footprint7 = 0,
			SyncedIntervals7 = get_intervals(Partition7, Footprint7, TestStoreID),
			SyncedIntervalsList7 = ar_intervals:to_list(SyncedIntervals7),
			?assertEqual([{20, 16}], SyncedIntervalsList7)
		end).
%% @doc Verify that get_padded_offset_from_footprint_offset/1 inverts
%% get_offset/1 for a representative sample of byte offsets, including
%% offsets billions of chunks into the weave.
get_offset_get_padded_offset_from_footprint_offset_reversal_test() ->
	ByteOffsets = [
		?DATA_CHUNK_SIZE,
		?DATA_CHUNK_SIZE * 2,
		?DATA_CHUNK_SIZE * 3,
		?PARTITION_SIZE,
		?DATA_CHUNK_SIZE * 8,
		?DATA_CHUNK_SIZE * 9,
		?PARTITION_SIZE * 2,
		?PARTITION_SIZE * 3,
		?PARTITION_SIZE * 4,
		?PARTITION_SIZE * 5,
		?PARTITION_SIZE * 6,
		?PARTITION_SIZE * 7,
		?PARTITION_SIZE * 8,
		?PARTITION_SIZE * 9,
		?PARTITION_SIZE * 10,
		?PARTITION_SIZE * 11,
		?PARTITION_SIZE * 12,
		?PARTITION_SIZE * 13,
		?PARTITION_SIZE * 6249,
		?PARTITION_SIZE * 6250,
		?PARTITION_SIZE * 6249 + ?DATA_CHUNK_SIZE,
		?PARTITION_SIZE * 6250 + ?DATA_CHUNK_SIZE,
		?PARTITION_SIZE * 6249 + ?DATA_CHUNK_SIZE * 2,
		?PARTITION_SIZE * 6250 + ?DATA_CHUNK_SIZE * 2,
		?PARTITION_SIZE * 6249 + ?DATA_CHUNK_SIZE * 8,
		?PARTITION_SIZE * 6250 + ?DATA_CHUNK_SIZE * 8,
		?PARTITION_SIZE * 6249 + ?DATA_CHUNK_SIZE * 9
	],
	lists:foreach(
		fun(ByteOffset) ->
			get_offset_get_padded_offset_from_footprint_offset_reversal(ByteOffset)
		end,
		ByteOffsets),
	ok.

%% @doc Assert the reversal property for one byte offset: translating it into
%% the footprint offset space and back yields the chunk-padded input offset.
get_offset_get_padded_offset_from_footprint_offset_reversal(ByteOffset) ->
	FootprintOffset = get_offset(ByteOffset),
	RecoveredPaddedOffset = get_padded_offset_from_footprint_offset(FootprintOffset),
	?assertEqual(ar_block:get_chunk_padded_offset(ByteOffset), RecoveredPaddedOffset).
%% @doc Table-driven test of the footprint-interval -> byte-interval expansion.
%% Each case is {FootprintIntervals, ExpectedByteIntervals, Title}.
get_intervals_from_footprint_intervals_test() ->
	TestCases = [
		{[], [], "Empty"},
		{[{1, 0}], [{?DATA_CHUNK_SIZE, 0}], "One chunk"},
		{[{2, 0}], [
			{?DATA_CHUNK_SIZE, 0},
			{?DATA_CHUNK_SIZE * 3, ?DATA_CHUNK_SIZE * 2}], "Two chunks"},
		{[{4, 0}], [
			{?DATA_CHUNK_SIZE, 0},
			{?DATA_CHUNK_SIZE * 3, ?DATA_CHUNK_SIZE * 2},
			{?DATA_CHUNK_SIZE * 5, ?DATA_CHUNK_SIZE * 4},
			{?DATA_CHUNK_SIZE * 7, ?DATA_CHUNK_SIZE * 6}], "Full footprint"},
		{[{5, 0}], [
			{?DATA_CHUNK_SIZE * 5, ?DATA_CHUNK_SIZE * 4},
			{?DATA_CHUNK_SIZE * 3, 0},
			{?DATA_CHUNK_SIZE * 7, ?DATA_CHUNK_SIZE * 6}], "Footprint wraparound"},
		{[{6, 3}], [
			{?DATA_CHUNK_SIZE * 7, ?DATA_CHUNK_SIZE * 6},
			{?DATA_CHUNK_SIZE * 2, ?DATA_CHUNK_SIZE * 1},
			{?DATA_CHUNK_SIZE * 4, ?DATA_CHUNK_SIZE * 3}], "Bits of two footprints"},
		{[{1, 0}, {3, 2}], [
			{?DATA_CHUNK_SIZE, 0},
			{?DATA_CHUNK_SIZE * 5, ?DATA_CHUNK_SIZE * 4}], "Two chunks with a hole"},
		{[{8, 0}], [
			{?DATA_CHUNK_SIZE * 8, 0}], "Completely covered partition"},
		{[{9, 0}], [
			{?DATA_CHUNK_SIZE * 9, 0}], "Completely covered partition plus one chunk"},
		{[{9, 0}, {13, 12}], [
			{?DATA_CHUNK_SIZE * 10, 0}], "Completely covered partition plus two chunks"},
		{[{9, 0}, {13, 12}, {15, 14}], [
			{?DATA_CHUNK_SIZE * 10, 0},
			{?DATA_CHUNK_SIZE * 14, ?DATA_CHUNK_SIZE * 13}],
				"Completely covered partition plus three chunks"}
	],
	test_get_intervals_from_footprint_intervals(TestCases).

%% Run each test case: compare against the expected byte intervals, reporting
%% the case Title on failure.
test_get_intervals_from_footprint_intervals([]) ->
	ok;
test_get_intervals_from_footprint_intervals([{Input, Expected, Title} | Rest]) ->
	?assertEqual(ar_intervals:from_list(Expected),
			get_intervals_from_footprint_intervals(ar_intervals:from_list(Input)), Title),
	test_get_intervals_from_footprint_intervals(Rest).

-endif.


================================================
FILE: apps/arweave/src/ar_fork.erl
================================================
%%%
%%% @doc The module defines Arweave hard forks' heights.
%%%
-module(ar_fork).
-export([height_1_6/0, height_1_7/0, height_1_8/0, height_1_9/0, height_2_0/0, height_2_2/0,
		height_2_3/0, height_2_4/0, height_2_5/0, height_2_6/0, height_2_6_8/0, height_2_7/0,
		height_2_7_1/0, height_2_7_2/0, height_2_8/0, height_2_9/0]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%% When FORKS_RESET is defined at build time, every fork activates at height 0.
%% Forks from 2.6 onward may additionally be pinned individually via the
%% FORK_X_Y_HEIGHT macros, which take precedence over FORKS_RESET.
-ifdef(FORKS_RESET).
height_1_6() -> 0.
-else.
height_1_6() -> 95000.
-endif.

-ifdef(FORKS_RESET).
height_1_7() -> 0.
-else.
height_1_7() -> 235200. % Targeting 2019-07-08 UTC
-endif.

-ifdef(FORKS_RESET).
height_1_8() -> 0.
-else.
height_1_8() -> 269510. % Targeting 2019-08-29 UTC
-endif.

-ifdef(FORKS_RESET).
height_1_9() -> 0.
-else.
height_1_9() -> 315700. % Targeting 2019-11-04 UTC
-endif.

-ifdef(FORKS_RESET).
height_2_0() -> 0.
-else.
height_2_0() -> 422250. % Targeting 2020-04-09 10:00 UTC
-endif.

-ifdef(FORKS_RESET).
height_2_2() -> 0.
-else.
height_2_2() -> 552180. % Targeting 2020-10-21 13:00 UTC
-endif.

-ifdef(FORKS_RESET).
height_2_3() -> 0.
-else.
height_2_3() -> 591140. % Targeting 2020-12-21 11:00 UTC
-endif.

-ifdef(FORKS_RESET).
height_2_4() -> 0.
-else.
height_2_4() -> 633720. % Targeting 2021-02-24 11:50 UTC
-endif.

-ifdef(FORKS_RESET).
height_2_5() -> 0.
-else.
height_2_5() -> 812970.
-endif.

-ifdef(FORK_2_6_HEIGHT).
height_2_6() -> ?FORK_2_6_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_6() -> 0.
-else.
height_2_6() -> 1132210. % Targeting 2023-03-06 14:00 UTC
-endif.
-endif.

-ifdef(FORK_2_6_8_HEIGHT).
height_2_6_8() -> ?FORK_2_6_8_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_6_8() -> 0.
-else.
height_2_6_8() -> 1189560. % Targeting 2023-05-30 16:00 UTC
-endif.
-endif.

-ifdef(FORK_2_7_HEIGHT).
height_2_7() -> ?FORK_2_7_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_7() -> 0.
-else.
height_2_7() -> 1275480. % Targeting 2023-10-04 14:00 UTC
-endif.
-endif.

-ifdef(FORK_2_7_1_HEIGHT).
height_2_7_1() -> ?FORK_2_7_1_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_7_1() -> 0.
-else.
height_2_7_1() -> 1316410. % Targeting 2023-12-05 14:00 UTC
-endif.
-endif.

-ifdef(FORK_2_7_2_HEIGHT).
height_2_7_2() -> ?FORK_2_7_2_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_7_2() -> 0.
-else.
height_2_7_2() -> 1391330. % Targeting 2024-03-26 14:00 UTC
-endif.
-endif.

-ifdef(FORK_2_8_HEIGHT).
height_2_8() -> ?FORK_2_8_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_8() -> 0.
-else.
height_2_8() -> 1547120. % Targeting 2024-11-13 14:00 UTC
-endif.
-endif.

-ifdef(FORK_2_9_HEIGHT).
height_2_9() -> ?FORK_2_9_HEIGHT.
-else.
-ifdef(FORKS_RESET).
height_2_9() -> 0.
-else.
height_2_9() -> 1602350. % Targeting 2025-02-03 14:00 UTC
-endif.
-endif.


================================================
FILE: apps/arweave/src/ar_fraction.erl
================================================
%%% @doc The module with utilities for performing computations on fractions.
-module(ar_fraction).

-export([pow/2, natural_exponent/2, factorial/1, minimum/2, maximum/2, multiply/2,
		reduce/2, add/2]).

%%%===================================================================
%%% Types.
%%%===================================================================

%% A fraction represented as {Dividend, Divisor}; arithmetic is exact
%% (arbitrary-precision integers), fractions are not auto-reduced.
-type fraction() :: {integer(), integer()}.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Compute the given power of the given integer.
%% Uses exponentiation by squaring for N > 3.
-spec pow(X::integer(), P::integer()) -> integer().
pow(_X, 0) -> 1;
pow(X, 1) -> X;
pow(X, 2) -> X * X;
pow(X, 3) -> X * X * X;
pow(X, N) ->
	case N rem 2 of
		0 -> pow(X * X, N div 2);
		1 -> X * pow(X * X, N div 2)
	end.

%% @doc Compute the X's power of e by summing up the terms of the Taylor series where
%% the last term is a multiple of X to the power of P.
-spec natural_exponent(X::fraction(), P::integer()) -> fraction().
natural_exponent({0, _Divisor}, _P) ->
	%% e^0 = 1.
	{1, 1};
natural_exponent(X, P) ->
	{natural_exponent_dividend(X, P, 0, 1), natural_exponent_divisor(X, P)}.

%% @doc Return the smaller of D1 and D2.
-spec minimum(D1::fraction(), D2::fraction()) -> fraction().
%% Compare via cross-multiplication so no precision is lost.
%% NOTE(review): the comparison direction assumes positive divisors; with a
%% negative divisor the inequality would flip - confirm callers never pass one.
minimum({Dividend1, Divisor1} = D1, {Dividend2, Divisor2} = D2) ->
	case Dividend1 * Divisor2 < Dividend2 * Divisor1 of
		true ->
			D1;
		false ->
			D2
	end.

%% @doc Return the bigger of D1 and D2.
-spec maximum(D1::fraction(), D2::fraction()) -> fraction().
maximum(D1, D2) ->
	%% Whichever of the two minimum/2 returns, give back the other one.
	case minimum(D1, D2) of
		D1 -> D2;
		D2 -> D1
	end.

%% @doc Return the product of D1 and D2. The result is not reduced.
-spec multiply(D1::fraction(), D2::fraction()) -> fraction().
multiply({Dividend1, Divisor1}, {Dividend2, Divisor2}) ->
	{Dividend1 * Dividend2, Divisor1 * Divisor2}.

%% @doc Reduce the fraction until both the divisor and dividend are smaller than
%% or equal to Max. Return at most Max or at least 1 / Max.
-spec reduce(D::fraction(), Max::integer()) -> fraction().
reduce({0, Divisor}, _Max) ->
	{0, Divisor};
reduce({Dividend, Divisor}, Max) ->
	%% First remove the greatest common divisor, then (if still too big)
	%% halve both components via reduce2/2, losing precision.
	GCD = gcd(Dividend, Divisor),
	reduce2({Dividend div GCD, Divisor div GCD}, Max).

%% @doc Return the sum of two fractions. The result is not reduced.
%% Note: the spec previously declared the second argument as integer(), but the
%% implementation (and all arithmetic in this module) takes a fraction.
-spec add(A::fraction(), B::fraction()) -> fraction().
add({Dividend1, Divisor1}, {Dividend2, Divisor2}) ->
	{Dividend1 * Divisor2 + Dividend2 * Divisor1, Divisor1 * Divisor2}.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Sum of the Taylor-series numerators over the common denominator
%% Divisor^P * P!: computes sum_{k=0..P} Dividend^(P-k) * Divisor^k * P!/(P-k)!
%% by accumulating the falling factorial in M.
natural_exponent_dividend(_X, -1, _K, _M) ->
	0;
natural_exponent_dividend({Dividend, Divisor} = X, P, K, M) ->
	pow(Dividend, P) * pow(Divisor, K) * M
			+ natural_exponent_dividend(X, P - 1, K + 1, M * P).

%% Common denominator of the truncated Taylor series: Divisor^P * P!.
natural_exponent_divisor({_Dividend, Divisor}, P) ->
	pow(Divisor, P) * factorial(P).

%% @doc Compute N!. A few frequently used values are precomputed to shortcut
%% the recursion.
factorial(0) -> 1;
factorial(1) -> 1;
factorial(9) -> 362880;
factorial(10) -> 3628800;
factorial(20) -> 2432902008176640000;
factorial(N) ->
	N * factorial(N - 1).
%% Halve both components until they fit under Max. When one side would reach
%% zero, clamp the fraction to Max/1 or 1/Max respectively.
reduce2({Dividend, Divisor}, Max) when Dividend > Max ->
	case Divisor div 2 of
		0 ->
			{Max, 1};
		_ ->
			reduce({Dividend div 2, Divisor div 2}, Max)
	end;
reduce2({Dividend, Divisor}, Max) when Divisor > Max ->
	case Dividend div 2 of
		0 ->
			{1, Max};
		_ ->
			reduce({Dividend div 2, Divisor div 2}, Max)
	end;
reduce2(R, _Max) ->
	R.

%% Euclidean greatest common divisor.
%% NOTE(review): badarith on a zero argument (rem by 0); reduce/2 never passes
%% a zero dividend (handled separately) - confirm divisors are always non-zero.
gcd(A, B) when B > A -> gcd(B, A);
gcd(A, B) when A rem B > 0 -> gcd(B, A rem B);
gcd(A, B) when A rem B =:= 0 -> B.


================================================
FILE: apps/arweave/src/ar_global_sync_record.erl
================================================
-module(ar_global_sync_record).

-behaviour(gen_server).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_data_discovery.hrl").
-include("ar_sync_buckets.hrl").

-export([start_link/0, get_serialized_sync_record/1, get_serialized_sync_buckets/0,
		get_serialized_footprint_buckets/0]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

%% The frequency in seconds of updating serialized sync buckets.
-ifdef(AR_TEST).
-define(UPDATE_SERIALIZED_SYNC_BUCKETS_FREQUENCY_S, 2).
-else.
-define(UPDATE_SERIALIZED_SYNC_BUCKETS_FREQUENCY_S, 300).
-endif.

-record(state, {
	sync_record,       %% ar_intervals set of synced (non-replica-2.9) data
	sync_buckets,      %% ar_sync_buckets summary of sync_record
	footprint_record,  %% ar_intervals set in the footprint offset space
	footprint_buckets  %% ar_sync_buckets summary of footprint_record
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Return a set of data intervals from all configured storage modules.
%%
%% Args is a map with the following keys
%%
%% format			required	etf or json	serialize in Erlang Term Format or JSON
%% random_subset	optional	any()		pick a random subset if the key is present
%% start			optional	integer()	pick intervals with right bound >= start
%% right_bound		optional	integer()	pick intervals with right bound =< right_bound
%% limit			optional	integer()	the number of intervals to pick
%%
%% ?MAX_SHARED_SYNCED_INTERVALS_COUNT is both the default and the maximum value for limit.
%% If random_subset key is present, a random subset of intervals is picked, the start key is
%% ignored. If random_subset key is not present, the start key must be provided.
get_serialized_sync_record(Args) ->
	%% The catch converts a gen_server call timeout into {error, timeout}
	%% instead of crashing the caller.
	case catch gen_server:call(?MODULE, {get_serialized_sync_record, Args}, 10000) of
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			{error, timeout};
		Reply ->
			Reply
	end.

%% @doc Return an ETF-serialized compact but imprecise representation of the synced data -
%% a bucket size and a map where every key is the sequence number of the bucket, every value -
%% the percentage of data synced in the reported bucket.
get_serialized_sync_buckets() ->
	case ets:lookup(?MODULE, serialized_sync_buckets) of
		[] ->
			{error, not_initialized};
		[{_, SerializedSyncBuckets}] ->
			{ok, SerializedSyncBuckets}
	end.

%% @doc Return an ETF-serialized compact but imprecise representation of the synced footprints -
%% a bucket size and a map where every key is the sequence number of the bucket, every value -
%% the percentage of data synced in the reported bucket. Every bucket contains one or more
%% footprints. Note that while footprints in the buckets are adjacent in the sense that
%% the first chunk in the first footprint is adjacent to the first chunk in the second footprint,
%% chunks are generally not adjacent because of how the logic of 2.9 footprints goes.
get_serialized_footprint_buckets() ->
	case ets:lookup(?MODULE, serialized_footprint_buckets) of
		[] ->
			{error, not_initialized};
		[{_, SerializedFootprintBuckets}] ->
			{ok, SerializedFootprintBuckets}
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Build the initial sync and footprint records from all configured
%% storage modules, cache their serialized bucket summaries in ETS, and
%% subscribe to sync_record events to keep them up to date.
init([]) ->
	ok = ar_events:subscribe(sync_record),
	SyncRecord = get_sync_record(),
	SyncBuckets = cache_and_get_sync_buckets(SyncRecord, serialized_sync_buckets,
			ar_sync_buckets:new()),
	FootprintRecord = get_footprint_record(),
	FootprintBuckets = cache_and_get_sync_buckets(FootprintRecord, serialized_footprint_buckets,
			ar_sync_buckets:new(?NETWORK_FOOTPRINT_BUCKET_SIZE)),
	{ok, #state{ sync_record = SyncRecord, sync_buckets = SyncBuckets,
			footprint_record = FootprintRecord, footprint_buckets = FootprintBuckets }}.

handle_call({get_serialized_sync_record, Args}, _From, State) ->
	#state{ sync_record = SyncRecord } = State,
	%% Cap the requested limit at the shared maximum.
	Limit = min(maps:get(limit, Args, ?MAX_SHARED_SYNCED_INTERVALS_COUNT),
			?MAX_SHARED_SYNCED_INTERVALS_COUNT),
	{reply, {ok, ar_intervals:serialize(Args#{ limit => Limit }, SyncRecord)}, State};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Periodic re-serialization of the sync buckets; reschedules itself.
handle_cast({update_serialized_sync_buckets, serialized_sync_buckets = Key}, State) ->
	#state{ sync_buckets = SyncBuckets } = State,
	{SyncBuckets2, SerializedSyncBuckets} = ar_sync_buckets:serialize(SyncBuckets,
			?MAX_SYNC_BUCKETS_SIZE),
	ets:insert(?MODULE, {Key, SerializedSyncBuckets}),
	ar_util:cast_after(?UPDATE_SERIALIZED_SYNC_BUCKETS_FREQUENCY_S * 1000, ?MODULE,
			{update_serialized_sync_buckets, Key}),
	{noreply, State#state{ sync_buckets = SyncBuckets2 }};
%% Same periodic re-serialization for the footprint buckets.
handle_cast({update_serialized_sync_buckets, serialized_footprint_buckets = Key}, State) ->
	#state{ footprint_buckets = FootprintBuckets } = State,
	{FootprintBuckets2, SerializedFootprintBuckets} = ar_sync_buckets:serialize(
			FootprintBuckets, ?MAX_SYNC_BUCKETS_SIZE),
	ets:insert(?MODULE, {Key, SerializedFootprintBuckets}),
	ar_util:cast_after(?UPDATE_SERIALIZED_SYNC_BUCKETS_FREQUENCY_S * 1000, ?MODULE,
			{update_serialized_sync_buckets, Key}),
	{noreply, State#state{ footprint_buckets = FootprintBuckets2 }};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State};
handle_cast(_, State) ->
	{noreply, State}.

handle_info({event, sync_record, {add_range, Start, End, ar_data_sync,
		#{ packing := Packing }}}, State) ->
	#state{ sync_record = SyncRecord, sync_buckets = SyncBuckets } = State,
	case Packing of
		{replica_2_9, _} ->
			%% Replica 2.9 data is recorded in the footprint record. It is synced
			%% footprint by footprint (not left to right).
			{noreply, State};
		_ ->
			SyncRecord2 = ar_intervals:add(SyncRecord, End, Start),
			SyncBuckets2 = ar_sync_buckets:add(End, Start, SyncBuckets),
			{noreply, State#state{ sync_record = SyncRecord2, sync_buckets = SyncBuckets2 }}
	end;
handle_info({event, sync_record, {add_range, Start, End, ar_data_sync_footprints, _Options}},
		State) ->
	State2 = update_footprint_data(Start, End, State),
	{noreply, State2};
handle_info({event, sync_record, {global_cut, Offset}}, State) ->
	#state{ sync_record = SyncRecord, sync_buckets = SyncBuckets } = State,
	SyncRecord2 = ar_intervals:cut(SyncRecord, Offset),
	SyncBuckets2 = ar_sync_buckets:cut(Offset, SyncBuckets),
	{noreply, State#state{ sync_record = SyncRecord2, sync_buckets = SyncBuckets2 }};
handle_info({event, sync_record, {global_remove_range, Start, End}}, State) ->
	%% Remove the byte range from the sync record and the corresponding
	%% footprint entries from the footprint record.
	#state{ sync_record = SyncRecord, sync_buckets = SyncBuckets } = State,
	SyncRecord2 = ar_intervals:delete(SyncRecord, End, Start),
	SyncBuckets2 = ar_sync_buckets:delete(End, Start, SyncBuckets),
	State2 = remove_footprint_data(Start, End, State),
	{noreply, State2#state{ sync_record = SyncRecord2, sync_buckets = SyncBuckets2 }};
handle_info({event, sync_record, _}, State) ->
	{noreply, State};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, terminate}, {module, ?MODULE},
			{reason, io_lib:format("~p", [Reason])}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Union of the ar_data_sync records of all non-replica-2.9 storage modules,
%% plus the default module.
get_sync_record() ->
	{ok, Config} = arweave_config:get_env(),
	lists:foldl(
		fun(Module, Acc) ->
			case Module of
				{_, _, {replica_2_9, _}} ->
					%% Replica 2.9 data is recorded in the footprint record. It is synced
					%% footprint by footprint (not left to right).
					Acc;
				_ ->
					StoreID = ar_storage_module:id(Module),
					ar_intervals:union(ar_sync_record:get(ar_data_sync, StoreID), Acc)
			end
		end,
		ar_intervals:new(),
		[?DEFAULT_MODULE | Config#config.storage_modules]
	).

%% Union of the footprint records of all configured storage modules.
get_footprint_record() ->
	{ok, Config} = arweave_config:get_env(),
	lists:foldl(
		fun(Module, Acc) ->
			StoreID = ar_storage_module:id(Module),
			ar_intervals:union(ar_sync_record:get(ar_data_sync_footprints, StoreID), Acc)
		end,
		ar_intervals:new(),
		Config#config.storage_modules
	).

%% Serialize the given record into buckets, cache it in ETS under Key, and
%% schedule the periodic re-serialization for that Key.
cache_and_get_sync_buckets(SyncRecord, Key, SyncBuckets) ->
	SyncBuckets2 = ar_sync_buckets:from_intervals(SyncRecord, SyncBuckets),
	{SyncBuckets3, SerializedSyncBuckets} = ar_sync_buckets:serialize(SyncBuckets2,
			?MAX_SYNC_BUCKETS_SIZE),
	ets:insert(?MODULE, {Key, SerializedSyncBuckets}),
	ar_util:cast_after(?UPDATE_SERIALIZED_SYNC_BUCKETS_FREQUENCY_S * 1000, ?MODULE,
			{update_serialized_sync_buckets, Key}),
	SyncBuckets3.

%% Record footprint offsets (Start, End] one by one.
%% NOTE(review): this adds the range unit-by-unit rather than in one
%% ar_intervals:add(Record, End, Start) call - presumably to mirror the
%% bucket accounting; confirm before collapsing into a single add.
update_footprint_data(Start, End, State) when Start >= End ->
	State;
update_footprint_data(Start, End, State) ->
	#state{ footprint_record = FootprintRecord, footprint_buckets = FootprintBuckets } = State,
	FootprintRecord2 = ar_intervals:add(FootprintRecord, Start + 1, Start),
	FootprintBuckets2 = ar_sync_buckets:add(Start + 1, Start, FootprintBuckets),
	State2 = State#state{ footprint_record = FootprintRecord2,
			footprint_buckets = FootprintBuckets2 },
	update_footprint_data(Start + 1, End, State2).

%% Remove footprint entries for every chunk in the byte range (Start, End]:
%% each chunk's byte offset is translated into the footprint offset space
%% and the corresponding unit interval is deleted.
remove_footprint_data(Start, End, State) when Start >= End ->
	State;
remove_footprint_data(Start, End, State) ->
	#state{ footprint_record = FootprintRecord, footprint_buckets = FootprintBuckets } = State,
	Offset = ar_footprint_record:get_offset(Start + ?DATA_CHUNK_SIZE),
	FootprintRecord2 = ar_intervals:delete(FootprintRecord, Offset, Offset - 1),
	FootprintBuckets2 = ar_sync_buckets:delete(Offset, Offset - 1, FootprintBuckets),
	State2 = State#state{ footprint_record = FootprintRecord2,
			footprint_buckets = FootprintBuckets2 },
	remove_footprint_data(Start + ?DATA_CHUNK_SIZE, End, State2).
================================================
FILE: apps/arweave/src/ar_header_sync.erl
================================================
-module(ar_header_sync).

-behaviour(gen_server).

-export([start_link/0, join/3, add_tip_block/2, add_block/1, request_tx_removal/1,
		remove_block/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_header_sync.hrl").
-include_lib("arweave/include/ar_data_sync.hrl").
-include_lib("arweave/include/ar_chunk_storage.hrl").

%%% This module syncs block and transaction headers and maintains a persisted record of synced
%%% headers. Headers are synced from latest to earliest.

-record(state, {
	block_index,              %% the recent block index entries tracked by this process
	height,                   %% the current tip height
	sync_record,              %% ar_intervals set of synced header heights
	retry_queue,              %% queue of block download retries
	retry_record,             %% ar_intervals set of heights currently queued for retry
	is_disk_space_sufficient  %% false pauses historical header syncing
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the gen_server registered under the module name.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Update the tip after the node joins the network.
join(Height, RecentBI, Blocks) ->
	gen_server:cast(?MODULE, {join, Height, RecentBI, Blocks}).

%% @doc Add a new tip block to the index and storage, record the new recent block index.
add_tip_block(B, RecentBI) ->
	gen_server:cast(?MODULE, {add_tip_block, B, RecentBI}).

%% @doc Add a block to the index and storage.
add_block(B) ->
	gen_server:cast(?MODULE, {add_block, B}).

%% @doc Remove the given transaction.
request_tx_removal(TXID) ->
	gen_server:cast(?MODULE, {remove_tx, TXID}).

%% @doc Remove the block header with the given Height from the record. The process
%% will therefore re-sync it later (if there is available disk space).
remove_block(Height) ->
	gen_server:cast(?MODULE, {remove_block, Height}).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Restore the persisted sync state (or start fresh), open the header kv
%% store, and kick off header_sync_jobs concurrent process_item loops plus the
%% periodic state persistence.
init([]) ->
	?LOG_INFO([{event, ar_header_sync_start}]),
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	[ok, ok] = ar_events:subscribe([tx, disksup]),
	{ok, Config} = arweave_config:get_env(),
	ok = ar_kv:open(#{
			path => filename:join([Config#config.data_dir, ?ROCKS_DB_DIR, "ar_header_sync_db"]),
			name => ?MODULE}),
	{SyncRecord, Height, CurrentBI} =
		case ar_storage:read_term(header_sync_state) of
			not_found ->
				{ar_intervals:new(), -1, []};
			{ok, StoredState} ->
				StoredState
		end,
	lists:foreach(
		fun(_) ->
			gen_server:cast(self(), process_item)
		end,
		lists:seq(1, Config#config.header_sync_jobs)
	),
	gen_server:cast(self(), store_sync_state),
	ets:insert(?MODULE, {synced_blocks, ar_intervals:sum(SyncRecord)}),
	{ok, #state{ sync_record = SyncRecord, height = Height, block_index = CurrentBI,
			retry_queue = queue:new(), retry_record = ar_intervals:new(),
			is_disk_space_sufficient = true }}.

%% Reconcile the stored block index with the new one at join time: cut the
%% sync and retry records at the intersection height and drop the now-stale
%% kv entries above it, then index the provided recent blocks.
handle_cast({join, Height, RecentBI, Blocks}, State) ->
	#state{ height = PrevHeight, block_index = CurrentBI, sync_record = SyncRecord,
			retry_record = RetryRecord } = State,
	State2 = State#state{ height = Height, block_index = RecentBI },
	StartHeight = PrevHeight - length(CurrentBI) + 1,
	{Status, State4} =
		case {CurrentBI, ar_block_index:get_intersection(StartHeight, CurrentBI)} of
			{[], _} ->
				{ok, State2};
			{_, no_intersection} ->
				io:format("~nWARNING: the stored block index of the header syncing module "
						"has no intersection with the "
						"new one in the most recent blocks. If you have just started a new "
						"weave using the init option, restart from the local state "
						"or specify some peers.~n~n"),
				init:stop(1),
				{error, State2};
			{_, {IntersectionHeight, _}} ->
				State3 = State2#state{
						sync_record = ar_intervals:cut(SyncRecord, IntersectionHeight),
						retry_record = ar_intervals:cut(RetryRecord, IntersectionHeight) },
				ok = store_sync_state(State3),
				%% Delete from the kv store only after the sync record is saved - no matter
				%% what happens to the process, if a height is in the record, it must be
				%% present in the kv store.
				ok = ar_kv:delete_range(?MODULE, << (IntersectionHeight + 1):256 >>,
						<< (PrevHeight + 1):256 >>),
				{ok, State3}
		end,
	case Status of
		error ->
			{noreply, State4};
		ok ->
			State5 = lists:foldl(
				fun(B, Acc) ->
					element(2, add_block(B, Acc))
				end,
				State4,
				Blocks
			),
			ok = store_sync_state(State5),
			{noreply, State5}
	end;
%% Advance the tip: cut the records at the fork base height, index the new
%% block, persist the state, and only then drop the orphaned kv entries.
handle_cast({add_tip_block, #block{ height = Height } = B, RecentBI}, State) ->
	#state{ sync_record = SyncRecord, retry_record = RetryRecord, block_index = CurrentBI,
			height = PrevHeight } = State,
	BaseHeight = get_base_height(CurrentBI, PrevHeight, RecentBI),
	State2 = State#state{
			sync_record = ar_intervals:cut(SyncRecord, BaseHeight),
			retry_record = ar_intervals:cut(RetryRecord, BaseHeight),
			block_index = RecentBI,
			height = Height },
	State3 = element(2, add_block(B, State2)),
	case store_sync_state(State3) of
		ok ->
			%% Delete from the kv store only after the sync record is saved - no matter
			%% what happens to the process, if a height is in the record, it must be present
			%% in the kv store.
			ok = ar_kv:delete_range(?MODULE, << (BaseHeight + 1):256 >>,
					<< (PrevHeight + 1):256 >>),
			{noreply, State3};
		Error ->
			?LOG_WARNING([{event, failed_to_store_state},
					{reason, io_lib:format("~p", [Error])}]),
			{noreply, State}
	end;
%% Skip indexing historical blocks while disk space is insufficient; keep the
%% worker loop alive.
handle_cast({add_historical_block, _, _, _, _, _},
		#state{ is_disk_space_sufficient = false } = State) ->
	gen_server:cast(self(), process_item),
	{noreply, State};
handle_cast({add_historical_block, B, H, H2, TXRoot, Backoff}, State) ->
	case add_block(B, State) of
		{ok, State2} ->
			gen_server:cast(self(), process_item),
			{noreply, State2};
		{_Error, State2} ->
			%% Re-queue the block for a retry with the current backoff.
			gen_server:cast(self(), {failed_to_get_block, H, H2, TXRoot, B#block.height,
					Backoff}),
			{noreply, State2}
	end;
handle_cast({add_block, B}, State) ->
	{noreply, element(2, add_block(B, State))};
%% Pause the worker loop while disk space is insufficient.
handle_cast(process_item, #state{ is_disk_space_sufficient = false } = State) ->
	ar_util:cast_after(?CHECK_AFTER_SYNCED_INTERVAL_MS, self(), process_item),
	{noreply, State};
%% Process one item off the retry queue and, if there is an unsynced height,
%% enqueue it for download.
handle_cast(process_item, #state{ retry_queue = Queue, retry_record = RetryRecord } = State) ->
	prometheus_gauge:set(downloader_queue_size, queue:len(Queue)),
	Queue2 = process_item(Queue),
	State2 = State#state{ retry_queue = Queue2 },
	case pick_unsynced_block(State) of
		nothing_to_sync ->
			{noreply, State2};
		Height ->
			case ar_node:get_block_index_entry(Height) of
				not_joined ->
					{noreply, State2};
				not_found ->
					{noreply, State2};
				{H, _WeaveSize, TXRoot} ->
					%% Before 2.0, to compute a block hash, the complete wallet list
					%% and all the preceding hashes were required. Getting a wallet list
					%% and a hash list for every historical block to verify it belongs to
					%% the weave is very costly. Therefore, a list of 2.0 hashes for 1.0
					%% blocks was computed and stored along with the network client.
					H2 =
						case Height < ar_fork:height_2_0() of
							true ->
								ar_node:get_2_0_hash_of_1_0_block(Height);
							false ->
								not_set
						end,
					{noreply, State2#state{
							retry_queue = enqueue({block, {H, H2, TXRoot, Height}}, Queue2),
							retry_record = ar_intervals:add(RetryRecord, Height,
									Height - 1) }}
			end
	end;
%% Re-enqueue a failed block with an increased backoff.
handle_cast({failed_to_get_block, H, H2, TXRoot, Height, Backoff},
		#state{ retry_queue = Queue } = State) ->
	Backoff2 = update_backoff(Backoff),
	Queue2 = enqueue({block, {H, H2, TXRoot, Height}}, Backoff2, Queue),
	gen_server:cast(self(), process_item),
	{noreply, State#state{ retry_queue = Queue2 }};
handle_cast({remove_tx, TXID}, State) ->
	{ok, _Size} = ar_storage:delete_blacklisted_tx(TXID),
	ar_tx_blacklist:notify_about_removed_tx(TXID),
	{noreply, State};
handle_cast({remove_block, Height}, State) ->
	#state{ sync_record = Record } = State,
	ok = ar_kv:delete(?MODULE, << Height:256 >>),
	{noreply, State#state{ sync_record = ar_intervals:delete(Record, Height, Height - 1) }};
%% Periodic state persistence; reschedules itself.
handle_cast(store_sync_state, State) ->
	ar_util:cast_after(?STORE_HEADER_STATE_FREQUENCY_MS, self(), store_sync_state),
	case store_sync_state(State) of
		ok ->
			{noreply, State};
		Error ->
			?LOG_WARNING([{event, failed_to_store_state},
					{reason, io_lib:format("~p", [Error])}]),
			{noreply, State}
	end;
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

handle_call(_Msg, _From, State) ->
	{reply, not_implemented, State}.
%% A transaction is about to be unblacklisted. If we know the block that
%% confirmed it, drop that height from both the sync and retry records (and
%% the kv store) so the block gets re-synced, then acknowledge with
%% ready_for_unblacklisting.
handle_info({event, tx, {preparing_unblacklisting, TXID}}, State) ->
	#state{ sync_record = SyncRecord, retry_record = RetryRecord } = State,
	case ar_storage:get_tx_confirmation_data(TXID) of
		{ok, {Height, _BH}} ->
			?LOG_DEBUG([{event, mark_block_with_blacklisted_tx_for_resyncing},
					{tx, ar_util:encode(TXID)}, {height, Height}]),
			State2 = State#state{
					sync_record = ar_intervals:delete(SyncRecord, Height, Height - 1),
					retry_record = ar_intervals:delete(RetryRecord, Height, Height - 1) },
			%% Persist the shrunk sync record before deleting the kv entry so a
			%% crash in between cannot leave a recorded height without kv data.
			ok = store_sync_state(State2),
			ok = ar_kv:delete(?MODULE, << Height:256 >>),
			ar_events:send(tx, {ready_for_unblacklisting, TXID}),
			{noreply, State2};
		not_found ->
			ar_events:send(tx, {ready_for_unblacklisting, TXID}),
			{noreply, State};
		{error, Reason} ->
			%% NOTE(review): on a read error the acknowledgement is not sent -
			%% presumably the unblacklisting flow retries later; confirm against
			%% ar_tx_blacklist.
			?LOG_WARNING([{event, failed_to_read_tx_confirmation_index},
					{error, io_lib:format("~p", [Reason])}]),
			{noreply, State}
	end;
%% Ignore all other tx events.
handle_info({event, tx, _}, State) ->
	{noreply, State};
%% Disk space report for this module. Toggles is_disk_space_sufficient with
%% hysteresis: syncing stops below DiskPool + DiskCache + Buffer/2 bytes and
%% resumes only above DiskPool + DiskCache + Buffer bytes.
handle_info({event, disksup, {remaining_disk_space, ?DEFAULT_MODULE, true, _Percentage, Bytes}},
		State) ->
	{ok, Config} = arweave_config:get_env(),
	DiskPoolSize = Config#config.max_disk_pool_buffer_mb * ?MiB,
	%% 1048576 bytes == 1 MiB; disk_cache_size is configured in MiB.
	DiskCacheSize = Config#config.disk_cache_size * 1048576,
	%% Fixed 10 GB safety buffer.
	BufferSize = 10_000_000_000,
	%% div binds tighter than +, so this reads DiskPool + DiskCache + (Buffer / 2).
	case Bytes < DiskPoolSize + DiskCacheSize + BufferSize div 2 of
		true ->
			case State#state.is_disk_space_sufficient of
				true ->
					%% Announce the transition only once.
					Msg = "~nThe node has stopped syncing headers. Add more disk space "
							"if you wish to store more block and transaction headers. "
							"The node will keep recording account tree updates and "
							"transaction confirmations - they do not take up a lot of "
							"space but you need to make sure the remaining disk space "
							"stays available for the node.~n~n"
							"The mining performance is not affected.~n",
					ar:console(Msg, []),
					?LOG_INFO([{event, ar_header_sync_stopped_syncing},
							{reason, insufficient_disk_space}]);
				false ->
					ok
			end,
			{noreply, State#state{ is_disk_space_sufficient = false }};
		false ->
			case Bytes > DiskPoolSize + DiskCacheSize + BufferSize of
				true ->
					case State#state.is_disk_space_sufficient of
						true ->
							ok;
						false ->
							Msg = "The available disk space has been detected, "
									"resuming header syncing.~n",
							ar:console(Msg, []),
							?LOG_INFO([{event, ar_header_sync_resumed_syncing}])
					end,
					{noreply, State#state{ is_disk_space_sufficient = true }};
				false ->
					%% Inside the hysteresis band: keep the current flag.
					{noreply, State}
			end
	end;
handle_info({event, disksup, _}, State) ->
	{noreply, State};
%% normal/noproc exits of download workers are expected; any other exit is
%% logged and process_item is cast so syncing continues with a new worker.
handle_info({'DOWN', _, process, _, normal}, State) ->
	{noreply, State};
handle_info({'DOWN', _, process, _, noproc}, State) ->
	{noreply, State};
handle_info({'DOWN', _, process, _, Reason}, State) ->
	?LOG_WARNING([{event, header_sync_job_failed}, {reason, io_lib:format("~p", [Reason])},
			{action, spawning_another_one}]),
	gen_server:cast(self(), process_item),
	{noreply, State};
handle_info({_Ref, _Atom}, State) ->
	%% Some older versions of Erlang OTP have a bug where gen_tcp:close may leak
	%% a message. https://github.com/ninenines/gun/issues/193,
	%% https://bugs.erlang.org/browse/ERL-1049.
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {message, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, ar_header_sync_terminate}, {reason, Reason}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Persist the synced-intervals record together with the current height and
%% block index, and publish the synced block count to metrics and ets.
store_sync_state(State) ->
	#state{ sync_record = SyncRecord, height = LastHeight, block_index = BI } = State,
	SyncedCount = ar_intervals:sum(SyncRecord),
	prometheus_gauge:set(synced_blocks, SyncedCount),
	ets:insert(?MODULE, {synced_blocks, SyncedCount}),
	ar_storage:write_term(header_sync_state, {SyncRecord, LastHeight, BI}).

%% Walk down the current block index until a hash also present in the recent
%% block index is found; the height of that entry is the reorg base height.
%% NOTE(review): assumes the two indices intersect - there is no clause for
%% the empty list; confirm callers guarantee a common ancestor.
get_base_height([{H, _, _} | CurrentBI], CurrentHeight, RecentBI) ->
	case lists:search(fun({BH, _, _}) -> BH == H end, RecentBI) of
		false ->
			get_base_height(CurrentBI, CurrentHeight - 1, RecentBI);
		_ ->
			CurrentHeight
	end.

%% Store the given block if it still belongs to the weave (see check_fork/3).
%% Also feeds the block to ar_data_sync, except for the mainnet genesis block.
add_block(B, State) ->
	case check_fork(B#block.height, B#block.indep_hash, B#block.tx_root) of
		false ->
			{ok, State};
		true ->
			case B#block.height == 0 andalso ?NETWORK_NAME == "arweave.N.1" of
				true ->
					ok;
				false ->
					ar_data_sync:add_block(B, B#block.size_tagged_txs)
			end,
			add_block2(B, State)
	end.

%% With insufficient disk space, only record the confirmation index; the full
%% header is not written and the sync record is left untouched.
add_block2(B, #state{ is_disk_space_sufficient = false } = State) ->
	case ar_storage:update_confirmation_index(B) of
		ok ->
			{ok, State};
		Error ->
			?LOG_ERROR([{event, failed_to_record_block_confirmations},
					{reason, io_lib:format("~p", [Error])}]),
			{Error, State}
	end;
%% Write the full block; on success record the height - kv entry first, then
%% the in-memory sync record (flushed later by store_sync_state) - and clear
%% the height from the retry record.
add_block2(B, #state{ sync_record = SyncRecord, retry_record = RetryRecord } = State) ->
	#block{ indep_hash = H, previous_block = PrevH, height = Height } = B,
	case ar_storage:write_full_block(B, B#block.txs) of
		ok ->
			case ar_intervals:is_inside(SyncRecord, Height) of
				true ->
					{ok, State};
				false ->
					ok = ar_kv:put(?MODULE, << Height:256 >>, term_to_binary({H, PrevH})),
					SyncRecord2 = ar_intervals:add(SyncRecord, Height, Height - 1),
					RetryRecord2 = ar_intervals:delete(RetryRecord, Height, Height - 1),
					{ok, State#state{ sync_record = SyncRecord2,
							retry_record = RetryRecord2 }}
			end;
		{error, Reason} ->
			?LOG_WARNING([{event, failed_to_store_block}, {block, ar_util:encode(H)},
					{height, Height}, {reason, Reason}]),
			{{error, Reason}, State}
	end.
%% @doc Return the latest height we have not synced or put in the retry queue yet.
%% Return 'nothing_to_sync' if everything is either synced or in the retry queue.
pick_unsynced_block(#state{ height = Height, sync_record = SyncRecord,
		retry_record = RetryRecord }) ->
	Union = ar_intervals:union(SyncRecord, RetryRecord),
	case ar_intervals:is_empty(Union) of
		true ->
			Height;
		false ->
			case ar_intervals:take_largest(Union) of
				{{End, _Start}, _Union2} when Height > End ->
					%% The tip itself is not covered yet.
					Height;
				{{_End, -1}, _Union2} ->
					%% The largest interval reaches down to height 0.
					nothing_to_sync;
				{{_End, Start}, _Union2} ->
					%% Intervals are (End, Start] - Start is the first
					%% uncovered height below the largest interval.
					Start
			end
	end.

%% Add the item to the retry queue with a fresh backoff.
enqueue(Item, Queue) ->
	queue:in({Item, initial_backoff()}, Queue).

%% The initial backoff: eligible immediately, retry interval starts at
%% ?INITIAL_BACKOFF_INTERVAL_S. Uses the 'second' time unit for consistency
%% with process_item/1 and update_backoff/1 ('seconds' is the legacy alias).
initial_backoff() ->
	{os:system_time(second), ?INITIAL_BACKOFF_INTERVAL_S}.

%% Pop one item off the retry queue. Items whose backoff has not expired are
%% requeued; otherwise a worker process is spawned (and monitored) to download
%% the block, which reports back via failed_to_get_block/add_historical_block.
process_item(Queue) ->
	Now = os:system_time(second),
	case queue:out(Queue) of
		{empty, _Queue} ->
			ar_util:cast_after(?PROCESS_ITEM_INTERVAL_MS, self(), process_item),
			Queue;
		{{value, {Item, {BackoffTimestamp, _} = Backoff}}, Queue2}
				when BackoffTimestamp > Now ->
			%% Not due yet - put it back and try again later.
			ar_util:cast_after(?PROCESS_ITEM_INTERVAL_MS, self(), process_item),
			enqueue(Item, Backoff, Queue2);
		{{value, {{block, {H, H2, TXRoot, Height}}, Backoff}}, Queue2} ->
			case check_fork(Height, H, TXRoot) of
				false ->
					%% The block was orphaned; silently drop it.
					ok;
				true ->
					Parent = self(),
					monitor(process, spawn(
						fun() ->
							%% Trap exit to avoid corrupting any open files on quit.
							process_flag(trap_exit, true),
							case download_block(H, H2, TXRoot) of
								{error, _Reason} ->
									gen_server:cast(Parent, {failed_to_get_block, H,
											H2, TXRoot, Height, Backoff});
								{ok, B} ->
									gen_server:cast(Parent, {add_historical_block, B,
											H, H2, TXRoot, Backoff})
							end
						end
					))
			end,
			Queue2
	end.

%% Requeue the item keeping its current backoff.
enqueue(Item, Backoff, Queue) ->
	queue:in({Item, Backoff}, Queue).

%% Double the retry interval (capped at ?MAX_BACKOFF_INTERVAL_S) and schedule
%% the next attempt that far in the future.
update_backoff({_Timestamp, Interval}) ->
	Interval2 = min(?MAX_BACKOFF_INTERVAL_S, Interval * 2),
	{os:system_time(second) + Interval2, Interval2}.
%% Return true if the block with the given height, hash, and tx_root still
%% belongs to the weave. Pre-2.0 heights cannot be checked against the block
%% index here, so they are always accepted.
check_fork(Height, H, TXRoot) ->
	case Height < ar_fork:height_2_0() of
		true ->
			true;
		false ->
			case ar_node:get_block_index_entry(Height) of
				not_joined ->
					false;
				not_found ->
					false;
				%% H and TXRoot are bound - this clause matches only when both
				%% equal the index entry.
				{H, _WeaveSize, TXRoot} ->
					true;
				_ ->
					false
			end
	end.

%% Obtain the block identified by H (with 2.0-scheme hash H2 for pre-2.0
%% blocks), preferring the local copy; in either case fetch and verify the
%% transaction headers.
download_block(H, H2, TXRoot) ->
	Peers = ar_peers:get_peers(current),
	case ar_storage:read_block(H) of
		unavailable ->
			download_block(Peers, H, H2, TXRoot);
		B ->
			download_txs(Peers, B, TXRoot)
	end.

%% Download the block shadow from one of the peers and verify its independent
%% hash. Pre-2.0 blocks are rehashed with the 2.0 scheme (substituting the
%% expected TXRoot and sorting the transaction identifiers) and compared
%% against H2 instead of H.
download_block(Peers, H, H2, TXRoot) ->
	Fork_2_0 = ar_fork:height_2_0(),
	Opts = #{ rand_min => length(Peers) },
	case ar_http_iface_client:get_block_shadow(Peers, H, Opts) of
		unavailable ->
			?LOG_WARNING([
				{event, ar_header_sync_failed_to_download_block_header},
				{block, ar_util:encode(H)}
			]),
			{error, block_header_unavailable};
		{Peer, #block{ height = Height } = B, Time, BlockSize} ->
			BH = case Height >= Fork_2_0 of
				true ->
					ar_block:indep_hash(B);
				false ->
					ar_block:indep_hash(
						B#block{ tx_root = TXRoot, txs = lists:sort(B#block.txs) }
					)
			end,
			%% H and H2 are bound, so these patterns are equality checks; only
			%% peers that served a block matching the expected hash are rated.
			case BH of
				H when Height >= Fork_2_0 ->
					ar_peers:rate_fetched_data(Peer, block, Time, BlockSize),
					download_txs(Peers, B, TXRoot);
				H2 when Height < Fork_2_0 ->
					ar_peers:rate_fetched_data(Peer, block, Time, BlockSize),
					download_txs(Peers, B, TXRoot);
				_ ->
					?LOG_WARNING([
						{event, ar_header_sync_block_hash_mismatch},
						{block, ar_util:encode(H)},
						{peer, ar_util:format_peer(Peer)}
					]),
					{error, block_hash_mismatch}
			end
	end.
%% Fetch the block's transaction headers from the peers, rebuild the Merkle
%% tree of the size-tagged data roots, and accept the block only if the
%% recomputed root equals the expected TXRoot.
download_txs(Peers, B, TXRoot) ->
	case ar_http_iface_client:get_txs(Peers, B) of
		{ok, TXs} ->
			SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs, B#block.height),
			SizeTaggedDataRoots = [{Root, Offset} || {{_, Root}, Offset} <- SizeTaggedTXs],
			{Root, _Tree} = ar_merkle:generate_tree(SizeTaggedDataRoots),
			case Root of
				TXRoot ->
					{ok, B#block{ txs = TXs, size_tagged_txs = SizeTaggedTXs }};
				_ ->
					?LOG_WARNING([
						{event, ar_header_sync_block_tx_root_mismatch},
						{block, ar_util:encode(B#block.indep_hash)}
					]),
					{error, block_tx_root_mismatch}
			end;
		{error, txs_exceed_block_size_limit} ->
			?LOG_WARNING([
				{event, ar_header_sync_block_txs_exceed_block_size_limit},
				{block, ar_util:encode(B#block.indep_hash)}
			]),
			{error, txs_exceed_block_size_limit};
		{error, txs_count_exceeds_limit} ->
			?LOG_WARNING([
				{event, ar_header_sync_block_txs_count_exceeds_limit},
				{block, ar_util:encode(B#block.indep_hash)}
			]),
			{error, txs_count_exceeds_limit};
		{error, tx_not_found} ->
			?LOG_WARNING([
				{event, ar_header_sync_block_tx_not_found},
				{block, ar_util:encode(B#block.indep_hash)}
			]),
			{error, tx_not_found}
	end.


================================================
FILE: apps/arweave/src/ar_header_sync_sup.erl
================================================

%%% Supervisor for the header syncing gen_server.
-module(ar_header_sync_sup).

-behaviour(supervisor).

-export([start_link/1]).

-export([init/1]).

%%%===================================================================
%%% Public API.
%%%===================================================================

start_link(Args) ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, Args).

%%%===================================================================
%%% Supervisor callbacks.
%%%===================================================================

%% Supervise a single ar_header_sync child; restart it up to 10 times per
%% second before giving up.
init(Args) ->
	SupFlags = #{strategy => one_for_one, intensity => 10, period => 1},
	ChildSpec = #{
		id => ar_header_sync,
		start => {ar_header_sync, start_link, [Args]}
	},
	{ok, {SupFlags, [ChildSpec]}}.
================================================
FILE: apps/arweave/src/ar_http.erl
================================================

%%% A wrapper library for gun.
-module(ar_http).

-behaviour(gen_server).

-export([start_link/0, req/1]).

-ifdef(AR_TEST).
-export([block_peer_connections/0, unblock_peer_connections/0]).
-endif.

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% pid_by_peer: peer => gun connection pid.
%% status_by_pid: pid => {{connecting, PendingRequests} | connected, MonitorRef, Peer}.
-record(state, {
	pid_by_peer = #{},
	status_by_pid = #{}
}).

%%% ==================================================================
%%% Public interface.
%%% ==================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

-ifdef(AR_TEST).
%% Test helper: make subsequent peer requests fail with {error, blocked}.
block_peer_connections() ->
	ets:insert(?MODULE, {block_peer_connections}),
	ok.

%% Test helper: lift the block set by block_peer_connections/0.
unblock_peer_connections() ->
	ets:delete(?MODULE, block_peer_connections),
	ok.

%% Refuse new requests while the node is shutting down.
req(Args) ->
	case ar_shutdown_manager:state() of
		running ->
			req2(Args);
		shutdown ->
			{error, shutdown}
	end.

%% {Host, Port} peers are never subject to the test block.
req2(#{ peer := {_, _} } = Args) ->
	req(Args, false);
req2(#{ peer := Peer } = Args) ->
	{ok, Config} = arweave_config:get_env(),
	case Config#config.port == element(5, Peer) of
		true ->
			%% Do not block requests to self.
			req(Args, false);
		false ->
			case ets:lookup(?MODULE, block_peer_connections) of
				[{_}] ->
					%% Only block requests carrying the p2p port header.
					case lists:keyfind(<<"x-p2p-port">>, 1, maps:get(headers, Args, [])) of
						{_, _} ->
							{error, blocked};
						_ ->
							%% Do not block requests made from the test processes.
							req(Args, false)
					end;
				_ ->
					req(Args, false)
			end
	end.
-else.
req(Args) ->
	req(Args, false).
-endif.
%% Perform the HTTP request: obtain (or open) a connection to the peer via the
%% gen_server, run the request, and retry once when the failure looks like a
%% recoverable closed connection (ReestablishedConnection prevents looping).
req(Args, ReestablishedConnection) ->
	StartTime = erlang:monotonic_time(),
	#{ peer := Peer, path := Path, method := Method } = Args,
	ok = ar_rate_limiter:throttle(Peer, Path),
	Response = case catch gen_server:call(?MODULE, {get_connection, Args}, 15000) of
		{ok, PID} ->
			case request(PID, Args) of
				{error, Error} ->
					case {ReestablishedConnection, should_retry_closed_connection(Error)} of
						{false, true} ->
							%% Retry once over a freshly established connection.
							req(Args, true);
						{_, true} ->
							%% Already retried once; give up.
							{error, client_error};
						{_, false} ->
							{error, Error}
					end;
				Reply ->
					Reply
			end;
		{'EXIT', _} ->
			{error, client_error};
		Error ->
			Error
	end,
	EndTime = erlang:monotonic_time(),
	%% Only log the metric for the top-level call to req/2 - not the recursive call
	%% that happens when the connection is reestablished.
	case ReestablishedConnection of
		true ->
			ok;
		false ->
			%% NOTE: the erlang prometheus client looks at the metric name to
			%% determine units. If it sees _duration_ it assumes the observed value
			%% is in native units and it converts it to seconds. To query native
			%% units, use: erlang:monotonic_time() without any arguments.
			%% See: https://github.com/deadtrickster/prometheus.erl/blob/6dd56bf321e99688108bb976283a80e4d82b3d30/src/prometheus_time.erl#L2-L84
			prometheus_histogram:observe(ar_http_request_duration_seconds, [
					method_to_list(Method),
					ar_http_iface_server:label_http_path(list_to_binary(Path)),
					ar_metrics:get_status_class(Response)
				], EndTime - StartTime)
	end,
	Response.

%%% ==================================================================
%%% gen_server callbacks.
%%% ==================================================================

init([]) ->
	{ok, #state{}}.
%% Hand out the connection for the peer: reuse a connected one, queue the
%% caller on a connection that is still being established, or open a new one.
%% Callers queued while connecting are replied to from the gun_up handler.
handle_call({get_connection, Args}, From,
		#state{ pid_by_peer = PIDByPeer, status_by_pid = StatusByPID } = State) ->
	Peer = maps:get(peer, Args),
	case maps:get(Peer, PIDByPeer, not_found) of
		not_found ->
			{ok, PID} = open_connection(Args),
			MonitorRef = monitor(process, PID),
			PIDByPeer2 = maps:put(Peer, PID, PIDByPeer),
			StatusByPID2 = maps:put(PID,
					{{connecting, [{From, Args}]}, MonitorRef, Peer}, StatusByPID),
			{noreply, State#state{ pid_by_peer = PIDByPeer2,
					status_by_pid = StatusByPID2 }};
		PID ->
			case maps:get(PID, StatusByPID) of
				{{connecting, PendingRequests}, MonitorRef, Peer} ->
					%% Still connecting - queue this caller as well.
					StatusByPID2 = maps:put(PID,
							{{connecting, [{From, Args} | PendingRequests]},
							MonitorRef, Peer}, StatusByPID),
					{noreply, State#state{ status_by_pid = StatusByPID2 }};
				{connected, _MonitorRef, Peer} ->
					{reply, {ok, PID}, State}
			end
	end;
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% The connection is up: reply to every caller queued while connecting and
%% mark the connection as connected.
handle_info({gun_up, PID, _Protocol}, #state{ status_by_pid = StatusByPID } = State) ->
	case maps:get(PID, StatusByPID, not_found) of
		not_found ->
			%% A connection timeout should have occurred.
			{noreply, State};
		{{connecting, PendingRequests}, MonitorRef, Peer} ->
			[gen_server:reply(ReplyTo, {ok, PID}) || {ReplyTo, _} <- PendingRequests],
			StatusByPID2 = maps:put(PID, {connected, MonitorRef, Peer}, StatusByPID),
			prometheus_gauge:inc(outbound_connections),
			ar_peers:connected_peer(Peer),
			{noreply, State#state{ status_by_pid = StatusByPID2 }};
		{connected, _MonitorRef, Peer} ->
			?LOG_WARNING([{event, gun_up_pid_already_exists},
					{peer, ar_util:format_peer(Peer)}]),
			ar_peers:connected_peer(Peer),
			{noreply, State}
	end;
%% A connection-level error: fail any queued callers, drop the bookkeeping for
%% this pid, and shut the connection down.
handle_info({gun_error, PID, Reason},
		#state{ pid_by_peer = PIDByPeer, status_by_pid = StatusByPID } = State) ->
	case maps:get(PID, StatusByPID, not_found) of
		not_found ->
			%% Fixed log key: was the typo {even, ...}.
			?LOG_WARNING([{event, gun_connection_error_with_unknown_pid}]),
			{noreply, State};
		{Status, _MonitorRef, Peer} ->
			PIDByPeer2 = maps:remove(Peer, PIDByPeer),
			StatusByPID2 = maps:remove(PID, StatusByPID),
			Reason2 = case Reason of
				timeout -> connect_timeout;
				{Type, _} -> Type;
				_ -> Reason
			end,
			case Status of
				{connecting, PendingRequests} ->
					reply_error(PendingRequests, Reason2);
				connected ->
					prometheus_gauge:dec(outbound_connections),
					ok
			end,
			ar_peers:disconnected_peer(Peer),
			gun:shutdown(PID),
			?LOG_DEBUG([{event, connection_error},
					{reason, io_lib:format("~p", [Reason])}]),
			{noreply, State#state{ status_by_pid = StatusByPID2,
					pid_by_peer = PIDByPeer2 }}
	end;
%% Missing pattern from gun 2.2+ - normalize the 5-element gun_down message to
%% the 6-element shape handled below.
handle_info({gun_down, Pid, Protocol, Reason, Streams}, State) ->
	handle_info({gun_down, Pid, Protocol, Reason, [], Streams}, State);
handle_info({gun_down, PID, Protocol, Reason, _KilledStreams, _UnprocessedStreams},
		#state{ pid_by_peer = PIDByPeer, status_by_pid = StatusByPID } = State) ->
	case maps:get(PID, StatusByPID, not_found) of
		not_found ->
			%% Fixed log key: was the typo {even, ...}.
			?LOG_WARNING([{event, gun_connection_down_with_unknown_pid},
					{protocol, Protocol}]),
			{noreply, State};
		{Status, _MonitorRef, Peer} ->
			PIDByPeer2 = maps:remove(Peer, PIDByPeer),
			StatusByPID2 = maps:remove(PID, StatusByPID),
			Reason2 = case Reason of
				{Type, _} -> Type;
				_ -> Reason
			end,
			case Status of
				{connecting, PendingRequests} ->
					reply_error(PendingRequests, Reason2);
				_ ->
					prometheus_gauge:dec(outbound_connections),
					ok
			end,
			ar_peers:disconnected_peer(Peer),
			{noreply, State#state{ status_by_pid = StatusByPID2,
					pid_by_peer = PIDByPeer2 }}
	end;
%% The connection process died: same cleanup as gun_down.
handle_info({'DOWN', _Ref, process, PID, Reason},
		#state{ pid_by_peer = PIDByPeer, status_by_pid = StatusByPID } = State) ->
	case maps:get(PID, StatusByPID, not_found) of
		not_found ->
			{noreply, State};
		{Status, _MonitorRef, Peer} ->
			PIDByPeer2 = maps:remove(Peer, PIDByPeer),
			StatusByPID2 = maps:remove(PID, StatusByPID),
			case Status of
				{connecting, PendingRequests} ->
					reply_error(PendingRequests, Reason);
				_ ->
					prometheus_gauge:dec(outbound_connections),
					ok
			end,
			ar_peers:disconnected_peer(Peer),
			{noreply, State#state{ status_by_pid = StatusByPID2,
					pid_by_peer = PIDByPeer2 }}
	end;
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, #state{ status_by_pid = StatusByPID }) ->
	maps:map(fun(PID, _Status) -> gun:shutdown(PID) end, StatusByPID),
	?LOG_INFO([{event, http_client_terminating},
			{reason, io_lib:format("~p", [Reason])}]),
	ok.

%%% ==================================================================
%%% Private functions.
%%% ==================================================================

%% Open a gun connection to the peer with the configured TCP/HTTP options.
%% connect_timeout falls back to the request timeout, then to the default.
open_connection(#{ peer := Peer } = Args) ->
	{ok, Config} = arweave_config:get_env(),
	{IPOrHost, Port} = get_ip_port(Peer),
	ConnectTimeout =
		maps:get(connect_timeout, Args, maps:get(timeout, Args, ?HTTP_REQUEST_CONNECT_TIMEOUT)),
	GunOpts = #{
		%% gun must not reconnect on its own - reconnection is handled here.
		retry => 0,
		connect_timeout => ConnectTimeout,
		http_opts => #{
			closing_timeout => Config#config.'http_client.http.closing_timeout',
			keepalive => Config#config.'http_client.http.keepalive'
		},
		tcp_opts => [
			{delay_send, Config#config.'http_client.tcp.delay_send'},
			{keepalive, Config#config.'http_client.tcp.keepalive'},
			{linger,
				{
					Config#config.'http_client.tcp.linger',
					Config#config.'http_client.tcp.linger_timeout'
				}
			},
			{nodelay, Config#config.'http_client.tcp.nodelay'},
			{send_timeout_close, Config#config.'http_client.tcp.send_timeout_close'},
			{send_timeout, Config#config.'http_client.tcp.send_timeout'}
		]
	},
	gun:open(IPOrHost, Port, GunOpts).

%% A 2-tuple is already {Host, Port}; otherwise the last tuple element is the
%% port and the preceding elements form the IP address tuple.
get_ip_port({_, _} = Peer) ->
	Peer;
get_ip_port(Peer) ->
	{erlang:delete_element(size(Peer), Peer), erlang:element(size(Peer), Peer)}.

%% Fail every request queued on a connection with the given reason, recording
%% a response-status metric for each.
reply_error([], _Reason) ->
	ok;
reply_error([PendingRequest | PendingRequests], Reason) ->
	ReplyTo = element(1, PendingRequest),
	Args = element(2, PendingRequest),
	Method = maps:get(method, Args),
	Path = maps:get(path, Args),
	record_response_status(Method, Path, {error, Reason}),
	gen_server:reply(ReplyTo, {error, Reason}),
	reply_error(PendingRequests, Reason).

%% Count the response in gun_requests_total, labelled by method, normalized
%% path, and status class.
record_response_status(Method, Path, Response) ->
	prometheus_counter:inc(gun_requests_total, [method_to_list(Method),
			ar_http_iface_server:label_http_path(list_to_binary(Path)),
			ar_metrics:get_status_class(Response)]).
%% Map a method atom to the HTTP verb string (used for metric labels; unknown
%% atoms map to "unknown").
method_to_list(get) -> "GET";
method_to_list(post) -> "POST";
method_to_list(put) -> "PUT";
method_to_list(head) -> "HEAD";
method_to_list(delete) -> "DELETE";
method_to_list(connect) -> "CONNECT";
method_to_list(options) -> "OPTIONS";
method_to_list(trace) -> "TRACE";
method_to_list(patch) -> "PATCH";
method_to_list(_) -> "unknown".

%% Issue the request on the open connection, await the reply, and record the
%% response-status metric. The response bookkeeping (stream ref, byte counter,
%% accumulator, start time) is merged over the caller's Args.
request(PID, Args) ->
	Timeout = maps:get(timeout, Args, ?HTTP_REQUEST_SEND_TIMEOUT),
	Ref = request2(PID, Args),
	ResponseArgs = #{ pid => PID
		, stream_ref => Ref
		, timeout => Timeout
		, limit => maps:get(limit, Args, infinity)
		, counter => 0
		, acc => []
		, start => os:system_time(microsecond)
		, is_peer_request => maps:get(is_peer_request, Args, true) },
	Response = await_response(maps:merge(Args, ResponseArgs)),
	Method = maps:get(method, Args),
	Path = maps:get(path, Args),
	record_response_status(Method, Path, Response),
	Response.

%% Send the request through gun. Peer requests get the default p2p headers
%% merged in. Only get and post are supported here; any other method atom
%% crashes with case_clause.
request2(PID, #{ path := Path } = Args) ->
	Headers = case maps:get(is_peer_request, Args, true) of
		true ->
			merge_headers(?DEFAULT_REQUEST_HEADERS, maps:get(headers, Args, []));
		_ ->
			maps:get(headers, Args, [])
	end,
	Method = case maps:get(method, Args) of
		get -> "GET";
		post -> "POST"
	end,
	gun:request(PID, Method, Path, Headers, maps:get(body, Args, <<>>)).

%% Merge two header lists by key; on duplicate keys the entry from HeadersB
%% wins (lists:ukeymerge/3 keeps elements from its first argument).
merge_headers(HeadersA, HeadersB) ->
	lists:ukeymerge(1, lists:keysort(1, HeadersB), lists:keysort(1, HeadersA)).
%% Receive the response for the stream: collect headers, accumulate body data
%% as an iolist (enforcing the byte limit if one is set), and return
%% {ok, {{StatusBin, Phrase}, Headers, Body, StartMicros, EndMicros}} or an
%% error tuple. Cancels the stream on any error.
await_response(
	#{ pid := PID, stream_ref := Ref, timeout := Timeout
	 , start := Start, limit := Limit, counter := Counter
	 , acc := Acc, method := Method, path := Path } = Args) ->
	case gun:await(PID, Ref, Timeout) of
		{response, fin, Status, Headers} ->
			%% Response with no body.
			End = os:system_time(microsecond),
			upload_metric(Args),
			{ok, {{integer_to_binary(Status), <<>>}, Headers, <<>>, Start, End}};
		{response, nofin, Status, Headers} ->
			%% Body follows - remember status/headers and keep receiving.
			await_response(Args#{ status => Status, headers => Headers });
		{data, nofin, Data} ->
			case Limit of
				infinity ->
					%% [Acc | Data] keeps the accumulator as a cheap iolist.
					await_response(Args#{ acc := [Acc | Data] });
				Limit ->
					Counter2 = size(Data) + Counter,
					case Limit >= Counter2 of
						true ->
							await_response(Args#{ counter := Counter2,
									acc := [Acc | Data] });
						false ->
							log(err, http_fetched_too_much_data, Args,
									<<"Fetched too much data">>),
							{error, too_much_data}
					end
			end;
		{data, fin, Data} ->
			%% Final body fragment - flatten the iolist and assemble the reply.
			End = os:system_time(microsecond),
			FinData = iolist_to_binary([Acc | Data]),
			download_metric(FinData, Args),
			upload_metric(Args),
			ResponseCode = gen_code_rest(maps:get(status, Args)),
			ResponseHeaders = maps:get(headers, Args),
			Response = {ResponseCode, ResponseHeaders, FinData, Start, End},
			{ok, Response};
		{error, timeout} = Response ->
			%% NOTE(review): the gun_await_process_down event label is reused
			%% for timeouts and generic errors here - confirm before relying on
			%% it when querying logs.
			record_response_status(Method, Path, Response),
			gun:cancel(PID, Ref),
			log(warn, gun_await_process_down, Args, Response),
			Response;
		{error, Reason} = Response when is_tuple(Reason) ->
			record_response_status(Method, Path, Response),
			gun:cancel(PID, Ref),
			log(warn, gun_await_process_down, Args, Reason),
			Response;
		Response ->
			record_response_status(Method, Path, Response),
			gun:cancel(PID, Ref),
			log(warn, gun_await_unknown, Args, Response),
			Response
	end.
%% Emit a warning or error log for the request when http_logging is enabled in
%% the node configuration; otherwise do nothing.
log(Type, Event, #{method := Method, peer := Peer, path := Path}, Reason) ->
	{ok, Config} = arweave_config:get_env(),
	Enabled = lists:member(http_logging, Config#config.enable),
	Report = [
		{event, Event},
		{http_method, Method},
		{peer, ar_util:format_peer(Peer)},
		{path, Path},
		{reason, Reason}
	],
	case {Enabled, Type} of
		{true, warn} ->
			?LOG_WARNING(Report);
		{true, err} ->
			?LOG_ERROR(Report);
		_ ->
			ok
	end.

%% Count the downloaded body bytes against the labelled path.
download_metric(Data, #{path := Path}) ->
	PathLabel = ar_http_iface_server:label_http_path(list_to_binary(Path)),
	prometheus_counter:inc(
		http_client_downloaded_bytes_total,
		[PathLabel],
		byte_size(Data)
	).

%% Count the uploaded body bytes for POST requests; no-op for anything else.
upload_metric(#{method := post, path := Path, body := Body}) ->
	PathLabel = ar_http_iface_server:label_http_path(list_to_binary(Path)),
	prometheus_counter:inc(
		http_client_uploaded_bytes_total,
		[PathLabel],
		byte_size(Body)
	);
upload_metric(_) ->
	ok.

%% Errors that indicate the connection was closed (often benignly) and the
%% request is worth retrying once over a fresh connection.
should_retry_closed_connection(Reason) ->
	lists:member(Reason, [
		{shutdown, normal},
		noproc,
		{down, {shutdown, closed}},
		{down, {shutdown, {error, einval}}},
		{stream_error, closed},
		{stream_error, closing},
		{stream_error, {closed, normal}},
		{shutdown, closed},
		closed,
		closing
	]).
%% Translate a numeric HTTP status into the {StatusBinary, ReasonPhrase} pair
%% used in this module's response tuples; unlisted codes get an empty phrase.
gen_code_rest(200) -> {<<"200">>, <<"OK">>};
gen_code_rest(201) -> {<<"201">>, <<"Created">>};
gen_code_rest(202) -> {<<"202">>, <<"Accepted">>};
gen_code_rest(208) -> {<<"208">>, <<"Transaction already processed">>};
gen_code_rest(400) -> {<<"400">>, <<"Bad Request">>};
gen_code_rest(419) -> {<<"419">>, <<"419 Missing Chunk">>};
gen_code_rest(421) -> {<<"421">>, <<"Misdirected Request">>};
gen_code_rest(429) -> {<<"429">>, <<"Too Many Requests">>};
gen_code_rest(N) -> {integer_to_binary(N), <<>>}.


================================================
FILE: apps/arweave/src/ar_http_iface_client.erl
================================================

%%%
%%% @doc Exposes access to an internal Arweave client to external nodes on the network.
%%%
-module(ar_http_iface_client).

-export([send_tx_json/3, send_tx_json/4, send_tx_binary/3, send_tx_binary/4]).
-export([send_block_json/3, send_block_binary/3, send_block_binary/4,
		send_block_announcement/2, get_block/3, get_tx/2, get_txs/2,
		get_tx_from_remote_peers/3, get_tx_data/2, get_wallet_list_chunk/2,
		get_wallet_list_chunk/3, get_wallet_list/2, add_peer/1, get_info/1,
		get_info/2, get_peers/1, get_time/2, get_height/1, get_block_index/3,
		get_sync_record/1, get_sync_record/3, get_sync_record/4, get_footprints/3,
		get_chunk_binary/3, get_mempool/1, get_sync_buckets/1,
		get_footprint_buckets/1, get_recent_hash_list/1,
		get_recent_hash_list_diff/2, get_reward_history/3,
		get_block_time_history/3, push_nonce_limiter_update/3, get_vdf_update/1,
		get_vdf_session/1, get_previous_vdf_session/1, get_cm_partition_table/1,
		cm_h1_send/2, cm_h2_send/2, cm_publish_send/2, get_jobs/2,
		post_partial_solution/2, get_pool_cm_jobs/2, post_pool_cm_jobs/2,
		post_cm_partition_table_to_pool/2, get_data_roots/2]).
-export([get_block_shadow/2, get_block_shadow/3, get_block_shadow/4]).
-export([log_failed_request/2]).

%% -- Testing exports
-export([get_tx_from_remote_peer/3]).
%% -- End of testing exports

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_consensus.hrl").
-include("ar_data_sync.hrl").
-include("ar_sync_buckets.hrl").
-include("ar_data_discovery.hrl").
-include("ar_mining.hrl").
-include("ar_wallets.hrl").
-include("ar_pool.hrl").

%%--------------------------------------------------------------------
%% @doc Send a JSON-encoded transaction to the given Peer with default
%% parameters.
%%
%% == Examples ==
%%
%% ```
%% Host = {127,0,0,1},
%% Port = 1984,
%% Peer = {Host, Port},
%% TXID = <<0:256>>,
%% Bin = ar_serialize:tx_to_binary(#tx{}),
%% send_tx_json(Peer, TXID, Bin).
%% '''
%%
%% @see send_tx_json/4
%% @end
%%--------------------------------------------------------------------
send_tx_json(Peer, TXID, Bin) ->
	send_tx_json(Peer, TXID, Bin, #{}).

%%--------------------------------------------------------------------
%% @doc Send a JSON-encoded transaction to the given Peer. The
%% `connect_timeout' and `timeout' options are given in seconds
%% (defaults 5 and 30) and converted to milliseconds here.
%%
%% == Examples ==
%%
%% ```
%% Host = {127,0,0,1},
%% Port = 1984,
%% Peer = {Host, Port},
%% TXID = <<0:256>>,
%% Bin = ar_serialize:tx_to_binary(#tx{}),
%% Opts = #{ connect_timeout => 5
%%         , timeout => 30
%%         },
%% send_tx_json(Peer, TXID, Bin, Opts).
%% '''
%%
%% @end
%%--------------------------------------------------------------------
send_tx_json(Peer, TXID, Bin, Opts) ->
	ConnectTimeout = maps:get(connect_timeout, Opts, 5),
	Timeout = maps:get(timeout, Opts, 30),
	ar_http:req(#{
		method => post,
		peer => Peer,
		path => "/tx",
		headers => add_header(<<"arweave-tx-id">>, ar_util:encode(TXID), p2p_headers()),
		body => Bin,
		connect_timeout => ConnectTimeout * 1000,
		timeout => Timeout * 1000
	}).

%%--------------------------------------------------------------------
%% @doc Send a binary-encoded transaction to the given Peer with
%% default parameters.
%% @see send_tx_binary/4
%% @end
%%--------------------------------------------------------------------
send_tx_binary(Peer, TXID, Bin) ->
	send_tx_binary(Peer, TXID, Bin, #{}).
%%--------------------------------------------------------------------
%% @doc Send a binary-encoded transaction to the given Peer. The
%% `connect_timeout' and `timeout' options are given in seconds
%% (defaults 5 and 30).
%% @end
%%--------------------------------------------------------------------
send_tx_binary(Peer, TXID, Bin, Opts) ->
	ConnectTimeoutSeconds = maps:get(connect_timeout, Opts, 5),
	TimeoutSeconds = maps:get(timeout, Opts, 30),
	Headers = add_header(<<"arweave-tx-id">>, ar_util:encode(TXID), p2p_headers()),
	ar_http:req(#{
		method => post,
		peer => Peer,
		path => "/tx2",
		headers => Headers,
		body => Bin,
		connect_timeout => ConnectTimeoutSeconds * 1000,
		timeout => TimeoutSeconds * 1000
	}).

%% @doc Announce a block to Peer.
send_block_announcement(Peer, Announcement) ->
	Body = ar_serialize:block_announcement_to_binary(Announcement),
	ar_http:req(#{
		method => post,
		peer => Peer,
		path => "/block_announcement",
		headers => p2p_headers(),
		body => Body,
		timeout => 10 * 1000
	}).

%% @doc Send the given JSON-encoded block to the given peer.
send_block_json(Peer, H, Payload) ->
	Headers = add_header(<<"arweave-block-hash">>, ar_util:encode(H), p2p_headers()),
	ar_http:req(#{
		method => post,
		peer => Peer,
		path => "/block",
		headers => Headers,
		body => Payload,
		connect_timeout => 5000,
		timeout => 120 * 1000
	}).

%% @doc Send the given binary-encoded block to the given peer.
send_block_binary(Peer, H, Payload) ->
	send_block_binary(Peer, H, Payload, undefined).

send_block_binary(Peer, H, Payload, RecallByte) ->
	BaseHeaders = add_header(<<"arweave-block-hash">>, ar_util:encode(H), p2p_headers()),
	%% The way of informing the recipient about the recall byte used before the fork
	%% 2.6. Since the fork 2.6 blocks have a "recall_byte" field.
	Headers =
		case RecallByte of
			undefined ->
				BaseHeaders;
			_ ->
				add_header(<<"arweave-recall-byte">>, integer_to_binary(RecallByte),
						BaseHeaders)
		end,
	ar_http:req(#{
		method => post,
		peer => Peer,
		path => "/block2",
		headers => Headers,
		body => Payload,
		timeout => 20 * 1000
	}).

%% @doc Request to be added as a peer to a remote host.
%% Ask the remote host to add this node as a peer; the body identifies our
%% network so incompatible nodes can refuse.
add_peer(Peer) ->
	ar_http:req(#{
		method => post,
		peer => Peer,
		path => "/peers",
		headers => p2p_headers(),
		body => ar_serialize:jsonify({[{network, list_to_binary(?NETWORK_NAME)}]}),
		timeout => 3 * 1000
	}).

%% @doc Retrieve a block. We request the peer to include complete
%% transactions at the given positions (in the sorted transaction list).
get_block(Peer, H, TXIndices) ->
	case handle_block_response(Peer, binary,
			ar_http:req(#{
				method => get,
				peer => Peer,
				path => "/block2/hash/" ++ binary_to_list(ar_util:encode(H)),
				headers => p2p_headers(),
				connect_timeout => 1000,
				timeout => 15 * 1000,
				body => ar_util:encode_list_indices(TXIndices),
				limit => ?MAX_BODY_SIZE
			})) of
		not_found ->
			not_found;
		{ok, B, Time, Size} ->
			{B, Time, Size}
	end.

%%--------------------------------------------------------------------
%% @doc get a block shadow using default parameter.
%% @end
%%--------------------------------------------------------------------
get_block_shadow(Peers, ID) ->
	get_block_shadow(Peers, ID, #{}).

%%--------------------------------------------------------------------
%% @doc Retrieve a block shadow by hash or height from one of the given
%% peers. Some options can be modified like `rand_min',
%% `connect_timeout' and `timeout'.
%% @see get_block_shadow/4
%% @end
%%--------------------------------------------------------------------
get_block_shadow([], _ID, _Opts) ->
	unavailable;
get_block_shadow(Peers, ID, Opts) ->
	%% Pick a random peer among the first min(rand_min, length(Peers)) entries;
	%% on not_found, retry with that peer removed from the list.
	RandMin = maps:get(rand_min, Opts, 5),
	Random = rand:uniform(min(RandMin, length(Peers))),
	Peer = lists:nth(Random, Peers),
	case get_block_shadow(ID, Peer, binary, Opts) of
		not_found ->
			get_block_shadow(Peers -- [Peer], ID, Opts);
		{ok, B, Time, Size} ->
			{Peer, B, Time, Size}
	end.

%%--------------------------------------------------------------------
%% @doc Retrieve a block shadow by hash or height from the given peer.
%% @end
%%--------------------------------------------------------------------
get_block_shadow(ID, Peer, Encoding, _Opts) ->
	handle_block_response(Peer, Encoding,
		ar_http:req(#{
			method => get,
			peer => Peer,
			path => get_block_path(ID, Encoding),
			headers => p2p_headers(),
			connect_timeout => 500,
			timeout => 30 * 1000,
			limit => ?MAX_BODY_SIZE
		})).

%% @doc Generate an appropriate URL for a block by its identifier.
%% The identifier is a hash (binary), a height (integer), or a block index
%% entry {H, WeaveSize, TXRoot} from which the hash is taken.
get_block_path({ID, _, _}, Encoding) ->
	get_block_path(ID, Encoding);
get_block_path(ID, Encoding) when is_binary(ID) ->
	case Encoding of
		binary -> "/block2/hash/" ++ binary_to_list(ar_util:encode(ID));
		json -> "/block/hash/" ++ binary_to_list(ar_util:encode(ID))
	end;
get_block_path(ID, Encoding) when is_integer(ID) ->
	case Encoding of
		binary -> "/block2/height/" ++ integer_to_list(ID);
		json -> "/block/height/" ++ integer_to_list(ID)
	end.

%% @doc Get a bunch of wallets by the given root hash from external peers.
get_wallet_list_chunk(Peers, H) ->
	get_wallet_list_chunk(Peers, H, start).

%% @doc Fetch one chunk of the wallet list with the given root hash, resuming
%% from Cursor (`start' for the first chunk). Try the peers in order; return
%% {ok, {NextCursor, Wallets}} or {error, not_found}.
get_wallet_list_chunk([], _H, _Cursor) ->
	{error, not_found};
get_wallet_list_chunk([Peer | Peers], H, Cursor) ->
	BasePath = "/wallet_list/" ++ binary_to_list(ar_util:encode(H)),
	Path =
		case Cursor of
			start ->
				BasePath;
			_ ->
				BasePath ++ "/" ++ binary_to_list(ar_util:encode(Cursor))
		end,
	Response = ar_http:req(#{
		method => get,
		peer => Peer,
		path => Path,
		headers => p2p_headers(),
		limit => ?MAX_SERIALIZED_WALLET_LIST_CHUNK_SIZE,
		timeout => 10 * 1000,
		connect_timeout => 1000
	}),
	case Response of
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			case ar_serialize:etf_to_wallet_chunk_response(Body) of
				{ok, #{ next_cursor := NextCursor, wallets := Wallets }} ->
					{ok, {NextCursor, Wallets}};
				DeserializationResult ->
					?LOG_ERROR([
						{event, got_unexpected_wallet_list_chunk_deserialization_result},
						{deserialization_result, DeserializationResult}
					]),
					get_wallet_list_chunk(Peers, H, Cursor)
			end;
		%% Any other reply (non-200 status or transport error): try the next
		%% peer. Note: this clause used to re-match the already-bound variable
		%% `Response' as the "catch-all" pattern, which only worked because a
		%% bound variable always matches itself; `_' states the intent.
		_ ->
			get_wallet_list_chunk(Peers, H, Cursor)
	end.
%% @doc Get a wallet list by the given block hash from external peers.
get_wallet_list([], _H) ->
	not_found;
get_wallet_list([Peer | Peers], H) ->
	%% Try the peers one by one until one of them serves the wallet list.
	case get_wallet_list(Peer, H) of
		unavailable -> get_wallet_list(Peers, H);
		not_found -> get_wallet_list(Peers, H);
		WL -> WL
	end;
get_wallet_list(Peer, H) ->
	%% Single-peer clause: fetch and deserialize the wallet list.
	Response = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/block/hash/" ++ binary_to_list(ar_util:encode(H)) ++ "/wallet_list",
		headers => p2p_headers()
	}),
	case Response of
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			{ok, ar_serialize:json_struct_to_wallet_list(Body)};
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			not_found;
		_ ->
			unavailable
	end.

get_block_index(Peer, Start, End) ->
	get_block_index(Peer, Start, End, binary).

%% @doc Fetch the [Start, End] range of the block index from the given peer,
%% preferring the binary encoding and falling back to JSON for peers that do
%% not support /block_index2.
get_block_index(Peer, Start, End, Encoding) ->
	StartList = integer_to_list(Start),
	EndList = integer_to_list(End),
	Root =
		case Encoding of
			binary -> "/block_index2/";
			json -> "/block_index/"
		end,
	case ar_http:req(#{
		method => get,
		peer => Peer,
		path => Root ++ StartList ++ "/" ++ EndList,
		timeout => 20000,
		connect_timeout => 5000,
		headers => p2p_headers()
	}) of
		{ok, {{<<"400">>, _}, _, <<"Request type not found.">>, _, _}}
				when Encoding == binary ->
			%% The peer predates the binary endpoint - retry with JSON.
			get_block_index(Peer, Start, End, json);
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			case decode_block_index(Body, Encoding) of
				{ok, BI} ->
					{ok, BI};
				Error ->
					?LOG_WARNING([{event, failed_to_decode_block_index_range}, Error]),
					Error
			end;
		Error ->
			?LOG_WARNING([{event, failed_to_fetch_block_index_range},
					{error, io_lib:format("~p", [Error])}]),
			{error, Error}
	end.

%% Decode a block index payload in the given encoding.
decode_block_index(Bin, binary) ->
	ar_serialize:binary_to_block_index(Bin);
decode_block_index(Bin, json) ->
	case ar_serialize:json_decode(Bin) of
		{ok, Struct} ->
			case catch ar_serialize:json_struct_to_block_index(Struct) of
				{'EXIT', _} = Exc -> {error, Exc};
				BI -> {ok, BI}
			end;
		Error ->
			Error
	end.
%% @doc Fetch the complete data sync record from the given peer, ETF-encoded.
get_sync_record(Peer) ->
	Headers = [{<<"Content-Type">>, <<"application/etf">>}],
	handle_sync_record_response(ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/data_sync_record",
		timeout => 30 * 1000,
		connect_timeout => 2000,
		limit => ?MAX_ETF_SYNC_RECORD_SIZE,
		headers => Headers
	})).

%% @doc Fetch up to Limit intervals of the peer's data sync record starting
%% from the Start offset.
get_sync_record(Peer, Start, Limit) ->
	Headers = [{<<"Content-Type">>, <<"application/etf">>}],
	handle_sync_record_response(ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/data_sync_record/" ++ integer_to_list(Start) ++ "/"
				++ integer_to_list(Limit),
		timeout => 30 * 1000,
		connect_timeout => 5000,
		limit => ?MAX_ETF_SYNC_RECORD_SIZE,
		headers => Headers
	}), Start, Limit).

%% @doc Fetch up to Limit intervals of the peer's data sync record between the
%% Start and End offsets.
get_sync_record(Peer, Start, End, Limit) ->
	Headers = [{<<"Content-Type">>, <<"application/etf">>}],
	handle_sync_record_response(ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/data_sync_record/" ++ integer_to_list(Start) ++ "/"
				++ integer_to_list(End) ++ "/" ++ integer_to_list(Limit),
		timeout => 30 * 1000,
		connect_timeout => 5000,
		limit => ?MAX_ETF_SYNC_RECORD_SIZE,
		headers => Headers
	}), Start, Limit).

%% @doc Fetch the given footprint of the given partition from the peer.
get_footprints(Peer, Partition, Footprint) ->
	handle_footprints_response(ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/footprints/" ++ integer_to_list(Partition) ++ "/"
				++ integer_to_list(Footprint),
		timeout => 10_000,
		connect_timeout => 5_000,
		limit => ?MAX_FOOTPRINT_PAYLOAD_SIZE,
		headers => p2p_headers()
	})).

%% @doc Fetch the chunk with the given absolute end offset from the peer,
%% requesting the given packing, and record the request duration metric.
get_chunk_binary(Peer, Offset, RequestedPacking) ->
	PackingBinary = iolist_to_binary(ar_serialize:encode_packing(RequestedPacking, false)),
	Headers = [{<<"x-packing">>, PackingBinary},
			%% The nodes not upgraded to the 2.5 version would ignore this header.
			%% It is fine because all offsets before 2.5 are not bucket-based.
			%% Client libraries do not send this header - normally they do not need
			%% bucket-based offsets. Bucket-based offsets are required in mining
			%% after the fork 2.5 and it is convenient to use them for syncing,
			%% thus setting the header here. A bucket-based offset corresponds to
			%% the chunk that ends in the same 256 KiB bucket starting from the
			%% 2.5 block. In most cases a bucket-based offset would correspond to
			%% the same chunk as the normal offset except for the offsets of the
			%% last and second last chunks of the transactions when these chunks
			%% are smaller than 256 KiB.
			{<<"x-bucket-based-offset">>, <<"true">>}],
	StartTime = erlang:monotonic_time(),
	%% Build the path with integer_to_list/1 so it is a proper string,
	%% consistently with every other path constructed in this module. The
	%% previous integer_to_binary/1 produced an improper list ("str" ++ Bin)
	%% that only worked because the HTTP client accepts iodata.
	Response = ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/chunk2/" ++ integer_to_list(Offset),
		timeout => 120 * 1000,
		connect_timeout => 5000,
		limit => ?MAX_SERIALIZED_CHUNK_PROOF_SIZE,
		headers => p2p_headers() ++ Headers
	}),
	prometheus_histogram:observe(
		http_client_get_chunk_duration_seconds,
		[ar_metrics:get_status_class(Response), ar_util:format_peer(Peer)],
		erlang:monotonic_time() - StartTime),
	handle_chunk_response(Response, RequestedPacking, Peer).

%% @doc Fetch the mempool transaction identifiers from one of the given peers
%% (or from the single given peer). Return {{ok, TXIDs}, Peer} or an error.
get_mempool([]) ->
	{error, not_found};
get_mempool([Peer | Peers]) ->
	case get_mempool(Peer) of
		{{ok, TXIDs}, Peer} ->
			{{ok, TXIDs}, Peer};
		{error, Error} ->
			log_failed_request(Error,
					[{event, failed_to_get_mempool_txids_from_peer},
					{peer, ar_util:format_peer(Peer)},
					{error, io_lib:format("~p", [Error])}]),
			get_mempool(Peers -- [Peer])
	end;
get_mempool(Peer) ->
	handle_mempool_response(ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/tx/pending",
		timeout => 5 * 1000,
		connect_timeout => 500,
		%% Sufficient for a JSON-encoded list of the transaction identifiers
		%% from a mempool with 250 MiB worth of transaction headers with no data.
		limit => 3000000,
		headers => p2p_headers()
	}), Peer).

%% @doc Fetch the peer's sync buckets, deserialized with the default bucket
%% size.
get_sync_buckets(Peer) ->
	handle_get_sync_buckets_response(ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/sync_buckets",
		timeout => 10 * 1000,
		connect_timeout => 2000,
		limit => ?MAX_SYNC_BUCKETS_SIZE,
		headers => p2p_headers()
	}), ?DEFAULT_SYNC_BUCKET_SIZE).
%% @doc Fetch the peer's footprint buckets, deserialized with the network
%% footprint bucket size.
get_footprint_buckets(Peer) ->
	Request = #{
		peer => Peer,
		method => get,
		path => "/footprint_buckets",
		timeout => 10000,
		connect_timeout => 2000,
		limit => ?MAX_SYNC_BUCKETS_SIZE,
		headers => p2p_headers()
	},
	handle_get_sync_buckets_response(ar_http:req(Request), ?NETWORK_FOOTPRINT_BUCKET_SIZE).

%% @doc Fetch the recent hash list from the given peer.
get_recent_hash_list(Peer) ->
	Request = #{
		peer => Peer,
		method => get,
		path => "/recent_hash_list",
		timeout => 2000,
		connect_timeout => 1000,
		limit => 3400,
		headers => p2p_headers()
	},
	handle_get_recent_hash_list_response(ar_http:req(Request)).

%% @doc Ask the given peer for the difference between our recent hash list HL
%% and theirs; the list is sent newest-first.
get_recent_hash_list_diff(Peer, HL) ->
	%% Response size bound: PrevH (48 bytes) plus, per block, H (48 bytes),
	%% Len (2 bytes) and up to 1000 TXIDs of 32 bytes, for up to 49 blocks -
	%% a very pessimistic case.
	ResponseLimit = 48 + (48 + 2 + 1000 * 32) * 49, % 1570498 bytes.
	Request = #{
		peer => Peer,
		method => get,
		path => "/recent_hash_list_diff",
		timeout => 10000,
		connect_timeout => 1000,
		limit => ResponseLimit,
		body => iolist_to_binary(lists:reverse(HL)),
		headers => p2p_headers()
	},
	handle_get_recent_hash_list_diff_response(ar_http:req(Request), HL, Peer).

%% @doc Fetch the reward history from one of the given peers. The reward history
%% must contain ar_rewards:buffered_reward_history_length/1 elements. The reward history
%% hashes are validated against the given ExpectedRewardHistoryHashes. Return not_found
%% if we fail to fetch a reward history of the expected length from any of the peers.
%% @doc Fetch the reward history for block B from the first peer that serves a
%% history whose hashes validate against ExpectedRewardHistoryHashes; fall
%% back to the remaining peers on any failure. Return {ok, RewardHistory} or
%% not_found when every peer fails.
get_reward_history([Peer | Peers], B, ExpectedRewardHistoryHashes) ->
	#block{ height = Height, indep_hash = H } = B,
	ExpectedLength = ar_rewards:buffered_reward_history_length(Height),
	DoubleCheckLength = ar_rewards:expected_hashes_length(Height),
	%% Assert the caller passed the expected number of history hashes.
	true = length(ExpectedRewardHistoryHashes) == min(
			Height - ar_fork:height_2_6() + 1, DoubleCheckLength),
	case ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/reward_history/" ++ binary_to_list(ar_util:encode(H)),
		timeout => 30000,
		headers => p2p_headers()
	}) of
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			case ar_serialize:binary_to_reward_history(Body) of
				%% NOTE(review): the length check is currently disabled (see
				%% the commented-out guard and clause below); only the history
				%% hashes are validated.
				{ok, RewardHistory} -> % when length(RewardHistory) == ExpectedLength ->
					case ar_rewards:validate_reward_history_hashes(Height, RewardHistory,
							ExpectedRewardHistoryHashes) of
						true ->
							?LOG_DEBUG([
								{event, received_valid_reward_history},
								{peer, ar_util:format_peer(Peer)},
								{height, Height},
								{expected_length, ExpectedLength},
								{length, length(RewardHistory)}
							]),
							{ok, RewardHistory};
						false ->
							?LOG_WARNING([{event, received_invalid_reward_history},
									{peer, ar_util:format_peer(Peer)}]),
							get_reward_history(Peers, B, ExpectedRewardHistoryHashes)
					end;
				% {ok, L} ->
				% 	?LOG_WARNING([{event, received_reward_history_of_unexpected_length},
				% 			{expected_length, ExpectedLength},
				% 			{received_length, length(L)},
				% 			{peer, ar_util:format_peer(Peer)}]),
				% 	get_reward_history(Peers, B, ExpectedRewardHistoryHashes);
				{error, _} ->
					?LOG_WARNING([{event, failed_to_parse_reward_history},
							{peer, ar_util:format_peer(Peer)}]),
					get_reward_history(Peers, B, ExpectedRewardHistoryHashes)
			end;
		Reply ->
			?LOG_WARNING([{event, failed_to_fetch_reward_history},
					{peer, ar_util:format_peer(Peer)},
					{reply, io_lib:format("~p", [Reply])}]),
			get_reward_history(Peers, B, ExpectedRewardHistoryHashes)
	end;
get_reward_history([], _B, _RewardHistoryHashes) ->
	not_found.
%% @doc Fetch the block time history for block B from the first peer that
%% serves a history of the expected length whose hashes validate against
%% ExpectedBlockTimeHistoryHashes; fall back to the remaining peers otherwise.
%% Return {ok, BlockTimeHistory} or not_found when every peer fails.
get_block_time_history([Peer | Peers], B, ExpectedBlockTimeHistoryHashes) ->
	#block{ height = Height, indep_hash = H } = B,
	Fork_2_7 = ar_fork:height_2_7(),
	%% The block time history only exists from the fork 2.7 on.
	true = Height >= Fork_2_7,
	ExpectedLength = min(Height - Fork_2_7 + 1,
			ar_block_time_history:history_length()
					+ ar_block:get_consensus_window_size()),
	%% Assert the caller passed the expected number of history hashes.
	true = length(ExpectedBlockTimeHistoryHashes) == min(Height - Fork_2_7 + 1,
			ar_block:get_consensus_window_size()),
	case ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/block_time_history/" ++ binary_to_list(ar_util:encode(H)),
		timeout => 30000,
		headers => p2p_headers()
	}) of
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			case ar_serialize:binary_to_block_time_history(Body) of
				{ok, BlockTimeHistory} when length(BlockTimeHistory) == ExpectedLength ->
					case ar_block_time_history:validate_hashes(BlockTimeHistory,
							ExpectedBlockTimeHistoryHashes) of
						true ->
							{ok, BlockTimeHistory};
						false ->
							?LOG_WARNING([{event, received_invalid_block_time_history},
									{peer, ar_util:format_peer(Peer)}]),
							get_block_time_history(Peers, B,
									ExpectedBlockTimeHistoryHashes)
					end;
				{ok, L} ->
					?LOG_WARNING([{event,
							received_block_time_history_of_unexpected_length},
							{expected_length, ExpectedLength},
							{received_length, length(L)},
							{peer, ar_util:format_peer(Peer)}]),
					get_block_time_history(Peers, B, ExpectedBlockTimeHistoryHashes);
				{error, _} ->
					?LOG_WARNING([{event, failed_to_parse_block_time_history},
							{peer, ar_util:format_peer(Peer)}]),
					get_block_time_history(Peers, B, ExpectedBlockTimeHistoryHashes)
			end;
		Reply ->
			?LOG_WARNING([{event, failed_to_fetch_block_time_history},
					{peer, ar_util:format_peer(Peer)},
					{reply, io_lib:format("~p", [Reply])}]),
			get_block_time_history(Peers, B, ExpectedBlockTimeHistoryHashes)
	end;
get_block_time_history([], _B, _RewardHistoryHashes) ->
	not_found.
%% @doc Push the given nonce limiter (VDF) update to the peer in the given
%% wire format. Return ok when the peer replies 200 with an empty body, the
%% deserialized response when the peer replies 202, or an error otherwise.
push_nonce_limiter_update(Peer, Update, Format) ->
	Body = ar_serialize:nonce_limiter_update_to_binary(Format, Update),
	case ar_http:req(#{
		peer => Peer,
		method => post,
		path => "/vdf",
		body => Body,
		timeout => 2000,
		%% The expected response bodies are tiny.
		limit => 100,
		headers => p2p_headers()
	}) of
		{ok, {{<<"200">>, _}, _, <<>>, _, _}} ->
			ok;
		{ok, {{<<"202">>, _}, _, ResponseBody, _, _}} ->
			ar_serialize:binary_to_nonce_limiter_update_response(ResponseBody);
		{ok, {{Status, _}, _, ResponseBody, _, _}} ->
			{error, {Status, ResponseBody}};
		Reply ->
			Reply
	end.

%% @doc Fetch the latest VDF update from the peer (wire format 2).
get_vdf_update(Peer) ->
	case ar_http:req(#{
		peer => Peer,
		method => get,
		path => "/vdf2",
		timeout => 2000,
		headers => p2p_headers()
	}) of
		{ok, {{<<"200">>, _}, _, Bin, _, _}} ->
			ar_serialize:binary_to_nonce_limiter_update(2, Bin);
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			{error, not_found};
		{ok, {{Status, _}, _, ResponseBody, _, _}} ->
			{error, {Status, ResponseBody}};
		Reply ->
			Reply
	end.

%% @doc Fetch the current VDF session from the peer.
get_vdf_session(Peer) ->
	{Path, Format} =
		case ar_config:compute_own_vdf() of
			true ->
				%% If we compute our own VDF, we need to know the VDF difficulties
				%% so that we can continue extending the new session. The VDF difficulties
				%% have been introduced in the format number 4.
				{"/vdf4/session", 4};
			false ->
				{"/vdf3/session", 3}
		end,
	case ar_http:req(#{
		peer => Peer,
		method => get,
		path => Path,
		timeout => 10000,
		headers => p2p_headers()
	}) of
		{ok, {{<<"200">>, _}, _, Bin, _, _}} ->
			ar_serialize:binary_to_nonce_limiter_update(Format, Bin);
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			{error, not_found};
		{ok, {{Status, _}, _, ResponseBody, _, _}} ->
			{error, {Status, ResponseBody}};
		Reply ->
			Reply
	end.

%% @doc Fetch the previous VDF session from the peer.
get_previous_vdf_session(Peer) ->
	{Path, Format} =
		case ar_config:compute_own_vdf() of
			true ->
				%% If we compute our own VDF, we need to know the VDF difficulties
				%% so that we can continue extending the new session. The VDF difficulties
				%% have been introduced in the format number 4.
				{"/vdf4/previous_session", 4};
			false ->
				{"/vdf2/previous_session", 2}
		end,
	case ar_http:req(#{
		peer => Peer,
		method => get,
		path => Path,
		timeout => 10000,
		headers => p2p_headers()
	}) of
		{ok, {{<<"200">>, _}, _, Bin, _, _}} ->
			ar_serialize:binary_to_nonce_limiter_update(Format, Bin);
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			{error, not_found};
		{ok, {{Status, _}, _, ResponseBody, _, _}} ->
			{error, {Status, ResponseBody}};
		Reply ->
			Reply
	end.

%% -----------------------------------------------------------------------------
%% Coordinated Mining and Pool Request
%% -----------------------------------------------------------------------------

%% @doc Fetch the partition table from a coordinated mining peer or the pool.
get_cm_partition_table(Peer) ->
	Req = build_cm_or_pool_request(get, Peer, "/coordinated_mining/partition_table"),
	handle_cm_partition_table_response(ar_http:req(Req)).

%% @doc Send an H1 candidate to a coordinated mining peer.
cm_h1_send(Peer, Candidate) ->
	JSON = ar_serialize:jsonify(ar_serialize:candidate_to_json_struct(Candidate)),
	Req = build_cm_or_pool_request(post, Peer, "/coordinated_mining/h1", JSON),
	handle_cm_noop_response(ar_http:req(Req)).

%% @doc Send an H2 candidate to a coordinated mining peer.
cm_h2_send(Peer, Candidate) ->
	JSON = ar_serialize:jsonify(ar_serialize:candidate_to_json_struct(Candidate)),
	Req = build_cm_or_pool_request(post, Peer, "/coordinated_mining/h2", JSON),
	handle_cm_noop_response(ar_http:req(Req)).

%% @doc Publish a mining solution to a coordinated mining peer.
cm_publish_send(Peer, Solution) ->
	?LOG_DEBUG([{event, cm_publish_send}, {peer, ar_util:format_peer(Peer)},
			{solution, ar_util:encode(Solution#mining_solution.solution_hash)},
			{step_number, Solution#mining_solution.step_number},
			{start_interval_number, Solution#mining_solution.start_interval_number},
			{seed, ar_util:encode(Solution#mining_solution.seed)}]),
	JSON = ar_serialize:jsonify(ar_serialize:solution_to_json_struct(Solution)),
	Req = build_cm_or_pool_request(post, Peer, "/coordinated_mining/publish", JSON),
	handle_cm_noop_response(ar_http:req(Req)).

%% @doc Fetch the jobs from the pool or coordinated mining exit peer.
get_jobs(Peer, PrevOutput) ->
	prometheus_counter:inc(pool_job_request_count),
	Req = build_cm_or_pool_request(get, Peer,
			"/jobs/" ++ binary_to_list(ar_util:encode(PrevOutput))),
	handle_get_jobs_response(ar_http:req(Req)).

%% @doc Post the partial solution to the pool or coordinated mining exit peer.
post_partial_solution(Peer, Solution) ->
	%% Accept either a pre-serialized binary payload or a solution record.
	Payload =
		case is_binary(Solution) of
			true ->
				Solution;
			false ->
				ar_serialize:jsonify(ar_serialize:solution_to_json_struct(Solution))
		end,
	Req = build_cm_or_pool_request(post, Peer, "/partial_solution", Payload),
	handle_post_partial_solution_response(ar_http:req(Req#{
		timeout => 20 * 1000,
		connect_timeout => 5 * 1000
	})).

%% @doc Post our coordinated mining jobs to /pool_cm_jobs and decode the jobs
%% the pool replies with.
get_pool_cm_jobs(Peer, Jobs) ->
	JSON = ar_serialize:jsonify(ar_serialize:pool_cm_jobs_to_json_struct(Jobs)),
	Req = build_cm_or_pool_request(post, Peer, "/pool_cm_jobs", JSON),
	handle_get_pool_cm_jobs_response(ar_http:req(Req#{ connect_timeout => 1000 })).

%% @doc Post the given pre-serialized jobs payload to /pool_cm_jobs.
post_pool_cm_jobs(Peer, Payload) ->
	Req = build_cm_or_pool_request(post, Peer, "/pool_cm_jobs", Payload),
	handle_post_pool_cm_jobs_response(ar_http:req(Req#{
		timeout => 10 * 1000,
		connect_timeout => 2000
	})).

%% @doc Post the coordinated mining partition table to the pool.
post_cm_partition_table_to_pool(Peer, Payload) ->
	Req = build_cm_or_pool_request(post, Peer,
			"/coordinated_mining/partition_table", Payload),
	handle_cm_partition_table_response(ar_http:req(Req#{
		timeout => 10 * 1000,
		connect_timeout => 2000
	})).

%% @doc Fetch data_root metadata for the block that starts at or before the given offset,
%% and validate it against the local block index. Also recompute the TXRoot from entries.
get_data_roots(Peer, Offset) ->
	Path = "/data_roots/" ++ integer_to_list(Offset),
	Response = ar_http:req(#{
		peer => Peer,
		method => get,
		path => Path,
		timeout => 10 * 1000,
		connect_timeout => 2000,
		headers => p2p_headers()
	}),
	handle_get_data_roots_response(Response, Offset).
%% Handle the reply to GET /data_roots/<offset>: deserialize the entries and
%% validate them against the local block index.
handle_get_data_roots_response({ok, {{<<"200">>, _}, _, Body, _, _}}, Offset) ->
	case ar_serialize:binary_to_data_roots(Body) of
		{ok, {TXRoot, BlockSize, Entries}} ->
			ar_data_root_sync:validate_data_roots(TXRoot, BlockSize, Entries, Offset);
		_ ->
			{error, invalid_response}
	end;
handle_get_data_roots_response({ok, {{<<"404">>, _}, _, _, _, _}}, _Offset) ->
	{error, not_found};
handle_get_data_roots_response(Other, _Offset) ->
	{error, Other}.

%% Split the given URL into a {{Host, Port}, Path} pair. When the URL carries
%% no explicit port, infer it from the scheme: 443 for https, the default
%% Arweave port 1984 otherwise.
get_peer_and_path_from_url(URL) ->
	#{ host := Host, path := P } = Parsed = uri_string:parse(URL),
	%% uri_string:parse/1 mirrors the type of its input: binary URLs yield a
	%% binary scheme, string URLs a string scheme. Match both forms here -
	%% previously only the string form "https" was checked, so a binary https
	%% URL without an explicit port incorrectly defaulted to port 1984.
	DefaultPort =
		case maps:get(scheme, Parsed, undefined) of
			<<"https">> -> 443;
			"https" -> 443;
			_ -> 1984
		end,
	Port = maps:get(port, Parsed, DefaultPort),
	{{binary_to_list(Host), Port}, binary_to_list(P)}.

%% Build an ar_http request map addressed either to a coordinated mining peer
%% or, when Peer is {pool, URL}, to the pool server behind the URL.
build_cm_or_pool_request(Method, Peer, Path) ->
	build_cm_or_pool_request(Method, Peer, Path, <<>>).

build_cm_or_pool_request(Method, Peer, Path, Body) ->
	{Peer3, Headers, BasePath, IsPeerRequest} =
		case Peer of
			{pool, URL} ->
				{Peer2, Path2} = get_peer_and_path_from_url(URL),
				{Peer2, pool_client_headers(), Path2, false};
			_ ->
				{Peer, cm_p2p_headers(), "", true}
		end,
	%% POSTs carry a JSON body; GETs do not need the content type.
	Headers2 =
		case Method of
			get -> Headers;
			_ -> add_header(<<"content-type">>, <<"application/json">>, Headers)
		end,
	#{
		peer => Peer3,
		method => Method,
		path => BasePath ++ Path,
		timeout => 5 * 1000,
		connect_timeout => 500,
		headers => Headers2,
		body => Body,
		is_peer_request => IsPeerRequest
	}.

%% Decode the pool's reply to a coordinated mining jobs exchange.
handle_get_pool_cm_jobs_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	case catch ar_serialize:json_map_to_pool_cm_jobs(
			element(2, ar_serialize:json_decode(Body, [return_maps]))) of
		{'EXIT', _} ->
			{error, invalid_json};
		Jobs ->
			{ok, Jobs}
	end;
handle_get_pool_cm_jobs_response(Reply) ->
	{error, Reply}.

handle_post_pool_cm_jobs_response({ok, {{<<"200">>, _}, _, _, _, _}}) ->
	ok;
handle_post_pool_cm_jobs_response(Reply) ->
	{error, Reply}.
%% Decode the JSON reply to a partial solution submission.
handle_post_partial_solution_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	case catch jiffy:decode(Body, [return_maps]) of
		{'EXIT', _} ->
			{error, invalid_json};
		Decoded ->
			{ok, Decoded}
	end;
handle_post_partial_solution_response(Reply) ->
	{error, Reply}.

%% Decode the reply to a jobs request and account the received jobs.
handle_get_jobs_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	case catch ar_serialize:json_struct_to_jobs(ar_serialize:dejsonify(Body)) of
		{'EXIT', _} ->
			{error, invalid_json};
		Jobs ->
			JobCount = length(Jobs#jobs.jobs),
			prometheus_counter:inc(pool_total_job_got_count, JobCount),
			{ok, Jobs}
	end;
handle_get_jobs_response(Reply) ->
	{error, Reply}.

%% Decode the reply to a full data sync record request.
handle_sync_record_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	ar_intervals:safe_from_etf(Body);
handle_sync_record_response({ok, {{<<"429">>, _}, _, _, _, _}}) ->
	{error, too_many_requests};
handle_sync_record_response(Reply) ->
	{error, Reply}.

%% Decode the reply to a paginated data sync record request and sanity-check
%% the decoded intervals against the requested cursor and limit.
handle_sync_record_response({ok, {{<<"200">>, _}, _, Body, _, _}}, Start, Limit) ->
	case ar_intervals:safe_from_etf(Body) of
		{ok, Intervals} ->
			check_sync_record_intervals(Intervals, Start, Limit);
		Error ->
			Error
	end;
handle_sync_record_response({ok, {{<<"429">>, _}, _, _, _, _}}, _, _) ->
	{error, too_many_requests};
handle_sync_record_response(Reply, _, _) ->
	{error, Reply}.

%% Accept the intervals only when there are no more than Limit of them and
%% none starts before the requested cursor.
check_sync_record_intervals(Intervals, Start, Limit) ->
	case ar_intervals:count(Intervals) > Limit of
		true ->
			{error, too_many_intervals};
		false ->
			case ar_intervals:is_empty(Intervals) of
				true ->
					{ok, Intervals};
				false ->
					%% smallest/1 may only be called on a non-empty set.
					case element(1, ar_intervals:smallest(Intervals)) < Start of
						true ->
							{error, intervals_do_not_match_cursor};
						false ->
							{ok, Intervals}
					end
			end
	end.
%% Decode the reply to GET /footprints/<partition>/<footprint>.
handle_footprints_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	case catch ar_serialize:json_map_to_footprint(jiffy:decode(Body, [return_maps])) of
		{'EXIT', DecodeError} ->
			{error, DecodeError};
		Footprint ->
			{ok, Footprint}
	end;
handle_footprints_response({ok, {{<<"404">>, _}, _, _, _, _}}) ->
	not_found;
handle_footprints_response({ok, {{<<"400">>, _}, _, Body, _, _}}) ->
	%% A 400 carries a JSON object naming the validation failure; translate
	%% the known error strings into atoms.
	case catch jiffy:decode(Body, [return_maps]) of
		{'EXIT', DecodeError} ->
			{error, DecodeError};
		#{ <<"error">> := <<"invalid_footprint_number_encoding">> } ->
			{error, invalid_footprint_number_encoding};
		#{ <<"error">> := <<"negative_partition_number">> } ->
			{error, negative_partition_number};
		#{ <<"error">> := <<"negative_footprint_number">> } ->
			{error, negative_footprint_number};
		#{ <<"error">> := <<"footprint_number_too_large">> } ->
			{error, footprint_number_too_large};
		Unrecognized ->
			{error, Unrecognized}
	end;
handle_footprints_response({ok, {{<<"429">>, _}, _, _, _, _}}) ->
	{error, too_many_requests};
handle_footprints_response(Reply) ->
	{error, Reply}.
%% Decode a chunk proof reply, verify the served packing matches the requested
%% one, and sanity-check the chunk size. Return {ok, Proof, Time, Size} where
%% Time is End - Start and Size the serialized proof size.
handle_chunk_response({ok, {{<<"200">>, _}, _, Body, Start, End}}, RequestedPacking, Peer) ->
	case catch ar_serialize:binary_to_poa(Body) of
		{'EXIT', Reason} ->
			{error, Reason};
		{error, Reason} ->
			{error, Reason};
		{ok, #{ packing := Packing } = Proof} ->
			%% `any' accepts whatever the peer served; otherwise the served
			%% packing must equal RequestedPacking (the second clause matches
			%% the bound variable Packing against RequestedPacking).
			CheckPacking = case RequestedPacking of
				any -> true;
				Packing -> true;
				_ -> false
			end,
			case CheckPacking of
				true ->
					case maps:get(chunk, Proof) of
						<<>> ->
							{error, empty_chunk};
						Chunk when byte_size(Chunk) > ?DATA_CHUNK_SIZE ->
							{error, chunk_bigger_than_256kib};
						_ ->
							{ok, Proof, End - Start, byte_size(term_to_binary(Proof))}
					end;
				false ->
					?LOG_WARNING([{event, peer_served_proof_with_wrong_packing},
							{requested_packing,
									ar_serialize:encode_packing(RequestedPacking, false)},
							{got_packing, ar_serialize:encode_packing(Packing, false)},
							{peer, ar_util:format_peer(Peer)}]),
					{error, wrong_packing}
			end
	end;
handle_chunk_response({error, _} = Response, _RequestedPacking, _Peer) ->
	Response;
handle_chunk_response(Response, _RequestedPacking, _Peer) ->
	{error, Response}.
%% Decode the JSON mempool reply into a list of 32-byte transaction
%% identifiers. Return {{ok, TXIDs}, Peer} on success.
handle_mempool_response({ok, {{<<"200">>, _}, _, Body, _, _}}, Peer) ->
	case catch jiffy:decode(Body) of
		{'EXIT', Error} ->
			?LOG_WARNING([{event, failed_to_parse_peer_mempool},
					{error, io_lib:format("~p", [Error])}]),
			{error, invalid_json};
		L when is_list(L) ->
			%% Decode every identifier, preserving order; abort on the first
			%% invalid one.
			Result = lists:foldr(
				fun	(_, {error, Reason}) ->
						{error, Reason};
					(EncodedTXID, {ok, Acc}) ->
						case ar_util:safe_decode(EncodedTXID) of
							{ok, TXID} when byte_size(TXID) /= 32 ->
								?LOG_WARNING([{event, failed_to_parse_peer_mempool},
										{reason, invalid_txid},
										{txid, io_lib:format("~p", [EncodedTXID])}]),
								{error, invalid_txid};
							{ok, TXID} ->
								{ok, [TXID | Acc]};
							{error, invalid} ->
								?LOG_WARNING([{event, failed_to_parse_peer_mempool},
										{reason, invalid_txid},
										{txid, io_lib:format("~p", [EncodedTXID])}]),
								{error, invalid_txid}
						end
				end,
				{ok, []},
				L
			),
			case Result of
				{ok, TXIDs} ->
					{{ok, TXIDs}, Peer};
				{error, Reason2} ->
					{error, Reason2}
			end;
		NotList ->
			?LOG_WARNING([{event, failed_to_parse_peer_mempool},
					{reason, invalid_format},
					{reply, io_lib:format("~p", [NotList])}]),
			{error, invalid_format}
	end;
handle_mempool_response(Response, _Peer) ->
	{error, Response}.

%% Deserialize a sync buckets reply using the given bucket size.
handle_get_sync_buckets_response({ok, {{<<"200">>, _}, _, Body, _, _}}, BucketSize) ->
	case ar_sync_buckets:deserialize(Body, BucketSize) of
		{ok, Buckets} ->
			{ok, Buckets};
		{'EXIT', Reason} ->
			{error, Reason};
		_ ->
			{error, invalid_response_type}
	end;
handle_get_sync_buckets_response({ok,
		{{<<"400">>, _}, _, <<"Request type not found.">>, _, _}}, _BucketSize) ->
	%% The peer does not support this endpoint.
	{error, request_type_not_found};
handle_get_sync_buckets_response(Response, _BucketSize) ->
	{error, Response}.
%% Decode the JSON recent hash list reply into a list of decoded hashes.
handle_get_recent_hash_list_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	case ar_serialize:json_decode(Body) of
		{ok, HL} when is_list(HL) ->
			decode_hash_list(HL);
		{ok, _} ->
			{error, invalid_hash_list};
		Error ->
			Error
	end;
handle_get_recent_hash_list_response({ok,
		{{<<"400">>, _}, _, <<"Request type not found.">>, _, _}}) ->
	{error, request_type_not_found};
handle_get_recent_hash_list_response(Response) ->
	{error, Response}.

%% Decode the binary recent hash list diff reply; report every decode failure
%% to ar_peers.
handle_get_recent_hash_list_diff_response({ok, {{<<"200">>, _}, _, Body, _, _}}, HL, Peer) ->
	case parse_recent_hash_list_diff(Body, HL) of
		{ok, Reply} ->
			{ok, Reply};
		{error, Reason} ->
			%% Covers invalid_input and unknown_base as well as the
			%% malformed-entry errors (invalid_input4/invalid_input5) from
			%% parse_recent_hash_list_diff/1, which previously fell through
			%% this case and crashed with a case_clause on malformed input.
			ar_peers:issue_warning(Peer, recent_hash_list_diff, Reason),
			{error, Reason}
	end;
handle_get_recent_hash_list_diff_response({ok, {{<<"404">>, _}, _, _, _, _}}, _HL, _Peer) ->
	{error, not_found};
handle_get_recent_hash_list_diff_response({ok,
		{{<<"400">>, _}, _, <<"Request type not found.">>, _, _}}, _HL, _Peer) ->
	{error, request_type_not_found};
handle_get_recent_hash_list_diff_response(Response, _HL, _Peer) ->
	{error, Response}.

%% Base64url-decode each hash, preserving order; stop at the first bad one.
decode_hash_list(HL) ->
	decode_hash_list(HL, []).

decode_hash_list([H | HL], DecodedHL) ->
	case ar_util:safe_decode(H) of
		{ok, DecodedH} ->
			decode_hash_list(HL, [DecodedH | DecodedHL]);
		Error ->
			Error
	end;
decode_hash_list([], DecodedHL) ->
	{ok, lists:reverse(DecodedHL)}.

%% The diff must be anchored at a 48-byte hash present in our hash list HL;
%% otherwise the peer's view diverges too far from ours.
parse_recent_hash_list_diff(<< PrevH:48/binary, Rest/binary >>, HL) ->
	case lists:member(PrevH, HL) of
		true ->
			parse_recent_hash_list_diff(Rest);
		false ->
			{error, unknown_base}
	end;
parse_recent_hash_list_diff(_Input, _HL) ->
	{error, invalid_input}.
%% Parse the remainder of a hash list diff. Either we are fully in sync, or we
%% return the first block missing from our cache together with its transaction
%% identifiers and the number of blocks built on top of it.
parse_recent_hash_list_diff(<<>>) ->
	{ok, in_sync};
parse_recent_hash_list_diff(<< Hash:48/binary, TXCount:16, TXIDsBin:(32 * TXCount)/binary,
		Tail/binary >>) when TXCount =< ?BLOCK_TX_COUNT_LIMIT ->
	case ar_block_cache:get(block_cache, Hash) of
		not_found ->
			%% First block we do not have; everything after it sits on top.
			case count_blocks_on_top(Tail) of
				{ok, OnTop} ->
					{ok, {Hash, parse_txids(TXIDsBin), OnTop}};
				Error ->
					Error
			end;
		_Cached ->
			parse_recent_hash_list_diff(Tail)
	end;
parse_recent_hash_list_diff(_Malformed) ->
	{error, invalid_input4}.

%% Count the well-formed block entries remaining in the diff.
count_blocks_on_top(Bin) ->
	count_blocks_on_top(Bin, 0).

count_blocks_on_top(<<>>, Count) ->
	{ok, Count};
count_blocks_on_top(<< _Hash:48/binary, TXCount:16, _TXIDs:(32 * TXCount)/binary,
		Tail/binary >>, Count) ->
	count_blocks_on_top(Tail, Count + 1);
count_blocks_on_top(_Malformed, _Count) ->
	{error, invalid_input5}.

%% Split a flat binary into 32-byte transaction identifiers.
parse_txids(<<>>) ->
	[];
parse_txids(<< TXID:32/binary, Tail/binary >>) ->
	[TXID | parse_txids(Tail)].

%% @doc Return the current height of a remote node, `not_joined' when it
%% replies with a 500, or {error, invalid_height} for an unparsable body.
get_height(Peer) ->
	Request = #{
		method => get,
		peer => Peer,
		path => "/height",
		headers => p2p_headers()
	},
	case ar_http:req(Request) of
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			try
				binary_to_integer(Body)
			catch _:_ ->
				{error, invalid_height}
			end;
		{ok, {{<<"500">>, _}, _, _, _, _}} ->
			not_joined
	end.

%% @doc Download the transactions of the given block from the given peers,
%% enforcing the per-block transaction count limit.
get_txs(Peers, B) ->
	TXIDs = B#block.txs,
	case length(TXIDs) > ?BLOCK_TX_COUNT_LIMIT of
		true ->
			?LOG_ERROR([{event, downloaded_txs_count_exceeds_limit}]),
			{error, txs_count_exceeds_limit};
		false ->
			get_txs(B#block.height, Peers, TXIDs, [], 0)
	end.
%% Download the block's transactions one by one, accumulating the total
%% format-1 data size and enforcing the per-block data size limit (format-2
%% data is not part of the block, so it is not counted).
get_txs(_Height, _Peers, [], TXs, _TotalSize) ->
	{ok, lists:reverse(TXs)};
get_txs(Height, Peers, [TXID | Rest], TXs, TotalSize) ->
	Fork_2_0 = ar_fork:height_2_0(),
	case get_tx(Peers, TXID) of
		#tx{ format = 2 } = TX ->
			get_txs(Height, Peers, Rest, [TX | TXs], TotalSize);
		%% Pre-2.0 blocks are exempt from the 2.0 data size accounting.
		#tx{} = TX when Height < Fork_2_0 ->
			get_txs(Height, Peers, Rest, [TX | TXs], TotalSize);
		#tx{ format = 1 } = TX ->
			case TotalSize + TX#tx.data_size of
				NewTotalSize when NewTotalSize > ?BLOCK_TX_DATA_SIZE_LIMIT ->
					?LOG_ERROR([{event, downloaded_txs_exceed_block_size_limit}]),
					{error, txs_exceed_block_size_limit};
				NewTotalSize ->
					get_txs(Height, Peers, Rest, [TX | TXs], NewTotalSize)
			end;
		_ ->
			{error, tx_not_found}
	end.

%% @doc Retrieve a tx by ID from the memory pool, disk, or a remote peer.
get_tx(Peer, TX) when not is_list(Peer) ->
	get_tx([Peer], TX);
get_tx(_Peers, #tx{} = TX) ->
	%% Already a transaction record - nothing to fetch.
	TX;
get_tx(Peers, TXID) ->
	case ar_mempool:get_tx(TXID) of
		not_found ->
			get_tx_from_disk_or_peers(Peers, TXID);
		TX ->
			TX
	end.

%% Read the transaction from local storage, falling back to remote peers.
get_tx_from_disk_or_peers(Peers, TXID) ->
	case ar_storage:read_tx(TXID) of
		unavailable ->
			case get_tx_from_remote_peers(Peers, TXID) of
				not_found ->
					not_found;
				{TX, _Peer, _Time, _Size} ->
					TX
			end;
		TX ->
			TX
	end.

get_tx_from_remote_peers(Peers, TXID) ->
	get_tx_from_remote_peers(Peers, TXID, true).

%% Try random peers (among the first five) until one serves a valid
%% transaction; RatePeer controls whether the serving peer is rated.
get_tx_from_remote_peers([], _TXID, _RatePeer) ->
	not_found;
get_tx_from_remote_peers(Peers, TXID, RatePeer) ->
	Peer = lists:nth(rand:uniform(min(5, length(Peers))), Peers),
	case get_tx_from_remote_peer(Peer, TXID, RatePeer) of
		{#tx{} = TX, Peer, Time, Size} ->
			{TX, Peer, Time, Size};
		_ ->
			get_tx_from_remote_peers(Peers -- [Peer], TXID, RatePeer)
	end.
%% Fetch the transaction from the given peer, choosing the binary encoding for
%% peers on release 52 and later (JSON otherwise), and verify that the served
%% transaction hashes to TXID before accepting it.
get_tx_from_remote_peer(Peer, TXID, RatePeer) ->
	Release = ar_peers:get_peer_release(Peer),
	Encoding = case Release >= 52 of true -> binary; _ -> json end,
	case handle_tx_response(Peer, Encoding,
			ar_http:req(#{
				method => get,
				peer => Peer,
				path => get_tx_path(TXID, Encoding),
				headers => p2p_headers(),
				connect_timeout => 1000,
				timeout => 30 * 1000,
				limit => ?MAX_BODY_SIZE
			})
	) of
		{ok, #tx{} = TX, Time, Size} ->
			case ar_tx:verify_tx_id(TXID, TX) of
				false ->
					?LOG_WARNING([
						{event, peer_served_invalid_tx},
						{peer, ar_util:format_peer(Peer)},
						{tx, ar_util:encode(TXID)}
					]),
					ar_peers:issue_warning(Peer, tx, invalid),
					{error, invalid_tx};
				true ->
					case RatePeer of
						true ->
							ar_peers:rate_fetched_data(Peer, tx, Time, Size);
						false ->
							ok
					end,
					{TX, Peer, Time, Size}
			end;
		Error ->
			Error
	end.

%% Map a transaction identifier to the unconfirmed-tx endpoint for the
%% requested encoding.
get_tx_path(TXID, json) ->
	"/unconfirmed_tx/" ++ binary_to_list(ar_util:encode(TXID));
get_tx_path(TXID, binary) ->
	"/unconfirmed_tx2/" ++ binary_to_list(ar_util:encode(TXID)).

%% @doc Retrieve only the data associated with a transaction.
%% The function must only be used when it is known that the transaction
%% has data.
get_tx_data([], _Hash) ->
	unavailable;
get_tx_data(Peers, Hash) when is_list(Peers) ->
	%% Try random peers (among the first five) until one serves the data.
	Peer = lists:nth(rand:uniform(min(5, length(Peers))), Peers),
	case get_tx_data(Peer, Hash) of
		unavailable ->
			get_tx_data(Peers -- [Peer], Hash);
		Data ->
			Data
	end;
get_tx_data(Peer, Hash) ->
	Reply = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/tx/" ++ binary_to_list(ar_util:encode(Hash)) ++ "/data",
		headers => p2p_headers(),
		connect_timeout => 500,
		timeout => 120 * 1000,
		limit => ?MAX_BODY_SIZE
	}),
	case Reply of
		{ok, {{<<"200">>, _}, _, <<>>, _, _}} ->
			unavailable;
		{ok, {{<<"200">>, _}, _, EncodedData, _, _}} ->
			case ar_util:safe_decode(EncodedData) of
				{ok, Data} ->
					Data;
				{error, invalid} ->
					unavailable
			end;
		_ ->
			unavailable
	end.

%% @doc Retrieve the current universal time as claimed by a foreign node.
get_time(Peer, Timeout) ->
	case ar_http:req(#{ method => get, peer => Peer, path => "/time",
			headers => p2p_headers(), timeout => Timeout + 100 }) of
		{ok, {{<<"200">>, _}, _, Body, Start, End}} ->
			case catch binary_to_integer(Body) of
				{'EXIT', _} ->
					{error, invalid_time};
				Time ->
					%% NOTE(review): End - Start appears to be the request
					%% round-trip in microseconds (hence /1000000) - confirm
					%% against ar_http.
					RequestTime = ceil((End - Start) / 1000000),
					%% The timestamp returned by the HTTP daemon is floored second precision.
					%% Thus the upper bound is increased by 1.
					{ok, {Time - RequestTime, Time + RequestTime + 1}}
			end;
		Other ->
			{error, Other}
	end.

%% @doc Retrieve information from a peer. Optionally, filter the resulting
%% keyval list for required information.
get_info(Peer, Type) ->
	case get_info(Peer) of
		info_unavailable ->
			info_unavailable;
		Info ->
			maps:get(atom_to_binary(Type), Info, info_unavailable)
	end.

%% @doc Fetch and decode the /info JSON map from the peer, or return
%% `info_unavailable'.
get_info(Peer) ->
	case ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/info",
		headers => p2p_headers(),
		connect_timeout => 1000,
		timeout => 2 * 1000
	}) of
		{ok, {{<<"200">>, _}, _, JSON, _, _}} ->
			case ar_serialize:json_decode(JSON, [return_maps]) of
				{ok, JsonMap} ->
					JsonMap;
				{error, _} ->
					info_unavailable
			end;
		_ ->
			info_unavailable
	end.

%% @doc Return a list of parsed peer IPs for a remote server.
get_peers(Peer) ->
	try
		begin
			{ok, {{<<"200">>, _}, _, Body, _, _}} =
				ar_http:req(#{
					method => get,
					peer => Peer,
					path => "/peers",
					headers => p2p_headers(),
					connect_timeout => 500,
					timeout => 2 * 1000
				}),
			PeerArray = ar_serialize:dejsonify(Body),
			PeersList = lists:map(fun ar_util:parse_peer/1, PeerArray),
			lists:flatten(PeersList)
		end
	%% Any failure (non-200, bad JSON, bad peer string) yields `unavailable'.
	catch _:_ ->
		unavailable
	end.

%% @doc Process the response of an /block call.
%% Treat a 400 or a 404 alike: the peer does not have (or refuses to serve)
%% the requested block.
handle_block_response(_Peer, _Encoding, {ok, {{<<"400">>, _}, _, _, _, _}}) ->
	not_found;
handle_block_response(_Peer, _Encoding, {ok, {{<<"404">>, _}, _, _, _, _}}) ->
	not_found;
handle_block_response(Peer, Encoding, {ok, {{<<"200">>, _}, _, Body, Start, End}}) ->
	%% Pick the decoder matching the encoding the block was requested in.
	DecodeFun = case Encoding of
		json ->
			fun(Input) ->
				ar_serialize:json_struct_to_block(ar_serialize:dejsonify(Input))
			end;
		binary ->
			fun ar_serialize:binary_to_block/1
	end,
	case catch DecodeFun(Body) of
		{'EXIT', Reason} ->
			%% The decoder crashed - the peer sent an unparsable body.
			?LOG_INFO(
				"event: failed_to_parse_block_response, peer: ~s, reason: ~p",
				[ar_util:format_peer(Peer), Reason]),
			ar_peers:issue_warning(Peer, block, Reason),
			not_found;
		{ok, B} ->
			%% End - Start is the elapsed request time as reported by
			%% ar_http:req; the size is approximated via the external term
			%% format serialization of the block.
			{ok, B, End - Start, byte_size(term_to_binary(B))};
		B when is_record(B, block) ->
			%% Some decoder paths return a bare #block record rather than
			%% an {ok, B} tuple; accept both.
			{ok, B, End - Start, byte_size(term_to_binary(B))};
		Error ->
			%% The decoder returned an error term without crashing.
			?LOG_INFO(
				"event: failed_to_parse_block_response, peer: ~s, error: ~p",
				[ar_util:format_peer(Peer), Error]),
			ar_peers:issue_warning(Peer, block, Error),
			not_found
	end;
handle_block_response(Peer, _Encoding, Response) ->
	%% Transport-level failure or unexpected status code - penalize the peer.
	ar_peers:issue_warning(Peer, block, Response),
	not_found.

%% @doc Process the response of a GET /unconfirmed_tx call.
%% Returns {ok, TX, Time, Size} on success, where Time is the elapsed
%% request time reported by ar_http:req and Size approximates the
%% transaction size via its external term format; returns {error, Reason}
%% otherwise. For format >= 2 transactions the inline data is dropped (it
%% is fetched separately) and its size is excluded from the reported Size.
%% Peers returning unparsable bodies or transport errors are reported to
%% ar_peers.
handle_tx_response(_Peer, _Encoding, {ok, {{<<"404">>, _}, _, _, _, _}}) ->
	{error, not_found};
handle_tx_response(_Peer, _Encoding, {ok, {{<<"400">>, _}, _, _, _, _}}) ->
	{error, bad_request};
handle_tx_response(Peer, Encoding, {ok, {{<<"200">>, _}, _, Body, Start, End}}) ->
	%% Pick the decoder matching the encoding the transaction was
	%% requested in.
	DecodeFun = case Encoding of
		json -> fun ar_serialize:json_struct_to_tx/1;
		binary -> fun ar_serialize:binary_to_tx/1
	end,
	case catch DecodeFun(Body) of
		{ok, TX} ->
			%% Some decoder paths wrap the record in {ok, _}, others return
			%% it bare; both go through the same success path.
			accept_fetched_tx(TX, End - Start);
		TX when is_record(TX, tx) ->
			accept_fetched_tx(TX, End - Start);
		{'EXIT', Reason} ->
			%% The decoder crashed - the peer sent an unparsable body.
			ar_peers:issue_warning(Peer, tx, Reason),
			{error, Reason};
		Reply ->
			%% Pass through decoder-level error terms (e.g. {error, _})
			%% unchanged, preserving the historical return shape.
			ar_peers:issue_warning(Peer, tx, Reply),
			Reply
	end;
handle_tx_response(Peer, _Encoding, Response) ->
	%% Transport-level failure or unexpected status code.
	ar_peers:issue_warning(Peer, tx, Response),
	{error, Response}.

%% Build the {ok, TX, Time, Size} success tuple for a fetched transaction.
%% Format 1 transactions carry their data inline, so the data is kept; for
%% later formats the data is stripped and excluded from the accounted size.
accept_fetched_tx(TX, Time) ->
	Size = byte_size(term_to_binary(TX)),
	case TX#tx.format == 1 of
		true ->
			{ok, TX, Time, Size};
		_ ->
			DataSize = byte_size(TX#tx.data),
			{ok, TX#tx{ data = <<>> }, Time, Size - DataSize}
	end.
%% @doc Parse the JSON partition table returned by a coordinated-mining
%% peer. Each entry is expected to be a jiffy-decoded object whose keys
%% appear in exactly this order: "bucket", "bucketsize", "addr", and
%% optionally "pdiff". Returns {ok, [{Bucket, BucketSize, Addr,
%% PackingDifficulty}]} preserving input order, or {error, Reason}.
handle_cm_partition_table_response({ok, {{<<"200">>, _}, _, Body, _, _}}) ->
	case catch jiffy:decode(Body) of
		{'EXIT', Error} ->
			?LOG_WARNING([{event, failed_to_parse_cm_partition_table},
					{error, io_lib:format("~p", [Error])}]),
			{error, invalid_json};
		L when is_list(L) ->
			%% Fold from the right so prepending keeps the input order; the
			%% first invalid entry switches the accumulator to {error, _},
			%% which then propagates past the remaining entries.
			lists:foldr(
				fun	(_, {error, Reason}) ->
						{error, Reason};
					(Partition, {ok, Acc}) ->
						case Partition of
							{[
								{<<"bucket">>, Bucket},
								{<<"bucketsize">>, BucketSize},
								{<<"addr">>, EncodedAddr}
							]} ->
								%% Entries without "pdiff" default to packing
								%% difficulty 0.
								DecodedPartition = {
									Bucket,
									BucketSize,
									ar_util:decode(EncodedAddr),
									0
								},
								{ok, [DecodedPartition | Acc]};
							{[
								{<<"bucket">>, Bucket},
								{<<"bucketsize">>, BucketSize},
								{<<"addr">>, EncodedAddr},
								{<<"pdiff">>, PackingDifficulty}
							]} when is_integer(PackingDifficulty) andalso (
									(PackingDifficulty >= 1 andalso
										PackingDifficulty =< ?MAX_PACKING_DIFFICULTY)
									orelse
									(PackingDifficulty == ?REPLICA_2_9_PACKING_DIFFICULTY)) ->
								DecodedPartition = {
									Bucket,
									BucketSize,
									ar_util:decode(EncodedAddr),
									PackingDifficulty
								},
								{ok, [DecodedPartition | Acc]};
							_ ->
								%% NOTE(review): the {txid, _} log key looks like
								%% a misnomer - the logged term is a partition
								%% entry, not a transaction id.
								?LOG_WARNING([{event, failed_to_parse_cm_partition_table},
										{reason, invalid_partition},
										{txid, io_lib:format("~p", [Partition])}]),
								{error, invalid_partition}
						end
				end,
				{ok, []},
				L
			);
		NotList ->
			?LOG_WARNING([{event, failed_to_parse_cm_partition_table},
					{reason, invalid_format},
					{reply, io_lib:format("~p", [NotList])}]),
			{error, invalid_format}
	end;
handle_cm_partition_table_response(Response) ->
	{error, Response}.

%% Acknowledge a successful coordinated-mining no-op call.
handle_cm_noop_response({ok, {{<<"200">>, _}, _, _Body, _, _}}) ->
	{ok, []};
handle_cm_noop_response(Response) ->
	{error, Response}.

%% Standard peer-to-peer request headers: the local P2P port and the node's
%% release number.
p2p_headers() ->
	{ok, Config} = arweave_config:get_env(),
	[{<<"x-p2p-port">>, integer_to_binary(Config#config.port)},
		{<<"x-release">>, integer_to_binary(?RELEASE_NUMBER)}].

%% P2P headers extended with the coordinated-mining API secret.
cm_p2p_headers() ->
	{ok, Config} = arweave_config:get_env(),
	add_header(<<"x-cm-api-secret">>, Config#config.cm_api_secret, p2p_headers()).
%% Headers sent by a mining pool client: P2P headers plus the pool API key
%% and, when configured, the worker name.
pool_client_headers() ->
	{ok, Config} = arweave_config:get_env(),
	Headers = add_header(<<"x-pool-api-key">>, Config#config.pool_api_key, p2p_headers()),
	case Config#config.pool_worker_name of
		not_set ->
			Headers;
		WorkerName ->
			add_header(<<"worker">>, WorkerName, Headers)
	end.

%% Prepend {Name, Value} to Headers when both are binaries; otherwise log
%% the invalid header and return Headers unchanged (best effort - e.g. when
%% the corresponding config value is not set).
add_header(Name, Value, Headers) when is_binary(Name) andalso is_binary(Value) ->
	[{Name, Value} | Headers];
add_header(Name, Value, Headers) ->
	?LOG_ERROR([{event, invalid_header}, {name, Name}, {value, Value}]),
	Headers.

%% @doc Utility to filter out some log spam. We generally don't want to log a failed HTTP
%% request if the peer just times out or is no longer taking connections.
log_failed_request(Reason, Log) ->
	case Reason of
		{error,{shutdown,econnrefused}} -> ok;
		{error,{shutdown,timeout}} -> ok;
		{error,timeout} -> ok;
		{error,{shutdown,ehostunreach}} -> ok;
		{error,{stream_error,closed}} -> ok;
		{error,{stream_error,closing}} -> ok;
		{error,{stream_error,{closed,normal}}} -> ok;
		_ -> ?LOG_DEBUG(Log)
	end.



================================================
FILE: apps/arweave/src/ar_http_iface_middleware.erl
================================================

-module(ar_http_iface_middleware).

-behaviour(cowboy_middleware).

-export([execute/2, read_body_chunk/4]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").
-include("ar_mining.hrl").
-include("ar_data_sync.hrl").
-include("ar_data_discovery.hrl").
-include("ar_pool.hrl").

%% How long a spawned request handler may run before the middleware replies
%% with a 500 (see loop/1).
-define(HANDLER_TIMEOUT, ?DEFAULT_HTTP_HANDLER_TIMEOUT_MS).

-define(MAX_SERIALIZED_RECENT_HASH_LIST_DIFF, 2400). % 50 * 48.
-define(MAX_SERIALIZED_MISSING_TX_INDICES, 125). % Every byte encodes 8 positions.
-define(MAX_BLOCK_INDEX_RANGE_SIZE, 10000).

%%%===================================================================
%%% Cowboy handler callbacks.
%%%===================================================================

%% To allow prometheus_cowboy2_handler to be run when the
%% cowboy_router middleware matches on the /metrics route, this
%% middleware runs between the cowboy_router and cowboy_handler
%% middlewares. It uses the `handler` env value set by cowboy_router
%% to determine whether or not it should run, otherwise it lets
%% the cowboy_handler middleware run prometheus_cowboy2_handler.
execute(Req, #{ handler := ar_http_iface_handler }) ->
	Pid = self(),
	%% Handle the request in a linked child process so this (cowboy) process
	%% can enforce its own timeout and serve body-read requests - see loop/1.
	HandlerPid = spawn_link(fun() ->
		{Duration, Response = {Code, _, _, Resp}} =
			timer:tc(fun() -> handle(Req, Pid) end),
		log(Code, Resp, #{duration => Duration}),
		Pid ! {handled, Response}
	end),
	{ok, TimeoutRef} = ar_timer:send_after(
		?HANDLER_TIMEOUT,
		self(),
		{timeout, HandlerPid, Req},
		#{
			skip_on_shutdown => false
		}
	),
	loop(TimeoutRef);
execute(Req, Env) ->
	%% Not our handler - let the regular cowboy middleware chain proceed.
	{ok, Req, Env}.

%%--------------------------------------------------------------------
%% @doc Logs client requests. HTTP logging is done only if
%% arweave_http_api handler is started.
%% @end
%%--------------------------------------------------------------------
log(Code, Resp, Init) ->
	case ar_logger:is_started(arweave_http_api) of
		true ->
			%% Seed every field with "undefined"; the log_* pipeline below
			%% fills in whatever the response map actually carries.
			Buffer = Init#{
				domain => [arweave,http,api],
				code => "undefined",
				method => "undefined",
				path => "undefined",
				peer_ip => "undefined",
				peer_port => "undefined",
				body_length => 0,
				version => "undefined"
			},
			Meta = log_code(Code, Resp, Buffer),
			logger:info("", [], Meta);
		_ ->
			ok
	end.

%% Record the status code in the log buffer, when it is an integer.
log_code(Code, Resp, Buffer) when is_integer(Code) ->
	NewBuffer = Buffer#{code => integer_to_list(Code)},
	log_method(Code, Resp, NewBuffer);
log_code(Code, Resp, Buffer) ->
	log_method(Code, Resp, Buffer).

%% Record the request method in the log buffer, when present.
log_method(Code, Resp = #{method := Method}, Buffer) when is_binary(Method) ->
	NewBuffer = Buffer#{method => binary_to_list(Method)},
	log_path(Code, Resp, NewBuffer);
log_method(Code, Resp, Buffer) ->
	log_path(Code, Resp, Buffer).
%% Record the request path in the log buffer, when present.
log_path(Code, Resp = #{path := Path}, Buffer) when is_binary(Path) ->
	NewBuffer = Buffer#{path => binary_to_list(Path)},
	log_peer(Code, Resp, NewBuffer);
log_path(Code, Resp, Buffer) ->
	log_peer(Code, Resp, Buffer).

%% Record the peer IPv4 address and port in the log buffer, when present.
log_peer(Code, Resp = #{peer := {IP={A,B,C,D},Port}}, Buffer)
		when is_integer(A), is_integer(B), is_integer(C), is_integer(D),
			is_integer(Port) ->
	PeerIP = inet:ntoa(IP),
	PeerPort = integer_to_list(Port),
	NewBuffer = Buffer#{
		peer_ip => PeerIP,
		peer_port => PeerPort
	},
	log_body_length(Code, Resp, NewBuffer);
log_peer(Code, Resp, Buffer) ->
	log_body_length(Code, Resp, Buffer).

%% Record the body length in the log buffer, when present.
log_body_length(Code, Resp = #{body_length := BodyLength}, Buffer)
		when is_integer(BodyLength) ->
	NewBuffer = Buffer#{body_length => integer_to_list(BodyLength)},
	log_version(Code, Resp, NewBuffer);
log_body_length(Code, Resp, Buffer) ->
	log_version(Code, Resp, Buffer).

%% Final step of the log pipeline: record the HTTP version and return the
%% completed metadata map.
log_version(_Code, _Resp = #{version := Version}, Buffer) when is_atom(Version) ->
	Buffer#{version => atom_to_list(Version)};
log_version(_Code, _Resp, Buffer) ->
	Buffer.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc In order to be able to have a handler-side timeout, we need to
%% handle the request asynchronously. However, cowboy doesn't allow
%% reading the request's body from a process other than its handler's.
%% This following loop function allows us to work around this
%% limitation. (see https://github.com/ninenines/cowboy/issues/1374)
%% @end
loop(TimeoutRef) ->
	receive
		{handled, {Status, Headers, Body, HandledReq}} ->
			%% NOTE(review): the timeout was armed via ar_timer:send_after/4
			%% but is cancelled with timer:cancel/1 - confirm the returned
			%% reference is compatible with OTP's timer module.
			timer:cancel(TimeoutRef),
			CowboyStatus = handle_custom_codes(Status),
			RepliedReq = cowboy_req:reply(CowboyStatus, Headers, Body, HandledReq),
			{stop, RepliedReq};
		{read_complete_body, From, Req, SizeLimit} ->
			%% Read the body in this (cowboy handler) process on behalf of
			%% the worker; the catch-all pattern forwards errors unchanged.
			case catch ar_http_req:body(Req, SizeLimit) of
				Term -> From ! {read_complete_body, Term}
			end,
			loop(TimeoutRef);
		{read_body_chunk, From, Req, Size, Timeout} ->
			case catch ar_http_req:read_body_chunk(Req, Size, Timeout) of
				Term -> From ! {read_body_chunk, Term}
			end,
			loop(TimeoutRef);
		{timeout, HandlerPid, InitialReq} ->
			%% The worker exceeded ?HANDLER_TIMEOUT - kill it (unlinking
			%% first so its exit does not take this process down) and
			%% reply 500.
			unlink(HandlerPid),
			exit(HandlerPid, handler_timeout),
			?LOG_WARNING([{event, handler_timeout},
					{method, cowboy_req:method(InitialReq)},
					{path, cowboy_req:path(InitialReq)}]),
			RepliedReq = cowboy_req:reply(500, #{}, <<"Handler timeout">>, InitialReq),
			{stop, RepliedReq}
	end.

handle(Req, Pid) ->
	Peer = ar_http_util:arweave_peer(Req),
	handle(Peer, Req, Pid).

%% Dispatch the request to the route table (handle4), optionally logging it
%% first, and attach CORS headers to the response.
handle(Peer, Req, Pid) ->
	Method = cowboy_req:method(Req),
	SplitPath = ar_http_iface_server:split_path(cowboy_req:path(Req)),
	{ok, Config} = arweave_config:get_env(),
	case lists:member(http_logging, Config#config.enable) of
		true ->
			?LOG_INFO([
				{event, http_request},
				{method, Method},
				{path, SplitPath},
				{peer, ar_util:format_peer(Peer)}
			]);
		_ ->
			do_nothing
	end,
	Response2 = handle4(Method, SplitPath, Req, Pid),
	add_cors_headers(Req, Response2).

%% Merge the permissive CORS headers into the response, accepting the
%% 4-tuple, 3-tuple, and {error, timeout} response shapes.
add_cors_headers(Req, Response) ->
	case Response of
		{Status, Hdrs, Body, HandledReq} ->
			{Status, maps:merge(?CORS_HEADERS, Hdrs), Body, HandledReq};
		{Status, Body, HandledReq} ->
			{Status, ?CORS_HEADERS, Body, HandledReq};
		{error, timeout} ->
			{503, ?CORS_HEADERS, jiffy:encode(#{ error => timeout }), Req}
	end.

-ifdef(TESTNET).
%% Testnet-only routes layered in front of the regular route table.
handle4(<<"POST">>, [<<"mine">>], Req, _Pid) ->
	ar_test_node:mine(),
	{200, #{}, <<>>, Req};
handle4(<<"GET">>, [<<"tx">>, <<"ready_for_mining">>], Req, _Pid) ->
	{200, #{}, ar_serialize:jsonify(
		lists:map(
			fun ar_util:encode/1,
			ar_node:get_ready_for_mining_txs()
		)
	), Req};
handle4(Method, SplitPath, Req, Pid) ->
	handle(Method, SplitPath, Req, Pid).
-else.
handle4(Method, SplitPath, Req, Pid) ->
	handle(Method, SplitPath, Req, Pid).
-endif.

%% Return network information from a given node.
%% GET request to endpoint /info.
handle(<<"GET">>, [], Req, _Pid) -> {200, #{}, ar_serialize:jsonify(ar_info:get_info()), Req}; handle(<<"GET">>, [<<"info">>], Req, _Pid) -> {200, #{}, ar_serialize:jsonify(ar_info:get_info()), Req}; handle(<<"GET">>, [<<"recent">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> {200, #{}, ar_serialize:jsonify(ar_info:get_recent()), Req} end; handle(<<"GET">>, [<<"is_tx_blacklisted">>, EncodedTXID], Req, _Pid) -> case ar_util:safe_decode(EncodedTXID) of {error, invalid} -> {400, #{}, jiffy:encode(#{ error => invalid_tx_id }), Req}; {ok, TXID} -> {200, #{}, jiffy:encode(ar_tx_blacklist:is_tx_blacklisted(TXID)), Req} end; %% Some load balancers use 'HEAD's rather than 'GET's to tell if a node %% is alive. Appease them. handle(<<"HEAD">>, [], Req, _Pid) -> {200, #{}, <<>>, Req}; handle(<<"HEAD">>, [<<"info">>], Req, _Pid) -> {200, #{}, <<>>, Req}; %% Return permissive CORS headers for all endpoints. handle(<<"OPTIONS">>, [<<"block">>], Req, _Pid) -> {200, #{<<"access-control-allow-methods">> => <<"GET, POST">>, <<"access-control-allow-headers">> => <<"Content-Type">>}, <<"OK">>, Req}; handle(<<"OPTIONS">>, [<<"tx">>], Req, _Pid) -> {200, #{<<"access-control-allow-methods">> => <<"GET, POST">>, <<"access-control-allow-headers">> => <<"Content-Type">>}, <<"OK">>, Req}; handle(<<"OPTIONS">>, [<<"peer">> | _], Req, _Pid) -> {200, #{<<"access-control-allow-methods">> => <<"GET, POST">>, <<"access-control-allow-headers">> => <<"Content-Type">>}, <<"OK">>, Req}; handle(<<"OPTIONS">>, _, Req, _Pid) -> {200, #{<<"access-control-allow-methods">> => <<"GET">>}, <<"OK">>, Req}; %% Return the current universal time in seconds. handle(<<"GET">>, [<<"time">>], Req, _Pid) -> {200, #{}, integer_to_binary(os:system_time(second)), Req}; %% Return all mempool transactions. %% GET request to endpoint /tx/pending. 
handle(<<"GET">>, [<<"tx">>, <<"pending">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> {200, #{}, ar_serialize:jsonify( %% Should encode lists:map( fun ar_util:encode/1, ar_mempool:get_all_txids() ) ), Req} end; %% Return outgoing transaction priority queue. %% GET request to endpoint /queue. %% @deprecated handle(<<"GET">>, [<<"queue">>], Req, _Pid) -> {200, #{}, <<"[]">>, Req}; %% Return additional information about the transaction with the given identifier (hash). %% GET request to endpoint /tx/{hash}/status. handle(<<"GET">>, [<<"tx">>, Hash, <<"status">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_tx_status(Hash, Req) end; %% Return a JSON-encoded transaction. %% GET request to endpoint /tx/{hash}. handle(<<"GET">>, [<<"tx">>, Hash], Req, _Pid) -> handle_get_tx(Hash, Req, json); %% Return a binary-encoded transaction. %% GET request to endpoint /tx2/{hash}. handle(<<"GET">>, [<<"tx2">>, Hash], Req, _Pid) -> handle_get_tx(Hash, Req, binary); %% Return a possibly unconfirmed JSON-encoded transaction. %% GET request to endpoint /unconfirmed_tx/{hash}. handle(<<"GET">>, [<<"unconfirmed_tx">>, Hash], Req, _Pid) -> handle_get_unconfirmed_tx(Hash, Req, json); %% Return a possibly unconfirmed binary-encoded transaction. %% GET request to endpoint /unconfirmed_tx2/{hash}. handle(<<"GET">>, [<<"unconfirmed_tx2">>, Hash], Req, _Pid) -> handle_get_unconfirmed_tx(Hash, Req, binary); %% Return the data field of the transaction specified via the transaction ID (hash) %% served as HTML. 
%% GET request to endpoint /tx/{hash}/data.html handle(<<"GET">>, [<<"tx">>, Hash, << "data.", _/binary >>], Req, _Pid) -> {ok, Config} = arweave_config:get_env(), case lists:member(serve_html_data, Config#config.disable) of true -> {421, #{}, <<"Serving HTML data is disabled on this node.">>, Req}; _ -> case ar_util:safe_decode(Hash) of {error, invalid} -> {400, #{}, <<"Invalid hash.">>, Req}; {ok, ID} -> case ar_storage:read_tx(ID) of unavailable -> {404, #{ <<"content-type">> => <<"text/html; charset=utf-8">> }, sendfile("genesis_data/not_found.html"), Req}; #tx{} = TX -> serve_tx_html_data(Req, TX) end end end; handle(<<"GET">>, [<<"sync_buckets">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> ok = ar_semaphore:acquire(get_sync_record, ?DEFAULT_CALL_TIMEOUT), case ar_global_sync_record:get_serialized_sync_buckets() of {ok, Binary} -> {200, #{}, Binary, Req}; {error, not_initialized} -> {500, #{}, jiffy:encode(#{ error => not_initialized }), Req}; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req} end end; handle(<<"GET">>, [<<"footprint_buckets">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> ok = ar_semaphore:acquire(get_sync_record, ?DEFAULT_CALL_TIMEOUT), case ar_global_sync_record:get_serialized_footprint_buckets() of {ok, Binary} -> {200, #{}, Binary, Req}; {error, not_initialized} -> {500, #{}, jiffy:encode(#{ error => not_initialized }), Req}; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req} end end; handle(<<"GET">>, [<<"data_sync_record">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> Format = case cowboy_req:header(<<"content-type">>, Req) of <<"application/json">> -> json; _ -> etf end, ok = ar_semaphore:acquire(get_sync_record, ?DEFAULT_CALL_TIMEOUT), Options = #{ format => Format, random_subset => true }, case ar_global_sync_record:get_serialized_sync_record(Options) of {ok, Binary} -> {200, #{}, Binary, 
Req}; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req} end end; handle(<<"GET">>, [<<"data_sync_record">>, EncodedStart, EncodedLimit], Req, _Pid) -> case catch binary_to_integer(EncodedStart) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_start_encoding }), Req}; Start -> case catch binary_to_integer(EncodedLimit) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_limit_encoding }), Req}; Limit -> case Limit > ?MAX_SHARED_SYNCED_INTERVALS_COUNT of true -> {400, #{}, jiffy:encode(#{ error => limit_too_big }), Req}; false -> ok = ar_semaphore:acquire(get_sync_record, ?DEFAULT_CALL_TIMEOUT), handle_get_data_sync_record(Start, Limit, Req) end end end; handle(<<"GET">>, [<<"data_sync_record">>, EncodedStart, EncodedEnd, EncodedLimit], Req, _Pid) -> case catch binary_to_integer(EncodedStart) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_start_encoding }), Req}; Start -> case catch binary_to_integer(EncodedEnd) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_end_encoding }), Req}; End -> case catch binary_to_integer(EncodedLimit) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_limit_encoding }), Req}; Limit -> case Limit > ?MAX_SHARED_SYNCED_INTERVALS_COUNT of true -> {400, #{}, jiffy:encode(#{ error => limit_too_big }), Req}; false -> ok = ar_semaphore:acquire(get_sync_record, ?DEFAULT_CALL_TIMEOUT), handle_get_data_sync_record(Start, End, Limit, Req) end end end end; %% Return the information about the presence of the data from the given footprint %% in the given partition. The returned intervals contain the numbers of the chunks %% starting from 0 belonging to the given footprint (and present on this node). %% The footprint is constructed like a replica 2.9 entropy footprint where chunks are %% spread out across the partition. 
Therefore, the interval [0, 2] does not denote %% two adjacent chunks but rather two chunks separated by %% ar_block:get_replica_2_9_entropy_count() chunks. %% Note that we do not only record footprints for replica_2_9 storage modules, but %% for any packing, because we want to make it convenient for any client to fetch %% the data from us. %% %% Example response: %% { %% "packing": "replica_2_9_A5KJQ7LjCyfGpNj-L-pasroRRVA7z_vWDNcK4aSgZs0", %% "intervals": [ %% ["0", "1"], %% ["2", "10"], %% ["12", "1024"] %% ] %% } %% %% Example response: %% { %% "packing": "unpacked", %% "intervals": ["0", "1024"] %% } %% %% Return 404 when no storage module is configured for the given partition. %% %% Return 400 when the partition or footprint number is not a non-negative integer or the %% footprint number is too large. %% %% GET /footprints/{partition_number}/{footprint_number} handle(<<"GET">>, [<<"footprints">>, EncodedPartition, EncodedFootprintNumber], Req, _Pid) -> case catch binary_to_integer(EncodedPartition) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_partition_encoding }), Req}; Partition when Partition >= 0 -> case catch binary_to_integer(EncodedFootprintNumber) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => invalid_footprint_number_encoding }), Req}; FootprintNumber when FootprintNumber >= 0 -> ok = ar_semaphore:acquire(get_sync_record, ?DEFAULT_CALL_TIMEOUT), handle_get_footprints(Partition, FootprintNumber, Req); _ -> {400, #{}, jiffy:encode(#{ error => negative_footprint_number }), Req} end; _ -> {400, #{}, jiffy:encode(#{ error => negative_partition_number }), Req} end; handle(<<"GET">>, [<<"chunk">>, OffsetBinary], Req, _Pid) -> handle_get_chunk(OffsetBinary, Req, json); handle(<<"GET">>, [<<"chunk_proof">>, OffsetBinary], Req, _Pid) -> handle_get_chunk_proof(OffsetBinary, Req, json); handle(<<"GET">>, [<<"chunk2">>, OffsetBinary], Req, _Pid) -> handle_get_chunk(OffsetBinary, Req, binary); handle(<<"GET">>, [<<"chunk_proof2">>, 
OffsetBinary], Req, _Pid) -> handle_get_chunk_proof(OffsetBinary, Req, binary); handle(<<"GET">>, [<<"tx">>, EncodedID, <<"offset">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_util:safe_decode(EncodedID) of {error, invalid} -> {400, #{}, jiffy:encode(#{ error => invalid_address }), Req}; {ok, ID} -> case ar_data_sync:get_tx_offset(ID) of {ok, {Offset, Size}} -> ResponseBody = jiffy:encode(#{ offset => integer_to_binary(Offset), size => integer_to_binary(Size) }), {200, #{}, ResponseBody, Req}; {error, not_found} -> {404, #{}, <<>>, Req}; {error, failed_to_read_offset} -> {500, #{}, <<>>, Req}; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req} end end end; %% Return data root metadata for the block containing the offset, >= BlockStartOffset, < BlockEndOffset. %% Return only entries corresponding to non-empty transactions. %% Return the complete list of entries in the order they appear in the data root index, %% which corresponds to sorted #tx records in the block. %% GET /data_roots/{offset} handle(<<"GET">>, [<<"data_roots">>, OffsetBin], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> ok = ar_semaphore:acquire(get_data_roots, ?DEFAULT_CALL_TIMEOUT), case catch binary_to_integer(OffsetBin) of {'EXIT', _} -> {400, #{}, <<>>, Req}; Offset -> case ar_data_sync:get_data_roots_for_offset(Offset) of {ok, {TXRoot, BlockSize, Entries}} -> Payload = ar_serialize:data_roots_to_binary({TXRoot, BlockSize, Entries}), {200, #{}, Payload, Req}; {error, not_found} -> {404, #{}, jiffy:encode(#{ error => not_found }), Req}; _ -> {500, #{}, <<>>, Req} end end end; %% Accept data roots for a given block offset (>= BlockStartOffset, < BlockEndOffset). %% Expect only entries corresponding to non-empty transactions. %% Expect the complete list of entries in the order they appear in the data root index, %% which corresponds to sorted #tx records in the block. 
%% POST /data_roots/{offset} handle(<<"POST">>, [<<"data_roots">>, OffsetBin], Req, Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> ok = ar_semaphore:acquire(get_data_roots, ?DEFAULT_CALL_TIMEOUT), DiskPoolThreshold = ar_data_sync:get_disk_pool_threshold(), ReadOffset = case catch binary_to_integer(OffsetBin) of {'EXIT', _} -> {reply, {400, #{}, <<>>, Req}}; Offset when Offset >= DiskPoolThreshold -> {reply, {400, #{}, jiffy:encode(#{ error => offset_above_disk_pool_threshold }), Req}}; Offset when Offset < 0 -> {reply, {400, #{}, jiffy:encode(#{ error => negative_offset }), Req}}; Offset -> {BlockStart, BlockEnd, ExpectedTXRoot} = ar_block_index:get_block_bounds(Offset), case ar_data_sync:are_data_roots_synced(BlockStart, BlockEnd, ExpectedTXRoot) of true -> {reply, {200, #{}, <<>>, Req}}; false -> {Offset, BlockStart, BlockEnd} end end, case ReadOffset of {reply, Reply} -> Reply; {Offset2, BlockStart2, BlockEnd2} -> case read_complete_body(Req, Pid) of {ok, Body, Req2} -> case ar_serialize:binary_to_data_roots(Body) of {ok, {TXRoot, BlockSize, Entries}} -> case ar_data_root_sync:validate_data_roots(TXRoot, BlockSize, Entries, Offset2) of {ok, _} -> case catch ar_data_root_sync:store_data_roots_sync( BlockStart2, BlockEnd2, TXRoot, Entries) of ok -> {200, #{}, <<>>, Req2}; {'EXIT', {timeout, _}} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req2}; {'EXIT', _} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req2}; {error, Reason} -> {503, #{}, jiffy:encode(#{ error => Reason }), Req2} end; {error, Reason} -> {400, #{}, jiffy:encode(#{ error => Reason }), Req2} end; _ -> {400, #{}, jiffy:encode(#{ error => invalid_format }), Req2} end; {error, body_size_too_large} -> {400, #{}, <<>>, Req}; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req} end end end; handle(<<"POST">>, [<<"chunk">>], Req, Pid) -> Joined = case ar_node:is_joined() of false -> not_joined(Req); true -> ok end, DataRootKnown = case Joined of ok -> 
case get_data_root_from_headers(Req) of not_set -> ok; {ok, {DataRoot, DataSize}} -> case ar_data_sync:has_data_root(DataRoot, DataSize) of true -> ok; false -> {400, #{}, jiffy:encode(#{ error => data_root_not_found }), Req} end end; Reply -> Reply end, ParseChunk = case DataRootKnown of ok -> parse_chunk(Req, Pid); Reply2 -> Reply2 end, case ParseChunk of {ok, {Proof, Req2}} -> case ar_semaphore:acquire(post_chunk, 5000) of ok -> handle_post_chunk(Proof, Req2); {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req2} end; Reply3 -> Reply3 end; %% Accept an announcement of a block. Reply 412 (no previous block), %% 200 (optionally specifying missing transactions and chunk in the response) %% or 208 (already processing the block). handle(<<"POST">>, [<<"block_announcement">>], Req, Pid) -> case read_complete_body(Req, Pid) of {ok, Body, Req2} -> case catch ar_serialize:binary_to_block_announcement(Body) of {ok, Announcement} -> handle_block_announcement(Announcement, Req2); {'EXIT', _Reason} -> {400, #{}, <<>>, Req2}; {error, _Reason} -> {400, #{}, <<>>, Req2} end; {error, body_size_too_large} -> {400, #{}, <<>>, Req}; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req} end; %% Accept a JSON-encoded block with Base64Url encoded fields. handle(<<"POST">>, [<<"block">>], Req, Pid) -> post_block(request, {Req, Pid, json}, erlang:timestamp()); %% Accept a binary-encoded block. handle(<<"POST">>, [<<"block2">>], Req, Pid) -> erlang:put(post_block2, true), post_block(request, {Req, Pid, binary}, erlang:timestamp()); %% Accept a (partial) solution from a pool or a CM node and validate it. %% %% If the node is a CM exit node and a pool client, send the given solution to %% the pool and return an empty JSON object. 
%% %% If the node is a pool server, return a JSON object: %% { %% "indep_hash": "", %% "status": "" %% }, %% where the status is one of "accepted", "accepted_block", "rejected_bad_poa", %% "rejected_wrong_hash", "rejected_bad_vdf", "rejected_mining_address_banned", %% "stale", "rejected_vdf_not_found", "rejected_missing_key_file", %% "rejected_invalid_packing_difficulty". %% If the solution is partial, "indep_hash" string is empty. handle(<<"POST">>, [<<"partial_solution">>], Req, Pid) -> case ar_node:is_joined() of true -> handle_post_partial_solution(Req, Pid); false -> not_joined(Req) end; %% Return the information about up to ?GET_JOBS_COUNT latest VDF steps and a difficulty. %% %% If the given VDF output is present in the latest 10 VDF steps, return only the steps %% strictly above the given output. If the given output is our latest output, %% wait for up to ?GET_JOBS_TIMEOUT_S and return an empty list if no new steps are %% computed by the time. Also, only return the steps strictly above the latest block. %% %% If we are a pool server, return the current network difficulty along with the VDF %% information. %% %% If we are a CM exit node and a pool client, return the partial difficulty provided %% by the pool. %% %% Return a JSON object: %% { %% "jobs": %% [ %% {"nonce_limiter_output": "...", "step_number": "...", "partition_upper_bound": "..."}, %% ... %% ], %% "partial_diff": "...", %% "next_seed": "...", %% "interval_number": "...", %% "next_vdf_difficulty": "..." 
%% } handle(<<"GET">>, [<<"jobs">>, EncodedPrevOutput], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_util:safe_decode(EncodedPrevOutput) of {ok, PrevOutput} -> handle_get_jobs(PrevOutput, Req); {error, invalid} -> {400, #{}, jiffy:encode(#{ error => invalid_prev_output }), Req} end end; handle(<<"GET">>, [<<"jobs">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_jobs(<<>>, Req) end; handle(<<"POST">>, [<<"pool_cm_jobs">>], Req, Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_post_pool_cm_jobs(Req, Pid) end; %% Generate a wallet and receive a secret key identifying it. %% Requires internal_api_secret startup option to be set. %% WARNING: only use it if you really really know what you are doing. handle(<<"POST">>, [<<"wallet">>], Req, _Pid) -> case check_internal_api_secret(Req) of pass -> WalletAccessCode = ar_util:encode(crypto:strong_rand_bytes(32)), case ar_wallet:new_keyfile(?DEFAULT_KEY_TYPE, WalletAccessCode) of {error, Reason} -> ?LOG_ERROR([{event, failed_to_create_new_wallet}, {reason, io_lib:format("~p", [Reason])}]), {500, #{}, <<>>, Req}; {_, Pub} -> ResponseProps = [ {<<"wallet_address">>, ar_util:encode(ar_wallet:to_address(Pub))}, {<<"wallet_access_code">>, WalletAccessCode} ], {200, #{}, ar_serialize:jsonify({ResponseProps}), Req} end; {reject, {Status, Headers, Body}} -> {Status, Headers, Body, Req} end; %% Accept a new JSON-encoded transaction. %% POST request to endpoint /tx. handle(<<"POST">>, [<<"tx">>], Req, Pid) -> handle_post_tx({Req, Pid, json}); %% Accept a new binary-encoded transaction. %% POST request to endpoint /tx2. handle(<<"POST">>, [<<"tx2">>], Req, Pid) -> handle_post_tx({Req, Pid, binary}); %% Sign and send a tx to the network. %% Fetches the wallet by the provided key generated via POST /wallet. %% Requires internal_api_secret startup option to be set. %% WARNING: only use it if you really really know what you are doing. 
handle(<<"POST">>, [<<"unsigned_tx">>], Req, Pid) -> case {ar_node:is_joined(), check_internal_api_secret(Req)} of {false, _} -> not_joined(Req); {true, pass} -> case read_complete_body(Req, Pid) of {ok, Body, Req2} -> {UnsignedTXProps} = ar_serialize:dejsonify(Body), WalletAccessCode = proplists:get_value(<<"wallet_access_code">>, UnsignedTXProps), %% ar_serialize:json_struct_to_tx/1 requires all properties to be there, %% so we're adding id, owner and signature with bogus values. These %% will later be overwritten in ar_tx:sign/2 FullTxProps = lists:append( proplists:delete(<<"wallet_access_code">>, UnsignedTXProps), [ {<<"id">>, ar_util:encode(crypto:strong_rand_bytes(32))}, {<<"owner">>, ar_util:encode(<<"owner placeholder">>)}, {<<"signature">>, ar_util:encode(<<"signature placeholder">>)} ] ), KeyPair = ar_wallet:load_keyfile( ar_wallet:wallet_filepath(WalletAccessCode)), UnsignedTX = ar_serialize:json_struct_to_tx({FullTxProps}), Data = UnsignedTX#tx.data, DataSize = byte_size(Data), DataRoot = case DataSize > 0 of true -> TreeTX = ar_tx:generate_chunk_tree(#tx{ data = Data }), TreeTX#tx.data_root; false -> <<>> end, Format2TX = UnsignedTX#tx{ format = 2, data_size = DataSize, data_root = DataRoot }, SignedTX = ar_tx:sign(Format2TX, KeyPair), Peer = ar_http_util:arweave_peer(Req), Reply = ar_serialize:jsonify({[{<<"id">>, ar_util:encode(SignedTX#tx.id)}]}), case handle_post_tx(Req2, Peer, SignedTX) of ok -> {200, #{}, Reply, Req2}; {error_response, {Status, Headers, ErrBody}} -> {Status, Headers, ErrBody, Req2} end; {error, body_size_too_large} -> {413, #{}, <<"Payload too large">>, Req}; {error, timeout} -> {500, #{}, <<"Handler timeout">>, Req} end; {true, {reject, {Status, Headers, Body}}} -> {Status, Headers, Body, Req} end; %% Return the list of peers held by the node. %% GET request to endpoint /peers. 
handle(<<"GET">>, [<<"peers">>], Req, _Pid) -> {200, #{}, ar_serialize:jsonify( [ list_to_binary(ar_util:format_peer(P)) || P <- ar_peers:get_peers(current), P /= ar_http_util:arweave_peer(Req), ar_peers:is_public_peer(P) ] ), Req}; %% Return the inflation reward emitted at the given block. %% GET request to endpoint /price/{height}. handle(<<"GET">>, [<<"inflation">>, EncodedHeight], Req, _Pid) -> case catch binary_to_integer(EncodedHeight) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => height_must_be_an_integer }), Req}; Height when Height < 0 -> {400, #{}, jiffy:encode(#{ error => height_must_be_non_negative }), Req}; Height when Height > 13000000 -> % An approximate number. {200, #{}, "0", Req}; Height -> {200, #{}, integer_to_list(trunc(ar_inflation:calculate(Height))), Req} end; %% Return the estimated transaction fee not including a new wallet fee. %% GET request to endpoint /price/{bytes}. handle(<<"GET">>, [<<"price">>, SizeInBytesBinary], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> {Fee, _Denomination} = estimate_tx_fee(Size, <<>>), {200, #{}, integer_to_binary(Fee), Req} end end; %% Return the estimated transaction fee not (including a new wallet fee) along with the %% denomination code. %% GET request to endpoint /price2/{bytes}. handle(<<"GET">>, [<<"price2">>, SizeInBytesBinary], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> {Fee, Denomination} = estimate_tx_fee(Size, <<>>), {200, #{}, jiffy:encode(#{ fee => integer_to_binary(Fee), denomination => Denomination }), Req} end end; %% Return the optimistic transaction fee not (including a new wallet fee) along with the %% denomination code. 
%% GET request to endpoint /optimistic_price/{bytes}. handle(<<"GET">>, [<<"optimistic_price">>, SizeInBytesBinary], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> {Fee, Denomination} = estimate_tx_fee(Size, <<>>, optimistic), {200, #{}, jiffy:encode(#{ fee => integer_to_binary(Fee), denomination => Denomination }), Req} end end; %% Return the estimated transaction fee (including a new wallet fee if the given address %% is not found in the account tree). %% GET request to endpoint /price/{bytes}/{address}. handle(<<"GET">>, [<<"price">>, SizeInBytesBinary, EncodedAddr], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe( EncodedAddr) of {error, invalid} -> {400, #{}, <<"Invalid address.">>, Req}; {ok, Addr} -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> {Fee, _Denomination} = estimate_tx_fee(Size, Addr), {200, #{}, integer_to_binary(Fee), Req} end end end; %% Return the estimated transaction fee (including a new wallet fee if the given address %% is not found in the account tree) along with the denomination code. %% GET request to endpoint /price2/{bytes}/{address}. 
handle(<<"GET">>, [<<"price2">>, SizeInBytesBinary, EncodedAddr], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe( EncodedAddr) of {error, invalid} -> {400, #{}, <<"Invalid address.">>, Req}; {ok, Addr} -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> {Fee, Denomination} = estimate_tx_fee(Size, Addr), {200, #{}, jiffy:encode(#{ fee => integer_to_binary(Fee), denomination => Denomination }), Req} end end end; %% Return the estimated transaction fee (including a new wallet fee if the given address %% is not found in the account tree) along with the denomination code. %% GET request to endpoint /optimistic_price/{bytes}/{address}. handle(<<"GET">>, [<<"optimistic_price">>, SizeInBytesBinary, EncodedAddr], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe( EncodedAddr) of {error, invalid} -> {400, #{}, <<"Invalid address.">>, Req}; {ok, Addr} -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> {Fee, Denomination} = estimate_tx_fee(Size, Addr, optimistic), {200, #{}, jiffy:encode(#{ fee => integer_to_binary(Fee), denomination => Denomination }), Req} end end end; %% Return the estimated transaction fee not including a new wallet fee. The fee is estimated %% using the new pricing scheme. %% GET request to endpoint /v2price/{bytes}. 
handle(<<"GET">>, [<<"v2price">>, SizeInBytesBinary], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> Fee = estimate_tx_fee_v2(Size, <<>>), {200, #{}, integer_to_binary(Fee), Req} end end; %% Return the estimated transaction fee (including a new wallet fee if the given address %% is not found in the account tree). The fee is estimated using the new pricing scheme. %% GET request to endpoint /v2price/{bytes}/{address}. handle(<<"GET">>, [<<"v2price">>, SizeInBytesBinary, EncodedAddr], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe( EncodedAddr) of {error, invalid} -> {400, #{}, <<"Invalid address.">>, Req}; {ok, Addr} -> case catch binary_to_integer(SizeInBytesBinary) of {'EXIT', _} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; Size -> Fee = estimate_tx_fee_v2(Size, Addr), {200, #{}, integer_to_binary(Fee), Req} end end end; handle(<<"GET">>, [<<"reward_history">>, EncodedBH], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> ok = ar_semaphore:acquire(get_reward_history, ?DEFAULT_CALL_TIMEOUT), case ar_util:safe_decode(EncodedBH) of {ok, BH} -> Fork_2_6 = ar_fork:height_2_6(), case ar_block_cache:get_block_and_status(block_cache, BH) of {#block{ height = Height, reward_history = RewardHistory }, {Status, _}} when (Status == on_chain orelse Status == validated), Height >= Fork_2_6 -> RewardHistory2 = ar_rewards:trim_buffered_reward_history(Height, RewardHistory), {200, #{}, ar_serialize:reward_history_to_binary(RewardHistory2), Req}; _ -> {404, #{}, <<>>, Req} end; {error, invalid} -> {400, #{}, jiffy:encode(#{ error => invalid_block_hash }), Req} end end; handle(<<"GET">>, [<<"block_time_history">>, EncodedBH], Req, _Pid) -> case 
ar_node:is_joined() of false -> not_joined(Req); true -> case ar_util:safe_decode(EncodedBH) of {ok, BH} -> Fork_2_7 = ar_fork:height_2_7(), case ar_block_cache:get_block_and_status(block_cache, BH) of {#block{ height = Height, block_time_history = BlockTimeHistory }, {Status, _}} when (Status == on_chain orelse Status == validated), Height >= Fork_2_7 -> {200, #{}, ar_serialize:block_time_history_to_binary( BlockTimeHistory), Req}; _ -> {404, #{}, <<>>, Req} end; {error, invalid} -> {400, #{}, jiffy:encode(#{ error => invalid_block_hash }), Req} end end; %% Return the current JSON-encoded hash list held by the node. %% GET request to endpoint /block_index. handle(<<"GET">>, [<<"hash_list">>], Req, _Pid) -> handle(<<"GET">>, [<<"block_index">>], Req, _Pid); handle(<<"GET">>, [<<"block_index">>], Req, _Pid) -> ok = ar_semaphore:acquire(get_block_index, ?DEFAULT_CALL_TIMEOUT), case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_node:get_height() >= ar_fork:height_2_6() of true -> {400, #{}, jiffy:encode(#{ error => not_supported_since_fork_2_6 }), Req}; false -> BI = ar_node:get_block_index(), {200, #{}, ar_serialize:jsonify( ar_serialize:block_index_to_json_struct( format_bi_for_peer(BI, Req) ) ), Req} end end; %% Return the current binary-encoded block index held by the node. %% GET request to endpoint /block_index2. 
handle(<<"GET">>, [<<"block_index2">>], Req, _Pid) -> ok = ar_semaphore:acquire(get_block_index, ?DEFAULT_CALL_TIMEOUT), case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_node:get_height() >= ar_fork:height_2_6() of true -> {400, #{}, jiffy:encode(#{ error => not_supported_since_fork_2_6 }), Req}; false -> BI = ar_node:get_block_index(), Bin = ar_serialize:block_index_to_binary(BI), {200, #{}, Bin, Req} end end; handle(<<"GET">>, [<<"hash_list">>, From, To], Req, _Pid) -> handle(<<"GET">>, [<<"block_index">>, From, To], Req, _Pid); handle(<<"GET">>, [<<"hash_list2">>, From, To], Req, _Pid) -> handle(<<"GET">>, [<<"block_index2">>, From, To], Req, _Pid); handle(<<"GET">>, [<<"block_index2">>, From, To], Req, _Pid) -> erlang:put(encoding, binary), handle(<<"GET">>, [<<"block_index">>, From, To], Req, _Pid); handle(<<"GET">>, [<<"block_index">>, From, To], Req, _Pid) -> ok = ar_semaphore:acquire(get_block_index, ?DEFAULT_CALL_TIMEOUT), case ar_node:is_joined() of false -> not_joined(Req); true -> Props = ets:select( node_state, [{{'$1', '$2'}, [{'or', {'==', '$1', height}, {'==', '$1', recent_block_index}}], ['$_']}] ), Height = proplists:get_value(height, Props), RecentBI = proplists:get_value(recent_block_index, Props), try Start = binary_to_integer(From), End = binary_to_integer(To), Encoding = case erlang:get(encoding) of undefined -> json; Enc -> Enc end, handle_get_block_index_range(Start, End, Height, RecentBI, Req, Encoding) catch _:_ -> {400, #{}, jiffy:encode(#{ error => invalid_range }), Req} end end; handle(<<"GET">>, [<<"recent_hash_list">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> Encoded = [ar_util:encode(H) || H <- ar_node:get_block_anchors()], {200, #{}, ar_serialize:jsonify(Encoded), Req} end; %% Accept the list of independent block hashes ordered from oldest to newest %% and return the deviation of our hash list from the given one. 
%% Peers may use this endpoint to make sure they did not miss blocks or learn %% about the missed blocks and their transactions so that they can catch up quickly. handle(<<"GET">>, [<<"recent_hash_list_diff">>], Req, Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case read_complete_body(Req, Pid, ?MAX_SERIALIZED_RECENT_HASH_LIST_DIFF) of {ok, Body, Req2} -> case decode_recent_hash_list(Body) of {ok, ReverseHL} -> {BlockTXPairs, _} = ar_block_cache:get_longest_chain_cache(block_cache), case get_recent_hash_list_diff(ReverseHL, lists:reverse(BlockTXPairs)) of no_intersection -> {404, #{}, <<>>, Req2}; Bin -> {200, #{}, Bin, Req2} end; error -> {400, #{}, <<>>, Req2} end; {error, timeout} -> {503, #{}, jiffy:encode(#{ error => timeout }), Req}; {error, body_size_too_large} -> {413, #{}, <<"Payload too large">>, Req} end end; %% Return the sum of all the existing accounts in the latest state, in Winston. %% GET request to endpoint /total_supply. handle(<<"GET">>, [<<"total_supply">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> ok = ar_semaphore:acquire(get_wallet_list, ?DEFAULT_CALL_TIMEOUT), B = ar_node:get_current_block(), TotalSupply = get_total_supply(B#block.wallet_list, first, 0, B#block.denomination), {200, #{}, integer_to_binary(TotalSupply), Req} end; %% Return the current wallet list held by the node. %% GET request to endpoint /wallet_list. handle(<<"GET">>, [<<"wallet_list">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> H = ar_node:get_current_block_hash(), process_request(get_block, [<<"hash">>, ar_util:encode(H), <<"wallet_list">>], Req) end; %% Return a bunch of wallets, up to ?WALLET_LIST_CHUNK_SIZE, from the tree with %% the given root hash. The wallet addresses are picked in the ascending alphabetical order. 
handle(<<"GET">>, [<<"wallet_list">>, EncodedRootHash], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> process_get_wallet_list_chunk(EncodedRootHash, first, Req) end; %% Return a bunch of wallets, up to ?WALLET_LIST_CHUNK_SIZE, from the tree with %% the given root hash, starting with the provided cursor, taken the wallet addresses %% are picked in the ascending alphabetical order. handle(<<"GET">>, [<<"wallet_list">>, EncodedRootHash, EncodedCursor], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> process_get_wallet_list_chunk(EncodedRootHash, EncodedCursor, Req) end; %% Return the balance of the given address from the wallet tree with the given root hash. handle(<<"GET">>, [<<"wallet_list">>, EncodedRootHash, EncodedAddr, <<"balance">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case {ar_util:safe_decode(EncodedRootHash), ar_util:safe_decode(EncodedAddr)} of {{error, invalid}, _} -> {400, #{}, jiffy:encode(#{ error => invalid_root_hash_encoding }), Req}; {_, {error, invalid}} -> {400, #{}, jiffy:encode(#{ error => invalid_address_encoding }), Req}; {{ok, RootHash}, {ok, Addr}} -> case ar_wallets:get_balance(RootHash, Addr) of {error, not_found} -> {404, #{}, jiffy:encode(#{ error => root_hash_not_found }), Req}; Balance when is_integer(Balance) -> {200, #{}, integer_to_binary(Balance), Req}; _Error -> {500, #{}, <<>>, Req} end end end; %% Share your IP with another peer. %% @deprecated To make a node learn your IP, you can make any request to it. handle(<<"POST">>, [<<"peers">>], Req, _Pid) -> {200, #{}, <<>>, Req}; %% Return the balance of the wallet specified via wallet_address. %% GET request to endpoint /wallet/{wallet_address}/balance. 
handle(<<"GET">>, [<<"wallet">>, Addr, <<"balance">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe(Addr) of {error, invalid} -> {400, #{}, <<"Invalid address.">>, Req}; {ok, AddrOK} -> case ar_node:get_balance(AddrOK) of node_unavailable -> {503, #{}, <<"Internal timeout.">>, Req}; Balance -> {200, #{}, integer_to_binary(Balance), Req} end end end; %% Return the sum of reserved mining rewards of the given account. %% GET request to endpoint /wallet/{wallet_address}/reserved_rewards_total. handle(<<"GET">>, [<<"wallet">>, Addr, <<"reserved_rewards_total">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe(Addr) of {ok, AddrOK} when byte_size(AddrOK) == 32 -> B = ar_node:get_current_block(), Sum = ar_rewards:get_total_reward_for_address(AddrOK, B), {200, #{}, integer_to_binary(Sum), Req}; _ -> {400, #{}, <<"Invalid address.">>, Req} end end; %% Return the last transaction ID (hash) for the wallet specified via wallet_address. %% GET request to endpoint /wallet/{wallet_address}/last_tx. handle(<<"GET">>, [<<"wallet">>, Addr, <<"last_tx">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> case ar_wallet:base64_address_with_optional_checksum_to_decoded_address_safe(Addr) of {error, invalid} -> {400, #{}, <<"Invalid address.">>, Req}; {ok, AddrOK} -> {200, #{}, ar_util:encode( ?OK(ar_node:get_last_tx(AddrOK)) ), Req} end end; %% Return a block anchor to use for building transactions. handle(<<"GET">>, [<<"tx_anchor">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> List = ar_node:get_block_anchors(), SuggestedAnchor = lists:nth(min(length(List), ?SUGGESTED_TX_ANCHOR_DEPTH), List), {200, #{}, ar_util:encode(SuggestedAnchor), Req} end; %% Return the JSON-encoded block with the given height or hash. 
%% GET request to endpoint /block/{height|hash}/{height|hash}. handle(<<"GET">>, [<<"block">>, Type, ID], Req, Pid) when Type == <<"height">> orelse Type == <<"hash">> -> handle_get_block(Type, ID, Req, Pid, json); %% Return the binary-encoded block with the given height or hash. %% GET request to endpoint /block2/{height|hash}/{height|hash}. %% Optionally accept an HTTP body, up to 125 bytes - the encoded %% transaction indices where the Nth bit being 1 asks to include %% the Nth transaction in the alphabetical order (not just its identifier) %% in the response. The node only includes transactions in the response %% when the corresponding indices are present in the request and those %% transactions are found in the block cache - the motivation is to keep %% the endpoint lightweight. handle(<<"GET">>, [<<"block2">>, Type, ID], Req, Pid) when Type == <<"height">> orelse Type == <<"hash">> -> handle_get_block(Type, ID, Req, Pid, binary); %% Return block or block field. handle(<<"GET">>, [<<"block">>, Type, ID, Field], Req, _Pid) when Type == <<"height">> orelse Type == <<"hash">> -> case ar_node:is_joined() of false -> not_joined(Req); true -> process_request(get_block, [Type, ID, Field], Req) end; %% Return the balance of the given wallet at the given block. handle(<<"GET">>, [<<"block">>, <<"height">>, Height, <<"wallet">>, Addr, <<"balance">>], Req, _Pid) -> ok = ar_semaphore:acquire(get_wallet_list, ?DEFAULT_CALL_TIMEOUT), handle_get_block_wallet_balance(Height, Addr, Req); %% Return the current block. %% GET request to endpoint /block/current. 
handle(<<"GET">>, [<<"block">>, <<"current">>], Req, Pid) ->
	case ar_node:get_current_block_hash() of
		not_joined ->
			not_joined(Req);
		H when is_binary(H) ->
			%% Delegate to the /block/hash/{hash} clause using the tip hash.
			handle(<<"GET">>, [<<"block">>, <<"hash">>, ar_util:encode(H)], Req, Pid)
	end;
%% DEPRECATED (12/07/2018)
handle(<<"GET">>, [<<"current_block">>], Req, Pid) ->
	handle(<<"GET">>, [<<"block">>, <<"current">>], Req, Pid);
%% Return a given field of the transaction specified by the transaction ID (hash).
%% GET request to endpoint /tx/{hash}/{field}
%%
%% {field} := { id | last_tx | owner | tags | target | quantity | data | signature | reward }
handle(<<"GET">>, [<<"tx">>, Hash, Field], Req, _Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			%% Decode the transaction ID and look the transaction up on disk.
			%% A decoding failure is represented as a ready-made {reply, _} tuple
			%% so it can be returned from the case below unchanged.
			ReadTX =
				case ar_util:safe_decode(Hash) of
					{error, invalid} ->
						{reply, {400, #{}, <<"Invalid hash.">>, Req}};
					{ok, ID} ->
						{ar_storage:read_tx(ID), ID}
				end,
			case ReadTX of
				{unavailable, TXID} ->
					%% Not found on disk - it may still be waiting in the mempool.
					case is_a_pending_tx(TXID) of
						true ->
							{202, #{}, <<"Pending">>, Req};
						false ->
							{404, #{}, <<"Not Found.">>, Req}
					end;
				{reply, Reply} ->
					Reply;
				{#tx{} = TX, _} ->
					case Field of
						<<"tags">> ->
							%% Tags are returned base64url-encoded, as a list of
							%% {name, value} objects.
							{200, #{}, ar_serialize:jsonify(lists:map(
								fun({Name, Value}) ->
									{[{name, ar_util:encode(Name)},
											{value, ar_util:encode(Value)}]}
								end,
								TX#tx.tags)), Req};
						<<"data">> ->
							serve_tx_data(Req, TX);
						_ ->
							%% binary_to_existing_atom/1 is used so arbitrary
							%% input cannot grow the atom table.
							case catch binary_to_existing_atom(Field) of
								{'EXIT', _} ->
									{400, #{}, jiffy:encode(#{ error => invalid_field }), Req};
								FieldAtom ->
									{TXJSON} = ar_serialize:tx_to_json_struct(TX),
									case catch val_for_key(FieldAtom, TXJSON) of
										{'EXIT', _} ->
											{400, #{},
													jiffy:encode(#{ error => invalid_field }),
													Req};
										Val ->
											{200, #{}, Val, Req}
									end
							end
					end
			end
	end;
%% Return the current block height, or 503 if the node has not joined the network yet.
handle(Method, [<<"height">>], Req, _Pid)
		when (Method == <<"GET">>) or (Method == <<"HEAD">>) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			H = ar_node:get_height(),
			{200, #{}, integer_to_binary(H), Req}
	end;
%% If we are given a hash with no specifier (block, tx, etc), assume that
%% the user is requesting the data from the TX associated with that hash.
%% Optionally allow a file extension. The first 43 bytes are the
%% base64url-encoded 32-byte transaction identifier; the (possibly empty)
%% remainder is forwarded as the extension of the requested data.
handle(<<"GET">>, [<<Hash:43/binary, MaybeExt/binary>>], Req, Pid) ->
	handle(<<"GET">>, [<<"tx">>, Hash, <<"data.", MaybeExt/binary>>], Req, Pid);
%% Accept a nonce limiter (VDF) update from a configured peer, if any.
%% POST request to /vdf.
handle(<<"POST">>, [<<"vdf">>], Req, Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			handle_post_vdf(Req, Pid)
	end;
%% Serve a VDF update to a configured VDF client.
%% GET request to /vdf.
handle(<<"GET">>, [<<"vdf">>], Req, _Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			handle_get_vdf(Req, get_update, 2)
	end;
%% Serve a VDF update to a configured VDF client.
%% GET request to /vdf2.
handle(<<"GET">>, [<<"vdf2">>], Req, _Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			handle_get_vdf(Req, get_update, 2)
	end;
%% Serve the current VDF session to a configured VDF client.
%% GET request to /vdf/session.
handle(<<"GET">>, [<<"vdf">>, <<"session">>], Req, _Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			handle_get_vdf(Req, get_session, 2)
	end;
%% Serve the current VDF session to a configured VDF client.
%% GET request to /vdf2/session.
handle(<<"GET">>, [<<"vdf2">>, <<"session">>], Req, _Pid) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			handle_get_vdf(Req, get_session, 2)
	end;
%% Serve the current VDF session to a configured VDF client.
%% GET request to /vdf3/session.
handle(<<"GET">>, [<<"vdf3">>, <<"session">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_vdf(Req, get_session, 3) end; %% Serve the current VDF session to a configured VDF client. %% GET request to /vdf3/session. handle(<<"GET">>, [<<"vdf4">>, <<"session">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_vdf(Req, get_session, 4) end; %% Serve the previous VDF session to a configured VDF client. %% GET request to /vdf/previous_session. handle(<<"GET">>, [<<"vdf">>, <<"previous_session">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_vdf(Req, get_previous_session, 2) end; %% Serve the previous VDF session to a configured VDF client. %% GET request to /vdf2/previous_session. handle(<<"GET">>, [<<"vdf2">>, <<"previous_session">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_vdf(Req, get_previous_session, 2) end; %% Serve the previous VDF session to a configured VDF client. %% GET request to /vdf4/previous_session. handle(<<"GET">>, [<<"vdf4">>, <<"previous_session">>], Req, _Pid) -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_get_vdf(Req, get_previous_session, 4) end; handle(<<"GET">>, [<<"coordinated_mining">>, <<"partition_table">>], Req, _Pid) -> case check_cm_api_secret(Req) of pass -> case ar_node:is_joined() of false -> not_joined(Req); true -> Partitions = case {ar_pool:is_client(), ar_coordination:is_exit_peer()} of {true, true} -> %% When we work with a pool, the exit node shares %% the information about external partitions with %% every internal miner. ar_coordination:get_self_plus_external_partitions_list(); _ -> %% CM miners ask each other about their local %% partitions. A CM exit node is not an exception - it %% does NOT aggregate peer partitions in this case. 
ar_coordination:get_unique_partitions_list() end, JSON = ar_serialize:jsonify(Partitions), {200, #{}, JSON, Req} end; {reject, {Status, Headers, Body}} -> {Status, Headers, Body, Req} end; % If somebody want to make GUI, monitoring tool handle(<<"GET">>, [<<"coordinated_mining">>, <<"state">>], Req, _Pid) -> case check_cm_api_secret(Req) of pass -> case ar_node:is_joined() of false -> not_joined(Req); true -> {ok, {LastPeerResponse}} = ar_coordination:get_public_state(), Peers = maps:fold(fun(Peer, Value, Acc) -> {AliveStatus, PartitionList} = Value, Table = lists:map( fun (ListValue) -> {Bucket, BucketSize, Addr, PackingDifficulty} = ListValue, ar_serialize:partition_to_json_struct(Bucket, BucketSize, Addr, PackingDifficulty) end, PartitionList ), Val = {[ {peer, list_to_binary(ar_util:format_peer(Peer))}, {alive, AliveStatus}, {partition_table, Table} ]}, [Val | Acc] end, [], LastPeerResponse ), {200, #{}, ar_serialize:jsonify(Peers), Req} end; {reject, {Status, Headers, Body}} -> {Status, Headers, Body, Req} end; %% POST request to /coordinated_mining/h1. handle(<<"POST">>, [<<"coordinated_mining">>, <<"h1">>], Req, Pid) -> case check_cm_api_secret(Req) of pass -> case ar_node:is_joined() of false -> not_joined(Req); true -> handle_mining_h1(Req, Pid) end; {reject, {Status, Headers, Body}} -> {Status, Headers, Body, Req} end; %% POST request to /coordinated_mining/h2. 
handle(<<"POST">>, [<<"coordinated_mining">>, <<"h2">>], Req, Pid) ->
	case check_cm_api_secret(Req) of
		{reject, {Status, Headers, Body}} ->
			%% The coordinated mining API secret did not check out.
			{Status, Headers, Body, Req};
		pass ->
			case ar_node:is_joined() of
				true ->
					handle_mining_h2(Req, Pid);
				false ->
					not_joined(Req)
			end
	end;
%% POST request to /coordinated_mining/publish.
handle(<<"POST">>, [<<"coordinated_mining">>, <<"publish">>], Req, Pid) ->
	case check_cm_api_secret(Req) of
		{reject, {Status, Headers, Body}} ->
			{Status, Headers, Body, Req};
		pass ->
			case ar_node:is_joined() of
				true ->
					handle_mining_cm_publish(Req, Pid);
				false ->
					not_joined(Req)
			end
	end;
%% Catch case for requests made to unknown endpoints.
%% Returns error code 400 - Request type not found.
handle(_, _, Req, _Pid) ->
	not_found(Req).

%% Cowlib does not yet support status codes 208 and 419 properly.
%% See https://github.com/ninenines/cowlib/pull/79
handle_custom_codes(Code) ->
	case Code of
		208 ->
			<<"208 Already Reported">>;
		419 ->
			<<"419 Missing Chunk">>;
		_ ->
			Code
	end.

%% Apply the ?BI_TO_BHL conversion to the block index for clients using the
%% default "2" block format (the x-block-format header); return the index
%% untouched for any other declared format.
format_bi_for_peer(BI, Req) ->
	BlockFormat = cowboy_req:header(<<"x-block-format">>, Req, <<"2">>),
	case BlockFormat of
		<<"2">> ->
			?BI_TO_BHL(BI);
		_ ->
			BI
	end.
%% Serve the block index entries in the range [Start, End], after validation.
%% Entries within the consensus window are taken from the in-memory recent
%% block index; older entries are read from ar_block_index. The reply is
%% JSON- or binary-encoded per Encoding.
handle_get_block_index_range(Start, _End, _CurrentHeight, _RecentBI, Req, _Encoding)
		when Start < 0 ->
	{400, #{}, jiffy:encode(#{ error => negative_start }), Req};
handle_get_block_index_range(Start, End, _CurrentHeight, _RecentBI, Req, _Encoding)
		when Start > End ->
	{400, #{}, jiffy:encode(#{ error => start_bigger_than_end }), Req};
handle_get_block_index_range(Start, End, _CurrentHeight, _RecentBI, Req, _Encoding)
		when End - Start + 1 > ?MAX_BLOCK_INDEX_RANGE_SIZE ->
	%% Cap the amount of work a single request can cause.
	{400, #{}, jiffy:encode(#{ error => range_too_big,
			max_range_size => ?MAX_BLOCK_INDEX_RANGE_SIZE }), Req};
handle_get_block_index_range(Start, _End, CurrentHeight, _RecentBI, Req, _Encoding)
		when Start > CurrentHeight ->
	{400, #{}, jiffy:encode(#{ error => start_too_big }), Req};
handle_get_block_index_range(Start, End, CurrentHeight, RecentBI, Req, Encoding) ->
	%% The lowest height still covered by the in-memory recent block index.
	CheckpointHeight = CurrentHeight - ar_block:get_consensus_window_size() + 1,
	%% The part of the range at or above CheckpointHeight, taken from RecentBI.
	%% The nthtail/sublist arithmetic implies RecentBI is ordered newest first:
	%% drop the entries above the top of the requested range, then take at most
	%% as many entries as the range (and the window) allows.
	RecentRange = case End >= CheckpointHeight of
		true ->
			Top = min(CurrentHeight, End),
			Range1 = lists:nthtail(CurrentHeight - Top, RecentBI),
			lists:sublist(Range1, min(Top - Start + 1,
					ar_block:get_consensus_window_size() - (CurrentHeight - Top)));
		false ->
			[]
	end,
	%% Append the part of the range below the checkpoint, fetched from the
	%% persistent block index.
	Range = case Start < CheckpointHeight of
		true ->
			RecentRange ++ ar_block_index:get_range(Start, min(End, CheckpointHeight - 1));
		false ->
			RecentRange
	end,
	case Encoding of
		binary ->
			{200, #{}, ar_serialize:block_index_to_binary(Range), Req};
		json ->
			{200, #{}, ar_serialize:jsonify(ar_serialize:block_index_to_json_struct(
					format_bi_for_peer(Range, Req))), Req}
	end.

%% Build a Cowboy sendfile tuple serving the whole file at Filename.
sendfile(Filename) ->
	{sendfile, 0, filelib:file_size(Filename), Filename}.

%% Reply to a request made to an unknown endpoint.
%% NOTE(review): historically a 400, not a 404 - keep for API compatibility.
not_found(Req) ->
	{400, #{}, <<"Request type not found.">>, Req}.

%% Reply when the node has not joined the network yet.
not_joined(Req) ->
	{503, #{}, jiffy:encode(#{ error => not_joined }), Req}.
%% Reply with the confirmation status of the given transaction:
%% 202 when it is pending, 200 with block height, block hash, and the number
%% of confirmations when it is confirmed, 404 otherwise.
handle_get_tx_status(EncodedTXID, Req) ->
	case ar_util:safe_decode(EncodedTXID) of
		{error, invalid} ->
			{400, #{}, <<"Invalid address.">>, Req};
		{ok, TXID} ->
			case is_a_pending_tx(TXID) of
				true ->
					{202, #{}, <<"Pending">>, Req};
				false ->
					case ar_storage:get_tx_confirmation_data(TXID) of
						{ok, {Height, BH}} ->
							PseudoTags = [
								{<<"block_height">>, Height},
								{<<"block_indep_hash">>, ar_util:encode(BH)}
							],
							case ar_block_index:get_element_by_height(Height) of
								not_found ->
									{404, #{}, <<"Not Found.">>, Req};
								{BH, _, _} ->
									%% BH is already bound above, so this clause only
									%% matches when the block currently indexed at
									%% Height is still the one the transaction was
									%% confirmed in.
									CurrentHeight = ar_node:get_height(),
									%% First confirmation is when the TX is
									%% in the latest block.
									NumberOfConfirmations = CurrentHeight - Height + 1,
									Status = PseudoTags ++ [{<<"number_of_confirmations">>,
											NumberOfConfirmations}],
									{200, #{}, ar_serialize:jsonify({Status}), Req};
								_ ->
									%% Height now holds a different block (reorg).
									{404, #{}, <<"Not Found.">>, Req}
							end;
						not_found ->
							{404, #{}, <<"Not Found.">>, Req};
						{error, timeout} ->
							{503, #{}, <<"ArQL unavailable.">>, Req}
					end
			end
	end.

%% Serve the transaction with the given base64url-encoded ID from disk in the
%% given encoding (json | binary); fall back to a pending/rejected/not-found
%% reply when it is not stored.
handle_get_tx(Hash, Req, Encoding) ->
	case ar_util:safe_decode(Hash) of
		{error, invalid} ->
			{400, #{}, <<"Invalid hash.">>, Req};
		{ok, ID} ->
			%% Throttle concurrent transaction lookups.
			ok = ar_semaphore:acquire(get_tx, ?DEFAULT_CALL_TIMEOUT),
			case ar_storage:read_tx(ID) of
				unavailable ->
					maybe_tx_is_pending_response(ID, Req);
				#tx{} = TX ->
					Body = case Encoding of
						json ->
							ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX));
						binary ->
							ar_serialize:tx_to_binary(TX)
					end,
					{200, #{}, Body, Req}
			end
	end.

%% Like handle_get_tx/3, but check the mempool first so transactions that are
%% not yet confirmed can be served too.
handle_get_unconfirmed_tx(Hash, Req, Encoding) ->
	case ar_util:safe_decode(Hash) of
		{error, invalid} ->
			{400, #{}, <<"Invalid hash.">>, Req};
		{ok, TXID} ->
			case ar_mempool:get_tx(TXID) of
				not_found ->
					handle_get_tx(Hash, Req, Encoding);
				TX ->
					Body = case Encoding of
						json ->
							ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX));
						binary ->
							ar_serialize:tx_to_binary(TX)
					end,
					{200, #{}, Body, Req}
			end
	end.
%% Reply for a transaction that was not found on disk: 202 when it is still
%% pending, 410 with the recorded validation error codes when it was rejected,
%% 404 otherwise.
maybe_tx_is_pending_response(ID, Req) ->
	case is_a_pending_tx(ID) of
		true ->
			{202, #{}, <<"Pending">>, Req};
		false ->
			case ar_tx_db:get_error_codes(ID) of
				{ok, ErrorCodes} ->
					ErrorBody = list_to_binary(lists:join(" ", ErrorCodes)),
					{410, #{}, ErrorBody, Req};
				not_found ->
					{404, #{}, <<"Not Found.">>, Req}
			end
	end.

%% Serve the transaction data base64url-encoded. Format-1 transactions carry
%% their data inline; format-2 data is streamed from the transaction data file
%% when present and otherwise reconstructed via ar_data_sync.
serve_tx_data(Req, #tx{ format = 1 } = TX) ->
	{200, #{}, ar_util:encode(TX#tx.data), Req};
serve_tx_data(Req, #tx{ format = 2, id = ID, data_size = DataSize } = TX) ->
	DataFilename = ar_storage:tx_data_filepath(TX),
	case filelib:is_file(DataFilename) of
		true ->
			%% The data is stored as a file - let Cowboy stream it.
			{200, #{}, sendfile(DataFilename), Req};
		false ->
			%% Throttle concurrent data reconstructions from chunks.
			ok = ar_semaphore:acquire(get_tx_data, ?DEFAULT_CALL_TIMEOUT),
			case ar_data_sync:get_tx_data(ID) of
				{ok, Data} ->
					{200, #{}, ar_util:encode(Data), Req};
				{error, tx_data_too_big} ->
					{400, #{}, jiffy:encode(#{ error => tx_data_too_big }), Req};
				{error, not_found} when DataSize == 0 ->
					%% There is legitimately no data to serve.
					{200, #{}, <<>>, Req};
				{error, not_found} ->
					{404, #{ <<"content-type">> => <<"text/html; charset=utf-8">> },
							sendfile("genesis_data/not_found.html"), Req};
				{error, timeout} ->
					{503, #{}, jiffy:encode(#{ error => timeout }), Req}
			end
	end.

%% Serve transaction data for rendering, with the content type derived from
%% the transaction tags.
serve_tx_html_data(Req, TX) ->
	serve_tx_html_data(Req, TX, ar_http_util:get_tx_content_type(TX)).

serve_tx_html_data(Req, #tx{ format = 1 } = TX, {valid, ContentType}) ->
	{200, #{ <<"content-type">> => ContentType }, TX#tx.data, Req};
serve_tx_html_data(Req, #tx{ format = 1 } = TX, none) ->
	%% No content type tag - default to text/html.
	{200, #{ <<"content-type">> => <<"text/html">> }, TX#tx.data, Req};
serve_tx_html_data(Req, #tx{ format = 2 } = TX, {valid, ContentType}) ->
	serve_format_2_html_data(Req, ContentType, TX);
serve_tx_html_data(Req, #tx{ format = 2 } = TX, none) ->
	serve_format_2_html_data(Req, <<"text/html">>, TX);
serve_tx_html_data(Req, _TX, invalid) ->
	%% The content type tag did not validate.
	{421, #{}, <<>>, Req}.
%% Serve the data of a format-2 transaction with the given content type,
%% reading it from disk when possible and reconstructing it from synced
%% chunks otherwise.
serve_format_2_html_data(Req, ContentType, TX) ->
	case ar_storage:read_tx_data(TX) of
		{ok, Data} ->
			{200, #{ <<"content-type">> => ContentType }, Data, Req};
		{error, enoent} ->
			%% Throttle concurrent data reconstructions from chunks.
			ok = ar_semaphore:acquire(get_tx_data, ?DEFAULT_CALL_TIMEOUT),
			case ar_data_sync:get_tx_data(TX#tx.id) of
				{ok, Data} ->
					{200, #{ <<"content-type">> => ContentType }, Data, Req};
				{error, tx_data_too_big} ->
					{400, #{}, jiffy:encode(#{ error => tx_data_too_big }), Req};
				{error, not_found} when TX#tx.data_size == 0 ->
					{200, #{ <<"content-type">> => ContentType }, <<>>, Req};
				{error, not_found} ->
					{404, #{ <<"content-type">> => <<"text/html; charset=utf-8">> },
							sendfile("genesis_data/not_found.html"), Req};
				{error, timeout} ->
					{503, #{}, jiffy:encode(#{ error => timeout }), Req}
			end
	end.

%% Estimate the fee for a transaction with Size bytes of data sent to Addr
%% (<<>> when no target), using the pessimistic (higher) of the current and
%% scheduled price per GiB-minute. Returns {Fee, DenominationCode}.
estimate_tx_fee(Size, Addr) ->
	estimate_tx_fee(Size, Addr, pessimistic).

%% Estimate the transaction fee. Type is pessimistic (take the max of the
%% current and scheduled price per GiB-minute) or optimistic (take the min).
estimate_tx_fee(Size, Addr, Type) ->
	%% Fetch all the pricing-related keys from the node state table in one pass.
	Props = ets:select(
		node_state,
		[{{'$1', '$2'},
			[{'or', {'==', '$1', height}, {'==', '$1', wallet_list},
					{'==', '$1', usd_to_ar_rate}, {'==', '$1', scheduled_usd_to_ar_rate},
					{'==', '$1', price_per_gib_minute}, {'==', '$1', denomination},
					{'==', '$1', scheduled_price_per_gib_minute},
					{'==', '$1', kryder_plus_rate_multiplier}}], ['$_']}]
	),
	Height = proplists:get_value(height, Props),
	CurrentPricePerGiBMinute = proplists:get_value(price_per_gib_minute, Props),
	Denomination = proplists:get_value(denomination, Props),
	ScheduledPricePerGiBMinute = proplists:get_value(scheduled_price_per_gib_minute, Props),
	KryderPlusRateMultiplier = proplists:get_value(kryder_plus_rate_multiplier, Props),
	%% NOTE(review): usd_to_ar_rate and scheduled_usd_to_ar_rate are selected
	%% above but never read in this function.
	PricePerGiBMinute =
		case Type of
			pessimistic ->
				max(CurrentPricePerGiBMinute, ScheduledPricePerGiBMinute);
			optimistic ->
				min(CurrentPricePerGiBMinute, ScheduledPricePerGiBMinute)
		end,
	RootHash = proplists:get_value(wallet_list, Props),
	%% Look the target account up so the fee can account for the address not
	%% being in the account tree yet.
	Accounts =
		case Addr of
			<<>> ->
				#{};
			_ ->
				ar_wallets:get(RootHash, Addr)
		end,
	%% The fee is computed for the next block, i.e., Height + 1.
	Size2 = ar_tx:get_weave_size_increase(Size, Height + 1),
	Args = {Size2, PricePerGiBMinute, KryderPlusRateMultiplier, Addr, Accounts, Height + 1},
	%% The denomination code is only reported from fork 2.6 onwards.
	Denomination2 =
		case Height >= ar_fork:height_2_6() of
			true ->
				Denomination;
			false ->
				0
		end,
	{ar_tx:get_tx_fee(Args), Denomination2}.

%% Estimate the transaction fee using the new (v2) pricing scheme; always
%% uses the higher of the current and scheduled price per GiB-minute.
estimate_tx_fee_v2(Size, Addr) ->
	Props = ets:select(
		node_state,
		[{{'$1', '$2'},
			[{'or', {'==', '$1', height}, {'==', '$1', wallet_list},
					{'==', '$1', price_per_gib_minute},
					{'==', '$1', scheduled_price_per_gib_minute},
					{'==', '$1', kryder_plus_rate_multiplier}}], ['$_']}]
	),
	Height = proplists:get_value(height, Props),
	CurrentPricePerGiBMinute = proplists:get_value(price_per_gib_minute, Props),
	ScheduledPricePerGiBMinute = proplists:get_value(scheduled_price_per_gib_minute, Props),
	KryderPlusRateMultiplier = proplists:get_value(kryder_plus_rate_multiplier, Props),
	PricePerGiBMinute = max(CurrentPricePerGiBMinute, ScheduledPricePerGiBMinute),
	RootHash = proplists:get_value(wallet_list, Props),
	Accounts =
		case Addr of
			<<>> ->
				#{};
			_ ->
				ar_wallets:get(RootHash, Addr)
		end,
	Size2 = ar_tx:get_weave_size_increase(Size, Height + 1),
	Args = {Size2, PricePerGiBMinute, KryderPlusRateMultiplier, Addr, Accounts, Height + 1},
	ar_tx:get_tx_fee2(Args).

%% Resolve a /block/{type}/{id} request: decode the hash, or translate the
%% height into a hash via the block index, then serve the block.
handle_get_block(Type, ID, Req, Pid, Encoding) ->
	case Type of
		<<"hash">> ->
			case ar_util:safe_decode(ID) of
				{error, invalid} ->
					{404, #{}, <<"Block not found.">>, Req};
				{ok, H} ->
					handle_get_block(H, Req, Pid, Encoding)
			end;
		<<"height">> ->
			case ar_node:is_joined() of
				false ->
					not_joined(Req);
				true ->
					CurrentHeight = ar_node:get_height(),
					try binary_to_integer(ID) of
						Height when Height < 0 ->
							{400, #{}, <<"Invalid height.">>, Req};
						Height when Height > CurrentHeight ->
							{404, #{}, <<"Block not found.">>, Req};
						Height ->
							case ar_block_index:get_element_by_height(Height) of
								not_found ->
									{404, #{}, <<"Block not found.">>, Req};
								{H, _, _} ->
									%% Re-enter through the hash branch.
									handle_get_block(<<"hash">>, ar_util:encode(H), Req,
											Pid, Encoding)
							end
					catch _:_ ->
						{400, #{}, <<"Invalid height.">>, Req}
					end
			end
	end.
%% @doc Serve a block by hash. For binary encoding, when the cached block
%% carries full #tx{} records (no bare TXID binaries), the request body may
%% list transaction indices the client is missing; those transactions are
%% embedded in the reply, all others are replaced by their identifiers.
handle_get_block(H, Req, Pid, Encoding) ->
	case ar_block_cache:get(block_cache, H) of
		not_found ->
			handle_get_block2(H, Req, Encoding);
		B ->
			case {Encoding, lists:any(fun(TX) -> is_binary(TX) end, B#block.txs)} of
				{binary, false} ->
					%% We have found the block in the block cache. Therefore, we can
					%% include the requested transactions without doing disk lookups.
					case read_complete_body(Req, Pid, ?MAX_SERIALIZED_MISSING_TX_INDICES) of
						{ok, Body, Req2} ->
							case ar_util:parse_list_indices(Body) of
								error ->
									{400, #{}, <<>>, Req2};
								Indices ->
									Map = collect_missing_transactions(B#block.txs, Indices),
									%% Requested indices keep the full record;
									%% the rest collapse to the TXID.
									TXs2 = [maps:get(TX#tx.id, Map, TX#tx.id)
											|| TX <- B#block.txs],
									handle_get_block3(B#block{ txs = TXs2 }, Req2, binary)
							end;
						{error, body_size_too_large} ->
							{413, #{}, <<"Payload too large">>, Req};
						{error, timeout} ->
							{503, #{}, jiffy:encode(#{ error => timeout }), Req}
					end;
				_ ->
					handle_get_block3(B, Req, Encoding)
			end
	end.

%% @doc Read the block from disk storage; 404 when unavailable.
handle_get_block2(H, Req, Encoding) ->
	case ar_storage:read_block(H) of
		unavailable ->
			{404, #{}, <<"Block not found.">>, Req};
		#block{} = B ->
			handle_get_block3(B, Req, Encoding)
	end.

%% @doc Serialize the block in the requested encoding (json | binary) and reply 200.
handle_get_block3(B, Req, Encoding) ->
	Bin =
		case Encoding of
			json ->
				ar_serialize:jsonify(ar_serialize:block_to_json_struct(B));
			binary ->
				ar_serialize:block_to_binary(B)
		end,
	{200, #{}, Bin, Req}.

%% @doc Build a map TXID => #tx{} for the transactions at the given
%% (sorted, ascending) positions in the block's transaction list.
collect_missing_transactions(TXs, Indices) ->
	collect_missing_transactions(TXs, Indices, 0).

collect_missing_transactions([#tx{ id = TXID } = TX | TXs], [N | Indices], N) ->
	%% Position N was requested — keep this transaction.
	maps:put(TXID, TX, collect_missing_transactions(TXs, Indices, N + 1));
collect_missing_transactions([_TX | TXs], Indices, N) ->
	collect_missing_transactions(TXs, Indices, N + 1);
collect_missing_transactions(_TXs, [], _N) ->
	#{};
collect_missing_transactions([], _Indices, _N) ->
	#{}.
%% @doc Handle POST /tx: parse the submitted transaction, gate it behind the
%% post_tx semaphore, validate and gossip it. Parsing errors map to 4xx
%% responses; semaphore timeouts to 503.
handle_post_tx({Req, Pid, Encoding}) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			case post_tx_parse_id({Req, Pid, Encoding}) of
				{error, invalid_hash, Req2} ->
					{400, #{}, <<"Invalid hash.">>, Req2};
				{error, tx_already_processed, _TXID, Req2} ->
					{208, #{}, <<"Transaction already processed.">>, Req2};
				{error, invalid_signature_type, Req2} ->
					{400, #{}, <<"Invalid signature type.">>, Req2};
				{error, invalid_json, Req2} ->
					{400, #{}, <<"Invalid JSON.">>, Req2};
				{error, body_size_too_large, Req2} ->
					{413, #{}, <<"Payload too large">>, Req2};
				{error, timeout} ->
					{503, #{}, <<>>, Req};
				{ok, TX, Req2} ->
					{ok, Config} = arweave_config:get_env(),
					case ar_semaphore:acquire(post_tx,
							Config#config.post_tx_timeout * 1000) of
						{error, timeout} ->
							{503, #{}, <<>>, Req2};
						ok ->
							Peer = ar_http_util:arweave_peer(Req),
							case handle_post_tx(Req2, Peer, TX) of
								ok ->
									{200, #{}, <<"OK">>, Req2};
								{error_response, {Status, Headers, Body}} ->
									%% Drop the ignore-registry reference placed
									%% during parsing so the tx may be resubmitted.
									Ref = erlang:get(tx_id_ref),
									ar_ignore_registry:remove_ref(TX#tx.id, Ref),
									{Status, Headers, Body, Req2}
							end
					end
			end
	end.

%% @doc Validate the parsed transaction and either accept it (registering its
%% data root in the disk pool and gossiping it) or map the validator's
%% rejection reason to an error response tuple.
handle_post_tx(Req, Peer, TX) ->
	case ar_tx_validator:validate(TX) of
		{invalid, tx_verification_failed} ->
			handle_post_tx_verification_response();
		{invalid, last_tx_in_mempool} ->
			handle_post_tx_last_tx_in_mempool_response();
		{invalid, invalid_last_tx} ->
			handle_post_tx_verification_response();
		{invalid, tx_bad_anchor} ->
			handle_post_tx_bad_anchor_response();
		{invalid, tx_already_in_weave} ->
			handle_post_tx_already_in_weave_response();
		{invalid, tx_already_in_mempool} ->
			handle_post_tx_already_in_mempool_response();
		{invalid, invalid_data_root_size} ->
			handle_post_tx_invalid_data_root_response();
		{valid, TX2} ->
			%% The validator may return a normalized copy (TX2); its data root
			%% is registered under the original TXID.
			ar_data_sync:add_data_root_to_disk_pool(TX2#tx.data_root,
					TX2#tx.data_size, TX#tx.id),
			handle_post_tx_accepted(Req, TX, Peer)
	end.
%% @doc Finalize acceptance of a valid transaction: relax rate limiting for
%% the submitting peer, record gossip metrics, emit the tx event, and mark
%% the TXID as temporarily ignored so duplicates are not re-processed.
handle_post_tx_accepted(Req, TX, Peer) ->
	%% Exclude successful requests with valid transactions from the
	%% IP-based throttling, to avoid connectivity issues at the times
	%% of excessive transaction volumes.
	%% {A, B, C, D} (the IP without the port) is the peer key for the
	%% general rate limiter group.
	{A, B, C, D, _} = Peer,
	arweave_limiter:reduce_for_peer(general, {A, B, C, D}),
	BodyReadTime = ar_http_req:body_read_time(Req),
	ar_peers:rate_gossiped_data(Peer, tx,
			erlang:convert_time_unit(BodyReadTime, native, microsecond),
			byte_size(term_to_binary(TX))),
	ar_events:send(tx, {new, TX, {pushed, Peer}}),
	TXID = TX#tx.id,
	Ref = erlang:get(tx_id_ref),
	ar_ignore_registry:remove_ref(TXID, Ref),
	%% Ignore duplicate submissions of this TXID for 10 minutes.
	ar_ignore_registry:add_temporary(TXID, 10 * 60 * 1000),
	ok.

%% The helpers below map validation failures to {error_response, {Status,
%% Headers, Body}} tuples consumed by handle_post_tx/1.
handle_post_tx_verification_response() ->
	{error_response, {400, #{}, <<"Transaction verification failed.">>}}.

handle_post_tx_last_tx_in_mempool_response() ->
	{error_response, {400, #{}, <<"Invalid anchor (last_tx from mempool).">>}}.

handle_post_tx_bad_anchor_response() ->
	{error_response, {400, #{}, <<"Invalid anchor (last_tx).">>}}.

handle_post_tx_already_in_weave_response() ->
	{error_response, {400, #{}, <<"Transaction is already on the weave.">>}}.

handle_post_tx_already_in_mempool_response() ->
	{error_response, {400, #{}, <<"Transaction is already in the mempool.">>}}.

handle_post_tx_invalid_data_root_response() ->
	{error_response, {400, #{}, <<"The attached data is split in an unknown way.">>}}.

%% @doc Serve the global sync record starting at Start, limited to Limit
%% entries, as JSON or ETF depending on the request content type.
handle_get_data_sync_record(Start, Limit, Req) ->
	Format =
		case cowboy_req:header(<<"content-type">>, Req) of
			<<"application/json">> ->
				json;
			_ ->
				etf
		end,
	Options = #{ start => Start, limit => Limit, format => Format },
	case ar_global_sync_record:get_serialized_sync_record(Options) of
		{ok, Binary} ->
			{200, #{}, Binary, Req};
		{error, timeout} ->
			{503, #{}, jiffy:encode(#{ error => timeout }), Req}
	end.
%% @doc Serve the global sync record restricted to [Start, End), limited to
%% Limit entries, as JSON or ETF depending on the request content type.
handle_get_data_sync_record(Start, End, Limit, Req) ->
	Format =
		case cowboy_req:header(<<"content-type">>, Req) of
			<<"application/json">> ->
				json;
			_ ->
				etf
		end,
	Options = #{ start => Start, right_bound => End, limit => Limit, format => Format },
	case ar_global_sync_record:get_serialized_sync_record(Options) of
		{ok, Binary} ->
			{200, #{}, Binary, Req};
		{error, timeout} ->
			{503, #{}, jiffy:encode(#{ error => timeout }), Req}
	end.

%% @doc Serve the footprint intervals for the given entropy partition and
%% footprint number, unioned across all storage modules covering the
%% partition range. The pipeline threads either {ok, ...} or a ready-made
%% error reply tuple through each stage.
handle_get_footprints(Partition, FootprintNumber, Req) ->
	FootprintsPerPartition = ar_block:get_replica_2_9_entropy_count(),
	CheckFootprintNumber =
		case FootprintNumber >= FootprintsPerPartition of
			true ->
				{400, #{}, jiffy:encode(#{ error => footprint_number_too_large }), Req};
			false ->
				ok
		end,
	{Start, End} = ar_replica_2_9:get_entropy_partition_range(Partition),
	FindStorageModules =
		case CheckFootprintNumber of
			ok ->
				case ar_storage_module:get_all(Start, End) of
					[] ->
						{404, #{}, <<>>, Req};
					Modules ->
						{ok, Modules}
				end;
			Reply ->
				Reply
		end,
	FindStoreIDPacking =
		case FindStorageModules of
			{ok, StorageModules} ->
				{ok, [{ar_storage_module:id(Module), Packing}
						|| {_, _, Packing} = Module <- StorageModules]};
			Reply2 ->
				Reply2
		end,
	CollectIntervals =
		case FindStoreIDPacking of
			{ok, L} ->
				%% Union the footprint intervals found across all store IDs.
				{ok, lists:foldl(
						fun({StoreID2, Packing2}, Acc) ->
							Intervals = ar_footprint_record:get_intervals(Partition,
									FootprintNumber, Packing2, StoreID2),
							ar_intervals:union(Acc, Intervals)
						end,
						ar_intervals:new(), L)};
			Reply3 ->
				Reply3
		end,
	case CollectIntervals of
		{ok, Intervals2} ->
			Payload = jiffy:encode(ar_serialize:footprint_to_json_map(Intervals2)),
			{200, #{}, Payload, Req};
		Reply4 ->
			Reply4
	end.

%% @doc Serve GET /chunk/{offset}: return the chunk with its proof of access
%% in the requested packing and encoding. The offset must fit in ?NOTE_SIZE
%% bytes. Re-packing on the fly is only attempted when the pack_served_chunks
%% feature is enabled in the node configuration.
handle_get_chunk(OffsetBinary, Req, Encoding) ->
	case catch binary_to_integer(OffsetBinary) of
		Offset when is_integer(Offset) ->
			case << Offset:(?NOTE_SIZE * 8) >> of
				%% A positive number represented by =< ?NOTE_SIZE bytes.
				<< Offset:(?NOTE_SIZE * 8) >> ->
					RequestedPacking = ar_serialize:decode_packing(
							cowboy_req:header(<<"x-packing">>, Req, <<"unpacked">>), any),
					%% Any value of the header (even empty) enables
					%% bucket-based offset interpretation.
					IsBucketBasedOffset =
						case cowboy_req:header(<<"x-bucket-based-offset">>, Req,
								not_set) of
							not_set ->
								false;
							_ ->
								true
						end,
					{ReadPacking, CheckRecords} =
						case ar_sync_record:is_recorded(Offset, ar_data_sync) of
							false ->
								{none, {reply, {404, #{}, <<>>, Req}}};
							{true, _} ->
								%% Chunk is recorded but packing is unknown.
								{none, {reply, {404, #{}, <<>>, Req}}};
							{{true, RequestedPacking}, _StoreID} ->
								%% Stored in exactly the requested packing.
								ok = ar_semaphore:acquire(get_chunk,
										?DEFAULT_CALL_TIMEOUT),
								{RequestedPacking, ok};
							{{true, Packing}, _StoreID} when RequestedPacking == any ->
								%% Any packing accepted — serve as stored.
								ok = ar_semaphore:acquire(get_chunk,
										?DEFAULT_CALL_TIMEOUT),
								{Packing, ok};
							{{true, _}, _StoreID} ->
								%% Stored in a different packing; repack only if
								%% the node opted in.
								{ok, Config} = arweave_config:get_env(),
								case lists:member(pack_served_chunks,
										Config#config.enable) of
									false ->
										{none, {reply, {404, #{}, <<>>, Req}}};
									true ->
										ok = ar_semaphore:acquire(get_and_pack_chunk,
												?DEFAULT_CALL_TIMEOUT),
										{RequestedPacking, ok}
								end
						end,
					case CheckRecords of
						{reply, Reply} ->
							Reply;
						ok ->
							Args = #{ packing => ReadPacking,
									bucket_based_offset => IsBucketBasedOffset,
									origin => http },
							case ar_data_sync:get_chunk(Offset, Args) of
								{ok, Proof} ->
									%% The unpacked_chunk field is internal —
									%% never serialized to clients.
									Proof2 = maps:remove(unpacked_chunk,
											Proof#{ packing => ReadPacking }),
									Reply =
										case Encoding of
											json ->
												jiffy:encode(
													ar_serialize:poa_map_to_json_map(
														Proof2));
											binary ->
												ar_serialize:poa_map_to_binary(Proof2)
										end,
									{200, #{}, Reply, Req};
								{error, chunk_not_found} ->
									{404, #{}, <<>>, Req};
								{error, invalid_padding} ->
									{404, #{}, <<>>, Req};
								{error, chunk_failed_validation} ->
									{404, #{}, <<>>, Req};
								{error, chunk_stored_in_different_packing_only} ->
									{404, #{}, <<>>, Req};
								{error, not_joined} ->
									not_joined(Req);
								{error, Error} ->
									?LOG_ERROR([{event, get_chunk_error},
											{offset, Offset},
											{requested_packing,
												ar_serialize:encode_packing(
													RequestedPacking, false)},
											{read_packing,
												ar_serialize:encode_packing(
													ReadPacking, false)},
											{error, Error}]),
									{500, #{}, <<>>, Req}
							end
					end;
				_ ->
					{400, #{}, jiffy:encode(#{ error => offset_out_of_bounds }), Req}
			end;
		_ ->
			{400, #{}, jiffy:encode(#{ error => invalid_offset }), Req}
	end.

%% @doc Serve GET /chunk_proof/{offset}: validate and bound the offset, then
%% delegate to handle_get_chunk_proof2/3.
handle_get_chunk_proof(OffsetBinary, Req, Encoding) ->
	case catch binary_to_integer(OffsetBinary) of
		Offset when is_integer(Offset) ->
			case << Offset:(?NOTE_SIZE * 8) >> of
				%% A positive number represented by =< ?NOTE_SIZE bytes.
				<< Offset:(?NOTE_SIZE * 8) >> ->
					handle_get_chunk_proof2(Offset, Req, Encoding);
				_ ->
					{400, #{}, jiffy:encode(#{ error => offset_out_of_bounds }), Req}
			end;
		_ ->
			{400, #{}, jiffy:encode(#{ error => invalid_offset }), Req}
	end.

%% @doc Return the proof (without the chunk itself) for the chunk covering
%% the given offset, in the requested encoding.
handle_get_chunk_proof2(Offset, Req, Encoding) ->
	IsBucketBasedOffset =
		case cowboy_req:header(<<"x-bucket-based-offset">>, Req, not_set) of
			not_set ->
				false;
			_ ->
				true
		end,
	ok = ar_semaphore:acquire(get_chunk, ?DEFAULT_CALL_TIMEOUT),
	CheckRecords =
		case ar_sync_record:is_recorded(Offset, ar_data_sync) of
			false ->
				{reply, {404, #{}, <<>>, Req}};
			{{true, _Packing}, _StoreID} ->
				ok
		end,
	case CheckRecords of
		{reply, Reply} ->
			Reply;
		ok ->
			Args = #{ bucket_based_offset => IsBucketBasedOffset },
			case ar_data_sync:get_chunk_proof(Offset, Args) of
				{ok, Proof} ->
					Reply =
						case Encoding of
							json ->
								jiffy:encode(
									ar_serialize:poa_no_chunk_map_to_json_map(Proof));
							binary ->
								ar_serialize:poa_no_chunk_map_to_binary(Proof)
						end,
					{200, #{}, Reply, Req};
				{error, chunk_not_found} ->
					{404, #{}, <<>>, Req};
				{error, not_joined} ->
					not_joined(Req);
				{error, failed_to_read_chunk} ->
					{500, #{}, <<>>, Req}
			end
	end.
%% @doc Extract {DataRoot, DataSize} from the arweave-data-root and
%% arweave-data-size request headers. Returns not_set when either header is
%% missing, the root is not a 43-character base64url value, or the size is
%% not an integer.
get_data_root_from_headers(Req) ->
	case {cowboy_req:header(<<"arweave-data-root">>, Req, not_set),
			cowboy_req:header(<<"arweave-data-size">>, Req, not_set)} of
		{not_set, _} ->
			not_set;
		{_, not_set} ->
			not_set;
		{EncodedDataRoot, EncodedDataSize} when byte_size(EncodedDataRoot) == 43 ->
			case catch binary_to_integer(EncodedDataSize) of
				DataSize when is_integer(DataSize) ->
					case ar_util:safe_decode(EncodedDataRoot) of
						{ok, DataRoot} ->
							{ok, {DataRoot, DataSize}};
						_ ->
							not_set
					end;
				_ ->
					not_set
			end;
		_ ->
			not_set
	end.

%% @doc Read and decode a JSON chunk proof from the request body.
%% Returns {ok, {Proof, Req2}} or a ready-made error reply tuple.
parse_chunk(Req, Pid) ->
	case read_complete_body(Req, Pid, ?MAX_SERIALIZED_CHUNK_PROOF_SIZE) of
		{ok, Body, Req2} ->
			case ar_serialize:json_decode(Body, [return_maps]) of
				{ok, JSON} ->
					%% json_map_to_poa_map/1 may throw on malformed fields.
					case catch ar_serialize:json_map_to_poa_map(JSON) of
						{'EXIT', _} ->
							{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
						Proof ->
							{ok, {Proof, Req2}}
					end;
				{error, _} ->
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{503, #{}, jiffy:encode(#{ error => timeout }), Req}
	end.

%% @doc Entry point of the POST /chunk validation pipeline
%% (see handle_post_chunk/3).
handle_post_chunk(Proof, Req) ->
	handle_post_chunk(check_data_size, Proof, Req).
%% @doc Validation pipeline for POST /chunk. Each clause checks one property
%% of the submitted proof and either replies with a 400 or advances to the
%% next stage; validate_proof hands the chunk to ar_data_sync.
handle_post_chunk(check_data_size, Proof, Req) ->
	%% The data size must fit into ?NOTE_SIZE bytes.
	case maps:get(data_size, Proof) > trunc(math:pow(2, ?NOTE_SIZE * 8)) - 1 of
		true ->
			{400, #{}, jiffy:encode(#{ error => data_size_too_big }), Req};
		false ->
			handle_post_chunk(check_chunk_size, Proof, Req)
	end;
handle_post_chunk(check_chunk_size, Proof, Req) ->
	case byte_size(maps:get(chunk, Proof)) > ?DATA_CHUNK_SIZE of
		true ->
			{400, #{}, jiffy:encode(#{ error => chunk_too_big }), Req};
		false ->
			handle_post_chunk(check_data_path_size, Proof, Req)
	end;
handle_post_chunk(check_data_path_size, Proof, Req) ->
	case byte_size(maps:get(data_path, Proof)) > ?MAX_PATH_SIZE of
		true ->
			{400, #{}, jiffy:encode(#{ error => data_path_too_big }), Req};
		false ->
			handle_post_chunk(check_offset_field, Proof, Req)
	end;
handle_post_chunk(check_offset_field, Proof, Req) ->
	case maps:is_key(offset, Proof) of
		false ->
			{400, #{}, jiffy:encode(#{ error => offset_field_required }), Req};
		true ->
			handle_post_chunk(check_offset_size, Proof, Req)
	end;
handle_post_chunk(check_offset_size, Proof, Req) ->
	%% The offset, like the data size, must fit into ?NOTE_SIZE bytes.
	case maps:get(offset, Proof) > trunc(math:pow(2, ?NOTE_SIZE * 8)) - 1 of
		true ->
			{400, #{}, jiffy:encode(#{ error => offset_too_big }), Req};
		false ->
			handle_post_chunk(check_chunk_proof_ratio, Proof, Req)
	end;
handle_post_chunk(check_chunk_proof_ratio, Proof, Req) ->
	DataPath = maps:get(data_path, Proof),
	Chunk = maps:get(chunk, Proof),
	DataSize = maps:get(data_size, Proof),
	%% Reject chunks whose proofs are disproportionately large relative to
	%% the chunk itself.
	case ar_data_sync:is_chunk_proof_ratio_attractive(byte_size(Chunk), DataSize,
			DataPath) of
		false ->
			{400, #{}, jiffy:encode(#{ error => chunk_proof_ratio_not_attractive }), Req};
		true ->
			handle_post_chunk(validate_proof, Proof, Req)
	end;
handle_post_chunk(validate_proof, Proof, Req) ->
	Parent = self(),
	#{ chunk := Chunk, data_path := DataPath, data_size := TXSize,
			offset := Offset, data_root := DataRoot } = Proof,
	%% Run the (potentially slow) disk pool insertion in a separate process
	%% and relay its result back to this request handler.
	%% NOTE(review): the receive below has no `after` clause — presumably the
	%% caller relies on an outer request timeout; confirm.
	spawn(fun() ->
		Parent ! ar_data_sync:add_chunk_to_disk_pool(DataRoot, DataPath, Chunk,
				Offset, TXSize)
	end),
	receive
		ok ->
			{200, #{}, <<>>, Req};
		temporary ->
			{303, #{}, <<>>, Req};
		{error, data_root_not_found} ->
			{400, #{}, jiffy:encode(#{ error => data_root_not_found }), Req};
		{error, exceeds_disk_pool_size_limit} ->
			{400, #{}, jiffy:encode(#{ error => exceeds_disk_pool_size_limit }), Req};
		{error, disk_full} ->
			{400, #{}, jiffy:encode(#{ error => disk_full }), Req};
		{error, failed_to_store_chunk} ->
			{500, #{}, <<>>, Req};
		{error, invalid_proof} ->
			{400, #{}, jiffy:encode(#{ error => invalid_proof }), Req}
	end.

%% @doc Authorize a request against the internal API secret, if configured.
check_internal_api_secret(Req) ->
	{ok, Config} = arweave_config:get_env(),
	check_api_secret(<<"x-internal-api-secret">>, Config#config.internal_api_secret,
			<<"Internal API">>, Req).

%% @doc Authorize a request against the coordinated mining API secret.
check_cm_api_secret(Req) ->
	{ok, Config} = arweave_config:get_env(),
	check_api_secret(<<"x-cm-api-secret">>, Config#config.cm_api_secret,
			<<"CM API">>, Req).

%% @doc Compare the configured secret with the given request header.
%% Returns pass on a match, otherwise {reject, {421, Headers, Body}} after a
%% random delay. An unset secret means the API is disabled.
check_api_secret(Header, Secret, APIName, Req) ->
	Reject = fun(Msg) ->
		log_api_reject(Msg, Req),
		%% Reduce efficiency of timing attacks by sleeping randomly between 1-2s.
		timer:sleep(rand:uniform(1000) + 1000),
		%% Fixed: the rejection body was the syntactically invalid token `<>`;
		%% the empty binary is written `<<>>` in Erlang. No details are leaked
		%% to the caller.
		{reject, {421, #{}, <<>>}}
	end,
	case {Secret, cowboy_req:header(Header, Req)} of
		{not_set, _} ->
			Reject(<<"Request to disabled ", APIName/bitstring>>);
		{Secret, Secret} when is_binary(Secret) ->
			pass;
		_ ->
			Reject(<<"Invalid secret for ", APIName/bitstring, " request">>)
	end.

%% @doc Log a rejected API request asynchronously (so the handler's
%% anti-timing sleep does not delay the log call and vice versa).
log_api_reject(Msg, Req) ->
	spawn(fun() ->
		Path = ar_http_iface_server:split_path(cowboy_req:path(Req)),
		{IpAddr, _Port} = cowboy_req:peer(Req),
		BinIpAddr = list_to_binary(inet:ntoa(IpAddr)),
		?LOG_WARNING("~s: IP address: ~s Path: ~p", [Msg, BinIpAddr, Path])
	end).

%% @doc Convert a blocks field with the given label into a string.
%% Integer fields become decimal strings; structured fields are re-serialized
%% as JSON; anything else is passed through unchanged.
block_field_to_string(<<"timestamp">>, Res) -> integer_to_list(Res);
block_field_to_string(<<"last_retarget">>, Res) -> integer_to_list(Res);
block_field_to_string(<<"diff">>, Res) -> integer_to_list(Res);
block_field_to_string(<<"cumulative_diff">>, Res) -> integer_to_list(Res);
block_field_to_string(<<"height">>, Res) -> integer_to_list(Res);
block_field_to_string(<<"txs">>, Res) -> ar_serialize:jsonify(Res);
block_field_to_string(<<"hash_list">>, Res) -> ar_serialize:jsonify(Res);
block_field_to_string(<<"wallet_list">>, Res) -> ar_serialize:jsonify(Res);
block_field_to_string(<<"usd_to_ar_rate">>, Res) -> ar_serialize:jsonify(Res);
block_field_to_string(<<"scheduled_usd_to_ar_rate">>, Res) -> ar_serialize:jsonify(Res);
block_field_to_string(<<"poa">>, Res) -> ar_serialize:jsonify(Res);
block_field_to_string(_, Res) -> Res.

%% @doc Return true if TXID is a pending tx.
is_a_pending_tx(TXID) ->
	ar_mempool:has_tx(TXID).

%% @doc Decode a block shadow from its JSON (wrapped under the "new_block"
%% key) or binary representation. Any decoding failure is returned as
%% {error, {Exception, Reason}}.
decode_block(JSON, json) ->
	try
		{Struct} = ar_serialize:dejsonify(JSON),
		JSONB = val_for_key(<<"new_block">>, Struct),
		BShadow = ar_serialize:json_struct_to_block(JSONB),
		{ok, BShadow}
	catch Exception:Reason ->
		{error, {Exception, Reason}}
	end;
decode_block(Bin, binary) ->
	try
		ar_serialize:binary_to_block(Bin)
	catch Exception:Reason ->
		{error, {Exception, Reason}}
	end.

%% @doc Convenience function for lists:keyfind(Key, 1, List).
%% Returns Value, not {Key, Value}; false when the key is absent.
val_for_key(K, L) ->
	case lists:keyfind(K, 1, L) of
		false -> false;
		{K, V} -> V
	end.
%% @doc Handle POST /block_announcement: reply 208 for already-seen blocks,
%% 412 when the previous block is unknown, otherwise report which announced
%% transactions (by prefix) and chunks this node is missing.
handle_block_announcement(#block_announcement{ indep_hash = H, previous_block = PrevH,
		tx_prefixes = Prefixes, recall_byte2 = RecallByte2 }, Req) ->
	case ar_ignore_registry:member(H) of
		true ->
			check_block_receive_timestamp(H),
			{208, #{}, <<>>, Req};
		false ->
			case ar_node:get_block_shadow_from_cache(PrevH) of
				not_found ->
					%% The previous block is not in our cache — we cannot
					%% usefully accept this announcement yet.
					{412, #{}, <<>>, Req};
				#block{} ->
					Indices = collect_missing_tx_indices(Prefixes),
					prometheus_counter:inc(block_announcement_reported_transactions,
							length(Prefixes)),
					prometheus_counter:inc(block_announcement_missing_transactions,
							length(Indices)),
					Response = #block_announcement_response{ missing_chunk = true,
							missing_tx_indices = Indices },
					%% Also request the second chunk when a second recall byte
					%% was announced.
					Response2 =
						case RecallByte2 == undefined of
							true ->
								Response;
							false ->
								Response#block_announcement_response{
										missing_chunk2 = true }
						end,
					{200, #{},
						ar_serialize:block_announcement_response_to_binary(Response2),
						Req}
			end
	end.

%% @doc Return the 0-based positions of the announced tx prefixes that are
%% not present in the local tx_prefixes ETS table.
collect_missing_tx_indices(Prefixes) ->
	collect_missing_tx_indices(Prefixes, [], 0).

collect_missing_tx_indices([], Indices, _N) ->
	lists:reverse(Indices);
collect_missing_tx_indices([Prefix | Prefixes], Indices, N) ->
	case ets:member(tx_prefixes, Prefix) of
		false ->
			collect_missing_tx_indices(Prefixes, [N | Indices], N + 1);
		true ->
			collect_missing_tx_indices(Prefixes, Indices, N + 1)
	end.

%% @doc State machine for POST /block and POST /block2: ban check, join
%% check, duplicate-hash short-circuit, body read/decode, missing-tx check
%% (POST /block2 only), and finally pre-validation enqueueing.
post_block(request, {Req, Pid, Encoding}, ReceiveTimestamp) ->
	Peer = ar_http_util:arweave_peer(Req),
	case ar_blacklist_middleware:is_peer_banned(Peer) of
		not_banned ->
			post_block(check_joined, Peer, {Req, Pid, Encoding}, ReceiveTimestamp);
		banned ->
			{403, #{}, <<"IP address blocked due to previous request.">>, Req}
	end.

post_block(check_joined, Peer, {Req, Pid, Encoding}, ReceiveTimestamp) ->
	case ar_node:is_joined() of
		true ->
			ConfirmedHeight = ar_node:get_height() - ar_block:get_consensus_window_size(),
			case {Encoding, ConfirmedHeight >= ar_fork:height_2_6()} of
				{json, true} ->
					%% We state explicitly here that the JSON-encoded POST
					%% /block is not supported after the 2.6 fork. However,
					%% this check is not strictly necessary because
					%% ar_serialize:json_struct_to_block/1 fails unless the
					%% block height is smaller than the fork 2.6 height.
					{400, #{}, <<>>, Req};
				_ ->
					post_block(check_block_hash_header, Peer, {Req, Pid, Encoding},
							ReceiveTimestamp)
			end;
		false ->
			%% The node is not ready to validate and accept blocks.
			%% If the network adopts this block, ar_poller will catch up.
			{503, #{}, <<"Not joined.">>, Req}
	end;
post_block(check_block_hash_header, Peer, {Req, Pid, Encoding}, ReceiveTimestamp) ->
	%% Short-circuit on the advertised hash before reading the body, when the
	%% block was already processed.
	case cowboy_req:header(<<"arweave-block-hash">>, Req, not_set) of
		not_set ->
			post_block(read_body, Peer, {Req, Pid, Encoding}, ReceiveTimestamp);
		EncodedBH ->
			case ar_util:safe_decode(EncodedBH) of
				{ok, BH} when byte_size(BH) =< 48 ->
					case ar_ignore_registry:member(BH) of
						true ->
							check_block_receive_timestamp(BH),
							{208, #{}, <<"Block already processed.">>, Req};
						false ->
							post_block(read_body, Peer, {Req, Pid, Encoding},
									ReceiveTimestamp)
					end;
				_ ->
					%% An invalid header is ignored, not rejected.
					post_block(read_body, Peer, {Req, Pid, Encoding}, ReceiveTimestamp)
			end
	end;
post_block(read_body, Peer, {Req, Pid, Encoding}, ReceiveTimestamp) ->
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			case decode_block(Body, Encoding) of
				{error, _} ->
					{400, #{}, <<"Invalid block.">>, Req2};
				{ok, BShadow} ->
					post_block(check_transactions_are_present, {BShadow, Peer}, Req2,
							ReceiveTimestamp)
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{503, #{}, jiffy:encode(#{ error => timeout }), Req}
	end;
post_block(check_transactions_are_present, {BShadow, Peer}, Req, ReceiveTimestamp) ->
	%% The post_block2 process flag is set by the POST /block2 handler.
	case erlang:get(post_block2) of
		true ->
			case get_missing_tx_identifiers(BShadow#block.txs) of
				[] ->
					post_block(enqueue_block, {BShadow, Peer}, Req, ReceiveTimestamp);
				{error, tx_list_too_long} ->
					{400, #{}, <<>>, Req};
				MissingTXIDs ->
					%% 418: ask the peer to re-submit with these transactions.
					{418, #{}, encode_txids(MissingTXIDs), Req}
			end;
		_ ->
			%% POST /block; do not reject for backwards-compatibility.
			post_block(enqueue_block, {BShadow, Peer}, Req, ReceiveTimestamp)
	end;
post_block(enqueue_block, {B, Peer}, Req, ReceiveTimestamp) ->
	%% Pre-2.6 blocks may carry the recall byte in a header.
	B2 =
		case B#block.height >= ar_fork:height_2_6() of
			true ->
				B;
			false ->
				case cowboy_req:header(<<"arweave-recall-byte">>, Req, not_set) of
					not_set ->
						B;
					ByteBin ->
						case catch binary_to_integer(ByteBin) of
							RecallByte when is_integer(RecallByte) ->
								B#block{ recall_byte = RecallByte };
							_ ->
								B
						end
				end
		end,
	?LOG_INFO([{event, received_block}, {block, ar_util:encode(B#block.indep_hash)},
			{peer, ar_util:format_peer(Peer)}]),
	BodyReadTime = ar_http_req:body_read_time(Req),
	case ar_block_pre_validator:pre_validate(B2, Peer, ReceiveTimestamp) of
		ok ->
			%% Credit the peer's gossip score only when pre-validation accepted
			%% the block for processing.
			ar_peers:rate_gossiped_data(Peer, block,
					erlang:convert_time_unit(BodyReadTime, native, microsecond),
					byte_size(term_to_binary(B)));
		_ ->
			ok
	end,
	{200, #{}, <<"OK">>, Req}.

%% @doc If the cached block has no receive timestamp yet — i.e. this node
%% mined it and is now seeing it gossipped back for the first time — emit a
%% mined_block_received event so the timestamp gets recorded.
check_block_receive_timestamp(H) ->
	case ar_block_cache:get(block_cache, H) of
		not_found ->
			not_found;
		B ->
			case B#block.receive_timestamp of
				undefined ->
					%% This node mined block H and this is the first time it's been
					%% gossipped back to it. Update the node's receive_timestamp.
					ar_events:send(block, {mined_block_received, H,
							erlang:timestamp()});
				_ ->
					ok
			end
	end.

%% @doc Route POST /partial_solution depending on the node role: pool server
%% (internal API secret) or coordinated-mining exit peer that is a pool
%% client (CM API secret); 501 otherwise.
handle_post_partial_solution(Req, Pid) ->
	{ok, Config} = arweave_config:get_env(),
	CMExitNode = ar_coordination:is_exit_peer() andalso ar_pool:is_client(),
	case {Config#config.is_pool_server, CMExitNode} of
		{false, false} ->
			{501, #{}, jiffy:encode(#{ error => configuration }), Req};
		{true, _} ->
			case check_internal_api_secret(Req) of
				{reject, {Status, Headers, Body}} ->
					{Status, Headers, Body, Req};
				pass ->
					handle_post_partial_solution_pool_server(Req, Pid)
			end;
		{_, true} ->
			case check_cm_api_secret(Req) of
				{reject, {Status, Headers, Body}} ->
					{Status, Headers, Body, Req};
				pass ->
					handle_post_partial_solution_cm_exit_peer_pool_client(Req, Pid)
			end
	end.
%% @doc Pool server path for POST /partial_solution: decode the solution
%% JSON, process it, and reply with the serialized response.
handle_post_partial_solution_pool_server(Req, Pid) ->
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			%% Both jiffy:decode and json_map_to_solution may throw on bad input.
			case catch ar_serialize:json_map_to_solution(
					jiffy:decode(Body, [return_maps])) of
				{'EXIT', _} ->
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
				Solution ->
					Response = ar_pool:process_partial_solution(Solution),
					JSON = ar_serialize:partial_solution_response_to_json_struct(
							Response),
					{200, #{}, ar_serialize:jsonify(JSON), Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{500, #{}, <<"Handler timeout">>, Req}
	end.

%% @doc CM exit peer (pool client) path for POST /partial_solution: forward
%% the raw body to the pool without decoding it locally.
handle_post_partial_solution_cm_exit_peer_pool_client(Req, Pid) ->
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			ar_pool:post_partial_solution(Body),
			{200, #{}, jiffy:encode(#{}), Req2};
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{500, #{}, <<"Handler timeout">>, Req}
	end.

%% @doc Route GET /jobs depending on the node role (pool server vs. CM exit
%% peer that is a pool client); 501 when neither role applies.
handle_get_jobs(PrevOutput, Req) ->
	{ok, Config} = arweave_config:get_env(),
	CMExitNode = ar_coordination:is_exit_peer() andalso ar_pool:is_client(),
	case {Config#config.is_pool_server, CMExitNode} of
		{false, false} ->
			{501, #{}, jiffy:encode(#{ error => configuration }), Req};
		{true, _} ->
			case check_internal_api_secret(Req) of
				{reject, {Status, Headers, Body}} ->
					{Status, Headers, Body, Req};
				pass ->
					handle_get_jobs_pool_server(PrevOutput, Req)
			end;
		{_, true} ->
			case check_cm_api_secret(Req) of
				{reject, {Status, Headers, Body}} ->
					{Status, Headers, Body, Req};
				pass ->
					handle_get_jobs_cm_exit_peer_pool_client(PrevOutput, Req)
			end
	end.
%% @doc Build the mining jobs reply for a pool server: poll (every 200 ms, up
%% to ?GET_JOBS_TIMEOUT_S seconds) for VDF step triplets newer than
%% PrevOutput, then wrap them with the current session data and difficulty.
handle_get_jobs_pool_server(PrevOutput, Req) ->
	Props = ets:select(node_state, [{{'$1', '$2'},
			[{'or', {'==', '$1', diff_pair}, {'==', '$1', nonce_limiter_info}}],
			['$_']}]),
	DiffPair = proplists:get_value(diff_pair, Props),
	Info = proplists:get_value(nonce_limiter_info, Props),
	Result = ar_util:do_until(
		fun() ->
			S = ar_nonce_limiter:get_step_triplets(Info, PrevOutput, ?GET_JOBS_COUNT),
			case S of
				[] ->
					false;
				_ ->
					{ok, S}
			end
		end,
		200,
		(?GET_JOBS_TIMEOUT_S) * 1000),
	%% On timeout (or any non-{ok, _} result) reply with an empty job list.
	Steps =
		case Result of
			{ok, S} ->
				S;
			_ ->
				[]
		end,
	{NextSeed, IntervalNumber, NextVDFDiff} = ar_nonce_limiter:session_key(Info),
	JobList = [#job{ output = O, global_step_number = SN, partition_upper_bound = U }
			|| {O, SN, U} <- Steps],
	Jobs = #jobs{ jobs = JobList, seed = Info#nonce_limiter_info.seed,
			next_seed = NextSeed, interval_number = IntervalNumber,
			next_vdf_difficulty = NextVDFDiff, partial_diff = DiffPair },
	{200, #{}, ar_serialize:jsonify(ar_serialize:jobs_to_json_struct(Jobs)), Req}.

%% @doc CM exit peer (pool client) path for GET /jobs: relay the jobs
%% fetched from the pool.
handle_get_jobs_cm_exit_peer_pool_client(PrevOutput, Req) ->
	{200, #{},
		ar_serialize:jsonify(
			ar_serialize:jobs_to_json_struct(ar_pool:get_jobs(PrevOutput))),
		Req}.

%% Only for cm miners that are NOT exit peers.
handle_post_pool_cm_jobs(Req, Pid) ->
	PoolCMMiner = (not ar_coordination:is_exit_peer()) andalso ar_pool:is_client(),
	case PoolCMMiner of
		false ->
			{501, #{}, jiffy:encode(#{ error => configuration }), Req};
		true ->
			case check_cm_api_secret(Req) of
				{reject, {Status, Headers, Body}} ->
					{Status, Headers, Body, Req};
				pass ->
					handle_post_pool_cm_jobs2(Req, Pid)
			end
	end.
%% @doc Decode the posted pool CM jobs and hand them to ar_pool for
%% processing, attributed to the submitting peer.
handle_post_pool_cm_jobs2(Req, Pid) ->
	Peer = ar_http_util:arweave_peer(Req),
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			%% element(2, ...) extracts the decoded map from {ok, Map}; both
			%% the decode and the conversion may throw on bad input.
			case catch ar_serialize:json_map_to_pool_cm_jobs(
					element(2, ar_serialize:json_decode(Body, [return_maps]))) of
				{'EXIT', _} ->
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
				Jobs ->
					ar_pool:process_cm_jobs(Jobs, Peer),
					{200, #{}, <<>>, Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{500, #{}, <<"Handler timeout">>, Req}
	end.

%% @doc Concatenate the given transaction identifiers into one binary.
encode_txids([]) ->
	<<>>;
encode_txids([TXID | TXIDs]) ->
	<< TXID/binary, (encode_txids(TXIDs))/binary >>.

%% @doc From a block's transaction list (a mix of #tx{} records and bare
%% TXID binaries), return the TXIDs not found in the mempool or block cache,
%% or {error, tx_list_too_long} when the list exceeds ?BLOCK_TX_COUNT_LIMIT.
get_missing_tx_identifiers(TXIDs) ->
	get_missing_tx_identifiers(TXIDs, [], 0).

get_missing_tx_identifiers([], MissingTXIDs, _N) ->
	MissingTXIDs;
get_missing_tx_identifiers([_ | _], _, N) when N == ?BLOCK_TX_COUNT_LIMIT ->
	{error, tx_list_too_long};
get_missing_tx_identifiers([#tx{} | TXIDs], MissingTXIDs, N) ->
	%% A full #tx{} record is, by definition, not missing.
	get_missing_tx_identifiers(TXIDs, MissingTXIDs, N + 1);
get_missing_tx_identifiers([TXID | TXIDs], MissingTXIDs, N) ->
	case ar_node_worker:is_mempool_or_block_cache_tx(TXID) of
		true ->
			get_missing_tx_identifiers(TXIDs, MissingTXIDs, N + 1);
		false ->
			get_missing_tx_identifiers(TXIDs, [TXID | MissingTXIDs], N + 1)
	end.

%% @doc Decode a binary of concatenated 48-byte block hashes into a list.
%% Returns error when the length is not a multiple of 48.
decode_recent_hash_list(<<>>) ->
	{ok, []};
decode_recent_hash_list(<< H:48/binary, Rest/binary >>) ->
	case decode_recent_hash_list(Rest) of
		error ->
			error;
		{ok, HL} ->
			{ok, [H | HL]}
	end;
decode_recent_hash_list(_Rest) ->
	error.

%% @doc Find the first hash of HL (the peer's recent hash list, newest first)
%% that appears in our BlockTXPairs and serialize the blocks the peer is
%% missing after that point; no_intersection when no hash matches.
get_recent_hash_list_diff([H | HL], BlockTXPairs) ->
	case lists:dropwhile(fun({BH, _TXIDs}) -> BH /= H end, BlockTXPairs) of
		[] ->
			get_recent_hash_list_diff(HL, BlockTXPairs);
		Tail ->
			get_recent_hash_list_diff(HL, tl(Tail), H)
	end;
get_recent_hash_list_diff([], _BlockTXPairs) ->
	no_intersection.
%% @doc Advance past the prefix of BlockTXPairs that the peer already has
%% (hashes matching HL in order), then serialize the divergence point
%% followed by the remaining blocks.
get_recent_hash_list_diff(HL, BlockTXPairs, PrevH) ->
	case {HL, BlockTXPairs} of
		{[H | HL2], [{H, _SizeTaggedTXs} | Rest]} ->
			%% The peer has this block too — keep walking forward.
			get_recent_hash_list_diff(HL2, Rest, H);
		_ ->
			%% Divergence: emit the last common hash, then the missing blocks.
			<< PrevH/binary, (get_recent_hash_list_diff(BlockTXPairs))/binary >>
	end.

%% @doc Serialize block/tx pairs as: 48-byte block hash, 16-bit transaction
%% count, then the concatenated transaction identifiers; repeated per block.
get_recent_hash_list_diff([]) ->
	<<>>;
get_recent_hash_list_diff([{H, TXIDs} | Rest]) ->
	Count = length(TXIDs),
	iolist_to_binary([<< H:48/binary, Count:16 >>, TXIDs,
			get_recent_hash_list_diff(Rest)]).

%% @doc Sum all account balances reachable from the given wallet-tree root,
%% walking the tree chunk by chunk and redenominating each balance.
get_total_supply(RootHash, Cursor, Sum, Denomination) ->
	{ok, {NextCursor, Range}} = ar_wallets:get_chunk(RootHash, Cursor),
	Total = Sum + get_balance_sum(Range, Denomination),
	case NextCursor of
		last ->
			Total;
		_ ->
			get_total_supply(RootHash, NextCursor, Total, Denomination)
	end.

%% @doc Sum the balances in one chunk of accounts, converting each to
%% BlockDenomination. Two account shapes exist: the legacy {Balance, LastTX}
%% (implicitly denomination 1) and the extended 4-tuple carrying its own
%% denomination.
get_balance_sum(Range, BlockDenomination) ->
	lists:foldl(
		fun	({_, {Balance, _LastTX}}, Acc) ->
				Acc + ar_pricing:redenominate(Balance, 1, BlockDenomination);
			({_, {Balance, _LastTX, Denomination, _MiningPermission}}, Acc) ->
				Acc + ar_pricing:redenominate(Balance, Denomination, BlockDenomination)
		end,
		0, Range).

%% Return the block hash list associated with a block.
process_request(get_block, [Type, ID, <<"hash_list">>], Req) -> case find_block(Type, ID) of {error, height_not_integer} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; unavailable -> {404, #{}, <<"Not Found.">>, Req}; B -> ok = ar_semaphore:acquire(get_block_index, ?DEFAULT_CALL_TIMEOUT), case ar_node:get_height() >= ar_fork:height_2_6() of true -> {400, #{}, jiffy:encode(#{ error => not_supported_since_fork_2_6 }), Req}; false -> CurrentBI = ar_node:get_block_index(), HL = ar_block:generate_hash_list_for_block(B#block.indep_hash, CurrentBI), {200, #{}, ar_serialize:jsonify(lists:map(fun ar_util:encode/1, HL)), Req} end end; %% @doc Return the wallet list associated with a block. process_request(get_block, [Type, ID, <<"wallet_list">>], Req) -> case find_block(Type, ID) of {error, height_not_integer} -> {400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req}; unavailable -> {404, #{}, <<"Not Found.">>, Req}; B -> {ok, Config} = arweave_config:get_env(), case {B#block.height >= ar_fork:height_2_2(), lists:member(serve_wallet_lists, Config#config.enable)} of {true, false} -> {400, #{}, jiffy:encode(#{ error => does_not_serve_blocks_after_2_2_fork }), Req}; {true, _} -> ok = ar_semaphore:acquire(get_wallet_list, ?DEFAULT_CALL_TIMEOUT), case ar_storage:read_wallet_list(B#block.wallet_list) of {ok, Tree} -> {200, #{}, ar_serialize:jsonify( ar_serialize:wallet_list_to_json_struct( B#block.reward_addr, false, Tree )), Req}; _ -> {404, #{}, <<"Block not found.">>, Req} end; _ -> WLFilepath = ar_storage:wallet_list_filepath(B#block.wallet_list), case filelib:is_file(WLFilepath) of true -> {200, #{}, sendfile(WLFilepath), Req}; false -> {404, #{}, <<"Block not found.">>, Req} end end end; %% Return a requested field of a given block. %% GET request to endpoint /block/hash/{hash|height}/{field}. 
%%
%% field :: nonce | previous_block | timestamp | last_retarget | diff | height | hash |
%% indep_hash | txs | hash_list | wallet_list | reward_addr | tags | reward_pool
%% @doc Return the requested field of a block as JSON. Only served when the
%% node enables the subfield_queries feature.
process_request(get_block, [Type, ID, Field], Req) ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(subfield_queries, Config#config.enable) of
		true ->
			case find_block(Type, ID) of
				{error, height_not_integer} ->
					{400, #{}, jiffy:encode(#{ error => size_must_be_an_integer }), Req};
				unavailable ->
					{404, #{}, <<"Not Found.">>, Req};
				B ->
					{BLOCKJSON} = ar_serialize:block_to_json_struct(B),
					%% Only accept already-existing atoms so client-supplied
					%% field names cannot grow the atom table.
					case catch list_to_existing_atom(binary_to_list(Field)) of
						{'EXIT', _} ->
							{404, #{}, <<"Not Found.">>, Req};
						Atom ->
							case lists:keyfind(Atom, 1, BLOCKJSON) of
								{_, Res} ->
									Result = block_field_to_string(Field, Res),
									{200, #{}, Result, Req};
								_ ->
									{404, #{}, <<"Not Found.">>, Req}
							end
					end
			end;
		_ ->
			{421, #{}, <<"Subfield block querying is disabled on this node.">>, Req}
	end.

%% @doc Return the balance the given address had at the block of the given
%% height. The block header is fetched from the block cache first, then from
%% disk; the balance is then read from that block's account tree.
handle_get_block_wallet_balance(EncodedHeight, EncodedAddr, Req) ->
	case ar_node:is_joined() of
		false ->
			not_joined(Req);
		true ->
			CurrentHeight = ar_node:get_height(),
			try binary_to_integer(EncodedHeight) of
				Height when Height < 0 ->
					{400, #{}, jiffy:encode(#{ error => invalid_height }), Req};
				Height when Height > CurrentHeight ->
					{404, #{}, jiffy:encode(#{ error => block_not_found }), Req};
				Height ->
					case ar_block_index:get_element_by_height(Height) of
						not_found ->
							{404, #{}, jiffy:encode(#{ error => block_not_found }), Req};
						{H, _, _} ->
							%% Prefer the in-memory block cache; fall back to disk.
							B = case ar_block_cache:get(block_cache, H) of
								not_found ->
									ar_storage:read_block(H);
								B2 ->
									B2
							end,
							case B of
								unavailable ->
									{404, #{}, jiffy:encode(#{ error => block_not_found }),
											Req};
								#block{ wallet_list = RootHash } ->
									case ar_util:safe_decode(EncodedAddr) of
										{ok, Addr} ->
											handle_get_block_wallet_balance2(Addr,
													RootHash, Req);
										{error, invalid} ->
											{400, #{},
												jiffy:encode(#{ error => invalid_address }),
												Req}
									end
							end
					end
			catch _:_ ->
				{400, #{}, jiffy:encode(#{ error => invalid_height }), Req}
			end
	end.

%% Look the balance up in the in-memory account tree; fall back to the
%% on-disk account storage when the tree does not contain the address.
handle_get_block_wallet_balance2(Addr, RootHash, Req) ->
	case ar_wallets:get_balance(RootHash, Addr) of
		{error, not_found} ->
			handle_get_block_wallet_balance3(Addr, RootHash, Req);
		Balance when is_integer(Balance) ->
			{200, #{}, integer_to_binary(Balance), Req};
		_Error ->
			{500, #{}, <<>>, Req}
	end.

%% Read the account from disk. Accounts are stored either as the legacy
%% 2-tuple or as the 4-tuple carrying denomination and mining permission.
handle_get_block_wallet_balance3(Addr, RootHash, Req) ->
	case ar_storage:read_account(Addr, RootHash) of
		not_found ->
			{404, #{}, jiffy:encode(#{ error => account_data_not_found }), Req};
		{Balance, _LastTX} ->
			{200, #{}, integer_to_binary(Balance), Req};
		{Balance, _LastTX, _Denomination, _MiningPermission} ->
			{200, #{}, integer_to_binary(Balance), Req}
	end.

%% @doc Serve a chunk of the wallet list with the given root hash, starting at
%% the given cursor. The reply is ETF by default, JSON when the client sends
%% content-type: application/json.
process_get_wallet_list_chunk(EncodedRootHash, EncodedCursor, Req) ->
	DecodeCursorResult =
		case EncodedCursor of
			first ->
				{ok, first};
			_ ->
				ar_util:safe_decode(EncodedCursor)
		end,
	case {ar_util:safe_decode(EncodedRootHash), DecodeCursorResult} of
		{{error, invalid}, _} ->
			{400, #{}, <<"Invalid root hash.">>, Req};
		{_, {error, invalid}} ->
			%% Fixed: this branch signals a bad cursor, not a bad root hash
			%% (the message previously said "Invalid root hash.").
			{400, #{}, <<"Invalid cursor.">>, Req};
		{{ok, RootHash}, {ok, Cursor}} ->
			case ar_wallets:get_chunk(RootHash, Cursor) of
				{ok, {NextCursor, Wallets}} ->
					SerializeFn =
						case cowboy_req:header(<<"content-type">>, Req) of
							<<"application/json">> ->
								fun wallet_list_chunk_to_json/1;
							<<"application/etf">> ->
								fun erlang:term_to_binary/1;
							_ ->
								fun erlang:term_to_binary/1
						end,
					Reply = SerializeFn(#{ next_cursor => NextCursor,
							wallets => Wallets }),
					{200, #{}, Reply, Req};
				{error, root_hash_not_found} ->
					{404, #{}, <<"Root hash not found.">>, Req}
			end
	end.

%% Encode a wallet-list chunk as JSON; next_cursor is omitted on the last chunk.
wallet_list_chunk_to_json(#{ next_cursor := NextCursor, wallets := Wallets }) ->
	SerializedWallets =
		lists:map(
			fun({Addr, Value}) -> ar_serialize:wallet_to_json_struct(Addr, Value) end,
			Wallets),
	case NextCursor of
		last ->
			jiffy:encode(#{ wallets => SerializedWallets });
		Cursor when is_binary(Cursor) ->
			jiffy:encode(#{ next_cursor => ar_util:encode(Cursor),
					wallets => SerializedWallets })
	end.
%% @doc Find a block, given a type and a specifier.
find_block(<<"height">>, RawHeight) ->
	case catch binary_to_integer(RawHeight) of
		{'EXIT', _} ->
			{error, height_not_integer};
		Height ->
			case ar_block_index:get_element_by_height(Height) of
				not_found ->
					unavailable;
				{H, _, _} ->
					ar_storage:read_block(H)
			end
	end;
find_block(<<"hash">>, ID) ->
	case ar_util:safe_decode(ID) of
		{ok, H} ->
			ar_storage:read_block(H);
		_ ->
			unavailable
	end.

%% State machine parsing a posted transaction:
%%   check_header -> check_ignore_list -> read_body
%%     -> parse_json | parse_binary -> verify_id_match.
%% The optional arweave-tx-id header lets the node register the TX in the
%% ignore registry before the body is read, so concurrent duplicate posts
%% are rejected early. The registry reference is kept in the process
%% dictionary (tx_id_ref) so later states remove exactly the record that
%% was added.
post_tx_parse_id({Req, Pid, Encoding}) ->
	post_tx_parse_id(check_header, {Req, Pid, Encoding}).

post_tx_parse_id(check_header, {Req, Pid, Encoding}) ->
	case cowboy_req:header(<<"arweave-tx-id">>, Req, not_set) of
		not_set ->
			post_tx_parse_id(read_body, {not_set, Req, Pid, Encoding});
		EncodedTXID ->
			case ar_util:safe_decode(EncodedTXID) of
				{ok, TXID} when byte_size(TXID) =< 32 ->
					post_tx_parse_id(check_ignore_list, {TXID, Req, Pid, Encoding});
				_ ->
					{error, invalid_hash, Req}
			end
	end;
post_tx_parse_id(check_ignore_list, {TXID, Req, Pid, Encoding}) ->
	case ar_mempool:is_known_tx(TXID) of
		true ->
			{error, tx_already_processed, TXID, Req};
		false ->
			Ref = make_ref(),
			erlang:put(tx_id_ref, Ref),
			%% Temporarily ignore this TXID (5s) while we parse the body.
			ar_ignore_registry:add_ref(TXID, Ref, 5000),
			post_tx_parse_id(read_body, {TXID, Req, Pid, Encoding})
	end;
post_tx_parse_id(read_body, {TXID, Req, Pid, Encoding}) ->
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			case Encoding of
				json ->
					post_tx_parse_id(parse_json, {TXID, Req2, Body});
				binary ->
					post_tx_parse_id(parse_binary, {TXID, Req2, Body})
			end;
		{error, body_size_too_large} ->
			{error, body_size_too_large, Req};
		{error, timeout} ->
			{error, timeout}
	end;
post_tx_parse_id(parse_json, {TXID, Req, Body}) ->
	Ref = erlang:get(tx_id_ref),
	case catch ar_serialize:json_struct_to_tx(Body) of
		{'EXIT', _} ->
			%% Parsing failed: drop the registry record added earlier, if any.
			case TXID of
				not_set ->
					noop;
				_ ->
					ar_ignore_registry:remove_ref(TXID, Ref)
			end,
			{error, invalid_json, Req};
		{error, invalid_signature_type} ->
			case TXID of
				not_set ->
					noop;
				_ ->
					ar_ignore_registry:remove_ref(TXID, Ref),
					ar_tx_db:put_error_codes(TXID, [<<"invalid_signature_type">>])
			end,
			{error, invalid_signature_type, Req};
		{error, _} ->
			case TXID of
				not_set ->
					noop;
				_ ->
					ar_ignore_registry:remove_ref(TXID, Ref)
			end,
			{error, invalid_json, Req};
		TX ->
			post_tx_parse_id(verify_id_match, {TXID, Req, TX})
	end;
post_tx_parse_id(parse_binary, {TXID, Req, Body}) ->
	Ref = erlang:get(tx_id_ref),
	case catch ar_serialize:binary_to_tx(Body) of
		{'EXIT', _} ->
			case TXID of
				not_set ->
					noop;
				_ ->
					ar_ignore_registry:remove_ref(TXID, Ref)
			end,
			%% NOTE(review): the body is binary here, yet the error atom is
			%% invalid_json - presumably kept uniform for callers; confirm.
			{error, invalid_json, Req};
		{error, _} ->
			case TXID of
				not_set ->
					noop;
				_ ->
					ar_ignore_registry:remove_ref(TXID, Ref)
			end,
			{error, invalid_json, Req};
		{ok, TX} ->
			post_tx_parse_id(verify_id_match, {TXID, Req, TX})
	end;
post_tx_parse_id(verify_id_match, {MaybeTXID, Req, TX}) ->
	TXID = TX#tx.id,
	Ref = erlang:get(tx_id_ref),
	case MaybeTXID of
		TXID ->
			{ok, TX, Req};
		MaybeNotSet ->
			%% The header TXID (if present) does not match the parsed TX id:
			%% drop the record registered under the header value, then re-run
			%% the size/mempool checks against the actual TX id.
			case MaybeNotSet of
				not_set ->
					noop;
				MismatchingTXID ->
					ar_ignore_registry:remove_ref(MismatchingTXID, Ref)
			end,
			case byte_size(TXID) > 32 of
				true ->
					{error, invalid_hash, Req};
				false ->
					case ar_mempool:is_known_tx(TXID) of
						true ->
							{error, tx_already_processed, TXID, Req};
						false ->
							Ref2 =
								case Ref of
									undefined ->
										make_ref();
									_ ->
										Ref
								end,
							erlang:put(tx_id_ref, Ref2),
							ar_ignore_registry:add_ref(TXID, Ref2, 5000),
							{ok, TX, Req}
					end
			end
	end.

%% Accept a VDF update pushed by a configured VDF server peer;
%% reject pushes from anyone else.
handle_post_vdf(Req, Pid) ->
	Peer = ar_http_util:arweave_peer(Req),
	case ets:member(ar_peers, {vdf_server_peer, Peer}) of
		false ->
			{400, #{}, <<>>, Req};
		true ->
			handle_post_vdf2(Req, Pid, Peer)
	end.

handle_post_vdf2(Req, Pid, Peer) ->
	case ar_config:pull_from_remote_vdf_server() of
		true ->
			%% We are pulling the updates - tell the server not to push them.
			Response = #nonce_limiter_update_response{ postpone = 120 },
			Bin = ar_serialize:nonce_limiter_update_response_to_binary(Response),
			{202, #{}, Bin, Req};
		false ->
			handle_post_vdf3(Req, Pid, Peer)
	end.
%% Deserialize and apply a VDF update pushed by the given peer. Replies 202
%% with a serialized #nonce_limiter_update_response{} when the update cannot
%% be applied as-is.
handle_post_vdf3(Req, Pid, Peer) ->
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			Format =
				case ar_config:compute_own_vdf() of
					true ->
						%% If we compute our own VDF, we need to know the VDF
						%% difficulties so that we can continue extending the
						%% new session. The VDF difficulties have been
						%% introduced in the format number 4.
						4;
					false ->
						2
				end,
			case ar_serialize:binary_to_nonce_limiter_update(Format, Body) of
				{ok, Update} ->
					case ar_nonce_limiter:apply_external_update(Update, Peer) of
						ok ->
							{200, #{}, <<>>, Req2};
						#nonce_limiter_update_response{} = Response ->
							Bin = ar_serialize:nonce_limiter_update_response_to_binary(
									Response),
							{202, #{}, Bin, Req2}
					end;
				{error, _} ->
					%% We couldn't deserialize the update, ask for a different format.
					Response = #nonce_limiter_update_response{ format = Format },
					Bin = ar_serialize:nonce_limiter_update_response_to_binary(Response),
					%% Fixed: reply with the post-body-read request object
					%% (Req2), consistent with the other branches.
					{202, #{}, Bin, Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req};
		{error, timeout} ->
			{503, #{}, jiffy:encode(#{ error => timeout }), Req}
	end.

%% Serve a VDF update. Unless the node runs as a public VDF server, only
%% registered VDF client peers may ask.
handle_get_vdf(Req, Call, Format) ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(public_vdf_server, Config#config.enable) of
		true ->
			handle_get_vdf2(Req, Call, Format);
		false ->
			Peer = ar_http_util:arweave_peer(Req),
			case ets:lookup(ar_peers, {vdf_client_peer, Peer}) of
				[] ->
					{400, #{}, jiffy:encode(#{ error => not_our_vdf_client }), Req};
				[{_, _RawPeer}] ->
					handle_get_vdf2(Req, Call, Format)
			end
	end.

%% Fetch the requested (serialized) VDF update/session from the VDF server
%% process and serve it verbatim.
handle_get_vdf2(Req, Call, Format) ->
	Update =
		case Call of
			get_update ->
				ar_nonce_limiter_server:get_update(Format);
			get_session ->
				ar_nonce_limiter_server:get_full_update(Format);
			get_previous_session ->
				ar_nonce_limiter_server:get_full_prev_update(Format)
		end,
	case Update of
		not_found ->
			{404, #{}, <<>>, Req};
		Update ->
			{200, #{}, Update, Req}
	end.

%% Read the whole request body via the handler's reader process, bounded by
%% the default body size limit.
read_complete_body(Req, Pid) ->
	read_complete_body(Req, Pid, ?MAX_BODY_SIZE).

read_complete_body(Req, Pid, SizeLimit) ->
	Pid ! {read_complete_body, self(), Req, SizeLimit},
	receive
		{read_complete_body, {'EXIT', timeout}} ->
			?LOG_WARNING([{event, body_read_cowboy_timeout},
					{method, cowboy_req:method(Req)},
					{path, cowboy_req:path(Req)}]),
			{error, timeout};
		{read_complete_body, Term} ->
			Term
	end.

%% Read one body chunk of the given size via the reader process, with both
%% a reader-side and a local receive timeout.
read_body_chunk(Req, Pid, Size, Timeout) ->
	Pid ! {read_body_chunk, self(), Req, Size, Timeout},
	receive
		{read_body_chunk, {'EXIT', timeout}} ->
			Peer = ar_http_util:arweave_peer(Req),
			?LOG_DEBUG([{event, body_read_cowboy_timeout},
					{method, cowboy_req:method(Req)},
					{path, cowboy_req:path(Req)},
					{peer, ar_util:format_peer(Peer)}]),
			{error, timeout};
		{read_body_chunk, Term} ->
			Term
	after Timeout ->
		Peer = ar_http_util:arweave_peer(Req),
		?LOG_DEBUG([{event, body_read_timeout},
				{method, cowboy_req:method(Req)},
				{path, cowboy_req:path(Req)},
				{peer, ar_util:format_peer(Peer)}]),
		{error, timeout}
	end.

%% Handle a coordinated-mining H1 candidate posted by a peer: relay it to
%% the pool when acting as a pool-client exit peer, otherwise compute H2.
handle_mining_h1(Req, Pid) ->
	Peer = ar_http_util:arweave_peer(Req),
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			case ar_serialize:json_decode(Body, [return_maps]) of
				{ok, JSON} ->
					case catch ar_serialize:json_map_to_candidate(JSON) of
						{'EXIT', _} ->
							{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
						Candidate ->
							case {ar_pool:is_client(), ar_coordination:is_exit_peer()} of
								{true, true} ->
									%% Forward to the pool asynchronously.
									PoolPeer = ar_pool:pool_peer(),
									Jobs = #pool_cm_jobs{ h1_to_h2_jobs = [Candidate] },
									Payload = ar_serialize:jsonify(
										ar_serialize:pool_cm_jobs_to_json_struct(Jobs)),
									spawn(fun() ->
										ar_http_iface_client:post_pool_cm_jobs(PoolPeer,
												Payload)
									end),
									{200, #{}, <<>>, Req2};
								_ ->
									ar_coordination:compute_h2_for_peer(Peer, Candidate),
									%% Fixed: reply with the post-body-read
									%% request object (Req2), as above.
									{200, #{}, <<>>, Req2}
							end
					end;
				{error, _} ->
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req}
	end.
%% Handle a coordinated-mining H2 candidate posted by a peer: relay it to
%% the pool when acting as a pool-client exit peer, otherwise post the
%% solution locally.
handle_mining_h2(Req, Pid) ->
	Peer = ar_http_util:arweave_peer(Req),
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			case ar_serialize:json_decode(Body, [return_maps]) of
				{ok, JSON} ->
					case catch ar_serialize:json_map_to_candidate(JSON) of
						{'EXIT', _} ->
							{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
						Candidate ->
							?LOG_INFO([{event, h2_received},
									{peer, ar_util:format_peer(Peer)}]),
							case {ar_pool:is_client(), ar_coordination:is_exit_peer()} of
								{true, true} ->
									%% Forward to the pool asynchronously.
									PoolPeer = ar_pool:pool_peer(),
									Jobs = #pool_cm_jobs{ h1_read_jobs = [Candidate] },
									Payload = ar_serialize:jsonify(
										ar_serialize:pool_cm_jobs_to_json_struct(Jobs)),
									spawn(fun() ->
										ar_http_iface_client:post_pool_cm_jobs(PoolPeer,
												Payload)
									end),
									{200, #{}, <<>>, Req2};
								_ ->
									ar_mining_server:prepare_and_post_solution(Candidate),
									ar_mining_stats:h2_received_from_peer(Peer),
									%% Fixed: reply with the post-body-read
									%% request object (Req2), as above.
									{200, #{}, <<>>, Req2}
							end
					end;
				{error, _} ->
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req}
	end.

%% Handle a full block solution published by a coordinated-mining peer.
handle_mining_cm_publish(Req, Pid) ->
	Peer = ar_http_util:arweave_peer(Req),
	case read_complete_body(Req, Pid) of
		{ok, Body, Req2} ->
			case ar_serialize:json_decode(Body, [return_maps]) of
				{ok, JSON} ->
					case catch ar_serialize:json_map_to_solution(JSON) of
						{'EXIT', _} ->
							{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2};
						Solution ->
							ar:console("Block candidate ~p from ~p ~n", [
									ar_util:encode(Solution#mining_solution.solution_hash),
									ar_util:format_peer(Peer)]),
							?LOG_INFO("Block candidate ~p from ~p ~n", [
									ar_util:encode(Solution#mining_solution.solution_hash),
									ar_util:format_peer(Peer)]),
							ar_mining_server:prepare_and_post_solution(Solution),
							%% Fixed: reply with the post-body-read request
							%% object (Req2) rather than the pre-read one.
							{200, #{}, <<>>, Req2}
					end;
				{error, _} ->
					{400, #{}, jiffy:encode(#{ error => invalid_json }), Req2}
			end;
		{error, body_size_too_large} ->
			{413, #{}, <<"Payload too large">>, Req}
	end.
================================================
FILE: apps/arweave/src/ar_http_iface_rate_limiter_middleware.erl
================================================
%%%
%%% @doc Cowboy handler to manage server-side rate limiting.
%%%
%%% This module provides a routing layer, mapping incoming requests
%%% to respective rate limiter groups (RLG).
%%% The mapping logic can be extended in a quite complex manner if
%%% required, however it should be considered that the execute function will be
%%% called for each HTTP request.
%%%
%%% Also, there is nothing limiting the developer from calling multiple RLGs
%%% for a single request, if necessary.
%%%
%%% The LimiterRef reference in the arweave_limiter:register_or_reject_call/2
%%% call must match one of the RLGs started by the arweave_limiter application,
%%% otherwise a noproc error will be raised.
%%%
%%% We currently use IP addresses and ports as Keys for the calling peers.
%%% However, any Erlang term might be used as a key in an RLG.
%%%
-module(ar_http_iface_rate_limiter_middleware).

-behaviour(cowboy_middleware).

-export([execute/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% @doc Cowboy middleware entry point: route the request to its rate limiter
%% group and either continue the middleware chain or stop with a 429 reply.
execute(Req, Env) ->
	LimiterRef = get_limiter_ref(Req),
	PeerKey = get_peer_key(Req),
	case arweave_limiter:register_or_reject_call(LimiterRef, PeerKey) of
		{reject, Reason, Data} ->
			?LOG_DEBUG([{event, rate_limiter_reject}, {reason, Reason},
					{data, Data}]),
			{stop, reject(Req, Reason, Data)};
		_ ->
			{ok, Req, Env}
	end.

%% Requests from configured local peers go to the dedicated local_peers
%% limiter group; everything else is routed by request path.
get_limiter_ref(Req) ->
	{ok, Config} = arweave_config:get_env(),
	LocalIPs = [config_peer_to_ip_addr(Peer) || Peer <- Config#config.local_peers],
	PeerIP = config_peer_to_ip_addr(get_peer_key(Req)),
	case lists:member(PeerIP, LocalIPs) of
		true ->
			local_peers;
		_ ->
			Path = ar_http_iface_server:split_path(cowboy_req:path(Req)),
			path_to_limiter_ref(Path)
	end.

%% Reply 429 Too Many Requests and stop processing.
reject(Req, _Reason, _Data) ->
	cowboy_req:reply(
		429,
		#{},
		<<"Too Many Requests">>,
		Req
	).

-ifdef(AR_TEST).
%% Test build: fold a parseable x-p2p-port header into the key so several
%% test nodes behind the same IP are limited independently.
get_peer_key(Req) ->
	{{A, B, C, D}, _Port} = cowboy_req:peer(Req),
	case cowboy_req:header(<<"x-p2p-port">>, Req) of
		undefined ->
			{A, B, C, D};
		PortBin ->
			case catch binary_to_integer(PortBin) of
				Port when is_integer(Port) ->
					{A, B, C, D, Port};
				_ ->
					{A, B, C, D}
			end
	end.
-else.
%% Production build: the limiter key is the source IP address only.
get_peer_key(Req) ->
	{{A, B, C, D}, _Port} = cowboy_req:peer(Req),
	{A, B, C, D}.
-endif.

%% Normalize the various peer term shapes to a bare IPv4 tuple.
config_peer_to_ip_addr({{A, B, C, D}, _Port}) -> {A, B, C, D};
config_peer_to_ip_addr({A, B, C, D, _Port}) -> {A, B, C, D};
config_peer_to_ip_addr({A, B, C, D}) -> {A, B, C, D}.

%% Map a split request path to a rate limiter group atom.
path_to_limiter_ref([<<"chunk">> | _]) -> chunk;
path_to_limiter_ref([<<"chunk2">> | _]) -> chunk;
path_to_limiter_ref([<<"data_sync_record">> | _]) -> data_sync_record;
path_to_limiter_ref([<<"recent_hash_list_diff">> | _]) -> recent_hash_list_diff;
path_to_limiter_ref([<<"hash_list">>]) -> block_index;
path_to_limiter_ref([<<"hash_list2">>]) -> block_index;
path_to_limiter_ref([<<"block_index">>]) -> block_index;
path_to_limiter_ref([<<"block_index2">>]) -> block_index;
path_to_limiter_ref([<<"block">>, _Type, _ID, <<"hash_list">>]) -> block_index;
path_to_limiter_ref([<<"wallet_list">>]) -> wallet_list;
path_to_limiter_ref([<<"block">>, _Type, _ID, <<"wallet_list">>]) -> wallet_list;
path_to_limiter_ref([<<"vdf">>]) -> get_vdf;
path_to_limiter_ref([<<"vdf2">>]) -> get_vdf;
path_to_limiter_ref([<<"vdf">>, <<"session">>]) -> get_vdf_session;
path_to_limiter_ref([<<"vdf2">>, <<"session">>]) -> get_vdf_session;
path_to_limiter_ref([<<"vdf3">>, <<"session">>]) -> get_vdf_session;
path_to_limiter_ref([<<"vdf4">>, <<"session">>]) -> get_vdf_session;
path_to_limiter_ref([<<"vdf">>, <<"previous_session">>]) -> get_previous_vdf_session;
path_to_limiter_ref([<<"vdf2">>, <<"previous_session">>]) -> get_previous_vdf_session;
%% No vdf3 prev_session in ar_blacklist_middleware.hrl ?RPM_BY_PATH
path_to_limiter_ref([<<"vdf4">>, <<"previous_session">>]) -> get_previous_vdf_session;
path_to_limiter_ref([<<"metrics">> | _ ]) -> metrics;
path_to_limiter_ref(_) -> general.

================================================
FILE: apps/arweave/src/ar_http_iface_server.erl
================================================
%%%===================================================================
%%% @doc Handle http requests.
%%%===================================================================
-module(ar_http_iface_server).

-behavior(gen_server).

-export([start_link/0]).
-export([init/1]).
-export([handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
-export([split_path/1, label_http_path/1, label_req/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Middleware chain executed, in order, for every incoming request.
-define(HTTP_IFACE_MIDDLEWARES, [
	ar_http_iface_rate_limiter_middleware,
	ar_network_middleware,
	cowboy_router,
	ar_http_iface_middleware,
	cowboy_handler
]).

-define(HTTP_IFACE_ROUTES, [
	{"/metrics/[:registry]", ar_prometheus_cowboy_handler, []},
	{"/[...]", ar_http_iface_handler, []}
]).

%% Single-segment endpoint names recognized by name_route/1.
-define(ENDPOINTS, ["info", "block", "block_announcement", "block2", "tx", "tx2",
		"queue", "recent_hash_list", "recent_hash_list_diff", "tx_anchor", "arql",
		"time", "chunk", "chunk2", "data_sync_record", "sync_buckets",
		"footprint_buckets", "wallet", "unsigned_tx", "peers", "hash_list",
		"block_index", "block_index2", "total_supply", "wallet_list", "height",
		"metrics", "vdf", "vdf2", "partial_solution", "pool_cm_jobs"]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

init(_) ->
	?LOG_INFO([{start, ?MODULE}, {pid, self()}]),
	% this process needs to be stopped in a clean way,
	% if something goes wrong, the connections must
	% be cleaned before leaving.
	erlang:process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	case start_http_iface_listener(Config) of
		{ok, Pid} ->
			{ok, Pid};
		Elsewise ->
			{error, Elsewise}
	end.

%% Split a request path binary into its non-empty segments.
split_path(Path) ->
	binary:split(Path, <<"/">>, [global, trim_all]).

%% @doc Return the HTTP path label,
%% Used for cowboy_requests_total and gun_requests_total metrics, as well as P3 handling.
label_http_path(Path) when is_list(Path) ->
	name_route(Path);
label_http_path(Path) ->
	label_http_path(split_path(Path)).

label_req(Req) ->
	SplitPath = ar_http_iface_server:split_path(cowboy_req:path(Req)),
	ar_http_iface_server:label_http_path(SplitPath).

%% No calls/casts are expected; log and ignore them.
handle_call(Msg, From, State) ->
	?LOG_WARNING([{process, ?MODULE}, {received, Msg}, {from, From}]),
	{noreply, State}.

handle_cast(Msg, State) ->
	?LOG_WARNING([{process, ?MODULE}, {received, Msg}]),
	{noreply, State}.

%% An exit from the linked listener stops the server with the same reason.
handle_info(Msg = {'EXIT', _From, Reason}, _State) ->
	?LOG_ERROR([{process, ?MODULE}, {received, Msg}]),
	{stop, Reason};
handle_info(Msg, State) ->
	?LOG_WARNING([{process, ?MODULE}, {received, Msg}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Start the cowboy listener serving the node HTTP API; TLS is enabled
%% when a certificate file is configured.
start_http_iface_listener(Config) ->
	Dispatch = cowboy_router:compile([{'_', ?HTTP_IFACE_ROUTES}]),
	TlsCertfilePath = Config#config.tls_cert_file,
	TlsKeyfilePath = Config#config.tls_key_file,
	TransportOpts = #{
		% ranch_tcp parameters
		backlog => Config#config.'http_api.tcp.backlog',
		delay_send => Config#config.'http_api.tcp.delay_send',
		keepalive => Config#config.'http_api.tcp.keepalive',
		linger => {
			Config#config.'http_api.tcp.linger',
			Config#config.'http_api.tcp.linger_timeout'
		},
		max_connections => Config#config.'http_api.tcp.max_connections',
		nodelay => Config#config.'http_api.tcp.nodelay',
		num_acceptors => Config#config.'http_api.tcp.num_acceptors',
		send_timeout_close => Config#config.'http_api.tcp.send_timeout_close',
		send_timeout => Config#config.'http_api.tcp.send_timeout',
		shutdown => Config#config.'http_api.tcp.listener_shutdown',
		socket_opts => [
			{port, Config#config.port}
		]
	},
	ProtocolOpts = #{
		active_n => Config#config.'http_api.http.active_n',
		inactivity_timeout => Config#config.'http_api.http.inactivity_timeout',
		linger_timeout => Config#config.'http_api.http.linger_timeout',
		request_timeout => Config#config.'http_api.http.request_timeout',
		idle_timeout => Config#config.http_api_transport_idle_timeout,
		middlewares => ?HTTP_IFACE_MIDDLEWARES,
		env => #{ dispatch => Dispatch },
		metrics_callback => fun prometheus_cowboy2_instrumenter:observe/1,
		stream_handlers => [cowboy_metrics_h, cowboy_stream_h]
	},
	case TlsCertfilePath of
		not_set ->
			cowboy:start_clear(ar_http_iface_listener, TransportOpts, ProtocolOpts);
		_ ->
			%% Fixed: TransportOpts is a map, so the previous
			%% `TransportOpts ++ [...]` would crash with badarg as soon as a
			%% TLS certificate was configured. The certfile/keyfile options
			%% belong in ranch's socket_opts list.
			#{ socket_opts := SocketOpts } = TransportOpts,
			TlsTransportOpts = TransportOpts#{
				socket_opts => SocketOpts ++ [
					{certfile, TlsCertfilePath},
					{keyfile, TlsKeyfilePath}
				]
			},
			cowboy:start_tls(ar_http_iface_listener, TlsTransportOpts, ProtocolOpts)
	end.
%% Map a split request path to its metrics/P3 label; undefined when the path
%% matches no known route.
name_route([]) ->
	"/";
name_route([<<"current_block">>]) ->
	"/current/block";
%% Any single segment of 43+ bytes is treated as a base64url hash
%% (43 characters), optionally followed by an extension.
name_route([<<_Hash:43/binary, _MaybeExt/binary>>]) ->
	"/{hash}[.{ext}]";
name_route([Bin]) ->
	L = binary_to_list(Bin),
	case lists:member(L, ?ENDPOINTS) of
		true ->
			"/" ++ L;
		false ->
			undefined
	end;
name_route([<<"peer">> | _]) ->
	"/peer/...";
name_route([<<"jobs">>, _PrevOutput]) ->
	"/jobs/{prev_output}";
name_route([<<"vdf">>, <<"session">>]) ->
	"/vdf/session";
name_route([<<"vdf2">>, <<"session">>]) ->
	"/vdf2/session";
name_route([<<"vdf3">>, <<"session">>]) ->
	"/vdf3/session";
name_route([<<"vdf4">>, <<"session">>]) ->
	"/vdf4/session";
name_route([<<"vdf">>, <<"previous_session">>]) ->
	"/vdf/previous_session";
name_route([<<"vdf2">>, <<"previous_session">>]) ->
	"/vdf2/previous_session";
name_route([<<"vdf4">>, <<"previous_session">>]) ->
	"/vdf4/previous_session";
name_route([<<"tx">>, <<"pending">>]) ->
	"/tx/pending";
name_route([<<"tx">>, _Hash, <<"status">>]) ->
	"/tx/{hash}/status";
name_route([<<"tx">>, _Hash]) ->
	"/tx/{hash}";
name_route([<<"tx2">>, _Hash]) ->
	"/tx2/{hash}";
name_route([<<"unconfirmed_tx">>, _Hash]) ->
	"/unconfirmed_tx/{hash}";
name_route([<<"unconfirmed_tx2">>, _Hash]) ->
	"/unconfirmed_tx2/{hash}";
name_route([<<"tx">>, _Hash, << "data" >>]) ->
	"/tx/{hash}/data";
name_route([<<"tx">>, _Hash, << "data.", _/binary >>]) ->
	"/tx/{hash}/data.{ext}";
name_route([<<"tx">>, _Hash, << "offset" >>]) ->
	"/tx/{hash}/offset";
name_route([<<"tx">>, _Hash, _Field]) ->
	"/tx/{hash}/{field}";
name_route([<<"chunk">>, _Offset]) ->
	"/chunk/{offset}";
name_route([<<"chunk2">>, _Offset]) ->
	"/chunk2/{offset}";
name_route([<<"data_roots">>, _Offset]) ->
	"/data_roots/{offset}";
name_route([<<"chunk_proof">>, _Offset]) ->
	"/chunk_proof/{offset}";
name_route([<<"chunk_proof2">>, _Offset]) ->
	"/chunk_proof2/{offset}";
name_route([<<"data_sync_record">>, _Start, _Limit]) ->
	"/data_sync_record/{start}/{limit}";
name_route([<<"data_sync_record">>, _Start, _End, _Limit]) ->
	"/data_sync_record/{start}/{end}/{limit}";
name_route([<<"footprints">>, _Partition, _Number]) ->
	"/footprints/{partition}/{footprint_number}";
name_route([<<"price">>, _SizeInBytes]) ->
	"/price/{bytes}";
name_route([<<"price">>, _SizeInBytes, _Addr]) ->
	"/price/{bytes}/{address}";
name_route([<<"price2">>, _SizeInBytes]) ->
	"/price2/{bytes}";
name_route([<<"price2">>, _SizeInBytes, _Addr]) ->
	"/price2/{bytes}/{address}";
name_route([<<"v2price">>, _SizeInBytes]) ->
	"/v2price/{bytes}";
name_route([<<"v2price">>, _SizeInBytes, _Addr]) ->
	"/v2price/{bytes}/{address}";
name_route([<<"optimistic_price">>, _SizeInBytes]) ->
	"/optimistic_price/{bytes}";
name_route([<<"optimistic_price">>, _SizeInBytes, _Addr]) ->
	"/optimistic_price/{bytes}/{address}";
name_route([<<"reward_history">>, _BH]) ->
	"/reward_history/{block_hash}";
name_route([<<"block_time_history">>, _BH]) ->
	"/block_time_history/{block_hash}";
name_route([<<"wallet">>, _Addr, <<"balance">>]) ->
	"/wallet/{addr}/balance";
name_route([<<"wallet">>, _Addr, <<"last_tx">>]) ->
	"/wallet/{addr}/last_tx";
name_route([<<"wallet">>, _Addr, <<"txs">>]) ->
	"/wallet/{addr}/txs";
name_route([<<"wallet">>, _Addr, <<"txs">>, _EarliestTX]) ->
	"/wallet/{addr}/txs/{earliest_tx}";
name_route([<<"wallet">>, _Addr, <<"deposits">>]) ->
	"/wallet/{addr}/deposits";
name_route([<<"wallet">>, _Addr, <<"deposits">>, _EarliestDeposit]) ->
	"/wallet/{addr}/deposits/{earliest_deposit}";
name_route([<<"wallet_list">>, _Root]) ->
	"/wallet_list/{root_hash}";
name_route([<<"wallet_list">>, _Root, _Cursor]) ->
	"/wallet_list/{root_hash}/{cursor}";
name_route([<<"wallet_list">>, _Root, _Addr, <<"balance">>]) ->
	"/wallet_list/{root_hash}/{addr}/balance";
name_route([<<"block_index">>, _From, _To]) ->
	"/block_index/{from}/{to}";
name_route([<<"block_index2">>, _From, _To]) ->
	"/block_index2/{from}/{to}";
name_route([<<"hash_list">>, _From, _To]) ->
	"/hash_list/{from}/{to}";
name_route([<<"hash_list2">>, _From, _To]) ->
	"/hash_list2/{from}/{to}";
name_route([<<"block">>, <<"hash">>, _IndepHash]) ->
	"/block/hash/{indep_hash}";
name_route([<<"block">>, <<"height">>, _Height]) ->
	"/block/height/{height}";
name_route([<<"block2">>, <<"hash">>, _IndepHash]) ->
	"/block2/hash/{indep_hash}";
name_route([<<"block2">>, <<"height">>, _Height]) ->
	"/block2/height/{height}";
name_route([<<"block">>, _Type, _IDBin, _Field]) ->
	"/block/{type}/{id_bin}/{field}";
name_route([<<"block">>, <<"height">>, _Height, <<"wallet">>, _Addr, <<"balance">>]) ->
	"/block/height/{height}/wallet/{addr}/balance";
name_route([<<"block">>, <<"current">>]) ->
	"/block/current";
name_route([<<"coordinated_mining">>, <<"h1">>]) ->
	"/coordinated_mining/h1";
name_route([<<"coordinated_mining">>, <<"h2">>]) ->
	"/coordinated_mining/h2";
name_route([<<"coordinated_mining">>, <<"partition_table">>]) ->
	"/coordinated_mining/partition_table";
name_route([<<"coordinated_mining">>, <<"publish">>]) ->
	"/coordinated_mining/publish";
name_route([<<"coordinated_mining">>, <<"state">>]) ->
	"/coordinated_mining/state";
name_route(_) ->
	undefined.

================================================
FILE: apps/arweave/src/ar_http_req.erl
================================================
-module(ar_http_req).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-export([body/2, read_body_chunk/3, body_read_time/1]).

%% Keys under which the body and its read time are cached on the Req map.
-define(AR_HTTP_REQ_BODY, '_ar_http_req_body').
-define(AR_HTTP_REQ_BODY_READ_TIME, '_ar_http_req_body_read_time').

%% @doc Return the request body, reading it (subject to SizeLimit) on first
%% call and serving the cached copy on subsequent calls.
body(Req, SizeLimit) ->
	case maps:get(?AR_HTTP_REQ_BODY, Req, not_set) of
		not_set ->
			StartTime = erlang:monotonic_time(),
			read_complete_body(Req, SizeLimit, StartTime);
		Body ->
			{ok, Body, Req}
	end.

%% @doc The elapsed time (in native units) to read the request body via `read_complete_body()`
body_read_time(Req) ->
	maps:get(?AR_HTTP_REQ_BODY_READ_TIME, Req, undefined).
%% @doc Read up to Size bytes of the body within Timeout ms; exits with
%% timeout when fewer bytes than requested arrive in time.
read_body_chunk(Req, Size, Timeout) ->
	case cowboy_req:read_body(Req, #{ length => Size, period => Timeout }) of
		{_, Chunk, Req2} when byte_size(Chunk) >= Size ->
			prometheus_counter:inc(http_server_accepted_bytes_total,
					[ar_prometheus_cowboy_labels:label_value(route, #{ req => Req2 })],
					Size),
			{ok, Chunk, Req2};
		{_, Chunk, Req2} ->
			%% Short read: still account for the received bytes before
			%% giving up.
			prometheus_counter:inc(http_server_accepted_bytes_total,
					[ar_prometheus_cowboy_labels:label_value(route, #{ req => Req2 })],
					byte_size(Chunk)),
			exit(timeout)
	end.

%% Read the body in a monitored helper process so the read is bounded both
%% by an overall deadline (?DEFAULT_HTTP_MAX_BODY_READ_TIME_MS) and by
%% per-chunk periods; translate exit_timeout into exit(timeout) for the caller.
read_complete_body(Req, SizeLimit, StartTime) ->
	Parent = self(),
	Ref = make_ref(),
	{Pid, MonRef} = spawn_monitor(
		fun() ->
			do_read_body(Req, Parent, Ref)
		end),
	TRef = erlang:send_after(?DEFAULT_HTTP_MAX_BODY_READ_TIME_MS, self(),
			{body_timeout, Ref}),
	Result = accumulate_body(MonRef, Pid, Ref, Req, [], 0, SizeLimit, StartTime),
	erlang:cancel_timer(TRef),
	flush_messages(Ref),
	demonitor(MonRef, [flush]),
	case Result of
		exit_timeout ->
			exit(timeout);
		_ ->
			Result
	end.

%% Collect the chunks streamed by do_read_body/3 into an iolist, enforcing
%% the size limit and handling the timeout and reader-crash paths.
accumulate_body(MonRef, Pid, Ref, Req, Acc, Size, SizeLimit, StartTime) ->
	receive
		{Ref, _OkOrMore, _Data, DataSize} when Size + DataSize > SizeLimit ->
			exit(Pid, kill),
			{error, body_size_too_large};
		{Ref, more, Data, DataSize} ->
			NewSize = Size + DataSize,
			%% [Acc | Data] is a valid iolist (binary in tail position).
			accumulate_body(MonRef, Pid, Ref, Req, [Acc | Data], NewSize, SizeLimit,
					StartTime);
		{Ref, ok, Data, _DataSize} ->
			Body = iolist_to_binary([Acc | Data]),
			BodyReadTime = erlang:monotonic_time() - StartTime,
			{ok, Body, with_body_req_fields(Req, Body, BodyReadTime)};
		{body_timeout, Ref} ->
			exit(Pid, kill),
			exit_timeout;
		{'DOWN', MonRef, process, Pid, timeout} ->
			exit_timeout;
		{'DOWN', MonRef, process, Pid, Reason} ->
			{error, Reason}
	after ?DEFAULT_HTTP_READ_BODY_PERIOD_MS + 2000 ->
		exit(Pid, kill),
		exit_timeout
	end.
%% Helper process body: stream body chunks back to the parent as
%% {Ref, more | ok, Data, DataSize} messages, counting accepted bytes.
do_read_body(Req, Parent, Ref) ->
	{MoreOrOk, Data, ReadReq} = cowboy_req:read_body(Req, #{
		period => ?DEFAULT_HTTP_READ_BODY_PERIOD_MS,
		timeout => ?DEFAULT_HTTP_READ_BODY_PERIOD_MS + 1000
	}),
	DataSize = byte_size(Data),
	prometheus_counter:inc(
		http_server_accepted_bytes_total,
		[ar_prometheus_cowboy_labels:label_value(route, #{ req => Req })],
		DataSize),
	Parent ! {Ref, MoreOrOk, Data, DataSize},
	case MoreOrOk of
		ok ->
			ok;
		more ->
			do_read_body(ReadReq, Parent, Ref)
	end.

%% Drain any stale reader/timer messages for Ref before returning.
flush_messages(Ref) ->
	receive
		{body_timeout, Ref} ->
			flush_messages(Ref);
		{Ref, _, _, _} ->
			flush_messages(Ref)
	after 0 ->
		ok
	end.

%% Cache the body and its read time on the cowboy Req map.
with_body_req_fields(Req, Body, BodyReadTime) ->
	Req#{
		?AR_HTTP_REQ_BODY => Body,
		?AR_HTTP_REQ_BODY_READ_TIME => BodyReadTime
	}.

================================================
FILE: apps/arweave/src/ar_http_sup.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

-module(ar_http_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").

%% ===================================================================
%% API functions
%% ===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

%% One-for-one supervision of the single ar_http worker.
init([]) ->
	{ok, {{one_for_one, 5, 10}, [?CHILD(ar_http, worker)]}}.

================================================
FILE: apps/arweave/src/ar_http_util.erl
================================================
-module(ar_http_util).

-export([get_tx_content_type/1, arweave_peer/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Matches strings consisting solely of printable ASCII characters.
-define(PRINTABLE_ASCII_REGEX, "^[ -~]*$").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Extract and validate the Content-Type tag of a transaction.
%% Returns {valid, ContentType} when the value is printable ASCII,
%% invalid when it is not, and none when the tag is absent.
get_tx_content_type(#tx { tags = Tags }) ->
	case lists:keyfind(<<"Content-Type">>, 1, Tags) of
		{<<"Content-Type">>, ContentType} ->
			case is_valid_content_type(ContentType) of
				true -> {valid, ContentType};
				false -> invalid
			end;
		false ->
			none
	end.

%%--------------------------------------------------------------------
%% @doc Check and validate the `x-p2p-port' header.
%% @end
%%--------------------------------------------------------------------
%% Fixed spec: IPv4 octets range over 0..255 (byte()); pos_integer()
%% wrongly excluded 0 (e.g. 10.0.0.1).
-spec arweave_peer(Req) -> Return when
	Req :: cowboy:req(),
	Return :: {A, A, A, A, Port},
	A :: byte(),
	Port :: pos_integer().

arweave_peer(Req) ->
	P2PPort = cowboy_req:header(<<"x-p2p-port">>, Req),
	{IP, _Port} = cowboy_req:peer(Req),
	arweave_peer2(P2PPort, IP).

%%--------------------------------------------------------------------
%% @private
%% @hidden
%%--------------------------------------------------------------------
-spec arweave_peer2(Binary, IP) -> Return when
	Binary :: binary() | undefined,
	IP :: {A, A, A, A},
	Return :: {A, A, A, A, Port},
	A :: byte(),
	Port :: pos_integer().

%% Accept the advertised port only when it parses to 1..65535; otherwise
%% fall back to the default interface port.
arweave_peer2(Binary, {A, B, C, D}) when is_binary(Binary) ->
	try binary_to_integer(Binary) of
		P when P >= 1 andalso P =< 65535 ->
			{A, B, C, D, P};
		_ ->
			{A, B, C, D, ?DEFAULT_HTTP_IFACE_PORT}
	catch _:_ ->
		{A, B, C, D, ?DEFAULT_HTTP_IFACE_PORT}
	end;
arweave_peer2(_, {A, B, C, D}) ->
	{A, B, C, D, ?DEFAULT_HTTP_IFACE_PORT}.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Accept only printable ASCII; dollar_endonly prevents a trailing
%% newline from matching "$".
is_valid_content_type(ContentType) ->
	case re:run(
			ContentType,
			?PRINTABLE_ASCII_REGEX,
			[dollar_endonly, {capture, none}]
	) of
		match -> true;
		nomatch -> false
	end.
%% @doc Exercise arweave_peer/1 with a mock cowboy request map: missing,
%% minimal, maximal, out-of-range, and non-numeric x-p2p-port headers.
arweave_peer_test() ->
	[
		% an undefined x-p2p-port header should return the
		% default arweave port
		?assertEqual(
			{1,2,3,4, ?DEFAULT_HTTP_IFACE_PORT},
			arweave_peer(#{ headers => #{}, peer => {{1,2,3,4}, 1234} })
		),
		% 1/TCP port is valid
		?assertEqual(
			{1,2,3,4, 1},
			arweave_peer(#{
				headers => #{ <<"x-p2p-port">> => <<"1">> },
				peer => {{1,2,3,4}, 1234}
			})
		),
		% 65535/TCP port is valid
		?assertEqual(
			{1,2,3,4, 65535},
			arweave_peer(#{
				headers => #{ <<"x-p2p-port">> => <<"65535">> },
				peer => {{1,2,3,4}, 1234}
			})
		),
		% 0/TCP port is invalid
		?assertEqual(
			{1,2,3,4, ?DEFAULT_HTTP_IFACE_PORT},
			arweave_peer(#{
				headers => #{ <<"x-p2p-port">> => <<"0">> },
				peer => {{1,2,3,4}, 1234}
			})
		),
		% 65536/TCP port is invalid
		?assertEqual(
			{1,2,3,4, ?DEFAULT_HTTP_IFACE_PORT},
			arweave_peer(#{
				headers => #{ <<"x-p2p-port">> => <<"65536">> },
				peer => {{1,2,3,4}, 1234}
			})
		),
		% a TCP port must be an integer, if not, a default
		% port is returned.
		?assertEqual(
			{1,2,3,4, ?DEFAULT_HTTP_IFACE_PORT},
			arweave_peer(#{
				headers => #{ <<"x-p2p-port">> => <<"test">> },
				peer => {{1,2,3,4}, 1234}
			})
		)
	].



================================================
FILE: apps/arweave/src/ar_ignore_registry.erl
================================================
%%% @doc The module offers an interface to the "ignore registry" -
%%% an in-memory storage used for avoiding redundant processing of
%%% blocks and transactions, in the setting of historically synchronous
%%% POST /block and POST /tx requests. An incoming block or transaction is
%%% temporarily placed in the registry. Requests with the same identifiers
%%% are ignored for the time. After a block or a transaction is validated
%%% a permanent record can be inserted into the registry.
%%% @end
-module(ar_ignore_registry).

-export([add/1, add_ref/2, add_ref/3, remove/1, remove_ref/2,
		add_temporary/2, remove_temporary/2, member/1, permanent_member/1]).

%% @doc Put a permanent ID record into the registry.
%% NOTE(review): the ignored_ids ETS table is presumably created elsewhere
%% during node startup - confirm before calling in isolation.
add(ID) ->
	ets:insert(ignored_ids, {ID, permanent}).
%% @doc Remove a permanent ID record from the registry.
%% The catch makes removal best-effort (no crash if the table is gone).
remove(ID) ->
	catch ets:delete_object(ignored_ids, {ID, permanent}).

%% @doc Put a referenced ID record into the registry.
%% The record may be removed by ar_ignore_registry:remove_ref/2.
%% Auto-expires after 10 seconds via the scheduled removal below.
add_ref(ID, Ref) ->
	add_ref(ID, Ref, 10000).

%% @doc Put a referenced ID record into the registry and schedule its
%% removal after Timeout milliseconds.
add_ref(ID, Ref, Timeout) ->
	ets:insert(ignored_ids, {ID, {ref, Ref}}),
	{ok, _} = ar_timer:apply_after(
		Timeout,
		ar_ignore_registry,
		remove_ref,
		[ID, Ref],
		%% Run the removal even during shutdown so the registry stays clean.
		#{ skip_on_shutdown => false }
	).

%% @doc Remove a referenced ID record from the registry.
remove_ref(ID, Ref) ->
	catch ets:delete_object(ignored_ids, {ID, {ref, Ref}}).

%% @doc Put a temporary ID record into the registry.
%% The record expires after Timeout milliseconds. The fresh reference ties
%% the scheduled removal to this particular insertion, so a later re-add
%% is not removed by an earlier timer.
add_temporary(ID, Timeout) ->
	Ref = make_ref(),
	ets:insert(ignored_ids, {ID, {temporary, Ref}}),
	{ok, _} = ar_timer:apply_after(
		Timeout,
		ar_ignore_registry,
		remove_temporary,
		[ID, Ref],
		#{ skip_on_shutdown => false }
	).

%% @doc Remove the temporary record from the registry.
remove_temporary(ID, Ref) ->
	catch ets:delete_object(ignored_ids, {ID, {temporary, Ref}}).

%% @doc Check if there is a temporary or a permanent record in the registry.
member(ID) ->
	case ets:lookup(ignored_ids, ID) of
		[] ->
			false;
		_ ->
			true
	end.

%% @doc Check if there is a permanent record in the registry.
permanent_member(ID) ->
	Entries = ets:lookup(ignored_ids, ID),
	lists:member({ID, permanent}, Entries).



================================================
FILE: apps/arweave/src/ar_inflation.erl
================================================
%%% @doc Module responsible for managing and testing the inflation schedule of
%%% the Arweave main network.
-module(ar_inflation).

-export([calculate/1, blocks_per_year/1]).

-include_lib("arweave/include/ar_inflation.hrl").

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Calculate the static reward received for mining a given block.
%% This reward portion depends only on block height, not the number of transactions.
-ifdef(AR_TEST).
calculate(_Height) ->
	10.
-else.
calculate(Height) ->
	calculate2(Height).
-endif.

%% Dispatch on the era the height belongs to: pre-1.5 formula, the first
%% year after 1.5 (with the extra reward term), and the post-year-1 curve,
%% which switches from float to integer arithmetic at the 2.5 fork.
calculate2(Height) when Height =< ?FORK_15_HEIGHT ->
	pre_15_calculate(Height);
calculate2(Height) when Height =< ?PRE_25_BLOCKS_PER_YEAR ->
	calculate_base(Height) + ?POST_15_Y1_EXTRA;
calculate2(Height) ->
	case Height >= ar_fork:height_2_5() of
		true ->
			calculate_base(Height);
		false ->
			calculate_base_pre_fork_2_5(Height)
	end.

%% @doc An estimation for the number of blocks produced in a year.
%% Note: I've confirmed that when TARGET_BLOCK_TIME = 120 the following equation is
%% exactly equal to `30 * 24 * 365` when executed within an Erlang shell (i.e. 262800).
blocks_per_year(Height) ->
	((60 * 60 * 24 * 365) div ar_testnet:target_block_time(Height)).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Pre-1.5.0.0 style reward calculation.
%% Flat reward of 1 Winston for the first quarter-year of blocks, then an
%% exponentially decaying share of the genesis supply (float arithmetic).
pre_15_calculate(Height) ->
	RewardDelay = (?PRE_15_BLOCKS_PER_YEAR)/4,
	case Height =< RewardDelay of
		true ->
			1;
		false ->
			?WINSTON_PER_AR
				* 0.2
				* ?GENESIS_TOKENS
				* math:pow(2, -(Height - RewardDelay) / ?PRE_15_BLOCKS_PER_YEAR)
				* math:log(2)
				/ ?PRE_15_BLOCKS_PER_YEAR
	end.

%% @doc Integer-only inflation curve: 0.2 * GENESIS * 2^(-Height/BPY) * ln(2) / BPY,
%% expressed in Winston, using a fractional approximation of the natural
%% exponent so the result is deterministic across platforms (consensus-critical).
calculate_base(Height) ->
	{Ln2Dividend, Ln2Divisor} = ?LN2,
	Dividend = Height * Ln2Dividend,
	Divisor = blocks_per_year(Height) * Ln2Divisor,
	Precision = ?INFLATION_NATURAL_EXPONENT_DECIMAL_FRACTION_PRECISION,
	{EXDividend, EXDivisor} = ar_fraction:natural_exponent({Dividend, Divisor}, Precision),
	?GENESIS_TOKENS
		* ?WINSTON_PER_AR
		* EXDivisor
		* 2
		* Ln2Dividend
		div (
			10
			* blocks_per_year(Height)
			* Ln2Divisor
			* EXDividend
		).

%% @doc The same curve as calculate_base/1 computed in floating point;
%% only used for heights before the 2.5 fork.
calculate_base_pre_fork_2_5(Height) ->
	?WINSTON_PER_AR * (
		0.2
		* ?GENESIS_TOKENS
		* math:pow(2, -(Height) / ?PRE_25_BLOCKS_PER_YEAR)
		* math:log(2)
	) / ?PRE_25_BLOCKS_PER_YEAR.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Test that the within tolerance helper function works as anticipated.
is_in_tolerance_test() ->
	true = is_in_tolerance(100, 100.5, 1),
	false = is_in_tolerance(100, 101.5, 1),
	true = is_in_tolerance(100.9, 100, 1),
	false = is_in_tolerance(101.1, 100, 1),
	true = is_in_tolerance(100.0001, 100, 0.01),
	false = is_in_tolerance(100.0001, 100, 0.00009),
	true = is_in_tolerance(?AR(100 * 1000000), ?AR(100 * 1000000) + 10, 0.01).

%%% Calculate and verify per-year expected and actual inflation.
%%% Each year's total reward should be half of the previous year's.

year_1_test_() ->
	{timeout, 60, fun test_year_1/0}.

test_year_1() ->
	true = is_in_tolerance(year_sum_rewards(0), ?AR(5500000)).

year_2_test_() ->
	{timeout, 60, fun test_year_2/0}.

test_year_2() ->
	true = is_in_tolerance(year_sum_rewards(1), ?AR(2750000)).

year_3_test_() ->
	{timeout, 60, fun test_year_3/0}.

test_year_3() ->
	true = is_in_tolerance(year_sum_rewards(2), ?AR(1375000)).

year_4_test_() ->
	{timeout, 60, fun test_year_4/0}.

test_year_4() ->
	true = is_in_tolerance(year_sum_rewards(3), ?AR(687500)).

year_5_test_() ->
	{timeout, 60, fun test_year_5/0}.

test_year_5() ->
	true = is_in_tolerance(year_sum_rewards(4), ?AR(343750)).

year_6_test_() ->
	{timeout, 60, fun test_year_6/0}.

test_year_6() ->
	true = is_in_tolerance(year_sum_rewards(5), ?AR(171875)).

year_7_test_() ->
	{timeout, 60, fun test_year_7/0}.

test_year_7() ->
	true = is_in_tolerance(year_sum_rewards(6), ?AR(85937.5)).

year_8_test_() ->
	{timeout, 60, fun test_year_8/0}.

test_year_8() ->
	true = is_in_tolerance(year_sum_rewards(7), ?AR(42968.75)).

year_9_test_() ->
	{timeout, 60, fun test_year_9/0}.

test_year_9() ->
	true = is_in_tolerance(year_sum_rewards(8), ?AR(21484.375)).

year_10_test_() ->
	{timeout, 60, fun test_year_10/0}.

test_year_10() ->
	true = is_in_tolerance(year_sum_rewards(9), ?AR(10742.1875)).

%% @doc Is the value X within TolerancePercent of Y.
is_in_tolerance(X, Y) ->
	is_in_tolerance(X, Y, ?DEFAULT_TOLERANCE_PERCENT).

is_in_tolerance(X, Y, TolerancePercent) ->
	Tolerance = TolerancePercent / 100,
	( X >= ( Y * (1 - Tolerance ) ) ) and
	( X =< ( Y + (Y * Tolerance ) ) ).

%% @doc Count the total inflation rewards for a given year.
year_sum_rewards(YearNum) ->
	year_sum_rewards(YearNum, fun calculate2/1).

year_sum_rewards(YearNum, Fun) ->
	sum_rewards(
		Fun,
		(YearNum * trunc(?PRE_25_BLOCKS_PER_YEAR)),
		((YearNum + 1) * trunc(?PRE_25_BLOCKS_PER_YEAR))
	).

%% @doc Calculate the reward sum between two blocks.
sum_rewards(Fun, Start, End) ->
	lists:sum(lists:map(Fun, lists:seq(Start, End))).



================================================
FILE: apps/arweave/src/ar_info.erl
================================================
%%%
%%% @doc Gathers the data for the /info and /recent endpoints.
%%%
-module(ar_info).

-export([get_info/0, get_recent/0]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_chain_stats.hrl").

%% @doc Build the /info response map. The two timer:tc calls double as a
%% crude measurement of node-state responsiveness, reported as the average
%% of the two call latencies under <<"node_state_latency">>.
get_info() ->
	{Time, Current} = timer:tc(fun() -> ar_node:get_current_block_hash() end),
	{Time2, Height} = timer:tc(fun() -> ar_node:get_height() end),
	[{_, BlockCount}] = ets:lookup(ar_header_sync, synced_blocks),
	#{
		<<"network">> => list_to_binary(?NETWORK_NAME),
		<<"version">> => ?CLIENT_VERSION,
		<<"release">> => ?RELEASE_NUMBER,
		%% -1 signals the node has not joined the network yet.
		<<"height">> =>
			case Height of
				not_joined -> -1;
				H -> H
			end,
		<<"current">> =>
			case is_atom(Current) of
				true -> atom_to_binary(Current, utf8);
				false -> ar_util:encode(Current)
			end,
		<<"blocks">> => BlockCount,
		<<"peers">> => prometheus_gauge:value(arweave_peer_count),
		<<"queue_length">> =>
			element(
				2,
				erlang:process_info(whereis(ar_node_worker), message_queue_len)
			),
		<<"node_state_latency">> => (Time + Time2) div 2
	}.
%% @doc Build the /recent response: the recent blocks and the recent forks.
get_recent() ->
	#{
		%% #{
		%%   "id": ,
		%%   "received": ",
		%%   "height":
		%% }
		<<"blocks">> => get_recent_blocks(),
		%% #{
		%%   "id": ,
		%%   "height": ,
		%%   "timestamp":
		%%   "blocks": [, , ...]
		%% }
		<<"forks">> => get_recent_forks()
	}.

%% @doc Return the most recent blocks in reverse chronological order.
%%
%% There are a few list reversals that happen here:
%% 1. get_block_anchors returns the blocks in reverse chronological order (latest block first)
%% 2. [Element | Acc] reverses the list into chronological order (latest block last)
%% 3. The final lists:reverse puts the list back into reverse chronological order
%%    (latest block first)
get_recent_blocks() ->
	Anchors = lists:sublist(ar_node:get_block_anchors(), ?CHECKPOINT_DEPTH),
	Blocks = lists:foldl(
		fun(H, Acc) ->
			B = ar_block_cache:get(block_cache, H),
			[#{
				<<"id">> => ar_util:encode(H),
				%% length(Acc) is the depth of the block being processed.
				<<"received">> => get_block_timestamp(B, length(Acc)),
				<<"height">> => B#block.height
			} | Acc]
		end,
		[],
		Anchors
	),
	lists:reverse(Blocks).

%% @doc Return the most recent forks in reverse chronological order.
get_recent_forks() ->
	CutOffTime = os:system_time(seconds) - ?RECENT_FORKS_AGE,
	case ar_chain_stats:get_forks(CutOffTime) of
		{error, _} ->
			error;
		Forks ->
			%% 1. We receive forks in ascending order (oldest first)
			%% 2. But since we want to truncate the list to only include the most recent forks,
			%%    we first reverse...
			ReversedForks = lists:reverse(Forks),
			%% 3. Then truncate...
			TruncatedForks = lists:sublist(ReversedForks, ?RECENT_FORKS_LENGTH),
			%% 4. Then convert to JSON maps
			%%    (which reverses the list again due to list prepending)
			RecentForks = lists:foldl(
				fun(Fork, Acc) ->
					#fork{ id = ID, height = Height, timestamp = Timestamp,
							block_ids = BlockIDs} = Fork,
					[#{
						<<"id">> => ar_util:encode(ID),
						<<"height">> => Height,
						%% Stored in milliseconds; reported in seconds.
						<<"timestamp">> => Timestamp div 1000,
						<<"blocks">> => [ ar_util:encode(BlockID) || BlockID <- BlockIDs ]
					} | Acc]
				end,
				[],
				TruncatedForks
			),
			%% 5. Then finally reverse the list again so we end up with forks in descending
			%%    order (newest first)
			lists:reverse(RecentForks)
	end.

%% @doc Report <<"pending">> for the newest blocks (their receive time is not
%% yet final) and for blocks without a recorded receive timestamp; otherwise
%% convert the recorded timestamp to seconds.
get_block_timestamp(B, Depth)
		when Depth < ?RECENT_BLOCKS_WITHOUT_TIMESTAMP
			orelse B#block.receive_timestamp =:= undefined ->
	<<"pending">>;
get_block_timestamp(B, _Depth) ->
	ar_util:timestamp_to_seconds(B#block.receive_timestamp).



================================================
FILE: apps/arweave/src/ar_intervals.erl
================================================
%%% @doc A set of non-overlapping intervals.
%%% Intervals are stored in a gb_sets ordered set as {End, Start} pairs,
%%% half-open: Start is excluded, End is included.
-module(ar_intervals).

-export([new/0, from_list/1, add/3, delete/3, cut/2, is_inside/2, sum/1, union/2,
		serialize/2, safe_from_etf/1, count/1, is_empty/1, take_smallest/1,
		take_largest/1, largest/1, smallest/1, to_list/1, iterator_from/2, next/1,
		fold/3, outerjoin/2, intersection/2]).

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Create an empty set of intervals.
new() ->
	gb_sets:new().

%% @doc Create a set from a list of {End, Start} pairs.
from_list(L) ->
	lists:foldl(fun({End, Start}, Acc) -> add(Acc, End, Start) end, new(), L).

%% @doc Add a new interval. Intervals are compacted - e.g., (2, 1) and (1, 0) are joined
%% into (2, 0). Also, if two intervals intersect each other, they are joined.
%% @end
add(Intervals, End, Start) when End > Start ->
	%% Start the scan just before Start so adjacent intervals are found too.
	Iter = gb_sets:iterator_from({Start - 1, Start - 1}, Intervals),
	add2(Iter, Intervals, End, Start).

%% @doc Remove the given interval from the set.
delete(Intervals, End, Start) ->
	Iter = gb_sets:iterator_from({Start - 1, Start - 1}, Intervals),
	delete2(Iter, Intervals, End, Start).

%% @doc Remove the interval above the given cut. If there is an interval containing
%% the cut, replace it with its part up to the cut.
%% @end
cut(Intervals, Cut) ->
	case gb_sets:size(Intervals) of
		0 ->
			Intervals;
		_ ->
			case gb_sets:take_largest(Intervals) of
				{{_, Start}, UpdatedIntervals} when Start >= Cut ->
					%% The whole largest interval lies above the cut - drop it.
					cut(UpdatedIntervals, Cut);
				{{End, Start}, UpdatedIntervals} when End > Cut ->
					%% The largest interval straddles the cut - truncate it.
					gb_sets:add_element({Cut, Start}, UpdatedIntervals);
				_ ->
					Intervals
			end
	end.

%% @doc Return true if the given number is inside one of the intervals, false otherwise.
%% The left bounds of the intervals are excluded from search, the right bounds are included.
%% @end
is_inside(Intervals, Number) ->
	Iter = gb_sets:iterator_from({Number - 1, Number - 1}, Intervals),
	case gb_sets:next(Iter) of
		none ->
			false;
		{{Number, _Start}, _Iter} ->
			true;
		{{_End, Start}, _Iter} when Number > Start ->
			true;
		_ ->
			false
	end.

%% @doc Return the sum of the lengths of the intervals.
sum(Intervals) ->
	gb_sets:fold(fun({End, Start}, Acc) -> Acc + End - Start end, 0, Intervals).

%% @doc Return the set of intervals consisting of the points of intervals from both sets.
union(I1, I2) ->
	%% Fold the smaller set into the larger one to minimize work.
	{Longer, Shorter} =
		case gb_sets:size(I1) > gb_sets:size(I2) of
			true ->
				{I1, I2};
			false ->
				{I2, I1}
		end,
	gb_sets:fold(
		fun({End, Start}, Acc) ->
			add(Acc, End, Start)
		end,
		Longer,
		Shorter
	).

%% @doc Serialize a subset of the intervals using the requested format, etf | json.
%% The subset is always smaller than or equal to Limit. If random_subset key is present,
%% the chosen subset is random. Otherwise, the right bound of the first interval is
%% greater than or equal to start.
serialize(#{ random_subset := _, limit := Limit, format := Format }, Intervals) ->
	serialize_random_subset(Intervals, Limit, Format);
serialize(#{ start := Start, limit := Limit, format := Format } = Args, Intervals) ->
	RightBound = maps:get(right_bound, Args, infinity),
	serialize_subset(Intervals, Start, RightBound, Limit, Format).

%% @doc Convert the binary produced by to_etf/2 into the set of intervals.
%% Return {error, invalid} if the binary is not a valid ETF representation of the
%% non-overlapping intervals.
%% @end
safe_from_etf(Binary) ->
	case catch from_etf(Binary) of
		{ok, Intervals} ->
			{ok, Intervals};
		_ ->
			{error, invalid}
	end.

%% @doc Return the number of intervals in the set.
count(Intervals) ->
	gb_sets:size(Intervals).

%% @doc Return true if the set of intervals is empty, false otherwise.
is_empty(Intervals) ->
	gb_sets:is_empty(Intervals).

%% @doc Return {Interval, Intervals2} when Interval is the interval with the smallest
%% right bound and Intervals2 is the set of intervals with this interval removed.
%% @end
take_smallest(Intervals) ->
	gb_sets:take_smallest(Intervals).

%% @doc Return {Interval, Intervals2} when Interval is the interval with the largest
%% right bound and Intervals2 is the set of intervals with this interval removed.
%% @end
take_largest(Intervals) ->
	gb_sets:take_largest(Intervals).

%% @doc A proxy for gb_sets:smallest/1.
smallest(Intervals) ->
	gb_sets:smallest(Intervals).

%% @doc A proxy for gb_sets:largest/1.
largest(Intervals) ->
	gb_sets:largest(Intervals).

%% @doc A proxy for gb_sets:iterator_from/2.
iterator_from(Interval, Intervals) ->
	gb_sets:iterator_from(Interval, Intervals).

%% @doc A proxy for gb_sets:next/1.
next(Iterator) ->
	gb_sets:next(Iterator).

%% @doc A proxy for gb_sets:fold/3.
fold(Fun, Acc, Intervals) ->
	gb_sets:fold(Fun, Acc, Intervals).

%% @doc A proxy for gb_sets:to_list/1.
to_list(Intervals) ->
	gb_sets:to_list(Intervals).

%% @doc Return a set of intervals containing the points from the second given set of
%% intervals and excluding the points from the first given set of intervals.
outerjoin(I1, I2) ->
	%% intersection(inverse(I1), I2) also works but slower because inverse is relatively
	%% expensive. intersection(I1, I2) is expected to be relatively small so inverting it
	%% is quick.
	intersection(inverse(intersection(I1, I2)), I2).

%% @doc Return the set of intervals - the intersection of the two given sets.
intersection(I1, I2) ->
	case gb_sets:is_empty(I1) orelse gb_sets:is_empty(I2) of
		true ->
			new();
		false ->
			%% Start both iterators just before the leftmost point of either set;
			%% everything earlier cannot contribute to the intersection.
			{_, Start1} = gb_sets:smallest(I1),
			{_, Start2} = gb_sets:smallest(I2),
			Start = min(Start1, Start2),
			intersection(gb_sets:iterator_from({Start, infinity}, I1),
					gb_sets:iterator_from({Start, infinity}, I2), new())
	end.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Merge the interval (End, Start) into the set: repeatedly absorb every
%% existing interval that touches or overlaps it, then insert the result.
add2(Iter, Intervals, End, Start) ->
	case gb_sets:next(Iter) of
		none ->
			gb_sets:add_element({End, Start}, Intervals);
		{{End2, Start2}, Iter2} when End >= Start2 andalso Start =< End2 ->
			End3 = max(End, End2),
			Start3 = min(Start, Start2),
			add2(Iter2, gb_sets:del_element({End2, Start2}, Intervals), End3, Start3);
		_ ->
			gb_sets:add_element({End, Start}, Intervals)
	end.

%% Remove (End, Start) from the set: every overlapping interval is deleted
%% and its parts outside the removed range are re-inserted.
delete2(Iter, Intervals, End, Start) ->
	case gb_sets:next(Iter) of
		none ->
			Intervals;
		{{End2, Start2}, Iter2} when End >= Start2 andalso Start =< End2 ->
			Intervals2 = gb_sets:del_element({End2, Start2}, Intervals),
			Intervals3 =
				case End2 > End of
					true ->
						gb_sets:insert({End2, End}, Intervals2);
					false ->
						Intervals2
				end,
			Intervals4 =
				case Start > Start2 of
					true ->
						gb_sets:insert({Start, Start2}, Intervals3);
					false ->
						Intervals3
				end,
			delete2(Iter2, Intervals4, End, Start);
		_ ->
			Intervals
	end.

%% Pick up to Limit intervals by sampling random offsets in [1, Largest]
%% and collecting the interval at or after each offset; duplicates collapse
%% because the picks are merged into a fresh interval set.
serialize_random_subset(Intervals, Limit, Format) ->
	case gb_sets:is_empty(Intervals) of
		true ->
			serialize_empty(Format);
		false ->
			{Largest, _} = gb_sets:largest(Intervals),
			RandomOffsets = [rand:uniform(Largest) || _ <- lists:seq(1, Limit)],
			serialize_random_subset(Intervals, RandomOffsets, Format,
					ar_intervals:new(), Limit)
	end.

serialize_empty(etf) ->
	term_to_binary([]);
serialize_empty(json) ->
	jiffy:encode([]).

serialize_random_subset(_Intervals, [], Format, PickedIntervals, Limit) ->
	serialize_subset(PickedIntervals, 0, infinity, Limit, Format);
serialize_random_subset(Intervals, [Offset | Offsets], Format, PickedIntervals, Limit) ->
	Iter = gb_sets:iterator_from({Offset, 0}, Intervals),
	case gb_sets:next(Iter) of
		none ->
			serialize_random_subset(Intervals, Offsets, Format, PickedIntervals, Limit);
		{{End, Start}, _} ->
			serialize_random_subset(Intervals, Offsets, Format,
					ar_intervals:add(PickedIntervals, End, Start), Limit)
	end.

serialize_list(L, etf) ->
	term_to_binary(L);
serialize_list(L, json) ->
	jiffy:encode(L).

%% ETF packs each bound as a 256-bit big-endian binary; JSON maps the right
%% bound (as a decimal string) to the left bound.
serialize_item(End, Start, etf) ->
	{<< End:256 >>, << Start:256 >>};
serialize_item(End, Start, json) ->
	#{ integer_to_binary(End) => integer_to_binary(Start) }.

serialize_subset(Intervals, Start, End, Limit, Format) ->
	case gb_sets:is_empty(Intervals) of
		true ->
			serialize_empty(Format);
		false ->
			Iterator = gb_sets:iterator_from({Start, 0}, Intervals),
			serialize_subset(Iterator, [], 0, End, Limit, Format)
	end.

serialize_subset(_Iterator, L, Count, _RightBound, Limit, Format) when Count == Limit ->
	serialize_list(L, Format);
serialize_subset(Iterator, L, Count, RightBound, Limit, Format) ->
	case gb_sets:next(Iterator) of
		{{End, Start}, Iterator2} when Start < RightBound ->
			%% Clip the interval at RightBound if it extends past it.
			End2 = min(End, RightBound),
			L2 = [serialize_item(End2, Start, Format) | L],
			serialize_subset(Iterator2, L2, Count + 1, RightBound, Limit, Format);
		_ ->
			serialize_list(L, Format)
	end.

%% Decode the ETF list; the R parameter enforces strictly descending,
%% non-overlapping intervals, so malformed input fails the guard and is
%% rejected by safe_from_etf/1.
from_etf(Binary) ->
	L = binary_to_term(Binary, [safe]),
	from_etf(L, infinity, new()).

from_etf([], _, Intervals) ->
	{ok, Intervals};
from_etf([{<< End:256 >>, << Start:256 >>} | List], R, Intervals)
		when End > Start andalso R > End andalso Start >= 0 ->
	from_etf(List, Start, gb_sets:add_element({End, Start}, Intervals)).

%% Complement of the set over [0, infinity).
inverse(Intervals) ->
	inverse(gb_sets:iterator(Intervals), 0, new()).
%%%===================================================================

%% @doc End-to-end exercise of the interval-set API: construction, membership,
%% deletion, cutting, serialization round-trips, and outer joins.
intervals_test() ->
	I = new(),
	?assertEqual(0, count(I)),
	?assertEqual(0, sum(I)),
	?assert(not is_inside(I, 0)),
	?assert(not is_inside(I, 1)),
	?assertEqual(<<"[]">>, serialize(#{ random_subset => true, format => json,
			limit => 1 }, I)),
	?assertEqual(<<"[]">>, serialize(#{ start => 0, format => json, limit => 1 }, I)),
	?assertEqual(<<"[]">>, serialize(#{ start => 1, format => json, limit => 1 }, I)),
	?assertEqual(
		{ok, new()},
		safe_from_etf(serialize(#{ random_subset => true, format => etf, limit => 1 }, I))
	),
	?assertEqual(new(), outerjoin(I, I)),
	?assertEqual(new(), delete(I, 2, 1)),
	I2 = add(I, 2, 1),
	?assertEqual(1, count(I2)),
	?assertEqual(1, sum(I2)),
	?assert(not is_inside(I2, 0)),
	%% The left bound is exclusive, the right bound inclusive.
	?assert(not is_inside(I2, 1)),
	?assert(is_inside(I2, 2)),
	?assert(not is_inside(I2, 3)),
	?assertEqual(new(), delete(I2, 2, 1)),
	?assertEqual(new(), delete(I2, 2, 0)),
	?assertEqual(new(), delete(I2, 3, 1)),
	?assertEqual(new(), delete(I2, 3, 0)),
	?assertEqual(new(), cut(I2, 1)),
	?assertEqual(new(), cut(I2, 0)),
	compare(I2, cut(I2, 2)),
	compare(I2, cut(I2, 3)),
	?assertEqual(
		<<"[{\"2\":\"1\"}]">>,
		serialize(#{ random_subset => true, limit => 1, format => json }, I2)
	),
	?assertEqual(
		<<"[]">>,
		serialize(#{ random_subset => true, limit => 0, format => json }, I2)
	),
	?assertEqual(
		<<"[{\"2\":\"1\"}]">>,
		serialize(#{ start => 2, limit => 1, format => json }, I2)
	),
	?assertEqual(
		<<"[]">>,
		serialize(#{ start => 3, limit => 1, format => json }, I2)
	),
	?assertEqual(
		<<"[]">>,
		serialize(#{ start => 2, limit => 0, format => json }, I2)
	),
	{ok, I2_FromETF} =
		safe_from_etf(serialize(#{ format => etf, limit => 1, random_subset => true }, I2)),
	compare(I2, I2_FromETF),
	?assertEqual(
		{ok, new()},
		safe_from_etf(serialize(#{ format => etf, limit => 0, random_subset => true }, I2))
	),
	compare(I2, add(I2, 2, 1)),
	compare(add(new(), 3, 1), add(I2, 3, 1)),
	compare(add(new(), 2, 0), add(I2, 2, 0)),
	?assertEqual(new(), outerjoin(I2, I)),
	compare(add(add(new(), 1, 0), 3, 2), outerjoin(I2, add(new(), 3, 0))),
	I3 = add(I2, 6, 3),
	?assertEqual(2, count(I3)),
	?assertEqual(4, sum(I3)),
	?assert(not is_inside(I3, 0)),
	?assert(not is_inside(I3, 1)),
	?assert(is_inside(I3, 2)),
	?assert(not is_inside(I3, 3)),
	?assert(is_inside(I3, 4)),
	?assert(is_inside(I3, 5)),
	?assert(is_inside(I3, 6)),
	compare(add(add(add(new(), 2, 1), 6, 5), 4, 3), delete(I3, 5, 4)),
	compare(add(new(), 6, 5), delete(I3, 5, 1)),
	compare(add(new(), 10, 0), add(I3, 10, 0)),
	?assertEqual(new(), cut(I3, 1)),
	?assertEqual(new(), cut(I3, 0)),
	?assertEqual(I2, cut(I3, 2)),
	?assertEqual(I2, cut(I3, 3)),
	compare(add(I2, 4, 3), cut(I3, 4)),
	compare(add(I2, 5, 3), cut(I3, 5)),
	compare(I3, cut(I3, 6)),
	?assertEqual(
		<<"[{\"6\":\"3\"},{\"2\":\"1\"}]">>,
		serialize(#{ random_subset => true, limit => 1000, format => json }, I3)
	),
	?assertEqual(
		<<"[{\"6\":\"3\"},{\"2\":\"1\"}]">>,
		serialize(#{ start => 1, limit => 10, format => json }, I3)
	),
	?assertEqual(
		<<"[{\"2\":\"1\"}]">>,
		serialize(#{ start => 1, limit => 1, format => json }, I3)
	),
	?assertEqual(
		<<"[{\"6\":\"3\"}]">>,
		serialize(#{ start => 3, limit => 10, format => json }, I3)
	),
	{ok, I3_FromETF} =
		safe_from_etf(serialize(#{ format => etf, limit => 1000,
				random_subset => true }, I3)),
	compare(I3, I3_FromETF),
	compare(I3, add(I3, 4, 3)),
	compare(add(new(), 6, 1), add(I3, 3, 1)),
	I3_2 = add(new(), 7, 5),
	compare(add(new(), 7, 5), outerjoin(I2, I3_2)),
	compare(add(new(), 7, 6), outerjoin(I3, I3_2)),
	compare(add(add(add(new(), 1, 0), 3, 2), 8, 6), outerjoin(I3, add(new(), 8, 0))),
	I4 = add(I3, 7, 6),
	?assertEqual(2, count(I4)),
	?assertEqual(5, sum(I4)),
	?assert(not is_inside(I4, 0)),
	?assert(not is_inside(I4, 1)),
	?assert(is_inside(I4, 2)),
	?assert(not is_inside(I4, 3)),
	?assert(is_inside(I4, 4)),
	?assert(is_inside(I4, 5)),
	?assert(is_inside(I4, 6)),
	?assert(is_inside(I4, 7)),
	?assert(not is_inside(I4, 8)),
	?assertEqual(new(), cut(I4, 1)),
	?assertEqual(new(), cut(I4, 0)),
	compare(add(I2, 5, 3), cut(I4, 5)),
	compare(I4, cut(I4, 7)),
	?assertEqual(
		<<"[{\"7\":\"3\"},{\"2\":\"1\"}]">>,
		serialize(#{ format => json, limit => 1000, random_subset => true }, I4)
	),
	{ok, I4_FromETF} =
		safe_from_etf(serialize(#{ limit => 1000, random_subset => true,
				format => etf }, I4)),
	compare(I4, I4_FromETF),
	I5 = add(I4, 3, 2),
	?assertEqual(1, count(I5)),
	?assertEqual(6, sum(I5)),
	compare(I5, add(I5, 3, 2)),
	compare(I5, add(I5, 2, 1)),
	compare(add(add(new(), 3, 2), 8, 7), delete(add(add(new(), 4, 2), 8, 6), 7, 3)).

%% @doc Assert two interval sets are equal both in serialized form and in
%% their underlying gb_sets contents.
compare(I1, I2) ->
	?assertEqual(
		serialize(#{ format => json, limit => count(I1), start => 0 }, I1),
		serialize(#{ format => json, limit => count(I2), start => 0 }, I2)
	),
	Folded1 = gb_sets:fold(fun({K, V}, Acc) -> [{K, V} | Acc] end, [], I1),
	Folded2 = gb_sets:fold(fun({K, V}, Acc) -> [{K, V} | Acc] end, [], I2),
	?assertEqual(Folded1, Folded2).



================================================
FILE: apps/arweave/src/ar_join.erl
================================================
-module(ar_join).

-export([start/1]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%%% Represents a process that handles downloading the block index and the latest
%%% blocks from the trusted peers, to initialize the node state.

%% The number of block index elements to fetch per request.
%% Must not exceed ?MAX_BLOCK_INDEX_RANGE_SIZE defined in ar_http_iface_middleware.erl.
-ifdef(AR_TEST).
-define(REQUEST_BLOCK_INDEX_RANGE_SIZE, 2).
-else.
-define(REQUEST_BLOCK_INDEX_RANGE_SIZE, 10000).
-endif.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start a process that will attempt to download the block index and the latest blocks.
start(Peers) ->
	spawn(fun() ->
		process_flag(trap_exit, true),
		start2(filter_peers(Peers))
	end).
%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Probe every trusted peer for its current height and keep only the
%% peers that are reachable and at most four blocks behind the best height
%% observed among them. The returned peer order mirrors the original
%% recursive accumulation (input order reversed by the height-probing pass).
filter_peers(Peers) ->
	filter_peers(Peers, []).

%% The accumulator collects {Height, Peer} pairs for the reachable peers.
filter_peers([], []) ->
	[];
filter_peers([], Measured) ->
	Best = lists:max([Height || {Height, _Peer} <- Measured]),
	filter_peers2(Measured, Best);
filter_peers([Peer | Rest], Measured) ->
	case ar_http_iface_client:get_info(Peer, height) of
		info_unavailable ->
			?LOG_WARNING([{event, trusted_peer_unavailable},
					{peer, ar_util:format_peer(Peer)}]),
			filter_peers(Rest, Measured);
		Height ->
			filter_peers(Rest, [{Height, Peer} | Measured])
	end.

%% @doc Drop every peer lagging five or more blocks behind Best, logging
%% a warning for each dropped peer.
filter_peers2([], _Best) ->
	[];
filter_peers2([{Height, Peer} | Rest], Best) when Best - Height >= 5 ->
	?LOG_WARNING([{event, trusted_peer_five_or_more_blocks_behind},
			{peer, ar_util:format_peer(Peer)}]),
	filter_peers2(Rest, Best);
filter_peers2([{_Height, Peer} | Rest], Best) ->
	[Peer | filter_peers2(Rest, Best)].

%% @doc Download the block index and the head block from the filtered peers,
%% verify the head block's hash_list_merkle against the index, then hand over
%% to do_join/3. Shuts the node down when no peer is usable or when the head
%% block and the index disagree (writing a dump file for later inspection).
start2([]) ->
	ar:console("~nTrusted peers are not available.~n", []),
	?LOG_WARNING([{event, not_joining}, {reason, trusted_peers_not_available}]),
	timer:sleep(1000),
	init:stop(1);
start2(Peers) ->
	ar:console("Joining the Arweave network...~n"),
	BI = get_block_index(Peers, ?REJOIN_RETRIES),
	[{HeadH, _, _} | _] = BI,
	ar:console("Downloaded the block index successfully.~n", []),
	HeadB = get_block(Peers, HeadH),
	WantMerkle = ar_unbalanced_merkle:block_index_to_merkle_root(tl(BI)),
	case HeadB#block.hash_list_merkle == WantMerkle of
		true ->
			do_join(Peers, HeadB, BI);
		false ->
			{ok, Config} = arweave_config:get_env(),
			ID = binary_to_list(ar_util:encode(crypto:strong_rand_bytes(16))),
			File = filename:join(Config#config.data_dir,
					"inconsistent_joining_data_dump_" ++ ID),
			file:write_file(File, term_to_binary({HeadB, Peers, BI})),
			ar:console("Inconsistent head block and block index. Error dump: ~s.",
					[File]),
			timer:sleep(2000),
			init:stop(1)
	end.
%% @doc Fetch the block index, retrying with a ?REJOIN_TIMEOUT pause up to
%% Retries times; stops the node when all retries are exhausted.
get_block_index(Peers, Retries) ->
	case get_block_index(Peers) of
		unavailable ->
			case Retries > 0 of
				true ->
					ar:console(
						"Failed to fetch the block index from any of the peers."
						" Retrying..~n"
					),
					?LOG_WARNING([{event, failed_to_fetch_block_index}]),
					timer:sleep(?REJOIN_TIMEOUT),
					get_block_index(Peers, Retries - 1);
				false ->
					ar:console(
						"Failed to fetch the block index from any of the peers. Giving up.."
						" Consider changing the peers.~n"
					),
					?LOG_ERROR([{event, failed_to_fetch_block_index}]),
					timer:sleep(1000),
					init:stop(1)
			end;
		BI ->
			BI
	end.

%% Try the peers one by one until one serves a complete block index.
get_block_index([]) ->
	unavailable;
get_block_index([Peer | Peers]) ->
	case get_block_index2(Peer) of
		unavailable ->
			get_block_index(Peers);
		BI ->
			BI
	end.

get_block_index2(Peer) ->
	Height = ar_http_iface_client:get_info(Peer, height),
	get_block_index2(Peer, 0, Height, []).

%% Fetch the index in ranges of ?REQUEST_BLOCK_INDEX_RANGE_SIZE elements.
%% Consecutive requests overlap by one element (next Start is Start + N - 1);
%% the lists:last(Range) == hd(BI) check uses that shared element to verify
%% the peer did not switch forks between requests.
%% NOTE(review): the check implies each fetched Range ends with the element
%% the accumulated BI starts with - confirm the ordering contract against
%% ar_http_iface_client:get_block_index/3.
get_block_index2(Peer, Start, Height, BI) ->
	N = ?REQUEST_BLOCK_INDEX_RANGE_SIZE,
	case ar_http_iface_client:get_block_index(Peer, min(Start, Height),
			min(Height, Start + N - 1)) of
		{ok, Range} when length(Range) < N ->
			%% A short range means we reached the end of the index.
			case Start of
				0 ->
					Range;
				_ ->
					case lists:last(Range) == hd(BI) of
						true ->
							Range ++ tl(BI);
						false ->
							unavailable
					end
			end;
		{ok, Range} when length(Range) == N ->
			case Start of
				0 ->
					get_block_index2(Peer, Start + N - 1, Height, Range);
				_ ->
					case lists:last(Range) == hd(BI) of
						true ->
							get_block_index2(Peer, Start + N - 1, Height,
									Range ++ tl(BI));
						false ->
							unavailable
					end
			end;
		_ ->
			unavailable
	end.

%% @doc Obtain the block H together with its transactions, preferring the
%% local storage and falling back to the network (10 attempts).
get_block(Peers, H) ->
	case ar_storage:read_block(H) of
		unavailable ->
			get_block(Peers, H, 10);
		BShadow ->
			get_block(Peers, BShadow, BShadow#block.txs, [], 10)
	end.

%% Download the block shadow from the peers, retrying up to Retries times;
%% stops the node when the block cannot be fetched at all.
get_block(Peers, H, Retries) ->
	ar:console("Downloading joining block ~s.~n", [ar_util:encode(H)]),
	case ar_http_iface_client:get_block_shadow(Peers, H) of
		{_Peer, #block{} = BShadow, _Time, _Size} ->
			get_block(Peers, BShadow, BShadow#block.txs, [], Retries);
		_ ->
			case Retries > 0 of
				true ->
					ar:console(
						"Failed to fetch a joining block ~s from any of the peers."
						" Retrying..~n", [ar_util:encode(H)]
					),
					?LOG_WARNING([
						{event, failed_to_fetch_joining_block},
						{block, ar_util:encode(H)}
					]),
					timer:sleep(1000),
					get_block(Peers, H, Retries - 1);
				false ->
					ar:console(
						"Failed to fetch a joining block ~s from any of the peers. Giving up.."
						" Consider changing the peers.~n", [ar_util:encode(H)]
					),
					?LOG_ERROR([
						{event, failed_to_fetch_joining_block},
						{block, ar_util:encode(H)}
					]),
					timer:sleep(1000),
					init:stop(1)
			end
	end.

%% Resolve the block's transaction identifiers into full transactions,
%% retrying each one; stops the node if a transaction cannot be fetched.
get_block(_Peers, BShadow, [], TXs, _Retries) ->
	BShadow#block{ txs = lists:reverse(TXs) };
get_block(Peers, BShadow, [TXID | TXIDs], TXs, Retries) ->
	case ar_http_iface_client:get_tx(Peers, TXID) of
		#tx{} = TX ->
			get_block(Peers, BShadow, TXIDs, [TX | TXs], Retries);
		_ ->
			case Retries > 0 of
				true ->
					ar:console(
						"Failed to fetch a joining transaction ~s from any of the peers."
						" Retrying..~n", [ar_util:encode(TXID)]
					),
					?LOG_WARNING([
						{event, failed_to_fetch_joining_tx},
						{tx, ar_util:encode(TXID)}
					]),
					timer:sleep(1000),
					get_block(Peers, BShadow, [TXID | TXIDs], TXs, Retries - 1);
				false ->
					ar:console(
						"Failed to fetch a joining tx ~s from any of the peers. Giving up.."
						" Consider changing the peers.~n", [ar_util:encode(TXID)]
					),
					?LOG_ERROR([
						{event, failed_to_fetch_joining_tx},
						{block, ar_util:encode(TXID)}
					]),
					timer:sleep(1000),
					init:stop(1)
			end
	end.

%% @doc Perform the joining process.
%% Download the block trail concurrently, attach the reward and block time
%% histories, hand everything over to ar_node_worker and register the peers.
do_join(Peers, B, BI) ->
	ar:console("Downloading the block trail.~n", []),
	{ok, Config} = arweave_config:get_env(),
	%% A pool of join_workers download processes; work is dealt round-robin
	%% by rotating the worker and peer queues.
	WorkerQ = queue:from_list([spawn(fun() -> worker() end)
			|| _ <- lists:seq(1, Config#config.join_workers)]),
	PeerQ = queue:from_list(Peers),
	Trail = lists:sublist(tl(BI), 2 * ar_block:get_max_tx_anchor_depth()),
	SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(B#block.txs,
			B#block.height),
	%% Every peer gets 5 download attempts before being dropped from rotation.
	Retries = lists:foldl(fun(Peer, Acc) -> maps:put(Peer, 5, Acc) end, #{}, Peers),
	Blocks = [B#block{ size_tagged_txs = SizeTaggedTXs }
			| get_block_trail(WorkerQ, PeerQ, Trail, Retries)],
	ar:console("Downloaded the block trail successfully.~n", []),
	Blocks2 = maybe_set_reward_history(Blocks, Peers),
	Blocks3 = maybe_set_block_time_history(Blocks2, Peers),
	ar_node_worker ! {join, B#block.height, BI, Blocks3},
	join_peers(Peers).

%% @doc Get the 2 * ar_block:get_max_tx_anchor_depth() blocks preceding the head block.
%% If the block list is shorter than 2 * ar_block:get_max_tx_anchor_depth(), simply
%% get all existing blocks.
%%
%% The node needs 2 * ar_block:get_max_tx_anchor_depth() block anchors so that it
%% can validate transactions even if it enters a ar_block:get_max_tx_anchor_depth()-deep
%% fork recovery (which is the deepest fork recovery possible) immediately after
%% joining the network.
get_block_trail(_WorkerQ, _PeerQ, [], _Retries) ->
	[];
get_block_trail(WorkerQ, PeerQ, Trail, Retries) ->
	{WorkerQ2, PeerQ2} = request_blocks(Trail, WorkerQ, PeerQ),
	FetchState = #{ awaiting_block_count => length(Trail) },
	get_block_trail_loop(WorkerQ2, PeerQ2, Retries, Trail, FetchState).

%% Dispatch one block-shadow request per trail entry, rotating through the
%% worker and peer queues.
request_blocks([], WorkerQ, PeerQ) ->
	{WorkerQ, PeerQ};
request_blocks([{H, _, _} | Trail], WorkerQ, PeerQ) ->
	{{value, W}, WorkerQ2} = queue:out(WorkerQ),
	{{value, Peer}, PeerQ2} = queue:out(PeerQ),
	W ! {get_block_shadow, H, Peer, self()},
	request_blocks(Trail, queue:in(W, WorkerQ2), queue:in(Peer, PeerQ2)).
%% The receive loop collecting worker responses. FetchState maps each block
%% hash H to {BShadow, TXID => TX map, awaiting TX count} plus the special key
%% awaiting_block_count - the number of trail blocks whose transactions have
%% not all arrived yet. When it drops to zero, the assembled blocks are
%% returned via get_blocks/2. On a peer failure the peer's retry budget is
%% decremented; once exhausted the peer is removed from rotation, and when the
%% last peer runs out of attempts the node stops.
get_block_trail_loop(WorkerQ, PeerQ, Retries, Trail, FetchState) ->
	receive
		{block_response, H, _Peer, #block{} = BShadow, Origin} ->
			%% Cache shadows fetched over the network for future restarts.
			case Origin of
				storage -> ok;
				_ -> ar_disk_cache:write_block_shadow(BShadow)
			end,
			TXCount = length(BShadow#block.txs),
			FetchState2 = maps:put(H, {BShadow, #{}, TXCount}, FetchState),
			AwaitingBlockCount = maps:get(awaiting_block_count, FetchState2),
			%% A block with no transactions is complete upon arrival.
			AwaitingBlockCount2 =
				case TXCount of
					0 ->
						?LOG_INFO([{event, join_remaining_blocks_to_fetch},
								{remaining_blocks_count, AwaitingBlockCount - 1}]),
						AwaitingBlockCount - 1;
					_ ->
						AwaitingBlockCount
				end,
			FetchState3 = maps:put(awaiting_block_count, AwaitingBlockCount2,
					FetchState2),
			{WorkerQ2, PeerQ2} = request_txs(H, BShadow#block.txs, WorkerQ, PeerQ),
			case AwaitingBlockCount2 of
				0 ->
					get_blocks(Trail, FetchState3);
				_ ->
					get_block_trail_loop(WorkerQ2, PeerQ2, Retries, Trail, FetchState3)
			end;
		{block_response, H, Peer, Response, peer} ->
			%% The peer failed to serve the block shadow.
			PeerRetries = maps:get(Peer, Retries),
			case PeerRetries > 0 of
				true ->
					ar:console("Failed to fetch a joining block ~s from ~s."
							" Retrying..~n",
							[ar_util:encode(H), ar_util:format_peer(Peer)]),
					?LOG_WARNING([
						{event, failed_to_fetch_joining_block},
						{block, ar_util:encode(H)},
						{peer, ar_util:format_peer(Peer)},
						{response, io_lib:format("~p", [Response])}
					]),
					timer:sleep(1000),
					Retries2 = maps:put(Peer, PeerRetries - 1, Retries),
					%% Re-dispatch; the rotation may pick a different peer.
					{WorkerQ2, PeerQ2} = request_block(H, WorkerQ, PeerQ),
					get_block_trail_loop(WorkerQ2, PeerQ2, Retries2, Trail, FetchState);
				false ->
					case queue:to_list(PeerQ) of
						[Peer] ->
							%% The last peer left and it is out of attempts.
							ar:console(
								"Failed to fetch the joining headers from any of the peers, "
								"consider trying some other trusted peers.", []),
							?LOG_ERROR([{event, failed_to_join}]),
							timer:sleep(1000),
							init:stop(1);
						_ ->
							case queue:member(Peer, PeerQ) of
								false ->
									%% Already removed from rotation; just retry
									%% with the remaining peers.
									{WorkerQ2, PeerQ2} = request_block(H, WorkerQ,
											PeerQ),
									get_block_trail_loop(WorkerQ2, PeerQ2, Retries,
											Trail, FetchState);
								true ->
									%% Out of attempts - drop the peer from the
									%% rotation and retry elsewhere.
									PeerQ2 = queue:delete(Peer, PeerQ),
									ar:console("Failed to fetch a joining block ~s from ~s. "
											"Removing the peer from the queue..",
											[ar_util:encode(H), ar_util:format_peer(Peer)]),
									?LOG_ERROR([
										{event, failed_to_fetch_joining_block},
										{block, ar_util:encode(H)},
										{peer, ar_util:format_peer(Peer)},
										{response, io_lib:format("~p", [Response])}
									]),
									{WorkerQ2, PeerQ3} = request_block(H, WorkerQ,
											PeerQ2),
									get_block_trail_loop(WorkerQ2, PeerQ3, Retries,
											Trail, FetchState)
							end
					end
			end;
		{tx_response, H, TXID, _Peer, #tx{} = TX, Origin} ->
			%% Cache transactions fetched over the network.
			case Origin of
				storage -> ok;
				_ -> ar_disk_cache:write_tx(TX)
			end,
			{BShadow, TXMap, AwaitingTXCount} = maps:get(H, FetchState),
			TXMap2 = maps:put(TXID, TX, TXMap),
			AwaitingTXCount2 = AwaitingTXCount - 1,
			FetchState2 = maps:put(H, {BShadow, TXMap2, AwaitingTXCount2}, FetchState),
			AwaitingBlockCount = maps:get(awaiting_block_count, FetchState2),
			%% The block is complete once its last transaction arrives.
			AwaitingBlockCount2 =
				case AwaitingTXCount2 of
					0 ->
						?LOG_INFO([{event, join_remaining_blocks_to_fetch},
								{remaining_blocks_count, AwaitingBlockCount - 1}]),
						AwaitingBlockCount - 1;
					_ ->
						AwaitingBlockCount
				end,
			FetchState3 = maps:put(awaiting_block_count, AwaitingBlockCount2,
					FetchState2),
			case AwaitingBlockCount2 of
				0 ->
					get_blocks(Trail, FetchState3);
				_ ->
					get_block_trail_loop(WorkerQ, PeerQ, Retries, Trail, FetchState3)
			end;
		{tx_response, H, TXID, Peer, Response, peer} ->
			%% The peer failed to serve the transaction; mirror the block
			%% failure handling above.
			PeerRetries = maps:get(Peer, Retries),
			case PeerRetries > 0 of
				true ->
					ar:console("Failed to fetch a joining transaction ~s from ~s. "
							"Retrying..~n",
							[ar_util:encode(TXID), ar_util:format_peer(Peer)]),
					?LOG_WARNING([{event, failed_to_fetch_joining_tx},
							{block, ar_util:encode(H)},
							{tx, ar_util:encode(TXID)},
							{peer, ar_util:format_peer(Peer)},
							{response, io_lib:format("~p", [Response])}]),
					timer:sleep(1000),
					Retries2 = maps:put(Peer, PeerRetries - 1, Retries),
					{WorkerQ2, PeerQ2} = request_tx(H, TXID, WorkerQ, PeerQ),
					get_block_trail_loop(WorkerQ2, PeerQ2, Retries2, Trail, FetchState);
				false ->
					case queue:to_list(PeerQ) of
						[Peer] ->
							%% The last peer left and it is out of attempts.
							ar:console(
								"Failed to fetch the joining headers from any of the peers, "
								"consider trying some other trusted peers.", []),
							?LOG_ERROR([{event, failed_to_join}]),
							timer:sleep(1000),
							init:stop(1);
						_ ->
							case queue:member(Peer, PeerQ) of
								false ->
									{WorkerQ2, PeerQ2} = request_tx(H, TXID, WorkerQ,
											PeerQ),
									get_block_trail_loop(WorkerQ2, PeerQ2, Retries,
											Trail, FetchState);
								true ->
									PeerQ2 = queue:delete(Peer, PeerQ),
									ar:console("Failed to fetch a joining tx ~s from ~s. "
											"Removing the peer from the queue..",
											[ar_util:encode(TXID), ar_util:format_peer(Peer)]),
									?LOG_ERROR([
										{event, failed_to_fetch_joining_tx},
										{block, ar_util:encode(H)},
										{tx, ar_util:encode(TXID)},
										{peer, ar_util:format_peer(Peer)},
										{response, io_lib:format("~p", [Response])}
									]),
									{WorkerQ2, PeerQ3} = request_tx(H, TXID, WorkerQ,
											PeerQ2),
									get_block_trail_loop(WorkerQ2, PeerQ3, Retries,
											Trail, FetchState)
							end
					end
			end
	end.

%% Dispatch one fetch request per transaction of the block H.
request_txs(_H, [], WorkerQ, PeerQ) ->
	{WorkerQ, PeerQ};
request_txs(H, [TXID | TXIDs], WorkerQ, PeerQ) ->
	{WorkerQ2, PeerQ2} = request_tx(H, TXID, WorkerQ, PeerQ),
	request_txs(H, TXIDs, WorkerQ2, PeerQ2).

%% Hand a single transaction fetch to the next worker/peer pair.
request_tx(H, TXID, WorkerQ, PeerQ) ->
	{{value, W}, WorkerQ2} = queue:out(WorkerQ),
	{{value, Peer}, PeerQ2} = queue:out(PeerQ),
	W ! {get_tx, H, TXID, Peer, self()},
	{queue:in(W, WorkerQ2), queue:in(Peer, PeerQ2)}.
%% Assemble the downloaded trail: attach the fetched transactions (in the
%% order recorded in the shadow) and the size-tagged transaction list to
%% each block, preserving the trail order.
get_blocks([], _FetchState) ->
	[];
get_blocks([{H, _, _} | Trail], FetchState) ->
	{B, TXMap, _} = maps:get(H, FetchState),
	TXs = [maps:get(TXID, TXMap) || TXID <- B#block.txs],
	SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs, B#block.height),
	[B#block{ txs = TXs, size_tagged_txs = SizeTaggedTXs }
			| get_blocks(Trail, FetchState)].

%% Hand a single block-shadow fetch to the next worker/peer pair.
request_block(H, WorkerQ, PeerQ) ->
	{{value, W}, WorkerQ2} = queue:out(WorkerQ),
	{{value, Peer}, PeerQ2} = queue:out(PeerQ),
	W ! {get_block_shadow, H, Peer, self()},
	{queue:in(W, WorkerQ2), queue:in(Peer, PeerQ2)}.

%% Fetch and validate the reward history for the head blocks; stops the
%% node when no peer can serve a history matching the expected hashes.
maybe_set_reward_history(Blocks, Peers) ->
	HeadB = hd(Blocks),
	ExpectedHashesLen = ar_rewards:expected_hashes_length(HeadB#block.height),
	ExpectedHashes = [B#block.reward_history_hash
			|| B <- lists:sublist(Blocks, ExpectedHashesLen)],
	case ar_http_iface_client:get_reward_history(Peers, HeadB, ExpectedHashes) of
		{ok, RewardHistory} ->
			ar_rewards:set_reward_history(Blocks, RewardHistory);
		_ ->
			ar:console("Failed to fetch the reward history for the block ~s from "
					"any of the peers. Consider changing the peers.~n",
					[ar_util:encode((hd(Blocks))#block.indep_hash)]),
			?LOG_WARNING([{event, failed_to_fetch_reward_history}]),
			timer:sleep(1000),
			init:stop(1)
	end.

%% Fetch the block time history once the 2.7 fork has activated; stops the
%% node when no peer can serve it.
maybe_set_block_time_history([#block{ height = Height } | _] = Blocks, Peers) ->
	case Height >= ar_fork:height_2_7() of
		true ->
			case ar_http_iface_client:get_block_time_history(Peers, hd(Blocks),
					ar_block_time_history:get_hashes(Blocks)) of
				{ok, BlockTimeHistory} ->
					ar_block_time_history:set_history(Blocks, BlockTimeHistory);
				_ ->
					ar:console("Failed to fetch the block time history for the block ~s from "
							"any of the peers. Consider changing the peers.~n",
							[ar_util:encode((hd(Blocks))#block.indep_hash)]),
					timer:sleep(1000),
					init:stop(1)
			end;
		false ->
			Blocks
	end.

%% Register every trusted peer with the HTTP client.
join_peers(Peers) ->
	lists:foreach(
		fun(Peer) ->
			ar_http_iface_client:add_peer(Peer)
		end,
		Peers
	).
%% A download worker process. Serves block-shadow and transaction fetch
%% requests, preferring local storage and falling back to the assigned peer;
%% replies to From with {block_response | tx_response, ...} tagged with the
%% origin (storage | peer) so the caller knows whether to cache the result.
worker() ->
	receive
		{get_block_shadow, H, Peer, From} ->
			case ar_storage:read_block(H) of
				#block{} = B ->
					From ! {block_response, H, Peer, B, storage};
				unavailable ->
					case ar_http_iface_client:get_block_shadow([Peer], H) of
						{_, B, _, _} ->
							From ! {block_response, H, Peer, B, peer};
						Error ->
							%% Forward the error; the caller handles retries.
							From ! {block_response, H, Peer, Error, peer}
					end
			end,
			worker();
		{get_tx, H, TXID, Peer, From} ->
			case ar_storage:read_tx(TXID) of
				#tx{} = TX ->
					From ! {tx_response, H, TXID, Peer, TX, storage};
				unavailable ->
					case ar_http_iface_client:get_tx_from_remote_peer(Peer, TXID, true) of
						{TX, _, _, _} ->
							From ! {tx_response, H, TXID, Peer, TX, peer};
						Error ->
							From ! {tx_response, H, TXID, Peer, Error, peer}
					end
			end,
			worker()
	end.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% @doc Check that nodes can join a running network by using the fork recoverer.
basic_node_join_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun() ->
		[B0] = ar_weave:init(),
		ar_test_node:start(B0),
		ar_test_node:mine(),
		ar_test_node:wait_until_height(main, 1),
		ar_test_node:mine(),
		ar_test_node:wait_until_height(main, 2),
		ar_test_node:join_on(#{ node => peer1, join_on => main }),
		ar_test_node:assert_wait_until_height(peer1, 2)
	end}.

%% @doc Ensure that both nodes can mine after a join.
node_join_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun() ->
		[B0] = ar_weave:init(),
		ar_test_node:start(B0),
		ar_test_node:mine(),
		ar_test_node:wait_until_height(main, 1),
		ar_test_node:mine(),
		ar_test_node:wait_until_height(main, 2),
		ar_test_node:join_on(#{ node => peer1, join_on => main }),
		ar_test_node:assert_wait_until_height(peer1, 2),
		ar_test_node:mine(peer1),
		ar_test_node:wait_until_height(main, 3)
	end}.

%% @doc Ensure that get_tx works with a single peer and a list of peers.
get_tx_test_() ->
	[
		%% Mock the remote fetch so get_tx exercises its not_found path
		%% without real network traffic.
		ar_test_node:test_with_mocked_functions(
			[{ar_http_iface_client, get_tx_from_remote_peer,
				fun(_, _, _) -> {error,{closed,"The connection was lost."}} end}],
			fun test_get_tx/0)
	].

test_get_tx() ->
	?assertEqual(ar_http_iface_client:get_tx({127, 0, 0, 1, 1984}, <<"123">>),
			not_found),
	?assertEqual(ar_http_iface_client:get_tx([{127, 0, 0, 1, 1984}], <<"123">>),
			not_found),
	?assertEqual(ar_http_iface_client:get_tx(
			[{127, 0, 0, 1, 1984}, {127, 0, 0, 1, 1985}], <<"123">>), not_found).



================================================
FILE: apps/arweave/src/ar_kv.erl
================================================
-module(ar_kv).

-behaviour(gen_server).

-export([
	start_link/0, create_ets/0,
	open/1, open_readonly/1, close/1,
	put/3, get/2, get_next_by_prefix/4, get_next/2, get_prev/2, get_range/2,
	get_range/3, delete/2, delete_range/3, count/1
]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Route an operation through with_db/3, tagging it with the calling
%% function's name for error reporting.
-define(WITH_DB(Name, Callback), with_db(Name, ?FUNCTION_NAME, Callback)).
%% Same as ?WITH_DB but provides the callback with a RocksDB iterator.
-define(WITH_ITERATOR(Name, IteratorOptions, Callback),
		with_iterator(Name, ?FUNCTION_NAME, IteratorOptions, Callback)).

-define(DEFAULT_ROCKSDB_DATABASE_OPTIONS, #{
	create_if_missing => true,
	create_missing_column_families => true,
	%% these are default values, but they must not be overriden;
	%% otherwise the syncWAL will not work.
	allow_mmap_reads => false,
	allow_mmap_writes => false
}).

%% One ETS record per open database; for column family databases, one
%% record per column family (all sharing the same db_handle).
-record(db, {
	%% name may be undefined in short intervals before opening the database,
	%% or reopening the database (which implies close and open operations).
	%% It may happen in case of opening the database with column families.
	%% NB: records with undefined db_handle must not be stored in the ETS table.
	name :: term() | undefined,
	filepath :: file:filename_all(),
	db_options :: rocksdb:db_options(),
	%% db_handle may be undefined in short intervals before opening the database,
	%% or reopening the database (which implies close and open operations).
	%% NB: records with undefined db_handle must not be stored in the ETS table.
	db_handle :: rocksdb:db_handle() | undefined,
	%% column families only fields, must be set to undefined for plain databases.
	cf_names = undefined :: [term()],
	cf_descriptors = undefined :: [rocksdb:cf_descriptor()],
	cf_handle = undefined :: rocksdb:cf_handle(),
	readonly :: boolean()
}).

%% Timer messages carry a secret reference so that messages from cancelled
%% (stale) timers can be recognized and discarded.
-define(msg_trigger_timer(Kind, Secret), {msg_trigger_timer, Kind, Secret}).
-define(msg_trigger_db_flush(Secret), ?msg_trigger_timer(db_flush, Secret)).
-define(msg_trigger_wal_sync(Secret), ?msg_trigger_timer(wal_sync, Secret)).

-record(timer, {
	interval_ms :: pos_integer(),
	ref :: erlang:reference() | undefined,
	secret :: erlang:reference() | undefined
}).

-record(state, {
	db_flush_timer :: #timer{},
	wal_sync_timer :: #timer{}
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Creates a named ETS table.
%% This function is used within `ar_kv_sup` as well as `ar_test_node` modules.
create_ets() ->
	ets:new(?MODULE, [set, public, named_table, {keypos, #db.name}]).

%% @doc Open a key-value store.
%% Args is a map with the following keys:
%% - path: the filesystem path of the database (required)
%% - name: the name of the database (required for plain databases, not used for column family databases)
%% - log_path: the filesystem path of the log file (optional, [data_dir]/logs/rocksdb/ by default)
%% - options: the options for the database (optional, see ?DEFAULT_ROCKSDB_DATABASE_OPTIONS for the default options)
%% - cf_descriptors: the column family descriptors (optional, see rocksdb:open/3 for the supported options)
%% - cf_names: the column family names (required if cf_descriptors is provided)
%% - readonly: whether to open the database in read-only mode (optional, false by default)
open(Args) ->
	Path = maps:get(path, Args),
	CustomLogPath = maps:get(log_path, Args, not_set),
	Options = maps:get(options, Args, []),
	ReadOnly = maps:get(readonly, Args, false),
	%% The presence of cf_descriptors selects the column family variant;
	%% both variants are serialized through the gen_server.
	case maps:get(cf_descriptors, Args, undefined) of
		undefined ->
			Name = maps:get(name, Args),
			gen_server:call(?MODULE, {open, {Path, CustomLogPath, Options, Name, ReadOnly}},
					?DEFAULT_CALL_TIMEOUT);
		CFDescriptors ->
			CFNames = maps:get(cf_names, Args),
			gen_server:call(
				?MODULE,
				{open, {Path, CustomLogPath, CFDescriptors, Options, CFNames, ReadOnly}},
				?DEFAULT_CALL_TIMEOUT
			)
	end.

%% @doc Open a key-value store in read-only mode.
%% This will not modify any database files (no WAL writes, no compaction, no manifest updates).
%% Useful for reading snapshot data without altering it.
%% Args is a map with the same keys as open/1.
open_readonly(Args) ->
	open(Args#{ readonly => true }).

%% @doc Store the given value under the given key.
put(Name, Key, Value) ->
	?WITH_DB(Name, fun
		(#db{db_handle = Db, cf_handle = undefined}) ->
			rocksdb:put(Db, Key, Value, []);
		(#db{db_handle = Db, cf_handle = Cf}) ->
			rocksdb:put(Db, Cf, Key, Value, [])
	end).

%% @doc Return the value stored under the given key.
get(Name, Key) ->
	?WITH_DB(Name, fun
		(#db{db_handle = Db, cf_handle = undefined}) ->
			rocksdb:get(Db, Key, []);
		(#db{db_handle = Db, cf_handle = Cf}) ->
			rocksdb:get(Db, Cf, Key, [])
	end).

%% @doc Return the key ({ok, Key, Value}) equal to or bigger than OffsetBinary with
%% either the matching PrefixBitSize first bits or PrefixBitSize first bits bigger by one.
get_next_by_prefix(Name, PrefixBitSize, KeyBitSize, OffsetBinary) ->
	?WITH_ITERATOR(Name, [{prefix_same_as_start, true}], fun (Iterator) ->
		get_next_by_prefix2(Iterator, PrefixBitSize, KeyBitSize, OffsetBinary)
	end).

get_next_by_prefix2(Iterator, PrefixBitSize, KeyBitSize, OffsetBinary) ->
	case rocksdb:iterator_move(Iterator, {seek, OffsetBinary}) of
		{error, invalid_iterator} ->
			%% There is no bigger or equal key sharing the prefix.
			%% Query one more time with prefix + 1.
			SuffixBitSize = KeyBitSize - PrefixBitSize,
			<< Prefix:PrefixBitSize, _:SuffixBitSize >> = OffsetBinary,
			NextPrefixSmallestBytes = << (Prefix + 1):PrefixBitSize, 0:SuffixBitSize >>,
			rocksdb:iterator_move(Iterator, {seek, NextPrefixSmallestBytes});
		Reply ->
			Reply
	end.

%% @doc Return {ok, Key, Value} where Key is the smallest Key equal to or bigger than Cursor
%% or none.
get_next(Name, Cursor) ->
	?WITH_ITERATOR(Name, [{total_order_seek, true}], fun (Iterator) ->
		get_next2(Iterator, Cursor)
	end).

get_next2(Iterator, Cursor) ->
	case rocksdb:iterator_move(Iterator, Cursor) of
		{error, invalid_iterator} ->
			none;
		Reply ->
			Reply
	end.

%% @doc Return {ok, Key, Value} where Key is the largest Key equal to or smaller than Cursor
%% or none.
get_prev(Name, Cursor) ->
	?WITH_ITERATOR(Name, [{total_order_seek, true}], fun (Iterator) ->
		get_prev2(Iterator, Cursor)
	end).

get_prev2(Iterator, Cursor) ->
	case rocksdb:iterator_move(Iterator, {seek_for_prev, Cursor}) of
		{error, invalid_iterator} ->
			none;
		Reply ->
			Reply
	end.

%% @doc Return a Key => Value map with all keys equal to or larger than Start.
get_range(Name, Start) ->
	get_range2(Name, {Start, undefined}).
%% @doc Return a Key => Value map with all keys equal to or larger than Start and
%% equal to or smaller than End.
get_range(Name, Start, End) ->
	get_range2(Name, {Start, End}).

%% End boundary of undefined means "scan to the end of the keyspace".
get_range2(Name, {StartOffsetBinary, MaybeEndOffsetBinary}) ->
	?WITH_ITERATOR(Name, [{total_order_seek, true}], fun (Iterator) ->
		get_range3(Iterator, {StartOffsetBinary, MaybeEndOffsetBinary})
	end).

get_range3(Iterator, {StartOffsetBinary, MaybeEndOffsetBinary}) ->
	case rocksdb:iterator_move(Iterator, {seek, StartOffsetBinary}) of
		{ok, Key, _Value} when is_binary(MaybeEndOffsetBinary),
				Key > MaybeEndOffsetBinary ->
			%% The first key already lies past the end boundary.
			{ok, #{}};
		{ok, Key, Value} ->
			get_range4(Iterator, #{ Key => Value }, MaybeEndOffsetBinary);
		{error, invalid_iterator} ->
			{ok, #{}};
		{error, Reason} ->
			{error, Reason}
	end.

%% Accumulate subsequent keys until the end boundary or the end of the table.
get_range4(Iterator, Map, MaybeEndOffsetBinary) ->
	case rocksdb:iterator_move(Iterator, next) of
		{ok, Key, _Value} when is_binary(MaybeEndOffsetBinary),
				Key > MaybeEndOffsetBinary ->
			{ok, Map};
		{ok, Key, Value} ->
			get_range4(Iterator, Map#{ Key => Value }, MaybeEndOffsetBinary);
		{error, invalid_iterator} ->
			{ok, Map};
		{error, Reason} ->
			{error, Reason}
	end.

%% @doc Remove the given key.
delete(Name, Key) ->
	?WITH_DB(Name, fun
		(#db{db_handle = Db, cf_handle = undefined}) ->
			rocksdb:delete(Db, Key, []);
		(#db{db_handle = Db, cf_handle = Cf}) ->
			rocksdb:delete(Db, Cf, Key, [])
	end).

%% @doc Remove the keys equal to or larger than Start and smaller than End.
delete_range(Name, StartOffsetBinary, EndOffsetBinary) ->
	?WITH_DB(Name, fun
		(#db{db_handle = Db, cf_handle = undefined}) ->
			rocksdb:delete_range(Db, StartOffsetBinary, EndOffsetBinary, []);
		(#db{db_handle = Db, cf_handle = Cf}) ->
			rocksdb:delete_range(Db, Cf, StartOffsetBinary, EndOffsetBinary, [])
	end).

%% @doc Return the number of keys in the table.
count(Name) ->
	?WITH_DB(Name, fun
		(#db{db_handle = Db, cf_handle = undefined}) ->
			rocksdb:count(Db);
		(#db{db_handle = Db, cf_handle = Cf}) ->
			rocksdb:count(Db, Cf)
	end).
%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	%% Trap exits so terminate/2 runs on shutdown and the open databases
	%% are flushed and synced before the VM stops.
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	S0 = #state{
		db_flush_timer = #timer{interval_ms = Config#config.rocksdb_flush_interval_s * 1000},
		wal_sync_timer = #timer{interval_ms = Config#config.rocksdb_wal_sync_interval_s * 1000}
	},
	S1 = init_db_flush_timer(S0),
	S2 = init_wal_sync_timer(S1),
	{ok, S2}.

%% Open a plain (no column families) database.
handle_call({open, {Filepath, LogFilepath, UserOptions, Name, ReadOnly}}, _From, State) ->
	DbRec0 = new_dbrec(Name, Filepath, LogFilepath, UserOptions, ReadOnly),
	case ets:lookup(?MODULE, DbRec0#db.name) of
		[] ->
			case do_open(DbRec0) of
				ok ->
					{reply, ok, State};
				{error, Reason} ->
					{reply, {error, Reason}, State}
			end;
		%% Reopening with identical parameters is an idempotent no-op.
		[#db{filepath = Filepath, db_options = DbOptions}]
				when DbRec0#db.filepath == Filepath,
					DbRec0#db.db_options == DbOptions ->
			{reply, ok, State};
		%% Fixed: use fresh variables (like the column family clause below)
		%% so a name registered with a *different* filepath yields an
		%% already_open error instead of a case_clause crash.
		[#db{filepath = Filepath1, db_options = Options1}] ->
			{reply, {error, {already_open, Filepath1, Options1}}, State}
	end;
%% Open a database with column families; keyed in ETS by the first
%% column family name.
handle_call({open, {Filepath, LogFilepath, CfDescriptors, UserOptions, CfNames, ReadOnly}},
		_From, State) ->
	DbRec0 = new_dbrec(CfNames, CfDescriptors, Filepath, LogFilepath, UserOptions, ReadOnly),
	case ets:lookup(?MODULE, hd(CfNames)) of
		[] ->
			case do_open(DbRec0) of
				ok ->
					{reply, ok, State};
				{error, Reason} ->
					{reply, {error, Reason}, State}
			end;
		%% Reopening with identical parameters is an idempotent no-op.
		[#db{filepath = Filepath, db_options = DbOptions,
				cf_descriptors = CfDescriptors, cf_names = CfNames}]
				when DbRec0#db.filepath == Filepath,
					DbRec0#db.db_options == DbOptions,
					DbRec0#db.cf_descriptors == CfDescriptors,
					DbRec0#db.cf_names == CfNames ->
			{reply, ok, State};
		[#db{filepath = Filepath1, db_options = Options1}] ->
			{reply, {error, {already_open, Filepath1, Options1}}, State}
	end;
handle_call({close, Name}, _From, State) ->
	case ets:lookup(?MODULE, Name) of
		[] ->
			{reply, {error, not_found}, State};
		[DbRec] ->
			{reply, close(DbRec), State}
	end;
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% Periodic memtable flush. The secret match discards messages from
%% cancelled (stale) timers.
handle_info(
	?msg_trigger_db_flush(SameSecret),
	#state{db_flush_timer = #timer{secret = SameSecret}} = S0
) ->
	with_each_db(fun(DbRec) ->
		{ElapsedUs, _} = timer:tc(fun() -> db_flush(DbRec) end),
		%% Fixed: removed a stray empty tuple ({}) from the report list -
		%% logger report lists contain {Key, Value} pairs only.
		?LOG_DEBUG([
			{event, periodic_timer},
			{op, db_flush},
			{name, io_lib:format("~p", [DbRec#db.name])},
			{elapsed_us, ElapsedUs}
		])
	end),
	{noreply, init_db_flush_timer(S0)};
%% Periodic write-ahead-log sync.
handle_info(
	?msg_trigger_wal_sync(SameSecret),
	#state{wal_sync_timer = #timer{secret = SameSecret}} = S0
) ->
	with_each_db(fun(DbRec) ->
		{ElapsedUs, _} = timer:tc(fun() -> wal_sync(DbRec) end),
		?LOG_DEBUG([
			{event, periodic_timer},
			{op, wal_sync},
			{name, io_lib:format("~p", [DbRec#db.name])},
			{elapsed_us, ElapsedUs}
		])
	end),
	{noreply, init_wal_sync_timer(S0)};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

%% Flush, sync and close every open database before shutdown.
terminate(Reason, _State) ->
	Result = with_each_db(fun(DbRec) ->
		?LOG_INFO([{event, terminate_db}, {module, ?MODULE}, {db, DbRec#db.name}]),
		_ = db_flush(DbRec),
		_ = wal_sync(DbRec),
		_ = close(DbRec)
	end),
	?LOG_INFO([{event, terminate_complete}, {module, ?MODULE}, {reason, Reason}]),
	Result.

%%%===================================================================
%%% Private functions.
%%%===================================================================

maybe_cancel_timer(#timer{ref = undefined}) ->
	ok;
maybe_cancel_timer(#timer{ref = TRef}) ->
	erlang:cancel_timer(TRef).

%% Cancel any previous timer and arm a new one. A fresh secret reference
%% ensures late messages from the old timer are ignored.
init_timer(Timer0, MsgFun) ->
	_ = maybe_cancel_timer(Timer0),
	Secret = erlang:make_ref(),
	TRef = erlang:send_after(Timer0#timer.interval_ms, self(), apply(MsgFun, [Secret])),
	Timer0#timer{ref = TRef, secret = Secret}.
init_db_flush_timer(#state{db_flush_timer = Timer0} = S0) ->
	S0#state{
		db_flush_timer = init_timer(Timer0, fun(Secret) -> ?msg_trigger_db_flush(Secret) end)
	}.

init_wal_sync_timer(#state{wal_sync_timer = Timer0} = S0) ->
	S0#state{
		wal_sync_timer = init_timer(Timer0, fun(Secret) -> ?msg_trigger_wal_sync(Secret) end)
	}.

%% @doc Create a new plain database record.
new_dbrec(Name, Filepath, LogFilepath, UserOptions, ReadOnly) ->
	#db{
		name = Name,
		filepath = Filepath,
		db_options = make_db_options(Filepath, LogFilepath, UserOptions),
		readonly = ReadOnly
	}.

%% @doc Create a new 'column-family' database record.
%% The record is keyed by the first column family name.
new_dbrec(CfNames, CfDescriptors, Filepath, LogFilepath, UserOptions, ReadOnly) ->
	#db{
		name = hd(CfNames),
		filepath = Filepath,
		db_options = make_db_options(Filepath, LogFilepath, UserOptions),
		cf_descriptors = CfDescriptors,
		cf_names = CfNames,
		readonly = ReadOnly
	}.

%% Shared setup for both record constructors: ensure the database and log
%% directories exist and merge the user options with the mandatory defaults
%% (the defaults win - see ?DEFAULT_ROCKSDB_DATABASE_OPTIONS).
make_db_options(Filepath, LogFilepath, UserOptions) ->
	LogDir = filename:join([get_base_log_dir(LogFilepath), ?ROCKS_DB_DIR,
			filename:basename(Filepath)]),
	ok = filelib:ensure_dir(Filepath ++ "/"),
	ok = filelib:ensure_dir(LogDir ++ "/"),
	DefaultOptionsMap = (?DEFAULT_ROCKSDB_DATABASE_OPTIONS)#{db_log_dir => LogDir},
	maps:to_list(maps:merge(maps:from_list(UserOptions), DefaultOptionsMap)).

%% @doc Attempt to open the database.
%% Both plain and 'column-family' databases are attempted.
%% When opening the plain database, the record will have `name` set to the given
%% name parameter.
%% When opening 'column-family' database, the record will have a column name; several
%% database records will be inserted during the process.
%% Plain database (no column families).
do_open(#db{
	db_handle = undefined,
	cf_descriptors = undefined,
	filepath = Filepath,
	db_options = DbOptions,
	readonly = ReadOnly
} = DbRec0) ->
	Open = case ReadOnly of
		true -> rocksdb:open_readonly(Filepath, DbOptions);
		false -> rocksdb:open(Filepath, DbOptions)
	end,
	case Open of
		{ok, Db} ->
			DbRec1 = DbRec0#db{db_handle = Db},
			true = ets:insert(?MODULE, DbRec1),
			ok;
		{error, OpenError} ->
			?LOG_ERROR([{event, db_operation_failed}, {op, open},
					{name, io_lib:format("~p", [DbRec0#db.name])},
					{reason, io_lib:format("~p", [OpenError])}]),
			{error, failed}
	end;
%% Column family database: one ETS record is inserted per column family,
%% all sharing the same database handle.
do_open(#db{
	db_handle = undefined,
	cf_descriptors = CfDescriptors,
	cf_names = CfNames,
	filepath = Filepath,
	db_options = DbOptions,
	readonly = ReadOnly
} = DbRec0) ->
	Open = case ReadOnly of
		true -> rocksdb:open_readonly(Filepath, DbOptions, CfDescriptors);
		false -> rocksdb:open(Filepath, DbOptions, CfDescriptors)
	end,
	case Open of
		{ok, Db, Cfs} ->
			%% foldr makes the record of the FIRST column family the result.
			FirstDbRec = lists:foldr(
				fun({Cf, CfName}, _) ->
					DbRec1 = DbRec0#db{name = CfName, db_handle = Db, cf_handle = Cf},
					true = ets:insert(?MODULE, DbRec1),
					DbRec1
				end,
				undefined,
				lists:zip(Cfs, CfNames)
			),
			%% flush the cf database (all column families at once)
			_ = db_flush(FirstDbRec),
			ok;
		{error, OpenError} ->
			?LOG_ERROR([{event, db_operation_failed}, {op, open},
					{name, io_lib:format("~p", [DbRec0#db.name])},
					{reason, io_lib:format("~p", [OpenError])}]),
			{error, failed}
	end;
%% Defensive clause: the record already carries a database handle.
do_open(#db{} = DbRec0) ->
	?LOG_ERROR([
		{event, db_operation_failed},
		{op, open},
		{error, already_open},
		{name, io_lib:format("~p", [DbRec0#db.name])}
	]),
	%% Fixed: return an error tuple - callers pattern-match on
	%% ok | {error, Reason}, while this clause previously returned the
	%% value of the ?LOG_ERROR expression.
	{error, already_open}.

%% Attempt to close the database and remove the ETS entries related to it.
%% This function WILL NOT perform any actions regarding persistence: it is up to
%% the user to ensure that both db_flush/1 and wal_sync/1 functions are called
%% prior to calling this function.
%% Database must be open at the moment of calling the function.
%% Close by registered name (routed through the server) or directly by record.
close(Name) when not is_record(Name, db) ->
	gen_server:call(?MODULE, {close, Name}, ?DEFAULT_CALL_TIMEOUT);
close(#db{db_handle = undefined}) ->
	{error, closed};
close(#db{db_handle = Db, name = Name}) ->
	try
		case rocksdb:close(Db) of
			ok ->
				%% Remove every ETS record sharing this handle (a column
				%% family database has one record per column family).
				%% NOTE(review): on success this expression evaluates to true
				%% rather than ok - callers of close/1 receive that value.
				true = ets:match_delete(?MODULE, #db{db_handle = Db, _ = '_'});
			{error, CloseError} ->
				?LOG_ERROR([
					{event, db_operation_failed},
					{op, close},
					{name, io_lib:format("~p", [Name])},
					{error, io_lib:format("~p", [CloseError])}
				])
		end
	catch Exc ->
		%% NOTE(review): a bare `catch Exc ->` only catches throws; errors
		%% and exits still propagate - confirm this is intended.
		?LOG_ERROR([
			{event, ar_kv_failed},
			{op, close},
			{name, io_lib:format("~p", [Name])},
			{reason, io_lib:format("~p", [Exc])}
		])
	end.

%% @doc Attempt to flush the database: persist the memtables contents on disk.
%% Database must be open at the moment of calling the function.
db_flush(#db{name = Name, db_handle = undefined}) ->
	?LOG_ERROR([{event, db_operation_failed}, {op, db_flush},
			{error, closed},
			{name, io_lib:format("~p", [Name])}]),
	{error, closed};
db_flush(#db{readonly = true}) ->
	%% A read-only database has nothing to flush.
	ok;
db_flush(#db{name = Name, db_handle = Db}) ->
	case rocksdb:flush(Db, [{wait, true}, {allow_write_stall, false}]) of
		{error, FlushError} ->
			?LOG_ERROR([{event, db_operation_failed}, {op, db_flush},
					{name, io_lib:format("~p", [Name])},
					{reason, io_lib:format("~p", [FlushError])}]),
			{error, failed};
		_ ->
			ok
	end.

%% @doc Attempt to sync Write Ahead Log (WAL): persist WAL contents on disk.
%% Database must be open at the moment of calling the function.
wal_sync(#db{name = Name, db_handle = undefined}) ->
	?LOG_ERROR([{event, db_operation_failed}, {op, wal_sync},
			{error, closed},
			{name, io_lib:format("~p", [Name])}]),
	{error, closed};
wal_sync(#db{readonly = true}) ->
	%% A read-only database never writes to the WAL.
	ok;
wal_sync(#db{name = Name, db_handle = Db}) ->
	case rocksdb:sync_wal(Db) of
		{error, SyncError} ->
			?LOG_ERROR([{event, db_operation_failed}, {op, wal_sync},
					{name, io_lib:format("~p", [Name])},
					{reason, io_lib:format("~p", [SyncError])}]),
			{error, failed};
		_ ->
			ok
	end.
%% @doc Apply callback if it is possible to obtain the iterator for the database.
%% The callback will get an iterator as an argument.
%% The iterator is always released after the callback returns (or throws) -
%% an unreleased RocksDB iterator pins SST files and leaks a NIF resource.
with_iterator(Name, Op, IteratorOptions, Callback) ->
	with_db(Name, Op, fun
		(#db{db_handle = Db, cf_handle = undefined}) ->
			case rocksdb:iterator(Db, IteratorOptions) of
				{ok, Iterator} ->
					try
						apply(Callback, [Iterator])
					after
						%% Fixed: previously the iterator was never closed.
						%% The inner catch prevents a close error from masking
						%% the callback result.
						catch rocksdb:iterator_close(Iterator)
					end;
				{error, IteratorError} ->
					{error, IteratorError}
			end;
		(#db{db_handle = Db, cf_handle = Cf}) ->
			case rocksdb:iterator(Db, Cf, IteratorOptions) of
				{ok, Iterator} ->
					try
						apply(Callback, [Iterator])
					after
						catch rocksdb:iterator_close(Iterator)
					end;
				{error, IteratorError} ->
					{error, IteratorError}
			end
	end).

%% @doc Apply callback if the database is available.
%% The callback will get the database record (#db{}) as an argument.
with_db(Name, Op, Callback) ->
	try
		case ets:lookup(?MODULE, Name) of
			[] ->
				{error, db_not_found};
			[DbRec0] ->
				apply(Callback, [DbRec0])
		end
	catch Exc ->
		%% NOTE(review): a bare `catch Exc ->` only catches throws; errors
		%% and exits still propagate - confirm this is intended.
		?LOG_ERROR([{event, db_operation_failed}, {op, Op},
				{name, io_lib:format("~p", [Name])},
				{reason, io_lib:format("~p", [Exc])}]),
		{error, failed}
	end.

%% @doc Apply callback for each unique database found in ETS (column family
%% databases will be only called once).
%% The callback will get the database record (#db{}) as an argument.
with_each_db(Callback) ->
	ets:foldl(
		fun(#db{db_handle = Db} = DbRec0, Acc) ->
			case sets:is_element(Db, Acc) of
				true ->
					Acc;
				false ->
					_ = apply(Callback, [DbRec0]),
					sets:add_element(Db, Acc)
			end
		end,
		sets:new(),
		?MODULE
	).

%% Resolve the base log directory: the configured log_dir unless an explicit
%% path was passed to open/1.
get_base_log_dir(LogFilepath) ->
	case LogFilepath of
		not_set ->
			{ok, Config} = arweave_config:get_env(),
			Config#config.log_dir;
		_ ->
			LogFilepath
	end.

test_get_data_dir() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.data_dir.

%%%===================================================================
%%% Tests.
%%%===================================================================

rocksdb_iterator_test_() ->
	{timeout, 300, fun test_rocksdb_iterator/0}.
%% Exercise prefix-based iteration across a close/reconfigure/reopen cycle:
%% write under two adjacent 29-byte prefixes, change the prefix extractor
%% from 28 to 29 bytes, write more data, and assert iteration still works.
test_rocksdb_iterator() ->
	%% Start from a clean slate so earlier runs cannot leak state into this one.
	test_destroy("test_db"),
	DataDir = test_get_data_dir(),
	%% Configure the DB similarly to how it used to be configured before the tested change.
	Opts = [
		{prefix_extractor, {capped_prefix_transform, 28}},
		{optimize_filters_for_hits, true},
		{max_open_files, 1000000}
	],
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "test_db"]),
		cf_descriptors => [{"default", Opts}, {"test", Opts}],
		cf_names => [default, test]}),
	%% Opening the same database a second time must be a no-op (idempotent open).
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "test_db"]),
		cf_descriptors => [{"default", Opts}, {"test", Opts}],
		cf_names => [default, test]}),
	%% Two adjacent prefixes: BiggerPrefix = SmallerPrefix + 1 when both are
	%% read as 232-bit big-endian integers.
	SmallerPrefix = crypto:strong_rand_bytes(29),
	<< O1:232 >> = SmallerPrefix,
	BiggerPrefix = << (O1 + 1):232 >>,
	%% 3-byte random suffixes, de-duplicated through a set.
	%% NOTE(review): lists:split(10, _) below assumes at least 10 unique
	%% suffixes survive de-duplication — overwhelmingly likely with 20 draws
	%% from a 2^24 space, but not strictly guaranteed.
	Suffixes = sets:to_list(sets:from_list([crypto:strong_rand_bytes(3) || _ <- lists:seq(1, 20)])),
	{Suffixes1, Suffixes2} = lists:split(10, Suffixes),
	lists:foreach(
		fun(Suffix) ->
			ok = ar_kv:put(
				test,
				<< SmallerPrefix/binary, Suffix/binary >>,
				crypto:strong_rand_bytes(40 * ?MiB)
			),
			ok = ar_kv:put(
				test,
				<< BiggerPrefix/binary, Suffix/binary >>,
				crypto:strong_rand_bytes(40 * ?MiB)
			)
		end,
		Suffixes1
	),
	test_close(test),
	%% Reopen with the new configuration.
	Opts2 = [
		{block_based_table_options, [
			{cache_index_and_filter_blocks, true},
			{bloom_filter_policy, 10}
		]},
		{prefix_extractor, {capped_prefix_transform, 29}},
		{optimize_filters_for_hits, true},
		{max_open_files, 1000000},
		{write_buffer_size, 256 * ?MiB},
		{target_file_size_base, 256 * ?MiB},
		{max_bytes_for_level_base, 10 * 256 * ?MiB}
	],
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "test_db"]),
		cf_descriptors => [{"default", Opts2}, {"test", Opts2}],
		cf_names => [default, test]}),
	%% Store new data enough for new SST files to be created.
	lists:foreach(
		fun(Suffix) ->
			ok = ar_kv:put(
				test,
				<< SmallerPrefix/binary, Suffix/binary >>,
				crypto:strong_rand_bytes(40 * ?MiB)
			),
			ok = ar_kv:put(
				test,
				<< BiggerPrefix/binary, Suffix/binary >>,
				crypto:strong_rand_bytes(50 * ?MiB)
			)
		end,
		Suffixes2
	),
	assert_iteration(test, SmallerPrefix, BiggerPrefix, Suffixes),
	%% Close the database to make sure the new data is flushed.
	test_close(test),
	%% Reopen under different column-family names and assert iteration still
	%% covers everything written before.
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "test_db"]),
		cf_descriptors => [{"default", Opts2}, {"test", Opts2}],
		cf_names => [default1, test1]}),
	assert_iteration(test1, SmallerPrefix, BiggerPrefix, Suffixes),
	test_close(test1),
	test_destroy("test_db").

delete_range_test_() ->
	{timeout, 300, fun test_delete_range/0}.

%% Exercise ar_kv:delete_range/3 with keys 0..4: the end key is exclusive,
%% out-of-range bounds are tolerated, an empty range is a no-op, and a
%% reversed range is rejected with an error.
test_delete_range() ->
	test_destroy("test_db"),
	DataDir = test_get_data_dir(),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "test_db"]),
		name => test_db}),
	ok = ar_kv:put(test_db, << 0:256 >>, << 0:256 >>),
	ok = ar_kv:put(test_db, << 1:256 >>, << 1:256 >>),
	ok = ar_kv:put(test_db, << 2:256 >>, << 2:256 >>),
	ok = ar_kv:put(test_db, << 3:256 >>, << 3:256 >>),
	ok = ar_kv:put(test_db, << 4:256 >>, << 4:256 >>),
	?assertEqual({ok, << 1:256 >>}, ar_kv:get(test_db, << 1:256 >>)),
	%% Base case: the end key is exclusive, so key 2 survives.
	?assertEqual(ok, ar_kv:delete_range(test_db, << 1:256 >>, << 2:256 >>)),
	?assertEqual({ok, << 0:256 >>}, ar_kv:get(test_db, << 0:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 1:256 >>)),
	?assertEqual({ok, << 2:256 >>}, ar_kv:get(test_db, << 2:256 >>)),
	%% Missing start and missing end: bounds need not exist as keys.
	?assertEqual(ok, ar_kv:delete_range(test_db, << 1:256 >>, << 5:256 >>)),
	?assertEqual({ok, << 0:256 >>}, ar_kv:get(test_db, << 0:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 1:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 2:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 3:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 4:256 >>)),
	%% Empty range (start == end): deletes nothing, succeeds.
	?assertEqual(ok, ar_kv:delete_range(test_db, << 1:256 >>, << 1:256 >>)),
	?assertEqual({ok, << 0:256 >>}, ar_kv:get(test_db, << 0:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 1:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 2:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 3:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 4:256 >>)),
	%% Reversed range (start > end): rejected, nothing changes.
	?assertMatch({error, _}, ar_kv:delete_range(test_db, << 1:256 >>, << 0:256 >>)),
	?assertEqual({ok, << 0:256 >>}, ar_kv:get(test_db, << 0:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 1:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 2:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 3:256 >>)),
	?assertEqual(not_found, ar_kv:get(test_db, << 4:256 >>)),
	test_destroy("test_db").

%% Assert forward (get_next_by_prefix) and backward (get_prev) iteration as
%% well as get_range/delete_range behavior over keys built from the two
%% adjacent 29-byte (232-bit) prefixes and the given 3-byte suffixes.
%% NOTE(review): requires at least two unique suffixes (lists:nth(2, _)).
assert_iteration(Name, SmallerPrefix, BiggerPrefix, Suffixes) ->
	SortedSuffixes = lists:sort(Suffixes),
	SmallestKey = << SmallerPrefix/binary, (lists:nth(1, SortedSuffixes))/binary >>,
	NextSmallestKey = << SmallerPrefix/binary, (lists:nth(2, SortedSuffixes))/binary >>,
	<< SmallestOffset:256 >> = SmallestKey,
	%% Assert forwards and backwards iteration within the same prefix works.
	?assertMatch({ok, SmallestKey, _},
			ar_kv:get_next_by_prefix(Name, 232, 256, SmallestKey)),
	?assertMatch({ok, SmallestKey, _}, ar_kv:get_prev(Name, SmallestKey)),
	?assertMatch({ok, NextSmallestKey, _},
			ar_kv:get_next_by_prefix(Name, 232, 256, << (SmallestOffset + 1):256 >>)),
	<< NextSmallestOffset:256 >> = NextSmallestKey,
	?assertMatch({ok, SmallestKey, _},
			ar_kv:get_prev(Name, << (NextSmallestOffset - 1):256 >>)),
	%% Assert forwards and backwards iteration across different prefixes works.
	SmallerPrefixBiggestKey = << SmallerPrefix/binary, (lists:last(SortedSuffixes))/binary >>,
	BiggerPrefixSmallestKey = << BiggerPrefix/binary, (lists:nth(1, SortedSuffixes))/binary >>,
	<< SmallerPrefixBiggestOffset:256 >> = SmallerPrefixBiggestKey,
	?assertMatch({ok, BiggerPrefixSmallestKey, _},
			ar_kv:get_next_by_prefix(Name, 232, 256,
					<< (SmallerPrefixBiggestOffset + 1):256 >>)),
	<< BiggerPrefixSmallestOffset:256 >> = BiggerPrefixSmallestKey,
	?assertMatch({ok, SmallerPrefixBiggestKey, _},
			ar_kv:get_prev(Name, << (BiggerPrefixSmallestOffset - 1):256 >>)),
	%% The range below spans the prefix boundary and contains exactly three
	%% keys: the biggest smaller-prefix key and the two smallest bigger-prefix
	%% keys.
	BiggerPrefixNextSmallestKey = << BiggerPrefix/binary, (lists:nth(2, SortedSuffixes))/binary >>,
	{ok, Map} = ar_kv:get_range(Name, SmallerPrefixBiggestKey, BiggerPrefixNextSmallestKey),
	?assertEqual(3, map_size(Map)),
	?assert(maps:is_key(SmallerPrefixBiggestKey, Map)),
	?assert(maps:is_key(BiggerPrefixNextSmallestKey, Map)),
	?assert(maps:is_key(BiggerPrefixSmallestKey, Map)),
	%% Delete across the prefix boundary and verify only the two lower keys of
	%% the range are gone (the end key is exclusive).
	ar_kv:delete_range(Name, SmallerPrefixBiggestKey, BiggerPrefixNextSmallestKey),
	?assertEqual(not_found, ar_kv:get(Name, SmallerPrefixBiggestKey)),
	?assertEqual(not_found, ar_kv:get(Name, BiggerPrefixSmallestKey)),
	%% All bigger-prefix keys except the smallest (just deleted) must remain.
	lists:foreach(
		fun(Suffix) ->
			?assertMatch({ok, _}, ar_kv:get(Name, << BiggerPrefix/binary, Suffix/binary >>))
		end,
		lists:sublist(lists:reverse(SortedSuffixes), length(SortedSuffixes) - 1)
	),
	%% All smaller-prefix keys except the biggest (just deleted) must remain.
	lists:foreach(
		fun(Suffix) ->
			?assertMatch({ok, _}, ar_kv:get(Name, << SmallerPrefix/binary, Suffix/binary >>))
		end,
		lists:sublist(SortedSuffixes, length(SortedSuffixes) - 1)
	),
	%% Restore the deleted keys so the caller can re-run the assertions.
	ar_kv:put(Name, SmallerPrefixBiggestKey, crypto:strong_rand_bytes(50 * 1024)),
	ar_kv:put(Name, BiggerPrefixNextSmallestKey, crypto:strong_rand_bytes(50 * 1024)),
	ar_kv:put(Name, BiggerPrefixSmallestKey, crypto:strong_rand_bytes(50 * 1024)).

%% Remove the on-disk RocksDB directory for the named test database, if any.
test_destroy(Name) ->
	RocksDBDir = filename:join(test_get_data_dir(), ?ROCKS_DB_DIR),
	Filename = filename:join(RocksDBDir, Name),
	case filelib:is_dir(Filename) of
		true ->
			rocksdb:destroy(Filename, []);
		false ->
			ok
	end.
%% Flush memtables and sync the WAL before closing so data written shortly
%% before the close survives the reopen that follows in the tests.
test_close(Name) ->
	?WITH_DB(Name, fun(Db) ->
		_ = db_flush(Db),
		_ = wal_sync(Db),
		_ = close(Db)
	end).



================================================
FILE: apps/arweave/src/ar_kv_sup.erl
================================================
-module(ar_kv_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the supervisor, registered locally under the module name.
start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% @doc Create the ar_kv ETS registry, then supervise a single ar_kv worker
%% (one_for_one, at most 5 restarts within 10 seconds).
init([]) ->
	%% Create the table here so its owner is the supervisor: the registry
	%% then survives restarts of the ar_kv worker itself.
	ar_kv:create_ets(),
	{ok, {{one_for_one, 5, 10}, [?CHILD(ar_kv, worker)]}}.



================================================
FILE: apps/arweave/src/ar_localnet.erl
================================================
-module(ar_localnet).

-export([start/0, start/1, submit_snapshot_data/0, mine_one_block/0,
		mine_until_height/1, create_snapshot/0]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("kernel/include/file.hrl").

%% How long (in milliseconds) to wait for the node to join before giving up.
-define(WAIT_UNTIL_JOINED_TIMEOUT, 200_000).
%% Snapshot directory used when start_from_state is not configured.
-define(DEFAULT_SNAPSHOT_DIR, "localnet_snapshot").

%% How many recent blocks to search for when reading state; overridable at
%% compile time via START_FROM_STATE_SEARCH_DEPTH.
-ifndef(START_FROM_STATE_SEARCH_DEPTH).
-define(LOCALNET_START_FROM_STATE_SEARCH_DEPTH, 100).
-else.
-define(LOCALNET_START_FROM_STATE_SEARCH_DEPTH, ?START_FROM_STATE_SEARCH_DEPTH).
-endif.

-define(LOCALNET_DATA_DIR, ".tmp/data_localnet_main").

%% @doc Start a node from the localnet_snapshot directory.
%% Disable mining (can be triggered by request).
%% Configure a single storage module with the first 21 MiB of partition 0
%% (enough to cover the seed data with some headroom).
%% Seed the storage module with data.
start() ->
	start(#config{ data_dir = ?LOCALNET_DATA_DIR }).
%% @doc Start from the given snapshot directory (string or atom), or from a
%% fully populated #config{}.
start(SnapshotDir) when is_list(SnapshotDir) ->
	start(#config{ data_dir = ?LOCALNET_DATA_DIR, start_from_state = SnapshotDir });
start(SnapshotDir) when is_atom(SnapshotDir) ->
	start(#config{ data_dir = ?LOCALNET_DATA_DIR,
			start_from_state = atom_to_list(SnapshotDir) });
start(Config) ->
	SnapshotDir = snapshot_dir(Config),
	DataDir = Config#config.data_dir,
	arweave_config:start(),
	ok = filelib:ensure_dir(DataDir ++ "/"),
	%% Use the configured mining address, or create a fresh ECDSA keyfile in
	%% the data dir and mine to its address.
	MiningAddr = case Config#config.mining_addr of
		not_set ->
			ar_wallet:to_address(ar_wallet:new_keyfile(
					{?ECDSA_SIGN_ALG, secp256k1}, wallet_address, DataDir));
		Addr ->
			Addr
	end,
	StorageModules = case Config#config.storage_modules of
		[] ->
			%% Default: one 21 MiB slice of partition 0 — enough to hold the
			%% seed data with some headroom.
			[{21 * ?MiB, 0, {replica_2_9, MiningAddr}}];
		ConfiguredStorageModules ->
			ConfiguredStorageModules
	end,
	%% Force a standalone, non-mining, non-syncing configuration: no peers,
	%% no sync jobs, auto-join against the local snapshot state.
	ok = arweave_config:set_env(Config#config{
		mining_addr = MiningAddr,
		storage_modules = StorageModules,
		start_from_latest_state = true,
		start_from_state = SnapshotDir,
		disk_cache_size = 128,
		max_disk_pool_buffer_mb = 128,
		max_disk_pool_data_root_buffer_mb = 128,
		auto_join = true,
		peers = [],
		cm_exit_peer = not_set,
		cm_peers = [],
		local_peers = [],
		mine = false,
		disk_space_check_frequency = 1000,
		sync_jobs = 0,
		disk_pool_jobs = 1,
		header_sync_jobs = 0,
		debug = true
	}),
	ar:start_dependencies(),
	case wait_until_joined() of
		true ->
			%% Seed chunk data from the snapshot and print a startup summary.
			submit_snapshot_data(),
			io:format("~n~nLocalnet node started~n"),
			io:format(" Snapshot: ~s~n", [SnapshotDir]),
			io:format(" Data dir: ~s~n", [DataDir]),
			io:format(" Mining address: ~s~n", [ar_util:encode(MiningAddr)]),
			io:format(" Storage modules:~n"),
			lists:foreach(fun({Size, Partition, Packing}) ->
				io:format(" - partition ~B, size ~B MB, packing ~s~n",
						[Partition, Size div (1_000_000),
						ar_serialize:encode_packing(Packing, false)])
			end, StorageModules),
			io:format("~nMining is disabled. Call ar_localnet:mine_one_block/0 to mine a block.~n"
					"Call ar_localnet:mine_until_height/1 to mine until the given height.~n~n");
		{error, _} = Error ->
			io:format(
				"Localnet startup failed while waiting for node to join (~B ms): ~p~n",
				[?WAIT_UNTIL_JOINED_TIMEOUT, Error]
			),
			ar:stop_dependencies(),
			Error
	end.

%% @doc Mine one block.
mine_one_block() ->
	ar_node_worker:mine_one_block().

%% @doc Mine blocks until the given height is reached.
mine_until_height(Height) ->
	ar_node_worker:mine_until_height(Height).

%% @doc Create a reproducible snapshot in localnet_snapshot_[mainnet_starting_height]_[localnet_end_height].
%% The node must be joined; the configured snapshot's databases are opened
%% read-only for the duration and closed afterwards regardless of the result.
create_snapshot() ->
	{ok, Config} = arweave_config:get_env(),
	case ar_node:is_joined() of
		false ->
			{error, node_not_joined};
		true ->
			SnapshotDir = snapshot_dir(Config),
			case open_snapshot_databases(SnapshotDir) of
				{ok, CloseSnapshotDbs} ->
					SnapshotResult = case ar_storage:read_block_index(SnapshotDir) of
						not_found ->
							{error, snapshot_block_index_not_found};
						BI ->
							%% The snapshot's block index length gives the
							%% mainnet height the localnet started from.
							MainnetStartHeight = length(BI) - 1,
							LocalnetEndHeight = ar_node:get_height(),
							NewSnapshotDir = snapshot_dir_name(MainnetStartHeight,
									LocalnetEndHeight),
							create_snapshot(SnapshotDir, Config#config.data_dir,
									NewSnapshotDir)
					end,
					CloseSnapshotDbs(),
					SnapshotResult;
				{error, _} = Error ->
					Error
			end
	end.

%% @doc Poll every 100ms until the node has joined the network, or until
%% WAIT_UNTIL_JOINED_TIMEOUT ms have elapsed.
wait_until_joined() ->
	ar_util:do_until(
		fun() -> ar_node:is_joined() end,
		100,
		?WAIT_UNTIL_JOINED_TIMEOUT
	).

%% @doc Read recent block heads, tx headers, block time history, and account tree data
%% from the snapshot directory and store them in the data directory. Does not do anything
%% if recent blocks are already available locally.
store_snapshot_data(SnapshotDir) ->
	case ar_node:get_block_index() of
		[] ->
			{error, empty_block_index};
		BI ->
			Height = length(BI) - 1,
			SearchDepth = min(Height, ?LOCALNET_START_FROM_STATE_SEARCH_DEPTH),
			io:format("Copying snapshot data into data_dir from ~s~n", [SnapshotDir]),
			io:format(" Height: ~B, search depth: ~B~n", [Height, SearchDepth]),
			%% If the recent blocks are already in the local databases, there
			%% is nothing to backfill.
			case read_recent_blocks_local(BI, SearchDepth) of
				{Skipped, _Blocks} ->
					io:format(" Local blocks available (skipped: ~B). Skip backfill.~n", [Skipped]),
					ok;
				not_found ->
					store_snapshot_data3(BI, Height, SearchDepth, SnapshotDir)
			end
	end.

%% Backfill from the snapshot: read the recent blocks, then copy blocks, tx
%% headers, reward history, block time history, and finally the wallet list
%% into the node's databases. Any error short-circuits the chain.
store_snapshot_data3(BI, Height, SearchDepth, SnapshotDir) ->
	case read_recent_blocks_from_snapshot(BI, SearchDepth, SnapshotDir) of
		not_found ->
			{error, block_headers_not_found};
		{Skipped, Blocks} ->
			io:format(" Recent blocks: ~B (skipped: ~B)~n", [length(Blocks), Skipped]),
			%% Drop the Skipped newest entries so the histories are computed
			%% from the first block actually read.
			BI2 = lists:nthtail(Skipped, BI),
			Height2 = Height - Skipped,
			RewardHistoryBI = ar_rewards:interim_reward_history_bi(Height2, BI2),
			BlockTimeHistoryBI = block_time_history_bi(BI2),
			io:format(" Reward history: ~B entries~n", [length(RewardHistoryBI)]),
			io:format(" Block time history: ~B entries~n", [length(BlockTimeHistoryBI)]),
			case store_snapshot_blocks_from_list(Blocks) of
				{ok, TxIds} ->
					io:format(" Tx headers to copy: ~B~n", [length(TxIds)]),
					case store_snapshot_tx_headers(TxIds, SnapshotDir) of
						ok ->
							case store_snapshot_history_entries(
									RewardHistoryBI,
									start_from_state_reward_history_db,
									reward_history_db,
									reward_history
							) of
								ok ->
									case store_snapshot_history_entries(
											BlockTimeHistoryBI,
											start_from_state_block_time_history_db,
											block_time_history_db,
											block_time_history
									) of
										ok ->
											store_snapshot_wallet_list(Blocks, SnapshotDir, SearchDepth);
										{error, _} = Error ->
											Error
									end;
								{error, _} = Error ->
									Error
							end;
						{error, _} = Error ->
							Error
					end;
				{error, _} = Error ->
					Error
			end
	end.

%% @doc Read special hard-coded seed transactions from the snapshot directory and submit their data
%% chunks to the node's storage.
%% Return {TotalBigChunkBytes, TotalSmallChunkBytes}.
submit_snapshot_data() ->
	{ok, Config} = arweave_config:get_env(),
	SnapshotDir = snapshot_dir(Config),
	io:format("Seeding data from snapshot...~n"),
	SnapshotTXs = snapshot_txs(),
	%% Process the seed blocks in ascending weave-offset order.
	BlockStarts = lists:sort(maps:keys(SnapshotTXs)),
	{TotalBigChunk, TotalSmallChunk} = lists:foldl(
		fun(BlockStart, {AccBigChunk, AccSmallChunk}) ->
			TXIDs = maps:get(BlockStart, SnapshotTXs),
			%% Each seed tx lives at seed_txs/<txid>.json inside the snapshot;
			%% a missing file aborts seeding with {missing_tx_file, TXID}.
			TXs = lists:map(fun(TXID) ->
				Filename = filename:join([SnapshotDir, "seed_txs",
						binary_to_list(TXID) ++ ".json"]),
				case file:read_file(Filename) of
					{ok, JSON} ->
						Map = jiffy:decode(JSON, [return_maps]),
						#tx{
							id = ar_util:decode(TXID),
							data_size = binary_to_integer(maps:get(<<"data_size">>, Map, <<"0">>)),
							data = ar_util:decode(maps:get(<<"data">>, Map, <<>>)),
							data_root = ar_util:decode(maps:get(<<"data_root">>, Map, <<>>)),
							format = maps:get(<<"format">>, Map, 1)
						};
					{error, Reason} ->
						io:format("Failed to read ~s: ~p~n", [Filename, Reason]),
						error({missing_tx_file, TXID})
				end
			end, TXIDs),
			{BlockBigChunk, BlockSmallChunk} = submit_block_data(BlockStart, TXs),
			{AccBigChunk + BlockBigChunk, AccSmallChunk + BlockSmallChunk}
		end,
		{0, 0},
		BlockStarts
	),
	io:format("Seeding completed. Total size: ~B bytes (~B big chunks, ~B small chunks).~n",
			[TotalBigChunk + TotalSmallChunk, TotalBigChunk, TotalSmallChunk]),
	{TotalBigChunk, TotalSmallChunk}.

%% @doc For a given block start offset and its transactions, generate Merkle
%% paths and data roots, register them with ar_data_root_sync, then write the
%% raw transaction data to storage.
%% Return {TotalBigChunkBytes, TotalSmallChunkBytes}.
submit_block_data(BlockStart, TXs) ->
	{BlockStart, BlockEnd, TXRoot, Height} =
			ar_block_index:get_block_bounds_with_height(BlockStart),
	%% Order the txs the same way the block's tx root computation did.
	SortedTXs = lists:sort(TXs),
	SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(SortedTXs, Height),
	%% Drop padding entries — they have no backing transaction.
	SizeTaggedTXsNoPadding = [T || {{ID, _}, _} = T <- SizeTaggedTXs, ID /= padding],
	%% Adopt the data roots computed by the size-tagging step; the non-padding
	%% entries are positionally aligned with SortedTXs.
	TXs2 = [TX#tx{ data_root = DR }
			|| {TX, {{_ID, DR}, _End}} <- lists:zip(SortedTXs, SizeTaggedTXsNoPadding)],
	%% Build the block's Merkle tree over all entries, including padding.
	{_, Tree} = ar_merkle:generate_tree([{DR, End} || {{_, DR}, End} <- SizeTaggedTXs]),
	%% One entry per non-empty tx:
	%% {DataRoot, Size, AbsoluteStartOffset, TXPath}, where the absolute start
	%% offset is the block start plus the tx's in-block start offset.
	Entries = [
		{DR, TX#tx.data_size, BlockStart + End - TX#tx.data_size,
				ar_merkle:generate_path(TXRoot, End - 1, Tree)}
		|| {TX, {{_ID, DR}, End}} <- lists:zip(TXs2, SizeTaggedTXsNoPadding),
				TX#tx.data_size > 0
	],
	ar_data_root_sync:store_data_roots_sync(BlockStart, BlockEnd, TXRoot, Entries),
	%% Write the raw tx data; failures are reported but do not abort seeding.
	lists:foreach(fun(TX) ->
		case TX#tx.data_size > 0 of
			true ->
				Data = TX#tx.data,
				TXID = TX#tx.id,
				case ar_storage:write_tx_data(TX#tx.data_root, Data, TXID) of
					ok ->
						ok;
					{error, Errors} ->
						io:format(" Failed to write data for tx ~s: ~p~n",
								[binary_to_list(ar_util:encode(TXID)), Errors])
				end;
			false ->
				ok
		end
	end, TXs2),
	%% Tally, per tx, the bytes in whole ?DATA_CHUNK_SIZE chunks versus the
	%% trailing partial chunk.
	lists:foldl(
		fun(TX, {AccBigChunk, AccSmallChunk}) ->
			DataSize = TX#tx.data_size,
			BigChunkSize = (DataSize div ?DATA_CHUNK_SIZE) * ?DATA_CHUNK_SIZE,
			SmallChunkSize = DataSize rem ?DATA_CHUNK_SIZE,
			{AccBigChunk + BigChunkSize, AccSmallChunk + SmallChunkSize}
		end,
		{0, 0},
		TXs2
	).

%% @doc Return the hardcoded map of {BlockStartOffset => [TXID]} for the seed
%% data included in the localnet snapshot. These are real mainnet transactions
%% whose data is bundled in the snapshot's seed_txs/ directory so the localnet
%% node has chunk data to mine with.
%% Keys are absolute weave start offsets; values are base64url tx IDs.
%% The data is static by design: changing it invalidates existing snapshots.
snapshot_txs() ->
	#{
		0 => [<<"t81tluHdoePSxjq7qG-6TMqBKmQLYr5gupmfvW25Y_o">>],
		599058 => [<<"QAQ-134At0mSPVrwBzTTUalyL_zqE_dMR_WggkZvF5E">>],
		1039029 => [<<"-B7wF8TF5AodemKM2UjeFySwA_-Q12Ai8z9FSqgIEyA">>,
				<<"vYnzbcbBQbPQB7GKrXzPlz1MuT9cfnNI_NBVajaTnPg">>,
				<<"Lt7WJclVu4iYHqGHIYIBia3ABMnvmd5cW4ELIzUTfPE">>],
		1074629 => [<<"JfTiLBj5Gxr1v7JwoNf9-7sRAiLOrg1AZ6kqwSkEpTc">>,
				<<"YMnQwrWWVRmkMs0B41lz-VdixskatlPcY7j4r0iSLbQ">>,
				<<"dMTZgKHD-NkP3iM5RjFNhppiwfTlYd-Imi9aA6IK0So">>],
		1113021 => [<<"qHvSpQXYh9RZmXIoIOexmDs0iQgjCubl6KSsgg7cDz8">>],
		1179126 => [<<"8TiSScQCv06oS9b8Tt5WBnf7sUVgzPAFGJ3Lq2bt8rY">>],
		1621676 => [<<"U_1PPd40n2grpuhkMJcMXPVuJhtaQoUWei63iN2rS7o">>],
		2057001 => [<<"m1DnUoXf7wMtIGkkDZAALobw0GbGehfEMX_jNLvs3i8">>],
		2507233 => [<<"JUf6alhhrfuL22XuQ0yrZ6_xBFBIQqi85wRxv2nUCMs">>],
		3065027 => [<<"EDt8sO0AWKJyNeUxd-U6ihy0rgRKUPjpfRGarEHlOCs">>],
		3103419 => [<<"UH3C65dDo62rp5ciK3XzyhufE71xorL7r7MWVwdhavk">>],
		3551503 => [<<"FbeSRhJR00VPygimhm47VwirSeBATnlf240hv4a2G4E">>],
		3853182 => [<<"ViCjDXb4IEZcXBtlYvTm3HCB6cf4gDbrXCCdvVVgB1g">>],
		3898843 => [<<"Jo3rf0JPJR2kCHBqZG71xouWzuOSY-MXJufpfzFl7sE">>,
				<<"Tg9QZvUPJoAZKRkPhPgQrgnlTY6s9UxRSQaMw6shhOU">>],
		3933160 => [<<"uNiZ8TfAQ8GWjtbqhVi90qO3U5dl9afmKE1-KbHQYM4">>],
		4748637 => [<<"ujON59jsellR3M8hq9unBPISOwRgEVUogdi3FG_pVMk">>],
		4913142 => [<<"U_UF7e-hOd5uLIj10fYZVxQ5mXyZUxvMxhWWgAMaj0s">>],
		4984947 => [<<"_AiF52l4uqTkKOVpQw9hr6l6FCIdWs8PCFtFxEBOopU">>],
		5052944 => [<<"Ace-njSprwHMwZaW5nuD0y1lKFoaafU3T8d7PLBeEIA">>],
		5168327 => [<<"UbW68tRQtThl9ah8tJb-X_af5M8FHYARiGZFiPGk_90">>],
		5339549 => [<<"JDS1sGkpC0ua7UGfpLEJSF-jXUnjAs2fa5V7y6rccdY">>,
				<<"hMfNPSlINViUDVnor18GgPs0Ut0i9XY7dwM9MVOL-2I">>],
		5530545 => [<<"MCCCpl9AGNAzy3WvM5lniJ88iC3-8NPiiWIsxcLZZxQ">>],
		5573275 => [<<"F3c9tsVvmCiFNxK7hVEzROraVm477QdyQ8t6afBs5E4">>],
		5716883 => [<<"uOqsnEjVGQCbtrKI7QbHYxbbLUdCKC-792SgZr5KUKM">>],
		5951779 => [<<"Ie-fxxzdBweiA0N1ZbzUqXhNI310uDUmaBc3ajlV6YY">>,
				<<"ycjvsn3A9cUMjnbDaSUpf1HRQd4duP9AL1YVwSjwuAQ">>],
		6012107 => [<<"1VknqhhAXRQ6hzeZL-IMVBznTFCdiWcwlXhzpLKS8Zk">>],
		6049618 => [<<"wntmnG9yRP9aoioRDILKkmSZqdemR-XDCIKJS-wpRYw">>,
				<<"WJTACYoRG89VIpjzsIZLIy93U7HoC4OJyLy6WAlqv-0">>],
		6095199 => [<<"EayO1EsmOinnbi-NVa2V7cVraoI0TZ6xE5-sNU7fc94">>,
				<<"8CPVZq-zPdMQ2to1P91vl6XBXyL7sLH8-vNclnOCug8">>,
				<<"QyQL1TYdwmguUIBjTV-shWqrwS6AwxhZ6lf7Rx-vxH0">>,
				<<"vvPtX1U0EZS9PMsQBVk3mjD9yS6EHIt0FXdKf2dOELw">>],
		6441166 => [<<"MklsZ_cDz470C40UGZUJoVfMeVA89-r7SHxuomBeCPM">>],
		6441564 => [<<"Hs-Yj4ZE9ACfQIjzS8E-qvxSkQALsCIDHwcLEMnlz90">>],
		6441962 => [<<"I4ifBnOF6OQFautfisGFTVIn2NsrvqrdnQ-O7JOMouE">>],
		6808479 => [<<"7fat_nqzDJCTfJMqyEpOcavt1cZNM-tfSzASJd0wrHo">>,
				<<"YcTBCg3mLRFByb1cnjrq9DzEBnnOT9jQtfYEE34QZ1M">>],
		6820776 => [<<"Re-7lkSGlYP4SFddz0rrXIF0r4MVYZuagjkVpEm79bY">>,
				<<"hXNDNwQ6zA7aHAqvfBj_az9CovV37bJywdgPdb_ooIA">>],
		6906339 => [<<"qDEFXj8hSgOuuqWM52y6pbUX1cyp7bS4qItfctgtVx8">>],
		6992421 => [<<"9hX3cS3Vjr6vAqJW3WtPN665NpLJegcxyaDZO2esElM">>],
		7145431 => [<<"kVNsLH0kpIkFnBBGWxoIajVLSpvzmsKHpsATPAcR86Y">>],
		7177405 => [<<"hB1Hj0mfuh_x3ijhqkw1s3wdCh8qdPz_IMs0MPraVCk">>],
		7193892 => [<<"ivWTdg5M9XqjP-Iu4C97r3qZQhotJgfF17g__7EH7VM">>],
		7308719 => [<<"lxtOUAEj-E1jb6J8uGCRlRgJDHJyFOu0O73jQHnAhpg">>,
				<<"ntnx85KcYZ_ZhR6dL2A_p8foCmStgD-69ODoOUdipiA">>,
				<<"yo8VtPVXWBpTqLbLL-ZeOmZTW2HTqTzsf9RPzgHM-bQ">>,
				<<"_gduN41u7Xxac_Gm3pBQI3icoKhOfiRV2TKhDnlyakU">>,
				<<"5iK4mPnFqGdUxpiZmGtTbj7xoSC2una7sjsbUyZkOmM">>,
				<<"iWUFDucATDE8gjbsL-9KpOIW9l8Ipsh1wliv4e05xhg">>],
		7308855 => [<<"oiYeEvWqOkaHzCSunznZ09U_tuHqP1UyZkRrKYHgNBw">>],
		7465544 => [<<"xK4fFG-PbnQx6EGmmj1A0JVWQ9Bg7q-FncaU7hHk9ds">>],
		7470098 => [<<"eGYHUFl46laNa8v_WjdadvCkIErWqmx0hoia7PCSmSw">>],
		7516201 => [<<"2dxNaIAvkAuL_N2qpTGSl7d7rU3Hu7d4l4IkYb9jgDU">>],
		7737695 => [<<"jStDc8gP5lyHVSFIJiT_2RrXhT26GpAhNItDEje07_Y">>],
		8693975 => [<<"_BN_07s59sawk5e9YcjHTX2qtYX9q7nCBYrlSWXoEsc">>],
		8721209 => [<<"4tlIV1x4YRWtNMut11ox9SS-lWt3xIzcXnrBBbNxGYs">>],
		8740888 => [<<"p4oyXU5C3T0ZycNhEwBZ0MbpV0j3voWV4mr__3fhOek">>],
		8770977 => [<<"cTmKy32Fbmlybl-WtbyuVFNhO11Efr4e_rGbzwAkPbs">>,
				<<"zNae10gPNkFt5aRVaSL2eSgxZiRDG79B9oDIeYqyzDY">>],
		9155991 => [<<"8qtH9T9jgYLHH-xi39w1OCNJykqew1O5qzrDkhAxN0U">>],
		9489880 => [<<"vDtQzZ9jl6r7yzczhoKhvzekCQYx-qskwYdzQO92eWo">>],
		9525187 => [<<"MQD8-8yIZwNC4A006TC1FVZSyCDHeIAN6YpDbTiX2RU">>],
		9840597 => [<<"1QGjyW1AEFlrFAs6VtUcmwOVOEZJjxaBR_z61W9mftI">>],
		9870742 => [<<"K0w8hOO1oCu4sQipWDQGyEFvn6kAXO-M93neMZmRoUc">>,
				<<"HoEZ6sK46bzTg4Jzrfy1kHFzkFQgI2UMm9pm0qJS3as">>],
		10104161 => [<<"rTaanqa6Z5KxtBV4Kj2Fu2KKqAWlstE0JeUbZ3AuN3o">>],
		10324897 => [<<"rAARxLc7tOdjUXEdNmSpOtsJIAw0XS229YHO1KOeUqI">>],
		10337196 => [<<"CCH2h2MzMP7WMh0Xf3GYL7zZDbU7E4CZPJWngp1qmDc">>],
		10411070 => [<<"xaB3eS6qbtKSrfFACMcYpgxWRtaJfT1kmOVpyaE45tI">>,
				<<"Hv0Q5APV6ARfDXDpxI-07R1YFSJAQpxTFh1Z8_nCk3U">>,
				<<"xCUsF5aatMdiiUAkGjg29_TiQGKqXpbzoMsB0yI-Dd8">>,
				<<"TNj-jk-KpKzz84xb1SRiKqyp8LNBnONxA9SIXs3XU7k">>],
		10766840 => [<<"AoCuo7S7ewDIqhYheBX6AjShrbyTgIv6Fp1AwQgmGqg">>],
		11188140 => [<<"sEw-yqeADuF0n_M6jTPLrOgH3coalIQHYPLrwM87nmo">>],
		11386662 => [<<"tn3FQGSVFt_TE5nyQNpuf_gnHdaWF85hZg1iE5hPQSE">>],
		11392406 => [<<"WwgngUwH7mXX15tdbfcjG_9gX2t8N8wbbfW2N34b3dA">>],
		11392470 => [<<"3DSCNJ5H9Hpyy7auT9qG5vom9jHBrCgjs48w_R6iSJg">>],
		11490753 => [<<"Zu9CSLWidXEnbSAQVuXGk62eMrVAGQb4qHmrtQrOQIQ">>],
		12665298 => [<<"Yk_dta-f75GShvyUvXq132pohaNpiQgerfIKJA0vdCw">>],
		13324206 => [<<"goAmthhGPdbYUqbAymyG_MjBUWVdS9OBm78mOoiITHo">>],
		13489960 => [<<"RJzScDd1IYIVaVOMo8zV2sXaGE4ZtKxwO2ONPFK-ou4">>],
		13543865 => [<<"ehTWq16I6ixhFOVkpTKi7s4jgYjNzGJ5CoJW3xjHDTE">>],
		13577296 => [<<"D29DVKVYAe74sAj9NBQ351rI6SseWZ5MMsSedGtydS8">>,
				<<"q8aw85uHTIPxuXcv2Awts4JVVHEMCl7J-61WfnvbYuQ">>],
		13583145 => [<<"4QcodvSlgZnuz5uWGmBARsGUJ5XaYORIO5jYM1dTucI">>],
		14080582 => [<<"clMyhm_qgwUJq68xb8Yf9EEaN3F7jgdqgKnKgjVRom8">>],
		14080694 => [<<"uoTzfoaN81h2_JyFkrvXTLFMnoSlWiuc9Yu1CmsFkH0">>],
		14228100 => [<<"NBjbIMFIdd6jFhSZ20izEke9Ju8jMuvYl8O4bqe4wC4">>],
		14262305 => [<<"vFP1U-4lk3GypDZFceLvRXjoadcB2FRKrcNQf9WjzpQ">>],
		14714324 => [<<"HOMVwtocaJIRPdCeKgzorJZJq1jw_lVGz0pQ3POj7No">>],
		14714340 => [<<"IqJf6iISeiEj3oof9491-jQX4drDZ92VoFuZqNmoixk">>],
		15161433 => [<<"d8CQoDBSrekoGZXqTatc7Y5JkHtNviX1D3JD-fxFDmU">>],
		15337962 => [<<"U2DZlRhnzhZrC7GsVNX0TxnXbHh03P3g-cU4fkHpiXA">>],
		15355055 => [<<"PgxqlgdluUGnmGCal3dgB6PYCd5S7FtBpI0zKDc8-AY">>],
		15355071 => [<<"XrtNbxWFUGlP-SYqQm8aYawQJU6H7CSyHpRZM1iLdKg">>],
		15355087 => [<<"rcc-B4OWqf0dbVY7Eq6q3pRDHLUjJ8tix8UeLQ4D68w">>],
		15355103 => [<<"0KMeq830vwvxUUM7RLCwE0ve4i0h_XHugbUTCkPNH-M">>],
		15359953 => [<<"IeEkQUBq3aE2CSbCF2Bk126lLaLZEYjUPJ_IO601tZg">>],
		15365216 => [<<"7M4KyVB4Wr-Le3Knb7JExgnsXTtG7718JIlhVBNstlE">>],
		15366225 => [<<"A5oMEDa7ZEm1kjPlXpwjuZd40rqP6eo3GobNGQY4HlY">>],
		15435544 => [<<"R5utplMYRQsJwA9Y63cL3Na4mXtYzE4gWG6g6zwgEQE">>],
		15762143 => [<<"lF7NSIz6CNf8WsMNQl8It8HbJem3MAllokozblLdU5A">>],
		15864737 => [<<"sMF6pWIkJFygBbR2IS10liEsjsLAMDja_E9_yUvUgeI">>],
		15905630 => [<<"D_3jwPKLfcTpWcrDV1Q7k3D4sMtyfw7vd45D2C9pUNA">>],
		15915756 => [<<"VL10zUkfmLz5eRxQsZi0G5wsfo8mvyN3p82updP17D4">>],
		16002235 => [<<"B_F4zIV1I5DXM-lR-Ko1tVUTTSmLCOYR7PoY8V8wFas">>],
		16004299 => [<<"FIrCkHY8jVkXcIkWYbMpuQSRYxavkOQ3wtUZPwMS1hM">>],
		16017728 => [<<"bhEMgsj4Yf5tdCDlwK9KpHmsgVLAsBDPOLtYeUDLw0M">>],
		16106226 => [<<"oNZMr_dB-L40nSUj6Fc19-FGteHQu7ZaRZu9_mgM1BI">>],
		16114723 => [<<"TMjINkrJIS3kbGu8bmcVt_34TaFN8lINFQPR_YGzHss">>,
				<<"ks0ODNqrNY4CCDxJcrgRY324WykCeTiSH4Tmdi30I2E">>],
		16119806 => [<<"rY4cJeAtYkg3bnTdqk4Vb0ojEcfS76L4B-iqyvQZ2VA">>],
		16169319 => [<<"ldoaD2NbG9VRhLOXddM1ypoAU3W5gR_zabUWZa4r6lM">>],
		16197187 => [<<"hPnpcoVcfRdkyUyhYSFNhsEcz7nQU0UU-fPSiRalDvw">>],
		16197716 => [<<"JeP9HaxmjN-TcbCkhKDIQejkGdKTlOgp68O5cy_2GRc">>],
		16199303 => [<<"JNCYRy7XYR_20vvXEAwpT43ovKB23np9yE9cqQfsIJk">>,
				<<"U4o0STLxwOEf42F4DF22ooOoA5Ykdp5j_D1io-4w1lc">>],
		16376488 => [<<"jOFeroI0Oz4TWcOx8mgv4iOZLv6ncbRXFRtJfqS4Pq0">>,
				<<"o9ArU5IxydvpJo2iiPI-p4EGBwlpBlyFIfbnz8Qrg6c">>],
		16455497 => [<<"y9wJkLq6Q0hKSDD67ilFqtMMatw9qpsKM9W2uy2Rfjc">>],
		16560237 => [<<"DlRct3GdPx7oYi3MSdmv16CgGWqhLJjbrKcIfU0E48I">>],
		16985772 => [<<"MGDpPk3LsexVpFBF43-FIIvc0vyeEDroYcIONJ6abd0">>],
		17081857 => [<<"fAnOUj-jmlzPMtIN90ZvowG9VUmBtD36MZ8-tRP1Ut4">>],
		17251267 => [<<"bcbIZq2gy8ivQiUlEch7tjNoCcUMTTLhInMlj9P2P88">>,
				<<"hxyn3yZ7-LCgKqfkCljyM7Hq7HJnmPnEKaXoybXJjHo">>],
		17334749 => [<<"hQvPcHPcBhyxv7GPx-E3bZWiNBhnCpFIDwWa3XBcYEU">>],
		17335147 => [<<"BFfNP1eCeYIkLiWWAVvHNLzk1N2pxkOChFzQbdv1IiA">>],
		17416549 => [<<"Cdcx7-UZJN324I9L47rrph9dIVy8RwfJa9mY7cJp9gk">>,
				<<"Hiu5cti9FefwcvT6xRCIoADUMkuDEm_6pZo92CK3fiw">>],
		17416947 => [<<"oO7raEVlJC6KhfK-UbNuppzbYPGdKWbh1e6rOymd_-o">>],
		17455331 => [<<"ojgJyXT8qwRXj1hOVx2gbeJDT0xEOIye0o9EbfU2LRM">>],
		17670889 => [<<"JDG-HBsrHGDodot2clC3nNkRKV5cvuhRWZjCwVFHG_Q">>],
		17750387 => [<<"G6JD1n-FXMSyTSryo0HoX7L3i7e4KEFK_ekDMEn9Bcg">>],
		17785886 => [<<"NixeAD5Y_8sQfcrMBWkODQuoXgJouUBmQmQzwTzlaKU">>],
		18017285 => [<<"P5KQo3QSWLzTLWkq3wgJlii11CEUSKMG_O2NMN6y_8c">>],
		18199428 => [<<"zavm_CqSq0KuWfc-E0JccEyrrQzjigxt7yuW1ceYjE0">>],
		18248736 => [<<"OGA55Jyg2c-Jhkx5zDNyiDvbFZiRXF0S_JESMhWAWcs">>],
		18834003 => [<<"kLP-8ILxdLSAQsrC6IwvfqQL6Loq2Q6lqOzwrnb6QoE">>],
		19175334 => [<<"1fzKf0Ygc-z3ejpZ1ZLOiNBYDRzViGRdPLtUqRS1nKY">>],
		19774875 => [<<"fr3nkF8AHXTcq9bT_b7x2X7Mun2A--Ssb7eyoKgQEwI">>],
		20029848 => [<<"_KI9ocPARF5JjaDPIbtpqw2hj_qRonw-AERjWOs5ZYM">>],
		20076163 => [<<"n1GVITzrvCF95Vz7l6hH7fdYzebDDAJav5z4-9C7lB4">>],
		20250499 => [<<"p0MVPvnv_lkWwfhSuSCgQ3NUj83shBffAx1NKPn4oy8">>],
		20424523 => [<<"CQv9OVOCzntq2DRqNJ9j_WnWPcsniyGRXpt4i_a8Iy0">>],
		20599699 => [<<"35wYULjhQBiTFh9u-PJz6ki0v7Zi1whk_AhowUt99Ac">>]
	}.

%% @doc Return the configured snapshot directory. Fall back to
%% DEFAULT_SNAPSHOT_DIR ("localnet_snapshot") when start_from_state is not set.
snapshot_dir(Config) ->
	case Config#config.start_from_state of
		not_set ->
			?DEFAULT_SNAPSHOT_DIR;
		Dir ->
			Dir
	end.

%% @doc Generate a snapshot directory name encoding both the mainnet starting
%% height and the localnet ending height, e.g.
%% "localnet_snapshot_1500000_1500050".
snapshot_dir_name(MainnetStartHeight, LocalnetEndHeight) ->
	lists:flatten(
		io_lib:format("localnet_snapshot_~B_~B", [MainnetStartHeight, LocalnetEndHeight])
	).

%% @doc Open the read-only "start-from-state" databases for the given snapshot
%% directory. Return {ok, CloseFun} on success, where CloseFun/0 closes them.
open_snapshot_databases(SnapshotDir) ->
	case ar_storage:open_start_from_state_databases(SnapshotDir) of
		ok ->
			{ok, fun ar_storage:close_start_from_state_databases/0};
		{error, _} = Error ->
			Error
	end.

%% @doc Create a new snapshot directory: ensure the target directory exists,
%% backfill any snapshot data missing from the node databases, copy the
%% auxiliary state directories, then build and verify the snapshot RocksDBs.
%% Return {ok, NewSnapshotDir} or {error, Reason}.
create_snapshot(SnapshotDir, DataDir, NewSnapshotDir) ->
	case ensure_snapshot_dir(NewSnapshotDir) of
		ok ->
			case maybe_backfill_snapshot_data(SnapshotDir) of
				ok ->
					%% Auxiliary state; any failure here crashes via badmatch.
					ok = copy_from_dir(DataDir, SnapshotDir, NewSnapshotDir, "wallets"),
					ok = copy_from_dir(DataDir, SnapshotDir, NewSnapshotDir, "ar_tx_blacklist"),
					ok = copy_from_dir(DataDir, SnapshotDir, NewSnapshotDir, "data_sync_state"),
					ok = copy_from_dir(DataDir, SnapshotDir, NewSnapshotDir, "header_sync_state"),
					ok = copy_from_dir(DataDir, SnapshotDir, NewSnapshotDir, "mempool"),
					ok = copy_from_dir(DataDir, SnapshotDir, NewSnapshotDir, "peers"),
					%% The seed transactions must come from the old snapshot.
					ok = copy_required_dir(SnapshotDir, NewSnapshotDir, "seed_txs"),
					case create_snapshot_rocksdb(NewSnapshotDir) of
						ok ->
							io:format("Snapshot created: ~s~n", [NewSnapshotDir]),
							{ok, NewSnapshotDir};
						{error, _} = Error ->
							Error
					end;
				{error, _} = Error ->
					Error
			end;
		{error, _} = Error ->
			Error
	end.

%% @doc Open the original snapshot's databases, copy any data that is missing
%% from the node databases, then close the snapshot dbs.
maybe_backfill_snapshot_data(SnapshotDir) ->
	case open_snapshot_databases(SnapshotDir) of
		{ok, CloseSnapshotDbs} ->
			%% Close the snapshot databases whatever the backfill outcome.
			Result = store_snapshot_data(SnapshotDir),
			CloseSnapshotDbs(),
			Result;
		{error, _} = Error ->
			Error
	end.
%% @doc Create the RocksDB databases for a new snapshot: open fresh databases,
%% populate them with the block index, recent blocks, tx headers, block time history,
%% and account tree, then verify the result.
create_snapshot_rocksdb(NewSnapshotDir) ->
	BI = ar_node:get_block_index(),
	case BI of
		[] ->
			{error, empty_block_index};
		_ ->
			TipHeight = length(BI) - 1,
			Depth = min(TipHeight, ?LOCALNET_START_FROM_STATE_SEARCH_DEPTH),
			case open_snapshot_dbs(NewSnapshotDir) of
				{error, _} = OpenError ->
					OpenError;
				{ok, Dbs} ->
					%% Always close the snapshot databases, whatever the
					%% outcome of the population steps.
					Res = create_snapshot_rocksdb2(BI, TipHeight, Depth, Dbs),
					close_snapshot_dbs(Dbs),
					Res
			end
	end.

%% Step 1: persist the full block index, then continue with the blocks.
create_snapshot_rocksdb2(BI, Height, SearchDepth, SnapshotDbs) ->
	BlockIndexDb = maps:get(block_index, SnapshotDbs),
	case write_block_index_snapshot(BI, BlockIndexDb) of
		{error, _} = Error ->
			Error;
		ok ->
			create_snapshot_rocksdb3(BI, Height, SearchDepth, SnapshotDbs)
	end.

%% Step 2: load the recent blocks from the local databases, derive the reward
%% and block-time history slices, store the blocks, then hand the collected
%% tx ids over to the final step.
create_snapshot_rocksdb3(BI, Height, SearchDepth, SnapshotDbs) ->
	case read_recent_blocks_local(BI, SearchDepth) of
		not_found ->
			{error, block_headers_not_found};
		{SkippedCount, RecentBlocks} ->
			io:format("Snapshot: recent blocks ~B (skipped: ~B)~n",
					[length(RecentBlocks), SkippedCount]),
			%% Drop the skipped entries so the histories start at the first
			%% block that was actually read.
			TrimmedBI = lists:nthtail(SkippedCount, BI),
			TrimmedHeight = Height - SkippedCount,
			RewardHistoryBI = ar_rewards:interim_reward_history_bi(TrimmedHeight, TrimmedBI),
			BlockTimeHistoryBI = block_time_history_bi(TrimmedBI),
			case store_snapshot_blocks_in_snapshot(RecentBlocks, SnapshotDbs) of
				{error, _} = Error ->
					Error;
				{ok, TxIds} ->
					create_snapshot_rocksdb4(BI, RecentBlocks, RewardHistoryBI,
							BlockTimeHistoryBI, SnapshotDbs, TxIds, SearchDepth)
			end
	end.
%% Final step: write tx headers, reward history, block time history, and the
%% latest wallet list into the snapshot databases, then verify the produced
%% snapshot. Each stage short-circuits on error after printing it.
create_snapshot_rocksdb4(BI, Blocks, RewardHistoryBI, BlockTimeHistoryBI, SnapshotDbs, TxIds, SearchDepth) ->
	case store_tx_headers(TxIds, maps:get(tx, SnapshotDbs)) of
		ok ->
			case copy_history_entries(
					RewardHistoryBI,
					reward_history_db,
					maps:get(reward_history, SnapshotDbs),
					reward_history
			) of
				ok ->
					case copy_history_entries(
							BlockTimeHistoryBI,
							block_time_history_db,
							maps:get(block_time_history, SnapshotDbs),
							block_time_history
					) of
						ok ->
							case store_latest_wallet_list_from_blocks(Blocks,
									maps:get(account_tree, SnapshotDbs), SearchDepth) of
								ok ->
									verify_snapshot_rocksdb(BI, Blocks,
											maps:get(block_index, SnapshotDbs),
											maps:get(block, SnapshotDbs),
											maps:get(account_tree, SnapshotDbs));
								{error, _} = Error ->
									io:format("Snapshot: error storing wallet list: ~p~n", [Error]),
									Error
							end;
						{error, _} = Error ->
							io:format("Snapshot: error copying block time history: ~p~n", [Error]),
							Error
					end;
				{error, _} = Error ->
					io:format("Snapshot: error copying reward history: ~p~n", [Error]),
					Error
			end;
		{error, _} = Error ->
			io:format("Snapshot: error storing tx headers: ~p~n", [Error]),
			Error
	end.

%% @doc Open a fresh set of RocksDB databases (tx_confirmation, tx, block,
%% reward_history, block_time_history, block_index, account_tree) under
%% NewSnapshotDir for writing. Return {ok, DbMap} where DbMap contains
%% named handles keyed by atom. We cannot use ar_storage:open_databases/0
%% since it always targets the data_dir and would collide with the
%% running node's DB names.
open_snapshot_dbs(NewSnapshotDir) ->
	RocksDir = filename:join([NewSnapshotDir, ?ROCKS_DB_DIR]),
	%% {MapKey, RegisteredName, OnDiskDirName} for each database the snapshot
	%% needs. The snapshot_* registered names avoid clashing with the running
	%% node's own database names.
	Dbs = [
		{tx_confirmation, snapshot_tx_confirmation_db, "ar_storage_tx_confirmation_db"},
		{tx, snapshot_tx_db, "ar_storage_tx_db"},
		{block, snapshot_block_db, "ar_storage_block_db"},
		{reward_history, snapshot_reward_history_db, "reward_history_db"},
		{block_time_history, snapshot_block_time_history_db, "block_time_history_db"},
		{block_index, snapshot_block_index_db, "block_index_db"},
		{account_tree, snapshot_account_tree_db, "account_tree_db"}
	],
	open_snapshot_write_dbs(Dbs, RocksDir, #{}, []).

%% @doc Recursively open each RocksDB database in the list. On failure, close
%% any already-opened databases before returning the error. The result map
%% carries one registered name per key plus an 'opened' list consumed by
%% close_snapshot_dbs/1.
open_snapshot_write_dbs([], _RocksDir, DbMap, Opened) ->
	{ok, DbMap#{ opened => Opened }};
open_snapshot_write_dbs([{Key, Name, DirName} | Rest], RocksDir, DbMap, Opened) ->
	Path = filename:join([RocksDir, DirName]),
	case ar_kv:open(#{ path => Path, name => Name }) of
		ok ->
			open_snapshot_write_dbs(Rest, RocksDir, DbMap#{ Key => Name },
					[Name | Opened]);
		{error, _} = Error ->
			%% Roll back: close everything opened so far before returning.
			close_snapshot_dbs(DbMap#{ opened => Opened }),
			Error
	end.

%% @doc Close all RocksDB databases that were opened for a snapshot.
close_snapshot_dbs(SnapshotDbs) ->
	Opened = maps:get(opened, SnapshotDbs, []),
	lists:foreach(fun ar_kv:close/1, Opened).

%% @doc Extract the portion of the block index needed for the block time history:
%% history_length + consensus_window_size entries from the tip.
block_time_history_bi(BI) ->
	lists:sublist(BI,
		ar_block_time_history:history_length() + ar_block:get_consensus_window_size()).

%% @doc Serialize the full block index into a snapshot RocksDB database, keyed
%% by height. Each entry stores {H, WeaveSize, TXRoot, PrevH} so the chain
%% linkage can be validated on load.
write_block_index_snapshot(BI, SnapshotBlockIndexDb) ->
	%% BI is newest-first; write oldest-first starting at height 0 with an
	%% empty previous hash for the first entry.
	write_block_index_snapshot2(0, <<>>, lists:reverse(BI), SnapshotBlockIndexDb).
%% Walk the oldest-first index, writing one {H, WeaveSize, TXRoot, PrevH}
%% entry per height and threading the previous hash forward; stop on the
%% first ar_kv:put/3 error.
write_block_index_snapshot2(_Height, _PrevH, [], _SnapshotBlockIndexDb) ->
	ok;
write_block_index_snapshot2(Height, PrevH, [{H, WeaveSize, TXRoot} | Rest],
		SnapshotBlockIndexDb) ->
	Entry = term_to_binary({H, WeaveSize, TXRoot, PrevH}),
	case ar_kv:put(SnapshotBlockIndexDb, << Height:256 >>, Entry) of
		ok ->
			write_block_index_snapshot2(Height + 1, H, Rest, SnapshotBlockIndexDb);
		Error ->
			Error
	end.

%% @doc Store blocks and their tx confirmation entries into the snapshot's
%% RocksDB databases (block db and tx_confirmation db).
store_snapshot_blocks_in_snapshot(Blocks, SnapshotDbs) ->
	store_snapshot_blocks_with_dbs(Blocks,
			maps:get(block, SnapshotDbs),
			maps:get(tx_confirmation, SnapshotDbs),
			"Snapshot").

%% @doc Serialize a single block (replacing full tx records with just their IDs)
%% and write it to the given block database, keyed by indep_hash.
store_block_snapshot(B, SnapshotBlockDb) ->
	StrippedTxs = [tx_id(TX) || TX <- B#block.txs],
	Serialized = ar_serialize:block_to_binary(B#block{ txs = StrippedTxs }),
	ar_kv:put(SnapshotBlockDb, B#block.indep_hash, Serialized).

%% @doc Extract the transaction ID from either a #tx{} record or a raw binary.
tx_id(#tx{ id = ID }) ->
	ID;
tx_id(ID) ->
	ID.

%% @doc Copy transaction confirmation entries from the tx_confirmation_db
%% to the snapshot database. If an entry is missing from the storage, build
%% one from the given Height and BlockHash.
copy_tx_confirmations([], _Height, _BlockHash, _SnapshotTxConfDb) ->
	ok;
copy_tx_confirmations([TXID | Rest], Height, BlockHash, SnapshotTxConfDb) ->
	case tx_id(TXID) of
		TXID2 when is_binary(TXID2) ->
			case ar_kv:get(tx_confirmation_db, TXID2) of
				{ok, Bin} ->
					%% Copy the stored confirmation entry verbatim.
					case ar_kv:put(SnapshotTxConfDb, TXID2, Bin) of
						ok ->
							copy_tx_confirmations(Rest, Height, BlockHash,
									SnapshotTxConfDb);
						Error ->
							Error
					end;
				not_found ->
					%% No stored confirmation; synthesize one from the block
					%% the transaction belongs to.
					case ar_kv:put(SnapshotTxConfDb, TXID2,
							term_to_binary({Height, BlockHash})) of
						ok ->
							copy_tx_confirmations(Rest, Height, BlockHash,
									SnapshotTxConfDb);
						Error ->
							Error
					end;
				{error, _} = Error ->
					Error
			end;
		_ ->
			%% tx_id/1 returned a non-binary; skip this entry.
			copy_tx_confirmations(Rest, Height, BlockHash, SnapshotTxConfDb)
	end.

%% @doc Read transactions from the node and write their headers (stripping
%% data from v2 format) to the given snapshot tx database. Aborts on the
%% first unavailable transaction or read error.
store_tx_headers(TxIds, SnapshotTxDb) ->
	lists:foldl(
		fun(TXID, Acc) ->
			case Acc of
				ok ->
					case read_tx_local(TXID) of
						#tx{} = TX ->
							store_tx_header_snapshot(TX, SnapshotTxDb);
						unavailable ->
							{error, {tx_unavailable, tx_id(TXID)}};
						Error ->
							{error, {tx_unavailable, tx_id(TXID), Error}}
					end;
				{error, _} = Error ->
					%% A previous iteration failed; short-circuit.
					Error
			end
		end,
		ok,
		TxIds
	).

%% @doc Write a single tx header to a database. For v2 transactions, strip the
%% data field (it's stored separately as chunks); v1 transactions keep their data
%% inline.
store_tx_header_snapshot(TX, SnapshotTxDb) ->
	TX2 =
		case TX#tx.format of
			1 ->
				TX;
			_ ->
				TX#tx{ data = <<>> }
		end,
	ar_kv:put(SnapshotTxDb, TX2#tx.id, ar_serialize:tx_to_binary(TX2)).

%% @doc Copy history entries (reward_history or block_time_history) from SourceDb
%% to DestDb for each block hash in HistoryBI. Stops on the first error.
copy_history_entries(HistoryBI, SourceDb, DestDb, Label) ->
	lists:foldl(
		fun({BH, _, _}, Acc) ->
			case Acc of
				ok ->
					case ar_kv:get(SourceDb, BH) of
						{ok, Bin} ->
							ar_kv:put(DestDb, BH, Bin);
						not_found ->
							%% Label tags the error with the history kind.
							{error, {Label, not_found, BH}};
						{error, Reason} ->
							{error, {Label, Reason, BH}}
					end;
				{error, _} = Error ->
					%% A previous entry failed; short-circuit.
					Error
			end
		end,
		ok,
		HistoryBI
	).

%% @doc Find the most recent readable wallet tree from the block list, verify
%% its root hash matches the block's wallet_list field, then write the account
%% tree nodes to the snapshot account_tree database.
store_latest_wallet_list_from_blocks(Blocks, SnapshotAccountTreeDb, SearchDepth) ->
	case find_wallet_tree_with_search(Blocks, SearchDepth,
			fun ar_storage:read_wallet_list/1) of
		{ok, {B, Tree}} ->
			%% Recompute the root hash to double-check the tree read from disk.
			{RootHash, _UpdatedTree, UpdateMap} = ar_block:hash_wallet_list(Tree),
			case RootHash == B#block.wallet_list of
				true ->
					store_account_tree_update_snapshot(
						B#block.height, RootHash, UpdateMap, SnapshotAccountTreeDb
					);
				false ->
					{error, {wallet_list_root_mismatch, RootHash,
							B#block.wallet_list}}
			end;
		not_found ->
			{error, wallet_list_not_found}
	end.

%% @doc Search through the block list to find one whose account tree can be
%% successfully read. Start at the consensus-window-offset block and walk
%% backward, skipping up to SearchDepth blocks. Return {ok, {Block, Tree}}
%% or not_found.
find_wallet_tree_with_search([], _SearchDepth, _ReadWalletFun) ->
	not_found;
find_wallet_tree_with_search(Blocks, SearchDepth, ReadWalletFun) ->
	find_wallet_tree_with_search(Blocks, SearchDepth, 0, ReadWalletFun).
%% Stop once the number of skipped blocks reaches SearchDepth (first clause
%% matches when both arguments are the same value).
find_wallet_tree_with_search(_Blocks, Skipped, Skipped, _ReadWalletFun) ->
	not_found;
find_wallet_tree_with_search(Blocks, SearchDepth, Skipped, ReadWalletFun) ->
	%% Pick the block at the consensus-window offset when the list is long
	%% enough; otherwise fall back to the oldest block and mark it as the
	%% last candidate.
	{IsLast, B} =
		case length(Blocks) >= ar_block:get_consensus_window_size() of
			true ->
				{false, lists:nth(ar_block:get_consensus_window_size(), Blocks)};
			false ->
				{true, lists:last(Blocks)}
		end,
	case ReadWalletFun(B#block.wallet_list) of
		{ok, Tree} ->
			{ok, {B, Tree}};
		_ ->
			case IsLast of
				true ->
					not_found;
				false ->
					%% Slide the window one block back and count the skip.
					find_wallet_tree_with_search(tl(Blocks), SearchDepth,
							Skipped + 1, ReadWalletFun)
			end
	end.

%% @doc Write account tree nodes from the update map to the snapshot database.
%% Each key is {Hash, Prefix} and the value is the tree node data. Existing
%% entries are not overwritten.
store_account_tree_update_snapshot(_Height, _RootHash, Map, SnapshotAccountTreeDb) ->
	maps:fold(
		fun({H, Prefix}, Value, Acc) ->
			case Acc of
				ok ->
					%% The root node is stored with an empty prefix.
					Prefix2 =
						case Prefix of
							root ->
								<<>>;
							_ ->
								Prefix
						end,
					DBKey = << H/binary, Prefix2/binary >>,
					case ar_kv:get(SnapshotAccountTreeDb, DBKey) of
						not_found ->
							ar_kv:put(SnapshotAccountTreeDb, DBKey,
									term_to_binary(Value));
						{ok, _} ->
							%% Already present; leave it untouched.
							ok;
						{error, Reason} ->
							{error, {account_tree_read_failed, Reason}}
					end;
				{error, _} = Error ->
					Error
			end
		end,
		ok,
		Map
	).

%% @doc Validate a completed snapshot by checking: (1) the block index length
%% matches, (2) all recent blocks exist in the snapshot block db, and (3) at
%% least one account tree root is present in the account tree db.
%% @doc Validate a completed snapshot: the block index must round-trip with the
%% same length, every recent block must be present in the snapshot block db,
%% and at least one wallet-list root must exist in the account tree db.
%% The previously redundant `case ... of ok -> ok; Error -> Error end` nesting
%% is flattened; the length-mismatch check is now a guard clause.
verify_snapshot_rocksdb(BI, Blocks, SnapshotBlockIndexDb, SnapshotBlockDb,
		SnapshotAccountTreeDb) ->
	Height = length(BI) - 1,
	case read_block_index_from_db(SnapshotBlockIndexDb) of
		not_found ->
			{error, snapshot_block_index_not_found};
		SnapshotBI when length(SnapshotBI) /= length(BI) ->
			{error, {snapshot_block_index_length_mismatch, Height}};
		_SnapshotBI ->
			case verify_recent_blocks_from_blocks(Blocks, SnapshotBlockDb) of
				ok ->
					%% Returns ok | {error, _} directly; no re-wrapping needed.
					validate_wallet_root_from_blocks(Blocks, SnapshotAccountTreeDb);
				{error, _} = Error ->
					Error
			end
	end.

%% @doc Read the complete block index from a RocksDB database and return it as
%% a list of {H, WeaveSize, TXRoot} tuples (newest-first). Validates the PrevH
%% chain linkage during reconstruction.
read_block_index_from_db(DbName) ->
	%% <<"a">> sorts after any << Height:256 >> key, so get_prev finds the
	%% highest stored height.
	case ar_kv:get_prev(DbName, <<"a">>) of
		none ->
			not_found;
		{ok, << Height:256 >>, _V} ->
			{ok, Map} = ar_kv:get_range(DbName, << 0:256 >>, << Height:256 >>),
			read_block_index_from_map(Map, 0, Height, <<>>, [])
	end.

%% Walk heights 0..End, prepending entries so the result is newest-first.
%% PrevH is bound in the tuple pattern, so an entry whose stored previous
%% hash does not match the expected one falls through to the second clause
%% and fails the reconstruction.
read_block_index_from_map(_Map, Height, End, _PrevH, BI) when Height > End ->
	BI;
read_block_index_from_map(Map, Height, End, PrevH, BI) ->
	V = maps:get(<< Height:256 >>, Map, not_found),
	case V of
		not_found ->
			not_found;
		_ ->
			case binary_to_term(V) of
				{H, WeaveSize, TXRoot, PrevH} ->
					read_block_index_from_map(Map, Height + 1, End, H,
							[{H, WeaveSize, TXRoot} | BI]);
				{_, _, _, _} ->
					%% Broken chain linkage.
					not_found
			end
	end.

%% @doc Verify that every block in the list exists in the snapshot block database.
verify_recent_blocks_from_blocks([], _SnapshotBlockDb) ->
	ok;
verify_recent_blocks_from_blocks([B | Rest], SnapshotBlockDb) ->
	BH = B#block.indep_hash,
	case ar_kv:get(SnapshotBlockDb, BH) of
		{ok, _} ->
			verify_recent_blocks_from_blocks(Rest, SnapshotBlockDb);
		not_found ->
			{error, {snapshot_block_missing, BH}};
		{error, _} = Error ->
			Error
	end.

%% @doc Check that at least one block's wallet_list root hash is present in the
%% snapshot account tree database.
%% Probe the account tree database for each block's wallet_list root until one
%% is found; fail with wallet_list_not_found when the list is exhausted.
validate_wallet_root_from_blocks([], _SnapshotAccountTreeDb) ->
	{error, wallet_list_not_found};
validate_wallet_root_from_blocks([B | Rest], SnapshotAccountTreeDb) ->
	Root = B#block.wallet_list,
	%% NOTE(review): get_prev/2 is used here as an existence probe; the key it
	%% returns is not checked to actually start with Root — confirm that
	%% ar_kv:get_prev semantics guarantee a prefix match in this database.
	case ar_kv:get_prev(SnapshotAccountTreeDb, << Root/binary >>) of
		{ok, _, _} ->
			ok;
		none ->
			validate_wallet_root_from_blocks(Rest, SnapshotAccountTreeDb)
	end.

%% @doc If TXID is already a #tx{} record, return it directly; otherwise read
%% the transaction from local storage.
read_tx_local(#tx{} = TX) ->
	TX;
read_tx_local(TXID) ->
	ar_storage:read_tx(TXID).

%% @doc Read recent blocks from the node's local storage.
read_recent_blocks_local(BI, SearchDepth) ->
	ar_node:read_recent_blocks(BI, SearchDepth, not_set).

%% @doc Read recent blocks from a snapshot directory's databases.
read_recent_blocks_from_snapshot(BI, SearchDepth, SnapshotDir) ->
	ar_node:read_recent_blocks(BI, SearchDepth, SnapshotDir).

%% @doc Store blocks into the node's block_db and tx_confirmation_db.
store_snapshot_blocks_from_list(Blocks) ->
	store_snapshot_blocks_with_dbs(Blocks, block_db, tx_confirmation_db,
			"Startup copy").

%% @doc Serialize and store a list of blocks and their tx
%% confirmation entries into the given databases. Return {ok, TXIDList} with
%% the deduplicated set of all tx IDs across the stored blocks.
store_snapshot_blocks_with_dbs(Blocks, BlockDb, TxConfirmationDb, LogPrefix) ->
	case lists:foldl(
		fun(B, Acc) ->
			case Acc of
				{ok, TxIdSet} ->
					io:format("~s: block ~s height ~B~n",
							[LogPrefix, ar_util:encode(B#block.indep_hash),
							B#block.height]),
					case store_block_snapshot(B, BlockDb) of
						ok ->
							TxIds = lists:map(fun tx_id/1, B#block.txs),
							case copy_tx_confirmations(TxIds, B#block.height,
									B#block.indep_hash, TxConfirmationDb) of
								ok ->
									%% Accumulate the block's tx ids into the
									%% deduplicated set.
									{ok, sets:union(TxIdSet, sets:from_list(TxIds))};
								{error, _} = Error ->
									io:format("~s: error confirmations for block ~s: ~p~n",
											[LogPrefix,
											ar_util:encode(B#block.indep_hash), Error]),
									Error
							end;
						{error, _} = Error ->
							io:format("~s: error storing block ~s: ~p~n",
									[LogPrefix, ar_util:encode(B#block.indep_hash),
									Error]),
							Error
					end;
				{error, _} = Error ->
					%% A previous block failed; short-circuit.
					Error
			end
		end,
		{ok, sets:new()},
		Blocks
	) of
		{ok, TxIdSet} ->
			{ok, sets:to_list(TxIdSet)};
		{error, _} = Error ->
			Error
	end.

%% @doc Copy tx headers into tx_db during startup. First check if
%% the entry already exists; if not, read the tx from the snapshot directory
%% as a fallback.
store_snapshot_tx_headers(TxIds, SnapshotDir) ->
	io:format("Startup copy: tx headers to copy ~B~n", [length(TxIds)]),
	lists:foldl(
		fun(TXID, Acc) ->
			case Acc of
				ok ->
					TXID2 = tx_id(TXID),
					case ar_kv:get(tx_db, TXID2) of
						{ok, _} ->
							%% Already present locally; nothing to do.
							ok;
						not_found ->
							case ar_storage:read_tx(TXID2, SnapshotDir) of
								#tx{} = TX ->
									store_tx_header_snapshot(TX, tx_db);
								unavailable ->
									io:format("Startup copy: missing tx header ~s~n",
											[ar_util:encode(TXID2)]),
									{error, {tx_unavailable, TXID2}};
								Error ->
									io:format("Startup copy: error reading tx ~s: ~p~n",
											[ar_util:encode(TXID2), Error]),
									{error, {tx_unavailable, TXID2, Error}}
							end;
						{error, _} = Error ->
							io:format("Startup copy: error reading tx db ~s: ~p~n",
									[ar_util:encode(TXID2), Error]),
							{error, {tx_db_read_failed, TXID2, Error}}
					end;
				{error, _} = Error ->
					Error
			end
		end,
		ok,
		TxIds
	).

%% @doc Copy block time history and reward history entries from SourceDB to
%% DestDB during startup. Unlike copy_history_entries/4, this skips entries
%% that already exist in the destination.
store_snapshot_history_entries(HistoryBI, SourceDb, DestDb, Label) ->
	lists:foldl(
		fun({BH, _, _}, Acc) ->
			case Acc of
				ok ->
					case ar_kv:get(DestDb, BH) of
						{ok, _} ->
							%% Destination already has this entry; skip it.
							ok;
						not_found ->
							case ar_kv:get(SourceDb, BH) of
								{ok, Bin} ->
									ar_kv:put(DestDb, BH, Bin);
								not_found ->
									io:format("Startup copy: missing ~p entry ~s~n",
											[Label, ar_util:encode(BH)]),
									{error, {Label, not_found, BH}};
								{error, Reason} ->
									io:format("Startup copy: error ~p entry ~s: ~p~n",
											[Label, ar_util:encode(BH), Reason]),
									{error, {Label, Reason, BH}}
							end;
						{error, _} = Error ->
							io:format("Startup copy: error reading ~p entry ~s: ~p~n",
									[Label, ar_util:encode(BH), Error]),
							{error, {Label, Error, BH}}
					end;
				{error, _} = Error ->
					Error
			end
		end,
		ok,
		HistoryBI
	).

%% @doc Read an account tree from the snapshot directory,
%% verify the root hash, and store the account tree nodes in the local
%% account_tree_db.
store_snapshot_wallet_list(Blocks, SnapshotDir, SearchDepth) ->
	case find_wallet_tree_with_search(Blocks, SearchDepth,
			fun(WalletList) ->
				read_wallet_list_from_snapshot(WalletList, SnapshotDir)
			end) of
		{ok, {B, Tree}} ->
			%% Recompute the root hash to validate the tree read from disk.
			{RootHash, _UpdatedTree, UpdateMap} = ar_block:hash_wallet_list(Tree),
			io:format("Startup copy: wallet list root ~s height ~B~n",
					[ar_util:encode(RootHash), B#block.height]),
			case RootHash == B#block.wallet_list of
				true ->
					store_account_tree_update_snapshot(
						B#block.height, RootHash, UpdateMap, account_tree_db
					);
				false ->
					{error, {wallet_list_root_mismatch, RootHash,
							B#block.wallet_list}}
			end;
		not_found ->
			{error, wallet_list_not_found}
	end.

%% @doc Try to read a wallet list from local storage first; fall back to the
%% snapshot directory if not available locally.
read_wallet_list_from_snapshot(WalletList, SnapshotDir) ->
	case ar_storage:read_wallet_list(WalletList) of
		{ok, Tree} ->
			{ok, Tree};
		_ ->
			ar_storage:read_wallet_list(WalletList, SnapshotDir)
	end.
%% @doc Create the snapshot output directory. Fail if the directory already
%% exists (to avoid accidentally overwriting a previous snapshot).
ensure_snapshot_dir(NewSnapshotDir) ->
	case file:read_file_info(NewSnapshotDir) of
		{error, enoent} ->
			%% Nothing there yet: the trailing slash makes ensure_dir treat
			%% the whole path as a directory to create.
			filelib:ensure_dir(filename:join(NewSnapshotDir, "placeholder") ++ "/");
		{ok, _} ->
			{error, {snapshot_dir_exists, NewSnapshotDir}};
		{error, Reason} ->
			{error, {snapshot_dir_unavailable, Reason}}
	end.

%% @doc Copy a named subdirectory or file into TargetDir. Tries PrimaryDir
%% first, then FallbackDir. Silently succeeds if the name is not found in
%% either location (the data may simply not exist yet).
copy_from_dir(PrimaryDir, FallbackDir, TargetDir, Name) ->
	TargetPath = filename:join([TargetDir, Name]),
	Candidates = [
		filename:join([PrimaryDir, Name]),
		filename:join([FallbackDir, Name])
	],
	case lists:search(fun exists_on_disk/1, Candidates) of
		{value, SourcePath} ->
			copy_any(SourcePath, TargetPath);
		false ->
			ok
	end.

%% @doc Copy a named subdirectory or file from SourceDir to TargetDir. Unlike
%% copy_from_dir/4, this returns an error if the source does not exist.
copy_required_dir(SourceDir, TargetDir, Name) ->
	SourcePath = filename:join([SourceDir, Name]),
	case exists_on_disk(SourcePath) of
		false ->
			{error, {snapshot_path_missing, SourcePath}};
		true ->
			copy_any(SourcePath, filename:join([TargetDir, Name]))
	end.

%% @doc Return true if the given path exists on disk (file, directory, or symlink).
exists_on_disk(Path) ->
	case file:read_file_info(Path) of
		{ok, _} -> true;
		_ -> false
	end.

%% @doc Copy a filesystem entry (regular file, directory, or symlink) from
%% SourcePath to TargetPath, dispatching to the appropriate copy function.
copy_any(SourcePath, TargetPath) ->
	case file:read_file_info(SourcePath) of
		{ok, #file_info{ type = directory }} ->
			copy_dir(SourcePath, TargetPath);
		{ok, #file_info{ type = regular }} ->
			%% NOTE(review): the ensure_dir result is ignored here; a failure
			%% would surface as the subsequent file:copy error instead.
			filelib:ensure_dir(TargetPath),
			case file:copy(SourcePath, TargetPath) of
				{ok, _} ->
					ok;
				Error ->
					Error
			end;
		{ok, #file_info{ type = symlink }} ->
			copy_symlink(SourcePath, TargetPath);
		{ok, #file_info{ type = other }} ->
			%% Devices, fifos, sockets, etc. are not copyable.
			{error, {unsupported_type, SourcePath}};
		{error, Reason} ->
			{error, {read_file_info_failed, SourcePath, Reason}}
	end.

%% @doc Recursively copy a directory and all its contents. Stops on the first
%% error encountered in any entry.
copy_dir(SourceDir, TargetDir) ->
	case file:list_dir(SourceDir) of
		{ok, Entries} ->
			ok = filelib:ensure_dir(filename:join([TargetDir, "placeholder"]) ++ "/"),
			lists:foldl(
				fun(Entry, Acc) ->
					case Acc of
						ok ->
							SourcePath = filename:join([SourceDir, Entry]),
							TargetPath = filename:join([TargetDir, Entry]),
							copy_any(SourcePath, TargetPath);
						{error, _} = Error ->
							Error
					end
				end,
				ok,
				Entries
			);
		{error, Reason} ->
			{error, {list_dir_failed, SourceDir, Reason}}
	end.

%% @doc Copy a symbolic link by reading its target and creating a new symlink.
%% The link target is copied verbatim (relative targets stay relative).
copy_symlink(SourcePath, TargetPath) ->
	case file:read_link(SourcePath) of
		{ok, LinkTarget} ->
			filelib:ensure_dir(TargetPath),
			file:make_symlink(LinkTarget, TargetPath);
		{error, Reason} ->
			{error, {read_link_failed, SourcePath, Reason}}
	end.



================================================
FILE: apps/arweave/src/ar_localnet_mining_server.erl
================================================
-module(ar_localnet_mining_server).

-behaviour(ar_mining_server_behaviour).
-behaviour(gen_server).

-export([start_link/0, start_mining/1, pause/0, is_paused/0, set_difficulty/1,
		set_merkle_rebase_threshold/1, set_height/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include("ar_mining.hrl").
-include("ar_vdf.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
%% Server state: mining is paused until start_mining/1 is cast.
-record(state, {
	paused = true,
	difficulty = {infinity, infinity},
	merkle_rebase_threshold = infinity,
	height = 0
}).

%% Delay before retrying after a failed mining attempt.
-define(RETRY_MINE_DELAY_MS, 1000).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Start mining with {DiffPair, RebaseThreshold, Height} (asynchronous).
start_mining(Args) ->
	gen_server:cast(?MODULE, {start_mining, Args}).

%% @doc Pause mining (asynchronous).
pause() ->
	gen_server:cast(?MODULE, pause).

%% @doc Return whether mining is currently paused (synchronous).
is_paused() ->
	gen_server:call(?MODULE, is_paused).

set_difficulty(DiffPair) ->
	gen_server:cast(?MODULE, {set_difficulty, DiffPair}).

set_merkle_rebase_threshold(Threshold) ->
	gen_server:cast(?MODULE, {set_merkle_rebase_threshold, Threshold}).

set_height(Height) ->
	gen_server:cast(?MODULE, {set_height, Height}).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	{ok, #state{}}.

handle_call(is_paused, _From, State) ->
	{reply, State#state.paused, State};
handle_call(_Request, _From, State) ->
	{reply, ok, State}.
%% Ignore start_mining while already mining (paused = false).
handle_cast({start_mining, _Args}, #state{ paused = false } = State) ->
	{noreply, State};
handle_cast({start_mining, {DiffPair, RebaseThreshold, Height}}, State) ->
	%% Unpause, record the mining parameters, and kick off an attempt.
	gen_server:cast(self(), mine),
	{noreply, State#state{ paused = false, difficulty = DiffPair,
			merkle_rebase_threshold = RebaseThreshold, height = Height }};
handle_cast(pause, State) ->
	ar:console("Pausing localnet mining.~n"),
	{noreply, State#state{ paused = true }};
handle_cast({set_difficulty, DiffPair}, State) ->
	{noreply, State#state{ difficulty = DiffPair }};
handle_cast({set_merkle_rebase_threshold, Threshold}, State) ->
	{noreply, State#state{ merkle_rebase_threshold = Threshold }};
handle_cast({set_height, Height}, State) ->
	{noreply, State#state{ height = Height }};
handle_cast(mine, #state{ paused = true } = State) ->
	%% Paused while a mine message was in flight; drop it.
	{noreply, State};
handle_cast(mine, State) ->
	case mine_block(State) of
		ok ->
			%% One-shot: pause again after a successful block.
			{noreply, State#state{ paused = true }};
		error ->
			%% Retry after a delay (see handle_info(retry_mine, ...)).
			erlang:send_after(?RETRY_MINE_DELAY_MS, self(), retry_mine),
			{noreply, State}
	end;
handle_cast(_Msg, State) ->
	{noreply, State}.

handle_info(retry_mine, State) ->
	gen_server:cast(self(), mine),
	{noreply, State};
handle_info(_Info, State) ->
	{noreply, State}.

terminate(_Reason, _State) ->
	ok.

%%%===================================================================
%%% Internal functions.
%%%===================================================================

%% Attempt to mine one block; returns ok | error.
mine_block(State) ->
	{ok, Config} = arweave_config:get_env(),
	MiningAddr = Config#config.mining_addr,
	StorageModules = Config#config.storage_modules,
	mine_block2(pick_random_storage_module(StorageModules), State, MiningAddr,
			StorageModules).

%% No storage module holds any synced data; nothing to mine from.
mine_block2(error, _State, _MiningAddr, _StorageModules) ->
	?LOG_ERROR([{event, failed_to_create_localnet_block},
			{step, sample_storage_module},
			{reason, all_storage_modules_empty}]),
	error;
mine_block2({StoreID, Intervals}, State, MiningAddr, StorageModules) ->
	mine_block3(sample_chunk_with_proof(StoreID, Intervals, MiningAddr), State,
			MiningAddr, StorageModules).
mine_block3({error, Error}, _State, _MiningAddr, _StorageModules) ->
	?LOG_ERROR([{event, failed_to_create_localnet_block},
			{step, sample_chunk_with_proof},
			{reason, io_lib:format("~p", [Error])}]),
	error;
mine_block3({RecallByte1, _Chunk1, PoA1}, State, MiningAddr, StorageModules) ->
	%% Pick a random sub-chunk nonce for the first proof-of-access chunk.
	NoncesPerChunk = ar_block:get_nonces_per_chunk(?REPLICA_2_9_PACKING_DIFFICULTY),
	Nonce = rand:uniform(NoncesPerChunk) - 1,
	SubChunk1 = get_sub_chunk(PoA1#poa.chunk, Nonce, ?REPLICA_2_9_PACKING_DIFFICULTY),
	Stage1Data = #{ recall_byte1 => RecallByte1,
			poa1 => PoA1#poa{ chunk = SubChunk1 }, nonce => Nonce },
	%% Randomly decide between a one-chunk and a two-chunk solution.
	IsTwoChunk = rand:uniform(?POA1_DIFF_MULTIPLIER + 1) > 1,
	mine_block4(IsTwoChunk, Stage1Data, State, MiningAddr, StorageModules).

%% One-chunk solution chosen: skip the second chunk stage.
mine_block4(false, Stage1Data, State, MiningAddr, _StorageModules) ->
	mine_block7(Stage1Data, one_chunk, State, MiningAddr);
mine_block4(true, Stage1Data, State, MiningAddr, StorageModules) ->
	mine_block5(pick_random_storage_module(StorageModules), Stage1Data, State,
			MiningAddr, StorageModules).

%% Could not sample a second storage module: fall back to one-chunk.
mine_block5(error, Stage1Data, State, MiningAddr, _StorageModules) ->
	mine_block7(Stage1Data, one_chunk, State, MiningAddr);
mine_block5({StoreID2, Intervals2}, Stage1Data, State, MiningAddr, StorageModules) ->
	mine_block6(sample_chunk_with_proof(StoreID2, Intervals2, MiningAddr),
			Stage1Data, State, MiningAddr, StorageModules).

%% Second chunk sampling failed: fall back to one-chunk.
mine_block6({error, _Error}, Stage1Data, State, MiningAddr, _StorageModules) ->
	mine_block7(Stage1Data, one_chunk, State, MiningAddr);
mine_block6({RecallByte2, _Chunk2, PoA2}, Stage1Data, State, MiningAddr,
		_StorageModules) ->
	%% Reuse the stage-1 nonce to select the matching sub-chunk of PoA2.
	#{ nonce := Nonce } = Stage1Data,
	SubChunk2 = get_sub_chunk(PoA2#poa.chunk, Nonce, ?REPLICA_2_9_PACKING_DIFFICULTY),
	Stage2Data = #{ recall_byte2 => RecallByte2,
			poa2 => PoA2#poa{ chunk = SubChunk2 } },
	mine_block7(Stage1Data, Stage2Data, State, MiningAddr).
%% Fetch the current VDF session and build the mining solution.
mine_block7(Stage1Data, Stage2Data, State, MiningAddr) ->
	[{_, TipNonceLimiterInfo}] = ets:lookup(node_state, nonce_limiter_info),
	PrevStepNumber = TipNonceLimiterInfo#nonce_limiter_info.global_step_number,
	SessionKey = ar_nonce_limiter:session_key(TipNonceLimiterInfo),
	case ar_nonce_limiter:get_session(SessionKey) of
		not_found ->
			?LOG_ERROR([
				{event, localnet_nonce_limiter_session_not_found},
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{prev_step_number, PrevStepNumber}
			]),
			error;
		#vdf_session{} = Session ->
			mine_block7_with_session(
				Session, SessionKey, PrevStepNumber, TipNonceLimiterInfo,
				Stage1Data, Stage2Data, State, MiningAddr
			)
	end.

mine_block7_with_session(
		Session, SessionKey, PrevStepNumber, TipNonceLimiterInfo,
		Stage1Data, Stage2Data, State, MiningAddr
) ->
	{NextSeed, StartIntervalNumber, NextVDFDifficulty} = SessionKey,
	%% Use the tip's nonce limiter data when the session has not advanced;
	%% otherwise take the latest step from the session itself.
	{StepNumber, Output, Seed, Checkpoints, Steps} =
		case Session#vdf_session.step_number == PrevStepNumber of
			true ->
				{
					PrevStepNumber,
					TipNonceLimiterInfo#nonce_limiter_info.output,
					TipNonceLimiterInfo#nonce_limiter_info.seed,
					TipNonceLimiterInfo#nonce_limiter_info.last_step_checkpoints,
					TipNonceLimiterInfo#nonce_limiter_info.steps
				};
			false ->
				StepNumber0 = Session#vdf_session.step_number,
				{
					StepNumber0,
					hd(Session#vdf_session.steps),
					Session#vdf_session.seed,
					maps:get(StepNumber0, Session#vdf_session.step_checkpoints_map, []),
					Session#vdf_session.steps
				}
		end,
	#{ recall_byte1 := RecallByte1, poa1 := PoA1, nonce := Nonce } = Stage1Data,
	H0 = ar_block:compute_h0(
		Output,
		ar_node:get_partition_number(RecallByte1),
		Seed,
		MiningAddr,
		?REPLICA_2_9_PACKING_DIFFICULTY
	),
	{H1, _} = ar_block:compute_h1(H0, Nonce, PoA1#poa.chunk),
	%% One-chunk solutions use H1 as the solution hash; two-chunk solutions
	%% chain it into H2 with the second chunk.
	{RecallByte2, PoA2, SolutionHash} =
		case Stage2Data of
			one_chunk ->
				{undefined, #poa{}, H1};
			#{ recall_byte2 := RecallByte2_0, poa2 := PoA2_0 } ->
				{H2, _} = ar_block:compute_h2(H1, PoA2_0#poa.chunk, H0),
				{RecallByte2_0, PoA2_0, H2}
		end,
	Solution = #mining_solution{
		mining_address = MiningAddr,
		merkle_rebase_threshold = State#state.merkle_rebase_threshold,
		next_seed = NextSeed,
		next_vdf_difficulty = NextVDFDifficulty,
		nonce = Nonce,
		nonce_limiter_output = Output,
		partition_number = ar_node:get_partition_number(RecallByte1),
		partition_upper_bound = ar_node:get_weave_size(),
		poa1 = PoA1,
		poa2 = PoA2,
		recall_byte1 = RecallByte1,
		recall_byte2 = RecallByte2,
		seed = Seed,
		solution_hash = SolutionHash,
		start_interval_number = StartIntervalNumber,
		step_number = StepNumber,
		packing_difficulty = ?REPLICA_2_9_PACKING_DIFFICULTY,
		replica_format = 1,
		last_step_checkpoints = Checkpoints,
		steps = Steps
	},
	ar_node_worker:found_solution(miner, Solution, undefined, undefined),
	ok.

%% Pick a random storage module that has synced data; returns
%% {StoreID, Intervals} or the atom error when all modules are empty.
pick_random_storage_module(StorageModules) ->
	ModulesWithData = lists:filtermap(
		fun(Module) ->
			StoreID = ar_storage_module:id(Module),
			Intervals = ar_sync_record:get(ar_data_sync, StoreID),
			case ar_intervals:is_empty(Intervals) of
				true ->
					false;
				false ->
					{true, {StoreID, Intervals}}
			end
		end,
		StorageModules
	),
	case ModulesWithData of
		[] ->
			error;
		_ ->
			lists:nth(rand:uniform(length(ModulesWithData)), ModulesWithData)
	end.
%% Pick a random chunk within the synced intervals and fetch it together with
%% its proofs, packed for the given mining address; returns
%% {RecallByte, PackedChunk, #poa{}} or {error, Reason}.
sample_chunk_with_proof(_StoreID, Intervals, MiningAddr) ->
	TotalSize = ar_intervals:sum(Intervals),
	RandomOffset = rand:uniform(TotalSize) - 1,
	List = ar_intervals:to_list(Intervals),
	AbsoluteOffset = find_offset_in_intervals(List, RandomOffset),
	%% Align down to a chunk boundary.
	RecallByte = (AbsoluteOffset div ?DATA_CHUNK_SIZE) * ?DATA_CHUNK_SIZE,
	Packing = {replica_2_9, MiningAddr},
	Options = #{ pack => true, packing => Packing, origin => miner },
	case ar_data_sync:get_chunk(RecallByte + 1, Options) of
		{ok, Proof} ->
			#{ chunk := PackedChunk, tx_path := TXPath, data_path := DataPath } = Proof,
			case maps:get(unpacked_chunk, Proof, not_found) of
				not_found ->
					%% No unpacked chunk in the proof; unpack it ourselves.
					#{ tx_root := TXRoot, absolute_end_offset := AbsoluteEndOffset,
							chunk_size := ChunkSize } = Proof,
					case ar_packing_server:unpack(
							Packing, AbsoluteEndOffset, TXRoot, PackedChunk, ChunkSize
					) of
						{ok, UnpackedChunk} ->
							PaddedUnpackedChunk = ar_packing_server:pad_chunk(UnpackedChunk),
							{RecallByte, PackedChunk, #poa{
								chunk = PackedChunk,
								unpacked_chunk = PaddedUnpackedChunk,
								data_path = DataPath,
								tx_path = TXPath
							}};
						Error ->
							{error, Error}
					end;
				UnpackedChunk ->
					PaddedUnpackedChunk = ar_packing_server:pad_chunk(UnpackedChunk),
					{RecallByte, PackedChunk, #poa{
						chunk = PackedChunk,
						unpacked_chunk = PaddedUnpackedChunk,
						data_path = DataPath,
						tx_path = TXPath
					}}
			end;
		Error ->
			{error, Error}
	end.

%% Map a 0-based offset within the concatenated intervals to an absolute
%% weave offset. Intervals are {End, Start} pairs.
find_offset_in_intervals([{End, Start} | Rest], Offset) ->
	Len = End - Start,
	case Offset < Len of
		true ->
			Start + Offset;
		false ->
			find_offset_in_intervals(Rest, Offset - Len)
	end.

%% Difficulty 0 uses the whole chunk; otherwise select the Nonce-th sub-chunk.
get_sub_chunk(Chunk, _Nonce, 0) when byte_size(Chunk) == ?DATA_CHUNK_SIZE ->
	Chunk;
get_sub_chunk(Chunk, Nonce, _PackingDifficulty) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	SubChunkStartOffset = SubChunkSize * Nonce,
	binary:part(Chunk, SubChunkStartOffset, SubChunkSize).



================================================
FILE: apps/arweave/src/ar_localnet_mining_sup.erl
================================================
-module(ar_localnet_mining_sup).
-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% One-for-one supervision of the single localnet mining worker,
%% allowing up to 5 restarts within 10 seconds.
init([]) ->
	Children = [
		?CHILD(ar_localnet_mining_server, worker)
	],
	{ok, {{one_for_one, 5, 10}, Children}}.



================================================
FILE: apps/arweave/src/ar_logger.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @doc Arweave Logging Interface.
%%%
%%% This module is in charge of starting, stopping, enabling, and
%%% disabling Arweave logging handlers.
%%%
%%% == Logger Primary Configuration ==
%%%
%%% The primary logger configuration is defined in `config/sys.config'
%%% with the help of the `logger_level' key.
%%%
%%% see: https://www.erlang.org/doc/apps/kernel/logger_chapter
%%%
%%% == Logger Default Configuration ==
%%%
%%% The default configuration is used to log to the console, and it
%%% should not be modified by default. To that end, this value is
%%% defined outside of this module, in `config/sys.config', via the
%%% `logger' key. Here is the configuration:
%%%
%%% ```
%%% [{handler, default, logger_std_h, #{
%%%   level => warning,
%%%   formatter => {
%%%     logger_formatter, #{
%%%       legacy_header => false,
%%%       single_line => true,
%%%       chars_limit => 16256,
%%%       max_size => 8128,
%%%       depth => 256,
%%%       template => [time," [",level,"] ",mfa,":",line," ",msg,"\n"]
%%%     }
%%%   }
%%% }
%%% }].
%%% '''
%%%
%%% see: https://www.erlang.org/doc/apps/kernel/logger_chapter
%%% @end
%%% @see logger
%%% @see logger_handler
%%% @TODO integrate with arweave_config.
%%% @TODO create domain for different part of the code, but all
%%%       calls to logger inside arweave should be in the domain
%%%       [arweave].
%%% @TODO ensure primary logger configuration is set with the right
%%%       values (level => all).
%%%===================================================================
-module(ar_logger).
-compile(warnings_as_errors).
-export([
	init/1,
	is_started/1,
	handlers/0,
	start_handlers/0,
	start_handler/1,
	started_handlers/0,
	stop_handlers/0,
	stop_handler/1,
	gen_log/3
]).
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%--------------------------------------------------------------------
%% @doc legacy compatible interface. to be removed.
%% @end
%%--------------------------------------------------------------------
init(Config = #config{}) ->
	%% Always start the console and info-file handlers; the debug handler
	%% is only started when the node runs with debug enabled.
	start_handler(default),
	start_handler(arweave_info),
	init_debug(Config).

init_debug(#config{ debug = true }) ->
	start_handler(arweave_debug);
init_debug(_) ->
	ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
template() ->
	[time," [",level,"] ",mfa,":",line," ",msg,"\n"].

%%--------------------------------------------------------------------
%% @doc wrapper around `logger:get_handler_config/1'.
%% @see logger:get_handler_config/1
%% @end
%%--------------------------------------------------------------------
%% Returns true when the given handler id is currently installed in
%% the kernel logger, false otherwise.
is_started(Handler) ->
	case logger:get_handler_config(Handler) of
		{ok, _} -> true;
		_ -> false
	end.

%%--------------------------------------------------------------------
%% @doc defined loggers.
%% Returns a map of handler id => logger_std_h configuration. Every
%% tunable is read from arweave_config with a hard-coded fallback.
%% @end
%%--------------------------------------------------------------------
handlers() ->
	#{
		% log every info message.
		% This handler can be configured on demand, all message
		% greater or equal than info are being logged.
		arweave_info => #{
			level => info,
			config => #{
				type => file,
				file => logfile_path(#{ prefix => "arweave", level => info }),
				compress_on_rotate =>
					arweave_config:get([logging,compress_on_rotate], false),
				% BUGFIX: previously read [logging,sync_mode_qlen]
				% (copy-paste slip) for the rotation file count.
				max_no_files =>
					arweave_config:get([logging,max_no_files], 10),
				max_no_bytes =>
					arweave_config:get([logging,max_no_bytes], 51_418_800),
				modes => [raw, append],
				sync_mode_qlen =>
					arweave_config:get([logging,sync_mode_qlen], 10),
				drop_mode_qlen =>
					arweave_config:get([logging,drop_mode_qlen], 200),
				flush_qlen =>
					arweave_config:get([logging,flush_qlen], 1000),
				burst_limit_enable =>
					arweave_config:get([logging,burst_limit_enable], true),
				burst_limit_max_count =>
					arweave_config:get([logging,burst_limit_max_count], 500),
				burst_limit_window_time =>
					arweave_config:get([logging,burst_limit_window_time], 1000),
				overload_kill_enable =>
					arweave_config:get([logging,overload_kill_enable], true),
				overload_kill_qlen =>
					arweave_config:get([logging,overload_kill_qlen], 20_000),
				overload_kill_mem_size =>
					arweave_config:get([logging,overload_kill_mem_size], 3_000_000),
				overload_kill_restart_after =>
					arweave_config:get([logging,overload_kill_restart_after], 5000)
			},
			formatter => {
				logger_formatter, #{
					chars_limit =>
						arweave_config:get([logging,formatter,chars_limit], 16256),
					depth =>
						arweave_config:get([logging,formatter,depth], 256),
					legacy_header => false,
					max_size =>
						arweave_config:get([logging,formatter,max_size], 8128),
					single_line => true,
					template =>
						arweave_config:get([logging,formatter,template], template()),
					time_offset => "Z"
				}
			},
			filter_default => log,
			filters => [
				{n_wildcard,
					{fun logger_filters:level/2, {stop, lt, info}}},
				{n_http,
					{fun logger_filters:domain/2, {stop, sub, [arweave,http]}}}
			]
		},
		% log every debug message.
		% Only debug messages are being logged.
		arweave_debug => #{
			level => debug,
			config => #{
				type => file,
				file => logfile_path(#{ prefix => "arweave", level => debug }),
				compress_on_rotate =>
					arweave_config:get([logging,handlers,debug,compress_on_rotate], false),
				max_no_files =>
					arweave_config:get([logging,handlers,debug,max_no_files], 10),
				max_no_bytes =>
					arweave_config:get([logging,handlers,debug,max_no_bytes], 51_418_800),
				modes => [raw, append],
				sync_mode_qlen =>
					arweave_config:get([logging,handlers,debug,sync_mode_qlen], 10),
				drop_mode_qlen =>
					arweave_config:get([logging,handlers,debug,drop_mode_qlen], 200),
				flush_qlen =>
					arweave_config:get([logging,handlers,debug,flush_qlen], 1000),
				burst_limit_enable =>
					arweave_config:get([logging,handlers,debug,burst_limit_enable], true),
				burst_limit_max_count =>
					arweave_config:get([logging,handlers,debug,burst_limit_max_count], 500),
				burst_limit_window_time =>
					arweave_config:get([logging,handlers,debug,burst_limit_window_time], 1000),
				overload_kill_enable =>
					arweave_config:get([logging,handlers,debug,overload_kill_enable], true),
				overload_kill_qlen =>
					arweave_config:get([logging,handlers,debug,overload_kill_qlen], 20_000),
				overload_kill_mem_size =>
					arweave_config:get([logging,handlers,debug,overload_kill_mem_size], 3_000_000),
				overload_kill_restart_after =>
					arweave_config:get([logging,handlers,debug,overload_kill_restart_after], 5000)
			},
			formatter => {
				logger_formatter, #{
					chars_limit =>
						arweave_config:get([logging,handlers,debug,formatter,chars_limit], 16256),
					depth =>
						arweave_config:get([logging,handlers,debug,formatter,depth], 256),
					legacy_header => false,
					max_size =>
						arweave_config:get([logging,handlers,debug,formatter,max_size], 8128),
					single_line => true,
					template =>
						arweave_config:get([logging,handlers,debug,formatter,template], template()),
					time_offset => "Z"
				}
			},
			filter_default => log,
			filters => [
				{n_wildcard,
					{fun logger_filters:level/2, {stop, lt, debug}}},
				{n_http,
					{fun logger_filters:domain/2, {stop, sub, [arweave,http]}}}
			]
		},
		% this handler will log only log message containing the domain
		% [arweave,http,api] in info level.
		arweave_http_api => #{
			level => info,
			config => #{
				type => file,
				% NOTE(review): the handler level is info but the file
				% name is built with level => debug — looks like a
				% copy-paste from arweave_debug; left unchanged because
				% the resulting file name is operator-visible. Confirm.
				file => logfile_path(#{ prefix => "arweave-http-api", level => debug }),
				compress_on_rotate =>
					arweave_config:get([logging,handlers,http,api,compress_on_rotate], false),
				max_no_files =>
					arweave_config:get([logging,handlers,http,api,max_no_files], 10),
				max_no_bytes =>
					arweave_config:get([logging,handlers,http,api,max_no_bytes], 51_418_800),
				modes => [raw, append],
				sync_mode_qlen =>
					arweave_config:get([logging,handlers,http,api,sync_mode_qlen], 10),
				drop_mode_qlen =>
					arweave_config:get([logging,handlers,http,api,drop_mode_qlen], 200),
				flush_qlen =>
					arweave_config:get([logging,handlers,http,api,flush_qlen], 1000),
				burst_limit_enable =>
					arweave_config:get([logging,handlers,http,api,burst_limit_enable], true),
				burst_limit_max_count =>
					arweave_config:get([logging,handlers,http,api,burst_limit_max_count], 500),
				burst_limit_window_time =>
					arweave_config:get([logging,handlers,http,api,burst_limit_window_time], 1000),
				overload_kill_enable =>
					arweave_config:get([logging,handlers,http,api,overload_kill_enable], true),
				overload_kill_qlen =>
					arweave_config:get([logging,handlers,http,api,overload_kill_qlen], 20_000),
				overload_kill_mem_size =>
					arweave_config:get([logging,handlers,http,api,overload_kill_mem_size], 3_000_000),
				overload_kill_restart_after =>
					arweave_config:get([logging,handlers,http,api,overload_kill_restart_after], 5000)
			},
			formatter => {
				logger_formatter, #{
					legacy_header => false,
					single_line => true,
					chars_limit =>
						arweave_config:get([logging,handlers,http,api,formatter,chars_limit], 16256),
					max_size =>
						arweave_config:get([logging,handlers,http,api,formatter,max_size], 8128),
					depth =>
						arweave_config:get([logging,handlers,http,api,formatter,depth], 256),
					% access-log style line: one key=value pair per field.
					template => [
						time, " ",
						"ip=", peer_ip, " ",
						"port=", peer_port, " ",
						"version=", version, " ",
						"method=", method, " ",
						"code=", code, " ",
						"path=", path, " ",
						"body_length=", body_length, " ",
						"duration=", duration, " ",
						"msg=", msg, "\n"
					],
					time_offset => "Z"
				}
			},
			filter_default => stop,
			filters => [
				{info,
					{fun logger_filters:level/2, {stop, lt, info}}},
				{http,
					{fun logger_filters:domain/2, {log, sub, [arweave,http,api]}}}
			]
		}
	}.

%%--------------------------------------------------------------------
%% @doc start all defined loggers.
%% @end
%%--------------------------------------------------------------------
start_handlers() ->
	Handlers = maps:keys(handlers()),
	[ start_handler(Handler) || Handler <- Handlers ].

%%--------------------------------------------------------------------
%% @doc start one defined logger.
%% Returns {error, not_found} for an unknown handler id.
%% @end
%%--------------------------------------------------------------------
start_handler(Handler) ->
	case maps:get(Handler, handlers(), undefined) of
		undefined -> {error, not_found};
		Config -> start_handler(Handler, Config)
	end.

%% Idempotent: adding an already-installed handler is a no-op.
start_handler(Handler, Config) ->
	case is_started(Handler) of
		true -> ok;
		false -> logger:add_handler(Handler, logger_std_h, Config)
	end.

%%--------------------------------------------------------------------
%% @doc stop all loggers set.
%% @end
%%--------------------------------------------------------------------
stop_handlers() ->
	Handlers = maps:keys(handlers()),
	[ stop_handler(Handler) || Handler <- Handlers ].

%%--------------------------------------------------------------------
%% @doc stop logger.
%% @end
%%--------------------------------------------------------------------
stop_handler(Handler) ->
	logger:remove_handler(Handler).
%%--------------------------------------------------------------------
%% @hidden
%% @doc list started handlers.
%% @end
%%--------------------------------------------------------------------
%% Intersects the handler ids defined in handlers/0 with the handler
%% ids currently installed in the kernel logger.
started_handlers() ->
	HandlersIds = maps:keys(handlers()),
	#{ handlers := HandlersStarted } = logger:get_config(),
	[ Id || #{ id := Id } <- HandlersStarted, Id2 <- HandlersIds, Id =:= Id2 ].

%%--------------------------------------------------------------------
%% @hidden
%% @doc function only used to write to logs during test.
%% Emits the same message once at every severity level, from
%% emergency down to debug.
%% @end
%%--------------------------------------------------------------------
gen_log(Format, FormatMsg, Meta) ->
	?LOG_EMERGENCY(Format, FormatMsg, Meta),
	?LOG_ALERT(Format, FormatMsg, Meta),
	?LOG_CRITICAL(Format, FormatMsg, Meta),
	?LOG_ERROR(Format, FormatMsg, Meta),
	?LOG_WARNING(Format, FormatMsg, Meta),
	?LOG_NOTICE(Format, FormatMsg, Meta),
	?LOG_INFO(Format, FormatMsg, Meta),
	?LOG_DEBUG(Format, FormatMsg, Meta).

%%--------------------------------------------------------------------
%% @hidden
%% @doc returns a log filename.
%% Builds "<prefix>-<node>-<level>.log" inside the configured log
%% directory (default "./logs").
%% @end
%%--------------------------------------------------------------------
logfile_path(Opts) ->
	% TODO: if arweave_config is not set, even with a default
	% value set, this part of the code crashes.
	LogDir = arweave_config:get([logging,path], "./logs"),
	Prefix = maps:get(prefix, Opts),
	Level = maps:get(level, Opts),
	NodeName = erlang:node(),
	RawFilename = lists:join("-", [Prefix, NodeName, Level]),
	Filename = filename:flatten(RawFilename) ++ ".log",
	filename:flatten(filename:join(LogDir, Filename)).


================================================
FILE: apps/arweave/src/ar_mempool.erl
================================================
-module(ar_mempool).

-include("ar.hrl").
-export([reset/0, load_from_disk/0, add_tx/2, drop_txs/1, drop_txs/3,
		get_map/0, get_all_txids/0, take_chunk/2, get_tx/1, is_known_tx/1,
		has_tx/1, get_priority_set/0, get_last_tx_map/0, get_origin_tx_map/0,
		get_propagation_queue/0, del_from_propagation_queue/2]).

%% @doc Reset the mempool bookkeeping in the node_state ets table to
%% the empty state: zero sizes, empty priority/propagation sets and
%% empty index maps.
reset() ->
	ets:insert(node_state, [
		{mempool_size, {0, 0}},
		{tx_priority_set, gb_sets:new()},
		{tx_propagation_queue, gb_sets:new()},
		{last_tx_map, maps:new()},
		{origin_tx_map, maps:new()},
		{origin_spent_total_map, maps:new()},
		{origin_spent_total_denomination, 0}
	]).

%% @doc Rebuild the full mempool state from the serialized mempool
%% persisted by ar_storage. Falls back to reset/0 when nothing is
%% stored or when reading fails.
load_from_disk() ->
	case ar_storage:read_term(mempool) of
		{ok, {SerializedTXs, _MempoolSize}} ->
			TXs = maps:map(
				fun(_, {TX, St}) -> {deserialize_tx(TX), St} end, SerializedTXs),
			%% The highest denomination seen across the stored TXs; all
			%% spent totals are accumulated in this denomination.
			MaxDenomination = maps:fold(
				fun(_TXID, {TX, _Status}, Acc) ->
					max(Acc, TX#tx.denomination)
				end,
				0,
				TXs
			),
			{MempoolSize2, PrioritySet2, PropagationQueue2, LastTXMap2, OriginTXMap2,
					OriginSpentTotalMap2} =
				maps:fold(
					fun(TXID, {TX, Status}, {MempoolSize, PrioritySet, PropagationQueue,
							LastTXMap, OriginTXMap, OriginSpentTotalMap}) ->
						Metadata = {_, _, Timestamp} = init_tx_metadata(TX, Status),
						ets:insert(node_state, {{tx, TXID}, Metadata}),
						ets:insert(tx_prefixes,
								{ar_node_worker:tx_id_prefix(TXID), TXID}),
						%% TXs already marked ready_for_mining are not
						%% re-queued for propagation.
						Q =
							case Status of
								ready_for_mining ->
									PropagationQueue;
								_ ->
									add_to_propagation_queue(PropagationQueue, TX,
											Timestamp)
							end,
						{
							increase_mempool_size(MempoolSize, TX),
							add_to_priority_set(PrioritySet, TX, Status, Timestamp),
							Q,
							add_to_last_tx_map(LastTXMap, TX),
							add_to_origin_tx_map(OriginTXMap, TX),
							add_to_origin_spent_total_map(OriginSpentTotalMap, TX,
									MaxDenomination)
						}
					end,
					{ {0, 0}, gb_sets:new(), gb_sets:new(), maps:new(), maps:new(),
							maps:new() },
					TXs
				),
			ets:insert(node_state, [
				{mempool_size, MempoolSize2},
				{tx_priority_set, PrioritySet2},
				{tx_propagation_queue, PropagationQueue2},
				{last_tx_map, LastTXMap2},
				{origin_tx_map, OriginTXMap2},
				{origin_spent_total_map, OriginSpentTotalMap2},
				{origin_spent_total_denomination, MaxDenomination}
			]);
		not_found ->
			reset();
		{error, Error} ->
			?LOG_ERROR([{event, failed_to_load_mempool}, {reason, Error}]),
			reset()
	end.

%% @doc Add a transaction with the given status to the mempool,
%% recording the call duration in a prometheus histogram.
add_tx(TX, Status) ->
	prometheus_histogram:observe_duration(ar_mempool_add_tx_duration_milliseconds,
		fun() ->
			add_tx2(TX, Status)
		end).

%% Insert (or update the status/data of) one transaction. All derived
%% state is computed first and written with a single ets:insert so
%% readers never see a partially updated mempool.
add_tx2(#tx{ id = TXID } = TX, Status) ->
	Denomination = max(get_current_denomination(),
			get_origin_spent_total_denomination()),
	CheckRequiresUpdate =
		case get_tx_metadata(TXID) of
			not_found ->
				%% Brand new TX: extend every index.
				{_, _, Timestamp} = init_tx_metadata(TX, Status),
				ets:insert(tx_prefixes, {ar_node_worker:tx_id_prefix(TXID), TXID}),
				OriginSpentTotalMap =
					get_redenominated_origin_spent_total_map(Denomination),
				{
					{TX, Status, Timestamp},
					increase_mempool_size(get_mempool_size(), TX),
					add_to_priority_set(get_priority_set(), TX, Status, Timestamp),
					add_to_propagation_queue(get_propagation_queue(), TX, Timestamp),
					add_to_last_tx_map(get_last_tx_map(), TX),
					add_to_origin_tx_map(get_origin_tx_map(), TX),
					add_to_origin_spent_total_map(OriginSpentTotalMap, TX, Denomination)
				};
			{KnownTX, PrevStatus, Timestamp} ->
				%% Known TX: only the status and/or the (format 2) data
				%% payload may change; sizes and maps stay as they are.
				{TX2, IsDataUpdatedRequired} = assert_same_tx(TX, KnownTX),
				case {Status == PrevStatus, IsDataUpdatedRequired} of
					{true, false} ->
						does_not_require_update;
					_ ->
						{
							{TX2, Status, Timestamp},
							get_mempool_size(),
							add_to_priority_set(get_priority_set(), TX2, PrevStatus,
									Status, Timestamp),
							get_propagation_queue(),
							get_last_tx_map(),
							get_origin_tx_map(),
							get_redenominated_origin_spent_total_map(Denomination)
						}
				end
		end,
	case CheckRequiresUpdate of
		does_not_require_update ->
			ok;
		{Metadata, MempoolSize, PrioritySet, PropagationQueue, LastTXMap, OriginTXMap,
				OriginSpentTotalMap2} ->
			%% Insert all data at the same time to ensure atomicity
			ets:insert(node_state, [
				{{tx, TXID}, Metadata},
				{mempool_size, MempoolSize},
				{tx_priority_set, PrioritySet},
				{tx_propagation_queue, PropagationQueue},
				{last_tx_map, LastTXMap},
				{origin_tx_map, OriginTXMap},
				{origin_spent_total_map, OriginSpentTotalMap2},
				{origin_spent_total_denomination, Denomination}
			]),
			case ar_node:is_joined() of
				true ->
					%% 1. Drop unconfirmable transactions:
					%%    - those with clashing last_tx
					%%    - those which overspend an account
					%% 2. If the mempool is too large, drop low priority transactions
					%%    until the mempool is small enough
					%% To limit revalidation work, all of these checks assume
					%% every TX in the mempool has previously been validated.
					drop_txs(find_clashing_txs(TX)),
					drop_txs(find_overspent_txs(TX, Denomination)),
					drop_txs(find_low_priority_txs());
				false ->
					noop
			end
	end.

%% Assert the incoming TX is the same transaction as the one already
%% in the mempool. For format 2, the header fields must match with the
%% data blanked; returns {TXToStore, DataUpdateRequired} — the stored
%% copy is kept unless the incoming TX carries a non-empty data payload.
assert_same_tx(#tx{ format = 1 } = TX, #tx{ format = 1 } = TX) ->
	{TX, false};
assert_same_tx(#tx{ format = 2, data = Data } = TX, #tx{ format = 2 } = TX2) ->
	true = TX#tx{ data = <<>> } == TX2#tx{ data = <<>> },
	case byte_size(Data) == 0 of
		true -> {TX2, false};
		false -> {TX, true}
	end.

%% @doc Drop the given transactions, removing their prefixes and
%% disk-pool data as well.
drop_txs(DroppedTXs) ->
	drop_txs(DroppedTXs, true, true).

%% @doc Drop the given transactions; the flags control whether tx
%% prefixes and disk-pool data roots are removed too. The call
%% duration is recorded in a prometheus histogram.
drop_txs([], _RemoveTXPrefixes, _DropFromDiskPool) ->
	ok;
drop_txs(DroppedTXs, RemoveTXPrefixes, DropFromDiskPool) ->
	prometheus_histogram:observe_duration(drop_txs_duration_milliseconds,
		fun() ->
			drop_txs2(DroppedTXs, RemoveTXPrefixes, DropFromDiskPool)
		end).
%% Remove each given TX from every mempool index, then write the
%% updated state back to node_state in a single ets:insert.
drop_txs2(DroppedTXs, RemoveTXPrefixes, DropFromDiskPool) ->
	Denomination = max(get_current_denomination(),
			get_origin_spent_total_denomination()),
	OriginSpentTotalMap0 = get_redenominated_origin_spent_total_map(Denomination),
	{MempoolSize2, PrioritySet2, PropagationQueue2, LastTXMap2, OriginTXMap2,
			OriginSpentTotalMap2} =
		lists:foldl(
			fun(TX, {MempoolSize, PrioritySet, PropagationQueue, LastTXMap,
					OriginTXMap, OriginSpentTotalMap}) ->
				TXID = TX#tx.id,
				case get_tx_metadata(TXID) of
					not_found ->
						%% Already gone; nothing to undo.
						{MempoolSize, PrioritySet, PropagationQueue, LastTXMap,
								OriginTXMap, OriginSpentTotalMap};
					{_, Status, Timestamp} ->
						ets:delete(node_state, {tx, TXID}),
						case RemoveTXPrefixes of
							true ->
								ets:delete_object(tx_prefixes,
										{ar_node_worker:tx_id_prefix(TXID), TXID});
							false ->
								ok
						end,
						case DropFromDiskPool of
							true ->
								may_be_drop_from_disk_pool(TX);
							false ->
								ok
						end,
						{
							decrease_mempool_size(MempoolSize, TX),
							del_from_priority_set(PrioritySet, TX, Status, Timestamp),
							del_from_propagation_queue(PropagationQueue, TX, Timestamp),
							del_from_last_tx_map(LastTXMap, TX),
							del_from_origin_tx_map(OriginTXMap, TX),
							del_from_origin_spent_total_map(OriginSpentTotalMap, TX,
									Denomination)
						}
				end
			end,
			{
				get_mempool_size(),
				get_priority_set(),
				get_propagation_queue(),
				get_last_tx_map(),
				get_origin_tx_map(),
				OriginSpentTotalMap0
			},
			DroppedTXs
		),
	ets:insert(node_state, [
		{mempool_size, MempoolSize2},
		{tx_priority_set, PrioritySet2},
		{tx_propagation_queue, PropagationQueue2},
		{last_tx_map, LastTXMap2},
		{origin_tx_map, OriginTXMap2},
		{origin_spent_total_map, OriginSpentTotalMap2},
		{origin_spent_total_denomination, Denomination}
	]).

%% @doc Return the mempool as a TXID => Status map, built from the
%% priority set.
get_map() ->
	gb_sets:fold(
		fun({_Utility, TXID, Status}, Acc) -> Acc#{ TXID => Status } end,
		#{},
		get_priority_set()
	).

%% @doc Return all mempool TXIDs (highest priority first, since the
%% fold accumulates by prepending in ascending set order).
get_all_txids() ->
	gb_sets:fold(
		fun({_Utility, TXID, _Status}, Acc) -> [TXID | Acc] end,
		[],
		get_priority_set()
	).

%% @doc Take up to Size TXs from the tail of the given TXID list,
%% returning {ok, TakenTXs, RemainingMempool}.
take_chunk(Mempool, Size) ->
	take_chunk(Mempool, Size, []).
%% Pop TXIDs from the end of the list until Size TXs were resolved or
%% the list is exhausted; TXIDs no longer in the mempool are skipped
%% without counting against Size.
take_chunk(Mempool, 0, Taken) ->
	{ok, Taken, Mempool};
take_chunk([], _Size, Taken) ->
	{ok, Taken, []};
take_chunk(Mempool, Size, Taken) ->
	TXID = lists:last(Mempool),
	RemainingMempool = lists:droplast(Mempool),
	case get_tx(TXID) of
		not_found ->
			take_chunk(RemainingMempool, Size, Taken);
		TX ->
			take_chunk(RemainingMempool, Size - 1, [TX | Taken])
	end.

%% Fetch the {TX, Status, Timestamp} triple stored under {tx, TXID}
%% in node_state, or not_found.
get_tx_metadata(TXID) ->
	case ets:lookup(node_state, {tx, TXID}) of
		[{_, {TX, Status, Timestamp}}] ->
			{TX, Status, Timestamp};
		_ ->
			not_found
	end.

%% @doc Return the TX record for the given TXID, or not_found.
get_tx(TXID) ->
	case get_tx_metadata(TXID) of
		not_found ->
			not_found;
		{TX, _Status, _Timestamp} ->
			TX
	end.

%% @doc True when the TXID is in the ignore registry or in the mempool.
is_known_tx(TXID) ->
	case ar_ignore_registry:member(TXID) of
		true ->
			true;
		false ->
			has_tx(TXID)
	end.

%% @doc True when the TXID is currently in the mempool.
has_tx(TXID) ->
	ets:member(node_state, {tx, TXID}).

%% @doc Return the {Priority, TXID, Status} gb_set, empty when unset.
get_priority_set() ->
	case ets:lookup(node_state, tx_priority_set) of
		[{tx_priority_set, Set}] ->
			Set;
		_ ->
			gb_sets:new()
	end.

%% @doc Return the {Priority, TXID} propagation gb_set, empty when unset.
get_propagation_queue() ->
	case ets:lookup(node_state, tx_propagation_queue) of
		[{tx_propagation_queue, Q}] ->
			Q;
		_ ->
			gb_sets:new()
	end.

%% @doc Return the last_tx => TXID-set map, empty when unset.
get_last_tx_map() ->
	case ets:lookup(node_state, last_tx_map) of
		[{last_tx_map, Map}] ->
			Map;
		_ ->
			maps:new()
	end.

%% @doc Return the origin address => TXID-set map, empty when unset.
get_origin_tx_map() ->
	case ets:lookup(node_state, origin_tx_map) of
		[{origin_tx_map, Map}] ->
			Map;
		_ ->
			maps:new()
	end.

%% @doc Remove the {Priority, TXID} element from the stored propagation
%% queue and write the updated queue back to node_state.
del_from_propagation_queue(Priority, TXID) ->
	ets:insert(node_state, {
		tx_propagation_queue,
		del_from_propagation_queue(ar_mempool:get_propagation_queue(), Priority, TXID)
	}).

%% Pure variant: remove the element from the given queue and return the
%% updated queue. Accepts either a #tx{} plus timestamp or an explicit
%% {Utility, Timestamp} priority plus TXID.
del_from_propagation_queue(PropagationQueue, TX = #tx{}, Timestamp) ->
	Priority = {ar_tx:utility(TX), Timestamp},
	del_from_propagation_queue(PropagationQueue, Priority, TX#tx.id);
del_from_propagation_queue(PropagationQueue, Priority, TXID) when is_bitstring(TXID) ->
	prometheus_histogram:observe_duration(del_from_propagation_queue_duration_milliseconds,
		fun() ->
			gb_sets:del_element({Priority, TXID}, PropagationQueue)
		end).
%% ------------------------------------------------------------------
%% Private Functions
%% ------------------------------------------------------------------

%% Current {HeaderSize, DataSize} of the mempool, {0, 0} when unset.
get_mempool_size() ->
	case ets:lookup(node_state, mempool_size) of
		[{mempool_size, MempoolSize}] ->
			MempoolSize;
		_ ->
			{0, 0}
	end.

%% Build the {TX, Status, Timestamp} triple. The timestamp is negated
%% so that, within equal utility, earlier arrivals sort higher in the
%% gb_sets orderings below.
init_tx_metadata(TX, Status) ->
	{TX, Status, -os:system_time(microsecond)}.

add_to_priority_set(PrioritySet, TX, Status, Timestamp) ->
	Priority = {ar_tx:utility(TX), Timestamp},
	gb_sets:add_element({Priority, TX#tx.id, Status}, PrioritySet).

%% Replace the element carrying PrevStatus with one carrying Status;
%% Priority and TXID stay the same so this is a status-only update.
add_to_priority_set(PrioritySet, TX, PrevStatus, Status, Timestamp) ->
	Priority = {ar_tx:utility(TX), Timestamp},
	gb_sets:add_element({Priority, TX#tx.id, Status},
		gb_sets:del_element({Priority, TX#tx.id, PrevStatus},
			PrioritySet
		)
	).

del_from_priority_set(PrioritySet, TX, Status, Timestamp) ->
	Priority = {ar_tx:utility(TX), Timestamp},
	gb_sets:del_element({Priority, TX#tx.id, Status}, PrioritySet).

add_to_propagation_queue(PropagationQueue, TX, Timestamp) ->
	Priority = {ar_tx:utility(TX), Timestamp},
	gb_sets:add_element({Priority, TX#tx.id}, PropagationQueue).

%% @doc Store a map of last_tx TXIDs to a priority set of TXs that use
%% that last_tx. We actually store the TXIDs of the TXs to avoid bloating
%% the ets table. The trade off is that we have to do a TXID to TX lookup
%% when resolving last_tx clashes.
add_to_last_tx_map(LastTXMap, TX) ->
	Element = unconfirmed_tx(TX),
	Set2 =
		case maps:get(TX#tx.last_tx, LastTXMap, not_found) of
			not_found ->
				gb_sets:from_list([Element]);
			Set ->
				gb_sets:add_element(Element, Set)
		end,
	maps:put(TX#tx.last_tx, Set2, LastTXMap).

del_from_last_tx_map(LastTXMap, TX) ->
	Element = unconfirmed_tx(TX),
	case maps:get(TX#tx.last_tx, LastTXMap, not_found) of
		not_found ->
			LastTXMap;
		Set ->
			maps:put(TX#tx.last_tx, gb_sets:del_element(Element, Set), LastTXMap)
	end.

%% @doc Store a map of addresses to a priority set of TXs that spend
%% from that address. We actually store the TXIDs of the TXs to avoid bloating
%% the ets table. The trade off is that we have to do a TXID to TX lookup
%% when resolving overspends.
add_to_origin_tx_map(OriginTXMap, TX) ->
	Element = unconfirmed_tx(TX),
	Origin = ar_tx:get_owner_address(TX),
	Set2 =
		case maps:get(Origin, OriginTXMap, not_found) of
			not_found ->
				gb_sets:from_list([Element]);
			Set ->
				gb_sets:add_element(Element, Set)
		end,
	maps:put(Origin, Set2, OriginTXMap).

del_from_origin_tx_map(OriginTXMap, TX) ->
	Element = unconfirmed_tx(TX),
	Origin = ar_tx:get_owner_address(TX),
	case maps:get(Origin, OriginTXMap, not_found) of
		not_found ->
			OriginTXMap;
		Set ->
			maps:put(Origin, gb_sets:del_element(Element, Set), OriginTXMap)
	end.

%% The {Utility, TXID} element stored in the last_tx and origin sets.
unconfirmed_tx(TX = #tx{}) ->
	{ar_tx:utility(TX), TX#tx.id}.

increase_mempool_size(
		_MempoolSize = {MempoolHeaderSize, MempoolDataSize}, TX = #tx{}) ->
	{HeaderSize, DataSize} = tx_mempool_size(TX),
	{MempoolHeaderSize + HeaderSize, MempoolDataSize + DataSize}.

decrease_mempool_size(
		_MempoolSize = {MempoolHeaderSize, MempoolDataSize}, TX = #tx{}) ->
	{HeaderSize, DataSize} = tx_mempool_size(TX),
	{MempoolHeaderSize - HeaderSize, MempoolDataSize - DataSize}.

%% {HeaderBytes, DataBytes} accounted for a TX: format 1 data counts
%% against the header budget, format 2 data against the data budget.
tx_mempool_size(#tx{ format = 1, data = Data }) ->
	{?TX_SIZE_BASE + byte_size(Data), 0};
tx_mempool_size(#tx{ format = 2, data = Data }) ->
	{?TX_SIZE_BASE, byte_size(Data)}.

%% Stored TXs may be serialized binaries or legacy records; normalize
%% both to the current #tx{} record.
deserialize_tx(Bin) when is_binary(Bin) ->
	{ok, TX} = ar_serialize:binary_to_tx(Bin),
	TX;
deserialize_tx(TX) ->
	ar_storage:migrate_tx_record(TX).

%% Format 1 TXs carry their data inline, so there is nothing to drop
%% from the disk pool.
may_be_drop_from_disk_pool(#tx{ format = 1 }) ->
	ok;
may_be_drop_from_disk_pool(TX) ->
	ar_data_sync:maybe_drop_data_root_from_disk_pool(TX#tx.data_root,
			TX#tx.data_size, TX#tx.id).

find_low_priority_txs() ->
	find_low_priority_txs(gb_sets:iterator(get_priority_set()), get_mempool_size()).
%% Walk the priority set from the lowest priority upwards, collecting
%% TXs to drop until both the header and data budgets fit their limits.
find_low_priority_txs(Iterator, {MempoolHeaderSize, MempoolDataSize})
		when MempoolHeaderSize > ?MEMPOOL_HEADER_SIZE_LIMIT;
			MempoolDataSize > ?MEMPOOL_DATA_SIZE_LIMIT ->
	{{_Utility, TXID, _Status} = _Element, Iterator2} = gb_sets:next(Iterator),
	TX = get_tx(TXID),
	case should_drop_low_priority_tx(TX, {MempoolHeaderSize, MempoolDataSize}) of
		true ->
			MempoolSize2 = decrease_mempool_size({MempoolHeaderSize, MempoolDataSize}, TX),
			[TX | find_low_priority_txs(Iterator2, MempoolSize2)];
		false ->
			find_low_priority_txs(Iterator2, {MempoolHeaderSize, MempoolDataSize})
	end;
find_low_priority_txs(_Iterator, {_MempoolHeaderSize, _MempoolDataSize}) ->
	[].

%% When only the data budget is exceeded, dropping a TX helps only if
%% it is a format 2 TX currently holding data.
should_drop_low_priority_tx(_TX, {MempoolHeaderSize, _MempoolDataSize})
		when MempoolHeaderSize > ?MEMPOOL_HEADER_SIZE_LIMIT ->
	true;
should_drop_low_priority_tx(TX, {_MempoolHeaderSize, MempoolDataSize})
		when MempoolDataSize > ?MEMPOOL_DATA_SIZE_LIMIT ->
	TX#tx.format == 2 andalso byte_size(TX#tx.data) > 0;
should_drop_low_priority_tx(_TX, {_MempoolHeaderSize, _MempoolDataSize}) ->
	false.

%% @doc identify any transactions that refer to the same last_tx
%% (where last_tx is the last confirmed transaction in the wallet).
%% Only 1 of these transactions will confirm, so we want to drop
%% all the others to prevent a mempool spam attack.
find_clashing_txs(#tx{ last_tx = <<>> }) ->
	[];
find_clashing_txs(TX = #tx{}) ->
	Wallets = ar_wallets:get(ar_tx:get_addresses([TX])),
	find_clashing_txs(TX, Wallets).

%% Only consider the clash set when the TX's last_tx actually matches
%% the wallet's last confirmed TX; otherwise nothing to drop here.
find_clashing_txs(TX = #tx{}, Wallets) when is_map(Wallets) ->
	case ar_tx:check_last_tx(Wallets, TX) of
		true ->
			ClashingTXIDs = maps:get(TX#tx.last_tx, get_last_tx_map(), gb_sets:new()),
			filter_clashing_txs(ClashingTXIDs);
		_ ->
			[]
	end;
find_clashing_txs(_TX, _Wallets) ->
	[].

%% @doc Only the highest priority TX will be kept, others will be dropped.
%% Priority is defined as:
%% 1. ar_tx:utility
%% 2. alphanumeric order of TXID (z is higher priority than a)
%%
%% Adding the TXID term to the priority calculation (rather than local
%% timestamp), ensures that the sorting will be stable and deterministic
%% across peers and so all peers will drop the same clashing TXs regardless
%% of the order in which the transactions are received.
filter_clashing_txs(ClashingTXIDs) ->
	case gb_sets:is_empty(ClashingTXIDs) of
		true ->
			[];
		false ->
			% Exclude the highest priority TX from the list of TXs to be dropped
			{_, UncomfirmableTXIDs} = gb_sets:take_largest(ClashingTXIDs),
			to_txs(UncomfirmableTXIDs)
	end.

%% @doc identify any transactions that would overspend an account if
%% they were to be confirmed. Since those transactions won't confirm
%% we want to drop them to prevent a mempool spam attack (e.g.
%% an attacker posts hundreds or thousands of overspend transactions
%% which saturate the mempool, but for which only 1 will ever be
%% confirmed)
%%
%% Note: when doing the overspend calculation any unconfirmed deposit
%% transactions are ignored. This is to prevent a second potentially
%% malicious scenario like the following:
%%
%% Peer A: receives deposit TX and several spend TXs,
%%         all TXs are added to the mempool
%% Peer B: receives only the spend TXs, and all are dropped from the mempool
%% Peer A: publishes block
%% Peer B: needs to request potentially many TXs from peer A since their
%%         mempools differ
%% A malicious attacker could exploit this to greatly increase the overall
%% network traffic, slow down block propagation, and increases fork incidence.
%%
%% By ignoring unconfirmed deposit TXs, and ensuring a globally consistent
%% sort order (e.g. (format, reward, TXID)) this malicious scenario is
%% prevented.
%% Returns the list of TXs to drop so the TX's origin account no longer
%% overspends its confirmed balance. Non-spending TXs yield [].
find_overspent_txs(<<>>, _Denomination) ->
	[];
find_overspent_txs(#tx{ reward = Reward, quantity = Quantity } = TX, Denomination)
		when Reward > 0 orelse Quantity > 0 ->
	Origin = ar_tx:get_owner_address(TX),
	SpentTotal = get_origin_spent_total(Origin),
	Balance = get_confirmed_balance(Origin, Denomination),
	if
		SpentTotal =< Balance ->
			[];
		true ->
			SpentTXIDs = maps:get(Origin, get_origin_tx_map(), gb_sets:new()),
			drop_lowest_until_solvent(SpentTXIDs, SpentTotal, Balance, Denomination)
	end;
find_overspent_txs(_TX, _Denomination) ->
	[].

%% @doc Walk from lowest priority, dropping TXs until spent total =< balance.
%% Only iterates over the TXs that need to be dropped (amortized O(1) per add_tx).
drop_lowest_until_solvent(_SpentTXIDs, SpentTotal, Balance, _Denomination)
		when SpentTotal =< Balance ->
	[];
drop_lowest_until_solvent(SpentTXIDs, SpentTotal, Balance, Denomination) ->
	case gb_sets:is_empty(SpentTXIDs) of
		true ->
			[];
		false ->
			{{_, TXID}, Remaining} = gb_sets:take_smallest(SpentTXIDs),
			TX = get_tx(TXID),
			Amount = tx_spent_amount(TX, Denomination),
			[TX | drop_lowest_until_solvent(Remaining, SpentTotal - Amount,
					Balance, Denomination)]
	end.

%% Resolve a list of TXIDs — or a gb_set of {Priority, TXID} elements —
%% to the corresponding TX records.
to_txs(TXIDs) when is_list(TXIDs) ->
	lists:map(fun get_tx/1, TXIDs);
to_txs(Set) ->
	to_txs([TXID || {_Priority, TXID} <- gb_sets:to_list(Set)]).

%% Total amount a TX removes from its origin account (fee + transfer),
%% redenominated when a non-zero target denomination is given.
tx_spent_amount(#tx{ reward = Reward, quantity = Quantity }, 0) ->
	Reward + Quantity;
tx_spent_amount(#tx{ reward = Reward, quantity = Quantity,
		denomination = TXDenomination }, Denomination) ->
	ar_pricing:redenominate(Reward + Quantity, TXDenomination, Denomination).

%% Denomination of the current tip block, 0 before the node joins.
get_current_denomination() ->
	case ar_node:get_current_block() of
		not_joined ->
			0;
		Block ->
			Block#block.denomination
	end.

%% The origin address => spent total map from node_state, empty when unset.
get_origin_spent_total_map() ->
	case ets:lookup(node_state, origin_spent_total_map) of
		[{_, Map}] ->
			Map;
		_ ->
			#{}
	end.

%% The denomination the stored spent totals are expressed in, 0 when unset.
get_origin_spent_total_denomination() ->
	case ets:lookup(node_state, origin_spent_total_denomination) of
		[{_, Denomination}] ->
			Denomination;
		_ ->
			0
	end.
%% Add the TX's spent amount (fee + quantity, in the given denomination)
%% to its origin's running total.
add_to_origin_spent_total_map(SpentTotalMap, TX, Denomination) ->
	Origin = ar_tx:get_owner_address(TX),
	Amount = tx_spent_amount(TX, Denomination),
	OldAmount = maps:get(Origin, SpentTotalMap, 0),
	maps:put(Origin, OldAmount + Amount, SpentTotalMap).

%% Subtract the TX's spent amount from its origin's running total,
%% clamping at zero.
del_from_origin_spent_total_map(SpentTotalMap, TX, Denomination) ->
	Origin = ar_tx:get_owner_address(TX),
	Amount = tx_spent_amount(TX, Denomination),
	OldAmount = maps:get(Origin, SpentTotalMap, 0),
	maps:put(Origin, max(0, OldAmount - Amount), SpentTotalMap).

%% Total unconfirmed spend recorded for the given origin address.
get_origin_spent_total(Origin) ->
	maps:get(Origin, get_origin_spent_total_map(), 0).

%% @doc Return the origin address => spent total map in the given denomination. If the
%% denomination has increased, redenominate all stored totals directly.
%% This is O(number of origins) and only triggers on denomination change.
get_redenominated_origin_spent_total_map(Denomination) ->
	case get_origin_spent_total_denomination() of
		Denomination ->
			get_origin_spent_total_map();
		OldDenomination ->
			redenominate_origin_spent_total(get_origin_spent_total_map(),
					OldDenomination, Denomination)
	end.

redenominate_origin_spent_total(SpentTotalMap, OldDenomination, NewDenomination) ->
	maps:map(
		fun(_Origin, Total) ->
			ar_pricing:redenominate(Total, OldDenomination, NewDenomination)
		end,
		SpentTotalMap
	).

%% Confirmed balance of the origin, redenominated to the given
%% denomination. Legacy 2-tuple wallet entries carry denomination 1;
%% 4-tuple entries carry their own denomination.
get_confirmed_balance(Origin, Denomination) ->
	Wallet = ar_wallets:get(Origin),
	case maps:get(Origin, Wallet, not_found) of
		not_found ->
			0;
		{Balance, _LastTX} ->
			ar_pricing:redenominate(Balance, 1, Denomination);
		{Balance, _LastTX, AccountDenomination, _MiningPermission} ->
			ar_pricing:redenominate(Balance, AccountDenomination, Denomination)
	end.


================================================
FILE: apps/arweave/src/ar_merkle.erl
================================================
%%% @doc Generates annotated merkle trees, paths inside those trees, as well
%%% as verification of those proofs.
-module(ar_merkle).
-export([generate_tree/1, generate_path/3, validate_path/4, validate_path/5,
		extract_note/1, extract_root/1]).
-export([get/2, hash/1, note_to_binary/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%%% @doc Generates annotated merkle trees, paths inside those trees, as well
%%% as verification of those proofs.

%% A node of the annotated merkle tree.
-record(node, {
	id,
	type = branch,		% root | branch | leaf
	data,				% The value (for leaves).
	note,				% The offset, a number less than 2^256.
	left,				% The (optional) ID of a node to the left.
	right,				% The (optional) ID of a node to the right.
	max,				% The maximum observed note at this point.
	is_rebased = false
}).

-define(HASH_SIZE, ?CHUNK_ID_HASH_SIZE).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Generate a Merkle tree from a list of pairs of IDs (of length 32 bytes)
%% and labels -- offsets. The list may be arbitrarily nested - the inner lists then
%% contain the leaves of the sub trees with the rebased (on 0) starting offsets.
generate_tree(Elements) ->
	generate_tree(Elements, queue:new(), []).

%% @doc Generate a Merkle path for the given offset Dest from the tree Tree
%% with the root ID.
generate_path(ID, Dest, Tree) ->
	binary:list_to_bin(generate_path_parts(ID, Dest, Tree, 0)).

%% @doc Validate the given merkle path.
%% Defaults to the basic ruleset (no border, split, or rebase checks).
validate_path(ID, Dest, RightBound, Path) ->
	validate_path(ID, Dest, RightBound, Path, basic_ruleset).

%% @doc Validate the given merkle path using the given set of rules.
%% A non-positive right bound is a caller error: log and throw.
validate_path(ID, Dest, RightBound, _Path, _Ruleset) when RightBound =< 0 ->
	?LOG_ERROR([{event, validate_path_called_with_non_positive_right_bound},
			{root, ar_util:encode(ID)}, {dest, Dest}, {right_bound, RightBound}]),
	throw(invalid_right_bound);
%% Clamp Dest into [0, RightBound) before validating.
validate_path(ID, Dest, RightBound, Path, Ruleset) when Dest >= RightBound ->
	validate_path(ID, RightBound - 1, RightBound, Path, Ruleset);
validate_path(ID, Dest, RightBound, Path, Ruleset) when Dest < 0 ->
	validate_path(ID, 0, RightBound, Path, Ruleset);
validate_path(ID, Dest, RightBound, Path, Ruleset) ->
	validate_path(ID, Dest, 0, RightBound, Path, Ruleset).

%% Translate each named ruleset into the three check flags:
%% CheckBorders, CheckSplit (false | strict | relaxed), AllowRebase.
validate_path(ID, Dest, LeftBound, RightBound, Path, basic_ruleset) ->
	CheckBorders = false,
	CheckSplit = false,
	AllowRebase = false,
	validate_path(ID, Dest, LeftBound, RightBound, Path,
			CheckBorders, CheckSplit, AllowRebase);
validate_path(ID, Dest, LeftBound, RightBound, Path, strict_borders_ruleset) ->
	CheckBorders = true,
	CheckSplit = false,
	AllowRebase = false,
	validate_path(ID, Dest, LeftBound, RightBound, Path,
			CheckBorders, CheckSplit, AllowRebase);
validate_path(ID, Dest, LeftBound, RightBound, Path, strict_data_split_ruleset) ->
	CheckBorders = true,
	CheckSplit = strict,
	AllowRebase = false,
	validate_path(ID, Dest, LeftBound, RightBound, Path,
			CheckBorders, CheckSplit, AllowRebase);
validate_path(ID, Dest, LeftBound, RightBound, Path, offset_rebase_support_ruleset) ->
	CheckBorders = true,
	CheckSplit = relaxed,
	AllowRebase = true,
	validate_path(ID, Dest, LeftBound, RightBound, Path,
			CheckBorders, CheckSplit, AllowRebase).

validate_path(ID, Dest, LeftBound, RightBound, Path,
		CheckBorders, CheckSplit, AllowRebase) ->
	DataSize = RightBound,
	%% Will be set to true only if we only take right branches from the root to the leaf. In this
	%% case we know the leaf chunk is the final chunk in the range represented by the merkle tree.
	IsRightMostInItsSubTree = undefined,
	%% Set to non-zero when AllowRebase is true and we begin processing a subtree.
LeftBoundShift = 0, validate_path(ID, Dest, LeftBound, RightBound, Path, DataSize, IsRightMostInItsSubTree, LeftBoundShift, CheckBorders, CheckSplit, AllowRebase). %% Validate the leaf of the merkle path (i.e. the data chunk) validate_path(ID, _Dest, LeftBound, RightBound, << Data:?HASH_SIZE/binary, EndOffset:(?NOTE_SIZE*8) >>, DataSize, IsRightMostInItsSubTree, LeftBoundShift, CheckBorders, CheckSplit, _AllowRebase) -> AreBordersValid = case CheckBorders of true -> %% Borders are only valid if every offset does not exceed the previous offset %% by more than ?DATA_CHUNK_SIZE EndOffset - LeftBound =< ?DATA_CHUNK_SIZE andalso RightBound - LeftBound =< ?DATA_CHUNK_SIZE; false -> %% Borders are always valid if we don't need to check them true end, IsSplitValid = case CheckSplit of strict -> ChunkSize = EndOffset - LeftBound, case validate_strict_split of _ when ChunkSize == (?DATA_CHUNK_SIZE) -> LeftBound rem (?DATA_CHUNK_SIZE) == 0; _ when EndOffset == DataSize -> Border = ar_util:floor_int(RightBound, ?DATA_CHUNK_SIZE), RightBound rem (?DATA_CHUNK_SIZE) > 0 andalso LeftBound =< Border; _ -> LeftBound rem (?DATA_CHUNK_SIZE) == 0 andalso DataSize - LeftBound > (?DATA_CHUNK_SIZE) andalso DataSize - LeftBound < 2 * (?DATA_CHUNK_SIZE) end; relaxed -> %% Reject chunks smaller than 256 KiB unless they are the last or the only chunks %% of their datasets or the second last chunks which do not exceed 256 KiB when %% combined with the following (last) chunks. Finally, reject chunks smaller than %% their Merkle proofs unless they are the last chunks of their datasets. ShiftedLeftBound = LeftBoundShift + LeftBound, ShiftedEndOffset = LeftBoundShift + EndOffset, case IsRightMostInItsSubTree of true -> %% The last chunk may either start at the bucket start or %% span two buckets. 
Bucket0 = ShiftedLeftBound div (?DATA_CHUNK_SIZE), Bucket1 = ShiftedEndOffset div (?DATA_CHUNK_SIZE), (ShiftedLeftBound rem (?DATA_CHUNK_SIZE) == 0) %% Make sure each chunk "steps" at least 1 byte into %% its own bucket, which is to the right from the right border %% cause since this chunk does not start at the left border, %% the bucket on the left from the right border belongs to %% the preceding chunk. orelse (Bucket0 + 1 == Bucket1 andalso ShiftedEndOffset rem ?DATA_CHUNK_SIZE /= 0); _ -> %% May also be the only chunk of a single-chunk subtree. ShiftedLeftBound rem (?DATA_CHUNK_SIZE) == 0 end; _ -> %% Split is always valid if we don't need to check it true end, case AreBordersValid andalso IsSplitValid of true -> validate_leaf(ID, Data, EndOffset, LeftBound, RightBound, LeftBoundShift); false -> false end; %% Validate the given merkle path where any subtrees may have 0-based offset. validate_path(ID, Dest, LeftBound, RightBound, << 0:(?HASH_SIZE*8), L:?HASH_SIZE/binary, R:?HASH_SIZE/binary, Note:(?NOTE_SIZE*8), Rest/binary >>, DataSize, _IsRightMostInItsSubTree, LeftBoundShift, CheckBorders, CheckSplit, true) -> case hash([hash(L), hash(R), hash(note_to_binary(Note))]) of ID -> {Path, NextLeftBound, NextRightBound, Dest2, NextLeftBoundShift} = case Dest < Note of true -> Note2 = min(RightBound, Note), {L, 0, Note2 - LeftBound, Dest - LeftBound, LeftBoundShift + LeftBound}; false -> Note2 = max(LeftBound, Note), {R, 0, RightBound - Note2, Dest - Note2, LeftBoundShift + Note2} end, validate_path(Path, Dest2, NextLeftBound, NextRightBound, Rest, DataSize, undefined, NextLeftBoundShift, CheckBorders, CheckSplit, true); _ -> false end; %% Validate a non-leaf node in the merkle path validate_path(ID, Dest, LeftBound, RightBound, << L:?HASH_SIZE/binary, R:?HASH_SIZE/binary, Note:(?NOTE_SIZE*8), Rest/binary >>, DataSize, IsRightMostInItsSubTree, LeftBoundShift, CheckBorders, CheckSplit, AllowRebase) -> validate_node(ID, Dest, LeftBound, RightBound, L, R, Note, Rest, 
DataSize, IsRightMostInItsSubTree, LeftBoundShift, CheckBorders, CheckSplit, AllowRebase); %% Invalid merkle path validate_path(_, _, _, _, _, _, _, _, _, _, _) -> false. validate_node(ID, Dest, LeftBound, RightBound, L, R, Note, RemainingPath, DataSize, IsRightMostInItsSubTree, LeftBoundShift, CheckBorders, CheckSplit, AllowRebase) -> case hash([hash(L), hash(R), hash(note_to_binary(Note))]) of ID -> {BranchID, NextLeftBound, NextRightBound, IsRightMostInItsSubTree2} = case Dest < Note of true -> %% Traverse left branch (at this point we know the leaf chunk will never %% be the right most in the subtree) {L, LeftBound, min(RightBound, Note), false}; false -> %% Traverse right branch {R, max(LeftBound, Note), RightBound, case IsRightMostInItsSubTree of undefined -> true; _ -> IsRightMostInItsSubTree end} end, validate_path(BranchID, Dest, NextLeftBound, NextRightBound, RemainingPath, DataSize, IsRightMostInItsSubTree2, LeftBoundShift, CheckBorders, CheckSplit, AllowRebase); _ -> false end. validate_leaf(ID, Data, EndOffset, LeftBound, RightBound, LeftBoundShift) -> case hash([hash(Data), hash(note_to_binary(EndOffset))]) of ID -> {Data, LeftBoundShift + LeftBound, LeftBoundShift + max(min(RightBound, EndOffset), LeftBound + 1)}; _ -> false end. %% @doc Get the note (offset) attached to the leaf from a path. extract_note(Path) -> binary:decode_unsigned( binary:part(Path, byte_size(Path) - ?NOTE_SIZE, ?NOTE_SIZE) ). %% @doc Get the Merkle root from a path. extract_root(<< Data:?HASH_SIZE/binary, EndOffset:(?NOTE_SIZE*8) >>) -> {ok, hash([hash(Data), hash(note_to_binary(EndOffset))])}; extract_root(<< L:?HASH_SIZE/binary, R:?HASH_SIZE/binary, Note:(?NOTE_SIZE*8), _/binary >>) -> {ok, hash([hash(L), hash(R), hash(note_to_binary(Note))])}; extract_root(_) -> {error, invalid_proof}. %%%=================================================================== %%% Private functions. 
%%%===================================================================

%% Build the leaf row: plain {ID, Offset} pairs become leaves; a nested list
%% becomes a complete subtree whose root is enqueued as a single element and
%% marked is_rebased (its offsets start from 0).
generate_tree([Element | Elements], Stack, Tree) when is_list(Element) ->
	{SubRoot, SubTree} = generate_tree(Element),
	SubTree2 = [mark_rebased(Node, SubRoot) || Node <- SubTree],
	SubRootN = get(SubRoot, SubTree2),
	generate_tree(Elements, queue:in(SubRootN, Stack), Tree ++ SubTree2);
generate_tree([Element | Elements], Stack, Tree) ->
	Leaf = generate_leaf(Element),
	generate_tree(Elements, queue:in(Leaf, Stack), [Leaf | Tree]);
generate_tree([], Stack, Tree) ->
	case queue:to_list(Stack) of
		[] ->
			%% The empty dataset has the empty binary as its root.
			{<<>>, []};
		_ ->
			generate_all_rows(queue:to_list(Stack), Tree)
	end.

%% Flag the root node of a subtree so path generation knows to emit a
%% rebase marker when stepping into it.
mark_rebased(#node{ id = RootID } = Node, RootID) ->
	Node#node{ is_rebased = true };
mark_rebased(Node, _RootID) ->
	Node.

%% A leaf id commits to both the chunk id and its end offset.
generate_leaf({Data, Note}) ->
	Hash = hash([hash(Data), hash(note_to_binary(Note))]),
	#node{ id = Hash, type = leaf, data = Data, note = Note, max = Note }.

%% Note: This implementation leaves some duplicates in the tree structure.
%% The produced trees could be a little smaller if these duplicates were
%% not present, but removing them with ar_util:unique takes far too long.
generate_all_rows([RootN], Tree) ->
	RootID = RootN#node.id,
	{RootID, Tree};
generate_all_rows(Row, Tree) ->
	NewRow = generate_row(Row, 0),
	generate_all_rows(NewRow, NewRow ++ Tree).

%% Pair up adjacent nodes of a row, threading the running offset shift
%% accumulated from rebased subtrees through the row left to right.
generate_row([], _Shift) -> [];
generate_row([Left], _Shift) -> [Left];
generate_row([L, R | Rest], Shift) ->
	{N, Shift2} = generate_node(L, R, Shift),
	[N | generate_row(Rest, Shift2)].

%% Combine two nodes into a branch. A rebased child stores 0-based offsets,
%% so its max is translated by the shift accumulated so far before being
%% used as the branch note / propagated max.
generate_node(Left, empty, Shift) ->
	{Left, Shift};
generate_node(L, R, Shift) ->
	LMax = L#node.max,
	LMax2 = case L#node.is_rebased of true -> Shift + LMax; _ -> LMax end,
	RMax = R#node.max,
	RMax2 = case R#node.is_rebased of true -> LMax2 + RMax; _ -> RMax end,
	{#node{
		id = hash([hash(L#node.id), hash(R#node.id), hash(note_to_binary(LMax2))]),
		type = branch,
		left = L#node.id,
		right = R#node.id,
		note = LMax2,
		max = RMax2
	}, RMax2}.
%% Walk the tree from ID towards the leaf containing Dest, emitting for every
%% branch its {left, right, note} triplet and, when the next node is a rebased
%% subtree root, a ?HASH_SIZE run of zero bytes (the rebase marker consumed by
%% validate_path/11). The leaf contributes {chunk id, end offset}.
generate_path_parts(ID, Dest, Tree, PrevNote) ->
	case get(ID, Tree) of
		N when N#node.type == leaf ->
			[N#node.data, note_to_binary(N#node.note)];
		N when N#node.type == branch ->
			Note = N#node.note,
			{Direction, NextID} =
				case Dest < Note of
					true -> {left, N#node.left};
					false -> {right, N#node.right}
				end,
			NextN = get(NextID, Tree),
			%% On entering a rebased subtree, translate Dest into the
			%% subtree's 0-based coordinates.
			{RebaseMark, Dest2} =
				case {NextN#node.is_rebased, Direction} of
					{false, _} -> {<<>>, Dest};
					{true, right} -> {<< 0:(?HASH_SIZE * 8) >>, Dest - Note};
					{true, left} -> {<< 0:(?HASH_SIZE * 8) >>, Dest - PrevNote}
				end,
			[RebaseMark, N#node.left, N#node.right, note_to_binary(Note)
					| generate_path_parts(NextID, Dest2, Tree, Note)]
	end.

%% Look a node up by id in the flat node list; false when absent.
get(ID, Map) ->
	case lists:keyfind(ID, #node.id, Map) of
		false ->
			false;
		Node ->
			Node
	end.

%% Encode a note (offset) as a big-endian ?NOTE_SIZE-byte binary.
note_to_binary(Note) ->
	<< Note:(?NOTE_SIZE * 8) >>.

%% SHA-256 of a binary, or of the concatenation of a list of binaries.
hash(Parts) when is_list(Parts) ->
	crypto:hash(sha256, binary:list_to_bin(Parts));
hash(Binary) ->
	crypto:hash(sha256, Binary).

%% Turn per-element sizes into cumulative end offsets, preserving order.
make_tags_cumulative(L) ->
	lists:reverse(
		element(2,
			lists:foldl(
				fun({X, Tag}, {AccTag, AccL}) ->
					Curr = AccTag + Tag,
					{Curr, [{X, Curr} | AccL]}
				end,
				{0, []},
				L
			)
		)
	).

%%%===================================================================
%%% Tests.
%%%===================================================================

-define(TEST_SIZE, 64 * 1024).
-define(UNEVEN_TEST_SIZE, 35643).
-define(UNEVEN_TEST_TARGET, 33271).

generate_and_validate_balanced_tree_path_test_() ->
	{timeout, 30, fun test_generate_and_validate_balanced_tree_path/0}.
%% Build a perfectly balanced tree of ?TEST_SIZE one-byte "chunks" and check
%% that 100 random targets validate under both the basic and the
%% strict-borders rulesets.
test_generate_and_validate_balanced_tree_path() ->
	Labels = make_tags_cumulative([{<< N:256 >>, 1} || N <- lists:seq(0, ?TEST_SIZE - 1)]),
	{Root, Tree} = ar_merkle:generate_tree(Labels),
	%% A balanced binary tree over K leaves has 2K - 1 nodes.
	?assertEqual(length(Tree), (?TEST_SIZE * 2) - 1),
	CheckRandomTarget =
		fun(_Round) ->
			Target = rand:uniform(?TEST_SIZE) - 1,
			Proof = ar_merkle:generate_path(Root, Target, Tree),
			{Leaf, Start, End} =
				ar_merkle:validate_path(Root, Target, ?TEST_SIZE, Proof),
			{Leaf, Start, End} =
				ar_merkle:validate_path(Root, Target, ?TEST_SIZE, Proof,
						strict_borders_ruleset),
			?assertEqual(Target, binary:decode_unsigned(Leaf)),
			?assert(Target < End),
			?assert(Target >= Start)
		end,
	lists:foreach(CheckRandomTarget, lists:seq(1, 100)).

generate_and_validate_tree_with_rebase_test_() ->
	Cases = [
		fun test_tree_with_rebase_shallow/0,
		fun test_tree_with_rebase_nested/0,
		fun test_tree_with_rebase_bad_paths/0,
		fun test_tree_with_rebase_partial_chunk/0,
		fun test_tree_with_rebase_subtree_ids/0
	],
	[{timeout, 30, Case} || Case <- Cases].
%% Exercise the offset_rebase_support_ruleset on one-level trees: a plain
%% two-leaf tree, a tree whose right leaf is a rebased subtree, and a tree
%% whose both leaves are rebased subtrees. Paths from one tree must never
%% validate against the root of another.
test_tree_with_rebase_shallow() ->
	Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
	%%      Root1
	%%     /     \
	%% Leaf1      Leaf2 (with offset reset)
	Tags0 = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		{Leaf2, 2 * ?DATA_CHUNK_SIZE}
	],
	{Root0, Tree0} = ar_merkle:generate_tree(Tags0),
	assert_tree([
		{branch, undefined, ?DATA_CHUNK_SIZE, false},
		{leaf, Leaf2, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false}
	], Tree0),
	Tags1 = [{Leaf1, ?DATA_CHUNK_SIZE}, [{Leaf2, ?DATA_CHUNK_SIZE}]],
	{Root1, Tree1} = ar_merkle:generate_tree(Tags1),
	assert_tree([
		{branch, undefined, ?DATA_CHUNK_SIZE, false},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false},
		{leaf, Leaf2, ?DATA_CHUNK_SIZE, true}
	], Tree1),
	%% The rebased tree commits to a different root even though the leaf
	%% contents and absolute offsets match.
	?assertNotEqual(Root1, Root0),
	Path0_1 = ar_merkle:generate_path(Root0, 0, Tree0),
	Path1_1 = ar_merkle:generate_path(Root1, 0, Tree1),
	?assertNotEqual(Path0_1, Path1_1),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root0, 0,
			2 * ?DATA_CHUNK_SIZE, Path0_1, offset_rebase_support_ruleset),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root1, 0,
			2 * ?DATA_CHUNK_SIZE, Path1_1, offset_rebase_support_ruleset),
	%% Cross-validation of paths against the other tree's root must fail.
	?assertEqual(false, ar_merkle:validate_path(Root1, 0, 2 * ?DATA_CHUNK_SIZE,
			Path0_1, offset_rebase_support_ruleset)),
	?assertEqual(false, ar_merkle:validate_path(Root0, 0, 2 * ?DATA_CHUNK_SIZE,
			Path1_1, offset_rebase_support_ruleset)),
	Path0_2 = ar_merkle:generate_path(Root0, ?DATA_CHUNK_SIZE, Tree0),
	Path1_2 = ar_merkle:generate_path(Root1, ?DATA_CHUNK_SIZE, Tree1),
	?assertNotEqual(Path1_2, Path0_2),
	{Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root0, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE,
			Path0_2, offset_rebase_support_ruleset),
	{Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root1, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE,
			Path1_2, offset_rebase_support_ruleset),
	%% Any in-range target resolves to the chunk covering it.
	{Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root1, 2 * ?DATA_CHUNK_SIZE - 1, 2 * ?DATA_CHUNK_SIZE,
			Path1_2, offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			Root1, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE,
			Path0_2, offset_rebase_support_ruleset)),
	?assertEqual(false, ar_merkle:validate_path(
			Root0, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE,
			Path1_2, offset_rebase_support_ruleset)),
	%% A path to a different chunk of the same tree must also fail.
	?assertEqual(false, ar_merkle:validate_path(
			Root1, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE,
			Path1_1, offset_rebase_support_ruleset)),
	?assertEqual(false, ar_merkle:validate_path(
			Root1, 0, 2 * ?DATA_CHUNK_SIZE,
			Path1_2, offset_rebase_support_ruleset)),
	%%           ________Root2_________
	%%          /                      \
	%% Leaf1 (with offset reset)        Leaf2 (with offset reset)
	Tags2 = [
		[ {Leaf1, ?DATA_CHUNK_SIZE} ],
		[ {Leaf2, ?DATA_CHUNK_SIZE} ]
	],
	{Root2, Tree2} = ar_merkle:generate_tree(Tags2),
	assert_tree([
		{branch, undefined, ?DATA_CHUNK_SIZE, false},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, true},
		{leaf, Leaf2, ?DATA_CHUNK_SIZE, true}
	], Tree2),
	Path2_1 = ar_merkle:generate_path(Root2, 0, Tree2),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2, 0,
			2 * ?DATA_CHUNK_SIZE, Path2_1, offset_rebase_support_ruleset),
	Path2_2 = ar_merkle:generate_path(Root2, ?DATA_CHUNK_SIZE, Tree2),
	{Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2,
			?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE, Path2_2,
			offset_rebase_support_ruleset),
	{Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2,
			2*?DATA_CHUNK_SIZE - 1, 2*?DATA_CHUNK_SIZE, Path2_2,
			offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(Root2, ?DATA_CHUNK_SIZE,
			2*?DATA_CHUNK_SIZE, Path2_1, offset_rebase_support_ruleset)),
	?assertEqual(false, ar_merkle:validate_path(Root2, 0, 2*?DATA_CHUNK_SIZE,
			Path2_2, offset_rebase_support_ruleset)).
%% Exercise the offset_rebase_support_ruleset on multi-level trees with
%% rebased subtrees nested at various depths.
test_tree_with_rebase_nested() ->
	%% _________________Root3________________
	%% /                                     \
	%% _____SubTree1______________          Leaf6
	%% /                          \
	%% SubTree2     ________SubTree3_________
	%% /      \     /                        \
	%% Leaf1 Leaf2  SubTree4 (with offset reset)  Leaf5
	%%              /       \
	%%          Leaf3       Leaf4 (with offset reset)
	Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf3 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf4 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf5 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf6 = crypto:strong_rand_bytes(?HASH_SIZE),
	Tags3 = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		{Leaf2, 2*?DATA_CHUNK_SIZE},
		[
			{Leaf3, ?DATA_CHUNK_SIZE},
			[ {Leaf4, ?DATA_CHUNK_SIZE} ]
		],
		{Leaf5, 5*?DATA_CHUNK_SIZE},
		{Leaf6, 6*?DATA_CHUNK_SIZE}
	],
	{Root3, Tree3} = ar_merkle:generate_tree(Tags3),
	assert_tree([
		{branch, undefined, 5*?DATA_CHUNK_SIZE, false}, %% Root
		{branch, undefined, 2*?DATA_CHUNK_SIZE, false}, %% SubTree1
		{leaf, Leaf6, 6*?DATA_CHUNK_SIZE, false},
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree2
		{branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% SubTree3
		{leaf, Leaf6, 6*?DATA_CHUNK_SIZE, false}, %% duplicates are safe and expected
		{leaf, Leaf6, 6*?DATA_CHUNK_SIZE, false}, %% duplicates are safe and expected
		{leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf2, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false},
		{branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree4
		{leaf, Leaf3, ?DATA_CHUNK_SIZE, false},
		{leaf, Leaf4, ?DATA_CHUNK_SIZE, true}
	], Tree3),
	BadRoot = crypto:strong_rand_bytes(32),
	%% Every chunk validates against Root3 and fails against a random root.
	Path3_1 = ar_merkle:generate_path(Root3, 0, Tree3),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root3, 0, 6*?DATA_CHUNK_SIZE, Path3_1, offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			BadRoot, 0, 6*?DATA_CHUNK_SIZE, Path3_1, offset_rebase_support_ruleset)),
	Path3_2 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE, Tree3),
	{Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root3, ?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_2,
			offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			BadRoot, ?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_2,
			offset_rebase_support_ruleset)),
	Path3_3 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 2, Tree3),
	{Leaf3, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root3, 2*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_3,
			offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			BadRoot, 2*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_3,
			offset_rebase_support_ruleset)),
	Path3_4 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 3, Tree3),
	{Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root3, 3*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_4,
			offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			BadRoot, 3*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_4,
			offset_rebase_support_ruleset)),
	Path3_5 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 4, Tree3),
	{Leaf5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root3, 4*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_5,
			offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			BadRoot, 4*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_5,
			offset_rebase_support_ruleset)),
	Path3_6 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 5, Tree3),
	{Leaf6, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root3, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_6,
			offset_rebase_support_ruleset),
	?assertEqual(false, ar_merkle:validate_path(
			BadRoot, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_6,
			offset_rebase_support_ruleset)),
	%%    ________Root4_________
	%%   /                      \
	%% SubTree1     _______SubTree2____________
	%% /      \     /                          \
	%% Leaf1 Leaf2  SubTree3 (with offset reset)  SubTree4 (with offset reset)
	%%              /       \                     /       \
	%%          Leaf3       Leaf4             Leaf5       Leaf6
	Tags4 = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		{Leaf2, 2*?DATA_CHUNK_SIZE},
		[
			{Leaf3, ?DATA_CHUNK_SIZE},
			{Leaf4, 2*?DATA_CHUNK_SIZE}
		],
		[
			{Leaf5, ?DATA_CHUNK_SIZE},
			{Leaf6, 2*?DATA_CHUNK_SIZE}
		]
	],
	{Root4, Tree4} = ar_merkle:generate_tree(Tags4),
	assert_tree([
		{branch, undefined, 2*?DATA_CHUNK_SIZE, false}, %% Root
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1
		{branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% SubTree2
		{leaf, Leaf2, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false},
		{branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree3
		{leaf, Leaf4, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf3, ?DATA_CHUNK_SIZE, false},
		{branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree4
		{leaf, Leaf6, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf5, ?DATA_CHUNK_SIZE, false}
	], Tree4),
	Path4_1 = ar_merkle:generate_path(Root4, 0, Tree4),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root4, 0,
			6 * ?DATA_CHUNK_SIZE, Path4_1, offset_rebase_support_ruleset),
	Path4_2 = ar_merkle:generate_path(Root4, ?DATA_CHUNK_SIZE, Tree4),
	{Leaf2, ?DATA_CHUNK_SIZE, Right4_2} = ar_merkle:validate_path(Root4,
			?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path4_2,
			offset_rebase_support_ruleset),
	?assertEqual(2 * ?DATA_CHUNK_SIZE, Right4_2),
	Path4_3 = ar_merkle:generate_path(Root4, ?DATA_CHUNK_SIZE * 2, Tree4),
	{Leaf3, Left4_3, Right4_3} = ar_merkle:validate_path(Root4,
			2 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path4_3,
			offset_rebase_support_ruleset),
	?assertEqual(2 * ?DATA_CHUNK_SIZE, Left4_3),
	?assertEqual(3 * ?DATA_CHUNK_SIZE, Right4_3),
	Path4_4 = ar_merkle:generate_path(Root4, ?DATA_CHUNK_SIZE * 3, Tree4),
	{Leaf4, Left4_4, Right4_4} = ar_merkle:validate_path(Root4,
			3 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path4_4,
			offset_rebase_support_ruleset),
	?assertEqual(3 * ?DATA_CHUNK_SIZE, Left4_4),
	?assertEqual(4 * ?DATA_CHUNK_SIZE, Right4_4),
	Path4_5 = ar_merkle:generate_path(Root4, ?DATA_CHUNK_SIZE * 4, Tree4),
	{Leaf5, Left4_5, Right4_5} = ar_merkle:validate_path(Root4,
			4 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path4_5,
			offset_rebase_support_ruleset),
	?assertEqual(4 * ?DATA_CHUNK_SIZE, Left4_5),
	?assertEqual(5 * ?DATA_CHUNK_SIZE, Right4_5),
	Path4_6 = ar_merkle:generate_path(Root4, ?DATA_CHUNK_SIZE * 5, Tree4),
	{Leaf6, Left4_6, Right4_6} = ar_merkle:validate_path(Root4,
			5 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path4_6,
			offset_rebase_support_ruleset),
	?assertEqual(5 * ?DATA_CHUNK_SIZE, Left4_6),
	?assertEqual(6 * ?DATA_CHUNK_SIZE, Right4_6),
	%% ______________Root5_________________
	%% /                                   \
	%% ____SubTree1                        Leaf5
	%% /           \
	%% Leaf1        SubTree2 (with offset reset)
	%%              /       \
	%%          SubTree3    Leaf4
	%%          /      \
	%%      Leaf2      Leaf3
	Tags5 = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		[
			{Leaf2, ?DATA_CHUNK_SIZE},
			{Leaf3, 2*?DATA_CHUNK_SIZE},
			{Leaf4, 3*?DATA_CHUNK_SIZE}
		],
		{Leaf5, 5*?DATA_CHUNK_SIZE}
	],
	{Root5, Tree5} = ar_merkle:generate_tree(Tags5),
	assert_tree([
		{branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% Root
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1
		{leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false}, %% Duplicates are safe and expected
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false},
		{branch, undefined, 2*?DATA_CHUNK_SIZE, true}, %% SubTree2
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree3
		{leaf, Leaf4, 3*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf4, 3*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf3, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf2, ?DATA_CHUNK_SIZE, false}
	], Tree5),
	Path5_1 = ar_merkle:generate_path(Root5, 0, Tree5),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root5, 0,
			5*?DATA_CHUNK_SIZE, Path5_1, offset_rebase_support_ruleset),
	Path5_2 = ar_merkle:generate_path(Root5, ?DATA_CHUNK_SIZE, Tree5),
	{Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root5, ?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_2,
			offset_rebase_support_ruleset),
	Path5_3 = ar_merkle:generate_path(Root5, 2*?DATA_CHUNK_SIZE, Tree5),
	{Leaf3, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root5, 2*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_3,
			offset_rebase_support_ruleset),
	Path5_4 = ar_merkle:generate_path(Root5, 3*?DATA_CHUNK_SIZE, Tree5),
	{Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root5, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_4,
			offset_rebase_support_ruleset),
	Path5_5 = ar_merkle:generate_path(Root5, 4*?DATA_CHUNK_SIZE, Tree5),
	{Leaf5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_5,
			offset_rebase_support_ruleset),
	%% ______________Root6_________________
	%% /                                   \
	%% ____SubTree1                        Leaf5
	%% /           \
	%% Leaf1        SubTree2 (with offset reset)
	%%              /       \
	%%          Leaf2        SubTree3 (with offset reset)
	%%                       /       \
	%%                   Leaf3       Leaf4
	Tags6 = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		[
			{Leaf2, ?DATA_CHUNK_SIZE},
			[
				{Leaf3, ?DATA_CHUNK_SIZE},
				{Leaf4, 2*?DATA_CHUNK_SIZE}
			]
		],
		{Leaf5, 5*?DATA_CHUNK_SIZE}
	],
	{Root6, Tree6} = ar_merkle:generate_tree(Tags6),
	assert_tree([
		{branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% Root
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1
		{leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false},
		{branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree2
		{leaf, Leaf2, ?DATA_CHUNK_SIZE, false},
		{branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree3
		{leaf, Leaf4, 2*?DATA_CHUNK_SIZE, false},
		{leaf, Leaf3, ?DATA_CHUNK_SIZE, false}
	], Tree6),
	Path6_1 = ar_merkle:generate_path(Root6, 0, Tree6),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root6, 0,
			5*?DATA_CHUNK_SIZE, Path6_1, offset_rebase_support_ruleset),
	Path6_2 = ar_merkle:generate_path(Root6, ?DATA_CHUNK_SIZE, Tree6),
	{Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root6, ?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_2,
			offset_rebase_support_ruleset),
	Path6_3 = ar_merkle:generate_path(Root6, 2*?DATA_CHUNK_SIZE, Tree6),
	{Leaf3, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root6, 2*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_3,
			offset_rebase_support_ruleset),
	Path6_4 = ar_merkle:generate_path(Root6, 3*?DATA_CHUNK_SIZE, Tree6),
	{Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root6, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_4,
			offset_rebase_support_ruleset),
	Path6_5 = ar_merkle:generate_path(Root6, 4*?DATA_CHUNK_SIZE, Tree6),
	{Leaf5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root6, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_5,
			offset_rebase_support_ruleset).

%% Flip individual bytes of a valid proof and assert that validation fails
%% for every mutated position: branch hash, note, rebase marker, leaf data
%% hash, and leaf note.
test_tree_with_rebase_bad_paths() ->
	%% ______________Root__________________
	%% /                                   \
	%% ____SubTree1                        Leaf5
	%% /           \
	%% Leaf1        SubTree2 (with offset reset)
	%%              /       \
	%%          Leaf2        SubTree3 (with offset reset)
	%%                       /       \
	%%                   Leaf3       Leaf4
	Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf3 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf4 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf5 = crypto:strong_rand_bytes(?HASH_SIZE),
	Tags = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		[
			{Leaf2, ?DATA_CHUNK_SIZE},
			[
				{Leaf3, ?DATA_CHUNK_SIZE},
				{Leaf4, 2*?DATA_CHUNK_SIZE}
			]
		],
		{Leaf5, 5*?DATA_CHUNK_SIZE}
	],
	{Root, Tree} = ar_merkle:generate_tree(Tags),
	GoodPath = ar_merkle:generate_path(Root, 3*?DATA_CHUNK_SIZE, Tree),
	{Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(
			Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE,
			GoodPath, offset_rebase_support_ruleset),
	BadPath1 = change_path(GoodPath, 0), %% Change L
	?assertEqual(false, ar_merkle:validate_path(
			Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE,
			BadPath1, offset_rebase_support_ruleset)),
	BadPath2 = change_path(GoodPath, 2*?HASH_SIZE + 1), %% Change note
	?assertEqual(false, ar_merkle:validate_path(
			Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE,
			BadPath2, offset_rebase_support_ruleset)),
	BadPath3 = change_path(GoodPath, 2*?HASH_SIZE + ?NOTE_SIZE + 1), %% Change offset rebase zeros
	?assertEqual(false, ar_merkle:validate_path(
			Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE,
			BadPath3, offset_rebase_support_ruleset)),
	BadPath4 = change_path(GoodPath, byte_size(GoodPath) - ?NOTE_SIZE - 1), %% Change leaf data hash
	?assertEqual(false, ar_merkle:validate_path(
			Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE,
			BadPath4, offset_rebase_support_ruleset)),
	BadPath5 = change_path(GoodPath, byte_size(GoodPath) - 1), %% Change leaf note
	?assertEqual(false, ar_merkle:validate_path(
			Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE,
			BadPath5, offset_rebase_support_ruleset)).

%% Exercise the relaxed split rules on chunks smaller than ?DATA_CHUNK_SIZE:
%% only the last / only chunks of their datasets (or properly aligned ones)
%% may be under-sized.
test_tree_with_rebase_partial_chunk() ->
	Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf3 = crypto:strong_rand_bytes(?HASH_SIZE),
	%%      Root5
	%%     /     \
	%% Leaf1      Leaf2 (with offset reset, < 256 KiB)
	Tags5 = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		[ {Leaf2, 100} ]
	],
	{Root5, Tree5} = ar_merkle:generate_tree(Tags5),
	assert_tree([
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% Root
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, false},
		{leaf, Leaf2, 100, true}
	], Tree5),
	Path5_1 = ar_merkle:generate_path(Root5, 0, Tree5),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root5, 0,
			?DATA_CHUNK_SIZE + 100, Path5_1, offset_rebase_support_ruleset),
	Path5_2 = ar_merkle:generate_path(Root5, ?DATA_CHUNK_SIZE, Tree5),
	{Leaf2, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+100} = ar_merkle:validate_path(Root5,
			?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+100, Path5_2,
			offset_rebase_support_ruleset),
	%% Root6__________________
	%% /                      \
	%% SubTree1 (with offset reset)  Leaf3
	%% /        \
	%% Leaf1 (< 256 KiB)  Leaf2 (< 256 KiB, spans two buckets)
	Tags6 = [
		[ {Leaf1, 131070}, {Leaf2, 393213} ],
		{Leaf3, 655355}
	],
	{Root6, Tree6} = ar_merkle:generate_tree(Tags6),
	assert_tree([
		{branch, undefined, 393213, false}, %% Root
		{leaf, Leaf3, 655355, false},
		{branch, undefined, 131070, true}, %% SubTree1
		{leaf, Leaf2, 393213, false},
		{leaf, Leaf1, 131070, false}
	], Tree6),
	Path6_1 = ar_merkle:generate_path(Root6, 0, Tree6),
	{Leaf1, 0, 131070} = ar_merkle:validate_path(Root6, 0,
			1000000, % an arbitrary bound > 655355
			Path6_1, offset_rebase_support_ruleset),
	Path6_2 = ar_merkle:generate_path(Root6, 131070, Tree6),
	{Leaf2, 131070, 393213} = ar_merkle:validate_path(Root6, 131070 + 5,
			655355, Path6_2, offset_rebase_support_ruleset),
	Path6_3 = ar_merkle:generate_path(Root6, 393213 + 1, Tree6),
	{Leaf3, 393213, 655355} = ar_merkle:validate_path(Root6, 393213 + 2, 655355,
			Path6_3, offset_rebase_support_ruleset),
	%% Root8 (with offset reset)
	%% /                        \
	%% ____SubTree1___           Leaf3
	%% /              \
	%% Leaf1 (< 256 KiB)  Leaf2 (< 256 KiB, spans two buckets)
	Tags8 = [
		[
			{Leaf1, 131070},
			{Leaf2, 393213},
			{Leaf3, 655355}
		]
	],
	{Root8, Tree8} = ar_merkle:generate_tree(Tags8),
	assert_tree([
		{branch, undefined, 393213, true}, %% Root
		{branch, undefined, 131070, false}, %% SubTree1
		{leaf, Leaf3, 655355, false},
		{leaf, Leaf3, 655355, false},
		{leaf, Leaf2, 393213, false},
		{leaf, Leaf1, 131070, false}
	], Tree8),
	%% Path to first chunk in data set (even if it's a small chunk) will validate
	Path8_1 = ar_merkle:generate_path(Root8, 0, Tree8),
	{Leaf1, 0, 131070} = ar_merkle:validate_path(Root8, 0,
			1000000, % an arbitrary bound > 655355
			Path8_1, offset_rebase_support_ruleset),
	Path8_2 = ar_merkle:generate_path(Root8, 131070, Tree8),
	?assertEqual(false, ar_merkle:validate_path(Root8, 131070+5, 655355, Path8_2,
			offset_rebase_support_ruleset)),
	Path8_3 = ar_merkle:generate_path(Root8, 393213 + 1, Tree8),
	{Leaf3, 393213, 655355} = ar_merkle:validate_path(Root8, 393213 + 2, 655355,
			Path8_3, offset_rebase_support_ruleset),
	%%      Root9
	%%     /     \
	%% SubTree1   Leaf3 (1 B)
	%% /      \
	%% Leaf1 (1 B)  Leaf2 (1 B)
	Tags9 = [
		[ {Leaf1, 1} ],
		[ {Leaf2, 1} ],
		[ {Leaf3, 1} ]
	],
	{Root9, Tree9} = ar_merkle:generate_tree(Tags9),
	assert_tree([
		{branch, undefined, 2, false}, %% Root
		{branch, undefined, 1, false}, %% SubTree1
		{leaf, Leaf3, 1, true},
		{leaf, Leaf1, 1, true},
		{leaf, Leaf2, 1, true},
		{leaf, Leaf3, 1, true} %% Duplicates are safe and expected
	], Tree9),
	%% Path to first chunk in data set (even if it's a small chunk) will validate
	Path9_1 = ar_merkle:generate_path(Root9, 0, Tree9),
	{Leaf1, 0, 1} = ar_merkle:validate_path(Root9, 0, 1, Path9_1,
			offset_rebase_support_ruleset),
	Path9_2 = ar_merkle:generate_path(Root9, 1, Tree9),
	?assertEqual(false, ar_merkle:validate_path(Root9, 1, 2, Path9_2,
			offset_rebase_support_ruleset)),
	Path9_3 = ar_merkle:generate_path(Root9, 2, Tree9),
	?assertEqual(false, ar_merkle:validate_path(Root9, 2, 3, Path9_3,
			offset_rebase_support_ruleset)),
	%%      Root10
	%%     /      \
	%% SubTree1    Leaf3 (256 KiB)
	%% /      \
	%% Leaf1 (256 KiB)  Leaf2 (1 B)
	%%
	%% Every chunk in a subtree following a small-chunk subtree should fail to validate. When
	%% bundling, bundlers are required to pad small chunks out to a chunk boundary.
	Tags10 = [
		[ {Leaf1, ?DATA_CHUNK_SIZE} ],
		[ {Leaf2, 1} ],
		[ {Leaf3, ?DATA_CHUNK_SIZE} ]
	],
	{Root10, Tree10} = ar_merkle:generate_tree(Tags10),
	assert_tree([
		{branch, undefined, ?DATA_CHUNK_SIZE+1, false}, %% Root
		{branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1
		{leaf, Leaf3, ?DATA_CHUNK_SIZE, true},
		{leaf, Leaf1, ?DATA_CHUNK_SIZE, true},
		{leaf, Leaf2, 1, true},
		{leaf, Leaf3, ?DATA_CHUNK_SIZE, true} %% Duplicates are safe and expected
	], Tree10),
	Path10_1 = ar_merkle:generate_path(Root10, 0, Tree10),
	{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root10, 0,
			?DATA_CHUNK_SIZE, Path10_1, offset_rebase_support_ruleset),
	Path10_2 = ar_merkle:generate_path(Root10, ?DATA_CHUNK_SIZE, Tree10),
	{Leaf2, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+1} = ar_merkle:validate_path(Root10,
			?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+1, Path10_2,
			offset_rebase_support_ruleset),
	Path10_3 = ar_merkle:generate_path(Root10, ?DATA_CHUNK_SIZE+1, Tree10),
	?assertEqual(false, ar_merkle:validate_path(Root10, ?DATA_CHUNK_SIZE+1,
			(2*?DATA_CHUNK_SIZE)+1, Path10_3, offset_rebase_support_ruleset)),
	ok.
%% Assert that all the subtree node IDs are preserved when the tree is added
%% as a subtree within a larger tree.
test_tree_with_rebase_subtree_ids() ->
	Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
	Leaf3 = crypto:strong_rand_bytes(?HASH_SIZE),
	SubTreeTags = [
		{Leaf1, ?DATA_CHUNK_SIZE},
		{Leaf2, 2 * ?DATA_CHUNK_SIZE}
	],
	{SubTreeRoot, SubTree} = ar_merkle:generate_tree(SubTreeTags),
	TreeTags = [
		{Leaf3, ?DATA_CHUNK_SIZE},
		[
			{Leaf1, ?DATA_CHUNK_SIZE},
			{Leaf2, 2 * ?DATA_CHUNK_SIZE}
		]
	],
	{_TreeRoot, Tree} = ar_merkle:generate_tree(TreeTags),
	%% The subtree's nodes are appended at the tail of the outer tree's list.
	TreeNodes = lists:nthtail(length(Tree) - length(SubTree), Tree),
	TreeSubTreeRoot = lists:nth(1, TreeNodes),
	TreeLeaf1 = lists:nth(2, TreeNodes),
	SubTreeLeaf1 = lists:nth(2, SubTree),
	TreeLeaf2 = lists:nth(3, TreeNodes),
	SubTreeLeaf2 = lists:nth(3, SubTree),
	?assertEqual(SubTreeRoot, TreeSubTreeRoot#node.id),
	?assertEqual(SubTreeLeaf1#node.id, TreeLeaf1#node.id),
	?assertEqual(SubTreeLeaf2#node.id, TreeLeaf2#node.id).

%% Validate a path inside the 'uneven' (non-power-of-two) ending of the tree
%% under both the basic and strict-borders rulesets.
%% Note: the leaf binary pattern was garbled to `{<>, 1}` (a syntax error);
%% restored to `{<< N:256 >>, 1}`, matching the balanced-tree test above.
generate_and_validate_uneven_tree_path_test() ->
	Tags = make_tags_cumulative([{<< N:256 >>, 1}
			|| N <- lists:seq(0, ?UNEVEN_TEST_SIZE - 1)]),
	{MR, Tree} = ar_merkle:generate_tree(Tags),
	%% Make sure the target is in the 'uneven' ending of the tree.
	Path = ar_merkle:generate_path(MR, ?UNEVEN_TEST_TARGET, Tree),
	{Leaf, StartOffset, EndOffset} = ar_merkle:validate_path(MR,
			?UNEVEN_TEST_TARGET, ?UNEVEN_TEST_SIZE, Path),
	{Leaf, StartOffset, EndOffset} = ar_merkle:validate_path(MR,
			?UNEVEN_TEST_TARGET, ?UNEVEN_TEST_SIZE, Path, strict_borders_ruleset),
	?assertEqual(?UNEVEN_TEST_TARGET, binary:decode_unsigned(Leaf)),
	?assert(?UNEVEN_TEST_TARGET < EndOffset),
	?assert(?UNEVEN_TEST_TARGET >= StartOffset).

reject_invalid_tree_path_test_() ->
	{timeout, 30, fun test_reject_invalid_tree_path/0}.
%% @doc Assert that a path generated for one target fails to validate a different target.
test_reject_invalid_tree_path() ->
	%% The leaf data encodes the chunk index N as a 256-bit big-endian integer.
	Tags = make_tags_cumulative([{<< N:256 >>, 1}
			|| N <- lists:seq(0, ?TEST_SIZE - 1)]),
	{MR, Tree} = ar_merkle:generate_tree(Tags),
	RandomTarget = rand:uniform(?TEST_SIZE) - 2,
	?assertEqual(
		false,
		ar_merkle:validate_path(
			MR, RandomTarget,
			?TEST_SIZE,
			%% Path for a neighbouring target must not validate RandomTarget.
			ar_merkle:generate_path(MR, RandomTarget+1, Tree)
		)
	).

%% @doc Assert a node matches the expected {Id, Type, Data, Note, IsRebased}
%% (or {Type, Data, Note, IsRebased}) tuple.
assert_node({Id, Type, Data, Note, IsRebased}, Node) ->
	?assertEqual(Id, Node#node.id),
	assert_node({Type, Data, Note, IsRebased}, Node);
assert_node({Type, Data, Note, IsRebased}, Node) ->
	?assertEqual(Type, Node#node.type),
	?assertEqual(Data, Node#node.data),
	?assertEqual(Note, Node#node.note),
	?assertEqual(IsRebased, Node#node.is_rebased).

%% @doc Assert every node of the flattened tree matches the corresponding
%% expected-values tuple; both lists must have the same length.
assert_tree([], []) ->
	ok;
assert_tree([], _RestOfTree) ->
	?assert(false);
assert_tree(_RestOfValues, []) ->
	?assert(false);
assert_tree([ExpectedValues | RestOfValues], [Node | RestOfTree]) ->
	assert_node(ExpectedValues, Node),
	assert_tree(RestOfValues, RestOfTree).

%% @doc Return Path with the byte at Index incremented (modulo 256) — used to
%% corrupt a proof in a controlled way.
change_path(Path, Index) ->
	NewByte = (binary:at(Path, Index) + 1) rem 256,
	List = binary_to_list(Path),
	UpdatedList = lists:sublist(List, Index) ++ [NewByte] ++ lists:nthtail(Index+1, List),
	list_to_binary(UpdatedList).



================================================
FILE: apps/arweave/src/ar_metrics.erl
================================================
-module(ar_metrics).

-include("ar.hrl").

-export([register/0, get_status_class/1, record_rate_metric/4]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Declare Arweave metrics.
register() ->
	%% App info
	prometheus_gauge:new([
		{name, arweave_release},
		{help, "Arweave release number"}
	]),
	%% Release number never changes so just set it here.
	prometheus_gauge:set(arweave_release, ?RELEASE_NUMBER),

	%% Networking.
	prometheus_counter:new([
		{name, http_server_accepted_bytes_total},
		{help, "The total amount of bytes accepted by the HTTP server, per endpoint"},
		{labels, [route]}
	]),
	prometheus_counter:new([
		{name, http_server_served_bytes_total},
		{help, "The total amount of bytes served by the HTTP server, per endpoint"},
		{labels, [route]}
	]),
	prometheus_counter:new([
		{name, http_client_downloaded_bytes_total},
		{help, "The total amount of bytes requested via HTTP, per remote endpoint"},
		{labels, [route]}
	]),
	prometheus_counter:new([
		{name, http_client_uploaded_bytes_total},
		{help, "The total amount of bytes posted via HTTP, per remote endpoint"},
		{labels, [route]}
	]),
	prometheus_gauge:new([
		{name, arweave_peer_count},
		{help, "peer count"}
	]),
	prometheus_counter:new([
		{name, gun_requests_total},
		{labels, [http_method, route, status_class]},
		{
			help,
			"The total number of GUN requests."
		}
	]),
	%% NOTE: the erlang prometheus client looks at the metric name to determine units.
	%% If it sees _duration_<unit> it assumes the observed value is in
	%% native units and it converts it to <unit>. To query native units, use:
	%% erlang:monotonic_time() without any arguments.
	%% See: https://github.com/deadtrickster/prometheus.erl/blob/6dd56bf321e99688108bb976283a80e4d82b3d30/src/prometheus_time.erl#L2-L84
	prometheus_histogram:new([
		{name, ar_http_request_duration_seconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{labels, [http_method, route, status_class]},
		{
			help,
			"The total duration of an ar_http:req call. This includes more than just the GUN "
			"request itself (e.g. establishing a connection, throttling, etc...)"
		}
	]),
	prometheus_histogram:new([
		{name, http_client_get_chunk_duration_seconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{labels, [status_class, peer]},
		{
			help,
			"The total duration of an HTTP GET chunk request made to a peer."
		}
	]),
	prometheus_gauge:new([
		{name, downloader_queue_size},
		{help, "The size of the back-off queue for the block and transaction headers "
				"the node failed to sync and will retry later."}
	]),
	prometheus_gauge:new([{name, outbound_connections},
		{help, "The current number of the open outbound network connections"}]),

	%% Transaction and block propagation.
	prometheus_gauge:new([
		{name, tx_queue_size},
		{help, "The size of the transaction propagation queue"}
	]),
	prometheus_counter:new([
		{name, propagated_transactions_total},
		{labels, [status_class]},
		{
			help,
			"The total number of propagated transactions. Increases "
			"with the number of peers the node propagates transactions to."
		}
	]),
	prometheus_histogram:declare([
		{name, tx_propagation_bits_per_second},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The throughput (in bits/s) of transaction propagation."}
	]),
	prometheus_gauge:new([
		{name, mempool_header_size_bytes},
		{
			help,
			"The size (in bytes) of the memory pool of transaction headers. "
			"The data fields of format=1 transactions are considered to be "
			"parts of transaction headers."
		}
	]),
	prometheus_gauge:new([
		{name, mempool_data_size_bytes},
		{
			help,
			"The size (in bytes) of the memory pool of transaction data. "
			"The data fields of format=1 transactions are NOT considered "
			"to be transaction data."
		}
	]),
	prometheus_counter:new([{name, block_announcement_missing_transactions},
		{help, "The total number of tx prefixes reported to us via "
				"POST /block_announcement and not found in the mempool or block cache."}]),
	prometheus_counter:new([{name, block_announcement_reported_transactions},
		{help, "The total number of tx prefixes reported to us via "
				"POST /block_announcement."}]),
	prometheus_counter:new([{name, block2_received_transactions},
		{help, "The total number of transactions received via POST /block2."}]),
	prometheus_counter:new([{name, block_announcement_missing_chunks},
		{help, "The total number of chunks reported to us via "
				"POST /block_announcement and not found locally."}]),
	prometheus_counter:new([{name, block_announcement_reported_chunks},
		{help, "The total number of chunks reported to us via "
				"POST /block_announcement."}]),
	prometheus_counter:new([{name, block2_fetched_chunks},
		{help, "The total number of chunks fetched locally during the successful"
				" processing of POST /block2."}]),
	prometheus_histogram:new([
		{name, ar_mempool_add_tx_duration_milliseconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The duration in milliseconds it took to add a transaction to the mempool."}
	]),
	prometheus_histogram:new([
		{name, reverify_mempool_chunk_duration_milliseconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The duration in milliseconds it took to reverify a chunk of transactions "
				"in the mempool."}
	]),
	prometheus_histogram:new([
		{name, drop_txs_duration_milliseconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The duration in milliseconds it took to drop a chunk of transactions "
				"from the mempool."}
	]),
	prometheus_histogram:new([
		{name, del_from_propagation_queue_duration_milliseconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The duration in milliseconds it took to remove a transaction from the "
				"propagation queue after it was emitted to peers."}
	]),

	%% Data seeding.
	prometheus_gauge:new([
		{name, weave_size},
		{help, "The size of the weave (in bytes)."}
	]),
	prometheus_gauge:new([
		{name, v2_index_data_size},
		{help, "The size (in bytes) of the data stored and indexed. Note: if "
				"multiple storage modules cover the same range of data, that "
				"range will be counted multiple times."}
	]),
	prometheus_gauge:new([
		{name, v2_index_data_size_by_packing},
		{labels, [store_id, packing, partition_number, storage_module_size,
				storage_module_index, packing_difficulty]},
		{help, "The size (in bytes) of the data stored and indexed. Grouped by the "
				"store ID, packing, partition number, storage module size, "
				"storage module index, and packing difficulty."}
	]),

	%% Disk pool.
	prometheus_gauge:new([
		{name, pending_chunks_size},
		{
			help,
			"The total size in bytes of stored pending and seeded chunks."
		}
	]),
	prometheus_gauge:new([
		{name, disk_pool_chunks_count},
		{
			help,
			"The approximate number of chunks in the disk pool."
			"The disk pool includes pending, recent, and orphaned chunks."
		}
	]),
	prometheus_counter:new([
		{name, disk_pool_processed_chunks},
		{
			help,
			"The counter is incremented every time the periodic process"
			" looks up a chunk from the disk pool and decides whether to"
			" remove it, include it in the weave, or keep in the disk pool."
		}
	]),

	%% Consensus.
	prometheus_gauge:new([
		{name, arweave_block_height},
		{help, "The block height."}
	]),
	prometheus_gauge:new([{name, block_time},
		{help, "The time in seconds between two blocks as recorded by the miners."}]),
	prometheus_gauge:new([
		{name, block_vdf_time},
		{help, "The number of the VDF steps between two consequent blocks."}
	]),
	prometheus_gauge:new([
		{name, block_vdf_advance},
		{help, "The number of the VDF steps a received block is ahead of our current step."}
	]),
	prometheus_counter:new([
		{name, wallet_list_size},
		{
			help,
			"The total number of wallets in the system."
		}
	]),
	prometheus_histogram:new([
		{name, block_pre_validation_time},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The time in milliseconds taken to parse the POST /block input and perform a "
				"preliminary validation before relaying the block to peers."}
	]),
	prometheus_histogram:new([
		{name, block_processing_time},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The time in seconds taken to validate the block and apply it on top of "
				"the current state, possibly involving a chain reorganisation."}
	]),
	prometheus_gauge:new([
		{name, synced_blocks},
		{
			help,
			"The total number of synced block headers."
		}
	]),

	%% Mining.
	prometheus_gauge:new([
		{name, mining_rate},
		{labels, [type, partition]},
		{help, "Tracks 3 different mining rate metrics, each with a different type label. "
				"The type label can be 'read', 'raw_read', 'hash', or 'ideal'. "
				"'read' tracks the number of chunks read per second - recorded in MiB per second. "
				"This is the effective mining read rate as it considers all limiting factors like "
				"nonce limiter, hashing speed, etc..."
				"'raw_read' tracks the average read rate of the partition ignoring any other "
				"limiting factors - recorded in MiB per second."
				"'hash' tracks the number of solutions candidates generated per second. "
				"'ideal' tracks the ideal read rate given the current VDF step time and amount of "
				"data synced. The partition label breaks the mining rate down by partition. "
				"The overall mining rate is indicated by 'total'."}
	]),
	prometheus_gauge:new([
		{name, cm_h1_rate},
		{labels, [peer, direction]},
		{help, "The number of H1 hashes exchanged with a coordinated mining peer per second. "
				"The peer label indicates the peer that the value is exchanged with, and the "
				"direction label can be 'to' or 'from'."}
	]),
	prometheus_gauge:new([
		{name, cm_h2_count},
		{labels, [peer, direction]},
		{help, "The total number of H2 hashes exchanged with a coordinated mining peer. "
				"The peer label indicates the peer that the value is exchanged with, and the "
				"direction label can be 'to' or 'from'."}
	]),
	prometheus_gauge:new([
		{name, mining_server_chunk_cache_size},
		{labels, [partition, type]},
		{help, "The amount of data (measured in bytes) "
				"fetched during mining and not processed yet. "
				"The type label can be 'total', 'reserved'."}
	]),
	prometheus_gauge:new([
		{name, mining_server_task_queue_len},
		{labels, [task]},
		{help, "The number of items in the mining server task queue."}
	]),
	prometheus_gauge:new([
		{name, mining_solution},
		{labels, [reason]},
		{help, "Incremented whenever the miner generates a solution. The 'reason' label "
				"will be 'success' if a block was successfully prepared from the solution, "
				"and will list a failure reason otherwise. Note: even if a block is "
				"successfully prepared from a solution, it does not necessarily mean "
				"the block ended up in the blockchain."}
	]),
	prometheus_histogram:new([
		{name, chunk_storage_sync_record_check_duration_milliseconds},
		{labels, [requested_chunk_count]},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The time in milliseconds it took to check the fetched chunk range "
				"is actually registered by the chunk storage."}
	]),
	prometheus_gauge:new([
		{name, mining_server_tasks},
		{labels, [task]},
		{help, "Incremented each time the mining server adds a task to the task queue."}
	]),
	prometheus_gauge:new([
		{name, mining_vdf_step},
		{help, "Incremented each time the mining server processes a VDF step."}
	]),

	%% VDF.
	prometheus_histogram:new([
		{name, vdf_step_time_milliseconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{labels, []},
		{help, "The time in milliseconds it took to compute a VDF step."}
	]),
	prometheus_gauge:new([
		{name, vdf_step},
		{help, "The current VDF step."}
	]),
	prometheus_gauge:new([
		{name, vdf_difficulty},
		{labels, [type]},
		{help, "The cached VDF difficulty. 'type' can be either 'current' or 'next'."}
	]),

	%% Economic metrics.
	prometheus_gauge:new([
		{name, average_network_hash_rate},
		{help, "The average network hash rate measured over the last ~30 days of blocks"}
	]),
	prometheus_gauge:new([
		{name, average_block_reward},
		{help, "The average block reward in Winston computed from the last ~30 days of blocks"}
	]),
	prometheus_gauge:new([
		{name, expected_block_reward},
		{help, "The block reward required to sustain 20 replicas of the present weave"
				" as currently estimated by the protocol."}
	]),
	prometheus_gauge:new([
		{name, network_data_size},
		{help, "Total size of the network data in bytes."}
	]),
	prometheus_gauge:new([
		{name, v2_price_per_gibibyte_minute},
		{help, "The price of storing 1 GiB for one minute as it will be calculated once the"
				" transition to the new pricing protocol is complete."}
	]),
	prometheus_gauge:new([
		{name, price_per_gibibyte_minute},
		{help, "The price of storing 1 GiB for one minute as currently estimated by "
				"the protocol."}
	]),
	prometheus_gauge:new([
		{name, legacy_price_per_gibibyte_minute},
		{help, "The price of storing 1 GiB for one minute as estimated by the previous ("
				"USD to AR benchmark-based) version of the protocol."}
	]),
	prometheus_gauge:new([
		{name, endowment_pool},
		{help, "The amount of Winston in the endowment pool."}
	]),
	prometheus_gauge:new([
		{name, kryder_plus_rate_multiplier},
		{help, "Kryder+ rate multiplier."}
	]),
	prometheus_gauge:new([
		{name, endowment_pool_take},
		{help, "Value we take from endowment pool to miner to compensate difference between expected and real reward."}
	]),
	prometheus_gauge:new([
		{name, endowment_pool_give},
		{help, "Value we give to endowment pool from transaction fees."}
	]),
	prometheus_gauge:new([
		{name, available_supply},
		{help, "The total supply minus the endowment, in Winston."}
	]),
	prometheus_gauge:new([
		{name, debt_supply},
		{help, "The amount of Winston emitted when the endowment pool was not sufficiently"
				" large to compensate mining."}
	]),
	prometheus_gauge:new([
		{name, poa_count},
		{labels, [chunks]},
		{help, "A count of the number of 1-chunk and 2-chunk blocks in the last 21,600 blocks. "
				"The 'chunks' label is 1 for the count of 1-chunk blocks, and 2 for the count of "
				"2-chunk blocks."}
	]),
	prometheus_gauge:new([
		{name, log_diff},
		{labels, [chunk]},
		{help, "The current linear difficulty converted to log scale. The chunk label "
				"is either 'poa1' or 'poa2'."}
	]),
	prometheus_gauge:new([
		{name, network_hashrate},
		{help, "An estimation of the network hash rate based on the mining difficulty "
				"of the latest block."}
	]),
	prometheus_gauge:new([
		{name, expected_minimum_200_years_storage_costs_decline_rate},
		{help, "The expected minimum decline rate sufficient to subsidize storage of "
				"the current weave for 200 years according to the legacy (2.5) estimations."}
	]),
	prometheus_gauge:new([
		{name, expected_minimum_200_years_storage_costs_decline_rate_10_usd_ar},
		{help, "The expected minimum decline rate sufficient to subsidize storage of "
				"the current weave for 200 years according to the legacy (2.6) estimations"
				"and assuming 10 $/AR."}
	]),

	%% Packing.
	prometheus_histogram:new([
		{name, packing_duration_milliseconds},
		{labels, [type, packing, trigger]},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The packing/unpacking time in milliseconds. The type label indicates what "
				"type of operation was requested either: 'pack', 'unpack',"
				"'unpack_sub_chunk', or 'pack_sub_chunk'. The packing "
				"label differs based on the type. If type is 'unpack' then the packing label "
				"indicates the format of the chunk before being unpacked. If type is 'pack' "
				"then the packing label indicates the format that the chunk will be packed "
				"to. In all cases its value can be 'spora_2_5', 'spora_2_6', 'composite', "
				"or 'replica_2_9'. The trigger label shows where the request was triggered: "
				"'external' (e.g. an HTTP request) or 'internal' (e.g. during syncing or "
				"repacking)."}
	]),
	prometheus_counter:new([
		{name, packing_requests},
		{labels, [type, packing]},
		{help, "The number of packing requests received. The type label indicates what "
				"type of operation was requested either: 'pack', 'unpack', or "
				"'unpack_sub_chunk'. The packing "
				"label differs based on the type. If type is 'unpack' then the packing label "
				"indicates the format of the chunk before being unpacked. If type is 'pack' "
				"then the packing label indicates the format that the chunk will be packed "
				"to. In all cases its value can be 'unpacked', 'unpacked_padded', "
				"'spora_2_5', 'spora_2_6', 'composite', or 'replica_2_9'."}
	]),
	prometheus_counter:new([
		{name, validating_packed_spora},
		{labels, [packing]},
		{help, "The number of SPoRA solutions based on packed chunks entered validation. "
				"The packing label can be 'spora_2_5', 'spora_2_6', 'composite', "
				" or replica_2_9."}
	]),
	prometheus_gauge:new([{name, packing_buffer_size},
		{help, "The number of chunks in the packing server queue."}]),
	prometheus_gauge:new([{name, chunk_cache_size},
		{help, "The number of chunks scheduled for downloading."}]),
	prometheus_counter:new([{name, chunks_stored},
		{labels, [packing, store_id]},
		{help, "The counter is incremented every time a chunk is written to "
				"chunk_storage."}]),
	prometheus_counter:new([{name, chunks_read},
		{labels, [store_id]},
		{help, "The counter is incremented every time a chunk is read from "
				"chunk_storage."}]),
	prometheus_histogram:new([
		{name, chunk_read_rate_bytes_per_second},
		{labels, [store_id, type]},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The rate, in bytes per second, at which chunks are read from storage. "
				"The type label can be 'raw' or 'repack'."}
	]),
	prometheus_histogram:new([
		{name, chunk_write_rate_bytes_per_second},
		{labels, [store_id, type]},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The rate, in bytes per second, at which chunks are written to storage."}
	]),
	prometheus_gauge:new([{name, data_discovery},
		{labels, [type, store_id, stat]},
		{help, "Tracks peer availability statistics from data discovery across buckets. "
				"'type' is 'normal' or 'footprint'. "
				"'stat' is 'num_peers', 'total_buckets', 'zero_peer_count', or 'healthy_peer_count'."}]),
	prometheus_counter:new([{name, sync_tasks},
		{labels, [state, peer]},
		{help, "The number of syncing tasks. 'state' can be 'waiting_in', 'waiting_out', "
				"'queued_in', 'queued_out', 'dispatched', 'completed', "
				"'activate_footprint', or 'deactivate_footprint'. "
				" 'peer' is the peer the task is intended for."}]),
	prometheus_counter:new([{name, sync_chunks_skipped},
		{labels, [reason]},
		{help, "The number of chunks skipped during syncing."}]),
	prometheus_gauge:new([{name, device_lock_status},
		{labels, [store_id, mode]},
		{help, "The device lock status of the storage module. "
				"-1: off, 0: paused, 1: active, 2: complete -2: unknown"}]),
	prometheus_gauge:new([{name, sync_intervals_queue_size},
		{labels, [store_id]},
		{help, "The size of the syncing intervals queue."}]),
	prometheus_gauge:new([{name, repack_chunk_states},
		{labels, [store_id, type, state]},
		{help, "The count of chunks in each state. 'type' can be 'cache' or 'queue'."}]),

	%% ---------------------------------------------------------------------------------------
	%% Replica 2.9 metrics
	%% ---------------------------------------------------------------------------------------
	prometheus_counter:new([{name, replica_2_9_entropy_stored},
		{labels, [store_id]},
		{help, "The number of bytes of replica.2.9 entropy written to chunk storage."}]),
	prometheus_counter:new([{name, replica_2_9_entropy_generated},
		{help, "The number of bytes of replica.2.9 entropy generated."}]),
	prometheus_gauge:new([{name, replica_2_9_entropy_cache},
		{help, "The size (in bytes) of the replica.2.9 entropy cache."}]),
	prometheus_counter:new([{name, replica_2_9_entropy_stats},
		{labels, [partition, stat]},
		{help, "Count of different replica_2_9 entropy events: 'cache_hit', 'cache_miss', "
				"'redundant'."}]),
	prometheus_histogram:new([
		{name, replica_2_9_entropy_duration_milliseconds},
		{buckets, [infinity]}, %% we don't care about the histogram portion
		{help, "The time, in milliseconds, to generate 256 MiB of replica.2.9 entropy."}
	]),

	%% ---------------------------------------------------------------------------------------
	%% Pool related metrics
	%% ---------------------------------------------------------------------------------------
	prometheus_counter:new([
		{name, pool_job_request_count},
		{help, "The number of requests to pool /job from start of arweave node"}
	]),
	prometheus_counter:new([
		{name, pool_total_job_got_count},
		{help, "The number of jobs received from /job requests."}
	]),

	%% ---------------------------------------------------------------------------------------
	%% Debug-only metrics
	%% ---------------------------------------------------------------------------------------
	prometheus_counter:new([{name, process_functions},
		{labels, [process]},
		{help, "Sampling active functions. The 'process' label is a fully qualified "
				"function name with the format 'process~module:function/arity'. "
				"Only set when debug=true."}]),
	%% process_info gets unregistered and re-registered in ar_process_sampler.erl
	prometheus_gauge:new([{name, process_info},
		{labels, [process, type]},
		{help, "Sampling info about active processes. Only set when debug=true."}]),
	prometheus_gauge:new([{name, scheduler_utilization},
		{labels, [type]},
		{help, "Average scheduler utilization. `type` maps to the sched_type as defined here: "
				"https://www.erlang.org/doc/man/scheduler#type-sched_util_result. "
				"Only set when debug=true."}]),
	prometheus_gauge:new([{name, allocator},
		{labels, [type, instance, section, metric]},
		{help, "Erlang VM memory allocator metrics. Only set when debug=true."}]).

%% @doc Observe into the given histogram Metric the transfer rate (in bytes per
%% second) implied by Bytes transferred since StartTime, where StartTime is a
%% native-unit erlang:monotonic_time() timestamp. A zero elapsed time records 0.
record_rate_metric(StartTime, Bytes, Metric, Labels) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, microsecond),
	%% bytes per second
	Rate = case ElapsedTime > 0 of
		true -> 1_000_000 * Bytes / ElapsedTime;
		false -> 0
	end,
	prometheus_histogram:observe(Metric, Labels, Rate).

%% @doc Return the HTTP status class label for cowboy_requests_total and gun_requests_total
%% metrics.
%% Successful responses carry the status in the gun response tuple; recurse on it.
get_status_class({ok, {{Status, _}, _, _, _, _}}) ->
	get_status_class(Status);
%% Known error reasons map to fixed label strings.
get_status_class({error, connection_closed}) -> "connection_closed";
get_status_class({error, connect_timeout}) -> "connect_timeout";
get_status_class({error, timeout}) -> "timeout";
get_status_class({error,{shutdown,timeout}}) -> "shutdown_timeout";
get_status_class({error, econnrefused}) -> "econnrefused";
get_status_class({error, {shutdown,econnrefused}}) -> "shutdown_econnrefused";
get_status_class({error, {shutdown,ehostunreach}}) -> "shutdown_ehostunreach";
get_status_class({error, {shutdown,normal}}) -> "shutdown_normal";
get_status_class({error, {closed,_}}) -> "closed";
get_status_class({error, noproc}) -> "noproc";
get_status_class({error, {down,_}}) -> "down";
get_status_class({error, {stream_error,_}}) -> "stream_error";
%% A numeric status code is reduced to its class ("2xx", "4xx", ...) to keep
%% the status_class label cardinality low, as the @doc above requires.
get_status_class(Data) when is_integer(Data), Data > 0 ->
	prometheus_http:status_class(Data);
%% A binary status (as received on the wire) is parsed and classified; anything
%% unparsable is reported as "unknown".
get_status_class(Data) when is_binary(Data) ->
	case catch binary_to_integer(Data) of
		{_, _} ->
			?LOG_DEBUG([{event, unknown_status}, {status, Data}]),
			"unknown";
		Status ->
			get_status_class(Status)
	end;
get_status_class(Data) when is_atom(Data) ->
	atom_to_list(Data);
get_status_class(Data) ->
	?LOG_DEBUG([{event, unknown_status}, {status, Data}]),
	"unknown".



================================================
FILE: apps/arweave/src/ar_metrics_collector.erl
================================================
-module(ar_metrics_collector).

-behaviour(prometheus_collector).

-export([
	deregister_cleanup/1,
	collect_mf/2
]).

-import(prometheus_model_helpers, [create_mf/4]).

-include_lib("prometheus/include/prometheus.hrl").

-define(METRIC_NAME_PREFIX, "arweave_").

%% ===================================================================
%% API
%% ===================================================================

%% called to collect Metric Families
-spec collect_mf(_Registry, Callback) -> ok when
	_Registry :: prometheus_registry:registry(),
	Callback :: prometheus_collector:callback().
%% @doc Collect the metric families and hand each one to Callback.
collect_mf(_Registry, Callback) ->
	Metrics = metrics(),
	[add_metric_family(Metric, Callback) || Metric <- Metrics],
	ok.

%% called when collector deregistered
deregister_cleanup(_Registry) ->
	ok.

%% ===================================================================
%% Private functions
%% ===================================================================

%% Wrap one {Name, Type, Help, Value} tuple into a prometheus metric family.
add_metric_family({Name, Type, Help, Metrics}, Callback) ->
	Callback(create_mf(?METRIC_NAME(Name), Help, Type, Metrics)).

%% Build the list of ad-hoc gauges sampled at scrape time.
metrics() ->
	RanchInfo = ranch:info(),
	[
		{storage_blocks_stored, gauge, "Blocks stored",
			case ets:lookup(ar_header_sync, synced_blocks) of
				[] -> 0;
				[{_, N}] -> N
			end},
		{arnode_queue_len, gauge, "Size of message queue on ar_node_worker",
			get_message_queue_len(ar_node_worker)},
		{arbridge_queue_len, gauge, "Size of message queue on ar_bridge",
			get_message_queue_len(ar_bridge)},
		{ignored_ids_len, gauge, "Size of table of Ignored/already seen IDs:",
			ets:info(ignored_ids, size)},
		{ar_data_discovery_bytes_total, gauge, "ar_data_discovery process memory",
			get_process_memory(ar_data_discovery)},
		{ar_node_worker_bytes_total, gauge, "ar_node_worker process memory",
			get_process_memory(ar_node_worker)},
		{ar_header_sync_bytes_total, gauge, "ar_header_sync process memory",
			get_process_memory(ar_header_sync)},
		{ar_wallets_bytes_total, gauge, "ar_wallets process memory",
			get_process_memory(ar_wallets)},
		{ar_http_iface_listener_ranch_max_connections, gauge,
			"Maximum number of Ranch connections",
			get_ranch_max_connections(RanchInfo, ar_http_iface_listener)},
		{ar_http_iface_listener_ranch_active_connections, gauge,
			"Currently active Ranch connections",
			get_ranch_active_connections(RanchInfo, ar_http_iface_listener)}
	].

%% Return the heap memory (in bytes) of the named process, or 0 when the
%% process is not registered or has exited between whereis/1 and process_info/2.
get_process_memory(Name) ->
	case whereis(Name) of
		undefined ->
			0;
		PID ->
			case erlang:process_info(PID, memory) of
				{memory, Memory} ->
					Memory;
				undefined ->
					0
			end
	end.

%% Return the message queue length of the named process, or 0 when the process
%% is not registered or has exited. Avoids the badarg that
%% erlang:process_info(undefined, ...) would raise during a scrape.
get_message_queue_len(Name) ->
	case whereis(Name) of
		undefined ->
			0;
		PID ->
			case erlang:process_info(PID, message_queue_len) of
				{message_queue_len, Len} ->
					Len;
				undefined ->
					0
			end
	end.
%% Return the configured max connection count for the named Ranch listener.
get_ranch_max_connections(RInfo, Name) ->
	get_ranch_info_value(RInfo, Name, max_connections).

%% Return the current active connection count for the named Ranch listener.
get_ranch_active_connections(RInfo, Name) ->
	get_ranch_info_value(RInfo, Name, active_connections).

get_ranch_info_value(RInfo, Name, Key) ->
	PoolDetails = proplists:get_value(Name, RInfo, []),
	%% Signal error condition with -1
	proplists:get_value(Key, PoolDetails, -1).



================================================
FILE: apps/arweave/src/ar_mine_randomx.erl
================================================
-module(ar_mine_randomx).

-export([init_fast/3, init_light/2, info/1, hash/2, hash/5,
		randomx_encrypt_chunk/4, randomx_decrypt_chunk/5,
		randomx_decrypt_sub_chunk/5, randomx_reencrypt_chunk/7,
		randomx_generate_replica_2_9_entropy/2,
		randomx_encrypt_replica_2_9_sub_chunk/1,
		randomx_decrypt_replica_2_9_sub_chunk/1,
		exor_sub_chunk/2]).

%% These exports are required for the STUB mode, where these functions are unused.
%% Also, some of these functions are used in ar_mine_randomx_tests.
-export([jit/0, large_pages/0, hardware_aes/0, init_fast2/5, init_light2/4]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

-ifdef(STUB_RANDOMX).
%% In STUB mode no NIF state is created; the key alone drives the stub hashing.
init_fast(RxMode, Key, _Threads) ->
	{RxMode, {stub_state, Key}}.
init_light(RxMode, Key) ->
	{RxMode, {stub_state, Key}}.
-else.
init_fast(RxMode, Key, Threads) ->
	init_fast2(RxMode, Key, jit(), large_pages(), Threads).
init_light(RxMode, Key) ->
	%% Pass Key as the second argument: init_light2/4 is (Mode, Key, JIT,
	%% LargePages) and forwards Key as the first NIF argument.
	init_light2(RxMode, Key, jit(), large_pages()).
-endif.

info(State) ->
	info2(State).

hash(State, Data) ->
	hash(State, Data, jit(), large_pages(), hardware_aes()).

hash(State, Data, JIT, LargePages, HardwareAES) ->
	hash2(State, Data, JIT, LargePages, HardwareAES).
%% @doc Pack Chunk under the given Packing scheme with the given Key.
%% {error, invalid_randomx_mode} is passed through; any other error from the
%% NIF layer is reported as {exception, Error}.
randomx_encrypt_chunk(Packing, RandomxState, Key, Chunk) ->
	case randomx_encrypt_chunk2(Packing, RandomxState, Key, Chunk) of
		{error, invalid_randomx_mode} ->
			{error, invalid_randomx_mode};
		{error, Error} ->
			%% All other errors are from the NIF, so we treat as an exception
			{exception, Error};
		Reply ->
			Reply
	end.

%% @doc Unpack Chunk (of original size ChunkSize) packed under Packing with Key.
%% Returns {ok, UnpackedChunk}, {error, invalid_randomx_mode},
%% {error, invalid_padding}, or {exception, Error} for other NIF errors.
randomx_decrypt_chunk(Packing, RandomxState, Key, Chunk, ChunkSize) ->
	PackedSize = byte_size(Chunk),
	%% For the spora_2_6 and composite packing schemes we want to confirm
	%% the padding in the unpacked chunk is all zeros.
	%% To do that we pass in the maximum chunk size (?DATA_CHUNK_SIZE) to prevent the NIF
	%% from removing the padding. We can then validate the padding and remove it in
	%% ar_packing_server:unpad_chunk/4.
	Size = case Packing of
		{spora_2_6, _Addr} ->
			?DATA_CHUNK_SIZE;
		{composite, _Addr, _PackingDifficulty} ->
			?DATA_CHUNK_SIZE;
		_ ->
			ChunkSize
	end,
	case randomx_decrypt_chunk2(RandomxState, Key, Chunk, Size, Packing) of
		{error, invalid_randomx_mode} ->
			{error, invalid_randomx_mode};
		{error, Error} ->
			%% All other errors are from the NIF, so we treat as an exception
			{exception, Error};
		{ok, Unpacked} ->
			%% Validating the padding (for spora_2_6 and composite) and then remove it.
			case ar_packing_server:unpad_chunk(Packing, Unpacked, ChunkSize, PackedSize) of
				error ->
					?LOG_WARNING([{event, unpad_chunk_error},
							{packed_size, PackedSize}, {chunk_size, ChunkSize}]),
					{error, invalid_padding};
				UnpackedChunk ->
					{ok, UnpackedChunk}
			end
	end.

%% @doc Unpack a single sub-chunk at SubChunkStartOffset. Error mapping follows
%% the same convention as randomx_encrypt_chunk/4.
randomx_decrypt_sub_chunk(Packing, RandomxState, Key, Chunk, SubChunkStartOffset) ->
	case randomx_decrypt_sub_chunk2(Packing, RandomxState, Key, Chunk, SubChunkStartOffset) of
		{error, invalid_randomx_mode} ->
			{error, invalid_randomx_mode};
		{error, Error} ->
			%% All other errors are from the NIF, so we treat as an exception
			{exception, Error};
		Reply ->
			Reply
	end.
%% @doc Re-pack Chunk from SourcePacking (unpacked with UnpackKey) to
%% TargetPacking (packed with PackKey) in one operation.
randomx_reencrypt_chunk(SourcePacking, TargetPacking, RandomxState, UnpackKey, PackKey,
		Chunk, ChunkSize) ->
	randomx_reencrypt_chunk2(SourcePacking, TargetPacking, RandomxState, UnpackKey, PackKey,
			Chunk, ChunkSize).

%%% AR_TEST implementation
%% @doc Generate the replica.2.9 entropy for the given Key.
randomx_generate_replica_2_9_entropy({_, {stub_state, _}}, Key) ->
	%% Make it fast, deterministic, and scoped by Key.
	%% Note that ?REPLICA_2_9_ENTROPY_SIZE is
	%% reduced significantly in the AR_TEST mode.
	SubChunkCount = ar_block:get_sub_chunks_per_replica_2_9_entropy(),
	lists:foldl(
		fun(N1, Acc) ->
			lists:foldl(
				fun(N2, Acc2) ->
					%% Each 32-byte SHA-256 digest is keyed by (N1, N2, Key).
					<< (crypto:hash(sha256, << N1:16, N2:16, Key/binary >>))/binary,
							Acc2/binary >>
				end,
				Acc,
				lists:seq(1, ?COMPOSITE_PACKING_SUB_CHUNK_SIZE div 32)
			)
		end,
		<<>>,
		lists:seq(1, SubChunkCount)
	);
%% Non-AR_TEST implementation
randomx_generate_replica_2_9_entropy({rxsquared, RandomxState}, Key) ->
	{ok, EntropyFused} = ar_rxsquared_nif:rsp_fused_entropy_nif(
		RandomxState,
		?COMPOSITE_PACKING_SUB_CHUNK_COUNT,
		?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
		?REPLICA_2_9_RANDOMX_LANE_COUNT,
		?REPLICA_2_9_RANDOMX_DEPTH,
		jit(),
		large_pages(),
		hardware_aes(),
		?REPLICA_2_9_RANDOMX_PROGRAM_COUNT,
		Key
	),
	EntropyFused.

%% @doc Decipher a replica.2.9 sub-chunk by XOR-ing it with the entropy slice
%% at EntropySubChunkIndex. XOR is symmetric, so this mirrors the encrypt path.
randomx_decrypt_replica_2_9_sub_chunk(
		{_PackingState, Entropy, SubChunk, EntropySubChunkIndex}) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	EntropyPart = binary:part(Entropy, EntropySubChunkIndex * SubChunkSize, SubChunkSize),
	{ok, exor_sub_chunk(SubChunk, EntropyPart)}.

%% @doc Encipher a replica.2.9 sub-chunk by XOR-ing it with the entropy slice
%% at EntropySubChunkIndex.
randomx_encrypt_replica_2_9_sub_chunk(
		{_PackingState, Entropy, SubChunk, EntropySubChunkIndex}) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	EntropyPart = binary:part(Entropy, EntropySubChunkIndex * SubChunkSize, SubChunkSize),
	{ok, exor_sub_chunk(SubChunk, EntropyPart)}.

%% @doc Encipher/decipher the given sub-chunk using the given 2.9 entropy.
-spec exor_sub_chunk(
		SubChunk :: binary(),
		EntropyPart :: binary()
) -> binary().
exor_sub_chunk(SubChunk, EntropyPart) ->
	crypto:exor(SubChunk, EntropyPart).
%%%===================================================================
%%% Private functions.
%%%===================================================================

%% -------------------------------------------------------------------------------------------
%% Helper functions
%% -------------------------------------------------------------------------------------------

%% Map a legacy packing scheme onto its RandomX round count.
packing_rounds(spora_2_5) ->
	?RANDOMX_PACKING_ROUNDS;
packing_rounds({spora_2_6, _Addr}) ->
	?RANDOMX_PACKING_ROUNDS_2_6.

%% 1 unless RandomX JIT compilation was disabled via configuration.
jit() ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(randomx_jit, Config#config.disable) of
		true ->
			0;
		false ->
			1
	end.

%% 1 when RandomX large-pages support was enabled via configuration, 0 otherwise.
large_pages() ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(randomx_large_pages, Config#config.enable) of
		true ->
			1;
		false ->
			0
	end.

%% 1 unless hardware AES acceleration was disabled via configuration.
hardware_aes() ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(randomx_hardware_aes, Config#config.disable) of
		true ->
			0;
		false ->
			1
	end.

%% Split a chunk into a list of {StartOffset, SubChunk} pairs of 8192 bytes
%% each. The input length must be a multiple of 8192 (callers pad first);
%% a trailing remainder would raise function_clause.
split_into_sub_chunks(Chunk) ->
	split_into_sub_chunks(Chunk, 0).

split_into_sub_chunks(<<>>, _Offset) ->
	[];
split_into_sub_chunks(<< SubChunk:8192/binary, Rest/binary >>, Offset) ->
	[{Offset, SubChunk} | split_into_sub_chunks(Rest, Offset + 8192)].

%% Initialize a fast (mining) RandomX state for the given mode.
%% Returns {Mode, State}, or {error, invalid_randomx_mode} for unknown modes.
init_fast2(rx512, Key, JIT, LargePages, Threads) ->
	{ok, State} = ar_rx512_nif:rx512_init_nif(Key, ?RANDOMX_HASHING_MODE_FAST,
			JIT, LargePages, Threads),
	{rx512, State};
init_fast2(rx4096, Key, JIT, LargePages, Threads) ->
	{ok, State} = ar_rx4096_nif:rx4096_init_nif(Key, ?RANDOMX_HASHING_MODE_FAST,
			JIT, LargePages, Threads),
	{rx4096, State};
init_fast2(rxsquared, Key, JIT, LargePages, Threads) ->
	{ok, State} = ar_rxsquared_nif:rxsquared_init_nif(Key, ?RANDOMX_HASHING_MODE_FAST,
			JIT, LargePages, Threads),
	{rxsquared, State};
init_fast2(Mode, _Key, _JIT, _LargePages, _Threads) ->
	?LOG_ERROR([{event, invalid_randomx_mode}, {mode, Mode}]),
	{error, invalid_randomx_mode}.
%% Initialize a light (verification-only) RandomX state for the given mode.
%% Mirrors init_fast2/5 but always uses ?RANDOMX_HASHING_MODE_LIGHT and no
%% initialization threads. Returns {Mode, State} on success or
%% {error, invalid_randomx_mode} for an unknown mode.
init_light2(rx512, Key, JIT, LargePages) ->
	{ok, LightState} = ar_rx512_nif:rx512_init_nif(Key, ?RANDOMX_HASHING_MODE_LIGHT,
			JIT, LargePages, 0),
	{rx512, LightState};
init_light2(rx4096, Key, JIT, LargePages) ->
	{ok, LightState} = ar_rx4096_nif:rx4096_init_nif(Key, ?RANDOMX_HASHING_MODE_LIGHT,
			JIT, LargePages, 0),
	{rx4096, LightState};
init_light2(rxsquared, Key, JIT, LargePages) ->
	{ok, LightState} = ar_rxsquared_nif:rxsquared_init_nif(Key, ?RANDOMX_HASHING_MODE_LIGHT,
			JIT, LargePages, 0),
	{rxsquared, LightState};
init_light2(RxMode, _Key, _JIT, _LargePages) ->
	?LOG_ERROR([{event, invalid_randomx_mode}, {mode, RxMode}]),
	%% Bug fix: this clause used to return the misspelled tuple
	%% {exceperrortion, invalid_randomx_mode}, inconsistent with init_fast2/5
	%% and every other invalid-mode return in this module.
	{error, invalid_randomx_mode}.

%% Query the underlying NIF for information about the given RandomX state.
info2({rx512, State}) ->
	ar_rx512_nif:rx512_info_nif(State);
info2({rx4096, State}) ->
	ar_rx4096_nif:rx4096_info_nif(State);
info2({rxsquared, State}) ->
	ar_rxsquared_nif:rxsquared_info_nif(State);
info2(_) ->
	{error, invalid_randomx_mode}.

%% -------------------------------------------------------------------------------------------
%% hash2 and randomx_[encrypt|decrypt|reencrypt]_chunk2
%% STUB implementation, used in tests, is called when State is {stub_state, Key}
%% Otherwise, NIF implementation is used
%% We set it up this way so that we can have some tests trigger the NIF implementation
%% -------------------------------------------------------------------------------------------

%% Compute the RandomX hash of Data under the given state.
%% STUB implementation: a plain SHA-256 keyed by the stub state's Key.
hash2({_, {stub_state, Key}}, Data, _JIT, _LargePages, _HardwareAES) ->
	crypto:hash(sha256, << Key/binary, Data/binary >>);
%% Non-STUB implementation
hash2({rx512, State}, Data, JIT, LargePages, HardwareAES) ->
	{ok, Hash} = ar_rx512_nif:rx512_hash_nif(State, Data, JIT, LargePages, HardwareAES),
	Hash;
hash2({rx4096, State}, Data, JIT, LargePages, HardwareAES) ->
	{ok, Hash} = ar_rx4096_nif:rx4096_hash_nif(State, Data, JIT, LargePages, HardwareAES),
	Hash;
hash2({rxsquared, State}, Data, JIT, LargePages, HardwareAES) ->
	{ok, Hash} = ar_rxsquared_nif:rxsquared_hash_nif(State, Data, JIT, LargePages,
			HardwareAES),
	Hash;
hash2(_BadState, _Data, _JIT, _LargePages, _HardwareAES) ->
	{error, invalid_randomx_mode}.

%% Decrypt (unpack) a chunk.
%% STUB implementation: AES-256-CBC based, deterministic and fast. For the
%% composite scheme, PackingDifficulty AES passes are applied per 8192-byte
%% sub-chunk, each keyed by the sub-chunk's start offset.
randomx_decrypt_chunk2({_, {stub_state, _}}, Key, Chunk, _ChunkSize,
		{composite, _, PackingDifficulty} = _Packing) ->
	Options = [{encrypt, false}],
	IV = binary:part(Key, {0, 16}),
	SubChunks = split_into_sub_chunks(Chunk),
	{ok, iolist_to_binary(lists:map(
		fun({SubChunkStartOffset, SubChunk}) ->
			Key2 = crypto:hash(sha256, << Key/binary, SubChunkStartOffset:24 >>),
			lists:foldl(
				fun(_, Acc) ->
					crypto:crypto_one_time(aes_256_cbc, Key2, IV, Acc, Options)
				end,
				SubChunk,
				lists:seq(1, PackingDifficulty)
			)
		end,
		SubChunks))};
randomx_decrypt_chunk2({_, {stub_state, _}}, Key, Chunk, _ChunkSize, _Packing) ->
	Options = [{encrypt, false}],
	IV = binary:part(Key, {0, 16}),
	{ok, crypto:crypto_one_time(aes_256_cbc, Key, IV, Chunk, Options)};
%% Non-STUB implementation
randomx_decrypt_chunk2({rx512, RandomxState}, Key, Chunk, ChunkSize, spora_2_5) ->
	ar_rx512_nif:rx512_decrypt_chunk_nif(RandomxState, Key, Chunk, ChunkSize,
			?RANDOMX_PACKING_ROUNDS, jit(), large_pages(), hardware_aes());
randomx_decrypt_chunk2({rx512, RandomxState}, Key, Chunk, ChunkSize, {spora_2_6, _Addr}) ->
	ar_rx512_nif:rx512_decrypt_chunk_nif(RandomxState, Key, Chunk, ChunkSize,
			?RANDOMX_PACKING_ROUNDS_2_6, jit(), large_pages(), hardware_aes());
randomx_decrypt_chunk2({rx4096, RandomxState}, Key, Chunk, ChunkSize,
		{composite, _Addr, PackingDifficulty}) ->
	ar_rx4096_nif:rx4096_decrypt_composite_chunk_nif(RandomxState, Key, Chunk, ChunkSize,
			jit(), large_pages(), hardware_aes(), ?COMPOSITE_PACKING_ROUND_COUNT,
			PackingDifficulty, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT);
randomx_decrypt_chunk2(_BadState, _Key, _Chunk, _ChunkSize, _Packing) ->
	{error, invalid_randomx_mode}.
%% Decrypt (unpack) a single composite sub-chunk.
%% STUB implementation: reverses the AES construction used by the stub
%% composite encryptor; the per-sub-chunk key is derived from the packing key
%% and the sub-chunk's start offset.
randomx_decrypt_sub_chunk2(Packing, {_, {stub_state, _}}, Key, Chunk, SubChunkStartOffset) ->
	{_, _, Iterations} = Packing,
	Options = [{encrypt, false}],
	Key2 = crypto:hash(sha256, << Key/binary, SubChunkStartOffset:24 >>),
	IV = binary:part(Key, {0, 16}),
	{ok, lists:foldl(fun(_, Acc) ->
		crypto:crypto_one_time(aes_256_cbc, Key2, IV, Acc, Options)
	end, Chunk, lists:seq(1, Iterations))};
%% Non-STUB implementation
randomx_decrypt_sub_chunk2(Packing, {rx4096, RandomxState}, Key, Chunk, SubChunkStartOffset) ->
	%% Packing is {composite, Addr, IterationCount}; only the iteration count
	%% is needed by the NIF.
	{_, _, IterationCount} = Packing,
	RoundCount = ?COMPOSITE_PACKING_ROUND_COUNT,
	OutSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	ar_rx4096_nif:rx4096_decrypt_composite_sub_chunk_nif(RandomxState, Key, Chunk, OutSize,
			jit(), large_pages(), hardware_aes(), RoundCount, IterationCount,
			SubChunkStartOffset);
randomx_decrypt_sub_chunk2(_Packing, _BadState, _Key, _Chunk, _SubChunkStartOffset) ->
	{error, invalid_randomx_mode}.

%% Encrypt (pack) a chunk.
%% STUB implementation: the chunk is zero-padded to the full chunk size first;
%% for composite packing, PackingDifficulty AES passes are applied per
%% 8192-byte sub-chunk.
randomx_encrypt_chunk2({composite, _, PackingDifficulty} = _Packing,
		{_, {stub_state, _}}, Key, Chunk) ->
	Options = [{encrypt, true}, {padding, zero}],
	IV = binary:part(Key, {0, 16}),
	SubChunks = split_into_sub_chunks(ar_packing_server:pad_chunk(Chunk)),
	{ok, iolist_to_binary(lists:map(
		fun({SubChunkStartOffset, SubChunk}) ->
			Key2 = crypto:hash(sha256, << Key/binary, SubChunkStartOffset:24 >>),
			lists:foldl(
				fun(_, Acc) ->
					crypto:crypto_one_time(aes_256_cbc, Key2, IV, Acc, Options)
				end,
				SubChunk,
				lists:seq(1, PackingDifficulty)
			)
		end,
		SubChunks))};
randomx_encrypt_chunk2(_Packing, {_, {stub_state, _}}, Key, Chunk) ->
	Options = [{encrypt, true}, {padding, zero}],
	IV = binary:part(Key, {0, 16}),
	{ok, crypto:crypto_one_time(aes_256_cbc, Key, IV,
			ar_packing_server:pad_chunk(Chunk), Options)};
%% Non-STUB implementation
randomx_encrypt_chunk2(spora_2_5, {rx512, RandomxState}, Key, Chunk) ->
	ar_rx512_nif:rx512_encrypt_chunk_nif(RandomxState, Key, Chunk,
			?RANDOMX_PACKING_ROUNDS, jit(), large_pages(), hardware_aes());
randomx_encrypt_chunk2({spora_2_6, _Addr}, {rx512, RandomxState}, Key, Chunk) ->
	ar_rx512_nif:rx512_encrypt_chunk_nif(RandomxState, Key, Chunk,
			?RANDOMX_PACKING_ROUNDS_2_6, jit(), large_pages(), hardware_aes());
randomx_encrypt_chunk2({composite, _Addr, PackingDifficulty}, {rx4096, RandomxState},
		Key, Chunk) ->
	ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomxState, Key, Chunk,
			jit(), large_pages(), hardware_aes(), ?COMPOSITE_PACKING_ROUND_COUNT,
			PackingDifficulty, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT);
randomx_encrypt_chunk2(_Packing, _BadState, _Key, _Chunk) ->
	{error, invalid_randomx_mode}.

%% Re-pack a chunk from SourcePacking to TargetPacking.
%% STUB implementation: decrypt then encrypt via the stub primitives above.
randomx_reencrypt_chunk2(SourcePacking, TargetPacking, {_, {stub_state, _}} = State,
		UnpackKey, PackKey, Chunk, ChunkSize) ->
	case randomx_decrypt_chunk(SourcePacking, State, UnpackKey, Chunk, ChunkSize) of
		{ok, UnpackedChunk} ->
			{ok, RepackedChunk} = randomx_encrypt_chunk2(TargetPacking, State, PackKey,
					ar_packing_server:pad_chunk(UnpackedChunk)),
			case {SourcePacking, TargetPacking} of
				{{composite, Addr, _}, {composite, Addr, _}} ->
					%% See the same function defined for the non-STUB mode.
					{ok, RepackedChunk, none};
				_ ->
					{ok, RepackedChunk, UnpackedChunk}
			end;
		Error ->
			Error
	end;
%% Non-STUB implementation
randomx_reencrypt_chunk2({composite, Addr1, PackingDifficulty1},
		{composite, Addr2, PackingDifficulty2},
		{rx4096, RandomxState}, UnpackKey, PackKey, Chunk, ChunkSize) ->
	case ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(RandomxState,
			UnpackKey, PackKey, Chunk, jit(), large_pages(), hardware_aes(),
			?COMPOSITE_PACKING_ROUND_COUNT, ?COMPOSITE_PACKING_ROUND_COUNT,
			PackingDifficulty1, PackingDifficulty2,
			?COMPOSITE_PACKING_SUB_CHUNK_COUNT, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT) of
		{ok, Repacked, RepackInput} ->
			case Addr1 == Addr2 of
				true ->
					%% When the addresses match, we do not have to unpack the chunk - we may
					%% simply pack the missing iterations so RepackInput is not the unpacked
					%% chunk and we return none instead. If the caller needs the unpacked
					%% chunk as well, they need to make an extra call.
					{ok, Repacked, none};
				false ->
					%% RepackInput is the unpacked chunk - return it.
					Unpadded = ar_packing_server:unpad_chunk(RepackInput,
							ChunkSize, ?DATA_CHUNK_SIZE),
					{ok, Repacked, Unpadded}
			end;
		{error, Error} ->
			%% All other errors are from the NIF, so we treat them as exceptions.
			{exception, Error};
		Reply ->
			Reply
	end;
%% Repacking from a non-composite scheme to composite is not supported.
randomx_reencrypt_chunk2(_SourcePacking, {composite, _Addr, _PackingDifficulty},
		_RandomxState, _UnpackKey, _PackKey, _Chunk, _ChunkSize) ->
	{error, invalid_reencrypt_packing};
randomx_reencrypt_chunk2(SourcePacking, TargetPacking, {rx512, RandomxState},
		UnpackKey, PackKey, Chunk, ChunkSize) ->
	UnpackRounds = packing_rounds(SourcePacking),
	PackRounds = packing_rounds(TargetPacking),
	case ar_rx512_nif:rx512_reencrypt_chunk_nif(RandomxState, UnpackKey, PackKey, Chunk,
			ChunkSize, UnpackRounds, PackRounds, jit(), large_pages(), hardware_aes()) of
		{error, Error} ->
			%% All other errors are from the NIF, so we treat them as exceptions.
			{exception, Error};
		Reply ->
			Reply
	end;
randomx_reencrypt_chunk2(_SourcePacking, _TargetPacking, _BadState,
		_UnpackKey, _PackKey, _Chunk, _ChunkSize) ->
	{error, invalid_randomx_mode}.



================================================
FILE: apps/arweave/src/ar_mining_cache.erl
================================================
%% A bounded, session-scoped cache used by the mining machinery. Each session
%% tracks its cached values, a running byte counter, and a reservation counter.
-module(ar_mining_cache).

-include_lib("arweave/include/ar_mining_cache.hrl").
-include_lib("arweave/include/ar.hrl").

-export([
	new/1, new/2, set_limit/2, get_limit/1, cache_size/1, actual_cache_size/1,
	available_size/1, reserved_size/1, reserved_size/2, add_session/2,
	reserve_for_session/3, release_for_session/3, drop_session/2, session_exists/2,
	get_sessions/1, with_cached_value/4
]).

%% Maximum number of concurrently tracked sessions; add_session/2 drops the
%% oldest session once this limit is exceeded.
-define(CACHE_SESSIONS_LIMIT, 4).

%%%===================================================================
%%% Public API.
%%%===================================================================

%% @doc Creates a new mining cache with a default limit of 0.
-spec new(Name :: term()) -> Cache :: #ar_mining_cache{}.
new(Name) ->
	#ar_mining_cache{name = Name}.
%% @doc Creates a new mining cache with a given limit.
-spec new(Name :: term(), Limit :: pos_integer()) -> Cache :: #ar_mining_cache{}.
new(Name, Limit) ->
	#ar_mining_cache{ name = Name, mining_cache_limit_bytes = Limit }.

%% @doc Sets the limit for the mining cache.
-spec set_limit(Limit :: pos_integer(), Cache :: #ar_mining_cache{}) -> Cache :: #ar_mining_cache{}.
set_limit(Limit, Cache) ->
	Cache#ar_mining_cache{ mining_cache_limit_bytes = Limit }.

%% @doc Returns the limit for the mining cache.
-spec get_limit(Cache :: #ar_mining_cache{}) -> Limit :: non_neg_integer().
get_limit(Cache) ->
	Cache#ar_mining_cache.mining_cache_limit_bytes.

%% @doc Returns the size of the cached data in bytes, counting both the bytes
%% already accounted to each session and the bytes reserved for future use.
-spec cache_size(Cache :: #ar_mining_cache{}) -> Size :: non_neg_integer().
cache_size(Cache) ->
	Sessions = maps:values(Cache#ar_mining_cache.mining_cache_sessions),
	lists:sum([
		Session#ar_mining_cache_session.mining_cache_size_bytes
				+ Session#ar_mining_cache_session.reserved_mining_cache_bytes
		|| Session <- Sessions
	]).

%% @doc Returns the number of bytes actually occupied by cached values across
%% all sessions, recomputed from the values themselves. Unlike cache_size/1,
%% reserved space is not included.
-spec actual_cache_size(Cache :: #ar_mining_cache{}) -> Size :: non_neg_integer().
actual_cache_size(Cache) ->
	Sessions = maps:values(Cache#ar_mining_cache.mining_cache_sessions),
	lists:sum([
		cached_value_size(CachedValue)
		|| Session <- Sessions,
			CachedValue <- maps:values(Session#ar_mining_cache_session.mining_cache)
	]).

%% @doc Returns the available size for the mining cache.
%% Note, that this value does not include the reserved space for sessions,
%% as this space is considered already used.
%% @see reserved_size/1,2
%% @see cache_size/1
-spec available_size(Cache :: #ar_mining_cache{}) -> Size :: non_neg_integer().
available_size(Cache) ->
	Cache#ar_mining_cache.mining_cache_limit_bytes - cache_size(Cache).
%% @doc Returns the reserved size for a cache (sum over all sessions).
-spec reserved_size(Cache0 :: #ar_mining_cache{}) ->
	{ok, Size :: non_neg_integer()} | {error, Reason :: term()}.
reserved_size(Cache0) ->
	{ok, lists:sum([
		begin
			{ok, Size} = reserved_size(SessionKey, Cache0),
			Size
		end
		|| SessionKey <- get_sessions(Cache0)
	])}.

%% @doc Returns the reserved size for a session, or {error, session_not_found}.
-spec reserved_size(SessionKey :: term(), Cache0 :: #ar_mining_cache{}) ->
	{ok, Size :: non_neg_integer()} | {error, Reason :: term()}.
reserved_size(SessionKey, Cache0) ->
	case with_mining_cache_session(SessionKey, fun(Session) ->
		%% Read-only access: return the session unchanged.
		{ok, Session#ar_mining_cache_session.reserved_mining_cache_bytes, Session}
	end, Cache0) of
		{ok, Size, _Cache1} -> {ok, Size};
		{error, Reason} -> {error, Reason}
	end.

%% @doc Adds a new mining cache session to the cache.
%% Adding an already-present session key is a no-op. Once more than
%% ?CACHE_SESSIONS_LIMIT sessions are tracked, the oldest session is dropped.
-spec add_session(SessionKey :: term(), Cache0 :: #ar_mining_cache{}) ->
	Cache1 :: #ar_mining_cache{}.
add_session(SessionKey, Cache0) ->
	case maps:is_key(SessionKey, Cache0#ar_mining_cache.mining_cache_sessions) of
		true -> Cache0;
		false ->
			%% Register the session and remember its age via the queue.
			Cache1 = Cache0#ar_mining_cache{
				mining_cache_sessions = maps:put(SessionKey, #ar_mining_cache_session{},
						Cache0#ar_mining_cache.mining_cache_sessions),
				mining_cache_sessions_queue = queue:in(SessionKey,
						Cache0#ar_mining_cache.mining_cache_sessions_queue)
			},
			case queue:len(Cache1#ar_mining_cache.mining_cache_sessions_queue)
					> ?CACHE_SESSIONS_LIMIT of
				true ->
					%% Evict the oldest session (front of the queue).
					{{value, LastSessionKey}, Queue1} =
							queue:out(Cache1#ar_mining_cache.mining_cache_sessions_queue),
					Cache2 = drop_session(LastSessionKey, Cache1),
					?LOG_DEBUG([
						{event, mining_cache_add_drop_session},
						{cache_name, Cache1#ar_mining_cache.name},
						{added_session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
						{dropped_session_key,
								ar_nonce_limiter:encode_session_key(LastSessionKey)},
						{num_sessions, queue:len(Queue1)}]),
					Cache2#ar_mining_cache{mining_cache_sessions_queue = Queue1};
				false ->
					?LOG_DEBUG([
						{event, mining_cache_add_session},
						{cache_name, Cache1#ar_mining_cache.name},
						{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
						{num_sessions,
								queue:len(Cache1#ar_mining_cache.mining_cache_sessions_queue)}]),
					Cache1
			end
	end.

%% @doc Reserves a certain amount of space for a session.
%% Note, that if the session already has a reserved amount of space, it will be
%% added to the existing reserved space.
-spec reserve_for_session(SessionKey :: term(), Size :: non_neg_integer(),
		Cache0 :: #ar_mining_cache{}) ->
	{ok, Cache1 :: #ar_mining_cache{}} | {error, Reason :: term()}.
reserve_for_session(SessionKey, Size, Cache0) ->
	%% Refuse the reservation if it would push usage past the limit.
	case available_size(Cache0) < Size of
		true -> {error, cache_limit_exceeded};
		false ->
			with_mining_cache_session(SessionKey,
				fun(#ar_mining_cache_session{
						reserved_mining_cache_bytes = ReservedSize} = Session) ->
					{ok, Session#ar_mining_cache_session{
							reserved_mining_cache_bytes = ReservedSize + Size}}
				end, Cache0)
	end.

%% @doc Releases the reserved space for a session.
%% If the reserved space is less than the released size, the reserved space will be set to 0.
-spec release_for_session(SessionKey :: term(), Size :: non_neg_integer(),
		Cache0 :: #ar_mining_cache{}) ->
	{ok, Cache1 :: #ar_mining_cache{}} | {error, Reason :: term()}.
release_for_session(SessionKey, Size, Cache0) ->
	with_mining_cache_session(SessionKey,
		fun(#ar_mining_cache_session{
				reserved_mining_cache_bytes = ReservedSize} = Session) ->
			%% Clamp at zero so over-releasing cannot go negative.
			{ok, Session#ar_mining_cache_session{
					reserved_mining_cache_bytes = max(0, ReservedSize - Size)}}
		end, Cache0).

%% @doc Drops a mining cache session from the cache.
-spec drop_session(SessionKey :: term(), Cache0 :: #ar_mining_cache{}) ->
	Cache1 :: #ar_mining_cache{}.
drop_session(SessionKey, Cache0) ->
	case maps:take(SessionKey, Cache0#ar_mining_cache.mining_cache_sessions) of
		{Session, Sessions} ->
			%% Log (but do not act on) any bookkeeping anomalies in the
			%% session being dropped.
			maybe_search_for_anomalies(SessionKey, Session),
			Queue0 = queue:filter(
				fun(SessionKey0) -> SessionKey0 =/= SessionKey end,
				Cache0#ar_mining_cache.mining_cache_sessions_queue
			),
			?LOG_DEBUG([
				{event, mining_cache_drop_session},
				{cache_name, Cache0#ar_mining_cache.name},
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{num_sessions, queue:len(Queue0)}]),
			Cache0#ar_mining_cache{
				mining_cache_sessions = Sessions,
				mining_cache_sessions_queue = Queue0
			};
		_ ->
			%% Unknown session key: nothing to drop.
			Cache0
	end.

%% @doc Checks if a session exists in the cache.
-spec session_exists(SessionKey :: term(), Cache0 :: #ar_mining_cache{}) ->
	Exists :: boolean().
session_exists(SessionKey, Cache0) ->
	maps:is_key(SessionKey, Cache0#ar_mining_cache.mining_cache_sessions).

%% @doc Returns the list of session keys in the cache, in queue order
%% (insertion order, oldest first — see add_session/2 and queue:to_list/1).
-spec get_sessions(Cache0 :: #ar_mining_cache{}) -> Sessions :: [term()].
get_sessions(Cache0) ->
	queue:to_list(Cache0#ar_mining_cache.mining_cache_sessions_queue).

%% @doc Maps a cached value for a session into a new value.
%%
%% This function will take care of the cache size and reserved space for the session.
%% If the session does not contain a cached value for the given key, it will be generated,
%% e.g. the very first event for the `Key` is a genesis event.
%%
%% The `Fun` must return one of the following:
%% - `{ok, drop}`: drops the cached value
%% - `{ok, drop, Size}`: drops the cached value and
%%   additionally adjusts the reserved space by `Size` bytes
%% - `{ok, Value1}`: replaces the cached value
%% - `{ok, Value1, Size}`: replaces the cached value and
%%   additionally adjusts the reserved space by `Size` bytes
%% - `{error, Reason}`: returns an error
%%
%% If the returned value equals to the argument passed into the `Fun`, the cache
%% will not be changed.
%% This implies that cache will not store the empty value.
%%
%% NOTE(review): the spec below declares Size :: non_neg_integer() for the
%% 3-tuple returns, yet the guards in the body only match
%% ReservationSizeAdjustment < 0; a non-negative Size falls through to the
%% `Other` clause and is rejected at runtime. Confirm the intended sign
%% convention with the callers.
-spec with_cached_value(
	Key :: term(),
	SessionKey :: term(),
	Cache0 :: #ar_mining_cache{},
	Fun :: fun(
		(Value :: #ar_mining_cache_value{}) ->
			{ok, drop} |
			{ok, drop, Size :: non_neg_integer()} |
			{ok, Value1 :: #ar_mining_cache_value{}} |
			{ok, Value1 :: #ar_mining_cache_value{}, Size :: non_neg_integer()} |
			{error, Reason :: term()}
	)
) -> Result :: {ok, Cache1 :: #ar_mining_cache{}} | {error, Reason :: term()}.
with_cached_value(Key, SessionKey, Cache0, Fun) ->
	with_mining_cache_session(SessionKey, fun(Session) ->
		%% A missing key yields a fresh #ar_mining_cache_value{} (genesis event).
		Value0 = maps:get(Key, Session#ar_mining_cache_session.mining_cache,
				#ar_mining_cache_value{}),
		case Fun(Value0) of
			{error, Reason} ->
				{error, Reason};
			{ok, drop} ->
				%% Remove the value and release its bytes from the counter.
				{ok, Session#ar_mining_cache_session{
					mining_cache = maps:remove(Key,
							Session#ar_mining_cache_session.mining_cache),
					mining_cache_size_bytes = max(0,
							Session#ar_mining_cache_session.mining_cache_size_bytes
							- cached_value_size(Value0))
				}};
			{ok, drop, ReservationSizeAdjustment} when ReservationSizeAdjustment < 0 ->
				%% Remove the value and additionally shrink the reservation
				%% (the adjustment is negative, hence the addition).
				{ok, Session#ar_mining_cache_session{
					mining_cache = maps:remove(Key,
							Session#ar_mining_cache_session.mining_cache),
					reserved_mining_cache_bytes = max(0,
							Session#ar_mining_cache_session.reserved_mining_cache_bytes
							+ ReservationSizeAdjustment),
					mining_cache_size_bytes = max(0,
							Session#ar_mining_cache_session.mining_cache_size_bytes
							- cached_value_size(Value0))
				}};
			{ok, Value0} ->
				%% Value0 is bound: this clause matches only when Fun returned
				%% the exact same value, so the session is left untouched.
				{ok, Session};
			{ok, Value0, ReservationSizeAdjustment} when ReservationSizeAdjustment < 0 ->
				%% Same value, but with an explicit reservation adjustment.
				{ok, Session#ar_mining_cache_session{
					reserved_mining_cache_bytes = max(0,
							Session#ar_mining_cache_session.reserved_mining_cache_bytes
							+ ReservationSizeAdjustment)
				}};
			{ok, Value1} ->
				%% New value: grow the byte counter by the size delta and
				%% consume the same delta from the session's reservation.
				SizeDiff = cached_value_size(Value1) - cached_value_size(Value0),
				SessionAvailableSize = available_size(Cache0)
						+ Session#ar_mining_cache_session.reserved_mining_cache_bytes,
				CacheLimit = get_limit(Cache0),
				%% A limit of 0 means "unlimited": skip the limit check.
				case SizeDiff > SessionAvailableSize of
					true when CacheLimit =/= 0 ->
						{error, cache_limit_exceeded};
					_ ->
						{ok, Session#ar_mining_cache_session{
							mining_cache = maps:put(Key, Value1,
									Session#ar_mining_cache_session.mining_cache),
							reserved_mining_cache_bytes = max(0,
									Session#ar_mining_cache_session.reserved_mining_cache_bytes
									- SizeDiff),
							mining_cache_size_bytes =
									Session#ar_mining_cache_session.mining_cache_size_bytes
									+ SizeDiff
						}}
				end;
			{ok, Value1, ReservationSizeAdjustment} when ReservationSizeAdjustment < 0 ->
				%% New value plus an extra (negative) reservation adjustment.
				SizeDiff = cached_value_size(Value1) - cached_value_size(Value0),
				SessionAvailableSize = available_size(Cache0)
						+ Session#ar_mining_cache_session.reserved_mining_cache_bytes,
				CacheLimit = get_limit(Cache0),
				case SizeDiff > SessionAvailableSize of
					true when CacheLimit =/= 0 ->
						{error, cache_limit_exceeded};
					_ ->
						{ok, Session#ar_mining_cache_session{
							mining_cache = maps:put(Key, Value1,
									Session#ar_mining_cache_session.mining_cache),
							reserved_mining_cache_bytes = max(0,
									Session#ar_mining_cache_session.reserved_mining_cache_bytes
									- SizeDiff + ReservationSizeAdjustment),
							mining_cache_size_bytes =
									Session#ar_mining_cache_session.mining_cache_size_bytes
									+ SizeDiff
						}}
				end;
			Other ->
				?LOG_WARNING([
					{event, unexpected_return_value_from_with_cached_value},
					{cache_name, Cache0#ar_mining_cache.name},
					{value, Other}]),
				{error, unexpected_return_value_from_with_cached_value}
		end
	end, Cache0).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Returns the size of the cached data in bytes.
%% Only the two chunk binaries contribute; hashes and flags are not counted.
cached_value_size(#ar_mining_cache_value{
	chunk1 = Chunk1,
	chunk2 = Chunk2
}) ->
	MaybeBinarySize = fun
		(undefined) -> 0;
		(Binary) -> byte_size(Binary)
	end,
	MaybeBinarySize(Chunk1) + MaybeBinarySize(Chunk2).

%% Executes the `Fun` function with the chunk cache session as argument.
%% If the session does not exist, it returns an error without executing the `Fun`.
%% The `Fun` function should return either:
%% - a new chunk cache session `{ok, Session}`, which will be used to replace the old one.
%% - a new chunk cache session with return value `{ok, Return, Session}`, which will
%%   be used to replace the old cache session and return a value to the caller.
%% - an error `{error, Reason}` to report back to the caller.
with_mining_cache_session(SessionKey, Fun, Cache0) ->
	case maps:is_key(SessionKey, Cache0#ar_mining_cache.mining_cache_sessions) of
		true ->
			case Fun(maps:get(SessionKey, Cache0#ar_mining_cache.mining_cache_sessions)) of
				{ok, Return, Session1} ->
					%% Store the updated session and forward Fun's return value.
					{ok, Return, Cache0#ar_mining_cache{
						mining_cache_sessions = maps:put(SessionKey, Session1,
								Cache0#ar_mining_cache.mining_cache_sessions)
					}};
				{ok, Session1} ->
					%% Store the updated session.
					{ok, Cache0#ar_mining_cache{
						mining_cache_sessions = maps:put(SessionKey, Session1,
								Cache0#ar_mining_cache.mining_cache_sessions)
					}};
				{error, Reason} ->
					{error, Reason}
			end;
		false ->
			{error, session_not_found}
	end.

%% Searches for anomalies in the mining cache session.
%% If the actual cache size is different from the expected cache size,
%% it will log a warning.
%% If the reserved cache size is different from 0, it will log a warning.
%% It will also search for invalid cache values, e.g. missing chunks, or failed
%% invariants.
%%
%% Perhaps it is a good idea to put this under a config flag, disabled by default.
%% Sanity-check a session that is being dropped: the reported cache size and
%% the reserved size should both have drained to zero, and the cached values
%% should contain no anomalies. Findings are only logged; the drop proceeds
%% regardless.
maybe_search_for_anomalies(SessionKey, #ar_mining_cache_session{
		mining_cache = MiningCache,
		mining_cache_size_bytes = MiningCacheSize,
		reserved_mining_cache_bytes = ReservedMiningCacheBytes }) ->
	ActualCacheSize = maybe_search_for_anomalies_cache_values(SessionKey, MiningCache),
	if
		ActualCacheSize == 0 andalso MiningCacheSize == 0 ->
			ok;
		ActualCacheSize == MiningCacheSize ->
			%% Accounting agrees, but the session still holds data.
			?LOG_WARNING([
				{event, mining_cache_anomaly},
				{anomaly, cache_size_non_zero},
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{actual_size, ActualCacheSize},
				{reported_size, MiningCacheSize}]);
		true ->
			%% The tracked byte counter diverged from the real cached bytes.
			?LOG_WARNING([
				{event, mining_cache_anomaly},
				{anomaly, cache_size_mismatch},
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{actual_size, ActualCacheSize},
				{reported_size, MiningCacheSize}])
	end,
	case ReservedMiningCacheBytes of
		0 ->
			ok;
		_ ->
			%% Reservations should have been fully released by now.
			?LOG_WARNING([
				{event, mining_cache_anomaly},
				{anomaly, reserved_size_non_zero},
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{actual_size, ReservedMiningCacheBytes},
				{expected_size, 0}])
	end;
maybe_search_for_anomalies(SessionKey, _InvalidSession) ->
	%% The stored term was not an #ar_mining_cache_session{} record.
	?LOG_ERROR([{event, mining_cache_anomaly},
		{anomaly, invalid_session_type},
		{session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]),
	ok.
%% Scan every cached value of a session for anomalies and return the actual
%% total byte size of the cached values. Each checker below increments a
%% per-anomaly counter in the Anomalies map and keeps a single {Key, Value}
%% sample under the corresponding *_sample key.
maybe_search_for_anomalies_cache_values(SessionKey, MiningCache) when is_map(MiningCache) ->
	OuterAcc0 = {_Anomalies = #{}, _ActualSize = 0},
	{Anomalies, ActualSize} = maps:fold(fun(Key, Value, {Anomalies0, ActualSize0}) ->
		%% Run every checker over this {Key, Value} pair, threading the
		%% anomalies map through.
		Anomalies1 = lists:foldl(fun(Check, Anomalies) ->
			Check({Key, Value}, Anomalies)
		end, Anomalies0, [
			fun maybe_search_for_anomalies_cache_values_chunk1_failed/2,
			fun maybe_search_for_anomalies_cache_values_chunk1_stale/2,
			fun maybe_search_for_anomalies_cache_values_chunk2_failed/2,
			fun maybe_search_for_anomalies_cache_values_chunk2_stale/2,
			fun maybe_search_for_anomalies_cache_values_h1_missing/2,
			fun maybe_search_for_anomalies_cache_values_h2_missing/2,
			fun maybe_search_for_anomalies_cache_values_h1_passes_diff_checks_present/2
		]),
		{Anomalies1, ActualSize0 + cached_value_size(Value)}
	end, OuterAcc0, MiningCache),
	case maps:size(Anomalies) > 0 of
		true ->
			?LOG_WARNING([
				{event, mining_cache_anomaly},
				{anomaly, cached_values_anomalies},
				{anomalies, Anomalies},
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]);
		false ->
			ok
	end,
	ActualSize;
maybe_search_for_anomalies_cache_values(SessionKey, _InvalidCache) ->
	%% The stored cache was not a map.
	?LOG_ERROR([{event, mining_cache_anomaly},
		{anomaly, invalid_cache_type},
		{session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]),
	0.

%% chunk1_failed: counts values where chunk1 is absent yet chunk1_failed is
%% still false.
maybe_search_for_anomalies_cache_values_chunk1_failed({ Key, #ar_mining_cache_value{
	chunk1 = undefined,
	chunk1_failed = false
} = Value }, Anomalies) ->
	%% Inner update keeps the first recorded sample; outer update bumps the count.
	maps:update_with(chunk1_failed, fun(V) -> V + 1 end, 1,
		maps:update_with(chunk1_failed_sample, fun(V) -> V end, {Key, Value}, Anomalies));
maybe_search_for_anomalies_cache_values_chunk1_failed({_, _}, Anomalies) ->
	Anomalies.
%% chunk1_stale: a chunk1 binary is still present although it was marked failed.
maybe_search_for_anomalies_cache_values_chunk1_stale({Key, #ar_mining_cache_value{
		chunk1 = Chunk1, chunk1_failed = true } = Value}, Acc)
		when Chunk1 =/= undefined ->
	WithSample = maps:update_with(chunk1_stale_sample,
			fun(Existing) -> Existing end, {Key, Value}, Acc),
	maps:update_with(chunk1_stale, fun(Count) -> Count + 1 end, 1, WithSample);
maybe_search_for_anomalies_cache_values_chunk1_stale({_, _}, Acc) ->
	Acc.

%% chunk2_failed: chunk2 is absent yet chunk2_failed is still false.
maybe_search_for_anomalies_cache_values_chunk2_failed({Key, #ar_mining_cache_value{
		chunk2 = undefined, chunk2_failed = false } = Value}, Acc) ->
	WithSample = maps:update_with(chunk2_failed_sample,
			fun(Existing) -> Existing end, {Key, Value}, Acc),
	maps:update_with(chunk2_failed, fun(Count) -> Count + 1 end, 1, WithSample);
maybe_search_for_anomalies_cache_values_chunk2_failed({_, _}, Acc) ->
	Acc.

%% chunk2_stale: a chunk2 binary is still present although it was marked failed.
maybe_search_for_anomalies_cache_values_chunk2_stale({Key, #ar_mining_cache_value{
		chunk2 = Chunk2, chunk2_failed = true } = Value}, Acc)
		when Chunk2 =/= undefined ->
	WithSample = maps:update_with(chunk2_stale_sample,
			fun(Existing) -> Existing end, {Key, Value}, Acc),
	maps:update_with(chunk2_stale, fun(Count) -> Count + 1 end, 1, WithSample);
maybe_search_for_anomalies_cache_values_chunk2_stale({_, _}, Acc) ->
	Acc.

%% h1_missing: chunk1 is present but its hash h1 was never computed.
maybe_search_for_anomalies_cache_values_h1_missing({Key, #ar_mining_cache_value{
		h1 = undefined, chunk1 = Chunk1 } = Value}, Acc)
		when Chunk1 =/= undefined ->
	WithSample = maps:update_with(h1_missing_sample,
			fun(Existing) -> Existing end, {Key, Value}, Acc),
	maps:update_with(h1_missing, fun(Count) -> Count + 1 end, 1, WithSample);
maybe_search_for_anomalies_cache_values_h1_missing({_, _}, Acc) ->
	Acc.

%% h2_missing: chunk2 is present but its hash h2 was never computed.
maybe_search_for_anomalies_cache_values_h2_missing({Key, #ar_mining_cache_value{
		h2 = undefined, chunk2 = Chunk2 } = Value}, Acc)
		when Chunk2 =/= undefined ->
	WithSample = maps:update_with(h2_missing_sample,
			fun(Existing) -> Existing end, {Key, Value}, Acc),
	maps:update_with(h2_missing, fun(Count) -> Count + 1 end, 1, WithSample);
maybe_search_for_anomalies_cache_values_h2_missing({_, _}, Acc) ->
	Acc.
%% h1_passes_diff_checks_present: counts values still cached with
%% h1_passes_diff_checks set to true at session-drop time.
maybe_search_for_anomalies_cache_values_h1_passes_diff_checks_present({
	Key, #ar_mining_cache_value{
		h1_passes_diff_checks = true
	} = Value }, Anomalies) ->
	%% Inner update keeps the first recorded sample; outer update bumps the count.
	maps:update_with(h1_passes_diff_checks_present, fun(V) -> V + 1 end, 1,
		maps:update_with(h1_passes_diff_checks_present_sample, fun(V) -> V end,
				{Key, Value}, Anomalies));
maybe_search_for_anomalies_cache_values_h1_passes_diff_checks_present({_, _}, Anomalies) ->
	Anomalies.

%%%===================================================================
%%% Tests.
%%%===================================================================

-include_lib("eunit/include/eunit.hrl").

%% A fresh cache starts empty.
cache_size_test() ->
	Cache = new(test_cache),
	?assertEqual(0, cache_size(Cache)).

%% Adding a session is idempotent and costs no cache space.
add_session_test() ->
	Cache0 = new(test_cache),
	SessionKey0 = session0,
	Cache1 = add_session(SessionKey0, Cache0),
	?assert(session_exists(SessionKey0, Cache1)),
	?assertEqual(0, cache_size(Cache1)),
	%% Re-adding an existing key returns the cache unchanged, so this match holds.
	Cache1 = add_session(SessionKey0, Cache1),
	?assertEqual([SessionKey0], get_sessions(Cache1)).

%% Exceeding ?CACHE_SESSIONS_LIMIT drops the oldest session.
add_session_limit_test() ->
	Cache0 = new(test_cache),
	Cache1 = add_session(session0, Cache0),
	Cache2 = add_session(session1, Cache1),
	Cache3 = add_session(session2, Cache2),
	Cache4 = add_session(session3, Cache3),
	?assertEqual([session0, session1, session2, session3], get_sessions(Cache4)),
	?assertEqual(0, cache_size(Cache4)),
	Cache5 = add_session(session4, Cache4),
	?assertEqual([session1, session2, session3, session4], get_sessions(Cache5)),
	?assertEqual(0, cache_size(Cache5)).
%% Reserved space counts toward cache_size/1 and is consumed as cached
%% values are stored (see the {ok, Value1} branch of with_cached_value/4).
reserve_test() ->
	Cache0 = new(test_cache, 1024),
	SessionKey0 = session0,
	ChunkId = chunk0,
	Data = <<"chunk_data">>,
	ReservedSize = 100,
	%% Add session
	Cache1 = add_session(SessionKey0, Cache0),
	%% Reserve space
	{ok, Cache2} = reserve_for_session(SessionKey0, ReservedSize, Cache1),
	?assertEqual(ReservedSize, cache_size(Cache2)),
	?assertMatch({ok, ReservedSize}, reserved_size(SessionKey0, Cache2)),
	%% Add chunk1
	{ok, Cache3} = with_cached_value(ChunkId, SessionKey0, Cache2, fun(Value) ->
		{ok, Value#ar_mining_cache_value{ chunk1 = Data }}
	end),
	%% The total is unchanged: the stored bytes were paid out of the reservation.
	?assertEqual(ReservedSize, cache_size(Cache3)),
	?assertEqual(byte_size(Data), actual_cache_size(Cache3)),
	ExpectedReservedSize = ReservedSize - byte_size(Data),
	?assertMatch({ok, ExpectedReservedSize}, reserved_size(SessionKey0, Cache3)),
	%% Reserve more space (would exceed the 1024-byte limit)
	?assertMatch({error, cache_limit_exceeded},
			reserve_for_session(SessionKey0, 1024 + ReservedSize, Cache3)),
	%% Drop session
	Cache4 = drop_session(SessionKey0, Cache3),
	?assertEqual(0, cache_size(Cache4)).

%% release_for_session/3 shrinks only the reservation, not the cached bytes.
release_test() ->
	Cache0 = new(test_cache, 1024),
	SessionKey0 = session0,
	ChunkId = chunk0,
	Data = <<"chunk_data">>,
	ReservedSize = 100,
	%% Add session
	Cache1 = add_session(SessionKey0, Cache0),
	%% Reserve space
	{ok, Cache2} = reserve_for_session(SessionKey0, ReservedSize, Cache1),
	?assertEqual(ReservedSize, cache_size(Cache2)),
	?assertMatch({ok, ReservedSize}, reserved_size(SessionKey0, Cache2)),
	%% Add chunk1
	{ok, Cache3} = with_cached_value(ChunkId, SessionKey0, Cache2, fun(Value) ->
		{ok, Value#ar_mining_cache_value{ chunk1 = Data }}
	end),
	ExpectedReservedSize = ReservedSize - byte_size(Data),
	?assertMatch({ok, ExpectedReservedSize}, reserved_size(SessionKey0, Cache3)),
	?assertEqual(byte_size(Data), actual_cache_size(Cache3)),
	%% Release space
	{ok, Cache4} = release_for_session(SessionKey0, 10, Cache3),
	ExpectedReleasedReserveSize = ExpectedReservedSize - 10,
	?assertMatch({ok, ExpectedReleasedReserveSize}, reserved_size(SessionKey0, Cache4)),
	?assertEqual(byte_size(Data), actual_cache_size(Cache4)),
	%% Drop session
	Cache5 = drop_session(SessionKey0, Cache4),
	?assertEqual(0, cache_size(Cache5)).

%% Storing chunk1 and chunk2 each grow the cache by the chunk's byte size.
with_cached_value_add_chunk_test() ->
	Cache0 = new(test_cache, 1024),
	ChunkId = chunk0,
	Data = <<"chunk_data">>,
	SessionKey0 = session0,
	%% Add session
	Cache1 = add_session(SessionKey0, Cache0),
	%% Add chunk1
	{ok, Cache2} = with_cached_value(ChunkId, SessionKey0, Cache1, fun(Value) ->
		{ok, Value#ar_mining_cache_value{ chunk1 = Data }}
	end),
	?assertEqual(byte_size(Data), cache_size(Cache2)),
	%% Add chunk2
	{ok, Cache3} = with_cached_value(ChunkId, SessionKey0, Cache2, fun(Value) ->
		{ok, Value#ar_mining_cache_value{ chunk2 = Data }}
	end),
	?assertEqual(byte_size(Data) * 2, cache_size(Cache3)).
%% @doc Hashes are metadata: storing h1/h2 must not count towards cache size.
with_cached_value_add_hash_test() ->
	Session = session0,
	Chunk = chunk0,
	Hash = <<"hash">>,
	WithSession = add_session(Session, new(test_cache)),
	{ok, WithH1} = with_cached_value(Chunk, Session, WithSession,
		fun(Value) -> {ok, Value#ar_mining_cache_value{ h1 = Hash }} end),
	?assertEqual(0, cache_size(WithH1)),
	{ok, WithH2} = with_cached_value(Chunk, Session, WithH1,
		fun(Value) -> {ok, Value#ar_mining_cache_value{ h2 = Hash }} end),
	?assertEqual(0, cache_size(WithH2)).

%% @doc Returning {ok, drop} from the callback evicts the cached value and
%% releases its accounted size.
with_cached_value_drop_test() ->
	Session = session0,
	Chunk = chunk0,
	Payload = <<"chunk_data">>,
	WithSession = add_session(Session, new(test_cache, 1024)),
	{ok, Stored} = with_cached_value(Chunk, Session, WithSession,
		fun(Value) -> {ok, Value#ar_mining_cache_value{ chunk1 = Payload }} end),
	?assertEqual(byte_size(Payload), cache_size(Stored)),
	{ok, Dropped} = with_cached_value(Chunk, Session, Stored,
		fun(_Value) -> {ok, drop} end),
	?assertEqual(0, cache_size(Dropped)),
	?assertEqual(0, actual_cache_size(Dropped)).

%% @doc Lowering the limit below current usage blocks further writes but
%% keeps the data that is already cached.
set_limit_test() ->
	Session = session0,
	Payload = <<"chunk_data">>,
	WithSession = add_session(Session, new(test_cache)),
	{ok, Stored} = with_cached_value(chunk0, Session, WithSession,
		fun(Value) -> {ok, Value#ar_mining_cache_value{ chunk1 = Payload }} end),
	?assertEqual(byte_size(Payload), cache_size(Stored)),
	Limited = set_limit(5, Stored),
	{error, cache_limit_exceeded} = with_cached_value(chunk1, Session, Limited,
		fun(Value) -> {ok, Value#ar_mining_cache_value{ chunk1 = Payload }} end),
	?assertEqual(byte_size(Payload), cache_size(Limited)).
%% @doc Dropping a session removes the session key and all of its cached data.
drop_session_test() ->
	Cache0 = new(test_cache, 1024),
	ChunkId = chunk0,
	Data = <<"chunk_data">>,
	SessionKey0 = session0,
	%% Add session
	Cache1 = add_session(SessionKey0, Cache0),
	%% Add chunk1
	{ok, Cache2} = with_cached_value(ChunkId, SessionKey0, Cache1,
		fun(Value) -> {ok, Value#ar_mining_cache_value{ chunk1 = Data }} end),
	?assertEqual(byte_size(Data), cache_size(Cache2)),
	%% Drop session
	Cache3 = drop_session(SessionKey0, Cache2),
	?assertNot(session_exists(SessionKey0, Cache3)),
	?assertEqual(0, cache_size(Cache3)).

================================================
FILE: apps/arweave/src/ar_mining_hash.erl
================================================

%%% @doc Pool of hashing threads computing the H0, H1 and H2 mining hashes
%%% off the gen_server process; results are delivered asynchronously to the
%%% requesting worker.
-module(ar_mining_hash).

-behaviour(gen_server).

-export([start_link/0, compute_h0/2, compute_h1/2, compute_h2/2, garbage_collect/0]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_consensus.hrl").
-include("ar_mining.hrl").
-include_lib("eunit/include/eunit.hrl").

-record(state, {
	%% Round-robin queue of hashing thread pids.
	hashing_threads = queue:new(),
	%% Monitor reference => thread pid; used to restart crashed threads.
	hashing_thread_monitor_refs = #{}
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the gen_server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Compute H0 for the candidate; the result is delivered asynchronously
%% to Worker via ar_mining_worker:computed_hash/5.
compute_h0(Worker, Candidate) ->
	gen_server:cast(?MODULE, {compute, h0, Worker, Candidate}).

%% @doc Compute H1 for the candidate (asynchronous, see compute_h0/2).
compute_h1(Worker, Candidate) ->
	gen_server:cast(?MODULE, {compute, h1, Worker, Candidate}).

%% @doc Compute H2 for the candidate (asynchronous, see compute_h0/2).
compute_h2(Worker, Candidate) ->
	gen_server:cast(?MODULE, {compute, h2, Worker, Candidate}).

%% @doc Trigger an asynchronous garbage collection of the server process and
%% all hashing threads.
garbage_collect() ->
	gen_server:cast(?MODULE, garbage_collect).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Spawn Config#config.hashing_threads hashing threads, monitoring each.
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	State = lists:foldl(
		fun(_, Acc) -> start_hashing_thread(Acc) end,
		#state{},
		lists:seq(1, Config#config.hashing_threads)
	),
	{ok, State}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Forward the compute request to the next thread in the round-robin queue.
handle_cast({compute, HashType, Worker, Candidate},
		#state{ hashing_threads = Threads } = State) ->
	{Thread, Threads2} = pick_hashing_thread(Threads),
	Thread ! {compute, HashType, Worker, Candidate},
	{noreply, State#state{ hashing_threads = Threads2 }};
%% Asynchronously garbage collect the server and every hashing thread; the
%% completion notifications arrive later as {garbage_collect, ...} messages.
handle_cast(garbage_collect, State) ->
	erlang:garbage_collect(self(),
		[{async, {ar_mining_hash, self(), erlang:monotonic_time()}}]),
	queue:fold(
		fun(Thread, _) ->
			erlang:garbage_collect(Thread,
				[{async, {ar_mining_hash_worker, Thread, erlang:monotonic_time()}}])
		end,
		ok,
		State#state.hashing_threads
	),
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% Log garbage collection runs that failed or took longer than
%% ?GC_LOG_THRESHOLD milliseconds.
handle_info({garbage_collect, {Name, Pid, StartTime}, GCResult}, State) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime-StartTime, native, millisecond),
	case GCResult == false orelse ElapsedTime > ?GC_LOG_THRESHOLD of
		true ->
			?LOG_DEBUG([
				{event, mining_debug_garbage_collect},
				{process, Name},
				{pid, Pid},
				{gc_time, ElapsedTime},
				{gc_result, GCResult}]);
		false ->
			ok
	end,
	{noreply, State};
%% A hashing thread died; restart it if the monitor reference is ours.
handle_info({'DOWN', Ref, process, _, Reason},
		#state{ hashing_thread_monitor_refs = HashingThreadRefs } = State) ->
	case maps:is_key(Ref, HashingThreadRefs) of
		true ->
			{noreply, handle_hashing_thread_down(Ref, Reason, State)};
		_ ->
			{noreply, State}
	end;
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.
terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Spawn a linked hashing thread, monitor it, and record both the pid
%% and the monitor reference in the state.
start_hashing_thread(State) ->
	#state{ hashing_threads = Threads, hashing_thread_monitor_refs = Refs } = State,
	Thread = spawn_link(
		fun() ->
			hashing_thread(ar_packing_server:get_packing_state())
		end
	),
	Ref = monitor(process, Thread),
	Threads2 = queue:in(Thread, Threads),
	Refs2 = maps:put(Ref, Thread, Refs),
	State#state{ hashing_threads = Threads2, hashing_thread_monitor_refs = Refs2 }.

%% @doc Remove the dead thread from the state and start a replacement.
handle_hashing_thread_down(Ref, Reason,
		#state{ hashing_threads = Threads, hashing_thread_monitor_refs = Refs } = State) ->
	?LOG_WARNING([{event, mining_hashing_thread_down},
			{reason, io_lib:format("~p", [Reason])}]),
	Thread = maps:get(Ref, Refs),
	Refs2 = maps:remove(Ref, Refs),
	Threads2 = queue:delete(Thread, Threads),
	start_hashing_thread(State#state{ hashing_threads = Threads2,
			hashing_thread_monitor_refs = Refs2 }).
%% @doc Hashing thread main loop: wait for a compute request, compute the
%% requested hash via ar_block, report the result to the worker and recurse.
%% PackingState is fetched once at spawn time and reused for every H0.
hashing_thread(PackingState) ->
	receive
		{compute, h0, Worker, Candidate} ->
			#mining_candidate{
				mining_address = MiningAddress, nonce_limiter_output = Output,
				partition_number = PartitionNumber, seed = Seed,
				packing_difficulty = PackingDifficulty } = Candidate,
			H0 = ar_block:compute_h0(Output, PartitionNumber, Seed, MiningAddress,
					PackingDifficulty, PackingState),
			%% H0 carries no preimage, hence the undefined argument.
			ar_mining_worker:computed_hash(Worker, computed_h0, H0, undefined, Candidate),
			hashing_thread(PackingState);
		{compute, h1, Worker, Candidate} ->
			#mining_candidate{ h0 = H0, nonce = Nonce, chunk1 = Chunk1 } = Candidate,
			{H1, Preimage} = ar_block:compute_h1(H0, Nonce, Chunk1),
			ar_mining_worker:computed_hash(Worker, computed_h1, H1, Preimage, Candidate),
			hashing_thread(PackingState);
		{compute, h2, Worker, Candidate} ->
			#mining_candidate{ h0 = H0, h1 = H1, chunk2 = Chunk2 } = Candidate,
			{H2, Preimage} = ar_block:compute_h2(H1, Chunk2, H0),
			ar_mining_worker:computed_hash(Worker, computed_h2, H2, Preimage, Candidate),
			hashing_thread(PackingState)
	end.

%% @doc Take the next thread from the front of the queue and re-enqueue it at
%% the back (round-robin). Note: crashes on an empty queue, i.e. when the
%% server was configured with zero hashing threads.
pick_hashing_thread(Threads) ->
	{{value, Thread}, Threads2} = queue:out(Threads),
	{Thread, queue:in(Thread, Threads2)}.

================================================
FILE: apps/arweave/src/ar_mining_io.erl
================================================

%%% @doc Manages the IO threads that read recall ranges from the configured
%%% storage modules on behalf of the mining workers.
-module(ar_mining_io).

-behaviour(gen_server).

-export([start_link/0, start_link/1, set_largest_seen_upper_bound/1,
		get_packing/0, get_partitions/0, get_partitions/1,
		get_minable_storage_modules/0, read_recall_range/4,
		is_recall_range_readable/2, garbage_collect/0,
		get_replica_format_from_packing_difficulty/1]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("eunit/include/eunit.hrl").

%% How long (in milliseconds) a read recall range stays in an IO thread's
%% local cache before it becomes eligible for eviction.
-define(CACHE_TTL_MS, 2000).
-record(state, {
	%% miner | standalone; controls how read chunks are delivered back.
	mode = miner,
	%% Largest partition upper bound seen so far (monotonically increasing).
	partition_upper_bound = 0,
	%% Device => IO thread pid.
	io_threads = #{},
	%% Monitor reference => device; used to restart crashed IO threads.
	io_thread_monitor_refs = #{},
	store_id_to_device = #{},
	partition_to_store_ids = #{}
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the gen_server.
start_link() ->
	start_link(miner).

start_link(Mode) ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, Mode, []).

%% @doc Record the given partition upper bound if it is larger than any seen
%% so far. Returns true when the stored bound was updated.
set_largest_seen_upper_bound(PartitionUpperBound) ->
	gen_server:call(?MODULE, {set_largest_seen_upper_bound, PartitionUpperBound}, 60000).

get_partitions() ->
	gen_server:call(?MODULE, get_partitions, 60000).

%% @doc Ask the IO thread covering the recall range to read it and deliver
%% the chunks to Worker. Returns false when no thread covers the range.
read_recall_range(WhichChunk, Worker, Candidate, RecallRangeStart) ->
	gen_server:call(?MODULE, {read_recall_range, WhichChunk, Worker, Candidate,
			RecallRangeStart}, 60000).

%% @doc Return true when some IO thread covers the given recall range.
is_recall_range_readable(Candidate, RecallRangeStart) ->
	gen_server:call(?MODULE, {is_recall_range_readable, Candidate,
			RecallRangeStart}, 60000).

get_packing() ->
	%% ar_config:validate_storage_modules/1 ensures that we only mine against a single
	%% packing format. So we can grab any partition.
	case get_minable_storage_modules() of
		[] -> undefined;
		[{_, _, Packing} | _Rest] -> Packing
	end.
%% @doc Return the sorted list of unique {PartitionNumber, ModuleAddress,
%% PackingDifficulty} triples covered by the minable storage modules, limited
%% to partition numbers at or below the maximum implied by the upper bound.
get_partitions(PartitionUpperBound) when PartitionUpperBound =< 0 ->
	[];
get_partitions(PartitionUpperBound) ->
	Max = ar_node:get_max_partition_number(PartitionUpperBound),
	AllPartitions = lists:foldl(
		fun	(Module, Acc) ->
			Addr = ar_storage_module:module_address(Module),
			PackingDifficulty = ar_storage_module:module_packing_difficulty(Module),
			{Start, End} = ar_storage_module:module_range(Module, 0),
			Partitions = get_store_id_partitions({Start, End}, []),
			lists:foldl(
				fun(PartitionNumber, AccInner) ->
					sets:add_element({PartitionNumber, Addr, PackingDifficulty},
							AccInner)
				end,
				Acc,
				Partitions
			)
		end,
		sets:new(),
		get_minable_storage_modules()
	),
	FilteredPartitions = sets:filter(
		fun	({PartitionNumber, _Addr, _PackingDifficulty}) ->
			PartitionNumber =< Max
		end,
		AllPartitions
	),
	lists:sort(sets:to_list(FilteredPartitions)).

%% @doc Return the configured storage modules whose address matches the
%% configured mining address.
get_minable_storage_modules() ->
	{ok, Config} = arweave_config:get_env(),
	lists:filter(
		fun	(Module) ->
			ar_storage_module:module_address(Module) == Config#config.mining_addr
		end,
		Config#config.storage_modules
	).

garbage_collect() ->
	gen_server:cast(?MODULE, garbage_collect).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Defer the (possibly failing) state initialization to an
%% initialize_state cast so init/1 returns immediately.
init(Mode) ->
	?LOG_INFO([{event, mining_io_init}, {mode, Mode}]),
	gen_server:cast(self(), initialize_state),
	{ok, #state{ mode = Mode }}.
%% Only a strictly larger upper bound is stored; the reply tells the caller
%% whether an update happened.
handle_call({set_largest_seen_upper_bound, PartitionUpperBound}, _From, State) ->
	#state{ partition_upper_bound = CurrentUpperBound } = State,
	case PartitionUpperBound > CurrentUpperBound of
		true ->
			{reply, true, State#state{ partition_upper_bound = PartitionUpperBound }};
		false ->
			{reply, false, State}
	end;
handle_call(get_partitions, _From,
		#state{ partition_upper_bound = PartitionUpperBound } = State) ->
	{reply, get_partitions(PartitionUpperBound), State};
%% Dispatch the read to the IO thread with the largest synced intersection
%% with the range; reply false when no thread covers it.
handle_call({read_recall_range, WhichChunk, Worker, Candidate, RecallRangeStart},
		_From, State) ->
	#mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate,
	RangeEnd = RecallRangeStart + ar_block:get_recall_range_size(PackingDifficulty),
	ThreadFound = case find_thread(RecallRangeStart, RangeEnd, State) of
		not_found ->
			false;
		{Thread, StoreID} ->
			Thread ! {WhichChunk, {Worker, Candidate, RecallRangeStart, StoreID}},
			true
	end,
	{reply, ThreadFound, State};
%% Same thread lookup as read_recall_range but without dispatching the read.
handle_call({is_recall_range_readable, Candidate, RecallRangeStart}, _From, State) ->
	#mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate,
	RangeEnd = RecallRangeStart + ar_block:get_recall_range_size(PackingDifficulty),
	ThreadFound = case find_thread(RecallRangeStart, RangeEnd, State) of
		not_found ->
			false;
		{_, _} ->
			true
	end,
	{reply, ThreadFound, State};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Retry state initialization every second until the device locks are ready
%% and the store-id-to-device map can be built.
handle_cast(initialize_state, State) ->
	State3 = case ar_device_lock:is_ready() of
		false ->
			ar_util:cast_after(1000, self(), initialize_state),
			State;
		true ->
			case start_io_threads(State) of
				{error, _} ->
					ar_util:cast_after(1000, self(), initialize_state),
					State;
				State2 ->
					State2
			end
	end,
	{noreply, State3};
%% Asynchronously garbage collect the server and all IO threads; the results
%% arrive later as {garbage_collect, ...} info messages.
handle_cast(garbage_collect, State) ->
	erlang:garbage_collect(self(),
		[{async, {ar_mining_io, self(), erlang:monotonic_time()}}]),
	maps:fold(
		fun(_Key, Thread, _) ->
			erlang:garbage_collect(Thread,
				[{async, {ar_mining_io_worker, Thread, erlang:monotonic_time()}}])
		end,
		ok,
		State#state.io_threads
	),
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% Log garbage collections that failed or exceeded ?GC_LOG_THRESHOLD ms.
handle_info({garbage_collect, {Name, Pid, StartTime}, GCResult}, State) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime-StartTime, native, millisecond),
	case GCResult == false orelse ElapsedTime > ?GC_LOG_THRESHOLD of
		true ->
			?LOG_DEBUG([
				{event, mining_debug_garbage_collect},
				{process, Name},
				{pid, Pid},
				{gc_time, ElapsedTime},
				{gc_result, GCResult}]);
		false ->
			ok
	end,
	{noreply, State};
%% An IO thread died; restart it if the monitor reference is ours.
handle_info({'DOWN', Ref, process, _, Reason},
		#state{ io_thread_monitor_refs = IOThreadRefs } = State) ->
	case maps:is_key(Ref, IOThreadRefs) of
		true ->
			{noreply, handle_io_thread_down(Ref, Reason, State)};
		_ ->
			{noreply, State}
	end;
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Build the IO thread topology: one thread per system device, each
%% owning all store ids located on that device, plus the partition-to-store-ids
%% and store-id-to-device indices. Returns {error, Reason} when the device
%% map is not available yet (the caller retries).
start_io_threads(State) ->
	#state{ mode = Mode } = State,
	%% Step 1: Group StoreIDs by their system device
	case ar_device_lock:get_store_id_to_device_map() of
		{error, Reason} ->
			?LOG_ERROR([{event, error_initializing_mining_io_state},
					{module, ?MODULE},
					{reason, io_lib:format("~p", [Reason])}]),
			{error, Reason};
		StoreIDToDevice ->
			?LOG_INFO([{event, starting_mining_io_threads},
					{store_id_to_device, StoreIDToDevice}]),
			DeviceToStoreIDs = ar_util:invert_map(StoreIDToDevice),
			%% Step 2: Start IO threads for each device and populate map indices
			State2 = maps:fold(
				fun(Device, StoreIDs, StateAcc) ->
					#state{
						io_threads = Threads,
						io_thread_monitor_refs = Refs,
						partition_to_store_ids = PartitionToStoreIDs } = StateAcc,
					StoreIDs2 = sets:to_list(StoreIDs),
					Thread = start_io_thread(Mode, StoreIDs2),
					ThreadRef = monitor(process, Thread),
					PartitionToStoreIDs2 =
						map_partition_to_store_ids(StoreIDs2, PartitionToStoreIDs),
					StateAcc#state{
						io_threads = maps:put(Device, Thread, Threads),
						io_thread_monitor_refs = maps:put(ThreadRef, Device, Refs),
						partition_to_store_ids = PartitionToStoreIDs2
					}
				end,
				State,
				DeviceToStoreIDs
			),
			State2#state{ store_id_to_device = StoreIDToDevice }
	end.

%% @doc Spawn an IO thread (unlinked; the caller monitors it) that pre-opens
%% the chunk storage files for its store ids and enters the io_thread/3 loop.
start_io_thread(Mode, StoreIDs) ->
	Now = os:system_time(millisecond),
	spawn(
		fun() ->
			open_files(StoreIDs),
			io_thread(Mode, #{}, Now)
		end
	).

%% @doc Add the store id to the index entry of every partition its configured
%% range covers.
map_partition_to_store_ids([], PartitionToStoreIDs) ->
	PartitionToStoreIDs;
map_partition_to_store_ids([StoreID | StoreIDs], PartitionToStoreIDs) ->
	case ar_storage_module:get_by_id(StoreID) of
		not_found ->
			%% Occasionally happens in tests.
			?LOG_ERROR([{event, mining_storage_module_not_found},
					{store_id, StoreID}]),
			map_partition_to_store_ids(StoreIDs, PartitionToStoreIDs);
		StorageModule ->
			{Start, End} = ar_storage_module:module_range(StorageModule, 0),
			Partitions = get_store_id_partitions({Start, End}, []),
			PartitionToStoreIDs2 = lists:foldl(
				fun(Partition, Acc) ->
					maps:update_with(Partition,
						fun(PartitionStoreIDs) -> [StoreID | PartitionStoreIDs] end,
						[StoreID], Acc)
				end,
				PartitionToStoreIDs,
				Partitions),
			map_partition_to_store_ids(StoreIDs, PartitionToStoreIDs2)
	end.

%% Accumulate the partition numbers covered by the byte range, stepping one
%% partition size at a time.
get_store_id_partitions({Start, End}, Partitions) when Start >= End ->
	Partitions;
get_store_id_partitions({Start, End}, Partitions) ->
	PartitionNumber = ar_node:get_partition_number(Start),
	get_store_id_partitions({Start + ar_block:partition_size(), End},
			[PartitionNumber | Partitions]).

%% Pre-open the chunk storage files for every store id except the default one.
open_files(StoreIDs) ->
	lists:foreach(
		fun(StoreID) ->
			case StoreID of
				?DEFAULT_MODULE -> ok;
				_ -> ar_chunk_storage:open_files(StoreID)
			end
		end,
		StoreIDs).

%% @doc Restart the IO thread of the device whose thread died, rebuilding the
%% thread's store id list from the saved store-id-to-device map.
handle_io_thread_down(Ref, Reason, State) ->
	#state{ mode = Mode, io_threads = Threads, io_thread_monitor_refs = Refs,
			store_id_to_device = StoreIDToDevice } = State,
	?LOG_WARNING([{event, mining_io_thread_down},
			{reason, io_lib:format("~p", [Reason])}]),
	Device = maps:get(Ref, Refs),
	Refs2 = maps:remove(Ref, Refs),
	Threads2 = maps:remove(Device, Threads),
	DeviceToStoreIDs = ar_util:invert_map(StoreIDToDevice),
	StoreIDs = maps:get(Device, DeviceToStoreIDs, sets:new()),
	Thread = start_io_thread(Mode, sets:to_list(StoreIDs)),
	ThreadRef = monitor(process, Thread),
	State#state{
		io_threads = maps:put(Device, Thread, Threads2),
		io_thread_monitor_refs = maps:put(ThreadRef, Device, Refs2)
	}.
%% @doc IO thread main loop: read the requested recall range, deliver the
%% chunks, then periodically prune the local recall-range cache.
io_thread(Mode, Cache, LastClearTime) ->
	receive
		{WhichChunk, {Worker, Candidate, RecallRangeStart, StoreID}} ->
			{ChunkOffsets, Cache2} = get_chunks(Mode, WhichChunk, Candidate,
					RecallRangeStart, StoreID, Cache),
			chunks_read(Mode, Worker, WhichChunk, Candidate, RecallRangeStart,
					ChunkOffsets),
			{Cache3, LastClearTime2} = maybe_clear_cached_chunks(Cache2, LastClearTime),
			io_thread(Mode, Cache3, LastClearTime2)
	end.

%% Deliver the read chunks either through ar_mining_worker (miner mode) or as
%% a plain message to the worker pid (standalone mode).
chunks_read(miner, Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets) ->
	ar_mining_worker:chunks_read(
		Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets);
chunks_read(standalone, Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets) ->
	Worker ! {chunks_read, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets}.

%% @doc Collect the sub-intervals of [Start, End) in the default storage
%% module that are synced with the packing we mine with. Interval checks are
%% only implemented for the default store; other stores get a marker atom.
get_packed_intervals(Start, End, MiningAddress, PackingDifficulty, ?DEFAULT_MODULE,
		Intervals) ->
	ReplicaFormat = get_replica_format_from_packing_difficulty(PackingDifficulty),
	Packing = ar_block:get_packing(PackingDifficulty, MiningAddress, ReplicaFormat),
	case ar_sync_record:get_next_synced_interval(Start, End, Packing, ar_data_sync,
			?DEFAULT_MODULE) of
		not_found ->
			Intervals;
		{Right, Left} ->
			get_packed_intervals(Right, End, MiningAddress, PackingDifficulty,
					?DEFAULT_MODULE, ar_intervals:add(Intervals, Right, Left))
	end;
get_packed_intervals(_Start, _End, _MiningAddr, _PackingDifficulty, _StoreID, _Intervals) ->
	no_interval_check_implemented_for_non_default_store.

%% The protocol allows composite packing with the packing difficulty 25 for now,
%% but it is not practical and it is convenient to exclude it from the range of
%% supported storage module configurations and treat it as the 2.9 replication format
%% in the mining process.
get_replica_format_from_packing_difficulty(?REPLICA_2_9_PACKING_DIFFICULTY) ->
	1;
get_replica_format_from_packing_difficulty(_PackingDifficulty) ->
	0.
%% @doc Evict expired entries from the IO thread's recall-range cache. The
%% scan itself is throttled: it only runs when more than ?CACHE_TTL_MS / 2
%% milliseconds have passed since the previous scan. Returns the (possibly
%% pruned) cache and the timestamp of the last completed scan.
maybe_clear_cached_chunks(Cache, LastClearTime) ->
	Now = os:system_time(millisecond),
	case (Now - LastClearTime) > (?CACHE_TTL_MS div 2) of
		true ->
			CutoffTime = Now - ?CACHE_TTL_MS,
			%% Keep only the ranges cached after the cutoff. (The redundant
			%% case-on-boolean was collapsed into the boolean itself.)
			Cache2 = maps:filter(
				fun(_CachedRangeStart, {CachedTime, _ChunkOffsets}) ->
					CachedTime > CutoffTime
				end,
				Cache),
			{Cache2, Now};
		false ->
			{Cache, LastClearTime}
	end.

%% @doc When we're reading a range for a CM peer we'll cache it temporarily in case
%% that peer has broken up the batch of H1s into multiple requests. The temporary cache
%% prevents us from reading the same range from disk multiple times.
%%
%% However if the request is from our local miner there's no need to cache since the H1
%% batch is always handled all at once.
get_chunks(Mode, WhichChunk, Candidate, RangeStart, StoreID, Cache) ->
	case Candidate#mining_candidate.cm_lead_peer of
		not_set ->
			ChunkOffsets = read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID),
			{ChunkOffsets, Cache};
		_ ->
			cached_read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID, Cache)
	end.
%% @doc Serve a recall range from the IO thread cache, reading it from disk
%% (and caching it with the current timestamp) on a miss.
cached_read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID, Cache) ->
	Now = os:system_time(millisecond),
	case maps:get(RangeStart, Cache, not_found) of
		not_found ->
			ChunkOffsets = read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID),
			Cache2 = maps:put(RangeStart, {Now, ChunkOffsets}, Cache),
			{ChunkOffsets, Cache2};
		{_CachedTime, ChunkOffsets} ->
			?LOG_DEBUG([{event, mining_debug_read_cached_recall_range},
				{pid, self()},
				{range_start, RangeStart},
				{store_id, StoreID},
				{partition_number, Candidate#mining_candidate.partition_number},
				{partition_number2, Candidate#mining_candidate.partition_number2},
				{cm_peer, ar_util:format_peer(Candidate#mining_candidate.cm_lead_peer)},
				{cache_ref, Candidate#mining_candidate.cache_ref},
				{session, ar_nonce_limiter:encode_session_key(
						Candidate#mining_candidate.session_key)}]),
			{ChunkOffsets, Cache}
	end.

%% @doc Read one recall range from chunk storage and, for the default store,
%% drop the chunks that are not synced with the packing we mine with.
read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID) ->
	StartTime = erlang:monotonic_time(),
	#mining_candidate{ mining_address = MiningAddress,
			packing_difficulty = PackingDifficulty } = Candidate,
	RecallRangeSize = ar_block:get_recall_range_size(PackingDifficulty),
	Intervals = get_packed_intervals(RangeStart, RangeStart + RecallRangeSize,
			MiningAddress, PackingDifficulty, StoreID, ar_intervals:new()),
	ChunkOffsets = ar_chunk_storage:get_range(RangeStart, RecallRangeSize, StoreID),
	ChunkOffsets2 = filter_by_packing(ChunkOffsets, Intervals, StoreID),
	%% NOTE: the read rate is logged for the unfiltered read (everything read
	%% from disk), not only the chunks that passed the packing filter.
	log_read_range(Mode, Candidate, WhichChunk, length(ChunkOffsets), StartTime),
	ChunkOffsets2.

%% Keep only chunk offsets that fall inside the synced intervals; chunks from
%% a non-default store are returned unfiltered.
filter_by_packing([], _Intervals, _StoreID) ->
	[];
filter_by_packing([{EndOffset, Chunk} | ChunkOffsets], Intervals,
		?DEFAULT_MODULE = StoreID) ->
	case ar_intervals:is_inside(Intervals, EndOffset) of
		false ->
			filter_by_packing(ChunkOffsets, Intervals, StoreID);
		true ->
			[{EndOffset, Chunk} | filter_by_packing(ChunkOffsets, Intervals, StoreID)]
	end;
filter_by_packing(ChunkOffsets, _Intervals, _StoreID) ->
	ChunkOffsets.
%% @doc Report the raw read rate for the partition the range was read for.
%% Standalone mode does not report mining stats.
log_read_range(standalone, _Candidate, _WhichChunk, _FoundChunks, _StartTime) ->
	ok;
log_read_range(_Mode, Candidate, WhichChunk, FoundChunks, StartTime) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime-StartTime, native, millisecond),
	%% The /4 converts chunks to MiB, implying 4 chunks per MiB
	%% (256 KiB chunks) — TODO confirm against the chunk size constant.
	ReadRate = case ElapsedTime > 0 of
		true -> (FoundChunks * 1000 div 4) div ElapsedTime; %% MiB per second
		false -> 0
	end,
	PartitionNumber = case WhichChunk of
		chunk1 -> Candidate#mining_candidate.partition_number;
		chunk2 -> Candidate#mining_candidate.partition_number2
	end,
	ar_mining_stats:raw_read_rate(PartitionNumber, ReadRate),
	% ?LOG_DEBUG([{event, mining_debug_read_recall_range},
	% 		{thread, self()},
	% 		{elapsed_time_ms, ElapsedTime},
	% 		{chunks_read, FoundChunks},
	% 		{mib_read, FoundChunks / 4},
	% 		{read_rate_mibps, ReadRate},
	% 		{chunk, WhichChunk},
	% 		{partition_number, PartitionNumber}]),
	ok.

%% @doc Find the IO thread serving the store id with the largest synced
%% intersection with the given range. Returns {Thread, StoreID} or not_found.
find_thread(RangeStart, RangeEnd, State) ->
	PartitionNumber = ar_node:get_partition_number(RangeStart),
	StoreIDs = maps:get(PartitionNumber, State#state.partition_to_store_ids, not_found),
	StoreID = find_largest_intersection(StoreIDs, RangeStart, RangeEnd, 0, not_found),
	Device = maps:get(StoreID, State#state.store_id_to_device, not_found),
	Thread = maps:get(Device, State#state.io_threads, not_found),
	case Thread of
		not_found -> not_found;
		_ -> {Thread, StoreID}
	end.

%% Return the store id with the strictly largest intersection size, or MaxKey
%% (initially not_found) when no store beats the running maximum.
find_largest_intersection(not_found, _RangeStart, _RangeEnd, _Max, _MaxKey) ->
	not_found;
find_largest_intersection([StoreID | StoreIDs], RangeStart, RangeEnd, Max, MaxKey) ->
	I = ar_sync_record:get_intersection_size(RangeEnd, RangeStart, ar_chunk_storage,
			StoreID),
	case I > Max of
		true -> find_largest_intersection(StoreIDs, RangeStart, RangeEnd, I, StoreID);
		false -> find_largest_intersection(StoreIDs, RangeStart, RangeEnd, Max, MaxKey)
	end;
find_largest_intersection([], _RangeStart, _RangeEnd, _Max, MaxKey) ->
	MaxKey.
================================================
FILE: apps/arweave/src/ar_mining_server.erl
================================================

%%% @doc The 2.6 mining server.
-module(ar_mining_server).

-behaviour(ar_mining_server_behaviour).
-behaviour(gen_server).

-export([start_link/0, start_mining/1, is_paused/0, set_difficulty/1,
		set_merkle_rebase_threshold/1, set_height/1, compute_h2_for_peer/1,
		prepare_and_post_solution/1, prepare_poa/3, get_recall_bytes/5,
		get_recall_range/3, get_recall_range/5, active_sessions/0,
		encode_sessions/1, add_pool_job/6, is_one_chunk_solution/1,
		fetch_poa_from_peers/2, log_prepare_solution_failure/5,
		get_packing_difficulty/1, get_packing_type/1]).
-export([pause/0]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_data_discovery.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("stdlib/include/ms_transform.hrl").

-record(state, {
	%% True until start_mining is processed; a paused server distributes no
	%% new mining output.
	paused = true,
	%% {Partition, PackingDifficulty} => registered mining worker name.
	workers = #{},
	active_sessions = sets:new(),
	seeds = #{},
	diff_pair = not_set,
	chunk_cache_limit = 0,
	gc_frequency_ms = undefined,
	gc_process_ref = undefined,
	merkle_rebase_threshold = infinity,
	is_pool_client = false,
	allow_composite_packing = false,
	allow_replica_2_9_mining = false,
	packing_difficulty = 0
}).

-ifdef(AR_TEST).
-define(POST_2_8_COMPOSITE_PACKING_DELAY_BLOCKS, 0).
%% NOTE(review): the test build uses a 100 MiB minimum cache limit while the
%% production build below uses 1 byte — this looks inverted; confirm intent.
-define(MINIMUM_CACHE_LIMIT_BYTES, 100 * ?MiB).
-else.
-define(POST_2_8_COMPOSITE_PACKING_DELAY_BLOCKS, 10).
-define(MINIMUM_CACHE_LIMIT_BYTES, 1).
-endif.

%% The number of concurrent VDF steps per partition that will fit in the cache. The higher this
%% number the more memory the cache can use (roughly ?IDEAL_STEPS_PER_PARTITION * 5 MiB per
%% partition).
%% Also the higher the number the more the miner is able to respond to temporary
%% hashrate slowdowns (e.g. a system process temporarily consumes all CPU) or temporary VDF
%% step spikes (e.g. the node validates a block with an advanced VDF step and unlocks many
%% VDF steps at once) without losing hashrate.
-define(IDEAL_STEPS_PER_PARTITION, 20).

%% Milliseconds to wait for peers when fetching a proof of access.
-define(FETCH_POA_FROM_PEERS_TIMEOUT_MS, 10000).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the gen_server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Start mining. Does nothing if the mining server is already running.
start_mining(Args) ->
	gen_server:cast(?MODULE, {start_mining, Args}).

%% @doc Return true if the mining server is paused.
is_paused() ->
	gen_server:call(?MODULE, is_paused, 60_000).

%% @doc Compute H2 for a remote peer (used in coordinated mining).
compute_h2_for_peer(Candidate) ->
	gen_server:cast(?MODULE, {compute_h2_for_peer, Candidate}).

%% @doc Set the new mining difficulty. We do not recalculate it inside the mining
%% server because we want to completely detach the mining server from the block
%% ordering. The previous block is chosen only after the mining solution is found (if
%% we choose it in advance we may miss a better option arriving in the process).
%% Also, a mining session may (in practice, almost always will) span several blocks.
set_difficulty(DiffPair) ->
	gen_server:cast(?MODULE, {set_difficulty, DiffPair}).

set_merkle_rebase_threshold(Threshold) ->
	gen_server:cast(?MODULE, {set_merkle_rebase_threshold, Threshold}).

set_height(Height) ->
	gen_server:cast(?MODULE, {set_height, Height}).

%% @doc Add a pool job to the mining queue.
add_pool_job(SessionKey, StepNumber, Output, PartitionUpperBound, Seed, PartialDiff) ->
	Args = {SessionKey, StepNumber, Output, PartitionUpperBound, Seed, PartialDiff},
	gen_server:cast(?MODULE, {add_pool_job, Args}).

prepare_and_post_solution(CandidateOrSolution) ->
	gen_server:cast(?MODULE, {prepare_and_post_solution, CandidateOrSolution}).

%% @doc Return the set of currently active mining session keys.
active_sessions() ->
	gen_server:call(?MODULE, active_sessions).

encode_sessions(Sessions) ->
	lists:map(fun(SessionKey) ->
		ar_nonce_limiter:encode_session_key(SessionKey)
	end, Sessions).

%% @doc A solution is a "one chunk" solution when no second recall byte was used.
is_one_chunk_solution(Solution) ->
	Solution#mining_solution.recall_byte2 == undefined.

%% @doc Use this function every time the miner finds a solution but fails to prepare a block.
%% It may happen to a standalone miner, a worker in a coordinated mining setup, the exit node,
%% a pool worker in a pool or the pool server.
-spec log_prepare_solution_failure(
		Solution :: #mining_solution{},
		FailureType :: stale | rejected,
		FailureReason :: atom(),
		Source :: atom(),
		AdditionalLogData :: list({atom(), term()})
) -> Ret :: ok.
-ifdef(AR_TEST).
%% In the test build, stale-solution failures are expected noise and dropped.
log_prepare_solution_failure(_Solution, stale, _FailureReason, _Source, _AdditionalLogData) ->
	ok;
log_prepare_solution_failure(Solution, FailureType, FailureReason, Source,
		AdditionalLogData) ->
	log_prepare_solution_failure2(Solution, FailureType, FailureReason, Source,
			AdditionalLogData).
-else.
log_prepare_solution_failure(Solution, FailureType, FailureReason, Source,
		AdditionalLogData) ->
	log_prepare_solution_failure2(Solution, FailureType, FailureReason, Source,
			AdditionalLogData).
-endif.

%% Emit the solution event, console message, error log and prometheus metric
%% for a failed solution.
log_prepare_solution_failure2(Solution, FailureType, FailureReason, Source,
		AdditionalLogData) ->
	#mining_solution{
		solution_hash = SolutionH, packing_difficulty = PackingDifficulty } = Solution,
	ar_events:send(solution, {FailureType, #{
		solution_hash => SolutionH,
		reason => FailureReason,
		source => Source
	}}),
	ar:console("~nFailed to prepare block from the mining solution..
Reason: ~p~n", [FailureReason]),
	?LOG_ERROR([{event, failed_to_prepare_block_from_mining_solution},
			{reason, FailureReason},
			{solution_hash, ar_util:safe_encode(SolutionH)},
			{packing_difficulty, PackingDifficulty}
			| AdditionalLogData]),
	prometheus_gauge:inc(mining_solution, [FailureReason]).

%% @doc Map a packing term to its packing difficulty.
-spec get_packing_difficulty(Packing :: ar_storage_module:packing()) ->
		PackingDifficulty :: non_neg_integer().
get_packing_difficulty({composite, _, Difficulty}) ->
	Difficulty;
get_packing_difficulty({replica_2_9, _}) ->
	?REPLICA_2_9_PACKING_DIFFICULTY;
get_packing_difficulty(_) ->
	0.

%% @doc Map a packing term to the atom naming its packing type.
-spec get_packing_type(Packing :: ar_storage_module:packing()) ->
		PackingType :: atom().
get_packing_type({composite, _, _}) ->
	composite;
get_packing_type({replica_2_9, _}) ->
	replica_2_9;
get_packing_type({spora_2_6, _}) ->
	spora_2_6;
get_packing_type(Packing) ->
	Packing.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Subscribe to nonce limiter events and build one mining worker name
%% per {partition, packing difficulty} pair covered by the minable storage
%% modules.
init([]) ->
	ok = ar_events:subscribe(nonce_limiter),
	Partitions = ar_mining_io:get_partitions(infinity),
	Packing = ar_mining_io:get_packing(),
	PackingDifficulty = get_packing_difficulty(Packing),
	Workers = lists:foldl(
		fun({Partition, _Addr, Difficulty}, Acc) ->
			maps:put({Partition, Difficulty},
					ar_mining_worker:name(Partition, Difficulty), Acc)
		end,
		#{},
		Partitions
	),
	?LOG_INFO([{event, mining_server_init},
			{packing, ar_serialize:encode_packing(Packing, false)},
			{partitions, [ Partition || {Partition, _, _} <- Partitions]}]),
	{ok, #state{
		workers = Workers,
		is_pool_client = ar_pool:is_client(),
		packing_difficulty = PackingDifficulty
	}}.

handle_call(active_sessions, _From, State) ->
	{reply, State#state.active_sessions, State};
handle_call(is_paused, _From, State) ->
	{reply, State#state.paused, State};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Pause mining: mark the server paused and push an infinite difficulty to the
%% workers so no further solutions can be found while pending tasks drain.
handle_cast(pause, State) ->
	ar:console("Pausing mining.~n"),
	?LOG_INFO([{event, pause_mining}]),
	ar_mining_stats:mining_paused(),
	%% Setting paused to true allows all pending tasks to complete, but prevents new output to be
	%% distributed. Setting diff to infinity ensures that no solutions are found.
	State2 = set_difficulty({infinity, infinity}, State),
	{noreply, State2#state{ paused = true }};
%% Already mining — ignore a duplicate start request.
%% (Fix: the argument was previously bound as `Args` without being used,
%% producing an unused-variable compiler warning.)
handle_cast({start_mining, _Args}, #state{ paused = false } = State) ->
	{noreply, State};
%% Start mining: reset every worker's session with the new difficulty pair and
%% reinitialize the session set and fork-gated packing flags for Height.
handle_cast({start_mining, Args}, State) ->
	{DiffPair, RebaseThreshold, Height} = Args,
	ar:console("Starting mining.~n"),
	?LOG_INFO([{event, start_mining}, {difficulty, DiffPair},
			{rebase_threshold, RebaseThreshold}, {height, Height}]),
	ar_mining_stats:start_performance_reports(),
	maps:foreach(
		fun(_Partition, Worker) ->
			ar_mining_worker:reset_mining_session(Worker, DiffPair)
		end,
		State#state.workers
	),
	{noreply, State#state{
		paused = false,
		active_sessions = sets:new(),
		diff_pair = DiffPair,
		merkle_rebase_threshold = RebaseThreshold,
		allow_composite_packing = allow_composite_packing(Height),
		allow_replica_2_9_mining = allow_replica_2_9_mining(Height)
	}};
handle_cast({set_difficulty, DiffPair}, State) ->
	State2 = set_difficulty(DiffPair, State),
	{noreply, State2};
handle_cast({set_merkle_rebase_threshold, Threshold}, State) ->
	{noreply, State#state{ merkle_rebase_threshold = Threshold }};
%% Re-evaluate the fork-gated packing permissions at the new height.
handle_cast({set_height, Height}, State) ->
	{noreply, State#state{
		allow_composite_packing = allow_composite_packing(Height),
		allow_replica_2_9_mining = allow_replica_2_9_mining(Height) }};
%% A pool job carries its own seed; record it before processing the VDF output.
handle_cast({add_pool_job, Args}, State) ->
	{SessionKey, StepNumber, Output, PartitionUpperBound, Seed, PartialDiff} = Args,
	State2 = set_seed(SessionKey, Seed, State),
	handle_computed_output(
			SessionKey, StepNumber, Output, PartitionUpperBound, PartialDiff, State2);
%% Route an H2 computation request (from a coordinated-mining peer) to the
%% worker owning the second partition; silently drop it if we do not mine it.
handle_cast({compute_h2_for_peer, Candidate}, State) ->
	#mining_candidate{
		partition_number2 = Partition2,
		packing_difficulty = PackingDifficulty
	} = Candidate,
	case get_worker({Partition2, PackingDifficulty}, State) of
		not_found ->
			ok;
		Worker ->
			ar_mining_worker:add_task(Worker, compute_h2_for_peer, Candidate)
	end,
	{noreply, State};
handle_cast({prepare_and_post_solution, _}, #state{ paused = true } = State) ->
	%% Ignore solutions when the server is paused. Should only happen in tests.
	{noreply, State};
handle_cast({prepare_and_post_solution, CandidateOrSolution}, State) ->
	prepare_and_post_solution(CandidateOrSolution, State),
	{noreply, State};
handle_cast({manual_garbage_collect, Ref}, #state{ gc_process_ref = Ref } = State) ->
	%% Reading recall ranges from disk causes a large amount of binary data to be allocated and
	%% references to that data is spread among all the different mining processes. Because of
	%% this it can take the default garbage collection to clean up all references and
	%% deallocate the memory - which in turn can cause memory to be exhausted.
	%%
	%% To address this the mining server will force a garbage collection on all mining
	%% processes every time we process a few VDF steps. The exact number of VDF steps is
	%% determined by the chunk cache size limit in order to roughly align garbage collection
	%% with when we expect all references to a recall range's chunks to be evicted from
	%% the cache.
	?LOG_DEBUG([{event, mining_debug_garbage_collect_start},
			{frequency, State#state.gc_frequency_ms}]),
	ar_mining_io:garbage_collect(),
	ar_mining_hash:garbage_collect(),
	erlang:garbage_collect(self(), [{async, erlang:monotonic_time()}]),
	maps:foreach(
		fun(_Partition, Worker) ->
			ar_mining_worker:garbage_collect(Worker)
		end,
		State#state.workers
	),
	ar_coordination:garbage_collect(),
	%% Re-arm the timer; the Ref guard above ensures only the current
	%% generation of the timer loop keeps firing.
	ar_util:cast_after(State#state.gc_frequency_ms, ?MODULE, {manual_garbage_collect, Ref}),
	{noreply, State};
handle_cast({manual_garbage_collect, _}, State) ->
	%% Does not originate from the running instance of the server; happens in tests.
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% VDF outputs are dropped entirely while paused.
handle_info({event, nonce_limiter, {computed_output, _Args}},
		#state{ paused = true } = State) ->
	{noreply, State};
handle_info({event, nonce_limiter, {computed_output, Args}}, State) ->
	case ar_pool:is_client() of
		true ->
			%% Ignore VDF events because we are receiving jobs from the pool.
			{noreply, State};
		false ->
			{SessionKey, StepNumber, Output, PartitionUpperBound} = Args,
			handle_computed_output(
					SessionKey, StepNumber, Output, PartitionUpperBound, not_set, State)
	end;
handle_info({event, nonce_limiter, {valid, _}}, State) ->
	%% Silently ignore validation messages
	{noreply, State};
handle_info({event, nonce_limiter, Message}, State) ->
	?LOG_DEBUG([{event, mining_debug_skipping_nonce_limiter}, {message, Message}]),
	{noreply, State};
%% Completion notification for the async garbage collection requested in the
%% manual_garbage_collect cast; log slow or failed collections only.
handle_info({garbage_collect, StartTime, GCResult}, State) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime-StartTime, native, millisecond),
	case GCResult == false orelse ElapsedTime > ?GC_LOG_THRESHOLD of
		true ->
			?LOG_DEBUG([
				{event, mining_debug_garbage_collect},
				{process, ar_mining_server},
				{pid, self()},
				{gc_time, ElapsedTime},
				{gc_result, GCResult}]);
		false ->
			ok
	end,
	{noreply, State};
handle_info({fetched_last_moment_proof, _}, State) ->
	%% This is a no-op to handle "slow" response from peers that were queried by
	%% `fetch_poa_from_peers`. Only the first peer to respond with a PoA will be handled,
	%% all other responses will fall through to here an be ignored.
	{noreply, State};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate}, {reason, Reason}]),
	ok.
%%%=================================================================== allow_composite_packing(Height) -> Height - ?POST_2_8_COMPOSITE_PACKING_DELAY_BLOCKS >= ar_fork:height_2_8() andalso Height - ?COMPOSITE_PACKING_EXPIRATION_PERIOD_BLOCKS < ar_fork:height_2_9(). allow_replica_2_9_mining(Height) -> Height >= ar_fork:height_2_9(). get_worker(Key, State) -> maps:get(Key, State#state.workers, not_found). set_difficulty(DiffPair, State) -> maps:foreach( fun(_Partition, Worker) -> ar_mining_worker:set_difficulty(Worker, DiffPair) end, State#state.workers ), State#state{ diff_pair = DiffPair }. maybe_update_sessions(SessionKey, State) -> CurrentActiveSessions = State#state.active_sessions, case sets:is_element(SessionKey, CurrentActiveSessions) of true -> State; false -> NewActiveSessions = build_active_session_set(SessionKey, CurrentActiveSessions), case sets:to_list(sets:subtract(NewActiveSessions, CurrentActiveSessions)) of [] -> State; _ -> update_sessions(NewActiveSessions, CurrentActiveSessions, State) end end. build_active_session_set(SessionKey, CurrentActiveSessions) -> CandidateSessions = [SessionKey | sets:to_list(CurrentActiveSessions)], SortedSessions = lists:sort( fun({_, StartIntervalA, _}, {_, StartIntervalB, _}) -> StartIntervalA > StartIntervalB end, CandidateSessions), build_active_session_set(SortedSessions). build_active_session_set([A, B | _]) -> sets:from_list([A, B]); build_active_session_set([A]) -> sets:from_list([A]); build_active_session_set([]) -> sets:new(). 
update_sessions(NewActiveSessions, CurrentActiveSessions, State) -> AddedSessions = sets:to_list(sets:subtract(NewActiveSessions, CurrentActiveSessions)), RemovedSessions = sets:to_list(sets:subtract(CurrentActiveSessions, NewActiveSessions)), maps:foreach( fun(_Partition, Worker) -> ar_mining_worker:set_sessions(Worker, sets:to_list(NewActiveSessions)) end, State#state.workers ), State2 = add_sessions(AddedSessions, State), State3 = remove_sessions(RemovedSessions, State2), State3#state{ active_sessions = NewActiveSessions }. add_sessions([], State) -> State; add_sessions([SessionKey | AddedSessions], State) -> {NextSeed, StartIntervalNumber, NextVDFDifficulty} = SessionKey, ar:console("Starting new mining session: " "next entropy nonce: ~s, interval number: ~B, next vdf difficulty: ~B.~n", [ar_util:safe_encode(NextSeed), StartIntervalNumber, NextVDFDifficulty]), ?LOG_INFO([{event, new_mining_session}, {session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]), add_sessions(AddedSessions, add_seed(SessionKey, State)). remove_sessions([], State) -> State; remove_sessions([SessionKey | RemovedSessions], State) -> remove_sessions(RemovedSessions, remove_seed(SessionKey, State)). get_seed(SessionKey, State) -> maps:get(SessionKey, State#state.seeds, not_found). set_seed(SessionKey, Seed, State) -> State#state{ seeds = maps:put(SessionKey, Seed, State#state.seeds) }. remove_seed(SessionKey, State) -> State#state{ seeds = maps:remove(SessionKey, State#state.seeds) }. add_seed(SessionKey, State) -> case get_seed(SessionKey, State) of not_found -> Session = ar_nonce_limiter:get_session(SessionKey), case Session of not_found -> ?LOG_ERROR([{event, mining_session_not_found}, {session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]), State; _ -> set_seed(SessionKey, Session#vdf_session.seed, State) end; _ -> State end. update_cache_limits(State) -> NumActivePartitions = length(ar_mining_io:get_partitions()), update_cache_limits(NumActivePartitions, State). 
update_cache_limits(0, State) -> State; update_cache_limits(NumActivePartitions, State) -> Limits = calculate_cache_limits(NumActivePartitions, State#state.packing_difficulty), maybe_update_cache_limits(Limits, State). calculate_cache_limits(NumActivePartitions, PackingDifficulty) -> IdealRangesPerStep = 2, RecallRangeSize = ar_block:get_recall_range_size(PackingDifficulty), MinimumCacheLimitBytes = max( ?MINIMUM_CACHE_LIMIT_BYTES, (?IDEAL_STEPS_PER_PARTITION * IdealRangesPerStep * RecallRangeSize * NumActivePartitions) ), {ok, Config} = arweave_config:get_env(), OverallCacheLimitBytes = case Config#config.mining_cache_size_mb of undefined -> MinimumCacheLimitBytes; N -> N * ?MiB end, %% We shard the chunk cache across every active worker. Only workers that mine a partition %% included in the current weave are active. PartitionCacheLimitBytes = OverallCacheLimitBytes div NumActivePartitions, %% Allow enough compute_h0 tasks to be queued to completely refill the chunk cache. VDFQueueLimit = max( 1, PartitionCacheLimitBytes div (2 * ar_block:get_recall_range_size(PackingDifficulty)) ), GarbageCollectionFrequency = 4 * VDFQueueLimit * 1000, {MinimumCacheLimitBytes, OverallCacheLimitBytes, PartitionCacheLimitBytes, VDFQueueLimit, GarbageCollectionFrequency}. 
%% If the per-partition limit is unchanged, do nothing; otherwise push the new
%% limits to every worker, report them, and re-arm the GC timer.
maybe_update_cache_limits({_, _, PartitionCacheLimit, _, _},
		#state{chunk_cache_limit = PartitionCacheLimit} = State) ->
	State;
maybe_update_cache_limits(Limits, State) ->
	{MinimumCacheLimitBytes, OverallCacheLimitBytes, PartitionCacheLimitBytes, VDFQueueLimit,
			GarbageCollectionFrequency} = Limits,
	maps:foreach(
		fun(_Partition, Worker) ->
			ar_mining_worker:set_cache_limits(
					Worker, PartitionCacheLimitBytes, VDFQueueLimit)
		end,
		State#state.workers
	),
	ar:console(
		"~nSetting the mining chunk cache size limit to ~B MiB "
		"(~B MiB per partition).~n",
		[OverallCacheLimitBytes div ?MiB, PartitionCacheLimitBytes div ?MiB]),
	?LOG_INFO([{event, update_mining_cache_limits},
			{overall_limit_mb, OverallCacheLimitBytes div ?MiB},
			{per_partition_limit_mb, PartitionCacheLimitBytes div ?MiB},
			{vdf_queue_limit_steps, VDFQueueLimit}]),
	%% Warn when the configured cache is smaller than the computed minimum.
	case OverallCacheLimitBytes < MinimumCacheLimitBytes of
		true ->
			ar:console("~nChunk cache size limit (~p MiB) is below minimum limit of "
					"~p MiB. Mining performance may be impacted.~n"
					"Consider changing the 'mining_cache_size_mb' option.",
					[OverallCacheLimitBytes div ?MiB, MinimumCacheLimitBytes div ?MiB]);
		false ->
			ok
	end,
	State2 = reset_gc_timer(GarbageCollectionFrequency, State),
	State2#state{ chunk_cache_limit = PartitionCacheLimitBytes }.

%% Fan a VDF output out to the workers of all active partitions.
distribute_output(Candidate, State) ->
	distribute_output(ar_mining_io:get_partitions(), Candidate, State).

distribute_output([], _Candidate, _State) ->
	ok;
%% NOTE(review): this clause returns `ok` without recursing over the remaining
%% partitions, so the first disallowed composite partition stops distribution
%% for everything after it as well — confirm the partition list cannot mix
%% disallowed composite partitions with partitions we should still serve.
distribute_output([{_Partition, _MiningAddress, PackingDifficulty} | _Partitions], _Candidate,
		#state{ allow_composite_packing = false })
		when PackingDifficulty >= 1, PackingDifficulty /= ?REPLICA_2_9_PACKING_DIFFICULTY ->
	%% Only mine with composite packing until some time after the fork 2.9.
	ok;
distribute_output([{Partition, MiningAddress, PackingDifficulty} | Partitions],
		Candidate, State) ->
	case get_worker({Partition, PackingDifficulty}, State) of
		not_found ->
			?LOG_ERROR([{event, worker_not_found}, {partition, Partition}]),
			ok;
		Worker ->
			%% Specialize the candidate for this partition before queuing compute_h0.
			ar_mining_worker:add_task(
				Worker, compute_h0,
				Candidate#mining_candidate{
					partition_number = Partition,
					mining_address = MiningAddress,
					packing_difficulty = PackingDifficulty,
					replica_format =
						ar_mining_io:get_replica_format_from_packing_difficulty(
							PackingDifficulty)
				})
	end,
	distribute_output(Partitions, Candidate, State).

%% Compute the two absolute recall bytes for a given H0/nonce.
get_recall_bytes(H0, PartitionNumber, Nonce, PartitionUpperBound, PackingDifficulty) ->
	{RecallRange1Start, RecallRange2Start} = get_recall_range(H0,
			PartitionNumber, PartitionUpperBound),
	RecallByte1 = ar_block:get_recall_byte(RecallRange1Start, Nonce, PackingDifficulty),
	RecallByte2 = ar_block:get_recall_byte(RecallRange2Start, Nonce, PackingDifficulty),
	{RecallByte1, RecallByte2}.

%% Thin wrappers over ar_block's recall range computation.
get_recall_range(H0, PartitionNumber, PartitionUpperBound) ->
	ar_block:get_recall_range(H0, PartitionNumber, PartitionUpperBound).

get_recall_range(H0, PartitionNumber, PartitionUpperBound, RecallRange1, RecallRange2) ->
	ar_block:get_recall_range(H0, PartitionNumber, PartitionUpperBound,
			RecallRange1, RecallRange2).

prepare_and_post_solution(#mining_candidate{} = Candidate, State) ->
	%% A solo miner builds a solution from a candidate here or a CM miner who received
	%% H2 from another peer reads chunk1 and sends it to the exit peer.
	Solution = prepare_solution_from_candidate(Candidate, State),
	Ret = post_solution(Solution, State),
	maybe_flush_solution_queue(Ret),
	Ret;
prepare_and_post_solution(#mining_solution{} = Solution, State) ->
	%% An exit peer receives a mining solution, possibly without the VDF data and chunk proofs.
	Solution2 = prepare_solution(Solution, State),
	Ret = post_solution(Solution2, State),
	maybe_flush_solution_queue(Ret),
	Ret.
prepare_solution(Solution, State) -> #state{ merkle_rebase_threshold = RebaseThreshold, is_pool_client = IsPoolClient } = State, #mining_solution{ mining_address = MiningAddress, next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, nonce = Nonce, nonce_limiter_output = NonceLimiterOutput, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, poa1 = PoA1, poa2 = PoA2, preimage = Preimage, seed = Seed, start_interval_number = StartIntervalNumber, step_number = StepNumber, packing_difficulty = PackingDifficulty, replica_format = ReplicaFormat } = Solution, Candidate = #mining_candidate{ mining_address = MiningAddress, next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, nonce = Nonce, nonce_limiter_output = NonceLimiterOutput, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, poa2 = PoA2, preimage = Preimage, seed = Seed, start_interval_number = StartIntervalNumber, step_number = StepNumber, packing_difficulty = PackingDifficulty, replica_format = ReplicaFormat }, H0 = ar_block:compute_h0(NonceLimiterOutput, PartitionNumber, Seed, MiningAddress, PackingDifficulty), Chunk1 = PoA1#poa.chunk, {H1, Preimage1} = ar_block:compute_h1(H0, Nonce, Chunk1), Candidate2 = Candidate#mining_candidate{ h0 = H0, h1 = H1, chunk1 = Chunk1 }, Candidate3 = case PoA2#poa.chunk of <<>> -> Preimage = Preimage1, Candidate2; Chunk2 -> {H2, Preimage} = ar_block:compute_h2(H1, Chunk2, H0), Candidate2#mining_candidate{ h2 = H2, chunk2 = Chunk2 } end, Solution2 = Solution#mining_solution{ merkle_rebase_threshold = RebaseThreshold }, %% A pool client does not validate VDF before sharing a solution. case IsPoolClient of true -> prepare_solution(proofs, Candidate3, Solution2); false -> prepare_solution(last_step_checkpoints, Candidate3, Solution2) end. 
prepare_solution_from_candidate(Candidate, State) -> #state{ merkle_rebase_threshold = RebaseThreshold, is_pool_client = IsPoolClient } = State, #mining_candidate{ mining_address = MiningAddress, next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, nonce = Nonce, nonce_limiter_output = NonceLimiterOutput, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, poa2 = PoA2, preimage = Preimage, seed = Seed, start_interval_number = StartIntervalNumber, step_number = StepNumber, packing_difficulty = PackingDifficulty, replica_format = ReplicaFormat } = Candidate, Solution = #mining_solution{ mining_address = MiningAddress, merkle_rebase_threshold = RebaseThreshold, next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, nonce = Nonce, nonce_limiter_output = NonceLimiterOutput, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, poa2 = PoA2, preimage = Preimage, seed = Seed, start_interval_number = StartIntervalNumber, step_number = StepNumber, packing_difficulty = PackingDifficulty, replica_format = ReplicaFormat }, %% A pool client does not validate VDF before sharing a solution. case IsPoolClient of true -> prepare_solution(proofs, Candidate, Solution); false -> prepare_solution(last_step_checkpoints, Candidate, Solution) end. 
%% Staged solution preparation pipeline:
%%   last_step_checkpoints -> steps -> proofs -> poa2 (optional) -> poa1.
%% Each stage enriches the solution or fails with `error` (after logging).

%% Attach the last-step VDF checkpoints; falls back to [] if they cannot be found.
prepare_solution(last_step_checkpoints, Candidate, Solution) ->
	#mining_candidate{
		next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty,
		start_interval_number = StartIntervalNumber, step_number = StepNumber } = Candidate,
	LastStepCheckpoints = ar_nonce_limiter:get_step_checkpoints(
			StepNumber, NextSeed, StartIntervalNumber, NextVDFDifficulty),
	LastStepCheckpoints2 =
		case LastStepCheckpoints of
			not_found ->
				?LOG_WARNING([{event,
						found_solution_but_failed_to_find_last_step_checkpoints}]),
				[];
			_ ->
				LastStepCheckpoints
		end,
	prepare_solution(steps, Candidate, Solution#mining_solution{
			last_step_checkpoints = LastStepCheckpoints2 });
%% Attach the VDF steps between the tip's step and the solution's step. A
%% solution at or below the tip's step number is stale and rejected here.
prepare_solution(steps, Candidate, Solution) ->
	#mining_candidate{ step_number = StepNumber } = Candidate,
	[{_, TipNonceLimiterInfo}] = ets:lookup(node_state, nonce_limiter_info),
	#nonce_limiter_info{ global_step_number = PrevStepNumber, seed = PrevSeed,
			next_seed = PrevNextSeed,
			next_vdf_difficulty = PrevNextVDFDifficulty } = TipNonceLimiterInfo,
	case StepNumber > PrevStepNumber of
		true ->
			Steps = ar_nonce_limiter:get_steps(
					PrevStepNumber, StepNumber, PrevNextSeed, PrevNextVDFDifficulty),
			case Steps of
				not_found ->
					%% Steps are missing locally; a coordinated miner may still
					%% leave it to the exit peer to fill them in.
					CurrentSessionKey = ar_nonce_limiter:session_key(TipNonceLimiterInfo),
					SolutionSessionKey = Candidate#mining_candidate.session_key,
					LogData = [
						{current_session_key,
							ar_nonce_limiter:encode_session_key(CurrentSessionKey)},
						{solution_session_key,
							ar_nonce_limiter:encode_session_key(SolutionSessionKey)},
						{start_step_number, PrevStepNumber},
						{next_step_number, StepNumber},
						{seed, ar_util:safe_encode(PrevSeed)},
						{next_seed, ar_util:safe_encode(PrevNextSeed)},
						{next_vdf_difficulty, PrevNextVDFDifficulty},
						{h1, ar_util:safe_encode(Candidate#mining_candidate.h1)},
						{h2, ar_util:safe_encode(Candidate#mining_candidate.h2)}],
					?LOG_INFO([{event, found_solution_but_failed_to_find_checkpoints}
							| LogData]),
					may_be_leave_it_to_exit_peer(
						prepare_solution(proofs, Candidate,
								Solution#mining_solution{ steps = [] }),
						step_checkpoints_not_found, LogData);
				_ ->
					prepare_solution(proofs, Candidate,
							Solution#mining_solution{ steps = Steps })
			end;
		false ->
			log_prepare_solution_failure(Solution, stale, stale_step_number, miner, [
				{start_step_number, PrevStepNumber},
				{next_step_number, StepNumber},
				{next_seed, ar_util:safe_encode(PrevNextSeed)},
				{next_vdf_difficulty, PrevNextVDFDifficulty},
				{h1, ar_util:safe_encode(Candidate#mining_candidate.h1)},
				{h2, ar_util:safe_encode(Candidate#mining_candidate.h2)}
			]),
			error
	end;
%% Verify H0 (and H2 when present) against recomputed values and dispatch to
%% the appropriate proof stage: poa1 for one-chunk, poa2 for two-chunk solutions.
prepare_solution(proofs, Candidate, Solution) ->
	#mining_candidate{ h0 = H0, h1 = H1, h2 = H2, partition_number = PartitionNumber,
			partition_upper_bound = PartitionUpperBound,
			packing_difficulty = PackingDifficulty, nonce = Nonce, seed = Seed,
			mining_address = MiningAddress, nonce_limiter_output = NonceLimiterOutput,
			chunk2 = Chunk2 } = Candidate,
	#mining_solution{ poa1 = PoA1, poa2 = PoA2 } = Solution,
	{RecallByte1, RecallByte2} = get_recall_bytes(H0, PartitionNumber, Nonce,
			PartitionUpperBound, PackingDifficulty),
	ExpectedH0 = ar_block:compute_h0(NonceLimiterOutput, PartitionNumber, Seed,
			MiningAddress, PackingDifficulty),
	case {H0, H1, H2} of
		{_, not_set, not_set} ->
			%% We should never end up here..
			log_prepare_solution_failure(Solution, rejected, h1_h2_not_set, miner, []),
			error;
		{ExpectedH0, _H1, not_set} ->
			%% One-chunk (H1) solution with a valid H0.
			prepare_solution(poa1, Candidate, Solution#mining_solution{
					solution_hash = H1, recall_byte1 = RecallByte1,
					poa1 = may_be_empty_poa(PoA1), poa2 = #poa{} });
		{_H0, _H1, not_set} ->
			log_prepare_solution_failure(Solution, rejected, incorrect_h0, miner, []),
			error;
		{ExpectedH0, _H1, _H2} ->
			%% Two-chunk (H2) solution; re-derive H2 before trusting it.
			case is_h2_valid(Chunk2, H0, H1, H2) of
				true ->
					prepare_solution(poa2, Candidate, Solution#mining_solution{
							solution_hash = H2, recall_byte1 = RecallByte1,
							recall_byte2 = RecallByte2,
							poa1 = may_be_empty_poa(PoA1),
							poa2 = may_be_empty_poa(PoA2) });
				false ->
					log_prepare_solution_failure(Solution, rejected, incorrect_h2,
							miner, []),
					error
			end;
		_ ->
			log_prepare_solution_failure(Solution, rejected, incorrect_h0, miner, []),
			error
	end;
%% Attach (or recover) the first chunk's proof of access and validate H1.
prepare_solution(poa1, Candidate, Solution) ->
	#mining_solution{ poa1 = CurrentPoA1, recall_byte1 = RecallByte1,
			mining_address = MiningAddress, packing_difficulty = PackingDifficulty,
			replica_format = ReplicaFormat } = Solution,
	#mining_candidate{ h0 = H0, h1 = H1, chunk1 = Chunk1, nonce = Nonce,
			partition_number = PartitionNumber } = Candidate,
	case prepare_poa(poa1, Candidate, CurrentPoA1) of
		{ok, PoA1} ->
			case is_h1_valid(Chunk1, PoA1, H0, H1, Nonce) of
				true ->
					Solution#mining_solution{ poa1 = PoA1 };
				false ->
					log_prepare_solution_failure(Solution, rejected, incorrect_h1,
							miner, []),
					error
			end;
		{error, Error} ->
			Modules = ar_storage_module:get_all(RecallByte1 + 1),
			ModuleIDs = [ar_storage_module:id(Module) || Module <- Modules],
			LogData = [{recall_byte, RecallByte1},
					{modules_covering_recall_byte, ModuleIDs},
					{fetch_proofs_error, io_lib:format("~p", [Error])},
					{nonce, Nonce}, {partition_number, PartitionNumber}],
			case Chunk1 of
				not_set ->
					%% H2 solution where we never read chunk1: try to pull the raw
					%% chunk from local chunk storage so the exit peer can complete
					%% the proofs.
					Packing = ar_block:get_packing(PackingDifficulty, MiningAddress,
							ReplicaFormat),
					?LOG_WARNING([{event, failed_to_find_poa1_proofs_for_h2_solution},
							{error, io_lib:format("~p", [Error])},
							{tags, [solution_proofs]} | LogData]),
					case ar_storage_module:get(RecallByte1 + 1, Packing) of
						{_BucketSize, _Bucket, Packing} = StorageModule ->
							StoreID = ar_storage_module:id(StorageModule),
							case ar_chunk_storage:get(RecallByte1, StoreID) of
								not_found ->
									log_prepare_solution_failure(Solution, rejected,
											chunk1_for_h2_solution_not_found, miner,
											LogData),
									error;
								{_EndOffset, Chunk} ->
									SubChunk = get_sub_chunk(Chunk,
											PackingDifficulty, Nonce),
									%% If we are a coordinated miner and not an exit node -
									%% the exit node will fetch the proofs.
									may_be_leave_it_to_exit_peer(
										Solution#mining_solution{
											poa1 = #poa{ chunk = SubChunk } },
										chunk1_proofs_for_h2_solution_not_found,
										LogData)
							end;
						_ ->
							log_prepare_solution_failure(Solution, rejected,
									storage_module_for_chunk1_for_h2_solution_not_found,
									miner, LogData),
							error
					end;
				_ ->
					%% If we are a coordinated miner and not an exit node - the exit
					%% node will fetch the proofs.
					may_be_leave_it_to_exit_peer(
						Solution#mining_solution{ poa1 = #poa{ chunk = Chunk1 } },
						chunk1_proofs_not_found, LogData)
			end
	end;
%% Attach (or recover) the second chunk's proof of access, then fall through to poa1.
prepare_solution(poa2, Candidate, Solution) ->
	#mining_solution{ poa2 = CurrentPoA2, recall_byte2 = RecallByte2 } = Solution,
	#mining_candidate{ chunk2 = Chunk2 } = Candidate,
	case prepare_poa(poa2, Candidate, CurrentPoA2) of
		{ok, PoA2} ->
			prepare_solution(poa1, Candidate, Solution#mining_solution{ poa2 = PoA2 });
		{error, _Error} ->
			Modules = ar_storage_module:get_all(RecallByte2 + 1),
			ModuleIDs = [ar_storage_module:id(Module) || Module <- Modules],
			LogData = [{recall_byte2, RecallByte2},
					{modules_covering_recall_byte, ModuleIDs}],
			%% If we are a coordinated miner and not an exit node - the exit
			%% node will fetch the proofs.
			may_be_leave_it_to_exit_peer(
				prepare_solution(poa1, Candidate,
						Solution#mining_solution{ poa2 = #poa{ chunk = Chunk2 } }),
				chunk2_proofs_not_found, LogData)
	end.
prepare_poa(PoAType, Candidate, CurrentPoA) -> #mining_candidate{ packing_difficulty = PackingDifficulty, replica_format = ReplicaFormat, mining_address = MiningAddress, nonce = Nonce, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, h0 = H0, chunk1 = Chunk1, chunk2 = Chunk2 } = Candidate, {RecallByte1, RecallByte2} = get_recall_bytes(H0, PartitionNumber, Nonce, PartitionUpperBound, PackingDifficulty), {RecallByte, Chunk} = case PoAType of poa1 -> {RecallByte1, Chunk1}; poa2 -> {RecallByte2, Chunk2} end, Packing = ar_block:get_packing(PackingDifficulty, MiningAddress, ReplicaFormat), case is_poa_complete(CurrentPoA, PackingDifficulty) of true -> {ok, CurrentPoA}; false -> case read_poa(RecallByte, Chunk, Packing, Nonce) of {ok, PoA} -> {ok, PoA}; {error, Error} -> Modules = ar_storage_module:get_all(RecallByte + 1), ModuleIDs = [ar_storage_module:id(Module) || Module <- Modules], ?LOG_INFO([{event, failed_to_find_poa_proofs_locally}, {poa, PoAType}, {error, io_lib:format("~p", [Error])}, {tags, [solution_proofs]}, {recall_byte, RecallByte}, {packing, ar_serialize:encode_packing(Packing, true)}, {packing_difficulty, PackingDifficulty}, {modules_covering_recall_byte, ModuleIDs}]), ChunkBinary = case Chunk of not_set -> <<>>; _ -> Chunk end, case fetch_poa_from_peers(RecallByte, PackingDifficulty) of not_found -> ?LOG_INFO([{event, failed_to_fetch_proofs_from_peers}, {tags, [solution_proofs]}, {poa, PoAType}, {recall_byte, RecallByte}, {nonce, Nonce}, {partition, PartitionNumber}, {mining_address, ar_util:safe_encode(MiningAddress)}, {packing, ar_serialize:encode_packing(Packing, true)}, {packing_difficulty, PackingDifficulty}]), {error, Error}; PoA -> {ok, PoA#poa{ chunk = ChunkBinary }} end end end. %% Check if the chunk proof has been assembed already by the mining nodes. %% If not, we are the exit node and we will fetch the missing data. 
%% A PoA is complete when chunk, data_path and tx_path are all non-empty; for
%% packing difficulty >= 1 the unpacked chunk must be present too.
is_poa_complete(#poa{ chunk = <<>> }, _) ->
	false;
is_poa_complete(#poa{ data_path = <<>> }, _) ->
	false;
is_poa_complete(#poa{ tx_path = <<>> }, _) ->
	false;
is_poa_complete(#poa{ unpacked_chunk = <<>> }, PackingDifficulty)
		when PackingDifficulty >= 1 ->
	false;
is_poa_complete(_, _) ->
	true.

%% If we are a non-exit coordinated miner, pass the (possibly incomplete)
%% solution along for the exit peer to finish; otherwise log the failure and
%% return error. A pipeline `error` is propagated unchanged.
may_be_leave_it_to_exit_peer(error, _FailureReason, _AdditionalLogData) ->
	error;
may_be_leave_it_to_exit_peer(Solution, FailureReason, AdditionalLogData) ->
	case ar_coordination:is_coordinated_miner() andalso
			not ar_coordination:is_exit_peer() of
		true ->
			Solution;
		false ->
			log_prepare_solution_failure(
					Solution, rejected, FailureReason, miner, AdditionalLogData),
			error
	end.

%% Recompute H1 from H0/nonce and the chunk (taken from the PoA when the
%% candidate carries none) and compare to the claimed H1.
is_h1_valid(Chunk, PoA, H0, H1, Nonce) ->
	Chunk1 = case Chunk of
		not_set -> PoA#poa.chunk;
		_ -> Chunk
	end,
	{ExpectedH1, _} = ar_block:compute_h1(H0, Nonce, Chunk1),
	H1 == ExpectedH1.

%% Recompute H2 from H1/H0 and chunk2; with no chunk the check trivially passes.
is_h2_valid(Chunk, H0, H1, H2) ->
	{ExpectedH2, _} = case Chunk of
		not_set -> {H2, not_set};
		_ -> ar_block:compute_h2(H1, Chunk, H0)
	end,
	H2 == ExpectedH2.

%% Post the prepared solution; routing depends on whether a coordinated-mining
%% exit peer is configured and whether we are a pool client.
post_solution(error, _State) ->
	?LOG_WARNING([{event, found_solution_but_could_not_build_a_block}]),
	error;
post_solution(Solution, State) ->
	{ok, Config} = arweave_config:get_env(),
	post_solution(Config#config.cm_exit_peer, Solution, State).

post_solution(not_set, Solution, #state{ is_pool_client = true }) ->
	%% When posting a partial solution the pool client will skip many of the validation steps
	%% that are normally performed before sharing a solution.
	ar_pool:post_partial_solution(Solution),
	ok;
%% No exit peer: validate locally and hand the solution to the node worker.
post_solution(not_set, Solution, State) ->
	#state{ diff_pair = DiffPair } = State,
	#mining_solution{
		mining_address = MiningAddress,
		nonce_limiter_output = NonceLimiterOutput,
		partition_number = PartitionNumber,
		recall_byte1 = RecallByte1,
		recall_byte2 = RecallByte2,
		solution_hash = H,
		step_number = StepNumber
	} = Solution,
	case validate_solution(Solution, DiffPair) of
		error ->
			?LOG_WARNING([{event, failed_to_validate_solution},
					{partition, PartitionNumber},
					{step_number, StepNumber},
					{mining_address, ar_util:safe_encode(MiningAddress)},
					{recall_byte1, RecallByte1},
					{recall_byte2, RecallByte2},
					{solution_h, ar_util:safe_encode(H)},
					{nonce_limiter_output, ar_util:safe_encode(NonceLimiterOutput)},
					{diff_pair, DiffPair}]),
			ar:console("WARNING: we failed to validate our solution. Check logs for more "
					"details~n"),
			error;
		{false, Reason} ->
			ar_events:send(solution, {rejected,
					#{ reason => Reason, source => miner }}),
			?LOG_WARNING([{event, found_invalid_solution},
					{reason, Reason},
					{partition, PartitionNumber},
					{step_number, StepNumber},
					{mining_address, ar_util:safe_encode(MiningAddress)},
					{recall_byte1, RecallByte1},
					{recall_byte2, RecallByte2},
					{solution_h, ar_util:safe_encode(H)},
					{nonce_limiter_output, ar_util:safe_encode(NonceLimiterOutput)},
					{diff_pair, DiffPair}]),
			ar:console("WARNING: the solution we found is invalid. Check logs for more "
					"details~n"),
			error;
		{true, PoACache, PoA2Cache} ->
			ar_node_worker:found_solution(miner, Solution, PoACache, PoA2Cache),
			ok
	end;
%% Pool client with an exit peer: forward the partial solution over HTTP.
post_solution(ExitPeer, Solution, #state{ is_pool_client = true }) ->
	case ar_http_iface_client:post_partial_solution(ExitPeer, Solution) of
		{ok, _} ->
			ok;
		{error, Reason} ->
			?LOG_WARNING([{event, found_partial_solution_but_failed_to_reach_exit_node},
					{reason, io_lib:format("~p", [Reason])}]),
			ar:console("We found a partial solution but failed to reach the exit node, "
					"error: ~p.", [io_lib:format("~p", [Reason])]),
			error
	end;
%% Coordinated miner: publish the full solution to the exit peer.
post_solution(ExitPeer, Solution, _State) ->
	case ar_http_iface_client:cm_publish_send(ExitPeer, Solution) of
		{ok, _} ->
			ok;
		{error, Reason} ->
			?LOG_WARNING([{event, found_solution_but_failed_to_reach_exit_node},
					{reason, io_lib:format("~p", [Reason])}]),
			ar:console("We found a solution but failed to reach the exit node, "
					"error: ~p.", [io_lib:format("~p", [Reason])]),
			error
	end.

-ifdef(AR_TEST).
%% @doc During tests the miner can mine so many solutions in parallel
%% that it fills up the ar_mining_server queue and can cause flaky timeouts.
%% To avoid this we'll flush the queue after a successful solution is posted.
maybe_flush_solution_queue(ok) ->
	flush_solution_queue(),
	ok;
maybe_flush_solution_queue(_Other) ->
	ok.

%% Drain any queued prepare_and_post_solution casts from the mailbox.
flush_solution_queue() ->
	receive
		{'$gen_cast', {prepare_and_post_solution, _}} ->
			flush_solution_queue()
	after 0 ->
		ok
	end.
-else.
maybe_flush_solution_queue(_Ret) ->
	ok.
-endif.

%% Normalize an optional PoA: not_set becomes an empty #poa{}.
may_be_empty_poa(not_set) -> #poa{};
may_be_empty_poa(#poa{} = PoA) -> PoA.
%% Query peers advertising the relevant data bucket for the proofs covering
%% RecallByte. Not supported for packing difficulty >= 1 (sub-chunk packing).
%% Spawns one fetcher per peer and returns the first PoA received, or
%% not_found after ?FETCH_POA_FROM_PEERS_TIMEOUT_MS.
fetch_poa_from_peers(_RecallByte, PackingDifficulty) when PackingDifficulty >= 1 ->
	not_found;
fetch_poa_from_peers(RecallByte, _PackingDifficulty) ->
	BucketPeers = ar_data_discovery:get_bucket_peers(
			RecallByte div ?NETWORK_DATA_BUCKET_SIZE),
	Peers = ar_data_discovery:pick_peers(BucketPeers, ?QUERY_BEST_PEERS_COUNT),
	From = self(),
	lists:foreach(
		fun(Peer) ->
			spawn(
				fun() ->
					?LOG_INFO([{event, last_moment_proof_search},
							{peer, ar_util:format_peer(Peer)},
							{recall_byte, RecallByte}]),
					case fetch_poa_from_peer(Peer, RecallByte) of
						not_found ->
							ok;
						PoA ->
							From ! {fetched_last_moment_proof, PoA}
					end
				end)
		end,
		Peers
	),
	receive
		%% The first spawned process to fetch a PoA from a peer will trigger this `receive`
		%% and allow `fetch_poa_from_peers` to exit. All other processes that complete later
		%% will trigger the
		%% `handle_info({fetched_last_moment_proof, _}, State) ->` above (which is a no-op).
		{fetched_last_moment_proof, PoA} ->
			PoA
	after ?FETCH_POA_FROM_PEERS_TIMEOUT_MS ->
		not_found
	end.

%% Fetch the chunk's data_path/tx_path from a single peer; the chunk itself is
%% intentionally not taken from the peer.
fetch_poa_from_peer(Peer, RecallByte) ->
	case ar_http_iface_client:get_chunk_binary(Peer, RecallByte + 1, any) of
		{ok, #{ data_path := DataPath, tx_path := TXPath }, _, _} ->
			#poa{ data_path = DataPath, tx_path = TXPath };
		_ ->
			not_found
	end.

%% Process one VDF output: refresh cache limits when the upper bound grew,
%% update the active session set, and either distribute the output to the
%% partition workers or skip it as stale.
handle_computed_output(SessionKey, StepNumber, Output, PartitionUpperBound,
		PartialDiff, State) ->
	true = is_integer(StepNumber),
	ar_mining_stats:vdf_computed(),
	State2 =
		case ar_mining_io:set_largest_seen_upper_bound(PartitionUpperBound) of
			true ->
				%% If the largest seen upper bound changed, a new partition may have been added
				%% to the mining set, so we may need to update the chunk cache size limit.
				update_cache_limits(State);
			false ->
				State
		end,
	State3 = maybe_update_sessions(SessionKey, State2),
	case sets:is_element(SessionKey, State3#state.active_sessions) of
		false ->
			%% (Fix: log the *updated* session set (State3) — previously this read
			%% State, reporting the sessions active before maybe_update_sessions ran.)
			?LOG_DEBUG([{event, mining_debug_skipping_vdf_output},
					{reason, stale_session},
					{step_number, StepNumber},
					{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
					{active_sessions,
						encode_sessions(sets:to_list(State3#state.active_sessions))}]);
		true ->
			{NextSeed, StartIntervalNumber, NextVDFDifficulty} = SessionKey,
			Candidate = #mining_candidate{
				session_key = SessionKey,
				seed = get_seed(SessionKey, State3),
				next_seed = NextSeed,
				next_vdf_difficulty = NextVDFDifficulty,
				start_interval_number = StartIntervalNumber,
				step_number = StepNumber,
				nonce_limiter_output = Output,
				partition_upper_bound = PartitionUpperBound,
				cm_diff = PartialDiff
			},
			prometheus_gauge:inc(mining_vdf_step),
			distribute_output(Candidate, State3),
			?LOG_DEBUG([{event, mining_debug_processing_vdf_output},
					{step_number, StepNumber},
					{output, ar_util:safe_encode(Output)},
					{start_interval_number, StartIntervalNumber},
					{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
					{partition_upper_bound, PartitionUpperBound}])
	end,
	{noreply, State3}.
%% @doc Read the proof of access for RecallByte in the given Packing and
%% reconcile it with the chunk (or sub-chunk) the candidate carried —
%% ChunkOrSubChunk is either a binary or the atom not_set. Returns
%% {ok, #poa{}} with the chunk field set to the matching sub-chunk (for
%% composite / replica_2_9 packing) or full chunk, or an error tuple.
%% On a mismatch the offending data is dumped to disk for inspection.
%% NOTE: clause order matters — the not_set clauses must precede the
%% bound-chunk clauses for the same packing.
read_poa(RecallByte, ChunkOrSubChunk, Packing, Nonce) ->
	PoAReply = read_poa(RecallByte, Packing),
	case {ChunkOrSubChunk, PoAReply, Packing} of
		%% Candidate carried no chunk: slice the Nonce-indexed sub-chunk out of
		%% the locally read replica_2_9 chunk.
		{not_set, {ok, #poa{ chunk = Chunk } = PoA}, {replica_2_9, _}} ->
			PackingDifficulty = ?REPLICA_2_9_PACKING_DIFFICULTY,
			SubChunk = get_sub_chunk(Chunk, PackingDifficulty, Nonce),
			{ok, PoA#poa{ chunk = SubChunk }};
		%% Same as above for composite packing (difficulty taken from Packing).
		{not_set, {ok, #poa{ chunk = Chunk } = PoA}, {composite, _, PackingDifficulty}} ->
			SubChunk = get_sub_chunk(Chunk, PackingDifficulty, Nonce),
			{ok, PoA#poa{ chunk = SubChunk }};
		%% Candidate carried a sub-chunk: it must occur in the locally read
		%% chunk at a sub-chunk boundary.
		{_ChunkOrSubChunk, {ok, #poa{ chunk = Chunk } = PoA}, {replica_2_9, _}} ->
			case sub_chunk_belongs_to_chunk(ChunkOrSubChunk, Chunk) of
				true ->
					{ok, PoA#poa{ chunk = ChunkOrSubChunk }};
				false ->
					dump_invalid_solution_data({sub_chunk_mismatch, RecallByte,
							ChunkOrSubChunk, PoA, Packing, PoAReply, Nonce}),
					{error, sub_chunk_mismatch};
				%% {error, uneven_chunk} from sub_chunk_belongs_to_chunk/2.
				Error2 ->
					Error2
			end;
		{_ChunkOrSubChunk, {ok, #poa{ chunk = Chunk } = PoA}, {composite, _, _}} ->
			case sub_chunk_belongs_to_chunk(ChunkOrSubChunk, Chunk) of
				true ->
					{ok, PoA#poa{ chunk = ChunkOrSubChunk }};
				false ->
					dump_invalid_solution_data({sub_chunk_mismatch, RecallByte,
							ChunkOrSubChunk, PoA, Packing, PoAReply, Nonce}),
					{error, sub_chunk_mismatch};
				Error2 ->
					Error2
			end;
		%% Non-composite packing, no chunk provided: return the proof as read.
		{not_set, {ok, #poa{} = PoA}, _Packing} ->
			{ok, PoA};
		%% Non-composite packing: the provided chunk must equal the locally read
		%% chunk (equality enforced by the bound ChunkOrSubChunk in the pattern).
		{_ChunkOrSubChunk, {ok, #poa{ chunk = ChunkOrSubChunk } = PoA}, _Packing} ->
			{ok, PoA};
		{_ChunkOrSubChunk, {ok, #poa{} = PoA}, _Packing} ->
			dump_invalid_solution_data({chunk_mismatch, RecallByte,
					ChunkOrSubChunk, PoA, Packing, PoAReply, Nonce}),
			{error, chunk_mismatch};
		%% read_poa/2 failed; propagate its error.
		{_ChunkOrSubChunk, Error, _Packing} ->
			Error
	end.

%% @doc Dump the given term to a uniquely named file under the node data
%% directory so invalid-solution conditions can be inspected post mortem.
dump_invalid_solution_data(Data) ->
	{ok, Config} = arweave_config:get_env(),
	ID = binary_to_list(ar_util:encode(crypto:strong_rand_bytes(16))),
	File = filename:join(Config#config.data_dir, "invalid_solution_data_dump_" ++ ID),
	file:write_file(File, term_to_binary(Data)).
%% @doc Return the sub-chunk of Chunk addressed by Nonce. With packing
%% difficulty 0 chunks are mined whole, so the full chunk is returned.
get_sub_chunk(Chunk, 0, _Nonce) ->
	Chunk;
get_sub_chunk(Chunk, PackingDifficulty, Nonce) ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	SubChunkIndex = ar_block:get_sub_chunk_index(PackingDifficulty, Nonce),
	SubChunkStartOffset = SubChunkSize * SubChunkIndex,
	binary:part(Chunk, SubChunkStartOffset, SubChunkSize).

%% @doc Return true if SubChunk occurs in Chunk at a sub-chunk boundary,
%% false if the whole chunk was scanned without a match, or
%% {error, uneven_chunk} when the remaining chunk is not a whole number of
%% sub-chunks (neither binary pattern matches a non-empty remainder).
sub_chunk_belongs_to_chunk(SubChunk,
		<< SubChunk:?COMPOSITE_PACKING_SUB_CHUNK_SIZE/binary, _Rest/binary >>) ->
	true;
sub_chunk_belongs_to_chunk(SubChunk,
		<< _SubChunk:?COMPOSITE_PACKING_SUB_CHUNK_SIZE/binary, Rest/binary >>) ->
	sub_chunk_belongs_to_chunk(SubChunk, Rest);
sub_chunk_belongs_to_chunk(_SubChunk, <<>>) ->
	false;
sub_chunk_belongs_to_chunk(_SubChunk, _Chunk) ->
	{error, uneven_chunk}.

%% @doc Read the chunk and its proofs for RecallByte from local storage and
%% assemble a #poa{}. For replica_2_9 / composite packing the unpacked chunk is
%% also required: it is taken from the proof map when present, otherwise
%% fetched via read_unpacked_chunk/2.
read_poa(RecallByte, Packing) ->
	Options = #{ pack => true, packing => Packing, origin => miner },
	case ar_data_sync:get_chunk(RecallByte + 1, Options) of
		{ok, Proof} ->
			#{ chunk := Chunk, tx_path := TXPath, data_path := DataPath } = Proof,
			case get_packing_type(Packing) of
				Type when Type == replica_2_9; Type == composite ->
					case maps:get(unpacked_chunk, Proof, not_found) of
						not_found ->
							read_unpacked_chunk(RecallByte, Proof);
						UnpackedChunk ->
							{ok, #poa{ option = 1, chunk = Chunk,
									unpacked_chunk = ar_packing_server:pad_chunk(UnpackedChunk),
									tx_path = TXPath, data_path = DataPath }}
					end;
				_ ->
					{ok, #poa{ option = 1, chunk = Chunk,
							tx_path = TXPath, data_path = DataPath }}
			end;
		Error ->
			Error
	end.

%% @doc Read the unpacked version of the chunk at RecallByte and combine it
%% with the packed chunk from the previously fetched Proof map.
read_unpacked_chunk(RecallByte, Proof) ->
	Options = #{ pack => true, packing => unpacked, origin => miner },
	case ar_data_sync:get_chunk(RecallByte + 1, Options) of
		{ok, #{ chunk := UnpackedChunk, tx_path := TXPath, data_path := DataPath }} ->
			{ok, #poa{ option = 1, chunk = maps:get(chunk, Proof),
					unpacked_chunk = ar_packing_server:pad_chunk(UnpackedChunk),
					tx_path = TXPath, data_path = DataPath }};
		Error ->
			Error
	end.
%% @doc Re-validate a mining solution before it is posted: recompute H0 and H1
%% (and H2 for two-chunk solutions), assert the recall byte(s) and solution
%% hash via pattern-match against the recomputed values, check the difficulty,
%% and validate the proof(s) of access. Returns {true, PoACache, PoA2Cache}
%% (PoA2Cache = undefined for one-chunk solutions), {false, Reason} for a
%% reportable failure, or the atom error after logging.
validate_solution(Solution, DiffPair) ->
	#mining_solution{
		mining_address = MiningAddress,
		nonce = Nonce,
		nonce_limiter_output = NonceLimiterOutput,
		partition_number = PartitionNumber,
		partition_upper_bound = PartitionUpperBound,
		poa1 = PoA1,
		recall_byte1 = RecallByte1,
		seed = Seed,
		solution_hash = SolutionHash,
		packing_difficulty = PackingDifficulty,
		replica_format = ReplicaFormat
	} = Solution,
	H0 = ar_block:compute_h0(NonceLimiterOutput, PartitionNumber, Seed,
			MiningAddress, PackingDifficulty),
	{H1, _Preimage1} = ar_block:compute_h1(H0, Nonce, PoA1#poa.chunk),
	{RecallRange1Start, RecallRange2Start} = get_recall_range(H0,
			PartitionNumber, PartitionUpperBound),
	%% Assert recall_byte1 is computed correctly.
	RecallByte1 = ar_block:get_recall_byte(RecallRange1Start, Nonce, PackingDifficulty),
	{BlockStart1, BlockEnd1, TXRoot1} = ar_block_index:get_block_bounds(RecallByte1),
	BlockSize1 = BlockEnd1 - BlockStart1,
	Packing = ar_block:get_packing(PackingDifficulty, MiningAddress, ReplicaFormat),
	SubChunkIndex = ar_block:get_sub_chunk_index(PackingDifficulty, Nonce),
	case ar_poa:validate({BlockStart1, RecallByte1, TXRoot1, BlockSize1, PoA1,
			Packing, SubChunkIndex, not_set}) of
		{true, ChunkID} ->
			PoACache = {{BlockStart1, RecallByte1, TXRoot1, BlockSize1,
					Packing, SubChunkIndex}, ChunkID},
			case ar_node_utils:h1_passes_diff_check(H1, DiffPair, PackingDifficulty) of
				true ->
					%% One-chunk solution: the solution hash must equal H1.
					case SolutionHash of
						H1 ->
							{true, PoACache, undefined};
						_ ->
							?LOG_ERROR([{event, invalid_solution_hash},
									{solution_hash, ar_util:encode(SolutionHash)},
									{h1, ar_util:encode(H1)}]),
							error
					end;
				false ->
					case is_one_chunk_solution(Solution) of
						true ->
							%% This can happen if the difficulty has increased between the
							%% time the H1 solution was found and now. In this case,
							%% there is no H2 solution, so we flag the solution invalid.
							{Diff1, _} = DiffPair,
							{false, {h1_diff_check,
									ar_util:safe_encode(H0),
									ar_util:safe_encode(H1),
									binary:decode_unsigned(H1),
									ar_node_utils:scaled_diff(Diff1, PackingDifficulty)
							}};
						false ->
							#mining_solution{
									recall_byte2 = RecallByte2, poa2 = PoA2 } = Solution,
							{H2, _Preimage2} = ar_block:compute_h2(H1, PoA2#poa.chunk, H0),
							case ar_node_utils:h2_passes_diff_check(H2, DiffPair,
									PackingDifficulty) of
								false ->
									{_, Diff2} = DiffPair,
									{false, {h2_diff_check,
											ar_util:safe_encode(H0),
											ar_util:safe_encode(H1),
											ar_util:safe_encode(H2),
											binary:decode_unsigned(H2),
											ar_node_utils:scaled_diff(Diff2, PackingDifficulty)
									}};
								true ->
									%% Assert the solution hash and recall_byte2 were
									%% computed correctly (match against bound variables;
									%% a mismatch crashes with badmatch).
									SolutionHash = H2,
									RecallByte2 = ar_block:get_recall_byte(RecallRange2Start,
											Nonce, PackingDifficulty),
									{BlockStart2, BlockEnd2, TXRoot2} =
											ar_block_index:get_block_bounds(RecallByte2),
									BlockSize2 = BlockEnd2 - BlockStart2,
									case ar_poa:validate({BlockStart2, RecallByte2, TXRoot2,
											BlockSize2, PoA2, Packing, SubChunkIndex,
											not_set}) of
										{true, Chunk2ID} ->
											PoA2Cache = {{BlockStart2, RecallByte2, TXRoot2,
													BlockSize2, Packing, SubChunkIndex},
													Chunk2ID},
											{true, PoACache, PoA2Cache};
										error ->
											log_prepare_solution_failure(Solution, rejected,
													poa2_validation_error, miner, []),
											error;
										false ->
											{false, poa2}
									end
							end
					end
			end;
		error ->
			log_prepare_solution_failure(Solution, rejected,
					poa1_validation_error, miner, []),
			error;
		false ->
			{false, poa1}
	end.

%% @doc (Re)arm the manual garbage collection timer with the given frequency,
%% cancelling any previously armed one. The fresh Ref identifies the pending
%% {manual_garbage_collect, Ref} message.
reset_gc_timer(GarbageCollectionFrequency, State) ->
	State2 = maybe_cancel_gc_timer(State),
	Ref = erlang:make_ref(),
	ar_util:cast_after(GarbageCollectionFrequency, ?MODULE, {manual_garbage_collect, Ref}),
	State2#state{ gc_process_ref = Ref, gc_frequency_ms = GarbageCollectionFrequency }.

%% @doc Clear the stored GC reference, if any.
maybe_cancel_gc_timer(#state{gc_process_ref = undefined} = State) ->
	State;
maybe_cancel_gc_timer(State) ->
	%% NOTE(review): gc_process_ref holds a plain erlang:make_ref() reference
	%% (see reset_gc_timer/2), not a timer reference, so cancel_timer/1 here
	%% likely just returns false; stale casts are presumably ignored by
	%% matching Ref in the {manual_garbage_collect, Ref} handler — confirm.
	erlang:cancel_timer(State#state.gc_process_ref),
	State#state{ gc_process_ref = undefined }.

%%%===================================================================
%%% Public Test interface.
%%%===================================================================

%% @doc Pause the mining server. Only used in tests.
pause() ->
	gen_server:cast(?MODULE, pause).

%% Capture the current configuration so the fixture can restore it afterwards.
setup() ->
	{ok, Config} = arweave_config:get_env(),
	Config.

%% Restore the configuration captured by setup/0.
cleanup(Config) ->
	arweave_config:set_env(Config).

%% EUnit setup fixture running the calculate_cache_limits/2 tests.
calculate_cache_limits_test_() ->
	{setup, fun setup/0, fun cleanup/1,
		[
			{timeout, 30, fun test_calculate_cache_limits_default/0},
			{timeout, 30, fun test_calculate_cache_limits_custom_low/0},
			{timeout, 30, fun test_calculate_cache_limits_custom_high/0}
		]
	}.

%% calculate_cache_limits(VDFQueueLimit?, PackingDifficulty) with
%% mining_cache_size_mb unset (defaults derived from ?IDEAL_STEPS_PER_PARTITION).
test_calculate_cache_limits_default() ->
	{ok, Config} = arweave_config:get_env(),
	arweave_config:set_env(Config#config{ mining_cache_size_mb = undefined }),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 100 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 100 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * ?MiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(100, 0)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 200 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 200 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * ?MiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(200, 0)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 1000 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 1000 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * ?MiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(1000, 0)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 25 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 25 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 256 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(100, 1)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 50 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 50 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 256 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(200, 1)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 250 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 250 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 256 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(1000, 1)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 25 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 25 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 128 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(200, 2)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 50 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 50 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 128 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(400, 2)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 125 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 125 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 128 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(1000, 2)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 50 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 50 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 8 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(6_400, 32)
	),
	?assertEqual(
		{ ?IDEAL_STEPS_PER_PARTITION * 100 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 100 * ?MiB,
			?IDEAL_STEPS_PER_PARTITION * 8 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(12_800, 32)
	),
	?assertEqual(
		{ trunc(?IDEAL_STEPS_PER_PARTITION * 156.25 * ?MiB),
			trunc(?IDEAL_STEPS_PER_PARTITION * 156.25 * ?MiB),
			?IDEAL_STEPS_PER_PARTITION * 8 * ?KiB,
			?IDEAL_STEPS_PER_PARTITION,
			?IDEAL_STEPS_PER_PARTITION * 4000},
		calculate_cache_limits(20_000, 32)
	).
%% Exercises calculate_cache_limits/2 with a deliberately tiny configured
%% cache (mining_cache_size_mb = 1): the configured size is clamped from
%% below by ?MINIMUM_CACHE_LIMIT_BYTES in the first tuple element.
test_calculate_cache_limits_custom_low() ->
	{ok, Config} = arweave_config:get_env(),
	arweave_config:set_env(Config#config{ mining_cache_size_mb = 1 }),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 1 * ?MiB, 1, 4_000},
		calculate_cache_limits(1, 0)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 512 * ?KiB, 1, 4_000},
		calculate_cache_limits(2, 0)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 1000 * ?MiB, 1 * ?MiB,
			(1 * ?MiB) div 1_000, 1, 4_000},
		calculate_cache_limits(1000, 0)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 1 * ?MiB, 4, 16_000},
		calculate_cache_limits(1, 1)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 512 * ?KiB, 2, 8_000},
		calculate_cache_limits(2, 1)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 250 * ?MiB, 1 * ?MiB,
			(1 * ?MiB) div 1_000, 1, 4_000},
		calculate_cache_limits(1000, 1)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 1 * ?MiB, 8, 32_000},
		calculate_cache_limits(1, 2)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 512 * ?KiB, 4, 16_000},
		calculate_cache_limits(2, 2)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 128_000 * ?KiB, 1 * ?MiB,
			(1 * ?MiB) div 1_000, 1, 4_000},
		calculate_cache_limits(1000, 2)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 1 * ?MiB, 128, 512_000},
		calculate_cache_limits(1, 32)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 1 * ?MiB, 512 * ?KiB, 64, 256_000},
		calculate_cache_limits(2, 32)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 500 * ?MiB, 1 * ?MiB,
			(1 * ?MiB) div 64_000, 1, 4_000},
		calculate_cache_limits(64_000, 32)
	).
%% Exercises calculate_cache_limits/2 with a very large configured cache
%% (mining_cache_size_mb = 500_000, i.e. 512_000_000 KiB total).
test_calculate_cache_limits_custom_high() ->
	{ok, Config} = arweave_config:get_env(),
	arweave_config:set_env(Config#config{ mining_cache_size_mb = 500_000 }),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 512_000_000 * ?KiB,
			500_000, 2_000_000_000},
		calculate_cache_limits(1, 0)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 256_000_000 * ?KiB,
			250_000, 1_000_000_000},
		calculate_cache_limits(2, 0)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 1000 * ?MiB, 512_000_000 * ?KiB,
			512_000 * ?KiB, 500, 2_000_000},
		calculate_cache_limits(1000, 0)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 512_000_000 * ?KiB,
			2_000_000, 8_000_000_000},
		calculate_cache_limits(1, 1)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 256_000_000 * ?KiB,
			1_000_000, 4_000_000_000},
		calculate_cache_limits(2, 1)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 250 * ?MiB, 512_000_000 * ?KiB,
			512_000 * ?KiB, 2_000, 8_000_000},
		calculate_cache_limits(1000, 1)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 512_000_000 * ?KiB,
			4_000_000, 16_000_000_000},
		calculate_cache_limits(1, 2)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 256_000_000 * ?KiB,
			2_000_000, 8_000_000_000},
		calculate_cache_limits(2, 2)
	),
	?assertEqual(
		{?IDEAL_STEPS_PER_PARTITION * 128_000 * ?KiB, 512_000_000 * ?KiB,
			512_000 * ?KiB, 4_000, 16_000_000},
		calculate_cache_limits(1000, 2)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 512_000_000 * ?KiB,
			64_000_000, 256_000_000_000},
		calculate_cache_limits(1, 32)
	),
	?assertEqual(
		{?MINIMUM_CACHE_LIMIT_BYTES, 512_000_000 * ?KiB, 256_000_000 * ?KiB,
			32_000_000, 128_000_000_000},
		calculate_cache_limits(2, 32)
	),
	?assertEqual(
		{(?IDEAL_STEPS_PER_PARTITION * 2 * (?RECALL_RANGE_SIZE div 32) * 1000),
			512_000_000 * ?KiB, 512_000 * ?KiB, 64_000, 256_000_000},
		calculate_cache_limits(1000, 32)
	).
================================================
FILE: apps/arweave/src/ar_mining_server_behaviour.erl
================================================
%% @doc Behaviour declaring the control API a mining server implementation
%% must expose.
-module(ar_mining_server_behaviour).

-include("ar.hrl").
-include("ar_mining.hrl").

%% Start mining with the given difficulty pair, merkle rebase threshold and
%% block height.
-callback start_mining({DiffPair, MerkleRebaseThreshold, Height}) -> ok when
	DiffPair :: {non_neg_integer() | infinity, non_neg_integer() | infinity},
	MerkleRebaseThreshold :: non_neg_integer() | infinity,
	Height :: non_neg_integer().

-callback pause() -> ok.

-callback is_paused() -> boolean().

-callback set_difficulty(DiffPair :: {non_neg_integer() | infinity, non_neg_integer() | infinity}) -> ok.

-callback set_merkle_rebase_threshold(Threshold :: non_neg_integer() | infinity) -> ok.

-callback set_height(Height :: integer()) -> ok.

================================================
FILE: apps/arweave/src/ar_mining_stats.erl
================================================
%% @doc gen_server collecting mining statistics (VDF steps, reads, hashes,
%% solutions, peer exchange counts) in an ETS table and periodically
%% producing performance reports.
-module(ar_mining_stats).

-behaviour(gen_server).

-export([start_link/0, start_performance_reports/0, pause_performance_reports/1,
	mining_paused/0, set_storage_module_data_size/6,
	vdf_computed/0, raw_read_rate/2, chunks_read/2, h1_computed/2, h2_computed/2,
	h1_solution/0, h2_solution/0, block_found/0, block_mined_but_orphaned/0,
	h1_sent_to_peer/2, h1_received_from_peer/2,
	h2_sent_to_peer/1, h2_received_from_peer/1, get_partition_data_size/2]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Server state: whether performance reporting is paused and the
%% os:system_time(millisecond) timestamp when the pause expires.
-record(state, {
	pause_performance_reports = false,
	pause_performance_reports_timeout
}).
%% Aggregated performance report assembled by generate_report/6.
-record(report, {
	now,
	vdf_speed = undefined,
	h1_solution = 0,
	h2_solution = 0,
	confirmed_block = 0,
	total_data_size = 0,
	optimal_overall_read_mibps = 0.0,
	optimal_overall_hash_hps = 0.0,
	average_read_mibps = 0.0,
	current_read_mibps = 0.0,
	average_hash_hps = 0.0,
	current_hash_hps = 0.0,
	average_h1_to_peer_hps = 0.0,
	current_h1_to_peer_hps = 0.0,
	average_h1_from_peer_hps = 0.0,
	current_h1_from_peer_hps = 0.0,
	total_h2_to_peer = 0,
	total_h2_from_peer = 0,
	partitions = [],
	peers = []
}).

%% Per-partition slice of the report.
-record(partition_report, {
	partition_number,
	data_size,
	optimal_read_mibps,
	optimal_hash_hps,
	average_read_mibps,
	current_read_mibps,
	average_hash_hps,
	current_hash_hps
}).

%% Per-coordinated-mining-peer slice of the report.
-record(peer_report, {
	peer,
	average_h1_to_peer_hps,
	current_h1_to_peer_hps,
	average_h1_from_peer_hps,
	current_h1_from_peer_hps,
	total_h2_to_peer,
	total_h2_from_peer
}).

%% ETS table structure:
%%
%% {vdf, StartTime, Samples, VDFStepCount}
%% {h1_solution, StartTime, Samples, TotalH1SolutionsFound}
%% {h2_solution, StartTime, Samples, TotalH2SolutionsFound}
%% {confirmed_block, StartTime, Samples, TotalConfirmedBlocksMined}
%% {{partition, PartitionNumber, read, total}, StartTime, Samples, TotalChunksRead}
%% {{partition, PartitionNumber, read, current}, StartTime, Samples, CurrentChunksRead}
%% {{partition, PartitionNumber, h1, total}, StartTime, Samples, TotalH1}
%% {{partition, PartitionNumber, h1, current}, StartTime, Samples, CurrentH1}
%% {{partition, PartitionNumber, h2, total}, StartTime, Samples, TotalH2}
%% {{partition, PartitionNumber, h2, current}, StartTime, Samples, CurrentH2}
%% {total_data_size, TotalBytesPacked}
%% {{partition, PartitionNumber, storage_module, StoreID}, BytesPacked}
%% {{peer, Peer, h1_to_peer, total}, StartTime, Samples, TotalH1sSentToPeer}
%% {{peer, Peer, h1_to_peer, current}, StartTime, Samples, CurrentH1sSentToPeer}
%% {{peer, Peer, h1_from_peer, total}, StartTime, Samples, TotalH1sReceivedFromPeer}
%% {{peer, Peer, h1_from_peer, current}, StartTime, Samples, CurrentH1sReceivedFromPeer}
%% {{peer, Peer, h2_to_peer, total}, StartTime, Samples, TotalH2sSentToPeer}
%% {{peer, Peer, h2_from_peer, total}, StartTime, Samples, TotalH2sReceivedFromPeer}

-define(PERFORMANCE_REPORT_FREQUENCY_MS, 10000).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the gen_server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Reset all collected statistics and schedule the first periodic report.
start_performance_reports() ->
	reset_all_stats(),
	ar_util:cast_after(?PERFORMANCE_REPORT_FREQUENCY_MS, ?MODULE, report_performance).

%% @doc Stop logging performance reports for the given number of milliseconds.
pause_performance_reports(Time) ->
	gen_server:call(?MODULE, {pause_performance_reports, Time}).

%% @doc Record one computed VDF step.
vdf_computed() ->
	vdf_computed(erlang:monotonic_time(millisecond)).

vdf_computed(Now) ->
	increment_count(vdf, 1, Now).

%% @doc Publish the raw disk read rate for a partition as a prometheus gauge.
raw_read_rate(PartitionNumber, ReadRate) ->
	prometheus_gauge:set(mining_rate, [raw_read, PartitionNumber], ReadRate).

%% @doc Record Count chunks read from the given partition (total and current
%% reporting-window counters).
chunks_read(PartitionNumber, Count) ->
	chunks_read(PartitionNumber, Count, erlang:monotonic_time(millisecond)).

chunks_read(PartitionNumber, Count, Now) ->
	increment_count({partition, PartitionNumber, read, total}, Count, Now),
	increment_count({partition, PartitionNumber, read, current}, Count, Now).

%% @doc Record Count H1 hashes computed for the given partition.
h1_computed(PartitionNumber, Count) ->
	h1_computed(PartitionNumber, Count, erlang:monotonic_time(millisecond)).

h1_computed(PartitionNumber, Count, Now) ->
	increment_count({partition, PartitionNumber, h1, total}, Count, Now),
	increment_count({partition, PartitionNumber, h1, current}, Count, Now).

%% @doc Record Count H2 hashes computed for the given partition.
h2_computed(PartitionNumber, Count) ->
	h2_computed(PartitionNumber, Count, erlang:monotonic_time(millisecond)).

h2_computed(PartitionNumber, Count, Now) ->
	increment_count({partition, PartitionNumber, h2, total}, Count, Now),
	increment_count({partition, PartitionNumber, h2, current}, Count, Now).
%% @doc Record H1Count hashes sent to Peer (running total plus the current
%% reporting window).
h1_sent_to_peer(Peer, H1Count) ->
	h1_sent_to_peer(Peer, H1Count, erlang:monotonic_time(millisecond)).

h1_sent_to_peer(Peer, H1Count, Timestamp) ->
	increment_count({peer, Peer, h1_to_peer, total}, H1Count, Timestamp),
	increment_count({peer, Peer, h1_to_peer, current}, H1Count, Timestamp).

%% @doc Record H1Count hashes received from Peer (total plus current window).
h1_received_from_peer(Peer, H1Count) ->
	h1_received_from_peer(Peer, H1Count, erlang:monotonic_time(millisecond)).

h1_received_from_peer(Peer, H1Count, Timestamp) ->
	increment_count({peer, Peer, h1_from_peer, total}, H1Count, Timestamp),
	increment_count({peer, Peer, h1_from_peer, current}, H1Count, Timestamp).

%% @doc Record a single H2 sent to Peer (total counter only).
h2_sent_to_peer(Peer) ->
	h2_sent_to_peer(Peer, erlang:monotonic_time(millisecond)).

h2_sent_to_peer(Peer, Timestamp) ->
	increment_count({peer, Peer, h2_to_peer, total}, 1, Timestamp).

%% @doc Record a single H2 received from Peer (total counter only).
h2_received_from_peer(Peer) ->
	h2_received_from_peer(Peer, erlang:monotonic_time(millisecond)).

h2_received_from_peer(Peer, Timestamp) ->
	increment_count({peer, Peer, h2_from_peer, total}, 1, Timestamp).

%% @doc Record one H1 (single-chunk) solution found.
h1_solution() ->
	h1_solution(erlang:monotonic_time(millisecond)).

h1_solution(Timestamp) ->
	increment_count(h1_solution, 1, Timestamp).

%% @doc Record one H2 (two-chunk) solution found.
h2_solution() ->
	h2_solution(erlang:monotonic_time(millisecond)).

h2_solution(Timestamp) ->
	increment_count(h2_solution, 1, Timestamp).

%% @doc Record one confirmed block mined by this node.
block_found() ->
	block_found(erlang:monotonic_time(millisecond)).

block_found(Timestamp) ->
	increment_count(confirmed_block, 1, Timestamp).

%% @doc Record one block that was mined but later orphaned.
block_mined_but_orphaned() ->
	block_mined_but_orphaned(erlang:monotonic_time(millisecond)).

block_mined_but_orphaned(Timestamp) ->
	increment_count(block_mined_but_orphaned, 1, Timestamp).
%% @doc Recompute the total packed data size from all per-storage-module ETS
%% entries, publish it as a prometheus gauge and cache it under
%% total_data_size. Failures to update the gauge are logged, not raised.
update_total_data_size() ->
	Pattern = { {partition, '_', storage_module, '_', packing, '_'}, '$1' },
	Sizes = [Size || [Size] <- ets:match(?MODULE, Pattern)],
	TotalDataSize = lists:sum(Sizes),
	try
		prometheus_gauge:set(v2_index_data_size, TotalDataSize),
		ets:insert(?MODULE, {total_data_size, TotalDataSize})
	catch
		%% badarg here is treated as "prometheus not started yet".
		error:badarg ->
			?LOG_WARNING([{event, set_total_data_size_failed},
					{reason, prometheus_not_started},
					{data_size, TotalDataSize}]);
		Type:Reason ->
			?LOG_ERROR([{event, set_total_data_size_failed},
					{type, Type}, {reason, Reason},
					{data_size, TotalDataSize}])
	end.

%% @doc Record the packed data size of a storage module, publish the
%% per-module prometheus gauge and refresh the total. Gauge failures are
%% logged (warning for prometheus-not-ready conditions, error otherwise).
set_storage_module_data_size(
		StoreID, Packing, PartitionNumber, StorageModuleSize, StorageModuleIndex,
		DataSize) ->
	StoreIDLabel = ar_storage_module:label(StoreID),
	PackingLabel = ar_storage_module:packing_label(Packing),
	try
		PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing),
		prometheus_gauge:set(v2_index_data_size_by_packing,
				[StoreIDLabel, PackingLabel, PartitionNumber, StorageModuleSize,
						StorageModuleIndex, PackingDifficulty],
				DataSize),
		ets:insert(?MODULE, {
				{partition, PartitionNumber, storage_module, StoreID, packing, Packing},
				DataSize}),
		update_total_data_size()
	catch
		error:badarg ->
			?LOG_WARNING([{event, set_storage_module_data_size_failed},
					{reason, prometheus_not_started},
					{store_id, StoreID},
					{store_id_label, StoreIDLabel},
					{packing, ar_serialize:encode_packing(Packing, true)},
					{packing_label, PackingLabel},
					{partition_number, PartitionNumber},
					{storage_module_size, StorageModuleSize},
					{storage_module_index, StorageModuleIndex},
					{data_size, DataSize}]);
		%% The metric may not be declared yet during early startup.
		error:{unknown_metric,default,v2_index_data_size_by_packing} ->
			?LOG_WARNING([{event, set_storage_module_data_size_failed},
					{reason, prometheus_not_started},
					{store_id, StoreID},
					{store_id_label, StoreIDLabel},
					{packing, ar_serialize:encode_packing(Packing, true)},
					{packing_label, PackingLabel},
					{partition_number, PartitionNumber},
					{storage_module_size, StorageModuleSize},
					{storage_module_index, StorageModuleIndex},
					{data_size, DataSize}]);
		Type:Reason ->
			?LOG_ERROR([{event, set_storage_module_data_size_failed},
					{type, Type}, {reason, Reason},
					{store_id, StoreID},
					{store_id_label, StoreIDLabel},
					{packing, ar_serialize:encode_packing(Packing, true)},
					{packing_label, PackingLabel},
					{partition_number, PartitionNumber},
					{storage_module_size, StorageModuleSize},
					{storage_module_index, StorageModuleIndex},
					{data_size, DataSize}
			])
	end.

%% @doc Clear the published metrics when mining is paused.
mining_paused() ->
	clear_metrics().

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	{ok, #state{}}.

%% Pause performance reporting until now + Time milliseconds.
handle_call({pause_performance_reports, Time}, _From, State) ->
	Now = os:system_time(millisecond),
	Timeout = Now + Time,
	{reply, ok, State#state{ pause_performance_reports = true,
			pause_performance_reports_timeout = Timeout }};

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% While paused: either the pause has expired (re-cast immediately and clear
%% the flag) or re-schedule the next tick without reporting.
handle_cast(report_performance, #state{ pause_performance_reports = true,
		pause_performance_reports_timeout = Timeout } = State) ->
	Now = os:system_time(millisecond),
	case Now > Timeout of
		true ->
			gen_server:cast(?MODULE, report_performance),
			{noreply, State#state{ pause_performance_reports = false }};
		false ->
			ar_util:cast_after(?PERFORMANCE_REPORT_FREQUENCY_MS, ?MODULE,
					report_performance),
			{noreply, State}
	end;

handle_cast(report_performance, State) ->
	report_performance(),
	ar_util:cast_after(?PERFORMANCE_REPORT_FREQUENCY_MS, ?MODULE, report_performance),
	{noreply, State};

handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.
%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Drop every collected statistic.
reset_all_stats() ->
	ets:delete_all_objects(?MODULE).

%% @doc Atomically increments the count for ETS records stored in the format:
%% {Key, StartTimestamp, Samples, Count}
%% If the Key doesn't exist, it is initialized with the current timestamp,
%% zero samples and a count of zero before the update is applied.
increment_count(_Key, 0, _Now) ->
	ok;
increment_count(Key, Amount, Now) ->
	ets:update_counter(?MODULE, Key,
		[{3, 1}, {4, Amount}], %% increment samples by 1, count by Amount
		{Key, Now, 0, 0} %% initialize timestamp, samples, count
	).

%% @doc Restart the counter: fresh start timestamp, zero samples and count.
reset_count(Key, Now) ->
	ets:insert(?MODULE, [{Key, Now, 0, 0}]).

%% @doc Count accumulated per second since the counter's start timestamp.
get_average_count_by_time(Key, Now) ->
	{_AvgSamples, AvgCount} = get_average_by_time(Key, Now),
	AvgCount.

%% @doc Samples recorded per second since the counter's start timestamp.
get_average_samples_by_time(Key, Now) ->
	{AvgSamples, _AvgCount} = get_average_by_time(Key, Now),
	AvgSamples.

%% @doc {Samples/s, Count/s} since the counter started; {0.0, 0.0} when the
%% key is missing or no time has elapsed (avoids division by zero).
get_average_by_time(Key, Now) ->
	case ets:lookup(?MODULE, Key) of
		[] -> {0.0, 0.0};
		[{_, Start, _Samples, _Count}] when Now - Start =:= 0 -> {0.0, 0.0};
		[{_, Start, Samples, Count}] ->
			Elapsed = (Now - Start) / 1000,
			{Samples / Elapsed, Count / Elapsed}
	end.

%% @doc Average count per recorded sample; 0.0 when missing or no samples.
get_average_by_samples(Key) ->
	case ets:lookup(?MODULE, Key) of
		[] -> 0.0;
		[{_, _Start, Samples, _Count}] when Samples == 0 -> 0.0;
		[{_, _Start, Samples, Count}] ->
			Count / Samples
	end.

%% @doc Raw accumulated count for Key (0 when missing).
get_count(Key) ->
	case ets:lookup(?MODULE, Key) of
		[] -> 0;
		[{_, _Start, _Samples, Count}] -> Count
	end.

%% @doc Start timestamp for Key (undefined when missing).
get_start(Key) ->
	case ets:lookup(?MODULE, Key) of
		[] -> undefined;
		[{_, Start, _Samples, _Count}] -> Start
	end.

get_hashrate_divisor(PackingDifficulty) ->
	%% Raw hashrate varies based on packing difficulty. Assuming a spora_2_6
	%% base hashrate of 404, the raw hashrate at different packing
	%% difficulties is:
	%% spora_2_6: 404
	%% composite, 1: 404 * 32 / 4 / 1 = 3232
	%% composite, 2: 404 * 32 / 4 / 2 = 1616
	%% composite, 32: 404 * 32 / 4 / 32 = 101
	%%
	%% Basically:
	%% - composite packing generates 32x the number of hashes, but they are
	%%   compared against a higher solution difficulty
	%% - composite uses a 4x lower read recall range which *reduces* the number
	%%   of hashes 4-fold, and increases the solution difficulty
	%% - finally as the difficulty increases, the number of hashes generated
	%%   decreases as does the solution difficulty
	%%
	%% This function returns a divisor we can use to normalize the hashrate
	%% to 404.
	case PackingDifficulty of
		0 -> 1.0;
		_ -> (32.0 / 4.0) / PackingDifficulty
	end.

%% @doc Total packed data size for Packing, excluding the tip partition
%% (the partition past the current weave end cannot be fully mined yet).
get_total_minable_data_size(Packing) ->
	Pattern = { {partition, '_', storage_module, '_', packing, Packing}, '$1' },
	Sizes = [Size || [Size] <- ets:match(?MODULE, Pattern)],
	TotalDataSize = lists:sum(Sizes),
	WeaveSize = ar_node:get_weave_size(),
	TipPartition = ar_node:get_max_partition_number(WeaveSize) + 1,
	TipPartitionSize = get_partition_data_size(TipPartition, Packing),
	?LOG_DEBUG([{event, get_total_minable_data_size},
		{total_data_size, TotalDataSize},
		{weave_size, WeaveSize},
		{tip_partition, TipPartition},
		{tip_partition_size, TipPartitionSize},
		{total_minable_data_size, TotalDataSize - TipPartitionSize}
	]),
	TotalDataSize - TipPartitionSize.

%% @doc Sum Stat's counts over all partitions or peers (PartitionPeer is the
%% atom partition or peer; TotalCurrent selects the total/current counter).
get_overall_total(PartitionPeer, Stat, TotalCurrent) ->
	Pattern = {{PartitionPeer, '_', Stat, TotalCurrent}, '_', '_', '$1'},
	Matches = ets:match(?MODULE, Pattern),
	Counts = [Count || [Count] <- Matches],
	lists:sum(Counts).

%% @doc Packed data size recorded for the given partition and packing.
get_partition_data_size(PartitionNumber, Packing) ->
	Pattern = {{partition, PartitionNumber, storage_module, '_', packing, Packing }, '$1'},
	Sizes = [Size || [Size] <- ets:match(?MODULE, Pattern)],
	lists:sum(Sizes).
%% @doc Seconds per VDF step since the last reset, or undefined when no steps
%% were recorded. Resets the vdf counter as a side effect when a speed is
%% returned.
vdf_speed(Now) ->
	case get_average_count_by_time(vdf, Now) of
		0.0 ->
			undefined;
		StepsPerSecond ->
			reset_count(vdf, Now),
			1.0 / StepsPerSecond
	end.

%% @doc Normalized hashes per second for a partition: H1 is de-weighted by the
%% PoA1 difficulty multiplier, and the sum is normalized by the
%% packing-difficulty divisor (see get_hashrate_divisor/1).
get_hash_hps(PoA1Multiplier, Packing, PartitionNumber, TotalCurrent, Now) ->
	H1 = get_average_count_by_time({partition, PartitionNumber, h1, TotalCurrent}, Now),
	H2 = get_average_count_by_time({partition, PartitionNumber, h2, TotalCurrent}, Now),
	PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing),
	((H1 / PoA1Multiplier) + H2) / get_hashrate_divisor(PackingDifficulty).

%% @doc calculate the maximum hash rate (in MiB per second read from disk) for
%% the given VDF speed at the current weave size.
optimal_partition_read_mibps(_Packing, undefined, _PartitionDataSize,
		_TotalDataSize, _WeaveSize) ->
	0.0;
optimal_partition_read_mibps(Packing, VDFSpeed, PartitionDataSize,
		TotalDataSize, WeaveSize) ->
	PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing),
	RecallRangeSize = ar_block:get_recall_range_size(PackingDifficulty) / ?MiB,
	%% One recall range per VDF step, scaled by how much of the partition is
	%% stored; the second factor adds the H2 range when (part of) the weave is
	%% stored locally.
	(RecallRangeSize / VDFSpeed)
		* min(1.0, (PartitionDataSize / ar_block:partition_size()))
		* (1 + min(1.0, (TotalDataSize / WeaveSize))).

%% @doc calculate the maximum hash rate (in hashes per second) for the given
%% VDF speed at the current weave size.
optimal_partition_hash_hps(_PoA1Multiplier, undefined, _PartitionDataSize,
		_TotalDataSize, _WeaveSize) ->
	0.0;
optimal_partition_hash_hps(PoA1Multiplier, VDFSpeed, PartitionDataSize,
		TotalDataSize, WeaveSize) ->
	%% NOTE(review): a base of 400.0 hashes per step is used here, while the
	%% get_hashrate_divisor/1 comment assumes a spora_2_6 base of 404 —
	%% confirm whether the discrepancy is intentional.
	BasePartitionHashes = (400.0 / VDFSpeed)
			* min(1.0, (PartitionDataSize / ar_block:partition_size())),
	H1Optimal = BasePartitionHashes / PoA1Multiplier,
	H2Optimal = BasePartitionHashes * min(1.0, (TotalDataSize / WeaveSize)),
	H1Optimal + H2Optimal.
%% @doc Build a #report{} snapshot from the node's current mining state:
%% height, configured packing, locally mined partitions, coordinated-mining
%% peers, weave size and the current monotonic timestamp.
generate_report() ->
	{ok, Config} = arweave_config:get_env(),
	CurrentHeight = ar_node:get_height(),
	MiningPacking = ar_mining_io:get_packing(),
	MiningPartitions = ar_mining_io:get_partitions(),
	generate_report(CurrentHeight, MiningPacking, MiningPartitions,
		Config#config.cm_peers, ar_node:get_weave_size(),
		erlang:monotonic_time(millisecond)).

%% @doc Assemble the full report. With no partitions configured only the
%% timestamp is populated; otherwise the node-wide counters are read first and
%% then the per-partition and per-peer sections are folded in.
generate_report(_Height, _Packing, [], _Peers, _WeaveSize, Now) ->
	#report{ now = Now };
generate_report(Height, Packing, Partitions, Peers, WeaveSize, Now) ->
	PoA1Multiplier = ar_difficulty:poa1_diff_multiplier(Height),
	%% vdf_speed/1 resets the VDF counter, so it must run once per report.
	VDFSpeed = vdf_speed(Now),
	MinableDataSize = get_total_minable_data_size(Packing),
	BaseReport = #report{
		now = Now,
		vdf_speed = VDFSpeed,
		h1_solution = get_count(h1_solution),
		h2_solution = get_count(h2_solution),
		confirmed_block = get_count(confirmed_block),
		total_data_size = MinableDataSize,
		total_h2_to_peer = get_overall_total(peer, h2_to_peer, total),
		total_h2_from_peer = get_overall_total(peer, h2_from_peer, total)
	},
	WithPartitions = generate_partition_reports(
		PoA1Multiplier, Partitions, Packing, BaseReport, WeaveSize),
	generate_peer_reports(Peers, WithPartitions).

%% @doc Fold a per-partition report for every locally mined partition into the
%% accumulated report.
generate_partition_reports(PoA1Multiplier, Partitions, Packing, Report, WeaveSize) ->
	lists:foldr(
		fun({PartitionNumber, _MiningAddr, _PackingDifficulty}, ReportAcc) ->
			generate_partition_report(
				PoA1Multiplier, PartitionNumber, Packing, ReportAcc, WeaveSize)
		end,
		Report,
		Partitions
	).
%% @doc Compute the #partition_report{} for PartitionNumber, append it to the
%% report's partitions list and fold its rates into the report-wide totals.
%% Resets the partition's "current" interval counters (read/h1/h2) as a side
%% effect so the next report measures a fresh interval.
generate_partition_report(
		PoA1Multiplier, PartitionNumber, Packing, Report, WeaveSize) ->
	#report{
		now = Now,
		vdf_speed = VDFSpeed,
		total_data_size = TotalDataSize,
		partitions = Partitions,
		optimal_overall_read_mibps = OptimalOverallRead,
		optimal_overall_hash_hps = OptimalOverallHash,
		average_read_mibps = AverageRead,
		current_read_mibps = CurrentRead,
		average_hash_hps = AverageHash,
		current_hash_hps = CurrentHash
	} = Report,
	DataSize = get_partition_data_size(PartitionNumber, Packing),
	PartitionReport = #partition_report{
		partition_number = PartitionNumber,
		data_size = DataSize,
		%% NOTE(review): the read counter is divided by 4 to obtain MiB/s —
		%% presumably the counter is kept in 256 KiB chunk units; confirm
		%% against the code that increments it.
		average_read_mibps = get_average_count_by_time(
			{partition, PartitionNumber, read, total}, Now) / 4,
		current_read_mibps = get_average_count_by_time(
			{partition, PartitionNumber, read, current}, Now) / 4,
		average_hash_hps = get_hash_hps(
			PoA1Multiplier, Packing, PartitionNumber, total, Now),
		current_hash_hps = get_hash_hps(
			PoA1Multiplier, Packing, PartitionNumber, current, Now),
		optimal_read_mibps = optimal_partition_read_mibps(
			Packing, VDFSpeed, DataSize, TotalDataSize, WeaveSize),
		optimal_hash_hps = optimal_partition_hash_hps(
			PoA1Multiplier, VDFSpeed, DataSize, TotalDataSize, WeaveSize)
	},
	%% Reset the interval counters only after the report has been built from them.
	reset_count({partition, PartitionNumber, read, current}, Now),
	reset_count({partition, PartitionNumber, h1, current}, Now),
	reset_count({partition, PartitionNumber, h2, current}, Now),
	Report#report{
		optimal_overall_read_mibps = OptimalOverallRead
			+ PartitionReport#partition_report.optimal_read_mibps,
		optimal_overall_hash_hps = OptimalOverallHash
			+ PartitionReport#partition_report.optimal_hash_hps,
		average_read_mibps = AverageRead
			+ PartitionReport#partition_report.average_read_mibps,
		current_read_mibps = CurrentRead
			+ PartitionReport#partition_report.current_read_mibps,
		average_hash_hps = AverageHash
			+ PartitionReport#partition_report.average_hash_hps,
		current_hash_hps = CurrentHash
			+ PartitionReport#partition_report.current_hash_hps,
		partitions = Partitions ++ [PartitionReport]
	}.
%% @doc Fold a per-peer report for every coordinated-mining peer into Report.
generate_peer_reports(Peers, Report) ->
	lists:foldr(
		fun(Peer, Acc) ->
			generate_peer_report(Peer, Acc)
		end,
		Report,
		Peers
	).

%% @doc Build the #peer_report{} for Peer, append it to the report's peers list
%% and add its H1 rates to the cluster-wide totals. Resets the peer's "current"
%% interval counters as a side effect.
generate_peer_report(Peer, Report) ->
	#report{
		now = Now,
		peers = Peers,
		average_h1_to_peer_hps = AverageH1ToPeer,
		current_h1_to_peer_hps = CurrentH1ToPeer,
		average_h1_from_peer_hps = AverageH1FromPeer,
		current_h1_from_peer_hps = CurrentH1FromPeer
	} = Report,
	PeerReport = #peer_report{
		peer = Peer,
		average_h1_to_peer_hps = get_average_count_by_time({peer, Peer, h1_to_peer, total}, Now),
		current_h1_to_peer_hps = get_average_count_by_time({peer, Peer, h1_to_peer, current}, Now),
		average_h1_from_peer_hps = get_average_count_by_time({peer, Peer, h1_from_peer, total}, Now),
		current_h1_from_peer_hps = get_average_count_by_time({peer, Peer, h1_from_peer, current}, Now),
		total_h2_to_peer = get_count({peer, Peer, h2_to_peer, total}),
		total_h2_from_peer = get_count({peer, Peer, h2_from_peer, total})
	},
	%% Reset the interval counters only after they have been read.
	reset_count({peer, Peer, h1_to_peer, current}, Now),
	reset_count({peer, Peer, h1_from_peer, current}, Now),
	Report#report{
		peers = Peers ++ [PeerReport],
		average_h1_to_peer_hps = AverageH1ToPeer
			+ PeerReport#peer_report.average_h1_to_peer_hps,
		current_h1_to_peer_hps = CurrentH1ToPeer
			+ PeerReport#peer_report.current_h1_to_peer_hps,
		average_h1_from_peer_hps = AverageH1FromPeer
			+ PeerReport#peer_report.average_h1_from_peer_hps,
		current_h1_from_peer_hps = CurrentH1FromPeer
			+ PeerReport#peer_report.current_h1_from_peer_hps
	}.

%% @doc Generate the performance report, publish its metrics and print/log it.
%% Does nothing when no partitions are configured for mining.
report_performance() ->
	Report = generate_report(),
	case Report#report.partitions of
		[] ->
			ok;
		_ ->
			set_metrics(Report),
			ReportString = format_report(Report),
			ar:console("~s", [ReportString]),
			log_report(ReportString)
	end.

%% @doc Log the multi-line report string one line at a time so each line is a
%% separate log entry.
log_report(ReportString) ->
	Lines = string:tokens(lists:flatten(ReportString), "\n"),
	log_report_lines(Lines).

log_report_lines([]) ->
	ok;
log_report_lines([Line | Lines]) ->
	?LOG_INFO(Line),
	log_report_lines(Lines).
%% @doc Publish the report's node-wide rates to the Prometheus gauges, then the
%% per-partition and per-peer gauges.
set_metrics(Report) ->
	prometheus_gauge:set(mining_rate, [read, total],
		Report#report.current_read_mibps),
	prometheus_gauge:set(mining_rate, [hash, total],
		Report#report.current_hash_hps),
	prometheus_gauge:set(mining_rate, [ideal_read, total],
		Report#report.optimal_overall_read_mibps),
	prometheus_gauge:set(mining_rate, [ideal_hash, total],
		Report#report.optimal_overall_hash_hps),
	prometheus_gauge:set(cm_h1_rate, [total, to],
		Report#report.current_h1_to_peer_hps),
	prometheus_gauge:set(cm_h1_rate, [total, from],
		Report#report.current_h1_from_peer_hps),
	prometheus_gauge:set(cm_h2_count, [total, to],
		Report#report.total_h2_to_peer),
	prometheus_gauge:set(cm_h2_count, [total, from],
		Report#report.total_h2_from_peer),
	set_partition_metrics(Report#report.partitions),
	set_peer_metrics(Report#report.peers).

%% @doc Publish the per-partition gauges for every partition report.
set_partition_metrics(PartitionReports) ->
	lists:foreach(
		fun(PartitionReport) ->
			Label = PartitionReport#partition_report.partition_number,
			prometheus_gauge:set(mining_rate, [read, Label],
				PartitionReport#partition_report.current_read_mibps),
			prometheus_gauge:set(mining_rate, [hash, Label],
				PartitionReport#partition_report.current_hash_hps),
			prometheus_gauge:set(mining_rate, [ideal_read, Label],
				PartitionReport#partition_report.optimal_read_mibps),
			prometheus_gauge:set(mining_rate, [ideal_hash, Label],
				PartitionReport#partition_report.optimal_hash_hps)
		end,
		PartitionReports
	).
%% @doc Publish the per-peer gauges for every peer report.
set_peer_metrics([]) ->
	ok;
set_peer_metrics([PeerReport | PeerReports]) ->
	Peer = ar_util:format_peer(PeerReport#peer_report.peer),
	prometheus_gauge:set(cm_h1_rate, [Peer, to],
		PeerReport#peer_report.current_h1_to_peer_hps),
	prometheus_gauge:set(cm_h1_rate, [Peer, from],
		PeerReport#peer_report.current_h1_from_peer_hps),
	prometheus_gauge:set(cm_h2_count, [Peer, to],
		PeerReport#peer_report.total_h2_to_peer),
	prometheus_gauge:set(cm_h2_count, [Peer, from],
		PeerReport#peer_report.total_h2_from_peer),
	set_peer_metrics(PeerReports).

%% @doc Reset all mining gauges to 0 (e.g. when mining stops).
clear_metrics() ->
	Report = generate_report(),
	prometheus_gauge:set(mining_rate, [read, total], 0),
	prometheus_gauge:set(mining_rate, [hash, total], 0),
	%% Bug fix: clear the same label sets that set_metrics/1 populates
	%% (ideal_read / ideal_hash). Previously the never-set label [ideal, total]
	%% was zeroed, leaving the real ideal gauges stale.
	prometheus_gauge:set(mining_rate, [ideal_read, total], 0),
	prometheus_gauge:set(mining_rate, [ideal_hash, total], 0),
	prometheus_gauge:set(cm_h1_rate, [total, to], 0),
	prometheus_gauge:set(cm_h1_rate, [total, from], 0),
	prometheus_gauge:set(cm_h2_count, [total, to], 0),
	prometheus_gauge:set(cm_h2_count, [total, from], 0),
	clear_partition_metrics(Report#report.partitions),
	clear_peer_metrics(Report#report.peers).

%% @doc Reset the per-partition gauges set by set_partition_metrics/1.
clear_partition_metrics([]) ->
	ok;
clear_partition_metrics([PartitionReport | PartitionReports]) ->
	PartitionNumber = PartitionReport#partition_report.partition_number,
	prometheus_gauge:set(mining_rate, [read, PartitionNumber], 0),
	prometheus_gauge:set(mining_rate, [hash, PartitionNumber], 0),
	%% Bug fix: same label mismatch as in clear_metrics/0 — zero the labels
	%% that set_partition_metrics/1 actually sets.
	prometheus_gauge:set(mining_rate, [ideal_read, PartitionNumber], 0),
	prometheus_gauge:set(mining_rate, [ideal_hash, PartitionNumber], 0),
	clear_partition_metrics(PartitionReports).

%% @doc Reset the per-peer gauges set by set_peer_metrics/1.
clear_peer_metrics([]) ->
	ok;
clear_peer_metrics([PeerReport | PeerReports]) ->
	Peer = ar_util:format_peer(PeerReport#peer_report.peer),
	prometheus_gauge:set(cm_h1_rate, [Peer, to], 0),
	prometheus_gauge:set(cm_h1_rate, [Peer, from], 0),
	prometheus_gauge:set(cm_h2_count, [Peer, to], 0),
	prometheus_gauge:set(cm_h2_count, [Peer, from], 0),
	clear_peer_metrics(PeerReports).

%% @doc Render the report as an iolist using the current weave size.
format_report(Report) ->
	format_report(Report, ar_node:get_weave_size()).
%% @doc Render the full performance report: a summary preamble followed by the
%% local partition table and the coordinated-mining peer table.
format_report(Report, WeaveSize) ->
	Summary = io_lib:format(
		"================================================= Mining Performance Report =================================================\n"
		"\n"
		"VDF Speed: ~s\n"
		"H1 Solutions: ~B\n"
		"H2 Solutions: ~B\n"
		"Confirmed Blocks: ~B\n"
		"\n",
		[format_vdf_speed(Report#report.vdf_speed),
			Report#report.h1_solution,
			Report#report.h2_solution,
			Report#report.confirmed_block]
	),
	LocalTable = format_partition_report(Report, WeaveSize),
	ClusterTable = format_peer_report(Report),
	io_lib:format("\n~s~s~s", [Summary, LocalTable, ClusterTable]).

%% @doc Render the per-partition table: header, a "Total" summary row, one row
%% per partition, and a closing divider.
format_partition_report(Report, WeaveSize) ->
	TableHeader =
		"Local mining stats:\n"
		"+-----------+-----------+----------+---------------+---------------+---------------+------------+------------+--------------+\n"
		"| Partition | Data Size | % of Max | Read (Cur) | Read (Avg) | Read (Ideal) | Hash (Cur) | Hash (Avg) | Hash (Ideal) |\n"
		"+-----------+-----------+----------+---------------+---------------+---------------+------------+------------+--------------+\n",
	SummaryRow = format_partition_total_row(Report, WeaveSize),
	Rows = format_partition_rows(Report#report.partitions),
	TableFooter =
		"+-----------+-----------+----------+---------------+---------------+---------------+------------+------------+--------------+\n",
	io_lib:format("~s~s~s~s", [TableHeader, SummaryRow, Rows, TableFooter]).
%% @doc Render the "Total" summary row of the partition table.
format_partition_total_row(Report, WeaveSize) ->
	TotalDataSize = Report#report.total_data_size,
	SizeTiB = TotalDataSize / ?TiB,
	WeavePct = floor((TotalDataSize / WeaveSize) * 100),
	io_lib:format(
		"| Total | ~5.1f TiB | ~6.B % "
		"| ~7.1f MiB/s | ~7.1f MiB/s | ~7.1f MiB/s "
		"| ~6B h/s | ~6B h/s | ~8B h/s |\n",
		[SizeTiB, WeavePct,
			Report#report.current_read_mibps,
			Report#report.average_read_mibps,
			Report#report.optimal_overall_read_mibps,
			floor(Report#report.current_hash_hps),
			floor(Report#report.average_hash_hps),
			floor(Report#report.optimal_overall_hash_hps)]).

%% @doc Render one row per partition report. The partitions list is stored in
%% the reverse of display order, so reverse it here ([] and "" are the same
%% term in Erlang, preserving the empty-list result).
format_partition_rows(PartitionReports) ->
	lists:reverse([format_partition_row(R) || R <- PartitionReports]).

%% @doc Render a single partition's row of the table.
format_partition_row(PartitionReport) ->
	DataSize = PartitionReport#partition_report.data_size,
	SizeTiB = DataSize / ?TiB,
	PartitionPct = floor((DataSize / ar_block:partition_size()) * 100),
	io_lib:format(
		"| ~9.B | ~5.1f TiB | ~6.B % "
		"| ~7.1f MiB/s | ~7.1f MiB/s | ~7.1f MiB/s "
		"| ~6B h/s | ~6B h/s | ~8B h/s |\n",
		[PartitionReport#partition_report.partition_number,
			SizeTiB, PartitionPct,
			PartitionReport#partition_report.current_read_mibps,
			PartitionReport#partition_report.average_read_mibps,
			PartitionReport#partition_report.optimal_read_mibps,
			floor(PartitionReport#partition_report.current_hash_hps),
			floor(PartitionReport#partition_report.average_hash_hps),
			floor(PartitionReport#partition_report.optimal_hash_hps)]).
%% @doc Render the coordinated-mining peer table; empty output when there are
%% no peers.
format_peer_report(#report{ peers = [] }) ->
	"";
format_peer_report(Report) ->
	TableHeader =
		"\n"
		"Coordinated mining cluster stats:\n"
		"+----------------------+--------------+--------------+-------------+-------------+--------+--------+\n"
		"| Peer | H1 Out (Cur) | H1 Out (Avg) | H1 In (Cur) | H1 In (Avg) | H2 Out | H2 In |\n"
		"+----------------------+--------------+--------------+-------------+-------------+--------+--------+\n",
	SummaryRow = format_peer_total_row(Report),
	Rows = format_peer_rows(Report#report.peers),
	TableFooter =
		"+----------------------+--------------+--------------+-------------+-------------+--------+--------+\n",
	io_lib:format("~s~s~s~s", [TableHeader, SummaryRow, Rows, TableFooter]).

%% @doc Render the "All" summary row of the peer table.
format_peer_total_row(Report) ->
	io_lib:format(
		"| All | ~8B h/s | ~8B h/s | ~7B h/s | ~7B h/s | ~6B | ~6B |\n",
		[floor(Report#report.current_h1_to_peer_hps),
			floor(Report#report.average_h1_to_peer_hps),
			floor(Report#report.current_h1_from_peer_hps),
			floor(Report#report.average_h1_from_peer_hps),
			Report#report.total_h2_to_peer,
			Report#report.total_h2_from_peer
		]).

%% @doc Render one row per peer report. The peers list is stored in the
%% reverse of display order, so reverse it here ([] and "" are the same term).
format_peer_rows(PeerReports) ->
	lists:reverse([format_peer_row(R) || R <- PeerReports]).

%% @doc Render a single peer's row of the table.
format_peer_row(PeerReport) ->
	io_lib:format(
		"| ~20s | ~8B h/s | ~8B h/s | ~7B h/s | ~7B h/s | ~6B | ~6B |\n",
		[ar_util:format_peer(PeerReport#peer_report.peer),
			floor(PeerReport#peer_report.current_h1_to_peer_hps),
			floor(PeerReport#peer_report.average_h1_to_peer_hps),
			floor(PeerReport#peer_report.current_h1_from_peer_hps),
			floor(PeerReport#peer_report.average_h1_from_peer_hps),
			PeerReport#peer_report.total_h2_to_peer,
			PeerReport#peer_report.total_h2_from_peer
		]).

%% @doc Render the VDF speed column value; undefined when no steps were seen.
format_vdf_speed(undefined) ->
	" undefined";
format_vdf_speed(VDFSpeed) ->
	io_lib:format("~5.2f s", [VDFSpeed]).
%%%===================================================================
%%% Tests
%%%===================================================================

%% @doc EUnit fixture list. Every test mocks ar_block:partition_size/0 with a
%% small value (2 MiB); the last test additionally mocks the PoA1 difficulty
%% multiplier to 2.
mining_stats_test_() ->
	PartitionSizeMock = {ar_block, partition_size, fun() -> 2097152 end},
	DefaultMockTests = [
		fun test_read_stats/0,
		fun test_h1_stats/0,
		fun test_h2_stats/0,
		fun test_vdf_stats/0,
		fun test_data_size_stats/0,
		fun test_h1_sent_to_peer_stats/0,
		fun test_h1_received_from_peer_stats/0,
		fun test_h2_peer_stats/0,
		fun test_optimal_stats_poa1_multiple_1/0,
		fun test_optimal_stats_poa1_multiple_2/0,
		fun test_report_poa1_multiple_1/0
	],
	[ar_test_node:test_with_mocked_functions([PartitionSizeMock], TestFun)
		|| TestFun <- DefaultMockTests]
	++ [ar_test_node:test_with_mocked_functions(
		[
			PartitionSizeMock,
			{ar_difficulty, poa1_diff_multiplier, fun(_) -> 2 end}
		],
		fun test_report_poa1_multiple_2/0)].

%% The three local-counter stats share one parameterized test body.
test_read_stats() ->
	test_local_stats(fun chunks_read/2, read).

test_h1_stats() ->
	test_local_stats(fun h1_computed/2, h1).

test_h2_stats() ->
	test_local_stats(fun h2_computed/2, h2).
%% @doc Shared body for the read/h1/h2 counter tests: record events for
%% partitions 1 and 2, then check the time-averaged and sample-averaged rates,
%% the effect of resetting a single "current" counter, and finally a full reset.
test_local_stats(Fun, Stat) ->
	ar_mining_stats:pause_performance_reports(120000),
	reset_all_stats(),
	Fun(1, 1),
	TotalStart1 = get_start({partition, 1, Stat, total}),
	CurrentStart1 = get_start({partition, 1, Stat, current}),
	%% Sleep so partition 2's counters start measurably later than partition 1's.
	timer:sleep(1000),
	Fun(1, 1),
	Fun(1, 1),
	Fun(2, 1),
	TotalStart2 = get_start({partition, 2, Stat, total}),
	CurrentStart2 = get_start({partition, 2, Stat, current}),
	Fun(2, 1),
	?assert(TotalStart1 /= TotalStart2),
	?assert(CurrentStart1 /= CurrentStart2),
	%% Zero elapsed time yields a 0.0 rate.
	?assertEqual(0.0, get_average_count_by_time({partition, 1, Stat, total}, TotalStart1)),
	?assertEqual(0.0, get_average_count_by_time({partition, 1, Stat, current}, CurrentStart1)),
	?assertEqual(0.0, get_average_count_by_time({partition, 2, Stat, total}, TotalStart2)),
	?assertEqual(0.0, get_average_count_by_time({partition, 2, Stat, current}, CurrentStart2)),
	%% Partition 1 recorded 3 events, partition 2 recorded 2.
	?assertEqual(6.0, get_average_count_by_time({partition, 1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.25, get_average_count_by_time({partition, 1, Stat, current}, CurrentStart1 + 12000)),
	?assertEqual(0.5, get_average_count_by_time({partition, 2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(8.0, get_average_count_by_time({partition, 2, Stat, current}, CurrentStart2 + 250)),
	%% Partition 3 never recorded anything.
	?assertEqual(0.0, get_average_count_by_time({partition, 3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({partition, 3, Stat, current}, TotalStart1 + 250)),
	%% Each event carried value 1, so samples-per-second matches count-per-second.
	?assertEqual(6.0, get_average_samples_by_time({partition, 1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.25, get_average_samples_by_time({partition, 1, Stat, current}, CurrentStart1 + 12000)),
	?assertEqual(0.5, get_average_samples_by_time({partition, 2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(8.0, get_average_samples_by_time({partition, 2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 3, Stat, current}, TotalStart1 + 250)),
	?assertEqual(1.0, get_average_by_samples({partition, 1, Stat, total})),
	?assertEqual(1.0, get_average_by_samples({partition, 1, Stat, current})),
	?assertEqual(1.0, get_average_by_samples({partition, 2, Stat, total})),
	?assertEqual(1.0, get_average_by_samples({partition, 2, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({partition, 3, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({partition, 3, Stat, current})),
	%% Resetting partition 1's "current" counter leaves its "total" counter and
	%% partition 2 untouched.
	Now = CurrentStart2 + 1000,
	reset_count({partition, 1, Stat, current}, Now),
	?assertEqual(Now, get_start({partition, 1, Stat, current})),
	?assertEqual(6.0, get_average_count_by_time({partition, 1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.0, get_average_count_by_time({partition, 1, Stat, current}, Now + 12000)),
	?assertEqual(0.5, get_average_count_by_time({partition, 2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(8.0, get_average_count_by_time({partition, 2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_count_by_time({partition, 3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({partition, 3, Stat, current}, CurrentStart1 + 250)),
	?assertEqual(6.0, get_average_samples_by_time({partition, 1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 1, Stat, current}, Now + 12000)),
	?assertEqual(0.5, get_average_samples_by_time({partition, 2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(8.0, get_average_samples_by_time({partition, 2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 3, Stat, current}, CurrentStart1 + 250)),
	?assertEqual(1.0, get_average_by_samples({partition, 1, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({partition, 1, Stat, current})),
	?assertEqual(1.0, get_average_by_samples({partition, 2, Stat, total})),
	?assertEqual(1.0, get_average_by_samples({partition, 2, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({partition, 3, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({partition, 3, Stat, current})),
	%% A full reset clears every counter.
	reset_all_stats(),
	?assertEqual(0.0, get_average_count_by_time({partition, 1, Stat, total}, Now + 500)),
	?assertEqual(0.0, get_average_count_by_time({partition, 1, Stat, current}, Now + 12000)),
	?assertEqual(0.0, get_average_count_by_time({partition, 2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({partition, 2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_count_by_time({partition, 3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({partition, 3, Stat, current}, TotalStart1 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 1, Stat, total}, Now + 500)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 1, Stat, current}, Now + 12000)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({partition, 3, Stat, current}, TotalStart1 + 250)),
	?assertEqual(0.0, get_average_by_samples({partition, 1, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({partition, 1, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({partition, 2, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({partition, 2, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({partition, 3, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({partition, 3, Stat, current})).
%% @doc Check the VDF step counter and the derived seconds-per-step speed,
%% including the counter reset performed by vdf_speed/1.
test_vdf_stats() ->
	ar_mining_stats:pause_performance_reports(120000),
	reset_all_stats(),
	ar_mining_stats:vdf_computed(),
	Start = get_start(vdf),
	ar_mining_stats:vdf_computed(),
	ar_mining_stats:vdf_computed(),
	%% 3 steps recorded; at Start the elapsed time is zero so the rate is 0.0.
	?assertEqual(0.0, get_average_count_by_time(vdf, Start)),
	?assertEqual(6.0, get_average_count_by_time(vdf, Start + 500)),
	?assertEqual(0.0, get_average_samples_by_time(vdf, Start)),
	?assertEqual(6.0, get_average_samples_by_time(vdf, Start + 500)),
	?assertEqual(1.0, get_average_by_samples(vdf)),
	Now = Start + 1000,
	%% 3 steps over 1 second -> 1/3 s per step; vdf_speed/1 resets the counter.
	?assertEqual(1.0/3.0, vdf_speed(Now)),
	?assertEqual(Now, get_start(vdf)),
	?assertEqual(undefined, vdf_speed(Now)),
	?assertEqual(0.0, get_average_count_by_time(vdf, Now + 500)),
	?assertEqual(0.0, get_average_samples_by_time(vdf, Now + 500)),
	?assertEqual(0.0, get_average_by_samples(vdf)),
	?assertEqual(undefined, vdf_speed(Now + 500)),
	ar_mining_stats:vdf_computed(),
	Start2 = get_start(vdf),
	%% 1 step over 0.5 seconds -> 0.5 s per step.
	?assertEqual(0.5, vdf_speed(Start2 + 500)),
	ar_mining_stats:vdf_computed(),
	reset_all_stats(),
	?assertEqual(undefined, get_start(vdf)),
	?assertEqual(0.0, get_average_count_by_time(vdf, 1000)),
	?assertEqual(0.0, get_average_samples_by_time(vdf, 1000)),
	?assertEqual(0.0, get_average_by_samples(vdf)),
	?assertEqual(undefined, vdf_speed(1000)).

%% @doc Run the data-size accounting test for each packing scheme, restoring
%% the original node configuration afterwards.
test_data_size_stats() ->
	{ok, Config} = arweave_config:get_env(),
	try
		arweave_config:set_env(Config#config{ mining_addr = <<"MINING">> }),
		%% A two-partition weave so partition 2 is the (excluded) tip.
		WeaveSize = floor(2 * ar_block:partition_size()),
		ets:insert(node_state, [{weave_size, WeaveSize}]),
		ar_mining_stats:pause_performance_reports(120000),
		do_test_data_size_stats(Config,
			{spora_2_6, <<"MINING">>}, {spora_2_6, <<"PACKING">>}),
		do_test_data_size_stats(Config,
			{composite, <<"MINING">>, 1}, {composite, <<"PACKING">>, 1}),
		do_test_data_size_stats(Config,
			{composite, <<"MINING">>, 2}, {composite, <<"PACKING">>, 2})
	after
		arweave_config:set_env(Config)
	end.
%% @doc Exercise the storage-module data-size bookkeeping for one Mining
%% packing. Only modules whose packing matches Mining should count towards the
%% per-partition sizes and the minable total; unpacked and differently-packed
%% (Packing) modules must be ignored.
do_test_data_size_stats(Config, Mining, Packing) ->
	arweave_config:set_env(Config#config{
		storage_modules = [
			{floor(0.1 * ar_block:partition_size()), 10, unpacked},
			{floor(0.1 * ar_block:partition_size()), 10, Mining},
			{floor(0.1 * ar_block:partition_size()), 10, Packing},
			{floor(0.3 * ar_block:partition_size()), 4, unpacked},
			{floor(0.3 * ar_block:partition_size()), 4, Mining},
			{floor(0.3 * ar_block:partition_size()), 4, Packing},
			{floor(0.2 * ar_block:partition_size()), 8, unpacked},
			{floor(0.2 * ar_block:partition_size()), 8, Mining},
			{floor(0.2 * ar_block:partition_size()), 8, Packing},
			{ar_block:partition_size(), 2, unpacked},
			{ar_block:partition_size(), 2, Mining},
			{ar_block:partition_size(), 2, Packing}
		]
	}),
	reset_all_stats(),
	?assertEqual(0, get_total_minable_data_size(Mining)),
	?assertEqual(0, get_partition_data_size(1, Mining)),
	?assertEqual(0, get_partition_data_size(2, Mining)),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.1 * ar_block:partition_size()), 10, unpacked}),
		unpacked, 1, floor(0.1 * ar_block:partition_size()), 10, 101),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.1 * ar_block:partition_size()), 10, Mining}),
		Mining, 1, floor(0.1 * ar_block:partition_size()), 10, 102),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.1 * ar_block:partition_size()), 10, Packing}),
		Packing, 1, floor(0.1 * ar_block:partition_size()), 10, 103),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.3 * ar_block:partition_size()), 4, unpacked}),
		unpacked, 1, floor(0.3 * ar_block:partition_size()), 4, 111),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.3 * ar_block:partition_size()), 4, Mining}),
		Mining, 1, floor(0.3 * ar_block:partition_size()), 4, 112),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.3 * ar_block:partition_size()), 4, Packing}),
		Packing, 1, floor(0.3 * ar_block:partition_size()), 4, 113),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({ar_block:partition_size(), 2, unpacked}),
		unpacked, 2, ar_block:partition_size(), 2, 201),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({ar_block:partition_size(), 2, Mining}),
		Mining, 2, ar_block:partition_size(), 2, 202),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({ar_block:partition_size(), 2, Packing}),
		Packing, 2, ar_block:partition_size(), 2, 203),
	%% Partition 1: 102 + 112 = 214 (Mining-packed modules only). Partition 2
	%% (202) is the tip partition and is excluded from the minable total.
	?assertEqual(214, get_partition_data_size(1, Mining)),
	?assertEqual(202, get_partition_data_size(2, Mining)),
	?assertEqual(214, get_total_minable_data_size(Mining)),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.2 * ar_block:partition_size()), 8, unpacked}),
		unpacked, 1, floor(0.2 * ar_block:partition_size()), 8, 121),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.2 * ar_block:partition_size()), 8, Mining}),
		Mining, 1, floor(0.2 * ar_block:partition_size()), 8, 122),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({floor(0.2 * ar_block:partition_size()), 8, Packing}),
		Packing, 1, floor(0.2 * ar_block:partition_size()), 8, 123),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({ar_block:partition_size(), 2, unpacked}),
		unpacked, 2, ar_block:partition_size(), 2, 51),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({ar_block:partition_size(), 2, Mining}),
		Mining, 2, ar_block:partition_size(), 2, 52),
	ar_mining_stats:set_storage_module_data_size(
		ar_storage_module:id({ar_block:partition_size(), 2, Packing}),
		Packing, 2, ar_block:partition_size(), 2, 53),
	%% Repeat updates overwrite: partition 1 grows to 102 + 112 + 122 = 336,
	%% partition 2 is overwritten down to 52.
	?assertEqual(336, get_partition_data_size(1, Mining)),
	?assertEqual(52, get_partition_data_size(2, Mining)),
	?assertEqual(336, get_total_minable_data_size(Mining)),
	reset_all_stats(),
	?assertEqual(0, get_total_minable_data_size(Mining)),
	?assertEqual(0, get_partition_data_size(1, Mining)),
	?assertEqual(0, get_partition_data_size(2, Mining)).

%% The two H1 peer-counter stats share one parameterized test body.
test_h1_sent_to_peer_stats() ->
	test_peer_stats(fun h1_sent_to_peer/2, h1_to_peer).

test_h1_received_from_peer_stats() ->
	test_peer_stats(fun h1_received_from_peer/2, h1_from_peer).

%% @doc Shared body for the H1 peer-counter tests: record batches of events for
%% peers 1 and 2 (value 10 per event), then verify the time- and sample-based
%% averages, a single "current" counter reset, and a full reset.
test_peer_stats(Fun, Stat) ->
	ar_mining_stats:pause_performance_reports(120000),
	reset_all_stats(),
	Peer1 = ar_test_node:peer_ip(peer1),
	Peer2 = ar_test_node:peer_ip(peer2),
	Peer3 = ar_test_node:peer_ip(peer3),
	Fun(Peer1, 10),
	TotalStart1 = get_start({peer, Peer1, Stat, total}),
	CurrentStart1 = get_start({peer, Peer1, Stat, current}),
	%% Sleep so Peer2's counters start measurably later than Peer1's.
	timer:sleep(1000),
	Fun(Peer1, 5),
	Fun(Peer1, 15),
	Fun(Peer2, 1),
	TotalStart2 = get_start({peer, Peer2, Stat, total}),
	CurrentStart2 = get_start({peer, Peer2, Stat, current}),
	Fun(Peer2, 19),
	?assert(TotalStart1 /= TotalStart2),
	?assert(CurrentStart1 /= CurrentStart2),
	%% Zero elapsed time yields a 0.0 rate.
	?assertEqual(0.0, get_average_count_by_time({peer, Peer1, Stat, total}, TotalStart1)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer1, Stat, current}, CurrentStart1)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer2, Stat, total}, TotalStart2)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer2, Stat, current}, CurrentStart2)),
	%% Peer1 accumulated 30 (10+5+15), Peer2 accumulated 20 (1+19).
	?assertEqual(60.0, get_average_count_by_time({peer, Peer1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(2.5, get_average_count_by_time({peer, Peer1, Stat, current}, CurrentStart1 + 12000)),
	?assertEqual(5.0, get_average_count_by_time({peer, Peer2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(80.0, get_average_count_by_time({peer, Peer2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer3, Stat, current}, TotalStart1 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer1, Stat, total}, TotalStart1)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer1, Stat, current}, CurrentStart1)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer2, Stat, total}, TotalStart2)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer2, Stat, current}, CurrentStart2)),
	%% 3 samples for Peer1, 2 samples for Peer2.
	?assertEqual(6.0, get_average_samples_by_time({peer, Peer1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.25, get_average_samples_by_time({peer, Peer1, Stat, current}, CurrentStart1 + 12000)),
	?assertEqual(0.5, get_average_samples_by_time({peer, Peer2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(8.0, get_average_samples_by_time({peer, Peer2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer3, Stat, current}, TotalStart1 + 250)),
	%% Mean value per sample is 10 for both peers.
	?assertEqual(10.0, get_average_by_samples({peer, Peer1, Stat, total})),
	?assertEqual(10.0, get_average_by_samples({peer, Peer1, Stat, current})),
	?assertEqual(10.0, get_average_by_samples({peer, Peer2, Stat, total})),
	?assertEqual(10.0, get_average_by_samples({peer, Peer2, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer3, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer3, Stat, current})),
	%% Resetting Peer1's "current" counter leaves its "total" counter and Peer2
	%% untouched.
	Now = CurrentStart2 + 1000,
	reset_count({peer, Peer1, Stat, current}, Now),
	?assertEqual(Now, get_start({peer, Peer1, Stat, current})),
	?assertEqual(60.0, get_average_count_by_time({peer, Peer1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer1, Stat, current}, Now + 12000)),
	?assertEqual(5.0, get_average_count_by_time({peer, Peer2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(80.0, get_average_count_by_time({peer, Peer2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer3, Stat, current}, CurrentStart1 + 250)),
	?assertEqual(6.0, get_average_samples_by_time({peer, Peer1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer1, Stat, current}, Now + 12000)),
	?assertEqual(0.5, get_average_samples_by_time({peer, Peer2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(8.0, get_average_samples_by_time({peer, Peer2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer3, Stat, current}, CurrentStart1 + 250)),
	?assertEqual(10.0, get_average_by_samples({peer, Peer1, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer1, Stat, current})),
	?assertEqual(10.0, get_average_by_samples({peer, Peer2, Stat, total})),
	?assertEqual(10.0, get_average_by_samples({peer, Peer2, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer3, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer3, Stat, current})),
	%% A full reset clears every counter.
	reset_all_stats(),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer1, Stat, current}, Now + 12000)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_count_by_time({peer, Peer3, Stat, current}, CurrentStart1 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer1, Stat, total}, TotalStart1 + 500)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer1, Stat, current}, Now + 12000)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer2, Stat, total}, TotalStart2 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer2, Stat, current}, CurrentStart2 + 250)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer3, Stat, total}, TotalStart1 + 4000)),
	?assertEqual(0.0, get_average_samples_by_time({peer, Peer3, Stat, current}, CurrentStart1 + 250)),
	?assertEqual(0.0, get_average_by_samples({peer, Peer1, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer1, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer2, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer2, Stat, current})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer3, Stat, total})),
	?assertEqual(0.0, get_average_by_samples({peer, Peer3, Stat, current})).

%% @doc Verify the monotonically-increasing H2 exchange counters, a selective
%% reset of individual counters, and a full reset.
test_h2_peer_stats() ->
	ar_mining_stats:pause_performance_reports(120000),
	reset_all_stats(),
	Peer1 = ar_test_node:peer_ip(peer1),
	Peer2 = ar_test_node:peer_ip(peer2),
	Peer3 = ar_test_node:peer_ip(peer3),
	ar_mining_stats:h2_sent_to_peer(Peer1),
	ar_mining_stats:h2_sent_to_peer(Peer1),
	ar_mining_stats:h2_sent_to_peer(Peer1),
	ar_mining_stats:h2_sent_to_peer(Peer2),
	ar_mining_stats:h2_sent_to_peer(Peer2),
	?assertEqual(3, get_count({peer, Peer1, h2_to_peer, total})),
	?assertEqual(2, get_count({peer, Peer2, h2_to_peer, total})),
	?assertEqual(0, get_count({peer, Peer3, h2_to_peer, total})),
	?assertEqual(5, get_overall_total(peer, h2_to_peer, total)),
	ar_mining_stats:h2_received_from_peer(Peer1),
	ar_mining_stats:h2_received_from_peer(Peer1),
	ar_mining_stats:h2_received_from_peer(Peer1),
	ar_mining_stats:h2_received_from_peer(Peer2),
	ar_mining_stats:h2_received_from_peer(Peer2),
	?assertEqual(3, get_count({peer, Peer1, h2_from_peer, total})),
	?assertEqual(2, get_count({peer, Peer2, h2_from_peer, total})),
	?assertEqual(0, get_count({peer, Peer3, h2_from_peer, total})),
	?assertEqual(5, get_overall_total(peer, h2_from_peer, total)),
	%% Reset one counter per direction; the others must be unaffected.
	reset_count({peer, Peer1, h2_to_peer, total}, 1000),
	reset_count({peer, Peer2, h2_from_peer, total}, 1000),
	?assertEqual(0, get_count({peer, Peer1, h2_to_peer, total})),
	?assertEqual(2, get_count({peer, Peer2, h2_to_peer, total})),
	?assertEqual(0, get_count({peer, Peer3, h2_to_peer, total})),
	?assertEqual(3, get_count({peer, Peer1, h2_from_peer, total})),
	?assertEqual(0, get_count({peer, Peer2, h2_from_peer, total})),
	?assertEqual(0, get_count({peer, Peer3, h2_from_peer, total})),
	?assertEqual(2, get_overall_total(peer, h2_to_peer, total)),
	?assertEqual(3, get_overall_total(peer, h2_from_peer, total)),
	reset_all_stats(),
	?assertEqual(0, get_count({peer, Peer1, h2_to_peer, total})),
	?assertEqual(0, get_count({peer, Peer2, h2_to_peer, total})),
	?assertEqual(0, get_count({peer, Peer3, h2_to_peer, total})),
	?assertEqual(0, get_count({peer, Peer1, h2_from_peer, total})),
	?assertEqual(0, get_count({peer, Peer2, h2_from_peer, total})),
	?assertEqual(0, get_count({peer, Peer3, h2_from_peer, total})),
	?assertEqual(0, get_overall_total(peer, h2_to_peer, total)),
	?assertEqual(0, get_overall_total(peer, h2_from_peer, total)).

%% Run the optimal-rate checks for each packing scheme at both PoA1 multipliers.
test_optimal_stats_poa1_multiple_1() ->
	test_optimal_stats({spora_2_6, <<"MINING">>}, 1),
	test_optimal_stats({composite, <<"MINING">>, 1}, 1),
	test_optimal_stats({composite, <<"MINING">>, 2}, 1).

test_optimal_stats_poa1_multiple_2() ->
	test_optimal_stats({spora_2_6, <<"MINING">>}, 2),
	test_optimal_stats({composite, <<"MINING">>, 1}, 2),
	test_optimal_stats({composite, <<"MINING">>, 2}, 2).
test_optimal_stats(Packing, PoA1Multiplier) -> PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing), RecallRangeSize = case PackingDifficulty of 0 -> 0.5; 1 -> 0.125; 2 -> 0.0625 end, ?assertEqual(0.0, optimal_partition_read_mibps( Packing, undefined, ar_block:partition_size(), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(RecallRangeSize * 2, optimal_partition_read_mibps( Packing, 1.0, ar_block:partition_size(), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(RecallRangeSize, optimal_partition_read_mibps( Packing, 2.0, ar_block:partition_size(), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(RecallRangeSize / 2, optimal_partition_read_mibps( Packing, 1.0, floor(0.25 * ar_block:partition_size()), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(RecallRangeSize * 1.6, optimal_partition_read_mibps( Packing, 1.0, ar_block:partition_size(), floor(6 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), {FullWeave, SlowVDF, SmallPartition, SmallWeave} = case PoA1Multiplier of 1 -> {800.0, 400.0, 200.0, 640.0}; 2 -> {600.0, 300.0, 150.0, 440.0} end, ?assertEqual(0.0, optimal_partition_hash_hps( PoA1Multiplier, undefined, ar_block:partition_size(), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(FullWeave, optimal_partition_hash_hps( PoA1Multiplier, 1.0, ar_block:partition_size(), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(SlowVDF, optimal_partition_hash_hps( PoA1Multiplier, 2.0, ar_block:partition_size(), floor(10 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))), ?assertEqual(SmallPartition, optimal_partition_hash_hps( PoA1Multiplier, 1.0, floor(0.25 * ar_block:partition_size()), floor(10 * ar_block:partition_size()), floor(10 * 
ar_block:partition_size()))), ?assertEqual(SmallWeave, optimal_partition_hash_hps( PoA1Multiplier, 1.0, ar_block:partition_size(), floor(6 * ar_block:partition_size()), floor(10 * ar_block:partition_size()))). test_report_poa1_multiple_1() -> test_report({spora_2_6, <<"MINING">>}, {spora_2_6, <<"PACKING">>}, 1), test_report({composite, <<"MINING">>, 1}, {composite, <<"PACKING">>, 1}, 1), test_report({composite, <<"MINING">>, 2}, {composite, <<"PACKING">>, 2}, 1). test_report_poa1_multiple_2() -> test_report({spora_2_6, <<"MINING">>}, {spora_2_6, <<"PACKING">>}, 2), test_report({composite, <<"MINING">>, 1}, {composite, <<"PACKING">>, 1}, 2), test_report({composite, <<"MINING">>, 2}, {composite, <<"PACKING">>, 2}, 2). test_report(Mining, Packing, PoA1Multiplier) -> {ok, Config} = arweave_config:get_env(), MiningAddress = case Mining of {spora_2_6, Addr} -> Addr; {composite, Addr, _} -> Addr end, PackingDifficulty = ar_mining_server:get_packing_difficulty(Mining), DifficultyDivisor = case PackingDifficulty of 0 -> 1.0; 1 -> 8.0; 2 -> 4.0 end, RecallRangeSize = case PackingDifficulty of 0 -> 0.5; 1 -> 0.125; 2 -> 0.0625 end, StorageModules = [ %% partition 1 {floor(0.1 * ar_block:partition_size()), 10, unpacked}, {floor(0.1 * ar_block:partition_size()), 10, Mining}, {floor(0.1 * ar_block:partition_size()), 10, Packing}, {floor(0.3 * ar_block:partition_size()), 4, unpacked}, {floor(0.3 * ar_block:partition_size()), 4, Mining}, {floor(0.3 * ar_block:partition_size()), 4, Packing}, {floor(0.2 * ar_block:partition_size()), 8, unpacked}, {floor(0.2 * ar_block:partition_size()), 8, Mining}, {floor(0.2 * ar_block:partition_size()), 8, Packing}, %% partition 2 {ar_block:partition_size(), 2, unpacked}, {ar_block:partition_size(), 2, Mining}, {ar_block:partition_size(), 2, Packing} ], try arweave_config:set_env(Config#config{ storage_modules = StorageModules, mining_addr = MiningAddress }), ar_mining_stats:pause_performance_reports(120000), reset_all_stats(), Partitions = [ {1, 
MiningAddress, 0}, {2, MiningAddress, 0}, {3, MiningAddress, 0} ], Peer1 = ar_test_node:peer_ip(peer1), Peer2 = ar_test_node:peer_ip(peer2), Peer3 = ar_test_node:peer_ip(peer3), Peers = [Peer1, Peer2, Peer3], Now = erlang:monotonic_time(millisecond), WeaveSize = floor(10 * ar_block:partition_size()), ets:insert(node_state, [{weave_size, WeaveSize}]), ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({floor(0.1 * ar_block:partition_size()), 10, Mining}), Mining, 1, floor(0.1 * ar_block:partition_size()), 10, floor(0.1 * ar_block:partition_size())), ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({floor(0.3 * ar_block:partition_size()), 4, Mining}), Mining, 1, floor(0.3 * ar_block:partition_size()), 4, floor(0.2 * ar_block:partition_size())), ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({floor(0.2 * ar_block:partition_size()), 8, Mining}), Mining, 1, floor(0.2 * ar_block:partition_size()), 8, floor(0.05 * ar_block:partition_size())), ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({ar_block:partition_size(), 2, Mining}), Mining, 2, ar_block:partition_size(), 2, floor(0.25 * ar_block:partition_size())), vdf_computed(Now), vdf_computed(Now), vdf_computed(Now), h1_solution(Now), h2_solution(Now), h2_solution(Now), block_found(Now), chunks_read(1, 1, Now), chunks_read(1, 2, Now), chunks_read(2, 2, Now), h1_computed(1, 1, Now), h1_computed(1, 2, Now), h2_computed(1, 2, Now), h1_computed(2, 4, Now), h1_sent_to_peer(Peer1, 10, Now), h1_sent_to_peer(Peer1, 5, Now), h1_sent_to_peer(Peer1, 15, Now), h1_sent_to_peer(Peer2, 1, Now), h1_sent_to_peer(Peer2, 19, Now), h1_received_from_peer(Peer2, 10, Now), h1_received_from_peer(Peer2, 5, Now), h1_received_from_peer(Peer2, 15, Now), h1_received_from_peer(Peer1, 1, Now), h1_received_from_peer(Peer1, 19, Now), h2_sent_to_peer(Peer1, Now), h2_sent_to_peer(Peer1, Now), h2_sent_to_peer(Peer1, Now), h2_sent_to_peer(Peer2, Now), h2_sent_to_peer(Peer2, Now), 
h2_received_from_peer(Peer1, Now), h2_received_from_peer(Peer1, Now), h2_received_from_peer(Peer2, Now), h2_received_from_peer(Peer2, Now), h2_received_from_peer(Peer2, Now), Report1 = generate_report(0, Mining, [], [], WeaveSize, Now+1000), ?assertEqual(#report{ now = Now+1000 }, Report1), log_report(format_report(Report1, WeaveSize)), Report2 = generate_report(0, Mining, Partitions, Peers, WeaveSize, Now+1000), ReportString = format_report(Report2, WeaveSize), log_report(ReportString), { TotalHash, Partition1Hash, Partition2Hash, TotalOptimal, Partition1Optimal, Partition2Optimal } = case PoA1Multiplier of 1 -> {9.0, 5.0, 4.0, 763.1992309570705, 445.19924812320824, 317.9999828338623}; 2 -> {5.5, 3.5, 2.0, 403.19957427982445, 235.19959144596214, 167.9999828338623} end, ?assertEqual(#report{ now = Now+1000, vdf_speed = 1.0 / 3.0, h1_solution = 1, h2_solution = 2, confirmed_block = 1, total_data_size = floor(0.1 * ar_block:partition_size()) + floor(0.2 * ar_block:partition_size()) + floor(0.05 * ar_block:partition_size()) + floor(0.25 * ar_block:partition_size()), optimal_overall_read_mibps = 0.9539990386963382 * 2 * RecallRangeSize, optimal_overall_hash_hps = TotalOptimal, average_read_mibps = 1.25, current_read_mibps = 1.25, average_hash_hps = TotalHash / DifficultyDivisor, current_hash_hps = TotalHash / DifficultyDivisor, average_h1_to_peer_hps = 50.0, current_h1_to_peer_hps = 50.0, average_h1_from_peer_hps = 50.0, current_h1_from_peer_hps = 50.0, total_h2_to_peer = 5, total_h2_from_peer = 5, partitions = [ #partition_report{ partition_number = 3, data_size = 0, optimal_read_mibps = 0.0, average_read_mibps = 0.0, current_read_mibps = 0.0, optimal_hash_hps = 0.0, average_hash_hps = 0.0, current_hash_hps = 0.0 }, #partition_report{ partition_number = 2, data_size = floor(0.25 * ar_block:partition_size()), optimal_read_mibps = 0.3974999785423279 * 2 * RecallRangeSize, average_read_mibps = 0.5, current_read_mibps = 0.5, optimal_hash_hps = Partition2Optimal, 
average_hash_hps = Partition2Hash / DifficultyDivisor, current_hash_hps = Partition2Hash / DifficultyDivisor }, #partition_report{ partition_number = 1, data_size = 734002, optimal_read_mibps = 0.5564990601540103 * 2 * RecallRangeSize, average_read_mibps = 0.75, current_read_mibps = 0.75, optimal_hash_hps = Partition1Optimal, average_hash_hps = Partition1Hash / DifficultyDivisor, current_hash_hps = Partition1Hash / DifficultyDivisor } ], peers = [ #peer_report{ peer = Peer3, average_h1_to_peer_hps = 0.0, current_h1_to_peer_hps = 0.0, average_h1_from_peer_hps = 0.0, current_h1_from_peer_hps = 0.0, total_h2_to_peer = 0, total_h2_from_peer = 0 }, #peer_report{ peer = Peer2, average_h1_to_peer_hps = 20.0, current_h1_to_peer_hps = 20.0, average_h1_from_peer_hps = 30.0, current_h1_from_peer_hps = 30.0, total_h2_to_peer = 2, total_h2_from_peer = 3 }, #peer_report{ peer = Peer1, average_h1_to_peer_hps = 30.0, current_h1_to_peer_hps = 30.0, average_h1_from_peer_hps = 20.0, current_h1_from_peer_hps = 20.0, total_h2_to_peer = 3, total_h2_from_peer = 2 } ] }, Report2) after arweave_config:set_env(Config) end. ================================================ FILE: apps/arweave/src/ar_mining_sup.erl ================================================ -module(ar_mining_sup). -behaviour(supervisor). -export([start_link/0]). -export([init/1]). -include_lib("arweave/include/ar_sup.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). %%%=================================================================== %%% Public interface. %%%=================================================================== start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). %% =================================================================== %% Supervisor callbacks. %% =================================================================== init([]) -> %% We'll create workers for all configured parititions - even those partitions that %% currently exceed the weave size. 
Those workers will just lie dormant until the %% weave size grows to meet them. MiningWorkers = lists:map( fun({Partition, _MiningAddr, PackingDifficulty}) -> ?CHILD_WITH_ARGS( ar_mining_worker, worker, ar_mining_worker:name(Partition, PackingDifficulty), [Partition, PackingDifficulty]) end, ar_mining_io:get_partitions(infinity) ), Children = [ ?CHILD(ar_mining_stats, worker), ?CHILD(ar_mining_hash, worker), ?CHILD(ar_mining_io, worker) ] ++ MiningWorkers ++ [?CHILD(ar_mining_server, worker)], {ok, {{one_for_one, 5, 10}, Children}}. ================================================ FILE: apps/arweave/src/ar_mining_worker.erl ================================================ -module(ar_mining_worker). -behaviour(gen_server). -export([start_link/2, name/2, reset_mining_session/2, set_sessions/2, chunks_read/5, computed_hash/5, set_difficulty/2, set_cache_limits/3, add_task/3, garbage_collect/1]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). -include("ar.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include("ar_mining.hrl"). -include("ar_mining_cache.hrl"). -include_lib("eunit/include/eunit.hrl"). -record(state, { name = not_set, partition_number = not_set, diff_pair = not_set, packing_difficulty = 0, task_queue = gb_sets:new(), chunk_cache = undefined, vdf_queue_limit = 0, latest_vdf_step_number = 0, is_pool_client = false, h1_hashes = #{}, h2_hashes = #{} }). -define(TASK_CHECK_INTERVAL_MS, 200). -define(STATUS_CHECK_INTERVAL_MS, 5000). -define(REPORT_CHUNK_CACHE_METRICS_INTERVAL_MS, 30000). -define(CACHE_KEY(CacheRef, Nonce), {CacheRef, Nonce}). %%%=================================================================== %%% Messages %%%=================================================================== -define(MSG_RESET_MINING_SESSION(DiffPair), {reset_mining_session, DiffPair}). -define(MSG_SET_SESSIONS(ActiveSessions), {set_sessions, ActiveSessions}). -define(MSG_ADD_TASK(Task), {add_task, Task}). 
-define(MSG_SET_DIFFICULTY(DiffPair), {set_difficulty, DiffPair}).
-define(MSG_SET_CACHE_LIMITS(CacheLimitBytes, VDFQueueLimit),
	{set_cache_limits, CacheLimitBytes, VDFQueueLimit}).
-define(MSG_CHECK_WORKER_STATUS, {check_worker_status}).
-define(MSG_HANDLE_TASK, {handle_task}).
-define(MSG_GARBAGE_COLLECT(StartTime, GCResult), {garbage_collect, StartTime, GCResult}).
-define(MSG_GARBAGE_COLLECT, {garbage_collect}).
-define(MSG_FETCHED_LAST_MOMENT_PROOF(Any), {fetched_last_moment_proof, Any}).
-define(MSG_REPORT_CHUNK_CACHE_METRICS, {report_chunk_cache_metrics}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Spawn the worker gen_server, registered under the name derived from
%% the partition number and the packing difficulty.
start_link(Partition, PackingDifficulty) ->
	WorkerName = name(Partition, PackingDifficulty),
	gen_server:start_link({local, WorkerName}, ?MODULE, {Partition, PackingDifficulty}, []).

%% @doc Build the registered name of the worker serving the given partition
%% at the given packing difficulty, e.g. 'ar_mining_worker_3_1'.
-spec name(Partition :: non_neg_integer(), PackingDifficulty :: non_neg_integer()) -> atom().
name(Partition, PackingDifficulty) ->
	Suffix = integer_to_list(Partition) ++ "_" ++ integer_to_list(PackingDifficulty),
	list_to_atom("ar_mining_worker_" ++ Suffix).

%% @doc Drop all cached sessions and install the new difficulty pair.
-spec reset_mining_session(Worker :: pid(),
		DiffPair :: {non_neg_integer(), non_neg_integer()}) -> ok.
reset_mining_session(Worker, DiffPair) ->
	gen_server:cast(Worker, ?MSG_RESET_MINING_SESSION(DiffPair)).

%% @doc Replace the set of VDF sessions the worker accepts candidates for.
-spec set_sessions(Worker :: pid(), ActiveSessions :: [ar_nonce_limiter:session_key()]) -> ok.
set_sessions(Worker, ActiveSessions) ->
	gen_server:cast(Worker, ?MSG_SET_SESSIONS(ActiveSessions)).

%% @doc Queue a task with no extra arguments.
-spec add_task(Worker :: pid(), TaskType :: atom(), Candidate :: #mining_candidate{}) -> ok.
add_task(Worker, TaskType, Candidate) ->
	add_task(Worker, TaskType, Candidate, []).

-spec add_task(Worker :: pid(), TaskType :: atom(), Candidate :: #mining_candidate{},
		ExtraArgs :: [term()]) -> ok.
add_task(Worker, TaskType, Candidate, ExtraArgs) ->
	gen_server:cast(Worker, ?MSG_ADD_TASK({TaskType, Candidate, ExtraArgs})).

%% @doc Re-enqueue a task after a randomized delay between
%% ?TASK_CHECK_INTERVAL_MS and 2 * ?TASK_CHECK_INTERVAL_MS. The jitter avoids
%% a glut of tasks all being re-added at the same moment - in particular, when
%% the chunk cache fills up it's possible for all queued compute_h0 tasks to
%% be delayed (and then retried) at about the same time.
-spec add_delayed_task(Worker :: pid(), TaskType :: atom(),
		Candidate :: #mining_candidate{}) -> ok.
add_delayed_task(Worker, TaskType, Candidate) ->
	Jitter = rand:uniform(?TASK_CHECK_INTERVAL_MS),
	ar_util:cast_after(Jitter + ?TASK_CHECK_INTERVAL_MS, Worker,
		?MSG_ADD_TASK({TaskType, Candidate, []})).

%% @doc Report the chunks read for a recall range; queued as a chunk1/chunk2 task.
-spec chunks_read(
	Worker :: pid(),
	WhichChunk :: atom(),
	Candidate :: #mining_candidate{},
	RangeStart :: non_neg_integer(),
	ChunkOffsets :: [non_neg_integer()]
) -> ok.
chunks_read(Worker, WhichChunk, Candidate, RangeStart, ChunkOffsets) ->
	add_task(Worker, WhichChunk, Candidate, [RangeStart, ChunkOffsets]).

%% @doc Callback from the hashing threads when a hash is computed. The hash
%% (and preimage, where applicable) is folded into the candidate before the
%% corresponding task is queued.
-spec computed_hash(
	Worker :: pid(),
	TaskType :: atom(),
	Hash :: binary(),
	Preimage :: binary(),
	Candidate :: #mining_candidate{}
) -> ok.
computed_hash(Worker, computed_h0, H0, undefined, Candidate) ->
	add_task(Worker, computed_h0, Candidate#mining_candidate{ h0 = H0 });
computed_hash(Worker, computed_h1, H1, Preimage, Candidate) ->
	add_task(Worker, computed_h1,
		Candidate#mining_candidate{ h1 = H1, preimage = Preimage });
computed_hash(Worker, computed_h2, H2, Preimage, Candidate) ->
	add_task(Worker, computed_h2,
		Candidate#mining_candidate{ h2 = H2, preimage = Preimage }).

%% @doc Set the new mining difficulty. We do not recalculate it inside the
%% mining server or worker because we want to completely detach the mining
%% server from the block ordering. The previous block is chosen only after the
%% mining solution is found (if we choose it in advance we may miss a better
%% option arriving in the process). Also, a mining session may (in practice,
%% almost always will) span several blocks.
-spec set_difficulty(Worker :: pid(),
		DiffPair :: {non_neg_integer(), non_neg_integer()}) -> ok.
set_difficulty(Worker, DiffPair) ->
	gen_server:cast(Worker, ?MSG_SET_DIFFICULTY(DiffPair)).

%% @doc Update the chunk-cache size limit and the VDF queue depth limit.
-spec set_cache_limits(Worker :: pid(), CacheLimitBytes :: non_neg_integer(),
		VDFQueueLimit :: non_neg_integer()) -> ok.
set_cache_limits(Worker, CacheLimitBytes, VDFQueueLimit) ->
	gen_server:cast(Worker, ?MSG_SET_CACHE_LIMITS(CacheLimitBytes, VDFQueueLimit)).

%% @doc Ask the worker process to garbage-collect itself asynchronously.
-spec garbage_collect(Worker :: pid()) -> ok.
garbage_collect(Worker) ->
	gen_server:cast(Worker, ?MSG_GARBAGE_COLLECT).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init({Partition, PackingDifficulty}) ->
	WorkerName = name(Partition, PackingDifficulty),
	?LOG_INFO([{event, mining_worker_started}, {worker, WorkerName},
		{pid, self()}, {partition, Partition}]),
	State0 = #state{
		name = WorkerName,
		chunk_cache = ar_mining_cache:new(WorkerName),
		partition_number = Partition,
		is_pool_client = ar_pool:is_client(),
		packing_difficulty = PackingDifficulty
	},
	%% Kick off the three periodic loops: task processing, worker status
	%% checks, and chunk-cache metrics reporting.
	gen_server:cast(self(), ?MSG_HANDLE_TASK),
	gen_server:cast(self(), ?MSG_CHECK_WORKER_STATUS),
	gen_server:cast(self(), ?MSG_REPORT_CHUNK_CACHE_METRICS),
	{ok, report_chunk_cache_metrics(State0)}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
handle_cast(?MSG_SET_CACHE_LIMITS(CacheLimitBytes, VDFQueueLimit), State) ->
	State1 = State#state{
		chunk_cache = ar_mining_cache:set_limit(CacheLimitBytes, State#state.chunk_cache),
		vdf_queue_limit = VDFQueueLimit
	},
	{noreply, State1};
handle_cast(?MSG_SET_DIFFICULTY(DiffPair), State) ->
	{noreply, State#state{ diff_pair = DiffPair }};
handle_cast(?MSG_RESET_MINING_SESSION(DiffPair), State) ->
	%% Drop all cached sessions, then install the new difficulty pair.
	State1 = update_sessions([], State),
	{noreply, State1#state{ diff_pair = DiffPair }};
handle_cast(?MSG_SET_SESSIONS(ActiveSessions), State) ->
	{noreply, update_sessions(ActiveSessions, State)};
handle_cast(?MSG_ADD_TASK({TaskType, Candidate, _ExtraArgs} = Task), State) ->
	case is_session_valid(State, Candidate) of
		true ->
			{noreply, add_task(Task, State)};
		false ->
			%% The candidate references a session we no longer track; drop it.
			log_debug(mining_debug_add_stale_task, Candidate, State, [
				{task, TaskType},
				{active_sessions, ar_mining_server:encode_sessions(
					ar_mining_cache:get_sessions(State#state.chunk_cache))}]),
			{noreply, State}
	end;
handle_cast(?MSG_HANDLE_TASK, #state{ task_queue = Q } = State) ->
	case gb_sets:is_empty(Q) of
		true ->
			%% Nothing queued; poll again shortly.
			ar_util:cast_after(?TASK_CHECK_INTERVAL_MS, self(), ?MSG_HANDLE_TASK),
			{noreply, State};
		_ ->
			%% Re-cast immediately so the loop keeps draining the queue.
			gen_server:cast(self(), ?MSG_HANDLE_TASK),
			{{_Priority, _ID, {TaskType, Candidate, _ExtraArgs} = Task}, Q2} =
				gb_sets:take_smallest(Q),
			prometheus_gauge:dec(mining_server_task_queue_len, [TaskType]),
			case is_session_valid(State, Candidate) of
				true ->
					{noreply, handle_task(Task, State#state{ task_queue = Q2 })};
				false ->
					%% Drop the stale task. We must keep Q2 (the queue with the
					%% task removed) in the state; keeping the old queue would
					%% make the worker take the same stale task on every
					%% iteration, spinning forever and repeatedly decrementing
					%% the mining_server_task_queue_len gauge.
					log_debug(mining_debug_handle_stale_task, Candidate, State, [
						{task, TaskType},
						{active_sessions, ar_mining_server:encode_sessions(
							ar_mining_cache:get_sessions(State#state.chunk_cache))}]),
					{noreply, State#state{ task_queue = Q2 }}
			end
	end;
handle_cast(?MSG_CHECK_WORKER_STATUS, State) ->
	maybe_warn_about_lag(State#state.task_queue, State#state.name),
	ar_util:cast_after(?STATUS_CHECK_INTERVAL_MS, self(), ?MSG_CHECK_WORKER_STATUS),
	{noreply, State};
handle_cast(?MSG_GARBAGE_COLLECT, State) ->
	%% Request an asynchronous GC; the result arrives later as a
	%% ?MSG_GARBAGE_COLLECT(StartTime, GCResult) info message, with StartTime
	%% echoed back as the request id.
	erlang:garbage_collect(self(), [{async, erlang:monotonic_time()}]),
	{noreply, State};
handle_cast(?MSG_REPORT_CHUNK_CACHE_METRICS, State) ->
	ar_util:cast_after(?REPORT_CHUNK_CACHE_METRICS_INTERVAL_MS, self(),
		?MSG_REPORT_CHUNK_CACHE_METRICS),
	{noreply, report_chunk_cache_metrics(State)};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(?MSG_GARBAGE_COLLECT(StartTime, GCResult), State) ->
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond),
	%% Only log failed or notably slow collections.
	case GCResult == false orelse ElapsedTime > ?GC_LOG_THRESHOLD of
		true ->
			log_debug(mining_debug_garbage_collect, State, [
				{pid, self()}, {gc_time, ElapsedTime}, {gc_result, GCResult}]);
		false ->
			ok
	end,
	{noreply, State};
handle_info(?MSG_FETCHED_LAST_MOMENT_PROOF(_), State) ->
	%% This is a no-op to handle "slow" responses from peers that were queried
	%% by `fetch_poa_from_peers`. Only the first peer to respond with a PoA is
	%% handled; all other responses fall through to here and are ignored.
	{noreply, State};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate},
		{reason, Reason}]),
	ok.

%%%===================================================================
%%% Mining tasks.
%%%===================================================================

%% @doc Insert a task into the priority queue and bump the task metrics.
%% make_ref() breaks ties between tasks with equal priority.
add_task({TaskType, Candidate, _ExtraArgs} = Task, State) ->
	#state{ task_queue = Q } = State,
	StepNumber = Candidate#mining_candidate.step_number,
	Q2 = gb_sets:insert({priority(TaskType, StepNumber), make_ref(), Task}, Q),
	prometheus_gauge:inc(mining_server_task_queue_len, [TaskType]),
	prometheus_gauge:inc(mining_server_tasks, [TaskType]),
	State#state{ task_queue = Q2 }.
-spec handle_task( Task :: { EventType :: compute_h0 | computed_h0 | chunk1 | chunk2 | computed_h1 | computed_h2 | compute_h2_for_peer, Candidate :: #mining_candidate{}, ExtraArgs :: term() }, State :: #state{} ) -> State :: #state{}. %% @doc Handle the `compute_h0` task. %% Indicates that the VDF step has been computed. handle_task({compute_h0, Candidate, _ExtraArgs}, State) -> #state{ latest_vdf_step_number = LatestVDFStepNumber, vdf_queue_limit = VDFQueueLimit } = State, #mining_candidate{ step_number = StepNumber } = Candidate, State1 = report_and_reset_hashes(State), % Only mine recent VDF Steps case StepNumber >= LatestVDFStepNumber - VDFQueueLimit of true -> %% Try to reserve the cache space for both partitions, as we do not know if we have both, one, or none. case try_to_reserve_cache_range_space(2, Candidate#mining_candidate.session_key, State1) of {true, State2} -> %% Cache space reserved, compute h0. ar_mining_hash:compute_h0(self(), Candidate), State2#state{ latest_vdf_step_number = max(StepNumber, LatestVDFStepNumber) }; false -> %% We don't have enough cache space to read the recall ranges, so we'll try again later. add_delayed_task(self(), compute_h0, Candidate), State1#state{ latest_vdf_step_number = max(StepNumber, LatestVDFStepNumber) } end; false -> State1 end; %% @doc Handle the `computed_h0` task. %% Indicates that the hash for the VDF step has been computed. handle_task({computed_h0, Candidate, _ExtraArgs}, State) -> #mining_candidate{ h0 = H0, partition_number = Partition1, partition_upper_bound = PartitionUpperBound } = Candidate, {RecallRange1Start, RecallRange2Start} = ar_mining_server:get_recall_range(H0, Partition1, PartitionUpperBound), Partition2 = ar_node:get_partition_number(RecallRange2Start), Candidate1 = generate_cache_ref(Candidate#mining_candidate{ partition_number2 = Partition2 }), %% Check if the recall ranges are readable to avoid reserving cache space for non-existent data. 
Range1Exists = ar_mining_io:is_recall_range_readable(Candidate1, RecallRange1Start), Range2Exists = ar_mining_io:is_recall_range_readable(Candidate1, RecallRange2Start), %% We already reserved the cache space for both partitions, so we need to release the reserved space %% if we're missing one or both of the recall ranges. case {Range1Exists, Range2Exists} of {true, true} -> %% Both recall ranges are readable, no release needed. %% Read the recall ranges; the result of the read will be reported by the `chunk1` and `chunk2` tasks. ar_mining_io:read_recall_range(chunk1, self(), Candidate1, RecallRange1Start), ar_mining_io:read_recall_range(chunk2, self(), Candidate1, RecallRange2Start), State; {true, false} -> %% Only the first recall range is readable, so we need to release the reserved space for the second %% recall range. State1 = release_cache_range_space(1, Candidate1#mining_candidate.session_key, State), %% Mark second recall range as failed, not to wait for it to arrive. State2 = mark_recall_range_failed(chunk2, Candidate1, State1), %% Read the recall range; the result of the read will be reported by the `chunk1` task. ar_mining_io:read_recall_range(chunk1, self(), Candidate1, RecallRange1Start), State2; {false, _} -> %% We don't have the recall ranges, so we need to release the reserved space for both partitions. State1 = release_cache_range_space(2, Candidate1#mining_candidate.session_key, State), State1 end; %% @doc Handle the `chunk1` task. %% Indicates that the first recall range has been read. handle_task({chunk1, Candidate, [RangeStart, ChunkOffsets]}, State) -> State1 = process_chunks(chunk1, Candidate, RangeStart, ChunkOffsets, State), State1; %% @doc Handle the `chunk2` task. %% Indicates that the second recall range has been read. handle_task({chunk2, Candidate, [RangeStart, ChunkOffsets]}, State) -> State1 = process_chunks(chunk2, Candidate, RangeStart, ChunkOffsets, State), State1; %% @doc Handle the `computed_h1` task. 
%% Indicates that the single hash for the first recall range has been computed. handle_task({computed_h1, Candidate, _ExtraArgs}, State) -> #mining_candidate{ h1 = H1 } = Candidate, State1 = hash_computed(h1, Candidate, State), H1PassesDiffChecks = h1_passes_diff_checks(H1, Candidate, State1), case H1PassesDiffChecks of false -> ok; partial -> ar_mining_server:prepare_and_post_solution(Candidate); true -> log_info(found_h1_solution, Candidate, State1, [ {h1, ar_util:encode(H1)}, {difficulty, get_difficulty(State1, Candidate)}]), ar_mining_server:prepare_and_post_solution(Candidate), ar_mining_stats:h1_solution() end, %% Check if we need to compute H2. %% Also store H1 in the cache if needed. case ar_mining_cache:with_cached_value( ?CACHE_KEY(Candidate#mining_candidate.cache_ref, Candidate#mining_candidate.nonce), Candidate#mining_candidate.session_key, State1#state.chunk_cache, fun (#ar_mining_cache_value{ chunk2_failed = true }) -> %% This node does not store chunk2. If we're part of a coordinated %% mining set, we can try one of our peers, but this node is done with %% this VDF step. {ok, Config} = arweave_config:get_env(), case Config#config.coordinated_mining of false -> ok; true -> DiffPair = case get_partial_difficulty(State1, Candidate) of not_set -> get_difficulty(State1, Candidate); PartialDiffPair -> PartialDiffPair end, ar_coordination:computed_h1(Candidate, DiffPair) end, %% Remove the cached value from the cache. {ok, drop}; (#ar_mining_cache_value{ chunk2 = undefined } = CachedValue) -> %% chunk2 hasn't been read yet, so we cache H1 and wait for it. %% If H1 passes diff checks, we will skip H2 for this nonce. {ok, CachedValue#ar_mining_cache_value{ h1 = H1, h1_passes_diff_checks = H1PassesDiffChecks }}; (#ar_mining_cache_value{ chunk2 = Chunk2 } = CachedValue) when not H1PassesDiffChecks -> %% chunk2 has already been read, so we can compute H2 now. 
ar_mining_hash:compute_h2(self(), Candidate#mining_candidate{ chunk2 = Chunk2 }), {ok, CachedValue#ar_mining_cache_value{ h1 = H1 }}; (#ar_mining_cache_value{chunk2 = _Chunk2} = _CachedValue) when H1PassesDiffChecks -> %% H1 passes diff checks, so we skip H2 for this nonce. %% Might as well drop the cached data, we don't need it anymore. {ok, drop} end ) of {ok, ChunkCache2} -> State1#state{ chunk_cache = ChunkCache2 }; {error, Reason} -> log_error(mining_worker_failed_to_process_h1, Candidate, State1, [{reason, Reason}]), State1 end; %% @doc Handle the `computed_h2` task. %% Indicates that the single hash for the second recall range has been computed. handle_task({computed_h2, Candidate, _ExtraArgs}, State) -> #mining_candidate{ h2 = H2, cm_lead_peer = Peer } = Candidate, State1 = hash_computed(h2, Candidate, State), PassesDiffChecks = h2_passes_diff_checks(H2, Candidate, State1), case PassesDiffChecks of false -> ok; partial -> log_info(found_h2_partial_solution, Candidate, State1, [ {h0, ar_util:safe_encode(Candidate#mining_candidate.h0)}, {h2, ar_util:safe_encode(H2)}, {partial_difficulty, get_partial_difficulty(State1, Candidate)}]); true -> log_info(found_h2_solution, Candidate, State1, [ {h0, ar_util:safe_encode(Candidate#mining_candidate.h0)}, {h2, ar_util:safe_encode(H2)}, {difficulty, get_difficulty(State1, Candidate)}, {partial_difficulty, get_partial_difficulty(State1, Candidate)}]), ar_mining_stats:h2_solution() end, case {PassesDiffChecks, Peer} of {false, _} -> %% H2 does not pass diff checks, do nothing. ok; {Check, not_set} when partial == Check orelse true == Check -> %% This branch only handles the case where we're not part of a coordinated mining set. %% This includes the solo mining setup, and pool mining setup. %% In case of solo mining, the `Check` will always be `true`. %% In case of pool mining, the `Check` will be `partial` or `true`. %% In either case, we prepare and post the solution. 
ar_mining_server:prepare_and_post_solution(Candidate); {Check, _} when partial == Check orelse true == Check -> %% This branch only handles the case where we're part of a coordinated mining set. %% In this case, we prepare the PoA2 and send it to the lead peer. ar_coordination:computed_h2_for_peer(Candidate) end, %% Remove the cached value from the cache. case ar_mining_cache:with_cached_value( ?CACHE_KEY(Candidate#mining_candidate.cache_ref, Candidate#mining_candidate.nonce), Candidate#mining_candidate.session_key, State1#state.chunk_cache, fun(_) -> {ok, drop} end ) of {ok, ChunkCache2} -> State1#state{ chunk_cache = ChunkCache2 }; {error, Reason} -> log_error(mining_worker_failed_to_process_computed_h2, Candidate, State1, [{reason, Reason}]), State1 end; %% @doc Handle the `compute_h2_for_peer` task. %% Indicates that we got a request to compute H2 for a peer. handle_task({compute_h2_for_peer, Candidate, _ExtraArgs}, State) -> #mining_candidate{ h0 = H0, partition_number = Partition1, partition_upper_bound = PartitionUpperBound, cm_h1_list = H1List, cm_lead_peer = Peer } = Candidate, {_, RecallRange2Start} = ar_mining_server:get_recall_range(H0, Partition1, PartitionUpperBound), Candidate1 = generate_cache_ref(Candidate), %% Clear the list so we aren't copying it around all over the place. Candidate3 = Candidate1#mining_candidate{ cm_h1_list = [] }, Range2Exists = ar_mining_io:read_recall_range(chunk2, self(), Candidate3, RecallRange2Start), case Range2Exists of true -> ar_mining_stats:h1_received_from_peer(Peer, length(H1List)), %% Add the candidate session to the cache. This is only needed during rare occasions %% where a CM peer has added a new session a few seconds before this node does. %% Typically all CM peers will be on the same VDF sessions within a few seconds, but %% this step prevents the H1s shared during those few seconds from being rejected. 
ChunkCache = ar_mining_cache:add_session( Candidate3#mining_candidate.session_key, State#state.chunk_cache), State1 = State#state{ chunk_cache = ChunkCache }, %% First we mark the whole first recall range as failed %% Then we can cache the H1 list. During this process, we also reset the chunk1_failed %% flag to false for the entries we have H1 for. %% After these manipulations we will only handle the second recall range nonces that %% have corresponding H1s. State2 = mark_recall_range_failed(chunk1, Candidate3, State1), cache_h1_list(Candidate3, H1List, State2); false -> %% This can happen for two reasons: %% 1. (most common) Remote peer has requested a range we don't have from a %% partition that we do have. %% 2. (rare, but possible) Remote peer has an outdated partition table and we %% don't even have the requested partition. %% In both cases, we don't even need to cache the H1 list as we cannot %% find a valid H2. State end. %%%=================================================================== %%% Private functions. %%%=================================================================== process_chunks(WhichChunk, Candidate, RangeStart, ChunkOffsets, State) -> PackingDifficulty = Candidate#mining_candidate.packing_difficulty, NoncesPerRecallRange = ar_block:get_max_nonce(PackingDifficulty), NoncesPerChunk = ar_block:get_nonces_per_chunk(PackingDifficulty), SubChunkSize = ar_block:get_sub_chunk_size(PackingDifficulty), process_chunks( WhichChunk, Candidate, RangeStart, 0, NoncesPerChunk, NoncesPerRecallRange, ChunkOffsets, SubChunkSize, 0, State ). %% Processing chunks for a recall range. %% %% Recall range offset is not aligned to chunk size. %% When reading data from disk, we always read the entire chunk. %% This means that the amount of data read from disk is always bigger than the %% recall range size: %% %% |<- recall range ->| %% [ ][ 1 ][ 2 ] .... 
[n-2 ][n-1 ][ n ]
%%            ^
%%            recall range start offset
%%            falls into chunk 1
%%
%% When determining which chunks to process, we find the first chunk that
%% contains the first nonce of the recall range, and start processing from this
%% chunk. This effectively shifts the recall range to the left:
%%
%%          |<-       recall range       ->|
%% [    ][ 1  ][ 2  ] .... [n-2 ][n-1 ][ n  ]
%%       |<-  effective recall range  ->|
%%
%% If the recall range start offset aligns with the chunk size accidentally,
%% current implementation skips the first chunk completely. Fixing this
%% inconsistency will require a hard fork:
%%
%%       |<-       recall range       ->|
%% [    ][ 1  ][ 2  ] .... [n-2 ][n-1 ][ n  ]
%%             |<- effective recall range ->|
%%
%% The ultimate goal is to process all the sub-chunks in the recall range.
%% The count of subchunks in the recall range is `NoncesPerRecallRange`.
%% replica packing: 10 chunks, 32 nonces per chunk, 320 nonces per recall range.
%% spora 2.6: 200 chunks, 1 nonce per chunk, 200 nonces per recall range.
%%
%% Some of the chunks inside (including first and last) might be missing.
%% These cases must be handled correctly to avoid keeping not needed chunks in
%% the cache.
%%
%% Termination clause: Nonce walked past the last nonce of the recall range.
%% Report how many chunks were actually read for the relevant partition
%% (partition 1 for chunk1 reads, partition 2 for chunk2 reads) and stop.
process_chunks(
	WhichChunk, Candidate, _RangeStart, Nonce, _NoncesPerChunk, NoncesPerRecallRange,
	_ChunkOffsets, _SubChunkSize, Count, State
) when Nonce > NoncesPerRecallRange ->
	%% We've processed all the sub_chunks in the recall range.
	ar_mining_stats:chunks_read(
		case WhichChunk of
			chunk1 -> Candidate#mining_candidate.partition_number;
			chunk2 -> Candidate#mining_candidate.partition_number2
		end,
		Count),
	State;
%% No more ChunkOffsets means no more chunks have been read. Iterate through all
%% the remaining nonces and remove the full chunks from the cache.
process_chunks(
	WhichChunk, Candidate, RangeStart, Nonce, NoncesPerChunk, NoncesPerRecallRange,
	[], SubChunkSize, Count, State
) ->
	%% mark_single_chunk*_failed_or_drop releases SubChunkSize per nonce via
	%% ReservationSizeAdjustment, which sums to NoncesPerChunk * SubChunkSize =
	%% DATA_CHUNK_SIZE per chunk — matching the reservation made in compute_h0.
	State1 =
		case WhichChunk of
			chunk1 -> mark_single_chunk1_failed_or_drop(Nonce, Candidate, State);
			chunk2 -> mark_single_chunk2_failed_or_drop(Nonce, Candidate, State)
		end,
	%% Process the next chunk.
	process_chunks(
		WhichChunk, Candidate, RangeStart, Nonce + NoncesPerChunk, NoncesPerChunk,
		NoncesPerRecallRange, [], SubChunkSize, Count, State1
	);
%% Main clause: align the current nonce against the head chunk offset and decide
%% whether the nonce lies before the chunk (missing data), past the chunk
%% (advance the offset list), or inside the chunk (process its sub-chunks).
process_chunks(
	WhichChunk, Candidate, RangeStart, Nonce, NoncesPerChunk, NoncesPerRecallRange,
	[{ChunkEndOffset, Chunk} | ChunkOffsets], SubChunkSize, Count, State
) ->
	%% Absolute weave offset of this nonce; chunk offsets are end offsets, so the
	%% chunk covers (ChunkEndOffset - ?DATA_CHUNK_SIZE, ChunkEndOffset].
	NonceOffset = RangeStart + Nonce * SubChunkSize,
	ChunkStartOffset = ChunkEndOffset - ?DATA_CHUNK_SIZE,
	case {NonceOffset < ChunkStartOffset, NonceOffset >= ChunkEndOffset, WhichChunk} of
		{true, _, chunk1} ->
			%% Skip these nonces (starting from Nonce to Nonce + NoncesPerChunk - 1).
			%% Nonce falls in a chunk which wasn't read from disk (for example, because
			%% there are holes in the recall range), e.g. the nonce is in the middle of a
			%% non-existent chunk. Mark single chunk1 as failed or remove it if the
			%% corresponding chunk is already read or marked as failed.
			State1 = mark_single_chunk1_failed_or_drop(Nonce, Candidate, State),
			process_chunks(
				WhichChunk, Candidate, RangeStart, Nonce + NoncesPerChunk, NoncesPerChunk,
				NoncesPerRecallRange, [{ChunkEndOffset, Chunk} | ChunkOffsets], SubChunkSize,
				Count, State1
			);
		{true, _, chunk2} ->
			%% Skip these nonces (starting from Nonce to Nonce + NoncesPerChunk - 1).
			%% Nonce falls in a chunk which wasn't read from disk (for example, because
			%% there are holes in the recall range), e.g. the nonce is in the middle of a
			%% non-existent chunk. Mark single chunk2 as failed or remove it if the
			%% corresponding chunk is already read and H1 is calculated.
			State1 = mark_single_chunk2_failed_or_drop(Nonce, Candidate, State),
			process_chunks(
				WhichChunk, Candidate, RangeStart, Nonce + NoncesPerChunk, NoncesPerChunk,
				NoncesPerRecallRange, [{ChunkEndOffset, Chunk} | ChunkOffsets], SubChunkSize,
				Count, State1
			);
		{_, true, _} ->
			%% Skip this chunk.
			%% Nonce falls in a chunk beyond the current chunk offset (for example,
			%% because we read an extra chunk in the beginning of the recall range).
			%% Move ahead to the next chunk offset; the nonce is NOT advanced here.
			process_chunks(
				WhichChunk, Candidate, RangeStart, Nonce, NoncesPerChunk,
				NoncesPerRecallRange, ChunkOffsets, SubChunkSize, Count, State
			);
		{false, false, _} ->
			%% Process all sub-chunks in Chunk, and then advance to the next chunk.
			State1 = process_all_sub_chunks(WhichChunk, Chunk, Candidate, Nonce, State),
			process_chunks(
				WhichChunk, Candidate, RangeStart, Nonce + NoncesPerChunk, NoncesPerChunk,
				NoncesPerRecallRange, ChunkOffsets, SubChunkSize, Count + 1, State1
			)
	end.

%% @doc Split a read chunk into sub-chunks and feed each one (with its own
%% consecutive nonce) into process_sub_chunk/4. Returns the updated state.
process_all_sub_chunks(_WhichChunk, <<>>, _Candidate, _Nonce, State) ->
	%% The whole chunk has been consumed.
	State;
process_all_sub_chunks(WhichChunk, Chunk, Candidate, Nonce, State)
		when Candidate#mining_candidate.packing_difficulty == 0 ->
	%% NOTE(review): logging at error level on the normal spora 2.6 path looks like
	%% leftover debugging — confirm before relying on this log event.
	?LOG_ERROR([{event, process_all_sub_chunks}, {packing_difficulty, 0}]),
	%% Spora 2.6 packing (aka difficulty 0): the whole chunk is one "sub-chunk".
	Candidate1 = Candidate#mining_candidate{ nonce = Nonce },
	process_sub_chunk(WhichChunk, Candidate1, Chunk, State);
process_all_sub_chunks(
	WhichChunk,
	<< SubChunk:?COMPOSITE_PACKING_SUB_CHUNK_SIZE/binary, Rest/binary >>,
	Candidate, Nonce, State
) ->
	%% Composite packing / replica packing (aka difficulty 1+): one nonce per
	%% sub-chunk of ?COMPOSITE_PACKING_SUB_CHUNK_SIZE bytes.
	Candidate1 = Candidate#mining_candidate{ nonce = Nonce },
	State1 = process_sub_chunk(WhichChunk, Candidate1, SubChunk, State),
	process_all_sub_chunks(WhichChunk, Rest, Candidate1, Nonce + 1, State1);
process_all_sub_chunks(WhichChunk, Rest, Candidate, Nonce, State) ->
	%% The chunk is not a multiple of the subchunk size; log and give up on the
	%% remainder.
	log_error(failed_to_split_chunk_into_sub_chunks, Candidate, State, [
		{remaining_size, byte_size(Rest)},
		{nonce, Nonce},
		{chunk, WhichChunk}]),
	State.

%% @doc Handle a single sub-chunk of the first (chunk1) or second (chunk2)
%% recall range: cache it and trigger the corresponding hash computation.
process_sub_chunk(chunk1, Candidate, SubChunk, State) ->
	%% Store the chunk1 in the cache first; only compute h1 if the store succeeds.
	case ar_mining_cache:with_cached_value(
		?CACHE_KEY(Candidate#mining_candidate.cache_ref, Candidate#mining_candidate.nonce),
		Candidate#mining_candidate.session_key,
		State#state.chunk_cache,
		fun(CachedValue) -> {ok, CachedValue#ar_mining_cache_value{ chunk1 = SubChunk }} end
	) of
		{ok, ChunkCache2} ->
			ar_mining_hash:compute_h1(self(), Candidate#mining_candidate{ chunk1 = SubChunk }),
			State#state{ chunk_cache = ChunkCache2 };
		{error, Reason} ->
			log_error(mining_worker_failed_to_process_chunk1, Candidate, State, [{reason, Reason}]),
			%% Mark chunk1 as failed for this nonce. This releases the sub-chunk reservation
			%% (pre-allocated in compute_h0) and ensures that when chunk2 arrives, the entry
			%% will be dropped immediately — preventing orphaned entries from filling the cache.
			%% Marking as failed adds no binary data (size difference = 0), so it succeeds even
			%% when the cache is full.
			SubChunkSize = ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty),
			case ar_mining_cache:with_cached_value(
				?CACHE_KEY(Candidate#mining_candidate.cache_ref, Candidate#mining_candidate.nonce),
				Candidate#mining_candidate.session_key,
				State#state.chunk_cache,
				fun
					(#ar_mining_cache_value{ chunk2_failed = true }) ->
						%% chunk2 already failed: nothing left to wait for, drop the entry.
						{ok, drop, -SubChunkSize};
					(#ar_mining_cache_value{ chunk2 = Chunk2 }) when is_binary(Chunk2) ->
						%% chunk2 is already cached but chunk1 is unusable — drop the entry.
						{ok, drop, -SubChunkSize};
					(#ar_mining_cache_value{ chunk2 = undefined } = CachedValue) ->
						%% chunk2 still pending: keep a failure marker so the entry is
						%% dropped as soon as chunk2 arrives.
						{ok, CachedValue#ar_mining_cache_value{ chunk1_failed = true }, -SubChunkSize}
				end
			) of
				{ok, ChunkCache3} ->
					State#state{ chunk_cache = ChunkCache3 };
				{error, Reason2} ->
					log_error(mining_worker_failed_to_release_reservation_for_session,
						Candidate, State, [{reason, Reason2}]),
					State
			end
	end;
process_sub_chunk(chunk2, Candidate, SubChunk, State) ->
	Candidate1 = Candidate#mining_candidate{ chunk2 = SubChunk },
	case ar_mining_cache:with_cached_value(
		?CACHE_KEY(Candidate1#mining_candidate.cache_ref, Candidate1#mining_candidate.nonce),
		Candidate#mining_candidate.session_key,
		State#state.chunk_cache,
		fun
			(#ar_mining_cache_value{ chunk1_failed = true }) ->
				%% chunk1 already failed, so there was no reservation for it.
				%% Since there is no need to calculate H2, we can just drop the cached value.
				{ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(#ar_mining_cache_value{ h1_passes_diff_checks = true } = _CachedValue) ->
				%% H1 passes diff checks, so we skip H2 for this nonce.
				%% Drop the cached data, we don't need it anymore.
				%% Since we already reserved the cache size for chunk2, but we never store it,
				%% we need to drop the reservation here.
				{ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(#ar_mining_cache_value{ h1 = undefined } = CachedValue) ->
				%% H1 is not yet calculated, cache the chunk2 for this nonce.
				{ok, CachedValue#ar_mining_cache_value{ chunk2 = SubChunk }};
			(#ar_mining_cache_value{ h1 = H1, chunk1 = Chunk1 } = CachedValue) ->
				%% H1 is already calculated, compute H2 and cache the chunk2 for this nonce.
				ar_mining_hash:compute_h2(self(),
					Candidate1#mining_candidate{ h1 = H1, chunk1 = Chunk1 }),
				{ok, CachedValue#ar_mining_cache_value{ chunk2 = SubChunk }}
		end
	) of
		{ok, ChunkCache2} ->
			State#state{ chunk_cache = ChunkCache2 };
		{error, Reason} ->
			log_error(mining_worker_failed_to_process_chunk2, Candidate1, State, [{reason, Reason}]),
			%% Mark chunk2 as failed for this nonce. This releases the sub-chunk reservation
			%% and prevents orphaned cache entries from accumulating.
			SubChunkSize = ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty),
			case ar_mining_cache:with_cached_value(
				?CACHE_KEY(Candidate1#mining_candidate.cache_ref, Candidate1#mining_candidate.nonce),
				Candidate#mining_candidate.session_key,
				State#state.chunk_cache,
				fun
					(#ar_mining_cache_value{ chunk1_failed = true }) ->
						%% Both halves failed — drop the entry outright.
						{ok, drop, -SubChunkSize};
					(#ar_mining_cache_value{ chunk1 = Chunk1, h1 = undefined } = CachedValue)
							when is_binary(Chunk1) ->
						%% chunk1 cached but H1 still pending: keep the failure marker so the
						%% entry is reclaimed once H1 is computed.
						{ok, CachedValue#ar_mining_cache_value{ chunk2_failed = true }, -SubChunkSize};
					(#ar_mining_cache_value{ h1 = H1 }) when is_binary(H1) ->
						%% H1 already computed; without chunk2 the entry is useless — drop it.
						{ok, drop, -SubChunkSize};
					(CachedValue) ->
						%% chunk1 hasn't arrived yet; mark chunk2 failed and wait for chunk1.
						{ok, CachedValue#ar_mining_cache_value{ chunk2_failed = true }, -SubChunkSize}
				end
			) of
				{ok, ChunkCache3} ->
					State#state{ chunk_cache = ChunkCache3 };
				{error, Reason2} ->
					log_error(mining_worker_failed_to_release_reservation_for_session,
						Candidate1, State, [{reason, Reason2}]),
					State
			end
	end.
%% @doc Task-queue ordering: lower tuple sorts first. H2 results are the most
%% urgent (they may complete a solution), then H1 results and peer H2 requests,
%% then freshly-read chunks, and finally new H0 work. Within a task type, newer
%% VDF steps (larger StepNumber) run first because of the negation.
priority(computed_h2, StepNumber) -> {1, -StepNumber};
priority(computed_h1, StepNumber) -> {2, -StepNumber};
priority(compute_h2_for_peer, StepNumber) -> {2, -StepNumber};
priority(chunk2, StepNumber) -> {3, -StepNumber};
priority(chunk1, StepNumber) -> {4, -StepNumber};
priority(computed_h0, StepNumber) -> {5, -StepNumber};
priority(compute_h0, StepNumber) -> {6, -StepNumber}.

%% @doc Returns true if the mining candidate belongs to a valid mining session. Always assume
%% that a coordinated mining candidate is valid (its cm_lead_peer is set).
is_session_valid(_State, #mining_candidate{ cm_lead_peer = Peer }) when Peer /= not_set ->
	true;
is_session_valid(State, #mining_candidate{ session_key = SessionKey }) ->
	ar_mining_cache:session_exists(SessionKey, State#state.chunk_cache).

%% @doc Check the H1 solution hash against the difficulty; H1 is a PoA1 hash.
h1_passes_diff_checks(H1, Candidate, State) ->
	passes_diff_checks(H1, true, Candidate, State).

%% @doc Check the H2 solution hash against the difficulty; H2 is not a PoA1 hash.
h2_passes_diff_checks(H2, Candidate, State) ->
	passes_diff_checks(H2, false, Candidate, State).

%% @doc Returns true when SolutionHash meets the full network difficulty,
%% the atom `partial` when it only meets the partial (pool) difficulty, and
%% false otherwise.
passes_diff_checks(SolutionHash, IsPoA1, Candidate, State) ->
	DiffPair = get_difficulty(State, Candidate),
	#mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate,
	case ar_node_utils:passes_diff_check(SolutionHash, IsPoA1, DiffPair, PackingDifficulty) of
		true ->
			true;
		false ->
			%% Full difficulty not met; see whether the partial (pool) difficulty is.
			case get_partial_difficulty(State, Candidate) of
				not_set ->
					false;
				PartialDiffPair ->
					case ar_node_utils:passes_diff_check(
						SolutionHash, IsPoA1, PartialDiffPair, PackingDifficulty
					) of
						true -> partial;
						false -> false
					end
			end
	end.

%% @doc Warn when the worker's queue of pending compute_h0 tasks indicates it
%% is falling behind the nonce limiter (VDF). Purely diagnostic; returns ok.
maybe_warn_about_lag(Q, Name) ->
	case gb_sets:is_empty(Q) of
		true ->
			ok;
		false ->
			case gb_sets:take_smallest(Q) of
				{{_Priority, _ID, {compute_h0, _}}, Q3} ->
					%% Since we sample the queue asynchronously, we expect there to regularly
					%% be a queue of length 1 (i.e. a task may have just been added to the
					%% queue when we run this check).
					%%
					%% To further reduce log spam, we'll only warn if the queue is greater
					%% than 2. We really only care if a queue is consistently long or if
					%% it's getting longer. Temporary blips are fine. We may increase
					%% the threshold in the future.
					N = count_h0_tasks(Q3) + 1,
					case N > 2 of
						false ->
							ok;
						true ->
							?LOG_WARNING([
								{event, mining_worker_lags_behind_the_nonce_limiter},
								{worker, Name},
								{step_count, N}])
					end;
				_ ->
					ok
			end
	end.

%% @doc Count the run of compute_h0 tasks at the head of the priority queue.
%% Stops at the first non-compute_h0 task (compute_h0 has the lowest priority,
%% so such tasks sort to the front only when nothing else is pending).
count_h0_tasks(Q) ->
	case gb_sets:is_empty(Q) of
		true ->
			0;
		false ->
			case gb_sets:take_smallest(Q) of
				{{_Priority, _ID, {compute_h0, _Args}}, Q2} ->
					1 + count_h0_tasks(Q2);
				_ ->
					0
			end
	end.

%% @doc Reconcile the cache's session list with ActiveSessions: drop sessions
%% that are no longer active (purging their queued tasks), then add new ones.
update_sessions(ActiveSessions, State) ->
	CurrentSessions = ar_mining_cache:get_sessions(State#state.chunk_cache),
	AddedSessions = lists:subtract(ActiveSessions, CurrentSessions),
	RemovedSessions = lists:subtract(CurrentSessions, ActiveSessions),
	add_sessions(AddedSessions, remove_sessions(RemovedSessions, State)).

%% @doc Register each new session key with the chunk cache.
add_sessions([], State) ->
	State;
add_sessions([SessionKey | AddedSessions], State) ->
	ChunkCache = ar_mining_cache:add_session(SessionKey, State#state.chunk_cache),
	log_debug(mining_debug_add_session, State, [
		{session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]),
	add_sessions(AddedSessions, State#state{chunk_cache = ChunkCache}).

%% @doc Drop each stale session from the cache and purge its queued tasks.
remove_sessions([], State) ->
	State;
remove_sessions([SessionKey | RemovedSessions], State) ->
	ChunkCache = ar_mining_cache:drop_session(SessionKey, State#state.chunk_cache),
	TaskQueue = remove_tasks(SessionKey, State#state.task_queue),
	log_debug(mining_debug_remove_session, State, [
		{session_key, ar_nonce_limiter:encode_session_key(SessionKey)}]),
	remove_sessions(RemovedSessions,
		State#state{ task_queue = TaskQueue, chunk_cache = ChunkCache }).

%% @doc Remove every queued task whose candidate belongs to SessionKey,
%% decrementing the per-task-type queue-length gauge for each removal.
remove_tasks(SessionKey, TaskQueue) ->
	gb_sets:filter(
		fun({_Priority, _ID, {TaskType, Candidate, _ExtraArgs}}) ->
			case Candidate#mining_candidate.session_key == SessionKey of
				true ->
					prometheus_gauge:dec(mining_server_task_queue_len, [TaskType]),
					false;
				false ->
					true
			end
		end,
		TaskQueue
	).
%% @doc Reserve Multiplier recall ranges' worth of cache space for SessionKey.
%% Returns {true, State1} on success and the bare atom `false` on failure
%% (note the asymmetric return shape — callers must handle both).
try_to_reserve_cache_range_space(Multiplier, SessionKey, #state{
	packing_difficulty = PackingDifficulty, chunk_cache = ChunkCache0
} = State) ->
	ReserveSize = Multiplier * ar_block:get_recall_range_size(PackingDifficulty),
	case ar_mining_cache:reserve_for_session(SessionKey, ReserveSize, ChunkCache0) of
		{ok, ChunkCache1} ->
			State1 = State#state{ chunk_cache = ChunkCache1 },
			{true, State1};
		{error, Reason} ->
			log_warning(mining_worker_failed_to_reserve_cache_space, State, [
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{cache_size, ar_mining_cache:cache_size(ChunkCache0)},
				{cache_limit, ar_mining_cache:get_limit(ChunkCache0)},
				{reserved_size, ar_mining_cache:reserved_size(ChunkCache0)},
				{reserve_size, ReserveSize},
				{reason, Reason}]),
			false
	end.

%% @doc Release Multiplier recall ranges' worth of reserved cache space for
%% SessionKey. Always returns a state; on error the state is left unchanged.
release_cache_range_space(Multiplier, SessionKey, #state{
	packing_difficulty = PackingDifficulty, chunk_cache = ChunkCache0
} = State) ->
	ReleaseSize = Multiplier * ar_block:get_recall_range_size(PackingDifficulty),
	case ar_mining_cache:release_for_session(SessionKey, ReleaseSize, ChunkCache0) of
		{ok, ChunkCache1} ->
			State#state{ chunk_cache = ChunkCache1 };
		{error, Reason} ->
			log_error(mining_worker_failed_to_release_cache_space, State, [
				{session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
				{cache_size, ar_mining_cache:cache_size(ChunkCache0)},
				{cache_limit, ar_mining_cache:get_limit(ChunkCache0)},
				{reserved_size, ar_mining_cache:reserved_size(ChunkCache0)},
				{release_size, ReleaseSize},
				{reason, Reason}]),
			State
	end.

%% @doc Mark the chunk1 as failed or drop the cache and reservation for this chunk.
%% This function is called for one chunk1. Iterates over every nonce of the
%% missing chunk (one per sub-chunk).
mark_single_chunk1_failed_or_drop(Nonce, Candidate, State) ->
	#mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate,
	SubChunksPerChunk = ar_block:get_nonces_per_chunk(PackingDifficulty),
	mark_single_chunk1_failed_or_drop(Nonce, SubChunksPerChunk, Candidate, State).

%% All nonces of the chunk handled.
mark_single_chunk1_failed_or_drop(_Nonce, 0, _Candidate, State) ->
	State;
mark_single_chunk1_failed_or_drop(Nonce, NoncesLeft, Candidate, State) ->
	%% Mark the chunk1 as failed.
	%% The cache reservation for this chunk1 will be dropped in the final (first) clause of the function.
	case ar_mining_cache:with_cached_value(
		?CACHE_KEY(Candidate#mining_candidate.cache_ref, Nonce),
		Candidate#mining_candidate.session_key,
		State#state.chunk_cache,
		fun
			(#ar_mining_cache_value{ chunk2_failed = true }) ->
				%% chunk2 already failed, so there was no reservation for it.
				%% We can just drop the cached value.
				{ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(#ar_mining_cache_value{chunk2 = Chunk2}) when is_binary(Chunk2) ->
				%% We've already read the chunk2 from disk, so we can just drop the cached value.
				%% The cache reservation for corresponding chunk2 was already consumed.
				{ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(#ar_mining_cache_value{chunk2 = undefined} = CachedValue) ->
				%% Mark the chunk1 as failed.
				%% When the corresponding chunk2 will be read from disk, it will be dropped immediately.
				{ok, CachedValue#ar_mining_cache_value{ chunk1_failed = true },
					-ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}
		end
	) of
		{ok, ChunkCache1} ->
			mark_single_chunk1_failed_or_drop(Nonce + 1, NoncesLeft - 1, Candidate,
				State#state{ chunk_cache = ChunkCache1 });
		{error, Reason} ->
			%% Log and continue with the remaining nonces anyway.
			log_error(mining_worker_failed_to_mark_chunk1_failed, Candidate, State,
				[{reason, Reason}]),
			mark_single_chunk1_failed_or_drop(Nonce + 1, NoncesLeft - 1, Candidate, State)
	end.

%% @doc Mark the chunk2 as failed for a single chunk. Iterates over every nonce
%% of the missing chunk (one per sub-chunk).
mark_single_chunk2_failed_or_drop(Nonce, Candidate, State) ->
	#mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate,
	SubChunksPerChunk = ar_block:get_nonces_per_chunk(PackingDifficulty),
	mark_single_chunk2_failed_or_drop(Nonce, SubChunksPerChunk, Candidate, State).
%% All nonces of the chunk handled.
mark_single_chunk2_failed_or_drop(_Nonce, 0, _Candidate, State) ->
	State;
mark_single_chunk2_failed_or_drop(Nonce, NoncesLeft, Candidate, State) ->
	case ar_mining_cache:with_cached_value(
		?CACHE_KEY(Candidate#mining_candidate.cache_ref, Nonce),
		Candidate#mining_candidate.session_key,
		State#state.chunk_cache,
		fun
			(#ar_mining_cache_value{ chunk1_failed = true }) ->
				%% chunk1 already failed, so the reservation for it was released.
				%% We can just drop the cached value and release the reservation for a single subchunk.
				{ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(#ar_mining_cache_value{chunk1 = Chunk1, h1 = undefined} = CachedValue)
					when is_binary(Chunk1) ->
				%% We have the corresponding chunk1, but we didn't calculate H1 yet.
				%% Mark chunk2 as failed to drop the cached value after we calculate H1.
				%% Drop the reservation for a single subchunk.
				{ok, CachedValue#ar_mining_cache_value{ chunk2_failed = true },
					-ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(#ar_mining_cache_value{h1 = H1}) when is_binary(H1) ->
				%% We've already calculated H1, so we can drop the cached value.
				%% Drop the reservation for a single subchunk.
				{ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)};
			(CachedValue) ->
				%% chunk1 hasn't arrived yet, so
				%% we just mark the chunk2 as failed and continue.
				%% Drop the reservation for a single subchunk.
				{ok, CachedValue#ar_mining_cache_value{ chunk2_failed = true },
					-ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}
		end
	) of
		{ok, ChunkCache1} ->
			mark_single_chunk2_failed_or_drop(Nonce + 1, NoncesLeft - 1, Candidate,
				State#state{ chunk_cache = ChunkCache1 });
		{error, Reason} ->
			%% NB: this clause may cause a memory leak, because mining worker will wait for
			%% chunk2 to arrive.
			log_error(mining_worker_failed_to_mark_chunk2_failed, Candidate, State,
				[{reason, Reason}]),
			mark_single_chunk2_failed_or_drop(Nonce + 1, NoncesLeft - 1, Candidate, State)
	end.

%% @doc Mark the chunk1 or chunk2 as failed for the whole recall range.
mark_recall_range_failed(WhichChunk, Candidate, State) ->
	#mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate,
	mark_recall_range_failed(WhichChunk, 0,
		ar_block:get_nonces_per_recall_range(PackingDifficulty), Candidate, State).

%% Every nonce of the recall range handled.
mark_recall_range_failed(_WhichChunk, _Nonce, 0, _Candidate, State) ->
	State;
mark_recall_range_failed(WhichChunk, Nonce, NoncesLeft, Candidate, State) ->
	case ar_mining_cache:with_cached_value(
		?CACHE_KEY(Candidate#mining_candidate.cache_ref, Nonce),
		Candidate#mining_candidate.session_key,
		State#state.chunk_cache,
		fun(CachedValue) ->
			case WhichChunk of
				chunk1 -> {ok, CachedValue#ar_mining_cache_value{ chunk1_failed = true }};
				chunk2 -> {ok, CachedValue#ar_mining_cache_value{ chunk2_failed = true }}
			end
		end
	) of
		{ok, ChunkCache1} ->
			mark_recall_range_failed(WhichChunk, Nonce + 1, NoncesLeft - 1, Candidate,
				State#state{ chunk_cache = ChunkCache1 });
		{error, Reason} ->
			%% NB: this clause may cause a memory leak, because mining worker will wait for
			%% WhichChunk to arrive.
			log_error(mining_worker_failed_to_add_chunk_to_cache, Candidate, State,
				[{reason, Reason}]),
			mark_recall_range_failed(WhichChunk, Nonce + 1, NoncesLeft - 1, Candidate, State)
	end.

%% @doc Cache the list of {H1, Nonce} pairs received from a coordinated-mining
%% peer. If the candidate carries no cache_ref, there is nowhere to store the
%% H1s, so the list is ignored.
%%
%% FIX: the not_set clause previously matched the empty list ([]) only, which
%% the first clause already covers — making it unreachable, and letting a
%% candidate with cache_ref = not_set fall through to ?CACHE_KEY(not_set, Nonce)
%% below. It now matches any list.
cache_h1_list(_Candidate, [], State) ->
	State;
cache_h1_list(#mining_candidate{ cache_ref = not_set } = _Candidate, _H1List, State) ->
	State;
cache_h1_list(Candidate, [ {H1, Nonce} | H1List ], State) ->
	case ar_mining_cache:with_cached_value(
		?CACHE_KEY(Candidate#mining_candidate.cache_ref, Nonce),
		Candidate#mining_candidate.session_key,
		State#state.chunk_cache,
		fun(CachedValue) ->
			%% Store the H1 received from peer, and set chunk1_failed to false,
			%% marking that we have a recall range for this H1 list.
			{ok, CachedValue#ar_mining_cache_value{ h1 = H1, chunk1_failed = false }}
		end
	) of
		{ok, ChunkCache1} ->
			cache_h1_list(Candidate, H1List, State#state{ chunk_cache = ChunkCache1 });
		{error, Reason} ->
			%% Log and keep caching the remaining H1s.
			log_error(mining_worker_failed_to_cache_h1, Candidate, State, [{reason, Reason}]),
			cache_h1_list(Candidate, H1List, State)
	end.

%% @doc The difficulty pair to validate against: the candidate's coordinated
%% mining difficulty when set, otherwise the worker's own.
get_difficulty(State, #mining_candidate{ cm_diff = not_set }) ->
	State#state.diff_pair;
get_difficulty(_State, #mining_candidate{ cm_diff = DiffPair }) ->
	DiffPair.

%% @doc The partial (pool) difficulty: not_set unless this node is a pool
%% client, in which case the candidate's cm_diff is used.
get_partial_difficulty(#state{ is_pool_client = false }, _Candidate) ->
	not_set;
get_partial_difficulty(_State, #mining_candidate{ cm_diff = DiffPair }) ->
	DiffPair.

%% @doc Attach a fresh, unique cache_ref to the candidate. The ref is scoped by
%% both partitions and the partition upper bound so cache keys cannot collide
%% across recall ranges.
generate_cache_ref(Candidate) ->
	#mining_candidate{
		partition_number = Partition1, partition_number2 = Partition2,
		partition_upper_bound = PartitionUpperBound } = Candidate,
	CacheRef = {Partition1, Partition2, PartitionUpperBound, make_ref()},
	Candidate#mining_candidate{ cache_ref = CacheRef }.

%% @doc Bump the per-partition h1/h2 hash counter in the state; the counters
%% are flushed to ar_mining_stats by report_and_reset_hashes/1.
hash_computed(WhichHash, Candidate, State) ->
	case WhichHash of
		h1 ->
			PartitionNumber = Candidate#mining_candidate.partition_number,
			Hashes = maps:get(PartitionNumber, State#state.h1_hashes, 0),
			State#state{ h1_hashes = maps:put(PartitionNumber, Hashes+1, State#state.h1_hashes) };
		h2 ->
			PartitionNumber = Candidate#mining_candidate.partition_number2,
			Hashes = maps:get(PartitionNumber, State#state.h2_hashes, 0),
			State#state{ h2_hashes = maps:put(PartitionNumber, Hashes+1, State#state.h2_hashes) }
	end.

%% @doc Report the accumulated h1/h2 hash counts to ar_mining_stats and clear
%% the accumulators.
report_and_reset_hashes(State) ->
	maps:foreach(
		fun(PartitionNumber, Count) -> ar_mining_stats:h1_computed(PartitionNumber, Count) end,
		State#state.h1_hashes
	),
	maps:foreach(
		fun(PartitionNumber, Count) -> ar_mining_stats:h2_computed(PartitionNumber, Count) end,
		State#state.h2_hashes
	),
	State#state{ h1_hashes = #{}, h2_hashes = #{} }.
%% @doc Publish the chunk cache's total and reserved sizes to the
%% mining_server_chunk_cache_size gauge. Always returns State unchanged.
report_chunk_cache_metrics(#state{chunk_cache = ChunkCache, partition_number = Partition} = State) ->
	prometheus_gauge:set(mining_server_chunk_cache_size, [Partition, "total"],
		ar_mining_cache:cache_size(ChunkCache)),
	case ar_mining_cache:reserved_size(ChunkCache) of
		{ok, ReservedSize} ->
			prometheus_gauge:set(mining_server_chunk_cache_size, [Partition, "reserved"],
				ReservedSize);
		{error, Reason} ->
			log_error(mining_worker_failed_to_report_chunk_cache_metrics, State,
				[{reason, Reason}])
	end,
	State.

%% @doc Build the log proplist for a state record, a candidate record, or
%% `undefined` (which contributes nothing).
format_logs(State = #state{}) ->
	#state{
		name = Name, partition_number = PartitionNumber,
		latest_vdf_step_number = LatestStepNumber } = State,
	[
		{worker, Name},
		{state_partition, PartitionNumber},
		{latest_vdf_step_number, LatestStepNumber}
	];
format_logs(Candidate = #mining_candidate{}) ->
	#mining_candidate{
		cm_lead_peer = Peer, session_key = SessionKey,
		step_number = StepNumber, partition_number = Partition,
		partition_number2 = Partition2, nonce = Nonce } = Candidate,
	[{cm_peer, Peer},
		{candidate_session, ar_nonce_limiter:encode_session_key(SessionKey)},
		{candidate_step_number, StepNumber},
		{candidate_nonce, Nonce},
		{candidate_partition, Partition},
		{candidate_partition2, Partition2}];
format_logs(undefined) ->
	[].

%% @doc Assemble a full log proplist: event tag, state fields, candidate
%% fields (if any), then caller-provided extras.
format_logs(Event, Candidate, State, ExtraLogs) ->
	[{event, Event}] ++ format_logs(State) ++ format_logs(Candidate) ++ ExtraLogs.

%% Leveled logging helpers; the 3-arity forms log without a candidate.
log_debug(Event, Candidate, State, ExtraLogs) ->
	?LOG_DEBUG(format_logs(Event, Candidate, State, ExtraLogs)).

log_debug(Event, State, ExtraLogs) ->
	log_debug(Event, undefined, State, ExtraLogs).

log_error(Event, Candidate, State, ExtraLogs) ->
	?LOG_ERROR(format_logs(Event, Candidate, State, ExtraLogs)).

log_error(Event, State, ExtraLogs) ->
	log_error(Event, undefined, State, ExtraLogs).

log_info(Event, Candidate, State, ExtraLogs) ->
	?LOG_INFO(format_logs(Event, Candidate, State, ExtraLogs)).

log_info(Event, State, ExtraLogs) ->
	log_info(Event, undefined, State, ExtraLogs).
log_warning(Event, Candidate, State, ExtraLogs) ->
	?LOG_WARNING(format_logs(Event, Candidate, State, ExtraLogs)).

log_warning(Event, State, ExtraLogs) ->
	log_warning(Event, undefined, State, ExtraLogs).

%%%===================================================================
%%% Public Test interface.
%%%===================================================================



================================================
FILE: apps/arweave/src/ar_network_middleware.erl
================================================
%% @doc Cowboy middleware that rejects P2P requests originating from a
%% different Arweave network and opportunistically learns new peers.
-module(ar_network_middleware).

-behaviour(cowboy_middleware).

-export([execute/2]).

-include_lib("arweave/include/ar.hrl").

%% @doc Cowboy middleware callback. Requests carrying a matching `x-network`
%% header (or none — the header defaults to our own network name) proceed and
%% may register the sender as a peer. Requests from a foreign network are only
%% allowed through for read-only methods (GET/HEAD/OPTIONS); anything else is
%% answered with 412 `wrong_network`.
%%
%% FIX: the header default and match pattern were corrupted to the invalid
%% token `<>` in this copy; restored to `<<?NETWORK_NAME>>` (macro from ar.hrl),
%% without which the clause cannot compile or distinguish networks.
execute(Req, Env) ->
	case cowboy_req:header(<<"x-network">>, Req, <<?NETWORK_NAME>>) of
		<<?NETWORK_NAME>> ->
			maybe_add_peer(ar_http_util:arweave_peer(Req), Req),
			{ok, Req, Env};
		_ ->
			case cowboy_req:method(Req) of
				<<"GET">> -> {ok, Req, Env};
				<<"HEAD">> -> {ok, Req, Env};
				<<"OPTIONS">> -> {ok, Req, Env};
				_ -> wrong_network(Req)
			end
	end.

%% @doc When a node receives a request that includes the x-p2p-port header, it will attempt to
%% add the requesting node to its peer list.
maybe_add_peer(Peer, Req) ->
	case cowboy_req:header(<<"x-p2p-port">>, Req, not_set) of
		not_set ->
			ok;
		_ ->
			ar_peers:add_peer(Peer, get_release(Req))
	end.

%% @doc Reply 412 with a JSON `wrong_network` error and stop the middleware
%% chain.
wrong_network(Req) ->
	{stop, cowboy_req:reply(412, #{}, jiffy:encode(#{ error => wrong_network }), Req)}.

%% @doc Parse the sender's release number from the x-release header; -1 when
%% absent or unparsable.
get_release(Req) ->
	case cowboy_req:header(<<"x-release">>, Req, -1) of
		-1 ->
			-1;
		ReleaseBin ->
			case catch binary_to_integer(ReleaseBin) of
				{'EXIT', _} -> -1;
				Release -> Release
			end
	end.



================================================
FILE: apps/arweave/src/ar_node.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

-module(ar_node).
-export([get_recent_block_hash_by_height/1, get_blocks/0, get_block_index/0,
		get_current_block/0, get_current_diff/0,
		is_in_block_index/1, get_block_index_and_height/0,
		get_height/0, get_weave_size/0,
		get_balance/1, get_last_tx/1, get_ready_for_mining_txs/0,
		get_current_usd_to_ar_rate/0, get_current_block_hash/0,
		get_block_index_entry/1, get_2_0_hash_of_1_0_block/1, is_joined/0,
		get_block_anchors/0, get_recent_txs_map/0,
		get_mempool_size/0, get_block_shadow_from_cache/1,
		get_recent_partition_upper_bound_by_prev_h/1,
		get_block_txs_pairs/0, get_partition_upper_bound/1, get_nth_or_last/2,
		get_partition_number/1, get_max_partition_number/1,
		get_current_weave_size/0, get_recent_max_block_size/0,
		read_recent_blocks/3]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% API
%%%===================================================================

%% @doc Return the hash of the block of the given Height. Return not_found
%% if Height is bigger than the current height or too small.
get_recent_block_hash_by_height(Height) ->
	%% Fetch both the current height and the recent block anchors from the
	%% node_state ETS table in a single select.
	Props =
		ets:select(
			node_state,
			[{{'$1', '$2'},
				[{'or',
					{'==', '$1', height},
					{'==', '$1', block_anchors}}], ['$_']}]
		),
	CurrentHeight = proplists:get_value(height, Props),
	Anchors = proplists:get_value(block_anchors, Props),
	%% Anchors is ordered newest-first; only the last length(Anchors) blocks
	%% are addressable.
	case Height > CurrentHeight orelse Height =< CurrentHeight - length(Anchors) of
		true ->
			not_found;
		false ->
			lists:nth(CurrentHeight - Height + 1, Anchors)
	end.

%% @doc Get the current block index (the list of {block hash, weave size, tx root} triplets).
get_blocks() ->
	get_block_index().

%% @doc Get the current block index (the list of {block hash, weave size, tx root} triplets).
%% Returns [] while the node has not joined the network yet.
get_block_index() ->
	case ar_util:safe_ets_lookup(node_state, is_joined) of
		[{_, true}] ->
			element(2, get_block_index_and_height());
		_ ->
			[]
	end.
%% @doc Return the current tip block. Assume the node has joined the network and
%% initialized the state.
get_current_block() ->
	case ar_util:safe_ets_lookup(node_state, current) of
		[{_, Current}] ->
			ar_block_cache:get(block_cache, Current);
		_ ->
			not_joined
	end.

%% @doc Return the current network difficulty. Assume the node has joined the network and
%% initialized the state.
get_current_diff() ->
	case ar_util:safe_ets_lookup(node_state, diff_pair) of
		[{_, DiffPair}] ->
			DiffPair;
		_ ->
			not_joined
	end.

%% Return {Height, BlockIndex}: the in-memory recent index extended with the
%% older on-disk part fetched from ar_block_index.
get_block_index_and_height() ->
	Props =
		ets:select(
			node_state,
			[{{'$1', '$2'},
				[{'or', {'==', '$1', height}, {'==', '$1', recent_block_index}}], ['$_']}]
		),
	CurrentHeight = proplists:get_value(height, Props),
	RecentBI = proplists:get_value(recent_block_index, Props),
	{CurrentHeight,
		merge(RecentBI, ar_block_index:get_list(CurrentHeight - length(RecentBI)))}.

%% Append the second list after the first one (list concatenation).
merge([Elem | BI], BI2) ->
	[Elem | merge(BI, BI2)];
merge([], BI) ->
	BI.

%% @doc Get the list of being mined or ready to be mined transactions.
%% The list does _not_ include transactions waiting for network propagation.
get_ready_for_mining_txs() ->
	gb_sets:fold(
		fun
			({_Utility, TXID, ready_for_mining}, Acc) ->
				[TXID | Acc];
			(_, Acc) ->
				Acc
		end,
		[],
		ar_mempool:get_priority_set()
	).

%% @doc Return true if the given block hash is found in the block index.
is_in_block_index(H) ->
	ar_block_index:member(H).

%% @doc Get the current block hash.
get_current_block_hash() ->
	case ar_util:safe_ets_lookup(node_state, current) of
		[{current, H}] ->
			H;
		[] ->
			not_joined
	end.

%% Read recent blocks (with transactions) from storage starting at the tip of
%% BI, tolerating up to SearchDepth unreadable tip blocks.
read_recent_blocks(BI, SearchDepth, CustomDir) ->
	read_recent_blocks2(
			lists:sublist(BI, 2 * ar_block:get_max_tx_anchor_depth() + SearchDepth),
			SearchDepth, 0, CustomDir).
%% Give up once the number of skipped (unreadable) tip blocks exhausts the
%% allowed search depth.
read_recent_blocks2(_BI, Depth, Skipped, _CustomDir)
		when Skipped > Depth orelse (Skipped > 0 andalso Depth == Skipped) ->
	not_found;
read_recent_blocks2([], _SearchDepth, Skipped, _CustomDir) ->
	{Skipped, []};
read_recent_blocks2([{BH, _, _} | BI], SearchDepth, Skipped, CustomDir) ->
	case ar_storage:read_block(BH, CustomDir) of
		B = #block{} ->
			TXs = ar_storage:read_tx(B#block.txs, CustomDir),
			case lists:any(fun(TX) -> TX == unavailable end, TXs) of
				true ->
					%% Some transaction headers are missing - treat this tip
					%% block as unreadable and try the next one.
					read_recent_blocks2(BI, SearchDepth, Skipped + 1, CustomDir);
				false ->
					SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs,
							B#block.height),
					%% Found a readable tip; now all subsequent blocks must be
					%% readable too (read_recent_blocks3 returns not_found otherwise).
					case read_recent_blocks3(BI,
							2 * ar_block:get_max_tx_anchor_depth() - 1,
							[B#block{ size_tagged_txs = SizeTaggedTXs, txs = TXs }],
							CustomDir) of
						not_found ->
							not_found;
						Blocks ->
							{Skipped, Blocks}
					end
			end;
		Error ->
			ar:console("Skipping the block ~s, reason: ~p.~n",
					[ar_util:encode(BH), io_lib:format("~p", [Error])]),
			read_recent_blocks2(BI, SearchDepth, Skipped + 1, CustomDir)
	end.

%% Read BlocksToRead more blocks below the chosen tip; unlike
%% read_recent_blocks2, any unreadable block here is fatal (not_found).
read_recent_blocks3([], _BlocksToRead, Blocks, _CustomDir) ->
	lists:reverse(Blocks);
read_recent_blocks3(_BI, 0, Blocks, _CustomDir) ->
	lists:reverse(Blocks);
read_recent_blocks3([{BH, _, _} | BI], BlocksToRead, Blocks, CustomDir) ->
	case ar_storage:read_block(BH, CustomDir) of
		B = #block{} ->
			TXs = ar_storage:read_tx(B#block.txs, CustomDir),
			case lists:any(fun(TX) -> TX == unavailable end, TXs) of
				true ->
					ar:console("Failed to find all transaction headers for the "
							"block ~s.~n", [ar_util:encode(BH)]),
					not_found;
				false ->
					SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs,
							B#block.height),
					read_recent_blocks3(BI, BlocksToRead - 1,
							[B#block{ size_tagged_txs = SizeTaggedTXs, txs = TXs }
								| Blocks], CustomDir)
			end;
		Error ->
			ar:console("Failed to read block header ~s, reason: ~p.~n",
					[ar_util:encode(BH), io_lib:format("~p", [Error])]),
			not_found
	end.

%% @doc Get the block index entry by height.
%% @doc Get the block index entry by height; not_joined before the node joins.
get_block_index_entry(Height) ->
	case ar_util:safe_ets_lookup(node_state, is_joined) of
		[{_, true}] ->
			ar_block_index:get_element_by_height(Height);
		[{_, false}] ->
			not_joined;
		[] ->
			not_joined
	end.

%% @doc Get the 2.0 hash for a 1.0 block.
%% Before 2.0, to compute a block hash, the complete wallet list
%% and all the preceding hashes were required. Getting a wallet list
%% and a hash list for every historical block to verify it belongs to
%% the weave is very costly. Therefore, a list of 2.0 hashes for 1.0
%% blocks was computed and stored along with the network client.
%% @end
get_2_0_hash_of_1_0_block(Height) ->
	[{hash_list_2_0_for_1_0_blocks, HashList}] =
		ar_util:safe_ets_lookup(node_state, hash_list_2_0_for_1_0_blocks),
	ForkHeight_2_0 = ar_fork:height_2_0(),
	case Height > ForkHeight_2_0 of
		false ->
			lists:nth(ForkHeight_2_0 - Height, HashList);
		true ->
			invalid_height
	end.

%% @doc Return the current height of the blockweave; -1 when the state is not
%% initialized yet.
get_height() ->
	case ar_util:safe_ets_lookup(node_state, height) of
		[] ->
			-1;
		[{height, CurrentHeight}] ->
			CurrentHeight
	end.

%% @doc Return the current weave size; -1 when the state is not initialized yet.
get_weave_size() ->
	case ar_util:safe_ets_lookup(node_state, weave_size) of
		[] ->
			-1;
		[{weave_size, CurrentWeaveSize}] ->
			CurrentWeaveSize
	end.

%% @doc Check whether the node has joined the network.
is_joined() ->
	case ar_util:safe_ets_lookup(node_state, is_joined) of
		[] ->
			false;
		[{is_joined, Joined}] ->
			Joined
	end.

%% @doc Get the currently estimated USD to AR exchange rate.
get_current_usd_to_ar_rate() ->
	[{_, CurrentRate}] = ar_util:safe_ets_lookup(node_state, usd_to_ar_rate),
	CurrentRate.

%% @doc Returns a list of block anchors corresponding to the current state -
%% the hashes of the recent blocks that can be used in transactions as anchors.
%% @end
get_block_anchors() ->
	case ar_util:safe_ets_lookup(node_state, block_anchors) of
		[] ->
			not_joined;
		[{block_anchors, Anchors}] ->
			Anchors
	end.

%% @doc Return a map TXID -> ok containing all the recent transaction identifiers.
%% Used for preventing replay attacks.
%% @end
get_recent_txs_map() ->
	[{recent_txs_map, TXMap}] = ar_util:safe_ets_lookup(node_state, recent_txs_map),
	TXMap.

%% @doc Return memory pool size
get_mempool_size() ->
	[{mempool_size, Size}] = ar_util:safe_ets_lookup(node_state, mempool_size),
	Size.

%% @doc Get the block shadow from the block cache.
get_block_shadow_from_cache(BH) ->
	ar_block_cache:get(block_cache, BH).

%% @doc Get the current balance of a given wallet address.
%% The balance returned is in relation to the nodes current wallet list.
get_balance({SigType, PubKey}) ->
	get_balance(ar_wallet:to_address(PubKey, SigType));
get_balance(MaybeRSAPub) when byte_size(MaybeRSAPub) == 512 ->
	%% A legacy feature where we may search the public key instead of address.
	ar_wallets:get_balance(ar_wallet:hash_pub_key(MaybeRSAPub));
get_balance(Addr) ->
	ar_wallets:get_balance(Addr).

%% @doc Get the last tx id associated with a given wallet address.
%% Should the wallet not have made a tx the empty binary will be returned.
get_last_tx({SigType, PubKey}) ->
	get_last_tx(ar_wallet:to_address(PubKey, SigType));
get_last_tx(MaybeRSAPub) when byte_size(MaybeRSAPub) == 512 ->
	%% A legacy feature where we may search the public key instead of address.
	get_last_tx(ar_wallet:hash_pub_key(MaybeRSAPub));
get_last_tx(Addr) ->
	{ok, ar_wallets:get_last_tx(Addr)}.

%% Start the recursive partition-upper-bound lookup at depth 0.
get_recent_partition_upper_bound_by_prev_h(H) ->
	get_recent_partition_upper_bound_by_prev_h(H, 0).

%% @doc Get the list of the recent {H, TXIDs} pairs sorted from latest to earliest.
get_block_txs_pairs() ->
	[{_, Pairs}] = ar_util:safe_ets_lookup(node_state, block_txs_pairs),
	Pairs.

%% Return the N-th element of BI, or the last element when BI is shorter than N.
get_nth_or_last(N, BI) ->
	Len = length(BI),
	if
		Len < N -> lists:last(BI);
		true -> lists:nth(N, BI)
	end.

%% Second tuple element (weave size) of the entry ?SEARCH_SPACE_UPPER_BOUND_DEPTH
%% positions into the block index (or the last entry of a shorter index).
get_partition_upper_bound(BI) ->
	element(2, get_nth_or_last(?SEARCH_SPACE_UPPER_BOUND_DEPTH, BI)).
%% Walk the block cache back from H until we are ?SEARCH_SPACE_UPPER_BOUND_DEPTH
%% blocks deep or reach an on-chain block, then finish the search in the recent
%% block index.
get_recent_partition_upper_bound_by_prev_h(H, Diff) ->
	case ar_block_cache:get_block_and_status(block_cache, H) of
		{_B, {on_chain, _}} ->
			[{_, BI}] = ar_util:safe_ets_lookup(node_state, recent_block_index),
			%% A short index means we are close to genesis.
			Genesis = length(BI) =< ?SEARCH_SPACE_UPPER_BOUND_DEPTH,
			get_recent_partition_upper_bound_by_prev_h(H, Diff, BI, Genesis);
		{#block{ indep_hash = H2, previous_block = PrevH, weave_size = WeaveSize }, _} ->
			case Diff == ?SEARCH_SPACE_UPPER_BOUND_DEPTH - 1 of
				true ->
					{H2, WeaveSize};
				false ->
					get_recent_partition_upper_bound_by_prev_h(PrevH, Diff + 1)
			end;
		not_found ->
			?LOG_INFO([{event, prev_block_not_found}, {h, ar_util:encode(H)},
					{depth, Diff}]),
			not_found
	end.

%% Scan the recent block index for H, then take the entry Depth positions below
%% it; near genesis fall back to the last entry.
get_recent_partition_upper_bound_by_prev_h(H, Diff, [{H, _, _} | _] = BI, Genesis) ->
	PartitionUpperBoundDepth = ?SEARCH_SPACE_UPPER_BOUND_DEPTH,
	Depth = PartitionUpperBoundDepth - Diff,
	case length(BI) < Depth of
		true ->
			case Genesis of
				true ->
					{H2, PartitionUpperBound, _TXRoot} = lists:last(BI),
					{H2, PartitionUpperBound};
				false ->
					not_found
			end;
		false ->
			{H2, PartitionUpperBound, _TXRoot} = lists:nth(Depth, BI),
			{H2, PartitionUpperBound}
	end;
get_recent_partition_upper_bound_by_prev_h(H, Diff, [_ | BI], Genesis) ->
	get_recent_partition_upper_bound_by_prev_h(H, Diff, BI, Genesis);
get_recent_partition_upper_bound_by_prev_h(H, Diff, [], _Genesis) ->
	?LOG_INFO([{event, prev_block_not_found_when_scanning_recent_block_index},
			{h, ar_util:encode(H)}, {depth, Diff}]),
	not_found.

%% Map a weave offset to its partition number; passes the undefined/infinity
%% sentinels through unchanged.
get_partition_number(undefined) ->
	undefined;
get_partition_number(infinity) ->
	infinity;
get_partition_number(Offset) ->
	Offset div ar_block:partition_size().

%% @doc Excludes the last partition as it may be incomplete and therefore provides
%% a mining advantage (e.g. it can fit in RAM)
get_max_partition_number(infinity) ->
	infinity;
get_max_partition_number(PartitionUpperBound) ->
	max(0, PartitionUpperBound div ar_block:partition_size() - 1).

%% @doc Return the current weave size. Assume the node has joined the network and
%% initialized the state.
get_current_weave_size() ->
	[{_, WeaveSize}] = ar_util:safe_ets_lookup(node_state, weave_size),
	WeaveSize.

%% @doc Return the maximum block size among the latest ?BLOCK_INDEX_HEAD_LEN blocks.
%% Assume the node has joined the network and initialized the state.
get_recent_max_block_size() ->
	[{_, MaxBlockSize}] = ar_util:safe_ets_lookup(node_state, recent_max_block_size),
	MaxBlockSize.

%%%===================================================================
%%% Tests.
%%%===================================================================

get_recent_partition_upper_bound_by_prev_h_short_cache_test() ->
	ar_block_cache:new(block_cache, B0 = test_block(1, 1, <<>>)),
	H0 = B0#block.indep_hash,
	BI = lists:reverse([{H0, 20, <<>>}
			| [{crypto:strong_rand_bytes(48), 20, <<>>} || _ <- lists:seq(1, 99)]]),
	ets:insert(node_state, {recent_block_index, BI}),
	?assertEqual(not_found,
			get_recent_partition_upper_bound_by_prev_h(B0#block.indep_hash)),
	?assertEqual(not_found,
			get_recent_partition_upper_bound_by_prev_h(crypto:strong_rand_bytes(48))),
	{HPrev, _, _} = lists:nth(length(BI) - ?SEARCH_SPACE_UPPER_BOUND_DEPTH + 2, BI),
	?assertEqual(not_found, get_recent_partition_upper_bound_by_prev_h(HPrev)),
	{H, _, _} = lists:nth(length(BI) - ?SEARCH_SPACE_UPPER_BOUND_DEPTH + 1, BI),
	?assertEqual(not_found, get_recent_partition_upper_bound_by_prev_h(H)),
	add_blocks(tl(lists:reverse(BI)), 2, 2, H0),
	?assertEqual(not_found, get_recent_partition_upper_bound_by_prev_h(HPrev)),
	?assertEqual({H0, 20}, get_recent_partition_upper_bound_by_prev_h(H)),
	{HNext, _, _} = lists:nth(length(BI) - ?SEARCH_SPACE_UPPER_BOUND_DEPTH, BI),
	{H1, _, _} = lists:nth(99, BI),
	?assertEqual({H1, 20}, get_recent_partition_upper_bound_by_prev_h(HNext)).
get_recent_partition_upper_bound_by_prev_h_genesis_test() ->
	ar_block_cache:new(block_cache, B0 = test_block(0, 1, <<>>)),
	H0 = B0#block.indep_hash,
	ets:insert(node_state, {recent_block_index, [{H0, 20, <<>>}]}),
	?assertEqual({H0, 20}, get_recent_partition_upper_bound_by_prev_h(H0)).

%% Build a test block with a random hash.
test_block(Height, CDiff, PrevH) ->
	test_block(crypto:strong_rand_bytes(48), Height, CDiff, PrevH).

test_block(H, Height, CDiff, PrevH) ->
	#block{ indep_hash = H, height = Height, cumulative_diff = CDiff,
			previous_block = PrevH }.

%% Add each block to the cache as validated and advance the tip to it.
add_blocks([{H, _, _} | BI], Height, CDiff, PrevH) ->
	ar_block_cache:add_validated(block_cache, test_block(H, Height, CDiff, PrevH)),
	ar_block_cache:mark_tip(block_cache, H),
	add_blocks(BI, Height + 1, CDiff + 1, H);
add_blocks([], _Height, _CDiff, _PrevH) ->
	ok.


================================================
FILE: apps/arweave/src/ar_node_sup.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

-module(ar_node_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_sup.hrl").

%% ===================================================================
%% API functions
%% ===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

init([]) ->
	{ok, {supervisor_spec(), children_spec()}}.

%% one_for_all: if any child dies, all children are restarted together.
supervisor_spec() ->
	#{ strategy => one_for_all
	 , intensity => 5
	 , period => 10
	 }.
%%--------------------------------------------------------------------
%% the order is important. the first process to be started is
%% ar_node_worker, then other processes in order. The shutdown
%% is in reverse, the last process to be stopped is ar_node_worker.
%%--------------------------------------------------------------------
children_spec() ->
	lists:flatten([
		ar_node_worker_spec(),
		ar_semaphores_spec(),
		ar_blacklist_middleware_spec(),
		ar_http_iface_server_spec()
	]).

%%--------------------------------------------------------------------
%% ar_node_worker is the main process, must be started before others,
%% and should be stopped at last. This process should not be restarted.
%%--------------------------------------------------------------------
ar_node_worker_spec() ->
	#{ id => ar_node_worker
	 , start => {ar_node_worker, start_link, []}
	 , type => worker
	 , shutdown => ?SHUTDOWN_TIMEOUT
	 , restart => temporary
	 }.

%%--------------------------------------------------------------------
%% ar_http_iface_server process is a frontend to cowboy:start*/3,
%% and will return a worker. This worker is protecting the
%% cowboy listener (stored in its state). The timeout should be
%% greater or equal to the TCP_MAX_CONNECTION to avoid killing the
%% child too early during the shutdown procedure.
%%--------------------------------------------------------------------
ar_http_iface_server_spec() ->
	#{ id => ar_http_iface_server
	 , start => {ar_http_iface_server, start_link, []}
	 , type => worker
	 %% Shutdown timeout in milliseconds (the macro is in seconds; x2 margin).
	 , shutdown => ?SHUTDOWN_TCP_CONNECTION_TIMEOUT*2*1000
	 }.

%%--------------------------------------------------------------------
%% ar_blacklist_middle process is transient, it will configure
%% a timer and then return. In case of error, it should be
%% restarted.
%%--------------------------------------------------------------------
ar_blacklist_middleware_spec() ->
	#{ id => ar_blacklist_middleware
	 , start => {ar_blacklist_middleware, start_link, []}
	 , type => worker
	 , restart => transient
	 , shutdown => ?SHUTDOWN_TIMEOUT
	 }.

%%--------------------------------------------------------------------
%% ar_semaphores are processes started based on arweave
%% configuration.
%%--------------------------------------------------------------------
ar_semaphores_spec() ->
	{ok, Config} = arweave_config:get_env(),
	Semaphores = Config#config.semaphores,
	[ ar_semaphore_spec(Name, N) || {Name, N} <- maps:to_list(Semaphores) ].

ar_semaphore_spec(Name, N) ->
	#{ id => Name
	 , start => {ar_semaphore, start_link, [Name, N]}
	 , type => worker
	 , shutdown => ?SHUTDOWN_TIMEOUT
	 }.


================================================
FILE: apps/arweave/src/ar_node_utils.erl
================================================
%%% @doc Different utility functions for node and node worker.
-module(ar_node_utils).

-export([apply_tx/3, apply_txs/3, update_accounts/3, validate/6,
		h1_passes_diff_check/3, h2_passes_diff_check/3, solution_passes_diff_check/2,
		block_passes_diff_check/1, block_passes_diff_check/2, passes_diff_check/4,
		scaled_diff/2, update_account/6, is_account_banned/2]).

-include("ar.hrl").
-include("ar_pricing.hrl").
-include("ar_consensus.hrl").
-include("ar_mining.hrl").

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Update the given accounts by applying a transaction.
%% Accounts without an entry for the transaction owner are left untouched.
apply_tx(Accounts, Denomination, TX) ->
	Addr = ar_tx:get_owner_address(TX),
	case maps:get(Addr, Accounts, not_found) of
		not_found ->
			Accounts;
		_ ->
			apply_tx2(Accounts, Denomination, Addr, TX)
	end.

%% @doc Update the given accounts by applying the given transactions.
apply_txs(Accounts, Denomination, TXs) ->
	lists:foldl(fun(TX, Acc) -> apply_tx(Acc, Denomination, TX) end, Accounts, TXs).

%% @doc Distribute transaction fees across accounts and the endowment pool,
%% reserve a reward for the current miner, release the reserved reward to the corresponding
%% miner. If a double-signing proof is provided, ban the account and assign a
%% reward to the prover.
update_accounts(B, PrevB, Accounts) ->
	%% All pricing inputs are taken from the previous block's state.
	EndowmentPool = PrevB#block.reward_pool,
	Rate = ar_pricing:usd_to_ar_rate(PrevB),
	PricePerGiBMinute = PrevB#block.price_per_gib_minute,
	KryderPlusRateMultiplierLatch = PrevB#block.kryder_plus_rate_multiplier_latch,
	KryderPlusRateMultiplier = PrevB#block.kryder_plus_rate_multiplier,
	Denomination = PrevB#block.denomination,
	DebtSupply = PrevB#block.debt_supply,
	TXs = B#block.txs,
	BlockInterval = ar_block_time_history:compute_block_interval(PrevB),
	Args = get_miner_reward_and_endowment_pool({EndowmentPool, DebtSupply, TXs,
			B#block.reward_addr, B#block.weave_size, B#block.height, B#block.timestamp,
			Rate, PricePerGiBMinute, KryderPlusRateMultiplierLatch,
			KryderPlusRateMultiplier, Denomination, BlockInterval}),
	Accounts2 = apply_txs(Accounts, Denomination, TXs),
	%% Pre-2.6 blocks are not expected here; crash loudly otherwise.
	true = B#block.height >= ar_fork:height_2_6(),
	update_accounts2(B, PrevB, Accounts2, Args).

%%--------------------------------------------------------------------
%% @doc Perform the last stage of block validation. The majority of
%% the checks are made in `ar_block_pre_validator.erl',
%% `ar_nonce_limiter.erl', and `ar_node_utils:update_accounts/3'.
%% @end
%%--------------------------------------------------------------------
-spec validate(NewB, B, Wallets, BlocksAnchors, RecentTXMap, PartitionUpperBound)
		-> Return when
	NewB :: #block{},
	B :: #block{},
	Wallets :: term(),
	BlocksAnchors :: term(),
	RecentTXMap :: term(),
	PartitionUpperBound :: term(),
	Return :: valid | {invalid, Reason},
	Reason :: term().
%% Run do_validate/6 under timer:tc, converting exceptions and error tuples
%% into {invalid, Reason} and logging the outcome with the elapsed time.
validate(NewB, B, Wallets, BlockAnchors, RecentTXMap, PartitionUpperBound) ->
	?LOG_INFO([{event, validating_block}, {hash, ar_util:encode(NewB#block.indep_hash)}]),
	case timer:tc(
		fun() ->
			try
				do_validate(NewB, B, Wallets, BlockAnchors, RecentTXMap,
						PartitionUpperBound)
			catch
				C:R:S ->
					?LOG_ERROR([
						{event, block_validation_exception},
						{class, C}, {reason, R}, {stacktrace, S},
						{hash, ar_util:encode(NewB#block.indep_hash)},
						{height, NewB#block.height}
					]),
					{invalid, validation_exception}
			end
		end
	) of
		{TimeTaken, valid} ->
			?LOG_INFO([{event, block_validation_successful},
					{hash, ar_util:encode(NewB#block.indep_hash)},
					{time_taken_us, TimeTaken}]),
			valid;
		{TimeTaken, {invalid, Reason}} ->
			?LOG_INFO([{event, block_validation_failed}, {reason, Reason},
					{hash, ar_util:encode(NewB#block.indep_hash)},
					{time_taken_us, TimeTaken}]),
			{invalid, Reason};
		{TimeTaken, {error, Reason}} ->
			%% Normalize {error, _} results to {invalid, _} for the caller.
			?LOG_INFO([{event, block_validation_failed}, {reason, Reason},
					{hash, ar_util:encode(NewB#block.indep_hash)},
					{time_taken_us, TimeTaken}]),
			{invalid, Reason};
		{TimeTaken, Else} ->
			?LOG_ERROR([{event, block_validation_failed}, {reason, Else},
					{hash, ar_util:encode(NewB#block.indep_hash)},
					{time_taken_us, TimeTaken}]),
			{invalid, Else}
	end.

%% H1 is always a proof-of-access-1 hash.
h1_passes_diff_check(H1, DiffPair, PackingDifficulty) ->
	passes_diff_check(H1, true, DiffPair, PackingDifficulty).

%% H2 is never a proof-of-access-1 hash.
h2_passes_diff_check(H2, DiffPair, PackingDifficulty) ->
	passes_diff_check(H2, false, DiffPair, PackingDifficulty).

solution_passes_diff_check(Solution, DiffPair) ->
	SolutionHash = Solution#mining_solution.solution_hash,
	PackingDifficulty = Solution#mining_solution.packing_difficulty,
	IsPoA1 = ar_mining_server:is_one_chunk_solution(Solution),
	passes_diff_check(SolutionHash, IsPoA1, DiffPair, PackingDifficulty).

block_passes_diff_check(Block) ->
	SolutionHash = Block#block.hash,
	block_passes_diff_check(SolutionHash, Block).
block_passes_diff_check(SolutionHash, Block) ->
	%% A block without a second recall byte carries a one-chunk (PoA1) solution.
	IsPoA1 = (Block#block.recall_byte2 == undefined),
	PackingDifficulty = Block#block.packing_difficulty,
	DiffPair = ar_difficulty:diff_pair(Block),
	passes_diff_check(SolutionHash, IsPoA1, DiffPair, PackingDifficulty).

-ifdef(LOCALNET).
%% We skip difficulty checks on localnet for faster block production.
passes_diff_check(_SolutionHash, _IsPoA1, _DiffPair, _PackingDifficulty) ->
	true.
-else.
passes_diff_check(SolutionHash, IsPoA1, not_set, _PackingDifficulty) ->
	?LOG_ERROR([{event, diff_check_not_set}, {solution_hash, SolutionHash},
			{is_poa1, IsPoA1}]),
	false;
passes_diff_check(SolutionHash, IsPoA1, {PoA1Diff, Diff}, PackingDifficulty) ->
	%% Pick the difficulty matching the solution type, then compare the hash
	%% against the packing-difficulty-scaled target.
	Diff2 =
		case IsPoA1 of
			true ->
				PoA1Diff;
			false ->
				Diff
		end,
	binary:decode_unsigned(SolutionHash) > scaled_diff(Diff2, PackingDifficulty).
-endif.

%% Scale the difficulty for composite packing; difficulty 0 means the legacy
%% (non-composite) packing and is returned unchanged.
scaled_diff(RawDiff, PackingDifficulty) ->
	case PackingDifficulty of
		0 ->
			RawDiff;
		_ ->
			SubDiff = ar_difficulty:sub_diff(RawDiff, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT),
			%% We are introducing composite packing along with reducing the recall range
			%% from 200 MiB to 50 MiB while keeping the total worth of the nonces constant
			%% so we want the mining difficulty to be 1 / (PackingDifficulty * 4)
			%% of the difficulty applied to the 200 MiB-range nonces.
			ar_difficulty:scale_diff(SubDiff, {1, PackingDifficulty * 4},
					%% The minimal difficulty height. It does not change at the
					%% packing difficulty fork.
					ar_fork:height_2_8())
	end.

%% Store an account; the {Balance, LastTX} short form is only used for
%% denomination 1 with mining permitted.
update_account(Addr, Balance, LastTX, 1, true, Accounts) ->
	maps:put(Addr, {Balance, LastTX}, Accounts);
update_account(Addr, Balance, LastTX, Denomination, MiningPermission, Accounts) ->
	maps:put(Addr, {Balance, LastTX, Denomination, MiningPermission}, Accounts).

%% An account is banned when its 4-tuple form carries MiningPermission == false.
is_account_banned(Addr, Accounts) ->
	case maps:get(Addr, Accounts, not_found) of
		not_found ->
			false;
		{_, _} ->
			false;
		{_, _, _, MiningPermission} ->
			not MiningPermission
	end.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Apply one transaction: debit the sender first, then credit the recipient.
apply_tx2(Accounts, Denomination, Addr, TX) ->
	update_recipient_balance(
			update_sender_balance(Accounts, Denomination, Addr, TX),
			Denomination, TX).

%% Debit Qty + Reward (redenominated) from the sender and record the
%% transaction id as the account's last tx.
update_sender_balance(Accounts, Denomination, Addr,
		#tx{ id = ID, quantity = Qty, reward = Reward,
				denomination = TXDenomination }) ->
	case maps:get(Addr, Accounts, not_found) of
		{Balance, _LastTX} ->
			%% The 2-tuple account form implies denomination 1.
			Balance2 = ar_pricing:redenominate(Balance, 1, Denomination),
			Spent = ar_pricing:redenominate(Qty + Reward, TXDenomination, Denomination),
			update_account(Addr, Balance2 - Spent, ID, Denomination, true, Accounts);
		{Balance, _LastTX, AccountDenomination, MiningPermission} ->
			Balance2 = ar_pricing:redenominate(Balance, AccountDenomination,
					Denomination),
			Spent = ar_pricing:redenominate(Qty + Reward, TXDenomination, Denomination),
			update_account(Addr, Balance2 - Spent, ID, Denomination, MiningPermission,
					Accounts);
		_ ->
			Accounts
	end.

%% Credit the recipient, creating the account when needed; a zero-quantity
%% transaction transfers nothing.
update_recipient_balance(Accounts, _Denomination, #tx{ quantity = 0 }) ->
	Accounts;
update_recipient_balance(Accounts, Denomination,
		#tx{ target = To, quantity = Qty, denomination = TXDenomination }) ->
	case maps:get(To, Accounts, not_found) of
		not_found ->
			Qty2 = ar_pricing:redenominate(Qty, TXDenomination, Denomination),
			update_account(To, Qty2, <<>>, Denomination, true, Accounts);
		{Balance, LastTX} ->
			Qty2 = ar_pricing:redenominate(Qty, TXDenomination, Denomination),
			Balance2 = ar_pricing:redenominate(Balance, 1, Denomination),
			update_account(To, Balance2 + Qty2, LastTX, Denomination, true, Accounts);
		{Balance, LastTX, AccountDenomination, MiningPermission} ->
			Qty2 = ar_pricing:redenominate(Qty, TXDenomination, Denomination),
			Balance2 = ar_pricing:redenominate(Balance, AccountDenomination,
					Denomination),
			update_account(To, Balance2 + Qty2, LastTX, Denomination, MiningPermission,
					Accounts)
	end.
%% Compute {MinerReward, EndowmentPool, DebtSupply, KryderPlusRateMultiplierLatch,
%% KryderPlusRateMultiplier} using the v2 pricing scheme when active, or the
%% legacy scheme (with zeroed debt fields) before it.
get_miner_reward_and_endowment_pool(Args) ->
	{EndowmentPool, DebtSupply, TXs, RewardAddr, WeaveSize, Height, Timestamp, Rate,
			PricePerGiBMinute, KryderPlusRateMultiplierLatch, KryderPlusRateMultiplier,
			Denomination, BlockInterval} = Args,
	%% Pre-2.4 blocks are not expected here.
	true = Height >= ar_fork:height_2_4(),
	case ar_pricing_transition:is_v2_pricing_height(Height) of
		true ->
			{MinerReward, EndowmentPool2, DebtSupply2, KryderPlusRateMultiplierLatch2,
					KryderPlusRateMultiplier2, _, _} =
				ar_pricing:get_miner_reward_endowment_pool_debt_supply({EndowmentPool,
						DebtSupply, TXs, WeaveSize, Height, PricePerGiBMinute,
						KryderPlusRateMultiplierLatch, KryderPlusRateMultiplier,
						Denomination, BlockInterval}),
			{MinerReward, EndowmentPool2, DebtSupply2, KryderPlusRateMultiplierLatch2,
					KryderPlusRateMultiplier2};
		false ->
			{MinerReward, EndowmentPool2} =
				ar_pricing:get_miner_reward_and_endowment_pool({EndowmentPool, TXs,
						RewardAddr, WeaveSize, Height, Timestamp, Rate}),
			{MinerReward, EndowmentPool2, 0, 0, 1}
	end.

%% True when no transaction references an invalid wallet state.
validate_account_anchors(Accounts, TXs) ->
	not lists:any(fun(TX) -> is_wallet_invalid(TX, Accounts) end, TXs).

%% Reject the block outright when the mining address is banned.
update_accounts2(B, PrevB, Accounts, Args) ->
	case is_account_banned(B#block.reward_addr, Accounts) of
		true ->
			{error, mining_address_banned};
		false ->
			update_accounts3(B, PrevB, Accounts, Args)
	end.

%% Apply the double-signing proof (if any), then the locked rewards.
update_accounts3(B, PrevB, Accounts, Args) ->
	case may_be_apply_double_signing_proof(B, PrevB, Accounts) of
		{ok, Accounts2} ->
			Accounts3 = ar_rewards:apply_rewards(PrevB, Accounts2),
			update_accounts4(B, PrevB, Accounts3, Args);
		Error ->
			Error
	end.
%% Validate and apply a double-signing proof, if the block carries one.
%% A proof whose two signatures are identical is trivially invalid - it does
%% not demonstrate two distinct signed blocks.
may_be_apply_double_signing_proof(#block{ double_signing_proof = undefined }, _PrevB,
		Accounts) ->
	{ok, Accounts};
may_be_apply_double_signing_proof(#block{
		double_signing_proof = {_Pub, Sig, _, _, _, Sig, _, _, _} }, _PrevB,
		_Accounts) ->
	{error, invalid_double_signing_proof_same_signature};
may_be_apply_double_signing_proof(B, PrevB, Accounts) ->
	{_Pub, _Signature1, CDiff1, PrevCDiff1, _Preimage1, _Signature2, CDiff2, PrevCDiff2,
			_Preimage2} = B#block.double_signing_proof,
	case ar_block:get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) of
		false ->
			{error, invalid_double_signing_proof_cdiff};
		true ->
			may_be_apply_double_signing_proof2(B, PrevB, Accounts)
	end.

%% Second stage: the accused key must not be this block's own reward key, must
%% not already be banned, and must actually hold a locked reward to confiscate.
may_be_apply_double_signing_proof2(B, PrevB, Accounts) ->
	{Pub, _Signature1, _CDiff1, _PrevCDiff1, _Preimage1, _Signature2, _CDiff2,
			_PrevCDiff2, _Preimage2} = B#block.double_signing_proof,
	Key = ar_block:get_reward_key(Pub, B#block.height),
	case B#block.reward_key == Key of
		true ->
			{error, invalid_double_signing_proof_same_address};
		false ->
			Addr = ar_wallet:to_address(Key),
			case is_account_banned(Addr, Accounts) of
				true ->
					{error, invalid_double_signing_proof_already_banned};
				false ->
					LockedRewards = ar_rewards:get_locked_rewards(PrevB),
					case ar_rewards:has_locked_reward(Addr, LockedRewards) of
						false ->
							{error, invalid_double_signing_proof_not_in_reward_history};
						true ->
							may_be_apply_double_signing_proof3(B, PrevB, Accounts)
					end
			end
	end.
%% Final stage: verify both signatures in the proof against the accused key,
%% then ban the corresponding account.
may_be_apply_double_signing_proof3(B, PrevB, Accounts) ->
	#block{ height = Height } = B,
	{Pub, Signature1, CDiff1, PrevCDiff1, Preimage1, Signature2, CDiff2, PrevCDiff2,
			Preimage2} = B#block.double_signing_proof,
	SignaturePreimage1 = ar_block:get_block_signature_preimage(CDiff1, PrevCDiff1,
			Preimage1, Height),
	Key = ar_block:get_reward_key(Pub, B#block.height),
	Addr = ar_wallet:to_address(Key),
	case ar_wallet:verify(Key, SignaturePreimage1, Signature1) of
		false ->
			{error, invalid_double_signing_proof_invalid_signature};
		true ->
			SignaturePreimage2 = ar_block:get_block_signature_preimage(CDiff2,
					PrevCDiff2, Preimage2, Height),
			case ar_wallet:verify(Key, SignaturePreimage2, Signature2) of
				false ->
					{error, invalid_double_signing_proof_invalid_signature};
				true ->
					?LOG_INFO([{event, banning_account},
							{address, ar_util:encode(Addr)},
							{previous_block, ar_util:encode(B#block.previous_block)},
							{height, Height}]),
					{ok, ban_account(Addr, Accounts, PrevB#block.denomination)}
			end
	end.

%% Mark the account as banned (MiningPermission = false), leaving it with its
%% redenominated balance plus 1 unit (1 unit for a previously absent account).
ban_account(Addr, Accounts, Denomination) ->
	case maps:get(Addr, Accounts, not_found) of
		not_found ->
			maps:put(Addr, {1, <<>>, Denomination, false}, Accounts);
		{Balance, LastTX} ->
			Balance2 = ar_pricing:redenominate(Balance, 1, Denomination),
			maps:put(Addr, {Balance2 + 1, LastTX, Denomination, false}, Accounts);
		{Balance, LastTX, AccountDenomination, _MiningPermission} ->
			Balance2 = ar_pricing:redenominate(Balance, AccountDenomination,
					Denomination),
			maps:put(Addr, {Balance2 + 1, LastTX, Denomination, false}, Accounts)
	end.
%% Apply the double-signing prover reward (when a proof is present), moving the
%% remainder of the confiscated locked reward into the endowment pool, then
%% continue to the final accounts validation step.
%% Fix: the original re-destructured Args into the same five already-bound
%% variables inside the Proof branch - a dead duplicate match; removed.
update_accounts4(B, PrevB, Accounts, Args) ->
	{MinerReward, EndowmentPool, DebtSupply, KryderPlusRateMultiplierLatch,
			KryderPlusRateMultiplier} = Args,
	case B#block.double_signing_proof of
		undefined ->
			update_accounts5(B, Accounts, Args);
		Proof ->
			Denomination = PrevB#block.denomination,
			BannedAddr = ar_wallet:hash_pub_key(element(1, Proof)),
			%% The banned account keeps 1 unit (see ban_account/3), so the
			%% confiscated amount is the total locked reward minus 1.
			Sum = ar_rewards:get_total_reward_for_address(BannedAddr, PrevB) - 1,
			{Dividend, Divisor} = ?DOUBLE_SIGNING_PROVER_REWARD_SHARE,
			LockedRewards = ar_rewards:get_locked_rewards(PrevB),
			Sample = lists:sublist(LockedRewards, ?DOUBLE_SIGNING_REWARD_SAMPLE_SIZE),
			{Min, MinDenomination} = get_minimal_reward(Sample),
			Min2 = ar_pricing:redenominate(Min, MinDenomination, Denomination),
			%% The prover reward is a share of the minimal sampled reward,
			%% capped by the confiscated amount.
			ProverReward = min(Min2 * Dividend div Divisor, Sum),
			EndowmentPool2 = EndowmentPool + Sum - ProverReward,
			Accounts2 = ar_rewards:apply_reward(Accounts, B#block.reward_addr,
					ProverReward, Denomination),
			Args2 = {MinerReward, EndowmentPool2, DebtSupply,
					KryderPlusRateMultiplierLatch, KryderPlusRateMultiplier},
			update_accounts5(B, Accounts2, Args2)
	end.

%% Return {MinimalReward, ItsDenomination} over the reward history sample.
get_minimal_reward(RewardHistory) ->
	get_minimal_reward(
		%% Make sure to traverse in the order of not decreasing denomination.
		lists:reverse(RewardHistory), infinity, 1).

get_minimal_reward([], Min, Denomination) ->
	{Min, Denomination};
get_minimal_reward([{_Addr, _HashRate, Reward, RewardDenomination} | RewardHistory],
		Min, Denomination) ->
	%% Bring the running minimum into the current entry's denomination before
	%% comparing.
	Min2 =
		case Min of
			infinity ->
				infinity;
			_ ->
				ar_pricing:redenominate(Min, Denomination, RewardDenomination)
		end,
	case Reward < Min2 of
		true ->
			get_minimal_reward(RewardHistory, Reward, RewardDenomination);
		false ->
			get_minimal_reward(RewardHistory, Min, Denomination)
	end.
%% @doc Final account-update step: verify the account anchors for the block's
%% transactions and return the new pricing/endowment state together with the
%% updated accounts.
update_accounts5(B, Accounts, Args) ->
	{MinerReward, EndowmentPool, DebtSupply, KryderPlusRateMultiplierLatch,
			KryderPlusRateMultiplier} = Args,
	case validate_account_anchors(Accounts, B#block.txs) of
		true ->
			%% presumably a testnet-only top-up (no-op otherwise) — see ar_testnet.
			Accounts2 = ar_testnet:top_up_test_wallet(Accounts, B#block.height),
			{ok, {EndowmentPool, MinerReward, DebtSupply, KryderPlusRateMultiplierLatch,
					KryderPlusRateMultiplier, Accounts2}};
		false ->
			{error, invalid_account_anchors}
	end.

%% @doc Entry point of the block validation chain; starts with the weave-size
%% check and threads the state tuple through each validate_block/2 step.
do_validate(NewB, OldB, Wallets, BlockAnchors, RecentTXMap, PartitionUpperBound) ->
	validate_block(weave_size, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap,
			PartitionUpperBound}).

%% Step 1: the new weave size must follow from the old block and the new txs.
validate_block(weave_size, {#block{ txs = TXs } = NewB, OldB, Wallets, BlockAnchors,
		RecentTXMap, PartitionUpperBound}) ->
	case ar_block:verify_weave_size(NewB, OldB, TXs) of
		false ->
			{invalid, invalid_weave_size};
		true ->
			validate_block(previous_block, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap, PartitionUpperBound})
	end;
%% Step 2: the new block must point at the previous block's independent hash.
validate_block(previous_block, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap,
		PartitionUpperBound}) ->
	case OldB#block.indep_hash == NewB#block.previous_block of
		false ->
			{invalid, invalid_previous_block};
		true ->
			validate_block(previous_solution_hash, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap, PartitionUpperBound})
	end;
validate_block(previous_solution_hash, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap,
		PartitionUpperBound}) ->
	%% Crash (badmatch) on pre-2.6 heights — such blocks are not expected here.
	true = NewB#block.height >= ar_fork:height_2_6(),
	case NewB#block.previous_solution_hash == OldB#block.hash of
		false ->
			{invalid, invalid_previous_solution_hash};
		true ->
			validate_block(packing_2_5_threshold, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap, PartitionUpperBound})
	end;
validate_block(packing_2_5_threshold, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap,
		PartitionUpperBound}) ->
	ExpectedPackingThreshold = ar_block:get_packing_threshold(OldB, PartitionUpperBound),
	Valid =
		case ExpectedPackingThreshold of
			undefined ->
				true;
			_ ->
				NewB#block.packing_2_5_threshold == ExpectedPackingThreshold
		end,
	case Valid of
		true ->
			%% PartitionUpperBound is no longer needed by the remaining steps.
			validate_block(strict_data_split_threshold, {NewB, OldB, Wallets,
					BlockAnchors, RecentTXMap});
		false ->
			%% NOTE(review): returns {error, ...} while most steps return
			%% {invalid, ...} — confirm callers handle both shapes.
			{error, invalid_packing_2_5_threshold}
	end;
validate_block(strict_data_split_threshold, {NewB, OldB, Wallets, BlockAnchors,
		RecentTXMap}) ->
	Height = NewB#block.height,
	Fork_2_5 = ar_fork:height_2_5(),
	%% At the 2.5 fork height the threshold is pinned to the previous weave
	%% size; afterwards it must never change.
	Valid =
		case Height == Fork_2_5 of
			true ->
				NewB#block.strict_data_split_threshold == OldB#block.weave_size;
			false ->
				case Height > Fork_2_5 of
					true ->
						NewB#block.strict_data_split_threshold ==
								OldB#block.strict_data_split_threshold;
					false ->
						true
				end
		end,
	case Valid of
		true ->
			true = NewB#block.height >= ar_fork:height_2_6(),
			validate_block(usd_to_ar_rate, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap});
		false ->
			{error, invalid_strict_data_split_threshold}
	end;
%% NOTE(review): this step is not reached from the chain above (the
%% strict_data_split_threshold step jumps straight to usd_to_ar_rate); it is
%% invoked directly, e.g. by the tests below. Confirm difficulty is validated
%% elsewhere in the pipeline.
validate_block(difficulty, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	case ar_retarget:validate_difficulty(NewB, OldB) of
		false ->
			{invalid, invalid_difficulty};
		true ->
			validate_block(usd_to_ar_rate, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap})
	end;
validate_block(usd_to_ar_rate, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	%% Both the current and the scheduled rate must match the recalculation.
	{USDToARRate, ScheduledUSDToARRate} = ar_pricing:recalculate_usd_to_ar_rate(OldB),
	case NewB#block.usd_to_ar_rate == USDToARRate
			andalso NewB#block.scheduled_usd_to_ar_rate == ScheduledUSDToARRate of
		false ->
			{invalid, invalid_usd_to_ar_rate};
		true ->
			validate_block(denomination, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap})
	end;
validate_block(denomination, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	#block{ height = Height, denomination = Denomination,
			redenomination_height = RedenominationHeight } = NewB,
	true = Height >= ar_fork:height_2_6(),
	%% The new block's denomination fields must match what the previous block
	%% implies (pattern match against the already-bound variables).
	case ar_pricing:may_be_redenominate(OldB) of
		{Denomination, RedenominationHeight} ->
			validate_block(reward_history_hash, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap});
		_ ->
			{invalid, invalid_denomination}
	end;
validate_block(reward_history_hash, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	#block{ reward = Reward, reward_history_hash = RewardHistoryHash,
			denomination = Denomination, height = Height } = NewB,
	#block{ reward_history = RewardHistory,
			reward_history_hash = PreviousRewardHistoryHash } = OldB,
	HashRate = ar_difficulty:get_hash_rate_fixed_ratio(NewB),
	RewardAddr = NewB#block.reward_addr,
	%% Pre-2.8: slice the reward history to compute the hash
	%% Post-2.8: use the previous reward history hash and the head of the history to compute
	%% the new hash.
	LockedRewards = ar_rewards:trim_locked_rewards(Height,
			[{RewardAddr, HashRate, Reward, Denomination} | RewardHistory]),
	case ar_rewards:reward_history_hash(Height, PreviousRewardHistoryHash,
			LockedRewards) of
		RewardHistoryHash ->
			validate_block(block_time_history_hash, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap});
		_ ->
			{invalid, invalid_reward_history_hash}
	end;
%% Only enforced from the 2.7 fork onwards.
validate_block(block_time_history_hash, {NewB, OldB, Wallets, BlockAnchors,
		RecentTXMap}) ->
	case NewB#block.height >= ar_fork:height_2_7() of
		false ->
			validate_block(next_vdf_difficulty, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap});
		true ->
			#block{ block_time_history_hash = HistoryHash } = NewB,
			History = ar_block_time_history:update_history(NewB, OldB),
			case ar_block_time_history:hash(History) of
				HistoryHash ->
					validate_block(next_vdf_difficulty, {NewB, OldB, Wallets,
							BlockAnchors, RecentTXMap});
				_ ->
					{invalid, invalid_block_time_history_hash}
			end
	end;
%% Only enforced from the 2.7 fork onwards.
validate_block(next_vdf_difficulty, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	case NewB#block.height >= ar_fork:height_2_7() of
		false ->
			validate_block(price_per_gib_minute, {NewB, OldB, Wallets, BlockAnchors,
					RecentTXMap});
		true ->
			ExpectedNextVDFDifficulty = ar_block:compute_next_vdf_difficulty(OldB),
			#nonce_limiter_info{ next_vdf_difficulty = NextVDFDifficulty } =
					NewB#block.nonce_limiter_info,
			case ExpectedNextVDFDifficulty == NextVDFDifficulty of
				false ->
					{invalid, invalid_next_vdf_difficulty};
				true ->
					validate_block(price_per_gib_minute, {NewB, OldB, Wallets,
							BlockAnchors, RecentTXMap})
			end
	end;
validate_block(price_per_gib_minute, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	#block{ denomination = Denomination } = NewB,
	#block{ denomination = PrevDenomination } = OldB,
	{Price, ScheduledPrice} = ar_pricing:recalculate_price_per_gib_minute(OldB),
	%% Compare in the new block's denomination.
	Price2 = ar_pricing:redenominate(Price, PrevDenomination, Denomination),
	ScheduledPrice2 = ar_pricing:redenominate(ScheduledPrice, PrevDenomination,
			Denomination),
	case NewB#block.price_per_gib_minute == Price2
			andalso NewB#block.scheduled_price_per_gib_minute == ScheduledPrice2 of
		false ->
			{invalid, invalid_price_per_gib_minute};
		true ->
			validate_block(txs, {NewB, OldB, Wallets, BlockAnchors, RecentTXMap})
	end;
%% Verify all transactions against the previous block's pricing parameters,
%% the wallet state, and the replay-protection anchors.
validate_block(txs, {NewB = #block{ timestamp = Timestamp, height = Height, txs = TXs },
		OldB, Wallets, BlockAnchors, RecentTXMap}) ->
	Rate = ar_pricing:usd_to_ar_rate(OldB),
	PricePerGiBMinute = OldB#block.price_per_gib_minute,
	KryderPlusRateMultiplier = OldB#block.kryder_plus_rate_multiplier,
	Denomination = OldB#block.denomination,
	RedenominationHeight = OldB#block.redenomination_height,
	Args = {TXs, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
			Height - 1, RedenominationHeight, Timestamp, Wallets, BlockAnchors,
			RecentTXMap},
	case ar_tx_replay_pool:verify_block_txs(Args) of
		invalid ->
			{invalid, invalid_txs};
		valid ->
			true = Height >= ar_fork:height_2_6(),
			%% The field size limits in 2.6 are naturally asserted in
			%% ar_serialize:binary_to_block/1.
			validate_block(tx_root, {NewB, OldB})
	end;
%% NOTE(review): not reached from the chain above (the txs step proceeds
%% straight to tx_root); apparently kept for direct invocation.
validate_block(block_field_sizes, {NewB, OldB, _Wallets, _BlockAnchors, _RecentTXMap}) ->
	case ar_block:block_field_size_limit(NewB) of
		false ->
			{invalid, invalid_field_size};
		true ->
			validate_block(tx_root, {NewB, OldB})
	end;
validate_block(tx_root, {NewB, OldB}) ->
	case ar_block:verify_tx_root(NewB) of
		false ->
			{invalid, invalid_tx_root};
		true ->
			validate_block(block_index_root, {NewB, OldB})
	end;
validate_block(block_index_root, {NewB, OldB}) ->
	case ar_block:verify_block_hash_list_merkle(NewB, OldB) of
		false ->
			{invalid, invalid_block_index_root};
		true ->
			validate_block(last_retarget, {NewB, OldB})
	end;
validate_block(last_retarget, {NewB, OldB}) ->
	case ar_block:verify_last_retarget(NewB, OldB) of
		false ->
			{invalid, invalid_last_retarget};
		true ->
			validate_block(cumulative_diff, {NewB, OldB})
	end;
validate_block(cumulative_diff, {NewB, OldB}) ->
	case ar_block:verify_cumulative_diff(NewB, OldB) of
		false ->
			{invalid, invalid_cumulative_difficulty};
		true ->
			validate_block(merkle_rebase_support_threshold, {NewB, OldB})
	end;
%% Final step: at the 2.7 fork height the threshold is pinned to the previous
%% weave size; afterwards it must never change. Returns valid on success.
validate_block(merkle_rebase_support_threshold, {NewB, OldB}) ->
	#block{ height = Height } = NewB,
	case Height > ar_fork:height_2_7() of
		true ->
			case NewB#block.merkle_rebase_support_threshold
					== OldB#block.merkle_rebase_support_threshold of
				false ->
					{error, invalid_merkle_rebase_support_threshold};
				true ->
					valid
			end;
		false ->
			case Height == ar_fork:height_2_7() of
				true ->
					case NewB#block.merkle_rebase_support_threshold
							== OldB#block.weave_size of
						true ->
							valid;
						false ->
							{error, invalid_merkle_rebase_support_threshold}
					end;
				false ->
					valid
			end
	end.

-ifdef(AR_TEST).
%% Test build only: a transaction with an empty signature is never treated as
%% having an invalid wallet (lets unsigned test transactions through). Note
%% the production variant below lacks this clause.
is_wallet_invalid(#tx{ signature = <<>> }, _Wallets) ->
	false;
is_wallet_invalid(TX, Wallets) ->
	OwnerAddress = ar_tx:get_owner_address(TX),
	case maps:get(OwnerAddress, Wallets, not_found) of
		{Balance, LastTX} when Balance >= 0 ->
			%% A zero-balance wallet is only valid if it has no last_tx anchor.
			case Balance of
				0 ->
					byte_size(LastTX) == 0;
				_ ->
					false
			end;
		{Balance, LastTX, _Denomination, _MiningPermission} when Balance >= 0 ->
			case Balance of
				0 ->
					byte_size(LastTX) == 0;
				_ ->
					false
			end;
		_ ->
			%% Missing wallet or negative balance: invalid.
			true
	end.
-else.
is_wallet_invalid(TX, Wallets) ->
	OwnerAddress = ar_tx:get_owner_address(TX),
	case maps:get(OwnerAddress, Wallets, not_found) of
		{Balance, LastTX} when Balance >= 0 ->
			case Balance of
				0 ->
					byte_size(LastTX) == 0;
				_ ->
					false
			end;
		{Balance, LastTX, _Denomination, _MiningPermission} when Balance >= 0 ->
			case Balance of
				0 ->
					byte_size(LastTX) == 0;
				_ ->
					false
			end;
		_ ->
			true
	end.
-endif.

%%%===================================================================
%%% Tests.
%%%===================================================================

block_validation_test_() ->
	{timeout, 90, fun test_block_validation/0}.

test_block_validation() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(200), <<>>}]),
	ar_test_node:start(B0),
	%% Add at least 10 KiB of data to the weave and mine a block on top,
	%% to make sure SPoRA mining activates.
	PrevTX = ar_test_node:sign_tx(main, Wallet, #{ reward => ?AR(10),
			data => crypto:strong_rand_bytes(10 * ?MiB) }),
	ar_test_node:assert_post_tx_to_peer(main, PrevTX),
	ar_test_node:mine(),
	[_ | _] = ar_test_node:wait_until_height(main, 1),
	ar_test_node:mine(),
	[{PrevH, _, _} | _ ] = ar_test_node:wait_until_height(main, 2),
	PrevB = ar_node:get_block_shadow_from_cache(PrevH),
	BI = ar_node:get_block_index(),
	PartitionUpperBound = ar_node:get_partition_upper_bound(BI),
	BlockAnchors = ar_node:get_block_anchors(),
	RecentTXMap = ar_node:get_recent_txs_map(),
	TX = ar_test_node:sign_tx(main, Wallet, #{ reward => ?AR(10),
			data => crypto:strong_rand_bytes(7 * ?MiB), last_tx => PrevH }),
	ar_test_node:assert_post_tx_to_peer(main, TX),
	ar_test_node:mine(),
	[{H, _, _} | _] = ar_test_node:wait_until_height(main, 3),
	B = ar_node:get_block_shadow_from_cache(H),
	Wallets = #{ ar_wallet:to_address(Pub) => {?AR(200), <<>>} },
	%% The freshly mined block must pass full validation and account update.
	?assertEqual(valid, validate(B, PrevB, Wallets, BlockAnchors, RecentTXMap,
			PartitionUpperBound)),
	?assertMatch({ok, _}, update_accounts(B, PrevB, Wallets)),
	%% Each mutation below must be caught by the corresponding step.
	?assertEqual({invalid, invalid_weave_size},
			validate(B#block{ weave_size = PrevB#block.weave_size + 1 }, PrevB, Wallets,
					BlockAnchors, RecentTXMap, PartitionUpperBound)),
	?assertEqual({invalid, invalid_previous_block},
			validate(B#block{ previous_block = B#block.indep_hash }, PrevB, Wallets,
					BlockAnchors, RecentTXMap, PartitionUpperBound)),
	% AVDE-2026-4: invalid block
	?assertMatch(
		{invalid, _},
		validate(
			B,
			PrevB#block{
				strict_data_split_threshold = PrevB#block.strict_data_split_threshold + 1
			},
			Wallets, BlockAnchors, RecentTXMap, PartitionUpperBound
		)
	),
	InvLastRetargetB = B#block{ last_retarget = B#block.timestamp },
	InvDataRootB = B#block{ tx_root = crypto:strong_rand_bytes(32) },
	InvBlockIndexRootB = B#block{ hash_list_merkle = crypto:strong_rand_bytes(32) },
	InvCDiffB = B#block{ cumulative_diff = PrevB#block.cumulative_diff * 1000 },
	%% Individual validate_block/2 steps invoked directly.
	?assertEqual({invalid, invalid_difficulty}, validate_block(difficulty, {
			B#block{ diff = PrevB#block.diff - 1 }, PrevB, Wallets, BlockAnchors,
			RecentTXMap})),
	?assertEqual({invalid, invalid_usd_to_ar_rate}, validate_block(usd_to_ar_rate, {
			B#block{ usd_to_ar_rate = {0, 0} }, PrevB, Wallets, BlockAnchors,
			RecentTXMap})),
	?assertEqual({invalid, invalid_usd_to_ar_rate}, validate_block(usd_to_ar_rate, {
			B#block{ scheduled_usd_to_ar_rate = {0, 0} }, PrevB, Wallets, BlockAnchors,
			RecentTXMap})),
	?assertEqual({invalid, invalid_txs}, validate_block(txs, {
			B#block{ txs = [#tx{ signature = <<1>> }] }, PrevB, Wallets, BlockAnchors,
			RecentTXMap})),
	%% Overspending: the tx reward exceeds the wallet balance.
	?assertEqual({invalid, invalid_txs},
			validate(B#block{ txs = [TX#tx{ reward = ?AR(201) }] }, PrevB, Wallets,
					BlockAnchors, RecentTXMap, PartitionUpperBound)),
	?assertEqual({error, invalid_account_anchors},
			update_accounts(B#block{ txs = [TX#tx{ reward = ?AR(201) }] }, PrevB,
					Wallets)),
	?assertEqual({invalid, invalid_tx_root}, validate_block(tx_root, {
			InvDataRootB#block{ indep_hash = ar_block:indep_hash(InvDataRootB) },
			PrevB})),
	?assertEqual({invalid, invalid_difficulty}, validate_block(difficulty, {
			InvLastRetargetB#block{ indep_hash = ar_block:indep_hash(InvLastRetargetB) },
			PrevB, Wallets, BlockAnchors, RecentTXMap})),
	?assertEqual({invalid, invalid_block_index_root}, validate_block(block_index_root, {
			InvBlockIndexRootB#block{
				indep_hash = ar_block:indep_hash(InvBlockIndexRootB) }, PrevB})),
	?assertEqual({invalid, invalid_last_retarget},
			validate_block(last_retarget, {B#block{ last_retarget = 0 }, PrevB})),
	?assertEqual(
		{invalid, invalid_cumulative_difficulty},
		validate_block(cumulative_diff, {
			InvCDiffB#block{ indep_hash = ar_block:indep_hash(InvCDiffB) }, PrevB})),
	%% Mine one more block and check it validates against the refreshed state.
	BI2 = ar_node:get_block_index(),
	PartitionUpperBound2 = ar_node:get_partition_upper_bound(BI2),
	BlockAnchors2 = ar_node:get_block_anchors(),
	RecentTXMap2 = ar_node:get_recent_txs_map(),
	ar_test_node:mine(),
	[{H2, _, _} | _ ] = ar_test_node:wait_until_height(main, 4),
	B2 = ar_node:get_block_shadow_from_cache(H2),
	?assertEqual(valid, validate(B2, B, Wallets, BlockAnchors2, RecentTXMap2,
			PartitionUpperBound2)).

update_accounts_rejects_same_signature_in_double_signing_proof_test_() ->
	{timeout, 30, fun test_update_accounts_rejects_same_signature_in_double_signing_proof/0}.

%% A double-signing proof where both signatures are identical must be rejected:
%% signing the same preimage twice proves nothing.
test_update_accounts_rejects_same_signature_in_double_signing_proof() ->
	Accounts = #{},
	Key = ar_wallet:new(),
	Pub = element(2, element(2, Key)),
	Random = crypto:strong_rand_bytes(64),
	Preimage = << (ar_serialize:encode_int(1, 16))/binary,
			(ar_serialize:encode_int(1, 16))/binary, Random/binary >>,
	Sig1 = ar_wallet:sign(element(1, Key), Preimage),
	%% The same signature is used for both halves of the proof.
	DoubleSigningProof = {Pub, Sig1, 1, 1, Random, Sig1, 1, 1, Random},
	BannedAddr = ar_wallet:to_address(Key),
	ProverKey = ar_wallet:new(),
	RewardAddr = ar_wallet:to_address(ProverKey),
	B = #block{ timestamp = os:system_time(second), reward_addr = RewardAddr,
			weave_size = 1, double_signing_proof = DoubleSigningProof },
	Reward = 12,
	PrevB = #block{ reward_history = [{RewardAddr, 0, Reward, 1}, {BannedAddr, 0, 10, 1}],
			usd_to_ar_rate = {1, 5}, reward_pool = 0 },
	?assertEqual({error, invalid_double_signing_proof_same_signature},
			update_accounts(B, PrevB, Accounts)).

update_accounts_receives_released_reward_and_prover_reward_test_() ->
	{timeout, 30, fun test_update_accounts_receives_released_reward_and_prover_reward/0}.
%% Pad reward_history up to ?REWARD_HISTORY_BLOCKS entries by prepending
%% copies of its first two elements, preserving their alternating pattern.
%% If the history is already long enough it is returned unchanged.
augment_reward_history(PrevB = #block{ reward_history = RewardHistory }) ->
	[First, Second | _] = RewardHistory,
	NewRewardHistory =
		case length(RewardHistory) >= ?REWARD_HISTORY_BLOCKS of
			true ->
				RewardHistory;
			_ ->
				PairsToAdd = (?REWARD_HISTORY_BLOCKS - length(RewardHistory)) div 2,
				AdditionalElements = lists:flatten(
						lists:duplicate(PairsToAdd, [First, Second])),
				%% If an odd number of elements is missing, prepend one extra
				%% Second so the alternation continues into the existing list.
				case (?REWARD_HISTORY_BLOCKS - length(RewardHistory)) rem 2 of
					1 ->
						[Second | AdditionalElements];
					0 ->
						AdditionalElements
				end ++ RewardHistory
		end,
	PrevB#block{ reward_history = NewRewardHistory }.

%% With a valid proof: the prover collects both its released reward and the
%% prover share; the banned address is left with one unit and marked banned.
test_update_accounts_receives_released_reward_and_prover_reward() ->
	%% The expectations below hardcode these consensus constants.
	?assert(?DOUBLE_SIGNING_REWARD_SAMPLE_SIZE == 2),
	?assert(?LOCKED_REWARDS_BLOCKS >= 3),
	?assert(?DOUBLE_SIGNING_PROVER_REWARD_SHARE == {1, 2}),
	Accounts = #{},
	Key = ar_wallet:new(),
	Pub = element(2, element(2, Key)),
	Random = crypto:strong_rand_bytes(64),
	Preimage = << 0:256, (ar_serialize:encode_int(1, 16))/binary,
			(ar_serialize:encode_int(1, 16))/binary, Random/binary >>,
	Sig1 = ar_wallet:sign(element(1, Key), Preimage),
	Sig2 = ar_wallet:sign(element(1, Key), Preimage),
	DoubleSigningProof = {Pub, Sig1, 1, 1, Random, Sig2, 1, 1, Random},
	BannedAddr = ar_wallet:to_address(Key),
	ProverKey = ar_wallet:new(),
	RewardAddr = ar_wallet:to_address(ProverKey),
	B = #block{ timestamp = os:system_time(second), reward_addr = RewardAddr,
			weave_size = 1, double_signing_proof = DoubleSigningProof },
	Reward = 13,
	ProverReward = 5, % 1/2 of min(10, 12)
	PrevB = #block{ reward_history = [{stub, stub, 12, 1}, {BannedAddr, 0, 10, 1},
			{RewardAddr, 0, Reward, 1}], usd_to_ar_rate = {1, 5}, reward_pool = 0 },
	PrevB1 = augment_reward_history(PrevB),
	{ok, {_EndowmentPool2, _MinerReward, _DebtSupply2, _KryderPlusRateMultiplierLatch2,
			_KryderPlusRateMultiplier2, Accounts2}} = update_accounts(B, PrevB1, Accounts),
	?assertEqual({ProverReward + Reward, <<>>}, maps:get(RewardAddr, Accounts2)),
	%% The banned account keeps one unit and is flagged as banned.
	?assertEqual({1, <<>>, 1, false}, maps:get(BannedAddr, Accounts2)).

update_accounts_does_not_let_banned_account_take_reward_test_() ->
	{timeout, 30, fun test_update_accounts_does_not_let_banned_account_take_reward/0}.

%% A banned address must not receive its released reward; the prover only
%% collects the prover share.
test_update_accounts_does_not_let_banned_account_take_reward() ->
	%% The expectations below hardcode these consensus constants.
	?assert(?DOUBLE_SIGNING_REWARD_SAMPLE_SIZE == 2),
	?assert(?LOCKED_REWARDS_BLOCKS >= 3),
	?assert(?DOUBLE_SIGNING_PROVER_REWARD_SHARE == {1, 2}),
	Accounts = #{},
	Key = ar_wallet:new(),
	Pub = element(2, element(2, Key)),
	Random = crypto:strong_rand_bytes(64),
	Preimage = << 0:256, (ar_serialize:encode_int(1, 16))/binary,
			(ar_serialize:encode_int(1, 16))/binary, Random/binary >>,
	Sig1 = ar_wallet:sign(element(1, Key), Preimage),
	Sig2 = ar_wallet:sign(element(1, Key), Preimage),
	DoubleSigningProof = {Pub, Sig1, 1, 1, Random, Sig2, 1, 1, Random},
	BannedAddr = ar_wallet:to_address(Key),
	ProverKey = ar_wallet:new(),
	RewardAddr = ar_wallet:to_address(ProverKey),
	B = #block{ timestamp = os:system_time(second), reward_addr = RewardAddr,
			weave_size = 1, double_signing_proof = DoubleSigningProof },
	Reward = 12,
	ProverReward = 3, % 1/2 of min(7, 8)
	PrevB = #block{ reward_history = [{stub, stub, 7, 1}, {stub, stub, 8, 1},
			{BannedAddr, 0, Reward, 1}, {BannedAddr, 0, 10, 1}],
			usd_to_ar_rate = {1, 5}, reward_pool = 0 },
	PrevB1 = augment_reward_history(PrevB),
	{ok, {_EndowmentPool2, _MinerReward, _DebtSupply2, _KryderPlusRateMultiplierLatch2,
			_KryderPlusRateMultiplier2, Accounts2}} = update_accounts(B, PrevB1, Accounts),
	?assertEqual({ProverReward, <<>>}, maps:get(RewardAddr, Accounts2)),
	?assertEqual({1, <<>>, 1, false}, maps:get(BannedAddr, Accounts2)).

================================================
FILE: apps/arweave/src/ar_node_worker.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. 
If a copy of the GPLv2 was not distributed %% with this file, You can obtain one at %% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html %% %%% @doc The server responsible for processing blocks and transactions and %%% maintaining the node state. Blocks are prioritized over transactions. -module(ar_node_worker). -export([start_link/0, calculate_delay/1, is_mempool_or_block_cache_tx/1, tx_id_prefix/1, found_solution/4, pause/0, start_mining/0, mine_one_block/0, mine_until_height/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). -export([set_reward_addr/1]). -include("ar.hrl"). -include("ar_consensus.hrl"). -include("ar_pricing.hrl"). -include("ar_data_sync.hrl"). -include("ar_vdf.hrl"). -include("ar_mining.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -ifdef(LOCALNET). -define(MINING_SERVER, ar_localnet_mining_server). -else. -define(MINING_SERVER, ar_mining_server). -endif. -include_lib("eunit/include/eunit.hrl"). -ifdef(AR_TEST). -define(PROCESS_TASK_QUEUE_FREQUENCY_MS, 10). -else. -ifdef(LOCALNET). -define(PROCESS_TASK_QUEUE_FREQUENCY_MS, 10). -else. -define(PROCESS_TASK_QUEUE_FREQUENCY_MS, 200). -endif. -endif. -define(FILTER_MEMPOOL_CHUNK_SIZE, 100). -ifdef(AR_TEST). -define(BLOCK_INDEX_HEAD_LEN, (?STORE_BLOCKS_BEHIND_CURRENT * 2)). -else. -define(BLOCK_INDEX_HEAD_LEN, 10000). -endif. %% How deep into the past do we search for the state data starting from the tip of %% the extracted block index. Normally, the very recent block and transaction headers %% would be found, but in case something goes wrong we may skip up to this many missing %% records and start from a slightly older state. Also very helpful for testing, e.g., when %% we want to restart a testnet from a certain point in the past. -ifndef(START_FROM_STATE_SEARCH_DEPTH). -define(START_FROM_STATE_SEARCH_DEPTH, 100). -endif. %% How frequently (in seconds) to recompute the mining difficulty at the retarget blocks. -ifdef(AR_TEST). 
-define(COMPUTE_MINING_DIFFICULTY_INTERVAL, 1).
-else.
-define(COMPUTE_MINING_DIFFICULTY_INTERVAL, 10).
-endif.

%% Initial balance granted to the configured mining address when initializing
%% a localnet genesis block (passed through ?AR/1 in init/1 below).
-ifndef(LOCALNET_BALANCE).
-define(LOCALNET_BALANCE, 1000000000000).
-endif.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Return the prefix used to inform block receivers about the block's transactions
%% via POST /block_announcement.
tx_id_prefix(TXID) ->
	binary:part(TXID, 0, 8).

%% @doc Return true if the given transaction identifier is found in the mempool or
%% block cache (the last ar_block:get_consensus_window_size() blocks).
is_mempool_or_block_cache_tx(TXID) ->
	ets:match_object(tx_prefixes, {tx_id_prefix(TXID), TXID}) /= [].

%% @doc Set the reward address used by the node (synchronous call).
set_reward_addr(Addr) ->
	gen_server:call(?MODULE, {set_reward_addr, Addr}).

%% @doc Submit a mining solution (with its proof-of-access caches) for
%% processing; handled asynchronously by the server.
found_solution(Source, Solution, PoACache, PoA2Cache) ->
	gen_server:cast(?MODULE, {found_solution, Source, Solution, PoACache, PoA2Cache}).

%% @doc Start the mining server. It will be running indefinitely until paused.
start_mining() ->
	gen_server:cast(?MODULE, start_mining).

%% @doc Mine until a block is found. The default server may produce several block
%% candidates (happens often in tests). The localnet mining server only produces
%% one candidate and one block.
mine_one_block() ->
	gen_server:cast(?MODULE, mine_one_block).

%% @doc Mine blocks until the given height is reached.
mine_until_height(Height) ->
	gen_server:cast(?MODULE, {mine_until_height, Height}).

%% @doc Pause the mining server.
pause() ->
	gen_server:cast(?MODULE, pause).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	?LOG_INFO([{start, ?MODULE}, {pid,self()}]),
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	[ok, ok, ok, ok] = ar_events:subscribe([tx, block, nonce_limiter, node_state]),
	%% Read persisted mempool.
	ar_mempool:load_from_disk(),
	%% Join the network.
	{ok, Config} = arweave_config:get_env(),
	validate_trusted_peers(Config),
	StartFromLocalState = Config#config.start_from_latest_state
			orelse Config#config.start_from_block /= not_set,
	%% Decide how to bootstrap: join via trusted peers, restart from a locally
	%% persisted state, or initialize a fresh (localnet) weave.
	case {StartFromLocalState, Config#config.init, Config#config.auto_join} of
		{false, false, true} ->
			ar_join:start(ar_peers:get_trusted_peers());
		{true, _, _} ->
			case ar_storage:read_block_index(Config#config.start_from_state) of
				not_found ->
					block_index_not_found([]);
				BI ->
					case get_block_index_at_state(BI, Config) of
						not_found ->
							block_index_not_found(BI);
						BI2 ->
							Height = length(BI2) - 1,
							case start_from_state(BI2, Height) of
								ok ->
									ok;
								Error ->
									%% Unrecoverable: report and stop the node.
									ar:console("~n~n\tFailed to read the local state: ~p.~n", [Error]),
									?LOG_INFO([{event, failed_to_read_local_state},
											{reason, io_lib:format("~p", [Error])}]),
									timer:sleep(1000),
									init:stop(1)
							end
					end
			end;
		{false, true, _} ->
			%% One-shot initialization: create the genesis block, then clear
			%% the init flag so subsequent restarts take another branch.
			Config2 = Config#config{ init = false },
			arweave_config:set_env(Config2),
			InitialBalance = ?AR(?LOCALNET_BALANCE),
			[B0] = ar_weave:init([{Config#config.mining_addr, InitialBalance, <<>>}],
					ar_retarget:switch_to_linear_diff(Config#config.diff)),
			RootHash0 = B0#block.wallet_list,
			%% Assert the stored wallet list root matches the genesis block's.
			RootHash0 = ar_storage:write_wallet_list(0, B0#block.account_tree),
			start_from_state([B0]);
		_ ->
			ok
	end,
	%% Add pending transactions from the persisted mempool to the propagation queue.
	%% NOTE(review): the filtered set returned by gb_sets:filter/2 is discarded;
	%% the traversal is used only for the start_tx_mining_timer/1 side effect.
	gb_sets:filter(
		fun ({_Utility, _TXID, ready_for_mining}) ->
				false;
			({_Utility, TXID, waiting}) ->
				start_tx_mining_timer(ar_mempool:get_tx(TXID)),
				true
		end,
		ar_mempool:get_priority_set()
	),
	%% Maybe start mining.
	case Config#config.mine of
		true ->
			gen_server:cast(?MODULE, start_mining);
		_ ->
			ok
	end,
	gen_server:cast(?MODULE, process_task_queue),
	ets:insert(node_state, [
		{is_joined, false},
		{hash_list_2_0_for_1_0_blocks, read_hash_list_2_0_for_1_0_blocks()}
	]),
	gen_server:cast(?MODULE, compute_mining_difficulty),
	{ok, #{
		miner_state => undefined,
		io_threads => [],
		automine => false,
		mine_until_height => undefined,
		tags => [],
		blocks_missing_txs => sets:new(),
		missing_txs_lookup_processes => #{},
		task_queue => gb_sets:new(),
		solution_cache => #{},
		solution_cache_records => queue:new()
	}}.

%% Return the block index (a tail of BI) at the configured starting state:
%% the full index for start_from_latest_state, otherwise the suffix starting
%% at the configured start_from_block hash.
get_block_index_at_state(BI, Config) ->
	case Config#config.start_from_latest_state of
		true ->
			BI;
		false ->
			H = Config#config.start_from_block,
			get_block_index_at_state2(BI, H)
	end.

get_block_index_at_state2([], _H) ->
	not_found;
get_block_index_at_state2([{H, _, _} | _] = BI, H) ->
	BI;
get_block_index_at_state2([_ | BI], H) ->
	get_block_index_at_state2(BI, H).

%% Report a missing/unsuitable local block index and stop the node.
block_index_not_found([]) ->
	ar:console("~n~n\tThe local state is empty, consider joining the network "
			"via the trusted peers.~n"),
	?LOG_INFO([{event, local_state_empty}]),
	timer:sleep(1000),
	init:stop(1);
block_index_not_found(BI) ->
	{Last, _, _} = hd(BI),
	{First, _, _} = lists:last(BI),
	ar:console("~n~n\tThe local state is missing the target block. Available height range: ~p to ~p.~n", [ar_util:encode(First), ar_util:encode(Last)]),
	?LOG_INFO([{event, local_state_missing_target}, {first, ar_util:encode(First)},
			{last, ar_util:encode(Last)}]),
	timer:sleep(1000),
	init:stop(1).
%% @doc Validate the configured trusted peers: drop peers that are
%% unreachable or on a different network, persist the filtered list, and
%% (unless time_syncing is disabled) verify our clock against theirs.
%% Stops the node if no peer is valid.
validate_trusted_peers(#config{ peers = [] }) ->
	ok;
validate_trusted_peers(Config) ->
	Peers = Config#config.peers,
	ValidPeers = filter_valid_peers(Peers),
	case ValidPeers of
		[] ->
			ar:console("The specified trusted peers are not valid.~n", []),
			?LOG_INFO([{event, no_valid_trusted_peers}]),
			timer:sleep(2000),
			init:stop(1);
		_ ->
			arweave_config:set_env(Config#config{ peers = ValidPeers }),
			case lists:member(time_syncing, Config#config.disable) of
				false ->
					validate_clock_sync(ValidPeers);
				true ->
					ok
			end
	end.

%% @doc Verify peers are on the same network as us.
filter_valid_peers(Peers) ->
	lists:filter(
		fun(Peer) ->
			case ar_http_iface_client:get_info(Peer, network) of
				info_unavailable ->
					io:format("~n\tPeer ~s is not available.~n~n",
							[ar_util:format_peer(Peer)]),
					false;
				%% BUGFIX: the pattern was garbled to "<>" (a syntax error);
				%% restore matching the expected network name binary.
				<<?NETWORK_NAME>> ->
					true;
				_ ->
					io:format(
						"~n\tPeer ~s does not belong to the network ~s.~n~n",
						[ar_util:format_peer(Peer), ?NETWORK_NAME]
					),
					false
			end
		end,
		Peers
	).

%% @doc Validate our clocks are in sync with the trusted peers' clocks.
validate_clock_sync(Peers) ->
	ValidatePeerClock = fun(Peer) ->
		case ar_http_iface_client:get_time(Peer, 5 * 1000) of
			{ok, {RemoteTMin, RemoteTMax}} ->
				LocalT = os:system_time(second),
				Tolerance = ?JOIN_CLOCK_TOLERANCE,
				%% Outside the full tolerance: reject the peer; within the
				%% half-tolerance band: accept but warn.
				case LocalT of
					T when T < RemoteTMin - Tolerance ->
						log_peer_clock_diff(Peer, RemoteTMin - Tolerance - T),
						false;
					T when T < RemoteTMin - Tolerance div 2 ->
						log_peer_clock_diff(Peer, RemoteTMin - T),
						true;
					T when T > RemoteTMax + Tolerance ->
						log_peer_clock_diff(Peer, T - RemoteTMax - Tolerance),
						false;
					T when T > RemoteTMax + Tolerance div 2 ->
						log_peer_clock_diff(Peer, T - RemoteTMax),
						true;
					_ ->
						true
				end;
			{error, Err} ->
				ar:console(
					"Failed to get time from peer ~s: ~p.",
					[ar_util:format_peer(Peer), Err]
				),
				false
		end
	end,
	Responses = ar_util:pmap(ValidatePeerClock, [P || P <- Peers, not is_pid(P)]),
	case checker(Responses) of
		% If more valid nodes are present than invalid nodes, it should be
		% good.
		{X, #{true := True, false := False}} when X>0, True>False ->
			ok;
		% If all nodes are valid, then it's good.
		{_, #{ true := _ }} ->
			ok;
		% Else there is a problem somewhere. Too many peers with clock issues
		% will only cause problems.
		_ ->
			ar:console(
				"~n\tInvalid peers. A valid peer must be part of the"
				" network ~s and its clock must deviate from ours by no"
				" more than ~B seconds.~n",
				[?NETWORK_NAME, ?JOIN_CLOCK_TOLERANCE]
			),
			?LOG_INFO([{event, invalid_peer}]),
			timer:sleep(1000),
			init:stop(1)
	end.

%% Warn (console + log) about a clock deviation of Delta seconds from Peer.
log_peer_clock_diff(Peer, Delta) ->
	Warning = "Your local clock deviates from peer ~s by ~B seconds or more.",
	WarningArgs = [ar_util:format_peer(Peer), Delta],
	io:format(Warning, WarningArgs),
	?LOG_WARNING(Warning, WarningArgs).

%% Schedule {tx_ready_for_mining, TX} after the propagation delay.
start_tx_mining_timer(TX) ->
	%% Calling with ar_node_worker: allows to mock calculate_delay/1 in tests.
	erlang:send_after(ar_node_worker:calculate_delay(tx_propagated_size(TX)), ?MODULE,
			{tx_ready_for_mining, TX}).

%% Size of a transaction as propagated over the wire: format-2 data is not
%% propagated with the tx, format-1 data is.
tx_propagated_size(#tx{ format = 2 }) ->
	?TX_SIZE_BASE;
tx_propagated_size(#tx{ format = 1, data = Data }) ->
	?TX_SIZE_BASE + byte_size(Data).

%% @doc Return a delay in milliseconds to wait before including a transaction
%% into a block. The delay is computed as base delay + a function of data size with
%% a conservative estimation of the network speed.
calculate_delay(Bytes) ->
	BaseDelay = (?BASE_TX_PROPAGATION_DELAY) * 1000,
	NetworkDelay = Bytes * 8 div (?TX_PROPAGATION_BITS_PER_SECOND) * 1000,
	BaseDelay + NetworkDelay.

handle_call({set_reward_addr, Addr}, _From, State) ->
	{reply, ok, State#{ reward_addr => Addr }}.
%% @doc gen_server cast handler: mining solutions are processed immediately;
%% the task queue is drained one task at a time; every other message is
%% enqueued by priority.
handle_cast({found_solution, miner, _Solution, _PoACache, _PoA2Cache},
		#{ automine := false, miner_state := undefined } = State) ->
	%% Mining is switched off and no miner state exists: drop the solution.
	{noreply, State};
handle_cast({found_solution, Source, Solution, PoACache, PoA2Cache}, State) ->
	[{_, TipH}] = ets:lookup(node_state, current),
	TipB = ar_block_cache:get(block_cache, TipH),
	handle_found_solution({Source, Solution, PoACache, PoA2Cache}, TipB, State, false);
handle_cast(process_task_queue, #{ task_queue := Queue } = State) ->
	%% Only run a task when the queue is non-empty AND the node has joined;
	%% the andalso short-circuit avoids the ets lookup on an empty queue.
	ShouldRun =
		not gb_sets:is_empty(Queue) andalso
			case ets:lookup(node_state, is_joined) of
				[{_, true}] -> true;
				_ -> false
			end,
	case ShouldRun of
		false ->
			%% Nothing runnable right now; poll again shortly.
			ar_util:cast_after(?PROCESS_TASK_QUEUE_FREQUENCY_MS, ?MODULE,
					process_task_queue),
			{noreply, State};
		true ->
			record_metrics(),
			{{_Priority, NextTask}, Queue2} = gb_sets:take_smallest(Queue),
			%% Re-arm the drain loop before handling the task.
			gen_server:cast(self(), process_task_queue),
			handle_task(NextTask, State#{ task_queue => Queue2 })
	end;
handle_cast(Message, #{ task_queue := Queue } = State) ->
	Entry = {priority(Message), Message},
	case gb_sets:is_element(Entry, Queue) of
		false ->
			{noreply, State#{ task_queue => gb_sets:insert(Entry, Queue) }};
		true ->
			%% The identical task is already queued; ignore the duplicate.
			{noreply, State}
	end.
%% Start the wallet (account tree) process from a locally stored state and
%% stash the join parameters in the node_state ETS table until the
%% account_tree_initialized event arrives.
handle_info({join_from_state, Height, BI, Blocks, CustomDir}, State) ->
	{ok, _} = ar_wallets:start_link([{blocks, Blocks},
			{from_state, ?START_FROM_STATE_SEARCH_DEPTH}, {custom_dir, CustomDir}]),
	ets:insert(node_state, {join_state, {Height, Blocks, BI, CustomDir}}),
	{noreply, State};
%% Start the wallet process by fetching the account tree from the trusted
%% peers; CustomDir is marked not_set so no extra databases are closed later.
handle_info({join, Height, BI, Blocks}, State) ->
	Peers = ar_peers:get_trusted_peers(),
	{ok, _} = ar_wallets:start_link([{blocks, Blocks}, {from_peers, Peers}]),
	ets:insert(node_state, {join_state, {Height, Blocks, BI, not_set}}),
	{noreply, State};
%% The account tree is ready at Height (=< the join height Height2): drop the
%% blocks/index entries above it, initialize the block index and possibly the
%% pre-2.6 nonce limiter info, then hand the blocks to ar_nonce_limiter.
handle_info({event, node_state, {account_tree_initialized, Height}}, State) ->
	[{_, {Height2, Blocks, BI, CustomDir}}] = ets:lookup(node_state, join_state),
	?LOG_INFO([{event, account_tree_initialized}, {height, Height}]),
	ar:console("The account tree has been initialized at the block height ~B.~n", [Height]),
	case CustomDir of
		not_set ->
			ok;
		_ ->
			ar_storage:close_start_from_state_databases()
	end,
	%% Take the latest block the account tree is stored for.
	Blocks2 = lists:nthtail(Height2 - Height, Blocks),
	BI2 = lists:nthtail(Height2 - Height, BI),
	ar_block_index:init(BI2),
	Blocks3 = lists:sublist(Blocks2, ?SEARCH_SPACE_UPPER_BOUND_DEPTH),
	Blocks4 = may_be_initialize_nonce_limiter(Blocks3, BI2),
	Blocks5 = Blocks4 ++ lists:nthtail(length(Blocks3), Blocks2),
	ets:insert(node_state, {join_state, {Height, Blocks5, BI2, CustomDir}}),
	ar_nonce_limiter:account_tree_initialized(Blocks5),
	{noreply, State};
handle_info({event, node_state, _Event}, State) ->
	{noreply, State};
%% The nonce limiter (VDF) is ready: finish the join. Persist the block index
%% and histories, initialize the block cache, caches and sync processes, and
%% publish the full node state to the node_state ETS table.
handle_info({event, nonce_limiter, initialized}, State) ->
	[{_, {Height, Blocks, BI, _CustomDir}}] = ets:lookup(node_state, join_state),
	ar_storage:store_block_index(BI),
	RecentBI = lists:sublist(BI, ?BLOCK_INDEX_HEAD_LEN),
	Current = element(1, hd(RecentBI)),
	RecentBlocks = lists:sublist(Blocks, ar_block:get_consensus_window_size()),
	RecentBlocks2 = set_poa_caches(RecentBlocks),
	ar_block_cache:initialize_from_list(block_cache, RecentBlocks2),
	B = hd(RecentBlocks2),
	%% Pair each reward history element with the corresponding block hash from
	%% the index (both lists are ordered newest first).
	RewardHistory = [{H, {Addr, HashRate, Reward, Denomination}}
			|| {{Addr, HashRate, Reward, Denomination}, {H, _, _}}
				<- lists:zip(B#block.reward_history,
					lists:sublist(BI, length(B#block.reward_history)))],
	ar_storage:store_reward_history_part2(RewardHistory),
	BlockTimeHistory = [{H, {BlockInterval, VDFInterval, ChunkCount}}
			|| {{BlockInterval, VDFInterval, ChunkCount}, {H, _, _}}
				<- lists:zip(B#block.block_time_history,
					lists:sublist(BI, length(B#block.block_time_history)))],
	ar_storage:store_block_time_history_part2(BlockTimeHistory),
	%% Assert the tip block height matches the height recorded at join time.
	Height = B#block.height,
	ar_disk_cache:write_block(B),
	ar_data_sync:join(RecentBI),
	ar_header_sync:join(Height, RecentBI, Blocks),
	ar_tx_blacklist:start_taking_down(),
	BlockTXPairs = [block_txs_pair(Block) || Block <- Blocks],
	{BlockAnchors, RecentTXMap} = get_block_anchors_and_recent_txs_map(BlockTXPairs),
	{Rate, ScheduledRate} = {B#block.usd_to_ar_rate, B#block.scheduled_usd_to_ar_rate},
	RecentBI2 = lists:sublist(BI, ?BLOCK_INDEX_HEAD_LEN),
	ets:insert(node_state, [
		{recent_block_index, RecentBI2},
		{recent_max_block_size, get_max_block_size(RecentBI2)},
		{is_joined, true},
		{current, Current},
		{timestamp, B#block.timestamp},
		{nonce_limiter_info, B#block.nonce_limiter_info},
		{wallet_list, B#block.wallet_list},
		{height, Height},
		{hash, B#block.hash},
		{reward_pool, B#block.reward_pool},
		{diff_pair, ar_difficulty:diff_pair(B)},
		{cumulative_diff, B#block.cumulative_diff},
		{last_retarget, B#block.last_retarget},
		{weave_size, B#block.weave_size},
		{block_txs_pairs, BlockTXPairs},
		{block_anchors, BlockAnchors},
		{recent_txs_map, RecentTXMap},
		{usd_to_ar_rate, Rate},
		{scheduled_usd_to_ar_rate, ScheduledRate},
		{price_per_gib_minute, B#block.price_per_gib_minute},
		{kryder_plus_rate_multiplier, B#block.kryder_plus_rate_multiplier},
		{denomination, B#block.denomination},
		{redenomination_height, B#block.redenomination_height},
		{scheduled_price_per_gib_minute, B#block.scheduled_price_per_gib_minute},
		{merkle_rebase_support_threshold, get_merkle_rebase_threshold(B)}
	]),
	SearchSpaceUpperBound = ar_node:get_partition_upper_bound(RecentBI),
	ar_events:send(node_state, {search_space_upper_bound, SearchSpaceUpperBound}),
	ar_events:send(node_state, {initialized, B}),
	ar_events:send(node_state, {checkpoint_block,
			ar_block_cache:get_checkpoint_block(RecentBI)}),
	ar:console("Joined the Arweave network successfully at the block ~s, height ~B.~n",
			[ar_util:encode(Current), Height]),
	?LOG_INFO([{event, joined_the_network}, {block, ar_util:encode(Current)},
			{height, Height}]),
	ets:delete(node_state, join_state),
	{noreply, maybe_reset_miner(State)};
%% The VDF chain of block H is invalid: evict the block, ignore future copies
%% of it, and retry block application.
handle_info({event, nonce_limiter, {invalid, H, Code}}, State) ->
	?LOG_WARNING([{event, received_block_with_invalid_nonce_limiter_chain},
			{block, ar_util:encode(H)}, {code, Code}]),
	ar_block_cache:remove(block_cache, H),
	ar_ignore_registry:add(H),
	gen_server:cast(?MODULE, apply_block),
	{noreply, maps:remove({nonce_limiter_validation_scheduled, H}, State)};
handle_info({event, nonce_limiter, {valid, H}}, State) ->
	?LOG_INFO([{event, vdf_validation_successful}, {block, ar_util:encode(H)}]),
	ar_block_cache:mark_nonce_limiter_validated(block_cache, H),
	gen_server:cast(?MODULE, apply_block),
	{noreply, maps:remove({nonce_limiter_validation_scheduled, H}, State)};
%% The validator failed internally (not a proof of invalidity); the block is
%% still removed from the cache and application is retried.
handle_info({event, nonce_limiter, {validation_error, H}}, State) ->
	?LOG_WARNING([{event, vdf_validation_error}, {block, ar_util:encode(H)}]),
	ar_block_cache:remove(block_cache, H),
	gen_server:cast(?MODULE, apply_block),
	{noreply, maps:remove({nonce_limiter_validation_scheduled, H}, State)};
%% The validator is busy; retry shortly without evicting the block.
handle_info({event, nonce_limiter, {refuse_validation, H}}, State) ->
	ar_util:cast_after(500, ?MODULE, apply_block),
	{noreply, maps:remove({nonce_limiter_validation_scheduled, H}, State)};
handle_info({event, nonce_limiter, _}, State) ->
	{noreply, State};
%% The propagation timer of TX fired: it may now be included in mined blocks.
handle_info({tx_ready_for_mining, TX}, State) ->
	ar_mempool:add_tx(TX, ready_for_mining),
	ar_events:send(tx, {ready_for_mining, TX}),
	{noreply, State};
%% Reject blocks exceeding the per-block transaction count limit outright.
handle_info({event, block, {new, Block, _Source}},
		State) when length(Block#block.txs) > ?BLOCK_TX_COUNT_LIMIT ->
	?LOG_WARNING([{event, received_block_with_too_many_txs},
			{block, ar_util:encode(Block#block.indep_hash)},
			{txs, length(Block#block.txs)}]),
	{noreply, State};
handle_info({event, block, {new, B, _Source}}, State) ->
	H = B#block.indep_hash,
	%% Record the block in the block cache. Schedule an application of the
	%% earliest not validated block from the longest chain, if any.
	case ar_block_cache:get(block_cache, H) of
		not_found ->
			case ar_block_cache:get(block_cache, B#block.previous_block) of
				not_found ->
					%% The cache should have been just pruned and this block is old.
					?LOG_WARNING([{event, block_cache_missing_block},
							{previous_block, ar_util:encode(B#block.previous_block)},
							{previous_height, B#block.height - 1},
							{block, ar_util:encode(H)}]),
					ar_ignore_registry:remove(H),
					{noreply, State};
				_PrevB ->
					State2 = may_be_report_double_signing(B, State),
					ar_block_cache:add(block_cache, B),
					gen_server:cast(?MODULE, apply_block),
					{noreply, State2}
			end;
		_ ->
			%% The block's already received from a different peer or
			%% fetched by ar_poller.
			{noreply, State}
	end;
handle_info({event, block, {mined_block_received, H, ReceiveTimestamp}}, State) ->
	ar_block_cache:update_timestamp(block_cache, H, ReceiveTimestamp),
	{noreply, State};
handle_info({event, block, _}, State) ->
	{noreply, State};
%% Add the new waiting transaction to the server state.
handle_info({event, tx, {new, TX, _Source}}, State) ->
	TXID = TX#tx.id,
	case ar_mempool:has_tx(TXID) of
		false ->
			%% When mining, hold the transaction back (waiting) until its
			%% propagation timer fires; otherwise mark it ready immediately.
			InitialStatus =
				case maps:get(automine, State) of
					false ->
						ready_for_mining;
					true ->
						waiting
				end,
			ar_mempool:add_tx(TX, InitialStatus),
			case ar_mempool:has_tx(TXID) of
				true ->
					case maps:get(automine, State) of
						true ->
							%% Do not include transactions into blocks until
							%% they had time to propagate around the network.
							start_tx_mining_timer(TX);
						false ->
							ok
					end;
				false ->
					%% The transaction has been dropped because more valuable transactions
					%% exceed the mempool limit.
					ok
			end,
			{noreply, State};
		true ->
			{noreply, State}
	end;
handle_info({event, tx, {emitting_scheduled, Utility, TXID}}, State) ->
	ar_mempool:del_from_propagation_queue(Utility, TXID),
	{noreply, State};
%% Add the transaction to the mining pool, to be included in the mined block.
handle_info({event, tx, {ready_for_mining, TX}}, State) ->
	ar_mempool:add_tx(TX, ready_for_mining),
	{noreply, State};
handle_info({event, tx, _}, State) ->
	{noreply, State};
%% A missing-transactions lookup process exited: forget it and allow the
%% corresponding block to be retried. NOTE(review): maps:get/2 crashes if a
%% monitored process outside missing_txs_lookup_processes goes down — appears
%% these lookups are the only monitors set by this server; confirm.
handle_info({'DOWN', _Ref, process, PID, _Info}, State) ->
	#{ blocks_missing_txs := Set, missing_txs_lookup_processes := Map } = State,
	BH = maps:get(PID, Map),
	{noreply, State#{
		missing_txs_lookup_processes => maps:remove(PID, Map),
		blocks_missing_txs => sets:del_element(BH, Set)
	}};
handle_info({'EXIT', _PID, normal}, State) ->
	{noreply, State};
handle_info(shutdown, State) ->
	{stop, shutdown, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {message, Info}]),
	{noreply, State}.

%% @doc gen_server terminate callback: if the node has joined, dump the
%% in-memory mempool (TX records with their statuses) to disk so it survives
%% a restart.
terminate(Reason, _State) ->
	case ets:lookup(node_state, is_joined) of
		[{_, true}] ->
			[{mempool_size, MempoolSize}] = ets:lookup(node_state, mempool_size),
			Mempool =
				gb_sets:fold(
					fun({_Utility, TXID, Status}, Acc) ->
						maps:put(TXID, {ar_mempool:get_tx(TXID), Status}, Acc)
					end,
					#{},
					ar_mempool:get_priority_set()
				),
			dump_mempool(Mempool, MempoolSize);
		_ ->
			ok
	end,
	?LOG_INFO([{event, ar_node_worker_terminated}, {reason, Reason}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Update the Prometheus gauges for block height, mempool and weave size.
record_metrics() ->
	[{mempool_size, MempoolSize}] = ets:lookup(node_state, mempool_size),
	prometheus_gauge:set(arweave_block_height, ar_node:get_height()),
	record_mempool_size_metrics(MempoolSize),
	prometheus_gauge:set(weave_size, ar_node:get_weave_size()).
%% @doc Export the two components of the mempool size as Prometheus gauges.
record_mempool_size_metrics({HeaderSize, DataSize}) ->
	prometheus_gauge:set(mempool_header_size_bytes, HeaderSize),
	prometheus_gauge:set(mempool_data_size_bytes, DataSize).

%% @doc Walk the (newest-first) block list and, for the last block preceding
%% the 2.6 fork, synthesize the initial #nonce_limiter_info{} from the legacy
%% search space seed so the VDF chain has a starting point. Blocks at or past
%% the fork are returned unchanged.
may_be_initialize_nonce_limiter([#block{ height = Height } = B | Blocks], BI) ->
	case Height + 1 == ar_fork:height_2_6() of
		true ->
			{Seed, PartitionUpperBound, _TXRoot} = ar_node:get_nth_or_last(
					?SEARCH_SPACE_UPPER_BOUND_DEPTH, BI),
			Output = crypto:hash(sha256, Seed),
			NextSeed = B#block.indep_hash,
			NextPartitionUpperBound = B#block.weave_size,
			Info = #nonce_limiter_info{ output = Output, seed = Seed, next_seed = NextSeed,
					partition_upper_bound = PartitionUpperBound,
					next_partition_upper_bound = NextPartitionUpperBound },
			[B#block{ nonce_limiter_info = Info } | Blocks];
		false ->
			[B | may_be_initialize_nonce_limiter(Blocks, tl(BI))]
	end;
may_be_initialize_nonce_limiter([], _BI) ->
	[].

%% @doc Process a task from the prioritized task queue.

%% Try to apply the earliest not-yet-validated block from the longest chain.
handle_task(apply_block, State) ->
	apply_block(State);
%% The background lookup fetched the transactions missing from block BH:
%% re-add the completed block to the cache (if its previous block is still
%% there) and retry application.
handle_task({cache_missing_txs, BH, TXs}, State) ->
	case ar_block_cache:get_block_and_status(block_cache, BH) of
		not_found ->
			%% The block should have been pruned while we were fetching the missing txs.
			{noreply, State};
		{B, {{not_validated, _}, _}} ->
			case ar_block_cache:get(block_cache, B#block.previous_block) of
				not_found ->
					ok;
				_ ->
					ar_block_cache:add(block_cache, B#block{ txs = TXs })
			end,
			gen_server:cast(?MODULE, apply_block),
			{noreply, State};
		{_B, _AnotherStatus} ->
			%% The transactions should have been received and the block validated while
			%% we were looking for previously missing transactions.
			{noreply, State}
	end;
handle_task(start_mining, State) ->
	{noreply, start_mining(State#{ automine => true })};
%% Start the miner once without enabling automine; a no-op if already mining.
handle_task(mine_one_block, State) ->
	case maps:get(miner_state, State) of
		undefined ->
			{noreply, start_mining(State)};
		_ ->
			{noreply, State}
	end;
handle_task({mine_until_height, Height}, State) ->
	{noreply, start_mining(State#{ mine_until_height => {height, Height},
			automine => true })};
%% Stop mining and clear the mining-related state fields.
handle_task(pause, State) ->
	case maps:get(miner_state, State) of
		undefined ->
			ok;
		_ ->
			?MINING_SERVER:pause()
	end,
	{noreply, State#{ miner_state => undefined, automine => false,
			mine_until_height => undefined }};
%% Re-verify a chunk of the mempool against the current chain state and drop
%% the transactions that are no longer valid; reschedule itself until the
%% whole mempool has been scanned.
handle_task({filter_mempool, Mempool}, State) ->
	{ok, List, RemainingMempool} = ar_mempool:take_chunk(Mempool, ?FILTER_MEMPOOL_CHUNK_SIZE),
	case List of
		[] ->
			{noreply, State};
		_ ->
			[{wallet_list, WalletList}] = ets:lookup(node_state, wallet_list),
			Height = ar_node:get_height(),
			[{usd_to_ar_rate, Rate}] = ets:lookup(node_state, usd_to_ar_rate),
			[{price_per_gib_minute, Price}] = ets:lookup(node_state, price_per_gib_minute),
			[{kryder_plus_rate_multiplier, KryderPlusRateMultiplier}] =
					ets:lookup(node_state, kryder_plus_rate_multiplier),
			[{denomination, Denomination}] = ets:lookup(node_state, denomination),
			[{redenomination_height, RedenominationHeight}] =
					ets:lookup(node_state, redenomination_height),
			[{block_anchors, BlockAnchors}] = ets:lookup(node_state, block_anchors),
			[{recent_txs_map, RecentTXMap}] = ets:lookup(node_state, recent_txs_map),
			Wallets = ar_wallets:get(WalletList, ar_tx:get_addresses(List)),
			%% Signatures were checked on arrival; skip them here.
			InvalidTXs =
				prometheus_histogram:observe_duration(
					reverify_mempool_chunk_duration_milliseconds,
					fun() ->
						lists:foldl(
							fun(TX, Acc) ->
								case ar_tx_replay_pool:verify_tx({TX, Rate, Price,
										KryderPlusRateMultiplier, Denomination,
										Height, RedenominationHeight, BlockAnchors,
										RecentTXMap, #{}, Wallets},
										do_not_verify_signature) of
									valid ->
										Acc;
									{invalid, _Reason} ->
										[TX | Acc]
								end
							end,
							[],
							List
						)
					end
				),
			ar_mempool:drop_txs(InvalidTXs),
			case RemainingMempool of
				[] ->
					scan_complete;
				_ ->
					gen_server:cast(self(), {filter_mempool, RemainingMempool})
			end,
			{noreply, State}
	end;
%% Periodically recompute the mining difficulty and push it to the miner.
handle_task(compute_mining_difficulty, State) ->
	Diff = get_current_diff(),
	%% Log the difficulty once every ten blocks.
	case ar_node:get_height() of
		Height when (Height + 1) rem 10 == 0 ->
			?LOG_INFO([{event, current_mining_difficulty}, {height, Height},
					{difficulty, Diff}]);
		_ ->
			ok
	end,
	case maps:get(miner_state, State) of
		undefined ->
			ok;
		_ ->
			?MINING_SERVER:set_difficulty(Diff)
	end,
	ar_util:cast_after((?COMPUTE_MINING_DIFFICULTY_INTERVAL) * 1000, ?MODULE,
			compute_mining_difficulty),
	{noreply, State};
handle_task(Msg, State) ->
	?LOG_ERROR([
		{event, ar_node_worker_received_unknown_message},
		{message, Msg}
	]),
	{noreply, State}.

%% @doc From the {BH, SizeTaggedTXs} pairs (newest first, trimmed to the
%% maximum anchor depth), build the list of valid anchor block hashes and a
%% map of the recently confirmed transaction identifiers.
get_block_anchors_and_recent_txs_map(BlockTXPairs) ->
	lists:foldr(
		fun({BH, L}, {Acc1, Acc2}) ->
			Acc3 = lists:foldl(
				fun({{TXID, _}, _}, Acc4) ->
					%% We use a map instead of a set here because it is faster.
					maps:put(TXID, ok, Acc4)
				end,
				Acc2,
				L
			),
			{[BH | Acc1], Acc3}
		end,
		{[], #{}},
		lists:sublist(BlockTXPairs, ar_block:get_max_tx_anchor_depth())
	).

%% @doc Return the largest block size (weave size delta between consecutive
%% index entries) found in the given (newest-first) block index head.
%% With fewer than two entries no delta can be computed, so return 0.
get_max_block_size([]) ->
	%% Defensive: previously an empty index crashed with function_clause.
	0;
get_max_block_size([_SingleElement]) ->
	0;
get_max_block_size([{_BH, WeaveSize, _TXRoot} | BI]) ->
	get_max_block_size(BI, WeaveSize, 0).

get_max_block_size([], _WeaveSize, Max) ->
	Max;
get_max_block_size([{_BH, PrevWeaveSize, _TXRoot} | BI], WeaveSize, Max) ->
	Max2 = max(Max, WeaveSize - PrevWeaveSize),
	get_max_block_size(BI, PrevWeaveSize, Max2).

%% @doc Apply the earliest not-validated block from the longest chain if one
%% exists; otherwise, when rebasing is enabled in the config, look for a
%% rebasing opportunity.
apply_block(State) ->
	{ok, Config} = arweave_config:get_env(),
	AllowRebase = Config#config.allow_rebase,
	case ar_block_cache:get_earliest_not_validated_from_longest_chain(block_cache) of
		not_found when AllowRebase == true ->
			maybe_rebase(State);
		not_found when AllowRebase == false ->
			{noreply, State};
		Args ->
			%% Cancel the pending rebase, if there is one.
			State2 = State#{ pending_rebase => false },
			apply_block(Args, State2)
	end.
%% @doc Apply a not-yet-validated block depending on its validation stage.
%% Stage 1: the nonce limiter (VDF) chain has not been checked yet — schedule
%% the asynchronous validation (once) and wait for the nonce_limiter event.
apply_block({B, [PrevB | _PrevBlocks],
		{{not_validated, awaiting_nonce_limiter_validation}, _Timestamp}}, State) ->
	H = B#block.indep_hash,
	case maps:get({nonce_limiter_validation_scheduled, H}, State, false) of
		true ->
			%% Waiting until the nonce limiter chain is validated.
			{noreply, State};
		false ->
			?LOG_DEBUG([{event, schedule_nonce_limiter_validation},
					{block, ar_util:encode(B#block.indep_hash)}]),
			request_nonce_limiter_validation(B, PrevB),
			{noreply, State#{ {nonce_limiter_validation_scheduled, H} => true }}
	end;
%% Stage 2: VDF validated — proceed to full validation and application.
apply_block({B, PrevBlocks, {{not_validated, nonce_limiter_validated}, Timestamp}}, State) ->
	apply_block(B, PrevBlocks, Timestamp, State).

%% @doc Attempt to re-mine our own solution on top of a competing sibling
%% block ("rebase"). The first clause resumes a pending rebase: once the new
%% base PrevH is validated, re-submit the cached mining solution on top of it.
maybe_rebase(#{ pending_rebase := {PrevH, H} } = State) ->
	case ar_block_cache:get_block_and_status(block_cache, PrevH) of
		not_found ->
			{noreply, State};
		{PrevB, {validated, _}} ->
			case get_cached_solution(H, State) of
				not_found ->
					?LOG_WARNING([{event, failed_to_find_cached_solution_for_rebasing},
							{h, ar_util:encode(H)},
							{prev_h, ar_util:encode(PrevH)}]),
					{noreply, State};
				Args ->
					SolutionH = (element(2, Args))#mining_solution.solution_hash,
					?LOG_INFO([{event, rebasing_block},
							{h, ar_util:encode(H)},
							{prev_h, ar_util:encode(PrevH)},
							{solution_h, ar_util:encode(SolutionH)},
							{expected_new_height, PrevB#block.height + 1}]),
					ar:console("Rebasing block ~s (solution ~s, previous block ~s, "
							"height ~B).", [
						ar_util:encode(H), ar_util:encode(SolutionH),
						ar_util:encode(PrevH), PrevB#block.height + 1
					]),
					handle_found_solution(Args, PrevB, State, true)
			end;
		{B, {Status, Timestamp}} ->
			%% The new base is not validated yet — validate it first.
			PrevBlocks = ar_block_cache:get_fork_blocks(block_cache, B),
			Args = {B, PrevBlocks, {Status, Timestamp}},
			apply_block(Args, State)
	end;
%% No rebase pending: if the current tip is our own block and it has sibling
%% blocks, consider rebasing onto one of them.
maybe_rebase(State) ->
	[{_, H}] = ets:lookup(node_state, current),
	B = ar_block_cache:get(block_cache, H),
	{ok, Config} = arweave_config:get_env(),
	case B#block.reward_addr == Config#config.mining_addr of
		false ->
			{noreply, State};
		true ->
			case ar_block_cache:get_siblings(block_cache, B) of
				[] ->
					{noreply, State};
				Siblings ->
					maybe_rebase(B, Siblings, State)
			end
	end.

%% @doc Scan the siblings of our tip block B. Rebase onto a sibling with the
%% same cumulative difficulty, an earlier VDF step, and a different miner —
%% but only when our solution is still cached.
maybe_rebase(_B, [], State) ->
	{noreply, State};
maybe_rebase(B, [Sib | Siblings], State) ->
	#block{ nonce_limiter_info = Info, cumulative_diff = CDiff } = B,
	#block{ nonce_limiter_info = SibInfo, cumulative_diff = SibCDiff } = Sib,
	StepNumber = Info#nonce_limiter_info.global_step_number,
	SibStepNumber = SibInfo#nonce_limiter_info.global_step_number,
	case {CDiff == SibCDiff, StepNumber > SibStepNumber,
			Sib#block.reward_addr == B#block.reward_addr} of
		{true, true, false} ->
			%% See if the solution is cached to avoid wasting time.
			case get_cached_solution(B#block.indep_hash, State) of
				not_found ->
					maybe_rebase(B, Siblings, State);
				_Args ->
					rebase(B, Sib, State)
			end;
		_ ->
			maybe_rebase(B, Siblings, State)
	end.

%% @doc Record the pending rebase of block B onto PrevB and make sure PrevB
%% gets validated so the rebase can proceed.
rebase(B, PrevB, State) ->
	H = B#block.indep_hash,
	PrevH = PrevB#block.indep_hash,
	gen_server:cast(?MODULE, apply_block),
	PrevBlocks = ar_block_cache:get_fork_blocks(block_cache, PrevB),
	{_, {Status, Timestamp}} = ar_block_cache:get_block_and_status(block_cache, PrevH),
	State2 = State#{ pending_rebase => {PrevH, H} },
	case Status of
		validated ->
			{noreply, State2};
		_ ->
			apply_block({PrevB, PrevBlocks, {Status, Timestamp}}, State2)
	end.

%% @doc Look up the mining solution cached for block hash H, or not_found.
get_cached_solution(H, State) ->
	maps:get(H, maps:get(solution_cache, State), not_found).

%% @doc Apply block B unless we are already fetching its missing transactions.
apply_block(B, PrevBlocks, Timestamp, State) ->
	#{ blocks_missing_txs := BlocksMissingTXs } = State,
	case sets:is_element(B#block.indep_hash, BlocksMissingTXs) of
		true ->
			?LOG_DEBUG([{event, block_is_missing_txs},
					{block, ar_util:encode(B#block.indep_hash)}]),
			%% We do not have some of the transactions from this block,
			%% searching for them at the moment.
			{noreply, State};
		false ->
			apply_block2(B, PrevBlocks, Timestamp, State)
	end.
%% @doc Resolve the transactions of the block shadow. If all of them are
%% available locally, continue with validation; otherwise spawn a monitored
%% process fetching the missing ones from peers.
apply_block2(BShadow, PrevBlocks, Timestamp, State) ->
	#{ blocks_missing_txs := BlocksMissingTXs,
			missing_txs_lookup_processes := MissingTXsLookupProcesses } = State,
	{TXs, MissingTXIDs} = pick_txs(BShadow#block.txs),
	case MissingTXIDs of
		[] ->
			Height = BShadow#block.height,
			SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs, Height),
			B = BShadow#block{ txs = TXs, size_tagged_txs = SizeTaggedTXs },
			apply_block3(B, PrevBlocks, Timestamp, State);
		_ ->
			?LOG_INFO([{event, missing_txs_for_block}, {count, length(MissingTXIDs)}]),
			Self = self(),
			monitor(
				process,
				PID = spawn(fun() -> get_missing_txs_and_retry(BShadow, Self) end)
			),
			BH = BShadow#block.indep_hash,
			{noreply, State#{
				blocks_missing_txs => sets:add_element(BH, BlocksMissingTXs),
				missing_txs_lookup_processes => maps:put(PID, BH,
						MissingTXsLookupProcesses)
			}}
		end.

%% @doc Fully validate block B against its previous block and, on success,
%% extend the reward and block-time histories and apply it to the node state.
%% On failure, the block is evicted from the cache and ignored in the future.
apply_block3(B, [PrevB | _] = PrevBlocks, Timestamp, State) ->
	[{block_txs_pairs, BlockTXPairs}] = ets:lookup(node_state, block_txs_pairs),
	[{recent_block_index, RecentBI}] = ets:lookup(node_state, recent_block_index),
	RootHash = PrevB#block.wallet_list,
	TXs = B#block.txs,
	Accounts = ar_wallets:get(RootHash, [B#block.reward_addr | ar_tx:get_addresses(TXs)]),
	{Orphans, RecentBI2} = update_block_index(B, PrevBlocks, RecentBI),
	BlockTXPairs2 = update_block_txs_pairs(B, PrevBlocks, BlockTXPairs),
	%% Drop the head entry (block B itself) — anchors must reference
	%% blocks preceding B.
	BlockTXPairs3 = tl(BlockTXPairs2),
	{BlockAnchors, RecentTXMap} = get_block_anchors_and_recent_txs_map(BlockTXPairs3),
	RecentBI3 = tl(RecentBI2),
	PartitionUpperBound = ar_node:get_partition_upper_bound(RecentBI3),
	case ar_node_utils:validate(B, PrevB, Accounts, BlockAnchors, RecentTXMap,
			PartitionUpperBound) of
		error ->
			%% An internal validation failure, not a proof of invalidity:
			%% do not ban the block, just retry application.
			?LOG_WARNING([{event, failed_to_validate_block},
					{h, ar_util:encode(B#block.indep_hash)}]),
			gen_server:cast(?MODULE, apply_block),
			{noreply, State};
		{invalid, Reason} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, Reason},
					{h, ar_util:encode(B#block.indep_hash)}]),
			ar_events:send(block, {rejected, Reason, B#block.indep_hash, no_peer}),
			BH = B#block.indep_hash,
			ar_block_cache:remove(block_cache, BH),
			ar_ignore_registry:add(BH),
			gen_server:cast(?MODULE, apply_block),
			{noreply, State};
		valid ->
			case validate_wallet_list(B, PrevB) of
				error ->
					BH = B#block.indep_hash,
					?LOG_WARNING([{event, failed_to_validate_wallet_list},
							{h, ar_util:encode(BH)}]),
					ar_block_cache:remove(block_cache, BH),
					ar_ignore_registry:add(BH),
					gen_server:cast(?MODULE, apply_block),
					{noreply, State};
				ok ->
					%% Extend the reward history after the 2.6 fork.
					B2 =
						case B#block.height >= ar_fork:height_2_6() of
							true ->
								B#block{ reward_history = ar_rewards:add_element(B,
										PrevB#block.reward_history) };
							false ->
								B
						end,
					%% Extend (and trim) the block time history after the 2.7 fork.
					B3 =
						case B#block.height >= ar_fork:height_2_7() of
							true ->
								BlockTimeHistory2 =
									ar_block_time_history:update_history(B, PrevB),
								Len2 = ar_block_time_history:history_length()
										+ ar_block:get_consensus_window_size(),
								BlockTimeHistory3 =
									lists:sublist(BlockTimeHistory2, Len2),
								B2#block{ block_time_history = BlockTimeHistory3 };
							false ->
								B2
						end,
					State2 = apply_validated_block(State, B3, PrevBlocks, Orphans,
							RecentBI2, BlockTXPairs2),
					record_processing_time(Timestamp),
					{noreply, State2}
			end
	end.

%% @doc Ask ar_nonce_limiter to validate the VDF chain between PrevB and B.
request_nonce_limiter_validation(#block{ indep_hash = H } = B, PrevB) ->
	Info = B#block.nonce_limiter_info,
	PrevInfo = ar_nonce_limiter:get_or_init_nonce_limiter_info(PrevB),
	ar_nonce_limiter:request_validation(H, Info, PrevInfo).

%% @doc Resolve a mixed list of #tx{} records and transaction identifiers into
%% {FoundTXs, MissingTXIDs}, consulting the mempool and then local storage.
pick_txs(TXIDs) ->
	Mempool = ar_mempool:get_map(),
	lists:foldr(
		fun (TX, {Found, Missing}) when is_record(TX, tx) ->
				{[TX | Found], Missing};
			(TXID, {Found, Missing}) ->
				case maps:get(TXID, Mempool, tx_not_in_mempool) of
					tx_not_in_mempool ->
						%% This disk read should almost never be useful. Presumably,
						%% the only reason to find some of these transactions on disk
						%% is they had been written prior to the call, which means they
						%% are from an orphaned fork, more than one block behind.
						case ar_storage:read_tx(TXID) of
							unavailable ->
								{Found, [TXID | Missing]};
							TX ->
								{[TX | Found], Missing}
						end;
					_Status ->
						{[ar_mempool:get_tx(TXID) | Found], Missing}
				end
		end,
		{[], []},
		TXIDs
	).

%% @doc Pick a double-signing proof (if any) that is valid against the state
%% preceding the block being constructed, or undefined.
may_be_get_double_signing_proof(PrevB, State) ->
	LockedRewards = ar_rewards:get_locked_rewards(PrevB),
	Proofs = maps:get(double_signing_proofs, State, #{}),
	RootHash = PrevB#block.wallet_list,
	Height = PrevB#block.height + 1,
	may_be_get_double_signing_proof2(maps:iterator(Proofs), RootHash, LockedRewards, Height).

%% @doc Iterate over the collected double-signing proofs and return the first
%% one that passes every check (key/signature sizes, distinct signatures, a
%% locked reward to slash, valid signatures, the cumulative-difficulty
%% condition, and the offender not already banned), or undefined.
may_be_get_double_signing_proof2(Iterator, RootHash, LockedRewards, Height) ->
	case maps:next(Iterator) of
		none ->
			undefined;
		{Addr, {_Timestamp, Proof2}, Iterator2} ->
			{Pub, Sig1, CDiff1, PrevCDiff1, Preimage1,
					Sig2, CDiff2, PrevCDiff2, Preimage2} = Proof2,
			?LOG_INFO([{event, evaluating_double_signing_proof},
					{key_size, byte_size(Pub)},
					{sig1_size, byte_size(Sig1)},
					{sig2_size, byte_size(Sig2)},
					{height, Height}]),
			%% ECDSA keys are only accepted from the 2.9 fork; otherwise the
			%% key and both signatures must be RSA-sized.
			CheckKeyType =
				case {byte_size(Pub) == ?ECDSA_PUB_KEY_SIZE,
						Height >= ar_fork:height_2_9()} of
					{true, false} ->
						false;
					{true, true} ->
						byte_size(Sig1) == ?ECDSA_SIG_SIZE
								andalso byte_size(Sig2) == ?ECDSA_SIG_SIZE;
					_ ->
						byte_size(Pub) == ?RSA_BLOCK_SIG_SIZE
								andalso byte_size(Sig1) == ?RSA_BLOCK_SIG_SIZE
								andalso byte_size(Sig2) == ?RSA_BLOCK_SIG_SIZE
				end,
			CheckDifferentSignatures =
				case CheckKeyType of
					false ->
						false;
					true ->
						Sig1 /= Sig2
				end,
			HasLockedReward =
				case CheckDifferentSignatures of
					false ->
						false;
					true ->
						ar_rewards:has_locked_reward(Addr, LockedRewards)
				end,
			ValidSignatures =
				case HasLockedReward of
					false ->
						false;
					true ->
						SignaturePreimage1 = ar_block:get_block_signature_preimage(
								CDiff1, PrevCDiff1, Preimage1, Height),
						SignaturePreimage2 = ar_block:get_block_signature_preimage(
								CDiff2, PrevCDiff2, Preimage2, Height),
						Key = ar_block:get_reward_key(Pub, Height),
						ar_wallet:verify(Key, SignaturePreimage1, Sig1)
								andalso ar_wallet:verify(Key, SignaturePreimage2, Sig2)
				end,
			ValidCDiffs =
				case ValidSignatures of
					false ->
						false;
					true ->
						ar_block:get_double_signing_condition(CDiff1, PrevCDiff1,
								CDiff2, PrevCDiff2)
				end,
			case ValidCDiffs of
				false ->
					may_be_get_double_signing_proof2(Iterator2, RootHash,
							LockedRewards, Height);
				true ->
					Accounts = ar_wallets:get(RootHash, [Addr]),
					case ar_node_utils:is_account_banned(Addr, Accounts) of
						true ->
							may_be_get_double_signing_proof2(Iterator2, RootHash,
									LockedRewards, Height);
						false ->
							Proof2
					end
			end
	end.

%% @doc Return the SHA-256 of the PoA chunk from the 2.7 fork onward, or
%% undefined before the fork or when the chunk is empty.
get_chunk_hash(#poa{ chunk = Chunk }, Height) ->
	case Height >= ar_fork:height_2_7() of
		false ->
			undefined;
		true ->
			case Chunk of
				<<>> ->
					undefined;
				_ ->
					crypto:hash(sha256, Chunk)
			end
	end.

%% @doc Return the SHA-256 of the unpacked chunk for composite packing
%% (packing difficulty >= 1) when a recall byte is set; undefined otherwise.
get_unpacked_chunk_hash(PoA, PackingDifficulty, RecallByte) ->
	case PackingDifficulty >= 1 of
		false ->
			undefined;
		true ->
			case RecallByte of
				undefined ->
					undefined;
				_ ->
					crypto:hash(sha256, PoA#poa.unpacked_chunk)
			end
	end.

%% @doc Fill the block template B with the mempool transactions valid on top
%% of PrevB, compute the resulting weave size, tx root, account tree root,
%% miner reward, endowment pool, and reward history hash.
pack_block_with_transactions(B, PrevB) ->
	#block{ reward_history = RewardHistory,
			reward_history_hash = PreviousRewardHistoryHash } = PrevB,
	TXs = collect_mining_transactions(?BLOCK_TX_COUNT_LIMIT),
	Rate = ar_pricing:usd_to_ar_rate(PrevB),
	PricePerGiBMinute = PrevB#block.price_per_gib_minute,
	PrevDenomination = PrevB#block.denomination,
	Height = B#block.height,
	Denomination = B#block.denomination,
	KryderPlusRateMultiplier = PrevB#block.kryder_plus_rate_multiplier,
	RedenominationHeight = PrevB#block.redenomination_height,
	Addresses = [B#block.reward_addr | ar_tx:get_addresses(TXs)],
	Addresses2 = [ar_rewards:get_oldest_locked_address(PrevB) | Addresses],
	Addresses3 =
		case B#block.double_signing_proof of
			undefined ->
				Addresses2;
			Proof ->
				[ar_wallet:hash_pub_key(element(1, Proof)) | Addresses2]
		end,
	Accounts = ar_wallets:get(PrevB#block.wallet_list, Addresses3),
	[{block_txs_pairs, BlockTXPairs}] = ets:lookup(node_state, block_txs_pairs),
	PrevBlocks = ar_block_cache:get_fork_blocks(block_cache, B),
	BlockTXPairs2 = update_block_txs_pairs(B, PrevBlocks, BlockTXPairs),
	BlockTXPairs3 = tl(BlockTXPairs2),
	{BlockAnchors, RecentTXMap} = get_block_anchors_and_recent_txs_map(BlockTXPairs3),
	ValidTXs = ar_tx_replay_pool:pick_txs_to_mine({BlockAnchors, RecentTXMap,
			Height - 1, RedenominationHeight, Rate, PricePerGiBMinute,
			KryderPlusRateMultiplier, PrevDenomination, B#block.timestamp,
			Accounts, TXs}),
	BlockSize =
		lists:foldl(
			fun(TX, Acc) ->
				Acc + ar_tx:get_weave_size_increase(TX, Height)
			end,
			0,
			ValidTXs
		),
	WeaveSize = PrevB#block.weave_size + BlockSize,
	B2 = B#block{ txs = ValidTXs, block_size = BlockSize, weave_size = WeaveSize,
			tx_root = ar_block:generate_tx_root_for_block(ValidTXs, Height),
			size_tagged_txs = ar_block:generate_size_tagged_list_from_txs(ValidTXs,
					Height) },
	{ok, {EndowmentPool, Reward, DebtSupply, KryderPlusRateMultiplierLatch,
			KryderPlusRateMultiplier2, Accounts2}} =
		ar_node_utils:update_accounts(B2, PrevB, Accounts),
	Reward2 = ar_pricing:redenominate(Reward, PrevDenomination, Denomination),
	EndowmentPool2 = ar_pricing:redenominate(EndowmentPool, PrevDenomination,
			Denomination),
	DebtSupply2 = ar_pricing:redenominate(DebtSupply, PrevDenomination, Denomination),
	{ok, RootHash} = ar_wallets:add_wallets(PrevB#block.wallet_list, Accounts2, Height,
			Denomination),
	RewardHistory2 = ar_rewards:add_element(B2#block{ reward = Reward2 }, RewardHistory),
	%% Pre-2.8: slice the reward history to compute the hash
	%% Post-2.8: use the previous reward history hash and the head of the history to compute
	%% the new hash.
	LockedRewards = ar_rewards:trim_locked_rewards(Height, RewardHistory2),
	B2#block{
		wallet_list = RootHash,
		reward_pool = EndowmentPool2,
		reward = Reward2,
		reward_history = RewardHistory2,
		reward_history_hash = ar_rewards:reward_history_hash(Height,
				PreviousRewardHistoryHash, LockedRewards),
		debt_supply = DebtSupply2,
		kryder_plus_rate_multiplier_latch = KryderPlusRateMultiplierLatch,
		kryder_plus_rate_multiplier = KryderPlusRateMultiplier2
	}.
%% @doc Extend the block index BI with B and its fork blocks. The entries
%% above the fork root (the last element of PrevBlocks) are orphaned; return
%% {OrphanedHashes, NewBI} with the fresh entries prepended newest-first.
update_block_index(B, PrevBlocks, BI) ->
	ForkRootH = (lists:last(PrevBlocks))#block.indep_hash,
	{Orphans, Base} = get_orphans(BI, ForkRootH),
	NewEntries = [block_index_entry(Blk) || Blk <- [B | PrevBlocks]],
	{Orphans, NewEntries ++ Base}.

%% @doc Split the block index at the entry with hash H: return the hashes
%% above it (oldest first) and the remaining index below it (H excluded from
%% neither part — the entry for H starts the returned tail's predecessor set).
%% Crashes if H is not present in BI.
get_orphans(BI, H) ->
	get_orphans(BI, H, []).

get_orphans([{Hash, _, _} | Rest], H, Acc) ->
	case Hash of
		H ->
			{Acc, Rest};
		_ ->
			get_orphans(Rest, H, [Hash | Acc])
	end.

%% @doc Build the {Hash, WeaveSize, TXRoot} block index entry for a block.
block_index_entry(B) ->
	{B#block.indep_hash, B#block.weave_size, B#block.tx_root}.

%% @doc Extend the {BH, SizeTaggedTXs} pair list with B and its fork blocks,
%% trimmed to twice the maximum transaction anchor depth.
update_block_txs_pairs(B, PrevBlocks, BlockTXPairs) ->
	Extended = update_block_txs_pairs2(B, PrevBlocks, BlockTXPairs),
	lists:sublist(Extended, 2 * ar_block:get_max_tx_anchor_depth()).

update_block_txs_pairs2(B, [ForkRootB], Pairs) ->
	%% The fork root is already part of Pairs — keep it and everything below.
	RootH = ForkRootB#block.indep_hash,
	NotRoot = fun({Hash, _}) -> Hash /= RootH end,
	[block_txs_pair(B) | lists:dropwhile(NotRoot, Pairs)];
update_block_txs_pairs2(B, [PrevB | PrevBlocks], Pairs) ->
	[block_txs_pair(B) | update_block_txs_pairs2(PrevB, PrevBlocks, Pairs)].

%% @doc Build the {Hash, SizeTaggedTXs} pair for a block.
block_txs_pair(B) ->
	{B#block.indep_hash, B#block.size_tagged_txs}.
%% @doc Apply block B to the account tree. On any validation error, log it,
%% emit a block-rejected event with the specific reason, and return error;
%% return ok on success.
validate_wallet_list(#block{ indep_hash = H } = B, PrevB) ->
	case ar_wallets:apply_block(B, PrevB) of
		{error, invalid_denomination} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_denomination}, {h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_denomination, H, no_peer}),
			error;
		{error, mining_address_banned} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, mining_address_banned}, {h, ar_util:encode(H)},
					{mining_address, ar_util:encode(B#block.reward_addr)}]),
			ar_events:send(block, {rejected, mining_address_banned, H, no_peer}),
			error;
		{error, invalid_double_signing_proof_same_signature} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_double_signing_proof_same_signature},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_double_signing_proof_same_signature,
					H, no_peer}),
			error;
		{error, invalid_double_signing_proof_cdiff} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_double_signing_proof_cdiff},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_double_signing_proof_cdiff,
					H, no_peer}),
			error;
		{error, invalid_double_signing_proof_same_address} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_double_signing_proof_same_address},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_double_signing_proof_same_address,
					H, no_peer}),
			error;
		{error, invalid_double_signing_proof_not_in_reward_history} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_double_signing_proof_not_in_reward_history},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected,
					invalid_double_signing_proof_not_in_reward_history, H, no_peer}),
			error;
		{error, invalid_double_signing_proof_already_banned} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_double_signing_proof_already_banned},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected,
					invalid_double_signing_proof_already_banned, H, no_peer}),
			error;
		{error, invalid_double_signing_proof_invalid_signature} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_double_signing_proof_invalid_signature},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected,
					invalid_double_signing_proof_invalid_signature, H, no_peer}),
			error;
		{error, invalid_account_anchors} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_account_anchors}, {h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_account_anchors, H, no_peer}),
			error;
		{error, invalid_reward_pool} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_reward_pool}, {h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_reward_pool, H, no_peer}),
			error;
		{error, invalid_miner_reward} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_miner_reward}, {h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_miner_reward, H, no_peer}),
			error;
		{error, invalid_debt_supply} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_debt_supply}, {h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_debt_supply, H, no_peer}),
			error;
		{error, invalid_kryder_plus_rate_multiplier_latch} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_kryder_plus_rate_multiplier_latch},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_kryder_plus_rate_multiplier_latch,
					H, no_peer}),
			error;
		{error, invalid_kryder_plus_rate_multiplier} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_kryder_plus_rate_multiplier},
					{h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_kryder_plus_rate_multiplier,
					H, no_peer}),
			error;
		{error, invalid_wallet_list} ->
			?LOG_WARNING([{event, received_invalid_block},
					{validation_error, invalid_wallet_list}, {h, ar_util:encode(H)}]),
			ar_events:send(block, {rejected, invalid_wallet_list, H, no_peer}),
			error;
		{ok, _RootHash2} ->
			ok
	end.

%% @doc Fetch the transactions missing from the given block shadow from peers
%% and hand them back to Worker via a cache_missing_txs cast. Gives up when
%% the transaction count or the total format-1 data size exceeds the limits.
get_missing_txs_and_retry(#block{ txs = TXIDs }, _Worker)
		when length(TXIDs) > 1000 ->
	?LOG_WARNING([{event, ar_node_worker_downloaded_txs_count_exceeds_limit}]),
	ok;
get_missing_txs_and_retry(BShadow, Worker) ->
	get_missing_txs_and_retry(BShadow#block.indep_hash, BShadow#block.txs, Worker,
			ar_peers:get_peers(current), [], 0).

get_missing_txs_and_retry(_H, _TXIDs, _Worker, _Peers, _TXs, TotalSize)
		when TotalSize > ?BLOCK_TX_DATA_SIZE_LIMIT ->
	?LOG_WARNING([{event, ar_node_worker_downloaded_txs_exceed_block_size_limit}]),
	ok;
get_missing_txs_and_retry(H, [], Worker, _Peers, TXs, _TotalSize) ->
	gen_server:cast(Worker, {cache_missing_txs, H, lists:reverse(TXs)});
get_missing_txs_and_retry(H, TXIDs, Worker, Peers, TXs, TotalSize) ->
	%% Fetch in parallel batches of up to five transactions.
	Split = min(5, length(TXIDs)),
	{Bulk, Rest} = lists:split(Split, TXIDs),
	Fetch =
		lists:foldl(
			fun	(TX = #tx{ format = 1, data_size = DataSize }, {Acc1, Acc2}) ->
					%% Only format-1 data counts towards the block size limit.
					{[TX | Acc1], Acc2 + DataSize};
				(TX = #tx{}, {Acc1, Acc2}) ->
					{[TX | Acc1], Acc2};
				(_, failed_to_fetch_tx) ->
					failed_to_fetch_tx;
				(_, _) ->
					failed_to_fetch_tx
			end,
			{TXs, TotalSize},
			ar_util:pmap(
				fun(TXID) ->
					ar_http_iface_client:get_tx(Peers, TXID)
				end,
				Bulk
			)
		),
	case Fetch of
		failed_to_fetch_tx ->
			?LOG_WARNING([{event, ar_node_worker_failed_to_fetch_missing_tx}]),
			ok;
		{TXs2, TotalSize2} ->
			get_missing_txs_and_retry(H, Rest, Worker, Peers, TXs2, TotalSize2)
	end.

%% @doc Record the validated block B in the cache. Only adopt it as the new
%% tip when its cumulative difficulty exceeds the current one; otherwise it is
%% merely cached as a validated fork block.
apply_validated_block(State, B, PrevBlocks, Orphans, RecentBI, BlockTXPairs) ->
	?LOG_DEBUG([{event, apply_validated_block},
			{block, ar_util:encode(B#block.indep_hash)}]),
	case ar_watchdog:is_mined_block(B) of
		true ->
			ar_events:send(block, {new, B, #{ source => miner }});
		false ->
			ok
	end,
	[{_, CDiff}] = ets:lookup(node_state, cumulative_diff),
	case B#block.cumulative_diff =< CDiff of
		true ->
			%% The block is from the longest fork, but not the latest known block from
			%% there.
			ar_block_cache:add_validated(block_cache, B),
			gen_server:cast(?MODULE, apply_block),
			log_applied_block(B),
			State;
		false ->
			apply_validated_block2(State, B, PrevBlocks, Orphans, RecentBI, BlockTXPairs)
	end.

%% @doc Switch the tip to block B: update the block cache, account tree,
%% mempool, block index, sync processes, and the node_state ETS table, then
%% emit the new_tip and related events. The statement order here matters.
apply_validated_block2(State, B, PrevBlocks, Orphans, RecentBI, BlockTXPairs) ->
	[{current, CurrentH}] = ets:lookup(node_state, current),
	BH = B#block.indep_hash,
	%% Overwrite the block to store computed size tagged txs - they
	%% may be needed for reconstructing block_txs_pairs if there is a reorg
	%% off and then back on this fork.
	ar_block_cache:add(block_cache, B),
	ar_block_cache:mark_tip(block_cache, BH),
	ar_block_cache:prune(block_cache, ar_block:get_consensus_window_size()),
	%% We could have missed a few blocks due to networking issues, which would then
	%% be picked by ar_poller and end up waiting for missing transactions to be fetched.
	%% Therefore, it is possible (although not likely) that there are blocks above the new
	%% tip, for which we trigger a block application here, in order not to wait for the
	%% next arrived or fetched block to trigger it.
	gen_server:cast(?MODULE, apply_block),
	log_applied_block(B),
	log_tip(B),
	maybe_report_n_confirmations(B, RecentBI),
	PrevB = hd(PrevBlocks),
	ForkRootB = lists:last(PrevBlocks), %% The root of any detected fork
	prometheus_gauge:set(block_time, B#block.timestamp - PrevB#block.timestamp),
	record_economic_metrics(B, PrevB),
	%% Notify the watchdog about each orphaned block, with its height.
	lists:foldl(
		fun(OrphanH, OrphanHeight) ->
			ar_watchdog:block_orphaned(OrphanH, OrphanHeight),
			OrphanHeight + 1
		end,
		ForkRootB#block.height + 1,
		Orphans
	),
	ar_chain_stats:log_fork(Orphans, ForkRootB),
	record_vdf_metrics(B, PrevB),
	return_orphaned_txs_to_mempool(CurrentH, ForkRootB#block.indep_hash),
	%% Set the current account tree root for every block along the applied
	%% fork, oldest first; the fold skips the fork root itself (start marker).
	lists:foldl(
		fun	(CurrentB, start) ->
				CurrentB;
			(CurrentB, _CurrentPrevB) ->
				Wallets = CurrentB#block.wallet_list,
				%% Use a twice bigger depth than the depth requested on join to serve
				%% the wallet trees to the joining nodes.
				ok = ar_wallets:set_current(Wallets, CurrentB#block.height,
						ar_block:get_consensus_window_size() * 2),
				CurrentB
		end,
		start,
		lists:reverse([B | PrevBlocks])
	),
	ar_disk_cache:write_block(B),
	BlockTXs = B#block.txs,
	ar_mempool:drop_txs(BlockTXs, false, false),
	gen_server:cast(self(), {filter_mempool, ar_mempool:get_all_txids()}),
	{BlockAnchors, RecentTXMap} = get_block_anchors_and_recent_txs_map(BlockTXPairs),
	Height = B#block.height,
	{Rate, ScheduledRate} =
		case Height >= ar_fork:height_2_5() of
			true ->
				{B#block.usd_to_ar_rate, B#block.scheduled_usd_to_ar_rate};
			false ->
				{?INITIAL_USD_TO_AR((Height + 1))(), ?INITIAL_USD_TO_AR((Height + 1))()}
		end,
	AddedBlocks = tl(lists:reverse([B | [PrevB2 || PrevB2 <- PrevBlocks]])),
	AddedBIElements = [block_index_entry(Blck) || Blck <- AddedBlocks],
	OrphanCount = length(Orphans),
	ar_block_index:update(AddedBIElements, OrphanCount),
	RecentBI2 = lists:sublist(RecentBI, ?BLOCK_INDEX_HEAD_LEN),
	ar_data_sync:add_tip_block(BlockTXPairs, RecentBI2),
	ar_header_sync:add_tip_block(B, RecentBI2),
	lists:foreach(
		fun(PrevB3) ->
			ar_header_sync:add_block(PrevB3),
			ar_disk_cache:write_block(PrevB3)
		end,
		tl(lists:reverse(PrevBlocks))
	),
	ar_storage:update_block_index(B#block.height, OrphanCount, AddedBIElements),
	ar_storage:store_reward_history_part(AddedBlocks),
	ar_storage:store_block_time_history_part(AddedBlocks, ForkRootB),
	ets:insert(node_state, [
		{recent_block_index, RecentBI2},
		{recent_max_block_size, get_max_block_size(RecentBI2)},
		{current, B#block.indep_hash},
		{timestamp, B#block.timestamp},
		{wallet_list, B#block.wallet_list},
		{height, B#block.height},
		{hash, B#block.hash},
		{reward_pool, B#block.reward_pool},
		{diff_pair, ar_difficulty:diff_pair(B)},
		{cumulative_diff, B#block.cumulative_diff},
		{last_retarget, B#block.last_retarget},
		{weave_size, B#block.weave_size},
		{nonce_limiter_info, B#block.nonce_limiter_info},
		{block_txs_pairs, BlockTXPairs},
		{block_anchors, BlockAnchors},
		{recent_txs_map, RecentTXMap},
		{usd_to_ar_rate, Rate},
		{scheduled_usd_to_ar_rate, ScheduledRate},
		{price_per_gib_minute, B#block.price_per_gib_minute},
		{kryder_plus_rate_multiplier, B#block.kryder_plus_rate_multiplier},
		{denomination, B#block.denomination},
		{redenomination_height, B#block.redenomination_height},
		{scheduled_price_per_gib_minute, B#block.scheduled_price_per_gib_minute},
		{merkle_rebase_support_threshold, get_merkle_rebase_threshold(B)}
	]),
	SearchSpaceUpperBound = ar_node:get_partition_upper_bound(RecentBI),
	ar_events:send(node_state, {search_space_upper_bound, SearchSpaceUpperBound}),
	ar_events:send(node_state, {new_tip, B, PrevB}),
	ar_events:send(node_state, {checkpoint_block,
			ar_block_cache:get_checkpoint_block(RecentBI)}),
	maybe_reset_miner(State).

%% @doc Log the applied block together with its recall partitions (when set).
log_applied_block(B) ->
	Partition1 = ar_node:get_partition_number(B#block.recall_byte),
	Partition2 = ar_node:get_partition_number(B#block.recall_byte2),
	NumChunks = case {Partition1, Partition2} of
		{undefined, undefined} -> 0;
		{undefined, _} -> 1;
		{_, undefined} -> 1;
		_ -> 2
	end,
	?LOG_INFO([
		{event, applied_block},
		{indep_hash, ar_util:encode(B#block.indep_hash)},
		{height, B#block.height},
		{partition1, Partition1},
		{partition2, Partition2},
		{num_chunks, NumChunks}
	]).

%% @doc Log the adoption of a new tip block.
log_tip(B) ->
	?LOG_INFO([{event, new_tip_block}, {indep_hash, ar_util:encode(B#block.indep_hash)},
			{height, B#block.height}, {weave_size, B#block.weave_size},
			{reward_addr, ar_util:encode(B#block.reward_addr)}]).

%% @doc Once the index holds at least 10 entries, notify the watchdog about
%% the block that just reached 10 confirmations.
maybe_report_n_confirmations(B, BI) ->
	N = 10,
	LastNBlocks = lists:sublist(BI, N),
	case length(LastNBlocks) == N of
		true ->
			{H, _, _} = lists:last(LastNBlocks),
			ar_watchdog:block_received_n_confirmations(H, B#block.height - N + 1);
		false ->
			do_nothing
	end.

%% @doc Record the economic Prometheus metrics for post-2.5 blocks.
record_economic_metrics(B, PrevB) ->
	case B#block.height >= ar_fork:height_2_5() of
		false ->
			ok;
		true ->
			record_economic_metrics2(B, PrevB)
	end.
%% @doc Export the economic metrics of block B (with parent PrevB) to Prometheus
%% gauges: difficulty, hash rate, endowment pool, reward-history averages,
%% pricing, and the expected 200-year storage-cost decline rates.
%% Called only for heights >= the 2.5 fork (see record_economic_metrics/2).
record_economic_metrics2(B, PrevB) ->
	{PoA1Diff, Diff} = ar_difficulty:diff_pair(B),
	prometheus_gauge:set(log_diff, [poa1], ar_retarget:switch_to_log_diff(PoA1Diff)),
	prometheus_gauge:set(log_diff, [poa2], ar_retarget:switch_to_log_diff(Diff)),
	prometheus_gauge:set(network_hashrate, ar_difficulty:get_hash_rate_fixed_ratio(B)),
	prometheus_gauge:set(endowment_pool, B#block.reward_pool),
	prometheus_gauge:set(kryder_plus_rate_multiplier, B#block.kryder_plus_rate_multiplier),
	Period_200_Years = 200 * 365 * 24 * 60 * 60,
	case B#block.height >= ar_fork:height_2_6() of
		true ->
			%% The reward-history metrics only exist from the 2.6 fork on.
			#block{ reward_history = RewardHistory } = B,
			RewardHistorySize = length(RewardHistory),
			%% Reward history entries appear to be 4-tuples with the hash rate in
			%% the second position and the reward in the third — TODO confirm
			%% against the ar_rewards module.
			AverageHashRate = ar_util:safe_divide(lists:sum(
					[HR || {_, HR, _, _} <- RewardHistory]), RewardHistorySize),
			prometheus_gauge:set(average_network_hash_rate, AverageHashRate),
			AverageBlockReward = ar_util:safe_divide(lists:sum(
					[R || {_, _, R, _} <- RewardHistory]), RewardHistorySize),
			prometheus_gauge:set(average_block_reward, AverageBlockReward),
			prometheus_gauge:set(price_per_gibibyte_minute, B#block.price_per_gib_minute),
			BlockInterval = ar_block_time_history:compute_block_interval(PrevB),
			Args = {PrevB#block.reward_pool, PrevB#block.debt_supply, B#block.txs,
					B#block.weave_size, B#block.height, PrevB#block.price_per_gib_minute,
					PrevB#block.kryder_plus_rate_multiplier_latch,
					PrevB#block.kryder_plus_rate_multiplier,
					PrevB#block.denomination, BlockInterval},
			{ExpectedBlockReward, _, _, _, _, Give, Take} =
					ar_pricing:get_miner_reward_endowment_pool_debt_supply(Args),
			prometheus_gauge:set(endowment_pool_take, Take),
			prometheus_gauge:set(endowment_pool_give, Give),
			prometheus_gauge:set(expected_block_reward, ExpectedBlockReward),
			LegacyPricePerGibibyte = ar_pricing:get_storage_cost(?MiB * 1024,
					os:system_time(second), PrevB#block.usd_to_ar_rate, B#block.height),
			prometheus_gauge:set(legacy_price_per_gibibyte_minute, LegacyPricePerGibibyte),
			prometheus_gauge:set(available_supply,
					?TOTAL_SUPPLY - B#block.reward_pool + B#block.debt_supply),
			prometheus_gauge:set(debt_supply, B#block.debt_supply);
		false ->
			ok
	end,
	%% The decline-rate computations may crash for extreme inputs, hence the
	%% catch: a failure is logged and the gauge is simply not updated.
	case catch ar_pricing:get_expected_min_decline_rate(B#block.timestamp,
			Period_200_Years, B#block.reward_pool, B#block.weave_size,
			B#block.usd_to_ar_rate, B#block.height) of
		{'EXIT', _} ->
			?LOG_ERROR([{event, failed_to_compute_expected_min_decline_rate}]);
		{RateDivisor, RateDividend} ->
			prometheus_gauge:set(expected_minimum_200_years_storage_costs_decline_rate,
					ar_util:safe_divide(RateDivisor, RateDividend))
	end,
	%% Same computation, but at a fixed 1/10 USD/AR rate.
	case catch ar_pricing:get_expected_min_decline_rate(B#block.timestamp,
			Period_200_Years, B#block.reward_pool, B#block.weave_size,
			{1, 10}, B#block.height) of
		{'EXIT', _} ->
			?LOG_ERROR([{event, failed_to_compute_expected_min_decline_rate2}]);
		{RateDivisor2, RateDividend2} ->
			prometheus_gauge:set(
					expected_minimum_200_years_storage_costs_decline_rate_10_usd_ar,
					ar_util:safe_divide(RateDivisor2, RateDividend2))
	end.

%% @doc Export the number of VDF steps between B and its parent PrevB to the
%% block_vdf_time gauge. Only applies from the 2.6 fork on.
record_vdf_metrics(#block{ height = Height } = B, PrevB) ->
	case Height >= ar_fork:height_2_6() of
		true ->
			StepNumber = ar_block:vdf_step_number(B),
			PrevBStepNumber = ar_block:vdf_step_number(PrevB),
			prometheus_gauge:set(block_vdf_time, StepNumber - PrevBStepNumber);
		false ->
			ok
	end.

%% @doc Walk the block cache from H down to BaseH (exclusive) and return every
%% transaction of the orphaned blocks to the mempool, emitting the orphaned and
%% ready_for_mining tx events along the way.
return_orphaned_txs_to_mempool(H, H) ->
	ok;
return_orphaned_txs_to_mempool(H, BaseH) ->
	#block{ txs = TXs, previous_block = PrevH } = ar_block_cache:get(block_cache, H),
	lists:foreach(fun(TX) ->
		ar_events:send(tx, {orphaned, TX}),
		ar_events:send(tx, {ready_for_mining, TX}),
		%% Add it to the mempool here even though have triggered an event - processes
		%% do not handle their own events.
		ar_mempool:add_tx(TX, ready_for_mining)
	end, TXs),
	return_orphaned_txs_to_mempool(PrevH, BaseH).

%% @doc Stop the current mining session and optionally start a new one,
%% depending on the automine setting.
maybe_reset_miner(#{ mine_until_height := {height, TargetHeight} } = State) -> case ar_node:get_height() >= TargetHeight of true -> maybe_reset_miner(State#{ mine_until_height => undefined, automine => false }); false -> start_mining(State) end; maybe_reset_miner(#{ miner_state := MinerState, automine := false } = State) -> case MinerState of undefined -> ok; _ -> ?MINING_SERVER:pause() end, State#{ miner_state => undefined }; maybe_reset_miner(State) -> start_mining(State). start_mining(State) -> DiffPair = get_current_diff(), [{_, MerkleRebaseThreshold}] = ets:lookup(node_state, merkle_rebase_support_threshold), [{_, Height}] = ets:lookup(node_state, height), ?MINING_SERVER:start_mining({DiffPair, MerkleRebaseThreshold, Height}), case maps:get(miner_state, State) of undefined -> State#{ miner_state => running }; running -> ?MINING_SERVER:set_difficulty(DiffPair), ?MINING_SERVER:set_merkle_rebase_threshold(MerkleRebaseThreshold), ?MINING_SERVER:set_height(Height), State end. get_current_diff() -> get_current_diff(os:system_time(second)). get_current_diff(TS) -> Props = ets:select( node_state, [{{'$1', '$2'}, [{'or', {'==', '$1', height}, {'==', '$1', diff_pair}, {'==', '$1', last_retarget}, {'==', '$1', timestamp}}], ['$_']}] ), Height = proplists:get_value(height, Props), DiffPair = proplists:get_value(diff_pair, Props), LastRetarget = proplists:get_value(last_retarget, Props), PrevTS = proplists:get_value(timestamp, Props), ar_retarget:maybe_retarget(Height + 1, DiffPair, TS, LastRetarget, PrevTS). get_merkle_rebase_threshold(PrevB) -> case PrevB#block.height + 1 == ar_fork:height_2_7() of true -> PrevB#block.weave_size; _ -> PrevB#block.merkle_rebase_support_threshold end. collect_mining_transactions(Limit) -> collect_mining_transactions(Limit, ar_mempool:get_priority_set(), []). 
collect_mining_transactions(0, _Set, TXs) -> TXs; collect_mining_transactions(Limit, Set, TXs) -> case gb_sets:is_empty(Set) of true -> TXs; false -> {{_Utility, TXID, Status}, Set2} = gb_sets:take_largest(Set), case Status of ready_for_mining -> TX = ar_mempool:get_tx(TXID), collect_mining_transactions(Limit - 1, Set2, [TX | TXs]); _ -> collect_mining_transactions(Limit, Set2, TXs) end end. record_processing_time(StartTimestamp) -> ProcessingTime = timer:now_diff(erlang:timestamp(), StartTimestamp) / 1000000, prometheus_histogram:observe(block_processing_time, ProcessingTime). priority(apply_block) -> {1, 1}; priority({work_complete, _, _, _, _, _}) -> {2, 1}; priority({cache_missing_txs, _, _}) -> {3, 1}; priority(_) -> {os:system_time(second), 1}. read_hash_list_2_0_for_1_0_blocks() -> Fork_2_0 = ar_fork:height_2_0(), case Fork_2_0 > 0 of true -> File = filename:join(["genesis_data", "hash_list_1_0"]), {ok, Binary} = file:read_file(File), HL = lists:map(fun ar_util:decode/1, jiffy:decode(Binary)), Fork_2_0 = length(HL), HL; false -> [] end. start_from_state([#block{} = GenesisB]) -> RewardHistory = GenesisB#block.reward_history, BlockTimeHistory = GenesisB#block.block_time_history, BI = [ar_util:block_index_entry_from_block(GenesisB)], self() ! {join_from_state, 0, BI, [GenesisB#block{ reward_history = RewardHistory, block_time_history = BlockTimeHistory }], not_set}. start_from_state(BI, Height) -> {ok, Config} = arweave_config:get_env(), start_from_state(BI, Height, Config#config.start_from_state). 
%% @doc Boot the node from the given block index and height, reading recent
%% block headers, the reward history, and the block time history from
%% CustomDir. On success sends join_from_state to self and returns ok;
%% otherwise returns an error atom naming the missing data.
start_from_state(BI, Height, CustomDir) ->
	case ar_node:read_recent_blocks(BI,
			min(length(BI) - 1, ?START_FROM_STATE_SEARCH_DEPTH), CustomDir) of
		not_found ->
			?LOG_ERROR([{event, start_from_state}, {reason, block_headers_not_found}]),
			block_headers_not_found;
		{Skipped, Blocks} ->
			%% Some leading index entries may have no readable headers; drop
			%% them and shift the height accordingly.
			BI2 = lists:nthtail(Skipped, BI),
			Height2 = Height - Skipped,
			RewardHistoryBI = ar_rewards:interim_reward_history_bi(Height, BI2),
			BlockTimeHistoryBI = lists:sublist(BI2,
					ar_block_time_history:history_length()
						+ ar_block:get_consensus_window_size()),
			case {ar_storage:read_reward_history(RewardHistoryBI, CustomDir),
					ar_storage:read_block_time_history(Height2, BlockTimeHistoryBI,
							CustomDir)} of
				{not_found, _} ->
					?LOG_ERROR([{event, start_from_state_error},
							{reason, reward_history_not_found},
							{height, Height2},
							{block_index, length(BI2)},
							{reward_history, length(RewardHistoryBI)}]),
					reward_history_not_found;
				{_, not_found} ->
					?LOG_ERROR([{event, start_from_state_error},
							{reason, block_time_history_not_found},
							{height, Height2},
							{block_index, length(BI2)},
							{block_time_history, length(BlockTimeHistoryBI)}]),
					block_time_history_not_found;
				{RewardHistory, BlockTimeHistory} ->
					Blocks2 = ar_rewards:set_reward_history(Blocks, RewardHistory),
					Blocks3 = ar_block_time_history:set_history(Blocks2, BlockTimeHistory),
					self() ! {join_from_state, Height2, BI2, Blocks3, CustomDir},
					ok
			end
	end.

%% @doc Attach PoA caches to every block in the list (see set_poa_cache/1).
set_poa_caches([]) ->
	[];
set_poa_caches([B | Blocks]) ->
	[set_poa_cache(B) | set_poa_caches(Blocks)].

%% @doc Compute and attach the poa_cache (and, when a second recall byte is
%% present, the poa2_cache) to the given block.
set_poa_cache(B) ->
	PoA1 = B#block.poa,
	PoA2 = B#block.poa2,
	MiningAddress = B#block.reward_addr,
	PackingDifficulty = B#block.packing_difficulty,
	ReplicaFormat = B#block.replica_format,
	Nonce = B#block.nonce,
	RecallByte1 = B#block.recall_byte,
	RecallByte2 = B#block.recall_byte2,
	Packing = ar_block:get_packing(PackingDifficulty, MiningAddress, ReplicaFormat),
	PoACache = compute_poa_cache(B, PoA1, RecallByte1, Nonce, Packing),
	B2 = B#block{ poa_cache = PoACache },
	%% Compute PoA2 cache if PoA2 is present.
	case RecallByte2 of
		undefined ->
			B2;
		_ ->
			PoA2Cache = compute_poa_cache(B, PoA2, RecallByte2, Nonce, Packing),
			B2#block{ poa2_cache = PoA2Cache }
	end.

%% @doc Build the PoA cache entry {{BlockStart, RecallByte, TXRoot, BlockSize,
%% Packing, SubChunkIndex}, ChunkID} for the given proof. The genesis block
%% (height 0) has no PoA cache. When the proof carries no unpacked chunk, the
%% chunk id is obtained by fully validating the proof; a validation failure
%% crashes via the {true, _} match.
compute_poa_cache(#block{ height = 0 }, _PoA, _RecallByte, _Nonce, _Packing) ->
	undefined;
compute_poa_cache(B, PoA, RecallByte, Nonce, Packing) ->
	PackingDifficulty = B#block.packing_difficulty,
	SubChunkIndex = ar_block:get_sub_chunk_index(PackingDifficulty, Nonce),
	{BlockStart, BlockEnd, TXRoot} = ar_block_index:get_block_bounds(RecallByte),
	BlockSize = BlockEnd - BlockStart,
	ChunkID =
		case PoA#poa.unpacked_chunk of
			<<>> ->
				Args = {BlockStart, RecallByte, TXRoot, BlockSize, PoA, Packing,
						SubChunkIndex, not_set},
				{true, ComputedChunkID} = ar_poa:validate(Args),
				ComputedChunkID;
			_ ->
				ar_tx:generate_chunk_id(PoA#poa.unpacked_chunk)
		end,
	{{BlockStart, RecallByte, TXRoot, BlockSize, Packing, SubChunkIndex}, ChunkID}.

%% @doc Persist the mempool (transactions serialized to binary, plus the
%% mempool size) to disk; failures are logged, not propagated.
dump_mempool(TXs, MempoolSize) ->
	SerializedTXs = maps:map(fun(_, {TX, St}) -> {ar_serialize:tx_to_binary(TX), St} end, TXs),
	case ar_storage:write_term(mempool, {SerializedTXs, MempoolSize}) of
		ok ->
			ok;
		{error, Reason} ->
			?LOG_ERROR([{event, failed_to_dump_mempool}, {reason, Reason}])
	end.
%% @doc Validate a mining solution found by this node (or supplied to it) and,
%% if every check passes, build, sign and apply the new block on top of PrevB.
%% The checks are chained: each Passes*/Correct*/No* variable is either `true`
%% or `{false, Reason}`, and every later check short-circuits on an earlier
%% failure so exactly one failure reason is reported per solution.
%% Returns {noreply, State'} in all cases.
handle_found_solution(Args, PrevB, State, IsRebase) ->
	{Source, Solution, PoACache, PoA2Cache} = Args,
	#mining_solution{
		last_step_checkpoints = LastStepCheckpoints,
		mining_address = MiningAddress,
		seed = NonceLimiterSeed,
		next_seed = NonceLimiterNextSeed,
		next_vdf_difficulty = NonceLimiterNextVDFDifficulty,
		nonce = Nonce,
		nonce_limiter_output = NonceLimiterOutput,
		partition_number = PartitionNumber,
		poa1 = PoA1,
		poa2 = PoA2,
		preimage = SolutionPreimage,
		recall_byte1 = RecallByte1,
		recall_byte2 = RecallByte2,
		solution_hash = SolutionH,
		start_interval_number = IntervalNumber,
		step_number = StepNumber,
		steps = SuppliedSteps,
		packing_difficulty = PackingDifficulty,
		replica_format = ReplicaFormat
	} = Solution,
	?LOG_INFO([{event, handle_found_solution}, {solution, ar_util:encode(SolutionH)}]),
	MerkleRebaseThreshold = ?MERKLE_REBASE_SUPPORT_THRESHOLD,
	#block{ indep_hash = PrevH, timestamp = PrevTimestamp, wallet_list = WalletList,
			nonce_limiter_info = PrevNonceLimiterInfo, height = PrevHeight } = PrevB,
	Height = PrevHeight + 1,
	Now = os:system_time(second),
	MaxDeviation = ar_block:get_max_timestamp_deviation(),
	%% Never timestamp the new block earlier than the allowed deviation below
	%% the previous block's timestamp, even if our clock says so.
	Timestamp =
		case Now < PrevTimestamp - MaxDeviation of
			true ->
				?LOG_WARNING([{event, clock_out_of_sync},
						{previous_block, ar_util:encode(PrevH)},
						{previous_block_timestamp, PrevTimestamp},
						{our_time, Now},
						{max_allowed_deviation, MaxDeviation}]),
				PrevTimestamp - MaxDeviation;
			false ->
				Now
		end,
	IsBanned = ar_node_utils:is_account_banned(MiningAddress,
			ar_wallets:get(WalletList, MiningAddress)),
	%% Check the solution is ahead of the previous solution on the timeline.
	NonceLimiterInfo = #nonce_limiter_info{ global_step_number = StepNumber,
			output = NonceLimiterOutput,
			prev_output = PrevNonceLimiterInfo#nonce_limiter_info.output },
	PassesTimelineCheck =
		case IsBanned of
			true ->
				ar_mining_server:log_prepare_solution_failure(Solution,
						rejected, mining_address_banned, Source, []),
				{false, address_banned};
			false ->
				case ar_block:validate_replica_format(Height, PackingDifficulty,
						ReplicaFormat) of
					false ->
						ar_mining_server:log_prepare_solution_failure(Solution,
								rejected, invalid_packing_difficulty, Source, []),
						{false, invalid_packing_difficulty};
					true ->
						case ar_nonce_limiter:is_ahead_on_the_timeline(
								NonceLimiterInfo, PrevNonceLimiterInfo) of
							false ->
								SolutionVDF =
									NonceLimiterInfo#nonce_limiter_info.global_step_number,
								PrevBlockVDF =
									PrevNonceLimiterInfo#nonce_limiter_info.global_step_number,
								ar_mining_server:log_prepare_solution_failure(Solution,
										stale, stale_solution, Source, [
											{solution_vdf, SolutionVDF},
											{prev_block_vdf, PrevBlockVDF}]),
								{false, timeline};
							true ->
								true
						end
				end
		end,
	%% Check solution seed.
	#nonce_limiter_info{ next_seed = PrevNextSeed,
			next_vdf_difficulty = PrevNextVDFDifficulty,
			global_step_number = PrevStepNumber, seed = PrevSeed } = PrevNonceLimiterInfo,
	PrevIntervalNumber = PrevStepNumber div ar_nonce_limiter:get_reset_frequency(),
	PassesSeedCheck =
		case PassesTimelineCheck of
			{false, Reason} ->
				{false, Reason};
			true ->
				%% The solution's VDF seed data must match the current tip.
				case {IntervalNumber, NonceLimiterNextSeed,
						NonceLimiterNextVDFDifficulty, NonceLimiterSeed}
							== {PrevIntervalNumber, PrevNextSeed,
									PrevNextVDFDifficulty, PrevSeed} of
					false ->
						ar_mining_server:log_prepare_solution_failure(Solution,
								stale, vdf_seed_data_does_not_match_current_block, Source, [
									{output, ar_util:encode(NonceLimiterOutput)},
									{interval_number, IntervalNumber},
									{prev_interval_number, PrevIntervalNumber},
									{nonce_limiter_next_seed,
										ar_util:encode(NonceLimiterNextSeed)},
									{nonce_limiter_seed,
										ar_util:encode(NonceLimiterSeed)},
									{prev_nonce_limiter_next_seed,
										ar_util:encode(PrevNextSeed)},
									{prev_nonce_limiter_seed, ar_util:encode(PrevSeed)},
									{nonce_limiter_next_vdf_difficulty,
										NonceLimiterNextVDFDifficulty},
									{prev_nonce_limiter_next_vdf_difficulty,
										PrevNextVDFDifficulty}]),
						{false, seed_data};
					true ->
						true
				end
		end,
	%% Check solution difficulty
	PrevDiffPair = ar_difficulty:diff_pair(PrevB),
	LastRetarget = PrevB#block.last_retarget,
	PrevTS = PrevB#block.timestamp,
	DiffPair = {_PoA1Diff, Diff} = ar_retarget:maybe_retarget(PrevB#block.height + 1,
			PrevDiffPair, Timestamp, LastRetarget, PrevTS),
	PassesDiffCheck =
		case PassesSeedCheck of
			{false, Reason2} ->
				{false, Reason2};
			true ->
				case ar_node_utils:solution_passes_diff_check(Solution, DiffPair) of
					false ->
						ar_mining_server:log_prepare_solution_failure(Solution,
								partial, does_not_pass_diff_check, Source, []),
						{false, diff};
					true ->
						true
				end
		end,
	RewardKey =
		case ar_wallet:load_key(MiningAddress) of
			not_found ->
				?LOG_WARNING([{event, mined_block_but_no_mining_key_found},
						{node, node()},
						{mining_address, ar_util:encode(MiningAddress)}]),
				ar:console("WARNING. Can't find key ~s~n",
						[ar_util:encode(MiningAddress)]),
				not_found;
			Key ->
				Key
		end,
	PassesKeyCheck =
		case PassesDiffCheck of
			{false, Reason3} ->
				{false, Reason3};
			true ->
				case RewardKey of
					not_found ->
						ar_mining_server:log_prepare_solution_failure(Solution,
								rejected, missing_key_file, Source, []),
						{false, wallet_not_found};
					_ ->
						true
				end
		end,
	CorrectRebaseThreshold =
		case PassesKeyCheck of
			{false, Reason4} ->
				{false, Reason4};
			true ->
				%% MerkleRebaseThreshold is bound above, so this case clause
				%% matches only when the thresholds are equal.
				case get_merkle_rebase_threshold(PrevB) of
					MerkleRebaseThreshold ->
						true;
					_ ->
						ar_mining_server:log_prepare_solution_failure(Solution,
								rejected, invalid_merkle_rebase_threshold, Source, []),
						{false, rebase_threshold}
				end
		end,
	PrevCDiff = PrevB#block.cumulative_diff,
	CDiff = ar_difficulty:next_cumulative_diff(PrevCDiff, Diff, Height),
	NoDoubleSigning =
		case CorrectRebaseThreshold of
			{false, Reason5} ->
				{false, Reason5};
			true ->
				case check_no_double_signing(CDiff, PrevCDiff, MiningAddress, Height) of
					false ->
						{false, double_signing};
					true ->
						true
				end
		end,
	%% Check steps and step checkpoints.
	HaveSteps =
		case NoDoubleSigning of
			{false, Reason6} ->
				?LOG_WARNING([{event, ignore_mining_solution}, {reason, Reason6},
						{solution, ar_util:encode(SolutionH)}]),
				false;
			true ->
				ar_nonce_limiter:get_steps(PrevStepNumber, StepNumber,
						PrevNextSeed, PrevNextVDFDifficulty)
		end,
	HaveSteps2 =
		case HaveSteps of
			not_found ->
				%% Fall back to the steps supplied with the solution.
				% TODO verify
				SuppliedSteps;
			_ ->
				HaveSteps
		end,
	%% Pack, build, and sign block.
	case HaveSteps2 of
		false ->
			%% One of the checks above failed; the reason was already logged.
			{noreply, State};
		not_found ->
			?LOG_WARNING([{event, did_not_find_steps_for_mined_block},
					{seed, ar_util:encode(PrevNextSeed)},
					{prev_step_number, PrevStepNumber},
					{step_number, StepNumber}]),
			ar_mining_server:log_prepare_solution_failure(Solution,
					rejected, vdf_steps_not_found, Source, []),
			{noreply, State};
		[NonceLimiterOutput | _] = Steps ->
			%% The head of Steps must equal the solution's output (bound
			%% NonceLimiterOutput); otherwise the _Steps clause below rejects.
			{Seed, NextSeed, PartitionUpperBound, NextPartitionUpperBound,
					VDFDifficulty} = ar_nonce_limiter:get_seed_data(StepNumber, PrevB),
			LastStepCheckpoints2 =
				case LastStepCheckpoints of
					Empty when Empty == not_found orelse Empty == [] ->
						%% No checkpoints supplied: recompute them for the
						%% last step from the previous step's output.
						PrevOutput =
							case Steps of
								[_, PrevStepOutput | _] ->
									PrevStepOutput;
								_ ->
									PrevNonceLimiterInfo#nonce_limiter_info.output
							end,
						PrevOutput2 = ar_nonce_limiter:maybe_add_entropy(
								PrevOutput, PrevStepNumber, StepNumber, PrevNextSeed),
						{ok, NonceLimiterOutput, Checkpoints} = ar_nonce_limiter:compute(
								StepNumber, PrevOutput2, VDFDifficulty),
						Checkpoints;
					_ ->
						LastStepCheckpoints
				end,
			NextVDFDifficulty = ar_block:compute_next_vdf_difficulty(PrevB),
			NonceLimiterInfo2 = NonceLimiterInfo#nonce_limiter_info{
					seed = Seed,
					next_seed = NextSeed,
					partition_upper_bound = PartitionUpperBound,
					next_partition_upper_bound = NextPartitionUpperBound,
					vdf_difficulty = VDFDifficulty,
					next_vdf_difficulty = NextVDFDifficulty,
					last_step_checkpoints = LastStepCheckpoints2,
					steps = Steps },
			{Rate, ScheduledRate} = ar_pricing:recalculate_usd_to_ar_rate(PrevB),
			{PricePerGiBMinute, ScheduledPricePerGiBMinute} =
					ar_pricing:recalculate_price_per_gib_minute(PrevB),
			Denomination = PrevB#block.denomination,
			{Denomination2, RedenominationHeight2} = ar_pricing:may_be_redenominate(PrevB),
			PricePerGiBMinute2 = ar_pricing:redenominate(PricePerGiBMinute,
					Denomination, Denomination2),
			ScheduledPricePerGiBMinute2 = ar_pricing:redenominate(
					ScheduledPricePerGiBMinute, Denomination, Denomination2),
			UnsignedB = pack_block_with_transactions(#block{
				nonce = Nonce,
				previous_block = PrevH,
				timestamp = Timestamp,
				last_retarget =
					case ar_retarget:is_retarget_height(Height) of
						true -> Timestamp;
						false -> PrevB#block.last_retarget
					end,
				diff = Diff,
				height = Height,
				hash = SolutionH,
				hash_list_merkle = ar_block:compute_hash_list_merkle(PrevB),
				reward_addr = ar_wallet:to_address(RewardKey),
				tags = [],
				cumulative_diff = CDiff,
				previous_cumulative_diff = PrevB#block.cumulative_diff,
				poa = PoA1,
				poa_cache = PoACache,
				usd_to_ar_rate = Rate,
				scheduled_usd_to_ar_rate = ScheduledRate,
				packing_2_5_threshold = 0,
				strict_data_split_threshold = PrevB#block.strict_data_split_threshold,
				hash_preimage = SolutionPreimage,
				recall_byte = RecallByte1,
				previous_solution_hash = PrevB#block.hash,
				partition_number = PartitionNumber,
				nonce_limiter_info = NonceLimiterInfo2,
				poa2 = case PoA2 of not_set -> #poa{}; _ -> PoA2 end,
				poa2_cache = PoA2Cache,
				recall_byte2 = RecallByte2,
				reward_key = element(2, RewardKey),
				price_per_gib_minute = PricePerGiBMinute2,
				scheduled_price_per_gib_minute = ScheduledPricePerGiBMinute2,
				denomination = Denomination2,
				redenomination_height = RedenominationHeight2,
				double_signing_proof = may_be_get_double_signing_proof(PrevB, State),
				merkle_rebase_support_threshold = MerkleRebaseThreshold,
				chunk_hash = get_chunk_hash(PoA1, Height),
				chunk2_hash = get_chunk_hash(PoA2, Height),
				packing_difficulty = PackingDifficulty,
				replica_format = ReplicaFormat,
				unpacked_chunk_hash = get_unpacked_chunk_hash(
						PoA1, PackingDifficulty, RecallByte1),
				unpacked_chunk2_hash = get_unpacked_chunk_hash(
						PoA2, PackingDifficulty, RecallByte2)
			}, PrevB),
			BlockTimeHistory2 = lists:sublist(
					ar_block_time_history:update_history(UnsignedB, PrevB),
					ar_block_time_history:history_length()
						+ ar_block:get_consensus_window_size()),
			UnsignedB2 = UnsignedB#block{
					block_time_history = BlockTimeHistory2,
					block_time_history_hash =
						ar_block_time_history:hash(BlockTimeHistory2) },
			SignedH = ar_block:generate_signed_hash(UnsignedB2),
			PrevCDiff = PrevB#block.cumulative_diff,
			SignaturePreimage = ar_block:get_block_signature_preimage(CDiff, PrevCDiff,
					<< (PrevB#block.hash)/binary, SignedH/binary >>, Height),
			assert_key_type(RewardKey, Height),
			Signature = ar_wallet:sign(element(1, RewardKey), SignaturePreimage),
			H = ar_block:indep_hash2(SignedH, Signature),
			B = UnsignedB2#block{ indep_hash = H, signature = Signature },
			ar_watchdog:mined_block(H, Height, PrevH),
			?LOG_INFO([{event, mined_block},
					{indep_hash, ar_util:encode(H)},
					{solution, ar_util:encode(SolutionH)},
					{height, Height},
					{step_number, StepNumber},
					{steps, length(Steps)},
					{txs, length(B#block.txs)},
					{recall_byte1, B#block.recall_byte},
					{recall_byte2, B#block.recall_byte2},
					{chunks, case B#block.recall_byte2 of undefined -> 1; _ -> 2 end}]),
			prometheus_gauge:inc(mining_solution, [success]),
			ar_block_cache:add(block_cache, B),
			ar_events:send(solution, {accepted, #{ indep_hash => H, source => Source,
					is_rebase => IsRebase }}),
			apply_block(update_solution_cache(H, Args, State));
		_Steps ->
			ar_mining_server:log_prepare_solution_failure(Solution,
					rejected, bad_vdf, Source, [
						{event, bad_steps},
						{prev_block, ar_util:encode(PrevH)},
						{step_number, StepNumber},
						{prev_step_number, PrevStepNumber},
						{prev_next_seed, ar_util:encode(PrevNextSeed)},
						{output, ar_util:encode(NonceLimiterOutput)}]),
			{noreply, State}
	end.

%% @doc Assert the reward key has an allowed type for the given height:
%% a 512-byte RSA public key before the 2.9 fork, and from 2.9 on either that
%% or an ECDSA key of ?ECDSA_PUB_KEY_SIZE bytes. Exits with invalid_reward_key
%% otherwise.
assert_key_type(RewardKey, Height) ->
	case Height >= ar_fork:height_2_9() of
		false ->
			case RewardKey of
				{{?RSA_KEY_TYPE, _, _}, {?RSA_KEY_TYPE, Pub}} ->
					true = byte_size(Pub) == 512,
					ok;
				_ ->
					exit(invalid_reward_key)
			end;
		true ->
			case RewardKey of
				{{?RSA_KEY_TYPE, _, _}, {?RSA_KEY_TYPE, Pub}} ->
					true = byte_size(Pub) == 512,
					ok;
				{{?ECDSA_KEY_TYPE, _, _}, {?ECDSA_KEY_TYPE, Pub}} ->
					true = byte_size(Pub) == ?ECDSA_PUB_KEY_SIZE,
					ok;
				_ ->
					exit(invalid_reward_key)
			end
	end.
check_no_double_signing(CDiff, PrevCDiff, MiningAddress, Height) -> Blocks = ar_block_cache:get_blocks_by_miner(block_cache, MiningAddress), not lists:any( fun(B) -> case ar_block:get_double_signing_condition( B#block.cumulative_diff, B#block.previous_cumulative_diff, CDiff, PrevCDiff) of true -> ?LOG_WARNING([{event, avoiding_double_signing}, {block, ar_util:encode(B#block.indep_hash)}, {height, B#block.height}, {new_height, Height}, {cdiff, B#block.cumulative_diff}, {prev_cdiff, B#block.previous_cumulative_diff}, {new_cdiff, CDiff}, {new_prev_cdiff, PrevCDiff}]), true; false -> false end end, Blocks). update_solution_cache(H, Args, State) -> %% Maintain a cache of mining solutions for potential reuse in rebasing. %% %% - We only want to cache 5 solutions at max. %% - If we exceed 5, we remove the oldest one from the solution_cache. %% - solution_cache_records is only used to track which solution is oldest. #{ solution_cache := Map, solution_cache_records := Q } = State, case maps:is_key(H, Map) of true -> State; false -> Q2 = queue:in(H, Q), Map2 = maps:put(H, Args, Map), {Map3, Q3} = case queue:len(Q2) > 5 of true -> {{value, H2}, Q4} = queue:out(Q2), {maps:remove(H2, Map2), Q4}; false -> {Map2, Q2} end, State#{ solution_cache => Map3, solution_cache_records => Q3 } end. 
may_be_report_double_signing(B, State) -> #block{ indep_hash = H, hash = SolutionH, cumulative_diff = CDiff1, previous_cumulative_diff = PrevCDiff1, previous_solution_hash = PrevSolutionH1, reward_key = {_, Key}, signature = Signature1 } = B, case ar_block_cache:get_by_solution_hash(block_cache, SolutionH, H, CDiff1, PrevCDiff1) of not_found -> State; CacheB -> #block{ hash = SolutionH, cumulative_diff = CDiff2, previous_cumulative_diff = PrevCDiff2, previous_solution_hash = PrevSolutionH2, reward_key = {_, Key}, signature = Signature2 } = CacheB, case ar_block:get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) of true -> Preimage1 = << PrevSolutionH1/binary, (ar_block:generate_signed_hash(B))/binary >>, Preimage2 = << PrevSolutionH2/binary, (ar_block:generate_signed_hash(CacheB))/binary >>, Proof = {Key, Signature1, CDiff1, PrevCDiff1, Preimage1, Signature2, CDiff2, PrevCDiff2, Preimage2}, ?LOG_INFO([{event, report_double_signing}, {key, ar_util:encode(Key)}, {block1, ar_util:encode(H)}, {block2, ar_util:encode(CacheB#block.indep_hash)}, {height1, B#block.height}, {height2, CacheB#block.height}]), cache_double_signing_proof(Proof, State); false -> State end end. cache_double_signing_proof(Proof, State) -> Map = maps:get(double_signing_proofs, State, #{}), Key = element(1, Proof), Addr = ar_wallet:hash_pub_key(Key), case is_map_key(Addr, Map) of true -> State; false -> Map2 = maps:put(Addr, {os:system_time(second), Proof}, Map), State#{ double_signing_proofs => Map2 } end. %%-------------------------------------------------------------------- %% @hidden %% @doc A simple list term checker. The idea is to get some information %% regarding the content of a list (e.g. number of same item). %% @end %%-------------------------------------------------------------------- -spec checker(List) -> Return when List :: [term()], Return :: {Length, Counter}, Length :: pos_integer(), Counter :: #{ term() => pos_integer() }. 
checker(List) -> checker(List, length(List), #{}). checker([], Length, Buffer) -> {Length, Buffer}; checker([H|T], Length, Buffer) -> V = maps:get(H, Buffer, 0), checker(T, Length, Buffer#{ H => V+1 }). checker_test() -> ?assertEqual({0, #{}}, checker([])), ?assertEqual({3, #{ true => 3 }}, checker([true, true, true])), ?assertEqual({3, #{ true => 2, false => 1}}, checker([true, true, false])), ?assertEqual({3, #{ true => 1, false => 2}}, checker([true, false, false])). ================================================ FILE: apps/arweave/src/ar_nonce_limiter.erl ================================================ -module(ar_nonce_limiter). -behaviour(gen_server). -export([start_link/0, account_tree_initialized/1, encode_session_key/1, session_key/1, is_ahead_on_the_timeline/2, get_current_step_number/0, get_current_step_number/1, get_step_triplets/3, get_seed_data/2, get_step_checkpoints/2, get_step_checkpoints/4, get_steps/4, get_seed/1, get_active_partition_upper_bound/2, get_reset_frequency/0, get_entropy_reset_point/2, validate_last_step_checkpoints/3, request_validation/3, get_or_init_nonce_limiter_info/1, get_or_init_nonce_limiter_info/2, apply_external_update/2, get_session/1, get_current_session/0, get_current_sessions/0, compute/3, maybe_add_entropy/4, mix_seed/2]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). -include("ar.hrl"). -include("ar_vdf.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include("ar_consensus.hrl"). -include_lib("eunit/include/eunit.hrl"). -record(state, { current_session_key, sessions = gb_sets:new(), session_by_key = #{}, % {NextSeed, StartIntervalNumber, NextVDFDifficulty} => #vdf_session worker, worker_monitor_ref, autocompute = true, computing = false, last_external_update = {not_set, 0}, emit_initialized_event = true }). %%%=================================================================== %%% Public interface. 
%%%=================================================================== %% @doc Start the server. start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). account_tree_initialized(Blocks) -> gen_server:cast(?MODULE, {account_tree_initialized, Blocks}). encode_session_key({NextSeed, StartIntervalNumber, NextVDFDifficulty}) -> {ar_util:safe_encode(NextSeed), StartIntervalNumber, NextVDFDifficulty}; encode_session_key(SessionKey) -> SessionKey. %% @doc Return true if the first solution is above the second one according %% to the protocol ordering. -ifdef(LOCALNET). is_ahead_on_the_timeline(NonceLimiterInfo1, NonceLimiterInfo2) -> #nonce_limiter_info{ global_step_number = N1 } = NonceLimiterInfo1, #nonce_limiter_info{ global_step_number = N2 } = NonceLimiterInfo2, N1 >= N2. -else. is_ahead_on_the_timeline(NonceLimiterInfo1, NonceLimiterInfo2) -> #nonce_limiter_info{ global_step_number = N1 } = NonceLimiterInfo1, #nonce_limiter_info{ global_step_number = N2 } = NonceLimiterInfo2, N1 > N2. -endif. session_key(#nonce_limiter_info{ next_seed = NextSeed, global_step_number = StepNumber, next_vdf_difficulty = NextVDFDifficulty }) -> session_key(NextSeed, StepNumber, NextVDFDifficulty). %% @doc Return the nonce limiter session with the given key. get_session(SessionKey) -> gen_server:call(?MODULE, {get_session, SessionKey}, ?DEFAULT_CALL_TIMEOUT). %% @doc Return {SessionKey, Session} for the current VDF session. get_current_session() -> gen_server:call(?MODULE, get_current_session, ?DEFAULT_CALL_TIMEOUT). %% @doc Return a list of up to two {SessionKey, Session} pairs %% where the first pair corresponds to the current VDF session %% and the second pair is its previous session, if any. get_current_sessions() -> gen_server:call(?MODULE, get_current_sessions, ?DEFAULT_CALL_TIMEOUT). %% @doc Return the latest known step number. get_current_step_number() -> gen_server:call(?MODULE, get_current_step_number, ?DEFAULT_CALL_TIMEOUT). 
%% @doc Return the latest known step number in the session of the given (previous) block.
%% Return not_found if the session is not found.
get_current_step_number(B) ->
	SessionKey = session_key(B#block.nonce_limiter_info),
	gen_server:call(?MODULE, {get_current_step_number, SessionKey},
			?DEFAULT_CALL_TIMEOUT).

%% @doc Return {Output, StepNumber, PartitionUpperBound} for up to N latest steps
%% from the VDF session of Info, if any. If PrevOutput is among the N latest steps,
%% return only the steps strictly above PrevOutput.
get_step_triplets(Info, PrevOutput, N) ->
	SessionKey = session_key(Info),
	Steps = gen_server:call(?MODULE, {get_latest_step_triplets, SessionKey, N},
			?DEFAULT_CALL_TIMEOUT),
	%% Drop the triplets at or below the given outputs (the previous output and
	%% the tip output of Info itself).
	filter_step_triplets(Steps, [PrevOutput, Info#nonce_limiter_info.output]).

%% Assert that StepNumber advances past PrevStepNumber. On localnet equality is
%% tolerated because two blocks may be mined on the same step.
-ifdef(LOCALNET).
assert_step_number_is_ahead(StepNumber, PrevStepNumber) ->
	true = StepNumber >= PrevStepNumber.
-else.
assert_step_number_is_ahead(StepNumber, PrevStepNumber) ->
	true = StepNumber > PrevStepNumber.
-endif.

%% @doc Return {Seed, NextSeed, PartitionUpperBound, NextPartitionUpperBound, VDFDifficulty}
%% for the block mined at StepNumber considering its previous block PrevB.
%% The previous block's independent hash, weave size, and VDF difficulty
%% become the new NextSeed, NextPartitionUpperBound, and NextVDFDifficulty
%% accordingly when we cross the next reset line.
%% Note: next_vdf_difficulty is not part of the seed data as it is computed using the
%% block_time_history - which is a heavier operation handled separate from the (quick) seed data
%% retrieval
get_seed_data(StepNumber, PrevB) ->
	NonceLimiterInfo = PrevB#block.nonce_limiter_info,
	#nonce_limiter_info{ global_step_number = PrevStepNumber, seed = Seed,
			next_seed = NextSeed, partition_upper_bound = PartitionUpperBound,
			next_partition_upper_bound = NextPartitionUpperBound,
			%% VDF difficulty in use at the previous block
			vdf_difficulty = VDFDifficulty,
			%% Next VDF difficulty scheduled at the previous block
			next_vdf_difficulty = PrevNextVDFDifficulty } = NonceLimiterInfo,
	assert_step_number_is_ahead(StepNumber, PrevStepNumber),
	case get_entropy_reset_point(PrevStepNumber, StepNumber) of
		none ->
			%% Entropy reset line was not crossed between previous and current block
			{
				Seed, NextSeed,
				PartitionUpperBound, NextPartitionUpperBound,
				VDFDifficulty
			};
		_ ->
			%% Entropy reset line was crossed between previous and current block
			{
				NextSeed, PrevB#block.indep_hash,
				NextPartitionUpperBound, PrevB#block.weave_size,
				%% The next VDF difficulty that was scheduled at the previous block
				%% (PrevNextVDFDifficulty) was applied when we crossed the entropy
				%% reset line and is now the current VDF difficulty.
				PrevNextVDFDifficulty
			}
	end.

%% @doc Return the cached checkpoints for the given step. Return not_found if
%% none found.
get_step_checkpoints(StepNumber, NextSeed, StartIntervalNumber, NextVDFDifficulty) ->
	SessionKey = {NextSeed, StartIntervalNumber, NextVDFDifficulty},
	get_step_checkpoints(StepNumber, SessionKey).
get_step_checkpoints(StepNumber, SessionKey) ->
	gen_server:call(?MODULE, {get_step_checkpoints, StepNumber, SessionKey},
			?DEFAULT_CALL_TIMEOUT).

%% @doc Return the entropy seed of the given session.
%% Return not_found if the VDF session is not found.
get_seed(SessionKey) ->
	gen_server:call(?MODULE, {get_seed, SessionKey}, ?DEFAULT_CALL_TIMEOUT).
%% @doc Return the active partition upper bound for the given step (chosen among
%% session's upper_bound and next_upper_bound depending on whether the step number has
%% reached the entropy reset point).
%% Return not_found if the VDF session is not found.
get_active_partition_upper_bound(StepNumber, SessionKey) ->
	gen_server:call(?MODULE, {get_active_partition_upper_bound, StepNumber, SessionKey},
			?DEFAULT_CALL_TIMEOUT).

%% @doc Return the steps of the given interval. The steps are chosen
%% according to the protocol. Return not_found if the corresponding hash chain is not
%% computed yet.
-ifdef(LOCALNET).
%% On localnet two blocks may be mined on the same step so this function may be called
%% with the same StepNumber for both the StartStepNumber and EndStepNumber arguments.
get_steps(StepNumber, StepNumber, _NextSeed, _NextVDFDifficulty) ->
	not_found;
get_steps(StartStepNumber, EndStepNumber, NextSeed, NextVDFDifficulty)
		when EndStepNumber > StartStepNumber ->
	SessionKey = session_key(NextSeed, StartStepNumber, NextVDFDifficulty),
	gen_server:call(?MODULE, {get_steps, StartStepNumber, EndStepNumber, SessionKey},
			?DEFAULT_CALL_TIMEOUT).
-else.
get_steps(StartStepNumber, EndStepNumber, NextSeed, NextVDFDifficulty)
		when EndStepNumber > StartStepNumber ->
	SessionKey = session_key(NextSeed, StartStepNumber, NextVDFDifficulty),
	gen_server:call(?MODULE, {get_steps, StartStepNumber, EndStepNumber, SessionKey},
			?DEFAULT_CALL_TIMEOUT).
-endif.

%% @doc Quickly validate the checkpoints of the latest step.
%% Both blocks at the same step number: only possible on localnet (see the
%% ifdef-ed helper below).
validate_last_step_checkpoints(B = #block{ nonce_limiter_info = #nonce_limiter_info{
		global_step_number = StepNumber } }, PrevB = #block{
		nonce_limiter_info = #nonce_limiter_info{
				global_step_number = StepNumber } }, _PrevOutput) ->
	validate_last_step_checkpoints_same_step_number(B, PrevB);
%% The checkpoint list must have exactly ?VDF_CHECKPOINT_COUNT_IN_STEP entries
%% and its head must equal the block's output.
validate_last_step_checkpoints(#block{ nonce_limiter_info = #nonce_limiter_info{
		output = Output, global_step_number = StepNumber, seed = Seed,
		vdf_difficulty = VDFDifficulty,
		last_step_checkpoints = [Output | _] = LastStepCheckpoints } }, PrevB,
		PrevOutput) when length(LastStepCheckpoints) == ?VDF_CHECKPOINT_COUNT_IN_STEP ->
	PrevInfo = get_or_init_nonce_limiter_info(PrevB),
	#nonce_limiter_info{ global_step_number = PrevBStepNumber } = PrevInfo,
	SessionKey = session_key(PrevInfo),
	%% Fast path: compare against the checkpoints we may have computed ourselves.
	case get_step_checkpoints(StepNumber, SessionKey) of
		LastStepCheckpoints ->
			{true, cache_match};
		not_found ->
			%% Mix in the entropy seed when the reset line falls on StepNumber.
			PrevOutput2 = ar_nonce_limiter:maybe_add_entropy(
					PrevOutput, PrevBStepNumber, StepNumber, Seed),
			PrevStepNumber = StepNumber - 1,
			{ok, Config} = arweave_config:get_env(),
			ThreadCount =
				Config#config.max_nonce_limiter_last_step_validation_thread_count,
			%% Recompute the single step from its checkpoints (no reset can occur
			%% within a step).
			case verify_no_reset(PrevStepNumber, PrevOutput2, 1,
					lists:reverse(LastStepCheckpoints), ThreadCount, VDFDifficulty) of
				{true, _Steps} ->
					true;
				false ->
					false
			end;
		CachedSteps ->
			{false, cache_mismatch, CachedSteps}
	end;
validate_last_step_checkpoints(_B, _PrevB, _PrevOutput) ->
	false.

-ifdef(LOCALNET).
%% On localnet two blocks may be mined on the same step. In this case
%% we validate the last step checkpoints come from the previous block.
validate_last_step_checkpoints_same_step_number(
	#block{
		nonce_limiter_info = #nonce_limiter_info{
			output = Output,
			last_step_checkpoints = [Output | _] = LastStepCheckpoints
		}
	},
	PrevB
) when length(LastStepCheckpoints) == ?VDF_CHECKPOINT_COUNT_IN_STEP ->
	%% The checkpoints must be exactly the ones recorded in the previous block.
	PrevInfo = get_or_init_nonce_limiter_info(PrevB),
	PrevInfo#nonce_limiter_info.last_step_checkpoints == LastStepCheckpoints;
validate_last_step_checkpoints_same_step_number(_B, _PrevB) ->
	false.
-else.
%% Outside localnet two blocks never share a step number, so this is always false.
validate_last_step_checkpoints_same_step_number(_B, _PrevB) ->
	false.
-endif.

%% @doc Return the number of steps between entropy reset points.
get_reset_frequency() ->
	?NONCE_LIMITER_RESET_FREQUENCY.

%% @doc Determine whether StepNumber has passed the entropy reset line. If it has return the
%% reset line, otherwise return none.
get_entropy_reset_point(PrevStepNumber, StepNumber) ->
	%% The first reset line strictly above PrevStepNumber.
	ResetLine = (PrevStepNumber div ar_nonce_limiter:get_reset_frequency() + 1)
			* ar_nonce_limiter:get_reset_frequency(),
	case ResetLine > StepNumber of
		true ->
			none;
		false ->
			ResetLine
	end.

%% @doc Conditionally add entropy to PrevOutput if the configured number of steps have
%% passed. See ar_nonce_limiter:get_reset_frequency() for more details.
maybe_add_entropy(PrevOutput, PrevStepNumber, StepNumber, Seed) ->
	case get_entropy_reset_point(PrevStepNumber, StepNumber) of
		StepNumber ->
			mix_seed(PrevOutput, Seed);
		_ ->
			PrevOutput
	end.

%% @doc Add entropy to an earlier VDF output to mitigate the impact of a miner with a
%% fast VDF compute. See ar_nonce_limiter:get_reset_frequency() for more details.
mix_seed(PrevOutput, Seed) ->
	SeedH = crypto:hash(sha256, Seed),
	mix_seed2(PrevOutput, SeedH).

%% SHA-256 of the previous output concatenated with the hashed seed.
mix_seed2(PrevOutput, SeedH) ->
	crypto:hash(sha256, << PrevOutput/binary, SeedH/binary >>).

%% @doc Validate the nonce limiter chain between two blocks in the background.
%% Assume the seeds are correct and the first block is above the second one
%% according to the protocol.
%% Emit {nonce_limiter, {invalid, H, ErrorCode}} or {nonce_limiter, {valid, H}}.
-ifdef(LOCALNET).
%% On localnet two blocks may be mined on the same step. In this case
%% we validate steps come from the previous block.
request_validation_same_step_number(H, #nonce_limiter_info{ steps = Steps },
		#nonce_limiter_info{ steps = Steps }) ->
	spawn(fun() -> ar_events:send(nonce_limiter, {valid, H}) end);
request_validation_same_step_number(H, _Info, _PrevInfo) ->
	spawn(fun() -> ar_events:send(nonce_limiter, {invalid, H, 1}) end).
-else.
request_validation_same_step_number(H, _Info, _PrevInfo) ->
	spawn(fun() -> ar_events:send(nonce_limiter, {invalid, H, 1}) end).
-endif.

request_validation(H, #nonce_limiter_info{ global_step_number = N } = Info,
		#nonce_limiter_info{ global_step_number = N } = PrevInfo) ->
	request_validation_same_step_number(H, Info, PrevInfo);
%% The head of the steps list must equal the block's output.
request_validation(H, #nonce_limiter_info{ output = Output,
		steps = [Output | _] = StepsToValidate } = Info, PrevInfo) ->
	#nonce_limiter_info{ output = PrevOutput, global_step_number = PrevStepNumber,
			vdf_difficulty = PrevVDFDifficulty } = PrevInfo,
	#nonce_limiter_info{ output = Output, seed = Seed,
			vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty,
			partition_upper_bound = UpperBound,
			next_partition_upper_bound = NextUpperBound,
			global_step_number = StepNumber, steps = StepsToValidate } = Info,
	EntropyResetPoint = get_entropy_reset_point(PrevStepNumber, StepNumber),
	SessionKey = session_key(PrevInfo),
	%% The steps that fall at the intersection of the PrevStepNumber to StepNumber range
	%% and the SessionKey session.
	SessionSteps = gen_server:call(?MODULE, {get_session_steps, PrevStepNumber,
			StepNumber, SessionKey}, ?DEFAULT_CALL_TIMEOUT),
	NextSessionKey = session_key(Info),
	%% We need to validate all the steps from PrevStepNumber to StepNumber:
	%% PrevStepNumber <--------------------------------------------> StepNumber
	%% PrevOutput     x
	%%                              |----------------------| StepsToValidate
	%%                |-----------------------------------| SessionSteps
	%% StartStepNumber              x
	%% StartOutput                  x
	%%                              |-------------| ComputedSteps
	%%                              --------------> NumAlreadyComputed
	%% StartStepNumber2                           x
	%% StartOutput2                               x
	%%                                            |--------| RemainingStepsToValidate
	{StartStepNumber, StartOutput, ComputedSteps} =
		skip_already_computed_steps(PrevStepNumber, StepNumber, PrevOutput,
				StepsToValidate, SessionSteps),
	?LOG_INFO([{event, vdf_validation_start}, {block, ar_util:encode(H)},
			{session_key, encode_session_key(SessionKey)},
			{next_session_key, encode_session_key(NextSessionKey)},
			{prev_step_number, PrevStepNumber}, {step_number, StepNumber},
			{start_step_number, StartStepNumber},
			{step_count, StepNumber - PrevStepNumber},
			{steps, length(StepsToValidate)},
			{session_steps, length(SessionSteps)},
			{prev_vdf_difficulty, PrevVDFDifficulty},
			{vdf_difficulty, VDFDifficulty},
			{next_vdf_difficulty, NextVDFDifficulty},
			{pid, self()}]),
	case exclude_computed_steps_from_steps_to_validate(
			lists:reverse(StepsToValidate), ComputedSteps) of
		invalid ->
			%% The session steps contradict the steps reported by the block.
			ErrorID = dump_error({PrevStepNumber, StepNumber, StepsToValidate,
					SessionSteps}),
			?LOG_WARNING([{event, nonce_limiter_validation_failed},
					{step, exclude_computed_steps_from_steps_to_validate},
					{error_dump, ErrorID}]),
			spawn(fun() -> ar_events:send(nonce_limiter, {invalid, H, 2}) end);
		{[], NumAlreadyComputed} when StartStepNumber + NumAlreadyComputed
				== StepNumber ->
			%% We've already computed up to StepNumber, so we can use the checkpoints
			%% from the current session
			LastStepCheckpoints = get_step_checkpoints(StepNumber, SessionKey),
			Args = {StepNumber, SessionKey, NextSessionKey, Seed,
					UpperBound, NextUpperBound, VDFDifficulty, NextVDFDifficulty,
					SessionSteps, LastStepCheckpoints},
			gen_server:cast(?MODULE, {validated_steps, Args}),
			spawn(fun() -> ar_events:send(nonce_limiter, {valid, H}) end);
		{_, NumAlreadyComputed} when StartStepNumber + NumAlreadyComputed
				>= StepNumber ->
			%% Inconsistent shift: the computed prefix overruns StepNumber.
			ErrorID = dump_error({PrevStepNumber, StepNumber, StepsToValidate,
					SessionSteps, StartStepNumber, NumAlreadyComputed}),
			?LOG_WARNING([{event, nonce_limiter_validation_failed},
					{step, exclude_computed_steps_from_steps_to_validate_shift},
					{start_step_number, StartStepNumber},
					{shift2, NumAlreadyComputed},
					{error_dump, ErrorID}]),
			spawn(fun() -> ar_events:send(nonce_limiter, {invalid, H, 2}) end);
		{RemainingStepsToValidate, NumAlreadyComputed}
				when StartStepNumber + NumAlreadyComputed < StepNumber ->
			case ar_config:use_remote_vdf_server()
					and not ar_config:compute_own_vdf() of
				true ->
					%% Wait for our VDF server(s) to validate the remaining steps.
					%% Alternatively, the network may abandon this block.
					ar_nonce_limiter_client:maybe_request_sessions(SessionKey),
					spawn(fun() ->
						ar_events:send(nonce_limiter, {refuse_validation, H})
					end);
				false ->
					%% Validate the remaining steps.
					StartOutput2 =
						case NumAlreadyComputed of
							0 -> StartOutput;
							_ -> lists:nth(NumAlreadyComputed, ComputedSteps)
						end,
					spawn(fun() ->
						StartStepNumber2 = StartStepNumber + NumAlreadyComputed,
						{ok, Config} = arweave_config:get_env(),
						ThreadCount =
							Config#config.max_nonce_limiter_validation_thread_count,
						%% Use the reset-aware verifier only when the reset line
						%% falls strictly after the validation start.
						Result =
							case is_integer(EntropyResetPoint)
									andalso EntropyResetPoint > StartStepNumber2 of
								true ->
									catch verify(StartStepNumber2, StartOutput2,
											?VDF_CHECKPOINT_COUNT_IN_STEP,
											RemainingStepsToValidate,
											EntropyResetPoint,
											crypto:hash(sha256, Seed), ThreadCount,
											PrevVDFDifficulty, VDFDifficulty);
								_ ->
									catch verify_no_reset(StartStepNumber2,
											StartOutput2,
											?VDF_CHECKPOINT_COUNT_IN_STEP,
											RemainingStepsToValidate, ThreadCount,
											VDFDifficulty)
							end,
						case Result of
							{'EXIT', Exc} ->
								ErrorID = dump_error(
										{StartStepNumber2, StartOutput2,
											?VDF_CHECKPOINT_COUNT_IN_STEP,
											RemainingStepsToValidate,
											EntropyResetPoint,
											crypto:hash(sha256, Seed),
											ThreadCount, VDFDifficulty}),
								?LOG_ERROR([{event, nonce_limiter_validation_failed},
										{block, ar_util:encode(H)},
										{start_step_number, StartStepNumber2},
										{error_id, ErrorID},
										{prev_output, ar_util:encode(StartOutput2)},
										{exception, io_lib:format("~p", [Exc])}]),
								ar_events:send(nonce_limiter, {validation_error, H});
							false ->
								ar_events:send(nonce_limiter, {invalid, H, 3});
							{true, ValidatedSteps} ->
								AllValidatedSteps = ValidatedSteps ++ SessionSteps,
								%% The last_step_checkpoints in Info were validated as
								%% part of an earlier call to
								%% ar_block_pre_validator:pre_validate_nonce_limiter,
								%% so we can trust them here.
								LastStepCheckpoints = get_last_step_checkpoints(Info),
								Args = {StepNumber, SessionKey, NextSessionKey, Seed,
										UpperBound, NextUpperBound, VDFDifficulty,
										NextVDFDifficulty, AllValidatedSteps,
										LastStepCheckpoints},
								gen_server:cast(?MODULE, {validated_steps, Args}),
								ar_events:send(nonce_limiter, {valid, H})
						end
					end)
			end;
		Data ->
			ErrorID = dump_error(Data),
			ar_events:send(nonce_limiter, {validation_error, H}),
			?LOG_ERROR([{event, unexpected_error_during_nonce_limiter_validation},
					{error_id, ErrorID}])
	end;
request_validation(H, _Info, _PrevInfo) ->
	spawn(fun() -> ar_events:send(nonce_limiter, {invalid, H, 4}) end).

%% Fetch the (already pre-validated) last step checkpoints from the info record.
get_last_step_checkpoints(Info) ->
	Info#nonce_limiter_info.last_step_checkpoints.

%% @doc Return the block's nonce limiter info; for pre-2.6 blocks, initialize
%% one from the recent partition upper bound keyed by the block hash.
get_or_init_nonce_limiter_info(#block{ height = Height, indep_hash = H } = B) ->
	case Height >= ar_fork:height_2_6() of
		true ->
			B#block.nonce_limiter_info;
		false ->
			{Seed, PartitionUpperBound} =
					ar_node:get_recent_partition_upper_bound_by_prev_h(H),
			get_or_init_nonce_limiter_info(B, Seed, PartitionUpperBound)
	end.

%% @doc Same as above, but take the seed data from the given recent block index.
get_or_init_nonce_limiter_info(#block{ height = Height } = B, RecentBI) ->
	case Height >= ar_fork:height_2_6() of
		true ->
			B#block.nonce_limiter_info;
		false ->
			{Seed, PartitionUpperBound, _TXRoot} = lists:last(lists:sublist(RecentBI,
					?SEARCH_SPACE_UPPER_BOUND_DEPTH)),
			get_or_init_nonce_limiter_info(B, Seed, PartitionUpperBound)
	end.

%% @doc Apply the nonce limiter update provided by the configured trusted peer.
apply_external_update(Update, Peer) ->
	gen_server:call(?MODULE, {apply_external_update, Update, Peer},
			?DEFAULT_CALL_TIMEOUT).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	?LOG_INFO([{event, nonce_limiter_init}]),
	ok = ar_events:subscribe(node_state),
	State =
		case ar_node:is_joined() of
			true ->
				Blocks = get_blocks(),
				handle_initialized(Blocks, #state{});
			_ ->
				#state{}
		end,
	%% When we rely on a remote VDF server, start watching for its input.
	case ar_config:use_remote_vdf_server() and not ar_config:compute_own_vdf() of
		true ->
			gen_server:cast(?MODULE, check_external_vdf_server_input);
		false ->
			ok
	end,
	{ok, start_worker(State#state{ autocompute = ar_config:compute_own_vdf() })}.

%% Collect the current block plus up to the consensus window of ancestors
%% from the block cache.
get_blocks() ->
	B = ar_node:get_current_block(),
	[B | get_blocks(B#block.previous_block, 1)].

get_blocks(H, N) ->
	case N >= ar_block:get_consensus_window_size() of
		true ->
			[];
		false ->
			#block{} = B = ar_block_cache:get(block_cache, H),
			[B | get_blocks(B#block.previous_block, N + 1)]
	end.

handle_call(get_current_step_number, _From,
		#state{ current_session_key = undefined } = State) ->
	{reply, 0, State};
handle_call(get_current_step_number, _From, State) ->
	#state{ current_session_key = Key } = State,
	#vdf_session{ step_number = StepNumber } = get_session(Key, State),
	{reply, StepNumber, State};
handle_call({get_current_step_number, SessionKey}, _From, State) ->
	case get_session(SessionKey, State) of
		not_found ->
			{reply, not_found, State};
		#vdf_session{ step_number = StepNumber } ->
			{reply, StepNumber, State}
	end;
handle_call({get_latest_step_triplets, SessionKey, N}, _From, State) ->
	case get_session(SessionKey, State) of
		not_found ->
			{reply, [], State};
		#vdf_session{ step_number = StepNumber, steps = Steps,
				step_checkpoints_map = Map,
				upper_bound = UpperBound, next_upper_bound = NextUpperBound } ->
			{_, IntervalNumber, _} = SessionKey,
			IntervalStart = IntervalNumber * ar_nonce_limiter:get_reset_frequency(),
			ResetPoint = get_entropy_reset_point(IntervalStart, StepNumber),
			Triplets = get_triplets(StepNumber, Steps, ResetPoint, UpperBound,
					NextUpperBound, N),
			%% Steps without cached checkpoints are filtered out (and counted).
			{Triplets2, NSkipped} = filter_step_triplets_with_checkpoints(Triplets,
					Map),
			case NSkipped > 0 of
				true ->
					?LOG_INFO([{event, missing_step_checkpoints},
							{count, NSkipped}]);
				false ->
					ok
			end,
			{reply, Triplets2, State}
	end;
handle_call({get_step_checkpoints, StepNumber, SessionKey}, _From, State) ->
	case get_session(SessionKey, State) of
		not_found ->
			{reply, not_found, State};
		#vdf_session{ step_checkpoints_map = Map } ->
			{reply, maps:get(StepNumber, Map, not_found), State}
	end;
handle_call({get_seed, SessionKey}, _From, State) ->
	case get_session(SessionKey, State) of
		not_found ->
			{reply, not_found, State};
		#vdf_session{ seed = Seed } ->
			{reply, Seed, State}
	end;
handle_call({get_active_partition_upper_bound, StepNumber, SessionKey}, _From,
		State) ->
	case get_session(SessionKey, State) of
		not_found ->
			{reply, not_found, State};
		#vdf_session{ upper_bound = UpperBound,
				next_upper_bound = NextUpperBound } ->
			{_NextSeed, IntervalNumber, _NextVDFDifficulty} = SessionKey,
			IntervalStart = IntervalNumber * ar_nonce_limiter:get_reset_frequency(),
			%% Once past the reset point, the next upper bound is the active one.
			UpperBound2 =
				case get_entropy_reset_point(IntervalStart, StepNumber) of
					none ->
						UpperBound;
					_ ->
						NextUpperBound
				end,
			{reply, UpperBound2, State}
	end;
handle_call({get_steps, StartStepNumber, EndStepNumber, SessionKey}, _From, State) ->
	case get_steps2(StartStepNumber, EndStepNumber, SessionKey, State) of
		not_found ->
			{reply, not_found, State};
		Steps ->
			TakeN = min(?NONCE_LIMITER_MAX_CHECKPOINTS_COUNT,
					EndStepNumber - StartStepNumber),
			{reply, lists:sublist(Steps, TakeN), State}
	end;
%% @doc Get all the steps in the current session that fall between
%% StartStepNumber+1 and EndStepNumber (inclusive)
handle_call({get_session_steps, StartStepNumber, EndStepNumber, SessionKey}, _From,
		State) ->
	Session = get_session(SessionKey, State),
	{_, Steps} = get_step_range(Session, StartStepNumber + 1, EndStepNumber),
	{reply, Steps, State};
handle_call(get_steps, _From, #state{ current_session_key = undefined } = State) ->
	{reply, [], State};
handle_call(get_steps, _From, State) ->
	#state{ current_session_key = SessionKey } = State,
	#vdf_session{ step_number = StepNumber } = get_session(SessionKey, State),
	{reply, get_steps2(1, StepNumber, SessionKey, State), State};
handle_call({apply_external_update, Update, Peer}, _From, State) ->
	Now = os:system_time(millisecond),
	#nonce_limiter_update{ session_key = SessionKey } = Update,
	%% The client consults the latest session key by peer to decide whether to request the
	%% missing VDF session when we call ar_nonce_limiter_client:maybe_request_sessions/1
	%% during VDF validation.
	gen_server:cast(ar_nonce_limiter_client,
			{update_latest_session_key, Peer, SessionKey}),
	apply_external_update2(Update, State#state{ last_external_update = {Peer, Now} });
handle_call({get_session, SessionKey}, _From, State) ->
	{reply, get_session(SessionKey, State), State};
handle_call(get_current_session, _From, State) ->
	#state{ current_session_key = CurrentSessionKey } = State,
	{reply, {CurrentSessionKey, get_session(CurrentSessionKey, State)}, State};
handle_call(get_current_sessions, _From, State) ->
	#state{ current_session_key = CurrentSessionKey } = State,
	Session = get_session(CurrentSessionKey, State),
	PreviousSessionKey = Session#vdf_session.prev_session_key,
	case get_session(PreviousSessionKey, State) of
		not_found ->
			?LOG_DEBUG([{event, request_current_sessions_missing_previous_session},
					{current_session_key, encode_session_key(CurrentSessionKey)},
					{previous_session_key, encode_session_key(PreviousSessionKey)}]),
			{reply, [{CurrentSessionKey, Session}], State};
		PrevSession ->
			{reply, [{CurrentSessionKey, Session},
					{PreviousSessionKey, PrevSession}], State}
	end;
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% No external update received yet; poll again shortly.
handle_cast(check_external_vdf_server_input,
		#state{ last_external_update = {_, 0} } = State) ->
	ar_util:cast_after(1000, ?MODULE, check_external_vdf_server_input),
	{noreply, State};
handle_cast(check_external_vdf_server_input,
		#state{ last_external_update = {_, Time} } = State) ->
	Now = os:system_time(millisecond),
	%% Warn (and back off to a 30s poll) when no VDF server has spoken for >2s.
	case Now - Time > 2000 of
		true ->
			?LOG_WARNING([{event, no_message_from_any_vdf_servers},
					{last_message_seconds_ago, (Now - Time) div 1000}]),
			ar_util:cast_after(30000, ?MODULE, check_external_vdf_server_input);
		false ->
			ar_util:cast_after(1000, ?MODULE, check_external_vdf_server_input)
	end,
	{noreply, State};
handle_cast(initialized, State) ->
	gen_server:cast(?MODULE, schedule_step),
	case State#state.emit_initialized_event of
		true ->
			ar_events:send(nonce_limiter, initialized);
		false ->
			ok
	end,
	{noreply, State};
%% Replay the recent chain pairwise (oldest first), applying each tip.
handle_cast({initialize, [PrevB, B | Blocks]}, State) ->
	apply_chain(B#block.nonce_limiter_info, PrevB#block.nonce_limiter_info),
	gen_server:cast(?MODULE, {apply_tip, B, PrevB}),
	gen_server:cast(?MODULE, {initialize, [B | Blocks]}),
	{noreply, State};
handle_cast({initialize, _}, State) ->
	gen_server:cast(?MODULE, initialized),
	{noreply, State};
handle_cast({account_tree_initialized, Blocks}, State) ->
	{noreply, handle_initialized(
			lists:sublist(Blocks, ar_block:get_consensus_window_size()), State)};
handle_cast({apply_tip, B, PrevB}, State) ->
	{noreply, apply_tip2(B, PrevB, State)};
handle_cast({validated_steps, Args}, State) ->
	{StepNumber, SessionKey, NextSessionKey, Seed, UpperBound, NextUpperBound,
			VDFDifficulty, NextVDFDifficulty, Steps, LastStepCheckpoints} = Args,
	case get_session(SessionKey, State) of
		not_found ->
			%% The corresponding fork origin should have just dropped below the
			%% checkpoint height.
			?LOG_WARNING([{event, session_not_found_for_validated_steps},
					{session_key, encode_session_key(SessionKey)},
					{interval, element(2, SessionKey)},
					{vdf_difficulty, element(3, SessionKey)}]),
			{noreply, State};
		Session ->
			#vdf_session{ step_number = CurrentStepNumber } = Session,
			Session2 =
				case CurrentStepNumber < StepNumber of
					true ->
						%% Update the current Session with all the newly validated
						%% steps and as well as the checkpoints associated with step
						%% StepNumber. This branch occurs when a block is received
						%% that is ahead of us in the VDF chain.
						?LOG_DEBUG([{event, new_vdf_step},
								{source, validated_steps},
								{session_key, encode_session_key(SessionKey)},
								{step_number, StepNumber}]),
						{_, Steps2} = get_step_range(Steps, StepNumber,
								CurrentStepNumber + 1, StepNumber),
						update_session(Session, StepNumber,
								#{ StepNumber => LastStepCheckpoints }, Steps2,
								validated_steps);
					false ->
						Session
				end,
			State2 = cache_session(State, SessionKey, Session2),
			State3 = cache_block_session(State2, NextSessionKey, SessionKey, #{},
					Seed, UpperBound, NextUpperBound, VDFDifficulty,
					NextVDFDifficulty),
			{noreply, State3}
	end;
handle_cast(schedule_step, #state{ autocompute = false } = State) ->
	{noreply, State#state{ computing = false }};
handle_cast(schedule_step, State) ->
	{noreply, schedule_step(State#state{ computing = true })};
handle_cast(compute_step, State) ->
	{noreply, schedule_step(State)};
handle_cast(reset_and_pause, State) ->
	{noreply, State#state{ autocompute = false, computing = false,
			current_session_key = undefined, sessions = gb_sets:new(),
			session_by_key = #{} }};
handle_cast(turn_off_initialized_event, State) ->
	{noreply, State#state{ emit_initialized_event = false }};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.
handle_info({event, node_state, {new_tip, B, PrevB}}, State) ->
	{noreply, apply_tip(B, PrevB, State)};
handle_info({event, node_state, {checkpoint_block, _B}},
		#state{ current_session_key = undefined } = State) ->
	%% The server has been restarted after a crash and a base block has not been
	%% applied yet.
	{noreply, State};
handle_info({event, node_state, {checkpoint_block, B}}, State) ->
	case B#block.height < ar_fork:height_2_6() of
		true ->
			{noreply, State};
		false ->
			#state{ sessions = Sessions, session_by_key = SessionByKey,
					current_session_key = CurrentSessionKey } = State,
			StepNumber = ar_block:vdf_step_number(B),
			BaseInterval = StepNumber div ar_nonce_limiter:get_reset_frequency(),
			%% Drop sessions from intervals older than the checkpoint block's.
			{Sessions2, SessionByKey2} =
				prune_old_sessions(Sessions, SessionByKey, BaseInterval),
			%% Assert the current session survived pruning.
			true = maps:is_key(CurrentSessionKey, SessionByKey2),
			{noreply, State#state{ sessions = Sessions2,
					session_by_key = SessionByKey2 }}
	end;
handle_info({event, node_state, _}, State) ->
	{noreply, State};
handle_info({'DOWN', Ref, process, _, Reason},
		#state{ worker_monitor_ref = Ref } = State) ->
	%% Restart the VDF worker if it crashes.
	?LOG_WARNING([{event, nonce_limiter_worker_down},
			{reason, io_lib:format("~p", [Reason])}]),
	{noreply, start_worker(State)};
handle_info({computed, _Args}, #state{ current_session_key = undefined } = State) ->
	%% Practically, only happens in tests.
	?LOG_WARNING([{event, computed_without_current_session}]),
	{noreply, State};
handle_info({computed, Args}, State) ->
	#state{ current_session_key = CurrentSessionKey } = State,
	{StepNumber, PrevOutput, Output, Checkpoints, SessionKey} = Args,
	Session = get_session(CurrentSessionKey, State),
	%% Note: NextVDFDifficulty is bound twice below on purpose - the session's
	%% difficulty must match the one in the current session key.
	#vdf_session{ next_vdf_difficulty = NextVDFDifficulty,
			steps = [SessionOutput | _] } = Session,
	{NextSeed, IntervalNumber, NextVDFDifficulty} = CurrentSessionKey,
	IntervalStart = IntervalNumber * ar_nonce_limiter:get_reset_frequency(),
	SessionOutput2 = ar_nonce_limiter:maybe_add_entropy(
			SessionOutput, IntervalStart, StepNumber, NextSeed),
	%% Keep the computation loop going regardless of the outcome below.
	gen_server:cast(?MODULE, schedule_step),
	case {PrevOutput == SessionOutput2, SessionKey == CurrentSessionKey} of
		{true, false} ->
			?LOG_INFO([{event, received_computed_output_for_different_session_key}]),
			{noreply, State};
		{false, _} ->
			case ar_config:use_remote_vdf_server() of
				true ->
					ok;
				false ->
					?LOG_WARNING([{event, computed_for_outdated_key},
							{step_number, StepNumber},
							{output, ar_util:encode(Output)},
							{prev_output, ar_util:encode(PrevOutput)},
							{session_output, ar_util:encode(SessionOutput2)},
							{current_session_key,
									encode_session_key(CurrentSessionKey)},
							{session_key, encode_session_key(SessionKey)}])
			end,
			{noreply, State};
		{true, true} ->
			Session2 = update_session(Session, StepNumber,
					#{ StepNumber => Checkpoints }, [Output], computed_step),
			State2 = cache_session(State, CurrentSessionKey, Session2),
			?LOG_DEBUG([{event, new_vdf_step}, {source, computed},
					{session_key, encode_session_key(CurrentSessionKey)},
					{step_number, StepNumber}]),
			send_output(CurrentSessionKey, Session2),
			{noreply, State2}
	end;
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, #state{ worker = W }) ->
	W ! stop,
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.
%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Sessions are keyed by the next seed, the entropy reset interval number,
%% and the next VDF difficulty.
session_key(NextSeed, StepNumber, NextVDFDifficulty) ->
	{NextSeed, StepNumber div ar_nonce_limiter:get_reset_frequency(),
			NextVDFDifficulty}.

get_session(SessionKey, #state{ session_by_key = SessionByKey }) ->
	maps:get(SessionKey, SessionByKey, not_found).

%% Merge the new step checkpoints into the session (logging, but not rejecting,
%% any conflict with previously cached checkpoints - existing entries win) and
%% record the new steps.
update_session(Session, StepNumber, StepCheckpointsMap, Steps, Source) ->
	#vdf_session{ step_checkpoints_map = Map } = Session,
	case find_step_checkpoints_mismatch(StepCheckpointsMap, Map) of
		{true, MismatchStepNumber} ->
			?LOG_ERROR([{event, step_checkpoints_mismatch},
					{step_number, StepNumber},
					{mismatch_step_number, MismatchStepNumber},
					{source, Source}]);
		false ->
			false
	end,
	%% maps:merge/2 prefers the second map, so cached checkpoints take precedence.
	Map2 = maps:merge(StepCheckpointsMap, Map),
	update_session(Session#vdf_session{ step_checkpoints_map = Map2 },
			StepNumber, Steps).

%% Return {true, StepNumber} for the first step whose incoming checkpoints
%% differ from the cached ones; false when there is no conflict.
find_step_checkpoints_mismatch(StepCheckpointsMap, Map) ->
	maps:fold(
		fun(StepNumber, Checkpoints, Acc) ->
			case maps:get(StepNumber, Map, not_found) of
				not_found ->
					Acc;
				Checkpoints ->
					Acc;
				_Checkpoints2 ->
					{true, StepNumber}
			end
		end,
		false,
		StepCheckpointsMap).

%% Advance the session tip: new steps are prepended (steps are newest-first).
update_session(Session, StepNumber, Steps) ->
	#vdf_session{ steps = CurrentSteps } = Session,
	Session#vdf_session{ step_number = StepNumber, steps = Steps ++ CurrentSteps }.

%% Emit the freshly computed output, choosing the upper bound that is active
%% for this step (next_upper_bound once past the entropy reset point).
send_output(SessionKey, Session) ->
	{_, IntervalNumber, _} = SessionKey,
	#vdf_session{ step_number = StepNumber, steps = [Output | _] } = Session,
	IntervalStart = IntervalNumber * ar_nonce_limiter:get_reset_frequency(),
	UpperBound =
		case get_entropy_reset_point(IntervalStart, StepNumber) of
			none ->
				Session#vdf_session.upper_bound;
			_ ->
				Session#vdf_session.next_upper_bound
		end,
	ar_events:send(nonce_limiter,
			{computed_output, {SessionKey, StepNumber, Output, UpperBound}}).
%% Persist the given term to a fresh dump file under the data directory and
%% return the random identifier embedded in the file name.
dump_error(Data) ->
	{ok, Config} = arweave_config:get_env(),
	ErrorID = binary_to_list(ar_util:encode(crypto:strong_rand_bytes(8))),
	DumpPath = filename:join(Config#config.data_dir, "error_dump_" ++ ErrorID),
	file:write_file(DumpPath, term_to_binary(Data)),
	ErrorID.

%% @doc
%% PrevStepNumber <------------------------------------------------------> StepNumber
%% PrevOutput     x
%%                                        |----------------------| StepsToValidate
%%                |-------------------------------| NumStepsBefore
%%                |---------------------------------------------| SessionSteps
%%
%% Skip over the steps in the PrevStepNumber..StepNumber range that precede the
%% StepsToValidate list, taking them from the session cache instead of
%% recomputing them. Returns {StartStepNumber, StartOutput, ComputedSteps}
%% where ComputedSteps is oldest-first.
skip_already_computed_steps(PrevStepNumber, StepNumber, PrevOutput, StepsToValidate,
		SessionSteps) ->
	%% SessionSteps arrive newest-first; flip to oldest-first for positional lookup.
	OldestFirst = lists:reverse(SessionSteps),
	%% Number of steps in the range that fall before the beginning of
	%% StepsToValidate; we look those up among the session steps rather than
	%% recomputing them.
	NumStepsBefore = StepNumber - PrevStepNumber - length(StepsToValidate),
	CanSkip = NumStepsBefore > 0 andalso length(OldestFirst) >= NumStepsBefore,
	case CanSkip of
		true ->
			{PrevStepNumber + NumStepsBefore,
				lists:nth(NumStepsBefore, OldestFirst),
				lists:nthtail(NumStepsBefore, OldestFirst)};
		false ->
			{PrevStepNumber, PrevOutput, OldestFirst}
	end.

%% Entry point for the 4-arity worker below; starts with position 1 and a zero
%% overlap count.
exclude_computed_steps_from_steps_to_validate(StepsToValidate, ComputedSteps) ->
	exclude_computed_steps_from_steps_to_validate(StepsToValidate, ComputedSteps, 1, 0).
%% @doc Drop from StepsToValidate the leading steps that are already present in
%% ComputedSteps. Returns {RemainingStepsToValidate, NumAlreadyComputed}, or
%% invalid when an overlapping step disagrees between the two lists (see the
%% test cases in exclude_computed_steps_from_steps_to_validate_test/0).
exclude_computed_steps_from_steps_to_validate(StepsToValidate, [], _I, NumAlreadyComputed) ->
	{StepsToValidate, NumAlreadyComputed};
exclude_computed_steps_from_steps_to_validate(StepsToValidate, [_Step | ComputedSteps], I,
		NumAlreadyComputed) when I /= 1 ->
	%% NOTE(review): with the entry point calling with I = 1 and every other
	%% recursion passing I = 1, this clause looks unreachable - confirm before
	%% relying on the I counter.
	exclude_computed_steps_from_steps_to_validate(StepsToValidate, ComputedSteps, I + 1,
			NumAlreadyComputed);
exclude_computed_steps_from_steps_to_validate([Step], [Step | _ComputedSteps], _I,
		NumAlreadyComputed) ->
	%% Last step to validate matches the next computed step - nothing left.
	{[], NumAlreadyComputed + 1};
exclude_computed_steps_from_steps_to_validate([Step | StepsToValidate],
		[Step | ComputedSteps], _I, NumAlreadyComputed) ->
	exclude_computed_steps_from_steps_to_validate(StepsToValidate, ComputedSteps, 1,
			NumAlreadyComputed + 1);
exclude_computed_steps_from_steps_to_validate(_StepsToValidate, _ComputedSteps, _I,
		_NumAlreadyComputed) ->
	%% Overlapping steps disagree.
	invalid.

%% @doc Handle the initialized event: keep only blocks at or past fork 2.6 and
%% apply them (oldest first).
handle_initialized([B | Blocks], State) ->
	?LOG_INFO([{event, handle_initialized}, {module, ar_nonce_limiter},
			{blocks, length([B | Blocks])}]),
	Blocks2 = take_blocks_after_fork([B | Blocks]),
	handle_initialized2(lists:reverse(Blocks2), State).

%% @doc Keep the leading blocks whose successor height is at or past fork 2.6.
take_blocks_after_fork([#block{ height = Height } = B | Blocks]) ->
	case Height + 1 >= ar_fork:height_2_6() of
		true ->
			[B | take_blocks_after_fork(Blocks)];
		false ->
			[]
	end;
take_blocks_after_fork([]) ->
	[].

%% @doc Apply the oldest block as the base session, then asynchronously
%% initialize from the remaining blocks.
handle_initialized2([B | Blocks], State) ->
	State2 = apply_base_block(B, State),
	gen_server:cast(?MODULE, {initialize, [B | Blocks]}),
	State2.
%% @doc Create the initial VDF session from a block's nonce_limiter_info, make
%% it the current session, and cache it.
apply_base_block(B, State) ->
	#nonce_limiter_info{ seed = Seed, output = Output,
			partition_upper_bound = UpperBound,
			next_partition_upper_bound = NextUpperBound,
			global_step_number = StepNumber,
			last_step_checkpoints = LastStepCheckpoints,
			vdf_difficulty = VDFDifficulty,
			next_vdf_difficulty = NextVDFDifficulty } = B#block.nonce_limiter_info,
	Session = #vdf_session{ seed = Seed, step_number = StepNumber,
			upper_bound = UpperBound, next_upper_bound = NextUpperBound,
			vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty,
			step_checkpoints_map = #{ StepNumber => LastStepCheckpoints },
			steps = [Output] },
	SessionKey = session_key(B#block.nonce_limiter_info),
	?LOG_DEBUG([{event, new_vdf_step}, {source, base_block},
			{session_key, encode_session_key(SessionKey)},
			{step_number, StepNumber}]),
	State2 = set_current_session(State, SessionKey),
	cache_session(State2, SessionKey, Session).

%% Refuse a trusted join when the step gap exceeds the maximum checkpoint
%% count - applying it quickly would be impossible. Stops the node.
apply_chain(#nonce_limiter_info{ global_step_number = StepNumber },
		#nonce_limiter_info{ global_step_number = PrevStepNumber })
		when StepNumber - PrevStepNumber > ?NONCE_LIMITER_MAX_CHECKPOINTS_COUNT ->
	ar:console("Cannot do a trusted join - there are not enough checkpoints"
			" to apply quickly; step number: ~B, previous step number: ~B.",
			[StepNumber, PrevStepNumber]),
	%% Give the console message a moment to flush before stopping.
	timer:sleep(1000),
	init:stop(1);
%% @doc Apply the pre-validated / trusted nonce_limiter_info. Since the info is trusted
%% we don't validate it here.
apply_chain(Info, PrevInfo) ->
	#nonce_limiter_info{ global_step_number = PrevStepNumber } = PrevInfo,
	#nonce_limiter_info{ output = Output, seed = Seed,
			vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty,
			partition_upper_bound = UpperBound,
			next_partition_upper_bound = NextUpperBound,
			global_step_number = StepNumber, steps = Steps,
			last_step_checkpoints = LastStepCheckpoints } = Info,
	%% Assert the latest step equals the info's output.
	Output = hd(Steps),
	assert_step_count(StepNumber, PrevStepNumber, Steps),
	SessionKey = session_key(PrevInfo),
	NextSessionKey = session_key(Info),
	Args = {StepNumber, SessionKey, NextSessionKey, Seed, UpperBound, NextUpperBound,
			VDFDifficulty, NextVDFDifficulty, Steps, LastStepCheckpoints},
	gen_server:cast(?MODULE, {validated_steps, Args}).

-ifdef(LOCALNET).
%% LOCALNET relaxes the check: equal step numbers are allowed as long as at
%% least one step is present.
assert_step_count(StepNumber, PrevStepNumber, Steps) ->
	case StepNumber == PrevStepNumber of
		true ->
			true = length(Steps) > 0;
		false ->
			true = StepNumber - PrevStepNumber == length(Steps)
	end.
-else.
%% @doc Assert the number of steps matches the step-number gap exactly.
assert_step_count(StepNumber, PrevStepNumber, Steps) ->
	true = StepNumber - PrevStepNumber == length(Steps).
-endif.

%% @doc Apply a new tip block. Before fork 2.6 this is a no-op. Starts the VDF
%% computation loop on first use; the very first post-fork tip initializes the
%% base session.
apply_tip(#block{ height = Height } = B, PrevB, #state{ sessions = Sessions } = State) ->
	case Height + 1 < ar_fork:height_2_6() of
		true ->
			State;
		false ->
			State2 =
				case State#state.computing of
					false ->
						gen_server:cast(?MODULE, schedule_step),
						State#state{ computing = true };
					true ->
						State
				end,
			case gb_sets:is_empty(Sessions) of
				true ->
					%% Assert this is exactly the fork activation height.
					true = (Height + 1) == ar_fork:height_2_6(),
					State3 = apply_base_block(B, State2),
					State3;
				false ->
					apply_tip2(B, PrevB, State2)
			end
	end.
%% @doc Apply a tip block when sessions already exist: switch the current
%% session to the block's session key and cache the block's session data.
apply_tip2(B, PrevB, State) ->
	#nonce_limiter_info{ seed = Seed, partition_upper_bound = UpperBound,
			next_partition_upper_bound = NextUpperBound,
			global_step_number = StepNumber,
			last_step_checkpoints = LastStepCheckpoints,
			vdf_difficulty = VDFDifficulty,
			next_vdf_difficulty = NextVDFDifficulty } = B#block.nonce_limiter_info,
	SessionKey = session_key(B#block.nonce_limiter_info),
	PrevSessionKey = session_key(PrevB#block.nonce_limiter_info),
	State2 = set_current_session(State, SessionKey),
	State3 = cache_block_session(State2, SessionKey, PrevSessionKey,
			#{ StepNumber => LastStepCheckpoints }, Seed, UpperBound, NextUpperBound,
			VDFDifficulty, NextVDFDifficulty),
	State3.

%% @doc Remove sessions more than 10 intervals older than BaseInterval from
%% both the ordered set and the key map. The sessions set is ordered by
%% {Interval, NextSeed, NextVdfDifficulty}, so the smallest element is oldest.
prune_old_sessions(Sessions, SessionByKey, BaseInterval) ->
	{{Interval, NextSeed, NextVdfDifficulty}, Sessions2} = gb_sets:take_smallest(Sessions),
	SessionKey = {NextSeed, Interval, NextVdfDifficulty},
	case BaseInterval > Interval + 10 of
		true ->
			?LOG_DEBUG([{event, prune_old_vdf_session},
					{session_key, encode_session_key(SessionKey)}]),
			SessionByKey2 = maps:remove(SessionKey, SessionByKey),
			prune_old_sessions(Sessions2, SessionByKey2, BaseInterval);
		false ->
			%% Oldest remaining session is recent enough; stop pruning.
			{Sessions, SessionByKey}
	end.

%% @doc Spawn and monitor the high-priority VDF computation worker process.
start_worker(State) ->
	Worker = spawn(fun() -> process_flag(priority, high), worker() end),
	Ref = monitor(process, Worker),
	State#state{ worker = Worker, worker_monitor_ref = Ref }.

%% @doc Compute one VDF step, optionally double-checking against the legacy
%% implementation when double_check_nonce_limiter is enabled.
compute(StepNumber, PrevOutput, VDFDifficulty) ->
	{ok, Output, Checkpoints} = ar_vdf:compute2(StepNumber, PrevOutput, VDFDifficulty),
	debug_double_check(
		"compute",
		{ok, Output, Checkpoints},
		fun ar_vdf:compute_legacy/3,
		[StepNumber, PrevOutput, VDFDifficulty]).
%% @doc Verify a chain of VDF hashes that crosses one entropy reset point at
%% ResetStepNumber: validate the hashes before the reset with VDFDifficulty,
%% mix in ResetSeed, then validate the remainder with NextVDFDifficulty.
%% Returns {true, ValidatedSteps} (most-recent-first) or false.
verify(StartStepNumber, PrevOutput, NumCheckpointsBetweenHashes, Hashes, ResetStepNumber,
		ResetSeed, ThreadCount, VDFDifficulty, NextVDFDifficulty) ->
	{Result1, PrevOutput2, ValidatedSteps1} =
		case lists:sublist(Hashes, ResetStepNumber - StartStepNumber - 1) of
			[] ->
				%% No steps before the reset point; mix the seed directly into
				%% the previous output.
				{true, mix_seed2(PrevOutput, ResetSeed), []};
			Hashes1 ->
				case verify_no_reset(StartStepNumber, PrevOutput,
						NumCheckpointsBetweenHashes, Hashes1, ThreadCount,
						VDFDifficulty) of
					{true, ValidatedSteps} ->
						{true, mix_seed2(hd(ValidatedSteps), ResetSeed), ValidatedSteps};
					false ->
						{false, undefined, undefined}
				end
		end,
	case Result1 of
		false ->
			false;
		true ->
			%% Validate the steps at and after the reset point under the next
			%% VDF difficulty.
			Hashes2 = lists:nthtail(ResetStepNumber - StartStepNumber - 1, Hashes),
			case verify_no_reset(ResetStepNumber - 1, PrevOutput2,
					NumCheckpointsBetweenHashes, Hashes2, ThreadCount,
					NextVDFDifficulty) of
				{true, ValidatedSteps2} ->
					{true, ValidatedSteps2 ++ ValidatedSteps1};
				false ->
					false
			end
	end.

%% @doc Verify a chain of VDF hashes with no entropy reset in the range. The
%% reset arguments passed to ar_vdf:verify2/8 are a zero step number and random
%% garbage seed so no reset can match.
verify_no_reset(StartStepNumber, PrevOutput, NumCheckpointsBetweenHashes, Hashes,
		ThreadCount, VDFDifficulty) ->
	Garbage = crypto:strong_rand_bytes(32),
	Result = ar_vdf:verify2(StartStepNumber, PrevOutput, NumCheckpointsBetweenHashes,
			Hashes, 0, Garbage, ThreadCount, VDFDifficulty),
	debug_double_check(
		"verify_no_reset",
		Result,
		fun ar_vdf:debug_sha_verify_no_reset/6,
		[StartStepNumber, PrevOutput, NumCheckpointsBetweenHashes, Hashes, ThreadCount,
				VDFDifficulty]).

%% @doc The VDF computation loop: compute requested steps (timed via the
%% vdf_step_time_milliseconds histogram) and reply to the caller, until told
%% to stop.
worker() ->
	receive
		{compute, {StepNumber, PrevOutput, VDFDifficulty, SessionKey}, From} ->
			{ok, Output, Checkpoints} = prometheus_histogram:observe_duration(
					vdf_step_time_milliseconds, [],
					fun() -> compute(StepNumber, PrevOutput, VDFDifficulty) end),
			From ! {computed, {StepNumber, PrevOutput, Output, Checkpoints, SessionKey}},
			worker();
		stop ->
			ok
	end.

%% @doc Get all the steps that fall between StartStepNumber and EndStepNumber, traversing
%% multiple sessions if needed.
%% @doc Collect the steps in (StartStepNumber, EndStepNumber], walking back
%% through prev_session_key links when the current session does not hold them
%% all. Returns the steps (most-recent-first) or not_found.
get_steps2(StartStepNumber, EndStepNumber, SessionKey, State) ->
	case get_session(SessionKey, State) of
		#vdf_session{ step_number = StepNumber, prev_session_key = PrevSessionKey } = Session
				when StepNumber >= EndStepNumber ->
			%% Get the steps within the current session that fall within the StartStepNumber+1
			%% and EndStepNumber (inclusive) range.
			{_, Steps} = get_step_range(Session, StartStepNumber + 1, EndStepNumber),
			TotalCount = EndStepNumber - StartStepNumber - 1,
			Count = length(Steps),
			%% If we haven't found all the steps, recurse into the previous session.
			case TotalCount > Count of
				true ->
					case get_steps2(StartStepNumber, EndStepNumber - Count,
							PrevSessionKey, State) of
						not_found ->
							not_found;
						PrevSteps ->
							Steps ++ PrevSteps
					end;
				false ->
					Steps
			end;
		_ ->
			not_found
	end.

%% @doc Ask the worker to compute the next step of the current session.
%% Entropy from NextSeed is mixed into the previous output at reset points,
%% and the next VDF difficulty applies from the reset point on.
schedule_step(State) ->
	#state{ current_session_key = {NextSeed, IntervalNumber, NextVDFDifficulty} = Key,
			worker = Worker } = State,
	#vdf_session{ step_number = PrevStepNumber, vdf_difficulty = VDFDifficulty,
			next_vdf_difficulty = NextVDFDifficulty,
			steps = Steps } = get_session(Key, State),
	PrevOutput = hd(Steps),
	StepNumber = PrevStepNumber + 1,
	IntervalStart = IntervalNumber * ar_nonce_limiter:get_reset_frequency(),
	PrevOutput2 = ar_nonce_limiter:maybe_add_entropy(
			PrevOutput, IntervalStart, StepNumber, NextSeed),
	VDFDifficulty2 =
		case get_entropy_reset_point(IntervalStart, StepNumber) of
			none ->
				VDFDifficulty;
			_ ->
				?LOG_DEBUG([{event, entropy_reset_point_found},
						{step_number, StepNumber},
						{interval_start, IntervalStart},
						{vdf_difficulty, VDFDifficulty},
						{next_vdf_difficulty, NextVDFDifficulty},
						{session_key, encode_session_key(Key)}]),
				NextVDFDifficulty
		end,
	Worker ! {compute, {StepNumber, PrevOutput2, VDFDifficulty2, Key}, self()},
	State.
%% @doc Construct the nonce_limiter_info for the first post-2.6 block (the
%% initial output is sha256 of the seed); undefined for any other height.
get_or_init_nonce_limiter_info(#block{ height = Height } = B, Seed, PartitionUpperBound) ->
	NextSeed = B#block.indep_hash,
	NextPartitionUpperBound = B#block.weave_size,
	case Height + 1 == ar_fork:height_2_6() of
		true ->
			Output = crypto:hash(sha256, Seed),
			#nonce_limiter_info{ output = Output, seed = Seed, next_seed = NextSeed,
					partition_upper_bound = PartitionUpperBound,
					next_partition_upper_bound = NextPartitionUpperBound };
		false ->
			undefined
	end.

%% @doc Apply a VDF update pushed or pulled from a VDF server. Dispatch on
%% whether we already track the session and whether we are behind the server.
apply_external_update2(Update, State) ->
	#state{ last_external_update = {Peer, _} } = State,
	#nonce_limiter_update{ session_key = SessionKey,
			session = #vdf_session{ step_number = StepNumber } } = Update,
	case get_session(SessionKey, State) of
		not_found ->
			apply_external_update_session_not_found(Update, State);
		#vdf_session{ step_number = CurrentStepNumber } = CurrentSession ->
			case CurrentStepNumber >= StepNumber of
				true ->
					%% Inform the peer we are ahead.
					case CurrentStepNumber > StepNumber of
						true ->
							?LOG_DEBUG([{event, apply_external_vdf},
									{result, ahead_of_server},
									{vdf_server, ar_util:format_peer(Peer)},
									{session_key, encode_session_key(SessionKey)},
									{client_step_number, CurrentStepNumber},
									{server_step_number, StepNumber}]);
						false ->
							ok
					end,
					{reply, #nonce_limiter_update_response{
							step_number = CurrentStepNumber }, State};
				false ->
					apply_external_update3(Update, CurrentSession, State)
			end
	end.

%% @doc Handle an external update for a session we have not cached yet.
%% Partial updates are rejected (the server will re-send the full session);
%% full-session updates are applied, skipping steps already applied as part of
%% the previous session.
apply_external_update_session_not_found(Update, State) ->
	#state{ last_external_update = {Peer, _} } = State,
	#nonce_limiter_update{ session_key = SessionKey,
			session = #vdf_session{ prev_session_key = PrevSessionKey,
					step_number = StepNumber } = Session,
			is_partial = IsPartial } = Update,
	{_SessionSeed, SessionInterval, _SessionVDFDifficulty} = SessionKey,
	case IsPartial of
		true ->
			%% Inform the peer we have not initialized the corresponding session yet.
			?LOG_DEBUG([{event, apply_external_vdf},
					{result, session_not_found},
					{vdf_server, ar_util:format_peer(Peer)},
					{is_partial, IsPartial},
					{session_key, encode_session_key(SessionKey)},
					{server_step_number, StepNumber}]),
			{reply, #nonce_limiter_update_response{ session_found = false }, State};
		false ->
			%% Handle the case where the VDF server has processed a block and re-allocated
			%% steps from the previous session to the new session. In this case we only
			%% want to apply new steps - steps in Session that weren't already applied as
			%% part of PrevSession.
			%% Start after the last step of the previous session
			RangeStart =
				case get_session(PrevSessionKey, State) of
					not_found ->
						0;
					PrevSession ->
						PrevSession#vdf_session.step_number + 1
				end,
			%% But start no later than the beginning of the session 2 after PrevSession.
			%% This is because the steps in that session - which may have been previously
			%% computed - have now been invalidated.
			NextSessionStart =
					(SessionInterval + 1) * ar_nonce_limiter:get_reset_frequency(),
			{_, Steps} = get_step_range(
					Session, min(RangeStart, NextSessionStart), StepNumber),
			State2 = apply_external_update4(State, SessionKey, Session, Steps),
			{reply, ok, State2}
	end.

%% @doc Apply an external update to a session we track and are behind on.
%% If the update's steps overlap our latest step we merge the new suffix;
%% otherwise we either report the gap (partial update) or take the missing
%% range from the full session.
apply_external_update3(Update, CurrentSession, State) ->
	#state{ last_external_update = {Peer, _} } = State,
	#nonce_limiter_update{ session_key = SessionKey,
			session = #vdf_session{ step_checkpoints_map = StepCheckpointsMap,
					step_number = StepNumber, steps = Steps } = Session,
			is_partial = IsPartial } = Update,
	#vdf_session{ step_number = CurrentStepNumber } = CurrentSession,
	%% CurrentStepNumber < StepNumber by construction.
	StepCount = length(Steps),
	StartStepNumber = StepNumber - StepCount,
	case CurrentStepNumber >= StartStepNumber of
		true ->
			%% Take only the steps we have not applied yet.
			Steps2 = lists:sublist(Steps,
					StepNumber - max(CurrentStepNumber, StartStepNumber)),
			CurrentSession2 = update_session(CurrentSession, StepNumber,
					StepCheckpointsMap, Steps2, apply_external_update),
			State2 = apply_external_update4(State, SessionKey, CurrentSession2, Steps2),
			{reply, ok, State2};
		false ->
			case IsPartial of
				true ->
					%% Inform the peer we miss some steps.
					?LOG_DEBUG([{event, apply_external_vdf},
							{result, missing_steps},
							{vdf_server, ar_util:format_peer(Peer)},
							{is_partial, IsPartial},
							{session_key, encode_session_key(SessionKey)},
							{client_step_number, CurrentStepNumber},
							{server_step_number, StepNumber}]),
					{reply, #nonce_limiter_update_response{
							step_number = CurrentStepNumber }, State};
				false ->
					%% Handle the case where the VDF client has dropped off the
					%% network briefly and the VDF server has advanced several
					%% steps within the same session. In this case the client has
					%% noticed the gap and requested the full VDF session be sent -
					%% which may contain previously processed steps in addition to
					%% the missing ones.
					%%
					%% To avoid processing those steps twice, the client grabs
					%% CurrentStepNumber (our most recently processed step number)
					%% and ignores it and any lower steps found in Session.
					{_, Steps} = get_step_range(Session, CurrentStepNumber + 1,
							StepNumber),
					State2 = apply_external_update4(State, SessionKey, Session, Steps),
					{reply, ok, State2}
			end
	end.

%% Note: we do not take the VDF steps from Session but accept them separately in Steps,
%% where only unique steps are included to ensure that VDF steps are only processed once.
%% In the first place, it is important for avoiding extra mining work.
%% @doc Cache the updated session and emit computed_output events for the
%% (unique, not-yet-processed) Steps. See the note above this function's
%% definition about why Steps is passed separately from Session.
apply_external_update4(State, SessionKey, Session, Steps) ->
	#state{ last_external_update = {Peer, _} } = State,
	?LOG_DEBUG([{event, new_vdf_step}, {source, apply_external_vdf},
			{vdf_server, ar_util:format_peer(Peer)},
			{session_key, encode_session_key(SessionKey)},
			{step_number, Session#vdf_session.step_number},
			{length, length(Steps)}]),
	State2 = cache_session(State, SessionKey, Session),
	send_events_for_external_update(SessionKey,
			Session#vdf_session{ steps = Steps }),
	State2.

%% @doc Returns a sub-range of steps out of a larger list of steps. This is
%% primarily used to manage "overflow" steps.
%%
%% Between blocks nodes will add all computed VDF steps to the same session -
%% *even if* the new steps have crossed the entropy reset line and therefore
%% could be added to a new session (i.e. "overflow steps"). Once a block is
%% processed the node will open a new session and re-allocate all the steps past
%% the entropy reset line to that new session. However, any steps that have crossed
%% *TWO* entropy reset lines are no longer valid (the seed they were generated with
%% has changed with the arrival of a new block)
%%
%% Note: This overlap in session caching is intentional. The intention is to
%% quickly access the steps when validating B1 -> reset line -> B2 given the
%% current fork of B1 -> B2' -> reset line -> B3 i.e. we can query all steps by
%% B1.next_seed even though on our fork the reset line determined a different
%% next_seed for the latest session.
get_step_range_from_interval(Session, SessionInterval, ResetFrequency) ->
	SessionStart = SessionInterval * ResetFrequency,
	SessionEnd = (SessionInterval + 1) * ResetFrequency - 1,
	get_step_range(Session, SessionStart, SessionEnd).

%% @doc Like get_step_range/4 but takes a #vdf_session{} (or not_found).
get_step_range(not_found, _RangeStart, _RangeEnd) ->
	{0, []};
get_step_range(Session, RangeStart, RangeEnd) ->
	#vdf_session{ step_number = StepNumber, steps = Steps } = Session,
	get_step_range(Steps, StepNumber, RangeStart, RangeEnd).
%% @doc Intersect the step list (most-recent-first, ending at StepNumber) with
%% the inclusive range [RangeStart, RangeEnd]. Returns {HighestStepInRange,
%% StepsInRange} or {0, []} when the intersection is empty.
get_step_range([], _StepNumber, _RangeStart, _RangeEnd) ->
	{0, []};
get_step_range(_Steps, _StepNumber, RangeStart, RangeEnd) when RangeStart > RangeEnd ->
	%% Empty (inverted) range.
	{0, []};
get_step_range(_Steps, StepNumber, RangeStart, _RangeEnd) when StepNumber < RangeStart ->
	%% All steps lie below the range.
	{0, []};
get_step_range(Steps, StepNumber, _RangeStart, RangeEnd)
		when StepNumber - length(Steps) + 1 > RangeEnd ->
	%% All steps lie above the range.
	{0, []};
get_step_range(Steps, StepNumber, RangeStart, RangeEnd) ->
	%% The earliest step number represented in Steps.
	EarliestStep = StepNumber - length(Steps) + 1,
	%% Clip both range ends to what Steps actually covers.
	ClippedStart = max(RangeStart, EarliestStep),
	ClippedEnd = min(StepNumber, RangeEnd),
	%% Drop the steps above the clipped end of the range, then keep exactly
	%% the steps down to the clipped start.
	InRangeAndBelow =
		case StepNumber > ClippedEnd of
			true ->
				lists:nthtail(StepNumber - ClippedEnd, Steps);
			false ->
				Steps
		end,
	{ClippedEnd, lists:sublist(InRangeAndBelow, ClippedEnd - ClippedStart + 1)}.

%% @doc Record SessionKey as the current session, logging the transition.
set_current_session(State, SessionKey) ->
	?LOG_DEBUG([{event, set_current_session},
			{new_session_key, encode_session_key(SessionKey)},
			{old_session_key, encode_session_key(State#state.current_session_key)}]),
	State#state{ current_session_key = SessionKey }.

%% @doc Update the VDF session cache based on new info from a validated block.
%% @doc Cache the session referenced by a validated block. If we have not seen
%% the session yet, construct it by carving the matching interval's steps out
%% of the previous session (the "overflow" steps; see get_step_range_from_interval).
cache_block_session(State, SessionKey, PrevSessionKey, StepCheckpointsMap, Seed,
		UpperBound, NextUpperBound, VDFDifficulty, NextVDFDifficulty) ->
	Session =
		case get_session(SessionKey, State) of
			not_found ->
				{_, Interval, NextVDFDifficulty} = SessionKey,
				PrevSession = get_session(PrevSessionKey, State),
				{StepNumber, Steps} = get_step_range_from_interval(
						PrevSession, Interval, ar_nonce_limiter:get_reset_frequency()),
				?LOG_DEBUG([{event, new_vdf_step}, {source, block},
						{session_key, encode_session_key(SessionKey)},
						{step_number, StepNumber}]),
				#vdf_session{ step_number = StepNumber, seed = Seed,
						upper_bound = UpperBound, next_upper_bound = NextUpperBound,
						prev_session_key = PrevSessionKey,
						vdf_difficulty = VDFDifficulty,
						next_vdf_difficulty = NextVDFDifficulty,
						step_checkpoints_map = StepCheckpointsMap,
						steps = Steps };
			ExistingSession ->
				ExistingSession
		end,
	cache_session(State, SessionKey, Session).

%% @doc Store Session under SessionKey and register the key in the ordered
%% sessions set; update the Prometheus gauges when it is the current session.
cache_session(State, SessionKey, Session) ->
	#state{ current_session_key = CurrentSessionKey, session_by_key = SessionByKey,
			sessions = Sessions } = State,
	{NextSeed, Interval, NextVDFDifficulty} = SessionKey,
	maybe_set_vdf_metrics(SessionKey, CurrentSessionKey, Session),
	SessionByKey2 = maps:put(SessionKey, Session, SessionByKey),
	%% If Session exists, then {Interval, NextSeed} will already exist in the Sessions set and
	%% gb_sets:add_element will not cause a change.
	Sessions2 = gb_sets:add_element({Interval, NextSeed, NextVDFDifficulty}, Sessions),
	State#state{ sessions = Sessions2, session_by_key = SessionByKey2 }.

%% @doc Set the vdf_step and vdf_difficulty gauges, but only for the current
%% session - metrics should track the tip of the chain we follow.
maybe_set_vdf_metrics(SessionKey, CurrentSessionKey, Session) ->
	case SessionKey == CurrentSessionKey of
		true ->
			#vdf_session{ step_number = StepNumber, vdf_difficulty = VDFDifficulty,
					next_vdf_difficulty = NextVDFDifficulty } = Session,
			prometheus_gauge:set(vdf_step, StepNumber),
			prometheus_gauge:set(vdf_difficulty, [current], VDFDifficulty),
			prometheus_gauge:set(vdf_difficulty, [next], NextVDFDifficulty);
		false ->
			ok
	end.
%% @doc Emit a computed_output event for every step in the session, newest
%% first, decrementing the step number as we walk down the list.
send_events_for_external_update(_SessionKey, #vdf_session{ steps = [] }) ->
	ok;
send_events_for_external_update(SessionKey, Session) ->
	send_output(SessionKey, Session),
	#vdf_session{ step_number = StepNumber, steps = [_ | RemainingSteps] } = Session,
	send_events_for_external_update(SessionKey,
			Session#vdf_session{ step_number = StepNumber - 1, steps = RemainingSteps }).

%% @doc When double_check_nonce_limiter is enabled, re-run the computation via
%% Func and compare; on mismatch, dump the arguments to a file and log an
%% error with the dump's report ID. Always returns the original Result.
debug_double_check(Label, Result, Func, Args) ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(double_check_nonce_limiter, Config#config.enable) of
		false ->
			Result;
		true ->
			Check = apply(Func, Args),
			case Result == Check of
				true ->
					Result;
				false ->
					ID = ar_util:encode(crypto:strong_rand_bytes(16)),
					file:write_file(Label ++ "_" ++ binary_to_list(ID),
							term_to_binary(Args)),
					Event = "nonce_limiter_" ++ Label ++ "_mismatch",
					?LOG_ERROR([{event, list_to_atom(Event)}, {report_id, ID}]),
					Result
			end
	end.

%% @doc Keep the leading triplets whose output is NOT in LowerBounds; stop at
%% (and drop) the first triplet whose output is a lower bound, truncating the
%% rest of the list (see filter_step_triplets_test/0).
filter_step_triplets([], _LowerBounds) ->
	[];
filter_step_triplets([{O, _, _} = Triplet | Triplets], LowerBounds) ->
	case lists:member(O, LowerBounds) of
		true ->
			[];
		false ->
			[Triplet | filter_step_triplets(Triplets, LowerBounds)]
	end.

%% @doc Build up to N {Step, StepNumber, UpperBound} triplets walking down from
%% StepNumber. The upper bound switches to NextUpperBound for steps at or past
%% ResetPoint (when ResetPoint is not none).
get_triplets(_StepNumber, _Steps, _ResetPoint, _UpperBound, _NextUpperBound, 0) ->
	[];
get_triplets(_StepNumber, [], _ResetPoint, _UpperBound, _NextUpperBound, _N) ->
	[];
get_triplets(StepNumber, [Step | Steps], ResetPoint, UpperBound, NextUpperBound, N) ->
	U =
		case ResetPoint of
			none ->
				UpperBound;
			_ when StepNumber >= ResetPoint ->
				NextUpperBound;
			_ ->
				UpperBound
		end,
	[{Step, StepNumber, U} | get_triplets(StepNumber - 1, Steps, ResetPoint, UpperBound,
			NextUpperBound, N - 1)].

%% @doc Keep only the triplets whose step number has cached checkpoints in Map;
%% also return how many were skipped for lacking checkpoints.
filter_step_triplets_with_checkpoints([], _Map) ->
	{[], 0};
filter_step_triplets_with_checkpoints([{_, StepNumber, _} = Triplet | Triplets], Map) ->
	{List, NSkipped} = filter_step_triplets_with_checkpoints(Triplets, Map),
	case maps:is_key(StepNumber, Map) of
		true ->
			{[Triplet | List], NSkipped};
		false ->
			{List, NSkipped + 1}
	end.
%%%===================================================================
%%% Tests.
%%%===================================================================

%% Table-driven test of exclude_computed_steps_from_steps_to_validate/2:
%% matching prefixes are dropped and counted; disagreements yield invalid.
exclude_computed_steps_from_steps_to_validate_test() ->
	C1 = crypto:strong_rand_bytes(32),
	C2 = crypto:strong_rand_bytes(32),
	C3 = crypto:strong_rand_bytes(32),
	C4 = crypto:strong_rand_bytes(32),
	C5 = crypto:strong_rand_bytes(32),
	Cases = [
		{{[], []}, {[], 0}, "Case 1"},
		{{[C1], []}, {[C1], 0}, "Case 2"},
		{{[C1], [C1]}, {[], 1}, "Case 3"},
		{{[C1, C2], []}, {[C1, C2], 0}, "Case 4"},
		{{[C1, C2], [C2]}, invalid, "Case 5"},
		{{[C1, C2], [C1]}, {[C2], 1}, "Case 6"},
		{{[C1, C2], [C2, C1]}, invalid, "Case 7"},
		{{[C1, C2], [C1, C2]}, {[], 2}, "Case 8"},
		{{[C1, C2], [C1, C2, C3, C4, C5]}, {[], 2}, "Case 9"}
	],
	test_exclude_computed_steps_from_steps_to_validate(Cases).

%% Run each {Input, Expected, Title} case through the function under test.
test_exclude_computed_steps_from_steps_to_validate([Case | Cases]) ->
	{Input, Expected, Title} = Case,
	{StepsToValidate, ComputedSteps} = Input,
	Got = exclude_computed_steps_from_steps_to_validate(StepsToValidate, ComputedSteps),
	?assertEqual(Expected, Got, Title),
	test_exclude_computed_steps_from_steps_to_validate(Cases);
test_exclude_computed_steps_from_steps_to_validate([]) ->
	ok.

%% Verify entropy reset points land on multiples of the reset frequency that
%% fall strictly after the start and at or before the end of the range.
get_entropy_reset_point_test() ->
	ResetFreq = ar_nonce_limiter:get_reset_frequency(),
	?assertEqual(none, get_entropy_reset_point(1, ResetFreq - 1)),
	?assertEqual(ResetFreq, get_entropy_reset_point(1, ResetFreq)),
	?assertEqual(none, get_entropy_reset_point(ResetFreq, ResetFreq + 1)),
	?assertEqual(2 * ResetFreq, get_entropy_reset_point(ResetFreq, ResetFreq * 2)),
	?assertEqual(ResetFreq * 3,
			get_entropy_reset_point(ResetFreq * 3 - 1, ResetFreq * 3 + 2)),
	?assertEqual(ResetFreq * 4,
			get_entropy_reset_point(ResetFreq * 3, ResetFreq * 4 + 1)).

reorg_after_join_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_reorg_after_join/0}.
%% Re-join on a peer that was restarted from genesis after the node saw a
%% block; the node must follow the peer's new (reorganized) chain to height 2.
test_reorg_after_join() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(),
	ar_test_node:assert_wait_until_height(peer1, 1),
	ar_test_node:disconnect_from(peer1),
	%% Restart the peer from genesis, wiping its old chain.
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:join_on(#{ node => main, join_on => peer1 }),
	ar_test_node:mine(peer1),
	ar_test_node:assert_wait_until_height(peer1, 1),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(main, 2).

reorg_after_join2_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_reorg_after_join2/0}.

%% Like test_reorg_after_join/0, but the node mines on top of the joined chain
%% before the peer is restarted and produces a longer competing fork.
test_reorg_after_join2() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(),
	ar_test_node:assert_wait_until_height(peer1, 1),
	ar_test_node:join_on(#{ node => main, join_on => peer1 }),
	ar_test_node:mine(),
	ar_test_node:wait_until_height(main, 2),
	ar_test_node:disconnect_from(peer1),
	%% Restart the peer from genesis and let it build a fresh fork.
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:mine(peer1),
	ar_test_node:assert_wait_until_height(peer1, 1),
	ar_test_node:mine(peer1),
	ar_test_node:assert_wait_until_height(peer1, 2),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(main, 3).
%% Exercise get_step_range/4 and get_step_range_from_interval/3 over disjoint,
%% clipped, and degenerate ranges. Step lists are most-recent-first.
get_step_range_test() ->
	?assertEqual(
		{0, []},
		get_step_range(lists:seq(9, 5, -1), 9, 0, 4),
		"Disjoint range A"
	),
	?assertEqual(
		{0, []},
		get_step_range(lists:seq(9, 5, -1), 9 , 10, 14),
		"Disjoint range B"
	),
	?assertEqual(
		{0, []},
		get_step_range([], 9, 0, 4),
		"Empty steps"
	),
	?assertEqual(
		{0, []},
		get_step_range(lists:seq(9, 5, -1), 9, 9, 5),
		"Invalid range"
	),
	?assertEqual(
		{9, [9, 8, 7, 6, 5]},
		get_step_range(lists:seq(9, 5, -1), 9, 5, 9),
		"Full intersection"
	),
	?assertEqual(
		{9, [9, 8, 7, 6, 5]},
		get_step_range(lists:seq(9, 5, -1), 9, 3, 9),
		"Clipped RangeStart"
	),
	?assertEqual(
		{9, [9, 8, 7, 6]},
		get_step_range(lists:seq(9, 5, -1), 9, 6, 12),
		"Clipped RangeEnd"
	),
	?assertEqual(
		{8, [8, 7]},
		get_step_range(lists:seq(20, 5, -1), 20, 7, 8),
		"Clipped Steps above"
	),
	?assertEqual(
		{9, [9, 8, 7, 6, 5]},
		get_step_range(lists:seq(9, 0, -1), 9, 5, 9),
		"Clipped Steps below"
	),
	?assertEqual(
		{6, [6]},
		get_step_range(lists:seq(9, 5, -1), 9, 6, 6),
		"Range length 1"
	),
	?assertEqual(
		{8, [8]},
		get_step_range([8], 8, 8, 8),
		"Steps length 1"
	),
	ResetFrequency = 5,
	?assertEqual(
		{9, [9, 8, 7, 6, 5]},
		get_step_range_from_interval(
			#vdf_session{ step_number = 12, steps = lists:seq(12, 0, -1) },
			1, ResetFrequency),
		"Session and Interval"
	),
	?assertEqual(
		{0, []},
		get_step_range_from_interval(not_found, 1, ResetFrequency),
		"not_found and Interval"
	),
	?assertEqual(
		{9, [9, 8, 7]},
		get_step_range(
			#vdf_session{ step_number = 12, steps = lists:seq(12, 0, -1) },
			7, 9),
		"Session and Range"
	),
	?assertEqual(
		{0, []},
		get_step_range(not_found, 7, 9),
		"not_found and Range"
	),
	ok.
%% Verify filter_step_triplets/2 truncates at the first lower-bound output.
filter_step_triplets_test() ->
	?assertEqual([], filter_step_triplets([], [a, b])),
	?assertEqual([], filter_step_triplets([{a, 1, s}], [a, b])),
	?assertEqual([], filter_step_triplets([{b, 1, s}], [a, b])),
	?assertEqual([], filter_step_triplets([{b, 1, s}, {x, 1, s}], [a, b])),
	?assertEqual([{y, 1, s}],
			filter_step_triplets([{y, 1, s}, {a, 1, s}, {x, 1, s}], [a, b])),
	?assertEqual([{y, 1, s}, {x, 1, s}],
			filter_step_triplets([{y, 1, s}, {x, 1, s}], [a, b])).

%% Verify get_triplets/6 pairs steps with descending step numbers and picks
%% the upper bound based on the reset point.
get_triplets_test() ->
	?assertEqual([], get_triplets(1, [a], none, 2, 3, 0)),
	?assertEqual([], get_triplets(2, [], 2, 4, 5, 2)),
	?assertEqual([{a, 1, 2}], get_triplets(1, [a], none, 2, 3, 2)),
	?assertEqual([{a, 1, 2}], get_triplets(1, [a], 2, 2, 3, 2)),
	?assertEqual([{a, 1, 3}], get_triplets(1, [a], 1, 2, 3, 2)),
	?assertEqual([{a, 2, 3}, {b, 1, 2}], get_triplets(2, [a, b], 2, 2, 3, 2)),
	?assertEqual([{a, 2, 3}, {b, 1, 2}], get_triplets(2, [a, b, c], 2, 2, 3, 2)),
	?assertEqual([{a, 3, 3}, {b, 2, 3}, {c, 1, 3}],
			get_triplets(3, [a, b, c], 0, 2, 3, 3)),
	?assertEqual([{a, 3, 2}, {b, 2, 2}, {c, 1, 2}],
			get_triplets(3, [a, b, c], none, 2, 3, 4)).


================================================
FILE: apps/arweave/src/ar_nonce_limiter_client.erl
================================================
%% @doc VDF client: pulls VDF updates from the configured trusted VDF servers
%% and feeds them to ar_nonce_limiter.
-module(ar_nonce_limiter_client).

-behaviour(gen_server).

-export([start_link/0, maybe_request_sessions/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-record(state, {
	%% Queue of raw (unresolved) trusted VDF server peers; rotated on failure
	%% and periodically.
	remote_servers,
	%% Last session key received from each resolved peer.
	latest_session_keys = #{},
	%% request_sessions is set to true when the node is unable to validate a block due
	%% to a gap in its cached step numbers. When true, the node will query the full
	%% session and previous session from a VDF server.
	request_sessions = false,
	%% When the head of remote_servers was last rotated to the back.
	latest_remote_server_rotation_timestamp = erlang:system_time(millisecond)
}).

%% How often to pull updates in the steady state.
-define(PULL_FREQUENCY_MS, 800).
%% How soon to re-try when we were already in sync with the server.
-define(NO_UPDATE_PULL_FREQUENCY_MS, 200).
%% Back-off delay after a failed fetch or peer-resolution error.
-define(PULL_THROTTLE_MS, 200).

-ifdef(AR_TEST).
-define(ROTATE_REMOTE_SERVERS_MS, 2_000).
-else.
-define(ROTATE_REMOTE_SERVERS_MS, 30_000).
-endif.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Look at the session key of the last update from the VDF server we are currently
%% working with and in case it does not match the given session key, request complete
%% sessions from this VDF server.
%%
%% The client may need this additional request around a VDF reset when a new session is
%% created but the previous session is not completed because the VDF server instantiated
%% the new session before sending the last computed output(s) of the previous session to
%% the client.
maybe_request_sessions(SessionKey) ->
	gen_server:cast(?MODULE, {maybe_request_sessions, SessionKey}).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	{ok, Config} = arweave_config:get_env(),
	Peers = Config#config.nonce_limiter_server_trusted_peers,
	case ar_config:use_remote_vdf_server() of
		false ->
			ok;
		true ->
			%% Resolve and cache all VDF server peers upfront so that pushes
			%% (POST /vdf) may be accepted even when pulling is disabled.
			resolve_server_peers(Peers),
			gen_server:cast(?MODULE, pull)
	end,
	{ok, #state{ remote_servers = queue:from_list(Peers) }}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Periodic pull loop. When pulling is enabled (or a session re-fetch was
%% requested) fetch an update; otherwise just keep peer DNS entries fresh.
handle_cast(pull, State = #state{ request_sessions = RequestSessions }) ->
	DoPull = (
		ar_config:pull_from_remote_vdf_server() orelse
		RequestSessions == true
	),
	case DoPull of
		true ->
			{Delay, State1} = do_pull(State),
			ar_util:cast_after(Delay, ?MODULE, pull),
			{noreply, State1};
		false ->
			%% Even when pulling is disabled, periodically re-resolve VDF server peers
			%% so that pushes (POST /vdf) continue to work (e.g., after DNS changes).
			{ok, Config} = arweave_config:get_env(),
			resolve_server_peers(Config#config.nonce_limiter_server_trusted_peers),
			ar_util:cast_after(?PULL_FREQUENCY_MS, ?MODULE, pull),
			{noreply, State}
	end;
%% If the latest session key from the current VDF server differs from the
%% given one, flag request_sessions so the next pull fetches full sessions.
handle_cast({maybe_request_sessions, SessionKey}, State) ->
	#state{ remote_servers = Q } = State,
	{{value, RawPeer}, _Q2} = queue:out(Q),
	RotatedServers = rotate_servers(Q),
	case ar_peers:resolve_and_cache_peer(RawPeer, vdf_server_peer) of
		{error, _} ->
			%% Push the peer to the back of the queue. We'll also wait and see if another
			%% `maybe_request_sessions` message comes in before we fetch the full session.
			{noreply, State#state{ remote_servers = RotatedServers }};
		{ok, Peer} ->
			case get_latest_session_key(Peer, State) of
				SessionKey ->
					%% No reason to make extra requests. And don't rotate the peers.
					{noreply, State};
				_ ->
					%% Ensure the current and previous sessions are fetched and applied on
					%% the next `pull` message.
					?LOG_DEBUG([{event, vdf_request_sessions},
							{peer, ar_util:format_peer(Peer)},
							{session_key,
								ar_nonce_limiter:encode_session_key(SessionKey)}]),
					{noreply, State#state{ request_sessions = true }}
			end
	end;
handle_cast({update_latest_session_key, Peer, SessionKey}, State) ->
	{noreply, update_latest_session_key(Peer, SessionKey, State)};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.
terminate(Reason, _State) ->
    ?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
    ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Resolve and cache every configured VDF server peer so incoming pushes
%% from these peers can be recognized.
resolve_server_peers(RawPeers) ->
    lists:foreach(
        fun(RawPeer) ->
            ar_peers:resolve_and_cache_peer(RawPeer, vdf_server_peer)
        end,
        RawPeers
    ).

%% Pull one VDF update from the server at the head of the queue.
%% Return {Delay, State} where Delay is how long to wait before the next pull;
%% on failure the queue is rotated so the next pull tries another server.
do_pull(State) ->
    #state{ remote_servers = Q } = State,
    RotatedServers = rotate_servers(Q),
    {RawPeer, State2} = get_raw_peer_and_update_remote_servers(State),
    case ar_peers:resolve_and_cache_peer(RawPeer, vdf_server_peer) of
        {error, _} ->
            ?LOG_WARNING([{event, failed_to_resolve_peer},
                    {raw_peer, io_lib:format("~p", [RawPeer])}]),
            %% Push the peer to the back of the queue.
            {?PULL_THROTTLE_MS, State#state{ remote_servers = RotatedServers }};
        {ok, Peer} ->
            case ar_http_iface_client:get_vdf_update(Peer) of
                {ok, Update} ->
                    #nonce_limiter_update{ session_key = SessionKey,
                            session = #vdf_session{
                                step_number = SessionStepNumber } } = Update,
                    State3 = update_latest_session_key(Peer, SessionKey, State2),
                    UpdateResponse = ar_nonce_limiter:apply_external_update(Update, Peer),
                    SessionFound =
                        case UpdateResponse of
                            #nonce_limiter_update_response{ session_found = false } ->
                                false;
                            _ ->
                                true
                        end,
                    RequestSessions = (State3#state.request_sessions == true
                            orelse not SessionFound),
                    case RequestSessions of
                        true ->
                            %% Fetch the complete current and previous sessions
                            %% before resuming regular pulls.
                            case fetch_and_apply_session_and_previous_session(Peer) of
                                {error, _} ->
                                    {?PULL_THROTTLE_MS, State3#state{
                                            remote_servers = RotatedServers }};
                                _ ->
                                    {?PULL_FREQUENCY_MS, State3#state{
                                            request_sessions = false }}
                            end;
                        false ->
                            case UpdateResponse of
                                ok ->
                                    {?PULL_FREQUENCY_MS, State3};
                                #nonce_limiter_update_response{ step_number = StepNumber }
                                        when StepNumber > SessionStepNumber ->
                                    %% We are ahead of the server - may be, it is not
                                    %% the fastest server in the list so try another one,
                                    %% if there are more servers in the configuration
                                    %% and they are not on timeout.
                                    {0, State3#state{ remote_servers = RotatedServers }};
                                #nonce_limiter_update_response{ step_number = StepNumber }
                                        when StepNumber == SessionStepNumber ->
                                    %% We are in sync with the server. Re-try soon.
                                    {?NO_UPDATE_PULL_FREQUENCY_MS, State3};
                                _ ->
                                    %% We have received a partial session, but there's a gap
                                    %% in the step numbers, e.g., the update we received is at
                                    %% step 100, but our last seen step was 90.
                                    case fetch_and_apply_session(Peer) of
                                        {error, _} ->
                                            {?PULL_THROTTLE_MS, State3#state{
                                                    remote_servers = RotatedServers }};
                                        _ ->
                                            {?PULL_FREQUENCY_MS, State3}
                                    end
                            end
                    end;
                {error, not_found} ->
                    ?LOG_WARNING([{event, failed_to_fetch_vdf_update},
                            {peer, ar_util:format_peer(Peer)},
                            {error, not_found}]),
                    %% The server might be restarting.
                    %% Try another one, if there are any.
                    {?PULL_THROTTLE_MS, State#state{ remote_servers = RotatedServers }};
                {error, Reason} ->
                    ?LOG_WARNING([{event, failed_to_fetch_vdf_update},
                            {peer, ar_util:format_peer(Peer)},
                            {error, io_lib:format("~p", [Reason])}]),
                    %% Try another server, if there are any.
                    {?PULL_THROTTLE_MS, State#state{ remote_servers = RotatedServers }}
            end
    end.

%% Return the head of the server queue; additionally rotate the queue when
%% ?ROTATE_REMOTE_SERVERS_MS elapsed since the last rotation.
get_raw_peer_and_update_remote_servers(State) ->
    #state{ remote_servers = Q,
            latest_remote_server_rotation_timestamp = Timestamp } = State,
    {{value, RawPeer}, Q2} = queue:out(Q),
    Now = erlang:system_time(millisecond),
    case Now < Timestamp + ?ROTATE_REMOTE_SERVERS_MS of
        true ->
            {RawPeer, State};
        false ->
            {RawPeer, State#state{
                latest_remote_server_rotation_timestamp = Now,
                remote_servers = queue:in(RawPeer, Q2) }}
    end.

%% Move the head of the queue to the back.
rotate_servers(Q) ->
    {{value, RawPeer}, Q2} = queue:out(Q),
    queue:in(RawPeer, Q2).
%% Fetch the latest session and its previous session from the given peer and
%% apply them (previous session first) via ar_nonce_limiter.
fetch_and_apply_session_and_previous_session(Peer) ->
    case ar_http_iface_client:get_vdf_session(Peer) of
        {ok, #nonce_limiter_update{
                session = #vdf_session{ prev_session_key = PrevSessionKey } } = Update} ->
            case ar_http_iface_client:get_previous_vdf_session(Peer) of
                {ok, #nonce_limiter_update{ session_key = PrevSessionKey } = Update2} ->
                    %% The fetched previous session matches prev_session_key of the
                    %% latest session; apply the older session first.
                    ar_nonce_limiter:apply_external_update(Update2, Peer),
                    ar_nonce_limiter:apply_external_update(Update, Peer);
                {ok, _} ->
                    %% The session should have just changed, retry.
                    fetch_and_apply_session_and_previous_session(Peer);
                {error, Reason} = Error ->
                    ?LOG_WARNING([{event, failed_to_fetch_previous_vdf_session},
                            {peer, ar_util:format_peer(Peer)},
                            {error, io_lib:format("~p", [Reason])}]),
                    Error
            end;
        {error, Reason2} = Error2 ->
            ?LOG_WARNING([{event, failed_to_fetch_vdf_session},
                    {peer, ar_util:format_peer(Peer)},
                    {error, io_lib:format("~p", [Reason2])}]),
            Error2
    end.

%% Fetch only the latest session from the peer and apply it.
fetch_and_apply_session(Peer) ->
    case ar_http_iface_client:get_vdf_session(Peer) of
        {ok, Update} ->
            ar_nonce_limiter:apply_external_update(Update, Peer);
        {error, Reason} = Error ->
            ?LOG_WARNING([{event, failed_to_fetch_vdf_session},
                    {peer, ar_util:format_peer(Peer)},
                    {error, io_lib:format("~p", [Reason])}]),
            Error
    end.

%% Return the last session key seen from Peer, or not_found.
get_latest_session_key(Peer, State) ->
    #state{ latest_session_keys = Map } = State,
    maps:get(Peer, Map, not_found).

%% Remember the last session key seen from Peer.
update_latest_session_key(Peer, SessionKey, State) ->
    #state{ latest_session_keys = Map } = State,
    State#state{ latest_session_keys = maps:put(Peer, SessionKey, Map) }.

================================================
FILE: apps/arweave/src/ar_nonce_limiter_server.erl
================================================
-module(ar_nonce_limiter_server).

-behaviour(gen_server).

-export([start_link/0, make_full_nonce_limiter_update/2,
        make_partial_nonce_limiter_update/4,
        get_update/1, get_full_update/1, get_full_prev_update/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% session_key/step_number: the last session key and step number for which
%% serialized updates were cached in ETS (see handle_computed_output/2).
-record(state, {
    session_key,
    step_number
}).

%% @doc The number of steps and the corresponding step checkpoints to include in every
%% regular update.
-define(REGULAR_UPDATE_INCLUDE_STEPS_COUNT, 2).

%% @doc The number of steps for which we include step checkpoints in the full session update.
%% Does not apply to previous session updates.
-define(SESSION_UPDATE_INCLUDE_STEP_CHECKPOINTS_COUNT, 20).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% Build a partial update carrying at most ?REGULAR_UPDATE_INCLUDE_STEPS_COUNT
%% of the most recent steps; Output is prepended when it advances the session
%% by exactly one step.
make_partial_nonce_limiter_update(SessionKey, Session, StepNumber, Output) ->
    #vdf_session{ steps = Steps, step_number = SessionStepNumber } = Session,
    StepNumberMinusOne = StepNumber - 1,
    Steps2 =
        case SessionStepNumber of
            StepNumber ->
                %% The session already contains this step.
                Steps;
            StepNumberMinusOne ->
                %% The computed output advances the session by one step.
                [Output | Steps];
            _ ->
                %% Gap between the session's last step and the computed output;
                %% ship the single output only.
                ?LOG_WARNING([{event, vdf_gap},
                        {session_step_number, SessionStepNumber},
                        {computed_output, StepNumber}]),
                [Output]
        end,
    make_nonce_limiter_update(
        SessionKey,
        Session#vdf_session{
            step_number = StepNumber,
            steps = lists:sublist(Steps2, ?REGULAR_UPDATE_INCLUDE_STEPS_COUNT) },
        true).

make_full_nonce_limiter_update(SessionKey, Session) ->
    make_nonce_limiter_update(SessionKey, Session, false).

%% @doc Return the minimal VDF update, i.e., the latest computed output.
get_update(Format) ->
    case ets:lookup(?MODULE, {partial_update, Format}) of
        [] -> not_found;
        [{_, PartialUpdate}] -> PartialUpdate
    end.

%% @doc Return the "full update" including the latest VDF session.
get_full_update(Format) ->
    case ets:lookup(?MODULE, {full_update, Format}) of
        [] -> not_found;
        [{_, Session}] -> Session
    end.

%% @doc Return the "full previous update" including the latest but one VDF session.
get_full_prev_update(Format) ->
    case ets:lookup(?MODULE, {full_prev_update, Format}) of
        [] -> not_found;
        [{_, PrevSession}] -> PrevSession
    end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
    ?LOG_INFO([{event, nonce_limiter_server_init}]),
    ok = ar_events:subscribe(nonce_limiter),
    {ok, #state{}}.

handle_call(Request, _From, State) ->
    ?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
    {reply, ok, State}.

handle_cast(Cast, State) ->
    ?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
    {noreply, State}.

handle_info({event, nonce_limiter, {computed_output, Args}}, State) ->
    handle_computed_output(Args, State);
handle_info({event, nonce_limiter, _Args}, State) ->
    {noreply, State};
handle_info(Message, State) ->
    ?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
    {noreply, State}.

terminate(Reason, _State) ->
    ?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
    ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

make_nonce_limiter_update(_SessionKey, not_found, _IsPartial) ->
    not_found;
make_nonce_limiter_update(SessionKey, Session, IsPartial) ->
    #vdf_session{ step_number = StepNumber, steps = Steps,
            step_checkpoints_map = StepCheckpointsMap } = Session,
    %% Clear the step_checkpoints_map to cut down on the amount of data pushed to each client.
    RecentStepNumbers =
        case IsPartial of
            false ->
                %% There is an upper bound on the number of steps with step checkpoints
                %% because the total number of steps in the session updates is often large.
                get_recent_step_numbers(StepNumber);
            true ->
                %% Include step checkpoints for every step included in the regular
                %% update.
                get_recent_step_numbers_from_steps(StepNumber, Steps)
        end,
    StepCheckpointsMap2 = maps:with(RecentStepNumbers, StepCheckpointsMap),
    #nonce_limiter_update{ session_key = SessionKey, is_partial = IsPartial,
            session = Session#vdf_session{ step_checkpoints_map = StepCheckpointsMap2 } }.

%% Serialize the latest session (and the previous one, when available) into
%% every supported wire format and cache the binaries in ETS to be served.
handle_computed_output({SessionKey, StepNumber, _, _},
        #state{ session_key = SessionKey, step_number = CurrentStepNumber } = State)
        when CurrentStepNumber >= StepNumber ->
    %% This (or a later) step of the same session is already cached.
    {noreply, State};
handle_computed_output(Args, State) ->
    {SessionKey, StepNumber, Output, _PartitionUpperBound} = Args,
    case ar_nonce_limiter:get_session(SessionKey) of
        not_found ->
            ?LOG_WARNING([{event, computed_output_session_not_found},
                    {session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
                    {step_number, StepNumber}]),
            {noreply, State};
        Session ->
            PrevSessionKey = Session#vdf_session.prev_session_key,
            PrevSession = ar_nonce_limiter:get_session(PrevSessionKey),
            PartialUpdate = make_partial_nonce_limiter_update(SessionKey, Session,
                    StepNumber, Output),
            FullUpdate = make_full_nonce_limiter_update(SessionKey, Session),
            PartialUpdateBin2 = ar_serialize:nonce_limiter_update_to_binary(2, PartialUpdate),
            PartialUpdateBin3 = ar_serialize:nonce_limiter_update_to_binary(3, PartialUpdate),
            FullUpdateBin2 = ar_serialize:nonce_limiter_update_to_binary(2, FullUpdate),
            FullUpdateBin3 = ar_serialize:nonce_limiter_update_to_binary(3, FullUpdate),
            FullUpdateBin4 = ar_serialize:nonce_limiter_update_to_binary(4, FullUpdate),
            Keys = [
                {{partial_update, 2}, PartialUpdateBin2},
                {{partial_update, 3}, PartialUpdateBin3},
                {{full_update, 2}, FullUpdateBin2},
                {{full_update, 3}, FullUpdateBin3},
                {{full_update, 4}, FullUpdateBin4}
            ],
            Keys2 =
                case PrevSession of
                    not_found ->
                        Keys;
                    _ ->
                        FullPrevUpdate = make_full_nonce_limiter_update(
                                PrevSessionKey, PrevSession),
                        FullPrevUpdateBin2 = ar_serialize:nonce_limiter_update_to_binary(
                                2, FullPrevUpdate),
                        FullPrevUpdateBin3 = ar_serialize:nonce_limiter_update_to_binary(
                                3, FullPrevUpdate),
                        FullPrevUpdateBin4 = ar_serialize:nonce_limiter_update_to_binary(
                                4, FullPrevUpdate),
                        Keys ++ [
                            {{full_prev_update, 2}, FullPrevUpdateBin2},
                            {{full_prev_update, 3}, FullPrevUpdateBin3},
                            {{full_prev_update, 4}, FullPrevUpdateBin4}]
                end,
            ets:insert(?MODULE, Keys2),
            {noreply, State#state{ session_key = SessionKey, step_number = StepNumber }}
    end.

%% Return up to ?SESSION_UPDATE_INCLUDE_STEP_CHECKPOINTS_COUNT step numbers
%% counting down from StepNumber (stopping after step 0).
get_recent_step_numbers(StepNumber) ->
    get_recent_step_numbers(StepNumber, 0).

get_recent_step_numbers(_, Taken)
        when Taken == ?SESSION_UPDATE_INCLUDE_STEP_CHECKPOINTS_COUNT ->
    [];
get_recent_step_numbers(-1, _Taken) ->
    [];
get_recent_step_numbers(StepNumber, Taken) ->
    [StepNumber | get_recent_step_numbers(StepNumber - 1, Taken + 1)].

%% Return one step number, counting down from StepNumber, per element of Steps.
get_recent_step_numbers_from_steps(_StepNumber, []) ->
    [];
get_recent_step_numbers_from_steps(StepNumber, [_Step | Steps]) ->
    [StepNumber | get_recent_step_numbers_from_steps(StepNumber - 1, Steps)].

================================================
FILE: apps/arweave/src/ar_nonce_limiter_server_worker.erl
================================================
-module(ar_nonce_limiter_server_worker).

-behaviour(gen_server).

-export([start_link/2]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").

%% raw_peer: the configured VDF client peer (possibly a domain name);
%% pause_until: POSIX seconds before which pushes are suppressed;
%% format: the wire format the client last requested.
-record(state, {
    raw_peer,
    pause_until = 0,
    format = 2
}).

%% The frequency in milliseconds of re-resolving the domain name of the client,
%% if the client is configured via the domain name.
%%
%% ar_nonce_limiter_server_worker periodically re-resolves and caches the address
%% of the corresponding client such that they can be identified upon request,
%% unless we are configured as a public VDF server.
-define(RE_RESOLVE_PEER_DOMAIN_MS, (30 * 1000)).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server. Each worker is registered under a unique per-peer Name.
start_link(Name, RawPeer) ->
    gen_server:start_link({local, Name}, ?MODULE, RawPeer, []).
%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init(RawPeer) ->
    ?LOG_INFO([{event, nonce_limiter_server_worker_init}, {raw_peer, RawPeer}]),
    ok = ar_events:subscribe(nonce_limiter),
    %% A public VDF server accepts any client, so there is no need to keep
    %% the configured client's resolved address cached.
    case ar_config:is_public_vdf_server() of
        false ->
            gen_server:cast(self(), re_resolve_peer_domain);
        true ->
            ok
    end,
    {ok, #state{ raw_peer = RawPeer }}.

handle_call(Request, _From, State) ->
    ?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
    {reply, ok, State}.

%% Periodically re-resolve and cache the client's address (DNS may change).
handle_cast(re_resolve_peer_domain, #state{ raw_peer = RawPeer } = State) ->
    case ar_peers:resolve_and_cache_peer(RawPeer, vdf_client_peer) of
        {ok, _} ->
            ok;
        Error ->
            ?LOG_WARNING([{event, failed_to_re_resolve_peer_domain},
                    {error, io_lib:format("~p", [Error])},
                    {peer, io_lib:format("~p", [RawPeer])}])
    end,
    %% Fix: this worker is registered under a per-peer name (see start_link/2),
    %% not under ?MODULE, so scheduling the next cast to ?MODULE went to an
    %% unregistered name and the re-resolution loop silently stopped after the
    %% first pass. Schedule the next iteration to self() instead.
    ar_util:cast_after(?RE_RESOLVE_PEER_DOMAIN_MS, self(), re_resolve_peer_domain),
    {noreply, State};
handle_cast(Cast, State) ->
    ?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
    {noreply, State}.

handle_info({event, nonce_limiter, {computed_output, Args}}, State) ->
    #state{ raw_peer = RawPeer } = State,
    %% Resolve the client right before pushing so a fresh address is used.
    case ar_peers:resolve_and_cache_peer(RawPeer, vdf_client_peer) of
        {error, _} ->
            ?LOG_WARNING([{event, failed_to_resolve_vdf_client_peer_before_push},
                    {raw_peer, io_lib:format("~p", [RawPeer])}]),
            {noreply, State};
        {ok, Peer} ->
            handle_computed_output(Peer, Args, State)
    end;
handle_info({event, nonce_limiter, _Args}, State) ->
    {noreply, State};
handle_info(Message, State) ->
    ?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
    {noreply, State}.

terminate(Reason, _State) ->
    ?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
    ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Decide whether to push the computed output to the client: skip while the
%% client asked us to pause, or when the step is already stale locally.
handle_computed_output(Peer, Args, State) ->
    #state{ pause_until = Timestamp, format = Format } = State,
    {SessionKey, StepNumber, Output, _PartitionUpperBound} = Args,
    CurrentStepNumber = ar_nonce_limiter:get_current_step_number(),
    ?LOG_DEBUG([{event, handle_computed_output},
            {peer, ar_util:format_peer(Peer)},
            {session_key, ar_nonce_limiter:encode_session_key(SessionKey)},
            {step_number, StepNumber},
            {current_step_number, CurrentStepNumber},
            {timestamp, Timestamp},
            {format, Format}]),
    case os:system_time(second) < Timestamp of
        true ->
            {noreply, State};
        false ->
            case StepNumber < CurrentStepNumber of
                true ->
                    {noreply, State};
                false ->
                    {noreply, push_update(SessionKey, StepNumber, Output, Peer, Format,
                            State)}
            end
    end.

%% Push a partial update to the client and react to its response:
%% switch wire formats, honor a pause request, or fall back to pushing
%% full sessions when the client is missing data.
push_update(SessionKey, StepNumber, Output, Peer, Format, State) ->
    Session = ar_nonce_limiter:get_session(SessionKey),
    Update = ar_nonce_limiter_server:make_partial_nonce_limiter_update(
            SessionKey, Session, StepNumber, Output),
    case Update of
        not_found ->
            State;
        _ ->
            case ar_http_iface_client:push_nonce_limiter_update(Peer, Update, Format) of
                ok ->
                    State;
                {ok, Response} ->
                    RequestedFormat = Response#nonce_limiter_update_response.format,
                    Postpone = Response#nonce_limiter_update_response.postpone,
                    SessionFound = Response#nonce_limiter_update_response.session_found,
                    RequestedStepNumber =
                            Response#nonce_limiter_update_response.step_number,
                    case {RequestedFormat == Format, Postpone == 0, SessionFound,
                            RequestedStepNumber >= StepNumber - 1} of
                        {false, _, _, _} ->
                            %% Client requested a different payload format
                            ?LOG_DEBUG([{event, vdf_client_requested_different_format},
                                    {peer, ar_util:format_peer(Peer)},
                                    {step_number, StepNumber},
                                    {format, Format},
                                    {requested_format, RequestedFormat}]),
                            push_update(SessionKey, StepNumber, Output, Peer,
                                    RequestedFormat,
                                    State#state{ format = RequestedFormat });
                        {true, false, _, _} ->
                            %% Client requested we pause updates
                            Now = os:system_time(second),
                            State#state{ pause_until = Now + Postpone };
                        {true, true, false, _} ->
                            %% Client requested the full session
                            PrevSessionKey = Session#vdf_session.prev_session_key,
                            PrevSession = ar_nonce_limiter:get_session(PrevSessionKey),
                            case push_session(PrevSessionKey, PrevSession, Peer, Format) of
                                ok ->
                                    %% Do not push the new session until the previous
                                    %% session is in line with our view (i.e., has steps
                                    %% at least up to StepNumber where the new session begins).
                                    push_session(SessionKey, Session, Peer, Format);
                                fail ->
                                    ok
                            end,
                            State;
                        {true, true, true, false} ->
                            %% Client requested missing steps
                            push_session(SessionKey, Session, Peer, Format),
                            State;
                        _ ->
                            %% Client is ahead of the server
                            State
                    end;
                {error, Error} ->
                    log_failure(Peer, SessionKey, Update, Error, []),
                    State
            end
    end.

%% Push a full session update; return ok on success, fail otherwise.
push_session(SessionKey, Session, Peer, Format) ->
    Update = ar_nonce_limiter_server:make_full_nonce_limiter_update(SessionKey, Session),
    case Update of
        not_found ->
            ok;
        _ ->
            case ar_http_iface_client:push_nonce_limiter_update(Peer, Update, Format) of
                ok ->
                    ok;
                {ok, #nonce_limiter_update_response{ step_number = ClientStepNumber,
                        session_found = ReportedSessionFound }} ->
                    log_failure(Peer, SessionKey, Update, behind_client,
                            [{client_step_number, ClientStepNumber},
                            {session_found, ReportedSessionFound}]),
                    fail;
                {error, Error} ->
                    log_failure(Peer, SessionKey, Update, Error, []),
                    fail
            end
    end.
%% Log a failed push. Transient network errors and the client merely being
%% behind are logged at debug level; anything unexpected is a warning.
log_failure(Peer, SessionKey, Update, Error, Extra) ->
    {SessionSeed, SessionInterval, NextVDFDifficulty} = SessionKey,
    StepNumber = Update#nonce_limiter_update.session#vdf_session.step_number,
    Log = [{event, failed_to_push_nonce_limiter_update_to_peer},
            {reason, io_lib:format("~p", [Error])},
            {peer, ar_util:format_peer(Peer)},
            {session_seed, ar_util:encode(SessionSeed)},
            {session_interval, SessionInterval},
            {session_difficulty, NextVDFDifficulty},
            {server_step_number, StepNumber}] ++ Extra,
    case Error of
        behind_client -> ?LOG_DEBUG(Log);
        {shutdown, econnrefused} -> ?LOG_DEBUG(Log);
        {shutdown, timeout} -> ?LOG_DEBUG(Log);
        {shutdown, ehostunreach} -> ?LOG_DEBUG(Log);
        {closed, "The connection was lost."} -> ?LOG_DEBUG(Log);
        timeout -> ?LOG_DEBUG(Log);
        {<<"400">>, <<>>} -> ?LOG_DEBUG(Log);
        {<<"503">>, <<"{\"error\":\"not_joined\"}">>} -> ?LOG_DEBUG(Log);
        _ -> ?LOG_WARNING(Log)
    end.

================================================
FILE: apps/arweave/src/ar_nonce_limiter_sup.erl
================================================
-module(ar_nonce_limiter_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

init([]) ->
    {ok, Config} = arweave_config:get_env(),
    %% One ar_nonce_limiter_server_worker per configured VDF client peer,
    %% each registered under a unique per-peer name.
    ServerWorkers = lists:map(
        fun(Peer) ->
            Name = list_to_atom("ar_nonce_limiter_server_worker_"
                    ++ ar_util:peer_to_str(Peer)),
            ?CHILD_WITH_ARGS(ar_nonce_limiter_server_worker, worker, Name, [Name, Peer])
        end,
        Config#config.nonce_limiter_client_peers
    ),
    Client = ?CHILD(ar_nonce_limiter_client, worker),
    Server = ?CHILD(ar_nonce_limiter_server, worker),
    NonceLimiter = ?CHILD(ar_nonce_limiter, worker),
    %% VDF servers additionally run the server process and the push workers.
    Workers =
        case ar_config:is_vdf_server() of
            true -> [NonceLimiter, Server, Client | ServerWorkers];
            false -> [NonceLimiter, Client]
        end,
    ?LOG_INFO([{event, nonce_limiter_sup_init}, {workers, Workers}]),
    {ok, {{one_for_one, 5, 10}, Workers}}.

================================================
FILE: apps/arweave/src/ar_packing_server.erl
================================================
-module(ar_packing_server).

-behaviour(gen_server).

-export([start_link/0, packing_atom/1,
        get_packing_state/0, get_randomx_state_for_h0/2,
        request_unpack/2, request_unpack/3, request_repack/2, request_repack/3,
        request_encipher/3, request_decipher/3,
        pack/4, unpack/5, repack/6, unpack_sub_chunk/5,
        is_buffer_full/0, record_buffer_size_metric/0,
        pad_chunk/1, unpad_chunk/3, unpad_chunk/4,
        encipher_replica_2_9_chunk/2, decipher_replica_2_9_chunk/2,
        exor_replica_2_9_chunk/2, pack_replica_2_9_chunk/3,
        request_entropy_generation/3]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%% workers: queue of packing worker pids served round-robin;
%% num_workers: number of spawned workers (0 disables packing).
-record(state, {
    workers,
    num_workers
}).

%% We remember the earliest entropy generation per mining address
%% until it falls out of this window. Used to track the amount of
%% redundant entropy generation.
-define(ENTROPY_GENERATION_STATS_WINDOW_MS, 1000 * 60 * 30).
% 30 minutes

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% Map a packing term to the short atom used in metric labels.
packing_atom(Packing) when is_atom(Packing) -> Packing;
packing_atom({spora_2_6, _Addr}) -> spora_2_6;
packing_atom({composite, _Addr, _Diff}) -> composite;
packing_atom({replica_2_9, _Addr}) -> replica_2_9.

request_unpack(Ref, Args) ->
    request_unpack(Ref, self(), Args).

%% Queue an asynchronous unpack; the caller also gets an expiry message
%% after 10 minutes in case the request is never served.
request_unpack(Ref, ReplyTo, Args) ->
    ar_util:cast_after(600000, ReplyTo, {expire_unpack_request, Ref}),
    gen_server:cast(?MODULE, {unpack_request, ReplyTo, Ref, Args}).

request_repack(Ref, Args) ->
    request_repack(Ref, self(), Args).

%% Queue an asynchronous repack with the same 10-minute expiry scheme.
request_repack(Ref, ReplyTo, Args) ->
    ar_util:cast_after(600000, ReplyTo, {expire_repack_request, Ref}),
    gen_server:cast(?MODULE, {repack_request, ReplyTo, Ref, Args}).

request_encipher(Ref, ReplyTo, {Chunk, Entropy}) ->
    ar_util:cast_after(600000, ReplyTo, {expire_encipher_request, Ref}),
    gen_server:cast(?MODULE, {encipher_request, ReplyTo, Ref, {Chunk, Entropy}}).

request_decipher(Ref, ReplyTo, {Chunk, Entropy}) ->
    ar_util:cast_after(600000, ReplyTo, {expire_decipher_request, Ref}),
    gen_server:cast(?MODULE, {decipher_request, ReplyTo, Ref, {Chunk, Entropy}}).

request_entropy_generation(Ref, ReplyTo,
        {RewardAddr, BucketEndOffset, SubChunkStart, CacheEntropy}) ->
    gen_server:cast(?MODULE, {generate_entropy, ReplyTo, Ref,
            {RewardAddr, BucketEndOffset, SubChunkStart, CacheEntropy}}).

%% @doc Pack the chunk for mining. Packing ensures every mined chunk of data is globally
%% unique and cannot be easily inferred during mining from any metadata stored in RAM.
pack(Packing, ChunkOffset, TXRoot, Chunk) ->
    PackingState = get_packing_state(),
    record_packing_request(pack, Packing, unpacked),
    case pack(Packing, ChunkOffset, TXRoot, Chunk, PackingState, external) of
        {ok, Packed, _} ->
            {ok, Packed};
        Reply ->
            Reply
    end.

%% @doc Unpack the chunk packed for mining.
%%
%% Return {ok, UnpackedChunk} or {error, invalid_packed_size} or {error, invalid_chunk_size}
%% or {error, invalid_padding}.
unpack(Packing, ChunkOffset, TXRoot, Chunk, ChunkSize) ->
    PackingState = get_packing_state(),
    record_packing_request(unpack, unpacked, Packing),
    case unpack(Packing, ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, external) of
        {ok, Unpacked, _WasAlreadyUnpacked} ->
            {ok, Unpacked};
        Reply ->
            Reply
    end.

%% @doc Unpack the packed sub-chunk of a composite packing or shared entropy replica.
%%
%% Return {ok, UnpackedSubChunk} or {error, invalid_packed_size}.
unpack_sub_chunk({composite, _, _} = Packing, AbsoluteEndOffset, TXRoot, Chunk,
        SubChunkStartOffset) ->
    case byte_size(Chunk) == ?COMPOSITE_PACKING_SUB_CHUNK_SIZE of
        false ->
            {error, invalid_packed_size};
        true ->
            PackingState = get_packing_state(),
            record_packing_request(unpack_sub_chunk, not_set, Packing),
            {PackingAtom, Key} = chunk_key(Packing, AbsoluteEndOffset, TXRoot),
            RandomXState = get_randomx_state_by_packing(Packing, PackingState),
            %% Time the decryption under the packing duration histogram.
            case prometheus_histogram:observe_duration(packing_duration_milliseconds,
                    [unpack_sub_chunk, PackingAtom, external],
                    fun() ->
                        ar_mine_randomx:randomx_decrypt_sub_chunk(Packing,
                                RandomXState, Key, Chunk, SubChunkStartOffset)
                    end) of
                {ok, UnpackedSubChunk} ->
                    {ok, UnpackedSubChunk};
                Error ->
                    Error
            end
    end;
unpack_sub_chunk({replica_2_9, RewardAddr} = Packing, AbsoluteEndOffset, _TXRoot, Chunk,
        SubChunkStartOffset) ->
    case byte_size(Chunk) == ?COMPOSITE_PACKING_SUB_CHUNK_SIZE of
        false ->
            {error, invalid_packed_size};
        true ->
            PackingState = get_packing_state(),
            record_packing_request(unpack_sub_chunk, not_set, Packing),
            Entropy = generate_replica_2_9_entropy(RewardAddr, AbsoluteEndOffset,
                    SubChunkStartOffset),
            RandomXState = get_randomx_state_by_packing(Packing, PackingState),
            EntropySubChunkIndex = ar_replica_2_9:get_slice_index(AbsoluteEndOffset),
            case prometheus_histogram:observe_duration(packing_duration_milliseconds,
                    [unpack_sub_chunk, replica_2_9, external],
                    fun() ->
                        ar_mine_randomx:randomx_decrypt_replica_2_9_sub_chunk(
                                {RandomXState, Entropy, Chunk, EntropySubChunkIndex})
                    end) of
                {ok, UnpackedSubChunk} ->
                    {ok, UnpackedSubChunk};
                Error ->
                    Error
            end
    end.

repack(RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize) ->
    PackingState = get_packing_state(),
    record_packing_request(repack, RequestedPacking, StoredPacking),
    repack(RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
            PackingState, external).

%% @doc Return true if the packing server buffer is considered full, to apply
%% some back-pressure on the pack/4 and unpack/5 callers.
is_buffer_full() ->
    [{_, Limit}] = ets:lookup(?MODULE, buffer_size_limit),
    case ets:lookup(?MODULE, buffer_size) of
        [{_, Size}] when Size > Limit ->
            true;
        _ ->
            false
    end.

pad_chunk(Chunk) ->
    pad_chunk(Chunk, byte_size(Chunk)).

%% Pad the chunk with zeros up to ?DATA_CHUNK_SIZE; full-size chunks are
%% returned unchanged.
pad_chunk(Chunk, ChunkSize) when ChunkSize == (?DATA_CHUNK_SIZE) ->
    Chunk;
pad_chunk(Chunk, ChunkSize) ->
    Zeros =
        case erlang:get(zero_chunk) of
            undefined ->
                ZeroChunk = << <<0>> || _ <- lists:seq(1, ?DATA_CHUNK_SIZE) >>,
                %% Cache the zero chunk in the process memory, constructing
                %% it is expensive.
                erlang:put(zero_chunk, ZeroChunk),
                ZeroChunk;
            ZeroChunk ->
                ZeroChunk
        end,
    PaddingSize = (?DATA_CHUNK_SIZE) - ChunkSize,
    << Chunk/binary, (binary:part(Zeros, 0, PaddingSize))/binary >>.

%% Strip the padding according to the packing scheme; spora_2_5 and unpacked
%% chunks are truncated unconditionally, the other schemes validate that the
%% padding is all zeros (see unpad_chunk/3).
unpad_chunk(spora_2_5, Unpacked, ChunkSize, _PackedSize) ->
    binary:part(Unpacked, 0, ChunkSize);
unpad_chunk({spora_2_6, _Addr}, Unpacked, ChunkSize, PackedSize) ->
    unpad_chunk(Unpacked, ChunkSize, PackedSize);
unpad_chunk({composite, _Addr, _PackingDifficulty}, Unpacked, ChunkSize, PackedSize) ->
    unpad_chunk(Unpacked, ChunkSize, PackedSize);
unpad_chunk({replica_2_9, _Addr}, Unpacked, ChunkSize, PackedSize) ->
    unpad_chunk(Unpacked, ChunkSize, PackedSize);
unpad_chunk(unpacked, Unpacked, ChunkSize, _PackedSize) ->
    binary:part(Unpacked, 0, ChunkSize).
%% Validate and strip the zero padding of an unpacked chunk.
%% Returns the chunk (or its unpadded prefix), or the atom error when any
%% padding byte is non-zero.
unpad_chunk(Unpacked, ChunkSize, PackedSize) ->
    case binary:part(Unpacked, ChunkSize, PackedSize - ChunkSize) of
        <<>> ->
            Unpacked;
        Padding ->
            case is_zero(Padding) of
                true ->
                    binary:part(Unpacked, 0, ChunkSize);
                false ->
                    error
            end
    end.

%% Return true when every byte of the given binary is zero.
is_zero(<<>>) ->
    true;
is_zero(<< Byte:8, Rest/binary >>) when Byte == 0 ->
    is_zero(Rest);
is_zero(_) ->
    false.

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% Fetch the cached RandomX packing state from the module's ETS table.
get_packing_state() ->
    [{randomx_packing_state, PackingState}] = ets:lookup(?MODULE, randomx_packing_state),
    PackingState.

%% Select the RandomX state used for H0: the 512 variant for packing
%% difficulty 0, the 4096 variant for any other difficulty.
get_randomx_state_for_h0(PackingDifficulty, PackingState) ->
    {State512, State4096, _SharedEntropyState} = PackingState,
    case PackingDifficulty =:= 0 of
        true -> State512;
        false -> State4096
    end.

%% @doc Encipher the given chunk with the given 2.9 entropy assembled for this chunk.
%% Encipher and decipher are the same operation (see exor_replica_2_9_chunk/2);
%% the only difference is how the request is recorded.
-spec encipher_replica_2_9_chunk(Chunk :: binary(), Entropy :: binary()) -> binary().
encipher_replica_2_9_chunk(Chunk, Entropy) ->
    record_packing_request(encipher, {replica_2_9, <<>>}, unpacked_padded),
    exor_replica_2_9_chunk(Chunk, Entropy).

%% @doc Decipher the given chunk with the given 2.9 entropy assembled for this chunk.
%% Encipher and decipher are the same operation (see exor_replica_2_9_chunk/2);
%% the only difference is how the request is recorded.
-spec decipher_replica_2_9_chunk(Chunk :: binary(), Entropy :: binary()) -> binary().
decipher_replica_2_9_chunk(Chunk, Entropy) ->
    record_packing_request(decipher, unpacked_padded, {replica_2_9, <<>>}),
    exor_replica_2_9_chunk(Chunk, Entropy).

%% @doc Generate the 2.9 entropy. Defaults to caching the generated entropy.
-spec generate_replica_2_9_entropy(RewardAddr :: binary(),
        BucketEndOffset :: non_neg_integer(),
        SubChunkStartOffset :: non_neg_integer()) -> binary().
generate_replica_2_9_entropy(RewardAddr, BucketEndOffset, SubChunkStartOffset) ->
    generate_replica_2_9_entropy(RewardAddr, BucketEndOffset, SubChunkStartOffset, true).
-spec generate_replica_2_9_entropy(
        RewardAddr :: binary(),
        BucketEndOffset :: non_neg_integer(),
        SubChunkStartOffset :: non_neg_integer(),
        CacheEntropy :: boolean()
) -> binary().
generate_replica_2_9_entropy(RewardAddr, BucketEndOffset, SubChunkStartOffset, false) ->
    %% No caching: compute the entropy directly.
    Key = ar_replica_2_9:get_entropy_key(RewardAddr, BucketEndOffset, SubChunkStartOffset),
    do_generate_entropy(RewardAddr, Key);
generate_replica_2_9_entropy(RewardAddr, BucketEndOffset, SubChunkStartOffset, true) ->
    Key = ar_replica_2_9:get_entropy_key(RewardAddr, BucketEndOffset, SubChunkStartOffset),
    Partition = ar_node:get_partition_number(BucketEndOffset),
    %% Take the per-key generation lock (entropy_generation_lock is defined
    %% elsewhere in this module); released on every path below.
    entropy_generation_lock(Key, RewardAddr, BucketEndOffset, SubChunkStartOffset),
    case ar_entropy_cache:get(Key) of
        {ok, Entropy} ->
            prometheus_counter:inc(replica_2_9_entropy_stats, [Partition, cache_hit]),
            entropy_generation_release(Key),
            Entropy;
        not_found ->
            prometheus_counter:inc(replica_2_9_entropy_stats, [Partition, cache_miss]),
            Entropy = do_generate_entropy(RewardAddr, Key),
            update_entropy_generation_stats(Key, RewardAddr, BucketEndOffset,
                    SubChunkStartOffset),
            {ok, Config} = arweave_config:get_env(),
            MaxSize = Config#config.replica_2_9_entropy_cache_size_mb * ?MiB,
            %% Make room in the cache before storing the freshly generated entropy.
            ar_entropy_cache:clean_up_space(?REPLICA_2_9_ENTROPY_SIZE, MaxSize),
            ar_entropy_cache:put(Key, Entropy, ?REPLICA_2_9_ENTROPY_SIZE),
            entropy_generation_release(Key),
            Entropy
    end.

%% Generate the raw entropy with RandomX and truncate it to the canonical size.
do_generate_entropy(RewardAddr, Key) ->
    PackingState = get_packing_state(),
    RandomXState = get_randomx_state_by_packing({replica_2_9, RewardAddr}, PackingState),
    Entropy = ar_mine_randomx:randomx_generate_replica_2_9_entropy(RandomXState, Key),
    %% Primarily needed for testing where the entropy generated exceeds the entropy
    %% needed for tests.
    binary_part(Entropy, 0, ?REPLICA_2_9_ENTROPY_SIZE).

%% @doc Pad (to ?DATA_CHUNK_SIZE) and pack the chunk according to the 2.9 replication format.
%% Return the chunk and the combined entropy used on that chunk.
-spec pack_replica_2_9_chunk(
		RewardAddr :: binary(),
		AbsoluteEndOffset :: non_neg_integer(),
		Chunk :: binary()
) -> {ok, binary(), binary()}.
pack_replica_2_9_chunk(RewardAddr, AbsoluteEndOffset, Chunk) ->
	PackingState = get_packing_state(),
	RandomXState = get_randomx_state_by_packing({replica_2_9, RewardAddr}, PackingState),
	PaddedChunk = pad_chunk(Chunk),
	SubChunks = get_sub_chunks(PaddedChunk),
	pack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState, SubChunks).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Initialise the RandomX datasets, run a hashing benchmark, spawn the
%% configured number of packing worker processes, and set up the chunk cache
%% size limit plus the periodic buffer-size metric timer.
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	%% NOTE(review): the same ?RANDOMX_PACKING_KEY is passed twice to the
	%% format string here (and init_packing_state/0 below uses only that one
	%% key for all three datasets) — confirm whether a second, different key
	%% was intended or whether the message should print a single key.
	ar:console("~nInitialising RandomX datasets. Keys: ~p, ~p. "
			"The process may take several minutes.~n",
			[ar_util:encode(?RANDOMX_PACKING_KEY), ar_util:encode(?RANDOMX_PACKING_KEY)]),
	{RandomXState512, _RandomXState4096, _RandomXStateSharedEntropy} =
			PackingState = init_packing_state(),
	ar:console("RandomX dataset initialisation complete.~n", []),
	%% Benchmark reported in milliseconds (values come back in microseconds).
	{H0, H1} = ar_bench_hash:run_benchmark(RandomXState512),
	H0String = io_lib:format("~.3f", [H0 / 1000]),
	H1String = io_lib:format("~.3f", [H1 / 1000]),
	ar:console("Hashing benchmark~nH0: ~s ms~nH1/H2: ~s ms~n", [H0String, H1String]),
	?LOG_INFO([{event, hash_benchmark}, {h0_ms, H0String}, {h1_ms, H1String}]),
	NumWorkers = Config#config.packing_workers,
	ar:console("~nStarting ~B packing threads.~n", [NumWorkers]),
	?LOG_INFO([{event, starting_packing_threads}, {num_threads, NumWorkers}]),
	%% Workers are kept in a queue and rotated round-robin in handle_cast.
	Workers = queue:from_list(
		[spawn_link(fun() -> worker(PackingState) end) || _ <- lists:seq(1, NumWorkers)]),
	ets:insert(?MODULE, {buffer_size, 0}),
	MaxSize =
		case Config#config.packing_cache_size_limit of
			undefined ->
				%% No explicit limit: derive one from free memory (default
				%% 2 GB if memsup reports nothing), cap at 1200 chunks and
				%% round up to a multiple of 100.
				Free = proplists:get_value(free_memory,
						memsup:get_system_memory_data(), 2000000000),
				Limit2 = min(1200, erlang:ceil(Free * 0.9 / 3 / ?DATA_CHUNK_SIZE)),
				Limit3 = ar_util:ceil_int(Limit2, 100),
				Limit3;
			Limit ->
				Limit
		end,
	ar:console("~nSetting the packing chunk cache size limit to ~B chunks.~n", [MaxSize]),
	?LOG_INFO([{event, packing_chunk_cache_size_limit}, {max_size, MaxSize}]),
	ets:insert(?MODULE, {buffer_size_limit, MaxSize}),
	%% Publish the buffer size gauge every 200 ms.
	{ok, _} = ar_timer:apply_interval(
		200,
		?MODULE,
		record_buffer_size_metric,
		[],
		#{ skip_on_shutdown => false }
	),
	{ok, #state{ workers = Workers, num_workers = NumWorkers }}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% With zero workers packing is effectively disabled — drop the request.
handle_cast({unpack_request, _, _, _}, #state{ num_workers = 0 } = State) ->
	?LOG_WARNING([{event, got_unpack_request_while_packing_is_disabled}]),
	{noreply, State};
handle_cast({unpack_request, From, Ref, Args}, State) ->
	#state{ workers = Workers } = State,
	{Packing, _Chunk, _AbsoluteOffset, _TXRoot, _ChunkSize} = Args,
	%% Rotate the worker queue: take from the front, push back at the rear.
	{{value, Worker}, Workers2} = queue:out(Workers),
	increment_buffer_size(),
	record_packing_request(unpack, unpacked, Packing),
	Worker ! {unpack, Ref, From, Args},
	{noreply, State#state{ workers = queue:in(Worker, Workers2) }};
handle_cast({repack_request, _, _, _}, #state{ num_workers = 0 } = State) ->
	?LOG_WARNING([{event, got_repack_request_while_packing_is_disabled}]),
	{noreply, State};
handle_cast({repack_request, From, Ref, Args}, State) ->
	#state{ workers = Workers } = State,
	{RequestedPacking, Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize} = Args,
	{{value, Worker}, Workers2} = queue:out(Workers),
	case {RequestedPacking, Packing} of
		{unpacked, unpacked} ->
			%% Nothing to do: reply immediately without involving a worker.
			From ! {chunk, {packed, Ref, {unpacked, Chunk, AbsoluteOffset, TXRoot, ChunkSize}}},
			{noreply, State};
		{_, unpacked} ->
			%% Stored data is already unpacked, so this is a plain pack.
			increment_buffer_size(),
			record_packing_request(pack, RequestedPacking, unpacked),
			Worker ! {pack, Ref, From, {RequestedPacking, Chunk, AbsoluteOffset, TXRoot,
					ChunkSize}},
			{noreply, State#state{ workers = queue:in(Worker, Workers2) }};
		_ ->
			increment_buffer_size(),
			record_packing_request(repack, RequestedPacking, Packing),
			Worker !
				{
					repack,
					Ref,
					From,
					{RequestedPacking, Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize}
				},
			{noreply, State#state{ workers = queue:in(Worker, Workers2) }}
	end;
handle_cast({encipher_request, From, Ref, {Chunk, Entropy}}, State) ->
	#state{ workers = Workers } = State,
	{{value, Worker}, Workers2} = queue:out(Workers),
	Worker ! {encipher, Ref, From, {Chunk, Entropy}},
	{noreply, State#state{ workers = queue:in(Worker, Workers2) }};
handle_cast({decipher_request, From, Ref, {Chunk, Entropy}}, State) ->
	#state{ workers = Workers } = State,
	{{value, Worker}, Workers2} = queue:out(Workers),
	Worker ! {decipher, Ref, From, {Chunk, Entropy}},
	{noreply, State#state{ workers = queue:in(Worker, Workers2) }};
handle_cast({generate_entropy, From, Ref,
		{RewardAddr, BucketEndOffset, SubChunkStart, CacheEntropy}}, State) ->
	#state{ workers = Workers } = State,
	{{value, Worker}, Workers2} = queue:out(Workers),
	Worker ! {generate_entropy, Ref, From,
			{RewardAddr, BucketEndOffset, SubChunkStart, CacheEntropy}},
	{noreply, State#state{ workers = queue:in(Worker, Workers2) }};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Build the three RandomX fast-mode states (rx512, rx4096, rxsquared),
%% all keyed with ?RANDOMX_PACKING_KEY, store the tuple in the module ETS
%% table under randomx_packing_state, and return it.
init_packing_state() ->
	Schedulers = erlang:system_info(dirty_cpu_schedulers_online),
	RandomXState512 = ar_mine_randomx:init_fast(rx512, ?RANDOMX_PACKING_KEY, Schedulers),
	RandomXState4096 = ar_mine_randomx:init_fast(rx4096, ?RANDOMX_PACKING_KEY, Schedulers),
	RandomXStateSharedEntropy = ar_mine_randomx:init_fast(
			rxsquared, ?RANDOMX_PACKING_KEY, Schedulers),
	PackingState = {RandomXState512, RandomXState4096, RandomXStateSharedEntropy},
	ets:insert(?MODULE, {randomx_packing_state, PackingState}),
	PackingState.

%% @doc Select the RandomX state matching the packing scheme from the
%% {Rx512, Rx4096, RxSquared} tuple built by init_packing_state/0:
%% spora_2_5/spora_2_6 -> rx512, composite -> rx4096, replica_2_9 -> rxsquared.
get_randomx_state_by_packing({composite, _, _}, {_, RandomXState, _}) ->
	RandomXState;
get_randomx_state_by_packing({replica_2_9, _}, {_, _, RandomXState}) ->
	RandomXState;
get_randomx_state_by_packing({spora_2_6, _}, {RandomXState, _, _}) ->
	RandomXState;
get_randomx_state_by_packing(spora_2_5, {RandomXState, _, _}) ->
	RandomXState.

%% @doc Packing worker loop. Receives unpack/pack/repack/encipher/decipher/
%% generate_entropy jobs dispatched by the gen_server, performs the work
%% synchronously, replies directly to the requester, decrements the shared
%% buffer-size counter (for chunk jobs), and loops.
worker(PackingState) ->
	receive
		{unpack, Ref, From, Args} ->
			{Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize} = Args,
			case unpack(Packing, AbsoluteOffset, TXRoot, Chunk, ChunkSize,
					PackingState, internal) of
				{ok, U, _AlreadyUnpacked} ->
					From ! {chunk, {unpacked, Ref,
							{Packing, U, AbsoluteOffset, TXRoot, ChunkSize}}};
				{error, invalid_packed_size} ->
					From ! {chunk, {unpack_error, Ref, Args, invalid_packed_size}};
				{error, invalid_chunk_size} ->
					From ! {chunk, {unpack_error, Ref, Args, invalid_chunk_size}};
				{error, invalid_padding} ->
					From ! {chunk, {unpack_error, Ref, Args, invalid_padding}};
				{exception, Error} ->
					%% No reply is sent to From in this case — only a log entry.
					?LOG_ERROR([{event, failed_to_unpack_chunk},
							{absolute_end_offset, AbsoluteOffset},
							{error, io_lib:format("~p", [Error])}])
			end,
			decrement_buffer_size(),
			worker(PackingState);
		{pack, Ref, From, Args} ->
			{Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize} = Args,
			case pack(Packing, AbsoluteOffset, TXRoot, Chunk, PackingState, internal) of
				{ok, Packed, _AlreadyPacked} ->
					From !
						{chunk, {packed, Ref,
								{Packing, Packed, AbsoluteOffset, TXRoot, ChunkSize}}};
				{error, invalid_unpacked_size} ->
					?LOG_WARNING([{event, got_unpacked_chunk_of_invalid_size}]);
				{exception, Error} ->
					?LOG_ERROR([{event, failed_to_pack_chunk},
							{absolute_end_offset, AbsoluteOffset},
							{error, io_lib:format("~p", [Error])}])
			end,
			decrement_buffer_size(),
			worker(PackingState);
		{repack, Ref, From, Args} ->
			{RequestedPacking, Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize} = Args,
			case repack(RequestedPacking, Packing, AbsoluteOffset, TXRoot, Chunk,
					ChunkSize, PackingState, internal) of
				{ok, Packed, _RepackInput} ->
					From ! {chunk, {packed, Ref,
							{RequestedPacking, Packed, AbsoluteOffset, TXRoot, ChunkSize}}};
				{error, invalid_packed_size} ->
					?LOG_WARNING([{event, got_packed_chunk_of_invalid_size}]);
				{error, invalid_chunk_size} ->
					?LOG_WARNING([{event, got_packed_chunk_with_invalid_chunk_size}]);
				{error, invalid_padding} ->
					?LOG_WARNING([{event, got_packed_chunk_with_invalid_padding},
							{absolute_end_offset, AbsoluteOffset}]);
				{error, invalid_unpacked_size} ->
					?LOG_WARNING([{event, got_unpacked_chunk_of_invalid_size}]);
				{exception, Error} ->
					?LOG_ERROR([{event, failed_to_repack_chunk},
							{absolute_end_offset, AbsoluteOffset},
							{error, io_lib:format("~p", [Error])}])
			end,
			decrement_buffer_size(),
			worker(PackingState);
		{encipher, Ref, From, {Chunk, Entropy}} ->
			PackedChunk = encipher_replica_2_9_chunk(Chunk, Entropy),
			From ! {chunk, {enciphered, Ref, PackedChunk}},
			worker(PackingState);
		{decipher, Ref, From, {Chunk, Entropy}} ->
			UnpackedChunk = decipher_replica_2_9_chunk(Chunk, Entropy),
			From ! {chunk, {deciphered, Ref, UnpackedChunk}},
			worker(PackingState);
		{generate_entropy, Ref, From,
				{RewardAddr, BucketEndOffset, SubChunkStart, CacheEntropy}} ->
			Entropy = generate_replica_2_9_entropy(
				RewardAddr, BucketEndOffset, SubChunkStart, CacheEntropy),
			From ! {entropy_generated, Ref, Entropy},
			worker(PackingState)
	end.
%% @doc Derive the per-chunk packing key for the given packing scheme.
%% Returns {PackingAtom, Key} where Key is a SHA-256 digest. The key
%% derivation is consensus-critical and must not change.
chunk_key(spora_2_5, ChunkOffset, TXRoot) ->
	%% The presence of the absolute end offset in the key makes sure
	%% packing of every chunk is unique, even when the same chunk is
	%% present in the same transaction or across multiple transactions
	%% or blocks. The presence of the transaction root in the key
	%% ensures one cannot find data that has certain patterns after
	%% packing.
	{spora_2_5, crypto:hash(sha256, << ChunkOffset:256, TXRoot/binary >>)};
chunk_key({spora_2_6, RewardAddr}, ChunkOffset, TXRoot) ->
	%% The presence of the absolute end offset in the key makes sure
	%% packing of every chunk is unique, even when the same chunk is
	%% present in the same transaction or across multiple transactions
	%% or blocks. The presence of the transaction root in the key
	%% ensures one cannot find data that has certain patterns after
	%% packing. The presence of the reward address, combined with
	%% the 2.6 mining mechanics, puts a relatively low cap on the performance
	%% of a single dataset replica, essentially incentivizing miners to create
	%% more weave replicas per invested dollar.
	{
		spora_2_6,
		crypto:hash(sha256, << ChunkOffset:256, TXRoot:32/binary, RewardAddr/binary >>)
	};
chunk_key({composite, RewardAddr, PackingDiff}, ChunkOffset, TXRoot) ->
	%% This is only a part of the packing key. Each sub-chunk is packed using a different
	%% key composed from the key returned by this function and the relative sub-chunk offset.
	{
		composite,
		crypto:hash(sha256, << ChunkOffset:256, TXRoot:32/binary, PackingDiff:8,
				RewardAddr/binary >>)
	}.

%% @doc Pack the chunk under the given packing scheme. Returns
%% {ok, Packed, was_not_already_packed | already_packed} or an error tuple.
pack(unpacked, _ChunkOffset, _TXRoot, Chunk, _PackingState, _External) ->
	%% Allows to reuse the same interface for unpacking and repacking.
	{ok, Chunk, already_packed};
pack(unpacked_padded, _ChunkOffset, _TXRoot, Chunk, _PackingState, _External) ->
	%% Allows to reuse the same interface for unpacking and repacking.
	{ok, pad_chunk(Chunk), was_not_already_packed};
pack({replica_2_9, RewardAddr} = Packing, AbsoluteEndOffset, _TXRoot, Chunk,
		PackingState, _External) ->
	case byte_size(Chunk) > ?DATA_CHUNK_SIZE of
		true ->
			{error, invalid_unpacked_size};
		false ->
			%% Replica 2.9 packs per sub-chunk with shared entropy rather than
			%% using a single chunk key.
			RandomXState = get_randomx_state_by_packing(Packing, PackingState),
			PaddedChunk = pad_chunk(Chunk),
			SubChunks = get_sub_chunks(PaddedChunk),
			case pack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset,
					RandomXState, SubChunks) of
				{ok, Packed, _Entropy} ->
					{ok, Packed, was_not_already_packed};
				Error ->
					Error
			end
	end;
pack(Packing, ChunkOffset, TXRoot, Chunk, PackingState, External) ->
	case byte_size(Chunk) > ?DATA_CHUNK_SIZE of
		true ->
			{error, invalid_unpacked_size};
		false ->
			{PackingAtom, Key} = chunk_key(Packing, ChunkOffset, TXRoot),
			RandomXState = get_randomx_state_by_packing(Packing, PackingState),
			%% Timing of the NIF call is recorded in the packing histogram.
			case prometheus_histogram:observe_duration(packing_duration_milliseconds,
					[pack, PackingAtom, External],
					fun() ->
						ar_mine_randomx:randomx_encrypt_chunk(Packing, RandomXState,
								Key, Chunk)
					end) of
				{ok, Packed} ->
					{ok, Packed, was_not_already_packed};
				Error ->
					Error
			end
	end.

%% @doc Split a binary into ?COMPOSITE_PACKING_SUB_CHUNK_SIZE pieces.
%% The input must be an exact multiple of the sub-chunk size (a padded chunk).
get_sub_chunks(<< SubChunk:(?COMPOSITE_PACKING_SUB_CHUNK_SIZE)/binary, Rest/binary >>) ->
	[SubChunk | get_sub_chunks(Rest)];
get_sub_chunks(<<>>) ->
	[].

%% @doc Pack the list of sub-chunks, starting at sub-chunk offset 0.
pack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState, SubChunks) ->
	pack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState,
			0, SubChunks, [], []).
%% @doc Pack each sub-chunk with its own entropy slice; accumulate both the
%% packed sub-chunks and the entropy parts (in reverse, flattened on return).
pack_replica_2_9_sub_chunks(_RewardAddr, _AbsoluteEndOffset, _RandomXState,
		_SubChunkStartOffset, [], PackedSubChunks, EntropyParts) ->
	{ok, iolist_to_binary(lists:reverse(PackedSubChunks)),
			iolist_to_binary(lists:reverse(EntropyParts))};
pack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState,
		SubChunkStartOffset, [SubChunk | SubChunks], PackedSubChunks, EntropyParts) ->
	EntropySubChunkIndex = ar_replica_2_9:get_slice_index(AbsoluteEndOffset),
	%% NOTE(review): this is the 3-arity generate_replica_2_9_entropy, not the
	%% 4-arity one defined above — presumably a wrapper (defined elsewhere in
	%% this module) that fills in the CacheEntropy flag; verify.
	Entropy = generate_replica_2_9_entropy(RewardAddr, AbsoluteEndOffset,
			SubChunkStartOffset),
	case prometheus_histogram:observe_duration(packing_duration_milliseconds,
			[pack_sub_chunk, replica_2_9, internal],
			fun() ->
				ar_mine_randomx:randomx_encrypt_replica_2_9_sub_chunk(
						{RandomXState, Entropy, SubChunk, EntropySubChunkIndex})
			end) of
		{ok, PackedSubChunk} ->
			SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
			%% Remember the entropy slice actually applied to this sub-chunk.
			EntropyPart = binary:part(Entropy,
					EntropySubChunkIndex * ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
					?COMPOSITE_PACKING_SUB_CHUNK_SIZE),
			pack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState,
					SubChunkStartOffset + SubChunkSize, SubChunks,
					[PackedSubChunk | PackedSubChunks], [EntropyPart | EntropyParts]);
		Error ->
			Error
	end.

%% @doc Unpack the list of replica 2.9 sub-chunks, starting at sub-chunk offset 0.
unpack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState, SubChunks) ->
	unpack_replica_2_9_sub_chunks(
			RewardAddr, AbsoluteEndOffset, RandomXState, 0, SubChunks, []).
%% @doc Decrypt each sub-chunk with its entropy slice; returns {ok, Binary}
%% with the concatenated unpacked sub-chunks, or the first error encountered.
unpack_replica_2_9_sub_chunks(_RewardAddr, _AbsoluteEndOffset, _RandomXState,
		_SubChunkStartOffset, [], UnpackedSubChunks) ->
	{ok, iolist_to_binary(lists:reverse(UnpackedSubChunks))};
unpack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState,
		SubChunkStartOffset, [SubChunk | SubChunks], UnpackedSubChunks) ->
	EntropySubChunkIndex = ar_replica_2_9:get_slice_index(AbsoluteEndOffset),
	%% 3-arity variant — see the note in pack_replica_2_9_sub_chunks/7.
	Entropy = generate_replica_2_9_entropy(RewardAddr, AbsoluteEndOffset,
			SubChunkStartOffset),
	case prometheus_histogram:observe_duration(packing_duration_milliseconds,
			[unpack_sub_chunk, replica_2_9, internal],
			fun() ->
				ar_mine_randomx:randomx_decrypt_replica_2_9_sub_chunk(
						{RandomXState, Entropy, SubChunk, EntropySubChunkIndex})
			end) of
		{ok, UnpackedSubChunk} ->
			SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
			unpack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset, RandomXState,
					SubChunkStartOffset + SubChunkSize, SubChunks,
					[UnpackedSubChunk | UnpackedSubChunks]);
		Error ->
			Error
	end.

%% @doc Unpack a chunk stored under the given packing. Returns
%% {ok, Unpacked, was_not_already_unpacked | already_unpacked} or an error.
unpack({replica_2_9, RewardAddr} = Packing, AbsoluteEndOffset, _TXRoot, Chunk,
		ChunkSize, PackingState, _External) ->
	case validate_chunk_size(Packing, Chunk, ChunkSize) of
		{error, Reason} ->
			?LOG_ERROR([{event, unpack_chunk_size_error}, {error, Reason},
					{chunk_offset, AbsoluteEndOffset},
					{packing, ar_serialize:encode_packing(Packing, true)},
					{expected_chunk_size, ChunkSize},
					{actual_chunk_size, byte_size(Chunk)}]),
			{error, Reason};
		{ok, PackedSize} ->
			SubChunks = get_sub_chunks(Chunk),
			RandomXState = get_randomx_state_by_packing(Packing, PackingState),
			case unpack_replica_2_9_sub_chunks(RewardAddr, AbsoluteEndOffset,
					RandomXState, SubChunks) of
				{ok, Unpacked} ->
					%% Strip the zero padding appended when the chunk was packed.
					case ar_packing_server:unpad_chunk(Packing, Unpacked,
							ChunkSize, PackedSize) of
						error ->
							?LOG_WARNING([{event, unpad_chunk_error},
									{packed_size, PackedSize}, {chunk_size, ChunkSize},
									{absolute_end_offset, AbsoluteEndOffset}]),
							{error, invalid_padding};
						UnpackedChunk ->
							{ok, UnpackedChunk, was_not_already_unpacked}
					end;
				Error ->
					?LOG_ERROR([{event, unpack_replica_2_9_sub_chunks_error},
							{error, Error}]),
					Error
			end
	end;
unpack(unpacked, _ChunkOffset, _TXRoot, Chunk, _ChunkSize, _PackingState, _External) ->
	%% Allows to reuse the same interface for unpacking and repacking.
	{ok, Chunk, already_unpacked};
unpack(unpacked_padded, _ChunkOffset, _TXRoot, Chunk, ChunkSize,
		_PackingState, _External) ->
	{ok, binary:part(Chunk, 0, ChunkSize), was_not_already_unpacked};
unpack(Packing, ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External) ->
	case validate_chunk_size(Packing, Chunk, ChunkSize) of
		{error, Reason} ->
			?LOG_ERROR([{event, unpack_chunk_size_error}, {error, Reason},
					{chunk_offset, ChunkOffset},
					{packing, ar_serialize:encode_packing(Packing, true)},
					{expected_chunk_size, ChunkSize},
					{actual_chunk_size, byte_size(Chunk)}]),
			{error, Reason};
		{ok, _PackedSize} ->
			{PackingAtom, Key} = chunk_key(Packing, ChunkOffset, TXRoot),
			RandomXState = get_randomx_state_by_packing(Packing, PackingState),
			case prometheus_histogram:observe_duration(packing_duration_milliseconds,
					[unpack, PackingAtom, External],
					fun() ->
						ar_mine_randomx:randomx_decrypt_chunk(Packing, RandomXState,
								Key, Chunk, ChunkSize)
					end) of
				{ok, Unpacked} ->
					{ok, Unpacked, was_not_already_unpacked};
				Error ->
					Error
			end
	end.

%% @doc Convert a chunk from StoredPacking to RequestedPacking.
%% Returns {ok, Repacked, RepackInput} where RepackInput is the unpacked
%% chunk when it had to be computed, the original chunk when it was already
%% unpacked, or none when no unpacking was needed.
%% Clause order is significant: special cases (same packing, unpacked source/
%% target, replica_2_9, cross-scheme composite/spora) are handled before the
%% final single-NIF reencrypt clause.
repack(unpacked, unpacked, _ChunkOffset, _TXRoot, Chunk, _ChunkSize,
		_PackingState, _External) ->
	%% The difference with the next clause is that here we know the unpacked chunk
	%% and can explicitly return it as unpacked.
	{ok, Chunk, Chunk};
repack(RequestedPacking, StoredPacking, _ChunkOffset, _TXRoot, Chunk, _ChunkSize,
		_PackingState, _External) when StoredPacking == RequestedPacking ->
	%% StoredPacking and Packing are in the same format and neither is unpacked. To
	%% avoid unnecessary unpacking we'll return none for the UnpackedChunk. If a caller
	%% needs the UnpackedChunk they should call unpack explicitly.
	{ok, Chunk, none};
repack(RequestedPacking, unpacked_padded, ChunkOffset, TXRoot, Chunk, ChunkSize,
		PackingState, External) ->
	%% Drop the padding, then treat as a plain unpacked source.
	Unpacked = binary:part(Chunk, 0, ChunkSize),
	repack(RequestedPacking, unpacked, ChunkOffset, TXRoot, Unpacked, ChunkSize,
			PackingState, External);
repack(RequestedPacking, unpacked, ChunkOffset, TXRoot, Chunk, _ChunkSize,
		PackingState, External) ->
	case pack(RequestedPacking, ChunkOffset, TXRoot, Chunk, PackingState, External) of
		{ok, Packed, _WasAlreadyPacked} ->
			{ok, Packed, Chunk};
		Error ->
			Error
	end;
repack(unpacked_padded, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
		PackingState, External) ->
	case unpack(StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
			PackingState, External) of
		{ok, Unpacked, _WasAlreadyUnpacked} ->
			{ok, pad_chunk(Unpacked), Unpacked};
		Error ->
			Error
	end;
repack(unpacked, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
		PackingState, External) ->
	case unpack(StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
			PackingState, External) of
		{ok, Unpacked, _WasAlreadyUnpacked} ->
			{ok, Unpacked, Unpacked};
		Error ->
			Error
	end;
%% replica_2_9 (either side) has no single reencrypt NIF — go through
%% unpack-then-pack.
repack({replica_2_9, _} = RequestedPacking, StoredPacking,
		ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External) ->
	repack_no_nif({RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk,
			ChunkSize, PackingState, External});
repack(RequestedPacking, {replica_2_9, _} = StoredPacking,
		ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External) ->
	repack_no_nif({RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk,
			ChunkSize, PackingState, External});
%% composite -> composite with the same address but a lower target difficulty:
%% also no direct NIF path.
repack({composite, RequestedAddr, RequestedPackingDifficulty} = RequestedPacking,
		{composite, StoredAddr, StoredPackingDifficulty} = StoredPacking,
		ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External)
		when RequestedAddr == StoredAddr,
				StoredPackingDifficulty > RequestedPackingDifficulty ->
	repack_no_nif({RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk,
			ChunkSize, PackingState, External});
%% Cross-scheme conversions between composite and spora variants use different
%% RandomX states, so they cannot take the single-NIF path either.
repack({composite, _Addr, _PackingDifficulty} = RequestedPacking,
		{spora_2_6, _StoredAddr} = StoredPacking,
		ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External) ->
	repack_no_nif({RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk,
			ChunkSize, PackingState, External});
repack({spora_2_6, _StoredAddr} = RequestedPacking,
		{composite, _Addr, _PackingDifficulty} = StoredPacking,
		ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External) ->
	repack_no_nif({RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk,
			ChunkSize, PackingState, External});
repack({composite, _Addr, _PackingDifficulty} = RequestedPacking,
		spora_2_5 = StoredPacking,
		ChunkOffset, TXRoot, Chunk, ChunkSize, PackingState, External) ->
	repack_no_nif({RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk,
			ChunkSize, PackingState, External});
repack(RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
		PackingState, External) ->
	{SourcePackingAtom, UnpackKey} = chunk_key(StoredPacking, ChunkOffset, TXRoot),
	{TargetPackingAtom, PackKey} = chunk_key(RequestedPacking, ChunkOffset, TXRoot),
	case validate_chunk_size(StoredPacking, Chunk, ChunkSize) of
		{ok, _} ->
			PrometheusLabel = atom_to_list(SourcePackingAtom) ++ "_to_"
					++ atom_to_list(TargetPackingAtom),
			%% By the time we hit this branch both RequestedPacking and StoredPacking should
			%% use the same RandomX state (i.e. both are either spora_2_5/spora_2_6 or both
			%% composite).
			RandomXState = get_randomx_state_by_packing(RequestedPacking, PackingState),
			prometheus_histogram:observe_duration(packing_duration_milliseconds,
					[repack, PrometheusLabel, External],
					fun() ->
						ar_mine_randomx:randomx_reencrypt_chunk(StoredPacking,
								RequestedPacking, RandomXState, UnpackKey, PackKey,
								Chunk, ChunkSize)
					end);
		Error ->
			?LOG_ERROR([{event, repack_chunk_size_error}, {error, Error},
					{chunk_offset, ChunkOffset},
					{requested_packing, ar_serialize:encode_packing(RequestedPacking, true)},
					{stored_packing, ar_serialize:encode_packing(StoredPacking, true)},
					{expected_chunk_size, ChunkSize},
					{actual_chunk_size, byte_size(Chunk)}]),
			Error
	end.

%% @doc Repack by fully unpacking and then packing (no single reencrypt NIF).
repack_no_nif(Args) ->
	{RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
			PackingState, External} = Args,
	case unpack(StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize,
			PackingState, External) of
		{ok, Unpacked, _WasAlreadyUnpacked} ->
			case pack(RequestedPacking, ChunkOffset, TXRoot, Unpacked,
					PackingState, External) of
				{ok, Packed, _WasAlreadyPacked} ->
					{ok, Packed, Unpacked};
				Error2 ->
					Error2
			end;
		Error ->
			Error
	end.

%% @doc Validate the packed chunk size against the claimed (unpacked) size
%% for the given packing scheme. spora_2_5 chunks are padded up to the next
%% ?DATA_CHUNK_SIZE multiple; the other schemes require exactly one
%% ?DATA_CHUNK_SIZE (checked in validate_chunk_size/2).
validate_chunk_size(spora_2_5, Chunk, ChunkSize) ->
	PackedSize = byte_size(Chunk),
	case PackedSize == (((ChunkSize - 1) div (?DATA_CHUNK_SIZE)) + 1)
			* (?DATA_CHUNK_SIZE) of
		false ->
			{error, invalid_packed_size};
		true ->
			{ok, PackedSize}
	end;
validate_chunk_size({spora_2_6, _Addr}, Chunk, ChunkSize) ->
	validate_chunk_size(Chunk, ChunkSize);
validate_chunk_size({composite, _Addr, _PackingDifficulty}, Chunk, ChunkSize) ->
	validate_chunk_size(Chunk, ChunkSize);
validate_chunk_size({replica_2_9, _Addr}, Chunk, ChunkSize) ->
	validate_chunk_size(Chunk, ChunkSize).
%% @doc Check that a packed chunk is exactly ?DATA_CHUNK_SIZE bytes and that
%% the claimed unpacked size lies in (0, PackedSize]. Returns {ok, PackedSize}
%% or {error, invalid_packed_size | invalid_chunk_size}.
validate_chunk_size(Chunk, ChunkSize) ->
	PackedSize = byte_size(Chunk),
	if
		PackedSize /= ?DATA_CHUNK_SIZE ->
			{error, invalid_packed_size};
		ChunkSize =< 0 orelse ChunkSize > PackedSize ->
			%% In practice, we would never get here because the merkle proof
			%% validation does not allow ChunkSize to exceed ?DATA_CHUNK_SIZE.
			{error, invalid_chunk_size};
		true ->
			{ok, PackedSize}
	end.

%% @doc Bump the shared packing buffer counter (initialising it to 1 if absent).
increment_buffer_size() ->
	ets:update_counter(?MODULE, buffer_size, {2, 1}, {buffer_size, 1}).

%% @doc Decrease the shared packing buffer counter (initialising it to 0 if absent).
decrement_buffer_size() ->
	ets:update_counter(?MODULE, buffer_size, {2, -1}, {buffer_size, 0}).

%%%===================================================================
%%% Prometheus metrics
%%%===================================================================

%% @doc Publish the current buffer size as the packing_buffer_size gauge.
%% A missing ETS entry is silently ignored.
record_buffer_size_metric() ->
	case ets:lookup(?MODULE, buffer_size) of
		[{buffer_size, CurrentSize}] ->
			prometheus_gauge:set(packing_buffer_size, CurrentSize);
		_ ->
			ok
	end.

%% @doc Log actual packings and unpackings
%% where the StoredPacking does not match the RequestedPacking.
record_packing_request(_Type, RequestedPacking, StoredPacking)
		when RequestedPacking == StoredPacking ->
	ok;
record_packing_request(Type, RequestedPacking, StoredPacking) ->
	%% Label the counter with the packing that characterizes the operation:
	%% the source packing for unpack-like requests, the target otherwise.
	LabelPacking =
		case Type of
			pack -> RequestedPacking;
			repack -> RequestedPacking;
			encipher -> RequestedPacking;
			unpack -> StoredPacking;
			unpack_sub_chunk -> StoredPacking;
			decipher -> StoredPacking
		end,
	prometheus_counter:inc(packing_requests, [Type, packing_atom(LabelPacking)]).

%% @doc XOR a chunk with its entropy, sub-chunk by sub-chunk. Both binaries
%% must split evenly into ?COMPOSITE_PACKING_SUB_CHUNK_SIZE pieces.
exor_replica_2_9_chunk(Chunk, Entropy) ->
	iolist_to_binary(exor_replica_2_9_sub_chunks(Chunk, Entropy)).

exor_replica_2_9_sub_chunks(<<>>, <<>>) ->
	[];
exor_replica_2_9_sub_chunks(
		<< Sub:(?COMPOSITE_PACKING_SUB_CHUNK_SIZE)/binary, ChunkTail/binary >>,
		<< Part:(?COMPOSITE_PACKING_SUB_CHUNK_SIZE)/binary, EntropyTail/binary >>) ->
	[ar_mine_randomx:exor_sub_chunk(Sub, Part)
			| exor_replica_2_9_sub_chunks(ChunkTail, EntropyTail)].
%% @doc Acquire the per-key entropy-generation lock by inserting a marker row
%% into the module ETS table; spin with a 100 ms sleep while another process
%% holds it. RewardAddr/BucketEndOffset/SubChunkStartOffset are carried only
%% so the retry call keeps them available (they are not used here otherwise).
entropy_generation_lock(Key, RewardAddr, BucketEndOffset, SubChunkStartOffset) ->
	case ets:insert_new(?MODULE, {{entropy_generation_lock, Key}}) of
		true ->
			ok;
		false ->
			timer:sleep(100),
			entropy_generation_lock(Key, RewardAddr, BucketEndOffset, SubChunkStartOffset)
	end.

%% @doc Release the per-key entropy-generation lock.
entropy_generation_release(Key) ->
	ets:delete(?MODULE, {entropy_generation_lock, Key}).

%% @doc Record that entropy was generated for Key: bump the per-key counter
%% (first generation also stores the timestamp), bump the global entropy
%% counter, report if this generation looks redundant, and prune stale rows.
update_entropy_generation_stats(Key, RewardAddr, BucketEndOffset, SubChunkStartOffset) ->
	Tab = entropy_generation_stats,
	Time = erlang:monotonic_time(millisecond),
	ets:update_counter(Tab, Key, {2, 1}, {Key, 0, Time}),
	prometheus_counter:inc(replica_2_9_entropy_generated, ?REPLICA_2_9_ENTROPY_SIZE),
	maybe_report_redundant_entropy_generation(Key, RewardAddr, BucketEndOffset,
			SubChunkStartOffset),
	remove_outdated_entropy_generation_stats().

%% @doc If the same entropy Key was generated more than once within the stats
%% window, count it as redundant and log the rate.
maybe_report_redundant_entropy_generation(Key, RewardAddr, BucketEndOffset,
		SubChunkStartOffset) ->
	Tab = entropy_generation_stats,
	Now = erlang:monotonic_time(millisecond),
	[{_, Count, Time}] = ets:lookup(Tab, Key),
	case Count > 1 of
		true ->
			Partition = ar_node:get_partition_number(BucketEndOffset),
			prometheus_counter:inc(replica_2_9_entropy_stats, [Partition, redundant]),
			%% Clamp the elapsed time to at least 1 ms: a second generation
			%% within the same millisecond would otherwise make Now == Time
			%% and crash with badarith on the division below.
			ElapsedSeconds = max(Now - Time, 1) / 1_000,
			?LOG_DEBUG([{event, possibly_redundant_entropy_generation},
					{reward_addr, ar_util:encode(RewardAddr)},
					{key, ar_util:encode(Key)},
					{bucket_end_offset, BucketEndOffset},
					{sub_chunk_start_offset, SubChunkStartOffset},
					{count, Count},
					{seconds_since_first_generation, ElapsedSeconds},
					{avg_per_second, Count / ElapsedSeconds}]);
		false ->
			ok
	end.

%% @doc Delete stats rows older than ?ENTROPY_GENERATION_STATS_WINDOW_MS.
%% Only inspects the table head on each pass (the table is a set, so the
%% "first" key is arbitrary); stops as soon as the head is fresh — a cheap
%% amortized cleanup, not a full sweep. ets:first may return '$end_of_table';
%% the subsequent lookup then yields [] and the function falls through to ok.
remove_outdated_entropy_generation_stats() ->
	Tab = entropy_generation_stats,
	Cursor = ets:first(Tab),
	Now = erlang:monotonic_time(millisecond),
	case ets:lookup(Tab, Cursor) of
		[{_, _, Time}] when Time < Now - ?ENTROPY_GENERATION_STATS_WINDOW_MS ->
			ets:delete(Tab, Cursor),
			remove_outdated_entropy_generation_stats();
		_ ->
			ok
	end.
%%%===================================================================

%% @doc End-to-end round-trip test: packs a variety of chunk sizes and
%% offsets under every supported scheme, asserts each packed result differs
%% from the input, unpacks back to the original, and finally checks that all
%% packed outputs are pairwise distinct.
pack_test() ->
	Root = crypto:strong_rand_bytes(32),
	Cases = [
		{<<1>>, 1, Root},
		{<<1>>, 2, Root},
		{<<0>>, 1, crypto:strong_rand_bytes(32)},
		{<<0>>, 2, crypto:strong_rand_bytes(32)},
		{<<0>>, 1234234534535, crypto:strong_rand_bytes(32)},
		{crypto:strong_rand_bytes(2), 234134234, crypto:strong_rand_bytes(32)},
		{crypto:strong_rand_bytes(3), 333, crypto:strong_rand_bytes(32)},
		{crypto:strong_rand_bytes(15), 9999999999999999999999999999,
				crypto:strong_rand_bytes(32)},
		{crypto:strong_rand_bytes(16), 16, crypto:strong_rand_bytes(32)},
		{crypto:strong_rand_bytes(256 * 1024), 100000000000000,
				crypto:strong_rand_bytes(32)},
		{crypto:strong_rand_bytes(256 * 1024 - 1), 100000000000000,
				crypto:strong_rand_bytes(32)}
	],
	PackingState = init_packing_state(),
	PackedList = lists:flatten(lists:map(
		fun({Chunk, Offset, TXRoot}) ->
			%% Fresh reward addresses per case so packings never collide.
			ECDSA = ar_wallet:to_address(ar_wallet:new({ecdsa, secp256k1})),
			EDDSA = ar_wallet:to_address(ar_wallet:new({eddsa, ed25519})),
			{ok, Chunk, already_packed} =
					pack(unpacked, Offset, TXRoot, Chunk, PackingState, external),
			{ok, Packed, was_not_already_packed} =
					pack(spora_2_5, Offset, TXRoot, Chunk, PackingState, external),
			{ok, Packed2, was_not_already_packed} =
					pack({spora_2_6, ECDSA}, Offset, TXRoot, Chunk, PackingState, external),
			{ok, Packed3, was_not_already_packed} =
					pack({spora_2_6, EDDSA}, Offset, TXRoot, Chunk, PackingState, external),
			{ok, Packed4, was_not_already_packed} =
					pack({composite, ECDSA, 1}, Offset, TXRoot, Chunk, PackingState,
							external),
			{ok, Packed5, was_not_already_packed} =
					pack({composite, EDDSA, 1}, Offset, TXRoot, Chunk, PackingState,
							external),
			{ok, Packed6, was_not_already_packed} =
					pack({composite, ECDSA, 2}, Offset, TXRoot, Chunk, PackingState,
							external),
			{ok, Packed7, was_not_already_packed} =
					pack({composite, EDDSA, 2}, Offset, TXRoot, Chunk, PackingState,
							external),
			?assertNotEqual(Packed, Chunk),
			?assertNotEqual(Packed2, Chunk),
			?assertNotEqual(Packed3, Chunk),
			?assertNotEqual(Packed4, Chunk),
			?assertNotEqual(Packed5, Chunk),
			?assertNotEqual(Packed6, Chunk),
			?assertNotEqual(Packed7, Chunk),
			?assertEqual({ok, Packed, already_unpacked},
					unpack(unpacked, Offset, TXRoot, Packed, byte_size(Chunk),
							PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack(spora_2_5, Offset, TXRoot, Packed, byte_size(Chunk),
							PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack({spora_2_6, ECDSA}, Offset, TXRoot, Packed2, byte_size(Chunk),
							PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack({spora_2_6, EDDSA}, Offset, TXRoot, Packed3, byte_size(Chunk),
							PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack({composite, ECDSA, 1}, Offset, TXRoot, Packed4,
							byte_size(Chunk), PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack({composite, EDDSA, 1}, Offset, TXRoot, Packed5,
							byte_size(Chunk), PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack({composite, ECDSA, 2}, Offset, TXRoot, Packed6,
							byte_size(Chunk), PackingState, internal)),
			?assertEqual({ok, Chunk, was_not_already_unpacked},
					unpack({composite, EDDSA, 2}, Offset, TXRoot, Packed7,
							byte_size(Chunk), PackingState, internal)),
			[Packed, Packed2, Packed3, Packed4, Packed5, Packed6, Packed7]
		end,
		Cases
	)),
	%% All packed outputs must be pairwise distinct.
	?assertEqual(length(PackedList), sets:size(sets:from_list(PackedList))).



================================================
FILE: apps/arweave/src/ar_packing_sup.erl
================================================
%%% @doc Supervisor for the packing server; also owns the shared ETS tables
%%% used by ar_packing_server and the entropy cache.
-module(ar_packing_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% @doc Create the public named ETS tables used by the packing server and
%% the entropy cache (owned by the supervisor so they survive child
%% restarts), then start ar_packing_server under one_for_one.
init([]) ->
	ets:new(ar_packing_server, [set, public, named_table]),
	ets:new(ar_entropy_cache, [set, public, named_table]),
	ets:new(ar_entropy_cache_ordered_keys, [ordered_set, public, named_table]),
	{ok, {{one_for_one, 5, 10}, [
		?CHILD(ar_packing_server, worker)
	]}}.



================================================
FILE: apps/arweave/src/ar_patricia_tree.erl
================================================
%%% @doc An implementation of a tree closely resembling a merkle patricia tree.
%%% The tree is a map from key prefixes to
%%% {Parent, Children, Hash, Suffix, Value} nodes, plus a size entry.
-module(ar_patricia_tree).

-export([new/0, insert/3, get/2, size/1, compute_hash/2, foldr/3, is_empty/1,
		from_proplist/1, delete/2, get_range/2, get_range/3]).

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return a new tree.
new() ->
	#{ root => {no_parent, gb_sets:new(), no_hash, no_prefix, no_value}, size => 0 }.

%% @doc Insert the given value under the given binary key.
insert(Key, Value, Tree) when is_binary(Key) ->
	insert(Key, Value, Tree, 1, root).

%% @doc Get the value stored under the given key or not_found.
%% Non-binary keys always yield not_found.
get(Key, Tree) when is_binary(Key) ->
	case get(Key, Tree, 1) of
		{_, {_, _, _, _, {v, Value}}} ->
			Value;
		not_found ->
			not_found
	end;
get(_Key, _Tree) ->
	not_found.

%% @doc Return the number of values in the tree.
size(Tree) ->
	maps:get(size, Tree).

%% @doc Compute the root hash by recursively hashing the tree values.
%% Each key value pair is hashed via the provided hash function. The hashes of the siblings
%% are combined using ar_deep_hash:hash/1. The keys are traversed in the alphabetical order.
compute_hash(#{ size := 0 } = Tree, _HashFun) ->
	{<<>>, Tree, #{}};
compute_hash(Tree, HashFun) ->
	compute_hash(Tree, HashFun, root, #{}).

%% @doc Traverse the keys in the reversed alphabetical order iteratively applying
%% the given function of a key, a value, and an accumulator.
foldr(Fun, Acc, Tree) ->
	case is_empty(Tree) of
		true ->
			Acc;
		false ->
			foldr(Fun, Acc, Tree, root)
	end.

%% @doc Return true if the tree stores no values.
is_empty(Tree) ->
	maps:get(size, Tree) == 0.

%% @doc Create a tree from the given list of {Key, Value} pairs.
from_proplist(Proplist) ->
	lists:foldl(
		fun({Key, Value}, Acc) ->
			ar_patricia_tree:insert(Key, Value, Acc)
		end,
		new(),
		Proplist
	).

%% @doc Delete the given key.
delete(Key, Tree) ->
	delete(Key, Tree, 1).

%% @doc Return the list of up to Count key-value tuples collected by traversing the keys
%% in the alphabetical order. The keys in the returned list are sorted in the descending order.
get_range(Count, Tree) ->
	Iterator = iterator(Tree),
	get_range(Iterator, Count, 0, []).

%% @doc Return the list of up to Count key-value tuples collected by traversing the keys
%% in the alphabetical order starting from the Start key. The keys in the returned list
%% are sorted in the descending order. If Start is not a key or Count is not positive,
%% return an empty list.
get_range(Start, Count, Tree) when is_binary(Start) ->
	Iterator = iterator_from(Start, Tree),
	get_range(Iterator, Count, 0, []);
get_range(_, _, _) ->
	[].

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Insert Key/Value starting the prefix scan at byte position Level under
%% node Parent. Walks the flat node map one prefix at a time, splitting
%% nodes whose suffixes diverge from the key's remaining bytes. Every path
%% that modifies a node invalidates the memoized hashes up to the root.
insert(Key, Value, Tree, Level, Parent) ->
	{KeyPrefix, KeySuffix} = split_by_pos(Key, Level),
	case maps:get(KeyPrefix, Tree, not_found) of
		{NodeParent, NodeChildren, NodeHash, NodeSuffix, NodeValue} ->
			{Common, KeySuffix2, NodeSuffix2} = join(KeySuffix, NodeSuffix),
			case {KeySuffix == NodeSuffix, Common == KeySuffix, Common == NodeSuffix} of
				%% Exact match: overwrite (or set) the node's value in place.
				%% Size only grows when the node previously held no value.
				{true, _, _} ->
					Size = maps:get(size, Tree),
					Size2 = case NodeValue of no_value -> Size + 1; _ -> Size end,
					UpdatedNode = {NodeParent, NodeChildren, no_hash, NodeSuffix, {v, Value}},
					invalidate_hash(NodeParent, Tree#{ KeyPrefix => UpdatedNode, size => Size2 });
				%% The node's suffix is a proper prefix of the key's suffix:
				%% descend past this node and continue the scan below it.
				{_, _, true} when KeySuffix > NodeSuffix ->
					insert(Key, Value, Tree, Level + byte_size(NodeSuffix) + 1, KeyPrefix);
				%% The key's suffix is a proper prefix of the node's suffix:
				%% split the node, making a new pivot that carries the value
				%% and re-keying the old node one byte deeper.
				{_, true, _} when KeySuffix < NodeSuffix ->
					{Head, NodeSuffix3} = strip_head(NodeSuffix2),
					UpdatedNodeKey = << KeyPrefix/binary, Common/binary, Head/binary >>,
					PivotChildren = gb_sets:from_list([UpdatedNodeKey]),
					PivotNode = {NodeParent, PivotChildren, no_hash, KeySuffix, {v, Value}},
					UpdatedNode = {KeyPrefix, NodeChildren, NodeHash, NodeSuffix3, NodeValue},
					Size = maps:get(size, Tree),
					Tree2 = Tree#{ KeyPrefix => PivotNode, UpdatedNodeKey => UpdatedNode,
							size => Size + 1 },
					%% The re-keyed node's children must point at its new map key.
					Tree3 = update_children_parent(UpdatedNodeKey, NodeChildren, Tree2),
					invalidate_hash(NodeParent, Tree3);
				%% The suffixes diverge after a (possibly empty) common part:
				%% create a valueless pivot with two children -- the new leaf
				%% and the re-keyed old node.
				{false, false, false} ->
					{KeyHead, KeySuffix3} = strip_head(KeySuffix2),
					NewNodeKey = << KeyPrefix/binary, Common/binary, KeyHead/binary >>,
					NewNode = {KeyPrefix, gb_sets:new(), no_hash, KeySuffix3, {v, Value}},
					{NodeKeyHead, NodeSuffix3} = strip_head(NodeSuffix2),
					UpdatedNodeKey = << KeyPrefix/binary, Common/binary, NodeKeyHead/binary >>,
					UpdatedNode = {KeyPrefix, NodeChildren, NodeHash, NodeSuffix3, NodeValue},
					PivotChildren = gb_sets:from_list([NewNodeKey, UpdatedNodeKey]),
					PivotNode = {NodeParent, PivotChildren, no_hash, Common, no_value},
					Size = maps:get(size, Tree),
					Tree2 = Tree#{ NewNodeKey => NewNode,
							UpdatedNodeKey => UpdatedNode, KeyPrefix => PivotNode,
							size => Size + 1 },
					Tree3 = update_children_parent(UpdatedNodeKey, NodeChildren, Tree2),
					invalidate_hash(NodeParent, Tree3)
			end;
		%% No node at this prefix: attach a fresh leaf under Parent.
		not_found ->
			NewNode = {Parent, gb_sets:new(), no_hash, KeySuffix, {v, Value}},
			{NextParent, Children, _Hash, NextSuffix, ParentValue} = maps:get(Parent, Tree),
			UpdatedChildren = gb_sets:insert(KeyPrefix, Children),
			Size = maps:get(size, Tree),
			Tree2 = Tree#{ KeyPrefix => NewNode,
					Parent => {NextParent, UpdatedChildren, no_hash, NextSuffix, ParentValue},
					size => Size + 1 },
			invalidate_hash(NextParent, Tree2)
	end.

%% Split Binary into the first Pos bytes and the rest; the empty binary
%% splits into two empty binaries regardless of Pos.
split_by_pos(<<>>, _Pos) ->
	{<<>>, <<>>};
split_by_pos(Binary, Pos) ->
	{binary:part(Binary, {0, Pos}), binary:part(Binary, {Pos, byte_size(Binary) - Pos})}.

join(Binary1, Binary2) ->
	%% Return the longest common prefix and the diverged suffixes of the two binaries.
	PrefixLen = binary:longest_common_prefix([Binary1, Binary2]),
	Prefix = binary:part(Binary1, {0, PrefixLen}),
	Suffix1 = binary:part(Binary1, {PrefixLen, byte_size(Binary1) - PrefixLen}),
	Suffix2 = binary:part(Binary2, {PrefixLen, byte_size(Binary2) - PrefixLen}),
	{Prefix, Suffix1, Suffix2}.

%% Point every node in Children at the (re-keyed) parent Key.
update_children_parent(Key, Children, Tree) ->
	gb_sets:fold(
		fun(ChildKey, Acc) ->
			{_, C, H, S, V} = maps:get(ChildKey, Acc),
			ChildNode2 = {Key, C, H, S, V},
			Acc#{ ChildKey => ChildNode2 }
		end,
		Tree,
		Children
	).

%% Walk up from Key to the root, resetting each memoized hash to no_hash so
%% the next compute_hash/2 recomputes the affected path only.
invalidate_hash(no_parent, Tree) ->
	Tree;
invalidate_hash(Key, Tree) ->
	{Parent, Children, _Hash, Suffix, Value} = maps:get(Key, Tree),
	InvalidatedHashNode = {Parent, Children, no_hash, Suffix, Value},
	invalidate_hash(Parent, Tree#{ Key => InvalidatedHashNode }).

%% Split off the first byte of Binary: {FirstByte, Rest}.
strip_head(Binary) ->
	{binary:part(Binary, {0, 1}), binary:part(Binary, {1, byte_size(Binary) - 1})}.
%% Look up Key starting the prefix scan at byte position Level. Returns
%% {MapKey, Node} for a node that holds a value, or not_found.
get(Key, Tree, Level) ->
	{KeyPrefix, KeySuffix} = split_by_pos(Key, Level),
	case maps:get(KeyPrefix, Tree, not_found) of
		not_found ->
			not_found;
		{_, _, _, Suffix, MaybeValue} = NodeData ->
			Len = binary:longest_common_prefix([KeySuffix, Suffix]),
			SuffixSize = byte_size(Suffix),
			case Len < SuffixSize of
				%% The sought key diverges inside this node's suffix.
				true ->
					not_found;
				false ->
					case KeySuffix == Suffix of
						%% The node's suffix is a prefix of the key: descend.
						false ->
							get(Key, Tree, Level + SuffixSize + 1);
						true ->
							case MaybeValue of
								%% Structural (pivot) node without a value.
								no_value ->
									not_found;
								{v, _Value} ->
									{KeyPrefix, NodeData}
							end
					end
			end
	end.

%% Recursively compute (and memoize) the hash of the subtree rooted at
%% KeyPrefix. Child hashes are collected in alphabetical order and combined
%% with ar_deep_hash:hash/1; a node that holds a value contributes its own
%% key-value hash first. UpdateMap records, per recomputed hash, the inputs
%% it was derived from. Memoized (non-no_hash) subtrees are returned as-is.
compute_hash(Tree, HashFun, KeyPrefix, UpdateMap) ->
	{Parent, Children, Hash, Suffix, MaybeValue} = maps:get(KeyPrefix, Tree),
	case Hash of
		no_hash ->
			case gb_sets:is_empty(Children) of
				%% A leaf always holds a value; hash the full key and value.
				true ->
					{v, Value} = MaybeValue,
					Key = << KeyPrefix/binary, Suffix/binary >>,
					NewHash = HashFun(Key, Value),
					NewTree = Tree#{ KeyPrefix => {Parent, gb_sets:new(), NewHash, Suffix, {v, Value}} },
					UpdateMap2 = maps:put({NewHash, KeyPrefix}, {Key, Value}, UpdateMap),
					{NewHash, NewTree, UpdateMap2};
				false ->
					%% gb_sets_foldr visits children largest-first, so the
					%% prepend below leaves Hashes in alphabetical order.
					{Hashes, UpdatedTree, UpdateMap2} = gb_sets_foldr(
						fun(Child, {HashesAcc, TreeAcc, UpdateMapAcc}) ->
							{ChildHash, TreeAcc2, UpdateMapAcc2} =
								compute_hash(TreeAcc, HashFun, Child, UpdateMapAcc),
							{[{ChildHash, Child} | HashesAcc], TreeAcc2, UpdateMapAcc2}
						end,
						{[], Tree, UpdateMap},
						Children
					),
					{NewHash, UpdateMap3} =
						case MaybeValue of
							{v, Value} ->
								Key = << KeyPrefix/binary, Suffix/binary >>,
								NewHash2 = HashFun(Key, Value),
								Hashes2 = [H || {H, _} <- Hashes],
								NewHash3 = ar_deep_hash:hash([NewHash2 | Hashes2]),
								{NewHash3, UpdateMap2#{
									{NewHash2, KeyPrefix} => {Key, Value},
									{NewHash3, KeyPrefix} => [{NewHash2, KeyPrefix} | Hashes] }};
							no_value ->
								case Hashes of
									%% A single child's hash is passed through
									%% without an extra deep-hash wrapper.
									[{SingleHash, _}] ->
										{SingleHash, UpdateMap2#{ {SingleHash, KeyPrefix} => Hashes }};
									_ ->
										Hashes2 = [H || {H, _} <- Hashes],
										NewHash2 = ar_deep_hash:hash(Hashes2),
										{NewHash2, UpdateMap2#{ {NewHash2, KeyPrefix} => Hashes }}
								end
						end,
					{NewHash,
						UpdatedTree#{ KeyPrefix => {Parent, Children, NewHash, Suffix, MaybeValue} },
						UpdateMap3}
			end;
		%% Memoized hash still valid: nothing to recompute.
		_ ->
			{Hash, Tree, UpdateMap}
	end.

%% Fold over the subtree rooted at KeyPrefix in reverse alphabetical order.
%% A node's own value (if any) is applied after all of its children.
foldr(Fun, Acc, Tree, KeyPrefix) ->
	{_, Children, _, Suffix, MaybeValue} = maps:get(KeyPrefix, Tree),
	case gb_sets:is_empty(Children) of
		true ->
			{v, Value} = MaybeValue,
			Key = << KeyPrefix/binary, Suffix/binary >>,
			Fun(Key, Value, Acc);
		false ->
			Acc2 = gb_sets_foldr(
				fun(Child, ChildrenAcc) ->
					foldr(Fun, ChildrenAcc, Tree, Child)
				end,
				Acc,
				Children
			),
			case MaybeValue of
				{v, Value} ->
					Key = << KeyPrefix/binary, Suffix/binary >>,
					Fun(Key, Value, Acc2);
				_ ->
					Acc2
			end
	end.

%% Right fold over a gb_set: applies Fun to the largest element first.
gb_sets_foldr(Fun, Acc, G) ->
	case gb_sets:is_empty(G) of
		true ->
			Acc;
		false ->
			{Largest, G2} = gb_sets:take_largest(G),
			gb_sets_foldr(Fun, Fun(Largest, Acc), G2)
	end.

%% Delete Key, scanning prefixes from byte position Level. A valueless or
%% absent key leaves the tree unchanged. Leaf removal may collapse empty
%% valueless ancestors via delete2/3; an internal node just drops its value.
delete(Key, Tree, Level) ->
	{KeyPrefix, KeySuffix} = split_by_pos(Key, Level),
	case maps:get(KeyPrefix, Tree, not_found) of
		not_found ->
			Tree;
		{Parent, Children, _Hash, Suffix, MaybeValue} ->
			Len = binary:longest_common_prefix([KeySuffix, Suffix]),
			SuffixSize = byte_size(Suffix),
			case Len < SuffixSize of
				true ->
					Tree;
				false ->
					case KeySuffix == Suffix of
						false ->
							delete(Key, Tree, Level + SuffixSize + 1);
						true ->
							case MaybeValue of
								no_value ->
									Tree;
								_ ->
									Size = maps:get(size, Tree),
									Tree2 = Tree#{ size => Size - 1 },
									case gb_sets:is_empty(Children) of
										true ->
											delete2(KeyPrefix, Parent, Tree2);
										false ->
											Node2 = {Parent, Children, no_hash, Suffix, no_value},
											invalidate_hash(Parent, Tree2#{ KeyPrefix => Node2 })
									end
							end
					end
			end
	end.

%% Remove the node at Key and, while the parent is a non-root valueless node
%% left with no children, keep removing upwards; then invalidate hashes on
%% the remaining path to the root.
delete2(Key, Parent, Tree) ->
	Tree2 = maps:remove(Key, Tree),
	{ParentParent, ParentChildren, _Hash, Suffix, ParentValue} = maps:get(Parent, Tree),
	ParentChildren2 = gb_sets:del_element(Key, ParentChildren),
	Tree3 = Tree2#{ Parent => {ParentParent, ParentChildren2, no_hash, Suffix, ParentValue} },
	case {Parent == root, gb_sets:is_empty(ParentChildren2), ParentValue} of
		{false, true, no_value} ->
			delete2(Parent, ParentParent, Tree3);
		_ ->
			invalidate_hash(ParentParent, Tree3)
	end.

%% Iterator positioned at the alphabetically smallest value-holding node.
iterator(Tree) ->
	iterator(Tree, root).
%% Descend from Key to the first (alphabetically smallest) node that holds
%% a value. Returns {{MapKey, Node}, Tree} or none for an empty tree.
iterator(Tree, Key) ->
	{_, Children, _, _, MaybeValue} = NodeData = maps:get(Key, Tree),
	case MaybeValue of
		{v, _Value} ->
			{{Key, NodeData}, Tree};
		no_value ->
			case gb_sets:is_empty(Children) of
				true ->
					none;
				false ->
					iterator(Tree, gb_sets:smallest(Children))
			end
	end.

%% Iterator positioned exactly at the Start key; none when Start is absent
%% (or holds no value).
iterator_from(Start, Tree) ->
	case get(Start, Tree, 1) of
		not_found ->
			none;
		{Prefix, NodeData} ->
			{{Prefix, NodeData}, Tree}
	end.

%% Accumulate up to Count pairs from the iterator; because each new pair is
%% prepended, the result comes out in descending key order.
get_range(_Iterator, Count, Count, List) ->
	List;
get_range(Iterator, Count, Got, List) ->
	case next(Iterator) of
		none ->
			List;
		{{Key, Value}, UpdatedIterator} ->
			get_range(UpdatedIterator, Count, Got + 1, [{Key, Value} | List])
	end.

%% Emit the current key-value pair and advance the iterator to the next
%% value-holding node in alphabetical order (children first, then siblings).
next({{Prefix, {Parent, Children, _Hash, Suffix, {v, Value}}}, Tree}) ->
	Key = << Prefix/binary, Suffix/binary >>,
	{{Key, Value}, get_next_start_from_children(Prefix, Parent, Children, Tree)};
next(none) ->
	none.

%% Find the next value-holding node below Key: descend into the smallest
%% child, skipping valueless pivots; when there are no children, move on to
%% Key's next sibling (or an ancestor's sibling).
get_next_start_from_children(Key, Parent, Children, Tree) ->
	NextChild =
		case gb_sets:is_empty(Children) of
			true ->
				none;
			false ->
				Child = gb_sets:smallest(Children),
				{Child, maps:get(Child, Tree)}
		end,
	case NextChild of
		none ->
			get_next_start_from_sibling(Key, Parent, Tree);
		_ ->
			{ChildKey, {_, ChildChildren, _, _, MaybeValue}} = NextChild,
			case MaybeValue of
				no_value ->
					get_next_start_from_children(ChildKey, Key, ChildChildren, Tree);
				{v, _} ->
					{NextChild, Tree}
			end
	end.
%% Find the next value-holding node after Key among its siblings under
%% Parent; when Key is the last sibling, recurse upwards. Exhausting the
%% root's children ends the iteration.
get_next_start_from_sibling(root, no_parent, _Tree) ->
	none;
get_next_start_from_sibling(Key, Parent, Tree) ->
	{ParentParent, Children, _, _, _} = maps:get(Parent, Tree),
	Iterator = gb_sets:iterator_from(Key, Children),
	%% iterator_from may yield Key itself first; skip it to land on the
	%% strictly-next sibling.
	Start =
		case gb_sets:next(Iterator) of
			none ->
				none;
			{Key, UpdatedIterator} ->
				gb_sets:next(UpdatedIterator);
			Next ->
				Next
		end,
	case Start of
		none ->
			get_next_start_from_sibling(Parent, ParentParent, Tree);
		{NextSiblingKey, _} ->
			NextSibling = maps:get(NextSiblingKey, Tree),
			{_, NextSiblingChildren, _, _, MaybeValue} = NextSibling,
			case MaybeValue of
				no_value ->
					%% NOTE(review): Key is passed as the sibling's parent here
					%% although the sibling's parent is Parent; this argument
					%% appears to only be consulted when the descent runs out of
					%% children, which a valueless node presumably never does --
					%% confirm the invariant before relying on it.
					get_next_start_from_children(NextSiblingKey, Key, NextSiblingChildren, Tree);
				{v, _} ->
					{{NextSiblingKey, NextSibling}, Tree}
			end
	end.

%%%===================================================================
%%% Tests.
%%%===================================================================

trie_test() -> T1 = new(), ?assertEqual(not_found, get(<<"aaa">>, T1)), ?assertEqual(true, is_empty(T1)), HashFun = fun(K, V) -> crypto:hash(sha256, << K/binary, (term_to_binary(V))/binary >>) end, ?assertEqual(<<>>, element(1, compute_hash(T1, HashFun))), ?assertEqual(true, is_empty(delete(<<"a">>, T1))), ?assertEqual(0, ar_patricia_tree:size(T1)), ?assertEqual([], get_range(1, T1)), ?assertEqual([], get_range(<<>>, 1, T1)), ?assertEqual([], get_range(0, T1)), ?assertEqual([], get_range(<<>>, 0, T1)), ?assertEqual([], get_range(<<"aaa">>, 10, T1)), %% a -> a -> 1 %% b -> 1 T1_2 = insert(<<"ab">>, 1, insert(<<"aa">>, 1, T1)), ?assertEqual(not_found, get(<<"a">>, T1_2)), T1_3 = delete(<<"ab">>, delete(<<"aa">>, T1_2)), ?assertEqual(true, is_empty(T1_3)), ?assertEqual(<<>>, element(1, compute_hash(T1_3, HashFun))), ?assertEqual(true, is_empty(delete(<<"a">>, T1_3))), ?assertEqual(not_found, get(<<"a">>, T1_3)), %% aaa -> 1 T2 = insert(<<"aaa">>, 1, T1_3), ?assertEqual(false, is_empty(T2)), ?assertEqual(1, ar_patricia_tree:size(T2)), {H2, T2_2, _} = compute_hash(T2, HashFun), {H2_2, _, _} = compute_hash(T2_2,
HashFun), ?assertEqual(H2, H2_2), ?assertEqual(1, get(<<"aaa">>, T2)), ?assertEqual([], get_range(<<>>, 1, T2)), ?assertEqual([{<<"aaa">>, 1}], get_range(1, T2)), ?assertEqual([{<<"aaa">>, 1}], get_range(<<"aaa">>, 1, T2)), %% aa -> a -> 1 %% b -> 2 T3 = insert(<<"aab">>, 2, T2), ?assertEqual(2, ar_patricia_tree:size(T3)), {H3, _, _} = compute_hash(T3, HashFun), ?assertNotEqual(H2, H3), {H3_2, _, _} = compute_hash(insert(<<"aaa">>, 1, insert(<<"aab">>, 2, new())), HashFun), ?assertEqual(H3, H3_2), {H3_3, _, _} = compute_hash( insert(<<"aaa">>, 1, insert(<<"aab">>, 2, insert(<<"a">>, 3, new()))), HashFun ), {H3_4, _, _} = compute_hash(insert(<<"a">>, 3, T3), HashFun), ?assertEqual(H3_3, H3_4), ?assertEqual(1, get(<<"aaa">>, T3)), ?assertEqual(2, get(<<"aab">>, T3)), ?assertEqual([{<<"aaa">>, 1}], get_range(<<"aaa">>, 1, T3)), ?assertEqual([{<<"aaa">>, 1}], get_range(1, T3)), ?assertEqual([{<<"aab">>, 2}, {<<"aaa">>, 1}], get_range(<<"aaa">>, 2, T3)), ?assertEqual([{<<"aab">>, 2}, {<<"aaa">>, 1}], get_range(2, T3)), ?assertEqual([{<<"aab">>, 2}, {<<"aaa">>, 1}], get_range(<<"aaa">>, 20, T3)), ?assertEqual([{<<"aab">>, 2}, {<<"aaa">>, 1}], get_range(20, T3)), ?assertEqual([], get_range(<<"a">>, 2, T3)), ?assertEqual([], get_range(<<"aa">>, 2, T3)), ?assertEqual([{<<"aab">>, 2}], get_range(<<"aab">>, 2, T3)), ?assertEqual([], get_range(<<"aac">>, 2, T3)), ?assertEqual([], get_range(<<"b">>, 2, T3)), T4 = insert(<<"aab">>, 3, T3), ?assertEqual(2, ar_patricia_tree:size(T4)), {H4, _, _} = compute_hash(T4, HashFun), ?assertNotEqual(H3, H4), ?assertEqual(1, get(<<"aaa">>, T4)), ?assertEqual(3, get(<<"aab">>, T4)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 T5 = insert(<<"ab">>, 2, T4), ?assertEqual(3, ar_patricia_tree:size(T5)), ?assertEqual(1, gb_sets:size(element(2, maps:get(root, T5)))), {H5, _, _} = compute_hash(T5, HashFun), ?assertNotEqual(H4, H5), {H5_2, _, _} = compute_hash( insert(<<"aab">>, 3, insert(<<"aaa">>, 1, insert(<<"ab">>, 2, new()))), HashFun ), 
?assertEqual(H5, H5_2), {_H5_3, T5_2, _} = compute_hash(insert(<<"aaa">>, 1, new()), HashFun), {_H5_4, T5_3, _} = compute_hash(insert(<<"ab">>, 2, T5_2), HashFun), {H5_5, _T5_4, _} = compute_hash(insert(<<"aab">>, 3, T5_3), HashFun), ?assertEqual(H5, H5_5), ?assertEqual(1, get(<<"aaa">>, T5)), ?assertEqual(3, get(<<"aab">>, T5)), ?assertEqual(2, get(<<"ab">>, T5)), ?assertEqual([{<<"ab">>, 2}, {<<"aab">>, 3}], get_range(<<"aab">>, 20, T5)), ?assertEqual([{<<"aab">>, 3}, {<<"aaa">>, 1}], get_range(2, T5)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 T6 = insert(<<"abc">>, 4, T5), ?assertEqual(4, ar_patricia_tree:size(T6)), ?assertEqual(1, gb_sets:size(element(2, maps:get(root, T6)))), ?assertEqual(1, get(<<"aaa">>, T6)), ?assertEqual(3, get(<<"aab">>, T6)), ?assertEqual(2, get(<<"ab">>, T6)), ?assertEqual(4, get(<<"abc">>, T6)), ?assertEqual([{<<"abc">>, 4}, {<<"ab">>, 2}, {<<"aab">>, 3}], get_range(<<"aab">>, 20, T6)), ?assertEqual([{<<"abc">>, 4}], get_range(<<"abc">>, 20, T6)), ?assertEqual([{<<"abc">>, 4}, {<<"ab">>, 2}], get_range(<<"ab">>, 20, T6)), ?assertEqual( [{<<"abc">>, 4}, {<<"ab">>, 2}, {<<"aab">>, 3}, {<<"aaa">>, 1}], get_range(20, T6) ), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% bcdefj -> 4 T7 = insert(<<"bcdefj">>, 4, T6), ?assertEqual(5, ar_patricia_tree:size(T7)), ?assertEqual(2, gb_sets:size(element(2, maps:get(root, T7)))), ?assertEqual(1, get(<<"aaa">>, T7)), ?assertEqual(3, get(<<"aab">>, T7)), ?assertEqual(4, get(<<"abc">>, T7)), ?assertEqual(4, get(<<"bcdefj">>, T7)), ?assertEqual([{<<"bcdefj">>, 4}, {<<"abc">>, 4}, {<<"ab">>, 2}], get_range(<<"ab">>, 3, T7)), ?assertEqual([], get_range(0, T7)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% bcd -> efj -> 4 %% bcd -> 5 T8 = insert(<<"bcdbcd">>, 5, T7), ?assertEqual(6, ar_patricia_tree:size(T8)), ?assertEqual(4, get(<<"bcdefj">>, T8)), ?assertEqual(5, get(<<"bcdbcd">>, T8)), T9 = insert(<<"bcdbcd">>, 6, T8), ?assertEqual(6, ar_patricia_tree:size(T9)), ?assertEqual(4, 
get(<<"bcdefj">>, T9)), ?assertEqual(6, get(<<"bcdbcd">>, T9)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% bab -> 7 %% bcd -> efj -> 4 %% bcd -> 6 T10 = insert(<<"bab">>, 7, T9), ?assertEqual(7, ar_patricia_tree:size(T10)), ?assertEqual(1, get(<<"aaa">>, T10)), ?assertEqual(3, get(<<"aab">>, T10)), ?assertEqual(4, get(<<"abc">>, T10)), ?assertEqual(4, get(<<"bcdefj">>, T10)), ?assertEqual(6, get(<<"bcdbcd">>, T10)), ?assertEqual(7, get(<<"bab">>, T10)), ?assertEqual( [ {<<"aaa">>, 1}, {<<"aab">>, 3}, {<<"ab">>, 2}, {<<"abc">>, 4}, {<<"bab">>, 7}, {<<"bcdbcd">>, 6}, {<<"bcdefj">>, 4} ], foldr(fun(K, V, Acc) -> [{K, V} | Acc] end, [], T10) ), ?assertEqual( [ {<<"bcdefj">>, 4}, {<<"bcdbcd">>, 6}, {<<"bab">>, 7}, {<<"abc">>, 4}, {<<"ab">>, 2}, {<<"aab">>, 3}, {<<"aaa">>, 1} ], get_range(<<"aaa">>, 20, T10) ), ?assertEqual( [ {<<"bcdefj">>, 4}, {<<"bcdbcd">>, 6}, {<<"bab">>, 7}, {<<"abc">>, 4}, {<<"ab">>, 2}, {<<"aab">>, 3}, {<<"aaa">>, 1} ], get_range(7, T10) ), {H10, _, _} = compute_hash(T10, HashFun), {H10_1, _, _} = compute_hash( insert( <<"ab">>, 2, insert(<<"abc">>, 4, insert(<<"aab">>, 3, insert(<<"aaa">>, 1, new())))), HashFun ), {H10_2, _, _} = compute_hash( insert(<<"bcdefj">>, 4, insert(<<"bab">>, 7, insert(<<"bcdbcd">>, 6, new()))), HashFun ), ?assertEqual(H10, ar_deep_hash:hash([H10_1, H10_2])), {H10_2_1, _, _} = compute_hash(insert(<<"bab">>, 7, new()), HashFun), {H10_2_2, _, _} = compute_hash(insert(<<"bcdbcd">>, 6, insert(<<"bcdefj">>, 4, new())), HashFun), ?assertEqual(H10_2, ar_deep_hash:hash([H10_2_1, H10_2_2])), ?assertNotEqual(H10, element(1, compute_hash(delete(<<"ab">>, T10), HashFun))), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% b -> a -> b -> 7 %% a -> a -> 8 %% bcd -> efj -> 4 %% bcd -> 6 T11 = insert(<<"baa">>, 8, T10), ?assertEqual(8, ar_patricia_tree:size(T11)), ?assertEqual(1, get(<<"aaa">>, T11)), ?assertEqual(3, get(<<"aab">>, T11)), ?assertEqual(4, get(<<"abc">>, T11)), ?assertEqual(4, get(<<"bcdefj">>, T11)), 
?assertEqual(6, get(<<"bcdbcd">>, T11)), ?assertEqual(7, get(<<"bab">>, T11)), ?assertEqual(8, get(<<"baa">>, T11)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% b -> a -> b -> 7 %% a -> a -> 8 %% bcd -> efj -> 4 %% bcd -> 6 %% <<>> -> empty T12 = insert(<<>>, empty, T11), ?assertEqual(9, ar_patricia_tree:size(T12)), ?assertEqual(1, get(<<"aaa">>, T12)), ?assertEqual(3, get(<<"aab">>, T12)), ?assertEqual(4, get(<<"abc">>, T12)), ?assertEqual(4, get(<<"bcdefj">>, T12)), ?assertEqual(6, get(<<"bcdbcd">>, T12)), ?assertEqual(7, get(<<"bab">>, T12)), ?assertEqual(8, get(<<"baa">>, T12)), ?assertEqual(empty, get(<<>>, T12)), ?assertEqual( [ {<<>>, empty}, {<<"aaa">>, 1}, {<<"aab">>, 3}, {<<"ab">>, 2}, {<<"abc">>, 4}, {<<"baa">>, 8}, {<<"bab">>, 7}, {<<"bcdbcd">>, 6}, {<<"bcdefj">>, 4} ], foldr(fun(K, V, Acc) -> [{K, V} | Acc] end, [], T12) ), {H12, _, _} = compute_hash(T12, HashFun), T13 = from_proplist([ {<<"bcdbcd">>, 6}, {<<>>, empty}, {<<"ab">>, 2}, {<<"baa">>, 8}, {<<"aab">>, 3}, {<<"bab">>, 7}, {<<"aaa">>, 1}, {<<"abc">>, 4}, {<<"bcdefj">>, 4} ]), {H13, _, _} = compute_hash(T13, HashFun), ?assertEqual(H12, H13), ?assertEqual(1, get(<<"aaa">>, T13)), ?assertEqual(3, get(<<"aab">>, T13)), ?assertEqual(4, get(<<"abc">>, T13)), ?assertEqual(4, get(<<"bcdefj">>, T13)), ?assertEqual(6, get(<<"bcdbcd">>, T13)), ?assertEqual(7, get(<<"bab">>, T13)), ?assertEqual(8, get(<<"baa">>, T13)), ?assertEqual(empty, get(<<>>, T13)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% b -> a -> b -> 7 %% a -> a -> 8 %% bcd -> efj -> 4 %% bc -> 9 %% d -> 6 %% <<>> -> empty T14 = insert(<<"bcdbc">>, 9, T13), ?assertEqual(10, ar_patricia_tree:size(T14)), ?assertEqual(1, get(<<"aaa">>, T14)), ?assertEqual(3, get(<<"aab">>, T14)), ?assertEqual(4, get(<<"abc">>, T14)), ?assertEqual(4, get(<<"bcdefj">>, T14)), ?assertEqual(6, get(<<"bcdbcd">>, T14)), ?assertEqual(7, get(<<"bab">>, T14)), ?assertEqual(8, get(<<"baa">>, T14)), ?assertEqual(9, get(<<"bcdbc">>, T14)), 
?assertEqual(empty, get(<<>>, T14)), T15 = insert(<<"bcdbc">>, 10, T14), ?assertEqual(10, ar_patricia_tree:size(T15)), ?assertEqual(10, get(<<"bcdbc">>, T15)), ?assertEqual(6, get(<<"bcdbcd">>, T15)), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% b -> a -> b -> 7 %% bcd -> efj -> 4 %% bc -> 10 %% d -> 6 %% <<>> -> empty {H15, T15_2, _} = compute_hash(T15, HashFun), T16 = delete(<<"baa">>, T15_2), ?assertEqual(1, get(<<"aaa">>, T16)), ?assertEqual(3, get(<<"aab">>, T16)), ?assertEqual(4, get(<<"abc">>, T16)), ?assertEqual(4, get(<<"bcdefj">>, T16)), ?assertEqual(6, get(<<"bcdbcd">>, T16)), ?assertEqual(7, get(<<"bab">>, T16)), ?assertEqual(not_found, get(<<"baa">>, T16)), ?assertEqual(10, get(<<"bcdbc">>, T16)), ?assertEqual(empty, get(<<>>, T16)), {H16, T16_2, _} = compute_hash(T16, HashFun), ?assertNotEqual(H16, H15), %% a -> a -> a -> 1 %% b -> 3 %% b -> 2 %% c -> 4 %% b -> a -> b -> 7 %% bcd -> efj -> 4 %% bc -> 10 %% <<>> -> empty T17 = delete(<<"bcdbcd">>, T16_2), ?assertEqual(1, get(<<"aaa">>, T17)), ?assertEqual(3, get(<<"aab">>, T17)), ?assertEqual(4, get(<<"abc">>, T17)), ?assertEqual(4, get(<<"bcdefj">>, T17)), ?assertEqual(not_found, get(<<"bcdbcd">>, T17)), ?assertEqual(7, get(<<"bab">>, T17)), ?assertEqual(10, get(<<"bcdbc">>, T17)), ?assertEqual(empty, get(<<>>, T17)), {H17, T17_2, _} = compute_hash(T17, HashFun), ?assertNotEqual(H17, H16), %% a -> a -> b -> 3 %% b -> 2 %% c -> 4 %% b -> a -> b -> 7 %% a -> a -> 9 %% bcd -> efj -> 4 %% bc -> 10 %% <<>> -> empty T18 = insert(<<"baa">>, 9, delete(<<"aaa">>, T17_2)), {H18, _, _} = compute_hash(T18, HashFun), ?assertEqual(not_found, get(<<"aaa">>, T18)), ?assertEqual(3, get(<<"aab">>, T18)), ?assertEqual(4, get(<<"abc">>, T18)), ?assertEqual(4, get(<<"bcdefj">>, T18)), ?assertEqual(9, get(<<"baa">>, T18)), ?assertEqual(7, get(<<"bab">>, T18)), ?assertEqual(10, get(<<"bcdbc">>, T18)), ?assertEqual(empty, get(<<>>, T18)), ?assertNotEqual(H18, H17), ?assertEqual([{<<>>, empty}], get_range(<<>>, 1, 
T18)), ?assertEqual([{<<>>, empty}], get_range(1, T18)), ?assertEqual([{<<"bcdefj">>, 4}, {<<"bcdbc">>, 10}, {<<"bab">>, 7}, {<<"baa">>, 9}, {<<"abc">>, 4}, {<<"ab">>, 2}, {<<"aab">>, 3}, {<<>>, empty}], get_range(<<>>, 20, T18)), ?assertEqual( [ {<<"bcdefj">>, 4}, {<<"bcdbc">>, 10}, {<<"bab">>, 7}, {<<"baa">>, 9}, {<<"abc">>, 4}, {<<"ab">>, 2}, {<<"aab">>, 3}, {<<>>, empty} ], get_range(8, T18) ), T19 = insert(<<"a">>, 11, T18), ?assertEqual(11, get(<<"a">>, T19)), %% a -> 11 %% a -> b -> 3 %% b -> c -> 4 %% b -> a -> b -> 7 %% a -> a -> 9 %% bcd -> efj -> 4 %% bc -> 10 %% <<>> -> empty T20 = delete(<<"ab">>, T19), ?assertEqual(not_found, get(<<"ab">>, T20)), ?assertEqual(11, get(<<"a">>, T20)), ?assertEqual(3, get(<<"aab">>, T20)), ?assertEqual(4, get(<<"abc">>, T20)), ?assertEqual(4, get(<<"bcdefj">>, T20)), ?assertEqual(9, get(<<"baa">>, T20)), ?assertEqual(7, get(<<"bab">>, T20)), ?assertEqual(10, get(<<"bcdbc">>, T20)), ?assertEqual(empty, get(<<>>, T20)), ?assertEqual(8, ar_patricia_tree:size(T20)), %% abc -> 1 %% def -> 1 T21 = delete(<<"def">>, insert(<<"def">>, 1, insert(<<"abc">>, 1, new()))), ?assertEqual(not_found, get(<<"def">>, T21)), ?assertNotEqual( element(1, compute_hash(insert(<<"aab">>, 1, insert(<<"aaa">>, 1, insert(<<"a">>, 2, new()))), HashFun)), element(1, compute_hash(insert(<<"aab">>, 1, insert(<<"aaa">>, 1, insert(<<"aa">>, 2, new()))), HashFun)) ). stochastic_test() -> lists:foreach( fun(_Case) -> KeyValues = random_key_values(3), lists:foldl( %% Assert all the permutations of the order of insertion of elements %% produce the tree with the same root hash. Assert that each of the %% elements removed from the tree after each permutation produces the tree %% with the same root hash as the trees produced by building up the tree %% without this element. 
fun(Permutation, Acc) -> Tree = from_proplist(Permutation), Map = maps:from_list(Permutation), compare_with_map(Tree, Map), SHA256Fun = fun(K, V) -> crypto:hash(sha256, << K/binary, (term_to_binary(V))/binary >>) end, lists:foreach( fun({K, V}) -> Tree1 = delete(K, Tree), M = maps:remove(K, Map), compare_with_map(Tree1, M), {H1, _, _} = compute_hash(Tree1, SHA256Fun), Tree2 = from_proplist(Permutation -- [{K, V}]), {H2, _, _} = compute_hash(Tree2, SHA256Fun), ?assertEqual(H1, H2, [{tree1, Tree1}, {tree2, Tree2}]) end, Permutation ), {H, _, _} = compute_hash(Tree, SHA256Fun), case Acc of start -> do_not_assert; _ -> ?assertEqual(H, Acc) end, Acc end, start, permutations(KeyValues) ) end, lists:seq(1, 1000) ). random_key_values(N) -> lists:foldl( fun(_, Acc) -> [{crypto:strong_rand_bytes(5), crypto:strong_rand_bytes(30)} | Acc] end, [], lists:seq(1, N) ). compare_with_map(Tree, Map) -> ?assertEqual(map_size(Map), ar_patricia_tree:size(Tree)), maps:map( fun(Key, Value) -> ?assertEqual(Value, get(Key, Tree)) end, Map ). permutations([]) -> [[]]; permutations(L) -> [[KV | T] || KV <- L, T <- permutations(L -- [KV])]. ================================================ FILE: apps/arweave/src/ar_peer_intervals.erl ================================================ -module(ar_peer_intervals). -export([fetch/5]). -include_lib("arweave_config/include/arweave_config.hrl"). -include("ar.hrl"). -include("ar_data_discovery.hrl"). -ifdef(AR_TEST). -include_lib("eunit/include/eunit.hrl"). -endif. %% The size of the span of the weave we search at a time. %% By searching we mean asking peers about the intervals they have in the given span %% and finding the intersection with the unsynced intervals. -ifdef(AR_TEST). -define(QUERY_RANGE_STEP_SIZE, 10_000_000). % 10 MB -else. -define(QUERY_RANGE_STEP_SIZE, 1_000_000_000). % 1 GB -endif. %% Fetch at most this many sync intervals from a peer at a time. -ifdef(AR_TEST). -define(QUERY_SYNC_INTERVALS_COUNT_LIMIT, 10). -else. 
-define(QUERY_SYNC_INTERVALS_COUNT_LIMIT, 1000).
-endif.

%% The number of peers to fetch sync intervals from in parallel at a time.
-define(GET_SYNC_RECORD_BATCH_SIZE, 2).
-define(GET_SYNC_RECORD_COOLDOWN_MS, 60 * 1000).
-define(GET_SYNC_RECORD_RPM_KEY, data_sync_record).
-define(GET_FOOTPRINT_RECORD_RPM_KEY, footprints).
-define(GET_FOOTPRINT_RECORD_COOLDOWN_MS, 60 * 1000).
-define(GET_SYNC_RECORD_PATH, [<<"data_sync_record">>]).
-define(GET_FOOTPRINT_RECORD_PATH, [<<"footprints">>]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% Drive one step of peer-interval collection for the [Start, End) range.
%% When Offset has reached End, notify the owning ar_data_sync server that
%% the pass is complete; otherwise run the fetch in a linked helper process
%% and feed the discovered intervals (and the next offset) back to it.
fetch(Offset, Start, End, StoreID, Type) when Offset >= End ->
	?LOG_DEBUG([{event, fetch_peer_intervals_end}, {store_id, StoreID}, {offset, Offset},
			{range_start, Start}, {range_end, End}, {type, Type}]),
	gen_server:cast(ar_data_sync:name(StoreID),
			{collect_peer_intervals, Offset, Start, End, Type});
fetch(Offset, Start, End, StoreID, Type) ->
	Parent = ar_data_sync:name(StoreID),
	spawn_link(fun() ->
		case do_fetch(Offset, Start, End, StoreID, Type) of
			{End2, EnqueueIntervals} ->
				gen_server:cast(Parent, {enqueue_intervals, EnqueueIntervals}),
				gen_server:cast(Parent, {collect_peer_intervals, End2, Start, End, Type});
			%% All candidate peers are cooling down; retry this offset later.
			wait ->
				ar_util:cast_after(1000, Parent,
						{collect_peer_intervals, Offset, Start, End, Type})
		end
	end).

%% Query peers for the intervals they hold inside the next
%% ?QUERY_RANGE_STEP_SIZE span, intersected with what we are missing
%% locally. Returns {NextOffset, [{Peer, Intervals, FootprintKey}]} or wait.
%% Any crash is logged and treated as "made no progress" ({Offset, []}).
do_fetch(Offset, Start, End, StoreID, normal) ->
	Parent = ar_data_sync:name(StoreID),
	try
		case get_peers(Offset, normal) of
			wait ->
				wait;
			Peers ->
				End2 = min(Offset + ?QUERY_RANGE_STEP_SIZE, End),
				UnsyncedIntervals = get_unsynced_intervals(Offset, End2, StoreID),
				%% Schedule the next sync bucket. The cast handler logic will
				%% pause collection if needed.
				case ar_intervals:is_empty(UnsyncedIntervals) of
					true ->
						{End2, []};
					false ->
						{End3, EnqueueIntervals2} =
							fetch_peer_intervals(Parent, Offset, Peers, UnsyncedIntervals),
						%% Resume next time from the smallest right bound the
						%% peers reported, so no advertised data is skipped.
						{min(End2, End3), EnqueueIntervals2}
				end
		end
	catch Class:Reason:Stacktrace ->
		?LOG_WARNING([{event, fetch_peers_process_exit}, {store_id, StoreID},
				{offset, Offset}, {range_start, Start}, {range_end, End}, {type, normal},
				{class, Class}, {reason, Reason}, {stacktrace, Stacktrace}]),
		{Offset, []}
	end;
%% Footprint-mode variant: resolve the entropy partition and footprint for
%% the chunk at Offset and ask peers for the matching footprint intervals.
do_fetch(Offset, Start, End, StoreID, footprint) ->
	Parent = ar_data_sync:name(StoreID),
	try
		case get_peers(Offset, footprint) of
			wait ->
				wait;
			Peers ->
				Partition = ar_replica_2_9:get_entropy_partition(Offset + ?DATA_CHUNK_SIZE),
				Footprint = ar_footprint_record:get_footprint(Offset + ?DATA_CHUNK_SIZE),
				UnsyncedIntervals =
					ar_footprint_record:get_unsynced_intervals(Partition, Footprint, StoreID),
				EnqueueIntervals =
					case ar_intervals:is_empty(UnsyncedIntervals) of
						true ->
							[];
						false ->
							fetch_peer_footprint_intervals(
								Parent, Partition, Footprint, Offset, End, Peers,
								UnsyncedIntervals)
					end,
				Offset2 = get_next_fetch_offset(Offset, Start, End),
				%% Schedule the next sync bucket. The cast handler logic will
				%% pause collection if needed.
				{Offset2, EnqueueIntervals}
		end
	catch Class:Reason:Stacktrace ->
		?LOG_WARNING([{event, fetch_footprint_intervals_process_exit}, {store_id, StoreID},
				{offset, Offset}, {range_start, Start}, {range_end, End}, {type, footprint},
				{class, Class}, {reason, Reason}, {stacktrace, Stacktrace}]),
		{Offset, []}
	end.

%% @doc Calculate the next fetch start position after processing a sector.
%% Advances by one chunk within a sector, or jumps to the next partition boundary
%% when near the sector end.
%% Advance by one chunk within the current entropy sector; once fewer than
%% two chunks remain before the sector end, jump to the partition end.
%% The result is capped at End.
get_next_fetch_offset(Offset, Start, End) ->
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	Partition = ar_replica_2_9:get_entropy_partition(Offset + ?DATA_CHUNK_SIZE),
	{PartitionStart, PartitionEnd} = ar_replica_2_9:get_entropy_partition_range(Partition),
	SectorStart = max(Start, PartitionStart),
	SectorEnd = min(PartitionEnd, SectorStart + SectorSize),
	Offset2 =
		case Offset + 2 * ?DATA_CHUNK_SIZE > SectorEnd of
			true ->
				PartitionEnd;
			false ->
				Offset + ?DATA_CHUNK_SIZE
		end,
	min(Offset2, End).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Pick the candidate peers for the bucket containing Offset, using the
%% sync-record discovery index for normal syncing and the footprint index
%% for footprint syncing.
get_peers(Offset, normal) ->
	Bucket = Offset div ?NETWORK_DATA_BUCKET_SIZE,
	get_peers2(Bucket, fun(B) -> ar_data_discovery:get_bucket_peers(B) end,
			?GET_SYNC_RECORD_RPM_KEY, ?GET_SYNC_RECORD_PATH);
get_peers(Offset, footprint) ->
	FootprintBucket = ar_footprint_record:get_footprint_bucket(Offset + ?DATA_CHUNK_SIZE),
	get_peers2(FootprintBucket,
			fun(B) -> ar_data_discovery:get_footprint_bucket_peers(B) end,
			?GET_FOOTPRINT_RECORD_RPM_KEY, ?GET_FOOTPRINT_RECORD_PATH).

%% Filter the bucket's peers down to those not rate-limited for RPMKey/Path.
%% Returns wait when peers exist but all are cooling down or throttled,
%% otherwise a pick of up to ?QUERY_BEST_PEERS_COUNT usable peers.
get_peers2(Bucket, GetPeersFun, RPMKey, Path) ->
	{ok, Config} = arweave_config:get_env(),
	AllPeers =
		case Config#config.sync_from_local_peers_only of
			true ->
				Config#config.local_peers;
			false ->
				GetPeersFun(Bucket)
		end,
	HotPeers = [ Peer || Peer <- AllPeers,
			not ar_rate_limiter:is_on_cooldown(Peer, RPMKey)
				andalso not ar_rate_limiter:is_throttled(Peer, Path) ],
	case length(AllPeers) > 0 andalso length(HotPeers) == 0 of
		true ->
			%% There are peers for this Offset, but they are all on
			%% cooldown/throttled, so we'll give them time to recover.
			wait;
		false ->
			ar_data_discovery:pick_peers(HotPeers, ?QUERY_BEST_PEERS_COUNT)
	end.

%% @doc Collect the unsynced intervals between Start and End excluding the blocklisted
%% intervals.
get_unsynced_intervals(Start, End, StoreID) ->
	UnsyncedIntervals = get_unsynced_intervals(Start, End, ar_intervals:new(), StoreID),
	BlacklistedIntervals = ar_tx_blacklist:get_blacklisted_intervals(Start, End),
	%% Keep only the unsynced parts that are NOT blacklisted.
	ar_intervals:outerjoin(BlacklistedIntervals, UnsyncedIntervals).

%% Walk the local sync record from Start to End, accumulating the gaps
%% between consecutive synced intervals.
get_unsynced_intervals(Start, End, Intervals, _StoreID) when Start >= End ->
	Intervals;
get_unsynced_intervals(Start, End, Intervals, StoreID) ->
	case ar_sync_record:get_next_synced_interval(Start, End, ar_data_sync, StoreID) of
		%% Nothing synced after Start: the whole remainder is a gap.
		not_found ->
			ar_intervals:add(Intervals, End, Start);
		{End2, Start2} ->
			case Start2 > Start of
				%% There is a gap between Start and the next synced interval.
				true ->
					End3 = min(Start2, End),
					get_unsynced_intervals(End2, End,
							ar_intervals:add(Intervals, End3, Start), StoreID);
				_ ->
					get_unsynced_intervals(End2, End, Intervals, StoreID)
			end
	end.

%% Query up to ?GET_SYNC_RECORD_BATCH_SIZE peers at a time for the intervals
%% they advertise that intersect UnsyncedIntervals. Returns
%% {MinRightBound, [{Peer, Intervals, none}]}; MinRightBound is the smallest
%% right bound any contributing peer reported (infinity when none did), so
%% the caller knows where it is safe to continue from.
fetch_peer_intervals(Parent, Start, Peers, UnsyncedIntervals) ->
	Intervals = ar_util:batch_pmap(
		fun(Peer) ->
			case maybe_get_peer_intervals(Peer, Start, UnsyncedIntervals) of
				{ok, SoughtIntervals, PeerRightBound} ->
					{Peer, SoughtIntervals, PeerRightBound};
				{error, cooldown} ->
					%% Skipping peer because we hit a 429 and put it on cooldown.
					ok;
				{error, Reason} ->
					ar_http_iface_client:log_failed_request(Reason,
							[{event, failed_to_fetch_peer_intervals},
							{parent, Parent},
							{peer, ar_util:format_peer(Peer)},
							{reason, io_lib:format("~p", [Reason])}]),
					ok
			end
		end,
		Peers,
		?GET_SYNC_RECORD_BATCH_SIZE, % fetch sync intervals from so many peers at a time
		%% We'll rely on the timeout to also flag when we are approaching a peer's
		%% RPM limit. As we approach the limit we will self-throttle the requests.
		%% Eventually this throttling will exceed 60s and we'll timout the
		%% batch_pmap and flag the peer for cooldown.
		60 * 1000
	),
	{EnqueueIntervals, MinRightBound} = lists:foldl(
		fun	({error, batch_pmap_timeout, Peer}, Acc) ->
				%% The peer timed out inside batch_pmap: back off from it.
				?LOG_DEBUG([{event, failed_to_fetch_peer_intervals},
						{parent, Parent},
						{peer, ar_util:format_peer(Peer)},
						{reason, batch_pmap_timeout}]),
				ar_rate_limiter:set_cooldown(
						Peer, ?GET_SYNC_RECORD_RPM_KEY, ?GET_SYNC_RECORD_COOLDOWN_MS),
				Acc;
			({Peer, SoughtIntervals, RightBound}, {IntervalsAcc, RightBoundAcc}) ->
				case ar_intervals:is_empty(SoughtIntervals) of
					true ->
						{IntervalsAcc, RightBoundAcc};
					false ->
						%% FootprintKey = none for normal syncing
						{[{Peer, SoughtIntervals, none} | IntervalsAcc],
								min(RightBound, RightBoundAcc)}
				end;
			(ok, Acc) ->
				%% Peer was skipped (cooldown or already-logged failure).
				Acc;
			(Error, Acc) ->
				ar_http_iface_client:log_failed_request(Error,
						[{event, failed_to_fetch_peer_intervals},
						{parent, Parent},
						{peer, unknown},
						{reason, io_lib:format("~p", [Error])}]),
				Acc
		end,
		{[], infinity},
		Intervals
	),
	{MinRightBound, EnqueueIntervals}.

%% @doc
%% @return {ok, Intervals, PeerRightBound} | Error
%% Intervals: the intersection of the intervals we are looking for and the intervals that
%% the peer advertised inside the recently queried range
%% PeerRightBound: the right bound of the intervals the peer advertised; for example,
%% we may ask for at most 100 continuous intervals inside the given gigabyte,
%% but the peer may have this region very fractured and 100 intervals will
%% not be all intervals covering this gigabyte, so we take the right bound
%% to know where to query next
maybe_get_peer_intervals(Peer, Left, SoughtIntervals) ->
	case ar_rate_limiter:is_on_cooldown(Peer, ?GET_SYNC_RECORD_RPM_KEY) of
		true ->
			{error, cooldown};
		false ->
			get_peer_intervals(Peer, Left, SoughtIntervals)
	end.
%% @doc Fetch the peer's sync record starting at Left + 1 and intersect it with
%% SoughtIntervals. Puts the peer on cooldown on a 429 (too_many_requests) reply.
get_peer_intervals(Peer, Left, SoughtIntervals) ->
	Limit = ?QUERY_SYNC_INTERVALS_COUNT_LIMIT,
	Right = element(1, ar_intervals:largest(SoughtIntervals)),
	PeerReply =
		%% Older releases do not support the right-bound argument.
		case ar_peers:get_peer_release(Peer) >= ?GET_SYNC_RECORD_RIGHT_BOUND_SUPPORT_RELEASE of
			true ->
				ar_http_iface_client:get_sync_record(Peer, Left + 1, Right, Limit);
			false ->
				ar_http_iface_client:get_sync_record(Peer, Left + 1, Limit)
		end,
	case PeerReply of
		{ok, PeerIntervals2} ->
			%% The advertised right bound tells the caller where to query next.
			PeerRightBound =
				case ar_intervals:is_empty(PeerIntervals2) of
					true -> infinity;
					false -> element(1, ar_intervals:largest(PeerIntervals2))
				end,
			{ok, ar_intervals:intersection(PeerIntervals2, SoughtIntervals), PeerRightBound};
		{error, too_many_requests} = Error ->
			ar_rate_limiter:set_cooldown(Peer, ?GET_SYNC_RECORD_RPM_KEY,
				?GET_SYNC_RECORD_COOLDOWN_MS),
			Error;
		Error ->
			Error
	end.

%% @doc Query the given peers (in batches) for footprint intervals intersecting
%% UnsyncedIntervals. Returns a list of {Peer, ByteIntervals, FootprintKey} tuples
%% ready to be enqueued.
fetch_peer_footprint_intervals(Parent, Partition, Footprint, Start, End, Peers,
		UnsyncedIntervals) ->
	Intervals = ar_util:batch_pmap(
		fun(Peer) ->
			case maybe_get_peer_footprint_intervals(
					Peer, Partition, Footprint, UnsyncedIntervals) of
				{ok, SoughtIntervals} ->
					{Peer, SoughtIntervals};
				{error, cooldown} ->
					%% Skipping peer because we hit a 429 and put it on cooldown.
					ok;
				{error, Reason} ->
					?LOG_DEBUG([{event, failed_to_fetch_peer_footprint_intervals},
						{parent, Parent},
						{peer, ar_util:format_peer(Peer)},
						{reason, io_lib:format("~p", [Reason])}]),
					ok
			end
		end,
		Peers,
		?GET_SYNC_RECORD_BATCH_SIZE, % fetch sync intervals from so many peers at a time
		%% We'll rely on the timeout to also flag when we are approaching a peer's RPM
		%% limit. As we approach the limit we will self-throttle the requests. Eventually
		%% this throttling will exceed 60s and we'll time out the batch_pmap and flag the
		%% peer for cooldown.
		60 * 1000
	),
	EnqueueIntervals = lists:foldl(
		fun	({error, batch_pmap_timeout, Peer}, Acc) ->
				?LOG_DEBUG([{event, failed_to_fetch_peer_footprint_intervals},
					{parent, Parent},
					{peer, ar_util:format_peer(Peer)},
					{reason, batch_pmap_timeout}]),
				ar_rate_limiter:set_cooldown(
					Peer, ?GET_FOOTPRINT_RECORD_RPM_KEY, ?GET_FOOTPRINT_RECORD_COOLDOWN_MS),
				Acc;
			({Peer, SoughtIntervals}, IntervalsAcc) ->
				case ar_intervals:is_empty(SoughtIntervals) of
					true ->
						IntervalsAcc;
					false ->
						%% Translate footprint intervals to byte intervals clipped to
						%% the storage module boundaries.
						ByteIntervals = cut_peer_footprint_intervals(
								SoughtIntervals, Start, End),
						?LOG_DEBUG([{event, fetch_peer_intervals},
							{function, fetch_peer_footprint_intervals},
							{peer, ar_util:format_peer(Peer)},
							{partition, Partition},
							{footprint, Footprint},
							{unsynced_intervals, ar_intervals:sum(UnsyncedIntervals)},
							{sought_intervals, ar_intervals:sum(SoughtIntervals)},
							{intervals, length(Intervals)},
							{byte_intervals, ar_intervals:sum(ByteIntervals)}]),
						FootprintKey = {Partition, Footprint, Peer},
						[{Peer, ByteIntervals, FootprintKey} | IntervalsAcc]
				end;
			(ok, Acc) ->
				Acc;
			(Error, Acc) ->
				?LOG_DEBUG([{event, failed_to_fetch_peer_footprint_intervals},
					{parent, Parent},
					{peer, unknown},
					{reason, io_lib:format("~p", [Error])}]),
				Acc
		end,
		[],
		Intervals
	),
	EnqueueIntervals.

%% @doc Like get_peer_footprint_intervals/4 but skips peers on a 429 cooldown.
maybe_get_peer_footprint_intervals(Peer, Partition, Footprint, SoughtIntervals) ->
	case ar_rate_limiter:is_on_cooldown(Peer, ?GET_FOOTPRINT_RECORD_RPM_KEY) of
		true ->
			{error, cooldown};
		false ->
			get_peer_footprint_intervals(Peer, Partition, Footprint, SoughtIntervals)
	end.

%% @doc Fetch the peer's footprint record and intersect it with SoughtIntervals.
%% Returns {ok, Intervals} (empty if the peer has nothing) or an error; puts the
%% peer on cooldown on a 429 reply.
get_peer_footprint_intervals(Peer, Partition, Footprint, SoughtIntervals) ->
	PeerReply =
		case ar_peers:get_peer_release(Peer) >= ?GET_FOOTPRINT_SUPPORT_RELEASE of
			true ->
				ar_http_iface_client:get_footprints(Peer, Partition, Footprint);
			false ->
				%% We expect to get here only if the peer is upgraded and then
				%% downgraded again, because we check the peer release at the bucket
				%% collection stage.
				not_found
		end,
	case PeerReply of
		{ok, Intervals} ->
			{ok, ar_intervals:intersection(Intervals, SoughtIntervals)};
		not_found ->
			{ok, ar_intervals:new()};
		{error, too_many_requests} = Error ->
			ar_rate_limiter:set_cooldown(Peer, ?GET_FOOTPRINT_RECORD_RPM_KEY,
				?GET_FOOTPRINT_RECORD_COOLDOWN_MS),
			Error;
		Error ->
			Error
	end.

%% @doc The intervals returned by a peer may include intervals beyond the
%% storage module boundaries. This is because we end up querying all advertised
%% intervals belonging to a footprint that intersects this node's unsynced
%% intervals. This can cause this node to try to store a chunk that lies beyond
%% its configured storage module range. To avoid this we explicitly remove all
%% intervals beyond the provided boundaries.
cut_peer_footprint_intervals(FootprintIntervals, Start, End) ->
	ByteIntervals =
		ar_footprint_record:get_intervals_from_footprint_intervals(FootprintIntervals),
	%% Drop everything past End.
	ByteIntervals2 = ar_intervals:cut(ByteIntervals, End),
	%% Align the left boundary to a chunk-padded offset; if Start is not already
	%% padded, step one chunk back from its padded offset.
	PaddedStart =
		case ar_block:get_chunk_padded_offset(Start) of
			Start -> Start;
			Offset -> Offset - ?DATA_CHUNK_SIZE
		end,
	%% Remove everything to the left of PaddedStart.
	ar_intervals:outerjoin(
		ar_intervals:from_list([{PaddedStart, -1}]),
		ByteIntervals2).

%%%===================================================================
%%% Tests
%%%===================================================================

-ifdef(AR_TEST).
%% Verify that footprint intervals are translated into byte intervals and
%% clipped to the [Start, End) storage module boundaries.
cut_peer_footprint_intervals_test() ->
	?assertEqual(
		ar_intervals:from_list([{786432, 524288}, {1310720, 1048576}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{4, 0}]), 262144, 1572864),
		"Full Footprint 0, aligned boundaries"),
	?assertEqual(
		ar_intervals:from_list([{524288,262144}, {1048576,786432}, {1572864,1310720}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{8, 4}]), 262144, 1572864),
		"Full Footprint 1 cut to aligned boundaries"),
	?assertEqual(
		ar_intervals:from_list([
			{262144,200000}, {786432, 524288}, {1310720, 1048576}, {1600000,1572864}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{4, 0}]), 200000, 1600000),
		"Full Footprint 0, unaligned boundaries, pre-strict"),
	?assertEqual(
		ar_intervals:from_list([{524288,262144}, {1048576, 786432}, {1572864, 1310720}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{8, 4}]), 200000, 1600000),
		"Full Footprint 1, unaligned boundaries, pre-strict"),
	?assertEqual(
		ar_intervals:from_list([{2883584,2621440}, {3407872,3145728}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{12, 8}]), 2400000, 3500000),
		"Full Footprint 2, unaligned boundaries, post-strict"),
	?assertEqual(
		ar_intervals:from_list([{2621440,2359296}, {3145728,2883584}, {3500000,3407872}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{16, 12}]), 2400000, 3500000),
		"Full Footprint 3, unaligned boundaries, post-strict"),
	?assertEqual(
		ar_intervals:from_list([{2621440,2359296}, {3500000,3407872}]),
		cut_peer_footprint_intervals(
			ar_intervals:from_list([{16, 14}, {13, 12}]), 2400000, 3500000),
		"Partial Footprint 3, unaligned boundaries, post-strict"),
	ok.

%% Tests for get_next_fetch_offset/3
%% 4 binary conditions (shown as debug output 0/1 for each):
%% 1. Start > PartitionStart
%% 2. PartitionEnd > SectorStart + SectorSize
%% 3. Offset + 2*CHUNK > SectorEnd
%% 4. Offset2 > End
%% Pattern labeled 0-F in hex (e.g., 0101 = 5)
%% Note: 0xxx (cond1=0, cond2=0) requires partition < SectorSize, impossible in tests
get_next_fetch_offset_test() ->
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	{P0Start, P0End} = ar_replica_2_9:get_entropy_partition_range(0),
	Chunk = ?DATA_CHUNK_SIZE,
	?assertEqual(P0Start + Chunk,
		get_next_fetch_offset(P0Start, P0Start, P0End),
		"simple advance"),
	?assertEqual(P0Start + 1000,
		get_next_fetch_offset(P0Start, P0Start, P0Start + 1000),
		"simple advance, limited by End"),
	?assertEqual(P0End,
		get_next_fetch_offset(P0Start + SectorSize - 1, P0Start, P0End),
		"jump to PartitionEnd"),
	?assertEqual(P0Start + SectorSize,
		get_next_fetch_offset(P0Start + SectorSize - 1, P0Start, P0Start + SectorSize),
		"jump to PartitionEnd, limited by End"),
	Start8 = P0End - SectorSize,
	?assertEqual(Start8 + Chunk,
		get_next_fetch_offset(Start8, Start8, P0End),
		"simple advance, mid-partition Start"),
	Start9 = P0End - SectorSize,
	?assertEqual(Start9 + 1000,
		get_next_fetch_offset(Start9, Start9, Start9 + 1000),
		"simple advance, mid-partition Start, limited by End"),
	StartA = P0End - SectorSize,
	?assertEqual(P0End,
		get_next_fetch_offset(StartA + Chunk, StartA, P0End),
		"jump to PartitionEnd, mid-partition Start"),
	StartB = P0End - SectorSize,
	SmallEndB = P0End - Chunk,
	?assertEqual(SmallEndB,
		get_next_fetch_offset(StartB + Chunk, StartB, SmallEndB),
		"jump to PartitionEnd, mid-partition Start, limited by End"),
	MidStart = P0Start + SectorSize,
	?assertEqual(MidStart + Chunk,
		get_next_fetch_offset(MidStart, MidStart, P0End),
		"simple advance, mid-partition Start, SectorEnd past PartitionEnd"),
	?assertEqual(MidStart + 1000,
		get_next_fetch_offset(MidStart, MidStart, MidStart + 1000),
		"simple advance, mid-partition Start, SectorEnd past PartitionEnd, limited by End"),
	?assertEqual(P0End,
		get_next_fetch_offset(MidStart + Chunk, MidStart, P0End),
		"jump to PartitionEnd, mid-partition Start, SectorEnd past PartitionEnd"),
	SmallEndF = MidStart + SectorSize,
	?assertEqual(SmallEndF,
		get_next_fetch_offset(MidStart + Chunk, MidStart, SmallEndF),
		"jump to PartitionEnd, mid-partition Start, SectorEnd past PartitionEnd, limited by End"),
	ok.

-endif.


================================================
FILE: apps/arweave/src/ar_peer_worker.erl
================================================

%%% @doc Per-peer process managing sync task queue, dispatch state, and footprints.
%%%
%%% Each peer gets its own worker process to isolate state and prevent
%%% stale-state bugs from interleaved updates. This process manages:
%%%
%%% Peer Queue Management:
%%% - Maintains a queue of tasks ready to be dispatched for this peer
%%% - Tracks dispatched_count (number of tasks currently being processed)
%%% - Maintains max_dispatched limit that controls how many tasks can be
%%%   concurrently dispatched for this peer (adjusted via rebalancing)
%%% - If too many requests are made to a peer, it can be blocked or throttled
%%% - Peer performance varies over time; dispatch limits adapt to avoid getting
%%%   "stuck" syncing many chunks from a slow peer
%%%
%%% Footprint Management:
%%% - Tasks with a FootprintKey are grouped by footprint to limit concurrent
%%%   processing and avoid overloading the entropy cache
%%% - Footprints can be active (tasks being processed) or waiting (task_queue for later)
%%% - Each peer has a max_footprints limit to prevent overloading entropy cache
%%% - When a footprint becomes active, waiting tasks are moved to the peer queue
%%% - When all tasks in a footprint complete, it's deactivated and the next
%%%   waiting footprint may be activated
%%% - Long-running footprints are detected and logged per-peer
%%%
%%% Performance Tracking:
%%% - Tracks task completion times and data sizes for performance metrics
%%% - Integrates with ar_peers module for peer rating and performance tracking
%%%
%%% Rebalancing:
%%% - Responds to rebalance requests from the coordinator
%%% - Adjusts max_dispatched based on peer
performance vs target latency %%% - Cuts queue if it exceeds the calculated max_queue size -module(ar_peer_worker). -behaviour(gen_server). %% Lifecycle -export([start_link/1, get_or_start/1, stop/1]). %% Operations (all take Pid as first arg - coordinator caches Peer->Pid mapping) -export([enqueue_task/5, task_completed/5, process_queue/2, get_max_dispatched/1, rebalance/3, try_activate_footprint/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave/include/ar_peers.hrl"). -define(MIN_MAX_DISPATCHED, 8). -define(MIN_PEER_QUEUE, 20). -define(CHECK_LONG_RUNNING_FOOTPRINTS_MS, 60000). %% Check every 60 seconds -define(LONG_RUNNING_FOOTPRINT_THRESHOLD_S, 120). -define(IDLE_SHUTDOWN_THRESHOLD_S, 300). %% Shutdown after 5 minutes of no tasks -define(CALL_TIMEOUT_MS, 30000). -record(footprint, { waiting = queue:new(), %% queue of waiting tasks active_count = 0, %% count of active tasks (0 = inactive) activation_time %% monotonic time when activated (undefined if inactive) }). -record(state, { peer, peer_formatted, %% cached ar_util:format_peer(Peer) for metrics task_queue = queue:new(), dispatched_count = 0, waiting_count = 0, max_dispatched = ?MIN_MAX_DISPATCHED, last_task_time, %% monotonic time when last task was received %% Footprint management (coordinator tracks global limits) footprints = #{}, %% FootprintKey => #footprint{} active_footprints = sets:new() %% set of active FootprintKeys }). %%%=================================================================== %%% Public interface. %%%=================================================================== start_link(Peer) -> case gen_server:start_link(?MODULE, [Peer], []) of {ok, Pid} -> ets:insert(?MODULE, {Peer, Pid}), {ok, Pid}; Error -> Error end. %% @doc Lookup a peer worker pid by ETS registry. lookup(Peer) -> case ets:lookup(?MODULE, Peer) of [] -> undefined; [{_, Pid}] -> {ok, Pid} end. 
%% @doc Get the pid of an existing peer worker or start a new one. get_or_start(Peer) -> case lookup(Peer) of {ok, Pid} -> {ok, Pid}; undefined -> case supervisor:start_child(ar_peer_worker_sup, [Peer]) of {ok, Pid} -> {ok, Pid}; {error, {already_started, Pid}} -> {ok, Pid}; Error -> Error end end. stop(Pid) -> gen_server:stop(Pid). %%%=================================================================== %%% Operations (all take Pid as first argument). %%% Coordinator caches Peer->Pid mapping to avoid lookup overhead. %%%=================================================================== %% @doc Enqueue a task and process the queue synchronously. %% Returns {WasActivated, TasksToDispatch} where: %% - WasActivated: true if a new footprint was just activated, false otherwise %% - TasksToDispatch: list of tasks ready to dispatch %% HasCapacity indicates whether the global footprint limit allows activating new footprints. %% WorkerCount is used to calculate available dispatch slots. enqueue_task(Pid, FootprintKey, Args, HasCapacity, WorkerCount) -> try gen_server:call(Pid, {enqueue_task, FootprintKey, Args, HasCapacity, WorkerCount}, ?CALL_TIMEOUT_MS) catch exit:{timeout, _} -> {false, []}; _:_ -> {false, []} end. %% @doc Try to activate a waiting footprint (called when global capacity becomes available). %% Returns true if a footprint was activated, false otherwise. try_activate_footprint(Pid) -> try gen_server:call(Pid, try_activate_footprint, ?CALL_TIMEOUT_MS) catch exit:{timeout, _} -> false; _:_ -> false end. %% @doc Process the queue and return tasks ready for dispatch. %% Used to drain queued tasks without enqueuing new ones (e.g., after task completion). process_queue(Pid, WorkerCount) -> try gen_server:call(Pid, {process_queue, WorkerCount}, ?CALL_TIMEOUT_MS) catch exit:{timeout, _} -> []; _:_ -> [] end. %% @doc Notify task completed, update footprint accounting and rate data fetched. 
task_completed(Pid, FootprintKey, Result, ElapsedNative, DataSize) -> gen_server:cast(Pid, {task_completed, FootprintKey, Result, ElapsedNative, DataSize}). %% @doc Get max_dispatched for this peer. get_max_dispatched(Pid) -> try gen_server:call(Pid, get_max_dispatched, ?CALL_TIMEOUT_MS) catch exit:{timeout, _} -> {error, timeout}; _:_ -> {error, error} end. %% @doc Rebalance based on performance and targets. %% Returns RemovedCount (number of tasks cut from queue). rebalance(Pid, Performance, RebalanceParams) -> try gen_server:call(Pid, {rebalance, Performance, RebalanceParams}, ?CALL_TIMEOUT_MS) catch exit:{timeout, _} -> {error, timeout}; _:_ -> {error, timeout} end. %%%=================================================================== %%% gen_server callbacks. %%%=================================================================== init([Peer]) -> %% Schedule periodic check for long-running footprints erlang:send_after(?CHECK_LONG_RUNNING_FOOTPRINTS_MS, self(), check_long_running_footprints), PeerFormatted = ar_util:format_peer(Peer), %% Notify coordinator of our PID (handles restarts updating stale cached PIDs) gen_server:cast(ar_data_sync_coordinator, {peer_worker_started, Peer, self()}), ?LOG_INFO([{event, init}, {module, ?MODULE}, {peer, PeerFormatted}]), {ok, #state{ peer = Peer, peer_formatted = PeerFormatted, last_task_time = erlang:monotonic_time() }}. 
handle_call(get_max_dispatched, _From, State) ->
	{reply, State#state.max_dispatched, State};

handle_call(get_state, _From, State) ->
	%% Test-only: keep for tests to access raw state
	{reply, {ok, State}, State};

handle_call({enqueue_task, FootprintKey, Args, HasCapacity, WorkerCount}, _From, State) ->
	%% Record activity for idle-shutdown tracking, enqueue, then drain what fits.
	State1 = State#state{ last_task_time = erlang:monotonic_time() },
	{WasActivated, State2} = do_enqueue_task(FootprintKey, Args, HasCapacity, State1),
	{TasksToDispatch, State3} = do_process_queue(State2, WorkerCount),
	{reply, {WasActivated, TasksToDispatch}, State3};

handle_call({process_queue, WorkerCount}, _From, State) ->
	{TasksToDispatch, State2} = do_process_queue(State, WorkerCount),
	{reply, TasksToDispatch, State2};

handle_call(try_activate_footprint, _From, State) ->
	%% Global capacity became available - try to activate a waiting footprint
	{Activated, State2} = try_activate_waiting_footprint(State),
	{reply, Activated, State2};

handle_call({rebalance, Performance, RebalanceParams}, _From, State) ->
	{QueueScalingFactor, TargetLatency, WorkersStarved} = RebalanceParams,
	#state{
		task_queue = Queue,
		max_dispatched = MaxDispatched,
		dispatched_count = Dispatched,
		waiting_count = Waiting,
		peer_formatted = PeerFormatted,
		last_task_time = LastTaskTime
	} = State,
	%% 1. Cut queue if needed
	MaxQueueLen = max_queue_length(Performance, QueueScalingFactor),
	QueueLen = queue:len(Queue),
	{State2, RemovedCount} =
		case MaxQueueLen =/= infinity andalso QueueLen > MaxQueueLen of
			true ->
				%% Keep the first MaxQueueLen tasks, drop the rest and unwind
				%% their footprint accounting.
				{NewQueued, RemovedQueue} = queue:split(MaxQueueLen, Queue),
				Removed = queue:len(RemovedQueue),
				RemovedTasks = queue:to_list(RemovedQueue),
				increment_metrics(queued_out, State, Removed),
				S2 = cut_footprint_task_counts(RemovedTasks,
						State#state{ task_queue = NewQueued }),
				{S2, Removed};
			false ->
				{State, 0}
		end,
	%% 2. Update max_dispatched: grow by one when the peer beats the target
	%% latency (or workers are starved), otherwise shrink by one; clamp to
	%% [?MIN_MAX_DISPATCHED, max(outstanding work, ?MIN_MAX_DISPATCHED)].
	FasterThanTarget = Performance#performance.average_latency < TargetLatency,
	TargetMax =
		case FasterThanTarget orelse WorkersStarved of
			true -> MaxDispatched + 1;
			false -> MaxDispatched - 1
		end,
	MaxTasks = max(Dispatched, Waiting + queue:len(State2#state.task_queue)),
	NewMaxDispatched = ar_util:between(TargetMax, ?MIN_MAX_DISPATCHED,
			max(MaxTasks, ?MIN_MAX_DISPATCHED)),
	State3 = State2#state{ max_dispatched = NewMaxDispatched },
	%% 3. Check if we should shutdown (idle worker)
	NewQueueLen = queue:len(State3#state.task_queue),
	NewWaiting = State3#state.waiting_count,
	NewDispatched = State3#state.dispatched_count,
	IdleSeconds = erlang:convert_time_unit(
			erlang:monotonic_time() - LastTaskTime, native, second),
	ShouldShutdown = (NewDispatched == 0) andalso (NewQueueLen == 0)
			andalso (NewWaiting == 0) andalso (IdleSeconds >= ?IDLE_SHUTDOWN_THRESHOLD_S),
	%% 4. Log rebalance
	?LOG_DEBUG([{event, rebalance_peer},
		{peer, PeerFormatted},
		{dispatched_count, NewDispatched},
		{queued_count, NewQueueLen},
		{waiting_count, NewWaiting},
		{max_queue_len, MaxQueueLen},
		{faster_than_target, FasterThanTarget},
		{workers_starved, WorkersStarved},
		{max_dispatched, NewMaxDispatched},
		{removed_count, RemovedCount},
		{idle_seconds, IdleSeconds},
		{should_shutdown, ShouldShutdown}]),
	Result =
		case ShouldShutdown of
			true -> {shutdown, RemovedCount};
			false -> {ok, RemovedCount}
		end,
	{reply, Result, State3};

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, {error, unhandled}, State}.
handle_cast({task_completed, FootprintKey, Result, ElapsedNative, DataSize}, State) -> #state{ dispatched_count = DispatchedCount, max_dispatched = MaxDispatched, peer = Peer } = State, NewDispatchedCount = max(0, DispatchedCount - 1), increment_metrics(completed, State, 1), %% Rate the fetched data with ar_peers ElapsedMicroseconds = erlang:convert_time_unit(ElapsedNative, native, microsecond), ar_peers:rate_fetched_data(Peer, chunk, Result, ElapsedMicroseconds, DataSize, MaxDispatched), %% Complete footprint task State2 = do_complete_footprint_task( FootprintKey, State#state{ dispatched_count = NewDispatchedCount }), {noreply, State2}; handle_cast(Cast, State) -> ?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]), {noreply, State}. handle_info(check_long_running_footprints, State) -> %% Schedule next check erlang:send_after(?CHECK_LONG_RUNNING_FOOTPRINTS_MS, self(), check_long_running_footprints), log_long_running_footprints(State), {noreply, State}; handle_info(Info, State) -> ?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]), {noreply, State}. terminate(_Reason, State) -> %% Clean up ETS entry when process terminates ets:delete(?MODULE, State#state.peer), ok. %%%=================================================================== %%% Private functions - Task management %%%=================================================================== dequeue_tasks(Queue, 0, Acc) -> {lists:reverse(Acc), Queue}; dequeue_tasks(Queue, N, Acc) -> case queue:out(Queue) of {empty, _} -> {lists:reverse(Acc), Queue}; {{value, Args}, NewQueued} -> dequeue_tasks(NewQueued, N - 1, [Args | Acc]) end. %% @doc Calculate max queue size for this peer. 
%% MaxQueue = max(PeerThroughput * ScalingFactor, MIN_PEER_QUEUE)
%% Unrated peers (rating 0 or 0.0) and an infinite scaling factor mean no limit.
max_queue_length(#performance{ current_rating = 0 }, _ScalingFactor) ->
	infinity;
max_queue_length(#performance{ current_rating = 0.0 }, _ScalingFactor) ->
	infinity;
max_queue_length(_Performance, infinity) ->
	infinity;
max_queue_length(Performance, ScalingFactor) ->
	PeerThroughput = Performance#performance.current_rating,
	max(trunc(PeerThroughput * ScalingFactor), ?MIN_PEER_QUEUE).

%%%===================================================================
%%% Private functions - Task management
%%%===================================================================

%% @doc Process tasks from queue based on sync workers.
%% Dequeues up to min(WorkerCount, max_dispatched - dispatched_count) tasks and
%% counts them as dispatched. Returns {Tasks, NewState}.
do_process_queue(State, WorkerCount) ->
	#state{
		dispatched_count = Dispatched,
		max_dispatched = MaxDispatched,
		task_queue = Queue
	} = State,
	AvailableSlots = min(WorkerCount, MaxDispatched - Dispatched),
	{Tasks, RQ} = dequeue_tasks(Queue, AvailableSlots, []),
	TaskCount = length(Tasks),
	case TaskCount > 0 of
		true ->
			increment_metrics(dispatched, State, TaskCount),
			increment_metrics(queued_out, State, TaskCount);
		false ->
			ok
	end,
	NewDispatched = Dispatched + TaskCount,
	State2 = State#state{ task_queue = RQ, dispatched_count = NewDispatched },
	{Tasks, State2}.

%% @doc Enqueue a 'normal' task. Footprint limits can be ignored.
do_enqueue_task(none, Args, _HasCapacity, State) ->
	%% No footprint key - enqueue directly to peer queue
	#state{ task_queue = Queue } = State,
	NewQueue = queue:in(Args, Queue),
	increment_metrics(queued_in, State, 1),
	{false, State#state{ task_queue = NewQueue }};
%% @doc Enqueue a 'footprint' task respecting footprint limits.
%% Returns {WasActivated, NewState}; WasActivated is true only when a brand-new
%% footprint was activated.
do_enqueue_task(FootprintKey, Args, HasCapacity, State) ->
	#state{
		footprints = Footprints,
		task_queue = Queue,
		active_footprints = ActiveFootprints
	} = State,
	Footprint = maps:get(FootprintKey, Footprints, #footprint{}),
	IsActive = sets:is_element(FootprintKey, ActiveFootprints),
	case IsActive of
		true ->
			%% Footprint is already active, add task to it (not a new activation)
			NewQueue = queue:in(Args, Queue),
			Footprint2 = Footprint#footprint{
				active_count = Footprint#footprint.active_count + 1
			},
			increment_metrics(queued_in, State, 1),
			increment_metrics(activate_footprint_task, State, 1),
			{false, State#state{
				task_queue = NewQueue,
				footprints = maps:put(FootprintKey, Footprint2, Footprints)
			}};
		false when HasCapacity ->
			%% New footprint and global capacity available - activate it (new activation)
			NewQueue = queue:in(Args, Queue),
			Footprint2 = Footprint#footprint{ active_count = 1 },
			increment_metrics(queued_in, State, 1),
			increment_metrics(activate_footprint_task, State, 1),
			State2 = State#state{
				task_queue = NewQueue,
				footprints = maps:put(FootprintKey, Footprint2, Footprints)
			},
			State3 = activate_footprint(FootprintKey, State2),
			{true, State3};
		false ->
			%% No global capacity - queue task for later (no activation);
			%% the footprint stays in the map but NOT in the active set.
			Footprint2 = Footprint#footprint{
				waiting = queue:in(Args, Footprint#footprint.waiting)
			},
			increment_metrics(waiting_in, State, 1),
			{false, State#state{
				footprints = maps:put(FootprintKey, Footprint2, Footprints),
				waiting_count = State#state.waiting_count + 1
			}}
	end.

%% @doc Handle completion of a footprint task.
do_complete_footprint_task(none, State) -> State; do_complete_footprint_task(FootprintKey, State) -> #state{ footprints = Footprints, peer_formatted = PeerFormatted } = State, case Footprints of #{ FootprintKey := Footprint } -> increment_metrics(deactivate_footprint_task, State, 1), NewActiveCount = Footprint#footprint.active_count - 1, case NewActiveCount =< 0 of true -> %% Footprint has no more active tasks case queue:is_empty(Footprint#footprint.waiting) of true -> %% No waiting tasks - deactivate footprint deactivate_footprint(FootprintKey, Footprint, State); false -> %% Has waiting tasks - activate them activate_waiting_tasks(FootprintKey, Footprint, State) end; false -> %% Still has active tasks Footprint2 = Footprint#footprint{ active_count = NewActiveCount }, State#state{ footprints = maps:put(FootprintKey, Footprint2, Footprints) } end; _ -> ?LOG_WARNING([{event, complete_footprint_task_not_found}, {footprint_key, FootprintKey}, {peer, PeerFormatted}]), State end. %% @doc Deactivate a footprint. deactivate_footprint(FootprintKey, Footprint, State) -> #state{ footprints = Footprints, peer_formatted = PeerFormatted, active_footprints = ActiveFootprints } = State, %% Log deactivation with duration case Footprint#footprint.activation_time of undefined -> ok; ActivationTime -> DurationMs = erlang:convert_time_unit( erlang:monotonic_time() - ActivationTime, native, millisecond), ?LOG_DEBUG([{event, footprint_deactivated}, {peer, PeerFormatted}, {footprint_key, FootprintKey}, {duration_ms, DurationMs}]) end, increment_metrics(deactivate_footprint, State, 1), notify_footprint_deactivated(State#state.peer), State#state{ footprints = maps:remove(FootprintKey, Footprints), active_footprints = sets:del_element(FootprintKey, ActiveFootprints) }. %% @doc Activate waiting tasks from a footprint that was already active. %% Called when active_count reaches 0 but footprint has waiting tasks - just cycles tasks. 
activate_waiting_tasks(FootprintKey, Footprint, State) -> #state{ task_queue = Queue, footprints = Footprints } = State, WaitingQueue = Footprint#footprint.waiting, WaitingCount = queue:len(WaitingQueue), NewQueue = queue:join(Queue, WaitingQueue), increment_metrics(waiting_out, State, WaitingCount), increment_metrics(queued_in, State, WaitingCount), increment_metrics(activate_footprint_task, State, WaitingCount), Footprint2 = Footprint#footprint{ waiting = queue:new(), active_count = WaitingCount }, State#state{ task_queue = NewQueue, footprints = maps:put(FootprintKey, Footprint2, Footprints), waiting_count = State#state.waiting_count - WaitingCount }. %% @doc Activate a footprint - common logic for new activations. %% Sets activation_time, adds to active set, notifies coordinator, logs. activate_footprint(FootprintKey, State) -> #state{ footprints = Footprints, active_footprints = ActiveFootprints } = State, Footprint = maps:get(FootprintKey, Footprints), Footprint2 = Footprint#footprint{ activation_time = erlang:monotonic_time() }, increment_metrics(activate_footprint, State, 1), State#state{ footprints = maps:put(FootprintKey, Footprint2, Footprints), active_footprints = sets:add_element(FootprintKey, ActiveFootprints) }. %% @doc Try to activate the next waiting footprint if any. %% Returns {Activated, NewState} where Activated is true if a footprint was activated. try_activate_waiting_footprint(State) -> #state{ footprints = Footprints, active_footprints = ActiveFootprints } = State, case find_waiting_footprint(Footprints, ActiveFootprints) of none -> {false, State}; {FootprintKey, Footprint} -> %% Move waiting tasks to queue, then activate the footprint State2 = activate_waiting_tasks(FootprintKey, Footprint, State), State3 = activate_footprint(FootprintKey, State2), {true, State3} end. %% @doc Find the inactive footprint with the most waiting tasks. 
%% Fold over all footprints, keeping the inactive one with the largest waiting
%% queue. Returns {Key, Footprint} or none.
find_waiting_footprint(Footprints, ActiveFootprints) ->
	maps:fold(
		fun(Key, Footprint, Acc) ->
			IsActive = sets:is_element(Key, ActiveFootprints),
			HasWaiting = not queue:is_empty(Footprint#footprint.waiting),
			case not IsActive andalso HasWaiting of
				true ->
					WaitingCount = queue:len(Footprint#footprint.waiting),
					case Acc of
						none ->
							{Key, Footprint};
						{_AccKey, AccFP} ->
							AccWaitingCount = queue:len(AccFP#footprint.waiting),
							case WaitingCount > AccWaitingCount of
								true -> {Key, Footprint};
								false -> Acc
							end
					end;
				false ->
					Acc
			end
		end, none, Footprints).

%% @doc Decrement footprint counts for tasks removed from queue (e.g., during cut).
%% The FootprintKey is the 5th element of the task tuple.
cut_footprint_task_counts([], State) ->
	State;
cut_footprint_task_counts([Args | Rest], State) ->
	FootprintKey = element(5, Args),
	State2 =
		case FootprintKey of
			none -> State;
			_ -> do_complete_footprint_task(FootprintKey, State)
		end,
	cut_footprint_task_counts(Rest, State2).

%%%===================================================================
%%% Private functions - Long-running footprint detection (debugging)
%%%===================================================================

%% Collect footprints that have been active longer than the threshold and log
%% them in a single warning (no-op when there are none).
log_long_running_footprints(State) ->
	#state{ peer_formatted = PeerFormatted, footprints = Footprints } = State,
	Now = erlang:monotonic_time(),
	ThresholdNative = erlang:convert_time_unit(?LONG_RUNNING_FOOTPRINT_THRESHOLD_S,
			second, native),
	LongRunning = maps:fold(
		fun(FootprintKey, Footprint, Acc) ->
			case Footprint#footprint.activation_time of
				undefined ->
					%% Inactive footprint - nothing to report.
					Acc;
				ActivationTime ->
					Duration = Now - ActivationTime,
					case Duration > ThresholdNative of
						true ->
							DurationS = erlang:convert_time_unit(Duration, native, second),
							[#{ key => FootprintKey,
								duration_s => DurationS,
								active_count => Footprint#footprint.active_count,
								waiting_count => queue:len(Footprint#footprint.waiting) }
								| Acc];
						false ->
							Acc
					end
			end
		end, [], Footprints),
	case LongRunning of
		[] ->
			ok;
		_ ->
			?LOG_WARNING([{event, long_running_footprints},
				{peer, PeerFormatted},
				{count, length(LongRunning)},
				{footprints, LongRunning}])
	end.

%%%===================================================================
%%% Private functions - Coordinator notifications
%%%===================================================================

%% Notify coordinator that a footprint was deactivated (for global tracking)
notify_footprint_deactivated(Peer) ->
	gen_server:cast(ar_data_sync_coordinator, {footprint_deactivated, Peer}).

%%%===================================================================
%%% Private functions - Metrics
%%%===================================================================

%% Increment prometheus counter - catches errors when prometheus isn't initialized
%% (e.g. in tests). Metric is an atom (e.g., dispatched, queued_in).
increment_metrics(Metric, #state{ peer_formatted = PeerFormatted }, Value) ->
	try
		prometheus_counter:inc(sync_tasks, [Metric, PeerFormatted], Value)
	catch _:_ ->
		ok
	end.

%%%===================================================================
%%% Tests.
%%%===================================================================

-ifdef(AR_TEST).

-include_lib("eunit/include/eunit.hrl").

%% @doc Test-only helper to get footprint stats by calling get_state.
get_footprint_stats(Pid) ->
	case gen_server:call(Pid, get_state) of
		{ok, State} ->
			#state{
				footprints = Footprints,
				active_footprints = ActiveFootprints
			} = State,
			TotalActive = maps:fold(fun(_, Footprint, Acc) ->
				Acc + Footprint#footprint.active_count
			end, 0, Footprints),
			#{
				footprint_count => maps:size(Footprints),
				%% The footprints map also holds inactive footprints that only have
				%% waiting tasks (see the no-capacity branch of do_enqueue_task/4),
				%% so count active footprints from the dedicated set rather than
				%% assuming every map entry is active.
				active_footprint_count => sets:size(ActiveFootprints),
				total_active_tasks => TotalActive
			};
		Error ->
			Error
	end.

%% Verify the ETS-based registry: missing peers return undefined, registered
%% peers return {ok, Pid}.
lookup_test() ->
	Peer1 = {10, 20, 30, 40, 9999},
	?assertEqual(undefined, lookup(Peer1)),
	Peer2 = {50, 60, 70, 80, 1234},
	TestPid = spawn(fun() -> receive stop -> ok end end),
	ets:insert(?MODULE, {Peer2, TestPid}),
	?assertEqual({ok, TestPid}, lookup(Peer2)),
	%% Cleanup
	TestPid ! stop,
	ets:delete(?MODULE, Peer2).
%% Tests that require setup/cleanup.
%% Each generator receives the {Peer, Pid} produced by setup/0 and returns a
%% zero-arity fun that eunit executes.
peer_worker_test_() ->
	{foreach, fun setup/0, fun cleanup/1, [
		fun test_enqueue_and_process/1,
		fun test_task_completed/1,
		fun test_cut_queue/1,
		fun test_footprint_basic/1,
		fun test_multiple_footprints/1,
		fun test_footprint_completion/1,
		fun test_footprint_waiting_queue/1,
		fun test_try_activate_footprint/1,
		fun test_footprint_task_cycling/1,
		fun test_active_footprints_set/1,
		fun test_add_task_to_active_footprint/1,
		fun test_footprint_deactivation_removes_from_map/1
	]}.

%% Start a fresh, unnamed worker for every test case.
setup() ->
	Peer = {1, 2, 3, 4, 1984},
	%% Start peer worker directly (not via supervisor, unnamed for test isolation)
	{ok, Pid} = gen_server:start(?MODULE, [Peer], []),
	{Peer, Pid}.

%% Stop the worker and drop its ETS registration.
cleanup({Peer, Pid}) ->
	%% Clean up ETS entry when removing the worker
	ets:delete(?MODULE, Peer),
	gen_server:stop(Pid),
	ok.

%% Enqueue three footprint-less tasks, dispatch two via process_queue/2, then
%% verify the queue length and dispatched_count in the worker state.
test_enqueue_and_process({Peer, Pid}) ->
	fun() ->
		%% Enqueue some tasks (no footprint)
		{false, []} = enqueue_task(Pid, none, {0, 100, Peer, store1, none}, true, 0),
		{false, []} = enqueue_task(Pid, none, {100, 200, Peer, store1, none}, true, 0),
		{false, []} = enqueue_task(Pid, none, {200, 300, Peer, store1, none}, true, 0),
		%% Sync to ensure casts processed
		{ok, _} = gen_server:call(Pid, get_state),
		%% Process queue - should get up to 2 tasks
		Tasks = process_queue(Pid, 2),
		?assertEqual(2, length(Tasks)),
		{false, Tasks2} = enqueue_task(Pid, none, {300, 400, Peer, store1, none}, true, 1),
		?assertEqual(1, length(Tasks2)),
		%% Check state
		{ok, State} = gen_server:call(Pid, get_state),
		?assertEqual(1, queue:len(State#state.task_queue)),
		?assertEqual(3, State#state.dispatched_count)
	end.

%% Dispatch a single task and verify that task_completed/5 brings
%% dispatched_count back to zero.
test_task_completed({Peer, Pid}) ->
	fun() ->
		%% Enqueue and dispatch a task (no footprint)
		enqueue_task(Pid, none, {0, 100, Peer, store1, none}, true, 0),
		{ok, _} = gen_server:call(Pid, get_state), %% sync
		[_Task] = process_queue(Pid, 1),
		%% Complete the task (FootprintKey = none)
		task_completed(Pid, none, ok, 0, 100),
		%% Wait for cast to be processed by doing a sync call
		{ok, State} = gen_server:call(Pid, get_state),
		?assertEqual(0, State#state.dispatched_count)
	end.

%% Overfill the queue, then rebalance so the queue is cut down to the
%% MIN_PEER_QUEUE floor (20) and 5 tasks are reported as cut.
test_cut_queue({Peer, Pid}) ->
	fun() ->
		%% Enqueue 25 tasks (more than MIN_PEER_QUEUE = 20)
		lists:foreach(fun(I) ->
			enqueue_task(Pid, none, {I * 100, (I + 1) * 100, Peer, store1, none}, true, 0)
		end, lists:seq(0, 24)),
		%% Sync and check we have 25 tasks
		{ok, State1} = gen_server:call(Pid, get_state),
		?assertEqual(25, queue:len(State1#state.task_queue)),
		%% Rebalance with scaling factor that gives MaxQueue = 20 (MIN_PEER_QUEUE)
		%% MaxQueue = max(PeerThroughput * ScalingFactor, MIN_PEER_QUEUE)
		%% With PeerThroughput = 100, ScalingFactor = 0.1 => MaxQueue = max(10, 20) = 20
		Performance = #performance{ current_rating = 100.0, average_latency = 50.0 },
		%% RebalanceParams = {QueueScalingFactor, TargetLatency, WorkersStarved}
		%% FasterThanTarget = (50.0 < 100.0) = true
		RebalanceParams = {0.1, 100.0, false},
		Result = rebalance(Pid, Performance, RebalanceParams),
		?assertEqual({ok, 5}, Result),
		%% Check we have 20 tasks left (MIN_PEER_QUEUE)
		{ok, State2} = gen_server:call(Pid, get_state),
		?assertEqual(20, queue:len(State2#state.task_queue))
	end.
%% A footprint enqueued with capacity activates immediately and is deactivated
%% once its only task completes.
test_footprint_basic({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% Enqueue task with footprint - should activate it (HasCapacity=true)
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, true, 0),
		%% Check footprint stats (get_footprint_stats syncs via call)
		Stats = get_footprint_stats(Pid),
		?assertEqual(1, maps:get(active_footprint_count, Stats)),
		?assertEqual(1, maps:get(total_active_tasks, Stats)),
		%% Complete the task
		task_completed(Pid, FootprintKey, ok, 0, 100),
		%% Footprint should be deactivated (get_footprint_stats will wait for cast to process)
		Stats2 = get_footprint_stats(Pid),
		?assertEqual(0, maps:get(active_footprint_count, Stats2))
	end.

%% Two distinct footprints with capacity can be active at the same time.
test_multiple_footprints({Peer, Pid}) ->
	fun() ->
		%% Verify state is accessible
		{ok, _State0} = gen_server:call(Pid, get_state),
		%% Test that multiple footprints can be activated (HasCapacity=true)
		FootprintKey1 = {store1, 1000, Peer},
		FootprintKey2 = {store1, 2000, Peer},
		%% Enqueue tasks for two footprints
		enqueue_task(Pid, FootprintKey1, {0, 100, Peer, store1, FootprintKey1}, true, 0),
		enqueue_task(Pid, FootprintKey2, {100, 200, Peer, store1, FootprintKey2}, true, 0),
		Stats = get_footprint_stats(Pid),
		?assertEqual(2, maps:get(active_footprint_count, Stats))
	end.

%% A footprint stays active while any of its tasks remain and deactivates
%% only after the last one completes.
test_footprint_completion({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% Enqueue multiple tasks for same footprint (HasCapacity=true)
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, true, 0),
		enqueue_task(Pid, FootprintKey, {100, 200, Peer, store1, FootprintKey}, true, 0),
		enqueue_task(Pid, FootprintKey, {200, 300, Peer, store1, FootprintKey}, true, 0),
		Stats1 = get_footprint_stats(Pid), %% Syncs via call
		?assertEqual(3, maps:get(total_active_tasks, Stats1)),
		%% Complete tasks one by one
		task_completed(Pid, FootprintKey, ok, 0, 100),
		Stats2 = get_footprint_stats(Pid), %% Wait for cast to process
		?assertEqual(2, maps:get(total_active_tasks, Stats2)),
		?assertEqual(1, maps:get(active_footprint_count, Stats2)), %% Still active
		task_completed(Pid, FootprintKey, ok, 0, 100),
		task_completed(Pid, FootprintKey, ok, 0, 100),
		Stats3 = get_footprint_stats(Pid), %% Wait for casts to process
		?assertEqual(0, maps:get(total_active_tasks, Stats3)),
		?assertEqual(0, maps:get(active_footprint_count, Stats3)) %% Deactivated
	end.

%% Without capacity, tasks land in the footprint's waiting queue and the
%% footprint is not added to the active set.
test_footprint_waiting_queue({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% Enqueue task with HasCapacity=false - should go to waiting queue
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, false, 0),
		enqueue_task(Pid, FootprintKey, {100, 200, Peer, store1, FootprintKey}, false, 0),
		%% Sync and check state
		{ok, State} = gen_server:call(Pid, get_state),
		%% Task queue should be empty (tasks went to waiting)
		?assertEqual(0, queue:len(State#state.task_queue)),
		%% waiting_count should be 2
		?assertEqual(2, State#state.waiting_count),
		%% Footprint should NOT be in active_footprints set
		?assertEqual(false, sets:is_element(FootprintKey, State#state.active_footprints)),
		%% Footprint should have 2 waiting tasks
		Footprint = maps:get(FootprintKey, State#state.footprints),
		?assertEqual(2, queue:len(Footprint#footprint.waiting)),
		?assertEqual(0, Footprint#footprint.active_count)
	end.
%% try_activate_footprint/1 returns false with nothing waiting; after a task
%% is parked in a waiting footprint it returns true and moves the task into
%% the main task_queue, marking the footprint active.
test_try_activate_footprint({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% First, no waiting footprints - should return false
		Result1 = try_activate_footprint(Pid),
		?assertEqual(false, Result1),
		%% Enqueue task with HasCapacity=false - goes to waiting
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, false, 0),
		{ok, _} = gen_server:call(Pid, get_state), %% sync
		%% Now try_activate_footprint should return true
		Result2 = try_activate_footprint(Pid),
		?assertEqual(true, Result2),
		%% Check state - task should now be in task_queue
		{ok, State} = gen_server:call(Pid, get_state),
		?assertEqual(1, queue:len(State#state.task_queue)),
		?assertEqual(0, State#state.waiting_count),
		%% Footprint should be in active_footprints set
		?assertEqual(true, sets:is_element(FootprintKey, State#state.active_footprints)),
		%% Footprint should have active_count = 1
		Footprint = maps:get(FootprintKey, State#state.footprints),
		?assertEqual(1, Footprint#footprint.active_count),
		?assertEqual(0, queue:len(Footprint#footprint.waiting))
	end.

%% When the last task of a footprint completes and nothing is waiting, the
%% footprint is removed from both the map and the active set.
test_footprint_task_cycling({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% Enqueue one task with HasCapacity=true (activates footprint)
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, true, 0),
		{ok, _} = gen_server:call(Pid, get_state), %% sync
		%% Dispatch the task
		[_Task] = process_queue(Pid, 1),
		%% Verify the basic behavior that when a footprint completes
		%% and has no waiting tasks, it deactivates.
		%% Complete the task
		task_completed(Pid, FootprintKey, ok, 0, 100),
		{ok, State} = gen_server:call(Pid, get_state),
		%% Footprint should be removed (no waiting tasks)
		?assertEqual(false, maps:is_key(FootprintKey, State#state.footprints)),
		?assertEqual(false, sets:is_element(FootprintKey, State#state.active_footprints))
	end.

%% The active_footprints set tracks exactly the footprints activated with
%% capacity; waiting footprints are excluded and completed ones are removed.
test_active_footprints_set({Peer, Pid}) ->
	fun() ->
		FootprintKey1 = {store1, 1000, Peer},
		FootprintKey2 = {store1, 2000, Peer},
		FootprintKey3 = {store1, 3000, Peer},
		%% Initially set should be empty
		{ok, State0} = gen_server:call(Pid, get_state),
		?assertEqual(0, sets:size(State0#state.active_footprints)),
		%% Activate two footprints
		enqueue_task(Pid, FootprintKey1, {0, 100, Peer, store1, FootprintKey1}, true, 0),
		enqueue_task(Pid, FootprintKey2, {100, 200, Peer, store1, FootprintKey2}, true, 0),
		%% Third one goes to waiting
		enqueue_task(Pid, FootprintKey3, {200, 300, Peer, store1, FootprintKey3}, false, 0),
		{ok, State1} = gen_server:call(Pid, get_state),
		?assertEqual(2, sets:size(State1#state.active_footprints)),
		?assertEqual(true, sets:is_element(FootprintKey1, State1#state.active_footprints)),
		?assertEqual(true, sets:is_element(FootprintKey2, State1#state.active_footprints)),
		?assertEqual(false, sets:is_element(FootprintKey3, State1#state.active_footprints)),
		%% Deactivate one footprint
		task_completed(Pid, FootprintKey1, ok, 0, 100),
		{ok, State2} = gen_server:call(Pid, get_state),
		?assertEqual(1, sets:size(State2#state.active_footprints)),
		?assertEqual(false, sets:is_element(FootprintKey1, State2#state.active_footprints)),
		?assertEqual(true, sets:is_element(FootprintKey2, State2#state.active_footprints))
	end.
%% Once a footprint is active, further tasks for it bypass the waiting queue
%% regardless of the HasCapacity flag.
test_add_task_to_active_footprint({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% Activate footprint with one task
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, true, 0),
		{ok, State1} = gen_server:call(Pid, get_state),
		Footprint1 = maps:get(FootprintKey, State1#state.footprints),
		?assertEqual(1, Footprint1#footprint.active_count),
		?assertEqual(1, queue:len(State1#state.task_queue)),
		%% Add more tasks to same footprint (regardless of HasCapacity, goes to task_queue since active)
		enqueue_task(Pid, FootprintKey, {100, 200, Peer, store1, FootprintKey}, true, 0),
		enqueue_task(Pid, FootprintKey, {200, 300, Peer, store1, FootprintKey}, false, 0),
		{ok, State2} = gen_server:call(Pid, get_state),
		Footprint2 = maps:get(FootprintKey, State2#state.footprints),
		%% active_count should be 3 (all tasks added to active footprint)
		?assertEqual(3, Footprint2#footprint.active_count),
		%% All 3 tasks should be in task_queue
		?assertEqual(3, queue:len(State2#state.task_queue)),
		%% waiting should still be empty
		?assertEqual(0, queue:len(Footprint2#footprint.waiting)),
		?assertEqual(0, State2#state.waiting_count)
	end.

%% Completing the sole task of a footprint removes it from the footprints map,
%% the active set, and leaves waiting_count at zero.
test_footprint_deactivation_removes_from_map({Peer, Pid}) ->
	fun() ->
		FootprintKey = {store1, 1000, Peer},
		%% Activate footprint
		enqueue_task(Pid, FootprintKey, {0, 100, Peer, store1, FootprintKey}, true, 0),
		{ok, State1} = gen_server:call(Pid, get_state),
		%% Footprint should exist in map
		?assertEqual(true, maps:is_key(FootprintKey, State1#state.footprints)),
		?assertEqual(true, sets:is_element(FootprintKey, State1#state.active_footprints)),
		%% Complete the task (footprint has no waiting tasks)
		task_completed(Pid, FootprintKey, ok, 0, 100),
		{ok, State2} = gen_server:call(Pid, get_state),
		%% Footprint should be completely removed from both structures
		?assertEqual(false, maps:is_key(FootprintKey, State2#state.footprints)),
		?assertEqual(false, sets:is_element(FootprintKey, State2#state.active_footprints)),
		?assertEqual(0, State2#state.waiting_count)
	end.

-endif.


================================================
FILE: apps/arweave/src/ar_peer_worker_sup.erl
================================================

%%% @doc Supervisor for ar_peer_worker processes.
%%% Uses simple_one_for_one to dynamically spawn peer workers on demand.
-module(ar_peer_worker_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the supervisor, registered locally under the module name.
start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%%%===================================================================
%%% Supervisor callbacks.
%%%=================================================================== init([]) -> ets:new(ar_peer_worker, [set, public, named_table, {read_concurrency, true}]), ChildSpec = #{ id => ar_peer_worker, start => {ar_peer_worker, start_link, []}, restart => temporary, %% Don't restart - will be recreated on demand shutdown => 5000, type => worker, modules => [ar_peer_worker] }, {ok, {#{strategy => simple_one_for_one, intensity => 10, period => 60}, [ChildSpec]}}. ================================================ FILE: apps/arweave/src/ar_peers.erl ================================================ %%% @doc Tracks the availability and performance of the network peers. -module(ar_peers). -behaviour(gen_server). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("arweave/include/ar_peers.hrl"). -include_lib("eunit/include/eunit.hrl"). -export([ add_peer/2, connected_peer/1, disconnected_peer/1, discover_peers/0, filter_peers/2, get_connection_timestamp_peer/1, get_peer_performances/1, get_peer_release/1, get_peers/1, get_tag/2, get_trusted_peers/0, is_connected_peer/1, is_public_peer/1, issue_warning/3, rate_fetched_data/4, rate_fetched_data/6, rate_gossiped_data/4, resolve_and_cache_peer/2, resolve_and_cache_peer/3, set_tag/3, start_link/0, stats/1 ]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). %% The frequency in seconds of re-resolving DNS of peers configured by domain names. -define(STORE_RESOLVED_DOMAIN_S, 60). %% The frequency in milliseconds of ranking the known peers. -ifdef(AR_TEST). -define(RANK_PEERS_FREQUENCY_MS, 2 * 1000). -else. -define(RANK_PEERS_FREQUENCY_MS, 2 * 60 * 1000). -endif. %% The frequency in milliseconds of asking some peers for their peers. -ifdef(AR_TEST). -define(GET_MORE_PEERS_FREQUENCY_MS, 5000). -else. -define(GET_MORE_PEERS_FREQUENCY_MS, 240 * 1000). -endif. %% Peers to never add to the peer list. -define(PEER_PERMANENT_BLACKLIST, []). 
%% The maximum number of peers to return from get_peers/1.
-define(MAX_PEERS, 1000).

%% Minimum average_success we'll tolerate before dropping a peer.
-define(MINIMUM_SUCCESS, 0.8).

%% The alpha value in an EMA calculation is somewhat unintuitive:
%%
%% NewEma = (1 - Alpha) * OldEma + Alpha * NewValue
%%
%% When calculating the SuccessEma the NewValue is always either 1 or 0. So if we want to see how
%% many consecutive failures it will take to drop the SuccessEma from 1 to 0.5 (i.e. 50% failure
%% rate), a number of terms in the equation drop out and we're left with:
%%
%% 0.5 = (1 - Alpha) ^ N
%%
%% Where N is the number of consecutive failures.
%%
%% Setting Alpha to 0.1 we can determine the number of consecutive failures:
%% 0.5 = 0.9 ^ N
%% log(0.5) = N * log(0.9)
%% N = log(0.5) / log(0.9)
%% N = 6.58
%%
%% And if we want to set the Alpha such that it takes 20 consecutive failures to go from 1 to 0.5:
%% 0.5 = (1 - Alpha) ^ 20
%% log(0.5) = 20 * log(1 - Alpha)
%% 1 - Alpha = 10 ^ (log(0.5) / 20)
%% Alpha = 1 - 10 ^ (log(0.5) / 20)
%% Alpha = 0.035
-define(SUCCESS_ALPHA, 0.035).

%% The THROUGHPUT_ALPHA is even harder to intuit since the values being averaged can be any
%% positive number and are not just limited to 0 or 1. Perhaps one way to think about it is:
%% When a datapoint is first added to the average it is scaled by Alpha, and then every time
%% another datapoint is added, the contribution of all prior datapoints are scaled by (1-Alpha).
%% So how many new datapoints will it take to reduce the contribution of an earlier datapoint
%% to "virtually" 0?
%%
%% If we assume "virtually 0" is the same as 1% of its true value (i.e. if the datapoint was
%% originally 100, it now contributes 1 to the average), then we can use a similar equation as
%% the SUCCESS_ALPHA equation to determine how many datapoints materially contribute to the average:
%%
%% 0.01 = ((1 - Alpha) ^ N) * Alpha
%%
%% The additional "* Alpha" term is to account for the scaling that happens when a datapoint is
%% first added.
%%
%% With an Alpha of 0.05 we're essentially saying that the last ~31 datapoints contribute 99% of
%% the average:
%%
%% 0.01 = ((1 - 0.05) ^ N) * 0.05
%% 0.01 / 0.05 = (1 - 0.05) ^ N
%% log(0.2) = N * log(0.95)
%% N = log(0.2) / log(0.95)
%% N = 31.38
-define(THROUGHPUT_ALPHA, 0.05).

%% When processing block rejected events for blocks received from a peer, we handle rejections
%% differently based on the rejection reason.
%% Reasons that merit a warning (may lower the peer's success score):
-define(BLOCK_REJECTION_WARNING, [
	failed_to_fetch_first_chunk,
	failed_to_fetch_second_chunk,
	failed_to_fetch_chunk
]).
%% Reasons that trigger an immediate ban and removal of the peer:
-define(BLOCK_REJECTION_BAN, [
	invalid_previous_solution_hash,
	invalid_last_retarget,
	invalid_difficulty,
	invalid_cumulative_difficulty,
	invalid_hash_preimage,
	invalid_nonce_limiter_seed_data,
	invalid_partition_number,
	invalid_nonce,
	invalid_pow,
	invalid_poa,
	invalid_poa2,
	invalid_nonce_limiter,
	invalid_nonce_limiter_cache_mismatch,
	invalid_packing_difficulty
]).
%% Reasons that are ignored (no penalty for the peer):
-define(BLOCK_REJECTION_IGNORE, [
	invalid_signature,
	invalid_proof_size,
	invalid_first_chunk,
	invalid_second_chunk,
	invalid_poa2_recall_byte2_undefined,
	invalid_hash,
	invalid_timestamp,
	invalid_resigned_solution_hash,
	invalid_nonce_limiter_global_step_number,
	invalid_first_unpacked_chunk,
	invalid_second_unpacked_chunk,
	invalid_first_unpacked_chunk_hash,
	invalid_second_unpacked_chunk_hash
]).

%% We only do scoring of this many TCP ports per IP address. When there are not enough slots,
%% we remove the peer from the first slot.
-define(DEFAULT_PEER_PORT_MAP, {empty_slot, empty_slot, empty_slot, empty_slot, empty_slot,
		empty_slot, empty_slot, empty_slot, empty_slot, empty_slot}).

%% The gen_server keeps no state of its own; all data lives in the ETS table.
-record(state, {}).
%%%=================================================================== %%% Public interface. %%%=================================================================== %% @doc Start the server. start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). %%-------------------------------------------------------------------- %% @doc Return the list of peers in the given ranking order. %% Rating is an estimate of the peer's effective throughput in %% bytes per millisecond. %% %% `lifetime' considers all data ever received from this peer and %% is most useful when we care more about identifying "good %% samaritans" rather than maximizing throughput (e.g. when %% polling for new blocks are determing which peer's blocks to %% validated first). %% %% `current' weights recently received data higher than old data %% and is most useful when we care more about maximizing throughput %% (e.g. when syncing chunks). %% %% @end %%-------------------------------------------------------------------- get_peers(Ranking) -> case catch ets:lookup(?MODULE, {peers, Ranking}) of {'EXIT', _} -> []; [] -> []; [{{peers, lifetime}, Peers}] -> Peers; [{{peers, current}, Peers}] -> filter_peers(Peers, {timestamp, ?CURRENT_PEERS_LIST_FILTER}); [{_, Peers}] -> Peers end. filter_peers(Peers, {timestamp, Seconds}) when is_integer(Seconds) -> Timefilter = erlang:system_time(seconds) - Seconds, Tag = {connection, last}, Pattern = {{ar_tags, ?MODULE, '$1', Tag}, '$3'}, Guard = [{'>=', '$3', Timefilter}], Select = ['$1'], TaggedPeers = ets:select(?MODULE, [{Pattern, Guard, Select}]), [ P || T <- TaggedPeers, P <- Peers, T =:= P ]. get_peer_performances(Peers) -> lists:foldl( fun(Peer, Map) -> Performance = get_or_init_performance(Peer), maps:put(Peer, Performance, Map) end, #{}, Peers). -if(?NETWORK_NAME == "arweave.N.1"). 
resolve_peers([]) -> []; resolve_peers([RawPeer | Peers]) -> case ar_util:safe_parse_peer(RawPeer) of {ok, Peer} -> Peer ++ resolve_peers(Peers); {error, invalid} -> ?LOG_WARNING([{event, failed_to_resolve_trusted_peer}, {peer, RawPeer}]), resolve_peers(Peers) end. get_trusted_peers() -> {ok, Config} = arweave_config:get_env(), case Config#config.peers of [] -> ArweavePeers = [ "asia.peers.arweave.xyz", "europe.peers.arweave.xyz", "india.peers.arweave.xyz", "north-america.peers.arweave.xyz", "oceania.peers.arweave.xyz" ], resolve_peers(ArweavePeers); Peers -> Peers end. -else. get_trusted_peers() -> {ok, Config} = arweave_config:get_env(), Config#config.peers. -endif. %% @doc Return true if the given peer has a public IPv4 address. %% https://en.wikipedia.org/wiki/Reserved_IP_addresses. is_public_peer({Oct1, Oct2, Oct3, Oct4, _Port}) -> is_public_peer({Oct1, Oct2, Oct3, Oct4}); is_public_peer({0, _, _, _}) -> false; is_public_peer({10, _, _, _}) -> false; is_public_peer({127, _, _, _}) -> false; is_public_peer({100, Oct2, _, _}) when Oct2 >= 64 andalso Oct2 =< 127 -> false; is_public_peer({169, 254, _, _}) -> false; is_public_peer({172, Oct2, _, _}) when Oct2 >= 16 andalso Oct2 =< 31 -> false; is_public_peer({192, 0, 0, _}) -> false; is_public_peer({192, 0, 2, _}) -> false; is_public_peer({192, 88, 99, _}) -> false; is_public_peer({192, 168, _, _}) -> false; is_public_peer({198, 18, _, _}) -> false; is_public_peer({198, 19, _, _}) -> false; is_public_peer({198, 51, 100, _}) -> false; is_public_peer({203, 0, 113, _}) -> false; is_public_peer({Oct1, _, _, _}) when Oct1 >= 224 -> false; is_public_peer(_) -> true. %% @doc Return the release nubmer reported by the peer. %% Return -1 if the release is not known. get_peer_release(Peer) -> case catch ets:lookup(?MODULE, {peer, Peer}) of [{_, #performance{ release = Release }}] -> Release; _ -> -1 end. 
rate_fetched_data(Peer, DataType, LatencyMicroseconds, DataSize) -> rate_fetched_data(Peer, DataType, ok, LatencyMicroseconds, DataSize, 1). rate_fetched_data(Peer, DataType, ok, LatencyMicroseconds, DataSize, Concurrency) -> try gen_server:cast(?MODULE, {valid_data, Peer, DataType, LatencyMicroseconds / 1000, DataSize, Concurrency}) catch _:_ -> ok end; rate_fetched_data(Peer, DataType, _, _LatencyMicroseconds, _DataSize, _Concurrency) -> try gen_server:cast(?MODULE, {invalid_data, Peer, DataType}) catch _:_ -> ok end. rate_gossiped_data(Peer, DataType, LatencyMicroseconds, DataSize) -> case check_peer(Peer) of ok -> gen_server:cast(?MODULE, {valid_data, Peer, DataType, LatencyMicroseconds / 1000, DataSize, 1}); _ -> ok end. issue_warning(Peer, _Type, _Reason) -> gen_server:cast(?MODULE, {warning, Peer}). add_peer(Peer, Release) -> gen_server:cast(?MODULE, {add_peer, Peer, Release}). %% @doc Print statistics about the current peers. stats(Ranking) -> Connected = get_peers(Ranking), io:format("Connected peers, in ~s order:~n", [Ranking]), stats(Ranking, Connected), io:format("Other known peers:~n"), All = ets:foldl( fun ({{peer, Peer}, _}, Acc) -> [Peer | Acc]; (_, Acc) -> Acc end, [], ?MODULE ), stats(All -- Connected). stats(Ranking, Peers) -> lists:foreach( fun(Peer) -> format_stats(Ranking, Peer, get_or_init_performance(Peer)) end, Peers ). discover_peers() -> case get_peers(current) of [] -> ok; Peers -> Peer = ar_util:pick_random(Peers), discover_peers(get_peer_peers(Peer)) end. %%-------------------------------------------------------------------- %% @doc %% @see resolve_and_cache_peer/3 %% @end %%-------------------------------------------------------------------- -spec resolve_and_cache_peer(RawPeer, Type) -> Return when RawPeer :: string(), Type :: term(), Return :: {ok, {A,A,A,A,Port}} | {error, term()}, A :: pos_integer(), Port :: pos_integer(). resolve_and_cache_peer(RawPeer, Type) -> resolve_and_cache_peer(RawPeer, Type, #{}). 
%%-------------------------------------------------------------------- %% @doc Resolve the domain name of the given peer (if the given peer %% is an IP address) and cache it. Invalidate the cache after %% `?STORE_RESOLVED_DOMAIN_S seconds.' Return {ok, Peer} | {error, %% Reason}. %% @end %%-------------------------------------------------------------------- -spec resolve_and_cache_peer(RawPeer, Type, Opts) -> Return when RawPeer :: string(), Type :: term(), Opts :: map(), Return :: {ok, {A,A,A,A,Port}} | {error, term()}, A :: pos_integer(), Port :: pos_integer(). resolve_and_cache_peer(RawPeer, Type, Opts) -> Now = maps:get(now, Opts, erlang:system_time(second)), CacheTTL = maps:get(cache_ttl, Opts, ?STORE_RESOLVED_DOMAIN_S), State = #{ raw_peer => RawPeer, type => Type, now => Now, opts => Opts, cache_ttl => CacheTTL }, % first check if the peer as string (so using a name % record) is present in ets table. If the peer is not present % in cache, then it will be updated. Else, the timestamp needs % to be checked. case ets:lookup(?MODULE, {raw_peer, RawPeer}) of [] -> resolve_and_cache_peer_empty(State); [{_, {CachedPeer, CachedTimestamp}}] -> NewState = State#{ cache_timestamp => CachedTimestamp, cache_peer => CachedPeer }, resolve_and_cache_peer2(CachedPeer, NewState) end. %%-------------------------------------------------------------------- %% @hidden %% @private %% @doc check if peer cache did not expired. %% @end %%-------------------------------------------------------------------- resolve_and_cache_peer2(CachedPeer, State) -> Now = maps:get(now, State), CachedTimestamp = maps:get(cache_timestamp, State), CacheTTL = maps:get(cache_ttl, State), % if the peer present in cache expired, it needs to be % refreshed, else it can be returned. case CachedTimestamp + CacheTTL < Now of true -> resolve_and_cache_peer_refresh(CachedPeer, State); false -> {ok, CachedPeer} end. 
%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc the cache expired.
%% @end
%%--------------------------------------------------------------------
resolve_and_cache_peer_refresh(_CachedPeer, State) ->
	RawPeer = maps:get(raw_peer, State),
	Opts = maps:get(opts, State, #{}),
	% the cache entry expired, in this case, raw peer needs to be
	% reparsed and checked. It will return a list of peers.
	case ar_util:safe_parse_peer(RawPeer, Opts) of
		{ok, NewPeers} when is_list(NewPeers) ->
			%% The cache entry has expired.
			cache_update_peers(NewPeers, State);
		{error, Error} ->
			{error, Error}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc No peer cached available, we need to update it.
%% @end
%%--------------------------------------------------------------------
resolve_and_cache_peer_empty(State) ->
	RawPeer = maps:get(raw_peer, State),
	case ar_util:safe_parse_peer(RawPeer) of
		{ok, Peers} when is_list(Peers) ->
			cache_insert_peers(Peers, State);
		{error, Error} ->
			{error, Error}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc insert peers in the cache, when the peer is a DNS containing
%% more than one entry. Every resolved peer is cached; one of them is
%% picked at random and returned.
%% @end
%%--------------------------------------------------------------------
cache_insert_peers(Peers, State) ->
	cache_insert_peers(Peers, [], State).

cache_insert_peers([], Buffer, _State) ->
	[Peer] = ar_util:pick_random(Buffer, 1),
	{ok, Peer};
cache_insert_peers([Peer|Rest], Buffer, State) ->
	RawPeer = maps:get(raw_peer, State),
	Type = maps:get(type, State),
	Now = maps:get(now, State),
	_ = ets:insert(?MODULE, {{raw_peer, RawPeer}, {Peer, Now}}),
	_ = ets:insert(?MODULE, {{Type, Peer}, RawPeer}),
	cache_insert_peers(Rest, [Peer|Buffer], State).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Update a list of peers.
%% @end
%%--------------------------------------------------------------------
cache_update_peers(Peers, State) ->
	cache_update_peers(Peers, [], State).

cache_update_peers([], Buffer, _State) ->
	[Peer] = ar_util:pick_random(Buffer, 1),
	{ok, Peer};
cache_update_peers([Peer|Rest], Buffer, State) ->
	RawPeer = maps:get(raw_peer, State),
	CacheTimestamp = maps:get(cache_timestamp, State),
	Type = maps:get(type, State),
	Now = maps:get(now, State),
	%% NOTE(review): this delete key ({Type, {Peer, CacheTimestamp}}) does not
	%% match the key shape inserted below ({Type, Peer}), so it never deletes
	%% anything - confirm the intended stale-entry key.
	ets:delete(?MODULE, {Type, {Peer, CacheTimestamp}}),
	ets:insert(?MODULE, {{raw_peer, RawPeer}, {Peer, Now}}),
	ets:insert(?MODULE, {{Type, Peer}, RawPeer}),
	cache_update_peers(Rest, [Peer|Buffer], State).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Unless running in verify mode, subscribe to block events, load the
%% persisted peer list, and schedule periodic ranking, pinging, and discovery.
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	case Config#config.verify of
		false ->
			%% Trap exit to avoid corrupting any open files on quit.
			process_flag(trap_exit, true),
			ok = ar_events:subscribe(block),
			load_peers(),
			gen_server:cast(?MODULE, rank_peers),
			gen_server:cast(?MODULE, ping_peers),
			_ = ar_timer:apply_interval(
				?GET_MORE_PEERS_FREQUENCY_MS,
				?MODULE,
				discover_peers,
				[],
				#{ skip_on_shutdown => true }
			);
		_ ->
			ok
	end,
	{ok, #state{}}.

%% No synchronous API is expected; log and acknowledge unknown calls.
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Asynchronous peer addition (validity is checked by maybe_add_peer/2).
handle_cast({add_peer, Peer, Release}, State) ->
	maybe_add_peer(Peer, Release),
	{noreply, State};
%% Periodic re-scoring and re-ranking of all known peers; reschedules itself.
handle_cast(rank_peers, State) ->
	LifetimePeers = score_peers(lifetime),
	CurrentPeers = score_peers(current),
	prometheus_gauge:set(arweave_peer_count, length(LifetimePeers)),
	set_ranked_peers(lifetime, rank_peers(LifetimePeers)),
	set_ranked_peers(current, rank_peers(CurrentPeers)),
	ar_util:cast_after(?RANK_PEERS_FREQUENCY_MS, ?MODULE, rank_peers),
	{noreply, State};
%% Ping up to 100 of the best lifetime-ranked peers.
handle_cast(ping_peers, State) ->
	Peers = get_peers(lifetime),
	ping_peers(lists:sublist(Peers, 100)),
	{noreply, State};
%% Successful data transfer: update the peer's throughput/success rating.
handle_cast({valid_data, Peer, _DataType, LatencyMilliseconds, DataSize, Concurrency}, State) ->
	update_rating(Peer, LatencyMilliseconds, DataSize, Concurrency, true),
	{noreply, State};
%% Invalid data received: record a failure against the peer.
handle_cast({invalid_data, Peer, _DataType}, State) ->
	update_rating(Peer, false),
	{noreply, State};
%% A warning counts as a failure; drop the peer once its success EMA falls
%% below ?MINIMUM_SUCCESS.
handle_cast({warning, Peer}, State) ->
	Performance = update_rating(Peer, false),
	case Performance#performance.average_success < ?MINIMUM_SUCCESS of
		true ->
			remove_peer(low_success, Peer);
		false ->
			ok
	end,
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

%% A block received from a peer was rejected: ban, warn, or ignore depending
%% on which rejection-reason list the reason belongs to.
handle_info({event, block, {rejected, Reason, _H, Peer}}, State)
		when Peer /= no_peer ->
	IssueBan = lists:member(Reason, ?BLOCK_REJECTION_BAN),
	IssueWarning = lists:member(Reason, ?BLOCK_REJECTION_WARNING),
	Ignore = lists:member(Reason, ?BLOCK_REJECTION_IGNORE),
	case {IssueBan, IssueWarning, Ignore} of
		{true, false, false} ->
			ar_blacklist_middleware:ban_peer(Peer, ?BAD_BLOCK_BAN_TIME),
			remove_peer(banned, Peer);
		{false, true, false} ->
			issue_warning(Peer, block_rejected, Reason);
		{false, false, true} ->
			%% ignore
			ok;
		_ ->
			%% Every reason should be in exactly 1 list.
			error("invalid block rejection reason")
	end,
	{noreply, State};
handle_info({event, block, _}, State) ->
	{noreply, State};
handle_info({'EXIT', _, normal}, State) ->
	{noreply, State};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

%% Persist the peer list on shutdown (trap_exit is set in init/1).
terminate(Reason, _State) ->
	store_peers(),
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Fetch the peer's peer list over HTTP; an unavailable peer yields [].
get_peer_peers(Peer) ->
	case ar_http_iface_client:get_peers(Peer) of
		unavailable -> [];
		Peers -> Peers
	end.

%% Return the stored #performance{} for Peer, or a default record if none.
get_or_init_performance(Peer) ->
	case ets:lookup(?MODULE, {peer, Peer}) of
		[] ->
			#performance{};
		[{_, Performance}] ->
			Performance
	end.

set_performance(Peer, Performance) ->
	ets:insert(?MODULE, [{{peer, Peer}, Performance}]).

%% Read the cached sum of all peer ratings for the given ranking (0 if unset).
get_total_rating(Rating) ->
	case ets:lookup(?MODULE, {rating_total, Rating}) of
		[] ->
			0;
		[{_, Total}] ->
			Total
	end.

set_total_rating(Rating, Total) ->
	ets:insert(?MODULE, {{rating_total, Rating}, Total}).

%% Recompute the total rating by folding over every stored peer record.
recalculate_total_rating(Rating) ->
	TotalRating = ets:foldl(
		fun	({{peer, _Peer}, Performance}, Acc) ->
				Acc + get_peer_rating(Rating, Performance);
			(_, Acc) ->
				Acc
		end,
		0,
		?MODULE
	),
	set_total_rating(Rating, TotalRating).

%% Select the lifetime or current rating field from a #performance{} record.
get_peer_rating(Rating, Performance) ->
	case Rating of
		lifetime -> Performance#performance.lifetime_rating;
		current -> Performance#performance.current_rating
	end.
discover_peers([]) -> ok; discover_peers([Peer | Peers]) -> case ets:member(?MODULE, {peer, Peer}) of true -> ok; false -> case check_peer(Peer, is_public_peer(Peer)) of ok -> case ar_http_iface_client:get_info(Peer) of info_unavailable -> ok; Info -> case maps:get(atom_to_binary(network), Info, no_key) of <> -> case maps:get(atom_to_binary(release), Info, no_key) of Release when is_integer(Release) -> maybe_add_peer(Peer, Release); no_key -> maybe_add_peer(Peer, 0) end; _ -> ok end end; _ -> ok end end, discover_peers(Peers). format_stats(lifetime, Peer, Perf) -> KB = Perf#performance.total_bytes / 1024, io:format( "\t~s ~.2f kB/s (~.2f kB, ~.2f success, ~p transfers)~n", [string:pad(ar_util:format_peer(Peer), 21, trailing, $\s), float(Perf#performance.lifetime_rating), KB, Perf#performance.average_success, Perf#performance.total_transfers]); format_stats(current, Peer, Perf) -> io:format( "\t~s ~.2f kB/s (~.2f success)~n", [string:pad(ar_util:format_peer(Peer), 21, trailing, $\s), float(Perf#performance.current_rating), Perf#performance.average_success]). load_peers() -> case ar_storage:read_term(peers) of not_found -> ok; {ok, {_TotalRating, Records}} -> ?LOG_INFO([{event, polling_saved_peers}, {records, length(Records)}]), ar:console("Polling saved peers...~n"), load_peers(Records), recalculate_total_rating(lifetime), recalculate_total_rating(current), ?LOG_INFO([{event, polled_saved_peers}]), ar:console("Polled saved peers.~n"); {ok, {_TotalRating, Records, Tags}} -> ?LOG_INFO([{event, polling_saved_peers}, {records, length(Records)}]), ar:console("Polling saved peers...~n"), load_peers(Records), recalculate_total_rating(lifetime), recalculate_total_rating(current), [ ets:insert(?MODULE, {K, V}) || {K, V} <- Tags ], ?LOG_INFO([{event, polled_saved_peers}]), ar:console("Polled saved peers.~n") end. 
load_peers(Peers) when length(Peers) < 20 -> ar_util:pmap(fun load_peer/1, Peers); load_peers(Peers) -> {Peers2, Peers3} = lists:split(20, Peers), ar_util:pmap(fun load_peer/1, Peers2), load_peers(Peers3). load_peer({Peer, Performance}) -> case ar_http_iface_client:get_info(Peer, network) of info_unavailable -> ?LOG_DEBUG([{event, peer_unavailable}, {peer, ar_util:format_peer(Peer)}]), ok; <> -> maybe_rotate_peer_ports(Peer), case Performance of {performance, TotalBytes, _TotalLatency, Transfers, _Failures, Rating} -> %% For backwards compatibility. set_performance(Peer, #performance{ total_bytes = TotalBytes, total_throughput = Rating, total_transfers = Transfers, average_throughput = Rating, lifetime_rating = Rating, current_rating = Rating }); {performance, TotalBytes, _TotalLatency, Transfers, _Failures, Rating, Release} -> %% For backwards compatibility. set_performance(Peer, #performance{ release = Release, total_bytes = TotalBytes, total_throughput = Rating, total_transfers = Transfers, average_throughput = Rating, lifetime_rating = Rating, current_rating = Rating }); {performance, 3, _Release, _TotalBytes, _TotalThroughput, _TotalTransfers, _AverageLatency, _AverageThroughput, _AverageSuccess, _LifetimeRating, _CurrentRating} -> %% Going forward whenever we change the #performance record we should increment the %% version field so we can match on it when doing a load. Here we're handling the %% version 3 format. set_performance(Peer, Performance) end, ok; Network -> ?LOG_DEBUG([{event, peer_from_the_wrong_network}, {peer, ar_util:format_peer(Peer)}, {network, Network}]), ok end. 
maybe_rotate_peer_ports(Peer) -> {IP, Port} = get_ip_port(Peer), case ets:lookup(?MODULE, {peer_ip, IP}) of [] -> ets:insert(?MODULE, {{peer_ip, IP}, {erlang:setelement(1, ?DEFAULT_PEER_PORT_MAP, Port), 1}}); [{_, {PortMap, Position}}] -> case is_in_port_map(Port, PortMap) of {true, _} -> ok; false -> MaxSize = erlang:size(?DEFAULT_PEER_PORT_MAP), case Position < MaxSize of true -> ets:insert(?MODULE, {{peer_ip, IP}, {erlang:setelement(Position + 1, PortMap, Port), Position + 1}}); false -> RemovedPeer = construct_peer(IP, element(1, PortMap)), PortMap2 = shift_port_map_left(PortMap), PortMap3 = erlang:setelement(MaxSize, PortMap2, Port), ets:insert(?MODULE, {{peer_ip, IP}, {PortMap3, MaxSize}}), remove_peer(rotated, RemovedPeer) end end end. get_ip_port({A, B, C, D, Port}) -> {{A, B, C, D}, Port}. construct_peer({A, B, C, D}, Port) -> {A, B, C, D, Port}. is_in_port_map(Port, PortMap) -> is_in_port_map(Port, PortMap, erlang:size(PortMap), 1). is_in_port_map(_Port, _PortMap, Max, N) when N > Max -> false; is_in_port_map(Port, PortMap, Max, N) -> case element(N, PortMap) == Port of true -> {true, N}; false -> is_in_port_map(Port, PortMap, Max, N + 1) end. shift_port_map_left(PortMap) -> shift_port_map_left(PortMap, erlang:size(PortMap), 1). shift_port_map_left(PortMap, Max, N) when N == Max -> erlang:setelement(N, PortMap, empty_slot); shift_port_map_left(PortMap, Max, N) -> PortMap2 = erlang:setelement(N, PortMap, element(N + 1, PortMap)), shift_port_map_left(PortMap2, Max, N + 1). ping_peers(Peers) when length(Peers) < 100 -> ar_util:pmap(fun ar_http_iface_client:add_peer/1, Peers); ping_peers(Peers) -> {Send, Rest} = lists:split(100, Peers), ar_util:pmap(fun ar_http_iface_client:add_peer/1, Send), ping_peers(Rest). -ifdef(AR_TEST). %% Do not filter out loopback IP addresses with custom port in the debug mode %% to allow multiple local VMs to peer with each other. 
is_loopback_ip({127, _, _, _, Port}) -> {ok, Config} = arweave_config:get_env(), Port == Config#config.port; is_loopback_ip({_, _, _, _, _}) -> false. -else. %% @doc Is the IP address in question a loopback ('us') address? is_loopback_ip({A, B, C, D, _Port}) -> is_loopback_ip({A, B, C, D}); is_loopback_ip({127, _, _, _}) -> true; is_loopback_ip({0, _, _, _}) -> true; is_loopback_ip({169, 254, _, _}) -> true; is_loopback_ip({255, 255, 255, 255}) -> true; is_loopback_ip({_, _, _, _}) -> false. -endif. score_peers(Rating) -> Total = get_total_rating(Rating), ets:foldl( fun ({{peer, Peer}, Performance}, Acc) -> %% Bigger score increases the chances to end up on the top %% of the peer list, but at the same time the ranking is %% probabilistic to always give everyone a chance to improve %% in the competition (i.e., reduce the advantage gained by %% being the first to earn a reputation). Score = rand:uniform() * get_peer_rating(Rating, Performance) / (Total + 0.0001), [{Peer, Score} | Acc]; (_, Acc) -> Acc end, [], ?MODULE ). %% @doc Return a ranked list of peers. rank_peers(ScoredPeers) -> SortedReversed = lists:reverse( lists:sort(fun({_, S1}, {_, S2}) -> S1 >= S2 end, ScoredPeers)), GroupedBySubnet = lists:foldl( fun({{A, B, _C, _D, _Port}, _Score} = Peer, Acc) -> maps:update_with({A, B}, fun(L) -> [Peer | L] end, [Peer], Acc) end, #{}, SortedReversed ), ScoredSubnetPeers = maps:fold( fun(_Subnet, SubnetPeers, Acc) -> element(2, lists:foldl( fun({Peer, Score}, {N, Acc2}) -> %% At first we take the best peer from every subnet, %% then take the second best from every subnet, etc. {N + 1, [{Peer, {-N, Score}} | Acc2]} end, {0, Acc}, SubnetPeers )) end, [], GroupedBySubnet ), [Peer || {Peer, _} <- lists:sort( fun({_, S1}, {_, S2}) -> S1 >= S2 end, ScoredSubnetPeers )]. set_ranked_peers(Rating, Peers) -> ets:insert(?MODULE, {{peers, Rating}, lists:sublist(Peers, ?MAX_PEERS)}). check_peer(Peer) -> check_peer(Peer, not is_loopback_ip(Peer)). 
%% Accept a peer only when it is in scope (per the caller), not permanently
%% blacklisted, and not currently banned; otherwise return reject.
check_peer(Peer, IsPeerScopeValid) ->
	IsBlacklisted = lists:member(Peer, ?PEER_PERMANENT_BLACKLIST),
	IsBanned = ar_blacklist_middleware:is_peer_banned(Peer) == banned,
	case IsPeerScopeValid andalso not IsBlacklisted andalso not IsBanned of
		true ->
			ok;
		false ->
			reject
	end.

%% Success/failure-only update: no latency or size information.
update_rating(Peer, IsSuccess) ->
	update_rating(Peer, undefined, undefined, 1, IsSuccess).

%% Clause order matters below: a failed request with latency/size data is first
%% stripped of that data, and zero latencies are normalized to undefined, before
%% the general clause runs.
update_rating(Peer, LatencyMilliseconds, DataSize, Concurrency, false)
		when LatencyMilliseconds =/= undefined; DataSize =/= undefined ->
	%% Don't credit peers for failed requests.
	update_rating(Peer, undefined, undefined, Concurrency, false);
update_rating(Peer, 0, _DataSize, Concurrency, IsSuccess) ->
	update_rating(Peer, undefined, undefined, Concurrency, IsSuccess);
update_rating(Peer, 0.0, _DataSize, Concurrency, IsSuccess) ->
	update_rating(Peer, undefined, undefined, Concurrency, IsSuccess);
%% General clause: fold the new observation into the peer's #performance{}
%% record (totals, EMAs, ratings), adjust the cached rating totals by the
%% rating deltas, persist the record, and return it.
update_rating(Peer, LatencyMilliseconds, DataSize, Concurrency, IsSuccess) ->
	Performance = get_or_init_performance(Peer),
	#performance{
		total_bytes = TotalBytes,
		total_throughput = TotalThroughput,
		total_transfers = TotalTransfers,
		average_latency = AverageLatency,
		average_throughput = AverageThroughput,
		average_success = AverageSuccess,
		lifetime_rating = LifetimeRating,
		current_rating = CurrentRating
	} = Performance,
	TotalBytes2 =
		case DataSize of
			undefined -> TotalBytes;
			_ -> TotalBytes + DataSize
		end,
	AverageLatency2 =
		case LatencyMilliseconds of
			undefined -> AverageLatency;
			_ -> calculate_ema(AverageLatency, LatencyMilliseconds, ?THROUGHPUT_ALPHA)
		end,
	%% In order to approximate the impact of multiple concurrent requests we multiply
	%% DataSize by the Concurrency value. We do this *only* when updating the AverageThroughput
	%% value so that it doesn't distort the TotalThroughput.
	AverageThroughput2 =
		case LatencyMilliseconds of
			undefined -> AverageThroughput;
			_ -> calculate_ema(AverageThroughput,
					(DataSize * Concurrency) / LatencyMilliseconds, ?THROUGHPUT_ALPHA)
		end,
	TotalThroughput2 =
		case LatencyMilliseconds of
			undefined -> TotalThroughput;
			_ -> TotalThroughput + (DataSize / LatencyMilliseconds)
		end,
	TotalTransfers2 =
		case DataSize of
			undefined -> TotalTransfers;
			_ -> TotalTransfers + 1
		end,
	AverageSuccess2 = calculate_ema(AverageSuccess, ar_util:bool_to_int(IsSuccess),
			?SUCCESS_ALPHA),
	%% Rating is an estimate of the peer's effective throughput in bytes per millisecond.
	%% 'lifetime' considers all data ever received from this peer
	%% 'current' considers recently received data
	LifetimeRating2 =
		case TotalTransfers2 > 0 of
			true -> (TotalThroughput2 / TotalTransfers2) * AverageSuccess2;
			_ -> LifetimeRating
		end,
	CurrentRating2 =
		case AverageThroughput2 > 0 of
			true -> AverageThroughput2 * AverageSuccess2;
			_ -> CurrentRating
		end,
	Performance2 = Performance#performance{
		total_bytes = TotalBytes2,
		total_throughput = TotalThroughput2,
		total_transfers = TotalTransfers2,
		average_latency = AverageLatency2,
		average_throughput = AverageThroughput2,
		average_success = AverageSuccess2,
		lifetime_rating = LifetimeRating2,
		current_rating = CurrentRating2
	},
	%% Keep the cached totals in sync by applying the rating deltas.
	TotalLifetimeRating = get_total_rating(lifetime),
	TotalLifetimeRating2 = TotalLifetimeRating - LifetimeRating + LifetimeRating2,
	TotalCurrentRating = get_total_rating(current),
	TotalCurrentRating2 = TotalCurrentRating - CurrentRating + CurrentRating2,
	maybe_rotate_peer_ports(Peer),
	set_performance(Peer, Performance2),
	set_total_rating(lifetime, TotalLifetimeRating2),
	set_total_rating(current, TotalCurrentRating2),
	Performance2.

%% Exponential moving average: Alpha weighs the new Value against the old EMA.
calculate_ema(OldEMA, Value, Alpha) ->
	Alpha * Value + (1 - Alpha) * OldEMA.

%% Register Peer (unless rejected by check_peer/1) and record its Release; an
%% already-tracked peer only gets its release field refreshed when it changed.
maybe_add_peer(Peer, Release) ->
	maybe_rotate_peer_ports(Peer),
	%% If we've just added this peer, flag it as active and connected.
	connected_peer(Peer),
	case ets:lookup(?MODULE, {peer, Peer}) of
		[{_, #performance{ release = Release }}] ->
			ok;
		[{_, Performance}] ->
			set_performance(Peer, Performance#performance{ release = Release });
		[] ->
			case check_peer(Peer) of
				ok ->
					set_performance(Peer, #performance{ release = Release });
				_ ->
					ok
			end
	end.

%% Drop a peer: subtract its ratings from the cached totals, delete its record,
%% release its port-map slot, and broadcast a {removed, Peer} event. 'rotated'
%% removals are routine and are not logged.
remove_peer(Reason, RemovedPeer) ->
	case Reason of
		rotated ->
			ok;
		_ ->
			?LOG_DEBUG([
				{event, remove_peer},
				{peer, ar_util:format_peer(RemovedPeer)},
				{reason, Reason}
			])
	end,
	Performance = get_or_init_performance(RemovedPeer),
	TotalLifetimeRating = get_total_rating(lifetime),
	TotalCurrentRating = get_total_rating(current),
	set_total_rating(lifetime, TotalLifetimeRating - get_peer_rating(lifetime, Performance)),
	set_total_rating(current, TotalCurrentRating - get_peer_rating(current, Performance)),
	ets:delete(?MODULE, {peer, RemovedPeer}),
	remove_peer_port(RemovedPeer),
	ar_events:send(peer, {removed, RemovedPeer}).

%% Free the peer's slot in its IP's port map; drop the whole port-map entry
%% when no occupied slot remains.
remove_peer_port(Peer) ->
	{IP, Port} = get_ip_port(Peer),
	case ets:lookup(?MODULE, {peer_ip, IP}) of
		[] ->
			ok;
		[{_, {PortMap, Position}}] ->
			case is_in_port_map(Port, PortMap) of
				false ->
					ok;
				{true, N} ->
					PortMap2 = erlang:setelement(N, PortMap, empty_slot),
					case is_port_map_empty(PortMap2) of
						true ->
							ets:delete(?MODULE, {peer_ip, IP});
						false ->
							ets:insert(?MODULE, {{peer_ip, IP}, {PortMap2, Position}})
					end
			end
	end.

%% True when every slot of the port map is empty_slot.
is_port_map_empty(PortMap) ->
	is_port_map_empty(PortMap, erlang:size(PortMap), 1).

is_port_map_empty(_PortMap, Max, N) when N > Max ->
	true;
is_port_map_empty(PortMap, Max, N) ->
	case element(N, PortMap) of
		empty_slot ->
			is_port_map_empty(PortMap, Max, N + 1);
		_ ->
			false
	end.
store_peers() -> case get_total_rating(lifetime) of 0 -> ok; Total -> Records = ets:foldl( fun ({{peer, Peer}, Performance}, Acc) -> [{Peer, Performance} | Acc]; (_, Acc) -> Acc end, [], ?MODULE ), Tags = ets:foldl(fun ({{ar_tags, _, _, _}, _} = Tag, Acc) -> [Tag|Acc]; (_, Acc) -> Acc end, [], ?MODULE), ?LOG_INFO([{event, store_peers} , {total, Total} , {records, length(Records)} , {tags, length(Tags)}]), ar_storage:write_term(peers, {Total, Records, Tags}) end. %%-------------------------------------------------------------------- %% @hidden %% @doc internal function to tag a peer. %% @end %%-------------------------------------------------------------------- set_tag(Peer, Tag, Value) -> ets:insert(?MODULE, {{ar_tags, ?MODULE, Peer, Tag}, Value}). %%-------------------------------------------------------------------- %% @hidden %% @doc internal function to get tag value set on a peer. %% @end %%-------------------------------------------------------------------- get_tag(Peer, Tag) -> Pattern = {{ar_tags, ?MODULE, Peer, Tag}, '$1'}, Guard = [], Select = ['$1'], case ets:select(?MODULE, [{Pattern, Guard, Select}]) of [] -> {error, not_found}; [V] -> {ok, V} end. %%-------------------------------------------------------------------- %% @doc defined a peer as connected (in HTTP sense). %% @end %%-------------------------------------------------------------------- connected_peer(Peer) -> set_tag(Peer, {connection, last}, erlang:system_time(second)), set_tag(Peer, {connection, active}, true). %%-------------------------------------------------------------------- %% @doc defined a peer as disconnected (in HTTP sense). %% @end %%-------------------------------------------------------------------- disconnected_peer(Peer) -> set_tag(Peer, {connection, active}, false). %%-------------------------------------------------------------------- %% @doc returns peer's timestamp. 
%% @end
%%--------------------------------------------------------------------
%% Return the last-connection timestamp (seconds) for Peer, or undefined when
%% the peer has never been flagged as connected.
get_connection_timestamp_peer(Peer) ->
	case get_tag(Peer, {connection, last}) of
		{ok, V} -> V;
		_ -> undefined
	end.

%%--------------------------------------------------------------------
%% @doc returns the HTTP connection state of a peer.
%% @end
%%--------------------------------------------------------------------
is_connected_peer(Peer) ->
	case get_tag(Peer, {connection, active}) of
		{ok, V} -> V;
		{error, _} -> false
	end.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Exercise the connection tag lifecycle: unknown peer -> connected ->
%% disconnected, and the current-peers age filter.
connected_peer_test() ->
	Peer = {100, 117, 109, 98, 1234},
	% drop all objects from the table to start with a clean state
	ets:delete_all_objects(?MODULE),
	% get all peers connected, it should returns nothing by
	% default because the table is empty.
	?assertEqual(undefined, get_connection_timestamp_peer(Peer)),
	% manually add a new peer using set_ranked_peers function.
	% the node is not connected because gun did not manage the
	% connection in this test.
	set_ranked_peers(lifetime, [Peer]),
	set_ranked_peers(current, [Peer]),
	?assertEqual(false, is_connected_peer(Peer)),
	?assertEqual(undefined, get_connection_timestamp_peer(Peer)),
	% force this peer to be connected using connected_peer
	% function. A timestamp is created.
	connected_peer(Peer),
	Timestamp = get_connection_timestamp_peer(Peer),
	?assertEqual(true, is_connected_peer(Peer)),
	?assertEqual(Timestamp, get_connection_timestamp_peer(Peer)),
	?assertNotEqual(undefined, get_connection_timestamp_peer(Peer)),
	?assertEqual([Peer], get_peers(lifetime)),
	?assertEqual([Peer], get_peers(current)),
	% Now remove the connection to the peer. A timestamp must
	% still be there.
	disconnected_peer(Peer),
	?assertEqual(false, is_connected_peer(Peer)),
	?assertNotEqual(undefined, get_connection_timestamp_peer(Peer)),
	?assertEqual([Peer], get_peers(lifetime)),
	?assertEqual([Peer], get_peers(current)),
	% let modify manually the timestamp to check get_peers/1
	% function, and overwrite Peer timestamp with some defined
	% values.
	Time = erlang:system_time(second),
	% Go above the limit
	Limit = Time-((?CURRENT_PEERS_LIST_FILTER+10)*60*60*24),
	set_tag(Peer, {connection, last}, Limit),
	?assertEqual([], get_peers(current)),
	?assertEqual([Peer], get_peers(lifetime)).

%% Exercise the port-map lifecycle for a single IP: slot filling, duplicate
%% insertion, slot removal, full-map rotation (oldest port evicted), and final
%% cleanup of the port-map entry.
rotate_peer_ports_test() ->
	Peer = {2, 2, 2, 2, 1},
	maybe_rotate_peer_ports(Peer),
	[{_, {PortMap, 1}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(1, element(1, PortMap)),
	remove_peer(test, Peer),
	?assertEqual([], ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}})),
	maybe_rotate_peer_ports(Peer),
	Peer2 = {2, 2, 2, 2, 2},
	maybe_rotate_peer_ports(Peer2),
	[{_, {PortMap2, 2}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(1, element(1, PortMap2)),
	?assertEqual(2, element(2, PortMap2)),
	remove_peer(test, Peer),
	[{_, {PortMap3, 2}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(empty_slot, element(1, PortMap3)),
	?assertEqual(2, element(2, PortMap3)),
	Peer3 = {2, 2, 2, 2, 3},
	Peer4 = {2, 2, 2, 2, 4},
	Peer5 = {2, 2, 2, 2, 5},
	Peer6 = {2, 2, 2, 2, 6},
	Peer7 = {2, 2, 2, 2, 7},
	Peer8 = {2, 2, 2, 2, 8},
	Peer9 = {2, 2, 2, 2, 9},
	Peer10 = {2, 2, 2, 2, 10},
	Peer11 = {2, 2, 2, 2, 11},
	maybe_rotate_peer_ports(Peer3),
	maybe_rotate_peer_ports(Peer4),
	maybe_rotate_peer_ports(Peer5),
	maybe_rotate_peer_ports(Peer6),
	maybe_rotate_peer_ports(Peer7),
	maybe_rotate_peer_ports(Peer8),
	maybe_rotate_peer_ports(Peer9),
	maybe_rotate_peer_ports(Peer10),
	[{_, {PortMap4, 10}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(empty_slot, element(1, PortMap4)),
	?assertEqual(2, element(2, PortMap4)),
	?assertEqual(10, element(10, PortMap4)),
	% Re-adding known ports must not change the map.
	maybe_rotate_peer_ports(Peer8),
	maybe_rotate_peer_ports(Peer9),
	maybe_rotate_peer_ports(Peer10),
	[{_, {PortMap5, 10}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(empty_slot, element(1, PortMap5)),
	?assertEqual(2, element(2, PortMap5)),
	?assertEqual(3, element(3, PortMap5)),
	?assertEqual(9, element(9, PortMap5)),
	?assertEqual(10, element(10, PortMap5)),
	% The map is full: adding port 11 rotates the oldest port out.
	maybe_rotate_peer_ports(Peer11),
	[{_, {PortMap6, 10}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(element(2, PortMap5), element(1, PortMap6)),
	?assertEqual(3, element(2, PortMap6)),
	?assertEqual(4, element(3, PortMap6)),
	?assertEqual(5, element(4, PortMap6)),
	?assertEqual(11, element(10, PortMap6)),
	maybe_rotate_peer_ports(Peer11),
	[{_, {PortMap7, 10}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(element(2, PortMap5), element(1, PortMap7)),
	?assertEqual(3, element(2, PortMap7)),
	?assertEqual(4, element(3, PortMap7)),
	?assertEqual(5, element(4, PortMap7)),
	?assertEqual(11, element(10, PortMap7)),
	remove_peer(test, Peer4),
	[{_, {PortMap8, 10}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(empty_slot, element(3, PortMap8)),
	?assertEqual(3, element(2, PortMap8)),
	?assertEqual(5, element(4, PortMap8)),
	remove_peer(test, Peer2),
	remove_peer(test, Peer3),
	remove_peer(test, Peer5),
	remove_peer(test, Peer6),
	remove_peer(test, Peer7),
	remove_peer(test, Peer8),
	remove_peer(test, Peer9),
	remove_peer(test, Peer10),
	[{_, {PortMap9, 10}}] = ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}}),
	?assertEqual(11, element(10, PortMap9)),
	% Removing the last remaining port drops the whole entry.
	remove_peer(test, Peer11),
	?assertEqual([], ets:lookup(?MODULE, {peer_ip, {2, 2, 2, 2}})).
%% Exercise update_rating/2 and update_rating/5: failed transfers only move the
%% success EMA; successful transfers update totals, EMAs, and both ratings; the
%% cached totals equal the sum over all peers.
update_rating_test() ->
	ets:delete_all_objects(?MODULE),
	Peer1 = {1, 2, 3, 4, 1984},
	Peer2 = {5, 6, 7, 8, 1984},
	?assertEqual(#performance{}, get_or_init_performance(Peer1)),
	?assertEqual(0, get_total_rating(lifetime)),
	?assertEqual(0, get_total_rating(current)),
	update_rating(Peer1, true),
	?assertEqual(#performance{}, get_or_init_performance(Peer1)),
	?assertEqual(0, get_total_rating(lifetime)),
	?assertEqual(0, get_total_rating(current)),
	update_rating(Peer1, false),
	assert_performance(#performance{ average_success = 0.965 },
		get_or_init_performance(Peer1)),
	?assertEqual(0, get_total_rating(lifetime)),
	?assertEqual(0, get_total_rating(current)),
	%% Failed transfer should impact bytes or latency
	update_rating(Peer1, 1000, 100, 1, false),
	assert_performance(#performance{ average_success = 0.9312 },
		get_or_init_performance(Peer1)),
	?assertEqual(0, get_total_rating(lifetime)),
	?assertEqual(0, get_total_rating(current)),
	%% Test successful transfer
	update_rating(Peer1, 1000, 100, 1, true),
	assert_performance(#performance{
		total_bytes = 100,
		total_throughput = 0.1,
		total_transfers = 1,
		average_latency = 50,
		average_throughput = 0.005,
		average_success = 0.9336,
		lifetime_rating = 0.0934,
		current_rating = 0.0047
	}, get_or_init_performance(Peer1)),
	?assertEqual(0.0934, round(get_total_rating(lifetime), 4)),
	?assertEqual(0.0047, round(get_total_rating(current), 4)),
	%% Test concurrency
	update_rating(Peer1, 1000, 50, 10, true),
	assert_performance(#performance{
		total_bytes = 150,
		total_throughput = 0.15,
		total_transfers = 2,
		average_latency = 97.5,
		average_throughput = 0.0298,
		average_success = 0.936,
		lifetime_rating = 0.0702,
		current_rating = 0.0278
	}, get_or_init_performance(Peer1)),
	?assertEqual(0.0702, round(get_total_rating(lifetime), 4)),
	?assertEqual(0.0278, round(get_total_rating(current), 4)),
	%% With 2 peers total rating should be the sum of both
	update_rating(Peer2, 1000, 100, 1, true),
	assert_performance(#performance{
		total_bytes = 100,
		total_throughput = 0.1,
		total_transfers = 1,
		average_latency = 50,
		average_throughput = 0.005,
		average_success = 1,
		lifetime_rating = 0.1,
		current_rating = 0.005
	}, get_or_init_performance(Peer2)),
	?assertEqual(0.1702, round(get_total_rating(lifetime), 4)),
	?assertEqual(0.0328, round(get_total_rating(current), 4)).

%% eunit generator: run test_block_rejected/0 with a 30-second timeout (the
%% test sleeps to let the async events propagate).
block_rejected_test_() ->
	[
		{timeout, 30, fun test_block_rejected/0}
	].

%% Check the three block-rejection outcomes: an ignored reason leaves the peer
%% untouched, a warning reason lowers its success average, a ban reason bans
%% and resets the peer.
test_block_rejected() ->
	ar_blacklist_middleware:cleanup_ban(whereis(ar_blacklist_middleware)),
	Peer = {127, 0, 0, 1, ar_test_node:get_unused_port()},
	ar_peers:add_peer(Peer, -1),
	ar_events:send(block, {rejected, invalid_signature, <<>>, Peer}),
	timer:sleep(5000),
	?assertEqual(#{Peer => #performance{}}, ar_peers:get_peer_performances([Peer])),
	?assertEqual(not_banned, ar_blacklist_middleware:is_peer_banned(Peer)),
	ar_events:send(block, {rejected, failed_to_fetch_first_chunk, <<>>, Peer}),
	timer:sleep(5000),
	?assertEqual(
		#{Peer => #performance{ average_success = 0.965 }},
		ar_peers:get_peer_performances([Peer])),
	?assertEqual(not_banned, ar_blacklist_middleware:is_peer_banned(Peer)),
	ar_events:send(block, {rejected, invalid_previous_solution_hash, <<>>, Peer}),
	timer:sleep(5000),
	?assertEqual(#{Peer => #performance{}}, ar_peers:get_peer_performances([Peer])),
	?assertEqual(banned, ar_blacklist_middleware:is_peer_banned(Peer)).
rate_data_test() -> ets:delete_all_objects(?MODULE), Peer1 = {1, 2, 3, 4, 1984}, ?assertEqual(#performance{}, get_or_init_performance(Peer1)), ?assertEqual(0, get_total_rating(lifetime)), ?assertEqual(0, get_total_rating(current)), ar_peers:rate_fetched_data(Peer1, chunk, {error, timeout}, 1000000, 100, 10), timer:sleep(500), assert_performance(#performance{ average_success = 0.965 }, get_or_init_performance(Peer1)), ?assertEqual(0, get_total_rating(lifetime)), ?assertEqual(0, get_total_rating(current)), ar_peers:rate_fetched_data(Peer1, block, 1000000, 100), timer:sleep(500), assert_performance(#performance{ total_bytes = 100, total_throughput = 0.1, total_transfers = 1, average_latency = 50, average_throughput = 0.005, average_success = 0.9662, lifetime_rating = 0.0966, current_rating = 0.0048 }, get_or_init_performance(Peer1)), ?assertEqual(0.0966, round(get_total_rating(lifetime), 4)), ?assertEqual(0.0048, round(get_total_rating(current), 4)), ar_peers:rate_fetched_data(Peer1, tx, ok, 1000000, 100, 2), timer:sleep(500), assert_performance(#performance{ total_bytes = 200, total_throughput = 0.2, total_transfers = 2, average_latency = 97.5, average_throughput = 0.0148, average_success = 0.9674, lifetime_rating = 0.0967, current_rating = 0.0143 }, get_or_init_performance(Peer1)), ?assertEqual(0.0967, round(get_total_rating(lifetime), 4)), ?assertEqual(0.0143, round(get_total_rating(current), 4)), ar_peers:rate_gossiped_data(Peer1, block, 1000000, 100), timer:sleep(500), assert_performance(#performance{ total_bytes = 300, total_throughput = 0.3, total_transfers = 3, average_latency = 142.625, average_throughput = 0.019, average_success = 0.9685, lifetime_rating = 0.0969, current_rating = 0.0184 }, get_or_init_performance(Peer1)), ?assertEqual(0.0969, round(get_total_rating(lifetime), 4)), ?assertEqual(0.0184, round(get_total_rating(current), 4)). 
%% Compare two #performance{} records field by field, rounding the float fields
%% to 4 decimal places so EMA arithmetic noise does not fail the comparison.
assert_performance(Expected, Actual) ->
	?assertEqual(Expected#performance.total_bytes, Actual#performance.total_bytes),
	?assertEqual(
		round(Expected#performance.total_throughput, 4),
		round(Actual#performance.total_throughput, 4)),
	?assertEqual(Expected#performance.total_transfers, Actual#performance.total_transfers),
	?assertEqual(
		round(Expected#performance.average_latency, 4),
		round(Actual#performance.average_latency, 4)),
	?assertEqual(
		round(Expected#performance.average_throughput, 4),
		round(Actual#performance.average_throughput, 4)),
	?assertEqual(
		round(Expected#performance.average_success, 4),
		round(Actual#performance.average_success, 4)),
	?assertEqual(
		round(Expected#performance.lifetime_rating, 4),
		round(Actual#performance.lifetime_rating, 4)),
	?assertEqual(
		round(Expected#performance.current_rating, 4),
		round(Actual#performance.current_rating, 4)).

%% Round Float to N decimal places.
round(Float, N) ->
	Multiplier = math:pow(10, N),
	round(Float * Multiplier) / Multiplier.

================================================
FILE: apps/arweave/src/ar_poa.erl
================================================

%%% @doc This module implements all mechanisms required to validate a proof of access
%%% for a chunk of data received from the network.
-module(ar_poa).

-export([get_data_path_validation_ruleset/2, get_data_path_validation_ruleset/3,
		validate_pre_fork_2_5/4, validate/1,
		chunk_proof/2, chunk_proof/3, chunk_proof/5,
		validate_paths/1,
		get_padded_offset/1, get_padded_offset/2]).

-include_lib("arweave/include/ar_poa.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the merkle proof validation ruleset code depending on the block start
%% offset, the threshold where the offset rebases were allowed (and the validation
%% changed in some other ways on top of that). The threshold where the specific
%% requirements were imposed on data splits to make each chunk belong to its own
%% 256 KiB bucket is set to ar_block:strict_data_split_threshold(). The code is then passed to
%% ar_merkle:validate_path/5.
get_data_path_validation_ruleset(BlockStartOffset, MerkleRebaseSupportThreshold) ->
	get_data_path_validation_ruleset(BlockStartOffset, MerkleRebaseSupportThreshold,
			ar_block:strict_data_split_threshold()).

%% @doc Return the merkle proof validation ruleset code depending on the block start
%% offset, the threshold where the offset rebases were allowed (and the validation
%% changed in some other ways on top of that), and the threshold where the specific
%% requirements were imposed on data splits to make each chunk belong to its own
%% 256 KiB bucket. The code is then passed to ar_merkle:validate_path/5.
get_data_path_validation_ruleset(BlockStartOffset, MerkleRebaseSupportThreshold,
		StrictDataSplitThreshold) ->
	case BlockStartOffset >= MerkleRebaseSupportThreshold of
		true ->
			offset_rebase_support_ruleset;
		false ->
			case BlockStartOffset >= StrictDataSplitThreshold of
				true ->
					strict_data_split_ruleset;
				false ->
					strict_borders_ruleset
			end
	end.

%% Convenience variant using the default rebase threshold macro.
get_data_path_validation_ruleset(BlockStartOffset) ->
	get_data_path_validation_ruleset(BlockStartOffset, ?MERKLE_REBASE_SUPPORT_THRESHOLD,
			ar_block:strict_data_split_threshold()).

%% @doc Validate a proof of access.
validate(Args) -> {BlockStartOffset, RecallOffset, TXRoot, BlockSize, SPoA, Packing, SubChunkIndex, ExpectedChunkID} = Args, #poa{ chunk = Chunk, unpacked_chunk = UnpackedChunk } = SPoA, ChunkMetadata = #chunk_metadata{ tx_root = TXRoot, tx_path = SPoA#poa.tx_path, data_path = SPoA#poa.data_path }, ChunkProof = chunk_proof(ChunkMetadata, RecallOffset, BlockStartOffset, BlockSize), case validate_paths(ChunkProof) of {false, _} -> false; {true, ChunkProof2} -> #chunk_proof{ chunk_id = ChunkID, chunk_start_offset = ChunkStartOffset, chunk_end_offset = ChunkEndOffset, tx_start_offset = TXStartOffset } = ChunkProof2, case ExpectedChunkID of not_set -> validate2(Packing, {ChunkID, ChunkStartOffset, ChunkEndOffset, BlockStartOffset, TXStartOffset, TXRoot, Chunk, UnpackedChunk, SubChunkIndex}); _ -> case ChunkID == ExpectedChunkID of false -> false; true -> {true, ChunkID} end end end. chunk_proof(#chunk_metadata{} = ChunkMetadata, SeekByte) -> chunk_proof(ChunkMetadata, SeekByte, ?MERKLE_REBASE_SUPPORT_THRESHOLD). chunk_proof(#chunk_metadata{} = ChunkMetadata, SeekByte, MerkleRebaseSupportThreshold) -> {BlockStartOffset, BlockEndOffset, TXRoot} = ar_block_index:get_block_bounds(SeekByte), ChunkMetadata2 = case ChunkMetadata#chunk_metadata.tx_root of not_set -> ChunkMetadata#chunk_metadata{ tx_root = TXRoot }; TXRoot -> ChunkMetadata end, ValidateDataPathRuleset = get_data_path_validation_ruleset( BlockStartOffset, MerkleRebaseSupportThreshold, ar_block:strict_data_split_threshold()), chunk_proof( ChunkMetadata2, BlockStartOffset, BlockEndOffset, SeekByte, ValidateDataPathRuleset ). 
%% Build a #chunk_proof{} for a recall offset relative to a block: derive the
%% seek byte from the recall bucket offset and pick the ruleset for the block.
chunk_proof(#chunk_metadata{} = ChunkMetadata, RecallOffset, BlockStartOffset, BlockSize) ->
	BlockRelativeOffset = get_recall_bucket_offset(RecallOffset, BlockStartOffset),
	ValidateDataPathRuleset = get_data_path_validation_ruleset(BlockStartOffset),
	BlockEndOffset = BlockStartOffset + BlockSize,
	SeekByte = BlockStartOffset + BlockRelativeOffset,
	chunk_proof(
		ChunkMetadata,
		BlockStartOffset,
		BlockEndOffset,
		SeekByte,
		ValidateDataPathRuleset
	).

%% Assemble a #chunk_proof{} record from the given parts.
chunk_proof(#chunk_metadata{} = ChunkMetadata, BlockStartOffset, BlockEndOffset, SeekByte,
		ValidateDataPathRuleset) ->
	#chunk_proof{
		seek_byte = SeekByte,
		metadata = ChunkMetadata,
		block_start_offset = BlockStartOffset,
		block_end_offset = BlockEndOffset,
		validate_data_path_ruleset = ValidateDataPathRuleset
	}.

%% @doc Validate the TXPath and DataPath for a chunk. This will return the ChunkID but won't
%% validate that the ChunkID is correct.
-spec validate_paths(#chunk_proof{}) -> {boolean(), #chunk_proof{}}.
validate_paths(Proof) ->
	#chunk_proof{
		seek_byte = SeekByte,
		metadata = #chunk_metadata{
			tx_root = TXRoot,
			tx_path = TXPath,
			data_path = DataPath
		},
		block_start_offset = BlockStartOffset,
		block_end_offset = BlockEndOffset,
		validate_data_path_ruleset = ValidateDataPathRuleset
	} = Proof,
	BlockRelativeOffset = SeekByte - BlockStartOffset,
	BlockSize = BlockEndOffset - BlockStartOffset,
	%% First prove the transaction's placement inside the block (tx_path)...
	case ar_merkle:validate_path(TXRoot, BlockRelativeOffset, BlockSize, TXPath) of
		false ->
			{false, Proof#chunk_proof{ tx_path_is_valid = invalid }};
		{DataRoot, TXStartOffset, TXEndOffset} ->
			Proof2 = Proof#chunk_proof{
				metadata = Proof#chunk_proof.metadata#chunk_metadata{
					data_root = DataRoot
				},
				tx_start_offset = TXStartOffset,
				tx_end_offset = TXEndOffset,
				tx_path_is_valid = valid
			},
			TXSize = TXEndOffset - TXStartOffset,
			TXRelativeOffset = BlockRelativeOffset - TXStartOffset,
			%% ...then prove the chunk's placement inside the transaction (data_path).
			case ar_merkle:validate_path(
					DataRoot, TXRelativeOffset, TXSize, DataPath,
					ValidateDataPathRuleset) of
				false ->
					{false, Proof2#chunk_proof{ data_path_is_valid = invalid }};
				{ChunkID, ChunkStartOffset, ChunkEndOffset} ->
					Proof3 = Proof2#chunk_proof{
						chunk_id = ChunkID,
						chunk_start_offset = ChunkStartOffset,
						chunk_end_offset = ChunkEndOffset,
						metadata = Proof2#chunk_proof.metadata#chunk_metadata{
							chunk_size = ChunkEndOffset - ChunkStartOffset
						},
						data_path_is_valid = valid
					},
					{true, Proof3}
			end
	end.

%% Convert an absolute recall offset into a block-relative offset. Past the
%% strict data split threshold, the offset is first padded up to the enclosing
%% 256 KiB bucket boundary.
get_recall_bucket_offset(RecallOffset, BlockStartOffset) ->
	case RecallOffset >= ar_block:strict_data_split_threshold() of
		true ->
			get_padded_offset(RecallOffset + 1, ar_block:strict_data_split_threshold())
					- (?DATA_CHUNK_SIZE) - BlockStartOffset;
		false ->
			RecallOffset - BlockStartOffset
	end.

%% spora_2_6 packing: unpack the whole chunk and compare its chunk id.
%% Returns {true, ChunkID} | false | error (on an unpack exception).
validate2({spora_2_6, _} = Packing, Args) ->
	{ChunkID, ChunkStartOffset, ChunkEndOffset, BlockStartOffset, TXStartOffset, TXRoot,
			Chunk, _UnpackedChunk, _SubChunkIndex} = Args,
	ChunkSize = ChunkEndOffset - ChunkStartOffset,
	AbsoluteEndOffset = BlockStartOffset + TXStartOffset + ChunkEndOffset,
	prometheus_counter:inc(validating_packed_spora,
			[ar_packing_server:packing_atom(Packing)]),
	case ar_packing_server:unpack(Packing, AbsoluteEndOffset, TXRoot, Chunk, ChunkSize) of
		{error, _} ->
			false;
		{exception, Exception} ->
			?LOG_WARNING([{event, validate_unpack_exception},
					{packing, ar_serialize:encode_packing(Packing, false)},
					{exception, Exception}]),
			error;
		{ok, Unpacked} ->
			case ChunkID == ar_tx:generate_chunk_id(Unpacked) of
				false -> false;
				true -> {true, ChunkID}
			end
	end;
%% Other packings: check the provided unpacked chunk's size and zero padding
%% before the per-sub-chunk validation in validate3/2.
validate2(Packing, Args) ->
	{_ChunkID, ChunkStartOffset, ChunkEndOffset, _BlockStartOffset, _TXStartOffset, _TXRoot,
			_Chunk, UnpackedChunk, _SubChunkIndex} = Args,
	ChunkSize = ChunkEndOffset - ChunkStartOffset,
	case ChunkSize > ?DATA_CHUNK_SIZE of
		true ->
			false;
		false ->
			PaddingSize = ?DATA_CHUNK_SIZE - ChunkSize,
			%% The padding after the chunk data must be all zero bytes.
			case binary:part(UnpackedChunk, ChunkSize, PaddingSize) of
				<< 0:(PaddingSize * 8) >> ->
					validate3(Packing, Args);
				_ ->
					false
			end
	end.
%% @doc Unpack a single sub-chunk of the packed chunk and check it matches the
%% corresponding slice of the provided (0-padded) unpacked chunk; then check the
%% unpadded unpacked chunk hashes to the expected chunk ID.
validate3(Packing, Args) ->
	{ChunkID, ChunkStartOffset, ChunkEndOffset, BlockStartOffset, TXStartOffset, TXRoot,
			Chunk, UnpackedChunk, SubChunkIndex} = Args,
	AbsoluteEndOffset = BlockStartOffset + TXStartOffset + ChunkEndOffset,
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	SubChunkStartOffset = SubChunkIndex * SubChunkSize,
	%% We always expect the provided unpacked chunks to be padded (if necessary)
	%% to 256 KiB.
	UnpackedSubChunk = binary:part(UnpackedChunk, SubChunkStartOffset, SubChunkSize),
	PackingAtom = ar_packing_server:packing_atom(Packing),
	prometheus_counter:inc(validating_packed_spora, [PackingAtom]),
	case ar_packing_server:unpack_sub_chunk(Packing, AbsoluteEndOffset, TXRoot, Chunk,
			SubChunkStartOffset) of
		{error, _} ->
			false;
		{exception, Exception} ->
			?LOG_WARNING([{event, validate_unpack_exception},
					{packing, ar_serialize:encode_packing(Packing, false)},
					{exception, Exception}]),
			error;
		{ok, UnpackedSubChunk} ->
			%% UnpackedSubChunk is already bound above, so this clause only
			%% matches when the unpacked sub-chunk equals the expected slice.
			ChunkSize = ChunkEndOffset - ChunkStartOffset,
			UnpackedChunkNoPadding = binary:part(UnpackedChunk, 0, ChunkSize),
			case ChunkID == ar_tx:generate_chunk_id(UnpackedChunkNoPadding) of
				false ->
					false;
				true ->
					{true, ChunkID}
			end;
		{ok, _UnexpectedSubChunk} ->
			%% The sub-chunk unpacked fine but does not match the expected slice.
			false
	end.

%% @doc Return the smallest multiple of 256 KiB >= Offset
%% counting from ar_block:strict_data_split_threshold().
get_padded_offset(Offset) ->
	get_padded_offset(Offset, ar_block:strict_data_split_threshold()).

%% @doc Return the smallest multiple of 256 KiB >= Offset
%% counting from StrictDataSplitThreshold.
get_padded_offset(Offset, StrictDataSplitThreshold) ->
	Diff = Offset - StrictDataSplitThreshold,
	StrictDataSplitThreshold + ((Diff - 1) div (?DATA_CHUNK_SIZE) + 1) * (?DATA_CHUNK_SIZE).

%% @doc Validate a proof of access.
%% Validate a pre-fork-2.5 proof of access: TX path, then data path, then the
%% chunk hash, short-circuiting to false at the first failed step.
validate_pre_fork_2_5(BlockOffset, TXRoot, BlockEndOffset, POA) ->
	Validation = ar_merkle:validate_path(
		TXRoot,
		BlockOffset,
		BlockEndOffset,
		POA#poa.tx_path
	),
	case Validation of
		false ->
			false;
		{DataRoot, StartOffset, EndOffset} ->
			TXOffset = BlockOffset - StartOffset,
			validate_data_path_pre_fork_2_5(DataRoot, TXOffset, EndOffset - StartOffset, POA)
	end.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Validate the data path of a pre-fork-2.5 proof against the data root
%% recovered from the TX path validation.
validate_data_path_pre_fork_2_5(DataRoot, TXOffset, EndOffset, POA) ->
	Validation = ar_merkle:validate_path(
		DataRoot,
		TXOffset,
		EndOffset,
		POA#poa.data_path
	),
	case Validation of
		false ->
			false;
		{ChunkID, _, _} ->
			validate_chunk_pre_fork_2_5(ChunkID, POA)
	end.

%% Check the chunk in the proof hashes to the chunk ID from the data path.
validate_chunk_pre_fork_2_5(ChunkID, POA) ->
	ChunkID == ar_tx:generate_chunk_id(POA#poa.chunk).



================================================
FILE: apps/arweave/src/ar_poller.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

%%% @doc The module periodically asks peers about their recent blocks and downloads
%%% the missing ones. It serves the following purposes:
%%%
%%% - allows following the network in the absence of a public IP;
%%% - protects the node from lagging behind when there are networking issues.
-module(ar_poller).

-behaviour(gen_server).

-export([start_link/2, pause/0, resume/0]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% The frequency of choosing the peers to poll.
-ifdef(AR_TEST).
-define(COLLECT_PEERS_FREQUENCY_MS, 2000).
-else.
-define(COLLECT_PEERS_FREQUENCY_MS, 1000 * 15).
-endif.
-record(state, {
	%% The ar_poller_worker process names managed by this server.
	workers,
	%% length(workers), cached so peer collection does not recompute it.
	worker_count,
	%% Whether polling is currently paused.
	pause = false,
	%% The subset of the configured trusted peers we are currently in sync with.
	in_sync_trusted_peers = sets:new()
}).

%%%===================================================================
%%% Public API.
%%%===================================================================

start_link(Name, Workers) ->
	gen_server:start_link({local, Name}, ?MODULE, Workers, []).

%% @doc Put polling on pause.
pause() ->
	gen_server:cast(?MODULE, pause).

%% @doc Resume paused polling.
resume() ->
	gen_server:cast(?MODULE, resume).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init(Workers) ->
	ok = ar_events:subscribe(node_state),
	%% If the node has already joined, start collecting peers right away;
	%% otherwise wait for the node_state initialized event.
	case ar_node:is_joined() of
		true ->
			handle_node_state_initialized();
		false ->
			ok
	end,
	{ok, Config} = arweave_config:get_env(),
	{ok, #state{
		workers = Workers,
		worker_count = length(Workers),
		in_sync_trusted_peers = sets:from_list(Config#config.peers)
	}}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
handle_cast(pause, #state{ workers = Workers } = State) ->
	%% Pause every worker as well as this server.
	[gen_server:cast(W, pause) || W <- Workers],
	{noreply, State#state{ pause = true }};
handle_cast(resume, #state{ pause = false } = State) ->
	%% Already running - nothing to do.
	{noreply, State};
handle_cast(resume, #state{ workers = Workers } = State) ->
	[gen_server:cast(W, resume) || W <- Workers],
	gen_server:cast(?MODULE, collect_peers),
	{noreply, State#state{ pause = false }};
handle_cast(collect_peers, #state{ pause = true } = State) ->
	{noreply, State};
handle_cast(collect_peers, State) ->
	#state{ worker_count = N, workers = Workers } = State,
	%% Reserve up to a third of the workers for trusted peers; fill the rest
	%% with peers picked by the data discovery module.
	TrustedPeers = ar_util:pick_random(ar_peers:get_trusted_peers(), N div 3),
	Peers = ar_peers:get_peers(current),
	OtherPeers = ar_data_discovery:pick_peers(Peers -- TrustedPeers,
			N - length(TrustedPeers)),
	PickedPeers = TrustedPeers ++ OtherPeers,
	start_polling_peers(Workers, PickedPeers),
	ar_util:cast_after(?COLLECT_PEERS_FREQUENCY_MS, ?MODULE, collect_peers),
	{noreply, State};
handle_cast({peer_out_of_sync_timeout, Peer}, State) ->
	%% The out-of-sync penalty expired - consider the trusted peer in sync again.
	#state{ in_sync_trusted_peers = Set } = State,
	{ok, Config} = arweave_config:get_env(),
	case lists:member(Peer, Config#config.peers) of
		false ->
			{noreply, State};
		true ->
			{noreply, State#state{ in_sync_trusted_peers = sets:add_element(Peer, Set) }}
	end;
handle_cast({peer_out_of_sync, Peer}, State) ->
	#state{ in_sync_trusted_peers = Set } = State,
	{ok, Config} = arweave_config:get_env(),
	case lists:member(Peer, Config#config.peers) of
		false ->
			{noreply, State};
		true ->
			Set2 = sets:del_element(Peer, Set),
			%% Re-add the peer to the in-sync set after 5 minutes.
			ar_util:cast_after(300000, ?MODULE, {peer_out_of_sync_timeout, Peer}),
			%% Warn only on the transition from "some trusted peer in sync" to
			%% "no trusted peers in sync".
			case {sets:is_empty(Set), sets:is_empty(Set2)} of
				{false, true} ->
					ar_mining_stats:pause_performance_reports(60000),
					ar_util:terminal_clear(),
					TrustedPeersStr = string:join([ar_util:format_peer(Peer2)
							|| Peer2 <- Config#config.peers], ", "),
					?LOG_INFO([{event, node_out_of_sync},
							{peer, ar_util:format_peer(Peer)},
							{trusted_peers, TrustedPeersStr}]),
					ar:console("WARNING: The node is out of sync with all of the specified "
							"trusted peers: ~s.~n~n"
							"Please, check whether you are in sync with the network and "
							"make sure your CPU computes VDF fast enough or you are connected "
							"to a VDF server.~nThe node may be still mining, but console "
							"performance reports are temporarily paused.~n~n",
							[TrustedPeersStr]);
				_ ->
					ok
			end,
			{noreply, State#state{ in_sync_trusted_peers = Set2 }}
	end;
handle_cast({block, Peer, B, BlockQueryTime}, State) ->
	%% A worker fetched a block - hand it over to pre-validation and, on
	%% success, credit the peer for the served data.
	case ar_ignore_registry:member(B#block.indep_hash) of
		false ->
			?LOG_INFO([{event, fetched_block_for_validation},
					{block, ar_util:encode(B#block.indep_hash)},
					{peer, ar_util:format_peer(Peer)}]);
		true ->
			ok
	end,
	case ar_block_pre_validator:pre_validate(B, Peer, erlang:timestamp()) of
		ok ->
			ar_peers:rate_fetched_data(Peer, block, BlockQueryTime,
					byte_size(term_to_binary(B)));
		_ ->
			ok
	end,
	{noreply, State};
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

handle_info({event, node_state, {initialized, _B}}, State) ->
	handle_node_state_initialized(),
	{noreply, State};
handle_info({event, node_state, _}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

handle_node_state_initialized() ->
	gen_server:cast(?MODULE, collect_peers).

%% Assign one picked peer to each worker; extra workers keep their old peer.
start_polling_peers([W | Workers], [Peer | Peers]) ->
	gen_server:cast(W, {set_peer, Peer}),
	start_polling_peers(Workers, Peers);
start_polling_peers(_Workers, []) ->
	ok.



================================================
FILE: apps/arweave/src/ar_poller_sup.erl
================================================
-module(ar_poller_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public API.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

init([]) ->
	{ok, Config} = arweave_config:get_env(),
	%% One ar_poller_worker child per configured block poller, named
	%% ar_poller_worker_1 .. ar_poller_worker_N.
	Children = lists:map(
		fun(Num) ->
			Name = list_to_atom("ar_poller_worker_" ++ integer_to_list(Num)),
			{Name, {ar_poller_worker, start_link, [Name]}, permanent,
					?SHUTDOWN_TIMEOUT, worker, [ar_poller_worker]}
		end,
		lists:seq(1, Config#config.block_pollers)
	),
	%% The ar_poller coordinator is started alongside the workers and is given
	%% their registered names.
	Workers = [element(1, El) || El <- Children],
	Children2 = [?CHILD_WITH_ARGS(ar_poller, worker, ar_poller, [ar_poller, Workers])
			| Children],
	{ok, {{one_for_one, 5, 10}, Children2}}.



================================================
FILE: apps/arweave/src/ar_poller_worker.erl
================================================
-module(ar_poller_worker).

-behaviour(gen_server).

-export([start_link/1]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-record(state, {
	%% The peer this worker currently polls; undefined until set_peer.
	peer,
	%% How often to poll the peer, in milliseconds.
	polling_frequency_ms,
	%% Token identifying the current polling loop; stale {poll, Ref} casts
	%% with an old ref are ignored.
	ref,
	%% Whether polling is currently paused.
	pause = false
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link(Name) ->
	gen_server:start_link({local, Name}, ?MODULE, [], []).

%%%===================================================================
%%% gen_server callbacks.
%%%===================================================================

init([]) ->
	{ok, Config} = arweave_config:get_env(),
	[ok] = ar_events:subscribe([node_state]),
	State = #state{ polling_frequency_ms = Config#config.polling * 1000 },
	%% Start the polling loop immediately if the node already joined;
	%% otherwise wait for the node_state initialized event.
	case ar_node:is_joined() of
		true ->
			{ok, handle_node_state_initialized(State)};
		false ->
			{ok, State}
	end.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast(pause, State) ->
	{noreply, State#state{ pause = true }};
handle_cast(resume, #state{ pause = false } = State) ->
	{noreply, State};
handle_cast(resume, State) ->
	%% Restart the polling loop under a fresh ref so any in-flight {poll, _}
	%% messages from the previous loop are ignored.
	Ref = make_ref(),
	gen_server:cast(self(), {poll, Ref}),
	{noreply, State#state{ pause = false, ref = Ref }};
handle_cast({poll, _Ref}, #state{ pause = true } = State) ->
	{noreply, State};
handle_cast({poll, _Ref}, #state{ peer = undefined } = State) ->
	%% No peer assigned yet - pause until set_peer arrives.
	{noreply, State#state{ pause = true }};
handle_cast({poll, Ref}, #state{ ref = Ref, peer = Peer,
		polling_frequency_ms = FrequencyMs } = State) ->
	CurrentHeight = ar_node:get_height(),
	{L, NotOnChain} = ar_block_cache:get_longest_chain_cache(block_cache),
	HL = [H || {H, _TXIDs} <- L],
	case NotOnChain >= 5 of
		true ->
			slow_block_application_warning(NotOnChain);
		false ->
			ok
	end,
	%% Ask the peer for blocks missing from our recent hash list.
	case ar_http_iface_client:get_recent_hash_list_diff(Peer, HL) of
		{ok, in_sync} ->
			ar_util:cast_after(FrequencyMs, self(), {poll, Ref}),
			{noreply, State};
		{ok, {H, TXIDs, BlocksOnTop}} ->
			case ar_ignore_registry:member({poller_worker, H})
					orelse ar_ignore_registry:permanent_member(H) of
				true ->
					ok;
				false ->
					case BlocksOnTop >= 5 of
						true ->
							warning(Peer, behind);
						false ->
							ok
					end,
					%% Briefly ignore the block while we fetch it so other
					%% workers do not fetch the same block concurrently.
					IgnoreRef = make_ref(),
					ar_ignore_registry:add_ref({poller_worker, H}, IgnoreRef, 1000),
					Indices = get_missing_tx_indices(TXIDs),
					case ar_http_iface_client:get_block(Peer, H, Indices) of
						{#block{ height = Height } = B, TimeMicroseconds, _Size} ->
							case Height =< CurrentHeight - 5 of
								true ->
									warning(Peer, fork);
								false ->
									ok
							end,
							case collect_missing_transactions(B#block.txs) of
								{ok, TXs} ->
									B2 = B#block{ txs = TXs },
									ar_ignore_registry:remove_ref({poller_worker, H},
											IgnoreRef),
									gen_server:cast(ar_poller,
											{block, Peer, B2, TimeMicroseconds}),
									ok;
								failed ->
									?LOG_WARNING([
											{event, failed_to_get_block_txs_from_peer},
											{block, ar_util:encode(H)},
											{peer, ar_util:format_peer(Peer)},
											{tx_count, length(B#block.txs)}]),
									ok
							end;
						Error ->
							ar_ignore_registry:remove_ref({poller_worker, H}, IgnoreRef),
							?LOG_DEBUG([{event, failed_to_fetch_block},
									{peer, ar_util:format_peer(Peer)},
									{block, ar_util:encode(H)},
									{error, io_lib:format("~p", [Error])}]),
							ok
					end
			end,
			ar_util:cast_after(FrequencyMs, self(), {poll, Ref}),
			{noreply, State};
		{error, not_found} ->
			%% The peer does not know our recent hashes - report it out of sync
			%% and stop polling it.
			?LOG_DEBUG([{event, peer_out_of_sync}, {peer, ar_util:format_peer(Peer)}]),
			gen_server:cast(ar_poller, {peer_out_of_sync, Peer}),
			{noreply, State#state{ pause = true }};
		{error, Reason} ->
			ar_http_iface_client:log_failed_request(Reason,
					[{event, failed_to_get_recent_hash_list_diff},
					{peer, ar_util:format_peer(Peer)},
					{reason, io_lib:format("~p", [Reason])}]),
			{noreply, State#state{ pause = true }}
	end;
handle_cast({poll, _Ref}, State) ->
	%% A poll message from a superseded loop - drop it.
	{noreply, State};
handle_cast({set_peer, Peer}, #state{ ref = Ref, pause = Pause } = State) ->
	%% Assign a new peer; if the worker was paused, kick off a new polling loop.
	Ref2 =
		case Pause of
			true ->
				Ref3 = make_ref(),
				gen_server:cast(self(), {poll, Ref3}),
				Ref3;
			false ->
				Ref
		end,
	{noreply, State#state{ peer = Peer, pause = false, ref = Ref2 }};
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {message, Msg}]),
	{noreply, State}.
handle_info({event, node_state, {initialized, _B}}, State) ->
	{noreply, handle_node_state_initialized(State)};
handle_info({event, node_state, _}, State) ->
	{noreply, State};
%% Expected gun (HTTP client) connection lifecycle messages - ignore them.
handle_info({gun_down, _, http, normal, _, _}, State) ->
	{noreply, State};
handle_info({gun_down, _, http, closed, _, _}, State) ->
	{noreply, State};
handle_info({gun_up, _, http}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Start a fresh polling loop and remember its ref.
handle_node_state_initialized(State) ->
	Ref = make_ref(),
	gen_server:cast(self(), {poll, Ref}),
	State#state{ ref = Ref }.

%% Return the 0-based positions of the transactions we do not have in the
%% mempool, so only those are requested from the peer.
get_missing_tx_indices(TXIDs) ->
	get_missing_tx_indices(TXIDs, 0).

get_missing_tx_indices([], _N) ->
	[];
get_missing_tx_indices([TXID | TXIDs], N) ->
	case ar_mempool:has_tx(TXID) of
		true ->
			get_missing_tx_indices(TXIDs, N + 1);
		false ->
			[N | get_missing_tx_indices(TXIDs, N + 1)]
	end.

%% Warn the operator that block validation is lagging behind the tip.
slow_block_application_warning(N) ->
	ar_mining_stats:pause_performance_reports(60000),
	ar_util:terminal_clear(),
	ar:console("WARNING: there are more than ~B not yet validated blocks on the longest chain."
			" Please, double-check if you are in sync with the network and make sure your "
			"CPU computes VDF fast enough or you are connected to a VDF server."
			"~nThe node may be still mining, but console performance reports are temporarily "
			"paused.~n~n", [N]).
%% Print a console warning about a trusted peer being ahead of us (behind) or
%% on a diverged fork (fork). Non-trusted peers are ignored.
warning(Peer, Event) ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(Peer, Config#config.peers) of
		false ->
			ok;
		true ->
			ar_mining_stats:pause_performance_reports(60000),
			EventMessage =
				case Event of
					behind ->
						"is 5 or more blocks ahead of us";
					fork ->
						"is on a fork branching off of our fork 5 or more blocks behind"
				end,
			ar_util:terminal_clear(),
			ar:console("WARNING: peer ~s ~s. "
					"Please, double-check if you are in sync with the network and "
					"make sure your CPU computes VDF fast enough or you are connected "
					"to a VDF server.~nThe node may be still mining, but console performance "
					"reports are temporarily paused.~n~n",
					[ar_util:format_peer(Peer), EventMessage])
	end.

%% Replace transaction identifiers with the full transactions from the mempool.
%% Elements that are already #tx{} records are kept; return failed if any
%% identifier is not found in the mempool.
collect_missing_transactions([#tx{} = TX | TXs]) ->
	case collect_missing_transactions(TXs) of
		failed ->
			failed;
		{ok, TXs2} ->
			{ok, [TX | TXs2]}
	end;
collect_missing_transactions([TXID | TXs]) ->
	case ar_mempool:get_tx(TXID) of
		not_found ->
			failed;
		TX ->
			collect_missing_transactions([TX | TXs])
	end;
collect_missing_transactions([]) ->
	{ok, []}.



================================================
FILE: apps/arweave/src/ar_pool.erl
================================================
%%% @doc The module defines the core pool mining functionality.
%%%
%%% The key actors are a pool client, a pool proxy, and a pool server. The pool client may be
%%% a standalone mining node or an exit peer in a coordinated mining setup. The other CM peers
%%% communicate with the pool via the exit peer. The proxy is NOT an Arweave node.
%%%
%%% Communication Scheme
%%%
%%%                                 +---> Standalone Pool Client
%%%                                 |
%%% Pool Server <--> Pool Proxy <---+
%%%                                 |
%%%                                 +---> CM Exit Node Pool Client <--> CM Miner Pool Client
%%%
%%% Job Assignment
%%%
%%% 1. Solo Mining
%%%
%%% Pool Server -> Pool Proxy -> Standalone Pool Client
%%%
%%% 2. Coordinated Mining
%%%
%%% Pool Server -> Pool Proxy -> CM Exit Node Pool Client -> CM Miner Pool Client
%%%
%%% Partial Solution Lifecycle
%%%
%%% 1. Solo Mining
%%%
%%% Standalone Pool Client -> Pool Proxy -> Pool Server
%%%
%%% 2. Coordinated Mining
%%%
%%% CM Miner Pool Client -> CM Exit Node Pool Client -> Pool Proxy -> Pool Server
-module(ar_pool).

-behaviour(gen_server).

-export([start_link/0, is_client/0, get_current_session_key_seed_pairs/0, get_jobs/1,
		get_latest_job/0, cache_jobs/1, process_partial_solution/1,
		post_partial_solution/1, pool_peer/0, process_cm_jobs/2]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("arweave/include/ar_pool.hrl").
-include_lib("eunit/include/eunit.hrl").

-record(state, {
	%% The most recent keys come first.
	session_keys = [],
	%% Key => [{Output, StepNumber, PartitionUpperBound, Seed, Diff}, ...]
	jobs_by_session_key = maps:new(),
	%% Pending process_partial_solution callers, keyed by the request ref.
	request_pid_by_ref = maps:new()
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Return true if we are a pool client.
is_client() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.is_pool_client == true.

%% @doc Return a list of up to two most recently cached VDF session key, seed pairs.
get_current_session_key_seed_pairs() ->
	gen_server:call(?MODULE, get_current_session_key_seed_pairs, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return a set of the most recent cached jobs.
get_jobs(PrevOutput) ->
	gen_server:call(?MODULE, {get_jobs, PrevOutput}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return the most recent cached #job{}. Return an empty record if the
%% cache is empty.
get_latest_job() ->
	gen_server:call(?MODULE, get_latest_job, ?DEFAULT_CALL_TIMEOUT).

%% @doc Cache the given jobs.
cache_jobs(Jobs) ->
	gen_server:cast(?MODULE, {cache_jobs, Jobs}).

%% @doc Validate the given (partial) solution. If the solution is eligible for
%% producing a block, produce and publish a block.
process_partial_solution(Solution) ->
	gen_server:call(?MODULE, {process_partial_solution, Solution}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Send the partial solution to the pool.
post_partial_solution(Solution) ->
	gen_server:cast(?MODULE, {post_partial_solution, Solution}).

%% @doc Return the pool server as a "peer" recognized by ar_http_iface_client.
pool_peer() ->
	{ok, Config} = arweave_config:get_env(),
	{pool, Config#config.pool_server_address}.

%% @doc Process the set of coordinated mining jobs received from the pool.
%% H1->H2 jobs are only processed when mining is enabled; H1 read jobs are
%% always processed.
process_cm_jobs(Jobs, Peer) ->
	#pool_cm_jobs{ h1_to_h2_jobs = H1ToH2Jobs, h1_read_jobs = H1ReadJobs } = Jobs,
	{ok, Config} = arweave_config:get_env(),
	Partitions = ar_mining_io:get_partitions(infinity),
	case Config#config.mine of
		true ->
			process_h1_to_h2_jobs(H1ToH2Jobs, Peer, Partitions);
		_ ->
			ok
	end,
	process_h1_read_jobs(H1ReadJobs, Partitions).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	ok = ar_events:subscribe(solution),
	{ok, #state{}}.
handle_call(get_current_session_key_seed_pairs, _From, State) ->
	JobsBySessionKey = State#state.jobs_by_session_key,
	%% At most the two most recent session keys; element 4 of a cached job
	%% tuple {Output, StepNumber, PartitionUpperBound, Seed, Diff} is the seed.
	Keys = lists:sublist(State#state.session_keys, 2),
	KeySeedPairs = [{Key, element(4, hd(maps:get(Key, JobsBySessionKey)))} || Key <- Keys],
	{reply, KeySeedPairs, State};
handle_call({get_jobs, PrevOutput}, _From, State) ->
	SessionKeys = State#state.session_keys,
	JobCache = State#state.jobs_by_session_key,
	{reply, get_jobs(PrevOutput, SessionKeys, JobCache), State};
handle_call(get_latest_job, _From, State) ->
	case State#state.session_keys of
		[] ->
			{reply, #job{}, State};
		[Key | _] ->
			{O, SN, U, _S, _Diff} = hd(maps:get(Key, State#state.jobs_by_session_key)),
			{reply, #job{ output = O, global_step_number = SN,
					partition_upper_bound = U }, State}
	end;
handle_call({process_partial_solution, Solution}, From, State) ->
	#state{ request_pid_by_ref = Map } = State,
	Ref = make_ref(),
	%% noreply means the solution was handed to the mining pipeline; remember
	%% the caller so the matching solution event can reply later.
	case process_partial_solution(Solution, Ref) of
		noreply ->
			{noreply, State#state{ request_pid_by_ref = maps:put(Ref, From, Map) }};
		Reply ->
			{reply, Reply, State}
	end;
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
handle_cast({cache_jobs, #jobs{ jobs = [] }}, State) ->
	{noreply, State};
handle_cast({cache_jobs, Jobs}, State) ->
	#jobs{ jobs = JobList, partial_diff = PartialDiff,
			next_seed = NextSeed, seed = Seed, interval_number = IntervalNumber,
			next_vdf_difficulty = NextVDFDifficulty } = Jobs,
	SessionKey = {NextSeed, IntervalNumber, NextVDFDifficulty},
	SessionKeys = State#state.session_keys,
	SessionKeys2 =
		case lists:member(SessionKey, SessionKeys) of
			true ->
				SessionKeys;
			false ->
				[SessionKey | SessionKeys]
		end,
	%% Prepend the new jobs to the session's cache, newest first.
	JobList2 = [{Job#job.output, Job#job.global_step_number,
			Job#job.partition_upper_bound, Seed, PartialDiff} || Job <- JobList],
	PrevJobList = maps:get(SessionKey, State#state.jobs_by_session_key, []),
	JobList3 = JobList2 ++ PrevJobList,
	JobsBySessionKey = maps:put(SessionKey, JobList3, State#state.jobs_by_session_key),
	%% Keep at most two sessions cached; evict the oldest one.
	{SessionKeys3, JobsBySessionKey2} =
		case length(SessionKeys2) == 3 of
			true ->
				[SK1, SK2, RemoveKey] = SessionKeys2,
				{[SK1, SK2], maps:remove(RemoveKey, JobsBySessionKey)};
			false ->
				{SessionKeys2, JobsBySessionKey}
		end,
	{noreply, State#state{ session_keys = SessionKeys3,
			jobs_by_session_key = JobsBySessionKey2 }};
handle_cast({post_partial_solution, Solution}, State) ->
	case ar_http_iface_client:post_partial_solution(pool_peer(), Solution) of
		{ok, _Response} ->
			ok;
		{error, Error} ->
			?LOG_WARNING([{event, failed_to_submit_partial_solution},
					{reason, io_lib:format("~p", [Error])}])
	end,
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.
%% Solution-event clauses: each {pool, Ref} event resolves the pending
%% process_partial_solution call registered under Ref with the matching status.
handle_info({event, solution, {rejected, #{ reason := mining_address_banned,
		source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{
			status = <<"rejected_mining_address_banned">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {rejected, #{ reason := missing_key_file,
		source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{
			status = <<"rejected_missing_key_file">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {rejected, #{ reason := vdf_not_found,
		source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{
			status = <<"rejected_vdf_not_found">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {rejected, #{ reason := bad_vdf,
		source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{ status = <<"rejected_bad_vdf">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {rejected, #{ reason := invalid_packing_difficulty,
		source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{
			status = <<"rejected_invalid_packing_difficulty">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {partial, #{ source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{ status = <<"accepted">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {stale, #{ source := {pool, Ref} }}}, State) ->
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{ status = <<"stale">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, {accepted, #{ indep_hash := H,
		source := {pool, Ref} }}}, State) ->
	%% The solution produced a block; include its hash in the response.
	#state{ request_pid_by_ref = Map } = State,
	PID = maps:get(Ref, Map),
	gen_server:reply(PID, #partial_solution_response{ indep_hash = H,
			status = <<"accepted_block">> }),
	{noreply, State#state{ request_pid_by_ref = maps:remove(Ref, Map) }};
handle_info({event, solution, _Event}, State) ->
	{noreply, State};
handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Build a #jobs{} response from the most recent session, collecting cached
%% jobs newer than PrevOutput (up to ?GET_JOBS_COUNT of them).
get_jobs(PrevOutput, SessionKeys, JobCache) ->
	case SessionKeys of
		[] ->
			#jobs{};
		[{NextSeed, Interval, NextVDFDifficulty} = SessionKey | _] ->
			Jobs = maps:get(SessionKey, JobCache),
			{Seed, PartialDiff, Jobs2} = collect_jobs(Jobs, PrevOutput, ?GET_JOBS_COUNT),
			Jobs3 = [#job{ output = O, global_step_number = SN,
					partition_upper_bound = U } || {O, SN, U} <- Jobs2],
			#jobs{
				jobs = Jobs3,
				seed = Seed,
				partial_diff = PartialDiff,
				next_seed = NextSeed,
				interval_number = Interval,
				next_vdf_difficulty = NextVDFDifficulty
			}
	end.

%% Collect up to N cached jobs, stopping when the job with output PrevO is
%% reached (the caller has already seen it). The seed and partial difficulty of
%% the first collected job are returned alongside.
collect_jobs([], _PrevO, _N) ->
	{<<>>, {0, 0}, []};
collect_jobs(_Jobs, _PrevO, 0) ->
	{<<>>, {0, 0}, []};
collect_jobs([{O, _SN, _U, _S, _PartialDiff} | _Jobs], O, _N) ->
	{<<>>, {0, 0}, []};
collect_jobs([{O, SN, U, S, PartialDiff} | Jobs], PrevO, N) ->
	{S, PartialDiff, [{O, SN, U} | collect_jobs(Jobs, PrevO, N - 1, PartialDiff)]}.
%% Continue collecting jobs, but only while the partial difficulty stays the
%% same as that of the first collected job.
collect_jobs([], _PrevO, _N, _PartialDiff) ->
	[];
collect_jobs(_Jobs, _PrevO, 0, _PartialDiff) ->
	[];
collect_jobs([{O, _SN, _U, _S, _PartialDiff} | _Jobs], O, _N, _PartialDiff2) ->
	%% Reached the job the caller has already seen.
	[];
collect_jobs([{O, SN, U, _S, PartialDiff} | Jobs], PrevO, N, PartialDiff) ->
	[{O, SN, U} | collect_jobs(Jobs, PrevO, N - 1, PartialDiff)];
collect_jobs(_Jobs, _PrevO, _N, _PartialDiff) ->
	%% PartialDiff mismatch.
	[].

%% Entry point of the partial solution validation pipeline; each stage either
%% rejects with a #partial_solution_response{} or hands over to the next stage.
process_partial_solution(Solution, Ref) ->
	PoA1 = Solution#mining_solution.poa1,
	PoA2 = Solution#mining_solution.poa2,
	case ar_block:validate_proof_size(PoA1) andalso ar_block:validate_proof_size(PoA2) of
		true ->
			process_partial_solution_field_size(Solution, Ref);
		false ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> }
	end.

process_partial_solution_field_size(Solution, Ref) ->
	#mining_solution{
		nonce_limiter_output = Output,
		seed = Seed,
		next_seed = NextSeed,
		mining_address = MiningAddress,
		preimage = Preimage,
		solution_hash = SolutionH
	} = Solution,
	%% We have less strict deserialization in the pool pipeline to simplify
	%% the pool "proxy" implementation. Therefore, we validate the field sizes here
	%% and return the "rejected_bad_poa" status in case of a failure.
	case {byte_size(Output), byte_size(Seed), byte_size(NextSeed),
			byte_size(MiningAddress), byte_size(Preimage), byte_size(SolutionH)} of
		{32, 48, 48, 32, 32, 32} ->
			case assert_chunk_sizes(Solution) of
				{true, Solution2} ->
					process_partial_solution_poa2_size(Solution2, Ref);
				{false, _} ->
					#partial_solution_response{ status = <<"rejected_bad_poa">> }
			end;
		_ ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> }
	end.
%% Check the chunk and unpacked chunk sizes are consistent with the packing
%% difficulty and the presence of a second recall byte. Returns {IsValid,
%% Solution2} where Solution2 has the unpacked chunks that are redundant for
%% the given packing reset to <<>>.
assert_chunk_sizes(Solution) ->
	#mining_solution{
		packing_difficulty = PackingDifficulty,
		recall_byte2 = RecallByte2,
		poa1 = #poa{ chunk = C1, unpacked_chunk = U1 } = PoA1,
		poa2 = #poa{ chunk = C2, unpacked_chunk = U2 } = PoA2
	} = Solution,
	SolutionResetUnpackedChunk2 = Solution#mining_solution{
			poa2 = PoA2#poa{ unpacked_chunk = <<>> } },
	SolutionResetUnpackedChunks = SolutionResetUnpackedChunk2#mining_solution{
			poa1 = PoA1#poa{ unpacked_chunk = <<>> } },
	C1Size = byte_size(C1),
	C2Size = byte_size(C2),
	U1Size = byte_size(U1),
	U2Size = byte_size(U2),
	IsC1FullSize = C1Size == ?DATA_CHUNK_SIZE,
	IsC1SubChunkSize = C1Size == ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	IsC2Empty = C2Size == 0,
	IsC2FullSize = C2Size == ?DATA_CHUNK_SIZE,
	IsC2SubChunkSize = C2Size == ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	%% When the packing is composite (packing_difficulty >= 1), The unpacked chunk is
	%% expected to be 0-padded when smaller than ?DATA_CHUNK_SIZE.
	IsU1FullSize = U1Size == ?DATA_CHUNK_SIZE,
	IsU2FullSize = U2Size == ?DATA_CHUNK_SIZE,
	%% {composite?, second recall byte} determines which sizes to require and
	%% which unpacked chunks to keep.
	case {PackingDifficulty >= 1, RecallByte2} of
		{false, undefined} ->
			{IsC1FullSize andalso IsC2Empty, SolutionResetUnpackedChunks};
		{true, undefined} ->
			{IsC1SubChunkSize andalso IsC2Empty andalso IsU1FullSize,
					SolutionResetUnpackedChunk2};
		{false, _} ->
			{IsC1FullSize andalso IsC2FullSize, SolutionResetUnpackedChunks};
		{true, _} ->
			{IsC1SubChunkSize andalso IsC2SubChunkSize andalso IsU1FullSize
					andalso IsU2FullSize, Solution}
	end.

%% For a one-chunk solution the second proof of access must be entirely empty.
process_partial_solution_poa2_size(Solution, Ref) ->
	#mining_solution{
		poa2 = #poa{ chunk = C, data_path = DP, tx_path = TP, unpacked_chunk = U }
	} = Solution,
	case ar_mining_server:is_one_chunk_solution(Solution) of
		true ->
			case {C, DP, TP, U} of
				{<<>>, <<>>, <<>>, <<>>} ->
					process_partial_solution_partition_number(Solution, Ref);
				_ ->
					#partial_solution_response{ status = <<"rejected_bad_poa">> }
			end;
		false ->
			process_partial_solution_partition_number(Solution, Ref)
	end.
%% Reject the solution when the claimed partition number exceeds the maximum
%% implied by the claimed partition upper bound; otherwise continue down the
%% validation pipeline.
process_partial_solution_partition_number(Solution, Ref) ->
	#mining_solution{ partition_number = PartitionNumber,
			partition_upper_bound = PartitionUpperBound } = Solution,
	MaxPartitionNumber = ar_node:get_max_partition_number(PartitionUpperBound),
	case PartitionNumber =< MaxPartitionNumber of
		true ->
			process_partial_solution_packing_difficulty(Solution, Ref);
		false ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> }
	end.

%% Reject the solution when its packing difficulty / replica format pair is
%% not valid at the node's current height; otherwise continue validation.
process_partial_solution_packing_difficulty(Solution, Ref) ->
	#mining_solution{ packing_difficulty = PackingDifficulty,
			replica_format = ReplicaFormat } = Solution,
	CurrentHeight = ar_node:get_height(),
	case ar_block:validate_replica_format(CurrentHeight, PackingDifficulty,
			ReplicaFormat) of
		false ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> };
		true ->
			process_partial_solution_nonce(Solution, Ref)
	end.

%% Reject the solution when the nonce lies outside the range allowed for the
%% given packing difficulty; otherwise continue validation.
process_partial_solution_nonce(Solution, Ref) ->
	#mining_solution{ nonce = Nonce,
			packing_difficulty = PackingDifficulty } = Solution,
	MaxNonce = ar_block:get_max_nonce(PackingDifficulty),
	case Nonce =< MaxNonce of
		true ->
			process_partial_solution_quick_pow(Solution, Ref);
		false ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> }
	end.

%% Cheap proof-of-work sanity check performed before the expensive
%% proof-of-access validation: recompute H0 from the solution fields and make
%% sure hashing the claimed preimage reproduces the claimed solution hash.
%% On success, continue with the recomputed H0.
process_partial_solution_quick_pow(Solution, Ref) ->
	#mining_solution{
		nonce_limiter_output = NonceLimiterOutput,
		partition_number = PartitionNumber,
		seed = Seed,
		mining_address = MiningAddress,
		preimage = Preimage,
		solution_hash = SolutionH,
		packing_difficulty = PackingDifficulty
	} = Solution,
	H0 = ar_block:compute_h0(NonceLimiterOutput, PartitionNumber, Seed,
			MiningAddress, PackingDifficulty),
	case ar_block:compute_solution_h(H0, Preimage) == SolutionH of
		true ->
			process_partial_solution_pow(Solution, Ref, H0);
		false ->
			#partial_solution_response{ status = <<"rejected_wrong_hash">> }
	end.
%% Full proof-of-work check. Computes H1 from H0, the nonce, and the first
%% chunk, then dispatches on {does H1 match the claimed hash+preimage,
%% is this a one-chunk solution}:
%%   - H1 matches but recall_byte2 is set => inconsistent claim, bad PoA;
%%   - H1 matches and it is a one-chunk solution => proceed;
%%   - H1 does not match a one-chunk solution => bad PoA;
%%   - otherwise try H2 (two-chunk path); a mismatch there is a wrong hash.
process_partial_solution_pow(Solution, Ref, H0) ->
	#mining_solution{
		nonce = Nonce,
		poa1 = #poa{ chunk = Chunk1 },
		solution_hash = SolutionH,
		preimage = Preimage,
		poa2 = #poa{ chunk = Chunk2 }
	} = Solution,
	{H1, Preimage1} = ar_block:compute_h1(H0, Nonce, Chunk1),
	case {H1 == SolutionH andalso Preimage1 == Preimage,
			ar_mining_server:is_one_chunk_solution(Solution)} of
		{true, false} ->
			%% The solution hash is the H1 of a one-chunk solution, yet
			%% recall_byte2 claims a second chunk.
			#partial_solution_response{ status = <<"rejected_bad_poa">> };
		{true, true} ->
			process_partial_solution_partition_upper_bound(Solution, Ref, H0, H1);
		{false, true} ->
			%% One-chunk solution whose H1 does not match the claimed hash.
			#partial_solution_response{ status = <<"rejected_bad_poa">> };
		{false, false} ->
			%% Two-chunk solution: the claimed hash must be H2.
			{H2, Preimage2} = ar_block:compute_h2(H1, Chunk2, H0),
			case H2 == SolutionH andalso Preimage2 == Preimage of
				false ->
					#partial_solution_response{ status = <<"rejected_wrong_hash">> };
				true ->
					process_partial_solution_partition_upper_bound(Solution, Ref, H0, H1)
			end
	end.

%% Guard against a non-positive partition upper bound before computing recall
%% bytes (which take a remainder modulo the upper bound).
process_partial_solution_partition_upper_bound(Solution, Ref, H0, H1) ->
	#mining_solution{ partition_upper_bound = PartitionUpperBound } = Solution,
	%% We are going to validate the VDF data later anyways; here we simply want to
	%% make sure the upper bound is positive so that the recall byte calculation
	%% does not fail as it takes a remainder of the division by partition upper bound.
	case PartitionUpperBound > 0 of
		true ->
			process_partial_solution_poa(Solution, Ref, H0, H1);
		_ ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> }
	end.
%% Validate the proof(s) of access. Recomputes the recall byte(s) from H0 and
%% the nonce, looks up the block bounds, and calls ar_poa:validate/1 for the
%% first (and, for a two-chunk solution, the second) proof. On success, builds
%% the PoA caches used by ar_node_worker and continues the pipeline.
process_partial_solution_poa(Solution, Ref, H0, H1) ->
	#mining_solution{
		partition_number = PartitionNumber,
		partition_upper_bound = PartitionUpperBound,
		nonce = Nonce,
		recall_byte1 = RecallByte1,
		poa1 = PoA1,
		mining_address = MiningAddress,
		solution_hash = SolutionH,
		recall_byte2 = RecallByte2,
		poa2 = PoA2,
		packing_difficulty = PackingDifficulty,
		replica_format = ReplicaFormat
	} = Solution,
	{RecallRange1Start, RecallRange2Start} = ar_block:get_recall_range(H0,
			PartitionNumber, PartitionUpperBound),
	ComputedRecallByte1 = ar_block:get_recall_byte(RecallRange1Start, Nonce,
			PackingDifficulty),
	{BlockStart1, BlockEnd1, TXRoot1} = ar_block_index:get_block_bounds(
			ComputedRecallByte1),
	BlockSize1 = BlockEnd1 - BlockStart1,
	Packing = ar_block:get_packing(PackingDifficulty, MiningAddress, ReplicaFormat),
	SubChunkIndex = ar_block:get_sub_chunk_index(PackingDifficulty, Nonce),
	%% If the claimed recall byte does not match the recomputed one, the
	%% andalso short-circuits to false and we reject without validating.
	case RecallByte1 == ComputedRecallByte1 andalso ar_poa:validate({BlockStart1,
			RecallByte1, TXRoot1, BlockSize1, PoA1, Packing, SubChunkIndex,
			not_set}) of
		error ->
			?LOG_ERROR([{event, pool_failed_to_validate_proof_of_access}]),
			#partial_solution_response{ status = <<"rejected_bad_poa">> };
		false ->
			#partial_solution_response{ status = <<"rejected_bad_poa">> };
		{true, ChunkID} when H1 == SolutionH ->
			%% One-chunk solution (the claimed hash is H1): no second proof.
			PoACache = {{BlockStart1, RecallByte1, TXRoot1, BlockSize1, Packing},
					ChunkID},
			PoA2Cache = undefined,
			process_partial_solution_difficulty(Solution, Ref, PoACache, PoA2Cache);
		{true, ChunkID} ->
			%% Two-chunk solution: repeat the recall byte and PoA checks for
			%% the second proof.
			ComputedRecallByte2 = ar_block:get_recall_byte(RecallRange2Start, Nonce,
					PackingDifficulty),
			{BlockStart2, BlockEnd2, TXRoot2} = ar_block_index:get_block_bounds(
					ComputedRecallByte2),
			BlockSize2 = BlockEnd2 - BlockStart2,
			case RecallByte2 == ComputedRecallByte2 andalso ar_poa:validate(
					{BlockStart2, RecallByte2, TXRoot2, BlockSize2, PoA2, Packing,
					SubChunkIndex, not_set}) of
				error ->
					?LOG_ERROR([{event, pool_failed_to_validate_proof_of_access}]),
					#partial_solution_response{ status = <<"rejected_bad_poa">> };
				false ->
					#partial_solution_response{ status = <<"rejected_bad_poa">> };
				{true, Chunk2ID} ->
					PoA2Cache = {{BlockStart2, RecallByte2, TXRoot2, BlockSize2,
							Packing}, Chunk2ID},
					PoACache = {{BlockStart1, RecallByte1, TXRoot1, BlockSize1,
							Packing}, ChunkID},
					process_partial_solution_difficulty(Solution, Ref, PoACache,
							PoA2Cache)
			end
	end.

%% Check the solution hash against the current network difficulty. A solution
%% that does NOT meet the network difficulty is still a valid partial solution
%% for the pool => reply "accepted". A solution that does meet it may become a
%% block, so it continues into VDF validation.
process_partial_solution_difficulty(Solution, Ref, PoACache, PoA2Cache) ->
	#mining_solution{ solution_hash = SolutionH, recall_byte2 = RecallByte2,
			packing_difficulty = PackingDifficulty } = Solution,
	IsPoA1 = (RecallByte2 == undefined),
	case ar_node_utils:passes_diff_check(SolutionH, IsPoA1,
			ar_node:get_current_diff(), PackingDifficulty) of
		false ->
			#partial_solution_response{ status = <<"accepted">> };
		true ->
			process_partial_solution_vdf(Solution, Ref, PoACache, PoA2Cache)
	end.

%% Final stage: cross-check the solution's VDF claims (step checkpoints, seed,
%% partition upper bound) against the locally known VDF session. On success,
%% hand the solution to ar_node_worker for block production and return
%% 'noreply' (the response is sent later).
process_partial_solution_vdf(Solution, Ref, PoACache, PoA2Cache) ->
	#mining_solution{
		step_number = StepNumber,
		next_seed = NextSeed,
		start_interval_number = StartIntervalNumber,
		next_vdf_difficulty = NextVDFDifficulty,
		nonce_limiter_output = Output,
		seed = Seed,
		partition_upper_bound = PartitionUpperBound
	} = Solution,
	SessionKey = {NextSeed, StartIntervalNumber, NextVDFDifficulty},
	MayBeLastStepCheckpoints = ar_nonce_limiter:get_step_checkpoints(
			StepNumber, SessionKey),
	MayBeSeed = ar_nonce_limiter:get_seed(SessionKey),
	MayBeUpperBound = ar_nonce_limiter:get_active_partition_upper_bound(
			StepNumber, SessionKey),
	case {MayBeLastStepCheckpoints, MayBeSeed, MayBeUpperBound} of
		{not_found, _, _} ->
			#partial_solution_response{ status = <<"rejected_vdf_not_found">> };
		{_, not_found, _} ->
			#partial_solution_response{ status = <<"rejected_vdf_not_found">> };
		{_, _, not_found} ->
			#partial_solution_response{ status = <<"rejected_vdf_not_found">> };
		{[Output | _] = LastStepCheckpoints, Seed, PartitionUpperBound} ->
			Solution2 = Solution#mining_solution{
					last_step_checkpoints = LastStepCheckpoints,
					%% ar_node_worker will fetch the required steps based on
					%% the prev block.
					steps = not_found },
			ar_node_worker:found_solution({pool, Ref}, Solution2, PoACache, PoA2Cache),
			noreply;
		_ ->
			%% {Output, Seed, PartitionUpperBound} mismatch (pattern matching against
			%% the solution fields deconstructed above).
			#partial_solution_response{ status = <<"rejected_bad_vdf">> }
	end.

%% For each H1->H2 job received from the pool, ask the coordinated-mining
%% subsystem to compute H2 on the given peer when we store the partition
%% holding the second recall byte.
process_h1_to_h2_jobs([], _Peer, _Partitions) ->
	ok;
process_h1_to_h2_jobs([Candidate | Jobs], Peer, Partitions) ->
	case we_have_partition_for_the_second_recall_byte(Candidate, Partitions) of
		true ->
			ar_coordination:compute_h2_for_peer(Peer, Candidate);
		false ->
			ok
	end,
	process_h1_to_h2_jobs(Jobs, Peer, Partitions).

%% For each H1-read job, prepare and post the solution when we store the
%% partition holding the first recall byte.
process_h1_read_jobs([], _Partitions) ->
	ok;
process_h1_read_jobs([Candidate | Jobs], Partitions) ->
	case we_have_partition_for_the_first_recall_byte(Candidate, Partitions) of
		true ->
			ar_mining_server:prepare_and_post_solution(Candidate),
			ar_mining_stats:h2_received_from_peer(pool);
		false ->
			ok
	end,
	process_h1_read_jobs(Jobs, Partitions).

%% True when the candidate's {partition_number, mining_address,
%% packing_difficulty} triple appears in our partition list.
we_have_partition_for_the_first_recall_byte(_Candidate, []) ->
	false;
we_have_partition_for_the_first_recall_byte(
		#mining_candidate{ mining_address = Addr, partition_number = PartitionID,
				packing_difficulty = PackingDifficulty },
		[{PartitionID, Addr, PackingDifficulty} | _Partitions]) ->
	true;
we_have_partition_for_the_first_recall_byte(Candidate, [_Partition | Partitions]) ->
	%% Mining address or partition number mismatch.
	we_have_partition_for_the_first_recall_byte(Candidate, Partitions).

%% Same as above but against partition_number2 (the second recall range).
we_have_partition_for_the_second_recall_byte(_Candidate, []) ->
	false;
we_have_partition_for_the_second_recall_byte(
		#mining_candidate{ mining_address = Addr, partition_number2 = PartitionID,
				packing_difficulty = PackingDifficulty },
		[{PartitionID, Addr, PackingDifficulty} | _Partitions]) ->
	true;
we_have_partition_for_the_second_recall_byte(Candidate, [_Partition | Partitions]) ->
	%% Mining address or partition number mismatch.
	we_have_partition_for_the_second_recall_byte(Candidate, Partitions).
%%%===================================================================
%%% Tests.
%%%===================================================================

%% Exercise get_jobs/3 job collection: empty cache, previously seen output,
%% difficulty boundaries, and strict "newer than previous output" cutoff.
%% The atoms (o, gsn, u, s, d, ...) are opaque placeholders for outputs, step
%% numbers, upper bounds, seeds, and difficulties.
get_jobs_test() ->
	?assertEqual(#jobs{}, get_jobs(<<>>, [], maps:new())),

	%% The client's previous output 'o' is the latest job => return session
	%% metadata only, no jobs.
	?assertEqual(#jobs{ next_seed = ns, interval_number = in,
			next_vdf_difficulty = nvd },
			get_jobs(o, [{ns, in, nvd}], #{ {ns, in, nvd} => [{o, gsn, u, s, d}] })),

	%% Unknown previous output 'a' => return the cached job.
	?assertEqual(#jobs{ jobs = [#job{ output = o, global_step_number = gsn,
					partition_upper_bound = u }],
			partial_diff = d, seed = s, next_seed = ns, interval_number = in,
			next_vdf_difficulty = nvd },
			get_jobs(a, [{ns, in, nvd}], #{ {ns, in, nvd} => [{o, gsn, u, s, d}] })),

	%% d2 /= d (the difficulties are different) => only take the latest job.
	?assertEqual(#jobs{ jobs = [#job{ output = o, global_step_number = gsn,
					partition_upper_bound = u }],
			partial_diff = d, seed = s, next_seed = ns, interval_number = in,
			next_vdf_difficulty = nvd },
			get_jobs(a, [{ns, in, nvd}, {ns2, in2, nvd2}],
					#{ {ns, in, nvd} => [{o, gsn, u, s, d}, {o2, gsn2, u2, s, d2}],
						%% Same difficulty, but a different VDF session => not picked.
						{ns2, in2, nvd2} => [{o3, gsn3, u3, s3, d}] })),

	%% d2 == d => take both.
	?assertEqual(#jobs{ jobs = [#job{ output = o, global_step_number = gsn,
					partition_upper_bound = u },
					#job{ output = o2, global_step_number = gsn2,
							partition_upper_bound = u2 }],
			partial_diff = d, seed = s, next_seed = ns, interval_number = in,
			next_vdf_difficulty = nvd },
			get_jobs(a, [{ns, in, nvd}, {ns2, in2, nvd2}],
					#{ {ns, in, nvd} => [{o, gsn, u, s, d}, {o2, gsn2, u2, s, d}],
						{ns2, in2, nvd2} => [{o2, gsn2, u2, s2, d2}] })),

	%% Take strictly above the previous output.
	?assertEqual(#jobs{ jobs = [#job{ output = o, global_step_number = gsn,
					partition_upper_bound = u }],
			partial_diff = d, seed = s, next_seed = ns, interval_number = in,
			next_vdf_difficulty = nvd },
			get_jobs(o2, [{ns, in, nvd}, {ns2, in2, nvd2}],
					#{ {ns, in, nvd} => [{o, gsn, u, s, d}, {o2, gsn2, u2, s, d}],
						{ns2, in2, nvd2} => [{o2, gsn2, u2, s2, d2}] })).

%% EUnit generator wrapping test_process_partial_solution/0 with mocks for the
%% hashing, block-index, PoA-validation, and node-state dependencies. The
%% mocked ar_node:get_current_diff returns ?MAX_DIFF so no test solution
%% reaches the VDF stage (those paths are covered by process_solution_test_).
process_partial_solution_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, compute_h0,
				fun(O, P, S, M, PD) ->
					crypto:hash(sha256, << O/binary, P:256, S/binary, M/binary,
							PD:8 >>)
				end},
			{ar_block_index, get_block_bounds,
				fun(_Byte) -> {10, 110, << 1:256 >>} end},
			{ar_poa, validate,
				fun(Args) ->
					PoA = #poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					PoA2 = PoA#poa{ chunk = << 0:(262144 * 8) >> },
					CPoA = PoA#poa{ chunk = << 0:(8192 * 8) >>,
							unpacked_chunk = << 1:(262144 * 8) >> },
					case Args of
						{10, _, << 1:256 >>, 100, PoA2, {spora_2_6, << 0:256 >>}, -1,
								not_set} ->
							{true, << 2:256 >>};
						{10, _, << 1:256 >>, 100, CPoA, {composite, << 0:256 >>, 1}, 30,
								not_set} ->
							{true, << 2:256 >>};
						_ ->
							false
					end
				end},
			{ar_node, get_current_diff, fun() -> {?MAX_DIFF, ?MAX_DIFF} end},
			{ar_node, get_height, fun() -> 0 end}],
		fun test_process_partial_solution/0
	).
%% Table-driven test of process_partial_solution/2. Builds valid fixtures for
%% both legacy (packing difficulty 0) and composite (difficulty 1) solutions,
%% then checks every rejection path plus the accepted paths. The mocked
%% ar_poa:validate only accepts the specific proof shapes constructed in
%% process_partial_solution_test_/0.
test_process_partial_solution() ->
	Zero = << 0:256 >>,
	Zero48 = << 0:(8*48) >>,
	%% Legacy (difficulty 0) fixtures.
	H0 = ar_block:compute_h0(Zero, 0, Zero48, Zero, 0),
	SolutionHQuick = ar_block:compute_solution_h(H0, Zero),
	C = << 0:(262144 * 8) >>,
	{H1, Preimage1} = ar_block:compute_h1(H0, 1, C),
	SolutionH = ar_block:compute_solution_h(H0, Preimage1),
	{RecallRange1Start, RecallRange2Start} = ar_block:get_recall_range(H0, 0, 1),
	RecallByte1 = RecallRange1Start + 1 * ?DATA_CHUNK_SIZE,
	{H2, Preimage2} = ar_block:compute_h2(H1, C, H0),
	RecallByte2 = RecallRange2Start + 1 * ?DATA_CHUNK_SIZE,
	PoA = #poa{ chunk = C },
	%% Composite (difficulty 1) fixtures.
	CompositeSubChunk = << 0:(8192 * 8) >>,
	CPoA = #poa{ chunk = CompositeSubChunk },
	CH0 = ar_block:compute_h0(Zero, 0, Zero48, Zero, 1),
	{CH1, CPreimage1} = ar_block:compute_h1(CH0, 30, CompositeSubChunk),
	CSolutionH = ar_block:compute_solution_h(CH0, CPreimage1),
	{CRecallRange1Start, CRecallRange2Start} = ar_block:get_recall_range(CH0, 0, 1),
	CRecallByte1 = CRecallRange1Start,
	{CH2, CPreimage2} = ar_block:compute_h2(CH1, CompositeSubChunk, CH0),
	CRecallByte2 = CRecallRange2Start,
	TestCases = [
		%% Proof size checks (ar_block:validate_proof_size/1).
		{"Bad proof size 0",
			#mining_solution{ poa1 = #poa{} }, % Empty chunk.
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad proof size 1",
			#mining_solution{ poa1 = #poa{ chunk = C,
					tx_path = << 0:(2177 * 8) >> } },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad proof size 2",
			#mining_solution{ poa1 = PoA,
					poa2 = #poa{ chunk = C, tx_path = << 0:(2177 * 8) >> } },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad proof size 3",
			#mining_solution{ poa1 = #poa{ chunk = C,
					data_path = << 0:(349505 * 8) >> } },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad proof size 4",
			#mining_solution{ poa1 = PoA,
					poa2 = #poa{ chunk = C, data_path = << 0:(349505 * 8) >> } },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		%% Fixed-size field checks (process_partial_solution_field_size/2).
		{"Bad field size 1",
			#mining_solution{ next_seed = <<>>, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad field size 2",
			#mining_solution{ seed = <<>>, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad field size 3",
			#mining_solution{ preimage = <<>>, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad field size 4",
			#mining_solution{ mining_address = <<>>, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad field size 5",
			#mining_solution{ nonce_limiter_output = <<>>, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad field size 6",
			#mining_solution{ solution_hash = <<>>, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		%% Chunk size checks (assert_chunk_sizes/1).
		{"Bad field size 7",
			#mining_solution{ poa1 = #poa{
					chunk = << 0:((?DATA_CHUNK_SIZE + 1) * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad field size 8",
			#mining_solution{ poa1 = PoA,
					poa2 = #poa{ chunk = << 0:((?DATA_CHUNK_SIZE + 1) * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad partition number",
			#mining_solution{ partition_number = 1, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad nonce",
			#mining_solution{ poa1 = PoA,
					%% We have 2 nonces per recall range (packing diff = 0) in debug mode.
					nonce = 2 },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad quick pow",
			#mining_solution{ poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_wrong_hash">> }},
		{"Bad pow",
			#mining_solution{ nonce = 1, solution_hash = SolutionHQuick,
					preimage = Preimage1, partition_upper_bound = 1,
					recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_wrong_hash">> }},
		{"Bad partition upper bound",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage1, partition_upper_bound = 0,
					recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		%% PoA validation failures (mocked ar_poa:validate rejects these shapes).
		{"Bad poa 1",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage1, partition_upper_bound = 1, poa1 = PoA },
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad poa 2",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage1, partition_upper_bound = 1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Bad poa 3",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage1, partition_upper_bound = 1,
					recall_byte1 = RecallByte1,
					poa2 = #poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Two-chunk bad poa 1",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage1, partition_upper_bound = 1,
					recall_byte1 = RecallByte1, recall_byte2 = 0,
					poa2 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Two-chunk bad poa 2",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage2, partition_upper_bound = 1,
					recall_byte1 = RecallByte1, recall_byte2 = 0,
					poa2 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_wrong_hash">> }},
		{"Two-chunk bad poa 3",
			#mining_solution{ nonce = 1, solution_hash = H2,
					preimage = Preimage2, partition_upper_bound = 1,
					recall_byte1 = RecallByte1, recall_byte2 = 0,
					poa2 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		%% Happy paths (network diff is mocked to ?MAX_DIFF, so the pipeline
		%% stops at "accepted" without touching VDF validation).
		{"Accepted",
			#mining_solution{ nonce = 1, solution_hash = SolutionH,
					preimage = Preimage1, partition_upper_bound = 1,
					recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"accepted">> }},
		{"Accepted 2",
			#mining_solution{ nonce = 1, solution_hash = H2,
					preimage = Preimage2, partition_upper_bound = 1,
					recall_byte1 = RecallByte1, recall_byte2 = RecallByte2,
					poa2 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"accepted">> }},
		%% Composite packing (difficulty 1) cases.
		{"No unpacked chunk",
			#mining_solution{ nonce = 30, solution_hash = CSolutionH,
					preimage = CPreimage1, partition_upper_bound = 1,
					recall_byte1 = CRecallByte1, packing_difficulty = 1,
					poa1 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Accepted packing difficulty=1",
			#mining_solution{ nonce = 30, solution_hash = CSolutionH,
					preimage = CPreimage1, partition_upper_bound = 1,
					recall_byte1 = CRecallByte1, packing_difficulty = 1,
					poa1 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >>,
							unpacked_chunk = << 1:(262144 * 8) >> }},
			#partial_solution_response{ status = <<"accepted">> }},
		{"No second unpacked chunk",
			#mining_solution{ nonce = 30, solution_hash = CH2,
					preimage = CPreimage2, partition_upper_bound = 1,
					recall_byte1 = CRecallByte1, recall_byte2 = CRecallByte2,
					packing_difficulty = 1,
					poa2 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					poa1 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_poa">> }},
		{"Accepted two-chunk packing difficulty=1",
			#mining_solution{ nonce = 30, solution_hash = CH2,
					preimage = CPreimage2, partition_upper_bound = 1,
					recall_byte1 = CRecallByte1, recall_byte2 = CRecallByte2,
					packing_difficulty = 1,
					poa2 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >>,
							unpacked_chunk = << 1:(262144 * 8) >> },
					poa1 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >>,
							unpacked_chunk = << 1:(262144 * 8) >>}},
			#partial_solution_response{ status = <<"accepted">> }}
	],
	lists:foreach(
		fun({Title, Solution, ExpectedReply}) ->
			Ref = make_ref(),
			?assertEqual(ExpectedReply, process_partial_solution(Solution, Ref), Title)
		end,
		TestCases
	).
%% EUnit generator wrapping test_process_solution/0. Unlike
%% process_partial_solution_test_/0, the mocked network difficulty is {0, 0}
%% so every valid solution proceeds into the VDF validation stage; the
%% ar_nonce_limiter mocks use distinguished next_seed values (<< 10..12 >> for
%% not_found, << 1..3 >> for mismatches) to drive each VDF branch.
process_solution_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, compute_h0,
				fun(O, P, S, M, PD) ->
					crypto:hash(sha256, << O/binary, P:256, S/binary, M/binary,
							PD:8 >>)
				end},
			{ar_block_index, get_block_bounds,
				fun(_Byte) -> {10, 110, << 1:256 >>} end},
			{ar_poa, validate,
				fun(Args) ->
					PoA = #poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> },
					PoA2 = PoA#poa{ chunk = << 0:(262144 * 8) >> },
					CPoA = PoA#poa{ chunk = << 0:(8192 * 8) >>,
							unpacked_chunk = << 1:(262144 * 8) >> },
					case Args of
						{10, _, << 1:256 >>, 100, PoA2, {spora_2_6, << 0:256 >>}, -1,
								not_set} ->
							{true, << 2:256 >>};
						{10, _, << 1:256 >>, 100, CPoA, {composite, << 0:256 >>, 2}, 31,
								not_set} ->
							{true, << 2:256 >>};
						_ ->
							false
					end
				end},
			{ar_node, get_current_diff, fun() -> {0, 0} end},
			{ar_node, get_height, fun() -> 0 end},
			{ar_nonce_limiter, get_step_checkpoints,
				fun(S, {N, SIN, D}) ->
					case {S, N, SIN, D} of
						{0, << 10:(48*8) >>, 0, 0} ->
							%% Test not found.
							not_found;
						{0, << 3:(48*8) >>, 0, 0} ->
							%% Test output mismatch (<< 1:256 >> /= << 0:256 >>).
							[<< 1:256 >>];
						_ ->
							[<< 0:256 >>]
					end
				end},
			{ar_nonce_limiter, get_seed,
				fun({N, SIN, D}) ->
					case {N, SIN, D} of
						{<< 11:(48*8) >>, 0, 0} ->
							%% Test not_found.
							not_found;
						{<< 2:(48*8) >>, 0, 0} ->
							%% Test seed mismatch (<< 3:(48*8) >> /= << 0:(48*8) >>).
							<< 3:(48*8) >>;
						_ ->
							<< 0:(48*8) >>
					end
				end},
			{ar_nonce_limiter, get_active_partition_upper_bound,
				fun(S, {N, SIN, D}) ->
					case {S, N, SIN, D} of
						{0, << 12:(48*8) >>, 0, 0} ->
							%% Test not_found.
							not_found;
						{0, << 1:(48*8) >>, 0, 0} ->
							%% Test partition upper bound mismatch (2 /= 1).
							2;
						_ ->
							1
					end
				end},
			{ar_events, send, fun(_Type, _Payload) -> ok end},
			{ar_node_worker, found_solution, fun(_, _, _, _) -> ok end}],
		fun test_process_solution/0
	).
%% Table-driven test of the VDF stage of process_partial_solution/2. The
%% next_seed of each case selects a branch in the ar_nonce_limiter mocks set
%% up by process_solution_test_/0; 'noreply' means the solution was handed to
%% the (mocked) ar_node_worker:found_solution/4.
test_process_solution() ->
	Zero = << 0:256 >>,
	Zero48 = << 0:(48*8) >>,
	C = << 0:(262144 * 8) >>,
	%% Legacy (difficulty 0) fixtures.
	H0 = ar_block:compute_h0(Zero, 0, Zero48, Zero, 0),
	{_H1, Preimage1} = ar_block:compute_h1(H0, 1, C),
	SolutionH = ar_block:compute_solution_h(H0, Preimage1),
	{RecallRange1Start, _RecallRange2Start} = ar_block:get_recall_range(H0, 0, 1),
	RecallByte1 = RecallRange1Start + 1 * ?DATA_CHUNK_SIZE,
	PoA = #poa{ chunk = C },
	%% Composite (difficulty 2) fixtures.
	CompositeSubChunk = << 0:(8192 * 8) >>,
	CPoA = #poa{ chunk = CompositeSubChunk },
	CH0 = ar_block:compute_h0(Zero, 0, Zero48, Zero, 2),
	{_CH1, CPreimage1} = ar_block:compute_h1(CH0, 31, CompositeSubChunk),
	CSolutionH = ar_block:compute_solution_h(CH0, CPreimage1),
	{CRecallRange1Start, _CRecallRange2Start} = ar_block:get_recall_range(CH0, 0, 1),
	CRecallByte1 = CRecallRange1Start,
	TestCases = [
		%% next_seed = 10/11/12 => the respective VDF lookup returns not_found.
		{"VDF not found",
			#mining_solution{ next_seed = << 10:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_vdf_not_found">> }},
		{"VDF not found 2",
			#mining_solution{ next_seed = << 11:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_vdf_not_found">> }},
		{"VDF not found 3",
			#mining_solution{ next_seed = << 12:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_vdf_not_found">> }},
		%% next_seed = 1/2/3 => upper bound / seed / output mismatch.
		{"Bad VDF 1",
			#mining_solution{ next_seed = << 1:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_vdf">> }},
		{"Bad VDF 2",
			#mining_solution{ next_seed = << 2:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_vdf">> }},
		{"Bad VDF 3",
			#mining_solution{ next_seed = << 3:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			#partial_solution_response{ status = <<"rejected_bad_vdf">> }},
		{"Accepted",
			#mining_solution{ next_seed = << 4:(48*8) >>, nonce = 1,
					solution_hash = SolutionH, preimage = Preimage1,
					partition_upper_bound = 1, recall_byte1 = RecallByte1,
					poa1 = PoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >> }},
			noreply},
		{"Accepted packing diff=2",
			#mining_solution{ next_seed = << 4:(48*8) >>, nonce = 31,
					solution_hash = CSolutionH, preimage = CPreimage1,
					partition_upper_bound = 1, recall_byte1 = CRecallByte1,
					packing_difficulty = 2,
					poa1 = CPoA#poa{ tx_path = << 0:(2176 * 8) >>,
							data_path = << 0:(349504 * 8) >>,
							unpacked_chunk = << 1:(262144 * 8) >> }},
			%% The difficulty is about 32 times higher now (because we can try 32x nonces).
			%% However, the recall range reduction (1 / (4 (base) * 2 (packing diff)))
			%% make it only about 4 times higher.
			%% The inputs are deterministic.
			noreply}
	],
	lists:foreach(
		fun({Title, Solution, ExpectedReply}) ->
			Ref = make_ref(),
			?assertEqual(ExpectedReply, process_partial_solution(Solution, Ref), Title)
		end,
		TestCases
	).

================================================
FILE: apps/arweave/src/ar_pool_cm_job_poller.erl
================================================
%% @doc Periodically fetches pool coordinated-mining jobs from the pool on
%% behalf of the CM cluster (only active on a node that is both a pool client
%% and the CM exit peer).
-module(ar_pool_cm_job_poller).

-behaviour(gen_server).
-export([start_link/0]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_pool.hrl").
-include_lib("eunit/include/eunit.hrl").

%% The server keeps no state; polling is driven by self-casts.
-record(state, {}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% Kick off the polling loop only when this node is a pool client AND the
%% coordinated-mining exit peer.
init([]) ->
	case {ar_pool:is_client(), ar_coordination:is_exit_peer()} of
		{true, true} ->
			gen_server:cast(self(), fetch_cm_jobs);
		_ ->
			%% If we are a CM miner and not an exit peer, our exit peer will push
			%% the pool CM jobs to us.
			ok
	end,
	{ok, #state{}}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Fetch CM jobs from the pool, fan them out to the CM peers, process them
%% locally, then reschedule (with a shorter retry delay on failure).
handle_cast(fetch_cm_jobs, State) ->
	Peer = ar_pool:pool_peer(),
	Partitions = ar_coordination:get_cluster_partitions_list(),
	PartitionJobs = #pool_cm_jobs{ partitions = Partitions },
	case ar_http_iface_client:get_pool_cm_jobs(Peer, PartitionJobs) of
		{ok, Jobs} ->
			push_cm_jobs_to_cm_peers(Jobs),
			ar_pool:process_cm_jobs(Jobs, Peer),
			ar_util:cast_after(?FETCH_CM_JOBS_FREQUENCY_MS, self(), fetch_cm_jobs);
		{error, Error} ->
			?LOG_WARNING([{event, failed_to_fetch_pool_cm_jobs},
					{error, io_lib:format("~p", [Error])}]),
			ar_util:cast_after(?FETCH_CM_JOBS_RETRY_MS, self(), fetch_cm_jobs)
	end,
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.
terminate(Reason, _State) ->
	?LOG_INFO([
		{module, ?MODULE},
		{pid, self()},
		{callback, terminate},
		{reason, Reason}
	]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Serialize the jobs once and post the JSON payload to every configured
%% coordinated-mining peer.
push_cm_jobs_to_cm_peers(Jobs) ->
	{ok, Config} = arweave_config:get_env(),
	Peers = Config#config.cm_peers,
	Payload = ar_serialize:jsonify(ar_serialize:pool_cm_jobs_to_json_struct(Jobs)),
	push_cm_jobs_to_cm_peers(Payload, Peers).

%% Fire-and-forget: each POST runs in its own process so a slow peer does not
%% block the poller.
push_cm_jobs_to_cm_peers(_Payload, []) ->
	ok;
push_cm_jobs_to_cm_peers(Payload, [Peer | Peers]) ->
	spawn(fun() -> ar_http_iface_client:post_pool_cm_jobs(Peer, Payload) end),
	push_cm_jobs_to_cm_peers(Payload, Peers).

================================================
FILE: apps/arweave/src/ar_pool_job_poller.erl
================================================
%% @doc Periodically fetches regular pool mining jobs from the pool (or, for a
%% CM miner, from its exit peer) and feeds them to the mining server.
-module(ar_pool_job_poller).

-behaviour(gen_server).

-export([start_link/0]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_pool.hrl").
-include_lib("eunit/include/eunit.hrl").

%% The server keeps no state; polling is driven by self-casts.
-record(state, {}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% Only poll when this node is configured as a pool client.
init([]) ->
	case ar_pool:is_client() of
		true ->
			gen_server:cast(self(), fetch_jobs);
		false ->
			ok
	end,
	{ok, #state{}}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Fetch the jobs newer than the latest one we have, emit them to the mining
%% server, cache them, and reschedule the poll (shorter delay on failure).
handle_cast(fetch_jobs, State) ->
	LatestOutput = (ar_pool:get_latest_job())#job.output,
	Peer = job_source_peer(),
	case ar_http_iface_client:get_jobs(Peer, LatestOutput) of
		{ok, FetchedJobs} ->
			emit_pool_jobs(FetchedJobs),
			ar_pool:cache_jobs(FetchedJobs),
			ar_util:cast_after(?FETCH_JOBS_FREQUENCY_MS, self(), fetch_jobs);
		{error, Reason} ->
			?LOG_WARNING([{event, failed_to_fetch_pool_jobs},
					{error, io_lib:format("~p", [Reason])}]),
			ar_util:cast_after(?FETCH_JOBS_RETRY_MS, self(), fetch_jobs)
	end,
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([
		{module, ?MODULE},
		{pid, self()},
		{callback, terminate},
		{reason, Reason}
	]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Pick the node we fetch jobs from: a CM miner asks its exit peer, while a
%% CM exit node or a standalone pool client asks the pool directly.
job_source_peer() ->
	{ok, Config} = arweave_config:get_env(),
	case {Config#config.coordinated_mining, Config#config.cm_exit_peer} of
		{true, not_set} ->
			%% We are a CM exit node.
			ar_pool:pool_peer();
		{true, ExitPeer} ->
			%% We are a CM miner.
			ExitPeer;
		_ ->
			%% We are a standalone pool client (a non-CM miner and a pool client).
			ar_pool:pool_peer()
	end.

%% Derive the VDF session key from the jobs record and hand every job over to
%% the mining server.
emit_pool_jobs(Jobs) ->
	#jobs{ jobs = JobList, partial_diff = PartialDiff, seed = Seed,
			next_seed = NextSeed, interval_number = IntervalNumber,
			next_vdf_difficulty = NextVDFDifficulty } = Jobs,
	SessionKey = {NextSeed, IntervalNumber, NextVDFDifficulty},
	lists:foreach(
		fun(#job{ output = Output, global_step_number = StepNumber,
				partition_upper_bound = PartitionUpperBound }) ->
			ar_mining_server:add_pool_job(SessionKey, StepNumber, Output,
					PartitionUpperBound, Seed, PartialDiff)
		end,
		JobList).
================================================ FILE: apps/arweave/src/ar_pricing.erl ================================================

%% Pricing utilities: per-GiB-minute price estimation, transaction fees,
%% miner rewards, endowment pool accounting, and redenomination.
-module(ar_pricing).

%% 2.6 exports.
-export([get_price_per_gib_minute/2, get_tx_fee/1,
		get_miner_reward_endowment_pool_debt_supply/1, recalculate_price_per_gib_minute/1,
		redenominate/3, may_be_redenominate/1, get_redenomination_threshold/0,
		get_redenomination_delay_blocks/0]).

%% 2.5 exports.
-export([get_tx_fee/4, get_miner_reward_and_endowment_pool/1, usd_to_ar_rate/1,
		usd_to_ar/3, recalculate_usd_to_ar_rate/1, get_storage_cost/4,
		get_expected_min_decline_rate/6]).

%% For tests.
-export([get_v2_price_per_gib_minute/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_inflation.hrl").
-include_lib("arweave/include/ar_pricing.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Types.
%%%===================================================================

%% A non-negative integer.
-type nonegint() :: non_neg_integer().
%% A rational number as a {Numerator, Denominator} pair.
-type fraction() :: {integer(), integer()}.
%% A USD amount: a float on the pre-2.5 code paths, a fraction on 2.5+ paths.
-type usd() :: float() | fraction().
-type date() :: {nonegint(), nonegint(), nonegint()}.
-type time() :: {nonegint(), nonegint(), nonegint()}.
-type datetime() :: {date(), time()}.

%%%===================================================================
%%% Public interface 2.6+.
%%%===================================================================

%% @doc Return the price per gibibyte minute estimated from the given history of
%% network hash rates and block rewards. The total reward used in calculations
%% is at least 1 Winston, even if all block rewards from the given history are 0.
%% Also, the returned price is always at least 1 Winston.
get_price_per_gib_minute(Height, B) ->
	V2Price = get_v2_price_per_gib_minute(Height, B),
	%% Blend the V2 estimate with the transition schedule for this height.
	ar_pricing_transition:get_transition_price(Height, V2Price).
%% Dispatch to the estimation variant matching the consensus rules active at
%% the given height (the history length is added so that a full window of
%% post-fork data exists before the new formula is used).
get_v2_price_per_gib_minute(Height, B) ->
	OneDifficultyHeight = ar_fork:height_2_7()
			+ ar_block_time_history:history_length(),
	TwoDifficultyHeight = ar_fork:height_2_7_2()
			+ ar_block_time_history:history_length(),
	case Height of
		_ when Height >= TwoDifficultyHeight ->
			get_v2_price_per_gib_minute_two_difficulty(Height, B);
		_ when Height >= OneDifficultyHeight ->
			get_v2_price_per_gib_minute_one_difficulty(Height, B);
		_ ->
			get_v2_price_per_gib_minute_simple(B)
	end.

%% Estimate the price per GiB-minute accounting for distinct PoA1/PoA2
%% difficulties (post-2.7.2 rules).
get_v2_price_per_gib_minute_two_difficulty(Height, B) ->
	{HashRateTotal, RewardTotal, History} = ar_rewards:get_reward_history_totals(B),
	{IntervalTotal, VDFIntervalTotal, OneChunkCount, TwoChunkCount} =
		ar_block_time_history:sum_history(B),
	%% The intent of the SolutionsPerPartitionPerVDFStep is to estimate network replica
	%% count (how many copies of the weave are stored across the network).
	%% The logic behind this is complex - an explanation from @vird:
	%%
	%% 1. Naive solution: If we assume that each miner stores 1 replica, then we
	%%    can trivially calculate the network replica count using the network hashrate
	%%    (which we have) and the weave size (which we also have). However what if on
	%%    average each miner only stores 50% of the weave? In that case each miner will
	%%    get fewer hashes per partition (because they will miss out on 2-chunk solutions
	%%    that fall on the partitions they don't store), and that will push *up* the
	%%    replica count for a given network hashrate. How much to scale up our replica
	%%    count is based on the average replica count per miner.
	%%
	%% 2. Estimate average replica count per miner. Start with this basic assumption:
	%%    the higher the percentage of the weave a miner stores, the more likely they are
	%%    to mine a 2-chunk solution. If a miner has 100% of the weave and if the PoA1 and
	%%    PoA2 difficulties are the same, then, on average, 50% of their solutions will be
	%%    1-chunk, and 50% will be 2-chunk.
	%%
	%%    With this we can use the ratio of observed 2-chunk to 1-chunk solutions to
	%%    estimate the average percentage of the weave each miner stores.
	%%
	%% 3. However, what happens if the PoA1 difficulty is higher than the PoA2 difficulty?
	%%    In that case, we'd expect a miner with 100% of the weave to have fewer 1-chunk
	%%    solutions than 2-chunk solutions. If the PoA1 difficulty is PoA1Mult times higher
	%%    than the PoA2 difficulty, we'd expect the maximum number of solutions to be:
	%%
	%%    (PoA1Mult + 1) * RecallRangeSize div (?DATA_CHUNK_SIZE * PoA1Mult)
	%%
	%%    Or basically 1 1-chunk solution for every PoA1Mult 2-chunk solutions in the
	%%    full-replica case.
	%%
	%% 4. Finally, what if the average miner is not mining a full replica? In that case we
	%%    need to arrive at an equation that weights the 1-chunk and 2-chunk solutions
	%%    differently - and use that to estimate the expected number of solutions per
	%%    partition:
	%%
	%%    EstimatedSolutionsPerPartition =
	%%        (
	%%            RecallRangeSize div PoA1Mult +
	%%            RecallRangeSize * TwoChunkCount div (OneChunkCount * PoA1Mult)
	%%        ) div (?DATA_CHUNK_SIZE)
	%%
	%% The SolutionsPerPartitionPerVDFStep combines that average weave calculation
	%% with the expected number of solutions per partition per VDF step to arrive at a
	%% single number that can be used in the PricePerGiBPerMinute calculation.
	PoA1Mult = ar_difficulty:poa1_diff_multiplier(Height),
	RecallRangeSize = ar_block:get_recall_range_size(0),
	MaxSolutionsPerPartition =
		(PoA1Mult + 1) * RecallRangeSize div (?DATA_CHUNK_SIZE * PoA1Mult),
	SolutionsPerPartitionPerVDFStep =
		case OneChunkCount of
			0 ->
				MaxSolutionsPerPartition;
			_ ->
				%% The following is a version of the EstimatedSolutionsPerPartition
				%% equation mentioned above that has been simplified to limit rounding
				%% errors:
				EstimatedSolutionsPerPartition =
					(OneChunkCount + TwoChunkCount) * RecallRangeSize
						div (?DATA_CHUNK_SIZE * OneChunkCount * PoA1Mult),
				min(MaxSolutionsPerPartition, EstimatedSolutionsPerPartition)
		end,
	%% The following walks through the math of calculating the price per GiB per minute.
	%% However to reduce rounding errors due to divs, the uncommented equation at the
	%% end is used instead. Logically they should be the same. Notably the
	%% '* ?TARGET_BLOCK_TIME' in SolutionsPerPartitionPerBlock and the
	%% 'div ?TARGET_BLOCK_TIME' in PricePerGiBPerSecond cancel each other out.
	%%
	%% SolutionsPerPartitionPerSecond =
	%%     (SolutionsPerPartitionPerVDFStep * VDFIntervalTotal) div IntervalTotal
	%% SolutionsPerPartitionPerBlock = SolutionsPerPartitionPerSecond * ?TARGET_BLOCK_TIME,
	%% EstimatedPartitionCount = max(1, HashRateTotal) div SolutionsPerPartitionPerBlock,
	%% EstimatedDataSizeInGiB =
	%%     EstimatedPartitionCount * (ar_block:partition_size()) div (?GiB),
	%% PricePerGiBPerBlock = max(1, RewardTotal) div EstimatedDataSizeInGiB,
	%% PricePerGiBPerSecond = PricePerGibPerBlock div ?TARGET_BLOCK_TIME
	%% PricePerGiBPerMinute = PricePerGiBPerSecond * 60,
	PricePerGiBPerMinute =
		(
			(SolutionsPerPartitionPerVDFStep * VDFIntervalTotal)
				* max(1, RewardTotal) * (?GiB) * 60
		)
		div
		(
			IntervalTotal * max(1, HashRateTotal) * (ar_block:partition_size())
		),
	log_price_metrics(get_v2_price_per_gib_minute_two_difficulty, Height, History,
			HashRateTotal, RewardTotal, IntervalTotal, VDFIntervalTotal, OneChunkCount,
			TwoChunkCount, SolutionsPerPartitionPerVDFStep, PricePerGiBPerMinute),
	PricePerGiBPerMinute.

%% Estimate the price per GiB-minute assuming equal PoA1/PoA2 difficulty
%% (2.7 rules, pre-2.7.2).
get_v2_price_per_gib_minute_one_difficulty(Height, B) ->
	{HashRateTotal, RewardTotal, History} = ar_rewards:get_reward_history_totals(B),
	{IntervalTotal, VDFIntervalTotal, OneChunkCount, TwoChunkCount} =
		ar_block_time_history:sum_history(B),
	%% The intent of the SolutionsPerPartitionPerVDFStep is to estimate network replica
	%% count (how many copies of the weave are stored across the network).
	%% The logic behind this is complex - an explanation from @vird:
	%%
	%% 1. Naive solution: If we assume that each miner stores 1 replica, then we
	%%    can trivially calculate the network replica count using the network hashrate
	%%    (which we have) and the weave size (which we also have). However what if on
	%%    average each miner only stores 50% of the weave? In that case each miner will
	%%    get fewer hashes per partition (because they will miss out on 2-chunk solutions
	%%    that fall on the partitions they don't store), and that will push *up* the
	%%    replica count for a given network hashrate. How much to scale up our replica
	%%    count is based on the average replica count per miner.
	%%
	%% 2. Estimate average replica count per miner: Start with this basic assumption:
	%%    the higher the percentage of the weave a miner stores, the more likely they are
	%%    to mine a 2-chunk solution. If a miner has 100% of the weave, then, on average,
	%%    50% of their solutions will be 1-chunk, and 50% will be 2-chunk.
	%%
	%%    With this we can use the ratio of observed 2-chunk to 1-chunk solutions to
	%%    estimate the average percentage of the weave each miner stores.
	%%
	%% The SolutionsPerPartitionPerVDFStep combines that average weave calculation
	%% with the expected number of solutions per partition per VDF step to arrive at a
	%% single number that can be used in the PricePerGiBPerMinute calculation.
	RecallRangeSize = ?LEGACY_RECALL_RANGE_SIZE,
	SolutionsPerPartitionPerVDFStep =
		case OneChunkCount of
			0 ->
				2 * RecallRangeSize div (?DATA_CHUNK_SIZE);
			_ ->
				min(2 * RecallRangeSize,
						RecallRangeSize
							+ RecallRangeSize * TwoChunkCount div OneChunkCount)
					div ?DATA_CHUNK_SIZE
		end,
	%% The following walks through the math of calculating the price per GiB per minute.
	%% However to reduce rounding errors due to divs, the uncommented equation at the
	%% end is used instead. Logically they should be the same. Notably the
	%% '* ?TARGET_BLOCK_TIME' in SolutionsPerPartitionPerBlock and the
	%% 'div ?TARGET_BLOCK_TIME' in PricePerGiBPerSecond cancel each other out.
	%%
	%% SolutionsPerPartitionPerSecond =
	%%     (SolutionsPerPartitionPerVDFStep * VDFIntervalTotal) div IntervalTotal
	%% SolutionsPerPartitionPerBlock = SolutionsPerPartitionPerSecond * ?TARGET_BLOCK_TIME,
	%% EstimatedPartitionCount = max(1, HashRateTotal) div SolutionsPerPartitionPerBlock,
	%% EstimatedDataSizeInGiB =
	%%     EstimatedPartitionCount * (ar_block:partition_size()) div (?GiB),
	%% PricePerGiBPerBlock = max(1, RewardTotal) div EstimatedDataSizeInGiB,
	%% PricePerGiBPerSecond = PricePerGibPerBlock div ?TARGET_BLOCK_TIME
	%% PricePerGiBPerMinute = PricePerGiBPerSecond * 60,
	PricePerGiBPerMinute =
		(
			(SolutionsPerPartitionPerVDFStep * VDFIntervalTotal)
				* max(1, RewardTotal) * (?GiB) * 60
		)
		div
		(
			IntervalTotal * max(1, HashRateTotal) * (ar_block:partition_size())
		),
	log_price_metrics(get_v2_price_per_gib_minute_one_difficulty, Height, History,
			HashRateTotal, RewardTotal, IntervalTotal, VDFIntervalTotal, OneChunkCount,
			TwoChunkCount, SolutionsPerPartitionPerVDFStep, PricePerGiBPerMinute),
	PricePerGiBPerMinute.

%% Estimate the price per GiB-minute with the simplest (pre-2.7) model,
%% which ignores the 1-chunk/2-chunk solution ratio.
get_v2_price_per_gib_minute_simple(B) ->
	{HashRateTotal, RewardTotal, _History} = ar_rewards:get_reward_history_totals(B),
	%% 2 recall ranges per partition per second.
	SolutionsPerPartitionPerSecond = 2 * (?LEGACY_RECALL_RANGE_SIZE) div (?DATA_CHUNK_SIZE),
	SolutionsPerPartitionPerMinute = SolutionsPerPartitionPerSecond * 60,
	SolutionsPerPartitionPerBlock = SolutionsPerPartitionPerMinute * 2,
	%% Estimated partition count = hash rate / 2 / solutions per partition per minute.
	%% 2 minutes is the average block time.
	%% Estimated data size = estimated partition count * partition size.
	%% Estimated price per gib minute = total block reward / estimated data size
	%% in gibibytes.
	(max(1, RewardTotal) * (?GiB) * SolutionsPerPartitionPerBlock)
		div (max(1, HashRateTotal)
			* (ar_block:partition_size())
			* 2	% The reward is paid every two minutes whereas we are calculating
				% the minute rate here.
	).
%% @doc Return the minimum required transaction fee for the given number of
%% total bytes stored and gibibyte minute price.
get_tx_fee(Args) ->
	{DataSize, GiBMinutePrice, KryderPlusRateMultiplier, Height} = Args,
	FirstYearPrice = DataSize * GiBMinutePrice * 60 * 24 * 365,
	{LnDecayDividend, LnDecayDivisor} = ?LN_PRICE_DECAY_ANNUAL,
	%% Perpetual price = integral of the annual decay curve; the LN decay
	%% dividend is negative, hence the leading minus sign.
	PerpetualPrice = {-FirstYearPrice * LnDecayDivisor * KryderPlusRateMultiplier
			* (?N_REPLICATIONS(Height)), LnDecayDividend * (?GiB)},
	MinerShare = ar_fraction:multiply(PerpetualPrice,
			?MINER_MINIMUM_ENDOWMENT_CONTRIBUTION_SHARE),
	{Dividend, Divisor} = ar_fraction:add(PerpetualPrice, MinerShare),
	Dividend div Divisor.

%% @doc Return the block reward, the new endowment pool, and the new debt supply.
get_miner_reward_endowment_pool_debt_supply(Args) ->
	{EndowmentPool, DebtSupply, TXs, WeaveSize, Height, GiBMinutePrice,
			KryderPlusRateMultiplierLatch, KryderPlusRateMultiplier, Denomination,
			BlockInterval} = Args,
	Inflation = redenominate(ar_inflation:calculate(Height), 1, Denomination),
	%% The reward expected to cover storing ?N_REPLICATIONS copies of the weave
	%% for the duration of the block interval.
	ExpectedReward = (?N_REPLICATIONS(Height)) * WeaveSize * GiBMinutePrice
			* BlockInterval div (60 * ?GiB),
	{EndowmentPoolFeeShare, MinerFeeShare} = distribute_transaction_fees2(TXs, Denomination),
	BaseReward = Inflation + MinerFeeShare,
	EndowmentPool2 = EndowmentPool + EndowmentPoolFeeShare,
	case BaseReward >= ExpectedReward of
		true ->
			{BaseReward, EndowmentPool2, DebtSupply, KryderPlusRateMultiplierLatch,
					KryderPlusRateMultiplier, EndowmentPoolFeeShare, 0};
		false ->
			%% The base reward is insufficient - take the difference from the
			%% endowment pool, going into debt if the pool is drained.
			Take = ExpectedReward - BaseReward,
			{EndowmentPool3, DebtSupply2} =
				case Take > EndowmentPool2 of
					true ->
						{0, DebtSupply + Take - EndowmentPool2};
					false ->
						{EndowmentPool2 - Take, DebtSupply}
				end,
			%% The latch opens (and the multiplier doubles) when the pool is
			%% drained; it closes once the pool recovers above the threshold.
			{KryderPlusRateMultiplierLatch2, KryderPlusRateMultiplier2} =
				case {Take > EndowmentPool2, KryderPlusRateMultiplierLatch} of
					{true, 0} ->
						{1, KryderPlusRateMultiplier * 2};
					{false, 1} ->
						Threshold = redenominate(?RESET_KRYDER_PLUS_LATCH_THRESHOLD, 1,
								Denomination),
						case EndowmentPool3 > Threshold of
							true ->
								{0, KryderPlusRateMultiplier};
							false ->
								{1, KryderPlusRateMultiplier}
						end;
					_ ->
						{KryderPlusRateMultiplierLatch, KryderPlusRateMultiplier}
				end,
			{BaseReward + Take, EndowmentPool3, DebtSupply2, KryderPlusRateMultiplierLatch2,
					KryderPlusRateMultiplier2, EndowmentPoolFeeShare, Take}
	end.

%% @doc Return the denominated amount. Every denomination step scales the
%% amount by 1000.
redenominate(Amount, 0, _Denomination) ->
	Amount;
redenominate(Amount, BaseDenomination, BaseDenomination) ->
	Amount;
redenominate(Amount, BaseDenomination, Denomination) when Denomination > BaseDenomination ->
	redenominate(Amount * 1000, BaseDenomination, Denomination - 1).

%% @doc Return the threshold for scheduling redenomination.
%% On LOCALNET the value may be overridden via the application environment.
-ifdef(LOCALNET).
get_redenomination_threshold() ->
	case application:get_env(arweave, redenomination_threshold) of
		{ok, Value} when is_integer(Value), Value > 0 ->
			Value;
		_ ->
			?REDENOMINATION_THRESHOLD
	end.
-else.
get_redenomination_threshold() ->
	?REDENOMINATION_THRESHOLD.
-endif.

%% @doc Return the delay (in blocks) before redenomination takes effect.
%% On LOCALNET the value may be overridden via the application environment.
-ifdef(LOCALNET).
get_redenomination_delay_blocks() ->
	case application:get_env(arweave, redenomination_delay_blocks) of
		{ok, Value} when is_integer(Value), Value > 0 ->
			Value;
		_ ->
			?REDENOMINATION_DELAY_BLOCKS
	end.
-else.
get_redenomination_delay_blocks() ->
	?REDENOMINATION_DELAY_BLOCKS.
-endif.

%% @doc Increase the amount of base currency units in the system if
%% the available supply is too low.
may_be_redenominate(B) ->
	#block{ height = Height, denomination = Denomination,
			redenomination_height = RedenominationHeight } = B,
	case ar_pricing_transition:is_v2_pricing_height(Height + 1) of
		false ->
			{Denomination, RedenominationHeight};
		true ->
			may_be_redenominate2(B)
	end.
%% Apply a scheduled redenomination if its height has been reached, or check
%% whether a new one should be scheduled.
may_be_redenominate2(B) ->
	#block{ height = Height, denomination = Denomination,
			redenomination_height = RedenominationHeight } = B,
	case Height == RedenominationHeight of
		true ->
			{Denomination + 1, RedenominationHeight};
		false ->
			case Height < RedenominationHeight of
				true ->
					%% A redenomination is already scheduled.
					{Denomination, RedenominationHeight};
				false ->
					may_be_redenominate3(B)
			end
	end.

%% Schedule a redenomination when the circulating supply (total supply plus
%% debt minus the locked endowment) falls below the threshold.
may_be_redenominate3(B) ->
	#block{ height = Height, debt_supply = DebtSupply, reward_pool = EndowmentPool,
			denomination = Denomination,
			redenomination_height = RedenominationHeight } = B,
	TotalSupply = get_total_supply(Denomination),
	Threshold = get_redenomination_threshold(),
	case TotalSupply + DebtSupply - EndowmentPool < Threshold of
		true ->
			{Denomination, Height + get_redenomination_delay_blocks()};
		false ->
			{Denomination, RedenominationHeight}
	end.

%% @doc Return the new current and scheduled prices per byte minute.
recalculate_price_per_gib_minute(B) ->
	#block{ height = PrevHeight, price_per_gib_minute = Price,
			scheduled_price_per_gib_minute = ScheduledPrice } = B,
	Height = PrevHeight + 1,
	Fork_2_7 = ar_fork:height_2_7(),
	Fork_2_7_1 = ar_fork:height_2_7_1(),
	case Height of
		Fork_2_7 ->
			%% At the 2.7 fork both prices are reset to the static transition price.
			{ar_pricing_transition:static_price(), ar_pricing_transition:static_price()};
		Height when Height < Fork_2_7_1 ->
			case is_price_adjustment_height(Height) of
				false ->
					{Price, ScheduledPrice};
				true ->
					%% price_per_gib_minute = scheduled_price_per_gib_minute
					%% scheduled_price_per_gib_minute = get_price_per_gib_minute() capped
					%% to 0.5x to 2x of old price_per_gib_minute
					Price2 = min(Price * 2, get_price_per_gib_minute(Height, B)),
					Price3 = max(Price div 2, Price2),
					{ScheduledPrice, Price3}
			end;
		_ ->
			case is_price_adjustment_height(Height) of
				false ->
					{Price, ScheduledPrice};
				true ->
					%% price_per_gib_minute = scheduled_price_per_gib_minute
					%% scheduled_price_per_gib_minute =
					%%     get_price_per_gib_minute()
					%%     EMA'ed with scheduled_price_per_gib_minute at 0.1 alpha
					%%     and then capped to 0.5x to 2x of scheduled_price_per_gib_minute
					TargetPrice = get_price_per_gib_minute(Height, B),
					EMAPrice = (9 * ScheduledPrice + TargetPrice) div 10,
					Price2 = min(ScheduledPrice * 2, EMAPrice),
					Price3 = max(ScheduledPrice div 2, Price2),
					?LOG_DEBUG([{event, recalculate_price_per_gib_minute},
						{height, Height}, {old_price, Price},
						{scheduled_price, ScheduledPrice}, {target_price, TargetPrice},
						{ema_price, EMAPrice}, {capped_price, Price3}]),
					{ScheduledPrice, Price3}
			end
	end.

%% True when the given height is a price (re)adjustment height.
is_price_adjustment_height(Height) ->
	Height rem ?PRICE_ADJUSTMENT_FREQUENCY == 0.

%% Split the fees of the given transactions between the endowment pool and
%% the miner (2.6+ rules), redenominating each fee first.
distribute_transaction_fees2(TXs, Denomination) ->
	distribute_transaction_fees2(TXs, 0, 0, Denomination).

distribute_transaction_fees2([], EndowmentPoolTotal, MinerTotal, _Denomination) ->
	{EndowmentPoolTotal, MinerTotal};
distribute_transaction_fees2([TX | TXs], EndowmentPoolTotal, MinerTotal, Denomination) ->
	TXFee = redenominate(TX#tx.reward, TX#tx.denomination, Denomination),
	{Dividend, Divisor} = ?MINER_FEE_SHARE,
	MinerFee = TXFee * Dividend div Divisor,
	EndowmentPoolTotal2 = EndowmentPoolTotal + TXFee - MinerFee,
	MinerTotal2 = MinerTotal + MinerFee,
	distribute_transaction_fees2(TXs, EndowmentPoolTotal2, MinerTotal2, Denomination).

%% Return the total supply expressed in the given denomination.
get_total_supply(Denomination) ->
	redenominate(?TOTAL_SUPPLY, 1, Denomination).

%%%===================================================================
%%% Public interface 2.5.
%%%===================================================================

%% @doc Return the perpetual cost of storing the given amount of data.
get_storage_cost(DataSize, Timestamp, Rate, Height) ->
	Size = ?TX_SIZE_BASE + DataSize,
	PerpetualGBStorageCost = usd_to_ar(
		get_perpetual_gb_cost_at_timestamp(Timestamp, Height),
		Rate,
		Height
	),
	StorageCost = max(1, PerpetualGBStorageCost div (?MiB * 1024)) * Size,
	%% Hashing is assumed to cost as much as storage.
	HashingCost = StorageCost,
	StorageCost + HashingCost.

%% @doc Calculate the transaction fee.
%% 2.5-era fee: the maintenance (storage + hashing) cost plus the miner's share.
get_tx_fee(DataSize, Timestamp, Rate, Height) ->
	MaintenanceCost = get_storage_cost(DataSize, Timestamp, Rate, Height),
	MinerFeeShare = get_miner_fee_share(MaintenanceCost, Height),
	MaintenanceCost + MinerFeeShare.

%% @doc Return the miner reward and the new endowment pool.
%% When the reward address is unclaimed, all fees go to the pool.
get_miner_reward_and_endowment_pool({Pool, TXs, unclaimed, _, _, _, _}) ->
	{0, Pool + lists:sum([TX#tx.reward || TX <- TXs])};
get_miner_reward_and_endowment_pool(Args) ->
	{Pool, TXs, _Addr, WeaveSize, Height, Timestamp, Rate} = Args,
	Inflation = trunc(ar_inflation:calculate(Height)),
	{PoolFeeShare, MinerFeeShare} = distribute_transaction_fees(TXs, Height),
	BaseReward = Inflation + MinerFeeShare,
	StorageCostPerGBPerBlock = usd_to_ar(
		get_gb_cost_per_block_at_timestamp(Timestamp, Height),
		Rate,
		Height
	),
	%% The cost of storing the current weave for one block.
	Burden = WeaveSize * StorageCostPerGBPerBlock div (?MiB * 1024),
	Pool2 = Pool + PoolFeeShare,
	case BaseReward >= Burden of
		true ->
			{BaseReward, Pool2};
		false ->
			%% Top up the reward from the endowment pool, but never below zero.
			Take = min(Pool2, Burden - BaseReward),
			{BaseReward + Take, Pool2 - Take}
	end.

%% @doc Return the effective USD to AR rate corresponding to the given block
%% considering its previous block.
usd_to_ar_rate(#block{ height = PrevHeight } = PrevB) ->
	Height_2_5 = ar_fork:height_2_5(),
	Height = PrevHeight + 1,
	case PrevHeight < Height_2_5 of
		true ->
			?INITIAL_USD_TO_AR(Height)();
		false ->
			PrevB#block.usd_to_ar_rate
	end.

%% @doc Return the amount of AR the given number of USD is worth.
usd_to_ar(USD, Rate, Height) when is_number(USD) ->
	usd_to_ar({USD, 1}, Rate, Height);
usd_to_ar({Dividend, Divisor}, Rate, Height) ->
	%% Scale by the ratio of the current inflation to the inflation at the
	%% height where the initial rate was taken.
	InitialInflation = trunc(ar_inflation:calculate(?INITIAL_USD_TO_AR_HEIGHT(Height)())),
	CurrentInflation = trunc(ar_inflation:calculate(Height)),
	{InitialRateDividend, InitialRateDivisor} = Rate,
	trunc(
		Dividend
			* ?WINSTON_PER_AR
			* CurrentInflation
			* InitialRateDividend
	)
		div Divisor
		div InitialInflation
		div InitialRateDivisor.
%% Return the {current, scheduled} USD-to-AR rate pair for the block following
%% the given one. Only valid at or after the 2.5 fork (asserted below).
recalculate_usd_to_ar_rate(#block{ height = PrevHeight } = B) ->
	Height = PrevHeight + 1,
	Fork_2_5 = ar_fork:height_2_5(),
	true = Height >= Fork_2_5,
	case Height > Fork_2_5 of
		false ->
			%% Exactly at the fork: both rates start from the initial rate.
			Rate = ?INITIAL_USD_TO_AR(Height)(),
			{Rate, Rate};
		true ->
			Fork_2_6 = ar_fork:height_2_6(),
			case Height == Fork_2_6 of
				true ->
					{B#block.usd_to_ar_rate, ?FORK_2_6_PRE_TRANSITION_USD_TO_AR_RATE};
				false ->
					recalculate_usd_to_ar_rate2(B)
			end
	end.

%% @doc Return an estimation for the minimum required decline rate making the given
%% Amount (in Winston) sufficient to subsidize storage for Period seconds starting from
%% Timestamp and assuming the given USD to AR rate.
%% When computing the exponent, the function accounts for the first 16 summands in
%% the Taylor series. The fraction is reduced to the 1/1000000 precision.
get_expected_min_decline_rate(Timestamp, Period, Amount, Size, Rate, Height) ->
	{USDDiv1, USDDivisor1} = get_gb_cost_per_year_at_timestamp(Timestamp, Height),
	%% Multiply by 2 to account for hashing costs.
	Sum1 = 2 * usd_to_ar({USDDiv1, USDDivisor1}, Rate, Height),
	{USDDiv2, USDDivisor2} = get_gb_cost_per_year_at_timestamp(Timestamp + Period, Height),
	Sum2 = 2 * usd_to_ar({USDDiv2, USDDivisor2}, Rate, Height),
	%% Sum1 / -logRate - Sum2 / -logRate = Amount
	%% => -logRate = (Sum1 - Sum2) / Amount
	%% => 1 / Rate = exp((Sum1 - Sum2) / Amount)
	%% => Rate = 1 / exp((Sum1 - Sum2) / Amount)
	{ExpDiv, ExpDivisor} = ar_fraction:natural_exponent(
			{(Sum1 - Sum2) * Size, Amount * (?MiB * 1024)}, 16),
	ar_fraction:reduce({ExpDivisor, ExpDiv}, 1000000).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Get the share of the maintenance cost the miner receives for a transaction.
%% Return the miner's share of the maintenance cost. Pre-2.5 uses float
%% arithmetic; 2.5+ uses exact integer arithmetic.
get_miner_fee_share(MaintenanceCost, Height) ->
	{Dividend, Divisor} = ?MINING_REWARD_MULTIPLIER,
	case Height >= ar_fork:height_2_5() of
		false ->
			erlang:trunc(MaintenanceCost * (Dividend / Divisor));
		true ->
			MaintenanceCost * Dividend div Divisor
	end.

%% Split the fees of the given transactions between the endowment pool and
%% the miner (pre-2.6 rules).
distribute_transaction_fees(TXs, Height) ->
	distribute_transaction_fees(TXs, 0, 0, Height).

distribute_transaction_fees([], EndowmentPool, Miner, _Height) ->
	{EndowmentPool, Miner};
distribute_transaction_fees([TX | TXs], EndowmentPool, Miner, Height) ->
	TXFee = TX#tx.reward,
	{Dividend, Divisor} = ?MINING_REWARD_MULTIPLIER,
	MinerFee =
		case Height >= ar_fork:height_2_5() of
			false ->
				erlang:trunc((Dividend / Divisor) * TXFee
						/ ((Dividend / Divisor) + 1));
			true ->
				TXFee * Dividend div (Dividend + Divisor)
		end,
	distribute_transaction_fees(TXs, EndowmentPool + TXFee - MinerFee,
			Miner + MinerFee, Height).

%% @doc Return the cost of storing 1 GB in the network perpetually.
%% Integral of the exponential decay curve k*e^(-at), i.e. k/a.
%% @end
-spec get_perpetual_gb_cost_at_timestamp(Timestamp::integer(), Height::nonegint()) -> usd().
get_perpetual_gb_cost_at_timestamp(Timestamp, Height) ->
	K = get_gb_cost_per_year_at_timestamp(Timestamp, Height),
	get_perpetual_gb_cost(K, Height).

%% Divide the first-year cost by the (negated) natural log of the annual
%% decay rate. Fraction arithmetic on 2.5+, float before.
-spec get_perpetual_gb_cost(Init::usd(), Height::nonegint()) -> usd().
get_perpetual_gb_cost(Init, Height) ->
	case Height >= ar_fork:height_2_5() of
		true ->
			{LnDecayDividend, LnDecayDivisor} = ?LN_PRICE_DECAY_ANNUAL,
			{InitDividend, InitDivisor} = Init,
			%% The LN decay dividend is negative, hence the leading minus sign.
			{-InitDividend * LnDecayDivisor, InitDivisor * LnDecayDividend};
		false ->
			{Dividend, Divisor} = ?PRICE_DECAY_ANNUAL,
			Init / -math:log(Dividend / Divisor)
	end.

%% @doc Return the cost in USD of storing 1 GB per year at the given time.
-spec get_gb_cost_per_year_at_timestamp(Timestamp::integer(), Height::nonegint()) -> usd().
get_gb_cost_per_year_at_timestamp(Timestamp, Height) ->
	Datetime = system_time_to_universal_time(Timestamp, seconds),
	get_gb_cost_per_year_at_datetime(Datetime, Height).
%% @doc Return the cost in USD of storing 1 GB per average block time at the given time.
-spec get_gb_cost_per_block_at_timestamp(integer(), nonegint()) -> usd().
get_gb_cost_per_block_at_timestamp(Timestamp, Height) ->
	Datetime = system_time_to_universal_time(Timestamp, seconds),
	get_gb_cost_per_block_at_datetime(Datetime, Height).

%% @doc Return the cost in USD of storing 1 GB per year.
%% Linearly interpolates between the yearly costs of the surrounding
%% Jun-30-to-Jun-30 interval and scales by the replication count.
-spec get_gb_cost_per_year_at_datetime(DT::datetime(), Height::nonegint()) -> usd().
get_gb_cost_per_year_at_datetime({{Y, M, _}, _} = DT, Height) ->
	PrevY = prev_jun_30_year(Y, M),
	NextY = next_jun_30_year(Y, M),
	FracY = fraction_of_year(PrevY, NextY, DT, Height),
	PrevYCost = usd_p_gby(PrevY, Height),
	NextYCost = usd_p_gby(NextY, Height),
	case Height >= ar_fork:height_2_5() of
		true ->
			%% Fraction arithmetic: the same interpolation as the float branch,
			%% written over a common denominator.
			{FracYDividend, FracYDivisor} = FracY,
			{PrevYCostDividend, PrevYCostDivisor} = PrevYCost,
			{NextYCostDividend, NextYCostDivisor} = NextYCost,
			Dividend = (?N_REPLICATIONS(Height))
				* (
					PrevYCostDividend * NextYCostDivisor * FracYDivisor
					- FracYDividend
						* (
							PrevYCostDividend * NextYCostDivisor
							- NextYCostDividend * PrevYCostDivisor
						)
				),
			Divisor = PrevYCostDivisor * NextYCostDivisor * FracYDivisor,
			{Dividend, Divisor};
		false ->
			CY = PrevYCost - (FracY * (PrevYCost - NextYCost)),
			CY * (?N_REPLICATIONS(Height))
	end.

%% The year of the most recent June 30 at or before month M of year Y.
prev_jun_30_year(Y, M) when M < 7 ->
	Y - 1;
prev_jun_30_year(Y, _M) ->
	Y.

%% The year of the next June 30 after month M of year Y.
next_jun_30_year(Y, M) when M < 7 ->
	Y;
next_jun_30_year(Y, _M) ->
	Y + 1.

%% @doc Return the cost in USD of storing 1 GB per average block time.
-spec get_gb_cost_per_block_at_datetime(DT::datetime(), Height::nonegint()) -> usd().
get_gb_cost_per_block_at_datetime(DT, Height) ->
	case Height >= ar_fork:height_2_5() of
		true ->
			{Dividend, Divisor} = get_gb_cost_per_year_at_datetime(DT, Height),
			{Dividend, Divisor * ar_inflation:blocks_per_year(Height)};
		false ->
			get_gb_cost_per_year_at_datetime(DT, Height)
				/ ar_inflation:blocks_per_year(Height)
	end.

%% @doc Return the cost in USD of storing 1 GB per year. Estimated from empirical data.
%% Assumes a year after 2019 inclusive. Uses data figures for 2018 and 2019.
%% Extrapolates the exponential decay curve k*e^(-at) to future years.
%% @end
-spec usd_p_gby(nonegint(), nonegint()) -> usd().
usd_p_gby(2018, Height) ->
	{Dividend, Divisor} = ?USD_PER_GBY_2018,
	case Height >= ar_fork:height_2_5() of
		true ->
			{Dividend, Divisor};
		false ->
			Dividend / Divisor
	end;
usd_p_gby(2019, Height) ->
	{Dividend, Divisor} = ?USD_PER_GBY_2019,
	case Height >= ar_fork:height_2_5() of
		true ->
			{Dividend, Divisor};
		false ->
			Dividend / Divisor
	end;
usd_p_gby(Y, Height) ->
	case Height >= ar_fork:height_2_5() of
		true ->
			%% Exact fraction arithmetic via the Taylor-series exponent.
			{KDividend, KDivisor} = ?USD_PER_GBY_2019,
			{ADividend, ADivisor} = ?LN_PRICE_DECAY_ANNUAL,
			T = Y - 2019,
			P = ?TX_PRICE_NATURAL_EXPONENT_DECIMAL_FRACTION_PRECISION,
			{EDividend, EDivisor} = ar_fraction:natural_exponent(
					{ADividend * T, ADivisor}, P),
			{EDividend * KDividend, EDivisor * KDivisor};
		false ->
			{Dividend, Divisor} = ?USD_PER_GBY_2019,
			K = Dividend / Divisor,
			{DecayDividend, DecayDivisor} = ?PRICE_DECAY_ANNUAL,
			A = math:log(DecayDividend / DecayDivisor),
			T = Y - 2019,
			K * math:exp(A * T)
	end.

%% @doc Return elapsed time as the fraction of the year
%% between Jun 30th of PrevY and Jun 30th of NextY.
%% @end
-spec fraction_of_year(nonegint(), nonegint(), datetime(), nonegint()) ->
		float() | fraction().
fraction_of_year(PrevY, NextY, {{Y, Mo, D}, {H, Mi, S}}, Height) ->
	Start = calendar:datetime_to_gregorian_seconds({{PrevY, 6, 30}, {23, 59, 59}}),
	Now = calendar:datetime_to_gregorian_seconds({{Y, Mo, D}, {H, Mi, S}}),
	End = calendar:datetime_to_gregorian_seconds({{NextY, 6, 30}, {23, 59, 59}}),
	case Height >= ar_fork:height_2_5() of
		true ->
			{Now - Start, End - Start};
		false ->
			(Now - Start) / (End - Start)
	end.

%% Convert an Erlang system time to a UTC datetime.
%% @doc Convert an Erlang system time in the given unit to a UTC datetime.
%% Uses calendar:system_time_to_universal_time/2 (OTP 21+), resolving the
%% long-standing TODO: it performs exactly the conversion the previous
%% hand-rolled code did (convert to seconds, offset by the 719528 days
%% between year 0 and 1970, then gregorian_seconds_to_datetime/1).
system_time_to_universal_time(Time, TimeUnit) ->
	calendar:system_time_to_universal_time(Time, TimeUnit).

%% Recompute the USD-to-AR rate pair past the 2.5 fork: keep the rates
%% outside adjustment heights, freeze them after the 2.6 fork, otherwise
%% derive the next scheduled rate from the difficulty.
recalculate_usd_to_ar_rate2(#block{ height = PrevHeight } = B) ->
	case is_price_adjustment_height(PrevHeight + 1) of
		false ->
			{B#block.usd_to_ar_rate, B#block.scheduled_usd_to_ar_rate};
		true ->
			Fork_2_6 = ar_fork:height_2_6(),
			true = PrevHeight + 1 /= Fork_2_6,
			case PrevHeight + 1 > Fork_2_6 of
				true ->
					%% Keep the rate fixed after the 2.6 fork till the transition to the
					%% new pricing scheme ends. Then it won't be used any longer.
					{B#block.scheduled_usd_to_ar_rate, B#block.scheduled_usd_to_ar_rate};
				false ->
					recalculate_usd_to_ar_rate3(B)
			end
	end.

%% Derive the next scheduled USD-to-AR rate from the block difficulty,
%% capped to the configured maximum up/down adjustment around the previously
%% scheduled rate.
recalculate_usd_to_ar_rate3(#block{ height = PrevHeight, diff = Diff } = B) ->
	Height = PrevHeight + 1,
	InitialDiff = ar_retarget:switch_to_linear_diff(?INITIAL_USD_TO_AR_DIFF(Height)()),
	MaxDiff = ?MAX_DIFF,
	InitialRate = ?INITIAL_USD_TO_AR(Height)(),
	{Dividend, Divisor} = InitialRate,
	%% Scale the initial rate by the ratio of the remaining difficulty headroom.
	ScheduledRate = {Dividend * (MaxDiff - Diff), Divisor * (MaxDiff - InitialDiff)},
	Rate = B#block.scheduled_usd_to_ar_rate,
	MaxAdjustmentUp = ar_fraction:multiply(Rate, ?USD_TO_AR_MAX_ADJUSTMENT_UP_MULTIPLIER),
	MaxAdjustmentDown = ar_fraction:multiply(Rate,
			?USD_TO_AR_MAX_ADJUSTMENT_DOWN_MULTIPLIER),
	CappedScheduledRate = ar_fraction:reduce(ar_fraction:maximum(
			ar_fraction:minimum(ScheduledRate, MaxAdjustmentUp), MaxAdjustmentDown),
			?USD_TO_AR_FRACTION_REDUCTION_LIMIT),
	?LOG_DEBUG([{event, recalculated_rate},
		{new_rate, ar_util:safe_divide(element(1, Rate), element(2, Rate))},
		{new_scheduled_rate, ar_util:safe_divide(element(1, CappedScheduledRate),
				element(2, CappedScheduledRate))},
		{new_scheduled_rate_without_capping, ar_util:safe_divide(element(1, ScheduledRate),
				element(2, ScheduledRate))},
		{max_adjustment_up, ar_util:safe_divide(element(1, MaxAdjustmentUp),
				element(2,MaxAdjustmentUp))},
		{max_adjustment_down, ar_util:safe_divide(element(1, MaxAdjustmentDown),
				element(2,MaxAdjustmentDown))}]),
	{Rate, CappedScheduledRate}.

%% Export the pricing inputs and result to Prometheus gauges and the debug log.
log_price_metrics(Event, Height, History, HashRateTotal, RewardTotal, IntervalTotal,
		VDFIntervalTotal, OneChunkCount, TwoChunkCount,
		SolutionsPerPartitionPerVDFStep, PricePerGiBPerMinute) ->
	RewardHistoryLength = length(History),
	AverageHashRate = HashRateTotal div RewardHistoryLength,
	EstimatedDataSizeInBytes = network_data_size(Height, AverageHashRate,
			IntervalTotal, VDFIntervalTotal, SolutionsPerPartitionPerVDFStep),
	prometheus_gauge:set(poa_count, [1], OneChunkCount),
	prometheus_gauge:set(poa_count, [2], TwoChunkCount),
	prometheus_gauge:set(v2_price_per_gibibyte_minute, PricePerGiBPerMinute),
	prometheus_gauge:set(network_data_size, EstimatedDataSizeInBytes),
	?LOG_DEBUG([{event, Event}, {height, Height},
		{reward_history_length, RewardHistoryLength},
		{hash_rate_total, HashRateTotal}, {average_hash_rate, AverageHashRate},
		{reward_total, RewardTotal}, {interval_total, IntervalTotal},
		{vdf_interval_total, VDFIntervalTotal}, {one_chunk_count, OneChunkCount},
		{two_chunk_count, TwoChunkCount},
		{solutions_per_partition_per_vdf_step, SolutionsPerPartitionPerVDFStep},
		{data_size, EstimatedDataSizeInBytes}, {price, PricePerGiBPerMinute}]).
%% Estimate the network data size in bytes from the average hash rate and the
%% expected number of solutions per partition per block.
network_data_size(Height, AverageHashRate, IntervalTotal, VDFIntervalTotal,
		SolutionsPerPartitionPerVDFStep) ->
	TargetTime = ar_testnet:target_block_time(Height),
	SolutionsPerPartitionPerBlock =
		(SolutionsPerPartitionPerVDFStep * VDFIntervalTotal * TargetTime)
			div IntervalTotal,
	?LOG_DEBUG([{event, network_data_size},
		{solutions_per_partition_per_vdf_step, SolutionsPerPartitionPerVDFStep},
		{vdf_interval_total, VDFIntervalTotal}, {target_time, TargetTime},
		{interval_total, IntervalTotal},
		{solutions_per_partition_per_block, SolutionsPerPartitionPerBlock}]),
	case SolutionsPerPartitionPerBlock of
		0 ->
			%% Avoid division by zero when the estimate rounds down to nothing.
			0;
		_ ->
			EstimatedPartitionCount = AverageHashRate div SolutionsPerPartitionPerBlock,
			EstimatedPartitionCount * (ar_block:partition_size())
	end.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Run the monotonicity test with the 2.5 fork disabled (infinity) and with
%% every fork height from 0 to 20, to cover both arithmetic branches.
get_gb_cost_per_year_at_datetime_is_monotone_test_() ->
	[
		ar_test_node:test_with_mocked_functions(
			[{ar_fork, height_2_5, fun() -> infinity end}],
			fun test_get_gb_cost_per_year_at_datetime_is_monotone/0, 120)
		|
		[
			ar_test_node:test_with_mocked_functions(
				[{ar_fork, height_2_5, fun() -> Height end}],
				fun test_get_gb_cost_per_year_at_datetime_is_monotone/0, 120)
			|| Height <- lists:seq(0, 20)
		]
	].
%% @doc Assert that get_gb_cost_per_year_at_datetime/2 never increases as the
%% datetime advances (with the height stepped by one per datetime).
test_get_gb_cost_per_year_at_datetime_is_monotone() ->
	InitialDT = {{2019, 1, 1}, {0, 0, 0}},
	FollowingDTs = [
		{{2019, 1, 1}, {10, 0, 0}},
		{{2019, 6, 15}, {0, 0, 0}},
		{{2019, 6, 29}, {23, 59, 59}},
		{{2019, 6, 30}, {0, 0, 0}},
		{{2019, 6, 30}, {23, 59, 59}},
		{{2019, 7, 1}, {0, 0, 0}},
		{{2019, 12, 31}, {23, 59, 59}},
		{{2020, 1, 1}, {0, 0, 0}},
		{{2020, 1, 2}, {0, 0, 0}},
		{{2020, 10, 1}, {0, 0, 0}},
		{{2020, 12, 31}, {23, 59, 59}},
		{{2021, 1, 1}, {0, 0, 0}},
		{{2021, 2, 1}, {0, 0, 0}},
		{{2021, 12, 31}, {23, 59, 59}},
		{{2022, 1, 1}, {0, 0, 0}},
		{{2022, 6, 29}, {23, 59, 59}},
		{{2022, 6, 30}, {0, 0, 0}},
		{{2050, 3, 1}, {10, 10, 10}},
		{{2100, 2, 1}, {0, 0, 0}}
	],
	lists:foldl(
		fun(CurrDT, {PrevDT, PrevHeight}) ->
			%% The cost at the later datetime (and the next height) must not
			%% exceed the cost at the earlier datetime.
			CurrCost = get_gb_cost_per_year_at_datetime(CurrDT, PrevHeight + 1),
			PrevCost = get_gb_cost_per_year_at_datetime(PrevDT, PrevHeight),
			assert_less_than_or_equal_to(CurrCost, PrevCost),
			{CurrDT, PrevHeight + 1}
		end,
		{InitialDT, 0},
		FollowingDTs
	).

%% @doc Assert X1 =< X2 where either side may be a number or a
%% {Dividend, Divisor} rational. Two rationals are compared by
%% cross-multiplication (no division); a rational against a number is compared
%% via integer division of the rational.
assert_less_than_or_equal_to(X1, X2) when is_number(X1), is_number(X2) ->
	?assert(X1 =< X2, io_lib:format("~p is bigger than ~p", [X1, X2]));
assert_less_than_or_equal_to({Dividend1, Divisor1} = X1, X2) when is_number(X2) ->
	?assert((Dividend1 div Divisor1) =< X2,
		io_lib:format("~p is bigger than ~p", [X1, X2]));
assert_less_than_or_equal_to({Dividend1, Divisor1} = X1, {Dividend2, Divisor2} = X2) ->
	?assert(Dividend1 * Divisor2 =< Dividend2 * Divisor1,
		io_lib:format("~p is bigger than ~p", [X1, X2])).

================================================
FILE: apps/arweave/src/ar_pricing_transition.erl
================================================
-module(ar_pricing_transition).

-export([get_transition_price/2, static_price/0, static_pricing_height/0,
	is_v2_pricing_height/1,
	transition_start_2_6_8/0, transition_start_2_7_2/0,
	transition_length_2_6_8/0, transition_length_2_7_2/0,
	transition_length/1
]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_inflation.hrl").
-include_lib("arweave/include/ar_pricing.hrl"). -include_lib("arweave/include/ar_consensus.hrl"). -include_lib("eunit/include/eunit.hrl"). %% @doc This module encapsulates most of the complexity of our multi-phased pricing transition. %% __ %% / \__ %% V2 Pricing..................................__ / %% /| \_/ %% / | %% / | %% / | %% / | %% / | %% 520 (cap).............________/ | %% / | | | %% 400 ______________/ | | | %% | | | | | %% today 2/20/24 3/7/24 11/20/24 11/20/26 %% 2.6.8 2.7.2 2.7.2 2.7.2 %% Transition HF Transition Transition %% Start Start End %%%=================================================================== %%% Constants %%%=================================================================== %% The number of blocks which have to pass since the 2.6.8 fork before we %% start mixing in the new fee calculation method. -ifdef(AR_TEST). -define(PRICE_2_6_8_TRANSITION_START, 2). -else. -ifndef(PRICE_2_6_8_TRANSITION_START). -ifdef(FORKS_RESET). -define(PRICE_2_6_8_TRANSITION_START, 0). -else. %% Target: February 20, 2024 at 2p UTC %% Fork 2.6.8 was published at: May 30, 2023 at 3:35p UTC %% Time between dates: 265 days, 22 hours, 25 minutes %% https://www.timeanddate.com/date/durationresult.html?m1=05&d1=30&y1=2023&m2=02&d2=20&y2=2024&h1=15&i1=35&s1=&h2=14&i2=&s2= %% In seconds: 22,976,700 %% In blocks: 22,976,700 / 128s average block time = 179505 %% Target block: 1189560 + 179505 = 1369065 -define(PRICE_2_6_8_TRANSITION_START, 179505). -endif. -endif. -endif. %% The number of blocks following the 2.6.8 + ?PRICE_2_6_8_TRANSITION_START block %% where the tx fee computation is transitioned to the new calculation method. %% Let TransitionStart = fork 2.6.8 height + ?PRICE_2_6_8_TRANSITION_START. %% Let A = height - TransitionStart + 1. %% Let B = TransitionStart + ?PRICE_2_6_8_TRANSITION_BLOCKS - (height + 1). %% Then price per GiB-minute = price old * B / (A + B) + price new * A / (A + B). -ifdef(AR_TEST). -define(PRICE_2_6_8_TRANSITION_BLOCKS, 2). 
-else. -ifndef(PRICE_2_6_8_TRANSITION_BLOCKS). -ifdef(FORKS_RESET). -define(PRICE_2_6_8_TRANSITION_BLOCKS, 0). -else. -ifndef(PRICE_2_6_8_TRANSITION_BLOCKS). -define(PRICE_2_6_8_TRANSITION_BLOCKS, (30 * 24 * 30 * 18)). % ~18 months. -endif. -endif. -endif. -endif. %% The number of blocks which have to pass since the 2.6.8 fork before we %% remove the price transition cap. %% %% Note: Even though this constant is related to the *2.7.2* fork we count the blocks %% since the *2.6.8* fork for easier comparison with ?PRICE_2_6_8_TRANSITION_START -ifdef(AR_TEST). -define(PRICE_2_7_2_TRANSITION_START, 4). -else. -ifndef(PRICE_2_7_2_TRANSITION_START). -ifdef(FORKS_RESET). -define(PRICE_2_7_2_TRANSITION_START, 0). -else. %% Target: November 20, 2024 at 2p UTC %% Fork 2.6.8 was published at: May 30, 2023 at 3:35p UTC %% Time between dates: 539 days, 22 hours, 25 minutes %% https://www.timeanddate.com/date/durationresult.html?m1=5&d1=30&y1=2023&m2=11&d2=20&y2=2024&h1=15&i1=35&s1=0&h2=14&i2=0&s2=0 %% In seconds: 46,650,300 %% In blocks: 46,650,300 / 128.9s average block time = 361910 %% Target block: 1189560 + 361910 = 1551470 -define(PRICE_2_7_2_TRANSITION_START, 361910). -endif. -endif. -endif. %% The number of blocks following the 2.6.8 + ?PRICE_2_7_2_TRANSITION_START block %% where the tx fee computation is transitioned to the new calculation method. %% Let TransitionStart = fork 2.6.8 height + ?PRICE_2_7_2_TRANSITION_START. %% Let A = height - TransitionStart + 1. %% Let B = TransitionStart + ?PRICE_2_7_2_TRANSITION_START - (height + 1). %% Then price per GiB-minute = price cap * B / (A + B) + price new * A / (A + B). -ifdef(AR_TEST). -define(PRICE_2_7_2_TRANSITION_BLOCKS, 2). -else. -ifndef(PRICE_2_7_2_TRANSITION_BLOCKS). -ifdef(FORKS_RESET). -define(PRICE_2_7_2_TRANSITION_BLOCKS, 0). -else. -ifndef(PRICE_2_7_2_TRANSITION_BLOCKS). -define(PRICE_2_7_2_TRANSITION_BLOCKS, (30 * 24 * 30 * 24)). % ~24 months. -endif. -endif. -endif. -endif. -ifdef(AR_TEST). 
-define(PRICE_PER_GIB_MINUTE_PRE_TRANSITION, 8162).
-else.
%% STATIC_2_6_8_FEE_WINSTON / (200 (years) * 365 (days) * 24 * 60) / 20 (replicas)
%% = ~400 Winston per GiB per minute.
-define(PRICE_PER_GIB_MINUTE_PRE_TRANSITION, 400).
-endif.

-ifdef(AR_TEST).
-define(PRICE_2_7_2_PER_GIB_MINUTE_UPPER_BOUND, 30000).
-else.
-ifndef(PRICE_2_7_2_PER_GIB_MINUTE_UPPER_BOUND).
%% 714_000_000_000 / (200 (years) * 365 (days) * 24 * 60) / 20 (replicas)
%% = ~340 Winston per GiB per minute.
-define(PRICE_2_7_2_PER_GIB_MINUTE_UPPER_BOUND, 340).
-endif.
-endif.

-ifdef(AR_TEST).
-define(PRICE_2_7_2_PER_GIB_MINUTE_LOWER_BOUND, 0).
-else.
-ifndef(PRICE_2_7_2_PER_GIB_MINUTE_LOWER_BOUND).
%% 357_000_000_000 / (200 (years) * 365 (days) * 24 * 60) / 20 (replicas)
%% = ~170 Winston per GiB per minute.
-define(PRICE_2_7_2_PER_GIB_MINUTE_LOWER_BOUND, 170).
-endif.
-endif.

%%%===================================================================
%%% Public Interface
%%%===================================================================

%% @doc There's a complex series of transition phases that we pass through as we move from
%% static pricing to dynamic pricing (aka v2 pricing). This function handles those phases.
%% Before the static pricing height the hardcoded static price is returned; within a
%% transition window the result is a linear interpolation between the phase's start price
%% and V2Price, clamped to the phase's bounds; past the window V2Price is returned as is.
get_transition_price(Height, V2Price) ->
	StaticPricingHeight = ar_pricing_transition:static_pricing_height(),
	PriceTransitionStart = transition_start(Height),
	PriceTransitionEnd = PriceTransitionStart + transition_length(Height),
	StartPrice = transition_start_price(Height),
	UpperBound = transition_upper_bound(Height),
	LowerBound = transition_lower_bound(Height),
	case Height of
		_ when Height < StaticPricingHeight ->
			ar_pricing_transition:static_price();
		_ when Height < PriceTransitionEnd ->
			%% Interpolate between the pre-transition price and the new price.
			Interval1 = Height - PriceTransitionStart,
			Interval2 = PriceTransitionEnd - Height,
			InterpolatedPrice = (StartPrice * Interval2 + V2Price * Interval1)
					div (Interval1 + Interval2),
			%% Clamp the interpolated price to the phase's bounds.
			PricePerGiBPerMinute = ar_util:between(InterpolatedPrice, LowerBound, UpperBound),
			?LOG_DEBUG([{event, get_price_per_gib_minute}, {height, Height},
				{price1, StartPrice}, {price2, V2Price},
				{lower_bound, LowerBound}, {upper_bound, UpperBound},
				{transition_start, PriceTransitionStart},
				{transition_end, PriceTransitionEnd},
				{interval1, Interval1}, {interval2, Interval2},
				{interpolated_price, InterpolatedPrice},
				{price, PricePerGiBPerMinute}]),
			PricePerGiBPerMinute;
		_ ->
			V2Price
	end.

%% @doc The hardcoded price per GiB per minute used before the transition.
static_price() ->
	?PRICE_PER_GIB_MINUTE_PRE_TRANSITION.

%% @doc Height before which we use the hardcoded static price - no phase
%% of the pricing transition has started.
static_pricing_height() ->
	ar_pricing_transition:transition_start_2_6_8().

%% @doc Return true if the given height is a height where the transition to the
%% new pricing algorithm is complete.
is_v2_pricing_height(Height) ->
	Height >= ar_pricing_transition:transition_start_2_7_2()
			+ ar_pricing_transition:transition_length_2_7_2().

%% @doc First block of the 2.6.8 pricing transition phase.
transition_start_2_6_8() ->
	ar_fork:height_2_6_8() + ?PRICE_2_6_8_TRANSITION_START.

transition_start_2_7_2() ->
	%% Note: Even though this constant is related to the *2.7.2* fork we count the blocks
	%% since the *2.6.8* fork for easier comparison with ?PRICE_2_6_8_TRANSITION_START
	ar_fork:height_2_6_8() + ?PRICE_2_7_2_TRANSITION_START.

%% @doc Length, in blocks, of the 2.6.8 transition phase.
transition_length_2_6_8() ->
	?PRICE_2_6_8_TRANSITION_BLOCKS.

%% @doc Length, in blocks, of the 2.7.2 transition phase.
transition_length_2_7_2() ->
	?PRICE_2_7_2_TRANSITION_BLOCKS.

%% @doc Length of the transition phase the given height falls into: the 2.7.2
%% phase once its start is reached, the 2.6.8 phase before that.
transition_length(Height) ->
	TransitionStart_2_7_2 = ar_pricing_transition:transition_start_2_7_2(),
	case Height of
		_ when Height >= TransitionStart_2_7_2 ->
			ar_pricing_transition:transition_length_2_7_2();
		_ ->
			ar_pricing_transition:transition_length_2_6_8()
	end.
%%%===================================================================
%%% Private functions
%%%===================================================================

%% @doc First block of the transition phase the given height falls into.
transition_start(Height) ->
	TransitionStart_2_6_8 = ar_pricing_transition:transition_start_2_6_8(),
	TransitionStart_2_7_2 = ar_pricing_transition:transition_start_2_7_2(),
	%% There are 2 overlapping transition periods:
	%% 2.6.8 Transition Period:
	%% - Start: 2.6.8 + ?PRICE_2_6_8_TRANSITION_START
	%% - Length: 18 months
	%%
	%% 2.7.2 Transition Period:
	%% - Start: 2.6.8 + ?PRICE_2_7_2_TRANSITION_START
	%% - Length: 24 months
	%%
	%% The 2.7.2 transition period starts in the middle of the 2.6.8 transition period and
	%% replaces it.
	case Height of
		_ when Height >= TransitionStart_2_7_2 ->
			TransitionStart_2_7_2;
		_ ->
			TransitionStart_2_6_8
	end.

%% @doc The price the active transition phase interpolates from: the 2.7.2 cap
%% once the 2.7.2 phase has started, the pre-transition static price before.
transition_start_price(Height) ->
	TransitionStart_2_7_2 = ar_pricing_transition:transition_start_2_7_2(),
	case Height of
		_ when Height >= TransitionStart_2_7_2 ->
			?PRICE_2_7_2_PER_GIB_MINUTE_UPPER_BOUND;
		_ ->
			?PRICE_PER_GIB_MINUTE_PRE_TRANSITION
	end.

%% @doc Upper bound (cap) on the interpolated price. The cap only applies
%% between the 2.7.2 fork and the start of the 2.7.2 transition phase.
transition_upper_bound(Height) ->
	TransitionStart_2_7_2 = ar_pricing_transition:transition_start_2_7_2(),
	Fork_2_7_2 = ar_fork:height_2_7_2(),
	case Height of
		_ when Height >= TransitionStart_2_7_2 ->
			infinity;
		_ when Height >= Fork_2_7_2 ->
			?PRICE_2_7_2_PER_GIB_MINUTE_UPPER_BOUND;
		_ ->
			infinity
	end.

%% @doc Lower bound on the interpolated price; mirrors transition_upper_bound/1.
transition_lower_bound(Height) ->
	TransitionStart_2_7_2 = ar_pricing_transition:transition_start_2_7_2(),
	Fork_2_7_2 = ar_fork:height_2_7_2(),
	case Height of
		_ when Height >= TransitionStart_2_7_2 ->
			0;
		_ when Height >= Fork_2_7_2 ->
			?PRICE_2_7_2_PER_GIB_MINUTE_LOWER_BOUND;
		_ ->
			0
	end.

================================================
FILE: apps/arweave/src/ar_process_sampler.erl
================================================
-module(ar_process_sampler).
-behaviour(gen_server).

-include_lib("arweave/include/ar.hrl").

-export([start_link/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
-define(SAMPLE_PROCESSES_INTERVAL, 15000).
-define(SAMPLE_SCHEDULERS_INTERVAL, 30000).
-define(SAMPLE_SCHEDULERS_DURATION, 5000).

%% scheduler_samples holds the snapshot taken when a scheduler sampling window
%% was opened, or undefined when no window is open.
-record(state, {
	scheduler_samples = undefined
}).

%% API
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% gen_server callbacks

%% @doc Schedule the periodic process sampling and the first scheduler sampling.
init([]) ->
	{ok, _} = ar_timer:send_interval(
		?SAMPLE_PROCESSES_INTERVAL,
		self(),
		sample_processes,
		#{ skip_on_shutdown => false }
	),
	ar_util:cast_after(?SAMPLE_SCHEDULERS_INTERVAL, ?MODULE, sample_schedulers),
	{ok, #state{}}.

handle_call(_Request, _From, State) ->
	{reply, ok, State}.

handle_cast(sample_schedulers, State) ->
	State2 = sample_schedulers(State),
	{noreply, State2};
handle_cast(_Msg, State) ->
	{noreply, State}.

%% @doc Sample every live process and publish, per process name, the summed
%% memory, reductions and message queue length via the process_info gauge.
handle_info(sample_processes, State) ->
	StartTime = erlang:monotonic_time(),
	Processes = erlang:processes(),
	ProcessData = lists:filtermap(fun(Pid) -> process_function(Pid) end, Processes),
	ProcessMetrics = lists:foldl(
		fun({_Status, ProcessName, Memory, Reductions, MsgQueueLen}, Acc) ->
			%% Sum the data for each process. This is a compromise for handling unregistered
			%% processes. It has the effect of summing the memory and message queue length
			%% across all unregistered processes running off the same function. In general
			%% this is what we want (e.g. for the io threads within ar_mining_io and the
			%% hashing threads within ar_mining_hashing, we want to see if, in aggregate,
			%% their memory or message queue length has spiked).
			{MemoryTotal, ReductionsTotal, MsgQueueLenTotal} =
				maps:get(ProcessName, Acc, {0, 0, 0}),
			Metrics = {
				MemoryTotal + Memory,
				ReductionsTotal + Reductions,
				MsgQueueLenTotal + MsgQueueLen},
			maps:put(ProcessName, Metrics, Acc)
		end, #{}, ProcessData),
	%% Clear out the process_info metric so that we don't persist data about processes that
	%% have exited. We have to deregister and re-register the metric because we don't track
	%% all the label values used.
	prometheus_gauge:deregister(process_info),
	prometheus_gauge:new([{name, process_info},
		{labels, [process, type]},
		{help, "Sampling info about active processes. Only set when debug=true."}]),
	maps:foreach(
		fun(ProcessName, Metrics) ->
			{Memory, Reductions, MsgQueueLen} = Metrics,
			prometheus_gauge:set(process_info, [ProcessName, memory], Memory),
			prometheus_gauge:set(process_info, [ProcessName, reductions], Reductions),
			prometheus_gauge:set(process_info, [ProcessName, message_queue], MsgQueueLen)
		end, ProcessMetrics),
	%% Also publish the VM-wide memory breakdown under the same gauge.
	prometheus_gauge:set(process_info, [total, memory], erlang:memory(total)),
	prometheus_gauge:set(process_info, [processes, memory], erlang:memory(processes)),
	prometheus_gauge:set(process_info, [processes_used, memory], erlang:memory(processes_used)),
	prometheus_gauge:set(process_info, [system, memory], erlang:memory(system)),
	prometheus_gauge:set(process_info, [atom, memory], erlang:memory(atom)),
	prometheus_gauge:set(process_info, [atom_used, memory], erlang:memory(atom_used)),
	prometheus_gauge:set(process_info, [binary, memory], erlang:memory(binary)),
	prometheus_gauge:set(process_info, [code, memory], erlang:memory(code)),
	prometheus_gauge:set(process_info, [ets, memory], erlang:memory(ets)),
	log_binary_alloc(),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, microsecond),
	?LOG_DEBUG([{event, sample_processes}, {elapsed_ms, ElapsedTime / 1000}]),
	{noreply, State};
handle_info(_Info, State) ->
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate}, {reason, Reason}]),
	ok.

%% Internal functions

%% @doc Open or close a scheduler-utilization sampling window. With no stored
%% snapshot, enable scheduler_wall_time accounting and record the opening
%% sample; see the second clause (below) for closing the window.
sample_schedulers(#state{ scheduler_samples = undefined } = State) ->
	%% Start sampling
	erlang:system_flag(scheduler_wall_time, true),
	Samples = scheduler:sample_all(),
	%% Every ?SAMPLE_SCHEDULERS_INTERVAL ms, we'll sample the schedulers for
	%% ?SAMPLE_SCHEDULERS_DURATION ms.
	ar_util:cast_after(?SAMPLE_SCHEDULERS_INTERVAL, ?MODULE, sample_schedulers),
	ar_util:cast_after(?SAMPLE_SCHEDULERS_DURATION, ?MODULE, sample_schedulers),
	State#state{ scheduler_samples = Samples };
sample_schedulers(#state{ scheduler_samples = Samples1 } = State) ->
	%% Finish sampling
	Samples2 = scheduler:sample_all(),
	Util = scheduler:utilization(Samples1, Samples2),
	erlang:system_flag(scheduler_wall_time, false),
	average_utilization(Util),
	State#state{ scheduler_samples = undefined }.

%% @doc Publish the average utilization per scheduler type to the
%% scheduler_utilization gauge. 3-tuples ({Type, Value, _}) are recorded as
%% single-sample entries; 4-tuples are averaged per Type, skipping io entries
%% whose utilization is 0.
average_utilization(Util) ->
	Averages = lists:foldl(
		fun	({Type, Value, _}, Acc) ->
				maps:put(Type, {Value, 1}, Acc);
			({Type, _, Value, _}, Acc) ->
				case (Type == io andalso Value > 0) orelse (Type /= io) of
					true ->
						{Sum, Count} = maps:get(Type, Acc, {0, 0}),
						maps:put(Type, {Sum + Value, Count + 1}, Acc);
					false ->
						Acc
				end
		end,
		#{}, Util),
	maps:foreach(
		fun(Type, {Sum, Count}) ->
			prometheus_gauge:set(scheduler_utilization, [Type], Sum / Count)
		end, Averages).

%% @doc Collect sampling data for a single process, for lists:filtermap/2.
%% Returns false (filtered out) for the sampler's own process_info call and for
%% any unexpected process_info/2 result (e.g. the process has exited). When the
%% message queue exceeds 1000 entries, logs the first 10 messages (truncated).
process_function(Pid) ->
	case process_info(Pid, [current_function, current_stacktrace, registered_name,
			status, memory, reductions, message_queue_len, messages]) of
		[{current_function, {erlang, process_info, _A}}, _, _, _, _, _, _, _] ->
			false;
		[{current_function, CurrentFunction}, {current_stacktrace, Stack},
				{registered_name, Name}, {status, Status}, {memory, Memory},
				{reductions, Reductions}, {message_queue_len, MsgQueueLen},
				{messages, Messages}] ->
			ProcessName = process_name(Name, Stack),
			case MsgQueueLen > 1000 of
				true ->
					FormattedMessages =
						[format_message(Msg) || Msg <- lists:sublist(Messages, 10)],
					?LOG_DEBUG([{event, process_long_message_queue},
						{pid, Pid},
						{process_name, ProcessName},
						{current_function, CurrentFunction},
						{current_stacktrace, Stack},
						{memory, Memory},
						{reductions, Reductions},
						{message_queue_len, MsgQueueLen},
						{head_messages, FormattedMessages}]);
				false ->
					ok
			end,
			{true, {Status, ProcessName, Memory, Reductions, MsgQueueLen}};
		_ ->
			false
	end.
%% @doc Publish binary allocator statistics for the first allocator instance.
%% (Only instance 0 is sampled; the match crashes if the allocator info is
%% unavailable, same as before.)
log_binary_alloc() ->
	[FirstInstance | _] = erlang:system_info({allocator, binary_alloc}),
	log_binary_alloc_instances([FirstInstance]).

%% @doc Publish carrier and call-count metrics for each allocator instance in
%% the list. Call counters are split by ERTS into a giga part and a remainder;
%% they are recombined before being set on the gauge.
log_binary_alloc_instances([]) ->
	ok;
log_binary_alloc_instances([Inst | _Rest]) ->
	{instance, InstanceId, [
		_Versions, _Options, MainCarriers, SingleCarriers, CallStats
	]} = Inst,
	{calls, [
		{binary_alloc, AllocGiga, AllocRem},
		{binary_free, FreeGiga, FreeRem},
		{binary_realloc, ReallocGiga, ReallocRem},
		_MsegAlloc, _MsegDealloc, _MsegRealloc,
		_SysAlloc, _SysDealloc, _SysRealloc
	]} = CallStats,
	%% Recombine the split 64-bit counters.
	Total = fun(Giga, Rem) -> (Giga * 1000000000) + Rem end,
	log_binary_alloc_carrier(InstanceId, MainCarriers),
	log_binary_alloc_carrier(InstanceId, SingleCarriers),
	prometheus_gauge:set(allocator,
		[binary, InstanceId, calls, binary_alloc_count], Total(AllocGiga, AllocRem)),
	prometheus_gauge:set(allocator,
		[binary, InstanceId, calls, binary_free_count], Total(FreeGiga, FreeRem)),
	prometheus_gauge:set(allocator,
		[binary, InstanceId, calls, binary_realloc_count], Total(ReallocGiga, ReallocRem)).

%% @doc Publish the block and carrier counters for one carrier record (MBCS or
%% SBCS). Missing block info is reported as 0.
log_binary_alloc_carrier(InstanceId, CarrierInfo) ->
	{CarrierType, [
		{blocks, BlockInfo},
		{carriers, _, CarrierCount, _},
		_MsegCount, _SysCount,
		{carriers_size, _, CarrierSize, _},
		_MsegSize, _SysSize
	]} = CarrierInfo,
	{BlockCount, BlockSize} =
		case BlockInfo of
			[{binary_alloc, [{count, _, Cnt, _}, {size, _, Sz, _}]}] ->
				{Cnt, Sz};
			_ ->
				{0, 0}
		end,
	prometheus_gauge:set(allocator,
		[binary, InstanceId, CarrierType, binary_block_count], BlockCount),
	prometheus_gauge:set(allocator,
		[binary, InstanceId, CarrierType, binary_block_size], BlockSize),
	prometheus_gauge:set(allocator,
		[binary, InstanceId, CarrierType, binary_carrier_count], CarrierCount),
	prometheus_gauge:set(allocator,
		[binary, InstanceId, CarrierType, binary_carrier_size], CarrierSize).

%% @doc Anonymous processes don't have a registered name. So we'll name them after their
%% module, function and arity.
%% @doc Build a printable name for the process: the registered name when there
%% is one, otherwise "module:function/arity" derived from the oldest non-proc_lib
%% stack frame, or "unknown" when nothing usable is available.
process_name([], []) ->
	"unknown";
process_name([], Stack) ->
	%% Guard against initial_call/1 returning "unknown" (no usable frame):
	%% previously element/1..3 on that string would crash the sampler.
	case initial_call(lists:reverse(Stack)) of
		InitialCall when is_tuple(InitialCall) ->
			M = element(1, InitialCall),
			F = element(2, InitialCall),
			A = element(3, InitialCall),
			atom_to_list(M) ++ ":" ++ atom_to_list(F) ++ "/" ++ integer_to_list(A);
		_ ->
			"unknown"
	end;
process_name(Name, _Stack) ->
	atom_to_list(Name).

%% @doc Return the first stack frame that is not the proc_lib wrapper,
%% or "unknown" when the (reversed) stack is exhausted.
initial_call([]) ->
	"unknown";
initial_call([{proc_lib, init_p_do_apply, _A, _Location} | Stack]) ->
	initial_call(Stack);
initial_call([InitialCall | _Stack]) ->
	InitialCall.

%% @doc Render a message for logging: truncate nested binaries, pretty-print,
%% and cap the printable output at 1000 characters.
format_message(Msg) ->
	TruncatedMsg = truncate_term(Msg),
	Formatted = io_lib:format("~p", [TruncatedMsg]),
	OutputStr = lists:flatten(Formatted),
	LimitedOutput = limit_output(OutputStr, 1000),
	io_lib:format("~s~n", [LimitedOutput]).

%% @doc Cap a string at Limit characters.
limit_output(Str, Limit) ->
	if
		length(Str) > Limit ->
			lists:sublist(Str, Limit);
		true ->
			Str
	end.

%% @doc Recursively truncate binaries inside a term to their first 8 bytes.
%% NOTE(review): the binary clause was garbled in extraction (`<>` patterns);
%% reconstructed from the surrounding comment and the byte_size(Term) > 8 test.
truncate_term(Term) when is_binary(Term) ->
	if
		byte_size(Term) > 8 ->
			<<Prefix:8/binary, _/binary>> = Term,
			%% Append ellipsis (three periods) to indicate truncation.
			<<Prefix/binary, "...">>;
		true ->
			Term
	end;
truncate_term([]) ->
	[];
truncate_term([Head | Tail]) ->
	[truncate_term(Head) | truncate_term(Tail)];
truncate_term(Term) when is_tuple(Term) ->
	List = tuple_to_list(Term),
	TruncatedList = [truncate_term(Elem) || Elem <- List],
	list_to_tuple(TruncatedList);
truncate_term(Term) when is_map(Term) ->
	maps:map(fun(_Key, Value) -> truncate_term(Value) end, Term);
truncate_term(Term) ->
	Term.

================================================
FILE: apps/arweave/src/ar_prometheus_cowboy_handler.erl
================================================
%% @doc
%% Cowboy2 handler for exporting prometheus metrics.
%% @end
-module(ar_prometheus_cowboy_handler).

%% -behaviour(cowboy_handler).

-export([init/2, terminate/3]).

-include_lib("arweave/include/ar.hrl").

%% ===================================================================
%% cowboy_handler callbacks
%% ===================================================================

init(Req, _Opts) ->
	handle(Req).

terminate(_Reason, _Req, _State) ->
	ok.
%% ===================================================================
%% Private functions
%% ===================================================================

%% @doc Dispatch on the request method and return the Cowboy handler result.
handle(Request) ->
	Method = cowboy_req:method(Request),
	Request1 = gen_response(Method, Request),
	{ok, Request1, undefined}.

%% @doc Serve the metrics for HEAD and GET requests against an existing
%% registry (prometheus_registry:exists/1 returns the registry name, which is
%% bound in the second case branch); reply 404 for an unknown registry; pass
%% any other method through unchanged.
gen_response(<<"HEAD">>, Request) ->
	Registry0 = cowboy_req:binding(registry, Request, <<"default">>),
	case prometheus_registry:exists(Registry0) of
		false ->
			cowboy_req:reply(404, #{}, <<"Unknown Registry">>, Request);
		Registry ->
			gen_metrics_response(Registry, Request)
	end;
gen_response(<<"GET">>, Request) ->
	Registry0 = cowboy_req:binding(registry, Request, <<"default">>),
	case prometheus_registry:exists(Registry0) of
		false ->
			cowboy_req:reply(404, #{}, <<"Unknown Registry">>, Request);
		Registry ->
			gen_metrics_response(Registry, Request)
	end;
gen_response(_, Request) ->
	Request.

%% @doc Render the metrics via prometheus_http_impl:reply/1 and send the
%% response, merging the CORS headers into the response headers.
gen_metrics_response(Registry, Request) ->
	%% NOTE(review): path => true appears to tell prometheus_http_impl to skip
	%% path-based dispatch - confirm against the prometheus_httpd docs.
	URI = true,
	GetHeader =
		fun(Name, Default) ->
			cowboy_req:header(iolist_to_binary(Name), Request, Default)
		end,
	{Code, RespHeaders, Body} = prometheus_http_impl:reply(
		#{ path => URI,
			headers => GetHeader,
			registry => Registry,
			standalone => false }),
	Headers = prometheus_cowboy:to_cowboy_headers(RespHeaders),
	Headers2 = maps:merge(?CORS_HEADERS, maps:from_list(Headers)),
	cowboy_req:reply(Code, Headers2, Body, Request).

================================================
FILE: apps/arweave/src/ar_prometheus_cowboy_labels.erl
================================================
-module(ar_prometheus_cowboy_labels).

-export([label_value/2]).

%%%===================================================================
%%% Prometheus cowboy labels module callback (no behaviour)
%%%===================================================================

%% @doc Compute the value of the given metric label for the given request:
%% the normalized method atom for http_method, the labeled HTTP path for route,
%% undefined for anything else.
label_value(http_method, #{ req := Req }) ->
	normalize_method(cowboy_req:method(Req));
label_value(route, #{ req := Req }) ->
	ar_http_iface_server:label_http_path(cowboy_req:path(Req));
label_value(_, _) ->
	undefined.
%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Map an HTTP method binary to the corresponding atom;
%% undefined for unrecognized methods.
normalize_method(<<"GET">>) -> 'GET';
normalize_method(<<"HEAD">>) -> 'HEAD';
normalize_method(<<"POST">>) -> 'POST';
normalize_method(<<"PUT">>) -> 'PUT';
normalize_method(<<"DELETE">>) -> 'DELETE';
normalize_method(<<"CONNECT">>) -> 'CONNECT';
normalize_method(<<"OPTIONS">>) -> 'OPTIONS';
normalize_method(<<"TRACE">>) -> 'TRACE';
normalize_method(<<"PATCH">>) -> 'PATCH';
normalize_method(_) -> undefined.

================================================
FILE: apps/arweave/src/ar_rate_limiter.erl
================================================
-module(ar_rate_limiter).

-behaviour(gen_server).

-export([start_link/0, throttle/2, off/0, on/0, is_on_cooldown/2, set_cooldown/3]).
-export([is_throttled/2]).
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
% Used in ?RPM_BY_PATH.
-include_lib("arweave/include/ar_blacklist_middleware.hrl").
-include_lib("eunit/include/eunit.hrl").

%% traces maps {Peer, RPMKey} to {Count, Queue} where Queue holds the
%% millisecond timestamps of recent requests; off disables throttling.
-record(state, {
	traces,
	off
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Hang until it is safe to make another request to the given Peer with the given Path.
%% The limits are configured in include/ar_blacklist_middleware.hrl.
%% Peers listed in the local_peers config are never throttled.
throttle(Peer, Path) ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(Peer, Config#config.local_peers) of
		true ->
			ok;
		false ->
			throttle2(Peer, Path)
	end.

throttle2(Peer, Path) ->
	P = ar_http_iface_server:split_path(iolist_to_binary(Path)),
	case P of
		[<<"tx">>] ->
			%% Do not throttle transaction gossip.
			ok;
		_ ->
			%% A noproc exit means the rate limiter is not running; treat the
			%% request as allowed. Any other exit is re-raised in the caller.
			case catch gen_server:call(?MODULE, {throttle, Peer, P}, infinity) of
				{'EXIT', {noproc, {gen_server, call, _}}} ->
					ok;
				{'EXIT', Reason} ->
					exit(Reason);
				_ ->
					ok
			end
	end.

%% @doc Turn rate limiting off.
off() ->
	gen_server:cast(?MODULE, turn_off).

%% @doc Turn rate limiting on.
on() ->
	gen_server:cast(?MODULE, turn_on).

%% @doc Return true if Peer should be throttled for the given RPMKey.
%% Returns false when the rate limiter process is not running.
is_throttled(Peer, Path) ->
	case catch gen_server:call(?MODULE, {is_throttled, Peer, Path}, infinity) of
		{'EXIT', {noproc, {gen_server, call, _}}} ->
			false;
		{'EXIT', Reason} ->
			exit(Reason);
		Bool when is_boolean(Bool) ->
			Bool
	end.

%% @doc Return true if Peer is on cooldown for the given Path.
is_on_cooldown(Peer, RPMKey) ->
	Now = os:system_time(millisecond),
	case ets:lookup(?MODULE, {cooldown, Peer, RPMKey}) of
		[{_, Until}] when Until > Now ->
			true;
		_ ->
			false
	end.

%% @doc Put Peer on cooldown for the given RPMKey for Milliseconds.
%% Non-positive durations are ignored.
set_cooldown(Peer, RPMKey, Milliseconds) when Milliseconds > 0 ->
	?LOG_DEBUG([{event, set_cooldown}, {peer, ar_util:format_peer(Peer)},
		{rpm_key, RPMKey}, {milliseconds, Milliseconds}]),
	Until = os:system_time(millisecond) + Milliseconds,
	ets:insert(?MODULE, {{cooldown, Peer, RPMKey}, Until}),
	ok;
set_cooldown(_Peer, _RPMKey, _Milliseconds) ->
	ok.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	{ok, #state{ traces = #{}, off = false }}.
%% When throttling is off, allow every request immediately.
handle_call({throttle, _Peer, _Path}, _From, #state{ off = true } = State) ->
	{reply, ok, State};
handle_call({is_throttled, Peer, Path}, _From, State) ->
	{Throttle, _} = is_throttled(Peer, Path, State),
	{reply, Throttle, State};
%% Defer the throttling decision to handle_cast; the caller stays blocked and
%% is replied to via gen_server:reply/2 once the request is allowed.
handle_call({throttle, Peer, Path}, From, State) ->
	gen_server:cast(?MODULE, {throttle, Peer, Path, From}),
	{noreply, State};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast({throttle, Peer, Path, From}, State) ->
	#state{ traces = Traces } = State,
	{RPMKey, Limit} = ?RPM_BY_PATH(Path)(),
	{Throttle, {N, Trace}} = is_throttled(Peer, Path, State),
	case Throttle of
		true ->
			?LOG_DEBUG([{event, approaching_peer_rpm_limit},
				{path, Path}, {minute_limit, Limit},
				{peer, ar_util:format_peer(Peer)}, {caller, From}]),
			%% Re-check in a second; the caller keeps waiting meanwhile.
			ar_util:cast_after(1000, ?MODULE, {throttle, Peer, Path, From}),
			{noreply, State};
		false ->
			gen_server:reply(From, ok),
			%% Only record the request in the traces when it was allowed.
			Traces2 = maps:put({Peer, RPMKey}, {N, Trace}, Traces),
			{noreply, State#state{ traces = Traces2 }}
	end;
handle_cast(turn_off, State) ->
	{noreply, State#state{ off = true }};
handle_cast(turn_on, State) ->
	{noreply, State#state{ off = false }};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE}, {pid, self()}, {callback, terminate}, {reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Drop timestamps older than ?THROTTLE_PERIOD from the front of the
%% queue, decrementing N accordingly. Assumes the queue is non-empty
%% (queue:out/1 on an empty queue would cause a badmatch here).
cut_trace(N, Trace, Now) ->
	{{value, Timestamp}, Trace2} = queue:out(Trace),
	case Timestamp < Now - ?THROTTLE_PERIOD of
		true ->
			cut_trace(N - 1, Trace2, Now);
		false ->
			{N, Trace}
	end.

%% @doc Internal predicate used by both server and tests.
%% Returns {Throttle, {NewN, NewTrace}}.
%% (The `%%` delimiter on the line above was lost in extraction; it continues
%% the @doc comment immediately preceding this function.)
is_throttled(Peer, Path, #state{ traces = Traces } = _State) ->
	{RPMKey, Limit} = ?RPM_BY_PATH(Path)(),
	Now = os:system_time(millisecond),
	case maps:get({Peer, RPMKey}, Traces, not_found) of
		not_found ->
			%% First request we have seen for this {Peer, RPMKey}: never
			%% throttled; suggest a fresh single-entry trace.
			{false, {1, queue:from_list([Now])}};
		{N, Trace} ->
			{N2, Trace2} = cut_trace(N, queue:in(Now, Trace), Now),
			%% The macro specifies requests per minute while the throttling window
			%% is 30 seconds.
			HalfLimit = Limit div 2,
			%% Try to approach but not hit the limit.
			Throttle = N2 + 1 > max(1, HalfLimit * 80 div 100),
			{Throttle, {N2 + 1, Trace2}}
	end.

%%--------------------------------------------------------------------
%% Tests
%%--------------------------------------------------------------------

is_throttled_server_down_test() ->
	%% When the server is not running, we should not crash and return false.
	Peer = {127, 0, 0, 1},
	?assertEqual(false, is_throttled(Peer, [<<"hash_list">>])).

is_throttled_test() ->
	Peer = {127, 0, 0, 1},
	RPMKey = data_sync_record,
	Path = [<<"data_sync_record">>],
	Now = os:system_time(millisecond),
	ThrottleLimit = (?DEFAULT_REQUESTS_PER_MINUTE_LIMIT div 2) * 80 div 100,
	%% Build a trace representing ThrottleLimit - 1 requests
	Trace = queue:from_list(
		lists:duplicate(ThrottleLimit - 1, Now + 2000 - ?THROTTLE_PERIOD)),
	State = #state{
		traces = #{ {Peer, RPMKey} => {ThrottleLimit - 1, Trace} },
		off = false },
	{Throttle1, {N1, Trace1}} = is_throttled(Peer, Path, State),
	?assertEqual(false, Throttle1),
	%% Add one more implied request (same inputs) should be throttled next time
	State2 = #state{ traces = #{ {Peer, RPMKey} => {N1, Trace1} }, off = false },
	{Throttle2, {_N2, _Trace2}} = is_throttled(Peer, Path, State2),
	?assertEqual(true, Throttle2),
	%% Sleep to let most of the requests age out.
Note: ar_rate_limiter only updates the traces %% state when there is no throttle, so we won't use {N2, Trace2} timer:sleep(3000), {Throttle3, {N3, _Trace3}} = is_throttled(Peer, Path, State2), ?assertEqual(false, Throttle3), ?assertEqual(2, N3), %% Not found path should not throttle and should suggest initial trace State4 = #state{ traces = #{}, off = false }, {Throttle4, {N4, Trace4}} = is_throttled(Peer, Path, State4), ?assertEqual(false, Throttle4), ?assertEqual(1, N4), ?assertEqual(1, queue:len(Trace4)), ok. ================================================ FILE: apps/arweave/src/ar_repack.erl ================================================ -module(ar_repack). -behaviour(gen_server). -export([name/1, register_workers/0, get_read_range/3, chunk_range_read/4]). -export([start_link/2, init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). -include("ar.hrl"). -include("ar_sup.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include("ar_repack.hrl"). -include_lib("eunit/include/eunit.hrl"). -moduledoc """ This module handles the repack-in-place logic. """. -define(REPACK_WRITE_BATCH_SIZE, 1024). -record(state, { store_id = undefined, read_batch_size = ?DEFAULT_REPACK_BATCH_SIZE, write_batch_size = ?REPACK_WRITE_BATCH_SIZE, num_entropy_offsets, module_start = 0, module_end = 0, footprint_start = 0, %% The highest chunk offset that can be read for this repack footprint. footprint_end = 0, %% The highest bucket end offset to generate entropy for. Generating entropy for this %% bucket may yield entropy offsets higher than this because entropy is generated in %% 256 MiB batches. entropy_end = 0, next_cursor = 0, configured_packing = undefined, target_packing = undefined, repack_status = undefined, repack_chunk_map = #{}, write_queue = gb_sets:new() }). -ifdef(AR_TEST). -define(DEVICE_LOCK_WAIT, 100). -else. -define(DEVICE_LOCK_WAIT, 5_000). -endif. -define(STATE_COUNT_INTERVAL, 10_000). 
%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, {StoreID, Packing}) ->
	gen_server:start_link({local, Name}, ?MODULE, {StoreID, Packing}, []).

%% @doc Return the name of the server serving the given StoreID.
name(StoreID) ->
	list_to_atom("ar_repack_" ++ ar_storage_module:label(StoreID)).

%% @doc Build the supervisor child specs for every storage module configured
%% for repack-in-place: one ar_repack worker and one ar_repack_io worker each.
register_workers() ->
	{ok, Config} = arweave_config:get_env(),
	RepackInPlaceWorkers = lists:flatmap(
		fun({StorageModule, Packing}) ->
			StoreID = ar_storage_module:id(StorageModule),
			%% Note: the config validation will prevent a StoreID from being used in both
			%% `storage_modules` and `repack_in_place_storage_modules`, so there's
			%% no risk of a `Name` clash with the workers spawned above.
			RepackWorker = ?CHILD_WITH_ARGS(
				ar_repack, worker, name(StoreID), [name(StoreID), {StoreID, Packing}]),
			RepackIOWorker = ?CHILD_WITH_ARGS(
				ar_repack_io, worker, ar_repack_io:name(StoreID),
				[ar_repack_io:name(StoreID), StoreID]),
			[RepackWorker, RepackIOWorker]
		end,
		Config#config.repack_in_place_storage_modules
	),
	RepackInPlaceWorkers.

%% @doc gen_server init. Resolves the module's chunk range, restores the
%% persisted cursor, kicks off the repack loop and periodic state counting,
%% and starts in the `paused` device-lock state.
init({StoreID, ToPacking}) ->
	FromPacking = ar_storage_module:get_packing(StoreID),
	?LOG_INFO([{event, ar_repack_init},
		{name, name(StoreID)}, {store_id, StoreID},
		{from_packing, ar_serialize:encode_packing(FromPacking, false)},
		{to_packing, ar_serialize:encode_packing(ToPacking, false)}]),

	%% ModuleStart to PaddedModuleEnd is the *chunk* range that will be repacked. Chunk
	%% offsets will later be converted to bucket offsets and entropy offsets - and the
	%% bucket and entropy ranges may differ from this chunk range.
	Module = ar_storage_module:get_by_id(StoreID),
	{ModuleStart, ModuleEnd} = ar_storage_module:module_range(Module),
	PaddedModuleEnd = ar_block:get_chunk_padded_offset(ModuleEnd),

	%% Resume from the persisted cursor if one exists for this target packing.
	Cursor = read_cursor(StoreID, ToPacking, ModuleStart),
	{ok, Config} = arweave_config:get_env(),
	BatchSize = Config#config.repack_batch_size,
	CacheSize = Config#config.repack_cache_size_mb,
	NumEntropyOffsets = calculate_num_entropy_offsets(CacheSize, BatchSize),
	%% Schedule the first repack iteration and the periodic metrics tick.
	gen_server:cast(self(), repack),
	gen_server:cast(self(), count_states),
	ar_device_lock:set_device_lock_metric(StoreID, repack, paused),
	State = #state{
		store_id = StoreID,
		read_batch_size = BatchSize,
		num_entropy_offsets = NumEntropyOffsets,
		module_start = ModuleStart,
		module_end = PaddedModuleEnd,
		next_cursor = Cursor,
		configured_packing = FromPacking,
		target_packing = ToPacking,
		repack_status = paused
	},
	log_info(starting_repack_in_place, State, [
		{name, name(StoreID)},
		{read_batch_size, BatchSize},
		{write_batch_size, State#state.write_batch_size},
		{num_entropy_offsets, State#state.num_entropy_offsets},
		{from_packing, ar_serialize:encode_packing(FromPacking, false)},
		{to_packing, ar_serialize:encode_packing(ToPacking, false)},
		{raw_module_end, ModuleEnd},
		{next_cursor, Cursor}]),
	{ok, State}.

%% @doc Gets the start and end offset of the range of chunks to read starting from
%% BucketEndOffset. Also includes the BucketEndOffsets covered by that range.
get_read_range(BucketEndOffset, #state{} = State) ->
	#state{
		module_end = ModuleEnd,
		footprint_end = FootprintEnd,
		read_batch_size = BatchSize
	} = State,
	get_read_range(BucketEndOffset, min(ModuleEnd, FootprintEnd), BatchSize).

-spec get_read_range(
		non_neg_integer(), non_neg_integer(), non_neg_integer()) ->
	{non_neg_integer(), non_neg_integer(), [non_neg_integer()]}.
get_read_range(BucketEndOffset, RangeEnd, BatchSize) -> ReadRangeStart = ar_chunk_storage:get_chunk_byte_from_bucket_end(BucketEndOffset), Partition = ar_node:get_partition_number(BucketEndOffset), {EntropyPartitionStart, EntropyPartitionEnd} = ar_replica_2_9:get_entropy_partition_range(Partition), SectorSize = ar_block:get_replica_2_9_entropy_sector_size(), EntropyPartitionStartBucket = ar_chunk_storage:get_chunk_bucket_start(EntropyPartitionStart), Sector = (BucketEndOffset - EntropyPartitionStartBucket) div SectorSize, SectorBucketEnd = EntropyPartitionStartBucket + (Sector + 1) * SectorSize, SectorChunkEnd = ar_chunk_storage:get_chunk_byte_from_bucket_end(SectorBucketEnd) + ?DATA_CHUNK_SIZE, FullRangeSize = ?DATA_CHUNK_SIZE * BatchSize, ReadRangeEnd = lists:min([ ReadRangeStart + FullRangeSize, EntropyPartitionEnd, SectorChunkEnd, RangeEnd]), BucketEndOffsets = [BucketEndOffset + (N * ?DATA_CHUNK_SIZE) || N <- lists:seq(0, BatchSize-1), BucketEndOffset + (N * ?DATA_CHUNK_SIZE) =< ReadRangeEnd], {ReadRangeStart, ReadRangeEnd, BucketEndOffsets}. chunk_range_read(BucketEndOffset, OffsetChunkMap, OffsetMetadataMap, StoreID) -> gen_server:cast(name(StoreID), {chunk_range_read, BucketEndOffset, OffsetChunkMap, OffsetMetadataMap}). %%%=================================================================== %%% Gen server callbacks. %%%=================================================================== handle_call(Request, _From, #state{} = State) -> ?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]), {reply, ok, State}. 
%% @doc Main loop tick: persist the cursor, (re)acquire the per-device repack
%% lock, and run one repack iteration if the lock is active. When paused,
%% retry after ?DEVICE_LOCK_WAIT.
handle_cast(repack, #state{} = State) ->
	#state{ store_id = StoreID } = State,
	store_cursor(State),
	NewStatus = ar_device_lock:acquire_lock(repack, StoreID, State#state.repack_status),
	State2 = State#state{ repack_status = NewStatus },
	State3 = case NewStatus of
		active ->
			repack(State2);
		paused ->
			ar_util:cast_after(?DEVICE_LOCK_WAIT, self(), repack),
			State2;
		_ ->
			State2
	end,
	{noreply, State3};
%% A footprint read finished: merge the returned chunks/metadata into the
%% repack_chunk_map and mark offsets that came back empty as missing.
handle_cast({chunk_range_read, BucketEndOffset, OffsetChunkMap, OffsetMetadataMap},
		#state{} = State) ->
	{_, _, ReadRangeOffsets} = get_read_range(BucketEndOffset, State),
	State2 = add_range_to_repack_chunk_map(OffsetChunkMap, OffsetMetadataMap, State),
	State3 = mark_missing_chunks(ReadRangeOffsets, State2),
	{noreply, State3};
%% Expiration messages carry the footprint they were issued for; only act on
%% them if they belong to the footprint currently being processed.
handle_cast({expire_repack_request, {BucketEndOffset, FootprintID}},
		#state{footprint_start = FootprintStart} = State)
		when FootprintID == FootprintStart ->
	#state{ repack_chunk_map = Map } = State,
	State2 = case maps:get(BucketEndOffset, Map, not_found) of
		not_found ->
			%% Chunk has already been repacked and processed.
			State;
		RepackChunk ->
			log_debug(repack_request_expired, RepackChunk, State, []),
			remove_repack_chunk(BucketEndOffset, State)
	end,
	{noreply, State2};
handle_cast({expire_repack_request, _Ref}, #state{} = State) ->
	%% Request is from an old batch, ignore.
	{noreply, State};
handle_cast({expire_encipher_request, {BucketEndOffset, FootprintID}},
		#state{footprint_start = FootprintStart} = State)
		when FootprintID == FootprintStart ->
	{noreply, expire_exor_request(BucketEndOffset, State)};
handle_cast({expire_encipher_request, _Ref}, #state{} = State) ->
	%% Request is from an old batch, ignore.
	{noreply, State};
handle_cast({expire_decipher_request, {BucketEndOffset, FootprintID}},
		#state{footprint_start = FootprintStart} = State)
		when FootprintID == FootprintStart ->
	{noreply, expire_exor_request(BucketEndOffset, State)};
handle_cast({expire_decipher_request, _Ref}, #state{} = State) ->
	%% Request is from an old batch, ignore.
	{noreply, State};
%% Periodic metrics tick: count cached chunk states and reschedule itself.
handle_cast(count_states, #state{} = State) ->
	count_states(cache, State),
	ar_util:cast_after(?STATE_COUNT_INTERVAL, self(), count_states),
	{noreply, State};
handle_cast(Request, #state{} = State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {request, Request}]),
	{noreply, State}.

%% @doc A batch of entropies arrived from ar_entropy_gen. Chain the request for
%% the next bucket's entropy, then distribute the received entropies to the
%% chunks in this footprint via the entropy_generated/4 callback.
handle_info({entropy, BucketEndOffset, RewardAddr, Entropies}, #state{} = State) ->
	#state{
		footprint_start = FootprintStart,
		footprint_end = FootprintEnd
	} = State,
	generate_repack_entropy(
		BucketEndOffset + ?DATA_CHUNK_SIZE, {replica_2_9, RewardAddr}, State),
	EntropyKeys = ar_entropy_gen:generate_entropy_keys(RewardAddr, BucketEndOffset),
	EntropyOffsets = ar_entropy_gen:entropy_offsets(BucketEndOffset, FootprintEnd),
	State2 = ar_entropy_gen:map_entropies(
		Entropies, EntropyOffsets, FootprintStart, EntropyKeys, RewardAddr,
		fun entropy_generated/4, [], State),
	{noreply, State2};
%% The packing server finished repacking (unpacking) a chunk.
handle_info({chunk, {packed, {BucketEndOffset, _}, ChunkArgs}}, #state{} = State) ->
	#state{ repack_chunk_map = Map } = State,
	State2 = case maps:get(BucketEndOffset, Map, not_found) of
		not_found ->
			{Packing, _, AbsoluteOffset, _, ChunkSize} = ChunkArgs,
			log_warning(chunk_repack_request_not_found, State, [
				{bucket_end_offset, BucketEndOffset},
				{absolute_offset, AbsoluteOffset},
				{chunk_size, ChunkSize},
				{packing, ar_serialize:encode_packing(Packing, false)},
				{repack_chunk_map, maps:size(Map)}
			]),
			State;
		RepackChunk ->
			{Packing, Chunk, _, _, _} = ChunkArgs,
			%% sanity checks
			true = RepackChunk#repack_chunk.state == needs_repack,
			%% end sanity checks
			RepackChunk2 = RepackChunk#repack_chunk{
				chunk = Chunk,
				source_packing = Packing
			},
			update_chunk_state(RepackChunk2, State)
	end,
	{noreply, State2};
%% The packing server deciphered a chunk against the source entropy.
handle_info({chunk, {deciphered, {BucketEndOffset, _}, UnpackedChunk}}, #state{} = State) ->
	#state{ repack_chunk_map = Map } = State,
	State2 = case maps:get(BucketEndOffset, Map, not_found) of
		not_found ->
			log_warning(chunk_decipher_request_not_found, State, [
				{bucket_end_offset, BucketEndOffset},
				{repack_chunk_map, maps:size(Map)}
			]),
			State;
		RepackChunk ->
			%% sanity checks
			true = RepackChunk#repack_chunk.state == needs_decipher,
			true = byte_size(UnpackedChunk) == ?DATA_CHUNK_SIZE,
			%% end sanity checks
			RepackChunk2 = RepackChunk#repack_chunk{
				chunk = UnpackedChunk,
				source_entropy = <<>>,
				source_packing = unpacked_padded
			},
			update_chunk_state(RepackChunk2, State)
	end,
	{noreply, State2};
%% The packing server enciphered a chunk with the target entropy.
handle_info({chunk, {enciphered, {BucketEndOffset, _}, PackedChunk}}, #state{} = State) ->
	#state{ repack_chunk_map = Map } = State,
	State2 = case maps:get(BucketEndOffset, Map, not_found) of
		not_found ->
			log_warning(chunk_encipher_request_not_found, State, [
				{bucket_end_offset, BucketEndOffset},
				{repack_chunk_map, maps:size(Map)}
			]),
			State;
		RepackChunk ->
			%% sanity checks
			true = RepackChunk#repack_chunk.state == needs_encipher,
			%% end sanity checks
			RepackChunk2 = RepackChunk#repack_chunk{
				chunk = PackedChunk,
				target_entropy = <<>>,
				source_packing = RepackChunk#repack_chunk.target_packing
			},
			update_chunk_state(RepackChunk2, State)
	end,
	{noreply, State2};
handle_info({entropy_generated, _Ref, _Entropy}, State) ->
	?LOG_WARNING([{event, entropy_generation_timed_out}]),
	{noreply, State};
handle_info(Request, #state{} = State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {request, Request}]),
	{noreply, State}.

%% @doc Persist the cursor on shutdown so a restart resumes where we left off.
terminate(Reason, #state{} = State) ->
	log_debug(terminate, State, [{reason, ar_util:safe_format(Reason)}]),
	store_cursor(State),
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Derive the footprint width from the configured cache size (MiB) and read
%% batch size, capped at the number of sub-chunks per replica_2_9 entropy.
calculate_num_entropy_offsets(CacheSize, BatchSize) ->
	min(ar_block:get_sub_chunks_per_replica_2_9_entropy(), (CacheSize * 4) div BatchSize).

%% @doc Outer repack loop. Called via `gen_server:cast(self(), repack)`. Each call
%% repacks another footprint of chunks.
A repack footprint is N entropy footprints where N %% is the repack batch size. repack(#state{ next_cursor = Cursor, module_end = ModuleEnd } = State) when Cursor > ModuleEnd -> #state{ repack_chunk_map = Map, store_id = StoreID, target_packing = TargetPacking } = State, case maps:size(Map) of 0 -> ar_device_lock:release_lock(repack, StoreID), ar_device_lock:set_device_lock_metric(StoreID, repack, complete), State2 = State#state{ repack_status = complete }, ar:console("~n~nRepacking of ~s is complete! " "We suggest you stop the node, rename " "the storage module folder to reflect " "the new packing, and start the " "node with the new storage module.~n", [StoreID]), ?LOG_INFO([{event, repacking_complete}, {store_id, StoreID}, {target_packing, ar_serialize:encode_packing(TargetPacking, false)}]), State2; _ -> log_debug(repacking_complete_but_waiting, State, [ {target_packing, ar_serialize:encode_packing(TargetPacking, false)}]), ar_util:cast_after(5000, self(), repack), State end; repack(#state{} = State) -> #state{ next_cursor = Cursor, target_packing = TargetPacking } = State, case ar_packing_server:is_buffer_full() of true -> log_debug(waiting_for_repack_buffer, State, [ {target_packing, ar_serialize:encode_packing(TargetPacking, false)}]), ar_util:cast_after(200, self(), repack), State; false -> repack_footprint(Cursor, State) end. 
%% @doc Process one repack footprint anchored at Cursor: either skip the cursor
%% (advancing it to the next unsynced interval) or initialize the chunk map,
%% kick off entropy generation, and schedule the footprint read.
repack_footprint(Cursor, #state{} = State) ->
	#state{
		module_end = ModuleEnd,
		num_entropy_offsets = NumEntropyOffsets,
		configured_packing = SourcePacking,
		target_packing = TargetPacking,
		store_id = StoreID,
		read_batch_size = BatchSize
	} = State,
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(Cursor),
	BucketStartOffset = ar_chunk_storage:get_chunk_bucket_start(Cursor),
	FootprintOffsets = footprint_offsets(BucketEndOffset, NumEntropyOffsets, ModuleEnd),
	FootprintStart = BucketStartOffset+1,
	FootprintEnd = footprint_end(FootprintOffsets, ModuleEnd, BatchSize),
	case should_repack(Cursor, FootprintStart, FootprintEnd, State) of
		{false, Logs} ->
			%% Skip this Cursor for one of these reasons:
			%% 1. Cursor has already been repacked.
			%%    Note: we expect this to happen a lot since we iterate through all
			%%    chunks in the partition, but for each chunk we will repack N
			%%    entropy footprints.
			%% 2. The iteration range of this batch starts after the end of the
			%%    storage module.
			gen_server:cast(self(), repack),
			%% Jump ahead to the next interval that still needs the target packing,
			%% but always advance by at least one chunk.
			Interval = ar_sync_record:get_next_unsynced_interval(
				Cursor, infinity, TargetPacking, ar_data_sync, StoreID),
			NextCursor = case Interval of
				not_found ->
					Cursor + ?DATA_CHUNK_SIZE;
				{_, Start} ->
					Start
			end,
			NextCursor2 = max(NextCursor, Cursor + ?DATA_CHUNK_SIZE),
			log_debug(skipping_cursor, State, [
				{next_cursor, NextCursor2},
				{cursor, Cursor},
				{footprint_start, FootprintStart},
				{footprint_end, FootprintEnd},
				{footprint_offsets, length(FootprintOffsets)}
			] ++ Logs),
			State#state{ next_cursor = NextCursor2 };
		true ->
			State2 = State#state{
				footprint_start = FootprintStart,
				footprint_end = FootprintEnd,
				next_cursor = Cursor + ?DATA_CHUNK_SIZE
			},
			{_, EntropyEnd, _} = get_read_range(BucketEndOffset, State2),
			State3 = State2#state{
				entropy_end = ar_chunk_storage:get_chunk_bucket_end(EntropyEnd)
			},
			State4 = init_repack_chunk_map(FootprintOffsets, State3),
			MaxChunkMapOffset = lists:max(maps:keys(State4#state.repack_chunk_map)),
			log_info(repack_footprint_start, State4, [
				{cursor, Cursor},
				{bucket_end_offset, BucketEndOffset},
				{source_packing, ar_serialize:encode_packing(SourcePacking, false)},
				{target_packing, ar_serialize:encode_packing(TargetPacking, false)},
				{entropy_end, EntropyEnd},
				{read_batch_size, BatchSize},
				{write_batch_size, State4#state.write_batch_size},
				{num_entropy_offsets, NumEntropyOffsets},
				{footprint_offsets, length(FootprintOffsets)},
				{max_chunk_map_offset, MaxChunkMapOffset}
			]),
			%% sanity checks
			true = MaxChunkMapOffset =< FootprintEnd,
			true = EntropyEnd =< FootprintEnd,
			true = FootprintEnd =< ModuleEnd,
			%% end sanity checks
			%% We'll generate BatchSize entropy footprints, one for each bucket end offset
			%% starting at BucketEndOffset and ending at EntropyEnd.
			generate_repack_entropy(BucketEndOffset, SourcePacking, State4),
			generate_repack_entropy(BucketEndOffset, TargetPacking, State4),
			ar_repack_io:read_footprint(
				FootprintOffsets, FootprintStart, FootprintEnd, StoreID),
			State4
	end.

%% @doc Decide whether the footprint anchored at Cursor needs processing.
%% Returns true, or {false, Logs} where Logs explains the skip.
should_repack(Cursor, FootprintStart, FootprintEnd, State) ->
	#state{
		module_start = ModuleStart,
		module_end = ModuleEnd,
		target_packing = TargetPacking,
		store_id = StoreID
	} = State,
	PaddedEndOffset = ar_block:get_chunk_padded_offset(Cursor),
	IsChunkRecorded = ar_sync_record:is_recorded(PaddedEndOffset, ar_data_sync, StoreID),
	IsEntropyRecorded = ar_entropy_storage:is_entropy_recorded(
		PaddedEndOffset, TargetPacking, StoreID),
	%% Skip this offset if it's already packed to TargetPacking, or if it's not recorded
	%% at all.
	Skip = case {IsChunkRecorded, IsEntropyRecorded} of
		%% Chunk is missing and we haven't written entropy yet, so we still want to process
		%% the bucket and write entropy to it.
		{false, false} ->
			false;
		%% Chunk is missing but entropy has already been written, so we can skip.
		{false, true} ->
			true;
		%% Skip if chunk is recorded and already packed to TargetPacking
		{{true, TargetPacking}, _} ->
			true;
		%% Skip if entropy exists for an unpacked chunk as this indicates that
		%% 1. the chunks are small and therefore can't be packed
		%% 2. have already been processed and classified as `entropy_only`
		{{true, unpacked}, true} ->
			true;
		_ ->
			false
	end,
	ShouldRepack = (
		not Skip andalso
		FootprintStart =< ModuleEnd andalso
		FootprintEnd >= ModuleStart
	),
	case ShouldRepack of
		false ->
			Logs = [
				{cursor, Cursor},
				{padded_end_offset, PaddedEndOffset},
				{is_chunk_recorded, IsChunkRecorded},
				{is_entropy_recorded, IsEntropyRecorded},
				{skip, Skip}
			],
			{false, Logs};
		_ ->
			true
	end.

%% @doc Generates the set of entropy offsets that will be used during one iteration of
%% repack_footprint. Expects to be called with a BucketEndOffset. This is to avoid
%% unexpected filtering results when a BucketEndOffset is lower than a PickOffset or
%% an AbsoluteEndOffset.
%%
%% One footprint of entropy offsets is generated and then filtered such that:
%% - no offset is less than BucketEndOffset
%% - no offset is greater than ModuleEnd
%% - at most NumEntropyOffsets offsets are returned
footprint_offsets(BucketEndOffset, NumEntropyOffsets, ModuleEnd) ->
	%% sanity checks
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(BucketEndOffset),
	%% end sanity checks
	EntropyOffsets = ar_entropy_gen:entropy_offsets(BucketEndOffset, ModuleEnd),
	FilteredOffsets = lists:filter(
		fun(Offset) -> Offset >= BucketEndOffset end,
		EntropyOffsets),
	lists:sublist(FilteredOffsets, NumEntropyOffsets).

%% @doc Calculates and returns the highest chunk offset that can be read for this
%% repack footprint. This is the highest chunk offset that maps to the highest bucket in
%% the footprint.
footprint_end(FootprintOffsets, ModuleEnd, BatchSize) ->
	FirstOffset = lists:min(FootprintOffsets),
	LastOffset = lists:max(FootprintOffsets),
	%% The final read range of the footprint starts at the last entropy offset
	{_, LastOffsetRangeEnd, _} = get_read_range(LastOffset, ModuleEnd, BatchSize),
	%% makes sure all offsets are in the same entropy partition
	Partition = ar_replica_2_9:get_entropy_partition(FirstOffset),
	{_, EntropyPartitionEnd} = ar_replica_2_9:get_entropy_partition_range(Partition),
	min(LastOffsetRangeEnd, EntropyPartitionEnd).

%% @doc Request entropy generation for BucketEndOffset if the given packing is
%% replica_2_9 and we have not yet reached entropy_end. The generated entropies
%% arrive asynchronously as {entropy, ...} messages (see handle_info).
generate_repack_entropy(BucketEndOffset, {replica_2_9, _},
		#state{ entropy_end = EntropyEnd })
		when BucketEndOffset > EntropyEnd ->
	ok;
generate_repack_entropy(BucketEndOffset, {replica_2_9, RewardAddr}, #state{} = State) ->
	#state{ store_id = StoreID } = State,
	ar_entropy_gen:generate_entropies(StoreID, RewardAddr, BucketEndOffset, self());
generate_repack_entropy(_BucketEndOffset, _Packing, #state{}) ->
	%% Only generate entropy for the replica.2.9 packing format.
	ok.

%% @doc Seed the repack_chunk_map with one #repack_chunk{} per bucket end
%% offset in the footprint. Entropy fields start as not_set when the
%% corresponding packing is replica_2_9, or <<>> when no entropy is needed.
init_repack_chunk_map([], #state{} = State) ->
	State;
init_repack_chunk_map([EntropyOffset | EntropyOffsets], #state{} = State) ->
	#state{
		repack_chunk_map = Map,
		configured_packing = SourcePacking,
		target_packing = TargetPacking
	} = State,
	{_ReadRangeStart, _ReadRangeEnd, ReadRangeOffsets} = get_read_range(
		EntropyOffset, State),
	Map2 = lists:foldl(
		fun(BucketEndOffset, Acc) ->
			%% Offsets must not repeat across entropy footprints.
			false = maps:is_key(BucketEndOffset, Acc),
			SourceEntropy = case SourcePacking of
				{replica_2_9, _} ->
					not_set;
				_ ->
					%% Setting to <<>> indicates that source entropy is not needed.
					<<>>
			end,
			TargetEntropy = case TargetPacking of
				{replica_2_9, _} ->
					not_set;
				_ ->
					%% Setting to <<>> indicates that target entropy is not needed.
					<<>>
			end,
			RepackChunk = #repack_chunk{
				offsets = #chunk_offsets{
					bucket_end_offset = BucketEndOffset
				},
				target_packing = TargetPacking,
				source_entropy = SourceEntropy,
				target_entropy = TargetEntropy
			},
			maps:put(BucketEndOffset, RepackChunk, Acc)
		end,
		Map,
		ReadRangeOffsets),
	%% sanity checks
	true = maps:size(Map2) == maps:size(Map) + length(ReadRangeOffsets),
	%% end sanity checks
	init_repack_chunk_map(EntropyOffsets, State#state{ repack_chunk_map = Map2 }).

%% @doc Fold the chunks and metadata returned by a footprint read into the
%% repack_chunk_map, cranking each affected chunk's state machine.
add_range_to_repack_chunk_map(OffsetChunkMap, OffsetMetadataMap, #state{} = State) ->
	#state{
		store_id = StoreID,
		configured_packing = ConfiguredPacking,
		target_packing = TargetPacking
	} = State,
	maps:fold(
		fun(AbsoluteEndOffset, Metadata, Acc) ->
			#state{ repack_chunk_map = RepackChunkMap } = Acc,
			BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(AbsoluteEndOffset),
			RepackChunk = maps:get(BucketEndOffset, RepackChunkMap, not_found),
			RepackChunk2 = assemble_repack_chunk(
				RepackChunk, AbsoluteEndOffset, TargetPacking, Metadata,
				OffsetChunkMap, ConfiguredPacking, StoreID),
			case RepackChunk2 of
				not_found ->
					Acc;
				_ ->
					update_chunk_state(RepackChunk2, Acc)
			end
		end,
		State,
		OffsetMetadataMap).
assemble_repack_chunk( RepackChunk, AbsoluteEndOffset, TargetPacking, Metadata, OffsetChunkMap, ConfiguredPacking, StoreID) -> {ChunkDataKey, TXRoot, DataRoot, TXPath, RelativeOffset, ChunkSize} = Metadata, BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(AbsoluteEndOffset), PaddedEndOffset = ar_block:get_chunk_padded_offset(AbsoluteEndOffset), SourcePacking = get_chunk_packing(PaddedEndOffset, ConfiguredPacking, StoreID), ShouldRepack = ( ar_chunk_storage:is_storage_supported( PaddedEndOffset, ChunkSize, SourcePacking) orelse ar_chunk_storage:is_storage_supported( PaddedEndOffset, ChunkSize, TargetPacking) ), case {ShouldRepack, RepackChunk} of {true, not_found} -> log_error(chunk_not_found_in_map, [ {bucket_end_offset, ar_chunk_storage:get_chunk_bucket_end(AbsoluteEndOffset)}, {absolute_end_offset, AbsoluteEndOffset}, {padded_end_offset, ar_block:get_chunk_padded_offset(AbsoluteEndOffset)}, {chunk_size, ChunkSize} ]), not_found; {true, _} -> RepackChunk#repack_chunk{ source_packing = SourcePacking, offsets = #chunk_offsets{ absolute_offset = AbsoluteEndOffset, bucket_end_offset = BucketEndOffset, padded_end_offset = PaddedEndOffset, relative_offset = RelativeOffset }, metadata = #chunk_metadata{ chunk_data_key = ChunkDataKey, tx_root = TXRoot, data_root = DataRoot, tx_path = TXPath, chunk_size = ChunkSize }, chunk = maps:get(PaddedEndOffset, OffsetChunkMap, not_found) }; {false, _} -> not_found end. get_chunk_packing(PaddedEndOffset, ConfiguredPacking, StoreID) -> HasConfiguredPacking = ar_sync_record:is_recorded( PaddedEndOffset, ConfiguredPacking, ar_data_sync, StoreID), case HasConfiguredPacking of true -> ConfiguredPacking; _ -> case ar_sync_record:is_recorded(PaddedEndOffset, ar_data_sync, StoreID) of {true, Packing} -> Packing; _ -> not_found end end. %% @doc Mark any chunks that weren't found in either chunk_storage or the chunks_index. 
%% @doc Walk the read-range offsets and transition any chunk still in the
%% needs_chunk state to "missing" (chunk and metadata set to not_found) -
%% i.e. the footprint read found nothing for it in chunk_storage or the
%% chunks_index.
mark_missing_chunks([], #state{} = State) ->
	State;
mark_missing_chunks([BucketEndOffset | ReadRangeOffsets], #state{} = State) ->
	#state{ repack_chunk_map = Map } = State,
	RepackChunk = maps:get(BucketEndOffset, Map, not_found),
	State2 = case RepackChunk of
		not_found ->
			State;
		#repack_chunk{state = needs_chunk} ->
			%% If we're here and still in the needs_chunk state it means we weren't able
			%% to find the chunk in chunk_storage or the chunks_index.
			RepackChunk2 = RepackChunk#repack_chunk{
				chunk = not_found,
				metadata = not_found
			},
			update_chunk_state(RepackChunk2, State);
		_ ->
			State
	end,
	mark_missing_chunks(ReadRangeOffsets, State2).

%% @doc Store (or overwrite) RepackChunk in the repack_chunk_map, keyed by its
%% bucket end offset.
cache_repack_chunk(RepackChunk, #state{} = State) ->
	#repack_chunk{
		offsets = #chunk_offsets{ bucket_end_offset = BucketEndOffset }
	} = RepackChunk,
	State#state{
		repack_chunk_map = maps:put(BucketEndOffset, RepackChunk, State#state.repack_chunk_map)
	}.

%% @doc Drop the chunk from the map and, if the map is now empty, flush the
%% write queue and schedule the next footprint.
remove_repack_chunk(BucketEndOffset, #state{} = State) ->
	State2 = State#state{
		repack_chunk_map = maps:remove(BucketEndOffset, State#state.repack_chunk_map)
	},
	maybe_repack_next_footprint(State2).

%% @doc Add RepackChunk to the write queue; flush the queue to ar_repack_io
%% once it reaches write_batch_size.
enqueue_chunk_for_writing(RepackChunk, #state{} = State) ->
	#state{ target_packing = TargetPacking, store_id = StoreID } = State,
	#repack_chunk{
		offsets = #chunk_offsets{ bucket_end_offset = BucketEndOffset }
	} = RepackChunk,
	State2 = State#state{
		write_queue = gb_sets:add_element(
			{BucketEndOffset, RepackChunk}, State#state.write_queue)
	},
	case gb_sets:size(State2#state.write_queue) >= State2#state.write_batch_size of
		true ->
			count_states(queue, State2),
			ar_repack_io:write_queue(State2#state.write_queue, TargetPacking, StoreID),
			State2#state{ write_queue = gb_sets:new() };
		false ->
			State2
	end.

%% @doc Callback passed to ar_entropy_gen:map_entropies/8: attach the generated
%% entropy to the cached chunk as target or source entropy depending on which
%% packing the reward address matches, then crank the chunk's state.
entropy_generated(Entropy, BucketEndOffset, RewardAddr, #state{} = State) ->
	#state{
		repack_chunk_map = Map,
		configured_packing = SourcePacking,
		target_packing = TargetPacking
	} = State,
	case maps:get(BucketEndOffset, Map, not_found) of
		not_found ->
			%% This should never happen.
			log_error(entropy_generated_chunk_not_found, State, [
				{bucket_end_offset, BucketEndOffset}
			]),
			State;
		RepackChunk ->
			RepackChunk2 = case {replica_2_9, RewardAddr} of
				TargetPacking ->
					RepackChunk#repack_chunk{ target_entropy = Entropy };
				SourcePacking ->
					RepackChunk#repack_chunk{ source_entropy = Entropy }
			end,
			update_chunk_state(RepackChunk2, State)
	end.

%% @doc When the repack_chunk_map has fully drained, flush the remaining write
%% queue and kick off the next repack iteration.
maybe_repack_next_footprint(#state{} = State) ->
	#state{
		repack_chunk_map = Map,
		write_queue = WriteQueue,
		target_packing = TargetPacking,
		store_id = StoreID
	} = State,
	case maps:size(Map) of
		0 ->
			count_states(queue, State),
			ar_repack_io:write_queue(WriteQueue, TargetPacking, StoreID),
			State2 = State#state{ write_queue = gb_sets:new() },
			gen_server:cast(self(), repack),
			State2;
		_ ->
			State
	end.

%% @doc Fetch the chunk's data path (and possibly the chunk bytes) from the
%% chunk data DB. The stored value is either {Chunk, DataPath} or just
%% DataPath (when the chunk bytes were read from chunk_storage already).
read_chunk_and_data_path(RepackChunk, #state{} = State) ->
	#state{ store_id = StoreID } = State,
	#repack_chunk{
		metadata = Metadata,
		chunk = MaybeChunk
	} = RepackChunk,
	#chunk_metadata{ chunk_data_key = ChunkDataKey } = Metadata,
	case ar_data_sync:get_chunk_data(ChunkDataKey, StoreID) of
		not_found ->
			log_warning(chunk_not_found_in_chunk_data_db, RepackChunk, State, []),
			RepackChunk#repack_chunk{
				metadata = Metadata#chunk_metadata{ data_path = not_found } };
		{ok, V} ->
			%% Guard the decode: corrupt bytes in the chunk data DB would
			%% otherwise raise badarg and crash the gen_server. Mirrors the
			%% defensive decode in read_cursor/3. A plain `catch` would be
			%% wrong here since {'EXIT', Reason} could match {Chunk, DataPath}.
			try binary_to_term(V, [safe]) of
				{Chunk, DataPath} ->
					RepackChunk#repack_chunk{
						metadata = Metadata#chunk_metadata{ data_path = DataPath },
						chunk = Chunk
					};
				DataPath when MaybeChunk /= not_found ->
					RepackChunk#repack_chunk{
						metadata = Metadata#chunk_metadata{ data_path = DataPath },
						chunk = MaybeChunk
					};
				_ ->
					log_warning(chunk_not_found, RepackChunk, State, []),
					RepackChunk#repack_chunk{
						metadata = Metadata#chunk_metadata{ data_path = not_found } }
			catch _:_ ->
				%% Undecodable value: treat the same as a missing chunk.
				log_warning(chunk_not_found, RepackChunk, State, []),
				RepackChunk#repack_chunk{
					metadata = Metadata#chunk_metadata{ data_path = not_found } }
			end
	end.

%% @doc Crank the chunk's FSM; if the state changed, act on the transition,
%% otherwise just refresh the cache entry.
update_chunk_state(RepackChunk, #state{} = State) ->
	RepackChunk2 = ar_repack_fsm:crank_state(RepackChunk),
	case RepackChunk == RepackChunk2 of
		true ->
			%% Cache it anyways, just in case.
			cache_repack_chunk(RepackChunk2, State);
		false ->
			process_state_change(RepackChunk2, State)
	end.
%% @doc Perform the side effect demanded by the chunk's new FSM state
%% (invalidate, read data path, request repack/decipher/encipher, enqueue for
%% writing, or drop) and keep the cache consistent.
process_state_change(RepackChunk, #state{} = State) ->
	#state{
		store_id = StoreID,
		footprint_start = FootprintStart
	} = State,
	#repack_chunk{
		offsets = #chunk_offsets{
			bucket_end_offset = BucketEndOffset,
			absolute_offset = AbsoluteEndOffset
		},
		chunk = Chunk
	} = RepackChunk,
	case RepackChunk#repack_chunk.state of
		invalid ->
			%% Indices disagree with the data; purge the bad record and re-crank.
			ChunkSize = RepackChunk#repack_chunk.metadata#chunk_metadata.chunk_size,
			ar_data_sync:invalidate_bad_data_record(
				AbsoluteEndOffset, ChunkSize, StoreID, repack_found_stale_indices),
			RepackChunk2 = RepackChunk#repack_chunk{ chunk = invalid },
			State2 = cache_repack_chunk(RepackChunk2, State),
			update_chunk_state(RepackChunk2, State2);
		already_repacked ->
			%% Remove the chunk to free up memory. If we're in the already_repacked state
			%% it means the entropy hasn't been set yet. Once it's set we'll transition to
			%% the ignore state and the RepackChunk will be removed from the cache.
			RepackChunk2 = RepackChunk#repack_chunk{ chunk = <<>> },
			cache_repack_chunk(RepackChunk2, State);
		needs_data_path ->
			RepackChunk2 = read_chunk_and_data_path(RepackChunk, State),
			State2 = cache_repack_chunk(RepackChunk2, State),
			update_chunk_state(RepackChunk2, State2);
		needs_repack ->
			%% Include BatchStart so that we don't accidentally expire a chunk from some
			%% future batch. Unlikely, but not impossible.
			ChunkSize = RepackChunk#repack_chunk.metadata#chunk_metadata.chunk_size,
			TXRoot = RepackChunk#repack_chunk.metadata#chunk_metadata.tx_root,
			SourcePacking = RepackChunk#repack_chunk.source_packing,
			TargetPacking = RepackChunk#repack_chunk.target_packing,
			%% For replica_2_9 targets we first unpack to unpacked_padded and
			%% encipher with entropy later; otherwise repack directly.
			Packing = case TargetPacking of
				{replica_2_9, _} ->
					unpacked_padded;
				_ ->
					TargetPacking
			end,
			ar_packing_server:request_repack({BucketEndOffset, FootprintStart}, self(),
				{Packing, SourcePacking, Chunk, AbsoluteEndOffset, TXRoot, ChunkSize}),
			cache_repack_chunk(RepackChunk, State);
		needs_decipher ->
			%% We now have the unpacked_padded chunk and the entropy, proceed
			%% with enciphering and storing the chunk.
			SourceEntropy = RepackChunk#repack_chunk.source_entropy,
			ar_packing_server:request_decipher(
				{BucketEndOffset, FootprintStart}, self(), {Chunk, SourceEntropy}),
			cache_repack_chunk(RepackChunk, State);
		needs_encipher ->
			%% We now have the unpacked_padded chunk and the entropy, proceed
			%% with enciphering and storing the chunk.
			TargetEntropy = RepackChunk#repack_chunk.target_entropy,
			ar_packing_server:request_encipher(
				{BucketEndOffset, FootprintStart}, self(), {Chunk, TargetEntropy}),
			cache_repack_chunk(RepackChunk, State);
		write_entropy ->
			State2 = enqueue_chunk_for_writing(RepackChunk, State),
			remove_repack_chunk(BucketEndOffset, State2);
		write_chunk ->
			State2 = enqueue_chunk_for_writing(RepackChunk, State),
			remove_repack_chunk(BucketEndOffset, State2);
		ignore ->
			%% Chunk was already_repacked.
			remove_repack_chunk(BucketEndOffset, State);
		error ->
			%% This should never happen.
			log_error(invalid_repack_chunk_state, RepackChunk, State, []),
			remove_repack_chunk(BucketEndOffset, State);
		_ ->
			%% No action to take now, but since the chunk state changed, we need to update
			%% the cache.
			cache_repack_chunk(RepackChunk, State)
	end.

%% @doc Shared handler for expired encipher/decipher requests: drop the chunk
%% if it is still cached, otherwise it was already processed.
expire_exor_request(BucketEndOffset, State) ->
	#state{ repack_chunk_map = Map } = State,
	case maps:get(BucketEndOffset, Map, not_found) of
		not_found ->
			%% Chunk has already been processed.
			State;
		RepackChunk ->
			log_debug(exor_request_expired, RepackChunk, State, []),
			remove_repack_chunk(BucketEndOffset, State)
	end.

%% @doc Load the persisted repack cursor for StoreID. The cursor only counts
%% if it was stored for the same TargetPacking; otherwise fall back to the
%% module start (plus one, unless the module starts at 0).
read_cursor(StoreID, TargetPacking, ModuleStart) ->
	Filepath = ar_chunk_storage:get_filepath("repack_in_place_cursor2", StoreID),
	DefaultCursor = case ModuleStart of
		0 ->
			0;
		_ ->
			ModuleStart + 1
	end,
	case file:read_file(Filepath) of
		{ok, Bin} ->
			%% Defensive decode: a corrupt cursor file just resets the cursor.
			case catch binary_to_term(Bin, [safe]) of
				{Cursor, TargetPacking} when is_integer(Cursor) ->
					Cursor;
				_ ->
					DefaultCursor
			end;
		_ ->
			DefaultCursor
	end.

store_cursor(#state{} = State) ->
	store_cursor(State#state.next_cursor, State#state.store_id, State#state.target_packing).
store_cursor(none, _StoreID, _TargetPacking) -> ok; store_cursor(Cursor, StoreID, TargetPacking) -> Filepath = ar_chunk_storage:get_filepath("repack_in_place_cursor2", StoreID), file:write_file(Filepath, term_to_binary({Cursor, TargetPacking})). log_error(Event, #repack_chunk{} = RepackChunk, #state{} = State, ExtraLogs) -> ?LOG_ERROR(format_logs(Event, RepackChunk, State, ExtraLogs)). log_error(Event, #state{} = State, ExtraLogs) -> ?LOG_ERROR(format_logs(Event, State, ExtraLogs)). log_error(Event, ExtraLogs) -> ?LOG_ERROR(format_logs(Event, ExtraLogs)). log_warning(Event, #repack_chunk{} = RepackChunk, #state{} = State, ExtraLogs) -> ?LOG_WARNING(format_logs(Event, RepackChunk, State, ExtraLogs)). log_warning(Event, #state{} = State, ExtraLogs) -> ?LOG_WARNING(format_logs(Event, State, ExtraLogs)). log_info(Event, #state{} = State, ExtraLogs) -> ?LOG_INFO(format_logs(Event, State, ExtraLogs)). log_debug(Event, #repack_chunk{} = RepackChunk, #state{} = State, ExtraLogs) -> ?LOG_DEBUG(format_logs(Event, RepackChunk, State, ExtraLogs)). log_debug(Event, #state{} = State, ExtraLogs) -> ?LOG_DEBUG(format_logs(Event, State, ExtraLogs)). format_logs(Event, ExtraLogs) -> [ {event, Event}, {tags, [repack_in_place]}, {pid, self()} | ExtraLogs ]. format_logs(Event, #state{} = State, ExtraLogs) -> format_logs(Event, [ {store_id, State#state.store_id}, {next_cursor, State#state.next_cursor}, {footprint_start, State#state.footprint_start}, {footprint_end, State#state.footprint_end}, {module_start, State#state.module_start}, {module_end, State#state.module_end}, {repack_chunk_map, maps:size(State#state.repack_chunk_map)}, {write_queue, gb_sets:size(State#state.write_queue)} | ExtraLogs ]). 
%% @doc Extend the state log proplist with a summary of a single repack chunk.
%% Binaries (chunk data and entropies) are truncated via atom_or_binary/1 so log
%% lines stay short; packings are rendered with ar_serialize:encode_packing/2.
format_logs(Event, #repack_chunk{} = RepackChunk, #state{} = State, ExtraLogs) ->
	Offsets = RepackChunk#repack_chunk.offsets,
	Metadata = RepackChunk#repack_chunk.metadata,
	%% Metadata may be an atom placeholder (e.g. not_set / not_found) rather than a
	%% #chunk_metadata{} record; in that case log the placeholder as the size.
	ChunkSize =
		case Metadata of
			#chunk_metadata{} -> Metadata#chunk_metadata.chunk_size;
			_ -> Metadata
		end,
	ChunkFields = [
		{state, RepackChunk#repack_chunk.state},
		{bucket_end_offset, Offsets#chunk_offsets.bucket_end_offset},
		{absolute_offset, Offsets#chunk_offsets.absolute_offset},
		{padded_end_offset, Offsets#chunk_offsets.padded_end_offset},
		{chunk_size, ChunkSize},
		{chunk, atom_or_binary(RepackChunk#repack_chunk.chunk)},
		{source_packing,
			ar_serialize:encode_packing(RepackChunk#repack_chunk.source_packing, false)},
		{target_packing,
			ar_serialize:encode_packing(RepackChunk#repack_chunk.target_packing, false)},
		{source_entropy, atom_or_binary(RepackChunk#repack_chunk.source_entropy)},
		{target_entropy, atom_or_binary(RepackChunk#repack_chunk.target_entropy)}
	],
	format_logs(Event, State, ChunkFields ++ ExtraLogs).
%% @doc Report the distribution of repack-chunk FSM states for either the in-memory
%% cache (`cache`) or the pending write queue (`queue`). The distribution is logged
%% at debug level and published to the repack_chunk_states prometheus gauge, labeled
%% by storage module and source. Both clauses share the counting and publishing
%% helpers below; only the container being folded and the log event differ.
count_states(cache, #state{} = State) ->
	#state{ store_id = StoreID, repack_chunk_map = Map } = State,
	MapCount = maps:fold(
		fun(_BucketEndOffset, RepackChunk, Acc) ->
			increment_state_count(RepackChunk, Acc)
		end,
		#{},
		Map
	),
	log_debug(count_cache_states, State, [
		{cache_size, maps:size(Map)},
		{states, maps:to_list(MapCount)}
	]),
	set_state_gauges(StoreID, cache, MapCount);
count_states(queue, #state{} = State) ->
	#state{ store_id = StoreID, write_queue = WriteQueue } = State,
	%% Write queue elements are {BucketEndOffset, RepackChunk} pairs.
	WriteQueueCount = gb_sets:fold(
		fun({_BucketEndOffset, RepackChunk}, Acc) ->
			increment_state_count(RepackChunk, Acc)
		end,
		#{},
		WriteQueue
	),
	log_debug(count_write_queue_states, State, [
		{queue_size, gb_sets:size(WriteQueue)},
		{states, maps:to_list(WriteQueueCount)}
	]),
	set_state_gauges(StoreID, queue, WriteQueueCount).

%% @doc Increment the counter for the chunk's FSM state in the accumulator map.
increment_state_count(RepackChunk, Acc) ->
	maps:update_with(RepackChunk#repack_chunk.state, fun(Count) -> Count + 1 end, 1, Acc).

%% @doc Publish one gauge sample per observed FSM state. Source is `cache` or `queue`.
set_state_gauges(StoreID, Source, StateCounts) ->
	StoreIDLabel = ar_storage_module:label(StoreID),
	maps:fold(
		fun(ChunkState, Count, Acc) ->
			prometheus_gauge:set(repack_chunk_states,
				[StoreIDLabel, Source, ChunkState], Count),
			Acc
		end,
		ok,
		StateCounts
	).

%% @doc Pass atoms (e.g. not_set / not_found placeholders) through unchanged; truncate
%% binaries to at most their first 10 bytes so they are safe to log.
atom_or_binary(Atom) when is_atom(Atom) -> Atom;
atom_or_binary(Bin) when is_binary(Bin) -> binary:part(Bin, {0, min(10, byte_size(Bin))}).

%%%===================================================================
%%% Tests.
%%%===================================================================

cache_size_test_() ->
	ar_test_node:test_with_mocked_functions([
		{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 3 end}
	], fun test_cache_size/0, 30).
%% @doc Exercise calculate_num_entropy_offsets/2 with assorted cache-size /
%% batch-size combinations, including degenerate small inputs.
test_cache_size() ->
	?assertEqual(1, calculate_num_entropy_offsets(100, 400)),
	?assertEqual(2, calculate_num_entropy_offsets(100, 200)),
	?assertEqual(3, calculate_num_entropy_offsets(300, 400)),
	?assertEqual(3, calculate_num_entropy_offsets(3000, 400)),
	?assertEqual(3, calculate_num_entropy_offsets(3, 4)),
	?assertEqual(3, calculate_num_entropy_offsets(3, 1)),
	?assertEqual(2, calculate_num_entropy_offsets(5, 10)).

%% @doc Two fixtures for footprint_offsets/3: one with small mocked entropy
%% constants, one with the production constant values.
footprint_offsets_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 3 end},
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
		], fun test_footprint_offsets_small/0, 30),
		%% Run footprint_offsets tests using the production constant values.
		ar_test_node:test_with_mocked_functions([
			{ar_block, partition_size, fun() -> 3_600_000_000_000 end},
			{ar_block, strict_data_split_threshold, fun() -> 30_607_159_107_830 end},
			{ar_storage_module, get_overlap, fun(_) -> 104_857_600 end},
			{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 1024 end},
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 3_515_875_328 end}
		], fun test_footprint_offsets_large/0, 30)
	].
%% @doc footprint_offsets/3 with small mocked constants (3 sub-chunks per entropy,
%% 786432-byte sectors): checks truncation at the range end, at the entropy
%% partition boundary, and for reduced entropy-offset counts.
test_footprint_offsets_small() ->
	{Start0, End0} = ar_storage_module:module_range({ar_block:partition_size(), 0, unpacked}),
	{Start1, End1} = ar_storage_module:module_range({ar_block:partition_size(), 1, unpacked}),
	PaddedEnd0 = ar_block:get_chunk_padded_offset(End0),
	PaddedEnd1 = ar_block:get_chunk_padded_offset(End1),
	%% Sanity-check the mocked constants and derived module ranges first.
	?assertEqual(3, ar_block:get_sub_chunks_per_replica_2_9_entropy()),
	?assertEqual({0, 2262144}, {Start0, End0}),
	?assertEqual({2000000, 4262144}, {Start1, End1}),
	?assertEqual(2272864, PaddedEnd0),
	?assertEqual(4370016, PaddedEnd1),
	?assertEqual([262144, 1048576, 1835008], footprint_offsets(262144, 3, PaddedEnd0)),
	?assertEqual([262144], footprint_offsets(262144, 3, 1_000_000)),
	?assertEqual([262144, 1048576], footprint_offsets(262144, 3, 1_500_000)),
	?assertEqual([262144, 1048576], footprint_offsets(262144, 2, PaddedEnd0)),
	?assertEqual([262144], footprint_offsets(262144, 1, PaddedEnd0)),
	?assertEqual([786432, 1572864], footprint_offsets(786432, 3, PaddedEnd0)),
	?assertEqual([786432, 1572864], footprint_offsets(786432, 2, PaddedEnd0)),
	?assertEqual([786432], footprint_offsets(786432, 1, PaddedEnd0)),
	?assertEqual([1048576, 1835008], footprint_offsets(1048576, 3, PaddedEnd0)),
	?assertEqual([1572864], footprint_offsets(1572864, 3, PaddedEnd0)),
	?assertEqual([1572864], footprint_offsets(1572864, 2, PaddedEnd0)),
	?assertEqual([1572864], footprint_offsets(1572864, 1, PaddedEnd0)),
	?assertEqual([1835008], footprint_offsets(1835008, 3, PaddedEnd0)),
	?assertEqual([2097152], footprint_offsets(2097152, 3, PaddedEnd0)),
	%% all offsets should be limited to a single entropy partition
	?assertEqual([2097152], footprint_offsets(2097152, 3, PaddedEnd1)),
	?assertEqual([2359296, 3145728, 3932160], footprint_offsets(2359296, 3, PaddedEnd1)),
	?assertEqual([2621440, 3407872, 4194304], footprint_offsets(2621440, 3, PaddedEnd1)),
	?assertEqual([2883584, 3670016], footprint_offsets(2883584, 3, PaddedEnd1)),
	?assertEqual([3145728, 3932160], footprint_offsets(3145728, 3, PaddedEnd1)),
	?assertEqual([4194304], footprint_offsets(4194304, 3, PaddedEnd1)).

%% @doc run a series of footprint_offsets tests using the production constant values.
test_footprint_offsets_large() ->
	{Start0, End0} = ar_storage_module:module_range({ar_block:partition_size(), 0, unpacked}),
	{Start1, End1} = ar_storage_module:module_range({ar_block:partition_size(), 1, unpacked}),
	{Start30, End30} = ar_storage_module:module_range({ar_block:partition_size(), 30, unpacked}),
	PaddedEnd0 = ar_block:get_chunk_padded_offset(End0),
	PaddedEnd1 = ar_block:get_chunk_padded_offset(End1),
	PaddedEnd30 = ar_block:get_chunk_padded_offset(End30),
	%% Sanity-check the mocked production constants and derived ranges.
	?assertEqual(1024, ar_block:get_sub_chunks_per_replica_2_9_entropy()),
	?assertEqual(3515875328, ar_block:get_replica_2_9_entropy_sector_size()),
	?assertEqual({0, 3600104857600}, {Start0, End0}),
	?assertEqual({3600000000000, 7200104857600}, {Start1, End1}),
	?assertEqual({108000000000000, 111600104857600}, {Start30, End30}),
	?assertEqual(3600104857600, PaddedEnd0),
	?assertEqual(7200104857600, PaddedEnd1),
	?assertEqual(111600104939766, PaddedEnd30),
	TestCases = [
		%% {ExpectedFootprintOffsetsLength, End, BucketEndOffset}
		%% Partition 0 - special case as there is no lower partition
		{1024, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0)},
		{1024, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0 + ?DATA_CHUNK_SIZE)},
		{1024, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0 + (2 * ?DATA_CHUNK_SIZE))},
		{1023, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0 +
			(ar_block:get_replica_2_9_entropy_sector_size()))},
		{1023, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0 +
			(ar_block:get_replica_2_9_entropy_sector_size() + ?DATA_CHUNK_SIZE))},
		{1022, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0 +
			(2 * ar_block:get_replica_2_9_entropy_sector_size()))},
		{1022, PaddedEnd0, ar_chunk_storage:get_chunk_bucket_end(Start0 +
			(2 * ar_block:get_replica_2_9_entropy_sector_size() + ?DATA_CHUNK_SIZE))},
		%% Partition 1 - before the strict data split threshold
		{1, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1)},
		{1, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1 + ?DATA_CHUNK_SIZE)},
		{1024, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1 + (2 * ?DATA_CHUNK_SIZE))},
		{1023, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1 +
			(ar_block:get_replica_2_9_entropy_sector_size()))},
		{1023, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1 +
			(ar_block:get_replica_2_9_entropy_sector_size() + ?DATA_CHUNK_SIZE))},
		{1022, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1 +
			(2 * ar_block:get_replica_2_9_entropy_sector_size()))},
		{1022, PaddedEnd1, ar_chunk_storage:get_chunk_bucket_end(Start1 +
			(2 * ar_block:get_replica_2_9_entropy_sector_size() + ?DATA_CHUNK_SIZE))},
		%% Partition 30 - after the strict data split threshold
		{1, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30)},
		{1024, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30 + ?DATA_CHUNK_SIZE)},
		{1024, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30 + (2 * ?DATA_CHUNK_SIZE))},
		{1023, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30 +
			(ar_block:get_replica_2_9_entropy_sector_size()))},
		{1023, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30 +
			(ar_block:get_replica_2_9_entropy_sector_size() + ?DATA_CHUNK_SIZE))},
		{1022, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30 +
			(2 * ar_block:get_replica_2_9_entropy_sector_size()))},
		{1022, PaddedEnd30, ar_chunk_storage:get_chunk_bucket_end(Start30 +
			(2 * ar_block:get_replica_2_9_entropy_sector_size() + ?DATA_CHUNK_SIZE))}
	],
	lists:foreach(
		fun({ExpectedLength, End, BucketEndOffset}) ->
			%% Only the length of the footprint matters here; the failure message
			%% identifies the offending offset.
			?assertEqual(ExpectedLength,
				length(footprint_offsets(BucketEndOffset, 1024, End)),
				lists:flatten(io_lib:format(
					"Offset: ~p, Expected Length: ~p", [BucketEndOffset, ExpectedLength])))
		end,
		TestCases
	),
	ok.
%% @doc Fixture for footprint_end/3 with small mocked entropy constants.
footprint_end_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 3 end},
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
		], fun test_footprint_end_small/0, 30)
	].

%% @doc footprint_end/3: the footprint end grows with the batch size but is capped
%% at the entropy partition end.
test_footprint_end_small() ->
	{Start0, End0} = ar_storage_module:module_range({ar_block:partition_size(), 0, unpacked}),
	{Start1, End1} = ar_storage_module:module_range({ar_block:partition_size(), 1, unpacked}),
	PaddedEnd0 = ar_block:get_chunk_padded_offset(End0),
	PaddedEnd1 = ar_block:get_chunk_padded_offset(End1),
	%% Sanity-check the mocked constants and derived ranges first.
	?assertEqual(3, ar_block:get_sub_chunks_per_replica_2_9_entropy()),
	?assertEqual({0, 2262144}, {Start0, End0}),
	?assertEqual({2000000, 4262144}, {Start1, End1}),
	?assertEqual(2272864, PaddedEnd0),
	?assertEqual(4370016, PaddedEnd1),
	?assertEqual({0, 2272864}, ar_replica_2_9:get_entropy_partition_range(0)),
	?assertEqual(2010720, footprint_end([262144, 1048576, 1835008], PaddedEnd0, 1)),
	?assertEqual(2272864, footprint_end([262144, 1048576, 1835008], PaddedEnd0, 2)),
	?assertEqual(2272864, footprint_end([262144, 1048576, 1835008], PaddedEnd0, 3)),
	?assertEqual(2272864, footprint_end([262144, 1048576, 1835008], PaddedEnd1, 4)),
	ok.

%% @doc Fixtures for assemble_repack_chunk/7 under different mocked sync records.
assemble_repack_chunk_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_sync_record, is_recorded, fun(_, _, _) -> {true, unpacked} end}
		], fun test_assemble_repack_chunk/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_sync_record, is_recorded, fun(_, _, _) -> {true, unpacked} end}
		], fun test_assemble_repack_chunk_too_small_unpacked/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_sync_record, is_recorded, fun(_, _, _) -> {true, {spora_2_6, <<"addr">>}} end}
		], fun test_assemble_repack_chunk_too_small_packed/0, 30)
	].
%% @doc assemble_repack_chunk/7 for a full-size chunk, before and after the strict
%% data split threshold, with both unpacked and replica_2_9 target packings.
test_assemble_repack_chunk() ->
	Addr = <<"addr">>,
	StoreID = "storage_module_100_unpacked",
	ChunkDataKey = <<"chunk_data_key">>,
	TXRoot = <<"tx_root">>,
	DataRoot = <<"data_root">>,
	TXPath = <<"tx_path">>,
	RelativeOffset = 1000,
	ChunkSize = ?DATA_CHUNK_SIZE,
	Chunk = crypto:strong_rand_bytes(ChunkSize),
	Metadata = {ChunkDataKey, TXRoot, DataRoot, TXPath, RelativeOffset, ChunkSize},
	%% Error - BucketEndOffset hasn't been initialized
	?assertEqual(not_found, assemble_repack_chunk(not_found,
		100, {replica_2_9, Addr}, Metadata, #{}, unpacked, StoreID)),
	ExpectedRepackedChunk = #repack_chunk{
		source_packing = unpacked,
		metadata = #chunk_metadata{
			chunk_data_key = ChunkDataKey,
			tx_root = TXRoot,
			data_root = DataRoot,
			tx_path = TXPath,
			chunk_size = ChunkSize
		},
		chunk = Chunk
	},
	%% Chunk before the strict data split threshold
	%% unpacked -> unpacked
	ExpectedOffsets1 = #chunk_offsets{
		absolute_offset = 100,
		bucket_end_offset = 262144,
		padded_end_offset = 100,
		relative_offset = RelativeOffset
	},
	?assertEqual(
		ExpectedRepackedChunk#repack_chunk{
			offsets = ExpectedOffsets1,
			target_packing = unpacked
		},
		assemble_repack_chunk(
			#repack_chunk{ target_packing = unpacked },
			100, unpacked, Metadata, #{ 100 => Chunk }, unpacked, StoreID)
	),
	%% unpacked -> packed
	?assertEqual(
		ExpectedRepackedChunk#repack_chunk{
			offsets = ExpectedOffsets1,
			target_packing = {replica_2_9, Addr}
		},
		assemble_repack_chunk(
			#repack_chunk{ target_packing = {replica_2_9, Addr} },
			100, {replica_2_9, Addr}, Metadata, #{ 100 => Chunk }, unpacked, StoreID)
	),
	%% Chunk after the strict data split threshold
	%% unpacked -> unpacked
	ExpectedOffsets2 = #chunk_offsets{
		absolute_offset = 10_000_000,
		bucket_end_offset = 10_223_616,
		padded_end_offset = 10_223_616,
		relative_offset = RelativeOffset
	},
	?assertEqual(
		ExpectedRepackedChunk#repack_chunk{
			offsets = ExpectedOffsets2,
			target_packing = unpacked
		},
		assemble_repack_chunk(
			#repack_chunk{ target_packing = unpacked },
			10_000_000, unpacked, Metadata, #{ 10_223_616 => Chunk }, unpacked, StoreID)
	),
	%% unpacked -> packed
	?assertEqual(
		ExpectedRepackedChunk#repack_chunk{
			offsets = ExpectedOffsets2,
			target_packing = {replica_2_9, Addr}
		},
		assemble_repack_chunk(
			#repack_chunk{ target_packing = {replica_2_9, Addr} },
			10_000_000, {replica_2_9, Addr}, Metadata, #{ 10_223_616 => Chunk },
			unpacked, StoreID)
	),
	ok.

%% @doc assemble_repack_chunk/7 for a too-small *unpacked* chunk: rejected
%% everywhere except after the strict data split threshold when repacking to
%% replica_2_9, where the bucket is still assembled (with chunk = not_found).
test_assemble_repack_chunk_too_small_unpacked() ->
	Addr = <<"addr">>,
	StoreID = "storage_module_100_unpacked",
	ChunkDataKey = <<"chunk_data_key">>,
	TXRoot = <<"tx_root">>,
	DataRoot = <<"data_root">>,
	TXPath = <<"tx_path">>,
	RelativeOffset = 1000,
	ChunkSize = 100,
	Metadata = {ChunkDataKey, TXRoot, DataRoot, TXPath, RelativeOffset, ChunkSize},
	%% Small chunk before the strict data split threshold
	%% unpacked -> unpacked
	?assertEqual(not_found, assemble_repack_chunk(
		#repack_chunk{ target_packing = unpacked },
		100, unpacked, Metadata, #{}, unpacked, StoreID)),
	%% unpacked -> packed
	?assertEqual(not_found, assemble_repack_chunk(
		#repack_chunk{ target_packing = {replica_2_9, Addr} },
		100, {replica_2_9, Addr}, Metadata, #{}, unpacked, StoreID)),
	%% Small chunk after the strict data split threshold
	%% unpacked -> unpacked
	?assertEqual(not_found, assemble_repack_chunk(
		#repack_chunk{ target_packing = unpacked },
		10_000_000, unpacked, Metadata, #{}, unpacked, StoreID)),
	%% unpacked -> packed
	ExpectedRepackedChunk = #repack_chunk{
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr},
		metadata = #chunk_metadata{
			chunk_data_key = ChunkDataKey,
			tx_root = TXRoot,
			data_root = DataRoot,
			tx_path = TXPath,
			chunk_size = ChunkSize
		},
		offsets = #chunk_offsets{
			absolute_offset = 10_000_000,
			bucket_end_offset = 10_223_616,
			padded_end_offset = 10_223_616,
			relative_offset = RelativeOffset
		},
		chunk = not_found
	},
	?assertEqual(ExpectedRepackedChunk, assemble_repack_chunk(
		#repack_chunk{ target_packing = {replica_2_9, Addr} },
		10_000_000, {replica_2_9, Addr}, Metadata, #{}, unpacked, StoreID)),
	ok.
%% @doc assemble_repack_chunk/7 for a too-small *packed* (spora_2_6) chunk:
%% rejected before the strict data split threshold, assembled after it (with
%% chunk = not_found) for both unpacked and replica_2_9 targets.
test_assemble_repack_chunk_too_small_packed() ->
	Addr = <<"addr">>,
	StoreID = "storage_module_100_unpacked",
	ChunkDataKey = <<"chunk_data_key">>,
	TXRoot = <<"tx_root">>,
	DataRoot = <<"data_root">>,
	TXPath = <<"tx_path">>,
	RelativeOffset = 1000,
	ChunkSize = 100,
	Metadata = {ChunkDataKey, TXRoot, DataRoot, TXPath, RelativeOffset, ChunkSize},
	%% Small chunk before the strict data split threshold
	%% packed -> unpacked
	?assertEqual(not_found, assemble_repack_chunk(
		#repack_chunk{ target_packing = unpacked },
		100, unpacked, Metadata, #{}, {spora_2_6, <<"addr">>}, StoreID)),
	%% packed -> packed
	?assertEqual(not_found, assemble_repack_chunk(
		#repack_chunk{ target_packing = {replica_2_9, Addr} },
		100, {replica_2_9, Addr}, Metadata, #{}, {spora_2_6, <<"addr">>}, StoreID)),
	%% Small chunk after the strict data split threshold
	ExpectedRepackedChunk = #repack_chunk{
		source_packing = {spora_2_6, Addr},
		metadata = #chunk_metadata{
			chunk_data_key = ChunkDataKey,
			tx_root = TXRoot,
			data_root = DataRoot,
			tx_path = TXPath,
			chunk_size = ChunkSize
		},
		offsets = #chunk_offsets{
			absolute_offset = 10_000_000,
			bucket_end_offset = 10_223_616,
			padded_end_offset = 10_223_616,
			relative_offset = RelativeOffset
		},
		chunk = not_found
	},
	%% packed -> unpacked
	?assertEqual(ExpectedRepackedChunk#repack_chunk{target_packing = unpacked},
		assemble_repack_chunk(
			#repack_chunk{ target_packing = unpacked },
			10_000_000, unpacked, Metadata, #{}, {spora_2_6, <<"addr">>}, StoreID)),
	%% packed -> packed
	?assertEqual(ExpectedRepackedChunk#repack_chunk{target_packing = {replica_2_9, Addr}},
		assemble_repack_chunk(
			#repack_chunk{ target_packing = {replica_2_9, Addr} },
			10_000_000, {replica_2_9, Addr}, Metadata, #{}, {spora_2_6, <<"addr">>},
			StoreID)),
	ok.
%% @doc Fixtures for should_repack/4 covering every combination of chunk-recorded /
%% entropy-recorded returned by the mocked ar_sync_record / ar_entropy_storage.
should_repack_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> false end},
			{ar_entropy_storage, is_entropy_recorded, fun(_, _, _) -> false end}
		], fun test_should_repack_no_chunk_no_entropy/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end},
			{ar_sync_record, is_recorded,
				fun(_, _, _) -> {true, {replica_2_9, <<"addr">>}} end},
			{ar_entropy_storage, is_entropy_recorded, fun(_, _, _) -> true end}
		], fun test_should_repack_chunk_and_entropy/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> false end},
			{ar_entropy_storage, is_entropy_recorded, fun(_, _, _) -> true end}
		], fun test_should_repack_entropy_but_no_chunk/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> {true, unpacked} end},
			{ar_entropy_storage, is_entropy_recorded, fun(_, _, _) -> true end}
		], fun test_should_repack_unpacked_chunk_and_entropy/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> {true, unpacked} end},
			{ar_entropy_storage, is_entropy_recorded, fun(_, _, _) -> false end}
		], fun test_should_repack_unpacked_chunk_no_entropy/0, 30)
	].

test_should_repack_no_chunk_no_entropy() ->
	%% No chunk exists to repack however we still want to process the bucket and write
	%% entropy to it.
	?assertEqual(true, should_repack(600_000, 200_000, 300_000, #state{
		module_start = 100_000, module_end = 2_000_000
	})),
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, false}, {is_entropy_recorded, false}, {skip, false}
		]},
		should_repack(600_000, 0, 50_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})),
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, false}, {is_entropy_recorded, false}, {skip, false}
		]},
		should_repack(600_000, 2_000_001, 3_000_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})),
	?assertEqual(true, should_repack(750_000, 200_000, 300_000, #state{
		module_start = 100_000, module_end = 2_000_000
	})).

test_should_repack_chunk_and_entropy() ->
	%% Chunk is already packed to the target packing
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, {replica_2_9, <<"addr">>}}},
			{is_entropy_recorded, true}, {skip, true}
		]},
		should_repack(600_000, 200_000, 300_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})),
	%% Chunk exists and needs repacking - but footprint start is beyond the end of the module
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, {replica_2_9, <<"addr">>}}},
			{is_entropy_recorded, true}, {skip, false}
		]},
		should_repack(600_000, 2_000_001, 3_000_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr2">>}
		})),
	%% Chunk exists, needs repacking and falls within the module.
	?assertEqual(
		true,
		should_repack(600_000, 200_000, 300_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr2">>}
		})),
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, {replica_2_9, <<"addr">>}}},
			{is_entropy_recorded, true}, {skip, false}
		]},
		should_repack(600_000, 0, 50_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr2">>}
		})),
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, {replica_2_9, <<"addr">>}}},
			{is_entropy_recorded, true}, {skip, false}
		]},
		should_repack(600_000, 2_000_001, 3_000_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr2">>}
		})).

test_should_repack_entropy_but_no_chunk() ->
	%% Entropy exists which means this bucket has been processed, but there is no chunk
	%% to repack.
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, false}, {is_entropy_recorded, true}, {skip, true}
		]},
		should_repack(600_000, 200_000, 300_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})).

test_should_repack_unpacked_chunk_and_entropy() ->
	%% Unpacked chunk and entropy exist, which means:
	%% 1. this bucket has small chunks which can not be written to chunk storage.
	%% 2. this bucket has already been processed so we can skip
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, unpacked}}, {is_entropy_recorded, true},
			{skip, true}
		]},
		should_repack(600_000, 200_000, 300_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})).

test_should_repack_unpacked_chunk_no_entropy() ->
	%% Chunk is already packed to the target packing
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, unpacked}}, {is_entropy_recorded, false},
			{skip, true}
		]},
		should_repack(600_000, 200_000, 300_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = unpacked
		})),
	%% Chunk exists, needs repacking and falls within the module.
	?assertEqual(
		true,
		should_repack(600_000, 200_000, 300_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})),
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, unpacked}}, {is_entropy_recorded, false},
			{skip, false}
		]},
		should_repack(600_000, 0, 50_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})),
	?assertEqual({false, [
			{cursor, 600_000}, {padded_end_offset, 600_000},
			{is_chunk_recorded, {true, unpacked}}, {is_entropy_recorded, false},
			{skip, false}
		]},
		should_repack(600_000, 2_000_001, 3_000_000, #state{
			module_start = 100_000, module_end = 2_000_000,
			target_packing = {replica_2_9, <<"addr">>}
		})).

%% @doc Fixtures for init_repack_chunk_map/2 using the mainnet packing mocks.
init_repack_chunk_map_test_() ->
	[
		ar_test_node:test_with_mocked_functions(ar_test_node:mainnet_packing_mocks(),
			fun test_init_repack_chunk_map_a/0, 30),
		ar_test_node:test_with_mocked_functions(ar_test_node:mainnet_packing_mocks(),
			fun test_init_repack_chunk_map_b/0, 30)
	].

%% @doc This tests a specific off-by-one error that occurred in the footprint_end calculation.
%% Previously there was an ar_entropy_gen:footprint_end function which was incorrect. The
%% fix removes the ar_entropy_gen:footprint_end function and has everyone use
%% ar_replica_2_9:get_entropy_partition_range instead, as that one does the correct end of
%% range calculation.
%%
%% Keeping this test as it's an easy way to assert no future regressions in this logic.
%% @doc Regression test (see comment above): the chunk map built from a mainnet-sized
%% footprint must contain exactly the expected number of buckets.
test_init_repack_chunk_map_a() ->
	Cursor = 18003250911837,
	ModuleStart = 18000000000000,
	ModuleEnd = 21600104857600,
	BatchSize = 100,
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(Cursor),
	BucketStartOffset = ar_chunk_storage:get_chunk_bucket_start(Cursor),
	FootprintOffsets = footprint_offsets(BucketEndOffset, 1024, ModuleEnd),
	FootprintStart = BucketStartOffset+1,
	FootprintEnd = footprint_end(FootprintOffsets, ModuleEnd, BatchSize),
	State = #state{
		module_start = ModuleStart,
		module_end = ModuleEnd,
		footprint_start = FootprintStart,
		footprint_end = FootprintEnd,
		read_batch_size = BatchSize,
		repack_chunk_map = #{},
		target_packing = {replica_2_9, <<"addr">>}
	},
	State2 = init_repack_chunk_map(FootprintOffsets, State),
	?assertEqual(102334, maps:size(State2#state.repack_chunk_map)),
	ok.

%% @doc Regression test near the end of a module: the highest bucket in the chunk map
%% and the highest offset of the read range must both line up with the footprint end.
test_init_repack_chunk_map_b() ->
	Cursor = 21564833002875,
	NumEntropyOffsets = 1024,
	ModuleStart = 18000000000000,
	ModuleEnd = 21600104857600,
	BatchSize = 100,
	BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(Cursor),
	BucketStartOffset = ar_chunk_storage:get_chunk_bucket_start(Cursor),
	FootprintOffsets = footprint_offsets(BucketEndOffset, NumEntropyOffsets, ModuleEnd),
	FootprintStart = BucketStartOffset+1,
	FootprintEnd = footprint_end(FootprintOffsets, ModuleEnd, BatchSize),
	{_, EntropyEnd, _} = get_read_range(BucketEndOffset, FootprintEnd, BatchSize),
	EntropyEnd2 = ar_chunk_storage:get_chunk_bucket_end(EntropyEnd),
	{_ReadRangeStart, _ReadRangeEnd, ReadRangeOffsets} = get_read_range(
		BucketEndOffset, FootprintEnd, BatchSize),
	State = #state{
		module_start = ModuleStart,
		module_end = ModuleEnd,
		footprint_start = FootprintStart,
		footprint_end = FootprintEnd,
		read_batch_size = BatchSize,
		repack_chunk_map = #{},
		target_packing = {replica_2_9, <<"addr">>}
	},
	State2 = init_repack_chunk_map(FootprintOffsets, State),
	MaxChunkMap = lists:max(maps:keys(State2#state.repack_chunk_map)),
	?assertEqual(ar_chunk_storage:get_chunk_bucket_end(FootprintEnd), MaxChunkMap),
	?assertEqual(EntropyEnd2, lists:max(ReadRangeOffsets)),
	ok.

%% @doc Fixtures for get_read_range/3 before and after the strict data split threshold.
get_read_range_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, strict_data_split_threshold, fun() -> 5_000_000 end}
		], fun test_get_read_range_before_strict/0, 30),
		ar_test_node:test_with_mocked_functions([
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
		], fun test_get_read_range_after_strict/0, 30)
	].

%% @doc get_read_range/3 where the whole range lies before the strict data split
%% threshold: verify the range is clipped by batch size, sector end, the caller's
%% range end, and the entropy partition end.
test_get_read_range_before_strict() ->
	?assertEqual({2359296, 4456447}, ar_replica_2_9:get_entropy_partition_range(1)),
	?assertEqual(786432, ar_block:get_replica_2_9_entropy_sector_size()),
	%% no limit
	?assertEqual(
		{2097151, 2883583, [2097152, 2359296, 2621440]},
		get_read_range(2097152, 4_000_000, 3)
	),
	%% sector limit
	?assertEqual(
		{2359295, 3145727, [2359296, 2621440, 2883584]},
		get_read_range(2359296, 4_000_000, 4)
	),
	?assertEqual(
		{3407871, 3932159, [3407872, 3670016]},
		get_read_range(3407872, 4_000_000, 3)
	),
	%% range end limit
	?assertEqual(
		{2359295, 2700000, [2359296, 2621440]},
		get_read_range(2359296, 2_700_000, 4)
	),
	%% partition end limit
	?assertEqual(
		{3932159, 4456447, [3932160, 4194304]},
		get_read_range(3932160, 6_000_000, 3)
	),
	ok.
%% @doc get_read_range/3 where the range lies after the strict data split threshold:
%% same clipping cases as the before-strict test, with padded offsets.
test_get_read_range_after_strict() ->
	?assertEqual({2272865, 4370016}, ar_replica_2_9:get_entropy_partition_range(1)),
	?assertEqual(786432, ar_block:get_replica_2_9_entropy_sector_size()),
	%% no limit
	?assertEqual(
		{2272864, 3059296, [2359296, 2621440, 2883584]},
		get_read_range(2359296, 4_000_000, 3)
	),
	%% sector limit
	?assertEqual(
		{2272864, 3059296, [2359296, 2621440, 2883584]},
		get_read_range(2359296, 4_000_000, 4)
	),
	?assertEqual(
		{3321440, 3845728, [3407872, 3670016]},
		get_read_range(3407872, 4_000_000, 3)
	),
	%% range end limit
	?assertEqual(
		{2272864, 2700000, [2359296, 2621440]},
		get_read_range(2359296, 2_700_000, 4)
	),
	%% partition end limit
	?assertEqual(
		{3845728, 4370016, [3932160, 4194304]},
		get_read_range(3932160, 6_000_000, 3)
	),
	ok.



================================================
FILE: apps/arweave/src/ar_repack_fsm.erl
================================================
-module(ar_repack_fsm).

-export([crank_state/1]).

-include("ar.hrl").
-include("ar_repack.hrl").

-include_lib("eunit/include/eunit.hrl").

-moduledoc """
Maintain a finite state machine (FSM) to track the state each chunk passes through as it
is repacked.

State Transition Diagram:

needs_chunk
    |
    +----> invalid ----------> entropy_only
    |
    +----> entropy_only
    |          |
    |          +----> write_entropy (terminal)
    |          |
    |          +----> ignore
    |
    +----> already_repacked -> ignore (terminal)
    |
    +----> needs_data_path --> has_chunk
    |
    +----> has_chunk
               |
               +----> write_chunk (terminal)
               |
               +----> needs_repack ---------------------------> has_chunk
               |
               +----> needs_source_entropy -> needs_decipher -> has_chunk
               |
               +----> needs_target_entropy -> needs_encipher -> has_chunk

Start State: needs_chunk
Terminal States: write_chunk, write_entropy, ignore

State Descriptions:
- needs_chunk: Initial state, waiting to read chunk data and metadata
- entropy_only: Chunk is too small or not found, only entropy will be recorded
- invalid: Chunk is corrupt or inconsistent, will be invalidated
- already_repacked: Chunk is already in target format
- needs_data_path: Chunk not found on disk, checking chunk data db
- has_chunk: Chunk has been read, decide what to do next
- needs_repack: Repack between non-replica_2_9 formats
- needs_source_entropy: Waiting for source entropy to be calculated
- needs_decipher: Waiting for chunk to be deciphered from replica_2_9 to unpacked_padded
- needs_target_entropy: Waiting for target entropy to be calculated
- needs_encipher: Waiting for chunk to be enciphered from unpacked_padded to replica_2_9
- write_chunk: Terminal state, chunk will be written
- write_entropy: Terminal state, only entropy will be written
- ignore: Terminal state, no action needed
""".

%% @doc: Repeatedly call next_state until the state no longer changes.
-spec crank_state(#repack_chunk{}) -> #repack_chunk{}.
crank_state(RepackChunk) ->
	crank_state(RepackChunk, next_state(RepackChunk)).

%% Fixed-point iteration: the first clause matches when next_state/1 returned the
%% same record (no transition happened), which terminates the recursion.
crank_state(RepackChunk, RepackChunk) ->
	%% State did not change, return the final state
	RepackChunk;
crank_state(_OldRepackChunk, NewRepackChunk) ->
	%% State has changed, continue cranking
	crank_state(NewRepackChunk, next_state(NewRepackChunk)).
%% ---------------------------------------------------------------------------
%% State: needs_chunk
%% ---------------------------------------------------------------------------
%% next_state/1 performs exactly one transition of the repack FSM described in
%% the moduledoc. It only ever rewrites #repack_chunk.state; all other fields
%% are treated as read-only inputs. Clause order matters: the more specific
%% needs_chunk clauses must precede the general one.
next_state(
		#repack_chunk{
			state = needs_chunk,
			chunk = not_set,
			metadata = not_set
		} = RepackChunk) ->
	%% Nothing has been read yet; stay in needs_chunk.
	RepackChunk;
next_state(
		#repack_chunk{
			state = needs_chunk,
			chunk = not_found,
			metadata = not_found
		} = RepackChunk) ->
	%% Chunk is not recorded in any index.
	NextState = entropy_only,
	RepackChunk#repack_chunk{state = NextState};
next_state(#repack_chunk{
		state = needs_chunk,
		metadata = Metadata } = RepackChunk)
		when Metadata == not_set orelse Metadata == not_found ->
	%% Metadata can not be empty unless chunk is also empty.
	log_error(invalid_repack_fsm_transition, RepackChunk, []),
	RepackChunk#repack_chunk{state = error};
next_state(#repack_chunk{state = needs_chunk} = RepackChunk) ->
	#repack_chunk{
		offsets = Offsets,
		metadata = Metadata,
		chunk = Chunk,
		source_packing = SourcePacking,
		target_packing = TargetPacking
	} = RepackChunk,
	#chunk_metadata{
		chunk_size = ChunkSize
	} = Metadata,
	#chunk_offsets{
		absolute_offset = AbsoluteEndOffset
	} = Offsets,
	%% Small chunks below the strict data split threshold cannot be repacked;
	%% only their entropy will be recorded.
	IsTooSmall = (
		ChunkSize /= ?DATA_CHUNK_SIZE andalso
		AbsoluteEndOffset =< ar_block:strict_data_split_threshold()
	),
	IsStorageSupported = ar_chunk_storage:is_storage_supported(
		AbsoluteEndOffset, ChunkSize, TargetPacking),
	%% NOTE: TargetPacking is already bound, so the {_, TargetPacking, _, _}
	%% pattern below matches only when SourcePacking == TargetPacking.
	NextState = case {IsTooSmall, SourcePacking, Chunk, IsStorageSupported} of
		{true, _, _, _} ->
			entropy_only;
		{_, not_found, _, _} ->
			%% This offset exists in some of the chunk indices, the chunk is not recorded
			%% in the sync record. This can happen if there was some corruption at some
			%% point in the past. We'll clean out the bad indices, and then record
			%% the entropy.
			invalid;
		{_, TargetPacking, _, _} ->
			already_repacked;
		{_, _, not_found, _} ->
			%% Chunk doesn't exist on disk, try chunk data db.
			needs_data_path;
		{_, _, _, false} ->
			%% We are going to move this chunk to RocksDB after repacking so
			%% we read its DataPath here to pass it later on to store_chunk.
			needs_data_path;
		_ ->
			has_chunk
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: needs_data_path
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{
		state = needs_data_path,
		metadata = #chunk_metadata{data_path = not_set}} = RepackChunk) ->
	%% Still waiting on data path.
	RepackChunk;
next_state(#repack_chunk{state = needs_data_path} = RepackChunk) ->
	#repack_chunk{
		chunk = Chunk,
		metadata = Metadata
	} = RepackChunk,
	#chunk_metadata{
		data_path = DataPath
	} = Metadata,
	IsInvalid = (
		Chunk == not_found orelse
		DataPath == not_found
	),
	NextState = case IsInvalid of
		true ->
			%% This offset exists in some of the chunk indices and sync records, but there's
			%% either no chunk data or no data_path. This can happen if there was some
			%% corruption at some point in the past. We'll clean out the bad indices, and
			%% then record the entropy.
			invalid;
		_ ->
			has_chunk
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: has_chunk
%%
%% has_chunk is an intermediate state to avoid duplicating state transition
%% logic across both the needs_chunk and needs_data_path states. Once a chunk
%% and optionally data_path have been read, we'll transition to has_chunk and
%% then from there enter the repack logic.
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = has_chunk} = RepackChunk) ->
	#repack_chunk{
		source_packing = SourcePacking,
		target_packing = TargetPacking
	} = RepackChunk,
	NextState = case {SourcePacking, TargetPacking} of
		_ when SourcePacking == TargetPacking ->
			write_chunk;
		{{replica_2_9, _}, _} ->
			%% Source is replica_2_9, so we need its entropy first before we can unpack it.
			needs_source_entropy;
		{unpacked_padded, {replica_2_9, _}} ->
			%% When source_packing is unpacked_padded it means that the chunk was originally
			%% some other format, but has now been repacked to unpacked_padded and is ready
			%% to be enciphered to the target_packing replica_2_9 format. Before we can do
			%% that we need to wait for the target entropy to be generated.
			needs_target_entropy;
		_ ->
			%% Source packing is either unpacked or spora_2_6, so the next step is to repack
			%% it. Whether we repack to unpacked_padded or to some other format depends on
			%% the current source and target packing. The logic to determine what to repack
			%% to is handled by ar_repack.erl.
			needs_repack
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: invalid
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = invalid} = RepackChunk) ->
	#repack_chunk{
		chunk = Chunk
	} = RepackChunk,
	NextState = case Chunk of
		invalid ->
			%% Chunk is already invalid, ready to write entropy.
			entropy_only;
		_ ->
			%% Offset has not yet been invalidated.
			invalid
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: entropy_only
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = entropy_only} = RepackChunk) ->
	#repack_chunk{
		target_packing = TargetPacking
	} = RepackChunk,
	NextState = case {has_all_entropy(RepackChunk), TargetPacking} of
		{false, _} ->
			%% Still waiting on entropy.
			entropy_only;
		{true, {replica_2_9, _}} ->
			%% We don't have a record of this chunk anywhere, so we'll record and
			%% index the entropy
			write_entropy;
		{true, _} ->
			%% We have a record of this chunk, so we'll ignore it.
			ignore
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: already_repacked
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = already_repacked} = RepackChunk) ->
	NextState = case has_all_entropy(RepackChunk) of
		false ->
			%% Still waiting on entropy.
			already_repacked;
		true ->
			%% Repacked chunk already exists on disk so don't write anything
			%% (neither entropy nor chunk)
			ignore
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: needs_repack
%%
%% Source chunk will be repacked directly to:
%% - TargetPacking if TargetPacking is not replica_2_9
%% - unpacked_padded if TargetPacking is replica_2_9
%%
%% Note when TargetPacking is replica_2_9 we need to generate entropy and then
%% encipher the chunk rather than doing a direct repack.
%%
%% When we detect one of those condition, transition to has_chunk which will
%% determine what state to transition to next.
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = needs_repack} = RepackChunk) ->
	#repack_chunk{
		source_packing = SourcePacking,
		target_packing = TargetPacking
	} = RepackChunk,
	%% TargetPacking is bound, so {TargetPacking, _} matches when the repack
	%% worker has already rewritten source_packing to equal target_packing.
	NextState = case {SourcePacking, TargetPacking} of
		{TargetPacking, _} ->
			has_chunk;
		{unpacked_padded, {replica_2_9, _}} ->
			has_chunk;
		_ ->
			%% Still waiting on repacking.
			needs_repack
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: needs_source_entropy
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = needs_source_entropy} = RepackChunk) ->
	#repack_chunk{
		source_entropy = SourceEntropy
	} = RepackChunk,
	NextState = case SourceEntropy of
		not_set ->
			%% Still waiting on entropy.
			needs_source_entropy;
		_ ->
			%% sanity checks
			true = SourceEntropy /= <<>>,
			%% end sanity checks
			needs_decipher
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: needs_target_entropy
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = needs_target_entropy} = RepackChunk) ->
	#repack_chunk{
		target_entropy = TargetEntropy
	} = RepackChunk,
	NextState = case TargetEntropy of
		not_set ->
			%% Still waiting on entropy.
			needs_target_entropy;
		_ ->
			%% We now have the unpacked_padded chunk and the entropy, proceed
			%% with enciphering and storing the chunk.
			%% sanity checks
			true = TargetEntropy /= <<>>,
			true = RepackChunk#repack_chunk.chunk /= not_found,
			true = RepackChunk#repack_chunk.chunk /= not_set,
			true = RepackChunk#repack_chunk.source_packing == unpacked_padded,
			%% end sanity checks
			needs_encipher
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: needs_decipher
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = needs_decipher} = RepackChunk) ->
	#repack_chunk{
		source_packing = SourcePacking
	} = RepackChunk,
	NextState = case SourcePacking of
		unpacked_padded ->
			has_chunk;
		_ ->
			%% Still waiting on deciphering.
			needs_decipher
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: needs_encipher
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = needs_encipher} = RepackChunk) ->
	#repack_chunk{
		source_packing = SourcePacking,
		target_packing = TargetPacking
	} = RepackChunk,
	%% sanity checks
	true = RepackChunk#repack_chunk.chunk /= not_found,
	true = RepackChunk#repack_chunk.chunk /= not_set,
	%% end sanity checks
	IsRepacked = SourcePacking == TargetPacking,
	NextState = case IsRepacked of
		true ->
			has_chunk;
		_ ->
			%% Still waiting on enciphering.
			needs_encipher
	end,
	RepackChunk#repack_chunk{state = NextState};
%% ---------------------------------------------------------------------------
%% State: write_chunk
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = write_chunk} = RepackChunk) ->
	%% write_chunk is a terminal state.
	RepackChunk;
%% ---------------------------------------------------------------------------
%% State: write_entropy
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = write_entropy} = RepackChunk) ->
	%% write_entropy is a terminal state.
	RepackChunk;
%% ---------------------------------------------------------------------------
%% State: ignore
%% ---------------------------------------------------------------------------
next_state(#repack_chunk{state = ignore} = RepackChunk) ->
	%% ignore is a terminal state.
	RepackChunk;
next_state(RepackChunk) ->
	%% Catch-all: an unrecognized state (or a record shape none of the clauses
	%% above accept) is logged and returned unchanged, which also terminates
	%% crank_state/1's loop.
	log_error(invalid_repack_fsm_transition, RepackChunk, []),
	RepackChunk.

%% @doc True once both source and target entropy have been generated.
%% Note this only checks for not_set; an empty binary would count as present.
has_all_entropy(RepackChunk) ->
	#repack_chunk{
		source_entropy = SourceEntropy,
		target_entropy = TargetEntropy
	} = RepackChunk,
	SourceEntropy /= not_set andalso TargetEntropy /= not_set.

log_error(Event, #repack_chunk{} = RepackChunk, ExtraLogs) ->
	?LOG_ERROR(format_logs(Event, RepackChunk, ExtraLogs)).

log_debug(Event, #repack_chunk{} = RepackChunk, ExtraLogs) ->
	?LOG_DEBUG(format_logs(Event, RepackChunk, ExtraLogs)).
%% @doc Build the structured log proplist for a repack chunk event. The
%% caller-supplied ExtraLogs are appended after the standard fields.
format_logs(Event, #repack_chunk{} = RepackChunk, ExtraLogs) ->
	Offsets = RepackChunk#repack_chunk.offsets,
	{ChunkSize, DataPath} =
		case RepackChunk#repack_chunk.metadata of
			#chunk_metadata{chunk_size = Size, data_path = Path} ->
				{Size, Path};
			_ ->
				{not_set, not_set}
		end,
	[
		{event, Event},
		{state, RepackChunk#repack_chunk.state},
		{bucket_end_offset, Offsets#chunk_offsets.bucket_end_offset},
		{absolute_offset, Offsets#chunk_offsets.absolute_offset},
		{padded_end_offset, Offsets#chunk_offsets.padded_end_offset},
		{chunk_size, ChunkSize},
		{source_packing,
			ar_serialize:encode_packing(RepackChunk#repack_chunk.source_packing, false)},
		{target_packing,
			ar_serialize:encode_packing(RepackChunk#repack_chunk.target_packing, false)},
		{chunk, atom_or_binary(RepackChunk#repack_chunk.chunk)},
		{source_entropy, atom_or_binary(RepackChunk#repack_chunk.source_entropy)},
		{target_entropy, atom_or_binary(RepackChunk#repack_chunk.target_entropy)},
		{data_path, atom_or_binary(DataPath)}
	| ExtraLogs].

%% @doc Pass atoms through untouched; truncate binaries to a 10-byte prefix
%% so log lines stay short.
atom_or_binary(Value) when is_atom(Value) ->
	Value;
atom_or_binary(Value) when is_binary(Value) ->
	PrefixLength = min(10, byte_size(Value)),
	binary:part(Value, 0, PrefixLength).

%%%===================================================================
%%% Tests.
%%%===================================================================

state_transition_test_() ->
	Mocks = [
		{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
	],
	[
		ar_test_node:test_with_mocked_functions(Mocks, fun test_state_transitions/0, 30)
	].
%% @doc Exercise every next_state/1 clause with hand-built records, one
%% section per FSM state. Random bytes are used only as opaque payloads;
%% the transitions depend solely on atoms/sizes/offsets.
test_state_transitions() ->
	Addr1 = crypto:strong_rand_bytes(32),
	Addr2 = crypto:strong_rand_bytes(32),
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	Entropy1 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	Entropy2 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	%% ---------------------------------------------------------------------------
	%% needs_chunk
	%% ---------------------------------------------------------------------------
	?assertEqual(needs_chunk, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = not_set,
		metadata = not_set
	}))#repack_chunk.state),
	?assertEqual(entropy_only, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = not_found,
		metadata = not_found
	}))#repack_chunk.state),
	?assertEqual(error, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = not_set,
		offsets = #chunk_offsets{}
	}))#repack_chunk.state),
	?assertEqual(entropy_only, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = 100},
		offsets = #chunk_offsets{absolute_offset = 100},
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(invalid, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = ?DATA_CHUNK_SIZE},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = not_found,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(already_repacked, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = ?DATA_CHUNK_SIZE},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_data_path, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = not_found,
		metadata = #chunk_metadata{chunk_size = ?DATA_CHUNK_SIZE},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_data_path, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = 100},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = {replica_2_9, Addr1},
		target_packing = unpacked
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = ?DATA_CHUNK_SIZE},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = {replica_2_9, Addr2},
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = ?DATA_CHUNK_SIZE},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_chunk,
		chunk = Chunk,
		metadata = #chunk_metadata{chunk_size = ?DATA_CHUNK_SIZE},
		offsets = #chunk_offsets{absolute_offset = 1000000},
		source_packing = {replica_2_9, Addr1},
		target_packing = unpacked
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% needs_data_path
	%% ---------------------------------------------------------------------------
	?assertEqual(needs_data_path, (next_state(#repack_chunk{
		state = needs_data_path,
		metadata = #chunk_metadata{data_path = not_set}
	}))#repack_chunk.state),
	?assertEqual(invalid, (next_state(#repack_chunk{
		state = needs_data_path,
		chunk = not_found,
		metadata = #chunk_metadata{data_path = <<"path">>}
	}))#repack_chunk.state),
	?assertEqual(invalid, (next_state(#repack_chunk{
		state = needs_data_path,
		chunk = Chunk,
		metadata = #chunk_metadata{data_path = not_found}
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_data_path,
		chunk = Chunk,
		metadata = #chunk_metadata{data_path = <<"path">>},
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_data_path,
		chunk = Chunk,
		metadata = #chunk_metadata{data_path = <<"path">>},
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr2}
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_data_path,
		chunk = Chunk,
		metadata = #chunk_metadata{data_path = <<"path">>},
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% has_chunk
	%% ---------------------------------------------------------------------------
	?assertEqual(write_chunk, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_source_entropy, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr2}
	}))#repack_chunk.state),
	?assertEqual(needs_source_entropy, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = {replica_2_9, Addr1},
		target_packing = unpacked
	}))#repack_chunk.state),
	?assertEqual(needs_target_entropy, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = unpacked_padded,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_repack, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_repack, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = unpacked_padded,
		target_packing = unpacked
	}))#repack_chunk.state),
	?assertEqual(needs_repack, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = {spora_2_6, Addr1},
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_repack, (next_state(#repack_chunk{
		state = has_chunk,
		source_packing = {spora_2_6, Addr1},
		target_packing = unpacked
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% invalid
	%% ---------------------------------------------------------------------------
	?assertEqual(entropy_only, (next_state(#repack_chunk{
		state = invalid,
		chunk = invalid
	}))#repack_chunk.state),
	?assertEqual(invalid, (next_state(#repack_chunk{
		state = invalid,
		chunk = Chunk
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% entropy_only
	%% ---------------------------------------------------------------------------
	?assertEqual(entropy_only, (next_state(#repack_chunk{
		state = entropy_only
	}))#repack_chunk.state),
	?assertEqual(entropy_only, (next_state(#repack_chunk{
		state = entropy_only,
		target_entropy = Entropy1
	}))#repack_chunk.state),
	?assertEqual(entropy_only, (next_state(#repack_chunk{
		state = entropy_only,
		source_entropy = Entropy1
	}))#repack_chunk.state),
	?assertEqual(write_entropy, (next_state(#repack_chunk{
		state = entropy_only,
		source_entropy = <<>>,
		target_entropy = Entropy1,
		target_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(ignore, (next_state(#repack_chunk{
		state = entropy_only,
		source_entropy = Entropy1,
		target_entropy = <<>>,
		target_packing = unpacked
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% already_repacked
	%% ---------------------------------------------------------------------------
	?assertEqual(already_repacked, (next_state(#repack_chunk{
		state = already_repacked,
		target_entropy = Entropy1
	}))#repack_chunk.state),
	?assertEqual(already_repacked, (next_state(#repack_chunk{
		state = already_repacked,
		source_entropy = Entropy1
	}))#repack_chunk.state),
	?assertEqual(ignore, (next_state(#repack_chunk{
		state = already_repacked,
		source_entropy = <<>>,
		target_entropy = Entropy2
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% needs_repack
	%% ---------------------------------------------------------------------------
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_repack,
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr1},
		chunk = Chunk
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_repack,
		source_packing = unpacked,
		target_packing = unpacked,
		chunk = Chunk
	}))#repack_chunk.state),
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_repack,
		source_packing = unpacked_padded,
		target_packing = {replica_2_9, Addr1},
		chunk = Chunk
	}))#repack_chunk.state),
	?assertEqual(needs_repack, (next_state(#repack_chunk{
		state = needs_repack,
		source_packing = unpacked,
		target_packing = {replica_2_9, Addr1},
		chunk = Chunk
	}))#repack_chunk.state),
	?assertEqual(needs_repack, (next_state(#repack_chunk{
		state = needs_repack,
		source_packing = {spora_2_6, Addr1},
		target_packing = unpacked,
		chunk = Chunk
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% needs_source_entropy
	%% ---------------------------------------------------------------------------
	?assertEqual(needs_source_entropy, (next_state(#repack_chunk{
		state = needs_source_entropy,
		chunk = Chunk,
		source_packing = {replica_2_9, Addr1}
	}))#repack_chunk.state),
	?assertEqual(needs_decipher, (next_state(#repack_chunk{
		state = needs_source_entropy,
		chunk = Chunk,
		source_packing = {replica_2_9, Addr1},
		source_entropy = Entropy1
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% needs_target_entropy
	%% ---------------------------------------------------------------------------
	?assertEqual(needs_target_entropy, (next_state(#repack_chunk{
		state = needs_target_entropy,
		target_entropy = not_set,
		chunk = Chunk,
		source_packing = unpacked_padded
	}))#repack_chunk.state),
	?assertEqual(needs_encipher, (next_state(#repack_chunk{
		state = needs_target_entropy,
		target_entropy = Entropy1,
		chunk = Chunk,
		source_packing = unpacked_padded
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% needs_decipher
	%% ---------------------------------------------------------------------------
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_decipher,
		source_packing = unpacked_padded,
		target_packing = {replica_2_9, Addr1},
		chunk = Chunk
	}))#repack_chunk.state),
	?assertEqual(needs_decipher, (next_state(#repack_chunk{
		state = needs_decipher,
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr2},
		chunk = Chunk
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% needs_encipher
	%% ---------------------------------------------------------------------------
	?assertEqual(has_chunk, (next_state(#repack_chunk{
		state = needs_encipher,
		source_packing = {replica_2_9, Addr1},
		target_packing = {replica_2_9, Addr1},
		chunk = Chunk
	}))#repack_chunk.state),
	?assertEqual(needs_encipher, (next_state(#repack_chunk{
		state = needs_encipher,
		source_packing = unpacked_padded,
		target_packing = {replica_2_9, Addr1},
		chunk = Chunk
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% write_chunk
	%% ---------------------------------------------------------------------------
	?assertEqual(write_chunk, (next_state(#repack_chunk{
		state = write_chunk,
		chunk = Chunk
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% write_entropy
	%% ---------------------------------------------------------------------------
	?assertEqual(write_entropy, (next_state(#repack_chunk{
		state = write_entropy,
		target_entropy = Entropy1
	}))#repack_chunk.state),
	%% ---------------------------------------------------------------------------
	%% ignore
	%% ---------------------------------------------------------------------------
	?assertEqual(ignore, (next_state(#repack_chunk{
		state = ignore,
		chunk = Chunk
	}))#repack_chunk.state),
	ok.


================================================
FILE: apps/arweave/src/ar_repack_io.erl
================================================
-module(ar_repack_io).

-behaviour(gen_server).

-export([name/1, read_footprint/4, write_queue/3]).

-export([start_link/2, init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_repack.hrl").

-include_lib("eunit/include/eunit.hrl").

-moduledoc """
	This module handles disk IO for the repack-in-place process.
""".

%% Server state: the storage module served and the number of consecutive
%% chunks fetched per footprint offset (from the repack_batch_size config).
-record(state, {
	store_id = undefined,
	read_batch_size = ?DEFAULT_REPACK_BATCH_SIZE
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, StoreID) ->
	gen_server:start_link({local, Name}, ?MODULE, StoreID, []).

%% @doc Return the name of the server serving the given StoreID.
name(StoreID) ->
	list_to_atom("ar_repack_io_" ++ ar_storage_module:label(StoreID)).

init(StoreID) ->
	{ok, Config} = arweave_config:get_env(),
	ReadBatchSize = Config#config.repack_batch_size,
	State = #state{
		store_id = StoreID,
		read_batch_size = ReadBatchSize
	},
	log_info(ar_repack_io_init, State, [
		{name, name(StoreID)},
		{read_batch_size, ReadBatchSize}
	]),
	{ok, State}.

%% @doc Read all the chunks covered by the given footprint.
%% The footprint covers:
%% - A list of offsets determined by the replica.2.9 entropy footprint pattern.
%% - A set of consecutive chunks following each offset. The number of consecutive chunks
%%   read for each footprint offset is determined by the repack_batch_size config.
-spec read_footprint(
	[non_neg_integer()], non_neg_integer(), non_neg_integer(),
	ar_storage_module:store_id()) -> ok.
read_footprint(FootprintOffsets, FootprintStart, FootprintEnd, StoreID) ->
	gen_server:cast(name(StoreID),
		{read_footprint, FootprintOffsets, FootprintStart, FootprintEnd}).

%% @doc Asynchronously hand a queue of fully-processed repack chunks to the
%% IO server to be written to disk under the given Packing.
write_queue(WriteQueue, Packing, StoreID) ->
	gen_server:cast(name(StoreID), {write_queue, WriteQueue, Packing}).

%%%===================================================================
%%% Gen server callbacks.
%%%===================================================================

%% No synchronous API is exposed; any call is logged and acknowledged with ok.
handle_call(Request, _From, #state{} = State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Both casts do their disk work synchronously inside the callback, so the
%% server processes one read or write batch at a time.
handle_cast(
		{read_footprint, FootprintOffsets, FootprintStart, FootprintEnd},
		#state{} = State) ->
	do_read_footprint(FootprintOffsets, FootprintStart, FootprintEnd, State),
	{noreply, State};
handle_cast({write_queue, WriteQueue, Packing}, #state{} = State) ->
	process_write_queue(WriteQueue, Packing, State),
	{noreply, State};
handle_cast(Request, #state{} = State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {request, Request}]),
	{noreply, State}.

handle_info(Request, #state{} = State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {request, Request}]),
	{noreply, State}.

terminate(Reason, #state{} = State) ->
	log_debug(terminate, State, [
		{module, ?MODULE},
		{reason, ar_util:safe_format(Reason)}
	]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Walk the footprint offsets in order, reading one batch per offset.
%% Offsets below FootprintStart are skipped; the first offset above
%% FootprintEnd stops the walk.
do_read_footprint([], _FootprintStart, _FootprintEnd, #state{}) ->
	ok;
do_read_footprint(
		[ BucketEndOffset | FootprintOffsets], FootprintStart, FootprintEnd,
		#state{} = State)
		when BucketEndOffset < FootprintStart ->
	%% Advance until we hit a chunk covered by the current storage module
	do_read_footprint(FootprintOffsets, FootprintStart, FootprintEnd, State);
do_read_footprint(
		[BucketEndOffset | _FootprintOffsets], _FootprintStart, FootprintEnd, #state{})
		when BucketEndOffset > FootprintEnd ->
	ok;
do_read_footprint(
		[BucketEndOffset | FootprintOffsets], FootprintStart, FootprintEnd,
		#state{} = State) ->
	#state{
		store_id = StoreID,
		read_batch_size = ReadBatchSize
	} = State,
	StartTime = erlang:monotonic_time(),
	{ReadRangeStart, ReadRangeEnd, _ReadRangeOffsets} = ar_repack:get_read_range(
		BucketEndOffset, FootprintEnd, ReadBatchSize),
	ReadRangeSizeInBytes = ReadRangeEnd - ReadRangeStart,
	%% A crash while reading the range is downgraded to an empty map so the
	%% rest of the footprint can still be processed.
	OffsetChunkMap =
		case catch ar_chunk_storage:get_range(ReadRangeStart, ReadRangeSizeInBytes, StoreID) of
			[] ->
				#{};
			{'EXIT', _Exc} ->
				log_error(failed_to_read_chunk_range, State, [
					{read_range_start, ReadRangeStart},
					{read_range_end, ReadRangeEnd},
					{read_range_size_bytes, ReadRangeSizeInBytes}
				]),
				#{};
			Range ->
				maps:from_list(Range)
		end,
	OffsetMetadataMap =
		case ar_data_sync:get_chunk_metadata_range(ReadRangeStart+1, ReadRangeEnd, StoreID) of
			{ok, MetadataMap} ->
				MetadataMap;
			{error, invalid_iterator} ->
				%% NOTE(review): invalid_iterator is treated as "no metadata in
				%% range" and intentionally not logged — confirm this is expected.
				#{};
			{error, Reason} ->
				log_warning(failed_to_read_chunk_metadata, State, [
					{read_range_start, ReadRangeStart},
					{read_range_end, ReadRangeEnd},
					{reason, Reason}
				]),
				#{}
		end,
	ChunkReadSizeInBytes = maps:fold(
		fun(_Key, Value, Acc) -> Acc + byte_size(Value) end,
		0,
		OffsetChunkMap
	),
	ar_metrics:record_rate_metric(
		StartTime, ChunkReadSizeInBytes,
		chunk_read_rate_bytes_per_second,
		[ar_storage_module:label(StoreID), repack]),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = max(1, erlang:convert_time_unit(EndTime - StartTime, native, millisecond)),
	log_debug(read_footprint, State, [
		{bucket_end_offset, BucketEndOffset},
		{read_range_start, ReadRangeStart},
		{read_range_end, ReadRangeEnd},
		{read_range_size_bytes, ReadRangeSizeInBytes},
		{chunk_read_size_bytes, ChunkReadSizeInBytes},
		{chunks_read, maps:size(OffsetChunkMap)},
		{metadata_read, maps:size(OffsetMetadataMap)},
		{footprint_start, FootprintStart},
		{footprint_end, FootprintEnd},
		{remaining_offsets, length(FootprintOffsets)},
		{time_taken, ElapsedTime},
		{rate, (ChunkReadSizeInBytes / ?MiB / ElapsedTime) * 1000}
	]),
	ar_repack:chunk_range_read(
		BucketEndOffset, OffsetChunkMap, OffsetMetadataMap, State#state.store_id),
	%% Continue via the public read_footprint/4, which re-casts to this
	%% server. NOTE(review): presumably this yields between batches so other
	%% mailbox messages can interleave — confirm.
	read_footprint(FootprintOffsets, FootprintStart, FootprintEnd, StoreID).

%% Write every queued repack chunk to disk and record the write rate.
process_write_queue(WriteQueue, Packing, #state{} = State) ->
	#state{
		store_id = StoreID
	} = State,
	StartTime = erlang:monotonic_time(),
	gb_sets:fold(
		fun({_BucketEndOffset, RepackChunk}, _) ->
			write_repack_chunk(RepackChunk, Packing, State)
		end,
		ok,
		WriteQueue
	),
	ar_metrics:record_rate_metric(
		StartTime, gb_sets:size(WriteQueue) * ?DATA_CHUNK_SIZE,
		chunk_write_rate_bytes_per_second,
		[ar_storage_module:label(StoreID), repack]),
	EndTime = erlang:monotonic_time(),
	ElapsedTime = max(1, erlang:convert_time_unit(EndTime - StartTime, native, millisecond)),
	log_debug(process_write_queue, State, [
		{write_queue_size, gb_sets:size(WriteQueue)},
		{time_taken, ElapsedTime},
		%% NOTE(review): size/4 presumably converts a count of 256-KiB chunks
		%% to MiB, making this a MiB/s figure — confirm against ?DATA_CHUNK_SIZE.
		{rate, (gb_sets:size(WriteQueue) / 4 / ElapsedTime) * 1000}
	]).
%% @doc Dispatch a single queued chunk based on its terminal FSM state:
%% write_entropy stores only the target entropy, write_chunk stores the
%% repacked chunk, anything else is a bug and is logged.
write_repack_chunk(RepackChunk, Packing, #state{} = State) ->
	#state{
		store_id = StoreID
	} = State,

	case RepackChunk#repack_chunk.state of
		write_entropy ->
			%% Entropy is only written for replica_2_9 targets; any other
			%% Packing would fail this match.
			{replica_2_9, RewardAddr} = Packing,
			Entropy = RepackChunk#repack_chunk.target_entropy,
			BucketEndOffset = RepackChunk#repack_chunk.offsets#chunk_offsets.bucket_end_offset,
			ar_entropy_storage:store_entropy(
				Entropy, BucketEndOffset, StoreID, RewardAddr);
		write_chunk ->
			write_chunk(RepackChunk, Packing, State);
		_ ->
			log_error(unexpected_chunk_state, State, format_logs(RepackChunk))
	end.

%% @doc Store one repacked chunk: clear the old sync/entropy/footprint
%% records first, then (unless the byte is blacklisted) write the chunk and
%% re-add it to the records. Failures are logged, not raised.
write_chunk(RepackChunk, TargetPacking, #state{} = State) ->
	#state{
		store_id = StoreID
	} = State,
	#repack_chunk{
		offsets = Offsets,
		metadata = Metadata,
		chunk = Chunk
	} = RepackChunk,
	#chunk_offsets{
		absolute_offset = AbsoluteOffset
	} = Offsets,
	IsBlacklisted = ar_tx_blacklist:is_byte_blacklisted(AbsoluteOffset),
	case remove_from_sync_record(Offsets, StoreID) of
		ok when IsBlacklisted == true ->
			%% Blacklisted bytes are dropped: records removed, nothing written.
			ok;
		ok when IsBlacklisted == false ->
			WriteResult = ar_data_sync:write_chunk(
				AbsoluteOffset, Metadata, Chunk, TargetPacking, StoreID),
			case WriteResult of
				{ok, TargetPacking} ->
					add_to_sync_record(Offsets, Metadata, TargetPacking, StoreID);
				{ok, WrongPacking} ->
					%% This shouldn't ever happen - the only time write_chunk should change
					%% the packing is when writing to unpacked_padded.
					log_error(repacked_chunk_stored_with_wrong_packing, State, [
						{requested_packing, ar_serialize:encode_packing(TargetPacking, true)},
						{stored_packing, ar_serialize:encode_packing(WrongPacking, true)}
					]);
				Error ->
					log_error(failed_to_store_repacked_chunk, State, [
						{requested_packing, ar_serialize:encode_packing(TargetPacking, true)},
						{error, io_lib:format("~p", [Error])}
					])
			end;
		Error ->
			log_error(failed_to_remove_from_sync_record, State, [
				{error, io_lib:format("~p", [Error])}
			])
	end.
%% @doc Remove every record of the chunk interval ending at PaddedEndOffset:
%% the entropy record, the footprint record, and both sync records. Stops at
%% the first failure and returns it; returns ok when everything succeeded.
remove_from_sync_record(Offsets, StoreID) ->
	PaddedEndOffset = Offsets#chunk_offsets.padded_end_offset,
	StartOffset = PaddedEndOffset - ?DATA_CHUNK_SIZE,
	case ar_entropy_storage:delete_record(PaddedEndOffset, StoreID) of
		ok ->
			case ar_footprint_record:delete(PaddedEndOffset, StoreID) of
				ok ->
					case ar_sync_record:delete(
							PaddedEndOffset, StartOffset, ar_data_sync, StoreID) of
						ok ->
							ar_sync_record:delete(
								PaddedEndOffset, StartOffset, ar_chunk_storage, StoreID);
						SyncError ->
							SyncError
					end;
				FootprintError ->
					FootprintError
			end;
		EntropyError ->
			EntropyError
	end.

%% @doc Re-register a freshly written chunk: always add it to the data sync
%% record, add a footprint record when supported, and add an entropy record
%% when the chunk lives in chunk storage with replica_2_9 packing.
add_to_sync_record(Offsets, Metadata, Packing, StoreID) ->
	PaddedEndOffset = Offsets#chunk_offsets.padded_end_offset,
	BucketEndOffset = Offsets#chunk_offsets.bucket_end_offset,
	ChunkSize = Metadata#chunk_metadata.chunk_size,
	IntervalStart = PaddedEndOffset - ?DATA_CHUNK_SIZE,
	ar_sync_record:add(PaddedEndOffset, IntervalStart, Packing, ar_data_sync, StoreID),
	case ar_data_sync:is_footprint_record_supported(PaddedEndOffset, ChunkSize, Packing) of
		true ->
			ar_footprint_record:add(PaddedEndOffset, Packing, StoreID);
		false ->
			ok
	end,
	InChunkStorage = ar_chunk_storage:is_storage_supported(
		PaddedEndOffset, ChunkSize, Packing),
	IsReplica29 =
		case Packing of
			{replica_2_9, _} -> true;
			_ -> false
		end,
	case InChunkStorage andalso IsReplica29 of
		true ->
			ar_entropy_storage:add_record(BucketEndOffset, Packing, StoreID);
		_ ->
			ok
	end.

%% Thin logging wrappers adding the standard ar_repack_io context fields.
log_error(Event, #state{} = State, ExtraLogs) ->
	?LOG_ERROR(format_logs(Event, State, ExtraLogs)).

log_warning(Event, #state{} = State, ExtraLogs) ->
	?LOG_WARNING(format_logs(Event, State, ExtraLogs)).

log_info(Event, #state{} = State, ExtraLogs) ->
	?LOG_INFO(format_logs(Event, State, ExtraLogs)).

log_debug(Event, #state{} = State, ExtraLogs) ->
	?LOG_DEBUG(format_logs(Event, State, ExtraLogs)).
%% @doc Assemble the standard log metadata for this module: event name, tags,
%% worker pid and store id, followed by the caller-provided extra fields.
format_logs(Event, #state{} = State, ExtraLogs) ->
	[
		{event, Event},
		{tags, [repack_in_place, ar_repack_io]},
		{pid, self()},
		{store_id, State#state.store_id}
		| ExtraLogs
	].

%% @doc Flatten a #repack_chunk{} record into a proplist suitable for logging.
%% Metadata may be an atom placeholder rather than a #chunk_metadata{} record;
%% in that case the placeholder itself is logged in the chunk_size field.
format_logs(#repack_chunk{} = RepackChunk) ->
	#repack_chunk{
		state = ChunkState,
		offsets = Offsets,
		metadata = Metadata,
		chunk = Chunk,
		source_packing = SourcePacking,
		target_packing = TargetPacking,
		target_entropy = TargetEntropy,
		source_entropy = SourceEntropy
	} = RepackChunk,
	#chunk_offsets{
		absolute_offset = AbsoluteOffset,
		bucket_end_offset = BucketEndOffset,
		padded_end_offset = PaddedEndOffset
	} = Offsets,
	ChunkSize = case Metadata of
		#chunk_metadata{chunk_size = Size} ->
			Size;
		_ ->
			Metadata
	end,
	[
		{state, ChunkState},
		{bucket_end_offset, BucketEndOffset},
		{absolute_offset, AbsoluteOffset},
		{padded_end_offset, PaddedEndOffset},
		{chunk_size, ChunkSize},
		{chunk, atom_or_binary(Chunk)},
		{source_packing, ar_serialize:encode_packing(SourcePacking, false)},
		{target_packing, ar_serialize:encode_packing(TargetPacking, false)},
		{source_entropy, atom_or_binary(SourceEntropy)},
		{target_entropy, atom_or_binary(TargetEntropy)}
	].

%% Return an atom unchanged; truncate a binary to its first 10 bytes so large
%% chunk/entropy payloads do not flood the logs.
atom_or_binary(Atom) when is_atom(Atom) -> Atom;
atom_or_binary(Bin) when is_binary(Bin) -> binary:part(Bin, {0, min(10, byte_size(Bin))}).



================================================
FILE: apps/arweave/src/ar_replica_2_9.erl
================================================
-module(ar_replica_2_9).

-export([get_entropy_partition/1, get_entropy_partition_range/1, get_entropy_key/3,
		get_slice_index/1, get_partition_offset/1, get_entropy_index/2]).

-include("ar.hrl").
-include("ar_consensus.hrl").

-include_lib("eunit/include/eunit.hrl").

-moduledoc """
This module handles mapping the 2.9 replica entropy to chunks and sub-chunks.

Here's a break down of how entropy is mapped to sub-chunks.

1. Iterate through each chunk's (e.g. chunk0) sub-chunks (e.g. s0, s1) assigning each
   one to a different entropy. This ensures that all contiguous sub-chunks are assigned
   to different entropies, maximizing the amount of work that an on-demand miner needs
   to do to pack and mine a contiguous recall range.

              chunk0                          chunk1
   +-----------------------------+ +-----------------------------+
   | s0  | s1  | s2  | ... | s31 | | s0  | s1  | s2  | ... | s31 |
   +-----------------------------+ +-----------------------------+
      v     v     v         v        v     v     v          v
   entropy index:
      e0    e1    e2        e31      e32   e33   e34        e63

2. Each 8 MiB entropy contains 1024 8 KiB slices. To finish packing the sub-chunks we
   will encipher them with the appropriate slice. A sub-chunk's slice index is
   determined by its *chunk* - each sub-chunk in a chunk is assigned to a different
   *entropy* but has the same *slice index*. A slice index and sector index are the
   same but are just used in different contexts (e.g. slices divide up entropy, sectors
   divide up the partition). A chunk in sector 0 of the partition is enciphered with
   slice index 0 from its entropies.

    sector0   sector1   sector2        sector1023    sector0   sector1
    chunk0    c12413    c26825          cXXXXXX      chunk1    c12414
   +-------+ +-------+ +-------+       +-------+    +-------+ +-------+
   |       | |       | |       |  ...  |       |    |       | |       |
   +-------+ +-------+ +-------+       +-------+    +-------+ +-------+
       |         |         |              |            |         |
   +-----------------------------------------------+  +--------------------------+
e0:| slice0  | slice1  | slice2  | ... | slice1023 |  e32:| slice0 | slice1 | ...
   +-----------------------------------------------+  +--------------------------+
       |         |         |              |            |         |
   +-----------------------------------------------+  +--------------------------+
e1:| slice0  | slice1  | slice2  | ... | slice1023 |  e33:| slice0 | slice1 | ...
   +-----------------------------------------------+  +--------------------------+
       |         |         |              |            |         |
   +-----------------------------------------------+  +--------------------------+
e2:| slice0  | slice1  | slice2  | ... | slice1023 |  e34:| slice0 | slice1 | ...
   +-----------------------------------------------+  +--------------------------+
   ...
       |         |         |              |            |         |
   +-----------------------------------------------+  +--------------------------+
e31:| slice0 | slice1  | slice2  | ... | slice1023 |  e63:| slice0 | slice1 | ...
   +-----------------------------------------------+  +--------------------------+
       |         |         |              |            |         |
       v         v         v              v            v         v

Glossary:

entropy: An 8 MiB (?REPLICA_2_9_ENTROPY_SIZE) block of entropy that contains the entropy
for 1024 sub-chunks (?REPLICA_2_9_ENTROPY_SIZE div ?COMPOSITE_PACKING_SUB_CHUNK_SIZE).

slice: The 8192 byte (?COMPOSITE_PACKING_SUB_CHUNK_SIZE) range of an 'entropy' that will
be enciphered with a sub-chunk when packing to the replica_2_9 format.

entropy partition: contains all the entropies needed to encipher all the chunks in a
recall partition. A recall partition is 3.6 TB (ar_block:partition_size()), but an
entropy partition is slightly larger since enciphering a chunk (256 KiB) requires slices
from 32 different entropies (256 MiB). Some of the entropies in a partition can be
reused by neighboring recall partitions.

entropy index: The index of an entropy within an entropy partition. All of a chunk's
sub-chunks have a different entropy index.

slice index: the index of a slice within an entropy. All of a chunk's sub-chunks have
the same slice index.

sector: Each slice of an entropy is distributed to a different sector such that
consecutive slices map to chunks that are as far as possible from each other within a
partition. With an entropy size of 8_388_608 bytes and a slice size of 8_192 bytes,
there are 1024 slices per entropy, which yields 1024 sectors per partition.
""".

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the 2.9 partition number the chunk with the given absolute end offset is
%% mapped to. This partition number is a part of the 2.9 replication key.
%% It is NOT
%% the same as the ar_block:partition_size() (3.6 TB) recall partition.
-spec get_entropy_partition(
		AbsoluteChunkEndOffset :: non_neg_integer()
) -> non_neg_integer().
get_entropy_partition(AbsoluteChunkEndOffset) ->
	BucketStart = get_entropy_bucket_start(AbsoluteChunkEndOffset),
	ar_node:get_partition_number(BucketStart).

%% @doc Return the {StartByte, EndByte} pair delimiting the absolute offsets that map to
%% the given entropy partition number.
get_entropy_partition_range(PartitionNumber) ->
	%% The goal of this function is to return the minimum and maximum byte offsets that, when
	%% fed to ar_replica_2_9:get_entropy_partition/1 will yield the provided PartitionNumber.
	%%
	%% To do this we do a rough reversal of the steps taken by
	%% ar_replica_2_9:get_entropy_partition/1:
	%%
	%% get_entropy_partition(AbsoluteChunkEndOffset) ->
	%%     BucketStart = get_entropy_bucket_start(AbsoluteChunkEndOffset),
	%%     ar_node:get_partition_number(BucketStart).
	%%
	%% I say "rough reversal" because several of the steps are not reversible (e.g.
	%% ar_util:floor_int/2 discards data and so is not perfectly reversible).
	%%
	%% 1. Reverse ar_node:get_partition_number(BucketStart) to get the pick offsets
	%%    representing the byte boundaries of the recall partition.
	StartRecall = PartitionNumber * ar_block:partition_size(),
	EndRecall = (PartitionNumber + 1) * ar_block:partition_size(),
	%% 2. The next 3 steps reverse ar_replica_2_9:get_entropy_bucket_start/1 to yield the
	%%    first and last bytes of the entropy partition.
	%%
	%%    Get the first bucket boundary greater than the recall boundaries. This represents
	%%    the bucket end offset of the bucket which contains the first/last byte of the
	%%    recall partition.
	%%
	%%    Note: by passing 0 into get_padded_offset/2 we ignore the strict data split
	%%    threshold and focus on just finding the nearest 256 KiB aligned boundary greater
	%%    than the recall boundaries.
	StartBucket1 = ar_poa:get_padded_offset(StartRecall, 0),
	EndBucket1 = ar_poa:get_padded_offset(EndRecall, 0),
	%% 3. ar_replica_2_9:get_entropy_partition/1 allocates this straddling bucket to the
	%%    previous partition. So the start of the entropy partition is the first byte which
	%%    falls in the *next* bucket, and the end of the entropy partition is the last byte
	%%    which falls in *this* bucket. To get those bytes we'll advance to the next
	%%    bucket...
	StartBucket2 = StartBucket1 + ?DATA_CHUNK_SIZE,
	EndBucket2 = EndBucket1 + ?DATA_CHUNK_SIZE,
	%% 4. ... and then get the first byte which falls in that bucket
	StartByte1 = ar_chunk_storage:get_chunk_byte_from_bucket_end(StartBucket2) + 1,
	EndByte1 = ar_chunk_storage:get_chunk_byte_from_bucket_end(EndBucket2),
	%% 5. Handle the special case of partition 0. Since it has no preceding partition its
	%%    byte start is 0.
	StartByte2 = case PartitionNumber of
		0 -> 0;
		_ -> StartByte1
	end,
	{StartByte2, EndByte1}.

%% @doc Return the key used to generate the entropy for the 2.9 replication format.
%% RewardAddr: The address of the miner that mined the chunk.
%% AbsoluteEndOffset: The absolute end offset of the chunk.
%% SubChunkStartOffset: The start offset of the sub-chunk within the chunk. 0 is the first
%% sub-chunk of the chunk, (?DATA_CHUNK_SIZE - ?COMPOSITE_PACKING_SUB_CHUNK_SIZE) is the
%% last sub-chunk of the chunk.
-spec get_entropy_key(
		RewardAddr :: binary(),
		AbsoluteEndOffset :: non_neg_integer(),
		SubChunkStartOffset :: non_neg_integer()
) -> binary().
get_entropy_key(RewardAddr, AbsoluteEndOffset, SubChunkStartOffset) ->
	Partition = get_entropy_partition(AbsoluteEndOffset),
	%% We use the key to generate a large entropy shared by many chunks.
	EntropyIndex = get_entropy_index(AbsoluteEndOffset, SubChunkStartOffset),
	crypto:hash(sha256, << Partition:256, EntropyIndex:256, RewardAddr/binary >>).

%% @doc Return the 0-based index indicating which area within a 2.9 entropy the
%% given sub-chunk is mapped to (aka slice index). Sub-chunks of the same chunk are mapped
%% to different entropies but all use the same slice index.
-spec get_slice_index(
		AbsoluteChunkEndOffset :: non_neg_integer()
) -> non_neg_integer().
get_slice_index(AbsoluteChunkEndOffset) ->
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	SlicesPerEntropy = ar_block:get_sub_chunks_per_replica_2_9_entropy(),
	PartitionRelativeOffset = get_partition_offset(AbsoluteChunkEndOffset),
	%% Each sector maps to one slice of every entropy; wrap around once the
	%% partition-relative offset runs past the last slice.
	(PartitionRelativeOffset div SectorSize) rem SlicesPerEntropy.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Return the start offset of the bucket containing the given chunk offset.
%% A chunk bucket is a 0-based, 256-KiB wide, 256-KiB aligned range. A chunk belongs to
%% the bucket that contains the first byte of the chunk.
-spec get_entropy_bucket_start(non_neg_integer()) -> non_neg_integer().
get_entropy_bucket_start(AbsoluteChunkEndOffset) ->
	PaddedEndOffset = ar_block:get_chunk_padded_offset(AbsoluteChunkEndOffset),
	%% The first byte of the chunk, clamped to 0 for offsets below one full chunk.
	PickOffset = max(0, PaddedEndOffset - ?DATA_CHUNK_SIZE),
	BucketStart = ar_util:floor_int(PickOffset, ?DATA_CHUNK_SIZE),
	%% Sanity check: must agree with ar_chunk_storage's bucket computation.
	true = BucketStart == ar_chunk_storage:get_chunk_bucket_start(PaddedEndOffset),
	BucketStart.

%% @doc Return the offset of the chunk within its partition.
-spec get_partition_offset(AbsoluteChunkEndOffset :: non_neg_integer()) -> non_neg_integer().
get_partition_offset(AbsoluteChunkEndOffset) ->
	PartitionStart =
		get_entropy_partition(AbsoluteChunkEndOffset) * ar_block:partition_size(),
	get_entropy_bucket_start(AbsoluteChunkEndOffset) - PartitionStart.

%% @doc Returns the index of the entropy containing the slice for specified chunk's sub-chunk.
%% An entropy index is 0-based index used to identify a specific entropy within an entropy
%% partition. It is not unique - the same index will refer to different entropies in different
%% partitions and for different mining addresses. For a unique entropy identifier see
%% get_entropy_key/3.
%%
%% The entropy index is for the 2.9 replication format.
-spec get_entropy_index(
		AbsoluteChunkEndOffset :: non_neg_integer(),
		SubChunkStartOffset :: non_neg_integer()
) -> non_neg_integer().
get_entropy_index(AbsoluteChunkEndOffset, SubChunkStartOffset) ->
	%% Assert that SubChunkStartOffset is less than ?DATA_CHUNK_SIZE; a violation
	%% raises {badmatch, false}, which the tests below rely on.
	true = SubChunkStartOffset < ?DATA_CHUNK_SIZE,
	PartitionRelativeOffset = get_partition_offset(AbsoluteChunkEndOffset),
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	%% Index of this chunk into the sector (i.e. how many chunks into the sector it falls)
	ChunkBucket = (PartitionRelativeOffset rem SectorSize) div ?DATA_CHUNK_SIZE,
	%% Index of this sub-chunk into the chunk (i.e. how many sub-chunks into the chunk it
	%% falls)
	SubChunkBucket = SubChunkStartOffset div ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	ChunkBucket * ?COMPOSITE_PACKING_SUB_CHUNK_COUNT + SubChunkBucket.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Mock small partition/sector/entropy sizes so the mapping can be walked by hand.
get_entropy_key_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, partition_size, fun() -> 2_000_000 end},
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 3 end}
		],
		fun test_get_entropy_key/0, 30).

test_get_entropy_key() ->
	SubChunkSize = ?COMPOSITE_PACKING_SUB_CHUNK_SIZE,
	SectorSize = ar_block:get_replica_2_9_entropy_sector_size(),
	EntropyPartitionSize = ar_block:get_replica_2_9_entropy_partition_size(),
	Addr = << 0:256 >>,
	?assertEqual(32, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT),
	?assertEqual(0, get_entropy_index(1, 0)),
	EntropyKey = ar_util:encode(get_entropy_key(Addr, 1, 0)),
	%% The key is deterministic and shared by all offsets mapping to the same bucket.
	?assertEqual(EntropyKey, ar_util:encode(get_entropy_key(Addr, 1, 0))),
	?assertEqual(EntropyKey, ar_util:encode(get_entropy_key(Addr, 262144, 0))),
	%% The strict data split threshold in tests is 262144 * 3. Before the strict data
	%% split threshold, the mapping works such that the chunk end offset up to but excluding
	%% the bucket border is mapped to the previous bucket.
	?assertEqual(EntropyKey, ar_util:encode(get_entropy_key(Addr, 262144 * 2 - 1, 0))),
	EntropyKey2 = ar_util:encode(get_entropy_key(Addr, 262144 * 2, 0)),
	?assertNotEqual(EntropyKey, EntropyKey2),
	?assertEqual(EntropyKey2, ar_util:encode(get_entropy_key(Addr, 262144 * 3 - 1, 0))),
	EntropyKey3 = ar_util:encode(get_entropy_key(Addr, 262144 * 3, 0)),
	?assertNotEqual(EntropyKey2, EntropyKey3),
	EntropyKey4 = ar_util:encode(get_entropy_key(Addr, 262144 * 3 + 1, 0)),
	%% 262144 * 3 is the strict data split threshold so chunks ending after it are mapped
	%% to the first bucket after the threshold so the key does not equal the one of the
	%% chunk ending exactly at the threshold which is still mapped to the previous bucket.
	?assertNotEqual(EntropyKey3, EntropyKey4),
	?assertEqual(EntropyKey4, ar_util:encode(get_entropy_key(Addr, 262144 * 4 - 1, 0))),
	?assertEqual(EntropyKey4, ar_util:encode(get_entropy_key(Addr, 262144 * 4, 0))),
	%% The mapping then goes this way indefinitely.
	EntropyKey5 = ar_util:encode(get_entropy_key(Addr, 262144 * 5, 0)),
	?assertNotEqual(EntropyKey4, EntropyKey5),
	%% Shift by sector size.
	?assertEqual(EntropyKey4,
			ar_util:encode(get_entropy_key(Addr, 262144 * 3 + 1 + SectorSize, 0))),
	?assertEqual(EntropyKey4,
			ar_util:encode(get_entropy_key(Addr, 262144 * 4 + SectorSize, 0))),
	?assertEqual(EntropyKey5,
			ar_util:encode(get_entropy_key(Addr, 262144 * 4 + 1 + SectorSize, 0))),
	?assertEqual(EntropyKey5,
			ar_util:encode(get_entropy_key(Addr, 262144 * 5 + SectorSize, 0))),
	%% Exactly equal to the recall partition size:
	?assertEqual(0, get_entropy_partition(262144 * 5 + SectorSize)),
	%% One greater than the recall partition size:
	?assertEqual(1, get_entropy_partition(262144 * 5 + SectorSize + 1)),
	%% Greater than the entropy partition size (shouldn't matter since we map chunks
	%% based on recall partition size)
	?assertEqual(1, get_entropy_partition(262144 * 6 + SectorSize + 1)),
	%% The new partition => the new entropy.
	EntropyKey6 = ar_util:encode(get_entropy_key(Addr, 262144 * 5 + 2 * SectorSize, 0)),
	?assertNotEqual(EntropyKey6, EntropyKey5),
	%% There is, of course, regularity within every partition.
	?assertEqual(EntropyKey6,
			ar_util:encode(get_entropy_key(Addr, 262144 * 5 + 3 * SectorSize, 0))),
	%% Test the edges of recall partition vs. entropy partition.
	?assertEqual(0, get_entropy_partition(ar_block:partition_size())),
	?assertEqual(1, get_entropy_partition(EntropyPartitionSize)),
	?assertEqual(1, get_entropy_partition(2 * ar_block:partition_size())),
	?assertEqual(2, get_entropy_partition(ar_block:partition_size() + EntropyPartitionSize)),
	?assertEqual(2, get_entropy_partition(3 * ar_block:partition_size())),
	?assertEqual(3,
			get_entropy_partition(2 * ar_block:partition_size() + EntropyPartitionSize)),
	?assertEqual(10, get_entropy_partition(11 * ar_block:partition_size())),
	?assertEqual(11,
			get_entropy_partition(10 * ar_block:partition_size() + EntropyPartitionSize)),
	%% This sub-chunk offset isn't used in practice, just adding a bounds check.
	?assertMatch(
		{'EXIT', {{badmatch, false}, _}}, catch get_entropy_index(0, 32 * SubChunkSize)).
%% Exercise get_entropy_partition_range/1 both with the strict data split threshold
%% inside the first partition and far beyond all tested partitions.
get_entropy_partition_range_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
				{ar_block, strict_data_split_threshold, fun() -> 700_000 end}
			],
			fun test_get_entropy_partition_range_after_strict/0, 30),
		ar_test_node:test_with_mocked_functions([
				{ar_block, strict_data_split_threshold, fun() -> 5_000_000 end}
			],
			fun test_get_entropy_partition_range_before_strict/0, 30)
	].

%% Verify that each range's endpoints map back to the expected partition and that
%% the computed range matches exactly.
test_get_entropy_partition_range_after_strict() ->
	Start0 = 0,
	End0 = 2272864,
	?assertEqual(0, get_entropy_partition(Start0)),
	?assertEqual(0, get_entropy_partition(End0)),
	?assertEqual({Start0, End0}, get_entropy_partition_range(0)),
	Start1 = 2272865,
	End1 = 4370016,
	?assertEqual(1, get_entropy_partition(Start1)),
	?assertEqual(1, get_entropy_partition(End1)),
	?assertEqual({Start1, End1}, get_entropy_partition_range(1)),
	Start2 = 4370017,
	End2 = 6205024,
	?assertEqual(2, get_entropy_partition(Start2)),
	?assertEqual(2, get_entropy_partition(End2)),
	?assertEqual({Start2, End2}, get_entropy_partition_range(2)),
	ok.

%% Same checks with the threshold beyond the tested offsets: ranges are 262144-aligned.
test_get_entropy_partition_range_before_strict() ->
	Start0 = 0,
	End0 = 2359295,
	?assertEqual(0, get_entropy_partition(Start0)),
	?assertEqual(0, get_entropy_partition(End0)),
	?assertEqual({Start0, End0}, get_entropy_partition_range(0)),
	Start1 = 2359296,
	End1 = 4456447,
	?assertEqual(1, get_entropy_partition(Start1)),
	?assertEqual(1, get_entropy_partition(End1)),
	?assertEqual({Start1, End1}, get_entropy_partition_range(1)),
	Start2 = 4456448,
	End2 = 6048576,
	?assertEqual(2, get_entropy_partition(Start2)),
	?assertEqual(2, get_entropy_partition(End2)),
	?assertEqual({Start2, End2}, get_entropy_partition_range(2)),
	ok.
%% @doc Walk sequentially through all chunks in a couple partitions and verify their slice
%% indices
slice_index_walk_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, partition_size, fun() -> 8 * 262144 end},
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 3 end},
			{ar_block, strict_data_split_threshold, fun() -> 3 * 262144 end}
		],
		fun test_slice_index_walk/0, 30).

test_slice_index_walk() ->
	%% --------------------------------------------------------------------------
	%% Before the strict data split threshold:
	%% --------------------------------------------------------------------------
	%% Partition start
	%% Sector start
	%% All sub-chunks in a chunk have the same slice index
	assert_slice_index(0, [ 0 ]),
	assert_slice_index(0, [ 1, 262144-1, 262144 ]),
	assert_slice_index(0, [ 262144+1, 2*262144-1 ]),
	assert_slice_index(0, [ 2*262144, 2*262144+1, 3*262144-1 ]),
	%% The strict data split threshold:
	%% The end offset exactly at the strict data split threshold is mapped to the
	%% second bucket, therefore it is still the same sector size.
	assert_slice_index(0, [ 3*262144 ]),
	%% --------------------------------------------------------------------------
	%% After the strict data split threshold, all end offsets are padded to a multiple of
	%% ?DATA_CHUNK_SIZE (i.e. 262144).
	%% --------------------------------------------------------------------------
	%% Sector start
	assert_slice_index(1, [ 3*262144+1, 4*262144-1, 4*262144 ]),
	assert_slice_index(1, [ 4*262144+1, 5*262144-1, 5*262144 ]),
	assert_slice_index(1, [ 5*262144+1 , 6*262144-1, 6*262144 ]),
	%% Sector start
	assert_slice_index(2, [ 6*262144+1, 7*262144-1, 7*262144 ]),
	assert_slice_index(2, [ 7*262144+1, 8*262144-1, 8*262144 ]),
	%% Recall partition start
	%% Sector start
	assert_slice_index(0, [ 8*262144+1, 9*262144-1, 9*262144 ]),
	assert_slice_index(0, [ 9*262144+1, 10*262144-1, 10*262144 ]),
	assert_slice_index(0, [ 10*262144+1, 11*262144-1, 11*262144 ]),
	%% Sector start
	assert_slice_index(1, [ 11*262144+1, 12*262144-1, 12*262144 ]),
	assert_slice_index(1, [ 12*262144+1, 13*262144-1, 13*262144 ]),
	assert_slice_index(1, [ 13*262144+1, 14*262144-1, 14*262144 ]),
	%% Sector start
	assert_slice_index(2, [ 14*262144+1, 15*262144-1, 15*262144 ]),
	assert_slice_index(2, [ 15*262144+1, 16*262144-1, 16*262144 ]),
	%% Recall partition start
	%% Sector start
	assert_slice_index(0, [ 16*262144+1, 17*262144-1, 17*262144 ]),
	%% The partition-boundary offsets: the last slice of one partition, then wrap to 0.
	?assertEqual(ar_block:get_sub_chunks_per_replica_2_9_entropy() - 1,
			get_slice_index(ar_block:partition_size())),
	?assertEqual(0, get_slice_index(ar_block:partition_size() + 1)),
	ok.

%% Assert that every offset in the list maps to the expected slice index; the failure
%% message names the offending offset.
assert_slice_index(_ExpectedIndex, []) ->
	ok;
assert_slice_index(ExpectedIndex, [AbsoluteChunkByteOffset | Rest]) ->
	?assertEqual(
		ExpectedIndex,
		get_slice_index(AbsoluteChunkByteOffset),
		lists:flatten(io_lib:format("get_slice_index(~p)", [AbsoluteChunkByteOffset]))
	),
	assert_slice_index(ExpectedIndex, Rest).

%% @doc Walk through every sub-chunk of each chunk and verify its entropy index and
%% entropy sub-chunk index.
entropy_index_walk_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 786432 end},
			{ar_block, get_replica_2_9_entropy_partition_size, fun() -> 2359296 end},
			{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 3 end}
		],
		fun test_entropy_index_walk/0, 30).

test_entropy_index_walk() ->
	%% assert_entropy_index takes a list of chunk end offsets and verifies the entropy
	%% index for each sub-chunk in the chunk. The first argument is the expected entropy
	%% index for the first sub-chunk in the chunk, for each subsequent sub-chunk the
	%% expected index is incremented by 1.
	%%
	%% The sector size determines the number of entropy indices. During tests the sector
	%% size is 3*262144, so the total number of entropy indices is 3*262144 / 8192 = 96 (one
	%% for each sub-chunk in each sector).

	%% In tests the strict data split threshold is 262144 * 3, before that offset chunks
	%% were not padded. So each provided end offset is taken as is. After the threshold each
	%% offset is padded to a multiple of ?DATA_CHUNK_SIZE (i.e. 262144) off of the threshold
	%% value.

	%% --------------------------------------------------------------------------
	%% Before the strict data split threshold:
	%% --------------------------------------------------------------------------
	%% Partition start
	%% Sector start
	assert_entropy_index(0, [ 0 ]),
	assert_entropy_index(0, [ 1, 262144-1, 262144 ]),
	assert_entropy_index(0, [ 262144+1, 2*262144-1 ]),
	assert_entropy_index(32, [ 2*262144, 2*262144+1, 3*262144-1 ]),
	%% The strict data split threshold:
	assert_entropy_index(64, [ 3*262144 ]),
	%% --------------------------------------------------------------------------
	%% After the strict data split threshold, all end offsets are padded to a multiple of
	%% ?DATA_CHUNK_SIZE (i.e. 262144).
	%% --------------------------------------------------------------------------
	%% Sector start
	assert_entropy_index(0, [ 3*262144+1, 4*262144-1, 4*262144 ]),
	assert_entropy_index(32, [ 4*262144+1, 5*262144-1, 5*262144 ]),
	assert_entropy_index(64, [ 5*262144+1 , 6*262144-1, 6*262144 ]),
	%% Sector start
	assert_entropy_index(0, [ 6*262144+1, 7*262144-1, 7*262144 ]),
	assert_entropy_index(32, [ 7*262144+1, 8*262144-1, 8*262144 ]),
	%% Partition start
	%% Sector start
	assert_entropy_index(0, [ 8*262144+1, 9*262144-1, 9*262144 ]),
	assert_entropy_index(32, [ 9*262144+1, 10*262144-1, 10*262144 ]),
	assert_entropy_index(64, [ 10*262144+1, 11*262144-1, 11*262144 ]),
	%% Sector start
	assert_entropy_index(0, [ 11*262144+1, 12*262144-1, 12*262144 ]),
	assert_entropy_index(32, [ 12*262144+1, 13*262144-1, 13*262144 ]),
	assert_entropy_index(64, [ 13*262144+1, 14*262144-1, 14*262144 ]),
	%% Sector start
	assert_entropy_index(0, [ 14*262144+1, 15*262144-1, 15*262144 ]),
	assert_entropy_index(32, [ 15*262144+1, 16*262144-1, 16*262144 ]),
	%% Partition start
	%% Sector start
	assert_entropy_index(0, [ 16*262144+1, 17*262144-1, 17*262144 ]),
	ok.

%% Verify the expected entropy index for every sub-chunk of every chunk end offset given.
assert_entropy_index(_ExpectedIndex, []) ->
	ok;
assert_entropy_index(ExpectedIndex, [AbsoluteChunkByteOffset | Rest]) ->
	walk_sub_chunks(ExpectedIndex, AbsoluteChunkByteOffset, 0),
	assert_entropy_index(ExpectedIndex, Rest).

%% Check the entropy index at the start, start+1, and last byte of each 8192-byte
%% sub-chunk; the expected index advances by 1 per sub-chunk.
walk_sub_chunks(_ExpectedIndex, _AbsoluteChunkByteOffset, SubChunkStartOffset)
		when SubChunkStartOffset >= ?DATA_CHUNK_SIZE ->
	ok;
walk_sub_chunks(ExpectedIndex, AbsoluteChunkByteOffset, SubChunkStartOffset) ->
	?assertEqual(
		ExpectedIndex,
		get_entropy_index(AbsoluteChunkByteOffset, SubChunkStartOffset),
		lists:flatten(io_lib:format("get_entropy_index(~p, ~p)",
				[AbsoluteChunkByteOffset, SubChunkStartOffset]))
	),
	?assertEqual(
		ExpectedIndex,
		get_entropy_index(AbsoluteChunkByteOffset, SubChunkStartOffset+1),
		lists:flatten(io_lib:format("get_entropy_index(~p, ~p)",
				[AbsoluteChunkByteOffset, SubChunkStartOffset+1]))
	),
	?assertEqual(
		ExpectedIndex,
		get_entropy_index(AbsoluteChunkByteOffset, SubChunkStartOffset+8192-1),
		lists:flatten(io_lib:format("get_entropy_index(~p, ~p)",
				[AbsoluteChunkByteOffset, SubChunkStartOffset+8192-1]))
	),
	walk_sub_chunks(ExpectedIndex+1, AbsoluteChunkByteOffset, SubChunkStartOffset+8192).



================================================
FILE: apps/arweave/src/ar_retarget.erl
================================================
%%% @doc A helper module for deciding when and which blocks will be retarget
%%% blocks, that is, those which change the current mining difficulty
%%% on the weave to maintain a constant block time.
%%% @end
-module(ar_retarget).

-export([is_retarget_height/1, is_retarget_block/1, maybe_retarget/5,
		calculate_difficulty/5, validate_difficulty/2, switch_to_linear_diff/1,
		switch_to_linear_diff_pre_fork_2_5/1, switch_to_log_diff/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%% A macro for checking if the given block is a retarget block.
%% Returns true if so, otherwise returns false.
-define(IS_RETARGET_BLOCK(X),
	(
		((X#block.height rem ?RETARGET_BLOCKS) == 0) and (X#block.height =/= 0)
	)
).

%% A macro for checking if the given height is a retarget height.
%% Returns true if so, otherwise returns false.
-define(IS_RETARGET_HEIGHT(Height),
	(
		((Height rem ?RETARGET_BLOCKS) == 0) and (Height =/= 0)
	)
).

%% @doc The unconditional difficulty reduction coefficient applied at the
%% first 2.5 block.
-define(DIFF_DROP_2_5, 2).

%% @doc The unconditional difficulty reduction coefficient applied at the
%% first 2.6 block.
-define(INITIAL_DIFF_DROP_2_6, 100).

%% @doc The additional difficulty reduction coefficient applied every 10 minutes at the
%% first 2.6 block.
-define(DIFF_DROP_2_6, 2).

%% @doc The unconditional difficulty reduction coefficient applied at the
%% first 2.7.2 block.
-define(INITIAL_DIFF_DROP_2_7_2, 10).

%% @doc The additional difficulty reduction coefficient applied every 10 minutes at the
%% first 2.7.2 block.
-define(DIFF_DROP_2_7_2, 2).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return true if the given height is a retarget height.
is_retarget_height(Height) ->
	?IS_RETARGET_HEIGHT(Height).

%% @doc Return true if the given block is a retarget block.
is_retarget_block(Block) ->
	?IS_RETARGET_BLOCK(Block).

%% @doc On a retarget height, recompute the difficulty pair from the time it took
%% to mine the last ?RETARGET_BLOCKS blocks; otherwise return the current pair
%% unchanged.
maybe_retarget(Height, {CurPoA1Diff, CurDiff}, TS, LastRetargetTS, PrevTS) ->
	case ar_retarget:is_retarget_height(Height) of
		true ->
			NewDiff = calculate_difficulty(CurDiff, TS, LastRetargetTS, Height, PrevTS),
			{ar_difficulty:poa1_diff(NewDiff, Height), NewDiff};
		false ->
			{CurPoA1Diff, CurDiff}
	end.

%% In LOCALNET builds the difficulty is never adjusted.
-ifdef(LOCALNET).
calculate_difficulty(OldDiff, _TS, _Last, _Height, _PrevTS) ->
	OldDiff.
-else.
%% Dispatch on height to the difficulty-adjustment algorithm that was in force at
%% that height. Exact fork-activation heights apply one-off difficulty drops; the
%% clause order encodes the precedence of the fork rules.
calculate_difficulty(OldDiff, TS, Last, Height, PrevTS) ->
	Fork_1_7 = ar_fork:height_1_7(),
	Fork_1_8 = ar_fork:height_1_8(),
	Fork_1_9 = ar_fork:height_1_9(),
	Fork_2_4 = ar_fork:height_2_4(),
	Fork_2_5 = ar_fork:height_2_5(),
	Fork_2_6 = ar_fork:height_2_6(),
	Fork_2_7_2 = ar_fork:height_2_7_2(),
	Fork_Testnet = ar_testnet:height_testnet_fork(),
	case Height of
		_ when Height == Fork_Testnet ->
			calculate_difficulty_with_drop(OldDiff, TS, Last, Height, PrevTS, 100, 2);
		_ when Height == Fork_2_7_2 ->
			calculate_difficulty_with_drop(OldDiff, TS, Last, Height, PrevTS,
					?INITIAL_DIFF_DROP_2_7_2, ?DIFF_DROP_2_7_2);
		_ when Height == Fork_2_6 ->
			calculate_difficulty_with_drop(OldDiff, TS, Last, Height, PrevTS,
					?INITIAL_DIFF_DROP_2_6, ?DIFF_DROP_2_6);
		_ when Height > Fork_2_5 ->
			calculate_difficulty(OldDiff, TS, Last, Height);
		_ when Height == Fork_2_5 ->
			calculate_difficulty_at_2_5(OldDiff, TS, Last, Height, PrevTS);
		_ when Height > Fork_2_4 ->
			calculate_difficulty_after_2_4_before_2_5(OldDiff, TS, Last, Height);
		_ when Height == Fork_2_4 ->
			calculate_difficulty_at_2_4(OldDiff, TS, Last, Height);
		_ when Height >= Fork_1_9 ->
			calculate_difficulty_at_and_after_1_9_before_2_4(OldDiff, TS, Last, Height);
		_ when Height > Fork_1_8 ->
			calculate_difficulty_after_1_8_before_1_9(OldDiff, TS, Last, Height);
		_ when Height == Fork_1_8 ->
			switch_to_linear_diff_pre_fork_2_5(OldDiff);
		_ when Height == Fork_1_7 ->
			ar_difficulty:switch_to_randomx_fork_diff(OldDiff);
		_ ->
			calculate_difficulty_before_1_8(OldDiff, TS, Last, Height)
	end.
-endif.

%% @doc Assert the new block has an appropriate difficulty.
-ifdef(LOCALNET).
validate_difficulty(_NewB, _OldB) ->
	true.
-else.
%% On a retarget block, the new difficulty must equal the deterministic
%% recomputation from the previous block; otherwise both the difficulty and the
%% last_retarget timestamp must carry over unchanged.
validate_difficulty(NewB, OldB) ->
	case ar_retarget:is_retarget_block(NewB) of
		true ->
			(NewB#block.diff ==
				calculate_difficulty(
					OldB#block.diff,
					NewB#block.timestamp,
					OldB#block.last_retarget,
					NewB#block.height,
					OldB#block.timestamp));
		false ->
			(NewB#block.diff == OldB#block.diff) and
				(NewB#block.last_retarget == OldB#block.last_retarget)
	end.
-endif.

%% @doc The number a hash must be greater than, to give the same odds of success
%% as the old-style Diff (number of leading zeros in the bitstring).
switch_to_linear_diff(LogDiff) ->
	?MAX_DIFF - ar_fraction:pow(2, 256 - LogDiff).

%% Pre-2.5 variant of the conversion; uses floating-point math and is kept
%% exactly as-is for validating historical blocks.
switch_to_linear_diff_pre_fork_2_5(Diff) ->
	erlang:trunc(math:pow(2, 256)) - erlang:trunc(math:pow(2, 256 - Diff)).

%% @doc only used for logging/metrics as the log diff is easier to understand than the linear diff
switch_to_log_diff(LinearDiff) ->
	256 - math:log2(?MAX_DIFF - LinearDiff).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% The current (post-2.5, non-fork-height) difficulty adjustment.
calculate_difficulty(OldDiff, TS, Last, Height) ->
	%% We only do retarget if the time it took to mine ?RETARGET_BLOCKS is bigger than
	%% or equal to RetargetToleranceUpperBound or smaller than or equal to
	%% RetargetToleranceLowerBound.
	TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
	TargetTimeUpperBound = TargetTime + ar_testnet:target_block_time(Height),
	TargetTimeLowerBound = TargetTime - ar_testnet:target_block_time(Height),
	ActualTime = max(TS - Last, ar_block:get_max_timestamp_deviation()),
	case ActualTime < TargetTimeUpperBound andalso ActualTime > TargetTimeLowerBound of
		true ->
			OldDiff;
		false ->
			%% Scale difficulty by TargetTime / ActualTime
			%% If ActualTime is less than TargetTime it means we need to *increase* the
			%% difficulty, and vice versa.
			ar_difficulty:scale_diff(OldDiff, {TargetTime, ActualTime}, Height)
	end.
%% @doc The 2.5 fork retarget: same as the generic drop algorithm with
%% ?DIFF_DROP_2_5 as both the initial and the per-10-minutes coefficient.
calculate_difficulty_at_2_5(OldDiff, TS, Last, Height, PrevTS) ->
    calculate_difficulty_with_drop(OldDiff, TS, Last, Height, PrevTS,
            ?DIFF_DROP_2_5, ?DIFF_DROP_2_5).

%% @doc Retarget with a forced difficulty drop, used at hard forks where the
%% effective hashrate is expected to fall (e.g. a packing/hashing change).
calculate_difficulty_with_drop(OldDiff, TS, Last, Height, PrevTS, InitialCoeff, Coeff) ->
    TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
    ActualTime = max(TS - Last, ar_block:get_max_timestamp_deviation()),
    Step = 10 * 60,
    %% Drop the difficulty InitialCoeff times right away, then drop extra Coeff times
    %% for every 10 minutes passed.
    ActualTime2 = ActualTime * InitialCoeff
            * ar_fraction:pow(Coeff, max(TS - PrevTS, 0) div Step),
    %% Scale difficulty by TargetTime / ActualTime2
    %% If ActualTime2 is less than TargetTime it means we need to *increase* the difficulty,
    %% and vice versa.
    ar_difficulty:scale_diff(OldDiff, {TargetTime, ActualTime2}, Height).

%% @doc Retarget used between the 2.4 and 2.5 forks: linear scaling of the
%% "difficulty inverse", bounded by the minimum and maximum difficulty.
calculate_difficulty_after_2_4_before_2_5(OldDiff, TS, Last, Height) ->
    TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
    ActualTime = TS - Last,
    TimeDelta = ActualTime / TargetTime,
    case abs(1 - TimeDelta) < ?RETARGET_TOLERANCE of
        true ->
            OldDiff;
        false ->
            MaxDiff = ?MAX_DIFF,
            MinDiff = ar_difficulty:min_difficulty(Height),
            DiffInverse = erlang:trunc((MaxDiff - OldDiff) * TimeDelta),
            ar_util:between(
                MaxDiff - DiffInverse,
                MinDiff,
                MaxDiff
            )
    end.

%% @doc Retarget applied exactly at the 2.4 (SPoRA) fork.
calculate_difficulty_at_2_4(OldDiff, TS, Last, Height) ->
    TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
    ActualTime = TS - Last,
    %% Make the difficulty drop 10 times faster than usual. The difficulty
    %% after SPoRA is estimated to be around 10-100 times lower. In the worst
    %% case, the 10x adjustment leads to a block per 12 seconds on average,
    %% what is a reasonable lower bound on the block time. In case of the 100x
    %% reduction in difficulty, it would only take 100 minutes to adjust.
    TimeDelta = 10 * ActualTime / TargetTime,
    MaxDiff = ?MAX_DIFF,
    MinDiff = ar_difficulty:min_difficulty(Height),
    DiffInverse = erlang:trunc((MaxDiff - OldDiff) * TimeDelta),
    ar_util:between(
        MaxDiff - DiffInverse,
        MinDiff,
        MaxDiff
    ).

%% @doc Retarget used from 1.9 up to (excluding) 2.4: like the later linear
%% algorithm but with the per-retarget change capped by the
%% ?DIFF_ADJUSTMENT_UP_LIMIT / ?DIFF_ADJUSTMENT_DOWN_LIMIT bounds.
calculate_difficulty_at_and_after_1_9_before_2_4(OldDiff, TS, Last, Height) ->
    TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
    ActualTime = TS - Last,
    TimeDelta = ActualTime / TargetTime,
    case abs(1 - TimeDelta) < ?RETARGET_TOLERANCE of
        true ->
            OldDiff;
        false ->
            MaxDiff = ?MAX_DIFF,
            MinDiff = ar_difficulty:min_difficulty(Height),
            EffectiveTimeDelta = ar_util:between(
                ActualTime / TargetTime,
                1 / ?DIFF_ADJUSTMENT_UP_LIMIT,
                ?DIFF_ADJUSTMENT_DOWN_LIMIT
            ),
            DiffInverse = erlang:trunc((MaxDiff - OldDiff) * EffectiveTimeDelta),
            ar_util:between(
                MaxDiff - DiffInverse,
                MinDiff,
                MaxDiff
            )
    end.

%% @doc Retarget used between the 1.8 and 1.9 forks; the result is clamped to
%% [OldDiff div 2, OldDiff * 4] (within the global min/max difficulty).
calculate_difficulty_after_1_8_before_1_9(OldDiff, TS, Last, Height) ->
    TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
    ActualTime = TS - Last,
    TimeDelta = ActualTime / TargetTime,
    case abs(1 - TimeDelta) < ?RETARGET_TOLERANCE of
        true ->
            OldDiff;
        false ->
            MaxDiff = ?MAX_DIFF,
            MinDiff = ar_difficulty:min_difficulty(Height),
            ar_util:between(
                MaxDiff - (MaxDiff - OldDiff) * ActualTime div TargetTime,
                max(MinDiff, OldDiff div 2),
                min(MaxDiff, OldDiff * 4)
            )
    end.

%% @doc Pre-1.8 retarget on the logarithmic difficulty: step by +/- 1
%% depending on whether blocks came too fast or too slow.
calculate_difficulty_before_1_8(OldDiff, TS, Last, Height) ->
    TargetTime = ?RETARGET_BLOCKS * ar_testnet:target_block_time(Height),
    ActualTime = TS - Last,
    TimeError = abs(ActualTime - TargetTime),
    Diff = erlang:max(
        if
            TimeError < (TargetTime * ?RETARGET_TOLERANCE) -> OldDiff;
            TargetTime > ActualTime                        -> OldDiff + 1;
            true                                           -> OldDiff - 1
        end,
        ar_difficulty:min_difficulty(Height)
    ),
    Diff.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Ensure that after a series of very fast mines, the diff increases.
%% Mine ?RETARGET_BLOCKS + 1 blocks as fast as possible and verify the
%% difficulty of the chain tip ends up above the genesis difficulty.
simple_retarget_test_() ->
    {timeout, 300, fun() ->
        [B0] = ar_weave:init(),
        ar_test_node:start(B0),
        lists:foreach(
            fun(Height) ->
                ar_test_node:mine(),
                ar_test_node:wait_until_height(main, Height)
            end,
            lists:seq(1, ?RETARGET_BLOCKS + 1)
        ),
        true = ar_util:do_until(
            fun() ->
                [BH | _] = ar_node:get_blocks(),
                B = ar_storage:read_block(BH),
                B#block.diff > B0#block.diff
            end,
            1000,
            5 * 60 * 1000
        )
    end}.

%% Run the linear-difficulty test with the 2.5 fork mocked to height 0 so the
%% post-2.5 code path is exercised.
calculate_difficulty_linear_test_() ->
    ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> 0 end}],
        fun test_calculate_difficulty_linear/0, 120).

test_calculate_difficulty_linear() ->
    Diff = switch_to_linear_diff(27),
    TargetTime = ?RETARGET_BLOCKS * ?TARGET_BLOCK_TIME,
    Timestamp = os:system_time(seconds),
    %% The change is smaller than retarget tolerance.
    Retarget1 = Timestamp - TargetTime - ?TARGET_BLOCK_TIME + 1,
    ?assertEqual(
        Diff,
        calculate_difficulty(Diff, Timestamp, Retarget1, 1)
    ),
    Retarget2 = Timestamp - TargetTime + ?TARGET_BLOCK_TIME - 1,
    ?assertEqual(
        Diff,
        calculate_difficulty(Diff, Timestamp, Retarget2, 1)
    ),
    %% The change is not capped by ?DIFF_ADJUSTMENT_UP_LIMIT anymore.
    Retarget3 = Timestamp - TargetTime div (?DIFF_ADJUSTMENT_UP_LIMIT + 1),
    ?assertEqual(
        (?DIFF_ADJUSTMENT_UP_LIMIT + 1) * hashes(Diff),
        hashes(
            calculate_difficulty(Diff, Timestamp, Retarget3, 1)
        )
    ),
    %% The change is not capped by ?DIFF_ADJUSTMENT_DOWN_LIMIT anymore.
    Retarget4 = Timestamp - (?DIFF_ADJUSTMENT_DOWN_LIMIT + 2) * TargetTime,
    ?assertEqual(
        hashes(Diff),
        (?DIFF_ADJUSTMENT_DOWN_LIMIT + 2) * hashes(
            calculate_difficulty(Diff, Timestamp, Retarget4, 1)
        )
    ),
    %% The actual time is three times smaller.
    Retarget5 = Timestamp - TargetTime div 3,
    ?assert(
        3.001 * hashes(Diff)
        >
        hashes(
            calculate_difficulty(Diff, Timestamp, Retarget5, 1)
        )
    ),
    ?assert(
        3.001 / 2 * hashes(Diff)
        >
        hashes(
            % Expect 2x drop at 2.5.
            calculate_difficulty_at_2_5(Diff, Timestamp, Retarget5, 0, Timestamp - 1)
        )
    ),
    ?assert(
        2.999 * hashes(Diff)
        <
        hashes(
            calculate_difficulty(Diff, Timestamp, Retarget5, 1)
        )
    ),
    ?assert(
        2.999 / 2 * hashes(Diff)
        <
        hashes(
            % Expect 2x drop at 2.5.
            calculate_difficulty_at_2_5(Diff, Timestamp, Retarget5, 0, Timestamp - 1)
        )
    ),
    %% The actual time is two times bigger.
    Retarget6 = Timestamp - 2 * TargetTime,
    ?assert(
        hashes(Diff)
        >
        1.999 * hashes(
            calculate_difficulty(Diff, Timestamp, Retarget6, 1)
        )
    ),
    ?assert(
        hashes(Diff)
        >
        3.999 * hashes(
            % Expect 2x drop at 2.5.
            calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 1)
        )
    ),
    ?assert(
        hashes(Diff)
        >
        7.999 * hashes(
            % Expect extra 2x after 10 minutes.
            calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 600)
        )
    ),
    ?assert(
        hashes(Diff)
        <
        2.001 * hashes(
            calculate_difficulty(Diff, Timestamp, Retarget6, 1)
        )
    ),
    ?assert(
        hashes(Diff)
        <
        4.001 * hashes(
            % Expect 2x drop at 2.5.
            calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 1)
        )
    ),
    ?assert(
        hashes(Diff)
        <
        8.001 * hashes(
            % Expect extra 2x after 10 minutes.
            calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 600)
        )
    ).

%% The expected number of hashes to find a solution at the given linear Diff.
hashes(Diff) ->
    MaxDiff = ?MAX_DIFF,
    MaxDiff div (MaxDiff - Diff).



================================================
FILE: apps/arweave/src/ar_rewards.erl
================================================
-module(ar_rewards).

-export([reward_history_length/1, expected_hashes_length/1,
    buffered_reward_history_length/1,
    set_reward_history/2, get_locked_rewards/1, trim_locked_rewards/2,
    trim_reward_history/2, trim_buffered_reward_history/2, interim_reward_history_bi/2,
    get_oldest_locked_address/1, add_element/2, has_locked_reward/2,
    reward_history_hash/3, validate_reward_history_hashes/3,
    get_total_reward_for_address/2, get_reward_history_totals/1, apply_rewards/2,
    apply_reward/4, log_reward_history/3]).

-include_lib("arweave/include/ar.hrl").
%% @doc Length of the reward history sliding window at the given height,
%% plus a ar_block:get_consensus_window_size() buffer.
reward_history_length(Height) ->
    min(
        Height - ar_fork:height_2_6() + 1, %% included for compatibility with unit tests
        case Height >= ar_fork:height_2_8() of
            true ->
                ar_testnet:reward_history_blocks(Height)
                        + ar_block:get_consensus_window_size();
            false ->
                ar_testnet:legacy_reward_history_blocks(Height)
                        + ar_block:get_consensus_window_size()
        end
    ).

%% @doc The number of reward history hashes a peer is expected to provide.
expected_hashes_length(Height) ->
    case Height >= ar_fork:height_2_8() of
        true ->
            %% Take one more block.reward_history_hash because after 2.8 we use
            %% the previous reward history hash to compute the new one.
            ar_block:get_consensus_window_size() + 1;
        false ->
            ar_block:get_consensus_window_size()
    end.

%% @doc The reward history that gets cached in #block and returned by /reward_history has
%% to be long enough to include:
%% 1. The current reward history (i.e. reward_history_length(Height))
%% 2. The reward history that was in use recently
%%    (i.e. reward_history_length(Height - expected_hashes_length(Height)))
%% 3. The current locked rewards (i.e. locked_rewards_blocks(Height))
buffered_reward_history_length(Height) ->
    max(
        max(
            reward_history_length(Height - expected_hashes_length(Height)),
            reward_history_length(Height)
        ),
        ar_testnet:locked_rewards_blocks(Height)
    ).

%% @doc Add the corresponding reward history to every block record. We keep
%% the reward histories in the block cache and use them to validate blocks applied on top.
%%
%% The expectation is that RewardHistory is at least
%% reward_history_length/1 long, and that Blocks is no longer than
%% ar_block:get_consensus_window_size(). If so then each block.reward_history value will be at least
%% ?REWARD_HISTORY_BLOCKS long.
set_reward_history([], _RewardHistory) ->
    [];
set_reward_history(Blocks, []) ->
    Blocks;
set_reward_history([B | Blocks], RewardHistory) ->
    %% Each older block gets the history with one more head element dropped.
    [B#block{ reward_history = RewardHistory }
            | set_reward_history(Blocks, tl(RewardHistory))].

%% @doc Return the most recent part of the reward history including the locked rewards.
get_locked_rewards(B) ->
    #block{ height = Height, reward_history = History } = B,
    trim_locked_rewards(Height, History).

%% @doc Cut RewardHistory down to the locked rewards only.
trim_locked_rewards(Height, RewardHistory) ->
    lists:sublist(RewardHistory, ar_testnet:locked_rewards_blocks(Height)).

%% @doc Cut RewardHistory down to the values stored in the block: the sliding
%% window plus a buffer of ar_block:get_consensus_window_size() values.
trim_reward_history(Height, RewardHistory) ->
    Keep = reward_history_length(Height),
    lists:sublist(RewardHistory, Keep).

%% @doc See buffered_reward_history_length/1 for how this differs from
%% trim_reward_history/2.
trim_buffered_reward_history(Height, RewardHistory) ->
    Keep = buffered_reward_history_length(Height),
    lists:sublist(RewardHistory, Keep).

%% @doc Return the portion of the block index needed for reading the reward
%% history during startup. Until ~2 months post 2.8 hardfork, the reward
%% history accumulated by any node will be shorter than the full expected
%% length — specifically 21,600 blocks plus the number of blocks elapsed
%% since the 2.8 activation.
interim_reward_history_bi(Height, BI) ->
    Buffered = trim_buffered_reward_history(Height, BI),
    lists:sublist(Buffered, (Height - ar_fork:height_2_8()) + 21600).

%% @doc The address of the oldest (last) entry among the locked rewards.
get_oldest_locked_address(B) ->
    {Addr, _, _, _} = lists:last(get_locked_rewards(B)),
    Addr.

%% @doc Prepend a new {Addr, HashRate, Reward, Denomination} tuple to the
%% reward history and trim it to the buffered length.
add_element(B, RewardHistory) ->
    #block{ height = Height, reward = Reward, denomination = Denomination,
            reward_addr = RewardAddr } = B,
    HashRate = ar_difficulty:get_hash_rate_fixed_ratio(B),
    Entry = {RewardAddr, HashRate, Reward, Denomination},
    trim_buffered_reward_history(Height, [Entry | RewardHistory]).
%% @doc Return true if Addr has an entry among the given locked rewards.
has_locked_reward(_Addr, []) ->
    false;
has_locked_reward(Addr, [{Addr, _, _, _} | _]) ->
    true;
has_locked_reward(Addr, [_ | RewardHistory]) ->
    has_locked_reward(Addr, RewardHistory).

%% @doc Validate the chain of expected reward history hashes against the
%% given reward history, walking backwards one height per hash.
validate_reward_history_hashes(_Height, _RewardHistory, []) ->
    true;
validate_reward_history_hashes(0, [_Element] = History, [H]) ->
    %% This clause is not applicable in mainnet but reflects how we initialize
    %% the reward history hash in the new weaves, even if the 2.8 height is not
    %% set from the genesis.
    H == reward_history_hash(0, <<>>, History);
validate_reward_history_hashes(Height, RewardHistory, [H, PrevH | ExpectedHashes]) ->
    case validate_reward_history_hash(Height, PrevH, H, RewardHistory) of
        true ->
            case ExpectedHashes of
                [] ->
                    true;
                _ ->
                    validate_reward_history_hashes(Height - 1, tl(RewardHistory),
                            [PrevH | ExpectedHashes])
            end;
        false ->
            false
    end;
validate_reward_history_hashes(Height, RewardHistory, [H]) ->
    %% After 2.8 we always include one extra hash to the list so we cannot end up here.
    true = Height < ar_fork:height_2_8(),
    validate_reward_history_hash(Height, not_set, H, RewardHistory).

%% @doc Check a single expected hash H against the history at Height.
validate_reward_history_hash(Height, PreviousRewardHistoryHash, H, RewardHistory) ->
    H == reward_history_hash(Height, PreviousRewardHistoryHash,
            %% Pre-2.8: slice the reward history to compute the hash
            %% Post-2.8: use the previous reward history hash and the head of the history to compute
            %% the new hash.
            trim_locked_rewards(Height, RewardHistory)).

%% @doc Compute the reward history hash for the given height. Post-2.8 this is
%% a rolling hash of the newest element chained onto the previous hash;
%% pre-2.8 it is a hash over the whole (locked) history.
reward_history_hash(Height, PreviousRewardHistoryHash, History) ->
    case Height >= ar_fork:height_2_8() of
        true ->
            Element = encode_reward_history_element(hd(History)),
            Preimage = << Element/binary, PreviousRewardHistoryHash/binary >>,
            crypto:hash(sha256, Preimage);
        false ->
            reward_history_hash(History, [ar_serialize:encode_int(length(History), 8)])
    end.
%% @doc SHA-256 over the serialized fields of one reward history element.
encode_reward_history_element({Addr, HashRate, Reward, Denomination}) ->
    HashRateBin = ar_serialize:encode_int(HashRate, 8),
    RewardBin = ar_serialize:encode_int(Reward, 8),
    DenominationBin = << Denomination:24 >>,
    crypto:hash(sha256, << Addr/binary, HashRateBin/binary, RewardBin/binary,
            DenominationBin/binary >>).

%% Pre-2.8 hash: fold every element into an iolist and hash it all at once.
reward_history_hash([], IOList) ->
    crypto:hash(sha256, iolist_to_binary(IOList));
reward_history_hash([{Addr, HashRate, Reward, Denomination} | History], IOList) ->
    HashRateBin = ar_serialize:encode_int(HashRate, 8),
    RewardBin = ar_serialize:encode_int(Reward, 8),
    DenominationBin = << Denomination:24 >>,
    reward_history_hash(History, [Addr, HashRateBin, RewardBin, DenominationBin | IOList]).

%% @doc Sum of the locked rewards belonging to Addr, redenominated into the
%% block's denomination.
get_total_reward_for_address(Addr, B) ->
    get_total_reward_for_address(Addr, get_locked_rewards(B), B#block.denomination, 0).

get_total_reward_for_address(_Addr, [], _Denomination, Total) ->
    Total;
get_total_reward_for_address(Addr, [{Addr, _, Reward, RewardDenomination} | LockedRewards],
        Denomination, Total) ->
    Reward2 = ar_pricing:redenominate(Reward, RewardDenomination, Denomination),
    get_total_reward_for_address(Addr, LockedRewards, Denomination, Total + Reward2);
get_total_reward_for_address(Addr, [_ | LockedRewards], Denomination, Total) ->
    get_total_reward_for_address(Addr, LockedRewards, Denomination, Total).

%% @doc Return {HashRateTotal, RewardTotal} summed up over the entire
%% sliding window of the history of rewards for the given block.
get_reward_history_totals(B) ->
    Denomination = B#block.denomination,
    History = trim_reward_history(B#block.height, B#block.reward_history),
    log_reward_history("get_reward_history_totals", History, 200),
    {HashRateTotal, RewardTotal} = get_totals(History, Denomination, 0, 0),
    {HashRateTotal, RewardTotal, History}.
%% Accumulate {HashRateTotal, RewardTotal} over a reward history, converting
%% each reward into the target Denomination.
get_totals([], _Denomination, HashRateTotal, RewardTotal) ->
    {HashRateTotal, RewardTotal};
get_totals([{_Addr, HashRate, Reward, RewardDenomination} | History],
        Denomination, HashRateTotal, RewardTotal) ->
    HashRateTotal2 = HashRateTotal + HashRate,
    Reward2 = ar_pricing:redenominate(Reward, RewardDenomination, Denomination),
    RewardTotal2 = RewardTotal + Reward2,
    get_totals(History, Denomination, HashRateTotal2, RewardTotal2).

%% @doc Release the reward(s) unlocking at the next height into Accounts.
apply_rewards(PrevB, Accounts) ->
    %% The only time we won't have only a single reward to apply is if the
    %% ?LOCKED_REWARDS_BLOCKS has changed between blocks. And currently that can only
    %% happen on testnet.
    Height = PrevB#block.height,
    NumRewardsToApply = max(0, ar_testnet:locked_rewards_blocks(Height)
            - ar_testnet:locked_rewards_blocks(Height + 1) + 1),
    %% Guard: more than one reward is only legal on testnet.
    true = NumRewardsToApply == 1 orelse ar_testnet:is_testnet(),
    %% Get the last NumRewardsToApply elements of the LockedRewards list in reverse order.
    %% Normally this will be a list with a single element: the last element in the
    %% LockedRewards list.
    %% When forking testnet off of mainnet this may be a list of more than 1 element.
    RewardsToApply = lists:sublist(lists:reverse(get_locked_rewards(PrevB)),
            NumRewardsToApply),
    apply_rewards2(RewardsToApply, PrevB#block.denomination, Accounts).

%% Credit each unlocking reward to its address, skipping banned accounts.
apply_rewards2([], _Denomination, Accounts) ->
    Accounts;
apply_rewards2([{Addr, _HashRate, Reward, RewardDenomination} | RewardsToApply],
        Denomination, Accounts) ->
    case ar_node_utils:is_account_banned(Addr, Accounts) of
        true ->
            apply_rewards2(RewardsToApply, Denomination, Accounts);
        false ->
            Reward2 = ar_pricing:redenominate(Reward, RewardDenomination, Denomination),
            Accounts2 = apply_reward(Accounts, Addr, Reward2, Denomination),
            apply_rewards2(RewardsToApply, Denomination, Accounts2)
    end.

%% @doc Add the mining reward to the corresponding account.
%% An unclaimed reward credits nobody.
apply_reward(Accounts, unclaimed, _Quantity, _Denomination) ->
    Accounts;
apply_reward(Accounts, RewardAddr, Amount, Denomination) ->
    %% Accounts may be stored as {Balance, LastTX} (legacy, denomination 1) or
    %% {Balance, LastTX, Denomination, MiningPermission}; normalize the balance
    %% into the current denomination before adding the reward.
    case maps:get(RewardAddr, Accounts, not_found) of
        not_found ->
            ar_node_utils:update_account(RewardAddr, Amount, <<>>, Denomination, true,
                    Accounts);
        {Balance, LastTX} ->
            Balance2 = ar_pricing:redenominate(Balance, 1, Denomination),
            ar_node_utils:update_account(RewardAddr, Balance2 + Amount, LastTX,
                    Denomination, true, Accounts);
        {Balance, LastTX, AccountDenomination, MiningPermission} ->
            Balance2 = ar_pricing:redenominate(Balance, AccountDenomination, Denomination),
            ar_node_utils:update_account(RewardAddr, Balance2 + Amount, LastTX,
                    Denomination, MiningPermission, Accounts)
    end.

%% @doc Log up to N entries of RewardHistory (hash rates shown as log10).
%% NOTE(review): math:log10/1 will fail on a zero hash rate — presumably
%% entries always have a positive hash rate; confirm against callers.
log_reward_history(Message, RewardHistory, N) ->
    Length = length(RewardHistory),
    LimitedRewardHistory = lists:sublist(RewardHistory, N),
    LogEntries = lists:map(fun({Addr, HashRate, Reward, Denomination}) ->
            EncodedAddr = ar_util:encode(Addr),
            LogHashRate = math:log10(HashRate),
            io_lib:format("{~s, ~p, ~p, ~p}",
                    [EncodedAddr, LogHashRate, Reward, Denomination])
        end, LimitedRewardHistory),
    LogString = string:join(LogEntries, "; "),
    ?LOG_INFO("~s Length: ~p, Entries: ~s", [Message, Length, LogString]).



================================================
FILE: apps/arweave/src/ar_rx4096_nif.erl
================================================
-module(ar_rx4096_nif).

-include_lib("arweave/include/ar.hrl").

-on_load(init_nif/0).

-export([rx4096_hash_nif/5, rx4096_info_nif/1, rx4096_init_nif/5,
    rx4096_encrypt_composite_chunk_nif/9,
    rx4096_decrypt_composite_chunk_nif/10,
    rx4096_decrypt_composite_sub_chunk_nif/10,
    rx4096_reencrypt_composite_chunk_nif/13
]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% Stub; replaced by the native implementation when the NIF library loads.
rx4096_info_nif(_State) ->
    ?LOG_ERROR("rx4096_info_nif"),
    erlang:nif_error(nif_not_loaded).
%% All functions below are stubs that only run if the NIF library failed to
%% load; the native implementations replace them at module load time.
rx4096_init_nif(_Key, _HashingMode, _JIT, _LargePages, _Threads) ->
    ?LOG_ERROR("rx4096_init_nif"),
    erlang:nif_error(nif_not_loaded).

rx4096_hash_nif(_State, _Data, _JIT, _LargePages, _HardwareAES) ->
    ?LOG_ERROR("rx4096_hash_nif"),
    erlang:nif_error(nif_not_loaded).

rx4096_encrypt_composite_chunk_nif(_State, _Key, _Chunk, _JIT, _LargePages, _HardwareAES,
        _RoundCount, _IterationCount, _SubChunkCount) ->
    ?LOG_ERROR("rx4096_encrypt_composite_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

rx4096_decrypt_composite_chunk_nif(_State, _Data, _Chunk, _OutSize, _JIT, _LargePages,
        _HardwareAES, _RoundCount, _IterationCount, _SubChunkCount) ->
    ?LOG_ERROR("rx4096_decrypt_composite_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

rx4096_decrypt_composite_sub_chunk_nif(_State, _Data, _Chunk, _OutSize, _JIT, _LargePages,
        _HardwareAES, _RoundCount, _IterationCount, _Offset) ->
    ?LOG_ERROR("rx4096_decrypt_composite_sub_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

rx4096_reencrypt_composite_chunk_nif(_State, _DecryptKey, _EncryptKey, _Chunk,
        _JIT, _LargePages, _HardwareAES, _DecryptRoundCount, _EncryptRoundCount,
        _DecryptIterationCount, _EncryptIterationCount,
        _DecryptSubChunkCount, _EncryptSubChunkCount) ->
    ?LOG_ERROR("rx4096_reencrypt_composite_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

%% Load the shared library from the application's priv directory.
init_nif() ->
    PrivDir = code:priv_dir(arweave),
    ok = erlang:load_nif(filename:join([PrivDir, "rx4096_arweave"]), 0).



================================================
FILE: apps/arweave/src/ar_rx512_nif.erl
================================================
-module(ar_rx512_nif).

-include_lib("arweave/include/ar.hrl").

-on_load(init_nif/0).

-export([rx512_hash_nif/5, rx512_info_nif/1, rx512_init_nif/5,
    rx512_encrypt_chunk_nif/7,
    rx512_decrypt_chunk_nif/8,
    rx512_reencrypt_chunk_nif/10
]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% All functions below are stubs that only run if the NIF library failed to
%% load; the native implementations replace them at module load time.
rx512_info_nif(_State) ->
    ?LOG_ERROR("rx512_info_nif"),
    erlang:nif_error(nif_not_loaded).

rx512_init_nif(_Key, _HashingMode, _JIT, _LargePages, _Threads) ->
    ?LOG_ERROR("rx512_init_nif"),
    erlang:nif_error(nif_not_loaded).

rx512_hash_nif(_State, _Data, _JIT, _LargePages, _HardwareAES) ->
    ?LOG_ERROR("rx512_hash_nif"),
    erlang:nif_error(nif_not_loaded).

rx512_encrypt_chunk_nif(_State, _Data, _Chunk, _RoundCount, _JIT, _LargePages,
        _HardwareAES) ->
    ?LOG_ERROR("rx512_encrypt_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

rx512_decrypt_chunk_nif(_State, _Data, _Chunk, _OutSize, _RoundCount, _JIT, _LargePages,
        _HardwareAES) ->
    ?LOG_ERROR("rx512_decrypt_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

rx512_reencrypt_chunk_nif(_State, _DecryptKey, _EncryptKey, _Chunk, _ChunkSize,
        _DecryptRoundCount, _EncryptRoundCount, _JIT, _LargePages, _HardwareAES) ->
    ?LOG_ERROR("rx512_reencrypt_chunk_nif"),
    erlang:nif_error(nif_not_loaded).

%% Load the shared library from the application's priv directory.
init_nif() ->
    PrivDir = code:priv_dir(arweave),
    ok = erlang:load_nif(filename:join([PrivDir, "rx512_arweave"]), 0).



================================================
FILE: apps/arweave/src/ar_rxsquared_nif.erl
================================================
-module(ar_rxsquared_nif).

-include_lib("arweave/include/ar.hrl").

-on_load(init_nif/0).

-export([rxsquared_hash_nif/5, rxsquared_info_nif/1, rxsquared_init_nif/5,
    rsp_fused_entropy_nif/10,
    rsp_feistel_encrypt_nif/2,
    rsp_feistel_decrypt_nif/2]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% Stub; replaced by the native implementation when the NIF library loads.
rxsquared_info_nif(_State) ->
    ?LOG_ERROR("rxsquared_info_nif"),
    erlang:nif_error(nif_not_loaded).

rxsquared_init_nif(_Key, _HashingMode, _JIT, _LargePages, _Threads) ->
    ?LOG_ERROR("rxsquared_init_nif"),
    erlang:nif_error(nif_not_loaded).
%% Stub; replaced by the native implementation when the NIF library loads.
rxsquared_hash_nif(_State, _Data, _JIT, _LargePages, _HardwareAES) ->
    ?LOG_ERROR("rxsquared_hash_nif"),
    erlang:nif_error(nif_not_loaded).

%% Load the shared library from the application's priv directory.
init_nif() ->
    PrivDir = code:priv_dir(arweave),
    ok = erlang:load_nif(filename:join([PrivDir, "rxsquared_arweave"]), 0).

%%%===================================================================
%%% Randomx square packing
%%%===================================================================

%% Stubs; replaced by the native implementations when the NIF library loads.
rsp_fused_entropy_nif(
    _RandomxState,
    _ReplicaEntropySubChunkCount,
    _CompositePackingSubChunkSize,
    _LaneCount,
    _RxDepth,
    _JitEnabled,
    _LargePagesEnabled,
    _HardwareAESEnabled,
    _RandomxProgramCount,
    _Key
) ->
    ?LOG_ERROR("rsp_fused_entropy_nif"),
    erlang:nif_error(nif_not_loaded).

rsp_feistel_encrypt_nif(_InMsg, _Key) ->
    ?LOG_ERROR("rsp_feistel_encrypt_nif"),
    erlang:nif_error(nif_not_loaded).

rsp_feistel_decrypt_nif(_InMsg, _Key) ->
    ?LOG_ERROR("rsp_feistel_decrypt_nif"),
    erlang:nif_error(nif_not_loaded).



================================================
FILE: apps/arweave/src/ar_semaphore.erl
================================================
-module(ar_semaphore).

-behaviour(gen_server).

-export([start_link/2, acquire/2, stop/1]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include_lib("kernel/include/logger.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Open a semaphore registered with Name, with the specified
%% Capacity. Also registers a prometheus gauge under Name tracking
%% the size of the wait queue.
start_link(Name, InitCapacity) ->
    prometheus_gauge:new([
        {name, Name},
        {help, "The size of the corresponding semaphore queue."}
    ]),
    gen_server:start_link({local, Name}, ?MODULE, [InitCapacity], []).

%% @doc Acquire the semaphore, willing to wait for the provided
%% Timeout. Returns ok, {error, timeout}, or
%% {error, process_already_waiting}.
acquire(Name, Timeout) ->
    try
        gen_server:call(Name, acquire, Timeout)
    catch
        exit:{timeout, _} ->
            {error, timeout}
    end.
%% @doc Close the semaphore and stop the process registered under the
%% given name.
stop(Name) ->
    gen_server:stop(Name).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% State is {Capacity, WaitingPids, Queue}: remaining capacity, a map of
%% current holders, and a queue of callers waiting for a slot. An infinity
%% capacity needs no bookkeeping.
init([InitCapacity]) when is_integer(InitCapacity) ->
    {ok, {InitCapacity, #{}, queue:new()}};
init([infinity]) ->
    {ok, {infinity, undefined, undefined}}.

handle_call(acquire, {FromPid, FromRef}, {Capacity, WaitingPids, Queue})
        when is_integer(Capacity) ->
    case maps:is_key(FromPid, WaitingPids) of
        true ->
            %% A process may hold the semaphore at most once at a time.
            {reply, {error, process_already_waiting}, {Capacity, WaitingPids, Queue}};
        false ->
            case Capacity > 0 of
                true ->
                    %% Grant immediately; monitor the holder so its slot is
                    %% released if it dies (see 'DOWN' below).
                    monitor(process, FromPid),
                    {reply, ok, {Capacity - 1, WaitingPids#{ FromPid => {} }, Queue}};
                false ->
                    %% No capacity: park the caller (no reply yet) and bump
                    %% the queue-size gauge named after this process.
                    Queue1 = queue:in({FromPid, FromRef}, Queue),
                    prometheus_gauge:inc(
                            element(2, process_info(self(), registered_name))),
                    {noreply, {Capacity, WaitingPids, Queue1}}
            end
    end;
handle_call(acquire, _, {infinity, _, _} = State) ->
    {reply, ok, State}.

handle_cast(_, State) ->
    {stop, {error, handle_cast_unsupported}, State}.

%% A holder died: free its slot and try to grant a queued waiter.
handle_info({'DOWN', _, process, Pid, _}, {Capacity, WaitingPids, Queue}) ->
    case maps:take(Pid, WaitingPids) of
        {{}, WaitingPids1} ->
            dequeue({Capacity + 1, WaitingPids1, Queue});
        error ->
            {noreply, {Capacity, WaitingPids, Queue}}
    end.

terminate(Reason, _State) ->
    ?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
    ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Grant the semaphore to the next queued waiter, if any, replying to its
%% pending gen_server:call and updating the queue-size gauge.
dequeue({Capacity, WaitingPids, Queue}) ->
    case Capacity > 0 of
        false ->
            {noreply, {Capacity, WaitingPids, Queue}};
        true ->
            case queue:out(Queue) of
                {empty, Queue} ->
                    prometheus_gauge:set(
                            element(2, process_info(self(), registered_name)), 0),
                    {noreply, {Capacity, WaitingPids, Queue}};
                {{value, {FromPid, FromRef}}, NewQueue} ->
                    %% Monitor the new holder so its slot is reclaimed on exit.
                    monitor(process, FromPid),
                    gen_server:reply({FromPid, FromRef}, ok),
                    prometheus_gauge:dec(
                            element(2, process_info(self(), registered_name))),
                    {noreply, {Capacity - 1, WaitingPids#{ FromPid => {} }, NewQueue}}
            end
    end.



================================================
FILE: apps/arweave/src/ar_serialize.erl
================================================
%%% @doc The module contains the serialization and deserialization utilities for the
%%% various protocol entitities - transactions, blocks, proofs, etc
-module(ar_serialize).

-export([block_to_binary/1, binary_to_block/1,
    json_struct_to_block/1, block_to_json_struct/1,
    block_announcement_to_binary/1, binary_to_block_announcement/1,
    binary_to_block_announcement_response/1, block_announcement_response_to_binary/1,
    tx_to_binary/1, binary_to_tx/1,
    poa_map_to_binary/1, binary_to_poa/1,
    poa_no_chunk_map_to_binary/1, binary_to_no_chunk_map/1,
    poa_map_to_json_map/1, poa_no_chunk_map_to_json_map/1, json_map_to_poa_map/1,
    block_index_to_binary/1, binary_to_block_index/1,
    encode_double_signing_proof/2,
    json_struct_to_poa/1, poa_to_json_struct/1,
    tx_to_json_struct/1, json_struct_to_tx/1, json_struct_to_v1_tx/1,
    etf_to_wallet_chunk_response/1,
    wallet_list_to_json_struct/3, wallet_to_json_struct/2,
    json_struct_to_wallet_list/1,
    block_index_to_json_struct/1, json_struct_to_block_index/1,
    jsonify/1, dejsonify/1, json_decode/1, json_decode/2,
    query_to_json_struct/1, json_struct_to_query/1,
    encode_int/2, encode_bin/2, encode_bin_list/3,
    signature_type_to_binary/1, binary_to_signature_type/1,
    reward_history_to_binary/1,
    binary_to_reward_history/1,
    block_time_history_to_binary/1, binary_to_block_time_history/1,
    parse_32b_list/1,
    nonce_limiter_update_to_binary/2, binary_to_nonce_limiter_update/2,
    nonce_limiter_update_response_to_binary/1,
    binary_to_nonce_limiter_update_response/1,
    partition_to_json_struct/4,
    candidate_to_json_struct/1,
    solution_to_json_struct/1, json_map_to_solution/1, json_map_to_candidate/1,
    encode_packing/2, decode_packing/2,
    jobs_to_json_struct/1, json_struct_to_jobs/1,
    partial_solution_response_to_json_struct/1,
    pool_cm_jobs_to_json_struct/1, json_map_to_pool_cm_jobs/1,
    footprint_to_json_map/1, json_map_to_footprint/1,
    data_roots_to_binary/1, binary_to_data_roots/1]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include("ar_vdf.hrl").
-include("ar_mining.hrl").
-include("ar_pool.hrl").

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Serialize the block.
%% @doc Serialize a block into its binary representation. Fields introduced by
%% later protocol versions are appended by encode_post_2_6_fields/1 (which in
%% turn chains the 2.7/2.8/2.9 encoders).
block_to_binary(#block{ indep_hash = H, previous_block = PrevH, timestamp = TS,
		nonce = Nonce, height = Height, diff = Diff, cumulative_diff = CDiff,
		last_retarget = LastRetarget, hash = Hash, block_size = BlockSize,
		weave_size = WeaveSize, reward_addr = Addr, tx_root = TXRoot,
		wallet_list = WalletList, hash_list_merkle = HashListMerkle,
		reward_pool = RewardPool, packing_2_5_threshold = Threshold,
		strict_data_split_threshold = StrictChunkThreshold, usd_to_ar_rate = Rate,
		scheduled_usd_to_ar_rate = ScheduledRate,
		poa = #poa{ option = Option, chunk = Chunk, data_path = DataPath,
				tx_path = TXPath },
		tags = Tags, txs = TXs } = B) ->
	%% The unclaimed reward address is encoded as the empty binary.
	Addr2 = case Addr of unclaimed -> <<>>; _ -> Addr end,
	{RateDividend, RateDivisor} =
		case Rate of undefined -> {undefined, undefined}; _ -> Rate end,
	{ScheduledRateDividend, ScheduledRateDivisor} =
		case ScheduledRate of undefined -> {undefined, undefined}; _ -> ScheduledRate end,
	%% From 2.6 on the nonce field holds an integer; serialize it big-endian.
	Nonce2 = case B#block.height >= ar_fork:height_2_6() of
		true ->
			binary:encode_unsigned(Nonce, big);
		false ->
			Nonce
	end,
	%% Variable-size fields carry a length prefix (the second argument of
	%% encode_bin/encode_int is the prefix width in bits).
	<< H:48/binary, (encode_bin(PrevH, 8))/binary, (encode_int(TS, 8))/binary,
			(encode_bin(Nonce2, 16))/binary, (encode_int(Height, 8))/binary,
			(encode_int(Diff, 16))/binary, (encode_int(CDiff, 16))/binary,
			(encode_int(LastRetarget, 8))/binary, (encode_bin(Hash, 8))/binary,
			(encode_int(BlockSize, 16))/binary, (encode_int(WeaveSize, 16))/binary,
			(encode_bin(Addr2, 8))/binary, (encode_bin(TXRoot, 8))/binary,
			(encode_bin(WalletList, 8))/binary, (encode_bin(HashListMerkle, 8))/binary,
			(encode_int(RewardPool, 8))/binary, (encode_int(Threshold, 8))/binary,
			(encode_int(StrictChunkThreshold, 8))/binary,
			(encode_int(RateDividend, 8))/binary, (encode_int(RateDivisor, 8))/binary,
			(encode_int(ScheduledRateDividend, 8))/binary,
			(encode_int(ScheduledRateDivisor, 8))/binary, (encode_int(Option, 8))/binary,
			(encode_bin(Chunk, 24))/binary, (encode_bin(TXPath, 24))/binary,
			(encode_bin(DataPath, 24))/binary, (encode_bin_list(Tags, 16, 16))/binary,
			(encode_transactions(TXs))/binary, (encode_post_2_6_fields(B))/binary >>.

%% @doc Deserialize the block. The pattern mirrors block_to_binary/1 field for
%% field; trailing bytes (tags, transactions, post-2.6 fields) are handed to
%% parse_block_tags_transactions/2.
binary_to_block(<< H:48/binary, PrevHSize:8, PrevH:PrevHSize/binary, TSSize:8,
		TS:(TSSize * 8), NonceSize:16, Nonce:NonceSize/binary, HeightSize:8,
		Height:(HeightSize * 8), DiffSize:16, Diff:(DiffSize * 8), CDiffSize:16,
		CDiff:(CDiffSize * 8), LastRetargetSize:8, LastRetarget:(LastRetargetSize * 8),
		HashSize:8, Hash:HashSize/binary, BlockSizeSize:16, BlockSize:(BlockSizeSize * 8),
		WeaveSizeSize:16, WeaveSize:(WeaveSizeSize * 8), AddrSize:8, Addr:AddrSize/binary,
		TXRootSize:8, TXRoot:TXRootSize/binary, % 0 or 32
		WalletListSize:8, WalletList:WalletListSize/binary, HashListMerkleSize:8,
		HashListMerkle:HashListMerkleSize/binary, RewardPoolSize:8,
		RewardPool:(RewardPoolSize * 8), PackingThresholdSize:8,
		Threshold:(PackingThresholdSize * 8), StrictChunkThresholdSize:8,
		StrictChunkThreshold:(StrictChunkThresholdSize * 8), RateDividendSize:8,
		RateDividend:(RateDividendSize * 8), RateDivisorSize:8,
		RateDivisor:(RateDivisorSize * 8), SchedRateDividendSize:8,
		SchedRateDividend:(SchedRateDividendSize * 8), SchedRateDivisorSize:8,
		SchedRateDivisor:(SchedRateDivisorSize * 8), PoAOptionSize:8,
		PoAOption:(PoAOptionSize * 8), ChunkSize:24, Chunk:ChunkSize/binary, TXPathSize:24,
		TXPath:TXPathSize/binary, DataPathSize:24, DataPath:DataPathSize/binary,
		Rest/binary >>) when NonceSize =< 512 -> % reject oversized nonces
	%% A zero length prefix stands for an optional field that was undefined.
	Threshold2 = case PackingThresholdSize of 0 -> undefined; _ -> Threshold end,
	StrictChunkThreshold2 = case StrictChunkThresholdSize of
		0 -> undefined;
		_ -> StrictChunkThreshold
	end,
	Rate = case RateDivisorSize of 0 -> undefined; _ -> {RateDividend, RateDivisor} end,
	ScheduledRate = case SchedRateDivisorSize of
		0 -> undefined;
		_ -> {SchedRateDividend, SchedRateDivisor}
	end,
	%% Before 2.6 an empty address meant the reward was unclaimed.
	Addr2 = case {AddrSize, Height >= ar_fork:height_2_6()} of
		{0, false} -> unclaimed;
		_ -> Addr
	end,
	B = #block{ indep_hash = H, previous_block = PrevH, timestamp = TS, nonce = Nonce,
			height = Height, diff = Diff, cumulative_diff = CDiff,
			last_retarget = LastRetarget, hash = Hash, block_size = BlockSize,
			weave_size = WeaveSize, reward_addr = Addr2, tx_root = TXRoot,
			wallet_list = WalletList, hash_list_merkle = HashListMerkle,
			reward_pool = RewardPool, packing_2_5_threshold = Threshold2,
			strict_data_split_threshold = StrictChunkThreshold2, usd_to_ar_rate = Rate,
			scheduled_usd_to_ar_rate = ScheduledRate,
			poa = #poa{ option = PoAOption, chunk = Chunk, data_path = DataPath,
					tx_path = TXPath }},
	parse_block_tags_transactions(Rest, B);
binary_to_block(_Bin) ->
	{error, invalid_block_input}.

%% @doc Convert a block record into a JSON struct. Fields are accumulated into
%% a proplist; each fork height either adds fields (prepended) or removes
%% legacy ones (delete_keys/2), so the shape matches the block's height.
block_to_json_struct(
		#block{
			nonce = Nonce,
			previous_block = PrevHash,
			timestamp = TimeStamp,
			last_retarget = LastRetarget,
			diff = Diff,
			height = Height,
			hash = Hash,
			indep_hash = IndepHash,
			txs = TXs,
			tx_root = TXRoot,
			wallet_list = WalletList,
			reward_addr = RewardAddr,
			tags = Tags,
			reward_pool = RewardPool,
			weave_size = WeaveSize,
			block_size = BlockSize,
			cumulative_diff = CDiff,
			hash_list_merkle = MR,
			poa = POA,
			previous_cumulative_diff = PrevCDiff,
			merkle_rebase_support_threshold = RebaseThreshold,
			recall_byte2 = RecallByte2,
			packing_difficulty = PackingDifficulty,
			unpacked_chunk_hash = UnpackedChunkHash,
			unpacked_chunk2_hash = UnpackedChunk2Hash,
			replica_format = ReplicaFormat
		} = B) ->
	%% From 1.8 on, big integers are emitted as strings to avoid JSON
	%% precision issues.
	{JSONDiff, JSONCDiff} = case Height >= ar_fork:height_1_8() of
		true -> {integer_to_binary(Diff), integer_to_binary(CDiff)};
		false -> {Diff, CDiff}
	end,
	{JSONRewardPool, JSONBlockSize, JSONWeaveSize} = case Height >= ar_fork:height_2_4() of
		true -> {integer_to_binary(RewardPool), integer_to_binary(BlockSize),
				integer_to_binary(WeaveSize)};
		false -> {RewardPool, BlockSize, WeaveSize}
	end,
	Tags2 = case Height >= ar_fork:height_2_5() of
		true -> [ar_util:encode(Tag) || Tag <- Tags];
		false -> Tags
	end,
	Nonce2 = case B#block.height >= ar_fork:height_2_6() of
		true -> binary:encode_unsigned(Nonce);
		false -> Nonce
	end,
	JSONElements = [{nonce, ar_util:encode(Nonce2)},
			{previous_block, ar_util:encode(PrevHash)},
			{timestamp, TimeStamp},
			{last_retarget, LastRetarget},
			{diff, JSONDiff},
			{height, Height},
			{hash, ar_util:encode(Hash)},
			{indep_hash, ar_util:encode(IndepHash)},
			%% The txs list may contain bare identifiers or full #tx records.
			{txs, lists:map(
				fun(TXID) when is_binary(TXID) -> ar_util:encode(TXID);
					(TX) -> ar_util:encode(TX#tx.id)
				end,
				TXs)},
			{tx_root, ar_util:encode(TXRoot)},
			{tx_tree, []},
			{wallet_list, ar_util:encode(WalletList)},
			{reward_addr, case RewardAddr of
				unclaimed -> list_to_binary("unclaimed");
				_ -> ar_util:encode(RewardAddr)
			end},
			{tags, Tags2},
			{reward_pool, JSONRewardPool},
			{weave_size, JSONWeaveSize},
			{block_size, JSONBlockSize},
			{cumulative_diff, JSONCDiff},
			{hash_list_merkle, ar_util:encode(MR)},
			{poa, poa_to_json_struct(POA)}],
	JSONElements2 = case Height < ar_fork:height_1_6() of
		true ->
			KeysToDelete = [cumulative_diff, hash_list_merkle],
			delete_keys(KeysToDelete, JSONElements);
		false ->
			JSONElements
	end,
	JSONElements3 = case Height >= ar_fork:height_2_4() of
		true -> delete_keys([tx_tree], JSONElements2);
		false -> JSONElements2
	end,
	JSONElements4 = case Height >= ar_fork:height_2_5() of
		true ->
			{RateDividend, RateDivisor} = B#block.usd_to_ar_rate,
			{ScheduledRateDividend, ScheduledRateDivisor} =
					B#block.scheduled_usd_to_ar_rate,
			[
				{usd_to_ar_rate, [integer_to_binary(RateDividend),
						integer_to_binary(RateDivisor)]},
				{scheduled_usd_to_ar_rate, [integer_to_binary(ScheduledRateDividend),
						integer_to_binary(ScheduledRateDivisor)]},
				{packing_2_5_threshold, integer_to_binary(B#block.packing_2_5_threshold)},
				{strict_data_split_threshold,
						integer_to_binary(B#block.strict_data_split_threshold)}
				| JSONElements3
			];
		false ->
			JSONElements3
	end,
	JSONElements5 = case Height >= ar_fork:height_2_6() of
		true ->
			PricePerGiBMinute = B#block.price_per_gib_minute,
			ScheduledPricePerGiBMinute = B#block.scheduled_price_per_gib_minute,
			DebtSupply = B#block.debt_supply,
			KryderPlusRateMultiplier = B#block.kryder_plus_rate_multiplier,
			KryderPlusRateMultiplierLatch = B#block.kryder_plus_rate_multiplier_latch,
			Denomination = B#block.denomination,
			RedenominationHeight = B#block.redenomination_height,
			DoubleSigningProof = case B#block.double_signing_proof of
				undefined ->
					{[]};
				{Key, Sig1, CDiff1, PrevCDiff1, Preimage1, Sig2, CDiff2, PrevCDiff2,
						Preimage2} ->
					{[{pub_key, ar_util:encode(Key)},
						{sig1, ar_util:encode(Sig1)},
						{cdiff1, integer_to_binary(CDiff1)},
						{prev_cdiff1, integer_to_binary(PrevCDiff1)},
						{preimage1, ar_util:encode(Preimage1)},
						{sig2, ar_util:encode(Sig2)},
						{cdiff2, integer_to_binary(CDiff2)},
						{prev_cdiff2, integer_to_binary(PrevCDiff2)},
						{preimage2, ar_util:encode(Preimage2)}]}
			end,
			JSONElements6 = [{hash_preimage, ar_util:encode(B#block.hash_preimage)},
					{recall_byte, integer_to_binary(B#block.recall_byte)},
					{reward, integer_to_binary(B#block.reward)},
					{previous_solution_hash,
						ar_util:encode(B#block.previous_solution_hash)},
					{partition_number, B#block.partition_number},
					{nonce_limiter_info, nonce_limiter_info_to_json_struct(
						B#block.height, B#block.nonce_limiter_info)},
					{poa2, poa_to_json_struct(B#block.poa2)},
					{signature, ar_util:encode(B#block.signature)},
					%% reward_key is a {Type, Key} pair; only the key part is emitted.
					{reward_key, ar_util:encode(element(2, B#block.reward_key))},
					{price_per_gib_minute, integer_to_binary(PricePerGiBMinute)},
					{scheduled_price_per_gib_minute,
						integer_to_binary(ScheduledPricePerGiBMinute)},
					{reward_history_hash, ar_util:encode(B#block.reward_history_hash)},
					{debt_supply, integer_to_binary(DebtSupply)},
					{kryder_plus_rate_multiplier,
						integer_to_binary(KryderPlusRateMultiplier)},
					{kryder_plus_rate_multiplier_latch,
						integer_to_binary(KryderPlusRateMultiplierLatch)},
					{denomination, integer_to_binary(Denomination)},
					{redenomination_height, RedenominationHeight},
					{double_signing_proof, DoubleSigningProof},
					{previous_cumulative_diff, integer_to_binary(PrevCDiff)}
					| JSONElements4],
			%% recall_byte2 is only present when defined.
			case RecallByte2 of
				undefined ->
					JSONElements6;
				_ ->
					[{recall_byte2, integer_to_binary(RecallByte2)} | JSONElements6]
			end;
		false ->
			JSONElements4
	end,
	JSONElements8 = case Height >= ar_fork:height_2_7() of
		true ->
			JSONElements7 = [
				{merkle_rebase_support_threshold, integer_to_binary(RebaseThreshold)},
				{chunk_hash, ar_util:encode(B#block.chunk_hash)},
				{block_time_history_hash,
					ar_util:encode(B#block.block_time_history_hash)}
				| JSONElements5],
			case B#block.chunk2_hash of
				undefined ->
					JSONElements7;
				_ ->
					[{chunk2_hash, ar_util:encode(B#block.chunk2_hash)} | JSONElements7]
			end;
		false ->
			JSONElements5
	end,
	JSONElements9 = case Height >= ar_fork:height_2_8() of
		false ->
			JSONElements8;
		true ->
			%% Unpacked chunk hashes are only emitted for composite packing
			%% (packing_difficulty >= 1); the second hash needs recall_byte2.
			case {PackingDifficulty >= 1, RecallByte2} of
				{false, _} ->
					[{packing_difficulty, PackingDifficulty} | JSONElements8];
				{true, undefined} ->
					[{packing_difficulty, PackingDifficulty},
						{unpacked_chunk_hash, ar_util:encode(UnpackedChunkHash)}
						| JSONElements8];
				_ ->
					[{packing_difficulty, PackingDifficulty},
						{unpacked_chunk_hash, ar_util:encode(UnpackedChunkHash)},
						{unpacked_chunk2_hash, ar_util:encode(UnpackedChunk2Hash)}
						| JSONElements8]
			end
	end,
	JSONElements10 = case Height >= ar_fork:height_2_9() of
		false ->
			JSONElements9;
		true ->
			[{replica_format, ReplicaFormat} | JSONElements9]
	end,
	{JSONElements10}.

%% @doc Serialize the reward history: per entry a 32-byte address, a
%% length-prefixed hash rate, a length-prefixed reward, and a 24-bit
%% denomination. Entries are emitted in reverse list order.
reward_history_to_binary(RewardHistory) ->
	reward_history_to_binary(RewardHistory, []).

reward_history_to_binary([], IOList) ->
	iolist_to_binary(IOList);
reward_history_to_binary([{Addr, HashRate, Reward, Denomination} | RewardHistory],
		IOList) ->
	reward_history_to_binary(RewardHistory, [Addr, ar_serialize:encode_int(HashRate, 8),
			ar_serialize:encode_int(Reward, 8), << Denomination:24 >> | IOList]).

%% @doc Deserialize the reward history produced by reward_history_to_binary/1.
binary_to_reward_history(Bin) ->
	binary_to_reward_history(Bin, []).
binary_to_reward_history(<< Addr:32/binary, HashRateSize:8, HashRate:(HashRateSize * 8), RewardSize:8, Reward:(RewardSize * 8), Denomination:24, Rest/binary >>, RewardHistory) -> binary_to_reward_history(Rest, [{Addr, HashRate, Reward, Denomination} | RewardHistory]); binary_to_reward_history(<<>>, RewardHistory) -> {ok, RewardHistory}; binary_to_reward_history(_Rest, _RewardHistory) -> {error, invalid_reward_history}. block_time_history_to_binary(BlockTimeHistory) -> block_time_history_to_binary(BlockTimeHistory, []). block_time_history_to_binary([], IOList) -> iolist_to_binary(IOList); block_time_history_to_binary([{BlockInterval, VDFInterval, ChunkCount} | BlockTimeHistory], IOList) -> block_time_history_to_binary(BlockTimeHistory, [ ar_serialize:encode_int(BlockInterval, 8), ar_serialize:encode_int(VDFInterval, 8), ar_serialize:encode_int(ChunkCount, 8) | IOList]). binary_to_block_time_history(Bin) -> binary_to_block_time_history(Bin, []). binary_to_block_time_history(<< BlockIntervalSize:8, BlockInterval:(BlockIntervalSize * 8), VDFIntervalSize:8, VDFInterval:(VDFIntervalSize * 8), ChunkCountSize:8, ChunkCount:(ChunkCountSize * 8), Rest/binary >>, BlockTimeHistory) -> binary_to_block_time_history(Rest, [{BlockInterval, VDFInterval, ChunkCount} | BlockTimeHistory]); binary_to_block_time_history(<<>>, BlockTimeHistory) -> {ok, BlockTimeHistory}; binary_to_block_time_history(_Rest, _BlockTimeHistory) -> {error, invalid_block_time_history}. %% Note: the #nonce_limiter_update and #vdf_session records are only serialized for communication %% between a VDF server and VDF client. Only fields that are required for this communication are %% serialized. %% %% For example, the vdf_difficulty and next_vdf_difficulty fields are omitted as they are only used %% by nodes that compute their own VDF and never need to be shared from VDF server to VDF client. 
%% @doc Serialize a #nonce_limiter_update for the VDF server -> client push.
%% Format 2 carries only the checkpoints of the latest step; formats 3 and 4
%% carry the whole step_checkpoints_map (format 4 additionally includes the
%% session's vdf_difficulty via encode_vdf_session(4, ...)).
nonce_limiter_update_to_binary(2 = _Format, #nonce_limiter_update{
		session_key = {NextSeed, Interval, NextVDFDifficulty}, session = Session,
		is_partial = IsPartial }) ->
	#vdf_session{ step_number = StepNumber, step_checkpoints_map = Map } = Session,
	Checkpoints = maps:get(StepNumber, Map, []),
	IsPartialBin = case IsPartial of true -> << 1:8 >>; _ -> << 0:8 >> end,
	CheckpointLen = length(Checkpoints),
	<< NextSeed:48/binary, (ar_serialize:encode_int(NextVDFDifficulty, 8))/binary,
			Interval:64, IsPartialBin/binary, CheckpointLen:16,
			(iolist_to_binary(Checkpoints))/binary,
			(encode_vdf_session(2, Session))/binary >>;
nonce_limiter_update_to_binary(3 = _Format, #nonce_limiter_update{
		session_key = {NextSeed, Interval, NextVDFDifficulty}, session = Session,
		is_partial = IsPartial }) ->
	#vdf_session{ step_checkpoints_map = Map } = Session,
	CheckpointsMapBin = encode_step_checkpoints_map(Map),
	CheckpointsMapSize = byte_size(CheckpointsMapBin),
	IsPartialBin = case IsPartial of true -> << 1:8 >>; _ -> << 0:8 >> end,
	<< NextSeed:48/binary, (ar_serialize:encode_int(NextVDFDifficulty, 8))/binary,
			Interval:64, IsPartialBin/binary, CheckpointsMapSize:24,
			CheckpointsMapBin:CheckpointsMapSize/binary,
			(encode_vdf_session(2, Session))/binary >>;
nonce_limiter_update_to_binary(4 = _Format, #nonce_limiter_update{
		session_key = {NextSeed, Interval, NextVDFDifficulty}, session = Session,
		is_partial = IsPartial }) ->
	#vdf_session{ step_checkpoints_map = Map } = Session,
	CheckpointsMapBin = encode_step_checkpoints_map(Map),
	CheckpointsMapSize = byte_size(CheckpointsMapBin),
	IsPartialBin = case IsPartial of true -> << 1:8 >>; _ -> << 0:8 >> end,
	<< NextSeed:48/binary, (ar_serialize:encode_int(NextVDFDifficulty, 8))/binary,
			Interval:64, IsPartialBin/binary, CheckpointsMapSize:24,
			CheckpointsMapBin:CheckpointsMapSize/binary,
			(encode_vdf_session(4, Session))/binary >>.

%% @doc Serialize a step-number => checkpoints map. Each entry is the 64-bit
%% step number, a 16-bit checkpoint count, and the concatenated checkpoints.
%% Entries appear in reverse maps:keys/1 order (new entries are prepended).
encode_step_checkpoints_map(Map) ->
	encode_step_checkpoints_map(maps:keys(Map), Map, <<>>).

encode_step_checkpoints_map([], _Map, Bin) ->
	Bin;
encode_step_checkpoints_map([Key | Keys], Map, Bin) ->
	Checkpoints = maps:get(Key, Map),
	CheckpointLen = length(Checkpoints),
	encode_step_checkpoints_map(Keys, Map, << Key:64, CheckpointLen:16,
			(iolist_to_binary(Checkpoints))/binary, Bin/binary >>).

%% @doc Serialize the #vdf_session fields shared with the VDF client.
%% Format 4 appends the session's vdf_difficulty; both formats reuse the
%% format-2 session key encoding for prev_session_key.
encode_vdf_session(2 = _Format, #vdf_session{ step_number = StepNumber, seed = Seed,
		steps = Steps, prev_session_key = PrevSessionKey, upper_bound = UpperBound,
		next_upper_bound = NextUpperBound }) ->
	StepsLen = length(Steps),
	<< StepNumber:64, Seed:48/binary, (encode_int(UpperBound, 8))/binary,
			(encode_int(NextUpperBound, 8))/binary, StepsLen:16,
			(iolist_to_binary(Steps))/binary,
			(encode_session_key(2, PrevSessionKey))/binary >>;
encode_vdf_session(4 = _Format, #vdf_session{ step_number = StepNumber, seed = Seed,
		steps = Steps, prev_session_key = PrevSessionKey, upper_bound = UpperBound,
		next_upper_bound = NextUpperBound, vdf_difficulty = VDFDifficulty }) ->
	StepsLen = length(Steps),
	<< StepNumber:64, Seed:48/binary, (encode_int(UpperBound, 8))/binary,
			(encode_int(NextUpperBound, 8))/binary, StepsLen:16,
			(iolist_to_binary(Steps))/binary, (encode_int(VDFDifficulty, 8))/binary,
			(encode_session_key(2, PrevSessionKey))/binary >>.

%% @doc Serialize a {NextSeed, Interval, NextDifficulty} session key;
%% undefined encodes as the empty binary.
encode_session_key(undefined) ->
	<<>>;
encode_session_key({NextSeed, Interval, NextDifficulty}) ->
	<< NextSeed:48/binary, (ar_serialize:encode_int(NextDifficulty, 8))/binary,
			Interval:64 >>.

encode_session_key(2 = _Format, SessionKey) ->
	encode_session_key(SessionKey).

%% @doc Inverse of encode_session_key/1: <<>> decodes to undefined and any
%% other shape than the expected one yields the atom error.
decode_session_key(<<>>) ->
	undefined;
decode_session_key(<< NextSeed:48/binary, NextVDFDifficultySize:8,
		NextVDFDifficulty:(NextVDFDifficultySize * 8), Interval:64 >>) ->
	{NextSeed, Interval, NextVDFDifficulty};
decode_session_key(_) ->
	error.
%% @doc Deserialize a nonce limiter update pushed by a VDF server. Each clause
%% mirrors the corresponding nonce_limiter_update_to_binary/2 format. Invalid
%% trailing session keys yield {error, invalid1}; pattern mismatches yield
%% {error, invalid2}; unknown formats yield {error, invalid_format}.
binary_to_nonce_limiter_update(2, % Format
		<< NextSeed:48/binary, NextVDFDifficultySize:8,
			NextVDFDifficulty:(NextVDFDifficultySize * 8), Interval:64, IsPartial:8,
			CheckpointLen:16, Checkpoints:(CheckpointLen * 32)/binary, StepNumber:64,
			Seed:48/binary, UpperBoundSize:8, UpperBound:(UpperBoundSize * 8),
			NextUpperBoundSize:8, NextUpperBound:(NextUpperBoundSize * 8), StepsLen:16,
			Steps:(StepsLen * 32)/binary, PrevSessionKeyBin/binary >>)
		%% Format 2 requires exactly one full step's worth of checkpoints.
		when UpperBoundSize > 0, StepsLen > 0,
			CheckpointLen == ?VDF_CHECKPOINT_COUNT_IN_STEP ->
	NextUpperBound2 = case NextUpperBoundSize of 0 -> undefined; _ -> NextUpperBound end,
	Update = #nonce_limiter_update{
			session_key = {NextSeed, Interval, NextVDFDifficulty},
			is_partial = case IsPartial of 0 -> false; _ -> true end,
			session = Session = #vdf_session{ step_number = StepNumber, seed = Seed,
					step_checkpoints_map = #{ StepNumber => parse_32b_list(Checkpoints) },
					upper_bound = UpperBound, next_upper_bound = NextUpperBound2,
					steps = parse_32b_list(Steps) } },
	case decode_session_key(PrevSessionKeyBin) of
		undefined ->
			{ok, Update};
		error ->
			{error, invalid1};
		SessionKey ->
			Session2 = Session#vdf_session{ prev_session_key = SessionKey },
			{ok, Update#nonce_limiter_update{ session = Session2 }}
	end;
binary_to_nonce_limiter_update(2, _Bin) ->
	{error, invalid2};
binary_to_nonce_limiter_update(3, % Format = 3.
		<< NextSeed:48/binary, NextVDFDifficultySize:8,
			NextVDFDifficulty:(NextVDFDifficultySize * 8), Interval:64, IsPartial:8,
			CheckpointsMapSize:24, CheckpointsMapBin:CheckpointsMapSize/binary,
			StepNumber:64, Seed:48/binary, UpperBoundSize:8,
			UpperBound:(UpperBoundSize * 8), NextUpperBoundSize:8,
			NextUpperBound:(NextUpperBoundSize * 8), StepsLen:16,
			Steps:(StepsLen * 32)/binary, PrevSessionKeyBin/binary >>)
		when UpperBoundSize > 0, StepsLen > 0 ->
	NextUpperBound2 = case NextUpperBoundSize of 0 -> undefined; _ -> NextUpperBound end,
	case decode_step_checkpoints_map(CheckpointsMapBin, #{}) of
		{error, _} = Error ->
			Error;
		{ok, StepCheckpointsMap} ->
			Update = #nonce_limiter_update{
					session_key = {NextSeed, Interval, NextVDFDifficulty},
					is_partial = case IsPartial of 0 -> false; _ -> true end,
					session = Session = #vdf_session{ step_number = StepNumber,
							seed = Seed, upper_bound = UpperBound,
							next_upper_bound = NextUpperBound2,
							steps = parse_32b_list(Steps),
							step_checkpoints_map = StepCheckpointsMap } },
			case decode_session_key(PrevSessionKeyBin) of
				undefined ->
					{ok, Update};
				error ->
					{error, invalid1};
				PrevSessionKey ->
					Session2 = Session#vdf_session{ prev_session_key = PrevSessionKey },
					{ok, Update#nonce_limiter_update{ session = Session2 }}
			end
	end;
binary_to_nonce_limiter_update(3, _Bin) ->
	{error, invalid2};
binary_to_nonce_limiter_update(4, % Format = 4.
		<< NextSeed:48/binary, NextVDFDifficultySize:8,
			NextVDFDifficulty:(NextVDFDifficultySize * 8), Interval:64, IsPartial:8,
			CheckpointsMapSize:24, CheckpointsMapBin:CheckpointsMapSize/binary,
			StepNumber:64, Seed:48/binary, UpperBoundSize:8,
			UpperBound:(UpperBoundSize * 8), NextUpperBoundSize:8,
			NextUpperBound:(NextUpperBoundSize * 8), StepsLen:16,
			Steps:(StepsLen * 32)/binary, VDFDifficultySize:8,
			VDFDifficulty:(VDFDifficultySize * 8), PrevSessionKeyBin/binary >>)
		when UpperBoundSize > 0, StepsLen > 0 ->
	NextUpperBound2 = case NextUpperBoundSize of 0 -> undefined; _ -> NextUpperBound end,
	case decode_step_checkpoints_map(CheckpointsMapBin, #{}) of
		{error, _} = Error ->
			Error;
		{ok, StepCheckpointsMap} ->
			Update = #nonce_limiter_update{
					session_key = {NextSeed, Interval, NextVDFDifficulty},
					is_partial = case IsPartial of 0 -> false; _ -> true end,
					session = Session = #vdf_session{ step_number = StepNumber,
							seed = Seed, upper_bound = UpperBound,
							next_upper_bound = NextUpperBound2,
							vdf_difficulty = VDFDifficulty,
							%% The session key's difficulty doubles as the
							%% session's next difficulty in format 4.
							next_vdf_difficulty = NextVDFDifficulty,
							steps = parse_32b_list(Steps),
							step_checkpoints_map = StepCheckpointsMap } },
			case decode_session_key(PrevSessionKeyBin) of
				undefined ->
					{ok, Update};
				error ->
					{error, invalid1};
				PrevSessionKey ->
					Session2 = Session#vdf_session{ prev_session_key = PrevSessionKey },
					{ok, Update#nonce_limiter_update{ session = Session2 }}
			end
	end;
binary_to_nonce_limiter_update(4, _Bin) ->
	{error, invalid2};
binary_to_nonce_limiter_update(_, _Bin) ->
	{error, invalid_format}.

%% @doc Inverse of encode_step_checkpoints_map/1. Every entry must carry
%% exactly ?VDF_CHECKPOINT_COUNT_IN_STEP checkpoints.
decode_step_checkpoints_map(<<>>, Map) ->
	{ok, Map};
decode_step_checkpoints_map(<< StepNumber:64, CheckpointLen:16,
		Checkpoints:(CheckpointLen * 32)/binary, Rest/binary >>, Map)
		when CheckpointLen == ?VDF_CHECKPOINT_COUNT_IN_STEP ->
	decode_step_checkpoints_map(Rest,
			maps:put(StepNumber, parse_32b_list(Checkpoints), Map));
decode_step_checkpoints_map(_Bin, _Map) ->
	{error, invalid_checkpoints_map}.
parse_32b_list(<<>>) -> []; parse_32b_list(<< El:32/binary, Rest/binary >>) -> [El | parse_32b_list(Rest)]. nonce_limiter_update_response_to_binary(#nonce_limiter_update_response{ session_found = SessionFound, step_number = StepNumber, postpone = Postpone, format = Format }) -> SessionFoundBin = case SessionFound of false -> << 0:8 >>; _ -> << 1:8 >> end, << SessionFoundBin/binary, (encode_int(StepNumber, 8))/binary, Postpone:8, Format:8 >>. binary_to_nonce_limiter_update_response(<< SessionFoundBin:8, StepNumberSize:8, StepNumber:(StepNumberSize * 8) >>) -> binary_to_nonce_limiter_update_response( SessionFoundBin, StepNumberSize, StepNumber, 0, 1); binary_to_nonce_limiter_update_response(<< SessionFoundBin:8, StepNumberSize:8, StepNumber:(StepNumberSize * 8), Postpone:8 >>) -> binary_to_nonce_limiter_update_response( SessionFoundBin, StepNumberSize, StepNumber, Postpone, 1); binary_to_nonce_limiter_update_response(<< SessionFoundBin:8, StepNumberSize:8, StepNumber:(StepNumberSize * 8), Postpone:8, Format:8 >>) -> binary_to_nonce_limiter_update_response( SessionFoundBin, StepNumberSize, StepNumber, Postpone, Format); binary_to_nonce_limiter_update_response(_Bin) -> {error, invalid2}. binary_to_nonce_limiter_update_response( SessionFoundBin, StepNumberSize, StepNumber, Postpone, Format) when SessionFoundBin == 0; SessionFoundBin == 1 -> SessionFound = case SessionFoundBin of 0 -> false; 1 -> true end, StepNumber2 = case StepNumberSize of 0 -> undefined; _ -> StepNumber end, {ok, #nonce_limiter_update_response{ session_found = SessionFound, step_number = StepNumber2, postpone = Postpone, format = Format }}; binary_to_nonce_limiter_update_response( _SessionFoundBin, _StepNumberSize, _StepNumber, _Postpone, _Format) -> {error, invalid1}. 
encode_double_signing_proof(undefined, _Height) -> << 0:8 >>; encode_double_signing_proof(Proof, Height) -> {Key, Sig1, CDiff1, PrevCDiff1, Preimage1, Sig2, CDiff2, PrevCDiff2, Preimage2} = Proof, case Height >= ar_fork:height_2_9() of false -> << 1:8, Key:512/binary, Sig1:512/binary, (ar_serialize:encode_int(CDiff1, 16))/binary, (ar_serialize:encode_int(PrevCDiff1, 16))/binary, Preimage1:64/binary, Sig2:512/binary, (ar_serialize:encode_int(CDiff2, 16))/binary, (ar_serialize:encode_int(PrevCDiff2, 16))/binary, Preimage2:64/binary >>; true -> << 1:8, (ar_serialize:encode_bin(Key, 16))/binary, (ar_serialize:encode_bin(Sig1, 16))/binary, (ar_serialize:encode_int(CDiff1, 16))/binary, (ar_serialize:encode_int(PrevCDiff1, 16))/binary, Preimage1:64/binary, (ar_serialize:encode_bin(Sig2, 16))/binary, (ar_serialize:encode_int(CDiff2, 16))/binary, (ar_serialize:encode_int(PrevCDiff2, 16))/binary, Preimage2:64/binary >> end. %%%=================================================================== %%% Private functions. 
%%%=================================================================== encode_post_2_6_fields(#block{ height = Height, hash_preimage = HashPreimage, recall_byte = RecallByte, reward = Reward, previous_solution_hash = PreviousSolutionHash, partition_number = PartitionNumber, signature = Sig, nonce_limiter_info = NonceLimiterInfo, poa2 = #poa{ chunk = Chunk, data_path = DataPath, tx_path = TXPath }, recall_byte2 = RecallByte2, price_per_gib_minute = PricePerGiBMinute, scheduled_price_per_gib_minute = ScheduledPricePerGiBMinute, reward_history_hash = RewardHistoryHash, debt_supply = DebtSupply, kryder_plus_rate_multiplier = KryderPlusRateMultiplier, kryder_plus_rate_multiplier_latch = KryderPlusRateMultiplierLatch, denomination = Denomination, redenomination_height = RedenominationHeight, double_signing_proof = DoubleSigningProof, previous_cumulative_diff = PrevCDiff } = B) -> RewardKey = case B#block.reward_key of undefined -> <<>>; {_Type, Key} -> Key end, case Height >= ar_fork:height_2_6() of false -> <<>>; true -> << (encode_bin(HashPreimage, 8))/binary, (encode_int(RecallByte, 16))/binary, (encode_int(Reward, 8))/binary, (encode_bin(Sig, 16))/binary, (encode_int(RecallByte2, 16))/binary, (encode_bin(PreviousSolutionHash, 8))/binary, PartitionNumber:256, (encode_nonce_limiter_info(NonceLimiterInfo))/binary, (encode_bin(Chunk, 24))/binary, (encode_bin(RewardKey, 16))/binary, (encode_bin(TXPath, 24))/binary, (encode_bin(DataPath, 24))/binary, (encode_int(PricePerGiBMinute, 8))/binary, (encode_int(ScheduledPricePerGiBMinute, 8))/binary, RewardHistoryHash:32/binary, (encode_int(DebtSupply, 8))/binary, KryderPlusRateMultiplier:24, KryderPlusRateMultiplierLatch:8, Denomination:24, (encode_int(RedenominationHeight, 8))/binary, (encode_int(PrevCDiff, 16))/binary, (encode_double_signing_proof(DoubleSigningProof, Height))/binary, (encode_post_2_7_fields(B))/binary >> end. 
encode_post_2_7_fields(#block{ height = Height, merkle_rebase_support_threshold = Threshold, chunk_hash = ChunkHash, chunk2_hash = Chunk2Hash, block_time_history_hash = BlockTimeHistoryHash, nonce_limiter_info = #nonce_limiter_info{ vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty } } = B) -> case Height >= ar_fork:height_2_7() of true -> << (encode_int(Threshold, 16))/binary, ChunkHash:32/binary, (encode_bin(Chunk2Hash, 8))/binary, BlockTimeHistoryHash:32/binary, (encode_int(VDFDifficulty, 8))/binary, (encode_int(NextVDFDifficulty, 8))/binary, (encode_post_2_8_fields(B))/binary >>; false -> <<>> end. encode_post_2_8_fields(#block{ height = Height, packing_difficulty = PackingDifficulty, unpacked_chunk_hash = UnpackedChunkHash, unpacked_chunk2_hash = UnpackedChunk2Hash, poa = #poa{ unpacked_chunk = UnpackedChunk }, poa2 = #poa{ unpacked_chunk = UnpackedChunk2 }} = B) -> case Height >= ar_fork:height_2_8() of false -> <<>>; true -> << PackingDifficulty:8, (ar_serialize:encode_bin(UnpackedChunkHash, 8))/binary, (ar_serialize:encode_bin(UnpackedChunk2Hash, 8))/binary, (ar_serialize:encode_bin(UnpackedChunk, 24))/binary, (ar_serialize:encode_bin(UnpackedChunk2, 24))/binary, (encode_post_2_9_fields(B))/binary >> end. encode_post_2_9_fields(#block{ height = Height, replica_format = ReplicaFormat }) -> case Height >= ar_fork:height_2_9() of false -> <<>>; true -> << ReplicaFormat:8 >> end. 
encode_nonce_limiter_info(#nonce_limiter_info{ output = Output, global_step_number = N, seed = Seed, next_seed = NextSeed, partition_upper_bound = PartitionUpperBound, next_partition_upper_bound = NextPartitionUpperBound, prev_output = PrevOutput, last_step_checkpoints = Checkpoints, steps = Steps }) -> CheckpointsLen = length(Checkpoints), StepsLen = length(Steps), << Output:32/binary, N:64, Seed:48/binary, NextSeed:48/binary, (encode_bin(PrevOutput, 8))/binary, PartitionUpperBound:256, NextPartitionUpperBound:256, CheckpointsLen:16, (iolist_to_binary(Checkpoints))/binary, StepsLen:16, (iolist_to_binary(Steps))/binary >>. encode_int(undefined, SizeBits) -> << 0:SizeBits >>; encode_int(N, SizeBits) -> Bin = binary:encode_unsigned(N, big), << (byte_size(Bin)):SizeBits, Bin/binary >>. encode_bin(undefined, SizeBits) -> << 0:SizeBits >>; encode_bin(Bin, SizeBits) -> << (byte_size(Bin)):SizeBits, Bin/binary >>. encode_bin_list(Bins, LenBits, ElemSizeBits) -> encode_bin_list(Bins, [], 0, LenBits, ElemSizeBits). encode_bin_list([], Encoded, N, LenBits, _ElemSizeBits) -> << N:LenBits, (iolist_to_binary(Encoded))/binary >>; encode_bin_list([Bin | Bins], Encoded, N, LenBits, ElemSizeBits) -> Elem = encode_bin(Bin, ElemSizeBits), encode_bin_list(Bins, [Elem | Encoded], N + 1, LenBits, ElemSizeBits). encode_transactions(TXs) -> encode_transactions(TXs, [], 0). encode_transactions([], Encoded, N) -> << N:16, (iolist_to_binary(Encoded))/binary >>; encode_transactions([<< TXID:32/binary >> | TXs], Encoded, N) -> encode_transactions(TXs, [<< 32:24, TXID:32/binary >> | Encoded], N + 1); encode_transactions([TX | TXs], Encoded, N) -> Bin = encode_tx(TX), TXSize = byte_size(Bin), encode_transactions(TXs, [<< TXSize:24, Bin/binary >> | Encoded], N + 1). 
encode_tx(#tx{ format = Format, id = TXID, last_tx = LastTX, owner = Owner, tags = Tags, target = Target, quantity = Quantity, data = Data, data_size = DataSize, data_root = DataRoot, signature = Signature, reward = Reward, signature_type = SignatureType } = TX) -> Owner2 = case SignatureType of ?ECDSA_KEY_TYPE -> <<>>; _ -> Owner end, << Format:8, TXID:32/binary, (encode_bin(LastTX, 8))/binary, (encode_bin(Owner2, 16))/binary, (encode_bin(Target, 8))/binary, (encode_int(Quantity, 8))/binary, (encode_int(DataSize, 16))/binary, (encode_bin(DataRoot, 8))/binary, (encode_bin(Signature, 16))/binary, (encode_int(Reward, 8))/binary, (encode_bin(Data, 24))/binary, (encode_tx_tags(Tags))/binary, (may_be_encode_tx_denomination(TX))/binary >>. encode_tx_tags(Tags) -> encode_tx_tags(Tags, [], 0). encode_tx_tags([], Encoded, N) -> << N:16, (iolist_to_binary(Encoded))/binary >>; encode_tx_tags([{Name, Value} | Tags], Encoded, N) -> TagNameSize = byte_size(Name), TagValueSize = byte_size(Value), Tag = << TagNameSize:16, TagValueSize:16, Name/binary, Value/binary >>, encode_tx_tags(Tags, [Tag | Encoded], N + 1). may_be_encode_tx_denomination(#tx{ denomination = 0 }) -> <<>>; may_be_encode_tx_denomination(#tx{ denomination = Denomination }) -> << Denomination:24 >>. parse_block_tags_transactions(Bin, B) -> case parse_block_tags(Bin) of {error, Reason} -> {error, Reason}; {ok, Tags, Rest} -> parse_block_transactions(Rest, B#block{ tags = Tags }) end. parse_block_transactions(Bin, B) -> case {parse_block_transactions(Bin), B#block.height < ar_fork:height_2_6()} of {{error, Reason}, _} -> {error, Reason}; {{ok, TXs, <<>>}, true} -> {ok, B#block{ txs = TXs }}; {{ok, TXs, Rest}, false} -> parse_block_post_2_6_fields(B#block{ txs = TXs }, Rest); _ -> {error, invalid_input1} end. 
%% @doc Parse the block fields introduced by the 2.6 hard fork (the inverse of
%% encode_post_2_6_fields/1) and continue with parse_double_signing_proof/2.
parse_block_post_2_6_fields(B, << HashPreimageSize:8,
		HashPreimage:HashPreimageSize/binary, RecallByteSize:16,
		RecallByte:(RecallByteSize * 8), RewardSize:8, Reward:(RewardSize * 8),
		SigSize:16, Sig:SigSize/binary, RecallByte2Size:16,
		RecallByte2:(RecallByte2Size * 8), PreviousSolutionHashSize:8,
		PreviousSolutionHash:PreviousSolutionHashSize/binary, PartitionNumber:256,
		NonceLimiterOutput:32/binary, GlobalStepNumber:64, Seed:48/binary,
		NextSeed:48/binary, PrevOutputSize:8, PrevOutput:PrevOutputSize/binary,
		PartitionUpperBound:256, NextPartitionUpperBound:256, LastCheckpointsLen:16,
		LastCheckpoints:(LastCheckpointsLen * 32)/binary, StepsLen:16,
		Steps:(StepsLen * 32)/binary, ChunkSize:24, Chunk:ChunkSize/binary,
		RewardKeySize:16, RewardKey:RewardKeySize/binary, TXPathSize:24,
		TXPath:TXPathSize/binary, DataPathSize:24, DataPath:DataPathSize/binary,
		PricePerGiBMinuteSize:8, PricePerGiBMinute:(PricePerGiBMinuteSize * 8),
		ScheduledPricePerGiBMinuteSize:8,
		ScheduledPricePerGiBMinute:(ScheduledPricePerGiBMinuteSize * 8),
		RewardHistoryHash:32/binary, DebtSupplySize:8, DebtSupply:(DebtSupplySize * 8),
		KryderPlusRateMultiplier:24, KryderPlusRateMultiplierLatch:8, Denomination:24,
		RedenominationHeightSize:8, RedenominationHeight:(RedenominationHeightSize * 8),
		PrevCDiffSize:16, PrevCDiff:(PrevCDiffSize * 8), Rest/binary >>) ->
	%% The only block where recall_byte may be undefined is the genesis block
	%% of a new weave.
	RecallByte_2 = case RecallByteSize of 0 -> undefined; _ -> RecallByte end,
	Height = B#block.height,
	%% For 2.6+ blocks the nonce binary stored by binary_to_block/1 is decoded
	%% back to an integer.
	Nonce = binary:decode_unsigned(B#block.nonce, big),
	NonceLimiterInfo = #nonce_limiter_info{ output = NonceLimiterOutput,
			prev_output = PrevOutput, global_step_number = GlobalStepNumber, seed = Seed,
			next_seed = NextSeed, partition_upper_bound = PartitionUpperBound,
			next_partition_upper_bound = NextPartitionUpperBound,
			last_step_checkpoints = parse_checkpoints(LastCheckpoints, Height),
			steps = parse_checkpoints(Steps, Height) },
	RecallByte2_2 = case RecallByte2Size of 0 -> undefined; _ -> RecallByte2 end,
	%% From 2.9 on, an ECDSA-sized reward key implies an ECDSA signature type.
	SigType = case {RewardKeySize, Height >= ar_fork:height_2_9()} of
		{?ECDSA_PUB_KEY_SIZE, true} -> ?ECDSA_KEY_TYPE;
		_ -> ?RSA_KEY_TYPE
	end,
	B2 = B#block{ hash_preimage = HashPreimage, recall_byte = RecallByte_2,
			reward = Reward, nonce = Nonce, recall_byte2 = RecallByte2_2,
			previous_solution_hash = PreviousSolutionHash, signature = Sig,
			partition_number = PartitionNumber, reward_key = {SigType, RewardKey},
			nonce_limiter_info = NonceLimiterInfo,
			poa2 = #poa{ chunk = Chunk, data_path = DataPath, tx_path = TXPath },
			price_per_gib_minute = PricePerGiBMinute,
			scheduled_price_per_gib_minute = ScheduledPricePerGiBMinute,
			reward_history_hash = RewardHistoryHash, debt_supply = DebtSupply,
			kryder_plus_rate_multiplier = KryderPlusRateMultiplier,
			kryder_plus_rate_multiplier_latch = KryderPlusRateMultiplierLatch,
			denomination = Denomination, redenomination_height = RedenominationHeight,
			previous_cumulative_diff = PrevCDiff },
	parse_double_signing_proof(Rest, B2);
parse_block_post_2_6_fields(_B, _Rest) ->
	{error, invalid_input4}.

%% @doc Split a checkpoints binary into 32-byte entries. At height 0 only the
%% empty binary is accepted.
parse_checkpoints(<<>>, 0) ->
	[];
parse_checkpoints(_, 0) ->
	{error, invalid_checkpoints};
parse_checkpoints(<< Checkpoint:32/binary >>, _Height) ->
	%% The block must have at least one checkpoint (the last nonce limiter output).
	[Checkpoint];
parse_checkpoints(<< Checkpoint:32/binary, Rest/binary >>, Height) ->
	[Checkpoint | parse_checkpoints(Rest, Height)].
parse_block_tags(<< TagsLen:16, Rest/binary >>) when TagsLen =< 2048 -> parse_block_tags(TagsLen, Rest, [], 0); parse_block_tags(_Bin) -> {error, invalid_tags_input}. parse_block_tags(0, Rest, Tags, _TotalSize) -> {ok, Tags, Rest}; parse_block_tags(N, << TagSize:16, Tag:TagSize/binary, Rest/binary >>, Tags, TotalSize) when TotalSize + TagSize =< 2048 -> parse_block_tags(N - 1, << Rest/binary >>, [Tag | Tags], TotalSize + TagSize); parse_block_tags(_N, _Bin, _Tags, _TotalSize) -> {error, invalid_tag_input}. parse_block_transactions(<< Count:16, Rest/binary >>) when Count =< 1000 -> parse_block_transactions(Count, Rest, []); parse_block_transactions(_Bin) -> {error, invalid_transactions_input}. parse_block_transactions(0, Rest, TXs) -> {ok, TXs, Rest}; parse_block_transactions(N, << Size:24, Bin:Size/binary, Rest/binary >>, TXs) when N > 0 -> case parse_tx(Bin) of {error, Reason} -> {error, Reason}; {ok, TX} -> parse_block_transactions(N - 1, Rest, [TX | TXs]) end; parse_block_transactions(_N, _Rest, _TXs) -> {error, invalid_transactions2_input}. 
parse_double_signing_proof(<< 0:8, Rest/binary >>, B) -> parse_post_2_7_fields(Rest, B); parse_double_signing_proof(Bin, #block{ height = Height } = B) -> case {Bin, Height >= ar_fork:height_2_9()} of {<< 1:8, Key:512/binary, Sig1:512/binary, CDiff1Size:16, CDiff1:(CDiff1Size * 8), PrevCDiff1Size:16, PrevCDiff1:(PrevCDiff1Size * 8), Preimage1:64/binary, Sig2:512/binary, CDiff2Size:16, CDiff2:(CDiff2Size * 8), PrevCDiff2Size:16, PrevCDiff2:(PrevCDiff2Size * 8), Preimage2:64/binary, Rest/binary >>, false} -> Proof = {Key, Sig1, CDiff1, PrevCDiff1, Preimage1, Sig2, CDiff2, PrevCDiff2, Preimage2}, B2 = B#block{ double_signing_proof = Proof }, parse_post_2_7_fields(Rest, B2); {_Bin, false} -> {error, invalid_double_signing_proof_input}; {<< 1:8, KeySize:16, Key:KeySize/binary, Sig1Size:16, Sig1:Sig1Size/binary, CDiff1Size:16, CDiff1:(CDiff1Size * 8), PrevCDiff1Size:16, PrevCDiff1:(PrevCDiff1Size * 8), Preimage1:64/binary, Sig2Size:16, Sig2:Sig2Size/binary, CDiff2Size:16, CDiff2:(CDiff2Size * 8), PrevCDiff2Size:16, PrevCDiff2:(PrevCDiff2Size * 8), Preimage2:64/binary, Rest/binary >>, true} when (KeySize == ?RSA_BLOCK_SIG_SIZE andalso Sig1Size == ?RSA_BLOCK_SIG_SIZE andalso Sig2Size == ?RSA_BLOCK_SIG_SIZE) orelse (KeySize == ?ECDSA_PUB_KEY_SIZE andalso Sig1Size == ?ECDSA_SIG_SIZE andalso Sig2Size == ?ECDSA_SIG_SIZE) -> Proof = {Key, Sig1, CDiff1, PrevCDiff1, Preimage1, Sig2, CDiff2, PrevCDiff2, Preimage2}, B2 = B#block{ double_signing_proof = Proof }, parse_post_2_7_fields(Rest, B2); {_Bin, true} -> {error, invalid_double_signing_proof_input2} end. 
%% @doc Parse the block fields introduced at fork 2.7: the merkle rebase
%% support threshold, the chunk hashes, the block time history hash, and the
%% current/next VDF difficulties. Pre-2.7 blocks must have no bytes left.
parse_post_2_7_fields(Rest, #block{ height = Height } = B) ->
	case {Rest, Height >= ar_fork:height_2_7()} of
		{<<>>, false} ->
			{ok, B};
		{<< ThresholdSize:16, Threshold:(ThresholdSize*8),
				ChunkHash:32/binary,
				Chunk2HashSize:8, Chunk2Hash:Chunk2HashSize/binary,
				BlockTimeHistoryHash:32/binary,
				VDFDifficultySize:8, VDFDifficulty:(VDFDifficultySize * 8),
				NextVDFDifficultySize:8, NextVDFDifficulty:(NextVDFDifficultySize * 8),
				Rest2/binary >>, true} ->
			%% A zero-length second chunk hash denotes its absence.
			Chunk2Hash2 = case Chunk2HashSize of 0 -> undefined; _ -> Chunk2Hash end,
			B2 = B#block{ merkle_rebase_support_threshold = Threshold,
					chunk_hash = ChunkHash, chunk2_hash = Chunk2Hash2,
					block_time_history_hash = BlockTimeHistoryHash,
					%% The VDF difficulties live on the nested nonce limiter info.
					nonce_limiter_info = (B#block.nonce_limiter_info)#nonce_limiter_info{
							vdf_difficulty = VDFDifficulty,
							next_vdf_difficulty = NextVDFDifficulty } },
			parse_post_2_8_fields(Rest2, B2);
		_ ->
			{error, invalid_merkle_rebase_support_threshold}
	end.

%% @doc Parse the block fields introduced at fork 2.8: the packing difficulty,
%% the unpacked chunk hashes, and the unpacked chunks attached to the two
%% proofs of access. Pre-2.8 blocks must have no bytes left.
parse_post_2_8_fields(Rest, #block{ height = Height, poa = PoA, poa2 = PoA2 } = B) ->
	case {Rest, Height >= ar_fork:height_2_8()} of
		{<<>>, false} ->
			{ok, B};
		{<< PackingDifficulty:8,
				UnpackedChunkHashSize:8, UnpackedChunkHash:UnpackedChunkHashSize/binary,
				UnpackedChunk2HashSize:8, UnpackedChunk2Hash:UnpackedChunk2HashSize/binary,
				UnpackedChunkSize:24, UnpackedChunk:UnpackedChunkSize/binary,
				UnpackedChunk2Size:24, UnpackedChunk2:UnpackedChunk2Size/binary,
				Rest2/binary >>, true} ->
			%% Empty hashes denote absent values.
			UnpackedChunkHash_2 = case UnpackedChunkHash of
					<<>> -> undefined; _ -> UnpackedChunkHash end,
			UnpackedChunk2Hash_2 = case UnpackedChunk2Hash of
					<<>> -> undefined; _ -> UnpackedChunk2Hash end,
			parse_post_2_9_fields(Rest2, B#block{
					packing_difficulty = PackingDifficulty,
					unpacked_chunk_hash = UnpackedChunkHash_2,
					unpacked_chunk2_hash = UnpackedChunk2Hash_2,
					poa = PoA#poa{ unpacked_chunk = UnpackedChunk },
					poa2 = PoA2#poa{ unpacked_chunk = UnpackedChunk2 } });
		_ ->
			{error, invalid_packing_difficulty}
	end.
%% @doc Parse the single block field introduced at fork 2.9: the replica
%% format byte, which must be the last byte of the encoding. Pre-2.9 blocks
%% must have no bytes left.
parse_post_2_9_fields(Rest, #block{ height = Height } = B) ->
	case {Rest, Height >= ar_fork:height_2_9()} of
		{<<>>, false} ->
			{ok, B};
		{<< ReplicaFormat:8 >>, true} ->
			{ok, B#block{ replica_format = ReplicaFormat }};
		_ ->
			{error, invalid_replica_format}
	end.

%% @doc Parse a binary-encoded transaction. A bare 32-byte binary is a
%% transaction identifier placeholder; a full encoding carries the format
%% (1 or 2), ID, and the size-prefixed fields, followed by the tags and an
%% optional denomination. For ECDSA transactions the owner public key is
%% recovered from the signature over the signature data segment.
parse_tx(<< TXID:32/binary >>) ->
	{ok, TXID};
parse_tx(<< Format:8, TXID:32/binary, LastTXSize:8, LastTX:LastTXSize/binary,
		OwnerSize:16, Owner:OwnerSize/binary, TargetSize:8, Target:TargetSize/binary,
		QuantitySize:8, Quantity:(QuantitySize * 8),
		DataSizeSize:16, DataSize:(DataSizeSize * 8),
		DataRootSize:8, DataRoot:DataRootSize/binary,
		SignatureSize:16, Signature:SignatureSize/binary,
		RewardSize:8, Reward:(RewardSize * 8),
		DataEncodingSize:24, Data:DataEncodingSize/binary, Rest/binary >>)
		when Format == 1 orelse Format == 2 ->
	case parse_tx_tags(Rest) of
		{error, Reason} ->
			{error, Reason};
		{ok, Tags, Rest2} ->
			SigType = set_sig_type_from_pub_key(Owner, Signature),
			case parse_tx_denomination(Rest2) of
				{ok, Denomination} ->
					%% Format-1 transactions carry their data inline, so the
					%% data size is recomputed from the payload.
					DataSize2 = case Format of 1 -> byte_size(Data); _ -> DataSize end,
					TX = #tx{ format = Format, id = TXID, last_tx = LastTX,
							owner = Owner, target = Target, quantity = Quantity,
							data_size = DataSize2, data_root = DataRoot,
							signature = Signature, reward = Reward, data = Data,
							tags = Tags, denomination = Denomination,
							signature_type = SigType },
					case SigType of
						{?ECDSA_SIGN_ALG, secp256k1} ->
							%% The ECDSA owner key is not transmitted; recover it
							%% from the signature.
							DataSegment = ar_tx:generate_signature_data_segment(TX),
							Owner2 = ar_wallet:recover_key(DataSegment, Signature, SigType),
							{ok, TX#tx{ owner = Owner2,
									owner_address = ar_wallet:to_address(Owner2, SigType) }};
						{?RSA_SIGN_ALG, 65537} ->
							{ok, TX#tx{
									owner_address = ar_wallet:to_address(Owner, SigType) }}
					end;
				{error, Reason} ->
					{error, Reason}
			end
	end;
parse_tx(_Bin) ->
	{error, invalid_tx_input}.

%% @doc Parse the tag list of a binary-encoded transaction: a 16-bit tag
%% count (at most 2048) followed by the tags themselves.
parse_tx_tags(<< TagsLen:16, Rest/binary >>) when TagsLen =< 2048 ->
	parse_tx_tags(TagsLen, Rest, []);
parse_tx_tags(_Bin) ->
	{error, invalid_tx_tags_input}.
%% Walk the given number of tag entries; each entry is a pair of
%% 16-bit-size-prefixed name and value binaries. The accumulated list is in
%% reverse encounter order.
parse_tx_tags(0, Bin, Acc) ->
	{ok, Acc, Bin};
parse_tx_tags(Remaining, << NameSize:16, ValueSize:16, Name:NameSize/binary,
		Value:ValueSize/binary, Bin/binary >>, Acc) when Remaining > 0 ->
	parse_tx_tags(Remaining - 1, Bin, [{Name, Value} | Acc]);
parse_tx_tags(_Remaining, _Bin, _Acc) ->
	{error, invalid_tx_tag_input}.

%% Parse the optional trailing denomination field of a binary-encoded
%% transaction; an empty tail denotes denomination 0.
parse_tx_denomination(<<>>) ->
	{ok, 0};
parse_tx_denomination(<< Denomination:24 >>) when Denomination > 0 ->
	{ok, Denomination};
parse_tx_denomination(_Rest) ->
	{error, invalid_denomination}.

%% @doc Serialize a transaction record and prepend its 24-bit byte size.
tx_to_binary(TX) ->
	Encoded = encode_tx(TX),
	<< (byte_size(Encoded)):24, Encoded/binary >>.

%% @doc Parse a 24-bit-size-prefixed binary-encoded transaction.
binary_to_tx(<< Size:24, Bin:Size/binary >>) ->
	parse_tx(Bin);
binary_to_tx(_Rest) ->
	{error, invalid_input7}.

%% @doc Serialize a block announcement record; the optional second recall
%% byte and solution hash are simply omitted when unset.
block_announcement_to_binary(#block_announcement{ indep_hash = H, previous_block = PrevH,
		tx_prefixes = Prefixes, recall_byte = RecallByte, recall_byte2 = RecallByte2,
		solution_hash = SolutionH }) ->
	MaybeRecallByte2 =
		case RecallByte2 of
			undefined -> <<>>;
			_ -> encode_int(RecallByte2, 8)
		end,
	<< H:48/binary, PrevH:48/binary, (encode_int(RecallByte, 8))/binary,
			(encode_tx_prefixes(Prefixes))/binary, MaybeRecallByte2/binary,
			(encode_solution_hash(SolutionH))/binary >>.

%% Serialize the 8-byte transaction ID prefixes preceded by a 16-bit count.
%% Note: the prefixes are emitted in reverse list order.
encode_tx_prefixes(Prefixes) ->
	<< (length(Prefixes)):16, (encode_tx_prefixes(Prefixes, []))/binary >>.

encode_tx_prefixes([], Acc) ->
	iolist_to_binary(Acc);
encode_tx_prefixes([Prefix | Rest], Acc) ->
	encode_tx_prefixes(Rest, [<< Prefix:8/binary >> | Acc]).

%% Serialize the optional 32-byte solution hash.
encode_solution_hash(undefined) ->
	<<>>;
encode_solution_hash(H) ->
	<< H:32/binary >>.
%% @doc Parse a binary-encoded block announcement: the block hash, the
%% previous block hash, a size-prefixed recall byte (size 0 denotes absence),
%% a 16-bit transaction prefix count, and the prefixes followed by the
%% optional second recall byte and solution hash.
binary_to_block_announcement(<< H:48/binary, PrevH:48/binary,
		RecallByteSize:8, RecallByte:(RecallByteSize * 8), N:16, Rest/binary >>) ->
	RecallByte2 = case RecallByteSize of 0 -> undefined; _ -> RecallByte end,
	case parse_tx_prefixes_and_recall_byte2_and_solution_hash(N, Rest) of
		{error, Reason} ->
			{error, Reason};
		{ok, {Prefixes, RecallByte3, SolutionH}} ->
			{ok, #block_announcement{ indep_hash = H, previous_block = PrevH,
					recall_byte = RecallByte2, tx_prefixes = Prefixes,
					recall_byte2 = RecallByte3, solution_hash = SolutionH }}
	end;
binary_to_block_announcement(_Rest) ->
	{error, invalid_input}.

parse_tx_prefixes_and_recall_byte2_and_solution_hash(N, Bin) ->
	parse_tx_prefixes_and_recall_byte2_and_solution_hash(N, Bin, []).

%% Collect N 8-byte prefixes (accumulated in reverse order), then interpret
%% the tail as [recall byte 2][solution hash], [solution hash], or nothing.
%% NOTE: the clause order matters — a bare 32-byte tail can only be a
%% solution hash because the size-prefixed form needs at least 33 bytes.
parse_tx_prefixes_and_recall_byte2_and_solution_hash(0, Rest, Prefixes) ->
	case Rest of
		<<>> ->
			{ok, {Prefixes, undefined, undefined}};
		<< RecallByte2Size:8, RecallByte2:(RecallByte2Size * 8), SolutionH:32/binary >> ->
			{ok, {Prefixes, RecallByte2, SolutionH}};
		<< SolutionH:32/binary >> ->
			{ok, {Prefixes, undefined, SolutionH}};
		_ ->
			{error, invalid_recall_byte2_and_solution_hash_input}
	end;
parse_tx_prefixes_and_recall_byte2_and_solution_hash(N, << Prefix:8/binary, Rest/binary >>,
		Prefixes) when N > 0 ->
	parse_tx_prefixes_and_recall_byte2_and_solution_hash(N - 1, Rest, [Prefix | Prefixes]);
parse_tx_prefixes_and_recall_byte2_and_solution_hash(_N, _Rest, _Prefixes) ->
	{error, invalid_tx_prefixes_input}.

%% @doc Parse a binary-encoded block announcement response: a leading
%% missing-chunk flag byte (0 or 1) followed by the missing transaction
%% indices and the optional missing-chunk2 flag.
binary_to_block_announcement_response(<< ChunkMissing:8, Rest/binary >>)
		when ChunkMissing == 1 orelse ChunkMissing == 0 ->
	case parse_missing_tx_indices_and_missing_chunk2(Rest) of
		{ok, {Indices, MissingChunk2}} ->
			{ok, #block_announcement_response{
					missing_chunk = ar_util:int_to_bool(ChunkMissing),
					missing_tx_indices = Indices, missing_chunk2 = MissingChunk2 }};
		{error, Reason} ->
			{error, Reason}
	end;
binary_to_block_announcement_response(_Rest) ->
	{error, invalid_block_announcement_response_input}.
parse_missing_tx_indices_and_missing_chunk2(Bin) ->
	parse_missing_tx_indices_and_missing_chunk2(Bin, []).

%% Collect 16-bit transaction indices (accumulated in reverse encounter
%% order) until the input is exhausted or a single trailing byte — the
%% missing-chunk2 flag — remains; that byte must be 0 or 1.
parse_missing_tx_indices_and_missing_chunk2(<<>>, Acc) ->
	{ok, {Acc, undefined}};
parse_missing_tx_indices_and_missing_chunk2(<< 0:8 >>, Acc) ->
	{ok, {Acc, false}};
parse_missing_tx_indices_and_missing_chunk2(<< 1:8 >>, Acc) ->
	{ok, {Acc, true}};
parse_missing_tx_indices_and_missing_chunk2(<< _:8 >>, _Acc) ->
	{error, invalid_missing_chunk2_input};
parse_missing_tx_indices_and_missing_chunk2(<< Index:16, Bin/binary >>, Acc) ->
	parse_missing_tx_indices_and_missing_chunk2(Bin, [Index | Acc]);
parse_missing_tx_indices_and_missing_chunk2(_Bin, _Acc) ->
	{error, invalid_missing_tx_indices_input}.

%% @doc Serialize a block announcement response record; the missing-chunk2
%% flag byte is omitted when unset.
block_announcement_response_to_binary(#block_announcement_response{
		missing_tx_indices = Indices, missing_chunk = MissingChunk,
		missing_chunk2 = MissingChunk2 }) ->
	Chunk2Flag =
		case MissingChunk2 of
			undefined -> <<>>;
			false -> << 0:8 >>;
			true -> << 1:8 >>
		end,
	<< (ar_util:bool_to_int(MissingChunk)):8,
			(encode_missing_tx_indices(Indices))/binary, Chunk2Flag/binary >>.

encode_missing_tx_indices(Indices) ->
	encode_missing_tx_indices(Indices, []).

%% Emit the 16-bit indices in reverse list order.
encode_missing_tx_indices([], Acc) ->
	iolist_to_binary(Acc);
encode_missing_tx_indices([Index | Rest], Acc) ->
	encode_missing_tx_indices(Rest, [<< Index:16 >> | Acc]).

%% @doc Serialize a proof-of-access map (with chunk) into the binary wire
%% format: size-prefixed chunk, tx path, data path, and packing name.
poa_map_to_binary(#{ chunk := Chunk, tx_path := TXPath, data_path := DataPath,
		packing := Packing }) ->
	PackingBin = packing_to_binary(Packing),
	<< (encode_bin(Chunk, 24))/binary, (encode_bin(TXPath, 24))/binary,
			(encode_bin(DataPath, 24))/binary, (encode_bin(PackingBin, 8))/binary >>.

%% @doc Serialize a chunkless proof-of-access map (tx path and data path only).
poa_no_chunk_map_to_binary(#{ tx_path := TXPath, data_path := DataPath }) ->
	<< (encode_bin(TXPath, 24))/binary, (encode_bin(DataPath, 24))/binary >>.
%% @doc Parse the binary wire format produced by poa_map_to_binary/1.
binary_to_poa(<< ChunkSize:24, Chunk:ChunkSize/binary, TXPathSize:24,
		TXPath:TXPathSize/binary, DataPathSize:24, DataPath:DataPathSize/binary,
		PackingSize:8, PackingBin:PackingSize/binary >>) ->
	case binary_to_packing(PackingBin, error) of
		error ->
			{error, invalid_packing};
		Packing ->
			{ok, #{ chunk => Chunk, data_path => DataPath, tx_path => TXPath,
					packing => Packing }}
	end;
binary_to_poa(_Rest) ->
	{error, invalid_input}.

%% @doc Parse the binary wire format produced by poa_no_chunk_map_to_binary/1.
binary_to_no_chunk_map(<< TXPathSize:24, TXPath:TXPathSize/binary,
		DataPathSize:24, DataPath:DataPathSize/binary >>) ->
	{ok, #{ data_path => DataPath, tx_path => TXPath }};
binary_to_no_chunk_map(_Rest) ->
	{error, invalid_input}.

%% @doc Serialize a block index; the entries are emitted in reverse list order.
block_index_to_binary(BI) ->
	block_index_to_binary(BI, []).

block_index_to_binary([], Acc) ->
	iolist_to_binary(Acc);
block_index_to_binary([{BH, WeaveSize, TXRoot} | Rest], Acc) ->
	Entry = << BH:48/binary, (encode_int(WeaveSize, 16))/binary,
			(encode_bin(TXRoot, 8))/binary >>,
	block_index_to_binary(Rest, [Entry | Acc]).

%% @doc Parse a serialized block index; the entries come out in reverse
%% binary order, mirroring block_index_to_binary/1.
binary_to_block_index(Bin) ->
	binary_to_block_index(Bin, []).

binary_to_block_index(<<>>, Acc) ->
	{ok, Acc};
binary_to_block_index(<< BH:48/binary, WeaveSizeSize:16, WeaveSize:(WeaveSizeSize * 8),
		TXRootSize:8, TXRoot:TXRootSize/binary, Bin/binary >>, Acc) ->
	binary_to_block_index(Bin, [{BH, WeaveSize, TXRoot} | Acc]);
binary_to_block_index(_Bin, _Acc) ->
	{error, invalid_input}.

%% @doc Serialize a {TXRoot, BlockSize, DataRootEntries} triplet: size-prefixed
%% TX root and block size, a 32-bit entry count, then the entries in order.
data_roots_to_binary({TXRoot, BlockSize, Entries}) when is_binary(TXRoot) ->
	EncodedEntries = lists:map(
		fun({DataRoot, TXSize, TXStartOffset, TXPath}) ->
			<< DataRoot:32/binary, (encode_int(TXSize, 8))/binary,
					(encode_int(TXStartOffset, 8))/binary,
					(encode_bin(TXPath, 24))/binary >>
		end,
		Entries),
	<< (encode_bin(TXRoot, 8))/binary, (encode_int(BlockSize, 16))/binary,
			(length(Entries)):32, (iolist_to_binary(EncodedEntries))/binary >>.

%% @doc Decode data_roots_to_binary/1 payload.
%% Returns {ok, {TXRoot, BlockSize, Entries}} (entries in encoded order) or
%% {error, Reason}. The input is only accepted when the TX root is either
%% absent (size 0) or a 32-byte hash AND the declared entry count does not
%% exceed ?BLOCK_TX_COUNT_LIMIT. The previous guard combined the three
%% conditions with ';' (OR), which let a valid TX-root size bypass the count
%% limit and a small count bypass the TX-root size check.
binary_to_data_roots(<< TXRootSize:8, TXRoot:TXRootSize/binary, BlockSizeSize:16,
		BlockSize:(BlockSizeSize*8), Count:32, Rest/binary >>)
		when (TXRootSize == 0 orelse TXRootSize == 32)
			andalso Count =< ?BLOCK_TX_COUNT_LIMIT ->
	case catch binary_to_data_root_entries(Count, Rest, []) of
		{ok, Entries, <<>>} ->
			%% The payload must be fully consumed.
			{ok, {TXRoot, BlockSize, lists:reverse(Entries)}};
		{ok, _Entries, _Tail} ->
			{error, invalid_input3};
		{'EXIT', _} ->
			{error, exception};
		Error ->
			Error
	end;
binary_to_data_roots(_Other) ->
	{error, invalid_input1}.

%% Parse Count data root entries: a 32-byte data root followed by
%% size-prefixed TX size, TX start offset, and TX path. Accumulates in
%% reverse order; the caller reverses.
binary_to_data_root_entries(0, Bin, Acc) ->
	{ok, Acc, Bin};
binary_to_data_root_entries(N, << DataRoot:32/binary, TXSizeSize:8, TXSize:(TXSizeSize*8),
		TXStartSize:8, TXStartOffset:(TXStartSize*8), TXPathSize:24,
		TXPath:TXPathSize/binary, Rest/binary >>, Acc) when N > 0 ->
	binary_to_data_root_entries(N - 1, Rest, [{DataRoot, TXSize, TXStartOffset, TXPath} | Acc]);
binary_to_data_root_entries(_N, _Bin, _Acc) ->
	{error, invalid_input2}.

%% @doc Take a JSON struct and produce JSON string.
jsonify(JSONStruct) ->
	iolist_to_binary(jiffy:encode(JSONStruct)).

%% @doc Decode JSON string into a JSON struct.
%% Throws {error, Reason} on invalid JSON.
%% @deprecated In favor of json_decode/1
dejsonify(JSON) ->
	case json_decode(JSON) of
		{ok, V} -> V;
		{error, Reason} -> throw({error, Reason})
	end.

%% @doc Decode a JSON binary; returns {ok, Term} or {error, Reason}.
json_decode(JSON) ->
	json_decode(JSON, []).

%% @doc Decode a JSON binary with the given jiffy options; exceptions raised
%% by the decoder are converted into {error, Reason}.
json_decode(JSON, JiffyOpts) ->
	case catch jiffy:decode(JSON, JiffyOpts) of
		{'EXIT', {Reason, _Stacktrace}} ->
			{error, Reason};
		DecodedJSON ->
			{ok, DecodedJSON}
	end.

%% @doc Remove the given keys from a proplist.
delete_keys([], Proplist) ->
	Proplist;
delete_keys([Key | Keys], Proplist) ->
	delete_keys(Keys, lists:keydelete(Key, 1, Proplist)).

%% @doc Convert parsed JSON blocks fields from a HTTP request into a block.
%% Accepts either a JSON binary or the already-decoded {Proplist} struct.
%% Only pre-2.6 blocks are accepted (the match on 'true' below asserts it);
%% several fields change representation across the 1.6/1.8/2.4/2.5 forks.
json_struct_to_block(JSONBlock) when is_binary(JSONBlock) ->
	json_struct_to_block(dejsonify(JSONBlock));
json_struct_to_block({BlockStruct}) ->
	Height = find_value(<<"height">>, BlockStruct),
	true = is_integer(Height) andalso Height < ar_fork:height_2_6(),
	Fork_2_5 = ar_fork:height_2_5(),
	TXIDs = find_value(<<"txs">>, BlockStruct),
	WalletList = find_value(<<"wallet_list">>, BlockStruct),
	HashList = find_value(<<"hash_list">>, BlockStruct),
	TagsValue = find_value(<<"tags">>, BlockStruct),
	%% From 2.5 on the tags are base64url-encoded; before that they are
	%% accepted verbatim but limited to 2048 bytes in total.
	Tags = case Height >= Fork_2_5 of
		true ->
			[ar_util:decode(Tag) || Tag <- TagsValue];
		false ->
			true = (byte_size(list_to_binary(TagsValue)) =< 2048),
			TagsValue
	end,
	Fork_1_8 = ar_fork:height_1_8(),
	Fork_1_6 = ar_fork:height_1_6(),
	CDiff = case find_value(<<"cumulative_diff">>, BlockStruct) of
		_ when Height < Fork_1_6 -> 0;
		undefined -> 0; % In case it's an invalid block (in the pre-fork format).
		BinaryCDiff when Height >= Fork_1_8 -> binary_to_integer(BinaryCDiff);
		CD -> CD
	end,
	%% From 1.8 on the difficulty is transmitted as a binary-encoded integer.
	Diff = case find_value(<<"diff">>, BlockStruct) of
		BinaryDiff when Height >= Fork_1_8 -> binary_to_integer(BinaryDiff);
		D -> D
	end,
	MR = case find_value(<<"hash_list_merkle">>, BlockStruct) of
		_ when Height < Fork_1_6 -> <<>>;
		undefined -> <<>>; % In case it's an invalid block (in the pre-fork format).
		R -> ar_util:decode(R)
	end,
	RewardAddr = case find_value(<<"reward_addr">>, BlockStruct) of
		<<"unclaimed">> -> unclaimed;
		AddrBinary -> AddrBinary
	end,
	RewardAddr2 = case RewardAddr of
		unclaimed ->
			unclaimed;
		_ ->
			ar_wallet:base64_address_with_optional_checksum_to_decoded_address(RewardAddr)
	end,
	%% From 2.4 on these three fields are binary-encoded integers.
	{RewardPool, BlockSize, WeaveSize} = case Height >= ar_fork:height_2_4() of
		true ->
			{binary_to_integer(find_value(<<"reward_pool">>, BlockStruct)),
				binary_to_integer(find_value(<<"block_size">>, BlockStruct)),
				binary_to_integer(find_value(<<"weave_size">>, BlockStruct))};
		false ->
			{find_value(<<"reward_pool">>, BlockStruct),
				find_value(<<"block_size">>, BlockStruct),
				find_value(<<"weave_size">>, BlockStruct)}
	end,
	%% The USD/AR rates and packing thresholds only exist from 2.5 on.
	{Rate, ScheduledRate, Packing_2_5_Threshold, StrictDataSplitThreshold} =
		case Height >= Fork_2_5 of
			true ->
				[RateDividendBinary, RateDivisorBinary] =
					find_value(<<"usd_to_ar_rate">>, BlockStruct),
				[ScheduledRateDividendBinary, ScheduledRateDivisorBinary] =
					find_value(<<"scheduled_usd_to_ar_rate">>, BlockStruct),
				{{binary_to_integer(RateDividendBinary),
						binary_to_integer(RateDivisorBinary)},
					{binary_to_integer(ScheduledRateDividendBinary),
						binary_to_integer(ScheduledRateDivisorBinary)},
					binary_to_integer(find_value(<<"packing_2_5_threshold">>, BlockStruct)),
					binary_to_integer(
						find_value(<<"strict_data_split_threshold">>, BlockStruct))};
			false ->
				{undefined, undefined, undefined, undefined}
		end,
	Timestamp = find_value(<<"timestamp">>, BlockStruct),
	true = is_integer(Timestamp),
	LastRetarget = find_value(<<"last_retarget">>, BlockStruct),
	true = is_integer(LastRetarget),
	DecodedTXIDs = [ar_util:decode(TXID) || TXID <- TXIDs],
	%% Every transaction identifier must decode to exactly 32 bytes.
	[] = [TXID || TXID <- DecodedTXIDs, byte_size(TXID) /= 32],
	#block{
		nonce = ar_util:decode(find_value(<<"nonce">>, BlockStruct)),
		previous_block = ar_util:decode(find_value(<<"previous_block">>, BlockStruct)),
		timestamp = Timestamp,
		last_retarget = LastRetarget,
		diff = Diff,
		height = Height,
		hash = ar_util:decode(find_value(<<"hash">>, BlockStruct)),
		indep_hash = ar_util:decode(find_value(<<"indep_hash">>, BlockStruct)),
		txs = DecodedTXIDs,
		hash_list = case HashList of
			undefined -> unset;
			_ -> [ar_util:decode(Hash) || Hash <- HashList]
		end,
		wallet_list = ar_util:decode(WalletList),
		reward_addr = RewardAddr2,
		tags = Tags,
		reward_pool = RewardPool,
		weave_size = WeaveSize,
		block_size = BlockSize,
		cumulative_diff = CDiff,
		hash_list_merkle = MR,
		tx_root = case find_value(<<"tx_root">>, BlockStruct) of
			undefined -> <<>>;
			Root -> ar_util:decode(Root)
		end,
		poa = case find_value(<<"poa">>, BlockStruct) of
			undefined -> #poa{};
			POAStruct -> json_struct_to_poa(POAStruct)
		end,
		usd_to_ar_rate = Rate,
		scheduled_usd_to_ar_rate = ScheduledRate,
		packing_2_5_threshold = Packing_2_5_Threshold,
		strict_data_split_threshold = StrictDataSplitThreshold
	}.

%% @doc Convert a transaction record into a JSON struct.
tx_to_json_struct(
	#tx{
		id = ID,
		format = Format,
		last_tx = Last,
		owner = Owner,
		tags = Tags,
		target = Target,
		quantity = Quantity,
		data = Data,
		reward = Reward,
		signature = Sig,
		signature_type = SigType,
		data_size = DataSize,
		data_root = DataRoot,
		denomination = Denomination
	}) ->
	%% ECDSA owners are recoverable from the signature, so the key is not
	%% serialized.
	Owner2 = case SigType of ?ECDSA_KEY_TYPE -> <<>>; _ -> Owner end,
	Fields = [
		{format, case Format of undefined -> 1; _ -> Format end},
		{id, ar_util:encode(ID)},
		{last_tx, ar_util:encode(Last)},
		{owner, ar_util:encode(Owner2)},
		{tags,
			lists:map(
				fun({Name, Value}) ->
					{
						[
							{name, ar_util:encode(Name)},
							{value, ar_util:encode(Value)}
						]
					}
				end,
				Tags
			)
		},
		{target, ar_util:encode(Target)},
		{quantity, integer_to_binary(Quantity)},
		{data, ar_util:encode(Data)},
		{data_size, integer_to_binary(DataSize)},
		{data_tree, []},
		{data_root, ar_util:encode(DataRoot)},
		{reward, integer_to_binary(Reward)},
		{signature, ar_util:encode(Sig)}
	],
	%% The denomination field is only serialized when positive.
	Fields2 = case Denomination > 0 of
		true ->
			Fields ++ [{denomination, integer_to_binary(Denomination)}];
		false ->
			Fields
	end,
	{Fields2}.
%% @doc Serialize a #poa record as a JSON struct; the unpacked_chunk field
%% is only included when non-empty.
poa_to_json_struct(#poa{ option = Option, tx_path = TXPath, data_path = DataPath,
		chunk = Chunk, unpacked_chunk = UnpackedChunk }) ->
	Base = [
		{option, integer_to_binary(Option)},
		{tx_path, ar_util:encode(TXPath)},
		{data_path, ar_util:encode(DataPath)},
		{chunk, ar_util:encode(Chunk)}
	],
	case UnpackedChunk of
		<<>> ->
			{Base};
		_ ->
			{Base ++ [{unpacked_chunk, ar_util:encode(UnpackedChunk)}]}
	end.

%% @doc Serialize a #nonce_limiter_info record as a JSON struct. The VDF
%% difficulty fields are only included from fork 2.7 on.
nonce_limiter_info_to_json_struct(Height, #nonce_limiter_info{ output = Output,
		global_step_number = StepNumber, seed = Seed, next_seed = NextSeed,
		partition_upper_bound = UpperBound, next_partition_upper_bound = NextUpperBound,
		last_step_checkpoints = LastStepCheckpoints, steps = Steps,
		prev_output = PrevOutput, vdf_difficulty = VDFDifficulty,
		next_vdf_difficulty = NextVDFDifficulty }) ->
	Base = [
		{output, ar_util:encode(Output)},
		{global_step_number, StepNumber},
		{seed, ar_util:encode(Seed)},
		{next_seed, ar_util:encode(NextSeed)},
		{zone_upper_bound, UpperBound},
		{next_zone_upper_bound, NextUpperBound},
		{prev_output, ar_util:encode(PrevOutput)},
		{last_step_checkpoints,
			[ar_util:encode(Checkpoint) || Checkpoint <- LastStepCheckpoints]},
		%% Keeping 'checkpoints' as JSON key (rather than 'steps') for backwards
		%% compatibility.
		{checkpoints, [ar_util:encode(Step) || Step <- Steps]}
	],
	case Height >= ar_fork:height_2_7() of
		false ->
			{Base};
		true ->
			{Base ++ [{vdf_difficulty, integer_to_binary(VDFDifficulty)},
					{next_vdf_difficulty, integer_to_binary(NextVDFDifficulty)}]}
	end.

%% @doc Serialize a {PoA1Diff, Diff} pair as a two-element JSON list of
%% binary-encoded integers.
diff_pair_to_json_list(DiffPair) ->
	{PoA1Diff, Diff} = DiffPair,
	[ar_util:integer_to_binary(PoA1Diff), ar_util:integer_to_binary(Diff)].
%% @doc Parse a proof-of-access JSON struct (proplist form) into a #poa
%% record; a missing unpacked_chunk field is treated as empty.
json_struct_to_poa({Props}) ->
	EncodedUnpackedChunk =
		case find_value(<<"unpacked_chunk">>, Props) of
			undefined -> <<>>;
			Encoded -> Encoded
		end,
	#poa{
		option = binary_to_integer(find_value(<<"option">>, Props)),
		tx_path = ar_util:decode(find_value(<<"tx_path">>, Props)),
		data_path = ar_util:decode(find_value(<<"data_path">>, Props)),
		chunk = ar_util:decode(find_value(<<"chunk">>, Props)),
		unpacked_chunk = ar_util:decode(EncodedUnpackedChunk)
	}.

%% @doc Parse a proof-of-access JSON struct (map form) into a #poa record.
json_struct_to_poa_from_map(Map) ->
	#poa{
		option = binary_to_integer(maps:get(<<"option">>, Map)),
		tx_path = ar_util:decode(maps:get(<<"tx_path">>, Map)),
		data_path = ar_util:decode(maps:get(<<"data_path">>, Map)),
		chunk = ar_util:decode(maps:get(<<"chunk">>, Map)),
		unpacked_chunk = ar_util:decode(maps:get(<<"unpacked_chunk">>, Map, <<>>))
	}.

%% @doc Convert parsed JSON tx fields from a HTTP request into a
%% transaction record.
json_struct_to_tx(JSONTX) when is_binary(JSONTX) ->
	json_struct_to_tx(dejsonify(JSONTX));
json_struct_to_tx({TXStruct}) ->
	json_struct_to_tx(TXStruct, true).

%% @doc Like json_struct_to_tx/1, but never recomputes the data size of
%% format-1 transactions from the data payload.
json_struct_to_v1_tx(JSONTX) when is_binary(JSONTX) ->
	{TXStruct} = dejsonify(JSONTX),
	json_struct_to_tx(TXStruct, false).
%% Convert a proplist of JSON tx fields into a #tx record. ComputeDataSize
%% selects whether the data size of format-1 transactions is recomputed from
%% the decoded payload (see parse_data_size/4). For ECDSA transactions the
%% owner key is recovered from the signature. Crashes (for the caller to
%% handle) on malformed fields.
json_struct_to_tx(TXStruct, ComputeDataSize) ->
	Tags = case find_value(<<"tags">>, TXStruct) of
		undefined -> [];
		Xs -> Xs
	end,
	Data = ar_util:decode(find_value(<<"data">>, TXStruct)),
	%% A missing format defaults to 1; the field may arrive as an integer or
	%% as a binary-encoded integer.
	Format = case find_value(<<"format">>, TXStruct) of
		undefined -> 1;
		N when is_integer(N) -> N;
		N when is_binary(N) -> binary_to_integer(N)
	end,
	%% When present, the denomination must be a positive integer.
	Denomination = case find_value(<<"denomination">>, TXStruct) of
		undefined ->
			0;
		EncodedDenomination ->
			MaybeDenomination = binary_to_integer(EncodedDenomination),
			true = MaybeDenomination > 0,
			MaybeDenomination
	end,
	TXID = ar_util:decode(find_value(<<"id">>, TXStruct)),
	32 = byte_size(TXID),
	Owner = ar_util:decode(find_value(<<"owner">>, TXStruct)),
	Sig = ar_util:decode(find_value(<<"signature">>, TXStruct)),
	SigType = set_sig_type_from_pub_key(Owner, Sig),
	TX = #tx{
		format = Format,
		id = TXID,
		last_tx = ar_util:decode(find_value(<<"last_tx">>, TXStruct)),
		owner = Owner,
		tags = [{ar_util:decode(Name), ar_util:decode(Value)}
				%% Only the elements matching this pattern are included in the list.
				|| {[{<<"name">>, Name}, {<<"value">>, Value}]} <- Tags],
		target = ar_wallet:base64_address_with_optional_checksum_to_decoded_address(
				find_value(<<"target">>, TXStruct)),
		quantity = binary_to_integer(find_value(<<"quantity">>, TXStruct)),
		data = Data,
		reward = binary_to_integer(find_value(<<"reward">>, TXStruct)),
		signature = Sig,
		signature_type = SigType,
		data_size = parse_data_size(Format, TXStruct, Data, ComputeDataSize),
		data_root = case find_value(<<"data_root">>, TXStruct) of
			undefined -> <<>>;
			DR -> ar_util:decode(DR)
		end,
		denomination = Denomination
	},
	case SigType of
		?ECDSA_KEY_TYPE ->
			%% The ECDSA owner key is not transmitted; recover it from the
			%% signature over the signature data segment.
			DataSegment = ar_tx:generate_signature_data_segment(TX),
			Owner2 = ar_wallet:recover_key(DataSegment, Sig, SigType),
			TX#tx{ owner = Owner2, owner_address = ar_wallet:to_address(Owner2, SigType) };
		?RSA_KEY_TYPE ->
			TX#tx{ owner_address = ar_wallet:to_address(Owner, SigType) }
	end.
%% @doc Infer the signature type from the serialized owner and signature:
%% an empty owner with a non-empty signature denotes ECDSA (the key is
%% recovered from the signature); everything else maps to RSA.
set_sig_type_from_pub_key(_Owner, <<>>) ->
	%% Transactions with the empty signatures are used in some old tests,
	%% e.g., ar_http_iface_tests.erl.
	?RSA_KEY_TYPE;
set_sig_type_from_pub_key(Owner, _Sig) ->
	case Owner of
		<<>> ->
			?ECDSA_KEY_TYPE;
		_ ->
			?RSA_KEY_TYPE
	end.

%% @doc Parse a two-element JSON list of binary-encoded integers into a
%% {PoA1Diff, Diff} pair; 'undefined' maps to {0, 0}.
json_list_to_diff_pair(List) ->
	[PoA1DiffBin, DiffBin] = case List of
		undefined ->
			[<<"0">>, <<"0">>];
		_ ->
			List
	end,
	PoA1Diff = ar_util:binary_to_integer(PoA1DiffBin),
	Diff = ar_util:binary_to_integer(DiffBin),
	{PoA1Diff, Diff}.

%% For format-1 transactions (when requested) the data size is recomputed
%% from the decoded payload; otherwise it is read from the "data_size" field.
parse_data_size(1, _TXStruct, Data, true) ->
	byte_size(Data);
parse_data_size(_Format, TXStruct, _Data, _ComputeDataSize) ->
	binary_to_integer(find_value(<<"data_size">>, TXStruct)).

%% @doc Deserialize and validate a wallet-chunk response received from a
%% peer; returns {ok, Map} or the caught exception term on malformed input.
etf_to_wallet_chunk_response(ETF) ->
	catch etf_to_wallet_chunk_response_unsafe(ETF).

%% Crashes (caught by the caller above) unless the term decodes to a map with
%% a valid cursor and well-formed wallet entries in either the 2-tuple
%% (pre-denomination) or 4-tuple (with denomination and mining permission)
%% form.
etf_to_wallet_chunk_response_unsafe(ETF) ->
	#{ next_cursor := NextCursor, wallets := Wallets } = binary_to_term(ETF, [safe]),
	true = is_binary(NextCursor) orelse NextCursor == last,
	lists:foreach(
		fun	({Addr, {Balance, LastTX}})
					when is_binary(Addr), is_binary(LastTX),
						is_integer(Balance), Balance >= 0 ->
				ok;
			({Addr, {Balance, LastTX, Denomination, MiningPermission}})
					when is_binary(Addr), is_binary(LastTX),
						is_integer(Balance), Balance >= 0,
						is_integer(Denomination), Denomination > 0,
						is_boolean(MiningPermission) ->
				ok
		end,
		Wallets
	),
	{ok, #{ next_cursor => NextCursor, wallets => Wallets }}.

%% @doc Convert a wallet list into a JSON struct.
%% The order of the wallets is somewhat weird for historical reasons. If the reward address
%% appears in the list for the first time, it is placed in the first position. Except for that,
%% wallets are sorted in the alphabetical order.
wallet_list_to_json_struct(RewardAddr, IsRewardAddrNew, WL) ->
	%% Fold over the tree, skipping the reward wallet when it is new — it is
	%% prepended separately below so that it comes first.
	List = ar_patricia_tree:foldr(
		fun(Addr, Value, Acc) ->
			case Addr == RewardAddr andalso IsRewardAddrNew of
				true ->
					Acc;
				false ->
					[wallet_to_json_struct(Addr, Value) | Acc]
			end
		end,
		[],
		WL
	),
	case {ar_patricia_tree:get(RewardAddr, WL), IsRewardAddrNew} of
		{not_found, _} ->
			List;
		{_, false} ->
			List;
		{Value, true} ->
			%% Place the reward wallet first, for backwards-compatibility.
			[wallet_to_json_struct(RewardAddr, Value) | List]
	end.

%% Serialize a single wallet; the 2- and 4-tuple forms correspond to the
%% pre- and post-denomination wallet representations.
wallet_to_json_struct(Address, {Balance, LastTX}) ->
	{[{address, ar_util:encode(Address)},
		{balance, list_to_binary(integer_to_list(Balance))},
		{last_tx, ar_util:encode(LastTX)}]};
wallet_to_json_struct(Address, {Balance, LastTX, Denomination, MiningPermission}) ->
	{[{address, ar_util:encode(Address)},
		{balance, list_to_binary(integer_to_list(Balance))},
		{last_tx, ar_util:encode(LastTX)},
		{denomination, Denomination},
		{mining_permission, MiningPermission}]}.

%% @doc Convert parsed JSON from fields into a valid wallet list.
json_struct_to_wallet_list(JSON) when is_binary(JSON) ->
	json_struct_to_wallet_list(dejsonify(JSON));
json_struct_to_wallet_list(WalletsStruct) ->
	lists:foldl(
		fun(WalletStruct, Acc) ->
			{Address, Value} = json_struct_to_wallet(WalletStruct),
			ar_patricia_tree:insert(Address, Value, Acc)
		end,
		ar_patricia_tree:new(),
		WalletsStruct
	).

%% Parse and validate a single wallet JSON struct; crashes (for the caller to
%% handle) on a negative balance or an invalid denomination/permission pair.
json_struct_to_wallet({Wallet}) ->
	Address = ar_util:decode(find_value(<<"address">>, Wallet)),
	Balance = binary_to_integer(find_value(<<"balance">>, Wallet)),
	true = Balance >= 0,
	LastTX = ar_util:decode(find_value(<<"last_tx">>, Wallet)),
	case find_value(<<"denomination">>, Wallet) of
		undefined ->
			{Address, {Balance, LastTX}};
		Denomination when is_integer(Denomination), Denomination > 0 ->
			MiningPermission = find_value(<<"mining_permission">>, Wallet),
			true = is_boolean(MiningPermission),
			{Address, {Balance, LastTX, Denomination, MiningPermission}}
	end.

%% @doc Find the value associated with a key in a parsed JSON structure list.
find_value(Key, List) ->
	case lists:keyfind(Key, 1, List) of
		false -> undefined;
		{Key, Value} -> Value
	end.

%% @doc Convert an ARQL query into a JSON struct
query_to_json_struct({Op, Expr1, Expr2}) ->
	{[
		{op, list_to_binary(atom_to_list(Op))},
		{expr1, query_to_json_struct(Expr1)},
		{expr2, query_to_json_struct(Expr2)}
	]};
query_to_json_struct(Leaf) ->
	Leaf.

%% @doc Convert parsed JSON from fields into an internal ARQL query.
json_struct_to_query(QueryJSON) ->
	case json_decode(QueryJSON) of
		{ok, Decoded} ->
			{ok, do_json_struct_to_query(Decoded)};
		{error, _} ->
			{error, invalid_json}
	end.

%% list_to_existing_atom prevents atom-table exhaustion from untrusted input.
do_json_struct_to_query({Query}) ->
	{
		list_to_existing_atom(binary_to_list(find_value(<<"op">>, Query))),
		do_json_struct_to_query(find_value(<<"expr1">>, Query)),
		do_json_struct_to_query(find_value(<<"expr2">>, Query))
	};
do_json_struct_to_query(Leaf) ->
	Leaf.

%% @doc Generate a JSON structure representing a block index.
block_index_to_json_struct(BI) ->
	lists:map(
		fun({BH, WeaveSize, TXRoot}) ->
				HashOnly = [{<<"hash">>, ar_util:encode(BH)}],
				WithWeaveSize =
					case WeaveSize of
						not_set -> HashOnly;
						_ -> [{<<"weave_size">>, integer_to_binary(WeaveSize)} | HashOnly]
					end,
				WithTXRoot =
					case TXRoot of
						not_set -> WithWeaveSize;
						_ -> [{<<"tx_root">>, ar_util:encode(TXRoot)} | WithWeaveSize]
					end,
				{WithTXRoot};
			(BH) ->
				ar_util:encode(BH)
		end,
		BI
	).

%% @doc Convert a JSON structure into a block index.
json_struct_to_block_index(JSONStruct) ->
	lists:map(
		fun(Hash) when is_binary(Hash) ->
				ar_util:decode(Hash);
			({Props}) ->
				BH = ar_util:decode(find_value(<<"hash">>, Props)),
				WeaveSize =
					case find_value(<<"weave_size">>, Props) of
						undefined -> not_set;
						WS -> binary_to_integer(WS)
					end,
				TXRoot =
					case find_value(<<"tx_root">>, Props) of
						undefined -> not_set;
						R -> ar_util:decode(R)
					end,
				{BH, WeaveSize, TXRoot}
		end,
		JSONStruct
	).
%% @doc Render a proof-of-access map as a JSON-ready map with base64url-encoded
%% fields; the optional absolute_end_offset and chunk_size entries are carried
%% over as binary-encoded integers when present.
poa_map_to_json_map(Map) ->
	#{ chunk := Chunk, tx_path := TXPath, data_path := DataPath, packing := Packing } = Map,
	PackingBin = iolist_to_binary(encode_packing(Packing, true)),
	Base = #{
		chunk => ar_util:encode(Chunk),
		tx_path => ar_util:encode(TXPath),
		data_path => ar_util:encode(DataPath),
		packing => PackingBin
	},
	WithOffset =
		case maps:get(absolute_end_offset, Map, not_found) of
			not_found -> Base;
			EndOffset -> Base#{ absolute_end_offset => integer_to_binary(EndOffset) }
		end,
	case maps:get(chunk_size, Map, not_found) of
		not_found -> WithOffset;
		ChunkSize -> WithOffset#{ chunk_size => integer_to_binary(ChunkSize) }
	end.

%% @doc Render a chunkless proof-of-access map as a JSON-ready map; the
%% optional absolute_end_offset entry is carried over when present.
poa_no_chunk_map_to_json_map(Map) ->
	#{ tx_path := TXPath, data_path := DataPath } = Map,
	Base = #{
		tx_path => ar_util:encode(TXPath),
		data_path => ar_util:encode(DataPath)
	},
	case maps:get(absolute_end_offset, Map, not_found) of
		not_found -> Base;
		EndOffset -> Base#{ absolute_end_offset => integer_to_binary(EndOffset) }
	end.

%% @doc Parse a JSON chunk-proof map into an internal proof map; fails with
%% {unsupported_packing, _} when the packing field cannot be decoded.
json_map_to_poa_map(JSON) ->
	Decoded = #{
		data_root => ar_util:decode(maps:get(<<"data_root">>, JSON, <<>>)),
		chunk => ar_util:decode(maps:get(<<"chunk">>, JSON)),
		data_path => ar_util:decode(maps:get(<<"data_path">>, JSON)),
		tx_path => ar_util:decode(maps:get(<<"tx_path">>, JSON, <<>>)),
		data_size => binary_to_integer(maps:get(<<"data_size">>, JSON, <<"0">>))
	},
	PackingJSON = maps:get(<<"packing">>, JSON, <<"unpacked">>),
	WithPacking =
		case decode_packing(PackingJSON, error) of
			error -> error({unsupported_packing, PackingJSON});
			Packing -> maps:put(packing, Packing, Decoded)
		end,
	case maps:get(<<"offset">>, JSON, none) of
		none -> WithPacking;
		Offset -> WithPacking#{ offset => binary_to_integer(Offset) }
	end.

%% @doc Map a signature type tuple to its wire name.
signature_type_to_binary(SigType) ->
	case SigType of
		{?RSA_SIGN_ALG, 65537} -> <<"PS256_65537">>;
		{?ECDSA_SIGN_ALG, secp256k1} -> <<"ES256K">>;
		{?EDDSA_SIGN_ALG, ed25519} -> <<"Ed25519">>
	end.
%% @doc Map a signature type wire name back to the signature type tuple;
%% unknown names fall back to RSA for backwards-compatibility.
binary_to_signature_type(List) ->
	case List of
		undefined -> {?RSA_SIGN_ALG, 65537};
		<<"PS256_65537">> -> {?RSA_SIGN_ALG, 65537};
		<<"ES256K">> -> {?ECDSA_SIGN_ALG, secp256k1};
		<<"Ed25519">> -> {?EDDSA_SIGN_ALG, ed25519};
		%% For backwards-compatibility.
		_ -> {?RSA_SIGN_ALG, 65537}
	end.

%% @doc Serialize a #mining_candidate record as a JSON struct for the
%% coordinated-mining/pool protocol. The optional fields (h1, h2, nonce,
%% poa2, preimage) are appended only when set (see encode_if_set/4).
candidate_to_json_struct(
		#mining_candidate{
			cm_diff = DiffPair, cm_h1_list = H1List, h0 = H0, h1 = H1, h2 = H2,
			mining_address = MiningAddress, nonce = Nonce, next_seed = NextSeed,
			next_vdf_difficulty = NextVDFDifficulty,
			nonce_limiter_output = NonceLimiterOutput,
			partition_number = PartitionNumber, partition_number2 = PartitionNumber2,
			partition_upper_bound = PartitionUpperBound, poa2 = PoA2,
			preimage = Preimage, seed = Seed, session_key = SessionKey,
			start_interval_number = StartIntervalNumber, step_number = StepNumber,
			label = Label, packing_difficulty = PackingDifficulty,
			replica_format = ReplicaFormat
		}) ->
	JSON = [
		{cm_diff, diff_pair_to_json_list(DiffPair)},
		{cm_h1_list, h1_list_to_json_struct(H1List)},
		{mining_address, ar_util:encode(MiningAddress)},
		{h0, ar_util:encode(H0)},
		{partition_number, integer_to_binary(PartitionNumber)},
		{partition_number2, integer_to_binary(PartitionNumber2)},
		{partition_upper_bound, integer_to_binary(PartitionUpperBound)},
		{seed, ar_util:encode(Seed)},
		{next_seed, ar_util:encode(NextSeed)},
		{next_vdf_difficulty, integer_to_binary(NextVDFDifficulty)},
		{session_key, session_key_json_struct(SessionKey)},
		{start_interval_number, integer_to_binary(StartIntervalNumber)},
		{step_number, integer_to_binary(StepNumber)},
		{nonce_limiter_output, ar_util:encode(NonceLimiterOutput)},
		{label, Label},
		{packing_difficulty, PackingDifficulty},
		{replica_format, ReplicaFormat}
	],
	JSON2 = encode_if_set(JSON, h1, H1, fun ar_util:encode/1),
	JSON3 = encode_if_set(JSON2, h2, H2, fun ar_util:encode/1),
	JSON4 = encode_if_set(JSON3, nonce, Nonce, fun integer_to_binary/1),
	JSON5 = encode_if_set(JSON4, poa2, PoA2, fun poa_to_json_struct/1),
	{encode_if_set(JSON5, preimage, Preimage, fun ar_util:encode/1)}.

%% Serialize the {H1, Nonce} pairs of a coordinated-mining H1 list.
h1_list_to_json_struct(H1List) ->
	lists:map(
		fun ({H1, Nonce}) ->
			{[
				{h1, ar_util:encode(H1)},
				{nonce, integer_to_binary(Nonce)}
			]}
		end, H1List).

%% Serialize a {NextSeed, Interval, NextDifficulty} VDF session key.
session_key_json_struct({NextSeed, Interval, NextDifficulty}) ->
	{[
		{next_seed, ar_util:encode(NextSeed)},
		{interval, integer_to_binary(Interval)},
		{next_difficulty, integer_to_binary(NextDifficulty)}
	]}.

%% @doc Parse a JSON map into a #mining_candidate record. Optional fields
%% default to not_set (see decode_if_set/4); the packing difficulty and
%% replica format combination is validated with the match on 'true' below.
json_map_to_candidate(JSON) ->
	DiffPair = json_list_to_diff_pair(maps:get(<<"cm_diff">>, JSON)),
	H1List = json_struct_to_h1_list(maps:get(<<"cm_h1_list">>, JSON)),
	H0 = ar_util:decode(maps:get(<<"h0">>, JSON)),
	H1 = decode_if_set(JSON, <<"h1">>, fun ar_util:decode/1, not_set),
	H2 = decode_if_set(JSON, <<"h2">>, fun ar_util:decode/1, not_set),
	MiningAddress = ar_util:decode(maps:get(<<"mining_address">>, JSON)),
	NextSeed = ar_util:decode(maps:get(<<"next_seed">>, JSON)),
	NextVDFDifficulty = binary_to_integer(maps:get(<<"next_vdf_difficulty">>, JSON)),
	Nonce = decode_if_set(JSON, <<"nonce">>, fun binary_to_integer/1, not_set),
	NonceLimiterOutput = ar_util:decode(maps:get(<<"nonce_limiter_output">>, JSON)),
	PartitionNumber = binary_to_integer(maps:get(<<"partition_number">>, JSON)),
	PartitionNumber2 = binary_to_integer(maps:get(<<"partition_number2">>, JSON)),
	PartitionUpperBound = binary_to_integer(maps:get(<<"partition_upper_bound">>, JSON)),
	PoA2 = decode_if_set(JSON, <<"poa2">>, fun json_struct_to_poa_from_map/1, not_set),
	Preimage = decode_if_set(JSON, <<"preimage">>, fun ar_util:decode/1, not_set),
	Seed = ar_util:decode(maps:get(<<"seed">>, JSON)),
	SessionKey = json_struct_to_session_key(maps:get(<<"session_key">>, JSON)),
	StartIntervalNumber = binary_to_integer(maps:get(<<"start_interval_number">>, JSON)),
	StepNumber = binary_to_integer(maps:get(<<"step_number">>, JSON)),
	Label = maps:get(<<"label">>, JSON, <<"not_set">>),
	PackingDifficulty = maps:get(<<"packing_difficulty">>, JSON, 0),
	ReplicaFormat = maps:get(<<"replica_format">>, JSON, 0),
	%% Replica format 1 demands the dedicated 2.9 packing difficulty; format 0
	%% allows any difficulty up to the maximum.
	true = (PackingDifficulty >= 0 andalso PackingDifficulty =< ?MAX_PACKING_DIFFICULTY
			andalso ReplicaFormat == 0)
		orelse (ReplicaFormat == 1
			andalso PackingDifficulty == ?REPLICA_2_9_PACKING_DIFFICULTY),
	#mining_candidate{
		cm_diff = DiffPair,
		cm_h1_list = H1List,
		h0 = H0,
		h1 = H1,
		h2 = H2,
		mining_address = MiningAddress,
		next_seed = NextSeed,
		next_vdf_difficulty = NextVDFDifficulty,
		nonce = Nonce,
		nonce_limiter_output = NonceLimiterOutput,
		partition_number = PartitionNumber,
		partition_number2 = PartitionNumber2,
		partition_upper_bound = PartitionUpperBound,
		poa2 = PoA2,
		preimage = Preimage,
		seed = Seed,
		session_key = SessionKey,
		start_interval_number = StartIntervalNumber,
		step_number = StepNumber,
		label = Label,
		packing_difficulty = PackingDifficulty,
		replica_format = ReplicaFormat
	}.

%% Parse the JSON form produced by h1_list_to_json_struct/1.
json_struct_to_h1_list(JSON) ->
	lists:map(
		fun (JSONElement) ->
			H1 = ar_util:decode(maps:get(<<"h1">>, JSONElement)),
			Nonce = binary_to_integer(maps:get(<<"nonce">>, JSONElement)),
			{H1, Nonce}
		end, JSON).

%% Parse the JSON form produced by session_key_json_struct/1.
json_struct_to_session_key(JSON) ->
	{
		ar_util:decode(maps:get(<<"next_seed">>, JSON)),
		binary_to_integer(maps:get(<<"interval">>, JSON)),
		binary_to_integer(maps:get(<<"next_difficulty">>, JSON))
	}.
solution_to_json_struct( #mining_solution{ last_step_checkpoints = LastStepCheckpoints, mining_address = MiningAddress, next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, nonce = Nonce, nonce_limiter_output = NonceLimiterOutput, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, poa1 = PoA1, poa2 = PoA2, preimage = Preimage, recall_byte1 = RecallByte1, recall_byte2 = RecallByte2, seed = Seed, solution_hash = SolutionHash, start_interval_number = StartIntervalNumber, step_number = StepNumber, steps = Steps, packing_difficulty = PackingDifficulty, replica_format = ReplicaFormat }) -> JSON = [ {last_step_checkpoints, ar_util:encode(iolist_to_binary(LastStepCheckpoints))}, {mining_address, ar_util:encode(MiningAddress)}, {nonce, Nonce}, {nonce_limiter_output, ar_util:encode(NonceLimiterOutput)}, {next_seed, ar_util:encode(NextSeed)}, {next_vdf_difficulty, integer_to_binary(NextVDFDifficulty)}, {partition_number, integer_to_binary(PartitionNumber)}, {partition_upper_bound, integer_to_binary(PartitionUpperBound)}, {poa1, poa_to_json_struct(PoA1)}, {poa2, poa_to_json_struct(PoA2)}, {preimage, ar_util:encode(Preimage)}, {recall_byte1, integer_to_binary(RecallByte1)}, {seed, ar_util:encode(Seed)}, {solution_hash, ar_util:encode(SolutionHash)}, {start_interval_number, integer_to_binary(StartIntervalNumber)}, {step_number, integer_to_binary(StepNumber)}, {steps, ar_util:encode(iolist_to_binary(Steps))}, {packing_difficulty, PackingDifficulty}, {replica_format, ReplicaFormat} ], {encode_if_set(JSON, recall_byte2, RecallByte2, fun integer_to_binary/1)}. 
%% @doc Deserialize a JSON map back into a #mining_solution{}.
%% Inverse of solution_to_json_struct/1. `next_vdf_difficulty' is accepted
%% either as a decimal binary or as a raw integer. `recall_byte2' defaults
%% to `undefined' when absent. The packing_difficulty / replica_format
%% combination is validated with the same assertion as
%% json_map_to_candidate/1.
json_map_to_solution(JSON) ->
	LastStepCheckpoints = parse_json_checkpoints(
		ar_util:decode(maps:get(<<"last_step_checkpoints">>, JSON, <<>>))),
	MiningAddress = ar_util:decode(maps:get(<<"mining_address">>, JSON)),
	NextSeed = ar_util:decode(maps:get(<<"next_seed">>, JSON)),
	NextVDFDifficulty = maps:get(<<"next_vdf_difficulty">>, JSON),
	%% Accept both the binary and the integer encodings of the difficulty.
	NextVDFDifficulty2 =
		case is_binary(NextVDFDifficulty) of
			true -> binary_to_integer(NextVDFDifficulty);
			false -> NextVDFDifficulty
		end,
	Nonce = maps:get(<<"nonce">>, JSON),
	NonceLimiterOutput = ar_util:decode(maps:get(<<"nonce_limiter_output">>, JSON)),
	PartitionNumber = binary_to_integer(maps:get(<<"partition_number">>, JSON)),
	PartitionUpperBound = binary_to_integer(maps:get(<<"partition_upper_bound">>, JSON)),
	PoA1 = json_struct_to_poa_from_map(maps:get(<<"poa1">>, JSON)),
	PoA2 = json_struct_to_poa_from_map(maps:get(<<"poa2">>, JSON)),
	Preimage = ar_util:decode(maps:get(<<"preimage">>, JSON)),
	RecallByte1 = binary_to_integer(maps:get(<<"recall_byte1">>, JSON)),
	RecallByte2 = decode_if_set(JSON, <<"recall_byte2">>, fun binary_to_integer/1, undefined),
	Seed = ar_util:decode(maps:get(<<"seed">>, JSON)),
	SolutionHash = ar_util:decode(maps:get(<<"solution_hash">>, JSON)),
	StartIntervalNumber = binary_to_integer(maps:get(<<"start_interval_number">>, JSON)),
	StepNumber = binary_to_integer(maps:get(<<"step_number">>, JSON)),
	Steps = parse_json_checkpoints(ar_util:decode(maps:get(<<"steps">>, JSON, <<>>))),
	PackingDifficulty = maps:get(<<"packing_difficulty">>, JSON, 0),
	ReplicaFormat = maps:get(<<"replica_format">>, JSON, 0),
	true = (PackingDifficulty >= 0
			andalso PackingDifficulty =< ?MAX_PACKING_DIFFICULTY
			andalso ReplicaFormat == 0)
		orelse (ReplicaFormat == 1
			andalso PackingDifficulty == ?REPLICA_2_9_PACKING_DIFFICULTY),
	#mining_solution{
		last_step_checkpoints = LastStepCheckpoints,
		mining_address = MiningAddress,
		next_seed = NextSeed,
		next_vdf_difficulty = NextVDFDifficulty2,
		nonce = Nonce,
		nonce_limiter_output = NonceLimiterOutput,
		partition_number = PartitionNumber,
		partition_upper_bound = PartitionUpperBound,
		poa1 = PoA1,
		poa2 = PoA2,
		preimage = Preimage,
		recall_byte1 = RecallByte1,
		recall_byte2 = RecallByte2,
		seed = Seed,
		solution_hash = SolutionHash,
		start_interval_number = StartIntervalNumber,
		step_number = StepNumber,
		steps = Steps,
		packing_difficulty = PackingDifficulty,
		replica_format = ReplicaFormat
	}.

%% @doc Prepend {Property, Encoder(Value)} to the proplist, unless Value is
%% not_set or undefined, in which case the proplist is returned unchanged.
encode_if_set(JSON, _JSONProperty, not_set, _Encoder) ->
	JSON;
encode_if_set(JSON, _JSONProperty, undefined, _Encoder) ->
	JSON;
encode_if_set(JSON, JSONProperty, Value, Encoder) ->
	[{JSONProperty, Encoder(Value)} | JSON].

%% @doc Decode JSONProperty from the map with Decoder, or return Default
%% when the key is absent.
decode_if_set(JSON, JSONProperty, Decoder, Default) ->
	case maps:get(JSONProperty, JSON, not_found) of
		not_found ->
			Default;
		EncodedValue ->
			Decoder(EncodedValue)
	end.

%% @doc Split a flat binary into a list of 32-byte checkpoints.
%% The input length is expected to be a multiple of 32; a trailing
%% fragment shorter than 32 bytes would make this function crash
%% (no clause matches).
parse_json_checkpoints(<<>>) ->
	[];
parse_json_checkpoints(<< Checkpoint:32/binary, Rest/binary >>) ->
	[Checkpoint | parse_json_checkpoints(Rest)].

%% @doc Serialize a #jobs{} record into a JSON struct; inverse of
%% json_struct_to_jobs/1.
jobs_to_json_struct(Jobs) ->
	#jobs{ jobs = JobList, partial_diff = PartialDiff, seed = Seed,
			next_seed = NextSeed, interval_number = IntervalNumber,
			next_vdf_difficulty = NextVDFDiff } = Jobs,
	{[{jobs, [job_to_json_struct(Job) || Job <- JobList]},
		{partial_diff, diff_pair_to_json_list(PartialDiff)},
		{seed, ar_util:encode(Seed)},
		{next_seed, ar_util:encode(NextSeed)},
		{interval_number, integer_to_binary(IntervalNumber)},
		{next_vdf_difficulty, integer_to_binary(NextVDFDiff)}
	]}.

%% @doc Serialize a single #job{} record; inverse of json_struct_to_job/1.
job_to_json_struct(Job) ->
	#job{ output = Output, global_step_number = StepNumber,
			partition_upper_bound = PartitionUpperBound } = Job,
	{[{nonce_limiter_output, ar_util:encode(Output)},
		{step_number, integer_to_binary(StepNumber)},
		{partition_upper_bound, integer_to_binary(PartitionUpperBound)}]}.
%% @doc Deserialize a {proplist} JSON struct into a #jobs{} record.
%% Missing fields fall back to empty binaries / zero, except `partial_diff'
%% which is mandatory (json_list_to_diff_pair/1 is applied to `undefined'
%% when the key is absent).
json_struct_to_jobs(Struct) ->
	{Keys} = Struct,
	PartialDiff = json_list_to_diff_pair(proplists:get_value(<<"partial_diff">>, Keys)),
	Seed = ar_util:decode(proplists:get_value(<<"seed">>, Keys, <<>>)),
	NextSeed = ar_util:decode(proplists:get_value(<<"next_seed">>, Keys, <<>>)),
	NextVDFDiff = binary_to_integer(
			proplists:get_value(<<"next_vdf_difficulty">>, Keys, <<"0">>)),
	IntervalNumber = binary_to_integer(
			proplists:get_value(<<"interval_number">>, Keys, <<"0">>)),
	Jobs = [json_struct_to_job(Job) || Job <- proplists:get_value(<<"jobs">>, Keys, [])],
	#jobs{
		jobs = Jobs,
		seed = Seed,
		next_seed = NextSeed,
		interval_number = IntervalNumber,
		next_vdf_difficulty = NextVDFDiff,
		partial_diff = PartialDiff
	}.

%% @doc Deserialize a single job struct into a #job{} record; missing
%% fields default to <<>> / 0.
json_struct_to_job(Struct) ->
	{Keys} = Struct,
	Output = ar_util:decode(proplists:get_value(<<"nonce_limiter_output">>, Keys, <<>>)),
	StepNumber = binary_to_integer(proplists:get_value(<<"step_number">>, Keys, <<"0">>)),
	PartitionUpperBound = binary_to_integer(
			proplists:get_value(<<"partition_upper_bound">>, Keys, <<"0">>)),
	#job{
		output = Output,
		global_step_number = StepNumber,
		partition_upper_bound = PartitionUpperBound
	}.

%% @doc Serialize a #partial_solution_response{} into a JSON struct.
partial_solution_response_to_json_struct(Response) ->
	#partial_solution_response{ indep_hash = H, status = S } = Response,
	{[{<<"indep_hash">>, ar_util:encode(H)},
		{<<"status">>, S}]}.

%% @doc Serialize a mining partition descriptor. The `pdiff' field is only
%% included when PackingDifficulty >= 1 (composite/replica packing).
partition_to_json_struct(Bucket, BucketSize, Addr, PackingDifficulty) ->
	Fields = [
		{bucket, Bucket},
		{bucketsize, BucketSize},
		{addr, ar_util:encode(Addr)}
	],
	Fields2 =
		case PackingDifficulty >= 1 of
			true ->
				Fields ++ [{pdiff, PackingDifficulty}];
			false ->
				Fields
		end,
	{Fields2}.

%% Used in logging among other things, therefore we have more
%% possible values here than in decode_packing/2.
%% @doc Render a packing term as a human-readable string. With Strict=false,
%% `undefined' and any bare atom are also accepted.
encode_packing(undefined, false) ->
	"undefined";
encode_packing({spora_2_6, Addr}, _Strict) ->
	"spora_2_6_" ++ binary_to_list(ar_util:encode(Addr));
encode_packing({composite, Addr, PackingDifficulty}, _Strict) ->
	"composite_" ++ binary_to_list(ar_util:encode(Addr)) ++ "."
			++ integer_to_list(PackingDifficulty);
encode_packing(spora_2_5, _Strict) ->
	"spora_2_5";
encode_packing(unpacked, _Strict) ->
	"unpacked";
encode_packing(unpacked_padded, _Strict) ->
	"unpacked_padded";
encode_packing({replica_2_9, Addr}, _Strict) ->
	"replica_2_9_" ++ binary_to_list(ar_util:encode(Addr));
encode_packing(Packing, false) when is_atom(Packing) ->
	atom_to_list(Packing).

%% @doc Parse the textual packing representation (as produced by
%% encode_packing/2, but taken as a binary). Returns Error on any
%% malformed or unknown input instead of crashing. Composite packing
%% difficulty must be within [0, ?MAX_PACKING_DIFFICULTY].
decode_packing(<<"unpacked">>, _Error) ->
	unpacked;
decode_packing(<<"spora_2_5">>, _Error) ->
	spora_2_5;
decode_packing(<< "spora_2_6_", Addr/binary >>, Error) ->
	case ar_util:safe_decode(Addr) of
		{ok, DecodedAddr} ->
			{spora_2_6, DecodedAddr};
		_ ->
			Error
	end;
decode_packing(<<"composite_", Rest/binary>>, Error) ->
	case binary:split(Rest, <<".">>, [global]) of
		[AddrBin, PackingDifficultyBin] ->
			%% `catch' turns a badarg from binary_to_integer/1 into a
			%% non-integer term, which falls through to Error.
			case catch binary_to_integer(PackingDifficultyBin) of
				PackingDifficulty when is_integer(PackingDifficulty),
						PackingDifficulty >= 0,
						PackingDifficulty =< ?MAX_PACKING_DIFFICULTY ->
					case ar_util:safe_decode(AddrBin) of
						{ok, DecodedAddr} ->
							{composite, DecodedAddr, PackingDifficulty};
						_ ->
							Error
					end;
				_ ->
					Error
			end;
		_ ->
			Error
	end;
decode_packing(<< "replica_2_9_", Addr/binary >>, Error) ->
	case ar_util:safe_decode(Addr) of
		{ok, DecodedAddr} ->
			{replica_2_9, DecodedAddr};
		_ ->
			Error
	end;
decode_packing(<<"unpacked_padded">>, _Error) ->
	unpacked_padded;
decode_packing(_, Error) ->
	Error.
binary_to_packing(<<"unpacked">>, _Error) -> unpacked; binary_to_packing(<<"spora_2_5">>, _Error) -> spora_2_5; binary_to_packing(<< "spora_2_6_", Addr/binary >>, Error) when byte_size(Addr) =< 64 -> case ar_util:safe_decode(Addr) of {ok, DecodedAddr} -> {spora_2_6, DecodedAddr}; _ -> Error end; binary_to_packing(<< "composite_", PackingDifficulty:8, Addr/binary >>, Error) when byte_size(Addr) =< 64, PackingDifficulty =< ?MAX_PACKING_DIFFICULTY -> case ar_util:safe_decode(Addr) of {ok, DecodedAddr} -> {composite, DecodedAddr, PackingDifficulty}; _ -> Error end; binary_to_packing(<< "replica_2_9_", Addr/binary >>, Error) when byte_size(Addr) =< 64 -> case ar_util:safe_decode(Addr) of {ok, DecodedAddr} -> {replica_2_9, DecodedAddr}; _ -> Error end; binary_to_packing(<<"unpacked_padded">>, _Error) -> unpacked_padded. packing_to_binary(unpacked) -> <<"unpacked">>; packing_to_binary(spora_2_5) -> <<"spora_2_5">>; packing_to_binary({spora_2_6, Addr}) -> iolist_to_binary([<<"spora_2_6_">>, ar_util:encode(Addr)]); packing_to_binary({composite, Addr, PackingDifficulty}) -> iolist_to_binary([<<"composite_">>, << PackingDifficulty:8 >>, ar_util:encode(Addr)]); packing_to_binary({replica_2_9, Addr}) -> iolist_to_binary([<<"replica_2_9_">>, ar_util:encode(Addr)]); packing_to_binary(unpacked_padded) -> <<"unpacked_padded">>. pool_cm_jobs_to_json_struct(Jobs) -> #pool_cm_jobs{ h1_to_h2_jobs = H1ToH2Jobs, h1_read_jobs = H1ReadJobs, partitions = Partitions } = Jobs, {[ {h1_to_h2_jobs, [pool_cm_h1_to_h2_job_to_json_struct(Job) || Job <- H1ToH2Jobs]}, {h1_read_jobs, [pool_cm_h1_read_job_to_json_struct(Job) || Job <- H1ReadJobs]}, {partitions, Partitions} ]}. pool_cm_h1_to_h2_job_to_json_struct(Job) -> candidate_to_json_struct(Job). pool_cm_h1_read_job_to_json_struct(Job) -> candidate_to_json_struct(Job). 
json_map_to_pool_cm_jobs(Map) -> H1ToH2Jobs = [json_map_to_candidate(Job) || Job <- maps:get(<<"h1_to_h2_jobs">>, Map, [])], H1ReadJobs = [json_map_to_candidate(Job) || Job <- maps:get(<<"h1_read_jobs">>, Map, [])], #pool_cm_jobs{ h1_to_h2_jobs = H1ToH2Jobs, h1_read_jobs = H1ReadJobs }. footprint_to_json_map(Intervals) -> Intervals2 = ar_intervals:to_list(Intervals), Intervals3 = [[integer_to_binary(Start), integer_to_binary(End)] || {End, Start} <- Intervals2], #{ intervals => Intervals3 }. json_map_to_footprint(Map) -> Intervals = maps:get(<<"intervals">>, Map), Intervals2 = [{binary_to_integer(End), binary_to_integer(Start)} || [Start, End] <- Intervals], ar_intervals:from_list(Intervals2). ================================================ FILE: apps/arweave/src/ar_shutdown_manager.erl ================================================ %%%=================================================================== %%% This Source Code Form is subject to the terms of the GNU General %%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed %%% with this file, You can obtain one at %%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html %%% %%% @doc Arweave Shutdown Manager. %%% %%% This module was created to ensure all remaining connections or %%% processes related to arweave have been correctly terminated. This %%% process should be started first and stopped last. %%% %%% The module export few functions to help diagnose arweave network %%% connections. %%% %%% When arweave application is stopped, this application should %%% receive `shutdown' due to trap exist. In this situation, terminate %%% functon is then called. %%% %%% @end %%%=================================================================== -module(ar_shutdown_manager). -export([start_link/0]). -export([init/1, terminate/2]). -export([handle_call/3, handle_cast/2, handle_info/2]). 
-export([
	apply/3, apply/4,
	connections/0, connections/1,
	list_connections/0, list_connections/1,
	shutdown/0,
	socket_info/1, socket_info/2,
	start_killer/1,
	state/0,
	terminate_connections/0
]).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
start_link() ->
	start_link(#{}).

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
start_link(Args) ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, Args, []).

%%--------------------------------------------------------------------
%% @doc returns service state. Reads the `state' key from the ?MODULE
%% ETS table; anything other than an explicit `shutdown' marker
%% (including a missing table entry) is reported as `running'.
%% @end
%%--------------------------------------------------------------------
-spec state() -> shutdown | running.
state() ->
	case ets:lookup(?MODULE, state) of
		[{state, shutdown}] -> shutdown;
		_ -> running
	end.

%%--------------------------------------------------------------------
%% @doc set state value to shutdown.
%% @end
%%--------------------------------------------------------------------
-spec shutdown() -> boolean().
shutdown() ->
	ets:insert(?MODULE, {state, shutdown}).

%%--------------------------------------------------------------------
%% @doc apply a function only if the service is running.
%% @see apply/4
%% @end
%%--------------------------------------------------------------------
-spec apply(Module, Function, Arguments) -> Return when
	Module :: atom(),
	Function :: atom(),
	Arguments :: [term()],
	Return :: any() | {error, shutdown}.
apply(Module, Function, Arguments) ->
	apply(Module, Function, Arguments, #{}).

%%--------------------------------------------------------------------
%% @doc execute a MFA with extra option for filtering. When
%% `skip_on_shutdown' is explicitly false, the MFA is always executed;
%% otherwise it is only executed while the service is running, and
%% `{error, shutdown}' is returned (and a warning logged) during
%% shutdown.
%% @end
%%--------------------------------------------------------------------
-spec apply(Module, Function, Arguments, Opts) -> Return when
	Module :: atom(),
	Function :: atom(),
	Arguments :: [term()],
	Opts :: #{ skip_on_shutdown => boolean() },
	Return :: any() | {error, shutdown}.
apply(Module, Function, Arguments, #{ skip_on_shutdown := false }) ->
	erlang:apply(Module, Function, Arguments);
apply(Module, Function, Arguments, _Opts) ->
	case state() of
		running ->
			erlang:apply(Module, Function, Arguments);
		shutdown ->
			%% Fix: the arguments were previously logged under a second,
			%% duplicate `function' key; label them `arguments' instead.
			?LOG_WARNING([
				{state, shutdown},
				{module, Module},
				{function, Function},
				{arguments, Arguments}
			]),
			{error, shutdown}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% Traps exits so terminate/2 runs on supervisor shutdown, and marks
%% the service as running in the ?MODULE ETS table.
%%--------------------------------------------------------------------
init(_Args) ->
	erlang:process_flag(trap_exit, true),
	StartedAt = erlang:system_time(),
	?LOG_INFO([{start, ?MODULE}, {pid, self()}, {started_at, StartedAt}]),
	ets:insert(?MODULE, {state, running}),
	{ok, #{ started_at => StartedAt }}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_call(uptime, _From, State = #{ started_at := StartedAt }) ->
	Now = erlang:system_time(),
	{reply, Now-StartedAt, State};
handle_call(Msg, From, State) ->
	%% NOTE(review): unknown calls are logged but never replied to, so
	%% the caller blocks until its gen_server:call timeout — confirm
	%% this is intentional before changing it.
	?LOG_WARNING([{message, Msg}, {from, From}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_cast(Msg, State) ->
	?LOG_WARNING([{message, Msg}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_info(Msg, State) ->
	?LOG_WARNING([{message, Msg}]),
	{noreply, State}.
%%-------------------------------------------------------------------- %% @hidden %% @doc this function is called when ar_sup is being stopped. If it %% was correctly configured, this should be the last function to be %% executed in the supervision tree. %% @end %%-------------------------------------------------------------------- terminate(_Reason = shutdown, _State = #{ started_at := StartedAt }) -> Now = erlang:system_time(), terminate_connections(), ?LOG_INFO([ {stop, ?MODULE}, {started_at, StartedAt}, {stopped_at, Now}, {uptime, Now-StartedAt} ]), ok. %%-------------------------------------------------------------------- %% @hidden %% @doc terminate all active http connections from ranch/cowboy. This %% @end %%-------------------------------------------------------------------- terminate_connections() -> % this process should not exit, it will be linked to another % process in charge to kill all connections. erlang:process_flag(trap_exit, true), % list the connections/sockets by target, where target can % be gun or cowboy. Connections = list_connections(), ?LOG_DEBUG([{connections, length(Connections)}]), case Connections of [] -> ok; Sockets -> % this call is blocking until all killer % processes are dead. When done, the code % will loop to check if some connections % are still active. _ = killers_connections_init(Sockets), terminate_connections() end. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- list_connections() -> Targets = [cowboy, gun], lists:flatten([ list_connections(T) || T <- Targets ]). 
%% @doc List sockets per target. For gun (outgoing HTTP client), collect
%% the ports linked to each child of gun_sup. For cowboy (incoming HTTP
%% server), select tcp_inet sockets whose peer port equals the configured
%% node port via connections/1.
list_connections(gun) ->
	lists:flatten([
		begin
			ProcessInfo = process_info(P),
			Links = proplists:get_value(links, ProcessInfo, []),
			[ L || L <- Links, is_port(L) ]
		end
		|| {_, P, _, _} <- supervisor:which_children(gun_sup) ]);
list_connections(cowboy) ->
	{ok, Config} = arweave_config:get_env(),
	Filters = [{'=:=', peer_port, Config#config.port}],
	SocketsInfo = connections(#{ filters => Filters }),
	[ S || #{ socket := S } <- SocketsInfo ].

%%--------------------------------------------------------------------
%% @hidden
%% @doc spawn one killer process per socket and wait for all of them.
%% Sockets for which start_killer/1 fails are silently skipped.
%%--------------------------------------------------------------------
killers_connections_init([]) -> ok;
killers_connections_init(Sockets) ->
	% start killers processes to stop those sockets.
	Killers = lists:foldr(
		fun(S, Acc) ->
			case start_killer(S) of
				{ok, K} -> [K|Acc];
				_ -> Acc
			end
		end, [], Sockets),
	% wait until all connection killers are done with their job.
	killers_connections_loop(Killers).

%%--------------------------------------------------------------------
%% @hidden
%% @doc main terminate connections loop. This function waits for all
%% killer processes to be stopped (each one is linked, so an 'EXIT'
%% message is received when a killer dies). Gives up with
%% {error, timeout} after the configured shutdown_tcp_connection_timeout
%% (seconds, converted to milliseconds here) with no message.
%% @end
%%--------------------------------------------------------------------
-spec killers_connections_loop([pid()]) -> ok.
killers_connections_loop([]) -> ok;
killers_connections_loop(Killers) ->
	{ok, Config} = arweave_config:get_env(),
	TcpTimeout = 1000*Config#config.shutdown_tcp_connection_timeout,
	receive
		{'EXIT', Pid, _} ->
			Filter = fun
				(P) when P =:= Pid -> false;
				(_) -> true
			end,
			NewKillers = lists:filter(Filter, Killers),
			killers_connections_loop(NewKillers);
		Msg ->
			?LOG_WARNING([{received, Msg}]),
			killers_connections_loop(Killers)
	after
		TcpTimeout ->
			?LOG_WARNING([{error, timeout}]),
			{error, timeout}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc start a connection killer process. This function starts a new
%% killer job using a socket. A killer will be spawned and linked to
%% the caller process.
%% @end
%%--------------------------------------------------------------------
-spec start_killer(port()) -> {ok, pid()} | {error, term()}.
start_killer(Socket) when is_port(Socket) ->
	Fun = fun() -> killer_init(Socket) end,
	{ok, spawn_link(Fun)};
start_killer(Term) ->
	{error, Term}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc killer process initialization function. A killer connection
%% process requires some information about a socket, but it also
%% needs to wait until the socket is terminated. So, the first step
%% is to trap exits then link to the socket. The extended information
%% about the socket is also collected before entering into the loop.
%% The shutdown mode (shutdown | close) comes from the configuration.
%% @see killer_loop/1
%% @end
%%--------------------------------------------------------------------
killer_init(Socket) ->
	?LOG_DEBUG([{socket, Socket}, {pid, self()}, {action, started}]),
	erlang:process_flag(trap_exit, true),
	erlang:link(Socket),
	{ok, Config} = arweave_config:get_env(),
	Mode = Config#config.shutdown_tcp_mode,
	State = socket_info(Socket),
	NewState = killer_loop(State#{ counter => 0, mode => Mode }),
	?LOG_DEBUG([{state, NewState}, {pid, self()}, {action, done}]).

%%--------------------------------------------------------------------
%% @hidden
%% @doc killer connection loop. this loop is checking the state of
%% an active socket. The goal is to have all sockets in closed state.
%% If it's not the case, the killer connection will loop over until
%% it's done. Each iteration waits `delay' ms (default 1000) for the
%% linked socket to exit before prodding it again via
%% stop_connection/1.
%% @end
%%--------------------------------------------------------------------
-spec killer_loop(State) -> Return when
	State :: map(),
	Return :: ok | {error, term()}.
killer_loop(_State = #{ info := #{ states := [closed] }}) -> ok;
killer_loop(State = #{ socket := Socket, counter := Counter }) ->
	Delay = maps:get(delay, State, 1000),
	receive
		{'EXIT', Socket, _} ->
			?LOG_DEBUG([{state, State}, {pid, self()}, {action, exited}]),
			ok
	after
		Delay ->
			try stop_connection(State) of
				stop -> ok;
				continue ->
					%% refresh socket info and try again.
					NewState = socket_info(Socket, State),
					killer_loop(NewState#{ counter => Counter + 1})
			catch
				E:R ->
					?LOG_DEBUG([{state, State}, {pid, self()},
						{action, error}, {error, E}, {reason, R}]),
					{E, {R, State}}
			end
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc stop an active connection using a socket. This function is
%% mainly used to kill a connection based on socket state.
%%
%% Two modes are currently available: shutdown or close. The shutdown mode
%% shutdowns the socket and follows the safest procedure for the
%% client: shutdown the socket and then close it when the remote
%% side of the connection is okay.
%%
%% The close mode is setting linger and close the socket. This is not
%% a clean way but in some situation, it can be useful.
%% @end
%%--------------------------------------------------------------------
-spec stop_connection(State) -> Return when
	State :: map(),
	Return :: continue | stop.
%% In `shutdown' mode: on the first attempt, tune socket options and
%% shut down the write side; for attempts 1..9 retry the write
%% shutdown; from attempt 10 on, shut down the read side and close.
%% Fix: the counter guards were `< 10' / `> 10', leaving Counter == 10
%% as a dead no-op iteration; the second guard is now `>= 10'.
stop_connection(State = #{ socket := Socket, mode := shutdown }) ->
	?LOG_DEBUG([{state, State}, {pid, self()}]),
	case State of
		#{ info := #{ states := [closed] }} ->
			stop;
		#{ counter := 0 } ->
			ranch_tcp:setopts(Socket, [{delay_send, false}]),
			ranch_tcp:setopts(Socket, [{nodelay, true}]),
			ranch_tcp:setopts(Socket, [{send_timeout_close, true}]),
			ranch_tcp:setopts(Socket, [{send_timeout, 10}]),
			ranch_tcp:setopts(Socket, [{keepalive, false}]),
			ranch_tcp:setopts(Socket, [{exit_on_close, false}]),
			ranch_tcp:shutdown(Socket, write),
			continue;
		#{ counter := Counter } when Counter < 10 ->
			ranch_tcp:shutdown(Socket, write),
			continue;
		#{ counter := Counter } when Counter >= 10 ->
			ranch_tcp:shutdown(Socket, read),
			ranch_tcp:close(Socket),
			continue;
		_ ->
			continue
	end;
%% In `close' mode: set {linger, {true, 0}} and force the socket shut
%% in both directions, then close. Abrupt but effective.
stop_connection(State = #{ socket := Socket, mode := close }) ->
	?LOG_DEBUG([{state, State}, {pid, self()}]),
	case State of
		#{ info := #{ states := [closed] }} ->
			stop;
		_ ->
			ranch_tcp:setopts(Socket, [{delay_send, false}]),
			ranch_tcp:setopts(Socket, [{nodelay, true}]),
			ranch_tcp:setopts(Socket, [{exit_on_close, false}]),
			ranch_tcp:setopts(Socket, [{send_timeout_close, true}]),
			ranch_tcp:setopts(Socket, [{send_timeout, 10}]),
			ranch_tcp:setopts(Socket, [{keepalive, false}]),
			ranch_tcp:setopts(Socket, [{linger, {true, 0}}]),
			ranch_tcp:shutdown(Socket, read_write),
			ranch_tcp:close(Socket),
			continue
	end.

%%--------------------------------------------------------------------
%% @doc list all active connections.
%% @see connections/1
%% @end
%%--------------------------------------------------------------------
-spec connections() -> Return when
	Return :: [map()].
connections() ->
	connections(#{}).

%%--------------------------------------------------------------------
%% @doc `connections/1' is based on inet module private function.
%% The goal of this function is to return only active http connections.
%% By using this method, we are avoiding checking the links from
%% `ranch:procs/2' and deal directly with the sockets from `inet'.
%% Scans every port in the node, keeps network sockets, then filters
%% out closed sockets and listeners with the default filters below,
%% plus any caller-supplied filters.
%% @see connections/1
%% @end
%%--------------------------------------------------------------------
-spec connections(Opts) -> Return when
	Opts :: #{ filters => [tuple()] },
	Return :: [map()].
connections(Opts) ->
	Filters = maps:get(filters, Opts, []),
	Sockets = erlang:ports(),
	Foldr = fun network_connection_foldr/2,
	NetworkSockets = lists:foldr(Foldr, [], Sockets),
	DefaultFilters = [
		{'=:=', name, "tcp_inet"},
		{'=/=', [info, states], [closed]},
		{'=/=', [info, states], [listen, open]},
		%% Fix: this state list was previously spelled `acception', a
		%% state inet never reports, so accepting listeners were not
		%% excluded. `accepting' matches the [listen, open] filter
		%% above — confirm against inet:info/1 output on the target
		%% OTP release.
		{'=/=', [info, states], [accepting, listen, open]}
	],
	FinalFilters = DefaultFilters ++ Filters,
	data_filters(FinalFilters, NetworkSockets, #{}).

%%--------------------------------------------------------------------
%% @hidden
%% @doc filters only ports defined as network sockets (tcp/udp/sctp),
%% ignore all other ports, and collect extended socket information.
%% Any exception from erlang:port_info/2 (e.g. a concurrently closed
%% port) drops the port from the result.
%% @end
%%--------------------------------------------------------------------
-spec network_connection_foldr(Port, Acc) -> Return when
	Port :: port(),
	Acc :: list(),
	Return :: [map()].
network_connection_foldr(Port, Acc) ->
	try erlang:port_info(Port, name) of
		{name, N = "tcp_inet"} ->
			I = socket_info(Port),
			[I#{ name => N }|Acc];
		{name, N = "udp_inet"} ->
			I = socket_info(Port),
			[I#{ name => N }|Acc];
		{name, N = "sctp_inet"} ->
			I = socket_info(Port),
			[I#{ name => N }|Acc];
		_ ->
			Acc
	catch
		_:_ -> Acc
	end.

%%--------------------------------------------------------------------
%% @doc Returns extended information about an active socket (port).
%% it includes a formatted version of the peer/sock name and all
%% information from `inet:info/1' function.
%% @end
%%--------------------------------------------------------------------
-spec socket_info(Socket) -> Return when
	Socket :: port(),
	Return :: #{
		socket => port(),
		sock_name => {tuple(), pos_integer()},
		sock_address => tuple(),
		sock_port => pos_integer(),
		sock => string(),
		peer_name => {tuple(), pos_integer()},
		peer_address => tuple(),
		peer_port => pos_integer(),
		peer => string(),
		info => map()
	}.
socket_info(Socket) when is_port(Socket) ->
	socket_info(Socket, #{}).

%% @doc Like socket_info/1, but merges the collected information into
%% an existing state map (new values win).
socket_info(Socket, State) ->
	{SockAddress, SockPort, Sock} = sock_wrapper(Socket, sockname),
	{PeerAddress, PeerPort, Peer} = sock_wrapper(Socket, peername),
	% On R24, inet:info/1 can raise an exception
	% because some functions used to generate the
	% final map results are not returning correct
	% data. See inet:port_info/1, line 714
	Info =
		try inet:info(Socket) of
			Result when is_map(Result) -> Result;
			_ -> #{}
		catch
			_:_ -> #{}
		end,
	NewState = #{
		socket => Socket,
		sock_name => {SockAddress, SockPort},
		sock_address => SockAddress,
		sock_port => SockPort,
		sock => Sock,
		%% Fix: peer_name previously paired PeerAddress with SockPort
		%% (copy-paste from sock_name); it now uses PeerPort, matching
		%% the spec and the peer_port field.
		peer_name => {PeerAddress, PeerPort},
		peer_address => PeerAddress,
		peer_port => PeerPort,
		peer => Peer,
		info => Info
	},
	maps:merge(State, NewState).

%%--------------------------------------------------------------------
%% @hidden
%% @doc wrapper around `inet:peername/1' and `inet:sockname/1'.
%% Returns {undefined, undefined, "unknown"} on any failure instead of
%% crashing.
%% @end
%%---------------------------------------------------------------------
-spec sock_wrapper(Socket, Info) -> Return when
	Socket :: port(),
	Info :: peername | sockname,
	Return :: {Address, Port, Peer},
	Address :: tuple() | undefined,
	Port :: pos_integer() | undefined,
	Peer :: string().
sock_wrapper(Socket, Info) ->
	try inet:Info(Socket) of
		{ok, {Address, Port}} ->
			AddressList = inet:ntoa(Address),
			PortList = integer_to_list(Port),
			Peer = string:join([AddressList, PortList], ":"),
			{Address, Port, Peer};
		_ ->
			{undefined, undefined, "unknown"}
	catch
		_:_ ->
			{undefined, undefined, "unknown"}
	end.
%%--------------------------------------------------------------------
%% @hidden
%% @doc a simple data filter function. Filters Datas using a list of
%% filters. Only the entries matching all filters are returned.
%% Each filter is either {Operator, CheckKey} (unary erlang BIF applied
%% to the value) or {Operator, CheckKey, CheckValue} (binary erlang BIF
%% applied as Operator(CheckValue, Value) — note the argument order).
%% CheckKey can be a nested key path (a list) resolved by get/2.
%% @end
%%--------------------------------------------------------------------
-spec data_filters(Filters, Datas, Opts) -> Return when
	Filters :: [ {Operator, CheckKey} | {Operator, CheckKey, CheckValue} ],
	Operator :: atom(),
	CheckKey :: [term()] | term(),
	CheckValue :: term(),
	Datas :: [map()],
	Opts :: #{ reverse => boolean() },
	Return :: [map()].
data_filters(_, [], _) -> [];
data_filters([], Datas, _) -> Datas;
data_filters(Filters, Datas, Opts) ->
	data_filters(Filters, Datas, [], Opts).

data_filters_test() ->
	?assertEqual(
		[],
		data_filters([], [], #{})
	),
	?assertEqual(
		[#{}],
		data_filters([], [#{}], #{})
	),
	?assertEqual(
		[],
		data_filters([{'is_integer', a}], [], #{})
	),
	?assertEqual(
		[],
		data_filters([{'=:=', a, 1}], [#{ a => 2 }], #{})
	),
	?assertEqual([
		#{ a => 1 }],
		data_filters([{'=:=', a, 1}], [#{ a => 1 }], #{})
	).

%% @hidden accumulator-based worker for data_filters/3.
data_filters(_Filters, [], Buffer, _Opts) ->
	lists:reverse(Buffer);
data_filters(Filters, [H|T], Buffer, Opts) ->
	case filters(Filters, H, Opts) of
		{false, _} -> data_filters(Filters, T, Buffer, Opts);
		{true, _} -> data_filters(Filters, T, [H|Buffer], Opts)
	end.

%% @hidden returns {true, Data} only when Data passes every filter
%% (short-circuits on the first failure).
filters([], Data, _Opts) -> {true, Data};
filters([Filter|Rest], Data, Opts) ->
	case filter(Filter, Data, Opts) of
		true -> filters(Rest, Data, Opts);
		false -> {false, Data}
	end.

%% @hidden a missing key (get/2 error) fails the filter.
filter(Filter, Data, Opts) ->
	CheckKey = erlang:element(2, Filter),
	case get(CheckKey, Data) of
		{ok, Value} -> filter2(Filter, [Value], Data, Opts);
		{error, _} -> false
	end.

%% @hidden apply the operator from the `erlang' module: unary form gets
%% [Value]; ternary form gets [CheckValue, Value] (CheckValue first).
filter2(Filter = {Operator, _CheckKey}, [Value], Data, Opts) ->
	Result = erlang:apply(erlang, Operator, [Value]),
	filter3(Filter, [Result, Value], Data, Opts);
filter2(Filter = {Operator, _CheckKey, CheckValue}, [Value], Data, Opts) ->
	Result = erlang:apply(erlang, Operator, [CheckValue, Value]),
	filter3(Filter, [Result, Value], Data, Opts).

%% @hidden with reverse => true the filter result is negated.
filter3(_Filter, [Result|_], _Data, #{ reverse := true }) ->
	not Result;
filter3(_Filter, [Result|_], _Data, _Opts) ->
	Result.

%%--------------------------------------------------------------------
%% @hidden
%% @doc recursive maps value extractor. A non-list key (or a
%% single-element list) is looked up directly; a longer list is walked
%% into nested maps. Note: maps:get/2 on a missing key raises, which is
%% caught upstream by network_connection_foldr/filter callers only via
%% the final catch-all clause returning {error, not_found} for
%% non-map/non-matching input.
%% @end
%%--------------------------------------------------------------------
-spec get(Key, Map) -> Return when
	Key :: [term()] | term(),
	Map :: map(),
	Return :: {ok, term()} | {error, term()}.
get(Key, Map) when not is_list(Key), is_map(Map) ->
	{ok, maps:get(Key, Map)};
get([Key], Map) when is_map(Map) ->
	{ok, maps:get(Key, Map)};
get([Key|Rest], Map) when is_map(Map) ->
	Value = maps:get(Key, Map),
	get(Rest, Value);
get(_, _) ->
	{error, not_found}.

get_test() ->
	?assertEqual(
		{error, not_found},
		get(1, [])
	),
	?assertEqual(
		{ok, 1},
		get(a, #{ a => 1 })
	),
	?assertEqual(
		{ok, 1},
		get([a,b], #{ a => #{ b => 1 } })
	),
	?assertEqual(
		{error, not_found},
		get([a,b,c], #{ a => #{ b => 1 } })
	).



================================================
FILE: apps/arweave/src/ar_storage.erl
================================================
-module(ar_storage).

-behaviour(gen_server).
%% The module persists blocks, transactions, the block index, the reward and
%% block time histories, and account (wallet) tree data on disk, using both
%% RocksDB tables (via ar_kv) and plain files under the data directory.

%% Public interface.
-export([start_link/0, read_block_index/0, read_block_index/1, read_reward_history/1,
	read_reward_history/2, read_block_time_history/2, read_block_time_history/3,
	store_block_index/1, update_block_index/3, store_reward_history_part/1,
	store_reward_history_part2/1, store_block_time_history_part/2,
	store_block_time_history_part2/1, write_full_block/2, read_block/1, read_block/2,
	read_block/3, write_tx/1, read_tx/1, read_tx/2, read_tx_data/1, read_tx_data/2,
	update_confirmation_index/1, get_tx_confirmation_data/1, read_wallet_list/1,
	read_wallet_list/2, write_wallet_list/2, delete_blacklisted_tx/1,
	lookup_tx_filename/1, lookup_tx_filename/2, open_databases/0,
	open_start_from_state_databases/1, close_start_from_state_databases/0,
	wallet_list_filepath/1, wallet_list_filepath/2, tx_filepath/1, tx_filepath/2,
	tx_data_filepath/1, tx_data_filepath/2, read_tx_file/1, read_migrated_v1_tx_file/1,
	read_migrated_v1_tx_file/2, ensure_directories/1, write_file_atomic/2,
	write_tx_data/3, write_term/2, write_term/3, read_term/1, read_term/2,
	delete_term/1, is_file/1, migrate_tx_record/1, migrate_block_record/1,
	read_account/2, read_account/4, read_block_from_file/2]).

%% gen_server callbacks.
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_wallets.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/file.hrl").

%% The server keeps no state of its own; it owns the database handles and
%% serializes account tree updates.
-record(state, {}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Read the entire stored block index.
read_block_index() ->
	read_block_index(not_set).
%% @doc Read the block index from the block_index_db table, from height 0 up
%% to the stored tip. Return not_found when the index is missing or fails the
%% previous-hash chain validation.
read_block_index(CustomDir) ->
	case block_index_tip(CustomDir) of
		not_found ->
			not_found;
		{Height, {_H, _, _, _PrevH}} ->
			{ok, Map} = ar_kv:get_range(get_db_name(block_index_db, CustomDir),
					<< 0:256 >>, << Height:256 >>),
			read_block_index_from_map(Map, 0, Height, <<>>, [])
	end.

%% Walk heights 0..End, checking that each stored entry references the hash
%% of the previous height, and accumulate {H, WeaveSize, TXRoot} triples
%% (prepending, so the result is sorted tip-first).
read_block_index_from_map(_Map, Height, End, _PrevH, BI) when Height > End ->
	BI;
read_block_index_from_map(Map, Height, End, PrevH, BI) ->
	V = maps:get(<< Height:256 >>, Map, not_found),
	case V of
		not_found ->
			ar:console("The stored block index is invalid. Height ~B not found.~n",
					[Height]),
			not_found;
		_ ->
			case binary_to_term(V, [safe]) of
				{H, WeaveSize, TXRoot, PrevH} ->
					%% The entry correctly links to the previous block.
					read_block_index_from_map(Map, Height + 1, End, H,
							[{H, WeaveSize, TXRoot} | BI]);
				{_, _, _, PrevH2} ->
					ar:console("The stored block index is invalid. Height: ~B, "
							"stored previous hash: ~s, expected previous hash: ~s.~n",
							[Height, ar_util:encode(PrevH2), ar_util:encode(PrevH)]),
					not_found
			end
	end.

%% @doc Return the reward history for the given block index part or not_found.
read_reward_history(BI) ->
	read_reward_history(BI, not_set).

read_reward_history([], _CustomDir) ->
	[];
read_reward_history([{H, _WeaveSize, _TXRoot} | BI], CustomDir) ->
	%% Recurse first so that a missing deeper entry short-circuits the result.
	case read_reward_history(BI, CustomDir) of
		not_found ->
			not_found;
		History ->
			case ar_kv:get(get_db_name(reward_history_db, CustomDir), H) of
				not_found ->
					?LOG_DEBUG([{event, read_reward_history_not_found},
							{reason, missing_block},
							{block, ar_util:encode(H)}]),
					not_found;
				{ok, Bin} ->
					Element = binary_to_term(Bin, [safe]),
					[Element | History]
			end
	end.

%% @doc Return the block time history for the given block index part or not_found.
read_block_time_history(Height, BI) ->
	read_block_time_history(Height, BI, not_set).
read_block_time_history(_Height, [], _CustomDir) ->
	[];
read_block_time_history(Height, [{H, _WeaveSize, _TXRoot} | BI], CustomDir) ->
	%% The block time history only exists since the 2.7 fork.
	case Height < ar_fork:height_2_7() of
		true ->
			[];
		false ->
			case read_block_time_history(Height - 1, BI, CustomDir) of
				not_found ->
					not_found;
				History ->
					case ar_kv:get(get_db_name(block_time_history_db, CustomDir), H) of
						not_found ->
							not_found;
						{ok, Bin} ->
							Element = binary_to_term(Bin, [safe]),
							[Element | History]
					end
			end
	end.

%% @doc Record the entire block index on disk.
%% Return {error, block_index_no_recent_intersection} if the local state forks away
%% at more than ar_block:get_consensus_window_size() blocks ago.
store_block_index(BI) ->
	%% Use a key that is bigger than any << Height:256 >> (<<"a">> > << Height:256 >>)
	%% to retrieve the largest stored Height.
	NewHeight = length(BI) - 1,
	case ar_kv:get_prev(block_index_db, <<"a">>) of
		none ->
			%% Nothing stored yet - write the whole index.
			update_block_index(NewHeight, 0, lists:reverse(BI));
		{ok, << StoredHeight:256 >>, _V} ->
			%% RootHeight should be a historical height shared by both the stored BI
			%% and the new BI.
			RootHeight = max(0, min(StoredHeight, NewHeight)
					- ar_block:get_consensus_window_size()),
			{ok, V} = ar_kv:get(block_index_db, << RootHeight:256 >>),
			%% BI is sorted tip-first, so the entry for RootHeight sits at
			%% position NewHeight - RootHeight + 1.
			{H, WeaveSize, TXRoot} = lists:nth(NewHeight - RootHeight + 1, BI),
			case binary_to_term(V, [safe]) of
				{H, WeaveSize, TXRoot, _PrevH} ->
					BI2 = lists:reverse(lists:sublist(BI, NewHeight - RootHeight)),
					update_block_index(NewHeight, StoredHeight - RootHeight, BI2);
				{H2, _, _, _} ->
					?LOG_ERROR([{event, failed_to_store_block_index},
							{reason, no_intersection},
							{height, RootHeight},
							{stored_hash, ar_util:encode(H2)},
							{expected_hash, ar_util:encode(H)}]),
					{error, block_index_no_recent_intersection}
			end;
		Error ->
			Error
	end.

%% @doc Record the block index update on disk. Remove the orphans, if any.
%% @doc Validate and apply a block index update. NewTipHeight is the height of
%% the new tip, OrphanCount the number of stored tip entries to delete, and BI
%% the {H, WeaveSize, TXRoot} entries to (re)write, oldest first.
update_block_index(NewTipHeight, OrphanCount, _BI)
		when NewTipHeight < 0 orelse OrphanCount < 0 ->
	{error, badarg};
update_block_index(NewTipHeight, OrphanCount, BI) ->
	%% Record the contents of BI starting at this height - the entry at IndexHeight will
	%% be the first entry written (perhaps replacing an existing entry at that height)
	CurTipHeight =
		case block_index_tip() of
			not_found -> -1;
			{Height, _} -> Height
		end,
	%% IndexHeight is by default one beyond the current tip, this only changes if we have
	%% orphans (which will be deleted).
	IndexHeight = (CurTipHeight + 1) - OrphanCount,
	case IndexHeight + length(BI) - 1 == NewTipHeight of
		true ->
			update_block_index2(IndexHeight, OrphanCount, BI);
		false ->
			?LOG_ERROR([{event, failed_to_update_block_index},
					{reason, block_index_gap},
					{cur_tip_height, CurTipHeight},
					{new_tip_height, NewTipHeight},
					{index_height, IndexHeight},
					{orphan_count, OrphanCount},
					{block_count, length(BI)}]),
			{error, not_found}
	end.

update_block_index2(IndexHeight, OrphanCount, BI) ->
	%% 1. Delete all the orphaned blocks from the block index
	case ar_kv:delete_range(block_index_db, << IndexHeight:256 >>,
			<< (IndexHeight + OrphanCount):256 >>) of
		ok ->
			case IndexHeight of
				0 ->
					%% Writing from the genesis height - there is no previous
					%% entry to link against.
					update_block_index3(0, <<>>, BI);
				_ ->
					%% 2. Add all the entries in BI to the block index
					%% BI will include the new tip block at the current height, as well as any new
					%% history blocks if the tip is on a new branch.
					case ar_kv:get(block_index_db, << (IndexHeight - 1):256 >>) of
						not_found ->
							?LOG_ERROR([{event, failed_to_update_block_index},
									{reason, prev_element_not_found},
									{prev_height, IndexHeight - 1}]),
							{error, not_found};
						{ok, Bin} ->
							{PrevH, _, _, _} = binary_to_term(Bin, [safe]),
							update_block_index3(IndexHeight, PrevH, BI)
					end
			end;
		{error, Error} ->
			%% Fixed: this log entry used to carry two {reason, _} keys; the
			%% underlying error is now reported under the error key.
			?LOG_ERROR([{event, failed_to_update_block_index},
					{reason, failed_to_remove_orphaned_range},
					{range_start, IndexHeight},
					{range_end, IndexHeight + OrphanCount},
					{error, io_lib:format("~p", [Error])}]),
			{error, Error}
	end.

%% Write the given entries starting at Height, linking each entry to the hash
%% of the entry one height below it.
update_block_index3(_Height, _PrevH, []) ->
	ok;
update_block_index3(Height, PrevH, [{H, WeaveSize, TXRoot} | BI]) ->
	Bin = term_to_binary({H, WeaveSize, TXRoot, PrevH}),
	case ar_kv:put(block_index_db, << Height:256 >>, Bin) of
		ok ->
			update_block_index3(Height + 1, H, BI);
		Error ->
			?LOG_ERROR([{event, failed_to_update_block_index},
					{height, Height},
					{reason, io_lib:format("~p", [Error])}]),
			{error, Error}
	end.

%% Store the reward history entries for the given blocks, keyed by block hash.
store_reward_history_part([]) ->
	ok;
store_reward_history_part(Blocks) ->
	store_reward_history_part2([{B#block.indep_hash,
			{B#block.reward_addr, ar_difficulty:get_hash_rate_fixed_ratio(B),
					B#block.reward, B#block.denomination}} || B <- Blocks]).

store_reward_history_part2([]) ->
	ok;
store_reward_history_part2([{H, El} | History]) ->
	Bin = term_to_binary(El),
	case ar_kv:put(reward_history_db, H, Bin) of
		ok ->
			store_reward_history_part2(History);
		Error ->
			?LOG_ERROR([{event, failed_to_update_reward_history},
					{reason, io_lib:format("~p", [Error])},
					{block, ar_util:encode(H)}]),
			{error, not_found}
	end.

store_block_time_history_part([], _PrevB) ->
	ok;
store_block_time_history_part(Blocks, PrevB) ->
	History = ar_block_time_history:get_history_from_blocks(Blocks, PrevB),
	store_block_time_history_part2(History).
store_block_time_history_part2([]) ->
	ok;
store_block_time_history_part2([{H, El} | History]) ->
	Bin = term_to_binary(El),
	case ar_kv:put(block_time_history_db, H, Bin) of
		ok ->
			store_block_time_history_part2(History);
		Error ->
			?LOG_ERROR([{event, failed_to_update_block_time_history},
					{reason, io_lib:format("~p", [Error])},
					{block, ar_util:encode(H)}]),
			{error, not_found}
	end.

-if(?NETWORK_NAME == "arweave.N.1").
write_full_block(#block{ height = 0 } = BShadow, TXs) ->
	%% Genesis transactions are stored in data/genesis_txs; they are part of the repository.
	write_full_block2(BShadow, TXs);
write_full_block(BShadow, TXs) ->
	case update_confirmation_index(BShadow#block{ txs = TXs }) of
		ok ->
			case write_tx([TX || TX <- TXs, not is_blacklisted(TX)]) of
				ok ->
					write_full_block2(BShadow, TXs);
				Error ->
					Error
			end;
		Error ->
			Error
	end.
-else.
write_full_block(BShadow, TXs) ->
	case update_confirmation_index(BShadow#block{ txs = TXs }) of
		ok ->
			case write_tx([TX || TX <- TXs, not is_blacklisted(TX)]) of
				ok ->
					write_full_block2(BShadow, TXs);
				Error ->
					Error
			end;
		Error ->
			Error
	end.
-endif.

%% Only v1 transactions (whose data is part of the header) are skipped when
%% blacklisted; v2 headers are always written.
is_blacklisted(#tx{ format = 2 }) ->
	false;
is_blacklisted(#tx{ id = TXID }) ->
	ar_tx_blacklist:is_tx_blacklisted(TXID).

update_confirmation_index(B) ->
	put_tx_confirmation_data(B).

%% Map every transaction of the block to {Height, BlockHash}; stop writing
%% after the first ar_kv:put/3 failure and return that error.
put_tx_confirmation_data(B) ->
	Data = term_to_binary({B#block.height, B#block.indep_hash}),
	lists:foldl(
		fun	(TX, ok) ->
				ar_kv:put(tx_confirmation_db, TX#tx.id, Data);
			(_TX, Acc) ->
				Acc
		end,
		ok,
		B#block.txs
	).

%% @doc Return {BlockHeight, BlockHash} belonging to the block where
%% the given transaction was included.
get_tx_confirmation_data(TXID) ->
	case ar_kv:get(tx_confirmation_db, TXID) of
		{ok, Binary} ->
			{ok, binary_to_term(Binary, [safe])};
		not_found ->
			not_found
	end.

%% @doc Read a block from disk, given a height
%% and a block index (used to determine the hash by height).
read_block(Height, BI) when is_integer(Height) ->
	read_block(Height, BI, not_set);
read_block(B, _CustomDir) when is_record(B, block) ->
	%% Already a block record - nothing to read.
	B;
read_block(unavailable, _CustomDir) ->
	unavailable;
read_block(Blocks, CustomDir) when is_list(Blocks) ->
	lists:map(fun(B) -> read_block(B, CustomDir) end, Blocks);
read_block({H, _, _}, CustomDir) ->
	%% A block index entry - read by hash.
	read_block(H, CustomDir);
read_block(BH, CustomDir) ->
	%% Try the disk cache first, then the block_db table, then a plain file.
	case ar_disk_cache:lookup_block_filename(BH, CustomDir) of
		{ok, {Filename, Encoding}} ->
			%% The cache keeps a rotated number of recent headers when the
			%% node is out of disk space.
			read_block_from_file(Filename, Encoding);
		_ ->
			case ar_kv:get(get_db_name(block_db, CustomDir), BH) of
				not_found ->
					case lookup_block_filename(BH, CustomDir) of
						unavailable ->
							unavailable;
						{Filename, Encoding} ->
							read_block_from_file(Filename, Encoding)
					end;
				{ok, V} ->
					parse_block_kv_binary(V);
				{error, Reason} ->
					?LOG_WARNING([{event, error_reading_block_from_kv_storage},
							{block, ar_util:encode(BH)},
							{error, io_lib:format("~p", [Reason])}]),
					unavailable
			end
	end.

%% Resolve the hash of the block at the given height via the block index.
%% BI is sorted tip-first, so height H lives at position length(BI) - H.
read_block(Height, BI, CustomDir) when is_integer(Height) ->
	case Height of
		_ when Height < 0 ->
			unavailable;
		_ when Height > length(BI) - 1 ->
			unavailable;
		_ ->
			{H, _, _} = lists:nth(length(BI) - Height, BI),
			read_block(H, CustomDir)
	end.

read_block(B) ->
	read_block(B, not_set).

%% @doc Read the account information for the given address and
%% root hash of the account tree. Return {0, <<>>} if the given address does not belong
%% to the tree. The balance may be also 0 when the address exists in the tree. Return
%% not_found if some of the files with the account data are missing.
read_account(Addr, Key) ->
	read_account(Addr, Key, <<>>, not_set).
%% Look up a node of the stored account tree. A leaf holds {Address, Value};
%% an inner node holds a list of {ChildKey, Prefix} pairs. Fall back to the
%% chunked wallet list files when the KV lookup does not yield a usable node.
read_account(Addr, Key, Prefix, CustomDir) ->
	case get_account_tree_value(Key, Prefix, CustomDir) of
		{ok, << Key:48/binary, _/binary >>, V} ->
			case binary_to_term(V, [safe]) of
				{K, Val} when K == Addr ->
					Val;
				{_, _} ->
					%% A leaf for a different address - Addr is not in the tree.
					{0, <<>>};
				[_ | _] = SubTrees ->
					case find_key_by_matching_longest_prefix(Addr, SubTrees) of
						not_found ->
							{0, <<>>};
						{H, Prefix2} ->
							read_account(Addr, H, Prefix2, CustomDir)
					end
			end;
		_ ->
			read_account2(Addr, Key, CustomDir)
	end.

%% Pick the child whose prefix is the longest one matching the beginning of
%% Addr; return not_found when no child prefix matches.
find_key_by_matching_longest_prefix(Addr, Keys) ->
	find_key_by_matching_longest_prefix(Addr, Keys, {<<>>, -1}).

find_key_by_matching_longest_prefix(_Addr, [], {Key, Prefix}) ->
	case Key of
		<<>> ->
			not_found;
		_ ->
			{Key, Prefix}
	end;
find_key_by_matching_longest_prefix(Addr, [{_, Prefix} | Keys], {Key, KeyPrefix})
		when Prefix == <<>> orelse byte_size(Prefix) =< byte_size(KeyPrefix) ->
	%% Skip children that cannot beat the current best. Note the initial best
	%% marker is -1, so byte_size(KeyPrefix) fails this guard (silently, per
	%% Erlang guard semantics) until a real match has been recorded.
	find_key_by_matching_longest_prefix(Addr, Keys, {Key, KeyPrefix});
find_key_by_matching_longest_prefix(Addr, [{H, Prefix} | Keys], {Key, KeyPrefix}) ->
	case binary:match(Addr, Prefix) of
		{0, _} ->
			%% Prefix matches at offset 0, i.e. it is a prefix of Addr.
			find_key_by_matching_longest_prefix(Addr, Keys, {H, Prefix});
		_ ->
			find_key_by_matching_longest_prefix(Addr, Keys, {Key, KeyPrefix})
	end.

read_account2(Addr, RootHash, CustomDir) ->
	%% Unfortunately, we do not have an easy access to the information about how many
	%% accounts there were in the given tree so we perform the binary search starting
	%% from the number in the latest block.
	Size = ar_wallets:get_size(),
	MaxFileCount = Size div ?WALLET_LIST_CHUNK_SIZE + 1,
	Dir =
		case CustomDir of
			not_set ->
				get_data_dir();
			_ ->
				CustomDir
		end,
	read_account(Addr, RootHash, 0, MaxFileCount, Dir, false).
%% Binary search across the wallet list chunk files for the chunk that may
%% contain Addr. RightFileFound tracks whether the chunk file right of the
%% current range is known to exist, so a genuine miss can be distinguished
%% from missing data (not_found).
read_account(_Addr, _RootHash, Left, Right, _DataDir, _RightFileFound) when Left == Right ->
	not_found;
read_account(Addr, RootHash, Left, Right, DataDir, RightFileFound) ->
	Pos = Left + (Right - Left) div 2,
	Filepath = wallet_list_chunk_relative_filepath(Pos * ?WALLET_LIST_CHUNK_SIZE, RootHash),
	case filelib:is_file(filename:join(DataDir, Filepath)) of
		false ->
			read_account(Addr, RootHash, Left, Pos, DataDir, false);
		true ->
			{ok, L} = ar_storage:read_term(Filepath),
			read_account2(Addr, RootHash, Pos, Left, Right, DataDir, L, RightFileFound)
	end.

wallet_list_chunk_relative_filepath(Position, RootHash) ->
	binary_to_list(iolist_to_binary([
		?WALLET_LIST_DIR,
		"/",
		ar_util:encode(RootHash),
		"-",
		integer_to_binary(Position),
		"-",
		integer_to_binary(?WALLET_LIST_CHUNK_SIZE)
	])).

%% Inspect the chunk read at Pos and either answer or continue the search.
%% A chunk headed by the atom 'last' is the final chunk of the tree.
read_account2(Addr, _RootHash, _Pos, _Left, _Right, _DataDir,
		[last, {LargestAddr, _} | _L], _RightFileFound) when Addr > LargestAddr ->
	%% Addr is beyond the largest address of the last chunk - not in the tree.
	{0, <<>>};
read_account2(Addr, RootHash, Pos, Left, Right, DataDir, [last | L], RightFileFound) ->
	read_account2(Addr, RootHash, Pos, Left, Right, DataDir, L, RightFileFound);
read_account2(Addr, RootHash, Pos, _Left, Right, DataDir, [{LargestAddr, _} | _L],
		RightFileFound) when Addr > LargestAddr ->
	%% Addr sorts after this chunk - continue to the right.
	case Pos + 1 == Right of
		true ->
			case RightFileFound of
				true ->
					{0, <<>>};
				false ->
					not_found
			end;
		false ->
			read_account(Addr, RootHash, Pos, Right, DataDir, RightFileFound)
	end;
read_account2(Addr, RootHash, Pos, Left, _Right, DataDir, L, _RightFileFound) ->
	case Addr < element(1, lists:last(L)) of
		true ->
			%% Addr sorts before this chunk - continue to the left.
			case Pos == Left of
				true ->
					{0, <<>>};
				false ->
					read_account(Addr, RootHash, Left, Pos, DataDir, true)
			end;
		false ->
			%% Addr falls inside this chunk's range - scan the chunk.
			case lists:search(fun({Addr2, _}) -> Addr2 == Addr end, L) of
				{value, {Addr, Data}} ->
					Data;
				false ->
					{0, <<>>}
			end
	end.

lookup_block_filename(H) ->
	lookup_block_filename(H, not_set).
%% Locate the block header file, preferring the JSON encoding over the
%% binary encoding.
lookup_block_filename(H, CustomDir) ->
	Dir =
		case CustomDir of
			not_set ->
				get_data_dir();
			_ ->
				CustomDir
		end,
	Name = filename:join([Dir, ?BLOCK_DIR, binary_to_list(ar_util:encode(H))]),
	NameJSON = iolist_to_binary([Name, ".json"]),
	case is_file(NameJSON) of
		true ->
			{NameJSON, json};
		false ->
			NameBin = iolist_to_binary([Name, ".bin"]),
			case is_file(NameBin) of
				true ->
					{NameBin, binary};
				false ->
					unavailable
			end
	end.

%% @doc Delete the blacklisted tx with the given hash from disk. Return {ok, BytesRemoved} if
%% the removal is successful or the file does not exist. The reported number of removed
%% bytes does not include the migrated v1 data. The removal of migrated v1 data is requested
%% from ar_data_sync asynchronously. The v2 headers are not removed.
delete_blacklisted_tx(Hash) ->
	case ar_kv:get(tx_db, Hash) of
		{ok, V} ->
			TX = parse_tx_kv_binary(V),
			%% Only v1 headers embed the data; v2 headers are kept.
			case TX#tx.format == 1 andalso TX#tx.data_size > 0 of
				true ->
					case ar_kv:delete(tx_db, Hash) of
						ok ->
							{ok, byte_size(V)};
						Error ->
							Error
					end;
				_ ->
					{ok, 0}
			end;
		{error, _} = DBError ->
			DBError;
		not_found ->
			case lookup_tx_filename(Hash) of
				{Status, Filename} ->
					case Status of
						migrated_v1 ->
							case file:read_file_info(Filename) of
								{ok, FileInfo} ->
									case file:delete(Filename) of
										ok ->
											{ok, FileInfo#file_info.size};
										Error ->
											Error
									end;
								Error ->
									Error
							end;
						_ ->
							{ok, 0}
					end;
				unavailable ->
					{ok, 0}
			end
	end.

%% Decode a transaction stored in the tx_db table. Recent records are stored
%% via ar_serialize:tx_to_binary/1; older ones are term_to_binary-encoded
%% records which are migrated on read.
parse_tx_kv_binary(Bin) ->
	case catch ar_serialize:binary_to_tx(Bin) of
		{ok, TX} ->
			TX;
		_ ->
			migrate_tx_record(binary_to_term(Bin, [safe]))
	end.

%% Convert the stored tx record to its latest state in the code
%% (assign the default values to all missing fields). Since the version introducing
%% the fork 2.6, the transactions are serialized via ar_serialize:tx_to_binary/1, which
%% is maintained compatible with all past versions, so this code is only used
%% on the nodes synced before the corresponding release.
migrate_tx_record(#tx{} = TX) ->
	TX;
migrate_tx_record({tx, Format, ID, LastTX, Owner, Tags, Target, Quantity,
		Data, DataSize, DataTree, DataRoot, Signature, Reward}) ->
	%% An old pre-2.6 record tuple: rebuild the record, filling the fields
	%% introduced later with their defaults.
	#tx{ format = Format, id = ID, last_tx = LastTX, owner = Owner,
			tags = Tags, target = Target, quantity = Quantity,
			data = Data, data_size = DataSize, data_root = DataRoot,
			signature = Signature, signature_type = ?DEFAULT_KEY_TYPE,
			reward = Reward, data_tree = DataTree,
			owner_address = ar_wallet:to_address(Owner, ?DEFAULT_KEY_TYPE) }.

%% Decode a block stored in the block_db table. Recent records are stored via
%% ar_serialize:block_to_binary/1; older ones are term_to_binary-encoded
%% records which are migrated on read.
parse_block_kv_binary(Bin) ->
	case catch ar_serialize:binary_to_block(Bin) of
		{ok, B} ->
			B;
		_ ->
			migrate_block_record(binary_to_term(Bin, [safe]))
	end.

%% Convert the stored block record to its latest state in the code
%% (assign the default values to all missing fields). Since the version introducing
%% the fork 2.6, the blocks are serialized via ar_serialize:block_to_binary/1, which
%% is maintained compatible with all past block versions, so this code is only used
%% on the nodes synced before the corresponding release.
migrate_block_record(#block{} = B) ->
	B;
migrate_block_record({block, Nonce, PrevH, TS, Last, Diff, Height, Hash, H, TXs,
		TXRoot, TXTree, HL, HLMerkle, WL, RewardAddr, Tags, RewardPool,
		WeaveSize, BlockSize, CDiff, SizeTaggedTXs, PoA, Rate, ScheduledRate,
		Packing_2_5_Threshold, StrictDataSplitThreshold}) ->
	%% An old pre-2.6 record tuple: rebuild the record, filling the fields
	%% introduced later with their defaults.
	PoA_2 =
		case PoA of
			{poa, O, TXPath, DataPath, Chunk} ->
				#poa{ option = O, tx_path = TXPath, data_path = DataPath,
						chunk = Chunk };
			#poa{} ->
				PoA
		end,
	#block{ nonce = Nonce, previous_block = PrevH, timestamp = TS,
			last_retarget = Last, diff = Diff, height = Height, hash = Hash,
			indep_hash = H, txs = TXs, tx_root = TXRoot, tx_tree = TXTree,
			hash_list = HL, hash_list_merkle = HLMerkle, wallet_list = WL,
			reward_addr = RewardAddr, tags = Tags, reward_pool = RewardPool,
			weave_size = WeaveSize, block_size = BlockSize,
			cumulative_diff = CDiff, size_tagged_txs = SizeTaggedTXs,
			poa = PoA_2, usd_to_ar_rate = Rate,
			scheduled_usd_to_ar_rate = ScheduledRate,
			packing_2_5_threshold = Packing_2_5_Threshold,
			strict_data_split_threshold = StrictDataSplitThreshold }.

%% @doc Write the given transaction(s): the header into the tx_db table and,
%% when data is attached, the data into the ar_data_sync disk pool.
write_tx(TXs) when is_list(TXs) ->
	lists:foldl(
		fun	(TX, ok) ->
				write_tx(TX);
			(_TX, Acc) ->
				Acc
		end,
		ok,
		TXs
	);
write_tx(#tx{ format = Format, id = TXID } = TX) ->
	case write_tx_header(TX) of
		ok ->
			DataSize = byte_size(TX#tx.data),
			case DataSize > 0 of
				true ->
					case {DataSize == TX#tx.data_size, Format} of
						{false, 2} ->
							?LOG_ERROR([{event, failed_to_store_tx_data},
									{reason, size_mismatch},
									{tx, ar_util:encode(TX#tx.id)}]),
							ok;
						{true, 1} ->
							case write_tx_data(no_expected_data_root, TX#tx.data, TXID) of
								ok ->
									ok;
								{error, Reason} ->
									?LOG_WARNING([{event, failed_to_store_tx_data},
											{reason, Reason},
											{tx, ar_util:encode(TX#tx.id)}]),
									%% We have stored the data in the tx_db table
									%% so we return ok here.
									ok
							end;
						{true, 2} ->
							case ar_tx_blacklist:is_tx_blacklisted(TX#tx.id) of
								true ->
									ok;
								false ->
									case write_tx_data(TX#tx.data_root, TX#tx.data, TXID) of
										ok ->
											ok;
										{error, Reason} ->
											%% v2 data is not part of the header. We have to
											%% report success here even if we failed to store
											%% the attached data.
											?LOG_WARNING([{event, failed_to_store_tx_data},
													{reason, Reason},
													{tx, ar_util:encode(TX#tx.id)}]),
											ok
									end
							end
					end;
				false ->
					ok
			end;
		NotOk ->
			NotOk
	end.

%% v1 data is part of the header; v2 headers are stored with the data blanked.
write_tx_header(TX) ->
	TX2 =
		case TX#tx.format of
			1 ->
				TX;
			_ ->
				TX#tx{ data = <<>> }
		end,
	ar_kv:put(tx_db, TX#tx.id, ar_serialize:tx_to_binary(TX2)).

%% Split the data into chunks and hand them to the ar_data_sync disk pool.
%% When ExpectedDataRoot is given, verify the recomputed tree root matches it.
write_tx_data(ExpectedDataRoot, Data, TXID) ->
	Chunks = ar_tx:chunk_binary(?DATA_CHUNK_SIZE, Data),
	SizeTaggedChunks = ar_tx:chunks_to_size_tagged_chunks(Chunks),
	SizeTaggedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(SizeTaggedChunks),
	case {ExpectedDataRoot, ar_merkle:generate_tree(SizeTaggedChunkIDs)} of
		{no_expected_data_root, {DataRoot, DataTree}} ->
			write_tx_data(DataRoot, DataTree, Data, SizeTaggedChunks, TXID);
		{_, {ExpectedDataRoot, DataTree}} ->
			write_tx_data(ExpectedDataRoot, DataTree, Data, SizeTaggedChunks, TXID);
		_ ->
			{error, [invalid_data_root]}
	end.

write_tx_data(DataRoot, DataTree, Data, SizeTaggedChunks, TXID) ->
	Errors = lists:foldl(
		fun	({<<>>, _}, Acc) ->
				%% Empty chunks are produced by ar_tx:chunk_binary/2, when
				%% the data is evenly split by the given chunk size. They are
				%% the last chunks of the corresponding transactions and have
				%% the same end offsets as their preceding chunks. They are never
				%% picked as recall chunks because recall byte has to be strictly
				%% smaller than the end offset. They are an artifact of the original
				%% chunking implementation. There is no value in storing them.
				Acc;
			({Chunk, Offset}, Acc) ->
				DataPath = ar_merkle:generate_path(DataRoot, Offset - 1, DataTree),
				TXSize = byte_size(Data),
				DiskPoolResult = ar_data_sync:add_chunk_to_disk_pool(
						DataRoot, DataPath, Chunk, Offset - 1, TXSize),
				case DiskPoolResult of
					ok ->
						Acc;
					temporary ->
						Acc;
					{error, Reason} ->
						?LOG_WARNING([{event, failed_to_write_tx_chunk},
								{tx, ar_util:encode(TXID)},
								{reason, io_lib:format("~p", [Reason])}]),
						[Reason | Acc]
				end
		end,
		[],
		SizeTaggedChunks
	),
	case Errors of
		[] ->
			ok;
		_ ->
			{error, Errors}
	end.

%% @doc Read a tx from disk, given a hash.
-spec read_tx( binary() | #tx{} | [#tx{}] ) -> unavailable | #tx{} | [#tx{} | unavailable].
read_tx(TX) ->
	read_tx(TX, not_set).

read_tx(unavailable, _CustomDir) ->
	unavailable;
read_tx(TX, _CustomDir) when is_record(TX, tx) ->
	TX;
read_tx(TXs, CustomDir) when is_list(TXs) ->
	lists:map(fun(TX) -> read_tx(TX, CustomDir) end, TXs);
read_tx(ID, CustomDir) ->
	%% Try the disk cache first, then the KV table / header files.
	case read_tx_from_disk_cache(ID, CustomDir) of
		unavailable ->
			read_tx2(ID, CustomDir);
		TX ->
			TX
	end.

read_tx2(ID, CustomDir) ->
	case ar_kv:get(get_db_name(tx_db, CustomDir), ID) of
		not_found ->
			read_tx_from_file(ID, CustomDir);
		{ok, Binary} ->
			TX = parse_tx_kv_binary(Binary),
			TX2 = TX#tx{ owner_address = ar_tx:get_owner_address(TX) },
			%% A v1 transaction whose data was stored detached (data_size > 0
			%% but no data bytes present) needs the data fetched separately.
			case TX2#tx.format == 1 andalso TX2#tx.data_size > 0
					andalso byte_size(TX2#tx.data) == 0 of
				true ->
					case read_tx_data_from_kv_storage(TX2#tx.id, CustomDir) of
						{ok, Data} ->
							TX2#tx{ data = Data };
						Error ->
							?LOG_WARNING([{event, error_reading_tx_from_kv_storage},
									{tx, ar_util:encode(ID)},
									{error, io_lib:format("~p", [Error])}]),
							unavailable
					end;
				_ ->
					TX2
			end
	end.

read_tx_from_disk_cache(ID, CustomDir) ->
	case ar_disk_cache:lookup_tx_filename(ID, CustomDir) of
		unavailable ->
			unavailable;
		{ok, Filename} ->
			case read_tx_file(Filename) of
				{ok, TX} ->
					TX;
				_Error ->
					unavailable
			end
	end.
read_tx_from_file(ID, CustomDir) ->
	case lookup_tx_filename(ID, CustomDir) of
		{ok, Filename} ->
			case read_tx_file(Filename) of
				{ok, TX} ->
					TX;
				_Error ->
					unavailable
			end;
		{migrated_v1, Filename} ->
			case read_migrated_v1_tx_file(Filename, CustomDir) of
				{ok, TX} ->
					TX;
				_Error ->
					unavailable
			end;
		unavailable ->
			unavailable
	end.

%% @doc Read and parse a JSON transaction header file. Empty or unparsable
%% files are deleted so that the header can be re-synced.
read_tx_file(Filename) ->
	case read_file_raw(Filename) of
		{ok, <<>>} ->
			file:delete(Filename),
			?LOG_WARNING([{event, empty_tx_file}, {filename, Filename}]),
			{error, tx_file_empty};
		{ok, Binary} ->
			case catch ar_serialize:json_struct_to_tx(Binary) of
				TX when is_record(TX, tx) ->
					{ok, TX};
				_ ->
					file:delete(Filename),
					?LOG_WARNING([{event, failed_to_parse_tx},
							{filename, Filename}]),
					{error, failed_to_parse_tx}
			end;
		Error ->
			Error
	end.

%% Read a whole file (up to 20 MB) bypassing the Erlang file server.
read_file_raw(Filename) ->
	case file:open(Filename, [read, raw, binary]) of
		{ok, File} ->
			%% Fixed: always close the descriptor. Previously it leaked when
			%% file:read/2 returned an error, and a raw read of an empty file
			%% returned a bare eof (leaking too) instead of {ok, <<>>}, which
			%% starved the empty-file branch in read_tx_file/1.
			try file:read(File, 20000000) of
				{ok, Bin} ->
					{ok, Bin};
				eof ->
					{ok, <<>>};
				Error ->
					Error
			after
				file:close(File)
			end;
		Error ->
			Error
	end.

read_migrated_v1_tx_file(Filename) ->
	read_migrated_v1_tx_file(Filename, not_set).

%% @doc Read a migrated v1 transaction header file and fetch its data from
%% the chunk storage.
read_migrated_v1_tx_file(Filename, CustomDir) ->
	case read_file_raw(Filename) of
		{ok, Binary} ->
			case catch ar_serialize:json_struct_to_v1_tx(Binary) of
				#tx{ id = ID } = TX ->
					case read_tx_data_from_kv_storage(ID, CustomDir) of
						{ok, Data} ->
							{ok, TX#tx{ data = Data }};
						Error ->
							Error
					end;
				_ ->
					%% Fixed: an unparsable file used to crash with a
					%% case_clause; report an error like read_tx_file/1 does.
					?LOG_WARNING([{event, failed_to_parse_tx},
							{filename, Filename}]),
					{error, failed_to_parse_tx}
			end;
		Error ->
			Error
	end.

read_tx_data_from_kv_storage(ID) ->
	read_tx_data_from_kv_storage(ID, not_set).

read_tx_data_from_kv_storage(ID, _CustomDir) ->
	case ar_data_sync:get_tx_data(ID) of
		{ok, Data} ->
			{ok, Data};
		{error, not_found} ->
			{error, data_unavailable};
		{error, timeout} ->
			{error, data_fetch_timeout};
		Error ->
			Error
	end.

read_tx_data(TX) ->
	read_tx_data(TX, not_set).

%% @doc Read and decode the base64url-encoded data file of the transaction.
read_tx_data(TX, CustomDir) ->
	case read_file_raw(tx_data_filepath(TX, CustomDir)) of
		{ok, Data} ->
			{ok, ar_util:decode(Data)};
		Error ->
			Error
	end.
%% @doc Hash the account tree, persist the updated nodes, and return the new
%% root hash.
write_wallet_list(Height, Tree) ->
	{RootHash, _UpdatedTree, UpdateMap} = ar_block:hash_wallet_list(Tree),
	store_account_tree_update(Height, RootHash, UpdateMap),
	erlang:garbage_collect(),
	RootHash.

%% @doc Read a given wallet list (by hash) from the disk.
read_wallet_list(WalletListHash) ->
	read_wallet_list(WalletListHash, not_set).

read_wallet_list(<<>>, _CustomDir) ->
	{ok, ar_patricia_tree:new()};
read_wallet_list(WalletListHash, CustomDir) when is_binary(WalletListHash) ->
	Key = WalletListHash,
	read_wallet_list(get_account_tree_value(Key, <<>>, CustomDir), ar_patricia_tree:new(),
			[], WalletListHash, WalletListHash, CustomDir).

%% Rebuild the tree by walking the stored account tree nodes; Keys holds the
%% {Hash, Prefix} pairs still to visit. Fall back to the chunked wallet list
%% files when a node cannot be found.
read_wallet_list({ok, << K:48/binary, _/binary >>, Bin}, Tree, Keys, RootHash, K,
		CustomDir) ->
	case binary_to_term(Bin, [safe]) of
		{Key, Value} ->
			%% A leaf node - insert the account and continue with the queue.
			Tree2 = ar_patricia_tree:insert(Key, Value, Tree),
			case Keys of
				[] ->
					{ok, Tree2};
				[{H, Prefix} | Keys2] ->
					read_wallet_list(get_account_tree_value(H, Prefix, CustomDir),
							Tree2, Keys2, RootHash, H, CustomDir)
			end;
		[{H, Prefix} | Hs] ->
			%% An inner node - enqueue its children and descend.
			read_wallet_list(get_account_tree_value(H, Prefix, CustomDir), Tree,
					Hs ++ Keys, RootHash, H, CustomDir)
	end;
read_wallet_list({ok, _, _}, _Tree, _Keys, RootHash, _K, CustomDir) ->
	read_wallet_list_from_chunk_files(RootHash, CustomDir);
read_wallet_list(none, _Tree, _Keys, RootHash, _K, CustomDir) ->
	read_wallet_list_from_chunk_files(RootHash, CustomDir);
read_wallet_list(Error, _Tree, _Keys, _RootHash, _K, _CustomDir) ->
	Error.
read_wallet_list_from_chunk_files(WalletListHash, CustomDir) when is_binary(WalletListHash) ->
	case read_wallet_list_chunk(WalletListHash, CustomDir) of
		not_found ->
			%% No chunk files - try the legacy single JSON wallet list file.
			Filename = wallet_list_filepath(WalletListHash, CustomDir),
			case file:read_file(Filename) of
				{ok, JSON} ->
					parse_wallet_list_json(JSON);
				{error, enoent} ->
					not_found;
				Error ->
					Error
			end;
		{ok, Tree} ->
			{ok, Tree};
		{error, _Reason} = Error ->
			Error
	end;
read_wallet_list_from_chunk_files(WL, _CustomDir) when is_list(WL) ->
	{ok, ar_patricia_tree:from_proplist([{get_wallet_key(T), get_wallet_value(T)}
			|| T <- WL])}.

get_wallet_key(T) ->
	element(1, T).

%% Wallet tuples come in two arities, depending on whether the denomination
%% and mining permission fields were recorded.
get_wallet_value({_, Balance, LastTX}) ->
	{Balance, LastTX};
get_wallet_value({_, Balance, LastTX, Denomination, MiningPermission}) ->
	{Balance, LastTX, Denomination, MiningPermission}.

read_wallet_list_chunk(RootHash, CustomDir) ->
	read_wallet_list_chunk(RootHash, 0, ar_patricia_tree:new(), CustomDir).

%% Read consecutive ?WALLET_LIST_CHUNK_SIZE-sized chunk files, folding their
%% accounts into the tree; a chunk headed by the atom 'last' terminates.
read_wallet_list_chunk(RootHash, Position, Tree, CustomDir) ->
	Dir =
		case CustomDir of
			not_set ->
				get_data_dir();
			_ ->
				CustomDir
		end,
	Filename = binary_to_list(iolist_to_binary([
		Dir,
		"/",
		?WALLET_LIST_DIR,
		"/",
		ar_util:encode(RootHash),
		"-",
		integer_to_binary(Position),
		"-",
		integer_to_binary(?WALLET_LIST_CHUNK_SIZE)
	])),
	case read_term(".", Filename) of
		{ok, Chunk} ->
			{NextPosition, Wallets} =
				case Chunk of
					[last | Tail] ->
						{last, Tail};
					_ ->
						{Position + ?WALLET_LIST_CHUNK_SIZE, Chunk}
				end,
			Tree2 = lists:foldl(
				fun({K, V}, Acc) -> ar_patricia_tree:insert(K, V, Acc) end,
				Tree,
				Wallets
			),
			case NextPosition of
				last ->
					{ok, Tree2};
				_ ->
					read_wallet_list_chunk(RootHash, NextPosition, Tree2, CustomDir)
			end;
		{error, Reason} = Error ->
			?LOG_ERROR([
				{event, failed_to_read_wallet_list_chunk},
				{reason, Reason}
			]),
			Error;
		not_found ->
			not_found
	end.

parse_wallet_list_json(JSON) ->
	case ar_serialize:json_decode(JSON) of
		{ok, JiffyStruct} ->
			{ok, ar_serialize:json_struct_to_wallet_list(JiffyStruct)};
		{error, Reason} ->
			{error, {invalid_json, Reason}}
	end.
lookup_tx_filename(ID) ->
	lookup_tx_filename(ID, not_set).

%% @doc Locate the header file of the given transaction. Return {ok, Path}
%% for a regular header, {migrated_v1, Path} for a v1 header whose data was
%% migrated to the chunk storage, or unavailable.
lookup_tx_filename(ID, CustomDir) ->
	Filepath = tx_filepath(ID, CustomDir),
	case is_file(Filepath) of
		true ->
			{ok, Filepath};
		false ->
			MigratedV1Path = filepath([?TX_DIR, "migrated_v1", tx_filename(ID)], CustomDir),
			case is_file(MigratedV1Path) of
				true ->
					{migrated_v1, MigratedV1Path};
				false ->
					unavailable
			end
	end.

%% @doc A quick way to lookup the file without using the Erlang file server.
%% Helps take off some IO load during the busy times.
is_file(Filepath) ->
	case file:read_file_info(Filepath, [raw]) of
		{ok, #file_info{ type = Type }} when Type == regular orelse Type == symlink ->
			true;
		_ ->
			false
	end.

read_block_from_file(Filename, Encoding) ->
	case read_file_raw(Filename) of
		{ok, Bin} ->
			case Encoding of
				json ->
					parse_block_json(Bin);
				binary ->
					parse_block_binary(Bin)
			end;
		{error, Reason} ->
			?LOG_WARNING([{event, error_reading_block},
					{error, io_lib:format("~p", [Reason])}]),
			unavailable
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	ensure_directories(Config#config.data_dir),
	%% Copy genesis transactions (snapshotted in the repo) into data_dir/txs
	ar_weave:add_mainnet_v1_genesis_txs(),
	open_databases(),
	case Config#config.start_from_state of
		not_set ->
			ok;
		CustomDir ->
			open_start_from_state_databases(CustomDir)
	end,
	ets:insert(?MODULE, [{same_disk_storage_modules_total_size,
			get_same_disk_storage_modules_total_size()}]),
	{ok, #state{}}.
%% @doc Open all RocksDB tables under the node's data directory.
open_databases() ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "ar_storage_tx_confirmation_db"]),
		name => tx_confirmation_db}),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "ar_storage_tx_db"]),
		name => tx_db}),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "ar_storage_block_db"]),
		name => block_db}),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "reward_history_db"]),
		name => reward_history_db}),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "block_time_history_db"]),
		name => block_time_history_db}),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "block_index_db"]),
		name => block_index_db}),
	ok = ar_kv:open(#{
		path => filename:join([DataDir, ?ROCKS_DB_DIR, "account_tree_db"]),
		name => account_tree_db}).

%% @doc Open read-only handles on the databases of another node state
%% (the start_from_state option).
open_start_from_state_databases(CustomDir) ->
	{ok, Config} = arweave_config:get_env(),
	LogFilepath = filename:join([Config#config.log_dir, "rocksdb", "start_from_state"]),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "ar_storage_tx_confirmation_db"]),
		name => start_from_state_tx_confirmation_db,
		log_path => LogFilepath}),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "ar_storage_tx_db"]),
		name => start_from_state_tx_db,
		log_path => LogFilepath}),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "ar_storage_block_db"]),
		name => start_from_state_block_db,
		log_path => LogFilepath}),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "reward_history_db"]),
		name => start_from_state_reward_history_db,
		log_path => LogFilepath}),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "block_time_history_db"]),
		name => start_from_state_block_time_history_db,
		log_path => LogFilepath}),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "block_index_db"]),
		name => start_from_state_block_index_db,
		log_path => LogFilepath}),
	ok = ar_kv:open_readonly(#{
		path => filename:join([CustomDir, ?ROCKS_DB_DIR, "account_tree_db"]),
		name => start_from_state_account_tree_db,
		log_path => LogFilepath}).

close_start_from_state_databases() ->
	ar_kv:close(start_from_state_tx_confirmation_db),
	ar_kv:close(start_from_state_tx_db),
	ar_kv:close(start_from_state_block_db),
	ar_kv:close(start_from_state_reward_history_db),
	ar_kv:close(start_from_state_block_time_history_db),
	ar_kv:close(start_from_state_block_index_db),
	ar_kv:close(start_from_state_account_tree_db).

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast({store_account_tree_update, Height, RootHash, Map}, State) ->
	store_account_tree_update(Height, RootHash, Map),
	erlang:garbage_collect(),
	{noreply, State};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_info(Message, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

get_data_dir() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.data_dir.

block_index_tip() ->
	block_index_tip(not_set).

%% Return {Height, {H, WeaveSize, TXRoot, PrevH}} for the highest stored
%% block index entry, or not_found.
block_index_tip(CustomDir) ->
	%% Use a key that is bigger than any << Height:256 >> (<<"a">> > << Height:256 >>)
	%% to retrieve the largest stored Height.
	case ar_kv:get_prev(get_db_name(block_index_db, CustomDir), <<"a">>) of
		none ->
			not_found;
		{ok, << Height:256 >>, V} ->
			{Height, binary_to_term(V, [safe])}
	end.
%% @doc Serialize the block and store it in block_db keyed by its indep_hash.
%% Transactions are reduced to their identifiers before serialization.
write_block(B) ->
	{ok, Config} = arweave_config:get_env(),
	case lists:member(disk_logging, Config#config.enable) of
		true ->
			?LOG_INFO([{event, writing_block_to_disk},
					{block, ar_util:encode(B#block.indep_hash)}]);
		_ ->
			do_nothing
	end,
	%% B#block.txs may contain either TXIDs or full #tx records; keep IDs only.
	TXIDs = lists:map(fun(TXID) when is_binary(TXID) -> TXID;
			(#tx{ id = TXID }) -> TXID end, B#block.txs),
	ar_kv:put(block_db, B#block.indep_hash,
			ar_serialize:block_to_binary(B#block{ txs = TXIDs })).

write_full_block2(BShadow, _) ->
	case write_block(BShadow) of
		ok ->
			ok;
		Error ->
			Error
	end.

%% @doc Decode a JSON-encoded block; return the #block record or unavailable.
parse_block_json(JSON) ->
	%% catch protects against malformed input raising from the decoder.
	case catch ar_serialize:json_decode(JSON) of
		{ok, JiffyStruct} ->
			case catch ar_serialize:json_struct_to_block(JiffyStruct) of
				B when is_record(B, block) ->
					B;
				Error ->
					?LOG_WARNING([{event, error_parsing_block_json},
							{error, io_lib:format("~p", [Error])}]),
					unavailable
			end;
		Error ->
			?LOG_WARNING([{event, error_parsing_block_json},
					{error, io_lib:format("~p", [Error])}]),
			unavailable
	end.

%% @doc Decode a binary-encoded block; return the #block record or unavailable.
parse_block_binary(Bin) ->
	case catch ar_serialize:binary_to_block(Bin) of
		{ok, B} ->
			B;
		Error ->
			?LOG_WARNING([{event, error_parsing_block_bin},
					{error, io_lib:format("~p", [Error])}]),
			unavailable
	end.

filepath(PathComponents) ->
	filepath(PathComponents, not_set).

%% @doc Join PathComponents under CustomDir, or under the configured data
%% directory when CustomDir == not_set. Returns a string.
filepath(PathComponents, CustomDir) ->
	DataDir = case CustomDir of not_set -> get_data_dir(); _ -> CustomDir end,
	to_string(filename:join([DataDir | PathComponents])).

to_string(Bin) when is_binary(Bin) ->
	binary_to_list(Bin);
to_string(String) ->
	String.

%% @doc Ensure that all of the relevant storage directories exist.
ensure_directories(DataDir) ->
	%% Append "/" to every path so that filelib:ensure_dir/1 creates a directory
	%% if it does not exist.
	filelib:ensure_dir(filename:join(DataDir, ?TX_DIR) ++ "/"),
	filelib:ensure_dir(filename:join(DataDir, ?BLOCK_DIR) ++ "/"),
	filelib:ensure_dir(filename:join(DataDir, ?WALLET_LIST_DIR) ++ "/"),
	filelib:ensure_dir(filename:join(DataDir, ?HASH_LIST_DIR) ++ "/"),
	filelib:ensure_dir(filename:join([DataDir, ?TX_DIR, "migrated_v1"]) ++ "/").

%% @doc Map a database name to its start_from_state_-prefixed read-only
%% counterpart when a custom directory is in use.
get_db_name(DBName, not_set) ->
	DBName;
get_db_name(DBName, _CustomDir) ->
	list_to_atom("start_from_state_" ++ atom_to_list(DBName)).

%% @doc Sum the configured sizes of the storage modules whose directories live
%% on the same device (file_info major_device) as the data directory.
get_same_disk_storage_modules_total_size() ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	{ok, Info} = file:read_file_info(DataDir),
	Device = Info#file_info.major_device,
	get_same_disk_storage_modules_total_size(0, Config#config.storage_modules,
			DataDir, Device).

get_same_disk_storage_modules_total_size(TotalSize, [], _DataDir, _Device) ->
	TotalSize;
get_same_disk_storage_modules_total_size(TotalSize,
		[{Size, _Bucket, _Packing} = Module | StorageModules], DataDir, Device) ->
	Path = filename:join([DataDir, "storage_modules", ar_storage_module:id(Module)]),
	%% Create the module directory if missing so read_file_info below succeeds.
	filelib:ensure_dir(Path ++ "/"),
	{ok, Info} = file:read_file_info(Path),
	TotalSize2 = case Info#file_info.major_device == Device of
			true -> TotalSize + Size; false -> TotalSize end,
	get_same_disk_storage_modules_total_size(TotalSize2, StorageModules, DataDir, Device).

tx_filepath(TX) ->
	tx_filepath(TX, not_set).

tx_filepath(TX, CustomDir) ->
	filepath([?TX_DIR, tx_filename(TX)], CustomDir).

tx_data_filepath(TX) ->
	tx_data_filepath(TX, not_set).

tx_data_filepath(TX, CustomDir) when is_record(TX, tx) ->
	tx_data_filepath(TX#tx.id, CustomDir);
tx_data_filepath(ID, CustomDir) ->
	filepath([?TX_DIR, tx_data_filename(ID)], CustomDir).

%% @doc Return "<base64url TXID>.json" for a transaction or its identifier.
tx_filename(TX) when is_record(TX, tx) ->
	tx_filename(TX#tx.id);
tx_filename(TXID) when is_binary(TXID) ->
	iolist_to_binary([ar_util:encode(TXID), ".json"]).

tx_data_filename(TXID) ->
	iolist_to_binary([ar_util:encode(TXID), "_data.json"]).
wallet_list_filepath(Hash) ->
	wallet_list_filepath(Hash, not_set).

%% @doc Return the path of the wallet list file named after the base64url Hash.
wallet_list_filepath(Hash, CustomDir) when is_binary(Hash) ->
	filepath([?WALLET_LIST_DIR,
			iolist_to_binary([ar_util:encode(Hash), ".json"])], CustomDir).

%% @doc Write Data to Filename via a ".swp" swap file renamed into place, so
%% readers never observe a partially written file. Returns ok or {error, Reason}.
%% On write or close failure the swap file handle is closed and the swap file
%% removed so no descriptor leaks and no stale ".swp" files accumulate.
write_file_atomic(Filename, Data) ->
	SwapFilename = Filename ++ ".swp",
	case file:open(SwapFilename, [write, raw]) of
		{ok, F} ->
			case file:write(F, Data) of
				ok ->
					case file:close(F) of
						ok ->
							file:rename(SwapFilename, Filename);
						Error ->
							%% Close failed; remove the possibly incomplete swap file.
							file:delete(SwapFilename),
							Error
					end;
				Error ->
					%% Write failed; close the handle (was leaked before) and
					%% remove the partial swap file. Best effort on both.
					file:close(F),
					file:delete(SwapFilename),
					Error
			end;
		Error ->
			Error
	end.

%% @doc Write term_to_binary(Term) to Name under the configured data directory.
write_term(Name, Term) ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	write_term(DataDir, Name, Term, override).

write_term(Dir, Name, Term) when is_atom(Name) ->
	write_term(Dir, atom_to_list(Name), Term, override);
write_term(Dir, Name, Term) ->
	write_term(Dir, Name, Term, override).

%% @doc Write the term to Dir/Name atomically. When Override == do_not_override
%% and the file already exists, leave it untouched and return ok.
write_term(Dir, Name, Term, Override) ->
	Filepath = filename:join(Dir, Name),
	case Override == do_not_override andalso filelib:is_file(Filepath) of
		true ->
			ok;
		false ->
			case write_file_atomic(Filepath, term_to_binary(Term)) of
				ok ->
					ok;
				{error, Reason} = Error ->
					?LOG_ERROR([{event, failed_to_write_term}, {name, Name},
							{reason, Reason}]),
					Error
			end
	end.

%% @doc Read a term previously stored with write_term from the data directory.
read_term(Name) ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	read_term(DataDir, Name).

read_term(Dir, Name) when is_atom(Name) ->
	read_term(Dir, atom_to_list(Name));
read_term(Dir, Name) ->
	case file:read_file(filename:join(Dir, Name)) of
		{ok, Binary} ->
			%% [safe] rejects unknown atoms/funs in the stored binary.
			{ok, binary_to_term(Binary, [safe])};
		{error, enoent} ->
			not_found;
		{error, Reason} = Error ->
			?LOG_ERROR([{event, failed_to_read_term}, {name, Name},
					{reason, Reason}]),
			Error
	end.

%% @doc Delete the term file named after the atom Name from the data directory.
delete_term(Name) ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	file:delete(filename:join(DataDir, atom_to_list(Name))).
%% @doc Persist the given account tree diff (Map of {Hash, Prefix} -> Value)
%% into account_tree_db. Existing keys are left untouched; errors are logged
%% but do not abort the iteration.
store_account_tree_update(Height, RootHash, Map) ->
	?LOG_INFO([{event, storing_account_tree_update}, {updated_key_count, map_size(Map)},
			{height, Height}, {root_hash, ar_util:encode(RootHash)}]),
	%% maps:map is used for its side effects only; the produced map is discarded.
	maps:map(
		fun({H, Prefix} = Key, Value) ->
			%% The root node is stored with an empty prefix.
			Prefix2 = case Prefix of root -> <<>>; _ -> Prefix end,
			DBKey = << H/binary, Prefix2/binary >>,
			case ar_kv:get(account_tree_db, DBKey) of
				not_found ->
					case ar_kv:put(account_tree_db, DBKey, term_to_binary(Value)) of
						ok ->
							ok;
						{error, Reason} ->
							?LOG_ERROR([{event, failed_to_store_account_tree_key},
									{key_hash, ar_util:encode(element(1, Key))},
									%% Note: Prefix is already bound, so the second
									%% clause is an always-true equality match.
									{key_prefix, case element(2, Key) of
											root -> root;
											Prefix -> ar_util:encode(Prefix) end},
									{height, Height},
									{root_hash, ar_util:encode(RootHash)},
									{reason, io_lib:format("~p", [Reason])}])
					end;
				{ok, _} ->
					%% Key already stored; nothing to do.
					ok;
				{error, Reason} ->
					?LOG_ERROR([{event, failed_to_read_account_tree_key},
							{key_hash, ar_util:encode(element(1, Key))},
							{key_prefix, case element(2, Key) of
									root -> root;
									Prefix -> ar_util:encode(Prefix) end},
							{height, Height},
							{root_hash, ar_util:encode(RootHash)},
							{reason, io_lib:format("~p", [Reason])}])
			end
		end,
		Map
	),
	?LOG_INFO([{event, stored_account_tree}]).

%% @doc Ignore the prefix when querying a key since the prefix might depend on the order of
%% insertions and is only used to optimize certain lookups.
get_account_tree_value(Key, Prefix, CustomDir) ->
	ar_kv:get_prev(get_db_name(account_tree_db, CustomDir),
			<< Key/binary, Prefix/binary >>).
	% does not work:
	%ar_kv:get_next(account_tree_db, << Key/binary, Prefix/binary >>).
	% works:
	%<< N:(48 * 8) >> = Key,
	%Key2 = << (N + 1):(48 * 8) >>,
	%ar_kv:get_prev(account_tree_db, Key2).

%%%===================================================================
%%% Tests
%%%===================================================================

%% @doc Test block storage.
store_and_retrieve_block_test_() ->
	{timeout, 60, fun test_store_and_retrieve_block/0}.
%% @doc Store the genesis block, read it back via hash and via height, mine two
%% blocks and verify the tip can be read back both ways.
test_store_and_retrieve_block() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	TXIDs = [TX#tx.id || TX <- B0#block.txs],
	FetchedB0 = read_block(B0#block.indep_hash),
	FetchedB01 = FetchedB0#block{ txs = [tx_id(TX) || TX <- FetchedB0#block.txs] },
	FetchedB02 = read_block(B0#block.height,
			[{B0#block.indep_hash, B0#block.weave_size, B0#block.tx_root}]),
	FetchedB03 = FetchedB02#block{ txs = [tx_id(TX) || TX <- FetchedB02#block.txs] },
	?assertEqual(B0#block{ size_tagged_txs = unset, txs = TXIDs, reward_history = [],
			block_time_history = [], account_tree = undefined }, FetchedB01),
	?assertEqual(B0#block{ size_tagged_txs = unset, txs = TXIDs, reward_history = [],
			block_time_history = [], account_tree = undefined }, FetchedB03),
	ar_test_node:mine(),
	ar_test_node:wait_until_height(main, 1),
	ar_test_node:mine(),
	BI1 = ar_test_node:wait_until_height(main, 2),
	%% Poll the header sync counter inside the retry fun. Previously the ets
	%% lookup happened once before do_until, so the condition was a constant
	%% and retrying could never observe progress.
	ar_util:do_until(
		fun() ->
			[{_, BlockCount}] = ets:lookup(ar_header_sync, synced_blocks),
			3 == BlockCount
		end,
		100,
		2000
	),
	BH1 = element(1, hd(BI1)),
	?assertMatch(#block{ height = 2, indep_hash = BH1 }, read_block(BH1)),
	?assertMatch(#block{ height = 2, indep_hash = BH1 }, read_block(2, BI1)).

%% @doc Normalize a transaction or TXID to the bare TXID.
tx_id(#tx{ id = TXID }) ->
	TXID;
tx_id(TXID) ->
	TXID.

store_and_retrieve_wallet_list_test_() ->
	[
		{timeout, 20, fun test_store_and_retrieve_wallet_list/0},
		{timeout, 240, fun test_store_and_retrieve_wallet_list_permutations/0}
	].
%% @doc Write wallet lists, read them back, and verify both whole-tree reads
%% and single-account lookups, including after incremental updates.
test_store_and_retrieve_wallet_list() ->
	[B0] = ar_weave:init(),
	[TX] = B0#block.txs,
	Addr = ar_wallet:to_address(TX#tx.owner, {?RSA_SIGN_ALG, 65537}),
	write_block(B0),
	TXID = TX#tx.id,
	ExpectedWL = ar_patricia_tree:from_proplist([{Addr, {0, TXID}}]),
	WalletListHash = write_wallet_list(0, ExpectedWL),
	{ok, ActualWL} = read_wallet_list(WalletListHash),
	assert_wallet_trees_equal(ExpectedWL, ActualWL),
	%% A second, shorter address sharing a prefix with Addr.
	Addr2 = binary:part(Addr, 0, 16),
	TXID2 = crypto:strong_rand_bytes(32),
	ExpectedWL2 = ar_patricia_tree:from_proplist([{Addr, {0, TXID}}, {Addr2, {0, TXID2}}]),
	WalletListHash2 = write_wallet_list(0, ExpectedWL2),
	{ok, ActualWL2} = read_wallet_list(WalletListHash2),
	?assertEqual({0, TXID}, read_account(Addr, WalletListHash2)),
	?assertEqual({0, TXID2}, read_account(Addr2, WalletListHash2)),
	assert_wallet_trees_equal(ExpectedWL2, ActualWL2),
	%% WalletListHash is already bound here, so this match also asserts the
	%% recomputed hash equals the stored one.
	{WalletListHash, ActualWL3, _UpdateMap} = ar_block:hash_wallet_list(ActualWL),
	Addr3 = << (binary:part(Addr, 0, 3))/binary, (crypto:strong_rand_bytes(29))/binary >>,
	TXID3 = crypto:strong_rand_bytes(32),
	TXID4 = crypto:strong_rand_bytes(32),
	ActualWL4 = ar_patricia_tree:insert(Addr3, {100, TXID3},
			ar_patricia_tree:insert(Addr2, {0, TXID4}, ActualWL3)),
	{WalletListHash3, ActualWL5, UpdateMap2} = ar_block:hash_wallet_list(ActualWL4),
	store_account_tree_update(1, WalletListHash3, UpdateMap2),
	?assertEqual({100, TXID3}, read_account(Addr3, WalletListHash3)),
	?assertEqual({0, TXID4}, read_account(Addr2, WalletListHash3)),
	?assertEqual({0, TXID}, read_account(Addr, WalletListHash3)),
	{ok, ActualWL6} = read_wallet_list(WalletListHash3),
	assert_wallet_trees_equal(ActualWL5, ActualWL6).
%% @doc Exercise wallet list storage with many key orderings, focusing on keys
%% that are prefixes of each other (patricia tree edge cases).
test_store_and_retrieve_wallet_list_permutations() ->
	lists:foreach(
		fun(Permutation) ->
			store_and_retrieve_wallet_list(Permutation)
		end,
		permutations([ <<"a">>, <<"aa">>, <<"ab">>, <<"bb">>, <<"b">>, <<"aaa">> ])),
	lists:foreach(
		fun(Permutation) ->
			store_and_retrieve_wallet_list(Permutation)
		end,
		permutations([ <<"a">>, <<"aa">>, <<"aaa">>, <<"aaaa">>, <<"aaaaa">> ])),
	store_and_retrieve_wallet_list([ <<"a">>, <<"aa">>, <<"ab">>, <<"b">> ]),
	store_and_retrieve_wallet_list([ <<"aa">>, <<"a">>, <<"ab">> ]),
	store_and_retrieve_wallet_list([ <<"aaa">>, <<"bbb">>, <<"aab">>, <<"ab">>, <<"a">> ]),
	store_and_retrieve_wallet_list([ <<"aaaa">>, <<"aaab">>, <<"aaac">>, <<"aaa">>,
			<<"aab">>, <<"aac">>, <<"aa">>, <<"ab">>, <<"ac">>, <<"a">>, <<"b">>,
			<<"c">> ]),
	%% Duplicated key sequences exercise updates of already-inserted keys.
	store_and_retrieve_wallet_list([ <<"a">>, <<"b">>, <<"c">>, <<"aa">>, <<"ab">>,
			<<"ac">>, <<"aaa">>, <<"aab">>, <<"aac">>, <<"aaaa">>, <<"aaab">>,
			<<"aaac">>, <<"a">>, <<"b">>, <<"c">>, <<"aa">>, <<"ab">>, <<"ac">>,
			<<"aaa">>, <<"aab">>, <<"aac">>, <<"aaaa">>, <<"aaab">>, <<"aaac">> ]),
	store_and_retrieve_wallet_list([ <<"aaaa">>, <<"aaa">>, <<"aa">>, <<"a">>,
			<<"aaab">>, <<"aab">>, <<"ab">>, <<"b">>, <<"aaac">>, <<"aac">>,
			<<"ac">>, <<"c">>, <<"aaaa">>, <<"aaa">>, <<"aa">>, <<"a">>,
			<<"aaab">>, <<"aab">>, <<"ab">>, <<"b">>, <<"aaac">>, <<"aac">>,
			<<"ac">>, <<"c">> ]),
	store_and_retrieve_wallet_list([ <<"aaaa">>, <<"aaab">>, <<"aaac">>, <<"a">>,
			<<"aa">>, <<"aaa">>, <<"aaaa">>, <<"aaab">>, <<"aaac">> ]),
	ok.

%% @doc Reset the account tree database, then insert the given keys one by one,
%% randomly checkpointing along the way (see store_and_retrieve_wallet_list/4).
store_and_retrieve_wallet_list(Keys) ->
	MinBinary = <<>>,
	%% 512 one-bits: a 64-byte binary of 255s, an upper bound for all keys used here.
	MaxBinary = << <<1:1>> || _ <- lists:seq(1, 512) >>,
	ar_kv:delete_range(account_tree_db, MinBinary, MaxBinary),
	store_and_retrieve_wallet_list(Keys, ar_patricia_tree:new(), maps:new(), false).
%% @doc Insert Keys into Tree with random balances/TXIDs; with probability 1/2
%% after each insertion, persist and re-verify the tree. IsUpdate tracks whether
%% a full write has already happened (subsequent writes are incremental).
store_and_retrieve_wallet_list([], Tree, InsertedKeys, IsUpdate) ->
	store_and_retrieve_wallet_list2(Tree, InsertedKeys, IsUpdate);
store_and_retrieve_wallet_list([Key | Keys], Tree, InsertedKeys, IsUpdate) ->
	TXID = crypto:strong_rand_bytes(32),
	Balance = rand:uniform(1000000000),
	Tree2 = ar_patricia_tree:insert(Key, {Balance, TXID}, Tree),
	InsertedKeys2 = maps:put(Key, {Balance, TXID}, InsertedKeys),
	case rand:uniform(2) of
		1 ->
			Tree3 = store_and_retrieve_wallet_list2(Tree2, InsertedKeys2, IsUpdate),
			store_and_retrieve_wallet_list(Keys, Tree3, InsertedKeys2, true);
		_ ->
			store_and_retrieve_wallet_list(Keys, Tree2, InsertedKeys2, IsUpdate)
	end.

%% @doc Persist the tree (full write on first call, incremental diff after),
%% then assert every inserted account and the whole tree read back correctly.
%% Returns the (possibly re-hashed) tree.
store_and_retrieve_wallet_list2(Tree, InsertedKeys, IsUpdate) ->
	{WalletListHash, Tree2} =
		case IsUpdate of
			false ->
				{write_wallet_list(0, Tree), Tree};
			_ ->
				{R, T, Map} = ar_block:hash_wallet_list(Tree),
				store_account_tree_update(0, R, Map),
				{R, T}
		end,
	{ok, ActualTree} = read_wallet_list(WalletListHash),
	maps:foreach(
		fun(Key, {Balance, TXID}) ->
			?assertEqual({Balance, TXID}, read_account(Key, WalletListHash))
		end,
		InsertedKeys
	),
	assert_wallet_trees_equal(Tree, ActualTree),
	assert_wallet_trees_equal(Tree2, ActualTree),
	Tree2.

%% From: https://www.erlang.org/doc/programming_examples/list_comprehensions.html#permutations
permutations([]) -> [[]];
permutations(L) -> [[H|T] || H <- L, T <- permutations(L--[H])].

%% @doc Assert two patricia trees contain exactly the same {Key, Value} pairs.
assert_wallet_trees_equal(Expected, Actual) ->
	?assertEqual(
		ar_patricia_tree:foldr(fun(K, V, Acc) -> [{K, V} | Acc] end, [], Expected),
		ar_patricia_tree:foldr(fun(K, V, Acc) -> [{K, V} | Acc] end, [], Actual)
	).
%% @doc Exercise wallet list persistence around the ?WALLET_LIST_CHUNK_SIZE
%% boundaries (below, at, above, and multiples of the chunk size).
read_wallet_list_chunks_test() ->
	TestCases = [
		[random_wallet()], % < chunk size
		[random_wallet() || _ <- lists:seq(1, ?WALLET_LIST_CHUNK_SIZE)], % == chunk size
		[random_wallet() || _ <- lists:seq(1, ?WALLET_LIST_CHUNK_SIZE + 1)], % > chunk size
		[random_wallet() || _ <- lists:seq(1, 10 * ?WALLET_LIST_CHUNK_SIZE)],
		[random_wallet() || _ <- lists:seq(1, 10 * ?WALLET_LIST_CHUNK_SIZE + 1)]
	],
	lists:foreach(
		fun(TestCase) ->
			Tree = ar_patricia_tree:from_proplist(TestCase),
			RootHash = write_wallet_list(0, Tree),
			{ok, ReadTree} = read_wallet_list(RootHash),
			assert_wallet_trees_equal(Tree, ReadTree)
		end,
		TestCases
	).

%% @doc Return a random {Address, {Balance, LastTXID}} pair.
random_wallet() ->
	{crypto:strong_rand_bytes(32), {rand:uniform(1000000000), crypto:strong_rand_bytes(32)}}.

update_block_index_test_() ->
	[
		{timeout, 20, fun test_update_block_index/0}
	].

%% @doc Drive update_block_index/3 through error cases (gaps, negative
%% arguments, too many orphans) and all orphan-replacement shapes, checking
%% read_block_index/0 after each successful update.
test_update_block_index() ->
	%% Start from an empty index; heights are stored as << Height:256 >> keys.
	ar_kv:delete_range(block_index_db, <<0:256>>, <<"a">>),
	?assertEqual(
		{error, not_found},
		update_block_index(2, 0, [ {<<"hash_a">>, 0, <<"root_a">>} ]),
		"Gap on empty index"
	),
	?assertEqual(
		{error, badarg},
		update_block_index(-1, 0, [ {<<"hash_a">>, 0, <<"root_a">>} ]),
		"Negative tip"
	),
	?assertEqual(
		{error, badarg},
		update_block_index(0, -1, [ {<<"hash_a">>, 0, <<"root_a">>} ]),
		"Negative orphan count"
	),
	?assertEqual(
		{error, not_found},
		update_block_index(0, 1, [ {<<"hash_a">>, 0, <<"root_a">>} ]),
		"Orphan on empty index"
	),
	?assertEqual(
		ok,
		update_block_index(0, 0, [ {<<"hash_a">>, 0, <<"root_a">>} ])
	),
	?assertEqual([ {<<"hash_a">>, 0, <<"root_a">>} ], read_block_index()),
	?assertEqual(
		{error, not_found},
		update_block_index(2, 0, [ {<<"hash_b">>, 0, <<"root_b">>} ]),
		"Gap on non-empty index"
	),
	?assertEqual(
		ok,
		update_block_index(1, 0, [ {<<"hash_b">>, 0, <<"root_b">>} ])
	),
	?assertEqual([ {<<"hash_b">>, 0, <<"root_b">>}, {<<"hash_a">>, 0, <<"root_a">>} ],
			read_block_index()),
	?assertEqual(
		{error, not_found},
		update_block_index(0, 3, [ {<<"hash_c">>, 0, <<"root_c">>} ]),
		"Too many orphans"
	),
	?assertEqual(
		ok,
		update_block_index(2, 0, [ {<<"hash_c">>, 0, <<"root_c">>} ])
	),
	?assertEqual(
		ok,
		update_block_index(3, 0, [ {<<"hash_d">>, 0, <<"root_d">>} ])
	),
	?assertEqual([ {<<"hash_d">>, 0, <<"root_d">>}, {<<"hash_c">>, 0, <<"root_c">>},
			{<<"hash_b">>, 0, <<"root_b">>}, {<<"hash_a">>, 0, <<"root_a">>} ],
			read_block_index()),
	%% Orphan: 2 for 1
	?assertEqual(
		ok,
		update_block_index(4, 1, [ {<<"hash_e">>, 0, <<"root_e">>},
				{<<"hash_f">>, 0, <<"root_f">>} ])
	),
	?assertEqual([ {<<"hash_f">>, 0, <<"root_f">>}, {<<"hash_e">>, 0, <<"root_e">>},
			{<<"hash_c">>, 0, <<"root_c">>}, {<<"hash_b">>, 0, <<"root_b">>},
			{<<"hash_a">>, 0, <<"root_a">>} ], read_block_index()),
	%% Orphan: 1 for 1
	?assertEqual(
		ok,
		update_block_index(4, 1, [ {<<"hash_g">>, 0, <<"root_g">>} ])
	),
	?assertEqual([ {<<"hash_g">>, 0, <<"root_g">>}, {<<"hash_e">>, 0, <<"root_e">>},
			{<<"hash_c">>, 0, <<"root_c">>}, {<<"hash_b">>, 0, <<"root_b">>},
			{<<"hash_a">>, 0, <<"root_a">>} ], read_block_index()),
	%% Orphan: 1 for 2
	?assertEqual(
		ok,
		update_block_index(3, 2, [ {<<"hash_h">>, 0, <<"root_h">>} ])
	),
	?assertEqual([ {<<"hash_h">>, 0, <<"root_h">>}, {<<"hash_c">>, 0, <<"root_c">>},
			{<<"hash_b">>, 0, <<"root_b">>}, {<<"hash_a">>, 0, <<"root_a">>} ],
			read_block_index()),
	%% Orphan: 1 for 3
	?assertEqual(
		ok,
		update_block_index(1, 3, [ {<<"hash_i">>, 0, <<"root_i">>} ])
	),
	?assertEqual([ {<<"hash_i">>, 0, <<"root_i">>}, {<<"hash_a">>, 0, <<"root_a">>} ],
			read_block_index()),
	%% Orphan: 2 for 2
	?assertEqual(
		ok,
		update_block_index(1, 2, [ {<<"hash_j">>, 0, <<"root_j">>},
				{<<"hash_k">>, 0, <<"root_k">>} ])
	),
	?assertEqual([ {<<"hash_k">>, 0, <<"root_k">>}, {<<"hash_j">>, 0, <<"root_j">>} ],
			read_block_index()),
	%% Orphan: 3 for 1
	?assertEqual(
		ok,
		update_block_index(3, 1, [ {<<"hash_l">>, 0, <<"root_l">>},
				{<<"hash_m">>, 0, <<"root_m">>}, {<<"hash_n">>, 0, <<"root_n">>} ])
	),
	?assertEqual([ {<<"hash_n">>, 0, <<"root_n">>}, {<<"hash_m">>, 0, <<"root_m">>},
			{<<"hash_l">>, 0, <<"root_l">>}, {<<"hash_j">>, 0, <<"root_j">>} ],
			read_block_index()),
	%% Replace all but genesis
	?assertEqual(
		ok,
		update_block_index(2, 3, [ {<<"hash_o">>, 0, <<"root_o">>},
				{<<"hash_p">>, 0, <<"root_p">>} ])
	),
	?assertEqual([ {<<"hash_p">>, 0, <<"root_p">>}, {<<"hash_o">>, 0, <<"root_o">>},
			{<<"hash_j">>, 0, <<"root_j">>} ], read_block_index()).



================================================
FILE: apps/arweave/src/ar_storage_module.erl
================================================
-module(ar_storage_module).

-export([get_overlap/1, id/1, label/1, address_label/2, module_address/1,
		module_packing_difficulty/1, packing_label/1, get_by_id/1, get_range/1,
		module_range/1, module_range/2, get_packing/1, get/2, get_all/1, get_all/2,
		get_all/3, has_any/1, has_range/2, get_cover/3, is_repack_in_place/1]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%% The overlap makes sure a 100 MiB recall range can always be fetched
%% from a single storage module.
-ifdef(AR_TEST).
-define(OVERLAP, 262144).
-else.
-define(OVERLAP, (?LEGACY_RECALL_RANGE_SIZE)).
-endif.

-ifdef(AR_TEST).
-define(REPLICA_2_9_OVERLAP, 262144).
-else.
-define(REPLICA_2_9_OVERLAP, (262144 * 10)).
-endif.

%% {BucketSize, Bucket, Packing}; Packing is a 2- or 3-tuple.
-type storage_module() :: {integer(), integer(), {atom(), binary()}}
		| {integer(), integer(), {atom(), binary(), integer()}}.

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return the overlap (in bytes) appropriate for the given packing.
get_overlap({replica_2_9, _Addr}) ->
	?REPLICA_2_9_OVERLAP;
get_overlap(_Packing) ->
	?OVERLAP.

%% @doc Return the storage module identifier.
id(?DEFAULT_MODULE) ->
	?DEFAULT_MODULE;
id({BucketSize, Bucket, Packing}) ->
	%% Encode the packing into a string/binary suffix for the module id.
	PackingString =
		case Packing of
			{spora_2_6, Addr} ->
				ar_util:encode(Addr);
			{composite, Addr, PackingDiff} ->
				<< (ar_util:encode(Addr))/binary, ".",
						(integer_to_binary(PackingDiff))/binary >>;
			{replica_2_9, Addr} ->
				<< (ar_util:encode(Addr))/binary, ".replica.2.9" >>;
			_ ->
				atom_to_list(Packing)
		end,
	id(BucketSize, Bucket, PackingString).

%% @doc Return the obscure unique label for the given storage module.
label(?DEFAULT_MODULE) ->
	?DEFAULT_MODULE;
label(StoreID) ->
	%% Labels are memoized in the module's ets table under {label, StoreID}.
	case ets:lookup(?MODULE, {label, StoreID}) of
		[] ->
			StorageModule = get_by_id(StoreID),
			{BucketSize, Bucket, Packing} = StorageModule,
			PackingLabel = packing_label(Packing),
			Label = id(BucketSize, Bucket, PackingLabel),
			ets:insert(?MODULE, {{label, StoreID}, Label}),
			Label;
		[{_, Label}] ->
			Label
	end.

%% @doc Return the obscure unique label for the given
%% replica owner address + replica type pair.
address_label(Addr, ReplicaType) ->
	Key = {Addr, ReplicaType},
	case ets:lookup(?MODULE, {address_label, Key}) of
		[] ->
			%% Assign the next integer label from the last_address_label counter.
			Label =
				case ets:lookup(?MODULE, last_address_label) of
					[] ->
						1;
					[{_, Counter}] ->
						Counter + 1
				end,
			ets:insert(?MODULE, {{address_label, Key}, Label}),
			ets:insert(?MODULE, {last_address_label, Label}),
			integer_to_list(Label);
		[{_, Label}] ->
			integer_to_list(Label)
	end.

%% @doc Return the replica owner address of the module, or undefined.
-spec module_address(ar_storage_module:storage_module()) -> binary() | undefined.
module_address({_, _, {spora_2_6, Addr}}) ->
	Addr;
module_address({_, _, {composite, Addr, _PackingDifficulty}}) ->
	Addr;
module_address({_, _, {replica_2_9, Addr}}) ->
	Addr;
module_address(_StorageModule) ->
	undefined.

-spec module_packing_difficulty(ar_storage_module:storage_module()) -> integer().
%% @doc Return the packing difficulty of the module; 0 for non-composite,
%% non-replica_2_9 packings.
module_packing_difficulty({_, _, {composite, _Addr, PackingDifficulty}}) ->
	%% Composite modules must not use the reserved replica_2_9 difficulty.
	true = PackingDifficulty /= ?REPLICA_2_9_PACKING_DIFFICULTY,
	PackingDifficulty;
module_packing_difficulty({_, _, {replica_2_9, _Addr}}) ->
	?REPLICA_2_9_PACKING_DIFFICULTY;
module_packing_difficulty(_StorageModule) ->
	0.

%% @doc Map a packing term to a short labeled atom using address_label/2;
%% packings without an address are returned unchanged.
packing_label({spora_2_6, Addr}) ->
	AddrLabel = ar_storage_module:address_label(Addr, spora_2_6),
	list_to_atom("spora_2_6_" ++ AddrLabel);
packing_label({composite, Addr, PackingDifficulty}) ->
	AddrLabel = ar_storage_module:address_label(Addr, {composite, PackingDifficulty}),
	list_to_atom("composite_" ++ AddrLabel);
packing_label({replica_2_9, Addr}) ->
	AddrLabel = ar_storage_module:address_label(Addr, replica_2_9),
	list_to_atom("replica_2_9_" ++ AddrLabel);
packing_label(Packing) ->
	Packing.

%% @doc Return the storage module with the given identifier or not_found.
%% Search across both attached modules and repacked in-place modules.
get_by_id(?DEFAULT_MODULE) ->
	?DEFAULT_MODULE;
get_by_id(Atom) when is_atom(Atom) ->
	%% May be 'default' or an atom from the unit tests.
	Atom;
get_by_id(ID) ->
	{ok, Config} = arweave_config:get_env(),
	RepackInPlaceModules = [element(1, El)
			|| El <- Config#config.repack_in_place_storage_modules],
	get_by_id(ID, Config#config.storage_modules ++ RepackInPlaceModules).

get_by_id(_ID, []) ->
	not_found;
get_by_id(ID, [Module | Modules]) ->
	case ar_storage_module:id(Module) == ID of
		true ->
			Module;
		false ->
			get_by_id(ID, Modules)
	end.

%% @doc Return {StartOffset, EndOffset} the given module is responsible for.
get_range(?DEFAULT_MODULE) ->
	{0, infinity};
get_range(ID) ->
	Module = get_by_id(ID),
	case Module of
		not_found ->
			not_found;
		_ ->
			module_range(Module)
	end.

-spec module_range(ar_storage_module:storage_module()) ->
		{non_neg_integer(), non_neg_integer()}.
module_range(Module) ->
	{_BucketSize, _Bucket, Packing} = Module,
	module_range(Module, ar_storage_module:get_overlap(Packing)).
%% @doc Return the module's byte range: [BucketSize * Bucket,
%% BucketSize * (Bucket + 1) + Overlap].
module_range(Module, Overlap) ->
	{BucketSize, Bucket, _Packing} = Module,
	{BucketSize * Bucket, (Bucket + 1) * BucketSize + Overlap}.

%% @doc Return the packing configured for the given module.
get_packing(?DEFAULT_MODULE) ->
	unpacked;
get_packing({_BucketSize, _Bucket, Packing}) ->
	Packing;
get_packing(ID) ->
	Module = get_by_id(ID),
	case Module of
		not_found ->
			not_found;
		_ ->
			get_packing(Module)
	end.

%% @doc Return a configured storage module covering the given Offset, preferably
%% with the given Packing. Return not_found if none is found.
get(Offset, Packing) ->
	{ok, Config} = arweave_config:get_env(),
	get(Offset, Packing, Config#config.storage_modules, not_found).

%% @doc Return the list of all configured storage modules covering the given Offset.
get_all(Offset) ->
	{ok, Config} = arweave_config:get_env(),
	get_all2(Offset, Config#config.storage_modules, []).

%% @doc Return the list of configured storage modules whose ranges intersect
%% the given interval.
get_all(Start, End) ->
	{ok, Config} = arweave_config:get_env(),
	get_all(Start, End, Config#config.storage_modules).

%% @doc Return the list of storage modules chosen from the given list
%% whose ranges intersect the given interval.
get_all(Start, End, StorageModules) ->
	get_all2(Start, End, StorageModules, []).

%% @doc Return true if the given Offset belongs to at least one storage module.
has_any(Offset) ->
	{ok, Config} = arweave_config:get_env(),
	has_any(Offset, Config#config.storage_modules).

%% @doc Return true if the given range is covered by the configured storage modules.
has_range(Start, End) ->
	{ok, Config} = arweave_config:get_env(),
	%% Memoize the merged, sorted interval list in ets.
	case ets:lookup(?MODULE, unique_sorted_intervals) of
		[] ->
			Intervals = get_unique_sorted_intervals(Config#config.storage_modules),
			ets:insert(?MODULE, {unique_sorted_intervals, Intervals}),
			has_range(Start, End, Intervals);
		[{_, Intervals}] ->
			has_range(Start, End, Intervals)
	end.
%% @doc Return the list of at least one {Start, End, StoreID} covering the given range
%% or not_found. The given StoreID (may be none) has a higher chance to be picked in case
%% there are several storage modules covering the same range.
%%
%% 0            6   10      14      20           30
%% |--- sm_1 ---|--- sm_2 ---|--- sm_3 ---|
%%              |----sm_4----|
%%
%% 1. get_cover(2, 8, none):  2<--->8
%% 2. get_cover(7, 13, none): 7<--------->13
%% 3. get_cover(7, 25, none): 7<-------------------->25
%% 4. get_cover(7, 25, sm4):  7<-------------------->25
%%
%% 1. returns [{2, 8, sm_1}]
%% 2. returns [{7, 10, sm1}, {10, 13, sm_2}]
%% 3. returns [{7, 10, sm1}, {10, 20, sm_2}, {20, 25, sm_3}]
%% 4. returns [{7, 10, sm1}, {10, 20, sm_4}, {20, 25, sm_3}]
get_cover(Start, End, MaybeModule) ->
	{ok, Config} = arweave_config:get_env(),
	SortedStorageModules = sort_storage_modules_by_left_bound(
			Config#config.storage_modules, MaybeModule),
	case get_cover2(Start, End, SortedStorageModules) of
		[] ->
			not_found;
		not_found ->
			not_found;
		Cover ->
			Cover
	end.

%% @doc Return true if the module with the given ID is configured for
%% repacking in place.
is_repack_in_place(ID) ->
	{ok, Config} = arweave_config:get_env(),
	lists:any(
		fun({Module, _TargetPacking}) ->
			ar_storage_module:id(Module) == ID
		end,
		Config#config.repack_in_place_storage_modules).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Build the "storage_module_..." identifier string; the bucket size is
%% omitted when it equals the default partition size.
id(BucketSize, Bucket, PackingString) ->
	case BucketSize == ar_block:partition_size() of
		true ->
			binary_to_list(iolist_to_binary(io_lib:format("storage_module_~B_~s",
					[Bucket, PackingString])));
		false ->
			binary_to_list(iolist_to_binary(io_lib:format("storage_module_~B_~B_~s",
					[BucketSize, Bucket, PackingString])))
	end.
%% Scan the configured modules for one covering Offset; StorageModule holds the
%% best fallback found so far (not_found initially).
get(Offset, Packing, [{BucketSize, Bucket, Packing2} | StorageModules], StorageModule) ->
	case Offset =< BucketSize * Bucket
			orelse Offset > BucketSize * (Bucket + 1)
					+ ar_storage_module:get_overlap(Packing2) of
		true ->
			get(Offset, Packing, StorageModules, StorageModule);
		false ->
			case Packing == Packing2 of
				true ->
					{BucketSize, Bucket, Packing};
				false ->
					%% NOTE(review): the fallback candidate is built with the
					%% requested Packing, not the module's actual Packing2 —
					%% confirm callers of get/2 rely on this.
					get(Offset, Packing, StorageModules, {BucketSize, Bucket, Packing})
			end
	end;
get(_Offset, _Packing, [], StorageModule) ->
	StorageModule.

%% Collect every module whose (overlap-extended) range contains Offset.
get_all2(Offset, [{BucketSize, Bucket, Packing} = StorageModule | StorageModules],
		FoundModules) ->
	case Offset =< BucketSize * Bucket
			orelse Offset > BucketSize * (Bucket + 1)
					+ ar_storage_module:get_overlap(Packing) of
		true ->
			get_all2(Offset, StorageModules, FoundModules);
		false ->
			get_all2(Offset, StorageModules, [StorageModule | FoundModules])
	end;
get_all2(_Offset, [], FoundModules) ->
	FoundModules.

%% Collect every module whose (overlap-extended) range intersects (Start, End].
get_all2(Start, End, [{BucketSize, Bucket, Packing} = StorageModule | StorageModules],
		FoundModules) ->
	case End =< BucketSize * Bucket
			orelse Start >= BucketSize * (Bucket + 1)
					+ ar_storage_module:get_overlap(Packing) of
		true ->
			get_all2(Start, End, StorageModules, FoundModules);
		false ->
			get_all2(Start, End, StorageModules, [StorageModule | FoundModules])
	end;
get_all2(_Start, _End, [], FoundModules) ->
	FoundModules.

has_any(_Offset, []) ->
	false;
has_any(Offset, [{BucketSize, Bucket, Packing} | StorageModules]) ->
	case Offset > Bucket * BucketSize
			andalso Offset =< (Bucket + 1) * BucketSize
					+ ar_storage_module:get_overlap(Packing) of
		true ->
			true;
		false ->
			has_any(Offset, StorageModules)
	end.

get_unique_sorted_intervals(StorageModules) ->
	get_unique_sorted_intervals(StorageModules, ar_intervals:new()).
%% Accumulate the modules' [Left, Right) ranges (without overlap) into an
%% ar_intervals set, then emit them as a sorted list of {Start, End} pairs.
get_unique_sorted_intervals([], Intervals) ->
	[{Start, End} || {End, Start} <- ar_intervals:to_list(Intervals)];
get_unique_sorted_intervals([{BucketSize, Bucket, _Packing} | Rest], Intervals) ->
	Left = Bucket * BucketSize,
	Right = (Bucket + 1) * BucketSize,
	get_unique_sorted_intervals(Rest, ar_intervals:add(Intervals, Right, Left)).

%% Walk the unique, sorted intervals and check they cover [From, To).
has_range(From, To, _Intervals) when From >= To ->
	%% Empty (or exhausted) range: trivially covered.
	true;
has_range(_From, _To, []) ->
	false;
has_range(From, _To, [{Start, _End} | _Rest]) when From < Start ->
	%% The given intervals are unique and sorted, so a gap before the next
	%% interval means the range cannot be covered.
	false;
has_range(From, To, [{_Start, End} | Rest]) when From >= End ->
	%% The current interval lies entirely before From; skip it.
	has_range(From, To, Rest);
has_range(_From, To, [{_Start, End} | Rest]) ->
	%% The current interval covers up to End; continue from there.
	has_range(End, To, Rest).

%% Sort modules by left bound ascending; on ties, MaybeModule (if any) wins.
sort_storage_modules_by_left_bound(StorageModules, MaybeModule) ->
	LeftBound = fun({BucketSize, Bucket, _}) -> BucketSize * Bucket end,
	Compare =
		fun(M1, M2) ->
			S1 = LeftBound(M1),
			S2 = LeftBound(M2),
			if
				S1 < S2 ->
					true;
				S1 > S2 ->
					false;
				true ->
					%% Equal left bounds: prefer the requested module.
					M1 == MaybeModule orelse M2 /= MaybeModule
			end
		end,
	lists:sort(Compare, StorageModules).
%% Build a cover of [Start, End) from modules sorted by left bound.
%% Returns [] for an empty range and not_found when a gap is hit.
get_cover2(Start, End, _StorageModules) when Start >= End ->
	[];
get_cover2(_Start, _End, []) ->
	not_found;
get_cover2(Start, _End, [{BucketSize, Bucket, _Packing} | _StorageModules])
		when BucketSize * Bucket > Start ->
	%% The next module starts after Start: a gap, no cover exists.
	not_found;
get_cover2(Start, End, [{BucketSize, Bucket, _Packing} | StorageModules])
		when BucketSize * Bucket + BucketSize =< Start ->
	%% The module ends at or before Start; skip it.
	get_cover2(Start, End, StorageModules);
get_cover2(Start, End, [{BucketSize, Bucket, _Packing} = StorageModule | StorageModules]) ->
	Start2 = BucketSize * Bucket,
	End2 = Start2 + BucketSize,
	End3 = min(End, End2),
	StoreID = ar_storage_module:id(StorageModule),
	case get_cover2(End3, End, StorageModules) of
		not_found ->
			not_found;
		List ->
			[{Start, End3, StoreID} | List]
	end.

%%%===================================================================
%%% Tests.
%%%===================================================================

%% @doc Verify label/1 assigns stable, deduplicated labels per address and
%% packing; the ets label state is saved and restored around the test.
label_test() ->
	{ok, Config} = arweave_config:get_env(),
	OldLabels = ets:match_object(?MODULE, {{label, '_'}, '_'}),
	OldAddrLabels = ets:match_object(?MODULE, {{address_label, '_'}, '_'}),
	OldLastLabel = ets:lookup(?MODULE, last_address_label),
	ets:match_delete(?MODULE, {{label, '_'}, '_'}),
	ets:match_delete(?MODULE, {{address_label, '_'}, '_'}),
	ets:delete(?MODULE, last_address_label),
	try
		arweave_config:set_env(Config#config{storage_modules = [
			{ar_block:partition_size(), 0, {spora_2_6, <<"a">>}},
			{ar_block:partition_size(), 2, {spora_2_6, <<"a">>}},
			{ar_block:partition_size(), 0, {spora_2_6, <<"b">>}},
			{524288, 3, {spora_2_6, <<"b">>}},
			{ar_block:partition_size(), 2, unpacked},
			{ar_block:partition_size(), 2, {spora_2_6, <<"s÷">>}},
			{524288, 2, {spora_2_6, <<"s÷">>}},
			{524288, 3, {composite, <<"b">>, 1}},
			{524288, 3, {composite, <<"b">>, 1}},
			{524288, 3, {composite, <<"b">>, 2}}
		]}),
		?assertEqual("storage_module_0_spora_2_6_1",
				label(id({ar_block:partition_size(), 0, {spora_2_6, <<"a">>}}))),
		?assertEqual("storage_module_2_spora_2_6_1",
				label(id({ar_block:partition_size(), 2, {spora_2_6, <<"a">>}}))),
		?assertEqual("storage_module_0_spora_2_6_2",
				label(id({ar_block:partition_size(), 0, {spora_2_6, <<"b">>}}))),
		?assertEqual("storage_module_524288_3_spora_2_6_2",
				label(id({524288, 3, {spora_2_6, <<"b">>}}))),
		?assertEqual("storage_module_2_unpacked",
				label(id({ar_block:partition_size(), 2, unpacked}))),
		%% force a _ in the encoded address
		?assertEqual("storage_module_2_spora_2_6_3",
				label(id({ar_block:partition_size(), 2, {spora_2_6, <<"s÷">>}}))),
		?assertEqual("storage_module_524288_2_spora_2_6_3",
				label(id({524288, 2, {spora_2_6, <<"s÷">>}}))),
		?assertEqual("storage_module_524288_3_composite_4",
				label(id({524288, 3, {composite, <<"b">>, 1}}))),
		?assertEqual("storage_module_524288_3_composite_4",
				label(id({524288, 3, {composite, <<"b">>, 1}}))),
		?assertEqual("storage_module_524288_3_composite_5",
				label(id({524288, 3, {composite, <<"b">>, 2}})))
	after
		ets:match_delete(?MODULE, {{label, '_'}, '_'}),
		ets:match_delete(?MODULE, {{address_label, '_'}, '_'}),
		ets:delete(?MODULE, last_address_label),
		ets:insert(?MODULE, OldLabels),
		ets:insert(?MODULE, OldAddrLabels),
		ets:insert(?MODULE, OldLastLabel),
		arweave_config:set_env(Config)
	end.

%% @doc has_any/2 boundary checks: exclusive left bound, overlap-extended
%% right bound.
has_any_test() ->
	?assertEqual(false, has_any(0, [])),
	?assertEqual(false, has_any(0, [{10, 1, p}])),
	?assertEqual(false, has_any(10, [{10, 1, p}])),
	?assertEqual(true, has_any(11, [{10, 1, p}])),
	?assertEqual(true, has_any(11, [{10, 1, {replica_2_9, a}}])),
	?assertEqual(true, has_any(20 + ?OVERLAP, [{10, 1, p}])),
	?assertEqual(true, has_any(20 + ?OVERLAP, [{10, 1, {replica_2_9, a}}])).

%% @doc Overlapping module ranges are merged into unique sorted intervals.
get_unique_sorted_intervals_test() ->
	?assertEqual([{0, 24}, {90, 120}],
			get_unique_sorted_intervals([{10, 0, p}, {30, 3, p}, {20, 0, p}, {12, 1, p}])).
has_range_test() ->
	?assertEqual(false, has_range(0, 10, [])),
	?assertEqual(false, has_range(0, 10, [{0, 9}])),
	?assertEqual(true, has_range(0, 10, [{0, 10}])),
	?assertEqual(true, has_range(0, 10, [{0, 11}])),
	%% Adjacent intervals together cover the range.
	?assertEqual(true, has_range(0, 10, [{0, 9}, {9, 10}])),
	?assertEqual(true, has_range(5, 10, [{0, 9}, {9, 10}])),
	?assertEqual(true, has_range(5, 10, [{0, 2}, {2, 9}, {9, 10}])).

sort_storage_modules_by_left_bound_test() ->
	?assertEqual([], sort_storage_modules_by_left_bound([], none)),
	?assertEqual([{1, 0, p}], sort_storage_modules_by_left_bound([{1, 0, p}], none)),
	?assertEqual([{10, 0, p}, {10, 1, p}, {10, 2, p}],
			sort_storage_modules_by_left_bound([{10, 1, p}, {10, 0, p}, {10, 2, p}], none)),
	%% Ordering is by left bound (BucketSize * Bucket), not by bucket index.
	?assertEqual([{10, 0, p}, {7, 1, p}, {10, 1, p}, {10, 2, p}],
			sort_storage_modules_by_left_bound(
					[{10, 1, p}, {10, 0, p}, {10, 2, p}, {7, 1, p}], none)),
	?assertEqual([{10, 0, p}, {10, 1, p}, {10, 1, p2}],
			sort_storage_modules_by_left_bound([{10, 1, p}, {10, 0, p}, {10, 1, p2}], none)),
	%% On equal left bounds the preferred module comes first.
	?assertEqual([{10, 0, p}, {10, 1, p2}, {10, 1, p}],
			sort_storage_modules_by_left_bound(
					[{10, 1, p}, {10, 0, p}, {10, 1, p2}], {10, 1, p2})).

get_cover2_test() ->
	?assertEqual(not_found, get_cover2(0, 1, [])),
	?assertEqual([{0, 1, "storage_module_1_0_p"}], get_cover2(0, 1, [{1, 0, p}])),
	?assertEqual([{0, 1, "storage_module_1_0_p"}, {1, 2, "storage_module_1_1_p"}],
			get_cover2(0, 2, [{1, 0, p}, {1, 1, p}])),
	%% A gap between modules means no cover.
	?assertEqual(not_found, get_cover2(0, 2, [{1, 0, p}, {1, 2, p}])),
	%% A single module spanning the whole range wins; the redundant one is unused.
	?assertEqual([{0, 2, "storage_module_2_0_p"}], get_cover2(0, 2, [{2, 0, p}, {1, 0, p}])),
	?assertEqual([{0, 2, "storage_module_2_0_p"}, {2, 3, "storage_module_3_0_p"}],
			get_cover2(0, 3, [{2, 0, p}, {3, 0, p}])).


================================================
FILE: apps/arweave/src/ar_storage_sup.erl
================================================
-module(ar_storage_sup).

-behaviour(supervisor).

-export([start_link/0]).
-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

init([]) ->
	%% The tables are created here so they are owned by the supervisor
	%% process and survive restarts of the workers that use them.
	ets:new(ar_storage, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_storage_module, [set, public, named_table]),
	{ok, {{one_for_one, 5, 10}, [
		?CHILD(ar_storage, worker),
		?CHILD(ar_device_lock, worker)
	]}}.


================================================
FILE: apps/arweave/src/ar_sup.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

%% The top-level supervisor: creates the shared ETS tables and starts the
%% node's worker processes and sub-supervisors in dependency order.
-module(ar_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% ===================================================================
%% API functions
%% ===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

init([]) ->
	%% These ETS tables should belong to the supervisor.
	ets:new(ar_shutdown_manager, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_timer, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_peers, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_http, [set, public, named_table]),
	ets:new(ar_rate_limiter, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_blacklist_middleware, [set, public, named_table]),
	ets:new(blacklist, [set, public, named_table]),
	ets:new(ignored_ids, [bag, public, named_table]),
	ets:new(ar_tx_emitter_recently_emitted, [set, public, named_table]),
	ets:new(ar_tx_db, [set, public, named_table]),
	ets:new(ar_nonce_limiter, [set, public, named_table]),
	ets:new(ar_nonce_limiter_server, [set, public, named_table]),
	ets:new(ar_header_sync, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_data_discovery, [ordered_set, public, named_table,
			{read_concurrency, true}]),
	ets:new(ar_data_discovery_footprint_buckets, [ordered_set, public, named_table,
			{read_concurrency, true}]),
	ets:new(ar_data_sync_coordinator, [set, public, named_table]),
	ets:new(ar_data_sync_state, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_chunk_storage, [set, public, named_table]),
	ets:new(ar_entropy_storage, [set, public, named_table]),
	ets:new(ar_mining_stats, [set, public, named_table]),
	ets:new(entropy_generation_stats, [ordered_set, public, named_table]),
	ets:new(ar_global_sync_record, [set, public, named_table]),
	ets:new(ar_disk_pool_data_roots, [set, public, named_table,
			{read_concurrency, true}]),
	ets:new(ar_tx_blacklist, [set, public, named_table, {read_concurrency, true}]),
	ets:new(ar_tx_blacklist_pending_headers, [set, public, named_table,
			{read_concurrency, true}]),
	ets:new(ar_tx_blacklist_pending_data, [set, public, named_table,
			{read_concurrency, true}]),
	ets:new(ar_tx_blacklist_offsets, [ordered_set, public, named_table,
			{read_concurrency, true}]),
	ets:new(ar_tx_blacklist_pending_restore_headers, [ordered_set, public,
			named_table, {read_concurrency, true}]),
	ets:new(block_cache, [set, public, named_table]),
	ets:new(tx_prefixes, [bag, public, named_table]),
	ets:new(block_index, [ordered_set, public, named_table]),
	ets:new(node_state, [set, public, named_table]),
	ets:new(mining_state, [set, public, named_table, {read_concurrency, true}]),
	%% Children are started in list order (one_for_one strategy).
	Children = [
		?CHILD(ar_shutdown_manager, worker),
		?CHILD(ar_rate_limiter, worker),
		?CHILD(ar_disksup, worker),
		?CHILD_SUP(ar_events_sup, supervisor),
		?CHILD_SUP(ar_http_sup, supervisor),
		?CHILD_SUP(ar_kv_sup, supervisor),
		?CHILD_SUP(ar_storage_sup, supervisor),
		?CHILD(ar_peers, worker),
		?CHILD(ar_disk_cache, worker),
		?CHILD(ar_watchdog, worker),
		?CHILD(ar_tx_blacklist, worker),
		?CHILD_SUP(ar_bridge_sup, supervisor),
		?CHILD_SUP(ar_packing_sup, supervisor),
		?CHILD_SUP(ar_sync_record_sup, supervisor),
		?CHILD(ar_data_discovery, worker),
		?CHILD(ar_header_sync, worker),
		?CHILD_SUP(ar_chunk_storage_sup, supervisor),
		?CHILD_SUP(ar_data_sync_sup, supervisor),
		?CHILD_SUP(ar_data_root_sync_sup, supervisor),
		?CHILD_SUP(ar_verify_chunks_sup, supervisor),
		?CHILD(ar_global_sync_record, worker),
		?CHILD_SUP(ar_nonce_limiter_sup, supervisor),
		%% Resolved at compile time: localnet or regular mining supervisor.
		mining_sup(),
		?CHILD(ar_coordination, worker),
		?CHILD_SUP(ar_tx_emitter_sup, supervisor),
		?CHILD(ar_tx_poller, worker),
		?CHILD_SUP(ar_block_pre_validator_sup, supervisor),
		?CHILD_SUP(ar_poller_sup, supervisor),
		?CHILD_SUP(ar_webhook_sup, supervisor),
		?CHILD(ar_pool, worker),
		?CHILD(ar_pool_job_poller, worker),
		?CHILD(ar_pool_cm_job_poller, worker),
		?CHILD(ar_chain_stats, worker),
		?CHILD_SUP(ar_node_sup, supervisor)
	],
	{ok, Config} = arweave_config:get_env(),
	%% The process sampler is only started in debug mode.
	DebugChildren =
		case Config#config.debug of
			true -> [?CHILD(ar_process_sampler, worker)];
			false -> []
		end,
	{ok, {{one_for_one, 5, 10}, Children ++ DebugChildren}}.

-ifdef(LOCALNET).
mining_sup() ->
	?CHILD_SUP(ar_localnet_mining_sup, supervisor).
-else.
mining_sup() ->
	?CHILD_SUP(ar_mining_sup, supervisor).
-endif.
================================================
FILE: apps/arweave/src/ar_sync_buckets.erl
================================================
%% A compact, lossy summary of synced data: the weave is divided into
%% fixed-size buckets and each bucket maps to the share (0..1) of its
%% bytes that are synced. Represented as {BucketSize, #{Bucket => Share}}.
-module(ar_sync_buckets).

-export([new/0, new/1, from_intervals/1, from_intervals/2, add/3, delete/3, cut/2,
		get/3, serialize/2, deserialize/2, foreach/3]).

-include_lib("arweave/include/ar_sync_buckets.hrl").
-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Return an empty set of buckets.
new() ->
	{?DEFAULT_SYNC_BUCKET_SIZE, #{}}.

%% @doc Return an empty set of buckets with the given bucket size.
new(Size) ->
	{Size, #{}}.

%% @doc Initialize buckets from a set of intervals (see ar_intervals).
%% The bucket size is ?DEFAULT_SYNC_BUCKET_SIZE.
from_intervals(Intervals) ->
	from_intervals(Intervals, new()).

%% @doc Add the data from a set of intervals (see ar_intervals) to the given buckets.
from_intervals(Intervals, SyncBuckets) ->
	{Size, Map} = SyncBuckets,
	{Size, ar_intervals:fold(
		fun({End, Start}, Acc) ->
			add(Start, End, Size, Acc)
		end,
		Map,
		Intervals
	)}.

%% @doc Add the interval with the end offset End and start offset Start to the buckets.
add(End, Start, Buckets) ->
	{Size, Map} = Buckets,
	{Size, add(Start, End, Size, Map)}.

%% @doc Remove the interval with the end offset End and start offset Start from the buckets.
delete(End, Start, Buckets) ->
	{Size, Map} = Buckets,
	{Size, delete(Start, End, Size, Map)}.

%% @doc Remove the intervals strictly above Offset from the buckets.
cut(_Offset, {_Size, Map} = Buckets) when map_size(Map) == 0 ->
	Buckets;
cut(Offset, Buckets) ->
	{Size, Map} = Buckets,
	%% Delete everything from Offset up to the upper bound of the
	%% highest recorded bucket.
	Last = lists:last(lists:sort(maps:keys(Map))),
	End = (Last + 1) * Size,
	{Size, delete(Offset, End, Size, Map)}.

%% @doc Return the percentage of data synced in the given bucket of size BucketSize.
%% If the recorded bucket size is bigger than the given bucket size, return the share
%% corresponding to the bigger bucket (essentially assuming the uniform distribution of data).
%% If the given bucket crosses the border between the two recorded buckets, return
%% the sum of their shares.
get(Bucket, BucketSize, Buckets) ->
	{Size, Map} = Buckets,
	First = Bucket * BucketSize div Size,
	Last = (Bucket * BucketSize + BucketSize - 1) div Size,
	lists:sum([maps:get(Key, Map, 0) || Key <- lists:seq(First, Last)]).

%% @doc Serialize buckets into Erlang Term Format. If the size of the serialized structure
%% exceeds MaxSize, double the bucket size and restructure the buckets.
%% Throw uncompressable_buckets if MaxSize is too low.
serialize(Buckets, MaxSize) ->
	serialize(Buckets, MaxSize, infinity).

serialize(Buckets, MaxSize, PrevSerializedSize) ->
	S = term_to_binary(Buckets),
	SerializedSize = byte_size(S),
	case SerializedSize > MaxSize of
		true ->
			%% If doubling the bucket size did not shrink the payload,
			%% further compaction cannot help - give up.
			case SerializedSize > PrevSerializedSize of
				true ->
					throw(uncompressable_buckets);
				false ->
					ok
			end,
			{Size, Map} = Buckets,
			%% Double the bucket size until the serialized buckets are smaller than MaxSize.
			%% Two adjacent buckets merge into one with the averaged share.
			%% NOTE(review): when only one of a merged pair is present the
			%% share is kept as-is (not halved) - an approximation that can
			%% overestimate the merged share; confirm this is intended.
			serialize({Size * 2, maps:fold(
				fun(Bucket, Share, Acc) ->
					maps:update_with(Bucket div 2, fun(Sh) -> (Sh + Share) / 2 end, Share, Acc)
				end,
				maps:new(),
				Map
			)}, MaxSize, SerializedSize);
		false ->
			{Buckets, S}
	end.

%% @doc Deserialize the buckets from Erlang Term Format.
%% The bucket size must be bigger than or equal to ExpectedBucketSize.
deserialize(SerializedBuckets, ExpectedBucketSize) ->
	%% [safe] rejects terms with new atoms/funs coming from untrusted peers.
	case catch binary_to_term(SerializedBuckets, [safe]) of
		{BucketSize, Map} when is_map(Map), is_integer(BucketSize),
				BucketSize >= ExpectedBucketSize,
				BucketSize =< ExpectedBucketSize * ?MAX_SYNC_BUCKET_SIZE_RATIO ->
			%% Drop malformed entries: only non-negative integer buckets with
			%% shares in (0, 1] are kept.
			{ok, {BucketSize, maps:filter(
				fun	(Bucket, Share) when is_integer(Bucket), Bucket >= 0,
							is_number(Share), Share > 0, Share =< 1 ->
						true;
					(_, _) ->
						false
				end,
				Map
			)}};
		{'EXIT', Reason} ->
			{error, Reason};
		_ ->
			{error, invalid_format}
	end.

%% @doc Apply the given function of two arguments (Bucket, Share) to each
%% of the given buckets breaking them down according to the given size.
%% Does nothing unless the recorded size is a multiple of BucketSize.
foreach(Fun, BucketSize, {Size, Map}) when Size >= BucketSize, Size rem BucketSize == 0 ->
	Ratio = Size div BucketSize,
	maps:fold(
		fun(Bucket, Share, ok) ->
			%% Every sub-bucket inherits the share of its parent bucket.
			foreach_range(Fun, Share, Bucket * Ratio, (Bucket + 1) * Ratio)
		end,
		ok,
		Map
	);
foreach(_Fun, _BucketSize, _Buckets) ->
	ok.

%% Apply Fun(SubBucket, Share) to every sub-bucket in [SubBucket, End).
foreach_range(_Fun, _Share, SubBucket, End) when SubBucket >= End ->
	ok;
foreach_range(Fun, Share, SubBucket, End) ->
	Fun(SubBucket, Share),
	foreach_range(Fun, Share, SubBucket + 1, End).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Record [Start, End) into the bucket map, bucket by bucket, increasing
%% each touched bucket's share by the covered fraction (capped at 1).
add(Start, End, _Size, Map) when Start >= End ->
	Map;
add(Start, End, Size, Map) ->
	Bucket = Start div Size,
	Share = maps:get(Bucket, Map, 0),
	%% assumes ar_util:ceil_int returns the next multiple of Size strictly
	%% above Start when Start is already a multiple - otherwise this would
	%% not advance; TODO confirm against ar_util.
	BucketUpperBound = bucket_upper_bound(Start, Size),
	Increase = min(BucketUpperBound, End) - Start,
	add(BucketUpperBound, End, Size,
			maps:put(Bucket, min(1, (Share * Size + Increase) / Size), Map)).

bucket_upper_bound(Offset, Size) ->
	ar_util:ceil_int(Offset, Size).
%% Remove [Start, End) from the bucket map, bucket by bucket, scaling each
%% touched bucket's share down by the removed fraction (floored at 0).
delete(Start, End, _Size, Map) when Start >= End ->
	Map;
delete(Start, End, Size, Map) ->
	Bucket = Start div Size,
	Share = maps:get(Bucket, Map, 0),
	BucketUpperBound = bucket_upper_bound(Start, Size),
	Decrease = min(BucketUpperBound, End) - Start,
	%% Note: the decrease is multiplicative (the remaining share is scaled
	%% by the surviving fraction of the bucket), as pinned by buckets_test.
	delete(BucketUpperBound, End, Size,
			maps:put(Bucket, max(0, Share * (1 - Decrease / Size)), Map)).

%%%===================================================================
%%% Tests.
%%%===================================================================

buckets_test() ->
	Size = 10000000000,
	B1 = {10000000000, #{}},
	?assertException(throw, uncompressable_buckets, serialize(B1, 10)),
	{B1, S1} = serialize(B1, 20),
	{ok, B1} = deserialize(S1, ?DEFAULT_SYNC_BUCKET_SIZE),
	B2 = add(5, 0, B1),
	?assertEqual(5 / Size, get(0, 10, B2)),
	B3 = add(Size * 2, Size, B2),
	?assertEqual({Size, #{ 0 => 5 / Size, 1 => 1 }}, B3),
	{B3, S3} = serialize(B3, 40),
	{ok, B3} = deserialize(S3, ?DEFAULT_SYNC_BUCKET_SIZE),
	%% The size of the serialized buckets is 31 bytes.
	DoubleSize = 2 * Size,
	%% Forcing a smaller MaxSize doubles the bucket size and averages shares.
	?assertEqual({DoubleSize, #{ 0 => 0.5 + 5 / Size / 2 }},
			element(1, serialize(B3, 30))),
	{_, S3_1} = serialize(B3, 30),
	?assertEqual({ok, {DoubleSize, #{ 0 => 0.5 + 5 / Size / 2 }}},
			deserialize(S3_1, ?DEFAULT_SYNC_BUCKET_SIZE)),
	?assertEqual({Size, #{ 0 => 5 / Size, 1 => 0.5 }}, cut(Size + Size div 2, B3)),
	?assertEqual({Size, #{ 0 => (1 - (Size - 4) / Size) * (5 / Size), 1 => 0 }},
			delete(Size * 2, 4, B3)),
	B4 = from_intervals(gb_sets:from_list([{5, 0}, {2 * Size, Size}]), {10000000000, #{}}),
	?assertEqual(B3, B4),
	B5 = from_intervals(gb_sets:from_list([{2 * Size, Size}]), B2),
	?assertEqual(B4, B5).


================================================
FILE: apps/arweave/src/ar_sync_record.erl
================================================
%% Tracks which global byte ranges are synced, per record ID and per packing
%% type, backed by ETS mirrors and a RocksDB store with a write-ahead log.
-module(ar_sync_record).

-behaviour(gen_server).
-export([start_link/2, get/2, get/3, add/4, add/5, add_async/5, add_async/6, delete/4,
		cut/3, is_recorded/2, is_recorded/3, is_recorded/4, is_recorded_any/3,
		get_next_synced_interval/4, get_next_synced_interval/5,
		get_next_unsynced_interval/4, get_next_unsynced_interval/5,
		get_interval/3, get_intersection_size/4, name/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% The kv storage key to the sync records.
-define(SYNC_RECORDS_KEY, <<"sync_records">>).

%% The kv key of the write ahead log counter.
-define(WAL_COUNT_KEY, <<"wal">>).

%% The frequency of dumping sync records on disk.
%% Shortened under AR_TEST so tests exercise the periodic dump quickly.
-ifdef(AR_TEST).
-define(STORE_SYNC_RECORD_FREQUENCY_MS, 1000).
-else.
-define(STORE_SYNC_RECORD_FREQUENCY_MS, 60 * 1000).
-endif.

%% The gen_server state: one instance per storage module (StoreID).
-record(state, {
	%% A map ID => Intervals
	%% where Intervals is a set of non-overlapping intervals
	%% of global byte offsets {End, Start} denoting some synced
	%% data. End offsets are defined on [1, WeaveSize], start
	%% offsets are defined on [0, WeaveSize).
	%%
	%% Each set serves as a compact map of what is synced by the node.
	%% No matter how big the weave is or how much of it the node stores,
	%% this record can remain very small compared to the space taken by
	%% chunk identifiers, whose number grows unlimited with time.
	sync_record_by_id,
	%% A map {ID, Packing} => Intervals.
	sync_record_by_id_type,
	%% The name of the WAL store.
	state_db,
	%% The identifier of the storage module.
	store_id,
	%% The storage module.
	storage_module,
	%% The partition covered by the storage module.
	partition_number,
	%% The size in bytes of the storage module; undefined for the "default" storage.
	storage_module_size,
	%% The index of the storage module; undefined for the "default" storage.
	storage_module_index,
	%% The number of entries in the write-ahead log.
	wal,
	%% Whether the sync record is in memory only.
	in_memory = false
}).
%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, StoreID) ->
	gen_server:start_link({local, Name}, ?MODULE, StoreID, []).

%% @doc Return the set of intervals recorded under ID.
get(ID, StoreID) ->
	call_server(StoreID, {get, ID}, 20000).

%% @doc Return the set of intervals recorded under ID and Packing.
get(ID, Packing, StoreID) ->
	call_server(StoreID, {get, Packing, ID}, 20000).

%% @doc Add the given interval to the record with the
%% given ID. Store the changes on disk before returning ok.
add(End, Start, ID, StoreID) ->
	call_server(StoreID, {add, End, Start, ID}, 120000).

%% @doc Add the given interval to the record with the
%% given ID and Packing. Store the changes on disk before
%% returning ok.
add(End, Start, Packing, ID, StoreID) ->
	call_server(StoreID, {add, End, Start, Packing, ID}, 120000).

%% @doc Special case of add/4.
add_async(Event, End, Start, ID, StoreID) ->
	gen_server:cast(name(StoreID), {add_async, Event, End, Start, ID}).

%% @doc Special case of add/5 for repacked chunks. When repacking the ar_sync_record add
%% happens at the end so we don't need to block on it to complete.
add_async(Event, End, Start, Packing, ID, StoreID) ->
	gen_server:cast(name(StoreID), {add_async, Event, End, Start, Packing, ID}).

%% Call the record server for the given StoreID, turning a gen_server call
%% timeout into {error, timeout}; any other caught exit value is returned
%% to the caller unchanged.
call_server(StoreID, Request, Timeout) ->
	GenServerID = name(StoreID),
	case catch gen_server:call(GenServerID, Request, Timeout) of
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			{error, timeout};
		Reply ->
			Reply
	end.
%% @doc Remove the given interval from the record
%% with the given ID. Store the changes on disk before
%% returning ok.
delete(End, Start, ID, StoreID) ->
	GenServerID = name(StoreID),
	case catch gen_server:call(GenServerID, {delete, End, Start, ID}, 120000) of
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			{error, timeout};
		Reply ->
			Reply
	end.

%% @doc Remove everything strictly above the given
%% Offset from the record. Store the changes on disk
%% before returning ok.
cut(Offset, ID, StoreID) ->
	GenServerID = name(StoreID),
	case catch gen_server:call(GenServerID, {cut, Offset, ID}, 120000) of
		{'EXIT', {timeout, {gen_server, call, _}}} ->
			{error, timeout};
		Reply ->
			Reply
	end.

%% @doc Return {true, StoreID} or {{true, Packing}, StoreID} if a chunk containing
%% the given Offset is found in the record with the given ID, false otherwise.
%% If several types are recorded for the chunk, only one of them is returned,
%% the choice is not defined. If the chunk is stored in the default storage module,
%% return the type found there. If not, search for a configured storage
%% module covering the given Offset. If there are multiple
%% storage modules with the chunk, the choice is not defined.
%% The offset is 1-based - if a chunk consists of a single
%% byte that is the first byte of the weave, is_recorded(0, ID)
%% returns false and is_recorded(1, ID) returns true.
is_recorded(Offset, {ID, Packing}) ->
	case is_recorded(Offset, Packing, ID, ?DEFAULT_MODULE) of
		true ->
			{{true, Packing}, ?DEFAULT_MODULE};
		false ->
			%% Footprint records are keyed by footprint offsets; translate
			%% before resolving which storage modules cover the offset.
			ModuleOffset =
				case ID of
					ar_data_sync_footprints ->
						ar_footprint:get_padded_offset_from_footprint_offset(Offset);
					_ ->
						Offset
				end,
			%% Only consult the modules configured with the requested packing.
			StorageModules = [Module
					|| {_, _, ModulePacking} = Module
							<- ar_storage_module:get_all(ModuleOffset),
					ModulePacking == Packing],
			is_recorded_any_by_type(Offset, ID, StorageModules)
	end;
is_recorded(Offset, ID) ->
	case is_recorded(Offset, ID, ?DEFAULT_MODULE) of
		false ->
			ModuleOffset =
				case ID of
					ar_data_sync_footprints ->
						ar_footprint:get_padded_offset_from_footprint_offset(Offset);
					_ ->
						Offset
				end,
			StorageModules = ar_storage_module:get_all(ModuleOffset),
			is_recorded_any(Offset, ID, StorageModules);
		Reply ->
			{Reply, ?DEFAULT_MODULE}
	end.

%% @doc Return true or {true, Packing} if a chunk containing
%% the given Offset is found in the record with the given ID
%% in the storage module identified by StoreID, false otherwise.
is_recorded(Offset, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, StoreID}) of
		[] ->
			false;
		[{_, TID}] ->
			case ar_ets_intervals:is_inside(TID, Offset) of
				false ->
					false;
				true ->
					%% The offset is synced; scan the per-type records to
					%% discover its packing, if any is recorded.
					case is_recorded2(Offset, ets:first(sync_records), ID, StoreID) of
						false ->
							true;
						{true, Packing} ->
							{true, Packing}
					end
			end
	end.

%% @doc Return true if a chunk containing the given Offset and Packing
%% is found in the record in the storage module identified by StoreID,
%% false otherwise.
is_recorded(Offset, Packing, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, Packing, StoreID}) of
		[] ->
			false;
		[{_, TID}] ->
			ar_ets_intervals:is_inside(TID, Offset)
	end.

%% @doc Return the lowest synced interval with the end offset strictly above the given Offset
%% and at most EndOffsetUpperBound.
%% Return not_found if there are no such intervals.
get_next_synced_interval(Offset, EndOffsetUpperBound, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, StoreID}) of
		[] ->
			not_found;
		[{_, TID}] ->
			ar_ets_intervals:get_next_interval(TID, Offset, EndOffsetUpperBound)
	end.

%% @doc Return the lowest unsynced interval with the end offset strictly above the given Offset
%% and at most EndOffsetUpperBound.
%% Return not_found when Offset >= EndOffsetUpperBound.
%% Return {EndOffsetUpperBound, Offset} when no records are found.
get_next_unsynced_interval(Offset, EndOffsetUpperBound, _ID, _StoreID)
		when Offset >= EndOffsetUpperBound ->
	not_found;
get_next_unsynced_interval(Offset, EndOffsetUpperBound, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, StoreID}) of
		[] ->
			%% No record at all => the whole range is unsynced.
			{EndOffsetUpperBound, Offset};
		[{_, TID}] ->
			ar_ets_intervals:get_next_interval_outside(TID, Offset, EndOffsetUpperBound)
	end.

%% @doc Return the lowest synced interval with the end offset strictly above the given Offset
%% and at most EndOffsetUpperBound.
%% Return not_found if there are no such intervals.
get_next_synced_interval(Offset, EndOffsetUpperBound, Packing, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, Packing, StoreID}) of
		[] ->
			not_found;
		[{_, TID}] ->
			ar_ets_intervals:get_next_interval(TID, Offset, EndOffsetUpperBound)
	end.

%% @doc Return the lowest unsynced interval with the end offset strictly above the given Offset
%% and at most EndOffsetUpperBound.
%% Return not_found when Offset >= EndOffsetUpperBound.
%% Return {EndOffsetUpperBound, Offset} when no records are found.
get_next_unsynced_interval(Offset, EndOffsetUpperBound, _Packing, _ID, _StoreID)
		when Offset >= EndOffsetUpperBound ->
	not_found;
get_next_unsynced_interval(Offset, EndOffsetUpperBound, Packing, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, Packing, StoreID}) of
		[] ->
			{EndOffsetUpperBound, Offset};
		[{_, TID}] ->
			ar_ets_intervals:get_next_interval_outside(TID, Offset, EndOffsetUpperBound)
	end.

%% @doc Return the interval containing the given Offset, including the right bound,
%% excluding the left bound. Return not_found if the given offset does not belong to
%% any interval.
get_interval(Offset, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, StoreID}) of
		[] ->
			not_found;
		[{_, TID}] ->
			ar_ets_intervals:get_interval_with_byte(TID, Offset)
	end.

%% @doc Return the size of the intersection between the intervals and the given range.
%% Return 0 if the given ID and StoreID are not found.
get_intersection_size(End, Start, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, StoreID}) of
		[] ->
			0;
		[{_, TID}] ->
			ar_ets_intervals:get_intersection_size(TID, End, Start)
	end.

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init(StoreID) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	StorageModule = ar_storage_module:get_by_id(StoreID),
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	{Dir, StorageModuleSize, StorageModuleIndex, PartitionNumber} =
		case StorageModule of
			?DEFAULT_MODULE ->
				{filename:join([DataDir, ?ROCKS_DB_DIR, "ar_sync_record_db"]),
						undefined, undefined, undefined};
			Atom when is_atom(Atom) ->
				%% A module without a storage, to use in tests.
				{undefined, undefined, undefined, undefined};
			{Size, Index, _Packing} ->
				{filename:join([DataDir, "storage_modules", StoreID, ?ROCKS_DB_DIR,
						"ar_sync_record_db"]), Size, Index,
						ar_node:get_partition_number(Size * Index)}
		end,
	StateDB = {sync_record, StoreID},
	{SyncRecordByID, SyncRecordByIDType, WAL} =
		case Dir of
			undefined ->
				%% In-memory only: nothing to restore.
				{#{}, #{}, undefined},
			_ ->
				ok = ar_kv:open(#{ path => Dir, name => StateDB }),
				%% Schedule the first periodic disk dump.
				gen_server:cast(self(), store_state),
				read_sync_records(StateDB, StoreID)
		end,
	%% Mirror the restored records into the shared ETS tables.
	initialize_sync_record_by_id_type_ets(SyncRecordByIDType, StoreID),
	initialize_sync_record_by_id_ets(SyncRecordByID, StoreID),
	{ok, #state{
		state_db = StateDB,
		store_id = StoreID,
		storage_module = StorageModule,
		partition_number = PartitionNumber,
		storage_module_size = StorageModuleSize,
		storage_module_index = StorageModuleIndex,
		sync_record_by_id = SyncRecordByID,
		sync_record_by_id_type = SyncRecordByIDType,
		wal = WAL,
		in_memory = Dir == undefined
	}}.

handle_call({get, ID}, _From, State) ->
	#state{ sync_record_by_id = SyncRecordByID } = State,
	{reply, maps:get(ID, SyncRecordByID, ar_intervals:new()), State};
handle_call({get, Packing, ID}, _From, State) ->
	#state{ sync_record_by_id_type = SyncRecordByIDType } = State,
	{reply, maps:get({ID, Packing}, SyncRecordByIDType, ar_intervals:new()), State};
handle_call({add, End, Start, ID}, _From, State) ->
	{Reply, State2} = add2(End, Start, ID, State),
	{reply, Reply, State2};
handle_call({add, End, Start, Packing, ID}, _From, State) ->
	{Reply, State2} = add2(End, Start, Packing, ID, State),
	{reply, Reply, State2};
handle_call({delete, End, Start, ID}, _From, State) ->
	{Reply, State2} = delete2(End, Start, ID, State),
	{reply, Reply, State2};
handle_call({cut, Offset, ID}, _From, State) ->
	#state{ sync_record_by_id = SyncRecordByID,
			sync_record_by_id_type = SyncRecordByIDType, state_db = StateDB,
			store_id = StoreID } = State,
	SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()),
	SyncRecord2 = ar_intervals:cut(SyncRecord, Offset),
	SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID),
	TID = get_or_create_type_tid({ID, StoreID}),
	ar_ets_intervals:cut(TID, Offset),
	%% Cut every per-packing record kept under the same ID.
	SyncRecordByIDType2 = maps:map(
		fun	({ID2, _}, ByType) when ID2 == ID ->
				ar_intervals:cut(ByType, Offset);
			(_, ByType) ->
				ByType
		end,
		SyncRecordByIDType
	),
	%% Cut the ETS mirrors of the per-packing records for this store.
	ets:foldl(
		fun	({{ID2, _, SID}, TypeTID}, _) when ID2 == ID, SID == StoreID ->
				ar_ets_intervals:cut(TypeTID, Offset);
			(_, _) ->
				ok
		end,
		ok,
		sync_records
	),
	State2 = State#state{ sync_record_by_id = SyncRecordByID2,
			sync_record_by_id_type = SyncRecordByIDType2 },
	%% Persist to the WAL first; only emit the event when that succeeded.
	{Reply, State3} = update_write_ahead_log({cut, {Offset, ID}}, StateDB, State2),
	case Reply of
		ok ->
			emit_cut(Offset, StoreID);
		_ ->
			ok
	end,
	{reply, Reply, State3};
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast(store_state, State) ->
	{_, State2} = store_state(State),
	%% Re-arm the periodic disk dump.
	{ok, _} = ar_timer:apply_after(
		?STORE_SYNC_RECORD_FREQUENCY_MS,
		gen_server,
		cast,
		[self(), store_state],
		#{ skip_on_shutdown => false }
	),
	{noreply, State2};
handle_cast({add_async, Event, End, Start, ID}, State) ->
	{Reply, State2} = add2(End, Start, ID, State),
	case Reply of
		ok ->
			ok;
		Error ->
			%% Asynchronous adds cannot report back to the caller; log instead.
			?LOG_ERROR([{event, Event}, {operation, add_async}, {status, failed},
					{sync_record_id, ID}, {offset, End},
					{error, io_lib:format("~p", [Error])}])
	end,
	{noreply, State2};
handle_cast({add_async, Event, End, Start, Packing, ID}, State) ->
	{Reply, State2} = add2(End, Start, Packing, ID, State),
	case Reply of
		ok ->
			ok;
		Error ->
			?LOG_ERROR([{event, Event}, {operation, add_async}, {status, failed},
					{sync_record_id, ID}, {offset, End},
					{packing, ar_serialize:encode_packing(Packing, true)},
					{error, io_lib:format("~p", [Error])}])
	end,
	{noreply, State2};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.
handle_info(Message, State) -> ?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {message, Message}]), {noreply, State}. terminate(Reason, State) -> ?LOG_INFO([{event, terminate}, {module, ?MODULE}, {reason, io_lib:format("~p", [Reason])}]), store_state(State). %%%=================================================================== %%% Private functions. %%%=================================================================== name(StoreID) when is_atom(StoreID) -> list_to_atom("ar_sync_record_" ++ atom_to_list(StoreID)); name(StoreID) -> list_to_atom("ar_sync_record_" ++ ar_storage_module:label(StoreID)). add2(End, Start, ID, State) -> #state{ sync_record_by_id = SyncRecordByID, state_db = StateDB, store_id = StoreID, storage_module = Module } = State, SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()), SyncRecord2 = ar_intervals:add(SyncRecord, End, Start), SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID), TID = get_or_create_type_tid({ID, StoreID}), ar_ets_intervals:add(TID, End, Start), State2 = State#state{ sync_record_by_id = SyncRecordByID2 }, {Reply, State3} = update_write_ahead_log({add, {End, Start, ID}}, StateDB, State2), case Reply of ok -> emit_add_range(Start, End, ID, #{ module => Module }); _ -> ok end, {Reply, State3}. 
add2(End, Start, Packing, ID, State) -> #state{ sync_record_by_id = SyncRecordByID, sync_record_by_id_type = SyncRecordByIDType, state_db = StateDB, store_id = StoreID, storage_module = Module } = State, ByType = maps:get({ID, Packing}, SyncRecordByIDType, ar_intervals:new()), ByType2 = ar_intervals:add(ByType, End, Start), SyncRecordByIDType2 = maps:put({ID, Packing}, ByType2, SyncRecordByIDType), TypeTID = get_or_create_type_tid({ID, Packing, StoreID}), ar_ets_intervals:add(TypeTID, End, Start), SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()), SyncRecord2 = ar_intervals:add(SyncRecord, End, Start), SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID), TID = get_or_create_type_tid({ID, StoreID}), ar_ets_intervals:add(TID, End, Start), State2 = State#state{ sync_record_by_id = SyncRecordByID2, sync_record_by_id_type = SyncRecordByIDType2 }, {Reply, State3} = update_write_ahead_log({{add, Packing}, {End, Start, ID}}, StateDB, State2), case Reply of ok -> emit_add_range(Start, End, ID, #{ module => Module, packing => Packing }); _ -> ok end, {Reply, State3}. 
%% @doc Remove the interval (Start, End] from the ID record, from every
%% per-packing {ID, Packing} record, and from all matching ETS mirrors, then
%% append the delete operation to the write-ahead log.
delete2(End, Start, ID, State) ->
	#state{ sync_record_by_id = SyncRecordByID,
			sync_record_by_id_type = SyncRecordByIDType,
			state_db = StateDB, store_id = StoreID } = State,
	SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()),
	SyncRecord2 = ar_intervals:delete(SyncRecord, End, Start),
	SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID),
	TID = get_or_create_type_tid({ID, StoreID}),
	ar_ets_intervals:delete(TID, End, Start),
	%% A delete is packing-agnostic: drop the interval from every per-packing
	%% record kept for this ID.
	SyncRecordByIDType2 = maps:map(
		fun	({ID2, _}, ByType) when ID2 == ID ->
				ar_intervals:delete(ByType, End, Start);
			(_, ByType) ->
				ByType
		end,
		SyncRecordByIDType
	),
	%% Mirror the per-packing deletes into the shared ETS table; the fold is
	%% used for its side effects only.
	ets:foldl(
		fun	({{ID2, _, SID}, TypeTID}, _) when ID2 == ID, SID == StoreID ->
				ar_ets_intervals:delete(TypeTID, End, Start);
			(_, _) ->
				ok
		end,
		ok,
		sync_records
	),
	State2 = State#state{ sync_record_by_id = SyncRecordByID2,
			sync_record_by_id_type = SyncRecordByIDType2 },
	{Reply, State3} = update_write_ahead_log({delete, {End, Start, ID}}, StateDB, State2),
	case Reply of
		ok ->
			%% NOTE(review): the third element passed here is the StoreID,
			%% while the WAL replay path emits the storage module for the same
			%% event type — confirm subscribers accept both shapes.
			emit_remove_range(Start, End, StoreID);
		_ ->
			ok
	end,
	{Reply, State3}.

%% @doc Walk the given storage modules and return {{true, Packing}, StoreID}
%% for the first module whose configured packing has Offset recorded under ID;
%% false when none does.
is_recorded_any_by_type(Offset, ID, [StorageModule | StorageModules]) ->
	StoreID = ar_storage_module:id(StorageModule),
	%% The storage module is a 3-tuple whose last element is the packing.
	{_, _, Packing} = StorageModule,
	case is_recorded(Offset, Packing, ID, StoreID) of
		true ->
			{{true, Packing}, StoreID};
		false ->
			is_recorded_any_by_type(Offset, ID, StorageModules)
	end;
is_recorded_any_by_type(_Offset, _ID, []) ->
	false.

%% @doc Walk the given storage modules and return {Reply, StoreID} for the
%% first module where Offset is recorded under ID (any packing); false when
%% none has it.
is_recorded_any(Offset, ID, [StorageModule | StorageModules]) ->
	StoreID = ar_storage_module:id(StorageModule),
	case is_recorded(Offset, ID, StoreID) of
		false ->
			is_recorded_any(Offset, ID, StorageModules);
		Reply ->
			{Reply, StoreID}
	end;
is_recorded_any(_Offset, _ID, []) ->
	false.
%% @doc Scan the sync_records ETS table from the given key onward and return
%% {true, Packing} for the first per-packing entry (matching ID and StoreID)
%% whose interval table contains Offset; false once the table is exhausted.
is_recorded2(_Offset, '$end_of_table', _ID, _StoreID) ->
	false;
is_recorded2(Offset, {ID, Packing, StoreID}, ID, StoreID) ->
	case ets:lookup(sync_records, {ID, Packing, StoreID}) of
		[{_, TID}] ->
			case ar_ets_intervals:is_inside(TID, Offset) of
				true ->
					{true, Packing};
				false ->
					is_recorded2(Offset,
							ets:next(sync_records, {ID, Packing, StoreID}),
							ID, StoreID)
			end;
		[] ->
			%% Very unlucky timing.
			false
	end;
is_recorded2(Offset, Key, ID, StoreID) ->
	%% The key belongs to a different ID or store; keep scanning.
	is_recorded2(Offset, ets:next(sync_records, Key), ID, StoreID).

%% @doc Load the persisted sync records snapshot from the state database and
%% apply any write-ahead-log entries recorded after the snapshot. Returns
%% {SyncRecordByID, SyncRecordByIDType, WALLength}.
read_sync_records(StateDB, StoreID) ->
	{SyncRecordByID, SyncRecordByIDType} =
		case ar_kv:get(StateDB, ?SYNC_RECORDS_KEY) of
			not_found ->
				{#{}, #{}};
			{ok, V} ->
				binary_to_term(V, [safe])
		end,
	{SyncRecordByID2, SyncRecordByIDType2, WAL} =
		replay_write_ahead_log(SyncRecordByID, SyncRecordByIDType, StateDB, StoreID),
	{SyncRecordByID2, SyncRecordByIDType2, WAL}.

%% @doc Read the persisted WAL length and replay entries 1..WAL on top of the
%% given snapshot maps.
replay_write_ahead_log(SyncRecordByID, SyncRecordByIDType, StateDB, StoreID) ->
	WAL =
		case ar_kv:get(StateDB, ?WAL_COUNT_KEY) of
			not_found ->
				0;
			{ok, V} ->
				binary:decode_unsigned(V)
		end,
	replay_write_ahead_log(SyncRecordByID, SyncRecordByIDType, WAL, StateDB, StoreID).

replay_write_ahead_log(SyncRecordByID, SyncRecordByIDType, WAL, StateDB, StoreID) ->
	Module = ar_storage_module:get_by_id(StoreID),
	replay_write_ahead_log(
		SyncRecordByID, SyncRecordByIDType, 1, WAL, StateDB, StoreID, Module).

%% Replay WAL entries one by one; N is the 1-based index of the next entry.
replay_write_ahead_log(SyncRecordByID, SyncRecordByIDType, N, WAL,
		_StateDB, _StoreID, _Module) when N > WAL ->
	{SyncRecordByID, SyncRecordByIDType, WAL};
replay_write_ahead_log(SyncRecordByID, SyncRecordByIDType, N, WAL,
		StateDB, StoreID, Module) ->
	case ar_kv:get(StateDB, binary:encode_unsigned(N)) of
		not_found ->
			%% The VM crashed after recording the number.
			{SyncRecordByID, SyncRecordByIDType, WAL};
		{ok, V} ->
			{Op, Params} = binary_to_term(V, [safe]),
			case Op of
				add ->
					{End, Start, ID} = Params,
					SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()),
					SyncRecord2 = ar_intervals:add(SyncRecord, End, Start),
					emit_add_range(Start, End, ID, #{ module => Module }),
					SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID),
					replay_write_ahead_log(
						SyncRecordByID2, SyncRecordByIDType, N + 1, WAL,
						StateDB, StoreID, Module);
				{add, Packing} ->
					%% Packed add: update both the plain ID record and the
					%% per-packing record.
					{End, Start, ID} = Params,
					SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()),
					SyncRecord2 = ar_intervals:add(SyncRecord, End, Start),
					SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID),
					ByType = maps:get({ID, Packing}, SyncRecordByIDType,
							ar_intervals:new()),
					ByType2 = ar_intervals:add(ByType, End, Start),
					emit_add_range(Start, End, ID,
							#{ module => Module, packing => Packing }),
					SyncRecordByIDType2 = maps:put({ID, Packing}, ByType2,
							SyncRecordByIDType),
					replay_write_ahead_log(
						SyncRecordByID2, SyncRecordByIDType2, N + 1, WAL,
						StateDB, StoreID, Module);
				delete ->
					%% Deletes are packing-agnostic: drop the interval from
					%% every per-packing record kept for this ID.
					{End, Start, ID} = Params,
					SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()),
					SyncRecord2 = ar_intervals:delete(SyncRecord, End, Start),
					%% NOTE(review): this emits the storage module while the
					%% live delete path emits the StoreID for the same event
					%% type — confirm subscribers accept both shapes.
					emit_remove_range(Start, End, Module),
					SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID),
					SyncRecordByIDType2 = maps:map(
						fun	({ID2, _}, ByType) when ID2 == ID ->
								ar_intervals:delete(ByType, End, Start);
							(_, ByType) ->
								ByType
						end,
						SyncRecordByIDType
					),
					replay_write_ahead_log(
						SyncRecordByID2, SyncRecordByIDType2, N + 1, WAL,
						StateDB, StoreID, Module);
				cut ->
					%% Drop everything above Offset from the ID record and
					%% from every per-packing record kept for this ID.
					{Offset, ID} = Params,
					SyncRecord = maps:get(ID, SyncRecordByID, ar_intervals:new()),
					SyncRecord2 = ar_intervals:cut(SyncRecord, Offset),
					emit_cut(Offset, Module),
					SyncRecordByID2 = maps:put(ID, SyncRecord2, SyncRecordByID),
					SyncRecordByIDType2 = maps:map(
						fun	({ID2, _}, ByType) when ID2 == ID ->
								ar_intervals:cut(ByType, Offset);
							(_, ByType) ->
								ByType
						end,
						SyncRecordByIDType
					),
					replay_write_ahead_log(
						SyncRecordByID2, SyncRecordByIDType2, N + 1, WAL,
						StateDB, StoreID, Module)
			end
	end.

%% Only the data-sync record identifiers produce events; every other ID is
%% silently ignored.
emit_add_range(Start, End, ar_data_sync, Options) ->
	ar_events:send(sync_record, {add_range, Start, End, ar_data_sync, Options});
emit_add_range(Start, End, ar_data_sync_footprints, Options) ->
	ar_events:send(sync_record, {add_range, Start, End, ar_data_sync_footprints, Options});
emit_add_range(_Start, _End, _ID, _Options) ->
	ok.

emit_remove_range(Start, End, Module) ->
	ar_events:send(sync_record, {remove_range, Start, End, Module}).

emit_cut(Offset, Module) ->
	ar_events:send(sync_record, {cut, Offset, Module}).

%% @doc Mirror every per-ID interval set into a fresh ETS table registered
%% under {ID, StoreID} in sync_records.
initialize_sync_record_by_id_ets(SyncRecordByID, StoreID) ->
	Iterator = maps:iterator(SyncRecordByID),
	initialize_sync_record_by_id_ets2(maps:next(Iterator), StoreID).

initialize_sync_record_by_id_ets2(none, _StoreID) ->
	ok;
initialize_sync_record_by_id_ets2({ID, SyncRecord, Iterator}, StoreID) ->
	TID = ets:new(sync_record_type, [ordered_set, public, {read_concurrency, true}]),
	ar_ets_intervals:init_from_gb_set(TID, SyncRecord),
	ets:insert(sync_records, {{ID, StoreID}, TID}),
	initialize_sync_record_by_id_ets2(maps:next(Iterator), StoreID).

%% @doc Mirror every per-{ID, Packing} interval set into a fresh ETS table
%% registered under {ID, Packing, StoreID} in sync_records.
initialize_sync_record_by_id_type_ets(SyncRecordByIDType, StoreID) ->
	Iterator = maps:iterator(SyncRecordByIDType),
	initialize_sync_record_by_id_type_ets2(maps:next(Iterator), StoreID).

initialize_sync_record_by_id_type_ets2(none, _StoreID) ->
	ok;
initialize_sync_record_by_id_type_ets2({{ID, Packing}, SyncRecord, Iterator}, StoreID) ->
	TID = ets:new(sync_record_type, [ordered_set, public, {read_concurrency, true}]),
	ar_ets_intervals:init_from_gb_set(TID, SyncRecord),
	ets:insert(sync_records, {{ID, Packing, StoreID}, TID}),
	initialize_sync_record_by_id_type_ets2(maps:next(Iterator), StoreID).
%% @doc Persist the in-memory sync records snapshot to the state database and
%% reset the write-ahead log counter. A no-op for in-memory stores.
store_state(#state{ in_memory = true }) ->
	ok;
store_state(State) ->
	#state{ state_db = StateDB, sync_record_by_id = SyncRecordByID,
			sync_record_by_id_type = SyncRecordByIDType, store_id = StoreID,
			partition_number = PartitionNumber,
			storage_module_size = StorageModuleSize,
			storage_module_index = StorageModuleIndex } = State,
	StoreSyncRecords =
		ar_kv:put(
			StateDB,
			?SYNC_RECORDS_KEY,
			term_to_binary({SyncRecordByID, SyncRecordByIDType})
		),
	%% Only reset the WAL counter after the full snapshot is stored; until
	%% then the WAL entries are still required for recovery.
	ResetWAL =
		case StoreSyncRecords of
			{error, _} = Error ->
				Error;
			ok ->
				ar_kv:put(StateDB, ?WAL_COUNT_KEY, binary:encode_unsigned(0))
		end,
	case ResetWAL of
		{error, Reason} = Error2 ->
			?LOG_WARNING([
				{event, failed_to_store_state},
				{reason, io_lib:format("~p", [Reason])}
			]),
			{Error2, State};
		ok ->
			%% maps:map is used for its side effects only (reporting data
			%% sizes per packing); the mapped result is discarded.
			maps:map(
				fun	({ar_data_sync, Packing}, TypeRecord) ->
						ar_mining_stats:set_storage_module_data_size(
							StoreID, Packing, PartitionNumber, StorageModuleSize,
							StorageModuleIndex, ar_intervals:sum(TypeRecord));
					(_, _) ->
						ok
				end,
				SyncRecordByIDType
			),
			{ok, State#state{ wal = 0 }}
	end.

%% @doc Return the ETS interval table registered under the given key in
%% sync_records, creating and registering a fresh one when absent.
get_or_create_type_tid(IDType) ->
	case ets:lookup(sync_records, IDType) of
		[] ->
			TID = ets:new(sync_record_type,
					[ordered_set, public, {read_concurrency, true}]),
			ets:insert(sync_records, {IDType, TID}),
			TID;
		[{_, TID2}] ->
			TID2
	end.

%% @doc Append an operation to the write-ahead log: store it under the next
%% sequence number, then bump the persisted counter. Skipped entirely for
%% in-memory stores.
update_write_ahead_log(_OpParams, _StateDB, #state{ in_memory = true } = State) ->
	{ok, State};
update_write_ahead_log(OpParams, StateDB, State) ->
	#state{ wal = WAL } = State,
	case ar_kv:put(StateDB, binary:encode_unsigned(WAL + 1), term_to_binary(OpParams)) of
		{error, _Reason} = Error ->
			{Error, State};
		ok ->
			%% The counter is written after the entry so that a crash in
			%% between leaves an unreferenced entry rather than a counter
			%% pointing at a missing entry.
			case ar_kv:put(StateDB, ?WAL_COUNT_KEY, binary:encode_unsigned(WAL + 1)) of
				ok ->
					{ok, State#state{ wal = WAL + 1 }};
				Error2 ->
					{Error2, State}
			end
	end.

================================================
FILE: apps/arweave/src/ar_sync_record_sup.erl
================================================
-module(ar_sync_record_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).
-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% @doc Create the shared sync_records ETS table and start one ar_sync_record
%% worker per configured storage module, one for the default store, and one
%% per repack-in-place storage module.
init([]) ->
	ets:new(sync_records, [set, public, named_table, {read_concurrency, true}]),
	{ok, Config} = arweave_config:get_env(),
	ConfiguredWorkers = [sync_record_worker(StorageModule)
			|| StorageModule <- Config#config.storage_modules],
	DefaultSyncRecordWorker = ?CHILD_WITH_ARGS(ar_sync_record, worker,
		ar_sync_record_default, [ar_sync_record_default, ?DEFAULT_MODULE]),
	%% Repack-in-place modules are configured as {StorageModule, Packing}
	%% pairs; the target packing is irrelevant to the sync record worker.
	RepackInPlaceWorkers = [sync_record_worker(StorageModule)
			|| {StorageModule, _Packing} <- Config#config.repack_in_place_storage_modules],
	Workers = [DefaultSyncRecordWorker] ++ ConfiguredWorkers ++ RepackInPlaceWorkers,
	{ok, {{one_for_one, 5, 10}, Workers}}.

%% @doc Build the child spec for the ar_sync_record worker serving the given
%% storage module. The worker is registered under a name derived from the
%% storage module label (the same scheme as ar_sync_record's name/1).
sync_record_worker(StorageModule) ->
	StoreID = ar_storage_module:id(StorageModule),
	Label = ar_storage_module:label(StoreID),
	Name = list_to_atom("ar_sync_record_" ++ Label),
	?CHILD_WITH_ARGS(ar_sync_record, worker, Name, [Name, StoreID]).

================================================
FILE: apps/arweave/src/ar_testnet.erl
================================================
-module(ar_testnet).

-export([is_testnet/0, height_testnet_fork/0, top_up_test_wallet/2,
		locked_rewards_blocks/1, reward_history_blocks/1, target_block_time/1,
		legacy_reward_history_blocks/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_pricing.hrl").

-ifndef(TESTNET_REWARD_HISTORY_BLOCKS).
-define(TESTNET_REWARD_HISTORY_BLOCKS, ?REWARD_HISTORY_BLOCKS).
-endif.

-ifndef(TESTNET_LEGACY_REWARD_HISTORY_BLOCKS).
-define(TESTNET_LEGACY_REWARD_HISTORY_BLOCKS, ?LEGACY_REWARD_HISTORY_BLOCKS).
-endif.

-ifndef(TESTNET_LOCKED_REWARDS_BLOCKS).
-define(TESTNET_LOCKED_REWARDS_BLOCKS, ?LOCKED_REWARDS_BLOCKS).
-endif.

-ifndef(TESTNET_TARGET_BLOCK_TIME).
-define(TESTNET_TARGET_BLOCK_TIME, ?TARGET_BLOCK_TIME).
-endif.

-ifndef(TESTNET_FORK_HEIGHT).
-define(TESTNET_FORK_HEIGHT, infinity).
-endif.

%% @doc Whether the node was compiled with the TESTNET flag.
-ifdef(TESTNET).
is_testnet() -> true.
-else.
is_testnet() -> false.
-endif.

%% @doc The height at which the testnet-specific consensus parameters take
%% effect; infinity on mainnet builds.
-ifdef(TESTNET).
height_testnet_fork() -> ?TESTNET_FORK_HEIGHT.
-else.
height_testnet_fork() -> infinity.
-endif.

%% @doc On testnet builds, credit the hard-coded test wallet at the fork
%% height; a no-op otherwise.
-ifdef(TESTNET).
top_up_test_wallet(Accounts, Height) ->
	case Height == height_testnet_fork() of
		true ->
			%% NOTE(review): the argument of ar_util:decode/1 below is garbled
			%% in this copy of the file — an encoded wallet address literal
			%% (<<"...">>) is expected here; restore from upstream before
			%% compiling with TESTNET defined.
			Addr = ar_util:decode(<>),
			maps:put(Addr, {?AR(?TOP_UP_TEST_WALLET_AR), <<>>, 1, true}, Accounts);
		false ->
			Accounts
	end.
-else.
top_up_test_wallet(Accounts, _Height) ->
	Accounts.
-endif.

%% @doc The number of blocks the miner reward is locked for. An application
%% environment override (positive integer) takes precedence.
locked_rewards_blocks(Height) ->
	case application:get_env(arweave, locked_rewards_blocks) of
		{ok, Value} when is_integer(Value), Value > 0 ->
			Value;
		_ ->
			locked_rewards_blocks2(Height)
	end.

-ifdef(TESTNET).
locked_rewards_blocks2(Height) ->
	case Height >= height_testnet_fork() of
		true -> ?TESTNET_LOCKED_REWARDS_BLOCKS;
		false -> ?LOCKED_REWARDS_BLOCKS
	end.
-else.
locked_rewards_blocks2(_Height) ->
	?LOCKED_REWARDS_BLOCKS.
-endif.

%% @doc The length of the reward history at the given height.
-ifdef(TESTNET).
reward_history_blocks(Height) ->
	case Height >= height_testnet_fork() of
		true -> ?TESTNET_REWARD_HISTORY_BLOCKS;
		false -> ?REWARD_HISTORY_BLOCKS
	end.
-else.
reward_history_blocks(_Height) ->
	?REWARD_HISTORY_BLOCKS.
-endif.

%% @doc The length of the legacy reward history at the given height.
-ifdef(TESTNET).
legacy_reward_history_blocks(Height) ->
	case Height >= height_testnet_fork() of
		true -> ?TESTNET_LEGACY_REWARD_HISTORY_BLOCKS;
		false -> ?LEGACY_REWARD_HISTORY_BLOCKS
	end.
-else.
legacy_reward_history_blocks(_Height) ->
	?LEGACY_REWARD_HISTORY_BLOCKS.
-endif.

%% @doc The target block time at the given height.
-ifdef(TESTNET).
target_block_time(Height) ->
	case Height >= height_testnet_fork() of
		true -> ?TESTNET_TARGET_BLOCK_TIME;
		false -> ?TARGET_BLOCK_TIME
	end.
-else.
target_block_time(_Height) ->
	?TARGET_BLOCK_TIME.
-endif.

================================================
FILE: apps/arweave/src/ar_timer.erl
================================================
%%%===================================================================
%%% This Source Code Form is subject to the terms of the GNU General
%%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%%% with this file, You can obtain one at
%%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
%%%
%%% @doc A timer wrapper/manager for Arweave.
%%%
%%% This module has been created to deal with all timers started by
%%% Arweave. Those timers must be managed, in particular during
%%% shutdown, when no new connections or other actions are required.
%%%
%%% Not all timers need to use this module, only the ones needing
%%% to use a timer to connect to remote peers.
%%%
%%% Only intervals are currently managed, other functions are simple
%%% wrappers.
%%%
%%% This module is tightly coupled with `ar_shutdown_manager' and
%%% uses `ar_shutdown_manager:apply/4' to know if the application is
%%% in running mode or if the application is being stopped.
%%%
%%% @see ar_shutdown_manager
%%% @see ar_shutdown_manager:apply/4
%%%
%%% == Examples ==
%%%
%%% When the application is running normally, this module behaves
%%% exactly like the functions exported by the `timer' module:
%%%
%%% ```
%%% {ok, Ref1} =
%%%   ar_timer:apply_after(
%%%     10_000,
%%%     io,
%%%     format,
%%%     ["hello"],
%%%     #{}
%%%   ).
%%% '''
%%%
%%% If the application is stopped, for example when executing the
%%% `./bin/stop' script or `erlang:halt/1' or `init:stop/1' functions,
%%% then those functions will return `shutdown'. This is the default
%%% behavior when no specific options is passed in the last argument.
%%%
%%% ```
%%% shutdown =
%%%   ar_timer:apply_after(
%%%     10_000,
%%%     io,
%%%     format,
%%%     ["hello"],
%%%     #{}
%%%   ).
%%% '''
%%%
%%% This behavior can be disabled by setting the key `skip_on_shutdown'
%%% to false when needed. In this case, these functions will simply
%%% act as wrappers around `timers' module functions.
%%%
%%% ```
%%% {ok, Ref1} =
%%%   ar_timer:apply_after(
%%%     10_000,
%%%     io,
%%%     format,
%%%     ["hello"],
%%%     #{ skip_on_shutdown => false }
%%%   ).
%%% '''
%%%
%%% @end
%%%===================================================================
-module(ar_timer).

-export([
	apply_after/4, apply_after/5,
	apply_interval/4, apply_interval/5,
	cancel/1,
	insert_timer/2,
	list_timers/0,
	terminate_timers/0,
	send_after/2, send_after/3, send_after/4,
	send_interval/2, send_interval/3, send_interval/4
]).

-include_lib("kernel/include/logger.hrl").

%% Options accepted by every wrapper in this module.
-type ar_timer_opts() :: #{ skip_on_shutdown => boolean() }.

%%--------------------------------------------------------------------
%% @doc wrapper around timer:apply_after/4.
%% @see timer:apply_after/5
%% @end
%%--------------------------------------------------------------------
-spec apply_after(Time, Module, Function, Arguments) -> Return when
	Time :: pos_integer(),
	Module :: atom(),
	Function :: atom(),
	Arguments :: [term()],
	Return :: shutdown | {ok, reference()}.

%% Delegates to apply_after/5 with the default (skip-on-shutdown) options.
apply_after(Time, Module, Function, Arguments) ->
	apply_after(Time, Module, Function, Arguments, #{}).

%%--------------------------------------------------------------------
%% @doc wrapper around timer:apply_after/4.
%%
%% @see timer:apply_after/4
%% @end
%%--------------------------------------------------------------------
-spec apply_after(Time, Module, Function, Arguments, Opts) -> Return when
	Time :: pos_integer(),
	Module :: atom(),
	Function :: atom(),
	Arguments :: [term()],
	Opts :: ar_timer_opts(),
	Return :: shutdown | {ok, reference()}.
%% Routes through ar_shutdown_manager so the timer is skipped (returning
%% `shutdown') while the node is stopping, unless overridden via Opts.
apply_after(Time, Module, Function, Arguments, Opts) ->
	Args = [Time, Module, Function, Arguments],
	%% The shutdown manager already returns either {ok, Ref} or shutdown;
	%% forward its result unchanged.
	ar_shutdown_manager:apply(timer, apply_after, Args, Opts).

%%--------------------------------------------------------------------
%% @doc wrapper around timer:apply_interval/4.
%% @see timer:apply_interval/4
%% @end
%%--------------------------------------------------------------------
-spec apply_interval(Time, Module, Function, Arguments) -> Return when
	Time :: pos_integer(),
	Module :: atom(),
	Function :: atom(),
	Arguments :: [term()],
	Return :: shutdown | {ok, reference()}.

apply_interval(Time, Module, Function, Arguments) ->
	apply_interval(Time, Module, Function, Arguments, #{}).

%%--------------------------------------------------------------------
%% @doc wrapper around timer:apply_interval/4
%% @end
%%--------------------------------------------------------------------
-spec apply_interval(Time, Module, Function, Arguments, Opts) -> Return when
	Time :: pos_integer(),
	Module :: atom(),
	Function :: atom(),
	Arguments :: [term()],
	Opts :: ar_timer_opts(),
	Return :: shutdown | {ok, reference()}.

%% Interval timers are tracked in the module's ETS table so they can be
%% cancelled collectively on shutdown (see terminate_timers/0).
apply_interval(Time, Module, Function, Arguments, Opts) ->
	Args = [Time, Module, Function, Arguments],
	case ar_shutdown_manager:apply(timer, apply_interval, Args, Opts) of
		{ok, Ref} ->
			insert_timer(Ref, #{
				pid => self(),
				module => Module,
				function => Function,
				arguments => Arguments,
				time => Time
			}),
			{ok, Ref};
		Other ->
			Other
	end.

%%--------------------------------------------------------------------
%% @doc wrapper around timer:send_after/4.
%% @see send_after/3
%% @end
%%--------------------------------------------------------------------
-spec send_after(Time, Message) -> Return when
	Time :: pos_integer(),
	Message :: term(),
	Return :: shutdown | {ok, reference()}.

%% Sends to the calling process by default.
send_after(Time, Message) ->
	send_after(Time, self(), Message).
%%--------------------------------------------------------------------
%% @doc wrapper around timer:send_after/3.
%% @see send_after/4
%% @end
%%--------------------------------------------------------------------
-spec send_after(Time, Pid, Message) -> Return when
	Time :: pos_integer(),
	Pid :: pid() | atom(),
	Message :: term(),
	Return :: shutdown | {ok, reference()}.

send_after(Time, Pid, Message) ->
	send_after(Time, Pid, Message, #{}).

%%--------------------------------------------------------------------
%% @doc wrapper around timer:send_after/3.
%% @see timer:send_after/3
%% @end
%%--------------------------------------------------------------------
-spec send_after(Time, Pid, Message, Opts) -> Return when
	Time :: pos_integer(),
	Pid :: pid() | atom(),
	Message :: term(),
	Opts :: ar_timer_opts(),
	Return :: shutdown | {ok, reference()}.

%% One-shot timers are not tracked in the timers table; only intervals are
%% managed (see the module doc). The shutdown manager's result is forwarded
%% unchanged.
send_after(Time, Pid, Message, Opts) ->
	ar_shutdown_manager:apply(timer, send_after, [Time, Pid, Message], Opts).

%%--------------------------------------------------------------------
%% @doc wrapper around timer:send_interval/2.
%% @see send_interval/3
%% @end
%%--------------------------------------------------------------------
-spec send_interval(Time, Message) -> Return when
	Time :: pos_integer(),
	Message :: term(),
	Return :: shutdown | {ok, reference()}.

%% Sends to the calling process by default.
send_interval(Time, Message) ->
	send_interval(Time, self(), Message).

%%--------------------------------------------------------------------
%% @doc wrapper around timer:send_interval/3.
%% @see send_interval/4
%% @end
%%--------------------------------------------------------------------
-spec send_interval(Time, Pid, Message) -> Return when
	Time :: pos_integer(),
	Pid :: atom() | pid(),
	Message :: term(),
	Return :: shutdown | {ok, reference()}.

send_interval(Time, Pid, Message) ->
	send_interval(Time, Pid, Message, #{}).
%%--------------------------------------------------------------------
%% @doc wrapper around timer:send_interval/3.
%% @see timer:send_interval/3
%% @end
%%--------------------------------------------------------------------
-spec send_interval(Time, Pid, Message, Opts) -> Return when
	Time :: pos_integer(),
	Pid :: atom() | pid(),
	Message :: term(),
	Opts :: ar_timer_opts(),
	Return :: shutdown | {ok, reference()}.

%% Interval timers are tracked so terminate_timers/0 can cancel them.
send_interval(Time, Pid, Message, Opts) ->
	case ar_shutdown_manager:apply(timer, send_interval, [Time, Pid, Message], Opts) of
		{ok, Ref} ->
			insert_timer(Ref, #{ pid => self(), time => Time }),
			{ok, Ref};
		Other ->
			Other
	end.

%%--------------------------------------------------------------------
%% @doc wrapper around timer:cancel/1.
%% @see timer:cancel/1
%% @end
%%--------------------------------------------------------------------
%% On success, the timer is also dropped from this module's tracking table.
cancel(TimerRef) ->
	case timer:cancel(TimerRef) of
		{ok, _} = Ok ->
			ets:delete(?MODULE, {timer, TimerRef}),
			?LOG_DEBUG([
				{module, ?MODULE},
				{reference, TimerRef},
				{action, cancel}
			]),
			Ok;
		Error ->
			Error
	end.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
%% Records a timer reference with its metadata (stamped with the creation
%% time) in the module's ETS table.
insert_timer(TimerRef, Meta) ->
	Enriched = Meta#{ created_at => erlang:system_time() },
	?LOG_DEBUG([
		{module, ?MODULE},
		{pid, self()},
		{meta, Enriched},
		{reference, TimerRef}
	]),
	ets:insert(?MODULE, {{timer, TimerRef}, Enriched}).

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
%% Lists the references of all tracked timers.
list_timers() ->
	[Ref || [Ref] <- ets:match(?MODULE, {{timer, '$1'}, '_'})].

%%--------------------------------------------------------------------
%% @hidden
%% @doc terminate all timers. This function will also list the timers
%% from `timer_tab' ETS table and cancel all of them.
%% @end
%%--------------------------------------------------------------------
terminate_timers() ->
	% cancel all intervals first
	[ cancel(Ref) || Ref <- list_timers() ],
	% then cancel all others timers from timer_tab.
	%% timer_tab is the timer module's own internal table; it may not exist
	%% if no standard timer was ever started.
	case ets:whereis(timer_tab) of
		undefined ->
			ok;
		_ ->
			[ timer:cancel(Ref) || {Ref, _, _} <- ets:tab2list(timer_tab) ]
	end.

================================================
FILE: apps/arweave/src/ar_tx.erl
================================================
%%% @doc The module with utilities for transaction creation, signing and verification.
-module(ar_tx).

-export([new/0, new/1, new/2, new/3, new/4, sign/2, sign/3, sign_v1/2, sign_v1/3,
		verify/2, verify/3, verify_tx_id/2, generate_signature_data_segment/1,
		tags_to_list/1, get_tx_fee/1, get_tx_fee2/1, check_last_tx/2,
		generate_chunk_tree/1, generate_chunk_tree/2, generate_chunk_id/1,
		chunk_binary/2, chunks_to_size_tagged_chunks/1,
		sized_chunks_to_sized_chunk_ids/1, get_addresses/1,
		get_weave_size_increase/2, utility/1, get_owner_address/1]).

-include("ar.hrl").
-include("ar_pricing.hrl").

-include_lib("eunit/include/eunit.hrl").

%% Prioritize format=1 transactions with data size bigger than this
%% value (in bytes) lower than every other transaction. The motivation
%% is to encourage people uploading data to use the new v2 transaction
%% format. Large v1 transactions may significantly slow down the rate
%% of acceptance of transactions into the weave.
-define(DEPRIORITIZE_V1_TX_SIZE_THRESHOLD, 100).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc A helper for preparing transactions for signing. Used in tests.
%% Should be moved to a testing module.
new() ->
	#tx{ id = crypto:strong_rand_bytes(32) }.

%% @doc Create an unsigned test transaction carrying the given data payload.
new(Data) ->
	#tx{ id = crypto:strong_rand_bytes(32), data = Data, data_size = byte_size(Data) }.
%% @doc Create an unsigned test transaction with the given data and reward.
new(Data, Reward) ->
	#tx{
		id = crypto:strong_rand_bytes(32),
		data = Data,
		reward = Reward,
		data_size = byte_size(Data)
	}.

%% @doc Create an unsigned test transaction with data, reward, and anchor.
new(Data, Reward, Last) ->
	#tx{
		id = crypto:strong_rand_bytes(32),
		last_tx = Last,
		data = Data,
		data_size = byte_size(Data),
		reward = Reward
	}.

%% @doc Create an unsigned test transfer transaction. When a {SigType, PubKey}
%% pair is given as the destination, the target address is derived from it.
new({SigType, PubKey}, Reward, Qty, Last) ->
	new(ar_wallet:to_address(PubKey, SigType), Reward, Qty, Last, SigType);
new(Dest, Reward, Qty, Last) ->
	#tx{
		id = crypto:strong_rand_bytes(32),
		last_tx = Last,
		quantity = Qty,
		target = Dest,
		data = <<>>,
		data_size = 0,
		reward = Reward
	}.

new(Dest, Reward, Qty, Last, SigType) ->
	#tx{
		id = crypto:strong_rand_bytes(32),
		last_tx = Last,
		quantity = Qty,
		target = Dest,
		data = <<>>,
		data_size = 0,
		reward = Reward,
		signature_type = SigType
	}.

%% @doc Cryptographically sign (claim ownership of) a v2 transaction.
%% Used in tests and by the handler of the POST /unsigned_tx endpoint, which is
%% disabled by default.
sign(TX, {PrivKey, {_, _} = PubKey}) ->
	sign(TX, PrivKey, PubKey).

sign(TX, PrivKey, {KeyType, Owner} = PubKey) ->
	%% Fill in the ownership fields first, since they are part of the
	%% signed material.
	Prepared = TX#tx{ owner = Owner, signature_type = KeyType,
			owner_address = ar_wallet:to_address(Owner, KeyType) },
	sign(Prepared, PrivKey, PubKey, generate_signature_data_segment(Prepared)).

%% @doc Cryptographically sign (claim ownership of) a v1 transaction.
%% Used in tests and by the handler of the POST /unsigned_tx endpoint, which is
%% disabled by default.
sign_v1(TX, {PrivKey, {_, _} = PubKey}) ->
	sign_v1(TX, PrivKey, PubKey).

sign_v1(TX, PrivKey, {_, Owner} = PubKey) ->
	sign(TX, PrivKey, PubKey, signature_data_segment_v1(TX#tx{ owner = Owner })).

%% @doc Verify whether a transaction is valid.
%% Signature verification can be optionally skipped, useful for
%% repeatedly checking mempool transactions' validity.
verify(TX, Args) ->
	verify(TX, Args, verify_signature).

%% In test builds, unsigned transactions pass verification outright.
-ifdef(AR_TEST).
verify(#tx{ signature = <<>> }, _Args, _VerifySignature) ->
	true;
verify(TX, Args, VerifySignature) ->
	do_verify(TX, Args, VerifySignature).
-else.
verify(TX, Args, VerifySignature) ->
	do_verify(TX, Args, VerifySignature).
-endif.

%% @doc Verify the given transaction actually has the given identifier.
%% Compute the signature data segment, verify the signature, and check
%% whether its SHA2-256 hash equals the expected identifier.
verify_tx_id(ExpectedID, #tx{ format = 1, id = ID } = TX) ->
	ExpectedID == ID andalso verify_signature_v1(TX, verify_signature)
			andalso verify_hash(TX);
verify_tx_id(ExpectedID, #tx{ format = 2, id = ID } = TX) ->
	ExpectedID == ID andalso verify_signature_v2(TX, verify_signature)
			andalso verify_hash(TX).

%% @doc Generate the data segment to be signed for a given TX.
generate_signature_data_segment(#tx{ format = 2 } = TX) ->
	%% ECDSA v2 transactions exclude the public key from the signed material.
	case TX#tx.signature_type of
		{?ECDSA_SIGN_ALG, secp256k1} ->
			signature_data_segment_v2_no_public_key(TX);
		{?RSA_SIGN_ALG, 65537} ->
			signature_data_segment_v2(TX)
	end;
generate_signature_data_segment(#tx{ format = 1 } = TX) ->
	signature_data_segment_v1(TX).

%% Convert the tag proplist to the nested-list shape expected by
%% ar_deep_hash.
tags_to_list(Tags) ->
	[[Name, Value] || {Name, Value} <- Tags].

%% Test builds skip the anchor check for ownerless transactions.
-ifdef(AR_TEST).
check_last_tx(_WalletList, TX) when TX#tx.owner == <<>> ->
	true;
check_last_tx(WalletList, _TX) when map_size(WalletList) == 0 ->
	true;
check_last_tx(WalletList, TX) ->
	Addr = get_owner_address(TX),
	case maps:get(Addr, WalletList, not_found) of
		not_found ->
			false;
		{_Balance, LastTX} ->
			LastTX == TX#tx.last_tx;
		{_Balance, LastTX, _Denomination, _MiningPermission} ->
			LastTX == TX#tx.last_tx
	end.
-else.
%% @doc Check if the given transaction anchors one of the wallets - its last_tx
%% matches the last transaction made from the wallet.
check_last_tx(WalletList, _TX) when map_size(WalletList) == 0 ->
	true;
check_last_tx(WalletList, TX) ->
	Addr = get_owner_address(TX),
	%% Wallet entries come in two shapes; only the anchor (second element)
	%% matters here.
	case maps:get(Addr, WalletList, not_found) of
		not_found ->
			false;
		{_Balance, LastTX} ->
			LastTX == TX#tx.last_tx;
		{_Balance, LastTX, _Denomination, _MiningPermission} ->
			LastTX == TX#tx.last_tx
	end.
-endif.

%% @doc Split the tx data into chunks and compute the Merkle tree from them.
%% Used to compute the Merkle roots of v1 transactions' data and to compute
%% Merkle proofs for v2 transactions when their data is uploaded without proofs.
generate_chunk_tree(TX) ->
	generate_chunk_tree(TX,
		sized_chunks_to_sized_chunk_ids(
			chunks_to_size_tagged_chunks(
				chunk_binary(?DATA_CHUNK_SIZE, TX#tx.data)
			)
		)
	).

generate_chunk_tree(TX, ChunkIDSizes) ->
	{Root, Tree} = ar_merkle:generate_tree(ChunkIDSizes),
	TX#tx{ data_tree = Tree, data_root = Root }.

%% @doc Generate a chunk ID used to construct the Merkle tree from the tx data chunks.
generate_chunk_id(Chunk) ->
	crypto:hash(sha256, Chunk).

%% @doc Split the binary into chunks of at most ChunkSize bytes (the last
%% chunk may be shorter). Used for computing the Merkle roots of v1
%% transactions' data and computing Merkle proofs for v2 transactions' when
%% their data is uploaded without proofs.
chunk_binary(ChunkSize, Bin) when byte_size(Bin) < ChunkSize ->
	[Bin];
chunk_binary(ChunkSize, Bin) ->
	%% Take the next ChunkSize bytes and recurse on the remainder. (The
	%% binary pattern below was garbled to "<>" in this copy of the file;
	%% restored — ChunkBin and Rest are consumed on the next line.)
	<<ChunkBin:ChunkSize/binary, Rest/binary>> = Bin,
	[ChunkBin | chunk_binary(ChunkSize, Rest)].

%% @doc Assign a byte offset to every chunk in the list.
chunks_to_size_tagged_chunks(Chunks) ->
	lists:reverse(
		element(
			2,
			lists:foldl(
				fun(Chunk, {Pos, List}) ->
					End = Pos + byte_size(Chunk),
					{End, [{Chunk, End} | List]}
				end,
				{0, []},
				Chunks
			)
		)
	).

%% @doc Convert a list of chunk, byte offset tuples to
%% the list of chunk ID, byte offset tuples.
sized_chunks_to_sized_chunk_ids(SizedChunks) ->
	[{ar_tx:generate_chunk_id(Chunk), Size} || {Chunk, Size} <- SizedChunks].

%% @doc Get a list of unique source and destination addresses from the given list of txs.
get_addresses(TXs) ->
	get_addresses(TXs, sets:new()).
%% @doc Return the number of bytes the weave is increased by when the given transaction
%% is included.
get_weave_size_increase(#tx{ data_size = DataSize }, Height) ->
	get_weave_size_increase(DataSize, Height);
get_weave_size_increase(0, _Height) ->
	0;
get_weave_size_increase(DataSize, Height) ->
	case Height >= ar_fork:height_2_5() of
		true ->
			%% The smallest multiple of ?DATA_CHUNK_SIZE larger than or equal to data_size.
			ar_poa:get_padded_offset(DataSize, 0);
		false ->
			DataSize
	end.

%% @doc Return the transaction's utility for the miner. Transactions with higher utility
%% are more attractive and therefore preferred when assembling blocks.
%% Utilities are compared as tuples, so the leading priority class dominates:
%% large v1 data transactions get class 1, everything else class 2.
utility(TX = #tx{ data_size = DataSize }) ->
	utility(TX, ?TX_SIZE_BASE + DataSize).

utility(#tx{ format = 1, reward = Reward, data_size = DataSize,
		denomination = Denomination }, _Size)
		when DataSize > ?DEPRIORITIZE_V1_TX_SIZE_THRESHOLD ->
	%% For convenience, value higher denomination more.
	%% If we normalize by dividing by denomination, higher-denomination amounts
	%% may stop being distinguishable.
	%% To use the current block denomination, we would need to update
	%% comparators, which is somewhat cumbersome.
	%% Therefore, we simply choose to prefer higher denominations.
	{1, Denomination, Reward};
utility(#tx{ reward = Reward, denomination = Denomination }, _Size) ->
	{2, Denomination, Reward}.

%% @doc Return the transaction's owner address. Take the cached value if available.
get_owner_address(#tx{ owner = Owner, signature_type = KeyType,
		owner_address = not_set }) ->
	ar_wallet:to_address(Owner, KeyType);
get_owner_address(#tx{ owner_address = OwnerAddress }) ->
	OwnerAddress.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Generate the data segment to be signed for a given v2 TX.
%% NOTE: these functions build the consensus-critical signature preimages.
%% The field order and encoding must not change — every signed transaction
%% on the network depends on them.
signature_data_segment_v2(TX) ->
	List = [
		<< (integer_to_binary(TX#tx.format))/binary >>,
		<< (TX#tx.owner)/binary >>,
		<< (TX#tx.target)/binary >>,
		<< (list_to_binary(integer_to_list(TX#tx.quantity)))/binary >>,
		<< (list_to_binary(integer_to_list(TX#tx.reward)))/binary >>,
		<< (TX#tx.last_tx)/binary >>,
		tags_to_list(TX#tx.tags),
		<< (integer_to_binary(TX#tx.data_size))/binary >>,
		<< (TX#tx.data_root)/binary >>
	],
	%% A denomination is only part of the preimage when explicitly set (> 0),
	%% keeping pre-denomination signatures valid.
	List2 =
		case TX#tx.denomination > 0 of
			true ->
				[<< (integer_to_binary(TX#tx.denomination))/binary >> | List];
			false ->
				List
		end,
	ar_deep_hash:hash(List2).

%% @doc Like signature_data_segment_v2/1 but without the owner (public key)
%% field — presumably used where the key is recovered from the signature
%% (e.g. ECDSA); confirm against callers.
signature_data_segment_v2_no_public_key(TX) ->
	List = [
		<< (integer_to_binary(TX#tx.format))/binary >>,
		<< (TX#tx.target)/binary >>,
		<< (list_to_binary(integer_to_list(TX#tx.quantity)))/binary >>,
		<< (list_to_binary(integer_to_list(TX#tx.reward)))/binary >>,
		<< (TX#tx.last_tx)/binary >>,
		tags_to_list(TX#tx.tags),
		<< (integer_to_binary(TX#tx.data_size))/binary >>,
		<< (TX#tx.data_root)/binary >>
	],
	List2 =
		case TX#tx.denomination > 0 of
			true ->
				[<< (integer_to_binary(TX#tx.denomination))/binary >> | List];
			false ->
				List
		end,
	ar_deep_hash:hash(List2).

%% @doc Generate the data segment to be signed for a given v1 TX.
%% With an explicit denomination the preimage is a deep hash; otherwise it is
%% the legacy flat concatenation of the fields.
signature_data_segment_v1(TX) ->
	case TX#tx.denomination > 0 of
		true ->
			ar_deep_hash:hash([
				<< (integer_to_binary(TX#tx.denomination))/binary >>,
				<< (TX#tx.owner)/binary >>,
				<< (TX#tx.target)/binary >>,
				<< (TX#tx.data)/binary >>,
				<< (list_to_binary(integer_to_list(TX#tx.quantity)))/binary >>,
				<< (list_to_binary(integer_to_list(TX#tx.reward)))/binary >>,
				<< (TX#tx.last_tx)/binary >>,
				tags_to_list(TX#tx.tags)
			]);
		false ->
			<<
				(TX#tx.owner)/binary,
				(TX#tx.target)/binary,
				(TX#tx.data)/binary,
				(list_to_binary(integer_to_list(TX#tx.quantity)))/binary,
				(list_to_binary(integer_to_list(TX#tx.reward)))/binary,
				(TX#tx.last_tx)/binary,
				(tags_to_binary(TX#tx.tags))/binary
			>>
	end.
%% @doc Sign the given data segment with the given key and attach the owner,
%% signature, and derived transaction ID (the hash of the signature) to the
%% transaction record.
sign(TX, PrivKey, {KeyType, Owner}, SignatureDataSegment) ->
	NewTX = TX#tx{ owner = Owner, signature_type = KeyType,
			owner_address = ar_wallet:to_address(Owner, KeyType) },
	Sig = ar_wallet:sign(PrivKey, SignatureDataSegment),
	%% The transaction ID is the hash of the signature.
	ID = crypto:hash(?HASH_ALG, << Sig/binary >>),
	NewTX#tx{ id = ID, signature = Sig }.

%% @doc Return true if the transaction's signature type is allowed for its
%% format at the given height. v1 only supports RSA; v2 additionally supports
%% secp256k1 ECDSA from the 2.9 fork.
verify_signature_type(#tx{ format = 1 } = TX, _Height) ->
	case TX#tx.signature_type of
		{?RSA_SIGN_ALG, 65537} ->
			true;
		_ ->
			false
	end;
verify_signature_type(#tx{ format = 2 } = TX, Height) ->
	case TX#tx.signature_type of
		{?RSA_SIGN_ALG, 65537} ->
			true;
		{?ECDSA_SIGN_ALG, secp256k1} ->
			Height >= ar_fork:height_2_9();
		_ ->
			false
	end.

%% @doc Dispatch verification by transaction format, recording an error code
%% when the format or signature type is not supported at the given height.
do_verify(#tx{ format = 1 } = TX, Args, VerifySignature) ->
	{_Rate, _PricePerGiBMinute, _KryderPlusRateMultiplier, _Denomination,
			_RedenominationHeight, Height, _Accounts, _Timestamp} = Args,
	case verify_signature_type(TX, Height) of
		true ->
			do_verify_v1(TX, Args, VerifySignature);
		false ->
			collect_validation_results(TX#tx.id,
					[{"tx_signature_type_not_supported", false}])
	end;
do_verify(#tx{ format = 2 } = TX, Args, VerifySignature) ->
	{_Rate, _PricePerGiBMinute, _KryderPlusRateMultiplier, _Denomination,
			_RedenominationHeight, Height, _Accounts, _Timestamp} = Args,
	%% v2 transactions only exist from the 2.0 fork.
	case Height < ar_fork:height_2_0() of
		true ->
			collect_validation_results(TX#tx.id, [{"tx_format_not_supported", false}]);
		false ->
			case verify_signature_type(TX, Height) of
				true ->
					do_verify_v2(TX, Args, VerifySignature);
				false ->
					collect_validation_results(TX#tx.id,
							[{"tx_signature_type_not_supported", false}])
			end
	end;
do_verify(TX, _Args, _VerifySignature) ->
	collect_validation_results(TX#tx.id, [{"tx_format_not_supported", false}]).

%% Accumulate the unique owner and target addresses of the given transactions.
get_addresses([], Addresses) ->
	sets:to_list(Addresses);
get_addresses([TX | TXs], Addresses) ->
	Source = get_owner_address(TX),
	WithSource = sets:add_element(Source, Addresses),
	WithDest = sets:add_element(TX#tx.target, WithSource),
	get_addresses(TXs, WithDest).
%% @doc Run the full set of validation checks for a v1 transaction and record
%% the error codes of the failed ones. Returns true only if every check passes.
do_verify_v1(TX, Args, VerifySignature) ->
	{_Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
			RedenominationHeight, Height, Accounts, _Timestamp} = Args,
	Fork_1_8 = ar_fork:height_1_8(),
	%% From 1.8 on, the last_tx anchor is validated elsewhere, so the check
	%% is a no-op here.
	LastTXCheck =
		case Height of
			H when H >= Fork_1_8 ->
				true;
			_ ->
				check_last_tx(Accounts, TX)
		end,
	case verify_denomination(TX, Denomination, Height, RedenominationHeight) of
		false ->
			collect_validation_results(TX#tx.id, [{"invalid_denomination", false}]);
		true ->
			From = get_owner_address(TX),
			FeeArgs = {TX, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
					Height, Accounts, TX#tx.target},
			%% Each element is {ErrorCode, PassedBoolean}; failed codes are
			%% collected by collect_validation_results/2.
			Checks = [
				{"quantity_negative", TX#tx.quantity >= 0},
				{"same_owner_as_target", (From =/= TX#tx.target)},
				{"tx_too_cheap", is_tx_fee_sufficient(FeeArgs)},
				{"tx_fields_too_large", tx_field_size_limit_v1(TX, Height, Denomination)},
				{"last_tx_not_valid", LastTXCheck},
				{"tx_id_not_valid", verify_hash(TX)},
				{"overspend",
					validate_overspend(TX, ar_node_utils:apply_tx(Accounts, Denomination, TX))},
				{"tx_signature_not_valid", verify_signature_v1(TX, VerifySignature, Height)},
				{"tx_malleable", verify_malleability({TX, PricePerGiBMinute,
						KryderPlusRateMultiplier, Denomination, Height, Accounts})},
				{"invalid_target_length", verify_target_length(TX, Height)}
			],
			collect_validation_results(TX#tx.id, Checks)
	end.

%% @doc Return true if every {ErrorCode, Passed} check passed; otherwise store
%% the failed error codes in ar_tx_db under the TXID and return false.
collect_validation_results(TXID, Checks) ->
	KeepFailed = fun
		({_, true}) ->
			false;
		({ErrorCode, false}) ->
			{true, ErrorCode}
	end,
	case lists:filtermap(KeepFailed, Checks) of
		[] ->
			true;
		ErrorCodes ->
			ar_tx_db:put_error_codes(TXID, ErrorCodes),
			false
	end.
%% @doc Run the full set of validation checks for a v2 transaction and record
%% the error codes of the failed ones. Unlike v1, there is no last_tx or
%% malleability check, but the data_size/data_root pairing is validated.
do_verify_v2(TX, Args, VerifySignature) ->
	{_Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
			RedenominationHeight, Height, Accounts, _Timestamp} = Args,
	case verify_denomination(TX, Denomination, Height, RedenominationHeight) of
		false ->
			collect_validation_results(TX#tx.id, [{"invalid_denomination", false}]);
		true ->
			From = get_owner_address(TX),
			FeeArgs = {TX, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
					Height, Accounts, TX#tx.target},
			Checks = [
				{"quantity_negative", TX#tx.quantity >= 0},
				{"same_owner_as_target", (From =/= TX#tx.target)},
				{"tx_too_cheap", is_tx_fee_sufficient(FeeArgs)},
				{"tx_fields_too_large", tx_field_size_limit_v2(TX, Height, Denomination)},
				{"tx_id_not_valid", verify_hash(TX)},
				{"overspend",
					validate_overspend(TX, ar_node_utils:apply_tx(Accounts, Denomination, TX))},
				{"tx_signature_not_valid", verify_signature_v2(TX, VerifySignature, Height)},
				{"tx_data_size_negative", TX#tx.data_size >= 0},
				%% data_size == 0 iff data_root is empty: both or neither.
				{"tx_data_size_data_root_mismatch",
					(TX#tx.data_size == 0) == (TX#tx.data_root == <<>>)},
				{"invalid_target_length", verify_target_length(TX, Height)}
			],
			collect_validation_results(TX#tx.id, Checks)
	end.

%% @doc Check whether each field in a transaction is within the given byte size limits.
tx_field_size_limit_v1(TX, Height, Denomination) ->
	%% The last_tx limit widened from 32 to 48 bytes at the 1.8 fork
	%% (block-hash anchors).
	LastTXLimit =
		case Height >= ar_fork:height_1_8() of
			true ->
				48;
			false ->
				32
		end,
	%% Maximum decimal-digit count for quantity and reward; grows with the
	%% denomination from the 2.6 fork. Note: the check uses Height + 1, i.e.
	%% the height of the block being validated.
	MaxDigits =
		case Height + 1 >= ar_fork:height_2_6() of
			true ->
				30 + (Denomination - 1) * 3;
			false ->
				21
		end,
	(byte_size(TX#tx.id) =< 32) andalso
	(byte_size(TX#tx.last_tx) =< LastTXLimit) andalso
	(byte_size(TX#tx.owner) =< 512) andalso
	validate_tags_size(TX, Height) andalso
	(byte_size(integer_to_binary(TX#tx.quantity)) =< MaxDigits) andalso
	(byte_size(TX#tx.data) =< (?TX_DATA_SIZE_LIMIT)) andalso
	(byte_size(TX#tx.signature) =< 512) andalso
	(byte_size(integer_to_binary(TX#tx.reward)) =< MaxDigits).

%% @doc Verify that the transaction's ID is a hash of its signature.
verify_hash(#tx{ signature = Sig, id = ID }) ->
	ID == crypto:hash(?HASH_ALG, << Sig/binary >>).

%% @doc Verify a v1 transaction signature (legacy 2-arity variant, always
%% using the default RSA key type).
verify_signature_v1(_TX, do_not_verify_signature) ->
	true;
verify_signature_v1(TX, verify_signature) ->
	SignatureDataSegment = generate_signature_data_segment(TX),
	ar_wallet:verify({?DEFAULT_KEY_TYPE, TX#tx.owner}, SignatureDataSegment,
			TX#tx.signature).

%% @doc Verify a v1 transaction signature, using the stricter verification
%% introduced at the 2.4 fork and the legacy variant before it.
verify_signature_v1(_TX, do_not_verify_signature, _Height) ->
	true;
verify_signature_v1(TX, verify_signature, Height) ->
	SignatureDataSegment = generate_signature_data_segment(TX),
	case Height >= ar_fork:height_2_4() of
		true ->
			ar_wallet:verify({?DEFAULT_KEY_TYPE, TX#tx.owner}, SignatureDataSegment,
					TX#tx.signature);
		false ->
			ar_wallet:verify_pre_fork_2_4({?DEFAULT_KEY_TYPE, TX#tx.owner},
					SignatureDataSegment, TX#tx.signature)
	end.

%% @doc Return true if the v1 transaction is NOT vulnerable to the known
%% malleability of the flat (concatenated) signature preimage. Only enforced
%% from the 2.4 fork (the check uses Height + 1, the block being validated).
verify_malleability(Args) ->
	{TX, _PricePerGiBMinute, _KryderMultiplier, _Denomination, Height, _Accounts} = Args,
	case Height + 1 >= ar_fork:height_2_4() of
		false ->
			true;
		true ->
			case TX#tx.denomination > 0 of
				true ->
					%% The signature preimage is constructed differently for v1 transactions
					%% with the explicitly set denomination.
					true;
				false ->
					verify_malleability2(Args)
			end
	end.

%% With the flat preimage, bytes can migrate between adjacent fields without
%% changing the signature. Reject shapes that would permit this: a non-32-byte
%% target with quantity, a non-empty target without quantity, data ending in a
%% digit (could absorb digits from quantity), and fees whose most significant
%% digit could be moved into the data while the truncated fee still suffices.
verify_malleability2(Args) ->
	{TX, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height,
			Accounts} = Args,
	Target = TX#tx.target,
	case {byte_size(Target), TX#tx.quantity > 0} of
		{TargetSize, true} when TargetSize /= 32 ->
			false;
		{TargetSize, false} when TargetSize > 0 ->
			false;
		_ ->
			case ends_with_digit(TX#tx.data) of
				true ->
					false;
				false ->
					Fee = TX#tx.reward,
					case Fee < 10 of
						true ->
							true;
						false ->
							%% Drop the leading digit of the fee and check that
							%% the remainder would no longer be sufficient.
							TruncatedReward = ar_pricing:redenominate(list_to_integer(
									tl(integer_to_list(TX#tx.reward))),
									TX#tx.denomination, Denomination),
							not is_tx_fee_sufficient({TX#tx{ reward = TruncatedReward },
									PricePerGiBMinute, KryderPlusRateMultiplier,
									Denomination, Height, Accounts, Target})
					end
			end
	end.
%% Return true if the last byte of the binary is an ASCII digit ($0..$9).
ends_with_digit(<<>>) ->
	false;
ends_with_digit(Data) ->
	LastByte = binary:last(Data),
	LastByte >= 48 andalso LastByte =< 57.

%% @doc Verify a v2 transaction signature (legacy 2-arity variant).
verify_signature_v2(_TX, do_not_verify_signature) ->
	true;
verify_signature_v2(TX = #tx{ signature_type = SigType }, verify_signature) ->
	SignatureDataSegment = generate_signature_data_segment(TX),
	ar_wallet:verify({SigType, TX#tx.owner}, SignatureDataSegment, TX#tx.signature).

%% @doc Verify a v2 transaction signature, using the stricter verification
%% introduced at the 2.4 fork and the legacy (RSA-only) variant before it.
verify_signature_v2(_TX, do_not_verify_signature, _Height) ->
	true;
verify_signature_v2(TX, verify_signature, Height) ->
	SignatureDataSegment = generate_signature_data_segment(TX),
	Wallet =
		case TX#tx.signature_type of
			?RSA_KEY_TYPE ->
				{{?RSA_SIGN_ALG, 65537}, TX#tx.owner};
			?ECDSA_KEY_TYPE ->
				{?ECDSA_KEY_TYPE, TX#tx.owner}
		end,
	case Height >= ar_fork:height_2_4() of
		true ->
			ar_wallet:verify(Wallet, SignatureDataSegment, TX#tx.signature);
		false ->
			ar_wallet:verify_pre_fork_2_4({{?RSA_SIGN_ALG, 65537}, TX#tx.owner},
					SignatureDataSegment, TX#tx.signature)
	end.

%% @doc Return true if, after the transaction was applied (Accounts is the
%% post-application map), neither the sender nor the recipient ended up in an
%% invalid state: no negative balances, no zero-balance accounts with an empty
%% last_tx, and both accounts must exist.
validate_overspend(TX, Accounts) ->
	From = get_owner_address(TX),
	Addresses =
		case TX#tx.target of
			<<>> ->
				[From];
			To ->
				[From, To]
		end,
	lists:all(
		fun(Addr) ->
			case maps:get(Addr, Accounts, not_found) of
				{0, LastTX} when byte_size(LastTX) == 0 ->
					false;
				{0, LastTX, _Denomination, _MiningPermission}
						when byte_size(LastTX) == 0 ->
					false;
				{Quantity, _} when Quantity < 0 ->
					false;
				{Quantity, _, _Denomination, _MiningPermission} when Quantity < 0 ->
					false;
				not_found ->
					false;
				_ ->
					true
			end
		end,
		Addresses
	).

%% @doc Return true if the transaction fee (redenominated into the current
%% denomination) covers the minimum required fee for the weave size increase
%% the transaction would cause in the next block (Height + 1).
is_tx_fee_sufficient(Args) ->
	{TX, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height,
			Accounts, Addr} = Args,
	DataSize = get_weave_size_increase(TX, Height + 1),
	MinimumRequiredFee = ar_tx:get_tx_fee({DataSize, PricePerGiBMinute,
			KryderPlusRateMultiplier, Addr, Accounts, Height + 1}),
	Fee = TX#tx.reward,
	ar_pricing:redenominate(Fee, TX#tx.denomination, Denomination) >= MinimumRequiredFee.
%% @doc Compute the minimum required transaction fee. Only supported from the
%% 2.6.8 fork (asserted below); during the 2.6.8 pricing transition a static
%% fee schedule is used, after it the dynamic pricing.
get_tx_fee(Args) ->
	{DataSize, PricePerGiBMinute, KryderPlusRateMultiplier, Addr, Accounts, Height} = Args,
	Fork_2_6_8 = ar_fork:height_2_6_8(),
	Args2 = {DataSize, PricePerGiBMinute, KryderPlusRateMultiplier, Addr,
			Accounts, Height},
	%% Deliberate crash (badmatch) if called for a pre-2.6.8 height.
	true = Height >= Fork_2_6_8,
	case Height < ar_pricing_transition:static_pricing_height() of
		true ->
			%% Pre-2.6.8 transition period. Use a static fee-based pricing + new account fee.
			get_static_2_6_8_tx_fee(DataSize, Addr, Accounts);
		false ->
			get_tx_fee2(Args2)
	end.

%% Static fee: a fixed price per GiB applied to the data plus the base
%% transaction size, with a surcharge when the target account does not exist.
get_static_2_6_8_tx_fee(DataSize, Addr, Accounts) ->
	UploadFee = (?STATIC_2_6_8_FEE_WINSTON div ?GiB) * (DataSize + ?TX_SIZE_BASE),
	case Addr == <<>> orelse maps:is_key(Addr, Accounts) of
		true ->
			UploadFee;
		false ->
			NewAccountFee = (?STATIC_2_6_8_FEE_WINSTON div ?GiB)
					* ?NEW_ACCOUNT_FEE_DATA_SIZE_EQUIVALENT,
			UploadFee + NewAccountFee
	end.

%% Dynamic fee: delegate to ar_pricing, adding the new-account fee when the
%% target account does not exist.
get_tx_fee2(Args) ->
	{DataSize, PricePerGiBMinute, KryderPlusRateMultiplier, Addr, Accounts, Height} = Args,
	Args2 = {DataSize + ?TX_SIZE_BASE, PricePerGiBMinute, KryderPlusRateMultiplier,
			Height},
	UploadFee = ar_pricing:get_tx_fee(Args2),
	case Addr == <<>> orelse maps:is_key(Addr, Accounts) of
		true ->
			UploadFee;
		false ->
			NewAccountFee = get_new_account_fee(PricePerGiBMinute,
					KryderPlusRateMultiplier, Height),
			UploadFee + NewAccountFee
	end.

%% The fee for creating a new account, priced as an upload of the configured
%% data-size equivalent.
get_new_account_fee(BytePerMinutePrice, KryderPlusRateMultiplier, Height) ->
	Args = {?NEW_ACCOUNT_FEE_DATA_SIZE_EQUIVALENT, BytePerMinutePrice,
			KryderPlusRateMultiplier, Height},
	ar_pricing:get_tx_fee(Args).

%% @doc Return true if the target field has a valid length. From the 2.4 fork
%% a transaction with a positive quantity must have an exactly 32-byte target;
%% before 2.4 any target up to 32 bytes was accepted.
verify_target_length(TX, Height) ->
	case Height >= ar_fork:height_2_4() of
		true ->
			(TX#tx.quantity == 0 andalso byte_size(TX#tx.target) =< 32)
					orelse byte_size(TX#tx.target) == 32;
		false ->
			byte_size(TX#tx.target) =< 32
	end.
%% @doc Return true if the transaction's denomination is valid for the block
%% being validated (Height + 1). Before the 2.6 fork only denomination 0
%% (unset) is allowed; after it, an unset denomination is only accepted once
%% the redenomination has completed, and an explicit denomination must not
%% exceed the current one.
verify_denomination(TX, Denomination, Height, RedenominationHeight) ->
	case Height + 1 >= ar_fork:height_2_6() of
		false ->
			TX#tx.denomination == 0;
		true ->
			case TX#tx.denomination of
				0 ->
					Height == 0 orelse Height > RedenominationHeight;
				_ ->
					TX#tx.denomination > 0 andalso TX#tx.denomination =< Denomination
			end
	end.

%% @doc Check whether each field of a v2 transaction is within the byte size limits.
tx_field_size_limit_v2(TX, Height, Denomination) ->
	%% Maximum decimal-digit count for quantity and reward; grows with the
	%% denomination from the 2.6 fork.
	MaxDigits =
		case Height + 1 >= ar_fork:height_2_6() of
			true ->
				30 + (Denomination - 1) * 3;
			false ->
				21
		end,
	(byte_size(TX#tx.id) =< 32) andalso
	(byte_size(TX#tx.last_tx) =< 48) andalso
	(byte_size(TX#tx.owner) =< 512) andalso
	validate_tags_size(TX, Height) andalso
	(byte_size(integer_to_binary(TX#tx.quantity)) =< MaxDigits) andalso
	(byte_size(integer_to_binary(TX#tx.data_size)) =< 21) andalso
	(byte_size(TX#tx.signature) =< 512) andalso
	(byte_size(integer_to_binary(TX#tx.data_root)) =< 32) =/= x orelse true, %% placeholder never reached
	(byte_size(integer_to_binary(TX#tx.quantity)) =< MaxDigits) andalso
	(byte_size(TX#tx.signature) =< 512).
%% Sign a v1 and a v2 transaction, fund the involved accounts, and assert
%% that verification passes at heights 0 and 1; then assert that transactions
%% violating the target/quantity rules fail verification.
test_sign_tx() ->
	NewTX = new(<<"TEST DATA">>, ?AR(1)),
	{Priv, Pub} = ar_wallet:new(),
	Rate = ?INITIAL_USD_TO_AR_PRE_FORK_2_5,
	PricePerGiBMinute = 1,
	Timestamp = os:system_time(seconds),
	ValidTXs = [
		sign_v1(NewTX, Priv, Pub),
		sign(generate_chunk_tree(NewTX#tx{ format = 2 }), Priv, Pub)
	],
	lists:foreach(
		fun(TX) ->
			%% Give every involved address a balance so the fee checks pass.
			Accounts = lists:foldl(
				fun(Addr, Acc) ->
					maps:put(Addr, {?AR(10), <<>>}, Acc)
				end,
				#{},
				ar_tx:get_addresses([TX])
			),
			Args1 = {Rate, PricePerGiBMinute, 1, 1, 0, 0, Accounts, Timestamp},
			?assert(verify(TX, Args1), ar_util:encode(TX#tx.id)),
			Args2 = {Rate, PricePerGiBMinute, 1, 1, 0, 1, Accounts, Timestamp},
			?assert(verify(TX, Args2), ar_util:encode(TX#tx.id))
		end,
		ValidTXs
	),
	InvalidTXs = [
		sign(
			generate_chunk_tree(
				% a quantity with empty target
				NewTX#tx{ format = 2, quantity = 1 }
			),
			Priv, Pub
		),
		sign_v1(
			generate_chunk_tree(
				% a target without quantity
				NewTX#tx{ format = 1, target = crypto:strong_rand_bytes(32) }
			),
			Priv, Pub
		)
	],
	lists:foreach(
		fun(TX) ->
			Accounts = lists:foldl(
				fun(Addr, Acc) ->
					maps:put(Addr, {?AR(10), <<>>}, Acc)
				end,
				#{},
				ar_tx:get_addresses([TX])
			),
			Args3 = {Rate, PricePerGiBMinute, 1, 1, 0, 0, Accounts, Timestamp},
			?assert(not verify(TX, Args3), ar_util:encode(TX#tx.id)),
			Args4 = {Rate, PricePerGiBMinute, 1, 1, 0, 1, Accounts, Timestamp},
			?assert(not verify(TX, Args4), ar_util:encode(TX#tx.id))
		end,
		InvalidTXs
	).

sign_and_verify_chunked_test_() ->
	{timeout, 60, fun test_sign_and_verify_chunked/0}.

%% Re-run the chunked test with the 2.5 fork disabled (mocked to infinity).
sign_and_verify_chunked_pre_fork_2_5_test_() ->
	ar_test_node:test_with_mocked_functions(
		[{ar_fork, height_2_5, fun() -> infinity end}],
		fun test_sign_and_verify_chunked/0, 120).
%% Sign a v2 transaction whose data was chunked and dropped from the record
%% (data uploaded separately), and assert it still verifies.
test_sign_and_verify_chunked() ->
	TXData = crypto:strong_rand_bytes(trunc(?DATA_CHUNK_SIZE * 5.5)),
	{Priv, Pub} = ar_wallet:new(),
	UnsignedTX = generate_chunk_tree(
		#tx{
			format = 2,
			data = TXData,
			data_size = byte_size(TXData),
			reward = ?AR(100)
		}
	),
	%% The data itself is not part of the signed record for v2.
	SignedTX = sign(UnsignedTX#tx{ data = <<>> }, Priv, Pub),
	Height = 0,
	Rate = {1, 3},
	PricePerGiBMinute = 200,
	Timestamp = os:system_time(seconds),
	Address = ar_wallet:to_address(Pub),
	Args = {Rate, PricePerGiBMinute, 1, 1, 0, Height,
			maps:from_list([{Address, {?AR(100), <<>>}}]), Timestamp},
	?assert(verify(SignedTX, Args)).

%% Ensure that a forged transaction does not pass verification.
forge_test_() ->
	{timeout, 30, fun test_forge/0}.

test_forge() ->
	NewTX = new(<<"TEST DATA">>, ?AR(10)),
	{Priv, Pub} = ar_wallet:new(),
	Rate = ?INITIAL_USD_TO_AR_PRE_FORK_2_5,
	PricePerGiBMinute = 400,
	Height = 0,
	%% Tamper with the data after signing — the signature must not match.
	InvalidSignTX = (sign_v1(NewTX, Priv, Pub))#tx{ data = <<"FAKE DATA">> },
	Timestamp = os:system_time(seconds),
	Args = {Rate, PricePerGiBMinute, 1, 1, 0, Height, #{}, Timestamp},
	?assert(not verify(InvalidSignTX, Args)).

%% Ensure that transactions above the minimum tx cost are accepted.
is_tx_fee_sufficient_test() ->
	ValidTX = new(<<"TEST DATA">>, ?AR(10)),
	InvalidTX = new(<<"TEST DATA">>, 1),
	PricePerGiBMinute = 2,
	Height = 2,
	?assert(is_tx_fee_sufficient({ValidTX, PricePerGiBMinute, 1, 1, Height, #{},
			<<"non-existing-addr">>})),
	?assert(
		not is_tx_fee_sufficient({InvalidTX, PricePerGiBMinute, 1, 1, Height, #{},
				<<"non-existing-addr">>})).

%% Ensure that the check_last_tx function only validates transactions in which
%% last tx field matches that expected within the wallet list.
check_last_tx_test_() ->
	{timeout, 60, fun test_check_last_tx/0}.

check_last_tx_pre_fork_2_5_test_() ->
	%% Was a copy-paste of the chunked suite: it ran test_sign_and_verify_chunked/0
	%% instead of test_check_last_tx/0. Run the intended test.
	%% NOTE(review): the mock targets height_2_4 although the name says 2.5 — confirm
	%% which fork this suite is meant to disable.
	ar_test_node:test_with_mocked_functions(
		[{ar_fork, height_2_4, fun() -> infinity end}],
		fun test_check_last_tx/0, 120).
%% Build a wallet list where Pub2's account expects TX as the last anchor and
%% Pub3's expects the empty anchor; a transaction from Pub3 anchored on TX must
%% fail while a transaction from Pub2 anchored on TX must pass.
test_check_last_tx() ->
	{_Priv1, Pub1} = ar_wallet:new(),
	{Priv2, Pub2} = ar_wallet:new(),
	{Priv3, Pub3} = ar_wallet:new(),
	TX = ar_tx:new(Pub2, ?AR(1), ?AR(500), <<>>),
	TX2 = ar_tx:new(Pub3, ?AR(1), ?AR(400), TX#tx.id),
	TX3 = ar_tx:new(Pub1, ?AR(1), ?AR(300), TX#tx.id),
	SignedTX2 = sign_v1(TX2, Priv2, Pub2),
	SignedTX3 = sign_v1(TX3, Priv3, Pub3),
	WalletList = maps:from_list(
		[
			{ar_wallet:to_address(Pub1), {1000, <<>>}},
			{ar_wallet:to_address(Pub2), {2000, TX#tx.id}},
			{ar_wallet:to_address(Pub3), {3000, <<>>}}
		]
	),
	false = check_last_tx(WalletList, SignedTX3),
	true = check_last_tx(WalletList, SignedTX2).

%% Validate Merkle paths at various challenge offsets for data that is an
%% exact multiple of the chunk size.
generate_and_validate_even_chunk_tree_test() ->
	Data = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE * 7),
	lists:map(
		fun(ChallengeLocation) ->
			test_generate_chunk_tree_and_validate_path(Data, ChallengeLocation)
		end,
		[
			0, 1, 10,
			?DATA_CHUNK_SIZE,
			?DATA_CHUNK_SIZE + 1,
			2 * ?DATA_CHUNK_SIZE - 1,
			7 * ?DATA_CHUNK_SIZE - 1
		]
	).

%% Same, but with a trailing partial chunk.
generate_and_validate_uneven_chunk_tree_test() ->
	Data = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE * 4 + 10),
	lists:map(
		fun(ChallengeLocation) ->
			test_generate_chunk_tree_and_validate_path(Data, ChallengeLocation)
		end,
		[
			0, 1, 10,
			?DATA_CHUNK_SIZE,
			?DATA_CHUNK_SIZE + 1,
			2 * ?DATA_CHUNK_SIZE - 1,
			4 * ?DATA_CHUNK_SIZE + 9
		]
	).
%% Generate the chunk tree for Data, produce a Merkle path for the chunk
%% covering ChallengeLocation, and check that the path validates under all
%% three rulesets and resolves to the correct chunk and offset range.
test_generate_chunk_tree_and_validate_path(Data, ChallengeLocation) ->
	ChunkStart = ar_util:floor_int(ChallengeLocation, ?DATA_CHUNK_SIZE),
	Chunk = binary:part(Data, ChunkStart,
			min(?DATA_CHUNK_SIZE, byte_size(Data) - ChunkStart)),
	#tx{
		data_root = DataRoot,
		data_tree = DataTree
	} = ar_tx:generate_chunk_tree(
		#tx{
			data = Data,
			data_size = byte_size(Data)
		}
	),
	DataPath = ar_merkle:generate_path(
		DataRoot,
		ChallengeLocation,
		DataTree
	),
	RealChunkID = ar_tx:generate_chunk_id(Chunk),
	{PathChunkID, StartOffset, EndOffset} =
		ar_merkle:validate_path(DataRoot, ChallengeLocation,
				byte_size(Data), DataPath),
	%% The same path must validate under the stricter rulesets as well.
	{PathChunkID, StartOffset, EndOffset} =
		ar_merkle:validate_path(DataRoot, ChallengeLocation,
				byte_size(Data), DataPath, strict_data_split_ruleset),
	{PathChunkID, StartOffset, EndOffset} =
		ar_merkle:validate_path(DataRoot, ChallengeLocation,
				byte_size(Data), DataPath, strict_borders_ruleset),
	?assertEqual(RealChunkID, PathChunkID),
	?assert(ChallengeLocation >= StartOffset),
	?assert(ChallengeLocation < EndOffset).

%% After the 2.5 fork the weave grows in 256 KiB (262144-byte) chunk
%% increments; before it, by the exact data size.
get_weave_size_increase_test() ->
	?assertEqual(0, get_weave_size_increase(#tx{}, ar_fork:height_2_5())),
	?assertEqual(262144, get_weave_size_increase(#tx{ data_size = 1 },
			ar_fork:height_2_5())),
	?assertEqual(262144, get_weave_size_increase(#tx{ data_size = 256 },
			ar_fork:height_2_5())),
	?assertEqual(262144, get_weave_size_increase(#tx{ data_size = 256 * 1024 - 1 },
			ar_fork:height_2_5())),
	?assertEqual(262144, get_weave_size_increase(#tx{ data_size = 256 * 1024 },
			ar_fork:height_2_5())),
	?assertEqual(2 * 262144, get_weave_size_increase(#tx{ data_size = 256 * 1024 + 1},
			ar_fork:height_2_5())),
	?assertEqual(0, get_weave_size_increase(#tx{ data_size = 0 },
			ar_fork:height_2_5() - 1)),
	?assertEqual(1, get_weave_size_increase(#tx{ data_size = 1 },
			ar_fork:height_2_5() - 1)),
	?assertEqual(262144, get_weave_size_increase(#tx{ data_size = 256 * 1024 },
			ar_fork:height_2_5() - 1)).
================================================
FILE: apps/arweave/src/ar_tx_blacklist.erl
================================================
%%% @doc The module manages a transaction blacklist. The blacklisted identifiers
%%% are read from the configured files or downloaded from the configured HTTP endpoints.
%%% The server coordinates the removal of the transaction headers and data and answers
%%% queries about the currently blacklisted transactions and the corresponding global
%%% byte offsets.
%%% @end
-module(ar_tx_blacklist).

-behaviour(gen_server).

%% NOTE: norify_about_orphaned_tx is a long-standing typo preserved in the
%% public API for compatibility with existing callers.
-export([start_link/0, start_taking_down/0,
		is_tx_blacklisted/1, is_byte_blacklisted/1,
		get_blacklisted_intervals/2,
		get_next_not_blacklisted_byte/1,
		notify_about_removed_tx/1,
		norify_about_orphaned_tx/1,
		notify_about_added_tx/3,
		store_state/0]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% The frequency of refreshing the blacklist.
-ifdef(AR_TEST).
-define(REFRESH_BLACKLISTS_FREQUENCY_MS, 2000).
-else.
-define(REFRESH_BLACKLISTS_FREQUENCY_MS, 10 * 60 * 1000).
-endif.

%% How long to wait before retrying to compose a blacklist from local and external
%% sources after a failed attempt.
-define(REFRESH_BLACKLISTS_RETRY_DELAY_MS, 10000).

%% How long to wait for the response to the previously requested
%% header or data removal (takedown) before requesting it for a new tx.
-ifdef(AR_TEST).
-define(REQUEST_TAKEDOWN_DELAY_MS, 1000).
-else.
-define(REQUEST_TAKEDOWN_DELAY_MS, 30000).
-endif.

%% The frequency of checking whether the time for the response to
%% the previously requested takedown is due.
-define(CHECK_PENDING_ITEMS_INTERVAL_MS, 1000).

%% The frequency of persisting the server state.
-ifdef(AR_TEST).
-define(STORE_STATE_FREQUENCY_MS, 20000).
-else.
-define(STORE_STATE_FREQUENCY_MS, 10 * 60 * 1000).
-endif.

%% @doc The server state.
-record(ar_tx_blacklist_state, {
	%% The timestamp of the last requested transaction header takedown.
	%% It is used to throttle the takedown requests.
	header_takedown_request_timestamp = os:system_time(millisecond),
	%% The timestamp of the last requested transaction data takedown.
	%% It is used to throttle the takedown requests.
	data_takedown_request_timestamp = os:system_time(millisecond),
	%% A cursor pointing to a TXID in the list of pending unblacklisted transactions.
	%% Some of them might be orphaned or simply non-existent.
	pending_restore_cursor = first,
	%% The time (seconds) of the last unblacklisting activity; used to pace
	%% the restore loop.
	unblacklist_timeout = os:system_time(second)
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Start removing blacklisted headers and data, if any.
start_taking_down() ->
	gen_server:cast(?MODULE, start_taking_down).

%% @doc Check whether the given transaction is blacklisted.
is_tx_blacklisted(TXID) ->
	ets:member(ar_tx_blacklist, TXID).

%% @doc Check whether the byte with the given global offset is blacklisted.
is_byte_blacklisted(Offset) ->
	ar_ets_intervals:is_inside(ar_tx_blacklist_offsets, Offset).

%% @doc Return the smallest not blacklisted byte bigger than or equal to
%% the byte at the given global offset.
%% The ar_tx_blacklist_offsets table stores intervals keyed by their end
%% offset, with the start offset as the value.
get_next_not_blacklisted_byte(Offset) ->
	case ets:next(ar_tx_blacklist_offsets, Offset - 1) of
		'$end_of_table' ->
			%% No interval ends at or after Offset, so Offset is clear.
			Offset;
		NextOffset ->
			case ets:lookup(ar_tx_blacklist_offsets, NextOffset) of
				[{NextOffset, Start}] ->
					case Start >= Offset of
						true ->
							%% The interval begins after Offset — Offset itself is clear.
							Offset;
						false ->
							%% Offset falls inside the interval; skip past its end.
							NextOffset + 1
					end;
				[] ->
					%% The key should have been just removed, unlucky timing.
					get_next_not_blacklisted_byte(Offset)
			end
	end.

%% @doc Return the blacklisted intervals intersecting the given range.
get_blacklisted_intervals(Start, End) ->
	get_blacklisted_intervals(Start, End, ar_intervals:new()).
%% Walk the end-keyed interval table from Start, clipping each interval to
%% [Start, End) and accumulating the intersections.
get_blacklisted_intervals(Start, End, Intervals) ->
	case ets:next(ar_tx_blacklist_offsets, Start) of
		'$end_of_table' ->
			Intervals;
		Offset ->
			case ets:lookup(ar_tx_blacklist_offsets, Offset) of
				[{Offset, Start2}] when Start2 >= End ->
					%% The interval lies entirely beyond the range.
					Intervals;
				[{Offset, Start2}] when Offset >= End ->
					%% The interval extends past End — clip and stop.
					ar_intervals:add(Intervals, End, max(Start2, Start));
				[{Offset, Start2}] ->
					get_blacklisted_intervals(Offset, End,
							ar_intervals:add(Intervals, Offset, max(Start2, Start)));
				[] ->
					%% The key should have been just removed, unlucky timing.
					get_blacklisted_intervals(Start, End, Intervals)
			end
	end.

%% @doc Notify the server about the removed transaction header.
notify_about_removed_tx(TXID) ->
	gen_server:cast(?MODULE, {removed_tx, TXID}).

%% @doc Notify the server about the orphaned tx caused by the fork.
%% (The function name typo is preserved — it is part of the exported API.)
norify_about_orphaned_tx(TXID) ->
	gen_server:cast(?MODULE, {orphaned_tx, TXID}).

%% @doc Notify the server about the added transaction.
notify_about_added_tx(TXID, End, Start) ->
	gen_server:cast(?MODULE, {added_tx, TXID, End, Start}).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	?LOG_DEBUG([{event, initializing_tx_blacklist}, {tags, [tx_blacklist]}]),
	ok = initialize_state(),
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	ok = ar_events:subscribe(tx),
	gen_server:cast(?MODULE, refresh_blacklist),
	%% Periodically persist the ETS tables to their DETS counterparts.
	{ok, _} = ar_timer:apply_interval(
		?STORE_STATE_FREQUENCY_MS,
		?MODULE,
		store_state,
		[],
		#{ skip_on_shutdown => false }
	),
	{ok, #ar_tx_blacklist_state{}}.

handle_call(Request, _From, State) ->
	?LOG_ERROR([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.
%% Kick off both the restore loop and the takedown loop.
handle_cast(start_taking_down, State) ->
	?LOG_DEBUG([{event, start_taking_down}, {tags, [tx_blacklist]}]),
	gen_server:cast(?MODULE, maybe_restore),
	gen_server:cast(?MODULE, maybe_request_takedown),
	{noreply, State};

%% Rebuild the blacklist from the configured sources; reschedule with a short
%% retry delay on failure and the regular refresh interval on success.
handle_cast(refresh_blacklist, State) ->
	case refresh_blacklist() of
		error ->
			_ = ar_timer:apply_after(
				?REFRESH_BLACKLISTS_RETRY_DELAY_MS,
				gen_server,
				cast,
				[self(), refresh_blacklist],
				#{ skip_on_shutdown => true }
			);
		ok ->
			_ = ar_timer:apply_after(
				?REFRESH_BLACKLISTS_FREQUENCY_MS,
				gen_server,
				cast,
				[self(), refresh_blacklist],
				#{ skip_on_shutdown => true }
			)
	end,
	{noreply, State};

%% Request the next header and/or data takedown once the previous request has
%% had ?REQUEST_TAKEDOWN_DELAY_MS to complete; always reschedule the check.
handle_cast(maybe_request_takedown, State) ->
	#ar_tx_blacklist_state{
		header_takedown_request_timestamp = HTS,
		data_takedown_request_timestamp = DTS
	} = State,
	Now = os:system_time(millisecond),
	State2 =
		case HTS + ?REQUEST_TAKEDOWN_DELAY_MS < Now of
			true ->
				request_header_takedown(State);
			false ->
				State
		end,
	State3 =
		case DTS + ?REQUEST_TAKEDOWN_DELAY_MS < Now of
			true ->
				request_data_takedown(State2);
			false ->
				State2
		end,
	_ = ar_timer:apply_after(
		?CHECK_PENDING_ITEMS_INTERVAL_MS,
		gen_server,
		cast,
		[self(), maybe_request_takedown],
		#{ skip_on_shutdown => true }
	),
	{noreply, State3};

%% Walk the pending-restore table one TXID at a time, announcing each for
%% unblacklisting; the cursor wraps to the beginning at the end of the table.
%% NOTE(review): unblacklist_timeout is in seconds while the 30000 pacing
%% constant reads like milliseconds — confirm the intended pacing.
handle_cast(maybe_restore, #ar_tx_blacklist_state{
		pending_restore_cursor = Cursor,
		unblacklist_timeout = UnblacklistTimeout } = State) ->
	Now = os:system_time(second),
	ar_util:cast_after(200, ?MODULE, maybe_restore),
	case UnblacklistTimeout + 30000 < Now of
		true ->
			Read =
				case Cursor of
					first ->
						ets:first(ar_tx_blacklist_pending_restore_headers);
					_ ->
						ets:next(ar_tx_blacklist_pending_restore_headers, Cursor)
				end,
			case Read of
				'$end_of_table' ->
					{noreply, State#ar_tx_blacklist_state{
							pending_restore_cursor = first,
							unblacklist_timeout = Now }};
				TXID ->
					?LOG_DEBUG([{event, preparing_transaction_unblacklisting},
							{tags, [tx_blacklist]}, {tx, ar_util:encode(TXID)}]),
					ar_events:send(tx, {preparing_unblacklisting, TXID}),
					{noreply, State#ar_tx_blacklist_state{
							pending_restore_cursor = TXID,
							unblacklist_timeout = Now }}
			end;
		false ->
			{noreply, State}
	end;

%% A header takedown completed: drop it from the pending set and immediately
%% request the next one.
handle_cast({removed_tx, TXID}, State) ->
	case ets:member(ar_tx_blacklist_pending_headers, TXID) of
		false ->
			{noreply, State};
		true ->
			ets:delete(ar_tx_blacklist_pending_headers, TXID),
			{noreply, request_header_takedown(State)}
	end;

%% A blacklisted tx was orphaned: its offsets are no longer valid, so restore
%% them and keep only the bare {TXID} entry.
handle_cast({orphaned_tx, TXID}, State) ->
	case ets:lookup(ar_tx_blacklist, TXID) of
		[{TXID, End, Start}] ->
			restore_offsets(End, Start),
			ets:insert(ar_tx_blacklist, [{TXID}]);
		_ ->
			ok
	end,
	{noreply, State};

%% A blacklisted tx was (re)included in a block: record its offsets and queue
%% a data takedown; if it already had offsets, restore the stale ones first.
handle_cast({added_tx, TXID, End, Start}, State) ->
	case ets:lookup(ar_tx_blacklist, TXID) of
		[{TXID}] ->
			ets:insert(ar_tx_blacklist, [{TXID, End, Start}]),
			ets:insert(ar_tx_blacklist_pending_data, [{TXID}]),
			{noreply, request_data_takedown(State)};
		[{TXID, CurrentEnd, CurrentStart}] ->
			restore_offsets(CurrentEnd, CurrentStart),
			ets:insert(ar_tx_blacklist, [{TXID, End, Start}]),
			ets:insert(ar_tx_blacklist_pending_data, [{TXID}]),
			{noreply, request_data_takedown(State)};
		_ ->
			{noreply, State}
	end;

handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.
%% A data removal completed. The request context is stashed in the process
%% dictionary under Ref (presumably by request_data_takedown — it is defined
%% outside this chunk; confirm). Range entries are keyed {End, Start}; tx
%% entries by TXID.
handle_info({removed_range, Ref}, State) ->
	case erlang:get(Ref) of
		undefined ->
			{noreply, State};
		{range, {Start, End}} ->
			erlang:erase(Ref),
			case ets:lookup(ar_tx_blacklist, {End, Start}) of
				[{{End, Start}}] ->
					ets:delete(ar_tx_blacklist_pending_data, {End, Start}),
					{noreply, request_data_takedown(State)};
				_ ->
					%% The entry was removed or changed while the takedown
					%% was in flight; nothing to do.
					{noreply, State}
			end;
		{tx, {TXID, Start, End}} ->
			erlang:erase(Ref),
			case ets:lookup(ar_tx_blacklist, TXID) of
				[{TXID, End, Start}] ->
					ets:delete(ar_tx_blacklist_pending_data, TXID),
					{noreply, request_data_takedown(State)};
				_ ->
					{noreply, State}
			end
	end;

%% The node finished preparing a tx for unblacklisting — drop it from the
%% pending-restore set.
handle_info({event, tx, {ready_for_unblacklisting, TXID}}, State) ->
	?LOG_DEBUG([{event, unblacklisting_transaction}, {tags, [tx_blacklist]},
			{tx, ar_util:encode(TXID)}]),
	ets:delete(ar_tx_blacklist_pending_restore_headers, TXID),
	{noreply, State#ar_tx_blacklist_state{
			unblacklist_timeout = os:system_time(second) }};

handle_info({event, tx, _}, State) ->
	{noreply, State};

handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {message, Info}]),
	{noreply, State}.

%% Persist state and close the DETS files before shutdown (exits are trapped
%% in init/1 so this runs on normal termination).
terminate(Reason, _State) ->
	store_state(),
	close_dets(),
	?LOG_INFO([{event, terminate}, {module, ?MODULE}, {reason, Reason}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Open (or create) the DETS tables under <data_dir>/ar_tx_blacklist and load
%% each into its identically-named ETS table.
initialize_state() ->
	{ok, Config} = arweave_config:get_env(),
	DataDir = Config#config.data_dir,
	Dir = filename:join(DataDir, "ar_tx_blacklist"),
	ok = filelib:ensure_dir(Dir ++ "/"),
	Names = [
		ar_tx_blacklist,
		ar_tx_blacklist_pending_headers,
		ar_tx_blacklist_pending_data,
		ar_tx_blacklist_offsets,
		ar_tx_blacklist_pending_restore_headers
	],
	lists:foreach(
		fun (Name) ->
			{ok, _} = dets:open_file(Name, [{file, filename:join(Dir, Name)}]),
			true = ets:from_dets(Name, Name)
		end,
		Names
	).
%% @doc Rebuild the blacklist: load the whitelist from files and URLs, then
%% the blacklist, then reconcile both against the ETS state. Returns ok or
%% error (any source failing aborts the whole refresh).
refresh_blacklist() ->
	{ok, Config} = arweave_config:get_env(),
	WhitelistFiles = Config#config.transaction_whitelist_files,
	case load_from_files(WhitelistFiles) of
		error ->
			error;
		{ok, Whitelist} ->
			WhitelistURLs = Config#config.transaction_whitelist_urls,
			case load_from_urls(WhitelistURLs) of
				error ->
					error;
				{ok, Whitelist2} ->
					refresh_blacklist(sets:union(Whitelist, Whitelist2))
			end
	end.

refresh_blacklist(Whitelist) ->
	{ok, Config} = arweave_config:get_env(),
	BlacklistFiles = Config#config.transaction_blacklist_files,
	case load_from_files(BlacklistFiles) of
		error ->
			error;
		{ok, Blacklist} ->
			BlacklistURLs = Config#config.transaction_blacklist_urls,
			case load_from_urls(BlacklistURLs) of
				error ->
					error;
				{ok, Blacklist2} ->
					refresh_blacklist(Whitelist, sets:union(Blacklist, Blacklist2))
			end
	end.

%% Diff the configured blacklist against the current ETS state.
%% "Removed" = newly blacklisted entries to take down (the name refers to the
%% content being removed from the weave); "Restored" = entries no longer
%% blacklisted (or now whitelisted) whose data should be restored.
%% Entries are either TXID binaries or {End, Start} byte ranges.
refresh_blacklist(Whitelist, Blacklist) ->
	Removed = sets:fold(
		fun
			(TXID, Acc) when is_binary(TXID) ->
				case not sets:is_element(TXID, Whitelist)
						andalso not ets:member(ar_tx_blacklist, TXID) of
					true ->
						[TXID | Acc];
					false ->
						Acc
				end;
			({End, Start}, Acc) ->
				case ets:member(ar_tx_blacklist, {End, Start}) of
					true ->
						Acc;
					false ->
						[{End, Start} | Acc]
				end
		end,
		[],
		Blacklist
	),
	Restored = ets:foldl(
		fun
			({End, Start}, Acc) ->
				case sets:is_element({End, Start}, Blacklist) of
					true ->
						Acc;
					false ->
						[{End, Start} | Acc]
				end;
			(Entry, Acc) ->
				%% Entry is {TXID} or {TXID, End, Start}.
				TXID = element(1, Entry),
				case sets:is_element(TXID, Whitelist)
						orelse not sets:is_element(TXID, Blacklist) of
					true ->
						[TXID | Acc];
					false ->
						Acc
				end
		end,
		[],
		ar_tx_blacklist
	),
	%% Register the newly blacklisted entries for takedown.
	lists:foreach(
		fun
			(TXID) when is_binary(TXID) ->
				ets:insert(ar_tx_blacklist, [{TXID}]),
				ets:insert(ar_tx_blacklist_pending_headers, [{TXID}]),
				ets:insert(ar_tx_blacklist_pending_data, [{TXID}]),
				ets:delete(ar_tx_blacklist_pending_restore_headers, TXID);
			({End, Start}) ->
				ets:insert(ar_tx_blacklist, [{{End, Start}}]),
				ets:insert(ar_tx_blacklist_pending_data, [{{End, Start}}])
		end,
		Removed
	),
	%% Unregister the no-longer-blacklisted entries and restore their offsets.
	lists:foreach(
		fun
			(TXID) when is_binary(TXID) ->
				ets:insert(ar_tx_blacklist_pending_restore_headers, [{TXID}]),
				case ets:lookup(ar_tx_blacklist, TXID) of
					[{TXID}] ->
						ok;
					[{TXID, End, Start}] ->
						restore_offsets(End, Start)
				end,
				ets:delete(ar_tx_blacklist, TXID),
				ets:delete(ar_tx_blacklist_pending_data, TXID),
				ets:delete(ar_tx_blacklist_pending_headers, TXID);
			({End, Start}) ->
				restore_offsets(End, Start),
				ets:delete(ar_tx_blacklist, {End, Start}),
				ets:delete(ar_tx_blacklist_pending_data, {End, Start})
		end,
		Restored
	),
	?LOG_DEBUG([{event, refreshed_blacklist}, {tags, [tx_blacklist]},
		{whitelist, sets:size(Whitelist)},
		{blacklist, sets:size(Blacklist)},
		{removed, length(Removed)},
		{restored, length(Restored)},
		{ar_tx_blacklist, ets:info(ar_tx_blacklist, size)},
		{ar_tx_blacklist_pending_headers,
			ets:info(ar_tx_blacklist_pending_headers, size)},
		{ar_tx_blacklist_pending_data,
			ets:info(ar_tx_blacklist_pending_data, size)},
		{ar_tx_blacklist_pending_restore_headers,
			ets:info(ar_tx_blacklist_pending_restore_headers, size)}
	]),
	ok.

%% Load and parse all files, returning {ok, Set} only if every file parsed.
load_from_files(Files) ->
	Lists = lists:map(fun load_from_file/1, Files),
	case lists:all(fun(error) -> false; (_) -> true end, Lists) of
		true ->
			{ok, sets:from_list(lists:flatten(Lists))};
		false ->
			error
	end.

%% Read and parse one blacklist/whitelist file; a warning is logged and error
%% returned on any failure (best-effort by design).
load_from_file(File) ->
	try
		{ok, Binary} = file:read_file(File),
		parse_binary(Binary)
	catch Type:Pattern ->
		Warning = [
			{event, failed_to_load_and_parse_file},
			{tags, [tx_blacklist]},
			{file, File},
			{exception, {Type, Pattern}}
		],
		?LOG_WARNING(Warning),
		error
	end.
%% Parse a newline-separated list of entries. Each non-empty line is either
%% "Start,End" (a byte range, stored as {End, Start} with End > Start >= 0)
%% or a base64url-encoded transaction identifier. Unparsable lines are
%% logged and skipped.
parse_binary(Binary) ->
	lists:filtermap(
		fun(Line) ->
			case Line of
				<<>> ->
					false;
				TXIDOrRange ->
					case binary:split(TXIDOrRange, <<",">>, [global]) of
						[StartBin, EndBin] ->
							%% catch turns a badarg from binary_to_integer
							%% into a non-integer value rejected by the guard.
							case {catch binary_to_integer(StartBin),
									catch binary_to_integer(EndBin)} of
								{Start, End} when is_integer(Start), is_integer(End),
										End > Start, Start >= 0 ->
									{true, {End, Start}};
								_ ->
									?LOG_WARNING([{event, failed_to_parse_line},
											{tags, [tx_blacklist]}, {line, Line}]),
									false
							end;
						_ ->
							case ar_util:safe_decode(TXIDOrRange) of
								{error, invalid} ->
									?LOG_WARNING([{event, failed_to_parse_line},
											{tags, [tx_blacklist]}, {line, Line}]),
									false;
								{ok, TXID} ->
									{true, TXID}
							end
					end
			end
		end,
		binary:split(Binary, <<"\n">>, [global])
	).

%% Fetch and parse every URL; succeed only if all of them succeed, returning
%% the union of the parsed entries as a set.
load_from_urls(URLs) ->
	Lists = lists:map(fun load_from_url/1, URLs),
	case lists:all(fun(error) -> false; (_) -> true end, Lists) of
		true ->
			{ok, sets:from_list(lists:flatten(Lists))};
		false ->
			error
	end.

%% Download a blacklist/whitelist from a URL and parse the body. Non-200
%% replies and exceptions are logged and mapped to error.
load_from_url(URL) ->
	try
		#{ host := Host, path := RawPath, scheme := Scheme } = M = uri_string:parse(URL),
		%% An empty path component means the root document.
		Path = case RawPath of "" -> "/"; Elsewise -> Elsewise end,
		Query =
			case maps:get(query, M, not_found) of
				not_found ->
					<<>>;
				Q ->
					[<<"?">>, Q]
			end,
		%% Default to the scheme's well-known port when none is given.
		Port = maps:get(port, M, case Scheme of "http" -> 80; "https" -> 443 end),
		Reply =
			ar_http:req(#{
				method => get,
				peer => {Host, Port},
				path => binary_to_list(iolist_to_binary([Path, Query])),
				is_peer_request => false,
				timeout => 20000,
				connect_timeout => 1000
			}),
		case Reply of
			{ok, {{<<"200">>, _}, _, Body, _, _}} ->
				parse_binary(Body);
			_ ->
				?LOG_WARNING([
					{event, failed_to_download_tx_blacklist},
					{tags, [tx_blacklist]},
					{url, URL},
					{reply, Reply}
				]),
				error
		end
	catch Type:Pattern ->
		?LOG_WARNING([
			{event, failed_to_load_and_parse_tx_blacklist},
			{tags, [tx_blacklist]},
			{url, URL},
			{exception, {Type, Pattern}}
		]),
		error
	end.
%% Ask ar_header_sync to remove the header of the next transaction pending
%% a header takedown, recording the request time in the state.
request_header_takedown(State) ->
	case ets:first(ar_tx_blacklist_pending_headers) of
		'$end_of_table' ->
			State;
		TXID ->
			ar_header_sync:request_tx_removal(TXID),
			State#ar_tx_blacklist_state{
				header_takedown_request_timestamp = os:system_time(millisecond)
			}
	end.

%% Kick off the data takedown for the next pending entry - either a byte
%% range ({End, Start} key) or a transaction (binary TXID key). For a TXID
%% whose offsets are not resolved yet ({TXID} entry), look them up via
%% ar_data_sync first and cache them as {TXID, End, Start}.
request_data_takedown(State) ->
	case ets:first(ar_tx_blacklist_pending_data) of
		'$end_of_table' ->
			State;
		TXID when is_binary(TXID) ->
			case ets:lookup(ar_tx_blacklist, TXID) of
				[{TXID}] ->
					case ar_data_sync:get_tx_offset(TXID) of
						{ok, {End, Size}} ->
							Start = End - Size,
							ets:insert(ar_tx_blacklist, [{TXID, End, Start}]),
							blacklist_offsets(TXID, End, Start, State);
						{error, Reason} ->
							%% Not in the index - drop the entry entirely.
							?LOG_WARNING([{event, failed_to_find_blocklisted_tx_in_the_index},
									{tags, [tx_blacklist]},
									{tx, ar_util:encode(TXID)},
									{reason, io_lib:format("~p", [Reason])}]),
							ets:delete(ar_tx_blacklist_pending_data, TXID),
							ets:delete(ar_tx_blacklist, TXID),
							State
					end;
				[{TXID, End, Start}] ->
					blacklist_offsets(TXID, End, Start, State)
			end;
		{End, Start} ->
			blacklist_offsets(End, Start, State)
	end.

%% Persist every ETS table into its backing DETS table.
store_state() ->
	Names = [
		ar_tx_blacklist,
		ar_tx_blacklist_pending_headers,
		ar_tx_blacklist_pending_data,
		ar_tx_blacklist_offsets,
		ar_tx_blacklist_pending_restore_headers
	],
	lists:foreach(
		fun (Name) ->
			ets:to_dets(Name, Name)
		end,
		Names
	),
	?LOG_DEBUG([{event, stored_state}, {tags, [tx_blacklist]},
		{ar_tx_blacklist, ets:info(ar_tx_blacklist, size)},
		{ar_tx_blacklist_pending_headers,
			ets:info(ar_tx_blacklist_pending_headers, size)},
		{ar_tx_blacklist_pending_data,
			ets:info(ar_tx_blacklist_pending_data, size)},
		{ar_tx_blacklist_offsets,
			ets:info(ar_tx_blacklist_offsets, size)},
		{ar_tx_blacklist_pending_restore_headers,
			ets:info(ar_tx_blacklist_pending_restore_headers, size)}
	]).

%% Remove a byte range from the set of blacklisted offsets.
restore_offsets(End, Start) ->
	ar_ets_intervals:delete(ar_tx_blacklist_offsets, End, Start).
%% Mark the byte range as blacklisted and ask ar_data_sync to remove its
%% data. The request context is kept in the process dictionary under a fresh
%% reference; the reply arrives as {removed_range, Ref} (see handle_info/2).
blacklist_offsets(End, Start, State) ->
	ar_ets_intervals:add(ar_tx_blacklist_offsets, End, Start),
	Ref = make_ref(),
	erlang:put(Ref, {range, {Start, End}}),
	?LOG_DEBUG([{event, requesting_data_removal}, {tags, [tx_blacklist]},
			{s, Start}, {e, End}]),
	ar_data_sync:request_data_removal(Start, End, Ref, self()),
	State#ar_tx_blacklist_state{
		data_takedown_request_timestamp = os:system_time(millisecond)
	}.

%% Same as blacklist_offsets/3, but for a transaction whose offsets are
%% known - the removal is requested by TXID.
blacklist_offsets(TXID, End, Start, State) ->
	ar_ets_intervals:add(ar_tx_blacklist_offsets, End, Start),
	Ref = make_ref(),
	erlang:put(Ref, {tx, {TXID, Start, End}}),
	?LOG_DEBUG([{event, requesting_tx_data_removal}, {tags, [tx_blacklist]},
			{tx, ar_util:encode(TXID)}, {s, Start}, {e, End}]),
	ar_data_sync:request_tx_data_removal(TXID, Ref, self()),
	State#ar_tx_blacklist_state{
		data_takedown_request_timestamp = os:system_time(millisecond)
	}.

%% Close every DETS table, logging (but not failing on) close errors.
close_dets() ->
	Names = [
		ar_tx_blacklist,
		ar_tx_blacklist_pending_headers,
		ar_tx_blacklist_pending_data,
		ar_tx_blacklist_offsets,
		ar_tx_blacklist_pending_restore_headers
	],
	lists:foreach(
		fun (Name) ->
			case dets:close(Name) of
				ok ->
					ok;
				{error, Reason} ->
					?LOG_ERROR([
						{event, failed_to_close_dets_table},
						{tags, [tx_blacklist]},
						{name, Name},
						{reason, Reason}
					])
			end
		end,
		Names
	).
================================================
FILE: apps/arweave/src/ar_tx_db.erl
================================================
%%% @doc Database for storing error codes for failed transactions, so that a user
%%% can get the error reason when polling the status of a transaction. The entries
%%% have a TTL. The DB is a singleton.
-module(ar_tx_db).
-export([get_error_codes/1, put_error_codes/2, ensure_error/1, clear_error_codes/1]).
-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%% @doc Put an Erlang term into the meta DB. Typically these are
%% write-once values.
%% Store the error codes for TXID and schedule their removal after 30
%% minutes via ar_timer.
put_error_codes(TXID, ErrorCodes) ->
	ets:insert(?MODULE, {TXID, ErrorCodes}),
	{ok, _} = ar_timer:apply_after(
		1800*1000,
		?MODULE,
		clear_error_codes,
		[TXID],
		#{
			skip_on_shutdown => false
		}
	),
	ok.

%% @doc Retrieve a term from the meta db.
get_error_codes(TXID) ->
	case ets:lookup(?MODULE, TXID) of
		[{_, ErrorCodes}] ->
			{ok, ErrorCodes};
		[] ->
			not_found
	end.

%% @doc Writes an unknown error code if there are not already any error codes
%% for this TX.
ensure_error(TXID) ->
	case ets:lookup(?MODULE, TXID) of
		[_] ->
			ok;
		[] ->
			put_error_codes(TXID, ["unknown_error"])
	end.

%% @doc Removes all error codes for this TX.
clear_error_codes(TXID) ->
	ets:delete(?MODULE, TXID).

%%%===================================================================
%%% Tests.
%%%===================================================================

%% Create the singleton ETS table if it does not exist yet; return the
%% matching cleanup closure for the eunit setup/teardown pair.
setup_ets() ->
	case ets:info(?MODULE) of
		undefined ->
			ets:new(?MODULE, [set, public, named_table]),
			fun() -> ets:delete(?MODULE) end;
		_ ->
			fun() -> ok end
	end.

read_write_test_() ->
	{setup, fun setup_ets/0, fun(Cleanup) -> Cleanup() end,
		fun(_) ->
			[fun() ->
				put_error_codes(mocked_txid1, mocked_error),
				put_error_codes(mocked_txid2, mocked_error),
				ensure_error(mocked_txid3),
				assert_clear_error_codes(mocked_txid1),
				assert_clear_error_codes(mocked_txid2),
				assert_clear_error_codes(mocked_txid3)
			end]
		end}.

%% Check that codes exist for TXID, then clear them and check the lookup
%% now misses.
assert_clear_error_codes(TXID) ->
	Fetched = get_error_codes(TXID),
	?assertMatch({ok, _}, Fetched),
	clear_error_codes(TXID),
	?assert(not_found == get_error_codes(TXID)),
	ok.

tx_db_test_() ->
	{setup, fun setup_ets/0, fun(Cleanup) -> Cleanup() end,
		fun(_) ->
			[{timeout, 30, fun test_tx_db/0}]
		end}.
%% End-to-end check that ar_tx:verify/2 records error codes in this table
%% for an invalid transaction and that a properly signed one verifies.
test_tx_db() ->
	{_, Pub1 = {_, Owner1}} = ar_wallet:new(),
	{Priv2, Pub2} = ar_wallet:new(),
	Wallets = [
		{ar_wallet:to_address(Pub1), ?AR(10000), <<>>},
		{ar_wallet:to_address(Pub2), ?AR(10000), <<>>}
	],
	WL = maps:from_list([{A, {B, LTX}} || {A, B, LTX} <- Wallets]),
	OrphanedTX1 = ar_tx:new(Pub1, ?AR(1), ?AR(5000), <<>>),
	%% Same owner as target plus a bogus signature => three error codes.
	BadTX = OrphanedTX1#tx{ owner = Owner1, signature = <<"BAD">> },
	Timestamp = os:system_time(seconds),
	?assert(not ar_tx:verify(BadTX, {{1, 4}, 1, 1, 1, 0, 1, WL, Timestamp})),
	Expected = {ok, ["same_owner_as_target", "tx_id_not_valid", "tx_signature_not_valid"]},
	?assertEqual(Expected, get_error_codes(BadTX#tx.id)),
	OrphanedTX2 = ar_tx:new(Pub1, ?AR(1), ?AR(5000), <<>>),
	SignedTX = ar_tx:sign_v1(OrphanedTX2, Priv2, Pub2),
	?assert(ar_tx:verify(SignedTX, {{1, 4}, 1, 1, 1, 0, 1, WL, Timestamp})),
	clear_error_codes(BadTX#tx.id),
	clear_error_codes(SignedTX#tx.id),
	ok.
================================================
FILE: apps/arweave/src/ar_tx_emitter.erl
================================================
-module(ar_tx_emitter).
-behaviour(gen_server).
-export([start_link/2]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").

%% Remove identifiers of recently emitted transactions from the cache after this long.
-define(CLEANUP_RECENTLY_EMITTED_TIMEOUT, 60 * 60 * 1000).
-define(WORKER_CONNECT_TIMEOUT, 1 * 1000).
-define(WORKER_REQUEST_TIMEOUT, 5 * 1000).
%% How frequently to check whether new transactions are appeared for distribution.
-define(CHECK_MEMPOOL_FREQUENCY, 1000).

-record(state, {
	%% Set of {TXID, Peer} pairs currently awaiting a worker reply.
	currently_emitting,
	%% FIFO queue of worker names used to round-robin emission jobs.
	workers,
	%% How long to wait for a reply from the emitter worker before considering it failed.
	worker_failed_timeout
}).

%% How many transactions to send to emitters at one go. With CHUNK_SIZE=1, the propagation
%% speed is determined by the slowest peer among those chosen for the given transaction.
%% Increasing CHUNK_SIZE reduces the influence of slow peers at the cost of RAM (message
%% queues for transaction emitter workers).
-define(CHUNK_SIZE, 100).

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link(Name, Workers) ->
	gen_server:start_link({local, Name}, ?MODULE, Workers, []).

%%%===================================================================
%%% gen_server callbacks.
%%%===================================================================

%% Workers is the list of ar_tx_emitter_worker names to dispatch to.
init(Workers) ->
	gen_server:cast(?MODULE, process_chunk),
	NumWorkers = length(Workers),
	NumPeers = max_propagation_peers(),
	JobsPerWorker = (?CHUNK_SIZE * NumPeers) div NumWorkers,
	%% Only time out a worker after we've given enough time for *all* workers to complete
	%% their tasks (including a small 1000 ms buffer). This should prevent a cascade where
	%% worker queues keep growing.
	WorkerFailedTimeout = JobsPerWorker
			* (?WORKER_CONNECT_TIMEOUT + ?WORKER_REQUEST_TIMEOUT + 1000),
	State = #state{
		workers = queue:from_list(Workers)
		, currently_emitting = sets:new()
		, worker_failed_timeout = WorkerFailedTimeout
	},
	{ok, State}.

handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

handle_cast(process_chunk, State) ->
	% only current (active) peers should be used, using lifetime
	% peers will create unecessary timeouts. The first to
	% contact are the trusted peers.
	TrustedPeers = ar_peers:get_trusted_peers(),
	CurrentPeers = ar_peers:get_peers(current),
	FilteredPeers = ar_peers:filter_peers(CurrentPeers, {timestamp, 60*60*24}),
	CleanedPeers = FilteredPeers -- TrustedPeers,
	Peers = TrustedPeers ++ CleanedPeers,

	% prepare to emit chunk(s)
	PropagationQueue = ar_mempool:get_propagation_queue(),
	PropagationMax = max_propagation_peers(),
	State2 = emit( PropagationQueue, Peers, PropagationMax, ?CHUNK_SIZE, State),

	% check later if emit/6 returns an empty set
	case sets:is_empty(State2#state.currently_emitting) of
		true ->
			ar_util:cast_after(?CHECK_MEMPOOL_FREQUENCY, ?MODULE, process_chunk);
		false ->
			ok
	end,
	{noreply, State2};
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

%% A worker finished (or skipped) a {TXID, Peer} job; once the in-flight
%% set is empty, schedule the next chunk.
handle_info({emitted, TXID, Peer}, State) ->
	#state{ currently_emitting = Emitting } = State,
	case sets:is_element({TXID, Peer}, Emitting) of
		false ->
			%% Should have been cleaned up by timeout.
			{noreply, State};
		true ->
			Emitting2 = sets:del_element({TXID, Peer}, Emitting),
			case sets:is_empty(Emitting2) of
				true ->
					gen_server:cast(?MODULE, process_chunk);
				false ->
					ok
			end,
			{noreply, State#state{ currently_emitting = Emitting2 }}
	end;
%% The worker did not reply in time; drop the job from the in-flight set.
handle_info({timeout, TXID, Peer}, State) ->
	#state{ currently_emitting = Emitting } = State,
	case sets:is_element({TXID, Peer}, Emitting) of
		false ->
			%% Should have been emitted.
			{noreply, State};
		true ->
			?LOG_DEBUG([{event, tx_propagation_timeout},
					{txid, ar_util:encode(TXID)},
					{peer, ar_util:format_peer(Peer)}]),
			Emitting2 = sets:del_element({TXID, Peer}, Emitting),
			case sets:is_empty(Emitting2) of
				true ->
					gen_server:cast(?MODULE, process_chunk);
				false ->
					ok
			end,
			{noreply, State#state{ currently_emitting = Emitting2 }}
	end;
%% TTL expiry of the recently-emitted cache entry (see emit_set_not_empty/5).
handle_info({remove_from_recently_emitted, TXID}, State) ->
	ets:delete(ar_tx_emitter_recently_emitted, TXID),
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.
terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Read the configured upper bound on the number of peers to propagate to.
max_propagation_peers() ->
	{ok, Config} = arweave_config:get_env(),
	Config#config.max_propagation_peers.

%% Emit up to N transactions from the priority set (a gb_set ordered by
%% utility) to the given peers; stops when N is exhausted or the set is empty.
emit(_Set, _Peers, _MaxPeers, N, State) when N =< 0 ->
	State;
emit(Set, Peers, MaxPeers, N, State) ->
	case gb_sets:is_empty(Set) of
		true ->
			State;
		false ->
			emit_set_not_empty(Set, Peers, MaxPeers, N, State)
	end.

%% Take the highest-utility transaction from the set and hand one emission
%% job per peer to the workers (round-robin), skipping transactions already
%% emitted recently.
emit_set_not_empty(Set, Peers, MaxPeers, N, State) ->
	{{Utility, TXID}, Set2} = gb_sets:take_largest(Set),
	case ets:member(ar_tx_emitter_recently_emitted, TXID) of
		true ->
			%% Already propagated recently - do not count it against N.
			emit(Set2, Peers, MaxPeers, N, State);
		false ->
			#state{
				workers = Q,
				currently_emitting = Emitting,
				worker_failed_timeout = WorkerFailedTimeout
			} = State,
			% only a subset of the whole peers list is
			% taken using max_propagation_peers value.
			% the first N peers will be used instead of
			% the whole list. unfortunately, this list can
			% also have not connected peers.
			PeersToSync = lists:sublist(Peers, MaxPeers),
			% for each peers in the sublist, a chunk is
			% sent. The workers are taken one by one from
			% a FIFO, mainly used to distribute the
			% messages across all available workers.
			Foldl =
				fun(Peer, {Acc, Workers}) ->
					{{value, W}, Workers2} = queue:out(Workers),
					gen_server:cast(W, {emit, TXID, Peer, ?WORKER_CONNECT_TIMEOUT,
							?WORKER_REQUEST_TIMEOUT, self()}),
					%% Arm the per-job timeout; handle_info({timeout, ...}) cleans up.
					erlang:send_after(WorkerFailedTimeout, ?MODULE, {timeout, TXID, Peer}),
					{sets:add_element({TXID, Peer}, Acc), queue:in(W, Workers2)}
				end,
			{Emitting2, Q2} = lists:foldl(Foldl, {Emitting, Q}, PeersToSync),
			State2 = State#state{ workers = Q2, currently_emitting = Emitting2 },
			%% The cache storing recently emitted transactions is used instead
			%% of an explicit synchronization of the propagation queue updates
			%% with ar_node_worker - we do not rely on ar_node_worker removing
			%% emitted transactions from the queue on time.
			ets:insert(ar_tx_emitter_recently_emitted, {TXID}),
			erlang:send_after(?CLEANUP_RECENTLY_EMITTED_TIMEOUT, ?MODULE,
					{remove_from_recently_emitted, TXID}),
			ar_events:send(tx, {emitting_scheduled, Utility, TXID}),
			emit(Set2, Peers, MaxPeers, N - 1, State2)
	end.
================================================
FILE: apps/arweave/src/ar_tx_emitter_sup.erl
================================================
-module(ar_tx_emitter_sup).
-behaviour(supervisor).
-export([start_link/0]).
-export([init/1]).
-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% Build the supervision tree: one ar_tx_emitter plus
%% Config#config.max_emitters ar_tx_emitter_worker children. The emitter
%% receives the list of worker names so it can dispatch jobs to them.
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	MaxEmitters = Config#config.max_emitters,
	Workers = lists:map(fun tx_workers/1, lists:seq(1, MaxEmitters)),
	WorkerNames = [ Name || #{ id := Name } <- Workers],
	Emitter = tx_emitter([ar_tx_emitter, WorkerNames]),
	ChildrenSpec = [Emitter|Workers],
	{ok, {supervisor_spec(), ChildrenSpec}}.

%% Supervisor flags: restart children independently, at most 5 restarts
%% within 10 seconds.
supervisor_spec() ->
	#{ strategy => one_for_one
	 , intensity => 5
	 , period => 10
	 }.

% helper to create ar_tx_emitter process, in charge
% of sending chunk to propagate to ar_tx_emitter_worker.
tx_emitter(Args) ->
	#{ id => ar_tx_emitter
	 , type => worker
	 , start => {ar_tx_emitter, start_link, Args}
	 , shutdown => ?SHUTDOWN_TIMEOUT
	 , modules => [ar_tx_emitter]
	 , restart => permanent
	 }.

% helper function to create ar_tx_workers processes.
tx_workers(Num) ->
	Name = "ar_tx_emitter_worker_" ++ integer_to_list(Num),
	Atom = list_to_atom(Name),
	#{ id => Atom
	 , start => {ar_tx_emitter_worker, start_link, [Atom]}
	 , restart => permanent
	 , type => worker
	 %% Fix: the child-spec key for the termination grace period is
	 %% `shutdown`, not `timeout`. The supervisor ignores unknown map keys,
	 %% so the previous `timeout => ?SHUTDOWN_TIMEOUT` was silently dropped
	 %% and the worker got the default 5000 ms shutdown instead
	 %% (cf. tx_emitter/1 above, which uses `shutdown` correctly).
	 , shutdown => ?SHUTDOWN_TIMEOUT
	 , modules => [ar_tx_emitter_worker]
	 }.
================================================
FILE: apps/arweave/src/ar_tx_emitter_worker.erl
================================================
-module(ar_tx_emitter_worker).
-behaviour(gen_server).
-export([start_link/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-record(state, {}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% Start a worker registered locally under the given name.
start_link(Name) ->
	gen_server:start_link({local, Name}, ?MODULE, [], []).

%%%===================================================================
%%% gen_server callbacks.
%%%===================================================================

init(_) ->
	{ok, #state{}}.
handle_call(Request, _From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]),
	{reply, ok, State}.

%% Send the transaction to Peer. The transaction may have left the mempool
%% in the meantime, in which case there is nothing to do. ReplyTo is always
%% notified so the emitter can clear the in-flight entry.
handle_cast({emit, TXID, Peer, ConnectTimeout, Timeout, ReplyTo}, State) ->
	case ar_mempool:get_tx(TXID) of
		not_found ->
			ok;
		TX ->
			StartedAt = erlang:timestamp(),
			%% Timeouts arrive in milliseconds; the HTTP client takes seconds.
			Opts = #{
				connect_timeout => ConnectTimeout div 1000
				, timeout => Timeout div 1000
			},
			emit(#{ tx_id => TXID
				, peer => Peer
				, tx => TX
				, started_at => StartedAt
				, opts => Opts
			})
	end,
	ReplyTo ! {emitted, TXID, Peer},
	{noreply, State};
handle_cast(Msg, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

handle_info({event, tx, _}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, terminate}, {module, ?MODULE}, {reason, Reason}]),
	ok.

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% Size accounted for in propagation metrics; format-2 data travels
%% separately, so only the base size counts for it.
tx_propagated_size(#tx{ format = 2 }) ->
	?TX_SIZE_BASE;
tx_propagated_size(#tx{ format = 1, data = Data }) ->
	?TX_SIZE_BASE + byte_size(Data).

%% Format-2 data is stripped before propagation, except to trusted peers.
tx_to_propagated_tx(#tx{ format = 1 } = TX, _Peer, _TrustedPeers) ->
	TX;
tx_to_propagated_tx(#tx{ format = 2 } = TX, Peer, TrustedPeers) ->
	case lists:member(Peer, TrustedPeers) of
		true ->
			TX;
		false ->
			TX#tx{ data = <<>> }
	end.

%% Bump the propagation counter for the reply's status class; not_sent is
%% not counted.
record_propagation_status(not_sent) ->
	ok;
record_propagation_status(Data) ->
	StatusClass = ar_metrics:get_status_class(Data),
	prometheus_counter:inc(propagated_transactions_total, [StatusClass]),
	StatusClass.

%% Observe the effective upload rate in bits per second.
record_propagation_rate(PropagatedSize, PropagationTimeUs) ->
	BitsPerSecond = PropagatedSize * 1000000 / PropagationTimeUs * 8,
	prometheus_histogram:observe(tx_propagation_bits_per_second, BitsPerSecond),
	BitsPerSecond.
% retrieve information about peer(s)
emit(#{ tx := TX, peer := Peer } = Data) ->
	TrustedPeers = ar_peers:get_trusted_peers(),
	PropagatedTX = tx_to_propagated_tx(TX, Peer, TrustedPeers),
	Release = ar_peers:get_peer_release(Peer),
	NewData = Data#{ propagated_tx => PropagatedTX
		, trusted_peers => TrustedPeers
		, release => Release
	},
	emit2(NewData).

% depending on the version of the peer, different kind of payload
% is being used, one in binary, another one in JSON.
emit2(#{ release := Release, peer := Peer, propagated_tx := PropagatedTX,
		tx_id := TXID, opts := Opts } = Data) when Release >= 42 ->
	%% Release 42+ peers accept the compact binary serialization.
	Bin = ar_serialize:tx_to_binary(PropagatedTX),
	Reply = ar_http_iface_client:send_tx_binary(Peer, TXID, Bin, Opts),
	NewData = Data#{ reply => Reply },
	emit3(NewData);
emit2(#{ peer := Peer, propagated_tx := PropagatedTX, tx_id := TXID,
		opts := Opts } = Data) ->
	%% Older peers get the JSON payload.
	Serialize = ar_serialize:tx_to_json_struct(PropagatedTX),
	JSON = ar_serialize:jsonify(Serialize),
	Reply = ar_http_iface_client:send_tx_json(Peer, TXID, JSON, Opts),
	NewData = Data#{ reply => Reply },
	emit3(NewData).

% deal with the reply and update propagation statistics.
emit3(#{ started_at := StartedAt, reply := Reply, tx := TX } = Data) ->
	Timestamp = erlang:timestamp(),
	PropagationTimeUs = timer:now_diff(Timestamp, StartedAt),
	PropagationStatus = record_propagation_status(Reply),
	PropagatedSize = tx_propagated_size(TX),
	PropagationRate = record_propagation_rate(PropagatedSize, PropagationTimeUs),
	Data#{ propagation_time_us => PropagationTimeUs
		, propagation_status => PropagationStatus
		, propagated_size => PropagatedSize
		, propagation_rate => PropagationRate
	}.
================================================
FILE: apps/arweave/src/ar_tx_poller.erl
================================================
-module(ar_tx_poller).
-behaviour(gen_server).
-export([ start_link/0 ]).
-export([ init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2 ]).
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").

-record(state, {
	%% Microsecond timestamp of the last transaction received by push.
	last_seen_tx_timestamp = 0,
	%% TXIDs fetched from peer mempools, pending download/verification.
	pending_txids = [],
	%% The peer whose mempool supplied the pending TXIDs.
	latest_txid_source_peer = none
}).

%% Number of peers to query for a transaction.
-define(QUERY_PEERS_COUNT, 5).

%% Check interval in milliseconds - how long to wait before polling
%% since the last transaction push. If the node is not public (so it
%% never receives transactions by push), we wait this long starting from
%% the moment we join the network only once and then keep polling
%% for transactions more frequently.
-ifdef(AR_TEST).
-define(CHECK_INTERVAL_MS, 5_000).
-else.
-define(CHECK_INTERVAL_MS, 30_000).
-endif.

%% Poll interval in milliseconds - how long we wait before downloading a new
%% transaction or polling the mempools for new transactions.
-ifdef(AR_TEST).
-define(POLL_INTERVAL_MS, 500).
-else.
-define(POLL_INTERVAL_MS, 200).
-endif.

%%% Public API.

start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%% Gen server callbacks.

init([]) ->
	[ok, ok] = ar_events:subscribe([tx, node_state]),
	{ok, #state{}}.

handle_call(Request, From, State) ->
	?LOG_WARNING("Unexpected call: ~p from ~p", [Request, From]),
	{reply, ignored, State}.

handle_cast(check_for_received_txs, State) ->
	%% Check if there have been any transactions received in the last
	%% ?CHECK_INTERVAL_MS milliseconds.
	TimestampDiff = erlang:system_time(microsecond) - State#state.last_seen_tx_timestamp,
	%% ?CHECK_INTERVAL_MS is milliseconds, the timestamps are microseconds.
	State3 =
		case TimestampDiff > 0 andalso TimestampDiff > (?CHECK_INTERVAL_MS * 1000) of
			true ->
				check_for_received_txs(State);
			false ->
				ar_util:cast_after(?CHECK_INTERVAL_MS, self(), check_for_received_txs),
				State
		end,
	{noreply, State3};
handle_cast(Request, State) ->
	?LOG_WARNING("Unexpected cast: ~p", [Request]),
	{noreply, State}.

handle_info({event, node_state, {initialized, _}}, State) ->
	%% Send a check_for_received_txs cast periodically to check for externally
	%% submitted transactions. If there have not been any for longer than 30
	%% seconds, request the mempool from a peer and download the transactions.
	{ok, Config} = arweave_config:get_env(),
	case lists:member(tx_poller, Config#config.disable) of
		true ->
			ok;
		false ->
			gen_server:cast(self(), check_for_received_txs)
	end,
	{noreply, State};
handle_info({event, node_state, _}, State) ->
	{noreply, State};
%% A pushed transaction arrived - remember the time and drop the pending
%% pull queue (pushes imply the node is publicly reachable).
handle_info({event, tx, {new, _TX, {pushed, _Peer}}}, State) ->
	{noreply, State#state{
		pending_txids = [],
		last_seen_tx_timestamp = erlang:system_time(microsecond)
	}};
handle_info({event, tx, _}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_WARNING("event: unhandled_info, info: ~p", [Info]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%%% Internal functions.

%% Pull-based polling. With pending TXIDs: download the next unknown one.
%% With an empty queue: fetch fresh mempool TXIDs from a few current peers.
%% Either way, reschedule itself via a cast.
check_for_received_txs(#state{ pending_txids = [TXID | PendingTXIDs] } = State) ->
	case ar_mempool:is_known_tx(TXID) of
		true ->
			ok;
		false ->
			download_and_verify_tx(TXID, State#state.latest_txid_source_peer)
	end,
	gen_server:cast(self(), check_for_received_txs),
	State#state{ pending_txids = PendingTXIDs };
check_for_received_txs(#state{ pending_txids = [] } = State) ->
	Peers = lists:sublist(ar_peers:get_peers(current), ?QUERY_PEERS_COUNT),
	Reply = ar_http_iface_client:get_mempool(Peers),
	ar_util:cast_after(?POLL_INTERVAL_MS, self(), check_for_received_txs),
	case Reply of
		{{ok, TXIDs}, TXIDPeer} ->
			State#state{ pending_txids = TXIDs, latest_txid_source_peer = TXIDPeer };
		{error, Error} ->
			?LOG_DEBUG([{event, failed_to_get_mempool_txids_from_peers},
				{peers, [ar_util:format_peer(Peer) || Peer <- Peers]},
				{error, io_lib:format("~p", [Error])}
			]),
			State
	end.
%% Fetch TXID from a few current peers, validate it, and, on success,
%% register its data root and announce it as pulled. The ignore registry
%% entry prevents the gossip path from processing the same TXID while we
%% are downloading it.
download_and_verify_tx(TXID, TXIDPeer) ->
	Ref = make_ref(),
	ar_ignore_registry:add_ref(TXID, Ref, 10_000),
	Peers = lists:sublist(ar_peers:get_peers(current), ?QUERY_PEERS_COUNT),
	case ar_http_iface_client:get_tx_from_remote_peers(Peers, TXID, false) of
		not_found ->
			ar_ignore_registry:remove_ref(TXID, Ref),
			?LOG_DEBUG([{event, failed_to_get_tx_from_peers},
				{peers, [ar_util:format_peer(Peer) || Peer <- Peers]},
				{txid, ar_util:encode(TXID)},
				{txid_peer, ar_util:format_peer(TXIDPeer)}
			]);
		{TX, Peer, Time, Size} ->
			case ar_tx_validator:validate(TX) of
				{invalid, Code} ->
					log_invalid_tx(Code, TXID, TX, Peer, TXIDPeer);
				{valid, TX2} ->
					ar_peers:rate_fetched_data(Peer, tx, Time, Size),
					ar_data_sync:add_data_root_to_disk_pool(TX2#tx.data_root,
							TX2#tx.data_size, TX#tx.id),
					ar_events:send(tx, {new, TX2, {pulled, Peer}}),
					%% Assert the downloaded transaction is the one we asked for.
					TXID = TX2#tx.id,
					ar_ignore_registry:remove_ref(TXID, Ref),
					%% Keep ignoring the TXID for a while so gossip does not
					%% reprocess it.
					ar_ignore_registry:add_temporary(TXID, 10 * 60 * 1000)
			end
	end.

%% Log a rejected transaction; bad-anchor and verification failures include
%% extra context (anchor, current height/hash, recorded error codes).
log_invalid_tx(tx_bad_anchor, TXID, TX, Peer, TXIDPeer) ->
	LastTX = ar_util:encode(TX#tx.last_tx),
	CurrentHeight = ar_node:get_height(),
	CurrentBlockHash = ar_util:encode(ar_node:get_current_block_hash()),
	?LOG_INFO(format_invalid_tx_message(tx_bad_anchor, TXID, Peer, TXIDPeer, [
		{last_tx, LastTX},
		{current_height, CurrentHeight},
		{current_block_hash, CurrentBlockHash}
	]));
log_invalid_tx(tx_verification_failed, TXID, TX, Peer, TXIDPeer) ->
	LastTX = ar_util:encode(TX#tx.last_tx),
	CurrentHeight = ar_node:get_height(),
	CurrentBlockHash = ar_util:encode(ar_node:get_current_block_hash()),
	ErrorCodes = ar_tx_db:get_error_codes(TXID),
	?LOG_INFO(format_invalid_tx_message(tx_verification_failed, TXID, Peer, TXIDPeer, [
		{last_tx, LastTX},
		{current_height, CurrentHeight},
		{current_block_hash, CurrentBlockHash},
		{error_codes, ErrorCodes}
	]));
log_invalid_tx(Code, TXID, _TX, Peer, TXIDPeer) ->
	?LOG_INFO(format_invalid_tx_message(Code, TXID, Peer, TXIDPeer, [])).
%% Assemble the structured log report for a rejected or already-included
%% transaction: a fixed set of identifying fields followed by any
%% caller-supplied extra fields.
format_invalid_tx_message(Code, TXID, Peer, TXIDPeer, ExtraLogs) ->
	CommonFields = [
		{event, fetched_already_included_or_invalid_tx},
		{txid, ar_util:encode(TXID)},
		{code, Code},
		{peer, ar_util:format_peer(Peer)},
		{txid_peer, ar_util:format_peer(TXIDPeer)}
	],
	CommonFields ++ ExtraLogs.
================================================
FILE: apps/arweave/src/ar_tx_replay_pool.erl
================================================
%%% @doc Transaction verification helpers built on top of ar_tx and
%%% ar_node_utils. Used for on-edge verification, validating the
%%% transactions of new blocks, picking transactions to include in a block,
%%% and pruning transactions that became invalid from the mempool after a
%%% new block is accepted.
-module(ar_tx_replay_pool).
-export([verify_tx/1, verify_tx/2, verify_block_txs/1, pick_txs_to_mine/1]).
-include_lib("arweave/include/ar.hrl").
%% @doc Verify a transaction against the given mempool, wallet list, recent
%% weave transactions, current block height, difficulty, and time. The
%% mempool is consulted only to find the same transaction or a reference to
%% another mempool transaction - NOT to verify shared resources such as
%% funds, wallet list references, or data size. That makes this function
%% suitable for on-edge verification, where potentially conflicting
%% transactions are accepted to avoid consensus issues later.
verify_tx(Args) ->
	verify_tx(Args, verify_signature).
%% Verify a single transaction at the current wall-clock time. The guards
%% reject malformed argument tuples early. VerifySignature toggles the
%% (expensive) signature check.
verify_tx({TX, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height,
		RedenominationHeight, BlockAnchors, RecentTXMap, Mempool, Wallets},
		VerifySignature)
		when is_record(TX, tx), is_list(BlockAnchors), is_map(RecentTXMap),
			is_map(Wallets), is_map(Mempool) ->
	%% Note: verify_tx2/1 takes the timestamp explicitly; here we use "now".
	verify_tx2({TX, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
			Height, RedenominationHeight, os:system_time(seconds), Wallets,
			BlockAnchors, RecentTXMap, Mempool, VerifySignature}).

%% @doc Verify the transactions are valid for the block taken into account
%% the given current difficulty and height, the previous blocks' wallet list,
%% and recent weave transactions.
verify_block_txs(
		{TXs, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height,
		RedenominationHeight, Timestamp, Wallets, BlockAnchors, RecentTXMap}) ->
	%% Seed the fold with an empty mempool and zero running count/size.
	verify_block_txs(TXs, {Rate, PricePerGiBMinute, KryderPlusRateMultiplier,
			Denomination, Height, RedenominationHeight, Timestamp, Wallets,
			BlockAnchors, RecentTXMap, maps:new(), 0, 0}).
%% Fold over the block's transactions, verifying each against the wallets
%% as updated by the transactions before it, and enforcing the per-block
%% count and data-size limits (above fork 1.8). Returns valid | invalid.
verify_block_txs([], _Args) ->
	valid;
verify_block_txs([TX | TXs], {Rate, PricePerGiBMinute, KryderPlusRateMultiplier,
		Denomination, Height, RedenominationHeight, Timestamp, Wallets, BlockAnchors,
		RecentTXMap, Mempool, C, Size})
		when is_record(TX, tx), is_map(Wallets), is_list(BlockAnchors),
			is_map(RecentTXMap), is_map(Mempool) ->
	case verify_tx2({TX, Rate, PricePerGiBMinute, KryderPlusRateMultiplier,
			Denomination, Height, RedenominationHeight, Timestamp, Wallets,
			BlockAnchors, RecentTXMap, Mempool, verify_signature}) of
		valid ->
			%% Record the transaction so later ones cannot anchor on or
			%% duplicate it, and apply its balance changes.
			NewMempool = maps:put(TX#tx.id, no_tx, Mempool),
			NewWallets = ar_node_utils:apply_tx(Wallets, Denomination, TX),
			%% Only format-1 transactions carry their data in the block.
			NewSize =
				case TX of
					#tx{ format = 1 } ->
						Size + TX#tx.data_size;
					_ ->
						Size
				end,
			NewCount = C + 1,
			AboveFork1_8 = Height >= ar_fork:height_1_8(),
			CountExceedsLimit = NewCount > ?BLOCK_TX_COUNT_LIMIT,
			SizeExceedsLimit = NewSize > ?BLOCK_TX_DATA_SIZE_LIMIT,
			%% The limits are only enforced above fork 1.8.
			case {AboveFork1_8, CountExceedsLimit, SizeExceedsLimit} of
				{true, true, _} ->
					invalid;
				{true, _, true} ->
					invalid;
				_ ->
					verify_block_txs(TXs, {Rate, PricePerGiBMinute,
							KryderPlusRateMultiplier, Denomination, Height,
							RedenominationHeight, Timestamp, NewWallets,
							BlockAnchors, RecentTXMap, NewMempool, NewCount,
							NewSize})
			end;
		{invalid, _} ->
			invalid
	end.

%% @doc Pick a list of transactions from the mempool to mine on.
%% Transactions have to be valid when applied on top of each other taken
%% into account the current height and diff, recent weave transactions, and
%% the wallet list. The total data size of chosen transactions does not
%% exceed the block size limit. Before a valid subset of transactions is chosen,
%% transactions are sorted from highest to lowest utility and then from oldest
%% block anchors to newest.
pick_txs_to_mine(Args) ->
	{BlockAnchors, RecentTXMap, Height, RedenominationHeight, Rate, PricePerGiBMinute,
			KryderPlusRateMultiplier, Denomination, Timestamp, Wallets, TXs} = Args,
	Sorted = sort_txs_by_utility_and_anchor(TXs, BlockAnchors),
	pick_txs_under_size_limit(Sorted, {Rate, PricePerGiBMinute,
			KryderPlusRateMultiplier, Denomination, Height, RedenominationHeight,
			Timestamp, Wallets, BlockAnchors, RecentTXMap, maps:new(), 0, 0}).

%%%===================================================================
%%% Private functions.
%%%===================================================================

%% @doc Run the stateless transaction checks (ar_tx:verify/3) and, if they
%% pass, the anchor/replay checks.
verify_tx2(Args) ->
	{TX, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height,
			RedenominationHeight, Timestamp, FloatingWallets, BlockAnchors, RecentTXMap,
			Mempool, VerifySignature} = Args,
	TXValid = ar_tx:verify(TX, {Rate, PricePerGiBMinute, KryderPlusRateMultiplier,
			Denomination, RedenominationHeight, Height, FloatingWallets, Timestamp},
			VerifySignature),
	case TXValid of
		true ->
			verify_anchor(TX, Height, FloatingWallets, BlockAnchors, RecentTXMap,
					Mempool);
		false ->
			{invalid, tx_verification_failed}
	end.

verify_anchor(TX, Height, FloatingWallets, BlockAnchors, RecentTXMap, Mempool)
		when is_record(TX, tx), is_map(FloatingWallets), is_list(BlockAnchors),
			is_map(RecentTXMap), is_map(Mempool) ->
	%% Only verify after fork 1.8 otherwise it causes a soft fork
	%% since current nodes can accept blocks with a chain of last_tx
	%% references. The check would still fail on edge pre 1.8 since
	%% TX is validated against a previous blocks' wallet list then.
	AnchoredInMempool = Height >= ar_fork:height_1_8()
			andalso maps:is_key(TX#tx.last_tx, Mempool),
	case AnchoredInMempool of
		true ->
			{invalid, last_tx_in_mempool};
		false ->
			verify_last_tx(TX, FloatingWallets, BlockAnchors, RecentTXMap, Mempool)
	end.
%% Anchor-check chain: wallet last_tx -> block anchor -> not already in the
%% weave -> not already in the mempool.
verify_last_tx(TX, FloatingWallets, BlockAnchors, RecentTXMap, Mempool) ->
	case ar_tx:check_last_tx(FloatingWallets, TX) of
		true -> valid;
		false -> verify_block_anchor(TX, BlockAnchors, RecentTXMap, Mempool)
	end.

verify_block_anchor(TX, BlockAnchors, RecentTXMap, Mempool) ->
	case lists:member(TX#tx.last_tx, BlockAnchors) of
		true -> verify_tx_in_weave(TX, RecentTXMap, Mempool);
		false -> {invalid, tx_bad_anchor}
	end.

verify_tx_in_weave(TX, RecentTXMap, Mempool) ->
	case maps:is_key(TX#tx.id, RecentTXMap) of
		true -> {invalid, tx_already_in_weave};
		false -> verify_tx_in_mempool(TX, Mempool)
	end.

verify_tx_in_mempool(TX, Mempool) ->
	case maps:is_key(TX#tx.id, Mempool) of
		true -> {invalid, tx_already_in_mempool};
		false -> valid
	end.

%% @doc Greedily take transactions (assumed pre-sorted by utility) while they
%% remain individually valid on top of the accumulated state. A transaction
%% that would push the block over the count or data size limit is skipped
%% without consuming the accumulator, so smaller transactions later in the
%% list may still fit.
pick_txs_under_size_limit([], _Args) ->
	[];
pick_txs_under_size_limit([TX | Rest], {Rate, PricePerGiBMinute,
		KryderPlusRateMultiplier, Denomination, Height, RedenominationHeight, Timestamp,
		Wallets, BlockAnchors, RecentTXMap, Mempool, Size, Count})
		when is_record(TX, tx), is_map(Wallets), is_list(BlockAnchors),
			is_map(RecentTXMap), is_map(Mempool) ->
	Unchanged = {Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
			Height, RedenominationHeight, Timestamp, Wallets, BlockAnchors, RecentTXMap,
			Mempool, Size, Count},
	case verify_tx2({TX, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination,
			Height, RedenominationHeight, Timestamp, Wallets, BlockAnchors, RecentTXMap,
			Mempool, verify_signature}) of
		{invalid, _} ->
			%% Drop the invalid transaction; keep scanning with the old state.
			pick_txs_under_size_limit(Rest, Unchanged);
		valid ->
			%% Only format-1 transactions count towards the block data size.
			Size2 =
				case TX of
					#tx{ format = 1 } -> Size + TX#tx.data_size;
					_ -> Size
				end,
			Count2 = Count + 1,
			case Count2 > ?BLOCK_TX_COUNT_LIMIT
					orelse Size2 > ?BLOCK_TX_DATA_SIZE_LIMIT of
				true ->
					%% Does not fit; skip it but keep looking for smaller txs.
					pick_txs_under_size_limit(Rest, Unchanged);
				false ->
					Mempool2 = maps:put(TX#tx.id, no_tx, Mempool),
					Wallets2 = ar_node_utils:apply_tx(Wallets, Denomination, TX),
					[TX | pick_txs_under_size_limit(Rest, {Rate, PricePerGiBMinute,
							KryderPlusRateMultiplier, Denomination, Height,
							RedenominationHeight, Timestamp, Wallets2, BlockAnchors,
							RecentTXMap, Mempool2, Size2, Count2})]
			end
	end.

%% @doc Sort from highest to lowest utility, breaking ties by preferring the
%% oldest block anchor.
sort_txs_by_utility_and_anchor(TXs, BHL) ->
	lists:sort(fun(TX1, TX2) -> compare_txs(TX1, TX2, BHL) end, TXs).

compare_txs(TX1, TX2, BHL) ->
	case {lists:member(TX1#tx.last_tx, BHL), lists:member(TX2#tx.last_tx, BHL)} of
		{false, _} -> true;
		{true, false} -> false;
		{true, true} -> compare_txs_by_utility(TX1, TX2, BHL)
	end.

compare_txs_by_utility(TX1, TX2, BHL) ->
	U1 = ar_tx:utility(TX1),
	U2 = ar_tx:utility(TX2),
	case U1 == U2 of
		true -> compare_anchors(TX1#tx.last_tx, TX2#tx.last_tx, BHL);
		false -> U1 > U2
	end.

%% BHL is ordered newest-first, so the anchor found *later* in the list is
%% the older one and sorts first.
compare_anchors(_Anchor1, _Anchor2, []) -> true;
compare_anchors(Anchor, Anchor, _) -> true;
compare_anchors(Anchor1, _Anchor2, [Anchor1 | _]) -> false;
compare_anchors(_Anchor1, Anchor2, [Anchor2 | _]) -> true;
compare_anchors(Anchor1, Anchor2, [_ | Anchors]) ->
	compare_anchors(Anchor1, Anchor2, Anchors).



================================================
FILE: apps/arweave/src/ar_tx_validator.erl
================================================
-module(ar_tx_validator).

-export([validate/1]).

-include_lib("arweave/include/ar.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Validate a transaction against the current node state (height, wallet
%% list, recent transactions, pricing parameters) and the mempool. Returns
%% {valid, TX} - for v1 data transactions with the data_root filled in - or
%% the failure result of the replay-pool / data-root checks.
validate(TX) ->
	Props = ets:select(node_state, [{{'$1', '$2'},
			[{'or',
				{'==', '$1', height},
				{'==', '$1', wallet_list},
				{'==', '$1', recent_txs_map},
				{'==', '$1', block_anchors},
				{'==', '$1', usd_to_ar_rate},
				{'==', '$1', price_per_gib_minute},
				{'==', '$1', kryder_plus_rate_multiplier},
				{'==', '$1', denomination},
				{'==', '$1', redenomination_height}}],
			['$_']}]),
	Height = proplists:get_value(height, Props),
	WL = proplists:get_value(wallet_list, Props),
	RecentTXMap = proplists:get_value(recent_txs_map, Props),
	BlockAnchors = proplists:get_value(block_anchors, Props),
	USDToARRate = proplists:get_value(usd_to_ar_rate, Props),
	PricePerGiBMinute = proplists:get_value(price_per_gib_minute, Props),
	KryderPlusRateMultiplier = proplists:get_value(kryder_plus_rate_multiplier, Props),
	Denomination = proplists:get_value(denomination, Props),
	RedenominationHeight = proplists:get_value(redenomination_height, Props),
	Wallets = ar_wallets:get(WL, ar_tx:get_addresses([TX])),
	Mempool = ar_mempool:get_map(),
	Result = ar_tx_replay_pool:verify_tx({TX, USDToARRate, PricePerGiBMinute,
			KryderPlusRateMultiplier, Denomination, Height, RedenominationHeight,
			BlockAnchors, RecentTXMap, Mempool, Wallets}),
	HasV2Data = TX#tx.format == 2 andalso byte_size(TX#tx.data) /= 0,
	Result2 =
		case {Result, HasV2Data} of
			{valid, true} ->
				%% For format-2 transactions shipped with data, check that the
				%% attached data matches the signed data_root and data_size.
				Chunks = ar_tx:chunk_binary(?DATA_CHUNK_SIZE, TX#tx.data),
				SizeTaggedChunks = ar_tx:chunks_to_size_tagged_chunks(Chunks),
				SizeTaggedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
						SizeTaggedChunks),
				{Root, _} = ar_merkle:generate_tree(SizeTaggedChunkIDs),
				Size = byte_size(TX#tx.data),
				case {Root, Size} == {TX#tx.data_root, TX#tx.data_size} of
					true -> valid;
					false -> {invalid, invalid_data_root_size}
				end;
			_ ->
				Result
		end,
	case Result2 of
		valid ->
			case TX#tx.format of
				2 ->
					{valid, TX};
				1 ->
					case TX#tx.data_size > 0 of
						true ->
							%% Compute the data root so that we can inform ar_data_sync about
							%% it so that it can accept the chunks. One may notice here that
							%% in case of v1 transactions, chunks arrive together with the tx
							%% header. However, we send the data root to ar_data_sync in
							%% advance, otherwise ar_header_sync may fail to store the chunks
							%% when persisting the transaction as registering the data roots of
							%% a confirmed block is an asynchronous procedure
							%% (see ar_data_sync:add_tip_block called in ar_node_worker) which
							%% does not always complete before ar_header_sync attempts the
							%% insertion.
							V1Chunks = ar_tx:chunk_binary(?DATA_CHUNK_SIZE, TX#tx.data),
							SizeTaggedV1Chunks =
									ar_tx:chunks_to_size_tagged_chunks(V1Chunks),
							SizeTaggedV1ChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
									SizeTaggedV1Chunks),
							{DataRoot, _} = ar_merkle:generate_tree(SizeTaggedV1ChunkIDs),
							{valid, TX#tx{ data_root = DataRoot }};
						false ->
							{valid, TX}
					end
			end;
		_ ->
			Result2
	end.



================================================
FILE: apps/arweave/src/ar_unbalanced_merkle.erl
================================================
-module(ar_unbalanced_merkle).

-export([root/2, root/3, hash_list_to_merkle_root/1, block_index_to_merkle_root/1,
		hash_block_index_entry/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%%% Module for building and manipulating generic and specific unbalanced merkle trees.

%% @doc Take a prior merkle root and add a new piece of data to it, optionally
%% providing a conversion function prior to hashing.
root(OldRoot, Data, Fun) ->
	root(OldRoot, Fun(Data)).

root(OldRoot, Data) ->
	crypto:hash(?MERKLE_HASH_ALG, << OldRoot/binary, Data/binary >>).

%% @doc Generate a new entire merkle tree from a hash list.
hash_list_to_merkle_root(HL) ->
	lists:foldl(fun(BH, MR) -> root(MR, BH) end, <<>>, lists:reverse(HL)).

%% @doc Generate a new entire merkle tree from a block index.
block_index_to_merkle_root(HL) ->
	lists:foldl(
		fun(BIEntry, MR) -> root(MR, BIEntry, fun hash_block_index_entry/1) end,
		<<>>,
		lists:reverse(HL)).
%% @doc Hash a single block index entry ({BH, WeaveSize, TXRoot}) for
%% inclusion in the unbalanced merkle tree.
hash_block_index_entry({BH, WeaveSize, TXRoot}) ->
	ar_deep_hash:hash([BH, integer_to_binary(WeaveSize), TXRoot]).

%%% TESTS

%% The intermediate roots were garbled in this copy (empty `<>` binaries);
%% reconstructed from hash_list_to_merkle_root/1 semantics: the tree starts
%% from <<>>, so MR0 = H(BH0), MR1 = H(MR0 ++ BH1), MR2 = H(MR1 ++ BH2).
basic_hash_root_generation_test() ->
	BH0 = crypto:strong_rand_bytes(32),
	BH1 = crypto:strong_rand_bytes(32),
	BH2 = crypto:strong_rand_bytes(32),
	MR0 = test_hash(BH0),
	MR1 = test_hash(<< MR0/binary, BH1/binary >>),
	MR2 = test_hash(<< MR1/binary, BH2/binary >>),
	?assertEqual(MR2, hash_list_to_merkle_root([BH2, BH1, BH0])).

test_hash(Bin) -> crypto:hash(?MERKLE_HASH_ALG, Bin).

%% Incrementally extending a root must agree with rebuilding from scratch.
root_update_test() ->
	BH0 = crypto:strong_rand_bytes(32),
	BH1 = crypto:strong_rand_bytes(32),
	BH2 = crypto:strong_rand_bytes(32),
	BH3 = crypto:strong_rand_bytes(32),
	Root =
		root(
			root(
				hash_list_to_merkle_root([BH1, BH0]),
				BH2
			),
			BH3
		),
	?assertEqual(
		hash_list_to_merkle_root([BH3, BH2, BH1, BH0]),
		Root
	).



================================================
FILE: apps/arweave/src/ar_util.erl
================================================
-module(ar_util).

-export([
	assert_file_exists_and_readable/1, batch_pmap/3, batch_pmap/4, between/3,
	binary_to_integer/1, block_index_entry_from_block/1, bool_to_int/1,
	bytes_to_mb_string/1, cast_after/3, ceil_int/2, count/2, decode/1, do_until/3,
	encode/1, encode_list_indices/1, floor_int/2, format_peer/1, genesis_wallets/0,
	get_system_device/1, integer_to_binary/1, int_to_bool/1, parse_list_indices/1,
	parse_peer/1, parse_peer/2, parse_port/1, peer_to_str/1, pfilter/2,
	pick_random/1, pick_random/2, pmap/2, pmap/3, print_stacktrace/0, safe_decode/1,
	safe_divide/2, safe_encode/1, safe_ets_lookup/2, safe_format/1, safe_format/3,
	safe_parse_peer/1, safe_parse_peer/2, shuffle_list/1, take_every_nth/2,
	terminal_clear/0, timestamp_to_seconds/1, invert_map/1, unique/1,
	pad_to_closest_multiple_equal_or_above/2
]).

-include("ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Default per-element timeout for the parallel map helpers, in milliseconds.
-define(DEFAULT_PMAP_TIMEOUT, 60_000).

%% @doc Map a boolean to 1 (true) or 0 (anything else).
bool_to_int(true) -> 1;
bool_to_int(_) -> 0.

%% @doc Map 1/0 back to true/false.
int_to_bool(1) -> true;
int_to_bool(0) -> false.

%% @doc Implementations of integer_to_binary and binary_to_integer that can handle infinity.
integer_to_binary(infinity) -> <<"infinity">>;
integer_to_binary(N) -> erlang:integer_to_binary(N).

binary_to_integer(<<"infinity">>) -> infinity;
binary_to_integer(N) -> erlang:binary_to_integer(N).

%% @doc Round IntValue up to the nearest multiple of Nearest.
%% Rounds up even if IntValue is already a multiple of Nearest.
ceil_int(IntValue, Nearest) ->
	IntValue + Nearest - IntValue rem Nearest.

%% @doc Round IntValue down to the nearest multiple of Nearest.
%% Doesn't change IntValue if it's already a multiple of Nearest.
floor_int(IntValue, Nearest) ->
	IntValue - IntValue rem Nearest.

%% @doc Clamp N into the inclusive range [Min, Max].
between(N, Min, _Max) when N < Min -> Min;
between(N, _Min, Max) when N > Max -> Max;
between(N, _Min, _Max) -> N.

%% @doc Pick up to N distinct random elements from the given list.
pick_random(_List, 0) -> [];
pick_random([], _N) -> [];
pick_random(List, N) ->
	Picked = pick_random(List),
	[Picked | pick_random(List -- [Picked], N - 1)].

%% @doc Select a single random element from a non-empty list.
pick_random(Xs) ->
	lists:nth(rand:uniform(length(Xs)), Xs).

%% @doc Encode a binary to URL safe base64 binary string.
encode(Bin) ->
	b64fast:encode(Bin).

%% @doc Try to decode a URL safe base64 into a binary or throw an error when
%% invalid.
decode(Input) ->
	b64fast:decode(Input).

%% @doc Encode when given a binary; pass any other term through unchanged.
safe_encode(Bin) when is_binary(Bin) ->
	encode(Bin);
safe_encode(Term) ->
	Term.

%% @doc Safely decode a URL safe base64 into a binary returning an ok or error
%% tuple.
safe_decode(E) ->
	try
		{ok, decode(E)}
	catch _:_ ->
		{error, invalid}
	end.

%% @doc Safely lookup a key in an ETS table.
%% Returns [] (and logs a warning) if the table doesn't exist - this can happen
%% when running some of the helper utilities like data_doctor.
safe_ets_lookup(Table, Key) ->
	try
		ets:lookup(Table, Key)
	catch Type:Reason ->
		?LOG_WARNING([{event, ets_table_not_found}, {table, Table}, {key, Key},
				{type, Type}, {reason, Reason}]),
		[]
	end.

%% @doc Convert an erlang:timestamp() to seconds since the Unix Epoch.
timestamp_to_seconds({MegaSecs, Secs, _MicroSecs}) ->
	MegaSecs * 1000000 + Secs.

%% @doc Convert a map from Key => Value, to Value => set(Keys)
-spec invert_map(map()) -> map().
invert_map(Map) ->
	maps:fold(
		fun(Key, Value, Acc) ->
			CurrentSet = maps:get(Value, Acc, sets:new()),
			maps:put(Value, sets:add_element(Key, CurrentSet), Acc)
		end,
		#{},
		Map
	).

%%--------------------------------------------------------------------
%% @doc Parse a string representing a remote host into our internal
%% format.
%% @end
%%--------------------------------------------------------------------
-spec parse_peer(Hostname) -> Return when
	Hostname :: string() | binary(),
	Return :: [IpWithPort] | no_return(),
	IpWithPort :: {A, A, A, A, Port},
	A :: pos_integer(),
	Port :: pos_integer().
parse_peer(Hostname) ->
	parse_peer(Hostname, #{}).

%%--------------------------------------------------------------------
%% @doc Parse a string representing a remote host into our internal
%% format.
%% @end
%%--------------------------------------------------------------------
-spec parse_peer(Hostname, Opts) -> Return when
	Hostname :: string() | binary(),
	Opts :: #{ module_resolve => atom() },
	Return :: [IpWithPort] | no_return(),
	IpWithPort :: {A, A, A, A, Port},
	A :: pos_integer(),
	Port :: pos_integer().
parse_peer("", _Opts) ->
	throw(empty_peer_string);
parse_peer(BitStr, Opts) when is_binary(BitStr) ->
	parse_peer(binary_to_list(BitStr), Opts);
parse_peer([{A, B, C, D, P}], _Opts) ->
	[{A, B, C, D, parse_port(P)}];
parse_peer(Str, Opts) when is_list(Str) ->
	%% useful to mock the resolver, instead of using
	%% inet, any other custom module can be used.
	ResolveModule = maps:get(module_resolve, Opts, inet),
	[Addr, PortStr] = parse_port_split(Str),
	case ResolveModule:getaddrs(Addr, inet) of
		{ok, [{A, B, C, D}]} ->
			[{A, B, C, D, parse_port(PortStr)}];
		{ok, AddrsList} when is_list(AddrsList) ->
			[{A, B, C, D, parse_port(PortStr)} || {A, B, C, D} <- AddrsList];
		{error, Reason} ->
			throw({invalid_peer_string, Str, Reason})
	end;
parse_peer({{A, B, C, D}, P}, _Opts) ->
	[{A, B, C, D, parse_port(P)}];
parse_peer({IP, Port}, Opts) ->
	%% Resolve the host part, then override every resolved address with the
	%% given port. Note: parse_peer/2 returns a *list* of 5-tuples, so the
	%% previous single 4-tuple match ({A, B, C, D} = parse_peer(IP, Opts))
	%% could never succeed and always crashed for this input shape.
	[{A, B, C, D, parse_port(Port)} || {A, B, C, D, _} <- parse_peer(IP, Opts)];
parse_peer(_Peer, _) ->
	throw(invalid_peer).

parse_peer_test() ->
	?assertThrow(
		empty_peer_string,
		parse_peer("")
	),
	?assertThrow(
		invalid_peer,
		parse_peer(1)
	),
	?assertEqual(
		[{127,0,0,1,1985}],
		parse_peer({{127,0,0,1}, 1985})
	),
	Opts = #{ module_resolve => ar_test_inet_mock },
	?assertEqual(
		[{127,0,0,1,1984}],
		parse_peer("single.record.local", Opts)
	),
	?assertEqual(
		[
			{127,0,0,2,1984},
			{127,0,0,3,1984},
			{127,0,0,4,1984},
			{127,0,0,5,1984}
		],
		parse_peer("multi.record.local", Opts)
	),
	?assertThrow(
		{invalid_peer_string,_,_},
		parse_peer("error.test.local", Opts)
	).

%%--------------------------------------------------------------------
%% @doc convert a peer as tuple or binary to string.
%% @end
%%--------------------------------------------------------------------
-spec peer_to_str(Peer) -> Return when
	Peer :: binary() | string() | tuple(),
	Return :: string().
peer_to_str(Bin) when is_binary(Bin) ->
	binary_to_list(Bin);
peer_to_str(Str) when is_list(Str) ->
	Str;
peer_to_str({A, B, C, D, Port}) ->
	integer_to_list(A) ++ "_" ++ integer_to_list(B) ++ "_" ++ integer_to_list(C)
			++ "_" ++ integer_to_list(D) ++ "_" ++ integer_to_list(Port).

%% @doc Parses a port string into an integer; "" maps to the default port.
parse_port(Int) when is_integer(Int) -> Int;
parse_port("") -> ?DEFAULT_HTTP_IFACE_PORT;
parse_port(PortStr) ->
	{ok, [Port], ""} = io_lib:fread("~d", PortStr),
	Port.
%% @doc Split "host:port" (or plain "host") into [Host, Port]; throws on
%% anything with more than one colon.
parse_port_split(Str) ->
	case string:tokens(Str, ":") of
		[Addr] -> [Addr, ?DEFAULT_HTTP_IFACE_PORT];
		[Addr, Port] -> [Addr, Port];
		_ -> throw({invalid_peer_string, Str})
	end.

%%--------------------------------------------------------------------
%% @doc wrapper for parse_peer/1
%% @see safe_parse_peer/2
%% @end
%%--------------------------------------------------------------------
safe_parse_peer(Peer) ->
	safe_parse_peer(Peer, #{}).

%%--------------------------------------------------------------------
%% @doc wrapper for parse_peer/1
%% @end
%%--------------------------------------------------------------------
-spec safe_parse_peer(Hostname, Opts) -> Return when
	Hostname :: string() | binary(),
	Opts :: map(),
	Return :: {ok, ReturnOk} | {error, invalid},
	ReturnOk :: [IpWithPort] | no_return(),
	IpWithPort :: {A, A, A, A, Port},
	A :: pos_integer(),
	Port :: pos_integer().
safe_parse_peer(Peer, Opts) ->
	try
		{ok, parse_peer(Peer, Opts)}
	catch _:_ ->
		{error, invalid}
	end.

%% @doc Take a remote host ID in various formats, return a HTTP-friendly string.
format_peer([{Host, Port} | _]) ->
	format_peer({Host, Port});
format_peer([{A, B, C, D, Port} | _]) ->
	format_peer({A, B, C, D, Port});
format_peer(Host) when is_list(Host) ->
	format_peer({Host, ?DEFAULT_HTTP_IFACE_PORT});
format_peer({A, B, C, D}) ->
	format_peer({A, B, C, D, ?DEFAULT_HTTP_IFACE_PORT});
format_peer({A, B, C, D, Port}) ->
	lists:flatten(io_lib:format("~w.~w.~w.~w:~w", [A, B, C, D, Port]));
format_peer({Host, Port}) ->
	lists:flatten(io_lib:format("~s:~w", [Host, Port]));
format_peer(Peer) ->
	Peer.

%% @doc Count occurences of element within list.
count(A, List) ->
	length([B || B <- List, A == B]).

%% @doc Takes a list and returns the unique values in it (preserving the order
%% of the first occurence of each value).
unique(Xs) when not is_list(Xs) -> [Xs];
unique(Xs) -> unique([], Xs).
unique(Res, []) ->
	lists:reverse(Res);
unique(Res, [X | Xs]) ->
	case lists:member(X, Res) of
		true -> unique(Res, Xs);
		false -> unique([X | Res], Xs)
	end.

%% @doc Pad the given value to the closest multiple equal or above the given value.
-spec pad_to_closest_multiple_equal_or_above(Value :: non_neg_integer(),
		Multiple :: non_neg_integer()) -> non_neg_integer().
pad_to_closest_multiple_equal_or_above(Value, Multiple) ->
	((Value + Multiple - 1) div Multiple) * Multiple.

%% @doc Run a map in parallel, throw {pmap_timeout, ?DEFAULT_PMAP_TIMEOUT}
%% if a worker takes longer than ?DEFAULT_PMAP_TIMEOUT milliseconds.
pmap(Mapper, List) ->
	pmap(Mapper, List, ?DEFAULT_PMAP_TIMEOUT).

%% @doc Run a map in parallel, throw {pmap_timeout, Timeout} if a worker
%% takes longer than Timeout milliseconds.
pmap(Mapper, List, Timeout) ->
	Master = self(),
	Tagged = [{Elem, make_ref()} || Elem <- List],
	%% One linked worker per element; results are matched back by ref, so the
	%% output order follows the input order regardless of completion order.
	lists:foreach(
		fun({Elem, Ref}) ->
			spawn_link(fun() -> Master ! {pmap_work, Ref, Mapper(Elem)} end)
		end, Tagged),
	lists:map(
		fun({_, Ref}) ->
			receive
				{pmap_work, Ref, Mapped} -> Mapped
			after Timeout ->
				throw({pmap_timeout, Timeout})
			end
		end, Tagged).

%% @doc Run a map in parallel, one batch at a time. If a worker does not
%% finish within the default timeout, return {error, batch_pmap_timeout, Elem}
%% for that element instead of throwing.
batch_pmap(Mapper, List, BatchSize) ->
	batch_pmap(Mapper, List, BatchSize, ?DEFAULT_PMAP_TIMEOUT).

%% @doc Run a map in parallel, one batch at a time. If a worker takes longer
%% than Timeout milliseconds, return {error, batch_pmap_timeout, Elem}.
batch_pmap(_Mapper, [], _BatchSize, _Timeout) ->
	[];
batch_pmap(Mapper, List, BatchSize, Timeout) when BatchSize > 0 ->
	Self = self(),
	{Batch, Rest} =
		case length(List) >= BatchSize of
			true -> lists:split(BatchSize, List);
			false -> {List, []}
		end,
	Tagged = [{Elem, make_ref()} || Elem <- Batch],
	lists:foreach(
		fun({Elem, Ref}) ->
			spawn_link(fun() -> Self ! {pmap_work, Ref, Mapper(Elem)} end)
		end, Tagged),
	Results = lists:map(
		fun({Elem, Ref}) ->
			receive
				{pmap_work, Ref, Mapped} -> Mapped
			after Timeout ->
				{error, batch_pmap_timeout, Elem}
			end
		end, Tagged),
	Results ++ batch_pmap(Mapper, Rest, BatchSize, Timeout).

%% @doc Filter the list in parallel. The predicate may return false, true, or
%% {true, MappedValue} to simultaneously filter and map.
pfilter(Fun, List) ->
	Master = self(),
	Tagged = [{Elem, make_ref()} || Elem <- List],
	lists:foreach(
		fun({Elem, Ref}) ->
			spawn_link(fun() -> Master ! {pmap_work, Ref, Fun(Elem)} end)
		end, Tagged),
	lists:filtermap(
		fun({Elem, Ref}) ->
			receive
				{pmap_work, Ref, false} -> false;
				{pmap_work, Ref, true} -> {true, Elem};
				{pmap_work, Ref, {true, Result}} -> {true, Result}
			end
		end, Tagged).

%% @doc Generate a list of GENESIS wallets, from the CSV file.
genesis_wallets() ->
	{ok, Bin} = file:read_file("genesis_data/genesis_wallets.csv"),
	lists:map(
		fun(Line) ->
			[Addr, RawQty] = string:tokens(Line, ","),
			{
				ar_util:decode(Addr),
				erlang:trunc(math:ceil(list_to_integer(RawQty))) * ?WINSTON_PER_AR,
				<<>>
			}
		end,
		string:tokens(binary_to_list(Bin), [10])
	).

%% @doc Perform a function until it returns {ok, Value} | ok | true | {error, Error}.
%% That term will be returned, others will be ignored. Interval and timeout have to
%% be passed in milliseconds.
do_until(_DoFun, _Interval, Timeout) when Timeout =< 0 ->
	{error, timeout};
do_until(DoFun, Interval, Timeout) ->
	Start = erlang:system_time(millisecond),
	case DoFun() of
		{ok, Value} ->
			{ok, Value};
		ok ->
			ok;
		true ->
			true;
		{error, Error} ->
			{error, Error};
		_ ->
			timer:sleep(Interval),
			Elapsed = erlang:system_time(millisecond) - Start,
			do_until(DoFun, Interval, Timeout - Elapsed)
	end.

%% @doc Build a block index entry {IndepHash, WeaveSize, TXRoot} from a block.
block_index_entry_from_block(B) ->
	{B#block.indep_hash, B#block.weave_size, B#block.tx_root}.

%% @doc Convert the given number of bytes into the "%s MiB" string.
bytes_to_mb_string(Bytes) ->
	integer_to_list(Bytes div 1024 div 1024) ++ " MiB".
%% @doc Encode the given list of sorted numbers into a binary where the nth bit
%% is 1 if the corresponding number is present in the given list; 0 otherwise.
%% The result is padded with 0 bits to a whole number of bytes.
encode_list_indices(Indices) ->
	encode_list_indices(Indices, 0).

encode_list_indices([Index | Indices], N) ->
	<< 0:(Index - N), 1:1, (encode_list_indices(Indices, Index + 1))/bitstring >>;
encode_list_indices([], N) when N rem 8 == 0 ->
	<<>>;
encode_list_indices([], N) ->
	<< 0:(8 - N rem 8) >>.

%% @doc Return a list of position numbers corresponding to 1 bits of the given binary.
parse_list_indices(Input) ->
	parse_list_indices(Input, 0).

parse_list_indices(<< 0:1, Rest/bitstring >>, N) ->
	parse_list_indices(Rest, N + 1);
parse_list_indices(<< 1:1, Rest/bitstring >>, N) ->
	case parse_list_indices(Rest, N + 1) of
		error -> error;
		Indices -> [N | Indices]
	end;
parse_list_indices(<<>>, _N) ->
	[];
parse_list_indices(_BadInput, _N) ->
	error.

%% @doc Return the list in a uniformly random order.
shuffle_list(List) ->
	%% Decorate-sort-undecorate: tag every element with a fresh random key and
	%% sort by it. The previous version sorted with a *random comparator*
	%% (fun(_, _) -> rand:uniform() < 0.5 end), which is not a valid ordering
	%% function for lists:sort/2 and does not produce a uniform shuffle.
	[X || {_, X} <- lists:sort([{rand:uniform(), X} || X <- List])].

%% @doc Format a value and truncate it if it's too long - this can help avoid the node
%% locking up when accidentally trying to log a large/complex datatype (e.g. a map of chunks).
-spec safe_format(term(), non_neg_integer(), non_neg_integer()) -> string().
safe_format(Value) ->
	safe_format(Value, 5, 2000).

safe_format(Value, Depth, Limit) ->
	%% Flatten first: io_lib:format/2 returns a deep iolist, so calling
	%% length/1 on it directly would count sublists rather than characters
	%% and the Limit comparison could be wrong.
	ValueStr = lists:flatten(io_lib:format("~P", [Value, Depth])),
	case length(ValueStr) > Limit of
		true -> string:slice(ValueStr, 0, Limit) ++ "... (truncated)";
		false -> ValueStr
	end.

%%%
%%% Tests.
%%%

%% @doc Test that unique functions correctly.
basic_unique_test() ->
	[a, b, c] = unique([a, a, b, b, b, c, c]),
	[a, b, c] = unique([a, b, c, c, b, a]).

%% @doc Ensure that hosts are formatted as lists correctly.
basic_peer_format_test() ->
	"127.0.0.1:9001" = format_peer({127,0,0,1,9001}).

%% @doc Ensure that pick_random's are actually in the starting list.
pick_random_test() ->
	List = [a, b, c, d, e],
	true = lists:member(pick_random(List), List).
%% @doc Test that binaries of different lengths can be encoded and decoded
%% correctly.
round_trip_encode_test() ->
	lists:map(
		fun(Bytes) ->
			Bin = crypto:strong_rand_bytes(Bytes),
			Bin = decode(encode(Bin))
		end,
		lists:seq(1, 64)
	).

%% Test the parallel mapping functionality.
pmap_test() ->
	Mapper = fun(X) ->
		timer:sleep(100 * X),
		X * 2
	end,
	?assertEqual([6, 2, 4], pmap(Mapper, [3, 1, 2])).

%% @doc Cast Message to the gen_server registered as Module after Delay
%% milliseconds (immediately when Delay is 0).
cast_after(0, Module, Message) ->
	gen_server:cast(Module, Message);
cast_after(Delay, Module, Message) ->
	%% Not using timer:apply_after here because send_after is more efficient:
	%% http://erlang.org/doc/efficiency_guide/commoncaveats.html#timer-module.
	erlang:send_after(Delay, Module, {'$gen_cast', Message}).

%% @doc Keep every Nth element of the list, starting with the first one.
take_every_nth(N, L) ->
	take_every_nth(N, L, 0).

take_every_nth(_N, [], _I) ->
	[];
take_every_nth(N, [El | L], I) when I rem N == 0 ->
	[El | take_every_nth(N, L, I + 1)];
take_every_nth(N, [_El | L], I) ->
	take_every_nth(N, L, I + 1).

%% @doc Float division, falling back to integer division when the float
%% division fails (e.g. the result would not be representable).
safe_divide(A, B) ->
	case catch A / B of
		{'EXIT', _} -> A div B;
		Result -> Result
	end.

encode_list_indices_test() ->
	lists:foldl(
		fun(Input, N) ->
			?assertEqual(Input, lists:sort(Input)),
			Encoded = encode_list_indices(Input),
			?assert(byte_size(Encoded) =< 125),
			Indices = parse_list_indices(Encoded),
			?assertEqual(Input, Indices, io_lib:format("Case ~B", [N])),
			N + 1
		end,
		0,
		[[], [0], [1], [999], [0, 1],
			lists:seq(0, 999), lists:seq(0, 999, 2), lists:seq(1, 999, 3)]
	).

%% @doc os aware way of clearing a terminal.
terminal_clear() ->
	%% os:type() returns a tuple such as {unix, darwin}; the previous
	%% comparison against the string "darwin" could never match, leaving the
	%% ANSI branch as dead code. Emit the ANSI clear sequence on macOS and
	%% shell out to clear elsewhere.
	io:format(
		case os:type() of
			{unix, darwin} -> "\e[H\e[J";
			_ -> os:cmd(clear)
		end).

%% @doc Return the device (per df) backing the given path.
-spec get_system_device(string()) -> string().
get_system_device(Path) ->
	Command = "df -P " ++ Path ++ " | awk 'NR==2 {print $1}'",
	Device = os:cmd(Command),
	string:trim(Device).
%% @doc Log the current stack trace at info level (minus this function's own
%% frame).
print_stacktrace() ->
	try
		%% In OTP21+ try/catch is the recommended way to get the stacktrace.
		throw(dummy)
	catch
		_:_Exception:Stacktrace ->
			%% Remove the first element (the print_stacktrace call itself).
			Trimmed = lists:nthtail(1, Stacktrace),
			Formatted = lists:foldl(
				fun(Frame, Acc) -> Acc ++ io_lib:format(" ~p~n", [Frame]) end,
				"Stack trace:~n",
				Trimmed),
			?LOG_INFO(Formatted)
	end.

%% @doc Assert that a file exists and is readable; stop the node otherwise.
assert_file_exists_and_readable(FilePath) ->
	case file:read_file(FilePath) of
		{ok, _} ->
			ok;
		{error, _} ->
			io:format("~nThe filepath ~p doesn't exist or isn't readable.~n~n",
					[FilePath]),
			init:stop(1)
	end.



================================================
FILE: apps/arweave/src/ar_vdf.erl
================================================
-module(ar_vdf).

-export([compute/3, compute_legacy/3, compute2/3, verify/8, verify2/8,
		debug_sha_verify_no_reset/6, debug_sha_verify/8, debug_sha2/3,
		step_number_to_salt_number/1, checkpoint_buffer_to_checkpoints/1]).

-include("ar_vdf.hrl").
-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% @doc Map a step number to the salt number of its first checkpoint.
step_number_to_salt_number(0) ->
	0;
step_number_to_salt_number(StepNumber) ->
	(StepNumber - 1) * ?VDF_CHECKPOINT_COUNT_IN_STEP + 1.

%% default IterationCount = ?VDF_DIFFICULTY
%% @doc Compute one VDF step using the NIF selected by the vdf config option.
compute(StartStepNumber, PrevOutput, IterationCount) ->
	Salt = step_number_to_salt_number(StartStepNumber - 1),
	SaltBinary = << Salt:256 >>,
	{ok, Config} = arweave_config:get_env(),
	NumCheckpoints = ?VDF_CHECKPOINT_COUNT_IN_STEP - 1,
	case Config#config.vdf of
		fused ->
			ar_vdf_nif:vdf_sha2_fused_nif(SaltBinary, PrevOutput, NumCheckpoints, 0,
					IterationCount);
		hiopt_m4 ->
			ar_vdf_nif:vdf_sha2_hiopt_nif(SaltBinary, PrevOutput, NumCheckpoints, 0,
					IterationCount);
		_ ->
			%% openssl, as well as any unrecognized setting, uses the portable
			%% implementation.
			ar_vdf_nif:vdf_sha2_nif(SaltBinary, PrevOutput, NumCheckpoints, 0,
					IterationCount)
	end.
%% @doc Compute one VDF step with the portable (openssl) NIF regardless of
%% the configured implementation.
compute_legacy(StartStepNumber, PrevOutput, IterationCount) ->
	Salt = step_number_to_salt_number(StartStepNumber - 1),
	SaltBinary = << Salt:256 >>,
	{ok, Output, CheckpointBuffer} = ar_vdf_nif:vdf_sha2_nif(SaltBinary, PrevOutput,
			?VDF_CHECKPOINT_COUNT_IN_STEP - 1, 0, IterationCount),
	{ok, Output, [Output | checkpoint_buffer_to_checkpoints(CheckpointBuffer)]}.

-ifdef(AR_TEST).
%% Slow down VDF calculation on tests since it will complete too fast otherwise.
compute2(StartStepNumber, PrevOutput, IterationCount) ->
	{ok, Output, CheckpointBuffer} =
			compute(StartStepNumber, PrevOutput, IterationCount),
	Checkpoints = [Output | checkpoint_buffer_to_checkpoints(CheckpointBuffer)],
	timer:sleep(50),
	{ok, Output, Checkpoints}.
-else.
%% @doc Like compute/3, but return the decoded checkpoint list (newest first)
%% instead of the raw checkpoint buffer.
compute2(StartStepNumber, PrevOutput, IterationCount) ->
	{ok, Output, CheckpointBuffer} =
			compute(StartStepNumber, PrevOutput, IterationCount),
	{ok, Output, [Output | checkpoint_buffer_to_checkpoints(CheckpointBuffer)]}.
-endif.

%% no reset in CheckpointGroups, then ResetStepNumber < StartSalt
%% any number out of bounds of
%% [StartSalt, StartSalt+group_list_to_sum_step(CheckpointGroups)]
verify(StartSalt, PrevOutput, NumCheckpointsBetweenHashes, Hashes, ResetSalt, ResetSeed,
		ThreadCount, IterationCount) ->
	StartSaltBinary = << StartSalt:256 >>,
	ResetSaltBinary = << ResetSalt:256 >>,
	NumHashes = length(Hashes),
	HashBuffer = iolist_to_binary(Hashes),
	RestStepsSize = ?VDF_BYTE_SIZE * (NumHashes - 1),
	case HashBuffer of
		<< RestSteps:RestStepsSize/binary, LastStep:?VDF_BYTE_SIZE/binary >> ->
			NifResult = ar_vdf_nif:vdf_parallel_sha_verify_with_reset_nif(
					StartSaltBinary, PrevOutput, NumHashes - 1,
					NumCheckpointsBetweenHashes - 1, IterationCount, RestSteps,
					LastStep, ResetSaltBinary, ResetSeed, ThreadCount),
			case NifResult of
				{ok, Steps} -> {true, Steps};
				_ -> false
			end;
		_ ->
			false
	end.
%% @doc Verify a sequence of VDF steps, returning {true, Steps} with one
%% checkpoint kept per step, or false.
verify2(StartStepNumber, PrevOutput, NumCheckpointsBetweenHashes, Hashes,
		ResetStepNumber, ResetSeed, ThreadCount, IterationCount) ->
	StartSalt = step_number_to_salt_number(StartStepNumber),
	ResetSalt = step_number_to_salt_number(ResetStepNumber - 1),
	case verify(StartSalt, PrevOutput, NumCheckpointsBetweenHashes, Hashes, ResetSalt,
			ResetSeed, ThreadCount, IterationCount) of
		false ->
			false;
		{true, CheckpointBuffer} ->
			Checkpoints = checkpoint_buffer_to_checkpoints(CheckpointBuffer),
			{true, ar_util:take_every_nth(?VDF_CHECKPOINT_COUNT_IN_STEP, Checkpoints)}
	end.

%% @doc Decode a flat checkpoint buffer into a list of 32-byte checkpoints,
%% newest first.
checkpoint_buffer_to_checkpoints(Buffer) ->
	checkpoint_buffer_to_checkpoints(Buffer, []).

checkpoint_buffer_to_checkpoints(<<>>, Checkpoints) ->
	Checkpoints;
checkpoint_buffer_to_checkpoints(<< Checkpoint:32/binary, Rest/binary >>, Checkpoints) ->
	checkpoint_buffer_to_checkpoints(Rest, [Checkpoint | Checkpoints]).

%%%===================================================================
%%% Debug implementations.
%%% Erlang implementations of the NIFs. Used in tests.
%%%===================================================================

%% Iterated salted SHA-256.
hash(0, _Salt, Input) ->
	Input;
hash(N, Salt, Input) ->
	hash(N - 1, Salt, crypto:hash(sha256, << Salt/binary, Input/binary >>)).

%% @doc An Erlang implementation of ar_vdf:compute2/3. Used in tests.
debug_sha2(StepNumber, Output, IterationCount) ->
	Salt = step_number_to_salt_number(StepNumber - 1),
	{Output2, Checkpoints} = lists:foldl(
		fun(I, {Acc, L}) ->
			SaltBinary = << (Salt + I):256 >>,
			H = hash(IterationCount, SaltBinary, Acc),
			{H, [H | L]}
		end,
		{Output, []},
		lists:seq(0, ?VDF_CHECKPOINT_COUNT_IN_STEP - 1)
	),
	timer:sleep(500),
	{ok, Output2, Checkpoints}.

%% @doc An Erlang implementation of ar_vdf:verify/7. Used in tests.
debug_sha_verify_no_reset(StepNumber, Output, NumCheckpointsBetweenHashes, Hashes,
		_ThreadCount, IterationCount) ->
	Salt = step_number_to_salt_number(StepNumber),
	debug_verify_no_reset(Salt, Output, NumCheckpointsBetweenHashes, Hashes, [],
			IterationCount).
%% @doc Sequentially recompute Size checkpoints per supplied hash and compare;
%% collect every step output (i.e., checkpoints whose salt is divisible by
%% ?VDF_CHECKPOINT_COUNT_IN_STEP). Returns {true, Steps} or false.
debug_verify_no_reset(Salt, Output, Size, Hashes, Steps, IterationCount) ->
	%% Size is the number of checkpoints covered by each supplied hash.
	true = Size == 1 orelse Size rem ?VDF_CHECKPOINT_COUNT_IN_STEP == 0,
	{NextOutput, Steps2} = lists:foldl(
		fun(I, {Acc, S}) ->
			SaltBinary = << (Salt + I):256 >>,
			O2 = hash(IterationCount, SaltBinary, Acc),
			S2 = case (Salt + I) rem ?VDF_CHECKPOINT_COUNT_IN_STEP of
				0 ->
					[O2 | S];
				_ ->
					S
			end,
			{O2, S2}
		end,
		{Output, []},
		lists:seq(0, Size - 1)
	),
	Salt2 = Salt + Size,
	case Hashes of
		[ NextOutput ] ->
			{true, Steps2 ++ Steps};
		[ NextOutput | Rest ] ->
			debug_verify_no_reset(Salt2, NextOutput, Size, Rest, Steps2 ++ Steps,
					IterationCount);
		_ ->
			false
	end.

%% @doc An Erlang implementation of ar_vdf:verify/8 (with the seed reset).
%% Used in tests.
debug_sha_verify(StepNumber, Output, NumCheckpointsBetweenHashes, Hashes, ResetStepNumber,
		ResetSeed, _ThreadCount, IterationCount) ->
	StartSalt = step_number_to_salt_number(StepNumber),
	ResetSalt = step_number_to_salt_number(ResetStepNumber - 1),
	debug_verify(StartSalt, Output, NumCheckpointsBetweenHashes, Hashes, ResetSalt,
			ResetSeed, [], IterationCount).
%% @doc An Erlang implementation of the verification NIF with seed-reset
%% support: when the salt of the first checkpoint of a step equals ResetSalt,
%% ResetSeed is hashed into the accumulator before iterating. Used in tests.
debug_verify(StartSalt, Output, Size, Hashes, ResetSalt, ResetSeed, Steps, IterationCount) ->
	true = Size rem ?VDF_CHECKPOINT_COUNT_IN_STEP == 0,
	{NextOutput, Steps2} = lists:foldl(
		fun(I, {Acc, S}) ->
			SaltBinary = << (StartSalt + I):256 >>,
			case I rem ?VDF_CHECKPOINT_COUNT_IN_STEP /= 0 of
				true ->
					%% Not the first checkpoint of a step => no reset possible.
					H = hash(IterationCount, SaltBinary, Acc),
					case (StartSalt + I) rem ?VDF_CHECKPOINT_COUNT_IN_STEP of
						0 ->
							{H, [H | S]};
						_ ->
							{H, S}
					end;
				false ->
					%% First checkpoint of a step: mix in the reset seed when
					%% this salt is the reset salt.
					Acc2 = case StartSalt + I == ResetSalt of
						true ->
							crypto:hash(sha256, << Acc/binary, ResetSeed/binary >>);
						false ->
							Acc
					end,
					H = hash(IterationCount, SaltBinary, Acc2),
					case (StartSalt + I) rem ?VDF_CHECKPOINT_COUNT_IN_STEP of
						0 ->
							{H, [H | S]};
						_ ->
							{H, S}
					end
			end
		end,
		{Output, []},
		lists:seq(0, Size - 1)
	),
	case Hashes of
		[ NextOutput ] ->
			{true, Steps2 ++ Steps};
		[ NextOutput | Rest ] ->
			debug_verify(StartSalt + Size, NextOutput, Size, Rest, ResetSalt, ResetSeed,
					Steps2 ++ Steps, IterationCount);
		_ ->
			false
	end.


================================================
FILE: apps/arweave/src/ar_vdf_nif.erl
================================================
-module(ar_vdf_nif).

-on_load(init_nif/0).

-export([vdf_sha2_nif/5, vdf_sha2_fused_nif/5, vdf_sha2_hiopt_nif/5,
		vdf_parallel_sha_verify_with_reset_nif/10]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% NIF stubs; replaced by the native implementations at load time (init_nif/0).
vdf_sha2_nif(_Salt, _PrevState, _CheckpointCount, _skipCheckpointCount, _Iterations) ->
	erlang:nif_error(nif_not_loaded).

vdf_sha2_fused_nif(_Salt, _PrevState, _CheckpointCount, _skipCheckpointCount, _Iterations) ->
	erlang:nif_error(nif_not_loaded).

vdf_sha2_hiopt_nif(_Salt, _PrevState, _CheckpointCount, _skipCheckpointCount, _Iterations) ->
	erlang:nif_error(nif_not_loaded).
%% NIF stub; replaced by the native implementation at load time (init_nif/0).
vdf_parallel_sha_verify_with_reset_nif(_Salt, _PrevState, _CheckpointCount,
		_skipCheckpointCount, _Iterations, _InCheckpoint, _InRes, _ResetSalt, _ResetSeed,
		_MaxThreadCount) ->
	erlang:nif_error(nif_not_loaded).

%% @doc Load the VDF NIF library from the application priv directory.
init_nif() ->
	PrivDir = code:priv_dir(arweave),
	ok = erlang:load_nif(filename:join([PrivDir, "vdf_arweave"]), 0).


================================================
FILE: apps/arweave/src/ar_verify_chunks.erl
================================================
-module(ar_verify_chunks).

-behaviour(gen_server).

-export([start_link/2, name/1]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_poa.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_consensus.hrl").
-include("ar_chunk_storage.hrl").
-include("ar_verify_chunks.hrl").
-include_lib("eunit/include/eunit.hrl").

-record(state, {
	%% purge: invalidate bad chunks so they are resynced; log: report only.
	mode = log :: purge | log,
	store_id :: string(),
	%% The expected (final) packing format of the storage module.
	packing :: term(),
	start_offset :: non_neg_integer(),
	end_offset :: non_neg_integer(),
	%% Absolute offset of the verification scan within the module range.
	cursor :: non_neg_integer(),
	%% Becomes true once the weave covers end_offset; see is_ready/1.
	ready = false :: boolean(),
	chunk_samples = ?SAMPLE_CHUNK_COUNT :: non_neg_integer(),
	verify_report = #verify_report{} :: #verify_report{}
}).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link(Name, StorageModule) ->
	gen_server:start_link({local, Name}, ?MODULE, StorageModule, []).

%% @doc The registered name of the verification worker for the given store.
-spec name(binary()) -> atom().
name(StoreID) ->
	list_to_atom("ar_verify_chunks_" ++ ar_storage_module:label(StoreID)).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

%% @doc Read the configuration, compute the storage module range and kick off
%% the sampling phase, which is followed by the full "verify" scan.
init(StoreID) ->
	{ok, Config} = arweave_config:get_env(),
	?LOG_INFO([{event, verify_chunk_storage_started}, {store_id, StoreID},
		{mode, Config#config.verify}, {chunk_samples, Config#config.verify_samples}]),
	{StartOffset, EndOffset} = ar_storage_module:get_range(StoreID),
	gen_server:cast(self(), sample),
	{ok, #state{
		mode = Config#config.verify,
		store_id = StoreID,
		packing = ar_storage_module:get_packing(StoreID),
		start_offset = StartOffset,
		end_offset = EndOffset,
		cursor = StartOffset,
		ready = is_ready(EndOffset),
		chunk_samples = Config#config.verify_samples,
		verify_report = #verify_report{
			start_time = erlang:system_time(millisecond)
		}
	}}.

%% Not ready yet: re-check every second until the weave covers the module.
handle_cast(sample, #state{ready = false, end_offset = EndOffset} = State) ->
	ar_util:cast_after(1000, self(), sample),
	{noreply, State#state{ready = is_ready(EndOffset)}};
handle_cast(sample, #state{cursor = Cursor, end_offset = EndOffset} = State)
		when Cursor >= EndOffset ->
	ar:console("Done!~n"),
	{noreply, State};
handle_cast(sample, State) ->
	%% Sample ?SAMPLE_CHUNK_COUNT random chunks, read them, unpack them and verify them.
	%% Report the collected statistics and continue with the "verify" procedure.
	io:format("Sampling ~p chunks from ~p to ~p~n",
		[State#state.chunk_samples, State#state.start_offset, State#state.end_offset]),
	MaxSamples = case State#state.chunk_samples of
		all ->
			(State#state.end_offset - State#state.start_offset) div ?DATA_CHUNK_SIZE;
		Count ->
			Count
	end,
	sample_chunks(
		State#state.chunk_samples, sets:new(), #sample_report{samples = MaxSamples}, State),
	gen_server:cast(self(), verify),
	{noreply, State};
%% Not ready yet: re-check every second until the weave covers the module.
handle_cast(verify, #state{ready = false, end_offset = EndOffset} = State) ->
	ar_util:cast_after(1000, self(), verify),
	{noreply, State#state{ready = is_ready(EndOffset)}};
handle_cast(verify, #state{cursor = Cursor, end_offset = EndOffset} = State)
		when Cursor >= EndOffset ->
	ar:console("Done!~n"),
	{noreply, State};
handle_cast(verify, State) ->
	State2 = verify(State),
	State3 = report_progress(State2),
	{noreply, State3};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_call(Call, From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {call, Call}, {from, From}]),
	{reply, ok, State}.

handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.

%% @doc True once the tip of the block index extends to or past EndOffset.
is_ready(EndOffset) ->
	case ar_block_index:get_last() of
		'$end_of_table' ->
			false;
		{WeaveSize, _Height, _H, _TXRoot} ->
			WeaveSize >= EndOffset
	end.

%% @doc verify runs through a series of checks:
%% 1. All chunks covered by the ar_chunk_storage or ar_data_sync sync records exist.
%% 2. All chunks in the ar_data_sync sync record are also in the ar_chunk_storage sync record.
%% 3. All chunks have valid proofs.
%% 4. The ar_data_sync record has the expected packing format.
%% 5. All chunks in the ar_chunk_storage sync record are also in the ar_data_sync sync record.
%%
%% For any chunk that fails one of the above checks: invalidate it so that it can be resynced.
verify(State) ->
	#state{store_id = StoreID} = State,
	{UnionInterval, Intervals} = query_intervals(State),
	State2 = verify_chunks(UnionInterval, Intervals, State),
	case State2#state.cursor >= State2#state.end_offset of
		true ->
			ar:console("Done verifying ~s!~n", [StoreID]),
			?LOG_INFO([{event, verify_chunk_storage_verify_chunks_done},
					{store_id, StoreID}]);
		false ->
			%% More synced intervals remain past the cursor; continue asynchronously.
			gen_server:cast(self(), verify)
	end,
	State2.

%% No more synced intervals: jump the cursor to the end of the module range.
verify_chunks(not_found, _Intervals, State) ->
	State#state{ cursor = State#state.end_offset };
verify_chunks({End, _Start}, _Intervals, #state{cursor = Cursor} = State)
		when Cursor >= End ->
	State;
verify_chunks({IntervalEnd, IntervalStart}, Intervals, State) ->
	#state{ cursor = Cursor } = State,
	Cursor2 = max(IntervalStart, Cursor),
	State3 = case verify_chunks_index(State#state{ cursor = Cursor2 }) of
		{error, State2} ->
			State2;
		{ChunkData, State2} ->
			verify_chunk(ChunkData, Intervals, State2)
	end,
	verify_chunks({IntervalEnd, IntervalStart}, Intervals, State3).

%% @doc Look up the chunk covering the cursor byte in the chunks index.
verify_chunks_index(State) ->
	#state{ cursor = Cursor, store_id = StoreID } = State,
	ChunkData = ar_data_sync:get_chunk_by_byte(Cursor, StoreID),
	verify_chunks_index2(ChunkData, State).

%% Index read failed: invalidate the scanned range and advance the cursor.
verify_chunks_index2({error, Reason}, State) ->
	#state{ cursor = Cursor } = State,
	NextCursor = ar_data_sync:advance_chunks_index_cursor(Cursor),
	State2 = invalidate_sync_record(
		chunks_index_error, Cursor, NextCursor, [{reason, Reason}], State),
	{error, State2#state{ cursor = NextCursor }};
%% The next indexed chunk starts after the cursor: a gap in the chunks index.
verify_chunks_index2(
		{AbsoluteOffset, _, _, _, _, _, ChunkSize},
		#state{cursor = Cursor} = State) when AbsoluteOffset - Cursor >= ChunkSize ->
	NextCursor = AbsoluteOffset - ChunkSize,
	State2 = invalidate_sync_record(chunks_index_gap, Cursor, NextCursor, [], State),
	{error, State2#state{ cursor = NextCursor + 1 }};
verify_chunks_index2(ChunkData, State) ->
	{ChunkData, State}.
%% @doc Run the storage, proof and packing checks for one indexed chunk and
%% advance the cursor past its padded offset.
verify_chunk({ok, _Key, Metadata}, Intervals, State) ->
	{AbsoluteOffset, _ChunkDataKey, _TXRoot, _DataRoot, _TXPath,
			_TXRelativeOffset, _ChunkSize} = Metadata,
	{ChunkStorageInterval, _DataSyncInterval} = Intervals,
	PaddedOffset = ar_block:get_chunk_padded_offset(AbsoluteOffset),
	State2 = verify_chunk_storage(PaddedOffset, Metadata, ChunkStorageInterval, State),
	State3 = verify_proof(Metadata, State2),
	State4 = verify_packing(Metadata, State3),
	State4#state{ cursor = PaddedOffset + 1 };
verify_chunk(_ChunkData, _Intervals, State) ->
	State.

%% @doc Read the chunk's data path and validate its merkle proofs; invalidate
%% the chunk when the path is missing or invalid.
verify_proof(Metadata, State) ->
	#state{ store_id = StoreID } = State,
	{AbsoluteOffset, ChunkDataKey, TXRoot, _DataRoot, TXPath,
			_TXRelativeOffset, ChunkSize} = Metadata,
	case ar_data_sync:read_data_path(ChunkDataKey, StoreID) of
		{ok, DataPath} ->
			ChunkMetadata = #chunk_metadata{
				tx_root = TXRoot,
				tx_path = TXPath,
				data_path = DataPath
			},
			ChunkProof = ar_poa:chunk_proof(ChunkMetadata, AbsoluteOffset - 1),
			case ar_poa:validate_paths(ChunkProof) of
				{false, _} ->
					invalidate_chunk(validate_paths_error, AbsoluteOffset, ChunkSize,
							State);
				{true, _} ->
					State
			end;
		Error ->
			invalidate_chunk(
				read_data_path_error, AbsoluteOffset, ChunkSize, [{reason, Error}], State)
	end.

%% @doc Verify that the ar_data_sync record is configured correctly - namely that it has
%% entry in the expected packing format. This also indirectly detects the case where an
%% interval exists in the ar_chunk_storage record, but not the ar_data_sync record.
verify_packing(Metadata, State) ->
	#state{packing = Packing, store_id = StoreID} = State,
	{AbsoluteOffset, _ChunkDataKey, _TXRoot, _DataRoot, _TXPath,
			_TXRelativeOffset, ChunkSize} = Metadata,
	PaddedOffset = ar_block:get_chunk_padded_offset(AbsoluteOffset),
	StoredPackingCheck = ar_sync_record:is_recorded(AbsoluteOffset, ar_data_sync, StoreID),
	%% When ar_chunk_storage cannot hold this chunk, it is expected to be
	%% recorded as unpacked instead of the module's packing.
	ExpectedPacking =
		case ar_chunk_storage:is_storage_supported(PaddedOffset, ChunkSize, Packing) of
			true -> Packing;
			false -> unpacked
		end,
	case {StoredPackingCheck, ExpectedPacking} of
		{{true, ExpectedPacking}, _} ->
			%% Chunk is recorded in ar_sync_record under the expected Packing.
			State;
		{{true, StoredPacking}, _} ->
			%% This check will invalidate chunks that are not packed to the expected
			%% *final* packing format. A storage module that is in the process of being
			%% packed to replica_2_8 may have chunks that are stored in the intermediate
			%% unpacked_padded format. This check will invalidate those chunks as well.
			%% Miners should make sure to only run `verify` in the `purge` mode after they
			%% have completed packing.
			invalidate_chunk(unexpected_packing, AbsoluteOffset, ChunkSize,
				[{stored_packing, ar_serialize:encode_packing(StoredPacking, true)}],
				State);
		{Reply, _} ->
			%% No packing information recorded at all for this offset.
			invalidate_chunk(missing_packing_info, AbsoluteOffset, ChunkSize,
				[{packing_reply, io_lib:format("~p", [Reply])}], State)
	end.

%% @doc Verify that chunk exists on disk or in chunk_data_db. This also indirectly detects the
%% case where an interval exists in the ar_data_sync record, but not the ar_chunk_storage
%% record.
%% The padded offset falls fully inside the ar_chunk_storage interval: the
%% chunk must be present in the chunk file at the expected position.
verify_chunk_storage(PaddedOffset, Metadata, {End, Start}, State)
		when PaddedOffset - ?DATA_CHUNK_SIZE >= Start andalso PaddedOffset =< End ->
	#state{store_id = StoreID} = State,
	{AbsoluteOffset, ChunkDataKey, _TXRoot, _DataRoot, _TXPath,
			_TXRelativeOffset, ChunkSize} = Metadata,
	{_ChunkFileStart, _Filepath, _Position, ExpectedChunkOffset} =
		ar_chunk_storage:locate_chunk_on_disk(PaddedOffset, StoreID),
	case ar_chunk_storage:read_offset(PaddedOffset, StoreID) of
		{ok, << ExpectedChunkOffset:?OFFSET_BIT_SIZE >>} ->
			State;
		{ok, << ActualChunkOffset:?OFFSET_BIT_SIZE >>} ->
			%% The chunk is recorded in the ar_chunk_storage sync record, but not stored.
			invalidate_chunk(
				invalid_chunk_offset, AbsoluteOffset, ChunkSize,
				[
					{expected_chunk_offset, ExpectedChunkOffset},
					{actual_chunk_offset, ActualChunkOffset}
				], State);
		Error ->
			%% Extra diagnostics: is the chunk at least present in chunk_data_db?
			IsChunkStoredInRocksDB =
				case ar_data_sync:get_chunk_data(ChunkDataKey, StoreID) of
					not_found ->
						false;
					{ok, Value} ->
						case binary_to_term(Value, [safe]) of
							{_Chunk, _DataPath} ->
								true;
							_ ->
								false
						end
				end,
			invalidate_chunk(
				invalid_chunk_offset, AbsoluteOffset, ChunkSize,
				[
					{expected_chunk_offset, ExpectedChunkOffset},
					{error, Error},
					{is_chunk_stored_in_rocksdb, IsChunkStoredInRocksDB}
				], State)
	end;
%% The offset is not (fully) covered by the ar_chunk_storage interval.
verify_chunk_storage(PaddedOffset, Metadata, Interval, State) ->
	#state{ packing = Packing, store_id = StoreID } = State,
	{AbsoluteOffset, _ChunkDataKey, _TXRoot, _DataRoot, _TXPath,
			_TXRelativeOffset, ChunkSize} = Metadata,
	case ar_chunk_storage:is_storage_supported(PaddedOffset, ChunkSize, Packing) of
		true ->
			%% The chunk should have been in ar_chunk_storage; log the state of
			%% all related records to help diagnose the gap.
			Logs = [
				{ar_data_sync,
					ar_sync_record:is_recorded(AbsoluteOffset, ar_data_sync, StoreID)},
				{ar_chunk_storage,
					ar_sync_record:is_recorded(AbsoluteOffset, ar_chunk_storage, StoreID)},
				{ar_chunk_storage_replica_2_9_1_unpacked,
					ar_sync_record:is_recorded(AbsoluteOffset,
						ar_chunk_storage_replica_2_9_1_unpacked, StoreID)},
				{unpacked_padded,
					ar_sync_record:is_recorded(AbsoluteOffset, unpacked_padded, StoreID)},
				{is_entropy_recorded,
					ar_entropy_storage:is_entropy_recorded(
						AbsoluteOffset, Packing, StoreID)},
				{is_blacklisted, ar_tx_blacklist:is_byte_blacklisted(AbsoluteOffset)},
				{interval, Interval},
				{padded_offset, PaddedOffset}
			],
			invalidate_chunk(chunk_storage_gap, AbsoluteOffset, ChunkSize, Logs, State);
		false ->
			verify_chunk_data(Metadata, State)
	end.

%% @doc Check that the chunk (together with its data path) is present in
%% chunk_data_db for chunks not handled by ar_chunk_storage.
verify_chunk_data(Metadata, State) ->
	#state{ store_id = StoreID } = State,
	{AbsoluteOffset, ChunkDataKey, _TXRoot, _DataRoot, _TXPath,
			_TXRelativeOffset, ChunkSize} = Metadata,
	case ar_data_sync:get_chunk_data(ChunkDataKey, StoreID) of
		not_found ->
			invalidate_chunk(chunk_data_not_found, AbsoluteOffset, ChunkSize, [], State);
		{ok, Value} ->
			case binary_to_term(Value, [safe]) of
				{_Chunk, _DataPath} ->
					State;
				_DataPath ->
					%% Only the data path is stored, not the chunk itself.
					invalidate_chunk(
						chunk_data_no_chunk, AbsoluteOffset, ChunkSize, [], State)
			end;
		Error ->
			invalidate_chunk(
				chunk_data_error, AbsoluteOffset, ChunkSize, [{reason, Error}], State)
	end.

invalidate_chunk(Type, AbsoluteOffset, ChunkSize, State) ->
	invalidate_chunk(Type, AbsoluteOffset, ChunkSize, [], State).

%% @doc In purge mode, mark the chunk as bad so it is resynced; always log.
invalidate_chunk(Type, AbsoluteOffset, ChunkSize, Logs, State) ->
	#state{ mode = Mode, store_id = StoreID } = State,
	case Mode of
		purge ->
			ar_data_sync:invalidate_bad_data_record(AbsoluteOffset, ChunkSize,
					StoreID, Type);
		log ->
			ok
	end,
	log_error(Type, AbsoluteOffset, ChunkSize, Logs, State).

%% @doc In purge mode, delete the scanned range from the footprint and sync
%% records; always log.
invalidate_sync_record(Type, Cursor, NextCursor, Logs, State) ->
	#state{ mode = Mode, store_id = StoreID } = State,
	case Mode of
		purge ->
			ar_footprint_record:delete(NextCursor, StoreID),
			ar_sync_record:delete(NextCursor, Cursor, ar_data_sync, StoreID),
			ar_sync_record:delete(NextCursor, Cursor, ar_chunk_storage, StoreID);
		log ->
			ok
	end,
	Range = NextCursor - Cursor,
	log_error(Type, Cursor, Range, Logs, State).
%% @doc Fold the error into the verify report (per-type byte and chunk
%% counters plus totals) and emit a log line.
log_error(Type, AbsoluteOffset, ChunkSize, Logs, State) ->
	#state{
		verify_report = Report, store_id = StoreID, cursor = Cursor,
		packing = Packing } = State,
	LogMessage = [{event, verify_chunk_error}, {type, Type}, {store_id, StoreID},
		{expected_packing, ar_serialize:encode_packing(Packing, true)},
		{absolute_end_offset, AbsoluteOffset}, {cursor, Cursor},
		{chunk_size, ChunkSize}] ++ Logs,
	?LOG_INFO(LogMessage),
	NewBytes = maps:get(Type, Report#verify_report.error_bytes, 0) + ChunkSize,
	NewChunks = maps:get(Type, Report#verify_report.error_chunks, 0) + 1,
	Report2 = Report#verify_report{
		total_error_bytes = Report#verify_report.total_error_bytes + ChunkSize,
		total_error_chunks = Report#verify_report.total_error_chunks + 1,
		error_bytes = maps:put(Type, NewBytes, Report#verify_report.error_bytes),
		error_chunks = maps:put(Type, NewChunks, Report#verify_report.error_chunks)
	},
	State#state{ verify_report = Report2 }.

%% @doc Returns 3 sets of intervals:
%% 1. ar_chunk_storage: should cover all chunks that have been stored on disk.
%% 2. ar_data_sync, Packing: should cover all chunks of the specified packing that have been
%%    synced
%% 3. The union of the above two intervals.
%%
%% We will use these intervals to determine errors in the node state (e.g. a chunk that
%% exists in ar_chunk_storage but not ar_data_sync - or vice versa).
query_intervals(State) ->
	#state{cursor = Cursor, store_id = StoreID} = State,
	ChunkStorageInterval = ar_sync_record:get_next_synced_interval(
		Cursor, infinity, ar_chunk_storage, StoreID),
	DataSyncInterval = ar_sync_record:get_next_synced_interval(
		Cursor, infinity, ar_data_sync, StoreID),
	{ChunkStorageInterval2, DataSyncInterval2} = align_intervals(
		Cursor, ChunkStorageInterval, DataSyncInterval),
	UnionInterval = union_intervals(ChunkStorageInterval2, DataSyncInterval2),
	{UnionInterval, {ChunkStorageInterval2, DataSyncInterval2}}.
%% @doc Clamp both intervals to the cursor and to each other's end offset so
%% the two scans stay comparable.
align_intervals(_Cursor, not_found, not_found) ->
	{not_found, not_found};
align_intervals(Cursor, not_found, DataSyncInterval) ->
	{not_found, clamp_interval(Cursor, infinity, DataSyncInterval)};
align_intervals(Cursor, ChunkStorageInterval, not_found) ->
	{clamp_interval(Cursor, infinity, ChunkStorageInterval), not_found};
align_intervals(Cursor, ChunkStorageInterval, DataSyncInterval) ->
	{ChunkStorageEnd, _} = ChunkStorageInterval,
	{DataSyncEnd, _} = DataSyncInterval,
	{
		clamp_interval(Cursor, DataSyncEnd, ChunkStorageInterval),
		clamp_interval(Cursor, ChunkStorageEnd, DataSyncInterval)
	}.

%% @doc The smallest {End, Start} interval covering both inputs.
union_intervals(not_found, not_found) ->
	not_found;
union_intervals(not_found, B) ->
	B;
union_intervals(A, not_found) ->
	A;
union_intervals({End1, Start1}, {End2, Start2}) ->
	{max(End1, End2), min(Start1, Start2)}.

%% @doc Clamp the {End, Start} interval to [ClampMin, ClampMax]; not_found
%% when the clamped interval is empty.
clamp_interval(ClampMin, ClampMax, {End, Start}) ->
	check_interval({min(End, ClampMax), max(Start, ClampMin)}).

check_interval({End, Start}) when Start > End ->
	not_found;
check_interval(Interval) ->
	Interval.

%% @doc Update the verify report with progress information and push it to
%% ar_verify_chunks_reporter.
report_progress(State) ->
	#state{
		store_id = StoreID, verify_report = Report, cursor = Cursor,
		start_offset = StartOffset, end_offset = EndOffset
	} = State,
	Status = case Cursor >= EndOffset of
		true -> done;
		false -> running
	end,
	BytesProcessed = Cursor - StartOffset,
	%% Guard the division: with a zero-length module range
	%% (EndOffset == StartOffset) the previous expression divided by zero and
	%% crashed with badarith; such a range is trivially 100% done.
	Progress = case EndOffset > StartOffset of
		true -> BytesProcessed * 100 div (EndOffset - StartOffset);
		false -> 100
	end,
	Report2 = Report#verify_report{
		bytes_processed = BytesProcessed,
		progress = Progress,
		status = Status
	},
	ar_verify_chunks_reporter:update(StoreID, Report2),
	State#state{ verify_report = Report2 }.

%% Generate offset in the range [Start, End]
%% (i.e. offsets greater than or equal to Start and less than or equal to End)
%% Offsets are normalized to a bucket boundary such that if that bucket boundary has
%% been sampled before, it won't be sampled again.
%% NOTE(review): there is no Retry == 0 clause, so exhausting all retries on
%% collisions crashes with function_clause — acceptable only while callers
%% keep the sample count far below the number of buckets; confirm before reuse.
generate_sample_offset(Start, End, SampledOffsets, Retry) when Retry > 0 ->
	Range = End - Start,
	%% rand:uniform/1 returns an integer in [1, Range], so Offset is in
	%% (Start, Start + Range].
	Offset = Start + rand:uniform(Range),
	BucketStartOffset = ar_chunk_storage:get_chunk_bucket_start(Offset),
	SampleOffset = BucketStartOffset + 1,
	case sets:is_element(SampleOffset, SampledOffsets) of
		true ->
			%% Bucket already sampled; retry with a fresh random offset.
			generate_sample_offset(Start, End, SampledOffsets, Retry - 1);
		false ->
			SampleOffset
	end.

%% @doc Sample chunks and fold the outcomes into the sample report.
sample_chunks(0, _SampledOffsets, SampleReport, _State) ->
	SampleReport;
%% 'all': walk every bucket of the range sequentially instead of sampling.
sample_chunks(all, _SampledOffsets, SampleReport, State) ->
	#state{ store_id = StoreID, start_offset = Start, end_offset = End } = State,
	SampleOffset = ar_chunk_storage:get_chunk_bucket_start(Start) + 1,
	lists:foldl(
		fun(Offset, Report) ->
			{_IsRecorded, NewReport} = sample_offset(Offset, StoreID, Report),
			ar_verify_chunks_reporter:update(StoreID, NewReport),
			NewReport
		end,
		SampleReport,
		lists:seq(SampleOffset, End, ?DATA_CHUNK_SIZE)
	);
%% Random sampling without replacement; offsets not covered by the
%% ar_data_sync record do not count towards Count.
sample_chunks(Count, SampledOffsets, SampleReport, State) ->
	#state{ store_id = StoreID, start_offset = Start, end_offset = End } = State,
	SampleOffset = generate_sample_offset(Start+1, End, SampledOffsets, 100),
	SampledOffsets2 = sets:add_element(SampleOffset, SampledOffsets),
	{IsRecorded, SampleReport2} = sample_offset(SampleOffset, StoreID, SampleReport),
	case IsRecorded of
		true ->
			ar_verify_chunks_reporter:update(StoreID, SampleReport2),
			sample_chunks(Count - 1, SampledOffsets2, SampleReport2, State);
		false ->
			sample_chunks(Count, SampledOffsets2, SampleReport2, State)
	end.
%% @doc Try to read and unpack the chunk at Offset. Returns
%% {IsRecorded, UpdatedReport}; unrecorded offsets leave the report unchanged.
sample_offset(Offset, StoreID, SampleReport) ->
	IsRecorded = case ar_sync_record:is_recorded(Offset, ar_data_sync, StoreID) of
		{true, _} -> true;
		true -> true;
		false -> false
	end,
	SampleReport2 = case IsRecorded of
		true ->
			case ar_data_sync:get_chunk(
					Offset, #{pack => true, packing => unpacked, origin => verify}) of
				{ok, _Proof} ->
					SampleReport#sample_report{
						total = SampleReport#sample_report.total + 1,
						success = SampleReport#sample_report.success + 1
					};
				{error, Reason} ->
					?LOG_INFO([{event, sample_chunk_error},
						{offset, Offset}, {status, Reason}]),
					SampleReport#sample_report{
						total = SampleReport#sample_report.total + 1,
						failure = SampleReport#sample_report.failure + 1
					}
			end;
		false ->
			SampleReport
	end,
	{IsRecorded, SampleReport2}.

%%%===================================================================
%%% Tests.
%%%===================================================================

intervals_test_() ->
	[
		{timeout, 30, fun test_align_intervals/0},
		{timeout, 30, fun test_union_intervals/0}
	].

%% EUnit fixtures exercising verify_chunk_storage/4 with mocked storage reads.
verify_chunk_storage_test_() ->
	[
		ar_test_node:test_with_mocked_functions(
			[{ar_chunk_storage, read_offset,
				fun(_Offset, _StoreID) -> {ok, << ?DATA_CHUNK_SIZE:24 >>} end}],
			fun test_verify_chunk_storage_in_interval/0),
		ar_test_node:test_with_mocked_functions(
			[{ar_chunk_storage, read_offset,
				fun(_Offset, _StoreID) -> {ok, << ?DATA_CHUNK_SIZE:24 >>} end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> false end},
			{ar_entropy_storage, is_entropy_recorded, fun(_, _, _) -> false end},
			{ar_tx_blacklist, is_byte_blacklisted, fun(_) -> false end}],
			fun test_verify_chunk_storage_should_store/0),
		ar_test_node:test_with_mocked_functions(
			[{ar_chunk_storage, read_offset,
				fun(_Offset, _StoreID) -> {ok, << ?DATA_CHUNK_SIZE:24 >>} end},
			{ar_data_sync, get_chunk_data,
				fun(_, _) -> {ok, term_to_binary({<<>>, <<>>})} end}],
			fun test_verify_chunk_storage_should_not_store/0)
	].
%% EUnit fixtures exercising verify_proof/2 with mocked data-path reads and
%% proof validation.
verify_proof_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_data_sync, read_data_path, fun(_, _) -> not_found end}],
			fun test_verify_proof_no_datapath/0
		),
		ar_test_node:test_with_mocked_functions([
			{ar_data_sync, read_data_path, fun(_, _) -> {ok, <<>>} end},
			{ar_poa, chunk_proof, fun(_, _) -> #chunk_proof{} end},
			{ar_poa, validate_paths, fun(_) -> {true, <<>>} end}],
			fun test_verify_proof_valid_paths/0
		),
		ar_test_node:test_with_mocked_functions([
			{ar_data_sync, read_data_path, fun(_, _) -> {ok, <<>>} end},
			{ar_poa, chunk_proof, fun(_, _) -> #chunk_proof{} end},
			{ar_poa, validate_paths, fun(_) -> {false, <<>>} end}],
			fun test_verify_proof_invalid_paths/0
		)
	].

%% EUnit fixture exercising verify_chunk/3 end to end with mocked
%% storage/proof dependencies.
verify_chunk_test_() ->
	[
		ar_test_node:test_with_mocked_functions([
			{ar_data_sync, read_data_path, fun(_, _) -> {ok, <<>>} end},
			{ar_poa, validate_paths, fun(_) -> {true, <<>>} end},
			{ar_poa, chunk_proof, fun(_, _) -> #chunk_proof{} end},
			{ar_chunk_storage, read_offset,
				fun(_Offset, _StoreID) -> {ok, << ?DATA_CHUNK_SIZE:24 >>} end},
			{ar_data_sync, get_chunk_data,
				fun(_, _) -> {ok, term_to_binary({<<>>, <<>>})} end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> false end}],
			fun test_verify_chunk/0
		)
	].
%% Interval pairs are {End, Start}; the cursor clamps the starts, the other
%% interval's end clamps the ends.
test_align_intervals() ->
	?assertEqual(
		{not_found, not_found},
		align_intervals(0, not_found, not_found)),
	?assertEqual(
		{{10, 5}, not_found},
		align_intervals(0, {10, 5}, not_found)),
	?assertEqual(
		{{10, 7}, not_found},
		align_intervals(7, {10, 5}, not_found)),
	?assertEqual(
		{not_found, not_found},
		align_intervals(12, {10, 5}, not_found)),
	?assertEqual(
		{not_found, {10, 5}},
		align_intervals(0, not_found, {10, 5})),
	?assertEqual(
		{not_found, {10, 7}},
		align_intervals(7, not_found, {10, 5})),
	?assertEqual(
		{not_found, not_found},
		align_intervals(12, not_found, {10, 5})),
	?assertEqual(
		{{9, 4}, {9, 5}},
		align_intervals(0, {9, 4}, {10, 5})),
	?assertEqual(
		{{9, 7}, {9, 7}},
		align_intervals(7, {9, 4}, {10, 5})),
	?assertEqual(
		{not_found, not_found},
		align_intervals(12, {9, 4}, {10, 5})),
	?assertEqual(
		{{9, 5}, {9, 4}},
		align_intervals(0, {10, 5}, {9, 4})),
	?assertEqual(
		{{9, 7}, {9, 7}},
		align_intervals(7, {10, 5}, {9, 4})),
	?assertEqual(
		{not_found, not_found},
		align_intervals(12, {10, 5}, {9, 4})),
	ok.

test_union_intervals() ->
	?assertEqual(
		not_found,
		union_intervals(not_found, not_found)),
	?assertEqual(
		{10, 5},
		union_intervals(not_found, {10, 5})),
	?assertEqual(
		{10, 5},
		union_intervals({10, 5}, not_found)),
	%% Disjoint inputs are covered by one spanning interval.
	?assertEqual(
		{10, 3},
		union_intervals({10, 7}, {5, 3})),
	ok.
%% Chunks fully inside the ar_chunk_storage interval with a matching stored
%% offset (mocked read_offset) pass without touching the report.
test_verify_chunk_storage_in_interval() ->
	?assertEqual(
		#state{ packing = unpacked },
		verify_chunk_storage(
			10*?DATA_CHUNK_SIZE,
			{10*?DATA_CHUNK_SIZE, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		#state{ packing = unpacked },
		verify_chunk_storage(
			6*?DATA_CHUNK_SIZE,
			{6*?DATA_CHUNK_SIZE - 1, <<>>, <<>>, <<>>, <<>>, <<>>,
				?DATA_CHUNK_SIZE div 2},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		#state{ packing = unpacked },
		verify_chunk_storage(
			20*?DATA_CHUNK_SIZE,
			{20*?DATA_CHUNK_SIZE - ?DATA_CHUNK_SIZE div 2, <<>>, <<>>, <<>>, <<>>, <<>>,
				?DATA_CHUNK_SIZE div 2},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	ok.

%% Chunks outside the interval that *should* be in ar_chunk_storage are
%% reported as chunk_storage_gap errors.
test_verify_chunk_storage_should_store() ->
	Addr = crypto:strong_rand_bytes(32),
	ExpectedState = #state{
		packing = unpacked,
		verify_report = #verify_report{
			total_error_bytes = ?DATA_CHUNK_SIZE,
			total_error_chunks = 1,
			error_bytes = #{chunk_storage_gap => ?DATA_CHUNK_SIZE},
			error_chunks = #{chunk_storage_gap => 1}
		}
	},
	?assertEqual(
		ExpectedState,
		verify_chunk_storage(
			0,
			{0, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		ExpectedState,
		verify_chunk_storage(
			ar_block:strict_data_split_threshold() + 1,
			{ar_block:strict_data_split_threshold() + 1, <<>>, <<>>, <<>>, <<>>, <<>>,
				?DATA_CHUNK_SIZE},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		#state{
			packing = {composite, Addr, 1},
			verify_report = #verify_report{
				total_error_bytes = ?DATA_CHUNK_SIZE div 2,
				total_error_chunks = 1,
				error_bytes = #{chunk_storage_gap => ?DATA_CHUNK_SIZE div 2},
				error_chunks = #{chunk_storage_gap => 1}
			}
		},
		verify_chunk_storage(
			ar_block:strict_data_split_threshold() + 1,
			{ar_block:strict_data_split_threshold() + 1, <<>>, <<>>, <<>>, <<>>, <<>>,
				?DATA_CHUNK_SIZE div 2},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = {composite, Addr, 1} })),
	ok.

%% Small chunks unsupported by ar_chunk_storage fall through to
%% verify_chunk_data/2 (mocked to succeed) and pass cleanly.
test_verify_chunk_storage_should_not_store() ->
	ExpectedState = #state{ packing = unpacked },
	?assertEqual(
		ExpectedState,
		verify_chunk_storage(
			0,
			{0, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE div 2},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		ExpectedState,
		verify_chunk_storage(
			ar_block:strict_data_split_threshold() + 1,
			{ar_block:strict_data_split_threshold() + 1, <<>>, <<>>, <<>>, <<>>, <<>>,
				?DATA_CHUNK_SIZE div 2},
			{20*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	ok.

%% A missing data path is reported as read_data_path_error.
test_verify_proof_no_datapath() ->
	ExpectedState1 = #state{
		packing = unpacked,
		verify_report = #verify_report{
			total_error_bytes = ?DATA_CHUNK_SIZE,
			total_error_chunks = 1,
			error_bytes = #{read_data_path_error => ?DATA_CHUNK_SIZE},
			error_chunks = #{read_data_path_error => 1}
		}
	},
	ExpectedState2 = #state{
		packing = unpacked,
		verify_report = #verify_report{
			total_error_bytes = ?DATA_CHUNK_SIZE div 2,
			total_error_chunks = 1,
			error_bytes = #{read_data_path_error => ?DATA_CHUNK_SIZE div 2},
			error_chunks = #{read_data_path_error => 1}
		}
	},
	?assertEqual(
		ExpectedState1,
		verify_proof(
			{10, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		ExpectedState2,
		verify_proof(
			{10, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE div 2},
			#state{ packing = unpacked })),
	ok.

%% Valid merkle paths leave the state untouched.
test_verify_proof_valid_paths() ->
	?assertEqual(
		#state{},
		verify_proof(
			{10, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE},
			#state{})),
	ok.
%% Invalid merkle paths are reported as validate_paths_error.
test_verify_proof_invalid_paths() ->
	ExpectedState1 = #state{
		packing = unpacked,
		verify_report = #verify_report{
			total_error_bytes = ?DATA_CHUNK_SIZE,
			total_error_chunks = 1,
			error_bytes = #{validate_paths_error => ?DATA_CHUNK_SIZE},
			error_chunks = #{validate_paths_error => 1}
		}
	},
	ExpectedState2 = #state{
		packing = unpacked,
		verify_report = #verify_report{
			total_error_bytes = ?DATA_CHUNK_SIZE div 2,
			total_error_chunks = 1,
			error_bytes = #{validate_paths_error => ?DATA_CHUNK_SIZE div 2},
			error_chunks = #{validate_paths_error => 1}
		}
	},
	?assertEqual(
		ExpectedState1,
		verify_proof(
			{10, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE},
			#state{ packing = unpacked })),
	?assertEqual(
		ExpectedState2,
		verify_proof(
			{10, <<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE div 2},
			#state{ packing = unpacked })),
	ok.

%% End-to-end verify_chunk/3: sync-record mocks report no packing info, so
%% both chunks around the strict data split threshold yield
%% missing_packing_info, and the cursor advances to the padded offset + 1.
test_verify_chunk() ->
	PreSplitOffset = ar_block:strict_data_split_threshold() - (?DATA_CHUNK_SIZE div 2),
	PostSplitOffset = ar_block:strict_data_split_threshold() + (?DATA_CHUNK_SIZE div 2),
	IntervalStart = ar_block:strict_data_split_threshold() - ?DATA_CHUNK_SIZE,
	IntervalEnd = ar_block:strict_data_split_threshold() + ?DATA_CHUNK_SIZE,
	Interval = {IntervalEnd, IntervalStart},
	?assertEqual(
		#state{
			cursor = PreSplitOffset + 1,
			packing = unpacked,
			verify_report = #verify_report{
				total_error_bytes = ?DATA_CHUNK_SIZE div 2,
				total_error_chunks = 1,
				error_bytes = #{missing_packing_info => ?DATA_CHUNK_SIZE div 2},
				error_chunks = #{missing_packing_info => 1}
			}
		},
		verify_chunk(
			{ok, <<>>, {PreSplitOffset,
				<<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE div 2}},
			{Interval, not_found},
			#state{packing=unpacked})),
	?assertEqual(
		#state{
			cursor = ar_block:strict_data_split_threshold() + ?DATA_CHUNK_SIZE + 1,
			packing = unpacked,
			verify_report = #verify_report{
				total_error_bytes = ?DATA_CHUNK_SIZE div 2,
				total_error_chunks = 1,
				error_bytes = #{missing_packing_info => ?DATA_CHUNK_SIZE div 2},
				error_chunks = #{missing_packing_info => 1}
			}
		},
		verify_chunk(
			{ok, <<>>, {PostSplitOffset,
				<<>>, <<>>, <<>>, <<>>, <<>>, ?DATA_CHUNK_SIZE div 2}},
			{Interval, not_found},
			#state{packing=unpacked})),
	ExpectedState = #state{
		cursor = 33554432, %% = 2 * 2^24. From ar_data_sync:advance_chunks_index_cursor/1
		packing = unpacked,
		verify_report = #verify_report{
			total_error_bytes = 33554432,
			total_error_chunks = 1,
			error_bytes = #{chunks_index_error => 33554432},
			error_chunks = #{chunks_index_error => 1}
		}
	},
	?assertEqual(
		{error, ExpectedState},
		verify_chunks_index2(
			{error, some_error},
			#state{ cursor = 0, packing = unpacked })),
	ok.

%% Verify that generate_sample_offset/4 samples without replacement.
sample_offsets_loop(Start, End, Count) ->
	%% Compute the number of available unique candidates.
	Candidates = lists:seq(Start + 1, End, ?DATA_CHUNK_SIZE),
	ActualCount = erlang:min(Count, length(Candidates)),
	sample_offsets_loop(Start, End, ActualCount, sets:new()).

sample_offsets_loop(_Start, _End, 0, _SampledSet) ->
	[];
sample_offsets_loop(Start, End, Count, SampledSet) ->
	Offset = generate_sample_offset(Start, End, SampledSet, 100),
	NewSet = sets:add_element(Offset, SampledSet),
	[Offset | sample_offsets_loop(Start, End, Count - 1, NewSet)].

sample_offsets_without_replacement_test() ->
	ChunkSize = ?DATA_CHUNK_SIZE,
	Count = 5,
	%% Use the helper function to generate a list of offsets.
	Offsets = sample_offsets_loop(ChunkSize * 10, ChunkSize * 1000, Count),
	%% Check that exactly Count unique offsets are produced.
	?assertEqual(Count, length(Offsets)),
	%% For every pair, ensure the absolute difference is at least ?DATA_CHUNK_SIZE.
	lists:foreach(fun(A) ->
		lists:foreach(fun(B) ->
			case {A == B, abs(A - B) < ?DATA_CHUNK_SIZE} of
				{true, _} -> ok;
				{false, true} -> ?assert(false);
				_ -> ok
			end
		end, Offsets)
	end, Offsets),
	%% When the available candidates are fewer than Count,
	%% only one unique offset should be returned.
	Offsets2 = sample_offsets_loop(0, ChunkSize, Count),
	?assertEqual(1, length(Offsets2)).

%% Verify sample_chunks/4 aggregates outcomes correctly.
%%
%% We mock ar_data_sync:get_chunk/2 such that:
%% - The first call returns {error, chunk_not_found},
%% - The second call returns {ok, <<"valid_proof">>},
%% - The third call returns {error, invalid_chunk}.
%% Note: Using atoms for partition borders triggers the fallback in generate_sample_offsets/3.
sample_random_chunks_test_() ->
	[
		ar_test_node:test_with_mocked_functions(
			[{ar_data_sync, get_chunk, fun(_Offset, _Opts) ->
				%% Use process dictionary to simulate sequential responses.
				Counter = case erlang:get(sample_counter) of
					undefined -> 0;
					C -> C
				end,
				erlang:put(sample_counter, Counter + 1),
				case Counter of
					0 -> {error, chunk_not_found};
					1 -> {ok, <<"valid_proof">>};
					2 -> {error, invalid_chunk}
				end
			end},
			{ar_sync_record, is_recorded, fun(_, _, _) -> true end}
			],
			fun test_sample_random_chunks/0)
	].

%% One success and two failures out of three mocked samples must be tallied
%% into the #sample_report{} exactly.
test_sample_random_chunks() ->
	%% Initialize counter.
	erlang:put(sample_counter, 0),
	State = #state{
		packing = unpacked,
		start_offset = 0,
		end_offset = ?DATA_CHUNK_SIZE * 10,
		store_id = "test"
	},
	Report = sample_chunks(3, sets:new(), #sample_report{}, State),
	ExpectedReport = #sample_report{total = 3, success = 1, failure = 2},
	?assertEqual(ExpectedReport, Report).



================================================
FILE: apps/arweave/src/ar_verify_chunks_reporter.erl
================================================

%%% Periodically prints verification and chunk-sampling progress reports for
%%% the ar_verify_chunks workers, one table row per storage module.
-module(ar_verify_chunks_reporter).

-behaviour(gen_server).

-export([start_link/0, update/2]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).

-include("ar.hrl").
-include("ar_verify_chunks.hrl").

-include_lib("eunit/include/eunit.hrl").

-record(state, {
	verify_reports = #{} :: #{string() => #verify_report{}},
	sample_reports = #{} :: #{string() => #sample_report{}}
}).

-define(REPORT_PROGRESS_INTERVAL, 10000).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Start the server.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Record the latest verify or sample report for the given storage module.
-spec update(string(), #verify_report{} | #sample_report{}) -> ok.
update(StoreID, Report) ->
	gen_server:cast(?MODULE, {update, StoreID, Report}).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([]) ->
	%% Schedule the first periodic progress printout.
	ar_util:cast_after(?REPORT_PROGRESS_INTERVAL, self(), report_progress),
	{ok, #state{}}.

%% Reports are keyed by StoreID; a newer report simply replaces the older one.
handle_cast({update, StoreID, #verify_report{} = Report}, State) ->
	{noreply, State#state{
		verify_reports = maps:put(StoreID, Report, State#state.verify_reports)
	}};
handle_cast({update, StoreID, #sample_report{} = Report}, State) ->
	{noreply, State#state{
		sample_reports = maps:put(StoreID, Report, State#state.sample_reports)
	}};
handle_cast(report_progress, State) ->
	#state{
		verify_reports = VerifyReports,
		sample_reports = SampleReports
	} = State,
	print_sample_reports(SampleReports),
	print_verify_reports(VerifyReports),
	%% Re-arm the periodic timer.
	ar_util:cast_after(?REPORT_PROGRESS_INTERVAL, self(), report_progress),
	{noreply, State};
% handle_cast({sample_update, StoreID, SampleReport}, State) ->
% 	NewSampleReports = maps:put(StoreID, SampleReport, State#state.sample_reports),
% 	print_sampling_header(),
% 	print_sample_report(StoreID, SampleReport),
% 	{noreply, State#state{sample_reports = NewSampleReports}};
handle_cast(Cast, State) ->
	?LOG_WARNING([{event, unhandled_cast}, {module, ?MODULE}, {cast, Cast}]),
	{noreply, State}.

handle_call(Call, From, State) ->
	?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {call, Call}, {from, From}]),
	{reply, ok, State}.

handle_info(Info, State) ->
	?LOG_WARNING([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.
%% @doc Print the verification progress table, one row per storage module.
%% Prints nothing when there are no reports yet.
print_verify_reports(Reports) when map_size(Reports) == 0 ->
	ok;
print_verify_reports(Reports) ->
	print_verify_header(),
	maps:foreach(
		fun(StoreID, Report) ->
			print_verify_report(StoreID, Report)
		end,
		Reports
	),
	print_verify_footer(),
	ok.

print_verify_header() ->
	ar:console("Verification Report~n", []),
	ar:console("+-------------------------------------------------------------------+-----------+-------+------------+------------+-------------+---------+~n", []),
	ar:console("| Storage Module                                                    | Processed | %     | Errors (#) | Errors (%) | Verify Rate | Status  |~n", []),
	ar:console("+-------------------------------------------------------------------+-----------+-------+------------+------------+-------------+---------+~n", []).

print_verify_footer() ->
	ar:console("+-------------------------------------------------------------------+-----------+-------+------------+------------+-------------+---------+~n~n", []).

%% @doc Print one table row for a storage module.
%% Fix: guard the two divisions — right after a worker starts, BytesProcessed
%% may still be 0 and Duration may be 0 ms, which previously crashed the
%% reporter with badarith.
print_verify_report(StoreID, Report) ->
	#verify_report{
		total_error_chunks = TotalErrorChunks,
		total_error_bytes = TotalErrorBytes,
		bytes_processed = BytesProcessed,
		progress = Progress,
		start_time = StartTime,
		status = Status
	} = Report,
	Duration = erlang:system_time(millisecond) - StartTime,
	%% Avoid division by zero for reports arriving within the first millisecond.
	Rate = 1000 * BytesProcessed / max(Duration, 1),
	ErrorPercent =
		case BytesProcessed of
			0 -> 0.0;
			_ -> (TotalErrorBytes * 100) / BytesProcessed
		end,
	ar:console("| ~65s | ~4B GB | ~4B% | ~10B | ~9.2f% | ~6.1f MB/s | ~7s |~n",
		[
			StoreID,
			BytesProcessed div 1000000000,
			Progress,
			TotalErrorChunks,
			ErrorPercent,
			Rate / 1000000,
			Status
		]
	).

%% @doc Print the sampling progress table; nothing when there are no reports.
print_sample_reports(Reports) when map_size(Reports) == 0 ->
	ok;
print_sample_reports(Reports) ->
	print_sample_header(),
	maps:foreach(
		fun(StoreID, Report) ->
			print_sample_report(StoreID, Report)
		end,
		Reports
	),
	print_sample_footer(),
	ok.
%% @doc Print one sampling row for a storage module.
%% Fix: guard divisions — before the first sample completes Total is 0 (and
%% MaxSamples may be 0), which previously crashed the reporter with badarith.
print_sample_report(StoreID, Report) ->
	#sample_report{
		samples = MaxSamples,
		total = Total,
		success = Success,
		failure = Failure
	} = Report,
	ProcessedPercent =
		case MaxSamples of
			0 -> 0;
			_ -> (Total * 100) div MaxSamples
		end,
	{SuccessPercent, FailurePercent} =
		case Total of
			0 -> {0.0, 0.0};
			_ -> {(Success * 100) / Total, (Failure * 100) / Total}
		end,
	ar:console("| ~65s | ~9B | ~4B% | ~6.1f% | ~6.1f% |~n",
		[
			StoreID,
			Total,
			ProcessedPercent,
			SuccessPercent,
			FailurePercent
		]).

print_sample_header() ->
	ar:console("Chunk Sample Report~n", []),
	ar:console("+-------------------------------------------------------------------+-----------+-------+---------+---------+~n", []),
	ar:console("| Storage Module                                                    | Processed | %     | Success | Error   |~n", []),
	ar:console("+-------------------------------------------------------------------+-----------+-------+---------+---------+~n", []).

print_sample_footer() ->
	ar:console("+-------------------------------------------------------------------+-----------+-------+---------+---------+~n~n", []).



================================================
FILE: apps/arweave/src/ar_verify_chunks_sup.erl
================================================

-module(ar_verify_chunks_sup).

-behaviour(supervisor).

-export([start_link/0]).

-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks.
%% ===================================================================

%% @doc Start one ar_verify_chunks worker per configured storage module plus a
%% single reporter. Returns ignore when the node is not in verify mode.
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	case Config#config.verify of
		false -> ignore;
		_ ->
			Workers = lists:map(
				fun(StorageModule) ->
					StoreID = ar_storage_module:id(StorageModule),
					Name = ar_verify_chunks:name(StoreID),
					?CHILD_WITH_ARGS(ar_verify_chunks, worker, Name, [Name, StoreID])
				end,
				Config#config.storage_modules
			),
			Reporter = ?CHILD(ar_verify_chunks_reporter, worker),
			{ok, {{one_for_one, 5, 10}, [Reporter | Workers]}}
	end.



================================================
FILE: apps/arweave/src/ar_wallet.erl
================================================

%%% @doc Utilities for manipulating wallets.
-module(ar_wallet).

-export([new/0, new_ecdsa/0, new/1, sign/2, verify/3, verify_pre_fork_2_4/3, to_address/1,
		to_address/2, hash_pub_key/1, load_key/1, load_keyfile/1, new_keyfile/0, new_keyfile/1,
		new_keyfile/2, new_keyfile/3,
		base64_address_with_optional_checksum_to_decoded_address/1,
		base64_address_with_optional_checksum_to_decoded_address_safe/1,
		wallet_filepath/1, wallet_filepath/3, get_or_create_wallet/1, recover_key/3]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").

-include_lib("public_key/include/public_key.hrl").
-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Generate a new wallet public key and private key.
new() ->
	new(?DEFAULT_KEY_TYPE).
%% @doc Generate a fresh key pair of the given type. Returns
%% {{KeyType, Priv, Pub}, {KeyType, Pub}} — the full (private) wallet and the
%% public half.
new(KeyType = {KeyAlg, PublicExpnt}) when KeyType =:= {?RSA_SIGN_ALG, 65537} ->
	%% Fix: the original matched the same pattern twice in a chained match
	%% ({[_, Pub], ...} = {[_, Pub], ...} = ...), which is redundant; a single
	%% match extracts the public modulus and private exponent.
	{[_, Pub], [_, Pub, Priv | _]} =
		crypto:generate_key(KeyAlg, {?RSA_PRIV_KEY_SZ, PublicExpnt}),
	{{KeyType, Priv, Pub}, {KeyType, Pub}};
new(KeyType = {KeyAlg, KeyCrv}) when KeyAlg =:= ?ECDSA_SIGN_ALG andalso KeyCrv =:= secp256k1 ->
	{OrigPub, Priv} = crypto:generate_key(ecdh, KeyCrv),
	%% Store the compressed (33-byte) SEC1 form of the public point.
	Pub = compress_ecdsa_pubkey(OrigPub),
	{{KeyType, Priv, Pub}, {KeyType, Pub}};
new(KeyType = {KeyAlg, KeyCrv}) when KeyAlg =:= ?EDDSA_SIGN_ALG andalso KeyCrv =:= ed25519 ->
	{Pub, Priv} = crypto:generate_key(KeyAlg, KeyCrv),
	{{KeyType, Priv, Pub}, {KeyType, Pub}}.

%% @doc Generate a new ECDSA key, store it in a keyfile.
new_ecdsa() ->
	new_keyfile({?ECDSA_SIGN_ALG, secp256k1}).

%% @doc Generate a new wallet public and private key, with a corresponding keyfile.
new_keyfile() ->
	new_keyfile(?DEFAULT_KEY_TYPE, wallet_address).

new_keyfile(KeyType) ->
	new_keyfile(KeyType, wallet_address).

%% @doc Generate a new wallet public and private key, with a corresponding keyfile.
%% The provided key is used as part of the file name.
new_keyfile(KeyType, WalletName) ->
	{ok, Config} = arweave_config:get_env(),
	new_keyfile(KeyType, WalletName, Config#config.data_dir).
%% @doc Generate a key pair of the given type, serialize it as a JWK JSON
%% document and write it atomically under DataDir. Returns the key pair or the
%% file-system error from ensure_dir/write.
new_keyfile(KeyType, WalletName, DataDir) ->
	{Pub, Priv, Key} =
		case KeyType of
			{?RSA_SIGN_ALG, PublicExpnt} ->
				{[Expnt, Pb], [Expnt, Pb, Prv, P1, P2, E1, E2, C]} =
					crypto:generate_key(rsa, {?RSA_PRIV_KEY_SZ, PublicExpnt}),
				Ky =
					ar_serialize:jsonify(
						{
							[
								{kty, <<"RSA">>},
								{ext, true},
								{e, ar_util:encode(Expnt)},
								{n, ar_util:encode(Pb)},
								{d, ar_util:encode(Prv)},
								{p, ar_util:encode(P1)},
								{q, ar_util:encode(P2)},
								{dp, ar_util:encode(E1)},
								{dq, ar_util:encode(E2)},
								{qi, ar_util:encode(C)}
							]
						}
					),
				{Pb, Prv, Ky};
			{?ECDSA_SIGN_ALG, secp256k1} ->
				{OrigPub, Prv} = crypto:generate_key(ecdh, secp256k1),
				%% Drop the SEC1 uncompressed-point marker (0x04).
				<<4:8, PubPoint/binary>> = OrigPub,
				PubPointMid = byte_size(PubPoint) div 2,
				%% Fix (restored pattern lost in extraction): split the point
				%% into the X and Y coordinate byte strings for the JWK fields.
				<<X:PubPointMid/binary, Y:PubPointMid/binary>> = PubPoint,
				Ky =
					ar_serialize:jsonify(
						{
							[
								{kty, <<"EC">>},
								{crv, <<"secp256k1">>},
								{x, ar_util:encode(X)},
								{y, ar_util:encode(Y)},
								{d, ar_util:encode(Prv)}
							]
						}
					),
				{compress_ecdsa_pubkey(OrigPub), Prv, Ky};
			{?EDDSA_SIGN_ALG, ed25519} ->
				{{_, Prv, Pb}, _} = new(KeyType),
				Ky =
					ar_serialize:jsonify(
						{
							[
								{kty, <<"OKP">>},
								{alg, <<"EdDSA">>},
								{crv, <<"Ed25519">>},
								{x, ar_util:encode(Pb)},
								{d, ar_util:encode(Prv)}
							]
						}
					),
				{Pb, Prv, Ky}
		end,
	Filename = wallet_filepath(WalletName, Pub, KeyType, DataDir),
	case filelib:ensure_dir(Filename) of
		ok ->
			case ar_storage:write_file_atomic(Filename, Key) of
				ok ->
					{{KeyType, Priv, Pub}, {KeyType, Pub}};
				Error2 ->
					Error2
			end;
		Error ->
			Error
	end.

wallet_filepath(Wallet) ->
	{ok, Config} = arweave_config:get_env(),
	wallet_filepath(Wallet, Config#config.data_dir).

wallet_filepath(Wallet, DataDir) ->
	Filename = lists:flatten(["arweave_keyfile_", binary_to_list(Wallet), ".json"]),
	filename:join([DataDir, ?WALLET_DIR, Filename]).

%% Legacy file name form without the "arweave_keyfile_" prefix.
wallet_filepath2(Wallet) ->
	{ok, Config} = arweave_config:get_env(),
	Filename = lists:flatten([binary_to_list(Wallet), ".json"]),
	filename:join([Config#config.data_dir, ?WALLET_DIR, Filename]).

%% @doc Read the keyfile for the key with the given address from disk.
%% Return not_found if arweave_keyfile_[addr].json or [addr].json is not found
%% in [data_dir]/?WALLET_DIR.
load_key(Addr) ->
	Path = wallet_filepath(ar_util:encode(Addr)),
	case filelib:is_file(Path) of
		false ->
			%% Fall back to the legacy file name without the prefix.
			Path2 = wallet_filepath2(ar_util:encode(Addr)),
			case filelib:is_file(Path2) of
				false ->
					not_found;
				true ->
					load_keyfile(Path2)
			end;
		true ->
			load_keyfile(Path)
	end.

%% @doc Extract the public and private key from a keyfile.
%% The key type is inferred from the JWK "kty" field: "EC" -> secp256k1,
%% "OKP" -> ed25519, anything else -> RSA with exponent 65537.
load_keyfile(File) ->
	{ok, Body} = file:read_file(File),
	{Key} = ar_serialize:dejsonify(Body),
	{Pub, Priv, KeyType} =
		case lists:keyfind(<<"kty">>, 1, Key) of
			{<<"kty">>, <<"EC">>} ->
				{<<"x">>, XEncoded} = lists:keyfind(<<"x">>, 1, Key),
				{<<"y">>, YEncoded} = lists:keyfind(<<"y">>, 1, Key),
				{<<"d">>, PrivEncoded} = lists:keyfind(<<"d">>, 1, Key),
				%% Rebuild the uncompressed SEC1 point, then compress it —
				%% the in-memory public key is the compressed form.
				OrigPub = iolist_to_binary([<<4:8>>, ar_util:decode(XEncoded),
						ar_util:decode(YEncoded)]),
				Pb = compress_ecdsa_pubkey(OrigPub),
				Prv = ar_util:decode(PrivEncoded),
				KyType = {?ECDSA_SIGN_ALG, secp256k1},
				{Pb, Prv, KyType};
			{<<"kty">>, <<"OKP">>} ->
				{<<"x">>, PubEncoded} = lists:keyfind(<<"x">>, 1, Key),
				{<<"d">>, PrivEncoded} = lists:keyfind(<<"d">>, 1, Key),
				Pb = ar_util:decode(PubEncoded),
				Prv = ar_util:decode(PrivEncoded),
				KyType = {?EDDSA_SIGN_ALG, ed25519},
				{Pb, Prv, KyType};
			_ ->
				{<<"n">>, PubEncoded} = lists:keyfind(<<"n">>, 1, Key),
				{<<"d">>, PrivEncoded} = lists:keyfind(<<"d">>, 1, Key),
				Pb = ar_util:decode(PubEncoded),
				Prv = ar_util:decode(PrivEncoded),
				KyType = {?RSA_SIGN_ALG, 65537},
				{Pb, Prv, KyType}
		end,
	{{KeyType, Priv, Pub}, {KeyType, Pub}}.

%% @doc Sign some data with a private key.
%% Sign Data with the given full key; dispatches on the key type.
sign({{KeyAlg, PublicExpnt}, Priv, Pub}, Data)
		when KeyAlg =:= ?RSA_SIGN_ALG andalso PublicExpnt =:= 65537 ->
	rsa_pss:sign(
		Data,
		sha256,
		#'RSAPrivateKey'{
			publicExponent = PublicExpnt,
			modulus = binary:decode_unsigned(Pub),
			privateExponent = binary:decode_unsigned(Priv)
		}
	);
sign({{KeyAlg, KeyCrv}, Priv, _}, Data)
		when KeyAlg =:= ?ECDSA_SIGN_ALG andalso KeyCrv =:= secp256k1 ->
	secp256k1_nif:sign(Data, Priv);
sign({{KeyAlg, KeyCrv}, Priv, _}, Data)
		when KeyAlg =:= ?EDDSA_SIGN_ALG andalso KeyCrv =:= ed25519 ->
	crypto:sign(
		KeyAlg,
		sha512,
		Data,
		[Priv, KeyCrv]
	).

%%--------------------------------------------------------------------
%% @doc Verify that a signature is correct.
%% @end
%%--------------------------------------------------------------------
-spec verify(PublicKeyInfo, Data, Signature) -> Return when
	PublicKeyInfo :: {{KeyAlgorithm, PublicExponent}, PublicKey},
	KeyAlgorithm :: atom(),
	PublicExponent :: pos_integer() | secp256k1 | ed25519,
	PublicKey :: binary(),
	Signature :: binary(),
	Data :: binary(),
	Return :: boolean().

verify({{KeyAlg, PublicExpnt}, Pub}, Data, Sig)
		when KeyAlg =:= ?RSA_SIGN_ALG andalso PublicExpnt =:= 65537 ->
	try
		rsa_pss:verify(
			Data,
			sha256,
			Sig,
			#'RSAPublicKey'{
				publicExponent = PublicExpnt,
				modulus = binary:decode_unsigned(Pub)
			}
		)
	catch C:R:S ->
		?LOG_ERROR([
			{event, rsa_pss_verify_failed},
			{class, C}, {reason, R}, {stacktrace, S},
			{pub_size, byte_size(Pub)},
			{signature_size, byte_size(Sig)}
		]),
		false
	end;
%% NOTE. We will not write pubkey for ECDSA signature.
%% So don't use verify function for ECDSA, use ecrecover.
%% So this function will return always false if called with no Pub.
%% (Fix: the middle sentence above had lost its "%" prefix in the source,
%% leaving stray tokens between the clauses.)
verify({{KeyAlg, KeyCrv}, Pub}, Data, Sig)
		when KeyAlg =:= ?ECDSA_SIGN_ALG andalso KeyCrv =:= secp256k1 ->
	{Pass, PubExtracted} = secp256k1_nif:ecrecover(Data, Sig),
	Pass andalso PubExtracted =:= Pub;
verify({{KeyAlg, KeyCrv}, Pub}, Data, Sig)
		when KeyAlg =:= ?EDDSA_SIGN_ALG andalso KeyCrv =:= ed25519 ->
	crypto:verify(
		KeyAlg,
		sha512,
		Data,
		Sig,
		[Pub, KeyCrv]
	).

%%--------------------------------------------------------------------
%% @doc Verify that a signature is correct. The function was used to
%% verify transactions until the fork 2.4. It rejects a valid
%% transaction when the key modulus bit size is less than 4096. The
%% new method (verify/3) successfully verifies all the historical
%% transactions so this function is not used anywhere after the fork
%% 2.4.
%% @end
%%--------------------------------------------------------------------
-spec verify_pre_fork_2_4(PublicKeyInfo, Data, Signature) -> Return when
	PublicKeyInfo :: {{KeyAlgorithm, PublicExponent}, PublicKey},
	KeyAlgorithm :: atom(),
	PublicExponent :: pos_integer(),
	PublicKey :: binary(),
	Signature :: binary(),
	Data :: binary(),
	Return :: boolean().

verify_pre_fork_2_4({{KeyAlg, PublicExpnt}, Pub}, Data, Sig)
		when KeyAlg =:= ?RSA_SIGN_ALG andalso PublicExpnt =:= 65537 ->
	try
		rsa_pss:verify_legacy(
			Data,
			sha256,
			Sig,
			#'RSAPublicKey'{
				publicExponent = PublicExpnt,
				modulus = binary:decode_unsigned(Pub)
			}
		)
	catch C:R:S ->
		?LOG_ERROR([
			{event, rsa_pss_verify_legacy_failed},
			{class, C}, {reason, R}, {stacktrace, S},
			{pub_size, byte_size(Pub)},
			{signature_size, byte_size(Sig)}
		]),
		false
	end.

%% @doc Generate an address from a public key.
to_address({{SigType, _Priv, Pub}, {SigType, Pub}}) ->
	to_address(Pub, SigType);
to_address({SigType, Pub}) ->
	to_address(Pub, SigType);
to_address({SigType, _Priv, Pub}) ->
	to_address(Pub, SigType).

%% @doc Generate an address from a public key.
to_address(PubKey, {?RSA_SIGN_ALG, 65537}) when bit_size(PubKey) == 256 ->
	%% Small keys are not secure, nobody is using them, the clause
	%% is for backwards-compatibility.
	PubKey;
to_address(PubKey, _SigType) ->
	hash_pub_key(PubKey).

%% @doc Hash a public key into a wallet address.
hash_pub_key(PubKey) ->
	crypto:hash(?HASH_ALG, PubKey).

%% @doc Decode a base64url address which may carry an optional ":"-separated
%% 6-character checksum suffix. Throws {error, invalid_address} when the
%% decoded address is shorter than 20 or longer than 64 bytes, and
%% {error, invalid_address_checksum} when the checksum does not match.
base64_address_with_optional_checksum_to_decoded_address(AddrBase64) ->
	Size = byte_size(AddrBase64),
	case Size > 7 of
		false ->
			%% Too short to carry a ":" + 6-char checksum; decode as-is.
			ar_util:decode(AddrBase64);
		true ->
			case AddrBase64 of
				<< MainBase64url:(Size - 7)/binary, ":", ChecksumBase64url:6/binary >> ->
					AddrDecoded = ar_util:decode(MainBase64url),
					case byte_size(AddrDecoded) < 20 of
						true -> throw({error, invalid_address});
						false -> ok
					end,
					case byte_size(AddrDecoded) > 64 of
						true -> throw({error, invalid_address});
						false -> ok
					end,
					Checksum = ar_util:decode(ChecksumBase64url),
					case decoded_address_to_checksum(AddrDecoded) =:= Checksum of
						true ->
							AddrDecoded;
						false ->
							throw({error, invalid_address_checksum})
					end;
				_ ->
					%% No checksum present; decode the whole string.
					ar_util:decode(AddrBase64)
			end
	end.

%% @doc Like base64_address_with_optional_checksum_to_decoded_address/1, but
%% returns {ok, Addr} | {error, invalid} instead of throwing/erroring.
base64_address_with_optional_checksum_to_decoded_address_safe(AddrBase64)->
	try
		D = base64_address_with_optional_checksum_to_decoded_address(AddrBase64),
		{ok, D}
	catch _:_ ->
		{error, invalid}
	end.

%% @doc Read a wallet of one of the given types from disk. Files modified later are preferred.
%% If no file is found, create one of the type standing first in the list.
get_or_create_wallet(Types) ->
	{ok, Config} = arweave_config:get_env(),
	WalletDir = filename:join(Config#config.data_dir, ?WALLET_DIR),
	%% Collect keyfiles sorted by last-modified time, newest first.
	Entries = lists:reverse(lists:sort(filelib:fold_files(
		WalletDir,
		"(.*\\.json$)",
		false,
		fun(F, Acc) -> [{filelib:last_modified(F), F} | Acc] end,
		[])
	)),
	get_or_create_wallet(Entries, Types).

get_or_create_wallet([], [Type | _]) ->
	ar_wallet:new_keyfile(Type);
get_or_create_wallet([{_LastModified, F} | Entries], Types) ->
	{{Type, _, _}, _} = W = load_keyfile(F),
	case lists:member(Type, Types) of
		true ->
			W;
		false ->
			get_or_create_wallet(Entries, Types)
	end.
%% @doc Recover the signer's public key from an ECDSA signature via ecrecover.
%% An empty signature yields an empty key.
recover_key(_Data, <<>>, ?ECDSA_KEY_TYPE) ->
	<<>>;
recover_key(Data, Signature, ?ECDSA_KEY_TYPE) ->
	{_Pass, PubKey} = secp256k1_nif:ecrecover(Data, Signature),
	%% Note. if Pass = false, then PubKey will be <<>>
	PubKey.

%%%===================================================================
%%% Private functions.
%%%===================================================================

wallet_filepath(WalletName, PubKey, KeyType) ->
	{ok, Config} = arweave_config:get_env(),
	wallet_filepath(WalletName, PubKey, KeyType, Config#config.data_dir).

wallet_filepath(WalletName, PubKey, KeyType, DataDir) ->
	wallet_filepath(wallet_name(WalletName, PubKey, KeyType), DataDir).

%% The wallet_address atom means "name the file after the derived address".
wallet_name(wallet_address, PubKey, KeyType) ->
	ar_util:encode(to_address(PubKey, KeyType));
wallet_name(WalletName, _, _) ->
	WalletName.

%% @doc CRC32 checksum of a decoded address, as a 4-byte big-endian binary.
decoded_address_to_checksum(AddrDecoded) ->
	Crc = erlang:crc32(AddrDecoded),
	<< Crc:32 >>.

%% @doc Append the base64url-encoded checksum to the base64url address,
%% separated by ":".
decoded_address_to_base64_address_with_checksum(AddrDecoded) ->
	Checksum = decoded_address_to_checksum(AddrDecoded),
	AddrBase64 = ar_util:encode(AddrDecoded),
	ChecksumBase64 = ar_util:encode(Checksum),
	<< AddrBase64/binary, ":", ChecksumBase64/binary >>.

%% @doc Compress an uncompressed SEC1 public point (0x04 || X || Y) into the
%% compressed form (0x02/0x03 || X), the header chosen by the parity of Y.
compress_ecdsa_pubkey(<<4:8, PubPoint/binary>>) ->
	PubPointMid = byte_size(PubPoint) div 2,
	%% Fix (restored pattern lost in extraction): X is the X-coordinate byte
	%% string; Y is read as a PubPointMid-byte integer so its parity can
	%% select the compressed-point header below.
	<<X:PubPointMid/binary, Y:PubPointMid/unit:8>> = PubPoint,
	PubKeyHeader =
		case Y rem 2 of
			0 -> <<2:8>>;
			1 -> <<3:8>>
		end,
	iolist_to_binary([PubKeyHeader, X]).

%%%===================================================================
%%% Tests.
%%%===================================================================

wallet_sign_verify_test() ->
	TestData = <<"TEST DATA">>,
	{Priv, Pub} = new(),
	Signature = sign(Priv, TestData),
	true = verify(Pub, TestData, Signature).

invalid_signature_test() ->
	TestData = <<"TEST DATA">>,
	{Priv, Pub} = new(),
	<< _:32, Signature/binary >> = sign(Priv, TestData),
	false = verify(Pub, TestData, << 0:32, Signature/binary >>).

%% @doc Check generated keyfiles can be retrieved.
generate_keyfile_test() ->
	{Priv, Pub} = new_keyfile(),
	FileName = wallet_filepath(ar_util:encode(to_address(Pub))),
	{Priv, Pub} = load_keyfile(FileName).

%% @doc Exercise the optional-checksum address codec. The binary
%% concatenations below were garbled during extraction ("<>") and have been
%% reconstructed; see the NOTE(review) comments.
checksum_test() ->
	{_, Pub} = new(),
	Addr = to_address(Pub),
	AddrBase64 = ar_util:encode(Addr),
	AddrBase64Wide = decoded_address_to_base64_address_with_checksum(Addr),
	Addr = base64_address_with_optional_checksum_to_decoded_address(AddrBase64Wide),
	Addr = base64_address_with_optional_checksum_to_decoded_address(AddrBase64),
	%% 64 bytes, for future.
	CorrectLongAddress = <<"0123456789012345678901234567890123456789012345678901234567890123">>,
	CorrectCheckSum = decoded_address_to_checksum(CorrectLongAddress),
	CorrectLongAddressBase64 = ar_util:encode(CorrectLongAddress),
	CorrectCheckSumBase64 = ar_util:encode(CorrectCheckSum),
	%% NOTE(review): reconstructed concatenation — a 6-character base64url
	%% checksum after ":" is the valid wide-address form.
	CorrectLongAddressWithChecksumBase64 =
		<<CorrectLongAddressBase64/binary, ":", CorrectCheckSumBase64/binary>>,
	case catch base64_address_with_optional_checksum_to_decoded_address(
			CorrectLongAddressWithChecksumBase64) of
		{error, _} ->
			throw({error, correct_long_address_should_bypass});
		_ ->
			ok
	end,
	%% 65 bytes.
	InvalidLongAddress = <<"01234567890123456789012345678901234567890123456789012345678901234">>,
	InvalidLongAddressBase64 = ar_util:encode(InvalidLongAddress),
	%% NOTE(review): reconstructed — the suffix after ":" is deliberately not a
	%% 6-character checksum, so the parser falls through to plain decoding of
	%% the whole string and the embedded ":" makes ar_util:decode/1 raise,
	%% matching the expected {'EXIT', _}. TODO confirm against upstream.
	case catch base64_address_with_optional_checksum_to_decoded_address(
			<<InvalidLongAddressBase64/binary, ":", InvalidLongAddressBase64/binary>>) of
		{'EXIT', _} -> ok
	end,
	%% 100 bytes.
	InvalidLongAddress2 = <<"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789">>,
	InvalidLongAddress2Base64 = ar_util:encode(InvalidLongAddress2),
	%% NOTE(review): reconstructed, same fall-through-to-decode rationale.
	case catch base64_address_with_optional_checksum_to_decoded_address(
			<<InvalidLongAddress2Base64/binary, ":", InvalidLongAddress2Base64/binary>>) of
		{'EXIT', _} -> ok
	end,
	%% 10 bytes
	InvalidShortAddress = <<"0123456789">>,
	InvalidShortAddressBase64 = ar_util:encode(InvalidShortAddress),
	%% NOTE(review): reconstructed, same fall-through-to-decode rationale.
	case catch base64_address_with_optional_checksum_to_decoded_address(
			<<InvalidShortAddressBase64/binary, ":", InvalidShortAddressBase64/binary>>) of
		{'EXIT', _} -> ok
	end,
	InvalidChecksum = ar_util:encode(<< 0:32 >>),
	case catch base64_address_with_optional_checksum_to_decoded_address(
			<< AddrBase64/binary, ":", InvalidChecksum/binary >>) of
		{error, invalid_address_checksum} -> ok
	end,
	case catch base64_address_with_optional_checksum_to_decoded_address(<<":MDA">>) of
		{'EXIT', _} -> ok
	end.



================================================
FILE: apps/arweave/src/ar_wallets.erl
================================================

%%% @doc The module manages the states of wallets (their balances and last transactions)
%%% in different blocks. Since wallet lists are huge, only one copy is stored at any time,
%%% along with the small "diffs", which allow to reconstruct the wallet lists of the previous,
%%% following, and uncle blocks.
-module(ar_wallets).

-export([start_link/1, get/1, get/2, get_chunk/2, get_balance/1, get_balance/2,
		get_last_tx/1, apply_block/2, add_wallets/4, set_current/3, get_size/0]).

-export([init/1, handle_call/3, handle_cast/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_wallets.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

start_link(Args) ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, Args, []).

%% @doc Return the map mapping the given addresses to the corresponding wallets
%% from the latest wallet tree.
get(Address) when is_binary(Address) ->
	ar_wallets:get([Address]);
get(Addresses) ->
	gen_server:call(?MODULE, {get, Addresses}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return the map mapping the given addresses to the corresponding wallets
%% from the wallet tree with the given root hash.
get(RootHash, Address) when is_binary(Address) ->
	get(RootHash, [Address]);
get(RootHash, Addresses) ->
	gen_server:call(?MODULE, {get, RootHash, Addresses}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return the map containing the wallets, up to ?WALLET_LIST_CHUNK_SIZE, starting
%% from the given cursor (first or an address). The wallets are picked in the ascending
%% alphabetical order, from the tree with the given root hash.
get_chunk(RootHash, Cursor) ->
	gen_server:call(?MODULE, {get_chunk, RootHash, Cursor}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return balance of the given wallet in the latest wallet tree.
get_balance(Address) ->
	gen_server:call(?MODULE, {get_balance, Address}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return balance of the given wallet in the given wallet tree.
get_balance(RootHash, Address) ->
	gen_server:call(?MODULE, {get_balance, RootHash, Address}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return the anchor (last_tx) of the given wallet in the latest wallet tree.
get_last_tx(Address) ->
	gen_server:call(?MODULE, {get_last_tx, Address}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Compute and cache the account tree for the given new block and its previous block.
apply_block(B, PrevB) ->
	gen_server:call(?MODULE, {apply_block, B, PrevB}, ?DEFAULT_CALL_TIMEOUT).

%% @doc Cache the wallets to be upserted into the tree with the given root hash. Return
%% the root hash of the new wallet tree.
add_wallets(RootHash, Wallets, Height, Denomination) ->
	gen_server:call(?MODULE, {add_wallets, RootHash, Wallets, Height, Denomination},
			?DEFAULT_CALL_TIMEOUT).

%% @doc Make the wallet tree with the given root hash "the current tree". The current tree
%% is used by get/1, get_balance/1, and get_last_tx/1.
set_current(RootHash, Height, PruneDepth) when is_binary(RootHash) ->
	Call = {set_current, RootHash, Height, PruneDepth},
	gen_server:call(?MODULE, Call, ?DEFAULT_CALL_TIMEOUT).

%% @doc Return the number of accounts in the latest state.
get_size() ->
	gen_server:call(?MODULE, get_size, ?DEFAULT_CALL_TIMEOUT).

%%%===================================================================
%%% Generic server callbacks.
%%%===================================================================

init([{blocks, []} | _]) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	DAG = ar_diff_dag:new(<<>>, ar_patricia_tree:new(), not_set),
	ar_node_worker ! wallets_ready,
	{ok, DAG};
init([{blocks, Blocks} | Args]) ->
	%% Trap exit to avoid corrupting any open files on quit.
	process_flag(trap_exit, true),
	%% Defer the (potentially slow) state initialization to a cast so that
	%% start_link returns promptly.
	gen_server:cast(?MODULE, {init, Blocks, Args}),
	DAG = ar_diff_dag:new(<<>>, ar_patricia_tree:new(), not_set),
	{ok, DAG}.

handle_call({get, Addresses}, _From, DAG) ->
	{reply, get_map(ar_diff_dag:get_sink(DAG), Addresses), DAG};

handle_call({get, RootHash, Addresses}, _From, DAG) ->
	%% Rebuild the tree at RootHash by replaying diffs from the sink.
	case ar_diff_dag:reconstruct(DAG, RootHash, fun apply_diff/2) of
		{error, _} = Error ->
			{reply, Error, DAG};
		Tree ->
			{reply, get_map(Tree, Addresses), DAG}
	end;

handle_call({get_chunk, RootHash, Cursor}, _From, DAG) ->
	case ar_diff_dag:reconstruct(DAG, RootHash, fun apply_diff/2) of
		{error, not_found} ->
			{reply, {error, root_hash_not_found}, DAG};
		Tree ->
			{NextCursor, Range} = get_account_tree_range(Tree, Cursor),
			{reply, {ok, {NextCursor, Range}}, DAG}
	end;

handle_call(get_size, _From, DAG) ->
	{reply, ar_patricia_tree:size(ar_diff_dag:get_sink(DAG)), DAG};

handle_call({get_balance, Address}, _From, DAG) ->
	case ar_patricia_tree:get(Address, ar_diff_dag:get_sink(DAG)) of
		not_found ->
			{reply, 0, DAG};
		Entry ->
			%% Entries may carry their own denomination (4-tuple) or default
			%% to denomination 1 (2-tuple); normalize to the sink denomination.
			Denomination = ar_diff_dag:get_sink_metadata(DAG),
			case Entry of
				{Balance, _LastTX} ->
					{reply, ar_pricing:redenominate(Balance, 1, Denomination), DAG};
				{Balance, _LastTX, BaseDenomination, _MiningPermission} ->
					{reply, ar_pricing:redenominate(Balance, BaseDenomination,
							Denomination), DAG}
			end
	end;

handle_call({get_balance, RootHash, Address}, _From, DAG) ->
	case ar_diff_dag:reconstruct(DAG, RootHash, fun apply_diff/2) of
		{error, _} = Error ->
			{reply, Error, DAG};
		Tree ->
			case ar_patricia_tree:get(Address, Tree) of
				not_found ->
					{reply, 0, DAG};
				Entry ->
					Denomination = ar_diff_dag:get_metadata(DAG, RootHash),
					case Entry of
						{Balance, _LastTX} ->
							{reply, ar_pricing:redenominate(Balance, 1,
									Denomination), DAG};
						{Balance, _LastTX, BaseDenomination, _MiningPermission} ->
							{reply, ar_pricing:redenominate(Balance,
									BaseDenomination, Denomination), DAG}
					end
			end
	end;

handle_call({get_last_tx, Address}, _From, DAG) ->
	{reply,
		case ar_patricia_tree:get(Address, ar_diff_dag:get_sink(DAG)) of
			not_found ->
				<<>>;
			{_Balance, LastTX} ->
				LastTX;
			{_Balance, LastTX, _Denomination, _MiningPermission} ->
				LastTX
		end,
	DAG};

handle_call({apply_block, B, PrevB}, _From, DAG) ->
	{Reply, DAG2} = apply_block(B, PrevB, DAG),
	{reply, Reply, DAG2};

handle_call({add_wallets, RootHash, Wallets, Height, Denomination}, _From, DAG) ->
	Tree = ar_diff_dag:reconstruct(DAG, RootHash, fun apply_diff/2),
	RootHash2 = compute_hash(Tree, Wallets, Height),
	DAG2 = maybe_add_node(DAG, RootHash2, RootHash, Wallets, Denomination),
	{reply, {ok, RootHash2}, DAG2};

handle_call({set_current, RootHash, Height, PruneDepth}, _, DAG) ->
	{reply, ok, set_current(DAG, RootHash, Height, PruneDepth)}.
%% Deferred initialization: either download the account tree from trusted
%% peers (from_peers) or look it up on disk (from_state with a search depth).
handle_cast({init, Blocks, Args}, _) ->
	case proplists:get_value(from_state, Args) of
		undefined ->
			Peers = proplists:get_value(from_peers, Args),
			%% Anchor on the block at the consensus window boundary, or the
			%% oldest block when fewer are available.
			B =
				case length(Blocks) >= ar_block:get_consensus_window_size() of
					true ->
						lists:nth(ar_block:get_consensus_window_size(), Blocks);
					false ->
						lists:last(Blocks)
				end,
			Tree = get_tree_from_peers(B, Peers),
			initialize_state(Blocks, Tree);
		SearchDepth ->
			?LOG_DEBUG([{event, init_from_state}, {block_count, length(Blocks)}]),
			CustomDir = proplists:get_value(custom_dir, Args, not_set),
			case find_local_account_tree(Blocks, SearchDepth, CustomDir) of
				not_found ->
					ar:console("~n~n\tThe local state is missing an account tree, "
							"consider joining the network via the trusted peers.~n"),
					timer:sleep(1000),
					init:stop(1);
				{Skipped, Tree} ->
					%% Drop the blocks whose trees could not be read; resume
					%% from the first block a tree was found for.
					Blocks2 = lists:nthtail(Skipped, Blocks),
					initialize_state(Blocks2, Tree)
			end
	end;
handle_cast(Msg, DAG) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, DAG}.

terminate(Reason, _State) ->
	?LOG_INFO([{event, ar_wallets_terminated}, {reason, Reason}]).

%%%===================================================================
%%% Private functions.
%%%===================================================================

find_local_account_tree(Blocks, SearchDepth, CustomDir) ->
	find_local_account_tree(Blocks, SearchDepth, 0, CustomDir).

%% Stop once SearchDepth blocks have been skipped without finding a tree.
find_local_account_tree(_Blocks, Skipped, Skipped, _CustomDir) ->
	not_found;
find_local_account_tree(Blocks, SearchDepth, Skipped, CustomDir) ->
	{IsLast, B} =
		case length(Blocks) >= ar_block:get_consensus_window_size() of
			true ->
				{false, lists:nth(ar_block:get_consensus_window_size(), Blocks)};
			false ->
				{true, lists:last(Blocks)}
		end,
	ID = B#block.wallet_list,
	case ar_storage:read_wallet_list(ID, CustomDir) of
		{ok, Tree} ->
			{Skipped, Tree};
		_ ->
			case IsLast of
				true ->
					not_found;
				false ->
					find_local_account_tree(tl(Blocks), SearchDepth, Skipped + 1,
							CustomDir)
			end
	end.
%% Build the diff DAG by hashing the anchor tree and then re-applying every
%% newer block on top of it, oldest first. The RootHash = B#block.wallet_list
%% matches assert that the recomputed hashes agree with the chain.
initialize_state(Blocks, Tree) ->
	InitialDepth = ar_block:get_consensus_window_size(),
	{DAG3, LastB} =
		lists:foldl(
			fun	(B, start) ->
					{RootHash, UpdatedTree, UpdateMap} = ar_block:hash_wallet_list(Tree),
					gen_server:cast(ar_storage,
							{store_account_tree_update, B#block.height, RootHash,
									UpdateMap}),
					%% Assert the computed root hash matches the block header.
					RootHash = B#block.wallet_list,
					DAG = ar_diff_dag:new(RootHash, UpdatedTree, B#block.denomination),
					{DAG, B};
				(B, {DAG, PrevB}) ->
					ExpectedRootHash = B#block.wallet_list,
					{{ok, ExpectedRootHash}, DAG2} = apply_block(B, PrevB, DAG),
					{DAG2, B}
			end,
			start,
			lists:reverse(lists:sublist(Blocks, InitialDepth))
		),
	WalletList = LastB#block.wallet_list,
	LastHeight = LastB#block.height,
	DAG4 = set_current(DAG3, WalletList, LastHeight, InitialDepth),
	ar_events:send(node_state, {account_tree_initialized, LastB#block.height}),
	{noreply, DAG4}.

%% Download the full wallet tree for block B from the given peers, chunk by
%% chunk, retrying forever on failure.
get_tree_from_peers(B, Peers) ->
	ID = B#block.wallet_list,
	ar:console("Downloading the wallet tree, chunk 1.~n", []),
	case ar_http_iface_client:get_wallet_list_chunk(Peers, ID) of
		{ok, {Cursor, Chunk}} ->
			{ok, Tree} = load_wallet_tree_from_peers(
				ID,
				Peers,
				ar_patricia_tree:from_proplist(Chunk),
				Cursor,
				2
			),
			ar:console("Downloaded the wallet tree successfully.~n", []),
			Tree;
		_ ->
			ar:console("Failed to download wallet tree chunk, retrying...~n", []),
			timer:sleep(1000),
			get_tree_from_peers(B, Peers)
	end.

%% The cursor becomes the atom last when the final chunk has been fetched.
load_wallet_tree_from_peers(_ID, _Peers, Acc, last, _) ->
	{ok, Acc};
load_wallet_tree_from_peers(ID, Peers, Acc, Cursor, N) ->
	ar_util:terminal_clear(),
	ar:console("Downloading the wallet tree, chunk ~B.~n", [N]),
	case ar_http_iface_client:get_wallet_list_chunk(Peers, ID, Cursor) of
		{ok, {NextCursor, Chunk}} ->
			Acc3 = lists:foldl(
				fun({K, V}, Acc2) ->
					ar_patricia_tree:insert(K, V, Acc2)
				end,
				Acc,
				Chunk
			),
			load_wallet_tree_from_peers(ID, Peers, Acc3, NextCursor, N + 1);
		_ ->
			ar:console("Failed to download wallet tree chunk, retrying...~n", []),
			timer:sleep(1000),
			load_wallet_tree_from_peers(ID, Peers, Acc, Cursor, N)
	end.
%% @doc Apply the account changes of block B on top of PrevB and record the
%% new account tree root in the DAG. Return {{ok, RootHash}, DAG2} or
%% {{error, Reason}, DAG}.
apply_block(B, PrevB, DAG) ->
	Denomination2 = B#block.denomination,
	RedenominationHeight2 = B#block.redenomination_height,
	%% The block must carry exactly the denomination scheduled by the protocol.
	case ar_pricing:may_be_redenominate(PrevB) of
		{Denomination2, RedenominationHeight2} ->
			apply_block2(B, PrevB, DAG);
		_ ->
			{{error, invalid_denomination}, DAG}
	end.

apply_block2(B, PrevB, DAG) ->
	RootHash = PrevB#block.wallet_list,
	%% Reconstruct the account tree as of PrevB by replaying DAG diffs.
	Tree = ar_diff_dag:reconstruct(DAG, RootHash, fun apply_diff/2),
	TXs = B#block.txs,
	RewardAddr = B#block.reward_addr,
	%% Collect every address the block may touch: the miner, the transaction
	%% parties, the oldest locked reward recipient, and, if present, the
	%% double-signing offender.
	Addresses = [RewardAddr | ar_tx:get_addresses(TXs)],
	Addresses2 = [ar_rewards:get_oldest_locked_address(PrevB) | Addresses],
	Addresses3 =
		case B#block.double_signing_proof of
			undefined ->
				Addresses2;
			Proof ->
				[ar_wallet:hash_pub_key(element(1, Proof)) | Addresses2]
		end,
	Accounts = get_map(Tree, Addresses3),
	case ar_node_utils:update_accounts(B, PrevB, Accounts) of
		{ok, Args} ->
			apply_block2(B, PrevB, Args, Tree, DAG);
		Error ->
			{Error, DAG}
	end.

%% @doc Validate the reward- and supply-related fields of B against the
%% freshly computed values (re-denominated where needed), then apply the
%% account diff and check the resulting tree root. The checks are ordered:
%% the first failing field determines the returned error.
apply_block2(B, PrevB, Args, Tree, DAG) ->
	{EndowmentPool, MinerReward, DebtSupply, KryderPlusRateMultiplierLatch,
			KryderPlusRateMultiplier, Accounts} = Args,
	Denomination = PrevB#block.denomination,
	Denomination2 = B#block.denomination,
	EndowmentPool2 = ar_pricing:redenominate(EndowmentPool, Denomination,
			Denomination2),
	MinerReward2 = ar_pricing:redenominate(MinerReward, Denomination, Denomination2),
	DebtSupply2 = ar_pricing:redenominate(DebtSupply, Denomination, Denomination2),
	%% The last tuple element gates the post-2.6 checks.
	case {B#block.reward_pool == EndowmentPool2, B#block.reward == MinerReward2,
			B#block.debt_supply == DebtSupply2,
			B#block.kryder_plus_rate_multiplier_latch == KryderPlusRateMultiplierLatch,
			B#block.kryder_plus_rate_multiplier == KryderPlusRateMultiplier,
			B#block.height >= ar_fork:height_2_6()} of
		{false, _, _, _, _, _} ->
			{{error, invalid_reward_pool}, DAG};
		{true, false, _, _, _, true} ->
			{{error, invalid_miner_reward}, DAG};
		{true, true, false, _, _, true} ->
			{{error, invalid_debt_supply}, DAG};
		{true, true, true, false, _, true} ->
			{{error, invalid_kryder_plus_rate_multiplier_latch}, DAG};
		{true, true, true, true, false, true} ->
			{{error, invalid_kryder_plus_rate_multiplier}, DAG};
		_ ->
			Tree2 = apply_diff(Accounts, Tree),
			{RootHash2, _, UpdateMap} = ar_block:hash_wallet_list(Tree2),
			case B#block.wallet_list == RootHash2 of
				true ->
					RootHash = PrevB#block.wallet_list,
					DAG2 = maybe_add_node(DAG, RootHash2, RootHash, Accounts,
							Denomination2),
					gen_server:cast(ar_storage, {store_account_tree_update,
							B#block.height, RootHash2, UpdateMap}),
					{{ok, RootHash2}, DAG2};
				false ->
					{{error, invalid_wallet_list}, DAG}
			end
	end.

%% @doc Move the DAG sink to RootHash, re-hash the corresponding tree (storing
%% the update), and prune the DAG down to PruneDepth.
set_current(DAG, RootHash, Height, PruneDepth) ->
	UpdatedDAG = ar_diff_dag:update_sink(
		ar_diff_dag:move_sink(DAG, RootHash, fun apply_diff/2, fun reverse_diff/2),
		RootHash,
		fun(Tree, Meta) ->
			%% Assert the re-hashed tree has the expected root hash.
			{RootHash, UpdatedTree, UpdateMap} = ar_block:hash_wallet_list(Tree),
			gen_server:cast(ar_storage, {store_account_tree_update, Height,
					RootHash, UpdateMap}),
			{RootHash, UpdatedTree, Meta}
		end
	),
	Tree = ar_diff_dag:get_sink(UpdatedDAG),
	true = Height >= ar_fork:height_2_2(),
	prometheus_counter:inc(wallet_list_size, ar_patricia_tree:size(Tree)),
	ar_diff_dag:filter(UpdatedDAG, PruneDepth).

%% @doc Apply a map of account updates (Diff) to the patricia Tree. The value
%% remove deletes the account; tuple values overwrite it.
apply_diff(Diff, Tree) ->
	maps:fold(
		fun	(Addr, remove, Acc) ->
				ar_patricia_tree:delete(Addr, Acc);
			(Addr, {Balance, LastTX}, Acc) ->
				ar_patricia_tree:insert(Addr, {Balance, LastTX}, Acc);
			(Addr, {Balance, LastTX, Denomination, MiningPermission}, Acc) ->
				ar_patricia_tree:insert(Addr,
						{Balance, LastTX, Denomination, MiningPermission}, Acc)
		end,
		Tree,
		Diff
	).

%% @doc Compute the diff which would undo Diff, given the pre-Diff Tree:
%% addresses absent from Tree map to remove, others to their previous values.
reverse_diff(Diff, Tree) ->
	maps:map(
		fun(Addr, _Value) ->
			case ar_patricia_tree:get(Addr, Tree) of
				not_found ->
					remove;
				Value ->
					Value
			end
		end,
		Diff
	).

%% @doc Return a map of the given addresses to their values in Tree, skipping
%% addresses not present in the tree.
get_map(Tree, Addresses) ->
	lists:foldl(
		fun(Addr, Acc) ->
			case ar_patricia_tree:get(Addr, Tree) of
				not_found ->
					Acc;
				Value ->
					maps:put(Addr, Value, Acc)
			end
		end,
		#{},
		Addresses
	).
%% @doc Return {NextCursor, Range} with up to ?WALLET_LIST_CHUNK_SIZE accounts
%% starting from Cursor (or from the beginning when Cursor == first).
%% NextCursor is the atom last when no accounts follow the returned range.
get_account_tree_range(Tree, Cursor) ->
	Range =
		case Cursor of
			first ->
				ar_patricia_tree:get_range(?WALLET_LIST_CHUNK_SIZE + 1, Tree);
			_ ->
				ar_patricia_tree:get_range(Cursor, ?WALLET_LIST_CHUNK_SIZE + 1, Tree)
		end,
	%% One extra element is fetched; when present, its key is the next cursor.
	case length(Range) of
		?WALLET_LIST_CHUNK_SIZE + 1 ->
			{element(1, hd(Range)), tl(Range)};
		_ ->
			{last, Range}
	end.

%% @doc Apply Diff to Tree and return the resulting root hash.
compute_hash(Tree, Diff, Height) ->
	Tree2 = apply_diff(Diff, Tree),
	true = Height >= ar_fork:height_2_2(),
	element(1, ar_block:hash_wallet_list(Tree2)).

maybe_add_node(DAG, RootHash, RootHash, _Wallets, _Metadata) ->
	%% The wallet list has not changed - there are no transactions
	%% and the miner did not claim the reward.
	DAG;
maybe_add_node(DAG, UpdatedRootHash, RootHash, Wallets, Metadata) ->
	case ar_diff_dag:is_node(DAG, UpdatedRootHash) of
		true ->
			%% The new wallet list is already known from a different fork.
			DAG;
		false ->
			ar_diff_dag:add_node(DAG, UpdatedRootHash, RootHash, Wallets, Metadata)
	end.



================================================
FILE: apps/arweave/src/ar_watchdog.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

%%% @doc Watchdog process. Logs the information about mined blocks or missing external blocks.
-module(ar_watchdog).

-behaviour(gen_server).

-export([start_link/0, started_hashing/0, block_received_n_confirmations/2,
		mined_block/3, is_mined_block/1, block_orphaned/2]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% mined_blocks: map of Height => BH for blocks mined by this node.
%% miner_logging: whether miner-related console logging is enabled.
-record(state, {
	mined_blocks,
	miner_logging = false
}).
%%%===================================================================
%%% API
%%%===================================================================

%% @doc Report that the miner started hashing.
started_hashing() ->
	gen_server:cast(?MODULE, started_hashing).

%% @doc Report that block BH at Height received enough confirmations.
block_received_n_confirmations(BH, Height) ->
	gen_server:cast(?MODULE, {block_received_n_confirmations, BH, Height}).

%% @doc Report that block BH at Height was orphaned.
block_orphaned(BH, Height) ->
	gen_server:cast(?MODULE, {block_orphaned, BH, Height}).

%% @doc Report a candidate block produced by this node.
mined_block(BH, Height, PrevH) ->
	gen_server:cast(?MODULE, {mined_block, BH, Height, PrevH}).

%% @doc Return true when the given block was mined by this node.
is_mined_block(Block) ->
	gen_server:call(?MODULE, {is_mined_block, Block#block.indep_hash},
			?DEFAULT_CALL_TIMEOUT).

%%--------------------------------------------------------------------
%% @doc
%% Starts the server
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%% gen_server callbacks
%%%===================================================================

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server
%%
%% @spec init(Args) -> {ok, State} |
%%                     {ok, State, Timeout} |
%%                     ignore |
%%                     {stop, Reason}
%% @end
%%--------------------------------------------------------------------
init([]) ->
	process_flag(trap_exit, true),
	{ok, Config} = arweave_config:get_env(),
	%% Miner logging is on unless explicitly disabled in the configuration.
	MinerLogging = not lists:member(miner_logging, Config#config.disable),
	State = #state{
		mined_blocks = maps:new(),
		miner_logging = MinerLogging
	},
	{ok, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%%
%% @spec handle_call(Request, From, State) ->
%%                                   {reply, Reply, State} |
%%                                   {reply, Reply, State, Timeout} |
%%                                   {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, Reply, State} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
handle_call({is_mined_block, H}, _From, State) ->
	%% mined_blocks maps Height => BH, so look among the values.
	{reply, lists:member(H, maps:values(State#state.mined_blocks)), State};
handle_call(Request, _From, State) ->
	?LOG_ERROR([{event, unhandled_call}, {request, Request}]),
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling cast messages
%%
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%%                                  {noreply, State, Timeout} |
%%                                  {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
handle_cast(started_hashing, State) when State#state.miner_logging == true ->
	Message = "Starting to hash.",
	?LOG_INFO([{event, starting_to_hash}]),
	ar:console("~s~n", [Message]),
	{noreply, State};
handle_cast(started_hashing, State) ->
	{noreply, State};
%% A block at Height got enough confirmations. If it is a block we mined,
%% report it as found; if the confirmed block at that height differs from the
%% one we mined, ours was orphaned.
handle_cast({block_received_n_confirmations, BH, Height}, State) ->
	MinedBlocks = State#state.mined_blocks,
	UpdatedMinedBlocks = case maps:take(Height, MinedBlocks) of
		{BH, Map} ->
			ar_events:send(solution, {confirmed, #{ indep_hash => BH,
					confirmations => 10 }}),
			%% Count the found block exactly once regardless of the logging
			%% setting. (Previously ar_mining_stats:block_found/0 was called a
			%% second time inside the logging branch, double-counting found
			%% blocks when miner logging was enabled.)
			ar_mining_stats:block_found(),
			case State#state.miner_logging of
				true ->
					Message = io_lib:format("Your block ~s was accepted by the network!",
							[ar_util:encode(BH)]),
					?LOG_INFO([{event, block_got_10_confirmations},
							{block, ar_util:encode(BH)}]),
					ar:console("~s~n", [Message]);
				_ ->
					ok
			end,
			Map;
		{_BH, _Map} ->
			%% The mined block was orphaned.
			ar_mining_stats:block_mined_but_orphaned(),
			MinedBlocks;
		error ->
			MinedBlocks
	end,
	{noreply, State#state{ mined_blocks = UpdatedMinedBlocks }};
handle_cast({block_orphaned, BH, Height}, State) ->
	MinedBlocks = State#state.mined_blocks,
	UpdatedMinedBlocks = case maps:take(Height, MinedBlocks) of
		{BH, Map} ->
			ar_events:send(solution, {orphaned, #{ indep_hash => BH }}),
			case State#state.miner_logging of
				true ->
					Message = io_lib:format("Your block ~s was orphaned.",
							[ar_util:encode(BH)]),
					?LOG_INFO([{event, mined_block_orphaned},
							{block, ar_util:encode(BH)}]),
					ar:console("~s~n", [Message]),
					Map;
				_ ->
					Map
			end;
		{_OtherBH, _Map} ->
			%% The orphaned block at this height is not the one we mined; keep
			%% our record. (Previously this case was unhandled and crashed the
			%% watchdog with a case_clause error.)
			MinedBlocks;
		error ->
			MinedBlocks
	end,
	{noreply, State#state{ mined_blocks = UpdatedMinedBlocks }};
handle_cast({mined_block, BH, Height, PrevH}, State) ->
	case State#state.miner_logging of
		true ->
			Message = io_lib:format("Produced candidate block ~s (height ~B, previous block ~s).",
					[ar_util:encode(BH), Height, ar_util:encode(PrevH)]),
			?LOG_INFO([{event, mined_block}, {block, ar_util:encode(BH)},
					{height, Height}, {previous_block, ar_util:encode(PrevH)}]),
			ar:console("~s~n", [Message]);
		_ ->
			ok
	end,
	%% Remember only the first block we mined at a given height.
	MinedBlocks = case maps:is_key(Height, State#state.mined_blocks) of
		false ->
			maps:put(Height, BH, State#state.mined_blocks);
		_ ->
			State#state.mined_blocks
	end,
	{noreply, State#state{ mined_blocks = MinedBlocks } };
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {message, Msg}]),
	{noreply, State}.

handle_info({'EXIT', _Pid, normal}, State) ->
	%% Gun sets monitors on the spawned processes, so thats the reason why we
	%% catch them here.
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {message, Info}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up.
%% When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%%
%% @spec terminate(Reason, State) -> void()
%% @end
%%--------------------------------------------------------------------
terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.



================================================
FILE: apps/arweave/src/ar_weave.erl
================================================
-module(ar_weave).

-export([init/0, init/1, init/2, init/3, create_mainnet_genesis_txs/0,
		generate_data/3, add_mainnet_v1_genesis_txs/0]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_pricing.hrl").

-include_lib("eunit/include/eunit.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Create a genesis block. The genesis block includes one transaction with
%% at least one small chunk and the total data size equal to ar_block:strict_data_split_threshold(),
%% to test the code branches dealing with small chunks placed before the threshold.
init() ->
	init([]).

%% @doc Create a genesis block with the given accounts. One system account is added to the
%% list - we use it to sign a transaction included in the genesis block.
init(WalletList) ->
	init(WalletList, 1).

%% @doc Create a genesis block with the given accounts and the default data size.
init(WalletList, Diff) ->
	Size = 3 * ?DATA_CHUNK_SIZE, % Matches ?STRICT_DATA_SPLIT_THRESHOLD in tests.
	init(WalletList, Diff, Size).

%% Reject unreasonably large genesis data sizes.
init(_WalletList, _Diff, GenesisDataSize) when GenesisDataSize > (4 * ?GiB) ->
	erlang:error({size_exceeds_limit, "GenesisDataSize exceeds 4 GiB"});
%% @doc Create a genesis block with the given accounts and difficulty.
init(WalletList, Diff, GenesisDataSize) ->
	%% Create a keyfile-backed key used to sign the genesis transaction.
	{{_, _, _}, {_, _}} = Key = ar_wallet:new_keyfile(),
	TX = create_genesis_tx(Key, GenesisDataSize),
	WalletList2 = WalletList ++ [{ar_wallet:to_address(Key), 0, TX#tx.id}],
	TXs = [TX],
	AccountTree = ar_patricia_tree:from_proplist(
			[{A, {B, LTX}} || {A, B, LTX} <- WalletList2]),
	WLH = element(1, ar_block:hash_wallet_list(AccountTree)),
	SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs, 0),
	%% The end offset of the last transaction is the total block data size.
	BlockSize =
		case SizeTaggedTXs of
			[] ->
				0;
			_ ->
				element(2, lists:last(SizeTaggedTXs))
		end,
	SizeTaggedDataRoots = [{Root, Offset} || {{_, Root}, Offset} <- SizeTaggedTXs],
	{TXRoot, _Tree} = ar_merkle:generate_tree(SizeTaggedDataRoots),
	Timestamp = os:system_time(second),
	B0 = #block{
		nonce = <<>>,
		txs = TXs,
		tx_root = TXRoot,
		wallet_list = WLH,
		diff = Diff,
		cumulative_diff = ar_difficulty:next_cumulative_diff(0, Diff, 0),
		weave_size = BlockSize,
		block_size = BlockSize,
		reward_pool = 0,
		timestamp = Timestamp,
		last_retarget = Timestamp,
		size_tagged_txs = SizeTaggedTXs,
		usd_to_ar_rate = ?NEW_WEAVE_USD_TO_AR_RATE,
		scheduled_usd_to_ar_rate = ?NEW_WEAVE_USD_TO_AR_RATE,
		packing_2_5_threshold = 0,
		strict_data_split_threshold = BlockSize,
		account_tree = AccountTree
	},
	%% When the 2.6 fork activates after genesis, fill in the 2.6 mining fields.
	B1 =
		case ar_fork:height_2_6() > 0 of
			false ->
				RewardKey = element(2, ar_wallet:new()),
				RewardAddr = ar_wallet:to_address(RewardKey),
				HashRate = ar_difficulty:get_hash_rate_fixed_ratio(B0),
				RewardHistory = [{RewardAddr, HashRate, 10, 1}],
				PricePerGiBMinute = ar_pricing:get_price_per_gib_minute(0,
						B0#block{ reward_history = RewardHistory, denomination = 1 }),
				B0#block{
					hash = crypto:strong_rand_bytes(32),
					nonce = 0,
					recall_byte = 0,
					partition_number = 0,
					reward_key = RewardKey,
					reward_addr = RewardAddr,
					reward = 10,
					recall_byte2 = 0,
					nonce_limiter_info = #nonce_limiter_info{
						output = crypto:strong_rand_bytes(32),
						seed = crypto:strong_rand_bytes(48),
						partition_upper_bound = BlockSize,
						next_seed = crypto:strong_rand_bytes(48),
						next_partition_upper_bound = BlockSize
					},
					price_per_gib_minute = PricePerGiBMinute,
					scheduled_price_per_gib_minute = PricePerGiBMinute,
					reward_history = RewardHistory,
					reward_history_hash = ar_rewards:reward_history_hash(0, <<>>,
							RewardHistory)
				};
			true ->
				B0
		end,
	%% When the 2.7 fork activates after genesis, fill in the 2.7 fields.
	B2 =
		case ar_fork:height_2_7() > 0 of
			false ->
				InitialHistory = get_initial_block_time_history(),
				B1#block{
					merkle_rebase_support_threshold =
							ar_block:strict_data_split_threshold() * 2,
					chunk_hash = crypto:strong_rand_bytes(32),
					block_time_history = InitialHistory,
					block_time_history_hash =
							ar_block_time_history:hash(InitialHistory)
				};
			true ->
				B1
		end,
	[B2#block{ indep_hash = ar_block:indep_hash(B2) }].

-ifdef(AR_TEST).
get_initial_block_time_history() -> [{1, 1, 1}].
-else.
get_initial_block_time_history() -> [{120, 1, 1}].
-endif.

%% @doc: create a genesis transaction with the given key and data size. This is only used
%% in tests and when launching a localnet node.
create_genesis_tx(Key, Size) ->
	{_, {_, Pk}} = Key,
	UnsignedTX =
		(ar_tx:new())#tx{
			owner = Pk,
			reward = 0,
			data = generate_genesis_data(Size),
			data_size = Size,
			target = <<>>,
			quantity = 0,
			tags = [],
			last_tx = <<>>,
			format = 1
		},
	ar_tx:sign_v1(UnsignedTX, Key).

%% @doc: generate binary data to be used as genesis data in tests. That data is incrementing
%% integer data in 4 byte chunks. e.g.
%% <<0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, ...>>
%% This makes it easier to assert correct chunk data in tests.
-spec generate_genesis_data(integer()) -> binary().
generate_genesis_data(DataSize) ->
	FullChunks = DataSize div 4,
	LeftoverBytes = DataSize rem 4,
	IncrementingData = generate_data(0, FullChunks * 4, <<>>),
	add_padding(IncrementingData, LeftoverBytes).

%% @doc Append 4-byte big-endian integers CurrentValue, CurrentValue + 1, ...
%% to Acc until fewer than 4 bytes remain, then pad the remainder.
%% NOTE(review): the binary expressions below were reconstructed - the
%% originals were lost to extraction mangling; they follow the documented
%% <<0,0,0,0, 0,0,0,1, ...>> layout.
generate_data(CurrentValue, RemainingBytes, Acc) when RemainingBytes >= 4 ->
	Chunk = <<CurrentValue:32>>,
	generate_data(CurrentValue + 1, RemainingBytes - 4, <<Acc/binary, Chunk/binary>>);
generate_data(_, RemainingBytes, Acc) ->
	add_padding(Acc, RemainingBytes).

add_padding(Data, 0) ->
	Data;
add_padding(Data, LeftoverBytes) ->
	%% Pad with 16#FF bytes; the sized binary segment below takes only the
	%% first LeftoverBytes (1..3) bytes of Padding.
	Padding = <<16#FF:8, 16#FF:8, 16#FF:8, 16#FF:8>>,
	<<Data/binary, Padding:LeftoverBytes/binary>>.
%% @doc Copy the mainnet v1 genesis transactions shipped with the repository
%% into the node's data directory and return their decoded identifiers.
%% Return [] (with a warning) when the genesis_data directory is absent.
add_mainnet_v1_genesis_txs() ->
	case filelib:is_dir("genesis_data/genesis_txs") of
		true ->
			{ok, Files} = file:list_dir("genesis_data/genesis_txs"),
			{ok, Config} = arweave_config:get_env(),
			lists:foldl(
				fun(F, Acc) ->
					SourcePath = "genesis_data/genesis_txs/" ++ F,
					TargetPath = Config#config.data_dir ++ "/" ++ ?TX_DIR ++ "/" ++ F,
					file:copy(SourcePath, TargetPath),
					%% The file name (without extension) is the encoded TXID.
					[ar_util:decode(hd(string:split(F, ".")))|Acc]
				end,
				[],
				Files
			);
		false ->
			?LOG_WARNING("genesis_data/genesis_txs directory not found. Node might not index the genesis "
					"block transactions."),
			[]
	end.

%% @doc Return the mainnet genesis transactions.
create_mainnet_genesis_txs() ->
	TXs = lists:map(
		fun({M}) ->
			{Priv, Pub} = ar_wallet:new(),
			LastTx = <<>>,
			Data = unicode:characters_to_binary(M),
			TX = ar_tx:new(Data, 0, LastTx),
			Reward = 0,
			SignedTX = ar_tx:sign_v1(TX#tx{reward = Reward}, Priv, Pub),
			ar_storage:write_tx(SignedTX),
			SignedTX
		end,
		?GENESIS_BLOCK_MESSAGES
	),
	%% Persist the encoded transaction IDs, one per comma-terminated entry.
	ar_storage:write_file_atomic(
		"genesis_wallets.csv",
		lists:map(fun(T) -> binary_to_list(ar_util:encode(T#tx.id)) ++ "," end, TXs)
	),
	[T#tx.id || T <- TXs].



================================================
FILE: apps/arweave/src/ar_webhook.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
%%
-module(ar_webhook).

-behaviour(gen_server).

-export([start_link/1]).

-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
		code_change/3]).

-include_lib("arweave_config/include/arweave_config.hrl").

%% How many times a failing webhook call or data fetch is attempted.
-define(NUMBER_OF_TRIES, 10).
%% How long to wait between attempts, in milliseconds.
-define(WAIT_BETWEEN_TRIES, 30 * 1000).

%% Headers attached to every webhook request.
-define(BASE_HEADERS, [
	{<<"content-type">>, <<"application/json">>}
]).

-define(MAX_TX_OFFSET_CACHE_SIZE, 100_000).
%% Max number of transactions cached

%% Functions which update or query the #tx_offset_cache are named with the `cache_` prefix
%% e.g. `cache_is_tx_data_marked_synced/2`.
-record(tx_offset_cache, {
	txid_to_timestamp = #{}, % Map TXID => Timestamp
	timestamp_txid = gb_sets:new(), % Ordered set of {Timestamp, TXID}
	txid_to_start_offset_end_offset = #{}, % Map TXID => {Start, End}
	end_offset_txid_start_offset = gb_sets:new(), % Ordered set of {End, TXID, Start}
	size = 0, % Number of cached transactions
	txid_to_status = #{} % Map TXID => synced | unsynced
}).

%% Internal state definition.
-record(state, {
	url, % The webhook endpoint URL
	headers, % Extra HTTP headers sent with every webhook call
	listen_to_block_stream = false,
	listen_to_transaction_stream = false,
	listen_to_transaction_data_stream = false,
	tx_offset_cache = #tx_offset_cache{}
}).

%%%===================================================================
%%% API
%%%===================================================================

%%--------------------------------------------------------------------
%% @doc
%% Starts the server
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
start_link(Args) ->
	gen_server:start_link(?MODULE, Args, []).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server
%%
%% @spec init(Args) -> {ok, State} |
%%                     {ok, State, Timeout} |
%%                     ignore |
%%                     {stop, Reason}
%% @end
%%--------------------------------------------------------------------
init(Hook) ->
	?LOG_DEBUG("Started web hook for ~p", [Hook]),
	%% Subscribe to the event feeds required by the configured webhook events.
	State = lists:foldl(
		fun	(transaction, Acc) ->
				%% Deliberately not matched against ok: the tx feed may
				%% already be subscribed to when transaction_data is
				%% configured as well.
				ar_events:subscribe(tx),
				Acc#state{ listen_to_transaction_stream = true };
			(block, Acc) ->
				ok = ar_events:subscribe(block),
				Acc#state{ listen_to_block_stream = true };
			(transaction_data, Acc) ->
				ar_events:subscribe(tx),
				ok = ar_events:subscribe(sync_record),
				Acc#state{ listen_to_transaction_data_stream = true };
			(solution, Acc) ->
				ok = ar_events:subscribe(solution),
				Acc;
			(_, Acc) ->
				?LOG_WARNING("Wrong event name in webhook ~p", [Hook]),
				Acc
		end,
		#state{},
		Hook#config_webhook.events
	),
	State2 = State#state{
		url = Hook#config_webhook.url,
		headers = Hook#config_webhook.headers
	},
	{ok, State2}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%%
%% @spec handle_call(Request, From, State) ->
%%                                   {reply, Reply, State} |
%%                                   {reply, Reply, State, Timeout} |
%%                                   {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, Reply, State} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
handle_call(Request, _From, State) ->
	?LOG_ERROR("unhandled call: ~p", [Request]),
	{reply, ok, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling cast messages
%%
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%%                                  {noreply, State, Timeout} |
%%                                  {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
handle_cast(Msg, State) ->
	?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling all non call/cast messages
%%
%% @spec handle_info(Info, State) -> {noreply, State} |
%%                                   {noreply, State, Timeout} |
%%                                   {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% A new block was accepted - push it when block events are configured.
handle_info({event, block, {new, Block, _Source}},
		#state{ listen_to_block_stream = true } = State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	call_webhook(URL, Headers, Block, block),
	{noreply, State};
handle_info({event, block, _}, State) ->
	{noreply, State};
%% A new transaction arrived - push it when transaction events are configured.
handle_info({event, tx, {new, TX, _Source}},
		#state{ listen_to_transaction_stream = true } = State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	call_webhook(URL, Headers, TX, transaction),
	{noreply, State};
%% A transaction's global offset was registered - cache it and maybe report
%% its data as synced.
handle_info({event, tx, {registered_offset, TXID, EndOffset, Size}},
		#state{ listen_to_transaction_data_stream = true } = State) ->
	Start = EndOffset - Size,
	Cache = State#state.tx_offset_cache,
	case cache_is_tx_data_marked_synced(TXID, Cache) of
		true ->
			%% This event has already been processed for TXID, no need to process again
			{noreply, State};
		false ->
			Cache2 = cache_add_tx_offset_data([{TXID, Start, EndOffset}], Cache),
			State2 = State#state{ tx_offset_cache = Cache2 },
			{noreply, maybe_call_transaction_data_synced_webhook(Start, EndOffset,
					TXID, no_store_id, State2)}
	end;
%% A transaction was orphaned - report it and evict it from the cache.
handle_info({event, tx, {orphaned, TX}},
		#state{ listen_to_transaction_data_stream = true } = State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => transaction_orphaned, txid => ar_util:encode(TX#tx.id) },
	call_webhook(URL, Headers, Payload, transaction_orphaned),
	Cache = State#state.tx_offset_cache,
	Cache2 = cache_remove_tx_offset_data(TX#tx.id, Cache),
	{noreply, State#state{ tx_offset_cache = Cache2 }};
handle_info({event, tx, _}, State) ->
	{noreply, State};
handle_info({event, sync_record, {add_range, Start, End, ar_data_sync,
		#{ module := Module }}},
		#state{ listen_to_transaction_data_stream = true } = State) ->
	case Module of
		?DEFAULT_MODULE ->
			%% The disk pool data is not guaranteed to stay forever so we
			%% do not want to push it here.
			{noreply, State};
		_ ->
			{noreply, handle_sync_record_add_range(Start, End, Module, State)}
	end;
handle_info({event, sync_record, {global_remove_range, Start, End}},
		#state{ listen_to_transaction_data_stream = true } = State) ->
	{noreply, handle_sync_record_remove_range(Start, End, State)};
handle_info({event, sync_record, _}, State) ->
	{noreply, State};
%% Solution lifecycle notifications - forwarded to the webhook verbatim.
handle_info({event, solution, {rejected, #{ solution_hash := SolutionH,
		reason := Reason, source := Source }}}, State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => solution_rejected,
			solution_hash => ar_util:encode(SolutionH),
			reason => Reason, source => Source },
	call_webhook(URL, Headers, Payload, solution_rejected),
	{noreply, State};
handle_info({event, solution, {stale, #{ solution_hash := SolutionH,
		source := Source }}}, State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => solution_stale,
			solution_hash => ar_util:encode(SolutionH), source => Source },
	call_webhook(URL, Headers, Payload, solution_stale),
	{noreply, State};
handle_info({event, solution, {partial, #{ solution_hash := SolutionH,
		source := Source }}}, State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => solution_partial,
			solution_hash => ar_util:encode(SolutionH), source => Source },
	call_webhook(URL, Headers, Payload, solution_partial),
	{noreply, State};
handle_info({event, solution, {accepted, #{ indep_hash := H, source := Source,
		is_rebase := IsRebase }}}, State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => solution_accepted, indep_hash => ar_util:encode(H),
			source => Source, is_rebase => IsRebase },
	call_webhook(URL, Headers, Payload, solution_accepted),
	{noreply, State};
handle_info({event, solution, {confirmed, #{ indep_hash := BH,
		confirmations := N }}}, State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => solution_confirmed, indep_hash => ar_util:encode(BH),
			confirmations => N },
	call_webhook(URL, Headers, Payload, solution_confirmed),
	{noreply, State};
handle_info({event, solution, {orphaned, #{ indep_hash := BH }}}, State) ->
	URL = State#state.url,
	Headers = State#state.headers,
	Payload = #{ event => solution_orphaned, indep_hash => ar_util:encode(BH) },
	call_webhook(URL, Headers, Payload, solution_orphaned),
	{noreply, State};
handle_info({event, solution, _}, State) ->
	{noreply, State};
handle_info(Info, State) ->
	?LOG_ERROR([{event, unhandled_info}, {module, ?MODULE}, {info, Info}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%%
%% @spec terminate(Reason, State) -> void()
%% @end
%%--------------------------------------------------------------------
terminate(Reason, _State) ->
	?LOG_INFO([{module, ?MODULE},{pid, self()},{callback, terminate},{reason, Reason}]),
	ok.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Convert process state when code is changed
%%
%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
%% @end
%%--------------------------------------------------------------------
code_change(_OldVsn, State, _Extra) ->
	{ok, State}.

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% @doc POST the JSON-serialized Entity to URL, retrying up to
%% ?NUMBER_OF_TRIES times with ?WAIT_BETWEEN_TRIES ms between attempts.
call_webhook(URL, Headers, Entity, Event) ->
	do_call_webhook(URL, Headers, Entity, Event, 0).

do_call_webhook(URL, Headers, Entity, Event, N) when N < ?NUMBER_OF_TRIES ->
	#{ host := Host, path := Path } = Map = uri_string:parse(URL),
	Query = maps:get(query, Map, <<>>),
	%% Default to port 443 for https and to 1984 otherwise when the URL does
	%% not carry an explicit port. uri_string:parse/1 returns the scheme as a
	%% binary for a binary URL (and the code below treats Host as a binary),
	%% so match the binary form as well - the string-only match could never
	%% succeed for binary URLs.
	Peer =
		case maps:get(port, Map, undefined) of
			undefined ->
				case maps:get(scheme, Map, undefined) of
					Scheme when Scheme == "https"; Scheme == <<"https">> ->
						{binary_to_list(Host), 443};
					_ ->
						{binary_to_list(Host), 1984}
				end;
			Port ->
				{binary_to_list(Host), Port}
		end,
	%% Re-attach the query string, if any. NOTE(review): reconstructed - the
	%% original concatenation expression was lost in extraction; parse/1
	%% returns the query without the leading "?".
	PathWithQuery =
		case Query of
			<<>> ->
				Path;
			_ ->
				<<Path/binary, "?", Query/binary>>
		end,
	case catch ar_http:req(#{
			method => post,
			peer => Peer,
			path => binary_to_list(PathWithQuery),
			headers => ?BASE_HEADERS ++ Headers,
			body => to_json(Entity),
			timeout => 10000,
			is_peer_request => false
		}) of
		{ok, {{<<"200">>, _}, _, _, _, _}} = Result ->
			?LOG_INFO([
				{event, webhook_call_success},
				{webhook_event, Event},
				{id, entity_id(Entity)},
				{url, URL},
				{headers, Headers},
				{response, Result}
			]),
			ok;
		Error ->
			?LOG_ERROR([
				{event, webhook_call_failure},
				{webhook_event, Event},
				{id, entity_id(Entity)},
				{url, URL},
				{headers, Headers},
				{response, Error},
				{retry_in, ?WAIT_BETWEEN_TRIES}
			]),
			timer:sleep(?WAIT_BETWEEN_TRIES),
			do_call_webhook(URL, Headers, Entity, Event, N + 1)
	end;
do_call_webhook(URL, Headers, Entity, Event, _N) ->
	%% All attempts exhausted - log and give up.
	?LOG_WARNING([{event, gave_up_webhook_call}, {webhook_event, Event},
		{id, entity_id(Entity)}, {url, URL}, {headers, Headers},
		{number_of_tries, ?NUMBER_OF_TRIES},
		{wait_between_tries, ?WAIT_BETWEEN_TRIES}
	]),
	ok.
%% @doc Return the encoded identifier used to tag log entries for an entity.
entity_id(#block{ indep_hash = ID }) -> ar_util:encode(ID);
entity_id(#tx{ id = ID }) -> ar_util:encode(ID);
entity_id(#{ txid := TXID }) -> ar_util:encode(TXID);
entity_id(#{ indep_hash := H }) -> ar_util:encode(H);
entity_id(#{ solution_hash := H }) -> ar_util:encode(H).

%% @doc Serialize an entity into the JSON payload of a webhook call. The
%% wallet list is stripped from blocks and the data field from transactions
%% (replaced with data_size).
to_json(#block{} = Block) ->
	{JSONKVPairs} = ar_serialize:block_to_json_struct(Block),
	JSONStruct = {lists:keydelete(wallet_list, 1, JSONKVPairs)},
	ar_serialize:jsonify({[{block, JSONStruct}]});
to_json(#tx{} = TX) ->
	{JSONKVPairs1} = ar_serialize:tx_to_json_struct(TX),
	JSONKVPairs2 = lists:keydelete(data, 1, JSONKVPairs1),
	JSONKVPairs3 = [{data_size, TX#tx.data_size} | JSONKVPairs2],
	JSONStruct = {JSONKVPairs3},
	ar_serialize:jsonify({[{transaction, JSONStruct}]});
to_json(Map) when is_map(Map) ->
	jiffy:encode(Map).

%% @doc Look up the transactions overlapping the newly synced [Start, End)
%% range and push transaction_data_synced webhooks for them, retrying the
%% lookup up to ?NUMBER_OF_TRIES times.
handle_sync_record_add_range(Start, End, Module, State) ->
	handle_sync_record_add_range(Start, End, Module, State, 0).

handle_sync_record_add_range(Start, End, Module, State, N) when N < ?NUMBER_OF_TRIES ->
	case get_tx_offset_data(Start, End, State#state.tx_offset_cache) of
		{ok, Data, Cache} ->
			process_updated_tx_data(Data, Module, State#state{ tx_offset_cache = Cache });
		not_found ->
			State;
		Error ->
			?LOG_WARNING([{event, failed_to_process_webhook_sync_record_add_range},
					{error, io_lib:format("~p", [Error])},
					{range_start, Start},
					{range_end, End}]),
			timer:sleep(?WAIT_BETWEEN_TRIES),
			%% Fixed: retry via the 5-arity function, passing Module along and
			%% incrementing N. The previous code called the 4-arity entry
			%% point with (Start, End, State, N + 1), binding State to Module
			%% and the integer N + 1 to State, which reset the retry counter
			%% and crashed with badrecord on the next iteration.
			handle_sync_record_add_range(Start, End, Module, State, N + 1)
	end;
handle_sync_record_add_range(Start, End, _Module, State, _N) ->
	?LOG_WARNING([{event, gave_up_webhook_tx_offset_data_fetch},
		{range_start, Start}, {range_end, End},
		{number_of_tries, ?NUMBER_OF_TRIES},
		{wait_between_tries, ?WAIT_BETWEEN_TRIES}
	]),
	State.
%% @doc Send the transaction_data_synced webhook for every transaction in
%% Data whose data became fully synced, skipping those already reported.
process_updated_tx_data([], _Module, State) ->
	State;
process_updated_tx_data([{TXID, Start, End} | Data], Module, State) ->
	Cache = State#state.tx_offset_cache,
	case cache_is_tx_data_marked_synced(TXID, Cache) of
		true ->
			%% transaction_data_synced webhook has already been sent, advance to next range
			process_updated_tx_data(Data, Module, State);
		false ->
			%% Maybe send the transaction_data_synced webhook before advancing to the next range
			process_updated_tx_data(Data, Module,
					maybe_call_transaction_data_synced_webhook(Start, End, TXID,
							Module, State))
	end.

%% @doc Return a list of {TXID, Start2, End2} for recorded transactions from the given range.
get_tx_offset_data(Start, End, Cache) ->
	case cache_get_tx_offset_data(Start, End, Cache) of
		[] ->
			%% Cache miss - consult ar_data_sync and cache the result.
			case ar_data_sync:get_tx_offset_data_in_range(Start, End) of
				{ok, []} ->
					not_found;
				{ok, Data} ->
					Cache2 = cache_add_tx_offset_data(Data, Cache),
					{ok, Data, Cache2};
				Error ->
					Error
			end;
		List ->
			{ok, List, Cache}
	end.

%% @doc Return the cached {TXID, Start2, End2} triplets whose ranges overlap
%% [Start, End). The set is ordered by end offset.
cache_get_tx_offset_data(Start, End, Cache) ->
	#tx_offset_cache{ end_offset_txid_start_offset = Set } = Cache,
	%% 32-byte TXID > atom n => we choose any element with End at least Start + 1.
	Iterator = gb_sets:iterator_from({Start + 1, n, n}, Set),
	cache_get_tx_offset_data_from_iterator(End, Iterator).

%% Collect elements from the iterator while their start offset lies before
%% End; stop at the first non-overlapping element or at exhaustion.
cache_get_tx_offset_data_from_iterator(End, Iterator) ->
	case gb_sets:next(Iterator) of
		none ->
			[];
		{{End2, TXID, Start2}, Iterator2} when Start2 < End ->
			[{TXID, Start2, End2}
					| cache_get_tx_offset_data_from_iterator(End, Iterator2)];
		_ ->
			[]
	end.
%% @doc Insert {TXID, Start, End} entries into the tx offset cache, updating
%% all of its indices (timestamp map/set, offset set/map, status map) and
%% evicting the oldest entry when the cache exceeds its maximum size.
%% Re-inserting a known TXID refreshes its timestamp and clears its status.
cache_add_tx_offset_data([], Cache) ->
	Cache;
cache_add_tx_offset_data([{TXID, Start, End} | Data], Cache) ->
	#tx_offset_cache{
		txid_to_timestamp = Map,
		timestamp_txid = Set,
		end_offset_txid_start_offset = OffsetSet,
		txid_to_start_offset_end_offset = OffsetMap,
		txid_to_status = StatusMap,
		size = Size } = Cache,
	Timestamp = erlang:monotonic_time(microsecond),
	%% If the TXID is already cached, drop its old timestamp entry so the
	%% LRU set holds at most one entry per TXID.
	Set2 =
		case maps:get(TXID, Map, not_found) of
			not_found ->
				Set;
			PrevTimestamp ->
				gb_sets:del_element({PrevTimestamp, TXID}, Set)
		end,
	Map2 = maps:put(TXID, Timestamp, Map),
	%% Warn (log only) if the new range overlaps an existing cached range.
	check_offset_set_consistency(Start, End, TXID, OffsetSet),
	Set3 = gb_sets:add_element({Timestamp, TXID}, Set2),
	OffsetSet2 = gb_sets:add_element({End, TXID, Start}, OffsetSet),
	OffsetMap2 =
		case maps:get(TXID, OffsetMap, not_found) of
			not_found ->
				maps:put(TXID, {Start, End}, OffsetMap);
			{Start, End} ->
				OffsetMap;
			{Start2, End2} ->
				%% Same TXID cached with different offsets - log and overwrite.
				?LOG_WARNING([{event, inconsistent_tx_offset_cache},
						{check, txid_to_start_offset_end_offset},
						{cached_tx_start_offset, Start2},
						{cached_tx_end_offset, End2},
						{tx_start_offset, Start},
						{tx_end_offset, End},
						{txid, ar_util:encode(TXID)}]),
				maps:put(TXID, {Start, End}, OffsetMap)
		end,
	Cache2 = Cache#tx_offset_cache{
		txid_to_timestamp = Map2,
		timestamp_txid = Set3,
		end_offset_txid_start_offset = OffsetSet2,
		txid_to_start_offset_end_offset = OffsetMap2,
		txid_to_status = maps:remove(TXID, StatusMap),
		size = Size + 1 },
	Cache3 = maybe_remove_oldest(Cache2),
	cache_add_tx_offset_data(Data, Cache3).

%% @doc Log a warning if the offset set already holds an entry overlapping
%% [Start, End). Purely diagnostic - never modifies the cache.
check_offset_set_consistency(Start, End, TXID, OffsetSet) ->
	%% 32-byte TXID > atom n => we choose any element with End at least Start + 1.
	Iterator = gb_sets:iterator_from({Start + 1, n, n}, OffsetSet),
	check_offset_set_consistency2(Start, End, TXID, Iterator).
%% @doc Scan the iterator for a cached range overlapping [Start, End) and log
%% a warning for the first one found; entries for the same TXID are skipped
%% (fall through to the catch-all clause and continue scanning).
check_offset_set_consistency2(Start, End, TXID, Iterator) ->
	case gb_sets:next(Iterator) of
		none ->
			ok;
		{{End2, TXID2, Start2}, _Iterator2} when Start2 < End ->
			?LOG_WARNING([{event, inconsistent_tx_offset_cache},
					{check, end_offset_txid_start_offset},
					{cached_tx_start_offset, Start2},
					{cached_tx_end_offset, End2},
					{cached_txid, ar_util:encode(TXID2)},
					{tx_start_offset, Start},
					{tx_end_offset, End},
					{txid, ar_util:encode(TXID)}]);
		{_, Iterator2} ->
			check_offset_set_consistency2(Start, End, TXID, Iterator2)
	end.

%% @doc Evict the least-recently-inserted entry (smallest monotonic timestamp)
%% when the cache size exceeds ?MAX_TX_OFFSET_CACHE_SIZE, removing the TXID
%% from every index. Called once per insertion, so size stays bounded.
maybe_remove_oldest(Cache) ->
	#tx_offset_cache{
		txid_to_timestamp = Map,
		timestamp_txid = Set,
		end_offset_txid_start_offset = OffsetSet,
		txid_to_start_offset_end_offset = OffsetMap,
		txid_to_status = StatusMap,
		size = Size } = Cache,
	case Size > ?MAX_TX_OFFSET_CACHE_SIZE of
		false ->
			Cache;
		true ->
			{{_Timestamp, TXID}, Set2} = gb_sets:take_smallest(Set),
			{Start, End} = maps:get(TXID, OffsetMap),
			OffsetSet2 = gb_sets:del_element({End, TXID, Start}, OffsetSet),
			Cache#tx_offset_cache{
				txid_to_timestamp = maps:remove(TXID, Map),
				timestamp_txid = Set2,
				end_offset_txid_start_offset = OffsetSet2,
				txid_to_start_offset_end_offset = maps:remove(TXID, OffsetMap),
				txid_to_status = maps:remove(TXID, StatusMap),
				size = Size - 1 }
	end.
%% @doc Remove the given TXID from every cache index. A no-op if the TXID is
%% not cached. Assumes a TXID present in txid_to_timestamp is also present in
%% txid_to_start_offset_end_offset (maps:get/2 would raise otherwise).
cache_remove_tx_offset_data(TXID, Cache) ->
	#tx_offset_cache{
		txid_to_timestamp = Map,
		timestamp_txid = Set,
		end_offset_txid_start_offset = OffsetSet,
		txid_to_start_offset_end_offset = OffsetMap,
		size = Size,
		txid_to_status = StatusMap } = Cache,
	case maps:get(TXID, Map, not_found) of
		not_found ->
			Cache;
		Timestamp ->
			Set2 = gb_sets:del_element({Timestamp, TXID}, Set),
			Map2 = maps:remove(TXID, Map),
			Size2 = Size - 1,
			{Start, End} = maps:get(TXID, OffsetMap),
			OffsetSet2 = gb_sets:del_element({End, TXID, Start}, OffsetSet),
			OffsetMap2 = maps:remove(TXID, OffsetMap),
			StatusMap2 = maps:remove(TXID, StatusMap),
			Cache#tx_offset_cache{
				txid_to_timestamp = Map2,
				timestamp_txid = Set2,
				end_offset_txid_start_offset = OffsetSet2,
				txid_to_start_offset_end_offset = OffsetMap2,
				size = Size2,
				txid_to_status = StatusMap2 }
	end.

%% @doc Mark the transaction data as synced (webhook already emitted).
cache_mark_tx_data_synced(TXID, Cache) ->
	#tx_offset_cache{ txid_to_status = Map } = Cache,
	Cache#tx_offset_cache{ txid_to_status = maps:put(TXID, synced, Map) }.

%% @doc True if the transaction_data_synced webhook was already emitted.
cache_is_tx_data_marked_synced(TXID, Cache) ->
	maps:get(TXID, Cache#tx_offset_cache.txid_to_status, false) == synced.

%% @doc Mark the transaction data as unsynced (removal webhook emitted).
cache_mark_tx_data_unsynced(TXID, Cache) ->
	#tx_offset_cache{ txid_to_status = Map } = Cache,
	Cache#tx_offset_cache{ txid_to_status = maps:put(TXID, unsynced, Map) }.

%% @doc True if the transaction_data_removed webhook was already emitted.
cache_is_tx_data_marked_unsynced(TXID, Cache) ->
	maps:get(TXID, Cache#tx_offset_cache.txid_to_status, false) == unsynced.

%% @doc Emit the transaction_data_synced webhook and mark the TXID as synced
%% in the cache, but only if the [Start, End) range is fully covered by the
%% configured storage modules; otherwise the state is returned unchanged.
maybe_call_transaction_data_synced_webhook(Start, End, TXID, MaybeModule, State) ->
	Cache = State#state.tx_offset_cache,
	Cache2 =
		case is_synced_by_storage_modules(Start, End, MaybeModule) of
			true ->
				URL = State#state.url,
				Headers = State#state.headers,
				Payload = #{ event => transaction_data_synced, txid => ar_util:encode(TXID) },
				call_webhook(URL, Headers, Payload, transaction_data_synced),
				cache_mark_tx_data_synced(TXID, Cache);
			false ->
				Cache
		end,
	State#state{ tx_offset_cache = Cache2 }.
%% @doc True if the storage modules covering [Start, End) have no unsynced
%% sub-intervals left, i.e. the whole range is stored locally.
is_synced_by_storage_modules(Start, End, Module) ->
	case ar_storage_module:get_cover(Start, End, Module) of
		not_found ->
			false;
		Intervals ->
			is_synced_by_storage_modules(Intervals)
	end.

%% @doc Check each {Start, End, StoreID} interval of the cover for unsynced
%% gaps; any gap means the range is not fully synced.
is_synced_by_storage_modules([]) ->
	true;
is_synced_by_storage_modules([{Start, End, StoreID} | Intervals]) ->
	case ar_sync_record:get_next_unsynced_interval(Start, End, ar_data_sync, StoreID) of
		not_found ->
			is_synced_by_storage_modules(Intervals);
		_I ->
			false
	end.

%% @doc Handle a sync-record "remove range" event: look up the transactions
%% recorded in the range and emit transaction_data_removed webhooks for them.
%% Retries up to ?NUMBER_OF_TRIES times on error.
handle_sync_record_remove_range(Start, End, State) ->
	handle_sync_record_remove_range(Start, End, State, 0).

handle_sync_record_remove_range(Start, End, State, N) when N < ?NUMBER_OF_TRIES ->
	case get_tx_offset_data(Start, End, State#state.tx_offset_cache) of
		{ok, Data, Cache} ->
			process_removed_tx_data(Data, State#state{ tx_offset_cache = Cache });
		not_found ->
			State;
		Error ->
			?LOG_WARNING([{event, failed_to_process_webhook_sync_record_remove_range},
					{error, io_lib:format("~p", [Error])},
					{range_start, Start}, {range_end, End}]),
			timer:sleep(?WAIT_BETWEEN_TRIES),
			handle_sync_record_remove_range(Start, End, State, N + 1)
	end;
handle_sync_record_remove_range(Start, End, State, _N) ->
	?LOG_WARNING([{event, gave_up_webhook_tx_offset_data_fetch},
			{range_start, Start}, {range_end, End},
			{number_of_tries, ?NUMBER_OF_TRIES},
			{wait_between_tries, ?WAIT_BETWEEN_TRIES}
	]),
	State.

%% @doc Emit the transaction_data_removed webhook for each listed transaction
%% not already marked unsynced, marking each as unsynced afterwards so the
%% webhook is not emitted twice.
process_removed_tx_data([], State) ->
	State;
process_removed_tx_data([{TXID, _Start, _End} | Data], State) ->
	Cache = State#state.tx_offset_cache,
	case cache_is_tx_data_marked_unsynced(TXID, Cache) of
		true ->
			process_removed_tx_data(Data, State);
		false ->
			URL = State#state.url,
			Headers = State#state.headers,
			Payload = #{ event => transaction_data_removed, txid => ar_util:encode(TXID) },
			call_webhook(URL, Headers, Payload, transaction_data_removed),
			Cache2 = cache_mark_tx_data_unsynced(TXID, Cache),
			State2 = State#state{ tx_offset_cache = Cache2 },
			process_removed_tx_data(Data, State2)
	end.
================================================
FILE: apps/arweave/src/ar_webhook_sup.erl
================================================
%% This Source Code Form is subject to the terms of the GNU General
%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed
%% with this file, You can obtain one at
%% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html

-module(ar_webhook_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

-include_lib("arweave/include/ar_sup.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% ===================================================================
%% API functions
%% ===================================================================

start_link() ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

%% @doc Start one ar_webhook worker per configured webhook. Malformed
%% configuration entries are logged and skipped. The previous implementation
%% used lists:map/2 and returned the ?LOG_ERROR result as a child spec for a
%% malformed entry, which made the supervisor fail to start.
init([]) ->
	{ok, Config} = arweave_config:get_env(),
	Children = lists:filtermap(
		fun (Hook) when is_record(Hook, config_webhook) ->
				Handler = {ar_webhook, Hook#config_webhook.url},
				{true, {Handler, {ar_webhook, start_link, [Hook]},
						permanent, ?SHUTDOWN_TIMEOUT, worker, [ar_webhook]}};
			(Hook) ->
				?LOG_ERROR([{event, failed_to_parse_webhook_config},
						{webhook_config, io_lib:format("~p", [Hook])}]),
				false
		end,
		Config#config.webhooks
	),
	{ok, {{one_for_one, 5, 10}, Children}}.



================================================
FILE: apps/arweave/src/arweave.app.src
================================================
{application, arweave, [
	{description, "Arweave"},
	{vsn, "2.9.6-alpha1"},
	{mod, {ar, []}},
	{applications, [
		kernel,
		stdlib,
		sasl,
		os_mon,
		gun,
		cowlib,
		ranch,
		cowboy,
		prometheus,
		prometheus_cowboy,
		prometheus_process_collector,
		prometheus_httpd,
		runtime_tools
	]},
	{extra_applications, [recon]}
]}.
================================================
FILE: apps/arweave/src/e2e/ar_e2e.erl
================================================
%% @doc End-to-end test helpers: fixture management, source-node setup and
%% sync/chunk/partition assertions shared by the e2e test suites.
-module(ar_e2e).

-export([fixture_dir/1, fixture_dir/2, install_fixture/3, load_wallet_fixture/1,
	write_chunk_fixture/3, load_chunk_fixture/2]).

-export([delayed_print/2, packing_type_to_packing/2,
	start_source_node/3, start_source_node/4,
	source_node_storage_modules/3, source_node_storage_modules/4,
	max_chunk_offset/1, aligned_partition_size/3,
	assert_recall_byte/3, assert_block/2,
	assert_syncs_range/3, assert_syncs_range/4, assert_does_not_sync_range/3,
	assert_has_entropy/4, assert_no_entropy/4,
	assert_chunks/3, assert_chunks/4, assert_no_chunks/2,
	assert_partition_size/3, assert_partition_size/4,
	assert_empty_partition/3,
	assert_mine_and_validate/3]).

-include_lib("ar.hrl").
-include_lib("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Set to true to update the chunk fixtures.
%% WARNING: ONLY SET TO true IF YOU KNOW WHAT YOU ARE DOING!
-define(UPDATE_CHUNK_FIXTURES, false).

%% Partition size is 2,000,000 bytes, so round up to a full chunk
-define(ALIGNED_PARTITION_SIZE, 2_097_152).

%% @doc Return the fixtures sub-directory for the given fixture type,
%% resolved relative to this source file's directory.
-spec fixture_dir(atom()) -> binary().
fixture_dir(FixtureType) ->
	Dir = filename:dirname(?FILE),
	filename:join([Dir, "fixtures", atom_to_list(FixtureType)]).

%% @doc Like fixture_dir/1 but with extra path components appended.
-spec fixture_dir(atom(), [binary()]) -> binary().
fixture_dir(FixtureType, SubDirs) ->
	FixtureDir = fixture_dir(FixtureType),
	filename:join([FixtureDir] ++ SubDirs).

%% @doc Copy FilePath into the fixture directory under FixtureName and
%% return the destination path. The copy result is ignored (best effort).
-spec install_fixture(binary(), atom(), string()) -> binary().
install_fixture(FilePath, FixtureType, FixtureName) ->
	FixtureDir = fixture_dir(FixtureType),
	ok = filelib:ensure_dir(FixtureDir ++ "/"),
	FixturePath = filename:join([FixtureDir, FixtureName]),
	file:copy(FilePath, FixturePath),
	FixturePath.

-spec load_wallet_fixture(atom()) -> tuple().
%% @doc Load the named wallet fixture, install the keyfile under the node's
%% wallet directory, and return the loaded key.
load_wallet_fixture(WalletFixture) ->
	WalletName = atom_to_list(WalletFixture),
	FixtureDir = fixture_dir(wallets),
	FixturePath = filename:join([FixtureDir, WalletName ++ ".json"]),
	Wallet = ar_wallet:load_keyfile(FixturePath),
	Address = ar_wallet:to_address(Wallet),
	WalletPath = ar_wallet:wallet_filepath(ar_util:encode(Address)),
	file:copy(FixturePath, WalletPath),
	ar_wallet:load_keyfile(WalletPath).

%% @doc Write a chunk fixture file named by its end offset under the
%% directory for the given packing.
%% NOTE(review): the spec says ok but the file:write_file/2 result is
%% returned unchecked - confirm callers do not rely on it.
-spec write_chunk_fixture(binary(), non_neg_integer(), binary()) -> ok.
write_chunk_fixture(Packing, EndOffset, Chunk) ->
	FixtureDir = fixture_dir(chunks, [ar_serialize:encode_packing(Packing, true)]),
	ok = filelib:ensure_dir(FixtureDir ++ "/"),
	FixturePath = filename:join([FixtureDir, integer_to_list(EndOffset) ++ ".bin"]),
	file:write_file(FixturePath, Chunk).

%% @doc Read a chunk fixture file for the given packing and end offset.
%% NOTE(review): returns file:read_file/1's {ok, Binary} tuple, not a bare
%% binary as the spec states.
-spec load_chunk_fixture(binary(), non_neg_integer()) -> binary().
load_chunk_fixture(Packing, EndOffset) ->
	FixtureDir = fixture_dir(chunks, [ar_serialize:encode_packing(Packing, true)]),
	FixturePath = filename:join([FixtureDir, integer_to_list(EndOffset) ++ ".bin"]),
	file:read_file(FixturePath).

%% @doc Map a packing-type atom plus mining address to the packing term used
%% throughout the node code.
packing_type_to_packing(PackingType, Address) ->
	case PackingType of
		replica_2_9 -> {replica_2_9, Address};
		spora_2_6 -> {spora_2_6, Address};
		composite_1 -> {composite, Address, 1};
		composite_2 -> {composite, Address, 2};
		unpacked -> unpacked
	end.

%% @doc start_source_node/4 with the default storage-module size.
start_source_node(Node, PackingType, WalletFixture) ->
	start_source_node(Node, PackingType, WalletFixture, default).
%% @doc Start a source node seeded with test data.
%%
%% The unpacked clause bootstraps by starting a temporary spora_2_6 source
%% node on the sibling peer, syncing its data unpacked into Node, stopping
%% the temporary node, and restarting Node standalone. Returns
%% {Blocks, undefined, Chunks} (no reward address for unpacked data).
start_source_node(Node, unpacked, _WalletFixture, ModuleSize) ->
	?LOG_INFO("Starting source node ~p with packing type ~p and wallet fixture ~p",
		[Node, unpacked, _WalletFixture]),
	TempNode = case Node of
		peer1 -> peer2;
		peer2 -> peer1
	end,
	{Blocks, _SourceAddr, Chunks} = start_source_node(TempNode, spora_2_6, wallet_a),
	{_, StorageModules} = source_node_storage_modules(Node, unpacked, wallet_a, ModuleSize),
	[B0, _, {TX2, _} | _] = Blocks,
	{ok, Config} = ar_test_node:get_config(Node),
	ar_test_node:start_other_node(Node, B0, Config#config{
		peers = [ar_test_node:peer_ip(TempNode)],
		storage_modules = StorageModules,
		auto_join = true
	}, true),
	?LOG_INFO("Source node ~p started.", [Node]),
	assert_syncs_range(Node, 0, 4*?ALIGNED_PARTITION_SIZE),
	assert_chunks(Node, unpacked, Chunks),
	?LOG_INFO("Source node ~p assertions passed.", [Node]),
	ar_test_node:stop(TempNode),
	ar_test_node:restart_with_config(Node, Config#config{
		peers = [],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true
	}),
	%% pack_served_chunks is not enabled but the data is stored unpacked, so we should
	%% return it
	{ok, {{<<"200">>, _}, _, Data, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(Node),
		path => "/tx/" ++ binary_to_list(ar_util:encode(TX2#tx.id)) ++ "/data"
	}),
	{ok, ExpectedData} = load_chunk_fixture(
		unpacked, ?ALIGNED_PARTITION_SIZE + floor(3.75 * ?DATA_CHUNK_SIZE)),
	?assertEqual(ExpectedData, ar_util:decode(Data)),
	?LOG_INFO("Source node ~p restarted.", [Node]),
	{Blocks, undefined, Chunks};
%% Packed clause: initialize a weave from the wallet fixture, mine five
%% blocks of carefully sized data spanning partitions 1-3, and assert the
%% resulting chunks. Returns {Blocks, RewardAddr, Chunks}.
start_source_node(Node, PackingType, WalletFixture, ModuleSize) ->
	?LOG_INFO("Starting source node ~p with packing type ~p and wallet fixture ~p",
		[Node, PackingType, WalletFixture]),
	{Wallet, StorageModules} = source_node_storage_modules(
		Node, PackingType, WalletFixture, ModuleSize),
	RewardAddr = ar_wallet:to_address(Wallet),
	[B0] = ar_weave:init([{RewardAddr, ?AR(200), <<>>}], 0, ?ALIGNED_PARTITION_SIZE),
	{ok, Config} = ar_test_node:remote_call(Node, arweave_config, get_env, []),
	?assertEqual(ar_test_node:peer_name(Node),
		ar_test_node:start_other_node(Node, B0, Config#config{
			peers = [],
			start_from_latest_state = true,
			storage_modules = StorageModules,
			auto_join = true,
			mining_addr = RewardAddr
		}, true)
	),
	?LOG_INFO("Source node ~p started.", [Node]),
	%% Note: small chunks will be padded to 256 KiB. So B1 actually contains 3 chunks of data
	%% and B2 starts at a chunk boundary and contains 1 chunk of data.
	%%
	%% p1, 2097152 to 2883584
	{TX1, B1} = mine_block(Node, Wallet, floor(2.5 * ?DATA_CHUNK_SIZE), infinity),
	%% p1, 2883584 to 3145728
	{TX2, B2} = mine_block(Node, Wallet, floor(0.75 * ?DATA_CHUNK_SIZE), infinity),
	%% p1 to p2, 3145728 to 5242880
	{TX3, B3} = mine_block(Node, Wallet, ?ALIGNED_PARTITION_SIZE, infinity),
	%% p2, 5242880 to 6291456 (disk pool threshold falls in the middle of p2)
	{TX4, B4} = mine_block(Node, Wallet, floor(0.5 * ?ALIGNED_PARTITION_SIZE),
		2 * ?DATA_CHUNK_SIZE),
	%% p3, 6291456 to 8388608 (chunks are stored in disk pool)
	{TX5, B5} = mine_block(Node, Wallet, ?ALIGNED_PARTITION_SIZE, 0),
	%% List of {Block, EndOffset, ChunkSize}
	Chunks = [
		%% PaddedEndOffset: 2359296
		{B1, ?ALIGNED_PARTITION_SIZE + ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE},
		%% PaddedEndOffset: 2621440
		{B1, ?ALIGNED_PARTITION_SIZE + (2*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE},
		%% PaddedEndOffset: 2883584
		{B1, ?ALIGNED_PARTITION_SIZE + floor(2.5 * ?DATA_CHUNK_SIZE),
			floor(0.5 * ?DATA_CHUNK_SIZE)},
		%% PaddedEndOffset: 3145728
		{B2, ?ALIGNED_PARTITION_SIZE + floor(3.75 * ?DATA_CHUNK_SIZE),
			floor(0.75 * ?DATA_CHUNK_SIZE)},
		%% PaddedEndOffset: 3407872
		{B3, ?ALIGNED_PARTITION_SIZE + (5*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE},
		%% PaddedEndOffset: 3670016
		{B3, ?ALIGNED_PARTITION_SIZE + (6*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE},
		%% PaddedEndOffset: 3932160
		{B3, ?ALIGNED_PARTITION_SIZE + (7*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE},
		%% PaddedEndOffset: 4194304
		{B3, ?ALIGNED_PARTITION_SIZE + (8*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE}
	],
	?LOG_INFO("Source node ~p blocks mined.", [Node]),
	SourcePacking = packing_type_to_packing(PackingType, RewardAddr),
	assert_syncs_range(Node, SourcePacking, 0, 4*?ALIGNED_PARTITION_SIZE),
	assert_chunks(Node, SourcePacking, Chunks),
	%% Restart the node to allow it to copy chunks between storage modules.
	ar_test_node:restart(Node),
	?LOG_INFO("Source node ~p restarted.", [Node]),
	assert_syncs_range(Node, SourcePacking, 0, 4*?ALIGNED_PARTITION_SIZE),
	assert_chunks(Node, SourcePacking, Chunks),
	assert_partition_size(Node, 0, SourcePacking),
	assert_partition_size(Node, 1, SourcePacking),
	%% pack_served_chunks is not enabled so we shouldn't return unpacked data
	?assertMatch({ok, {{<<"404">>, _}, _, _, _, _}}, ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(Node),
		path => "/tx/" ++ binary_to_list(ar_util:encode(TX1#tx.id)) ++ "/data"
	})),
	?LOG_INFO("Source node ~p assertions passed.", [Node]),
	{[B0, {TX1, B1}, {TX2, B2}, {TX3, B3}, {TX4, B4}, {TX5, B5}], RewardAddr, Chunks}.

%% @doc Return the largest chunk end offset in a {Block, EndOffset, Size} list.
max_chunk_offset(Chunks) ->
	lists:foldl(fun({_, EndOffset, _}, Acc) -> max(Acc, EndOffset) end, 0, Chunks).

%% @doc Compute the chunk-aligned number of bytes the given partition is
%% expected to hold under the node's configured storage modules (including
%% repack-in-place modules, counted at their target packing).
aligned_partition_size(Node, Partition, Packing) ->
	{ok, Config} = ar_test_node:get_config(Node),
	%% Include both regular storage modules and repack_in_place modules.
	%% For repack_in_place modules, use the target packing.
	RepackInPlaceModules = [{BucketSize, Bucket, TargetPacking}
		|| {{BucketSize, Bucket, _FromPacking}, TargetPacking}
		<- Config#config.repack_in_place_storage_modules],
	AllStorageModules = Config#config.storage_modules ++ RepackInPlaceModules,
	PartitionStart = Partition * ar_block:partition_size(),
	PartitionEnd = (Partition + 1) * ar_block:partition_size(),
	StorageModules = filter_storage_modules_by_partition(
		PartitionStart, PartitionEnd, AllStorageModules),
	StorageModules2 = filter_storage_modules_by_packing(StorageModules, Packing),
	aligned_partition_size2(StorageModules2, PartitionStart, PartitionEnd, 0).
%% @doc Keep only the storage modules whose byte range overlaps the
%% [PartitionStart, PartitionEnd) range.
filter_storage_modules_by_partition(PartitionStart, PartitionEnd, Modules) ->
	lists:filter(fun({ModuleSize, Bucket, _Packing}) ->
		ModuleStart = Bucket * ModuleSize,
		ModuleEnd = ModuleStart + ModuleSize,
		ModuleStart < PartitionEnd andalso ModuleEnd > PartitionStart
	end, Modules).

%% @doc Keep only the storage modules whose packing matches the requested
%% packing. unpacked_padded matches any {replica_2_9, _} module.
%% NOTE(review): the unpacked_padded clause recurses with the matched
%% {replica_2_9, Addr} term as the new filter, not with unpacked_padded -
%% so the remaining modules are filtered against that specific address.
%% Confirm this narrowing is intentional.
filter_storage_modules_by_packing([{_, _, {replica_2_9, _} = Packing} = Module | Modules],
		unpacked_padded) ->
	[Module | filter_storage_modules_by_packing(Modules, Packing)];
filter_storage_modules_by_packing([{_, _, Packing} = Module | Modules], Packing) ->
	[Module | filter_storage_modules_by_packing(Modules, Packing)];
filter_storage_modules_by_packing([_Module | Modules], Packing) ->
	filter_storage_modules_by_packing(Modules, Packing);
filter_storage_modules_by_packing([], _Packing) ->
	[].

%% @doc Sum the chunk-aligned sizes of the given modules clipped to the
%% partition range, extending each module by its packing-specific overlap.
aligned_partition_size2([{ModuleSize, Bucket, Packing} | Modules],
		PartitionStart, PartitionEnd, Acc) ->
	Overlap = ar_storage_module:get_overlap(Packing),
	ModuleStart = Bucket * ModuleSize,
	ModuleEnd = ModuleStart + ModuleSize,
	ClippedStart = max(ModuleStart, PartitionStart),
	ClippedEnd = min(ModuleEnd, PartitionEnd),
	AlignedModuleStart = max(0,
		ar_block:get_chunk_padded_offset(ClippedStart) - ?DATA_CHUNK_SIZE),
	AlignedModuleEnd = ar_block:get_chunk_padded_offset(ClippedEnd + Overlap),
	AlignedModuleSize = AlignedModuleEnd - AlignedModuleStart,
	aligned_partition_size2(Modules, PartitionStart, PartitionEnd, Acc + AlignedModuleSize);
aligned_partition_size2([], _PartitionStart, _PartitionEnd, Acc) ->
	Acc.

%% @doc source_node_storage_modules/4 with the default module size.
source_node_storage_modules(Node, PackingType, WalletFixture) ->
	source_node_storage_modules(Node, PackingType, WalletFixture, default).
%% @doc Return {Wallet, StorageModules} for the source node. For unpacked
%% nodes no wallet is needed (undefined); otherwise the wallet fixture is
%% installed on the remote node and its address determines the packing.
source_node_storage_modules(_Node, unpacked, _WalletFixture, ModuleSize) ->
	{undefined, source_node_storage_modules(unpacked, ModuleSize)};
source_node_storage_modules(Node, PackingType, WalletFixture, ModuleSize) ->
	Wallet = ar_test_node:remote_call(Node, ar_e2e, load_wallet_fixture, [WalletFixture]),
	RewardAddr = ar_wallet:to_address(Wallet),
	SourcePacking = packing_type_to_packing(PackingType, RewardAddr),
	{Wallet, source_node_storage_modules(SourcePacking, ModuleSize)}.

%% @doc Build the storage-module list: 5 partition-sized modules (default),
%% or a double-sized first module plus 18 quarter-sized ones (small).
source_node_storage_modules(SourcePacking, default) ->
	Size = ar_block:partition_size(),
	lists:map(fun(I) -> {Size, I, SourcePacking} end, lists:seq(0, 4));
source_node_storage_modules(SourcePacking, small) ->
	Size = ar_block:partition_size() div 4,
	%% Put strict data split threshold inside the first storage module.
	[{Size * 2, 0, SourcePacking} |
		lists:map(fun(I) -> {Size, I, SourcePacking} end, lists:seq(2, 19))].

%% @doc Mine one block carrying a transaction of DataSize bytes of genesis
%% data, post its chunk proofs, and wait until the node has synced them.
%% IsTemporary is forwarded to ar_test_data_sync:post_proofs/5.
mine_block(Node, Wallet, DataSize, IsTemporary) ->
	WeaveSize = ar_test_node:remote_call(Node, ar_node, get_current_weave_size, []),
	Addr = ar_wallet:to_address(Wallet),
	{TX, Chunks} = generate_tx(Node, Wallet, WeaveSize, DataSize),
	B = ar_test_node:post_and_mine(#{ miner => Node, await_on => Node }, [TX]),
	?assertEqual(Addr, B#block.reward_addr),
	Proofs = ar_test_data_sync:post_proofs(Node, B, TX, Chunks, IsTemporary),
	ar_test_data_sync:wait_until_syncs_chunks(Node, Proofs, infinity),
	{TX, B}.

%% @doc Build a signed data transaction over generated genesis chunks and
%% return {TX, Chunks}.
generate_tx(Node, Wallet, WeaveSize, DataSize) ->
	Chunks = generate_chunks(Node, WeaveSize, DataSize, []),
	{DataRoot, _DataTree} = ar_merkle:generate_tree(
		[{ar_tx:generate_chunk_id(Chunk), Offset} || {Chunk, Offset} <- Chunks]
	),
	TX = ar_test_node:sign_tx(Node, Wallet, #{
		data_size => DataSize,
		data_root => DataRoot
	}),
	{TX, [Chunk || {Chunk, _} <- Chunks]}.
%% @doc Split DataSize bytes of genesis data into ?DATA_CHUNK_SIZE-sized
%% chunks (last chunk may be smaller), returning [{Chunk, EndOffset}] in
%% ascending offset order.
generate_chunks(Node, WeaveSize, DataSize, Acc) when DataSize > 0 ->
	ChunkSize = min(DataSize, ?DATA_CHUNK_SIZE),
	EndOffset = (length(Acc) * ?DATA_CHUNK_SIZE) + ChunkSize,
	Chunk = ar_test_node:get_genesis_chunk(WeaveSize + EndOffset),
	%% Prepend and reverse at the end instead of Acc ++ [Elem] (O(n^2)).
	generate_chunks(Node, WeaveSize, DataSize - ChunkSize, [{Chunk, EndOffset} | Acc]);
generate_chunks(_, _, _, Acc) ->
	lists:reverse(Acc).

%% @doc Walk the recall bytes in (RangeStart, RangeEnd], asserting each one
%% can be fetched as an unpacked chunk. Logs an error and stops on the first
%% failure.
assert_recall_byte(_Node, RangeStart, RangeEnd) when RangeStart > RangeEnd ->
	ok;
assert_recall_byte(Node, RangeStart, RangeEnd) ->
	Options = #{ pack => true, packing => unpacked, origin => miner },
	Result = ar_test_node:remote_call(
		Node, ar_data_sync, get_chunk, [RangeStart + 1, Options]),
	case Result of
		{ok, _} ->
			?LOG_INFO("Recall byte found at ~p", [RangeStart + 1]),
			assert_recall_byte(Node, RangeStart + 1, RangeEnd);
		Error ->
			?LOG_ERROR([{event, recall_byte_not_found},
					{recall_byte, RangeStart}, {error, Error}])
	end.

%% @doc Assert the mined block carries the expected reward address and the
%% packing difficulty implied by the packing scheme.
assert_block({spora_2_6, Address}, MinedBlock) ->
	?assertEqual(Address, MinedBlock#block.reward_addr),
	?assertEqual(0, MinedBlock#block.packing_difficulty);
assert_block({composite, Address, PackingDifficulty}, MinedBlock) ->
	?assertEqual(Address, MinedBlock#block.reward_addr),
	?assertEqual(PackingDifficulty, MinedBlock#block.packing_difficulty);
assert_block({replica_2_9, Address}, MinedBlock) ->
	?assertEqual(Address, MinedBlock#block.reward_addr),
	?assertEqual(?REPLICA_2_9_PACKING_DIFFICULTY, MinedBlock#block.packing_difficulty).
%% @doc Poll (every 100 ms, up to 60 s) until the entropy sync record on the
%% remote node fully covers [StartOffset, EndOffset); fail the test with the
%% observed intervals otherwise.
assert_has_entropy(Node, StartOffset, EndOffset, StoreID) ->
	RangeSize = EndOffset - StartOffset,
	HasEntropy = ar_util:do_until(
		fun() ->
			Intersection = ar_test_node:remote_call(
				Node, ar_sync_record, get_intersection_size,
				[EndOffset, StartOffset, ar_entropy_storage:sync_record_id(), StoreID]),
			Intersection >= RangeSize
		end,
		100, 60_000
	),
	case HasEntropy of
		true ->
			ok;
		_ ->
			Intersection = ar_test_node:remote_call(
				Node, ar_sync_record, get_intersection_size,
				[EndOffset, StartOffset, ar_entropy_storage:sync_record_id(), StoreID]),
			Intervals = ar_test_node:remote_call(
				Node, ar_sync_record, get,
				[ar_entropy_storage:sync_record_id(), StoreID]),
			?assert(false, iolist_to_binary(io_lib:format(
				"~s failed to prepare entropy range ~p - ~p. "
				"Intersection size: ~p. Intervals: ~p",
				[Node, StartOffset, EndOffset, Intersection,
					ar_intervals:to_list(Intervals)])))
	end.

%% @doc Poll (every 100 ms, up to 15 s) asserting the entropy sync record
%% never intersects [StartOffset, EndOffset); fail the test with the
%% observed intervals if any entropy appears.
assert_no_entropy(Node, StartOffset, EndOffset, StoreID) ->
	HasEntropy = ar_util:do_until(
		fun() ->
			Intersection = ar_test_node:remote_call(
				Node, ar_sync_record, get_intersection_size,
				[EndOffset, StartOffset, ar_entropy_storage:sync_record_id(), StoreID]),
			Intersection > 0
		end,
		100, 15_000
	),
	case HasEntropy of
		true ->
			Intersection = ar_test_node:remote_call(
				Node, ar_sync_record, get_intersection_size,
				[EndOffset, StartOffset, ar_entropy_storage:sync_record_id(), StoreID]),
			Intervals = ar_test_node:remote_call(
				Node, ar_sync_record, get,
				[ar_entropy_storage:sync_record_id(), StoreID]),
			?assert(false, iolist_to_binary(io_lib:format(
				"~s found entropy when it should not have. Range: ~p - ~p. "
				"Intersection size: ~p. Intervals: ~p",
				[Node, StartOffset, EndOffset, Intersection,
					ar_intervals:to_list(Intervals)])));
		_ ->
			ok
	end.

%% @doc Packing-agnostic wrapper; the packing argument is ignored.
assert_syncs_range(Node, _Packing, StartOffset, EndOffset) ->
	assert_syncs_range(Node, StartOffset, EndOffset).
%% @doc Poll (every 100 ms, up to 300 s) until the node reports the byte
%% range as synced; fail the test with the node's sync record otherwise.
assert_syncs_range(Node, StartOffset, EndOffset) ->
	HasRange = ar_util:do_until(
		fun() -> has_range(Node, StartOffset, EndOffset) end,
		100, 300_000
	),
	case HasRange of
		true ->
			ok;
		_ ->
			{ok, SyncRecord} = ar_http_iface_client:get_sync_record(
				ar_test_node:peer_ip(Node)),
			?assert(false, iolist_to_binary(io_lib:format(
				"~s failed to sync range ~p - ~p. Sync record: ~p",
				[Node, StartOffset, EndOffset, ar_intervals:to_list(SyncRecord)])))
	end.

%% @doc Give the node 15 s to (incorrectly) sync the range, then assert it
%% has not done so.
assert_does_not_sync_range(Node, StartOffset, EndOffset) ->
	ar_util:do_until(
		fun() -> has_range(Node, StartOffset, EndOffset) end,
		1000, 15_000
	),
	?assertEqual(false, has_range(Node, StartOffset, EndOffset),
		iolist_to_binary(io_lib:format(
			"~s synced range when it should not have: ~p - ~p",
			[Node, StartOffset, EndOffset]))).

%% @doc Assert the partition holds exactly its expected aligned size.
assert_partition_size(Node, PartitionNumber, Packing) ->
	PartitionSize = aligned_partition_size(Node, PartitionNumber, Packing),
	assert_partition_size(Node, PartitionNumber, Packing, PartitionSize).

%% @doc Poll (every 100 ms, up to 120 s) until the reported partition data
%% size reaches Size, then assert it equals Size exactly.
assert_partition_size(Node, PartitionNumber, Packing, Size) ->
	?LOG_INFO("~p: Asserting partition ~p,~p is size ~p",
		[Node, PartitionNumber, ar_serialize:encode_packing(Packing, true), Size]),
	ar_util:do_until(
		fun() ->
			ar_test_node:remote_call(Node, ar_mining_stats, get_partition_data_size,
				[PartitionNumber, Packing]) >= Size
		end,
		100, 120_000
	),
	?assertEqual(
		Size,
		ar_test_node:remote_call(Node, ar_mining_stats, get_partition_data_size,
			[PartitionNumber, Packing]),
		iolist_to_binary(io_lib:format(
			"~s partition ~p,~p was not the expected size.",
			[Node, PartitionNumber, ar_serialize:encode_packing(Packing, true)]))).
%% @doc Give the partition 15 s to (incorrectly) accumulate data, then
%% assert its reported data size is zero.
assert_empty_partition(Node, PartitionNumber, Packing) ->
	ar_util:do_until(
		fun() ->
			ar_test_node:remote_call(Node, ar_mining_stats, get_partition_data_size,
				[PartitionNumber, Packing]) > 0
		end,
		100, 15_000
	),
	?assertEqual(
		0,
		ar_test_node:remote_call(Node, ar_mining_stats, get_partition_data_size,
			[PartitionNumber, Packing]),
		iolist_to_binary(io_lib:format(
			"~s partition ~p,~p is not empty",
			[Node, PartitionNumber, ar_serialize:encode_packing(Packing, true)]))).

%% @doc Mine a block on MinerNode, assert its packing metadata, and assert
%% ValidatorNode accepts and reports the identical block.
assert_mine_and_validate(MinerNode, ValidatorNode, MinerPacking) ->
	CurrentHeight = max(
		ar_test_node:remote_call(ValidatorNode, ar_node, get_height, []),
		ar_test_node:remote_call(MinerNode, ar_node, get_height, [])
	),
	ar_test_node:wait_until_height(ValidatorNode, CurrentHeight),
	ar_test_node:wait_until_height(MinerNode, CurrentHeight),
	ar_test_node:mine(MinerNode),
	MinerBI = ar_test_node:wait_until_height(MinerNode, CurrentHeight + 1),
	{ok, MinerBlock} = ar_test_node:http_get_block(element(1, hd(MinerBI)), MinerNode),
	assert_block(MinerPacking, MinerBlock),
	ValidatorBI = ar_test_node:wait_until_height(ValidatorNode, MinerBlock#block.height),
	{ok, ValidatorBlock} = ar_test_node:http_get_block(element(1, hd(ValidatorBI)),
		ValidatorNode),
	?assertEqual(MinerBlock, ValidatorBlock).

%% @doc Return the union of the node's regular sync record and its footprint
%% intervals; falls back to footprint intervals alone when the sync record
%% request fails.
get_intervals(NodeIP, StartOffset, EndOffset) ->
	case ar_http_iface_client:get_sync_record(NodeIP) of
		{ok, RegularIntervals} ->
			FootprintIntervals = collect_footprint_intervals(NodeIP, StartOffset, EndOffset),
			AllIntervals = ar_intervals:union(RegularIntervals, FootprintIntervals),
			AllIntervals;
		_Error ->
			FootprintIntervals = collect_footprint_intervals(NodeIP, StartOffset, EndOffset),
			FootprintIntervals
	end.
%% @doc True if the union of the node's sync record and footprint intervals
%% fully covers [StartOffset, EndOffset). Fails the test (and returns false)
%% when the sync record cannot be fetched at all.
has_range(Node, StartOffset, EndOffset) ->
	NodeIP = ar_test_node:peer_ip(Node),
	case ar_http_iface_client:get_sync_record(NodeIP) of
		{ok, RegularIntervals} ->
			FootprintIntervals = collect_footprint_intervals(NodeIP, StartOffset, EndOffset),
			AllIntervals = ar_intervals:union(RegularIntervals, FootprintIntervals),
			interval_contains(AllIntervals, StartOffset, EndOffset);
		Error ->
			Intervals = get_intervals(NodeIP, StartOffset, EndOffset),
			?assert(false, iolist_to_binary(io_lib:format(
				"Failed to get sync record from ~p: ~p; range: ~p - ~p; "
				"intervals managed to collect: ~p",
				[Node, Error, StartOffset, EndOffset, ar_intervals:to_list(Intervals)]))),
			false
	end.

%% @doc Fetch footprint records for every {partition, footprint} pair that
%% overlaps [StartOffset, EndOffset) and convert them to byte intervals.
collect_footprint_intervals(NodeIP, StartOffset, EndOffset) ->
	StartPartition = ar_replica_2_9:get_entropy_partition(StartOffset + 1),
	LastPartition = ar_replica_2_9:get_entropy_partition(EndOffset + 1),
	FootprintsPerPartition = ar_footprint_record:get_footprints_per_partition(),
	collect_footprint_intervals(NodeIP, StartPartition, LastPartition, 0,
		FootprintsPerPartition - 1, ar_intervals:new()).
%% @doc Iterate all footprints of all partitions in [Partition, LastPartition],
%% accumulating the byte intervals each footprint record covers. A missing
%% record is reported and skipped; any other error fails the test.
collect_footprint_intervals(_NodeIP, Partition, LastPartition, _Footprint, _MaxFootprint,
		Acc) when Partition > LastPartition ->
	Acc;
collect_footprint_intervals(NodeIP, Partition, LastPartition, Footprint, MaxFootprint,
		Acc) when Footprint > MaxFootprint ->
	%% Finished this partition's footprints; move to the next partition.
	collect_footprint_intervals(NodeIP, Partition + 1, LastPartition, 0, MaxFootprint, Acc);
collect_footprint_intervals(NodeIP, Partition, LastPartition, Footprint, MaxFootprint,
		Acc) ->
	FootprintByteIntervals =
		case ar_http_iface_client:get_footprints(NodeIP, Partition, Footprint) of
			{ok, FootprintIntervals} ->
				ar_footprint_record:get_intervals_from_footprint_intervals(
					FootprintIntervals);
			not_found ->
				?debugFmt("No footprint record found on ~p for partition ~B, footprint ~B",
					[NodeIP, Partition, Footprint]),
				ar_intervals:new();
			Error ->
				?assert(false, iolist_to_binary(io_lib:format(
					"Failed to get footprint record from ~p: ~p, "
					"partition: ~B, footprint: ~B",
					[NodeIP, Error, Partition, Footprint])))
		end,
	NewAcc = ar_intervals:union(Acc, FootprintByteIntervals),
	collect_footprint_intervals(NodeIP, Partition, LastPartition, Footprint + 1,
		MaxFootprint, NewAcc).

%% @doc True if one interval of the {End, Start}-keyed set fully contains
%% [Start, End). Only defined for non-empty ranges (End > Start).
interval_contains(Intervals, Start, End) when End > Start ->
	%% The previous version wrapped this in a case with a single catch-all
	%% pattern; a plain binding is equivalent.
	Iter = gb_sets:iterator_from({Start, Start}, Intervals),
	interval_contains2(Iter, Start, End).

%% @doc Inspect the first interval at or after {Start, Start}; since
%% intervals are disjoint, only that one can contain the range.
interval_contains2(Iter, Start, End) ->
	case gb_sets:next(Iter) of
		none ->
			false;
		{{IntervalEnd, IntervalStart}, _}
				when IntervalStart =< Start andalso IntervalEnd >= End ->
			true;
		_ ->
			false
	end.

%% @doc assert_chunks/4 with no packing constraint on the request.
assert_chunks(Node, Packing, Chunks) ->
	assert_chunks(Node, any, Packing, Chunks).

%% @doc Assert every {Block, EndOffset, ChunkSize} entry can be fetched and
%% verified on the node.
assert_chunks(Node, RequestPacking, Packing, Chunks) ->
	lists:foreach(fun({Block, EndOffset, ChunkSize}) ->
		assert_chunk(Node, RequestPacking, Packing, Block, EndOffset, ChunkSize)
	end, Chunks).
%% @doc Fetch the chunk at EndOffset from the node and assert: the HTTP
%% status is 200, the merkle proofs validate, the packed bytes match the
%% stored fixture, and the unpacked/unpadded bytes match the genesis data.
assert_chunk(Node, RequestPacking, Packing, Block, EndOffset, ChunkSize) ->
	?LOG_INFO("Asserting chunk at offset ~p, size ~p", [EndOffset, ChunkSize]),
	Result = ar_test_node:get_chunk(Node, EndOffset, RequestPacking),
	{ok, {{StatusCode, _}, _, EncodedProof, _, _}} = Result,
	?assertEqual(<<"200">>, StatusCode, iolist_to_binary(io_lib:format(
		"Chunk not found. Node: ~p, Offset: ~p", [Node, EndOffset]))),
	%% Decode the proof once; the previous code repeated this statement,
	%% decoding the JSON a second time for a redundant re-match.
	Proof = ar_serialize:json_map_to_poa_map(
		jiffy:decode(EncodedProof, [return_maps])
	),
	ChunkMetadata = #chunk_metadata{
		tx_root = Block#block.tx_root,
		tx_path = maps:get(tx_path, Proof),
		data_path = maps:get(data_path, Proof)
	},
	ChunkProof = ar_test_node:remote_call(Node, ar_poa, chunk_proof,
		[ChunkMetadata, EndOffset - 1]),
	?LOG_INFO([{chunk_proof, ChunkProof}]),
	{true, _} = ar_test_node:remote_call(Node, ar_poa, validate_paths, [ChunkProof]),
	Chunk = maps:get(chunk, Proof),
	%% Optionally refresh the on-disk fixture (guarded by ?UPDATE_CHUNK_FIXTURES).
	maybe_write_chunk_fixture(Packing, EndOffset, Chunk),
	{ok, ExpectedPackedChunk} = load_chunk_fixture(Packing, EndOffset),
	?assertEqual(byte_size(ExpectedPackedChunk), byte_size(Chunk),
		iolist_to_binary(io_lib:format(
			"~p: Chunk at offset ~p size mismatch expected ~p, got ~p",
			[Node, EndOffset, byte_size(ExpectedPackedChunk), byte_size(Chunk)]))),
	?assertEqual(ExpectedPackedChunk, Chunk,
		iolist_to_binary(io_lib:format(
			"~p: Chunk at offset ~p, size ~p, packing ~p does not match packed chunk",
			[Node, EndOffset, ChunkSize, ar_serialize:encode_packing(Packing, true)]))),
	{ok, UnpackedChunk} = ar_packing_server:unpack(
		Packing, EndOffset, Block#block.tx_root, Chunk, ?DATA_CHUNK_SIZE),
	UnpaddedChunk = ar_packing_server:unpad_chunk(
		Packing, UnpackedChunk, ChunkSize, byte_size(Chunk)),
	ExpectedUnpackedChunk = ar_test_node:get_genesis_chunk(EndOffset),
	?assertEqual(ExpectedUnpackedChunk, UnpaddedChunk,
		iolist_to_binary(io_lib:format(
			"~p: Chunk at offset ~p, size ~p does not match unpacked chunk",
			[Node, EndOffset, ChunkSize]))).
ChunkSize]))). assert_no_chunks(Node, Chunks) -> lists:foreach(fun({_Block, EndOffset, _ChunkSize}) -> assert_no_chunk(Node, EndOffset) end, Chunks). assert_no_chunk(Node, EndOffset) -> Result = ar_test_node:get_chunk(Node, EndOffset, any), {ok, {{StatusCode, _}, _, _, _, _}} = Result, ?assertEqual(<<"404">>, StatusCode, iolist_to_binary(io_lib:format( "Chunk found when it should not have been. Node: ~p, Offset: ~p", [Node, EndOffset]))). delayed_print(Format, Args) -> %% Print the specific flavor of this test since it isn't captured in the test name. %% Delay the print by 1 second to allow the eunit output to be flushed. spawn(fun() -> timer:sleep(1000), io:fwrite(user, Format, Args) end). %% -------------------------------------------------------------------------------------------- %% Test Data Generation %% -------------------------------------------------------------------------------------------- write_wallet_fixtures() -> Wallets = [wallet_a, wallet_b, wallet_c, wallet_d], lists:foreach(fun(Wallet) -> WalletName = atom_to_list(Wallet), ar_wallet:new_keyfile(?DEFAULT_KEY_TYPE, WalletName), install_fixture( ar_wallet:wallet_filepath(Wallet), wallets, WalletName ++ ".json") end, Wallets), ok. maybe_write_chunk_fixture(Packing, EndOffset, Chunk) when ?UPDATE_CHUNK_FIXTURES =:= true -> ?LOG_ERROR("WARNING: Updating chunk fixture! EndOffset: ~p, Packing: ~p", [EndOffset, ar_serialize:encode_packing(Packing, true)]), write_chunk_fixture(Packing, EndOffset, Chunk); maybe_write_chunk_fixture(_, _, _) -> ok. ================================================ FILE: apps/arweave/src/e2e/ar_repack_in_place_mine_tests.erl ================================================ -module(ar_repack_in_place_mine_tests). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("eunit/include/eunit.hrl"). -define(REPACK_IN_PLACE_MINE_TEST_TIMEOUT, 600). 
%% --------------------------------------------------------------------------------------------
%% Test Registration
%% --------------------------------------------------------------------------------------------
%%
%% Note:
%% Repacking in place *from* replica_2_9 to any format is not currently supported.
repack_in_place_mine_test_() ->
	Timeout = ?REPACK_IN_PLACE_MINE_TEST_TIMEOUT,
	%% Every {FromPackingType, ToPackingType, ModuleSize} combination covered
	%% by test_repack_in_place_mine/1.
	Cases = [
		{unpacked, replica_2_9, default},
		{spora_2_6, replica_2_9, default},
		{replica_2_9, replica_2_9, default},
		{replica_2_9, unpacked, default},
		{spora_2_6, unpacked, default},
		{unpacked, replica_2_9, small},
		{spora_2_6, replica_2_9, small},
		{replica_2_9, replica_2_9, small},
		{replica_2_9, unpacked, small},
		{spora_2_6, unpacked, small}
	],
	[{timeout, Timeout, {with, Case, [fun test_repack_in_place_mine/1]}} || Case <- Cases].
%% --------------------------------------------------------------------------------------------
%% test_repack_in_place_mine
%% --------------------------------------------------------------------------------------------
%% @doc End-to-end repack-in-place scenario: seed a source node with
%% FromPackingType data, repack its storage modules in place to ToPackingType,
%% rename the module directories to their final packing-labelled names, then
%% verify the chunks and (unless the target is unpacked) mine and validate.
test_repack_in_place_mine({FromPackingType, ToPackingType, ModuleSize}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p (~p) ">>, [FromPackingType, ToPackingType, ModuleSize]),
	?LOG_INFO([{event, test_repack_in_place_mine}, {module, ?MODULE},
		{from_packing_type, FromPackingType}, {to_packing_type, ToPackingType},
		{module_size, ModuleSize}]),
	ValidatorNode = peer1,
	RepackerNode = peer2,
	%% Start from a clean slate on both peers.
	ar_test_node:stop(ValidatorNode),
	ar_test_node:stop(RepackerNode),
	{Blocks, _AddrA, Chunks} = ar_e2e:start_source_node(
		RepackerNode, FromPackingType, wallet_a, ModuleSize),
	[B0 | _] = Blocks,
	start_validator_node(ValidatorNode, RepackerNode, B0),
	%% Small modules cover less range each, so more of them are needed.
	NumModules = case ModuleSize of
		default -> 2;
		small -> 8
	end,
	{WalletB, SourceStorageModules} = ar_e2e:source_node_storage_modules(
		RepackerNode, ToPackingType, wallet_b, ModuleSize),
	%% WalletB is undefined for packing types that need no mining address.
	AddrB = case WalletB of
		undefined -> undefined;
		_ -> ar_wallet:to_address(WalletB)
	end,
	FinalStorageModules = lists:sublist(SourceStorageModules, NumModules),
	ToPacking = ar_e2e:packing_type_to_packing(ToPackingType, AddrB),
	{ok, Config} = ar_test_node:get_config(RepackerNode),
	%% Pair each existing storage module with the target packing for the
	%% in-place repack configuration.
	RepackInPlaceStorageModules = lists:sublist(
		[ {Module, ToPacking} || Module <- Config#config.storage_modules ], NumModules),
	%% Phase 1: restart with only repack-in-place modules and no mining
	%% address so the node just repacks.
	ar_test_node:restart_with_config(RepackerNode, Config#config{
		storage_modules = [],
		repack_in_place_storage_modules = RepackInPlaceStorageModules,
		mining_addr = undefined
	}),
	ExpectedSize0 = ar_e2e:aligned_partition_size(RepackerNode, 0, ToPacking),
	ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking, ExpectedSize0),
	ExpectedSize1 = ar_e2e:aligned_partition_size(RepackerNode, 1, ToPacking),
	ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking, ExpectedSize1),
	ar_test_node:stop(RepackerNode),
	%% Rename storage_modules
	DataDir = Config#config.data_dir,
	%% The repacked data still lives in directories named after the source
	%% packing; rename each directory to the target-packing module id.
	lists:foreach(fun({SourceModule, Packing}) ->
		{BucketSize, Bucket, _Packing} = SourceModule,
		SourceID = ar_storage_module:id(SourceModule),
		SourcePath = ar_chunk_storage:get_storage_module_path(DataDir, SourceID),
		TargetModule = {BucketSize, Bucket, Packing},
		TargetID = ar_storage_module:id(TargetModule),
		TargetPath = ar_chunk_storage:get_storage_module_path(DataDir, TargetID),
		file:rename(SourcePath, TargetPath)
	end, RepackInPlaceStorageModules),
	%% Phase 2: restart serving the renamed modules normally and verify.
	ar_test_node:restart_with_config(RepackerNode, Config#config{
		storage_modules = FinalStorageModules,
		repack_in_place_storage_modules = [],
		mining_addr = AddrB
	}),
	ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks),
	case ToPackingType of
		unpacked -> ok;
		_ -> ar_e2e:assert_mine_and_validate(RepackerNode, ValidatorNode, ToPacking)
	end.

%% @doc Start ValidatorNode from genesis block B0, peered to RepackerNode,
%% joining automatically from the latest state.
start_validator_node(ValidatorNode, RepackerNode, B0) ->
	{ok, Config} = ar_test_node:get_config(ValidatorNode),
	?assertEqual(ar_test_node:peer_name(ValidatorNode),
		ar_test_node:start_other_node(ValidatorNode, B0, Config#config{
			peers = [ar_test_node:peer_ip(RepackerNode)],
			start_from_latest_state = true,
			auto_join = true
		}, true)
	),
	ok.

================================================ FILE: apps/arweave/src/e2e/ar_repack_mine_tests.erl ================================================
-module(ar_repack_mine_tests).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

-define(REPACK_MINE_TEST_TIMEOUT, 600).
%% --------------------------------------------------------------------------------------------
%% Test Registration
%% --------------------------------------------------------------------------------------------
repack_mine_test_() ->
	Timeout = ?REPACK_MINE_TEST_TIMEOUT,
	[
		{timeout, Timeout, {with, {replica_2_9, replica_2_9}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {replica_2_9, spora_2_6}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {replica_2_9, unpacked}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {unpacked, replica_2_9}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {unpacked, spora_2_6}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {spora_2_6, replica_2_9}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {spora_2_6, spora_2_6}, [fun test_repack_mine/1]}},
		{timeout, Timeout, {with, {spora_2_6, unpacked}, [fun test_repack_mine/1]}}
	].

%% --------------------------------------------------------------------------------------------
%% test_repack_mine
%% --------------------------------------------------------------------------------------------
%% @doc Repack-by-syncing scenario: seed a source node with FromPackingType
%% data, add a second set of storage modules in the target packing so the node
%% repacks by copying between module sets, then restart with only the target
%% modules and verify chunks and mining.
test_repack_mine({FromPackingType, ToPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [FromPackingType, ToPackingType]),
	?LOG_INFO([{event, test_repack_mine}, {module, ?MODULE},
		{from_packing_type, FromPackingType}, {to_packing_type, ToPackingType}]),
	ValidatorNode = peer1,
	RepackerNode = peer2,
	%% Start from a clean slate on both peers.
	ar_test_node:stop(ValidatorNode),
	ar_test_node:stop(RepackerNode),
	{Blocks, _AddrA, Chunks} = ar_e2e:start_source_node(
		RepackerNode, FromPackingType, wallet_a),
	[B0 | _] = Blocks,
	start_validator_node(ValidatorNode, RepackerNode, B0),
	{WalletB, StorageModules} = ar_e2e:source_node_storage_modules(
		RepackerNode, ToPackingType, wallet_b),
	%% WalletB is undefined for packing types that need no mining address.
	AddrB = case WalletB of
		undefined -> undefined;
		_ -> ar_wallet:to_address(WalletB)
	end,
	ToPacking = ar_e2e:packing_type_to_packing(ToPackingType, AddrB),
	{ok, Config} = ar_test_node:get_config(RepackerNode),
	%% Phase 1: run with BOTH module sets so data is repacked into the new
	%% target-packing modules.
	ar_test_node:restart_with_config(RepackerNode, Config#config{
		storage_modules = Config#config.storage_modules ++ StorageModules,
		mining_addr = AddrB
	}),
	ar_e2e:assert_syncs_range(RepackerNode, 0, 4*ar_block:partition_size()),
	ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
	ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
	%% Only the first half of partition 2 is below the disk pool threshold at
	%% this point; compute the chunk-aligned size of that half.
	RangeStart2 = 2 * ar_block:partition_size(),
	RangeEnd2 = RangeStart2 + floor(0.5 * ar_block:partition_size()),
	RangeSize2 = ar_util:ceil_int(RangeEnd2, ?DATA_CHUNK_SIZE)
		- ar_util:floor_int(RangeStart2, ?DATA_CHUNK_SIZE),
	ar_e2e:assert_partition_size(
		RepackerNode, 2, ToPacking, RangeSize2),
	%% Don't assert chunks here. Since we have two storage modules defined we won't know
	%% which packing format will be found - which complicates the assertion. We'll rely
	%% on the assert_chunks later (after we restart with only a single set of storage modules)
	%% to verify that the chunks are present.
	%% ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks),
	ar_e2e:assert_empty_partition(RepackerNode, 3, ToPacking),
	%% Phase 2: drop the source modules and serve only the target-packing
	%% modules, then re-verify everything including the chunks.
	ar_test_node:restart_with_config(RepackerNode, Config#config{
		storage_modules = StorageModules,
		mining_addr = AddrB
	}),
	ar_e2e:assert_syncs_range(RepackerNode, ToPacking, 0, 4*ar_block:partition_size()),
	ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
	ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
	RangeStart4 = 2 * ar_block:partition_size(),
	RangeEnd4 = RangeStart4 + floor(0.5 * ar_block:partition_size()),
	RangeSize4 = ar_util:ceil_int(RangeEnd4, ?DATA_CHUNK_SIZE)
		- ar_util:floor_int(RangeStart4, ?DATA_CHUNK_SIZE),
	ar_e2e:assert_partition_size(
		RepackerNode, 2, ToPacking, RangeSize4),
	ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks),
	ar_e2e:assert_empty_partition(RepackerNode, 3, ToPacking),
	case ToPackingType of
		unpacked ->
			ok;
		_ ->
			ar_e2e:assert_mine_and_validate(RepackerNode, ValidatorNode, ToPacking),
			%% Now that we mined a block, the rest of partition 2 is below the disk pool
			%% threshold
			ar_e2e:assert_syncs_range(RepackerNode, ToPacking, 0, 4*ar_block:partition_size()),
			ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
			ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
			ar_e2e:assert_partition_size(RepackerNode, 2, ToPacking),
			%% All of partition 3 is still above the disk pool threshold,
			%% except for two chunks, both are below the disk pool threshold = 6291456.
			%% The chunk ending at 6029312 crosses the beginning of the partition 3 so
			%% it is also synced by this partition.
			ar_e2e:assert_partition_size(RepackerNode, 3, ToPacking, 2 * ?DATA_CHUNK_SIZE)
	end.

%% @doc Start ValidatorNode from genesis block B0, peered to RepackerNode,
%% with no storage modules of its own (pure validator).
start_validator_node(ValidatorNode, RepackerNode, B0) ->
	{ok, Config} = ar_test_node:get_config(ValidatorNode),
	?assertEqual(ar_test_node:peer_name(ValidatorNode),
		ar_test_node:start_other_node(ValidatorNode, B0, Config#config{
			peers = [ar_test_node:peer_ip(RepackerNode)],
			start_from_latest_state = true,
			auto_join = true,
			storage_modules = []
		}, true)
	),
	ok.

================================================ FILE: apps/arweave/src/e2e/ar_sync_pack_mine_tests.erl ================================================
-module(ar_sync_pack_mine_tests).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% --------------------------------------------------------------------------------------------
%% Fixtures
%% --------------------------------------------------------------------------------------------
%% @doc Stop both peers and start peer1 as the source node seeded with
%% PackingType data. Returns {Blocks, Chunks, PackingType} used as the shared
%% eunit setup value for the generators below.
setup_source_node(PackingType) ->
	SourceNode = peer1,
	SinkNode = peer2,
	ar_test_node:stop(SinkNode),
	ar_test_node:stop(SourceNode),
	{Blocks, _SourceAddr, Chunks} = ar_e2e:start_source_node(SourceNode, PackingType, wallet_a),
	{Blocks, Chunks, PackingType}.

%% @doc Wrap TestFun in a 600s eunit timeout tuple, passing the shared
%% genesis data and the sink packing type as the `with` argument.
instantiator(GenesisData, SinkPackingType, TestFun) ->
	{timeout, 600, {with, {GenesisData, SinkPackingType}, [TestFun]}}.
%% --------------------------------------------------------------------------------------------
%% Test Registration
%% --------------------------------------------------------------------------------------------
%% Each generator seeds the source node once and then runs
%% test_sync_pack_mine/1 against each sink packing type.
replica_2_9_syncing_test_() ->
	{setup, fun () -> setup_source_node(replica_2_9) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, SinkPackingType, fun test_sync_pack_mine/1)
				|| SinkPackingType <- [replica_2_9, spora_2_6, unpacked]]
		end}.

spora_2_6_sync_pack_mine_test_() ->
	{setup, fun () -> setup_source_node(spora_2_6) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, SinkPackingType, fun test_sync_pack_mine/1)
				|| SinkPackingType <- [replica_2_9, spora_2_6, unpacked]]
		end}.

unpacked_sync_pack_mine_test_() ->
	{setup, fun () -> setup_source_node(unpacked) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, SinkPackingType, fun test_sync_pack_mine/1)
				|| SinkPackingType <- [replica_2_9, spora_2_6, unpacked]]
		end}.

% Note: we should limit the number of tests run per setup_source_node to 5, if it gets
% too long then the source node may hit a difficulty adjustment, which can impact the
% results.
unpacked_edge_case_test_() ->
	{setup, fun () -> setup_source_node(unpacked) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, SinkPackingType, TestFun)
				|| {SinkPackingType, TestFun} <- [
					{{replica_2_9, unpacked}, fun test_unpacked_and_packed_sync_pack_mine/1},
					{{unpacked, replica_2_9}, fun test_unpacked_and_packed_sync_pack_mine/1},
					{replica_2_9, fun test_entropy_first_sync_pack_mine/1},
					{replica_2_9, fun test_entropy_last_sync_pack_mine/1}]]
		end}.
spora_2_6_edge_case_test_() ->
	{setup, fun () -> setup_source_node(spora_2_6) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, SinkPackingType, TestFun)
				|| {SinkPackingType, TestFun} <- [
					{{replica_2_9, unpacked}, fun test_unpacked_and_packed_sync_pack_mine/1},
					{{unpacked, replica_2_9}, fun test_unpacked_and_packed_sync_pack_mine/1},
					{replica_2_9, fun test_entropy_first_sync_pack_mine/1},
					{replica_2_9, fun test_entropy_last_sync_pack_mine/1}]]
		end}.

%% Small-module generators: run the aligned and unaligned small storage module
%% scenarios against a replica_2_9 sink for each source packing type.
unpacked_small_module_test_() ->
	{setup, fun () -> setup_source_node(unpacked) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, replica_2_9, TestFun)
				|| TestFun <- [
					fun test_small_module_aligned_sync_pack_mine/1,
					fun test_small_module_unaligned_sync_pack_mine/1]]
		end}.

replica_2_9_small_module_test_() ->
	{setup, fun () -> setup_source_node(replica_2_9) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, replica_2_9, TestFun)
				|| TestFun <- [
					fun test_small_module_aligned_sync_pack_mine/1,
					fun test_small_module_unaligned_sync_pack_mine/1]]
		end}.

spora_2_6_small_module_test_() ->
	{setup, fun () -> setup_source_node(spora_2_6) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, replica_2_9, TestFun)
				|| TestFun <- [
					fun test_small_module_aligned_sync_pack_mine/1,
					fun test_small_module_unaligned_sync_pack_mine/1]]
		end}.

%% NOTE: the test_large_module_unaligned_sync_pack_mine test should always be
%% run after the test_large_module_aligned_sync_pack_mine test. This is because
%% once a block is mined it shifts the disk pool threshold and changes the
%% expected syncable chunks. The test_large_module_unaligned_sync_pack_mine test
%% assumes the higher disk pool threshold in its assertions.
%% Large-module generators: run the aligned scenario first, then the unaligned
%% one (ordering matters — see the NOTE above these generators).
unpacked_large_module_test_() ->
	{setup, fun () -> setup_source_node(unpacked) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, replica_2_9, TestFun)
				|| TestFun <- [
					fun test_large_module_aligned_sync_pack_mine/1,
					fun test_large_module_unaligned_sync_pack_mine/1]]
		end}.

replica_2_9_large_module_test_() ->
	{setup, fun () -> setup_source_node(replica_2_9) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, replica_2_9, TestFun)
				|| TestFun <- [
					fun test_large_module_aligned_sync_pack_mine/1,
					fun test_large_module_unaligned_sync_pack_mine/1]]
		end}.

spora_2_6_large_module_test_() ->
	{setup, fun () -> setup_source_node(spora_2_6) end,
		fun (GenesisData) ->
			[instantiator(GenesisData, replica_2_9, TestFun)
				|| TestFun <- [
					fun test_large_module_aligned_sync_pack_mine/1,
					fun test_large_module_unaligned_sync_pack_mine/1]]
		end}.

%% test_disk_pool_threshold performs its own source-node setup, so the source
%% packing type is passed directly instead of via a setup fixture.
disk_pool_threshold_test_() ->
	[instantiator(SourcePackingType, SinkPackingType, fun test_disk_pool_threshold/1)
		|| {SourcePackingType, SinkPackingType} <- [
			{unpacked, replica_2_9},
			{unpacked, spora_2_6},
			{spora_2_6, replica_2_9},
			{spora_2_6, spora_2_6},
			{spora_2_6, unpacked}]].
%% --------------------------------------------------------------------------------------------
%% test_sync_pack_mine
%% --------------------------------------------------------------------------------------------
%% @doc Basic sync/pack/mine: start the sink node in SinkPackingType, let it
%% sync partition 1 (plus overlap), verify the chunks, then (unless unpacked)
%% mine and validate.
test_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	SinkPacking = start_sink_node(SinkNode, SourceNode, B0, SinkPackingType),
	RangeStart = ar_block:partition_size(),
	RangeEnd = 2*ar_block:partition_size() + ar_storage_module:get_overlap(SinkPacking),
	%% Partition 1 and half of partition 2 are below the disk pool threshold
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, Chunks),
	case SinkPackingType of
		unpacked ->
			ok;
		_ ->
			ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
			ok
	end.

%% @doc Edge case: the sink runs two storage modules over the same range with
%% two different packings (one of them unpacked). Verify both module sets fill
%% and that the packed one can still mine.
test_unpacked_and_packed_sync_pack_mine(
		{{Blocks, _Chunks, SourcePackingType}, {PackingType1, PackingType2}}) ->
	ar_e2e:delayed_print(<<" ~p -> {~p, ~p} ">>,
		[SourcePackingType, PackingType1, PackingType2]),
	?LOG_INFO([{event, test_unpacked_and_packed_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType},
		{to_packing_type, {PackingType1, PackingType2}}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	{SinkPacking1, SinkPacking2} = start_sink_node(
		SinkNode, SourceNode, B0, PackingType1, PackingType2),
	RangeStart1 = ar_block:partition_size(),
	RangeEnd1 = 2*ar_block:partition_size() + ar_storage_module:get_overlap(SinkPacking1),
	%% Data exists as both packed and unpacked, so will exist in the global sync record
	%% even though replica_2_9 data is filtered out.
	ar_e2e:assert_syncs_range(SinkNode, RangeStart1, RangeEnd1),
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking1),
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking2),
	%% XXX: we should be able to assert the chunks here, but since we have two
	%% storage modules configured and are querying the replica_2_9 chunk, GET /chunk gets
	%% confused and tries to load the unpacked chunk, which then fails within the middleware
	%% handler and 404s. To fix we'd need to update GET /chunk to query all matching
	%% storage modules and then find the best one to return. But since this is a rare edge
	%% case, we'll just disable the assertion for now.
	%% ar_e2e:assert_chunks(SinkNode, SinkPacking, Chunks),
	%% Mine with whichever of the two packings is not unpacked.
	MinablePacking = case PackingType1 of
		unpacked -> SinkPacking2;
		_ -> SinkPacking1
	end,
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, MinablePacking),
	ok.

%% @doc Edge case: prepare replica_2_9 entropy BEFORE syncing any chunks
%% (sync_jobs = 0), delete some entropy to exercise self-healing, then enable
%% syncing and verify the node packs, heals, and can mine.
test_entropy_first_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_entropy_first_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	Wallet = ar_test_node:remote_call(SinkNode, ar_e2e, load_wallet_fixture, [wallet_b]),
	SinkAddr = ar_wallet:to_address(Wallet),
	SinkPacking = ar_e2e:packing_type_to_packing(SinkPackingType, SinkAddr),
	{ok, Config} = ar_test_node:get_config(SinkNode),
	%% A single storage module covering partition 1.
	Module = {ar_block:partition_size(), 1, SinkPacking},
	StoreID = ar_storage_module:id(Module),
	StorageModules = [ Module ],
	%% 1. Run node with no sync jobs so that it only prepares entropy
	Config2 = Config#config{
		peers = [ar_test_node:peer_ip(SourceNode)],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true,
		mining_addr = SinkAddr,
		sync_jobs = 0
	},
	?assertEqual(ar_test_node:peer_name(SinkNode),
		ar_test_node:start_other_node(SinkNode, B0, Config2, true)
	),
	RangeStart = ar_block:partition_size(),
	RangeEnd = 2*ar_block:partition_size() + ar_storage_module:get_overlap(SinkPacking),
	%% Entropy exists but no chunk data has been synced yet.
	ar_e2e:assert_has_entropy(SinkNode, RangeStart, RangeEnd, StoreID),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	%% Delete two chunks of entropy from storage to test that the node will heal itself.
	%% 1. Delete the chunk from disk as well as all sync records.
	%% 2. Delete the chunk only from disk, but keep it in the sync records.
	DeleteOffset1 = RangeStart + ?DATA_CHUNK_SIZE,
	ar_test_node:remote_call(SinkNode, ar_chunk_storage, delete, [DeleteOffset1, StoreID]),
	DeleteOffset2 = DeleteOffset1 + ?DATA_CHUNK_SIZE,
	ar_test_node:remote_call(SinkNode, ar_chunk_storage, delete_chunk,
		[DeleteOffset2, StoreID]),
	%% 2. Run node with sync jobs so that it syncs and packs data
	ar_test_node:restart_with_config(SinkNode, Config2#config{
		sync_jobs = 100
	}),
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, Chunks),
	%% 3. Make sure the data is minable
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
	ok.
%% @doc Edge case: sync chunks BEFORE any replica_2_9 entropy exists
%% (replica_2_9_workers = 0), then enable the workers and verify the node
%% packs the already-synced data and can mine.
test_entropy_last_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_entropy_last_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	Wallet = ar_test_node:remote_call(SinkNode, ar_e2e, load_wallet_fixture, [wallet_b]),
	SinkAddr = ar_wallet:to_address(Wallet),
	SinkPacking = ar_e2e:packing_type_to_packing(SinkPackingType, SinkAddr),
	{ok, Config} = ar_test_node:get_config(SinkNode),
	%% A single storage module covering partition 1.
	Module = {ar_block:partition_size(), 1, SinkPacking},
	StoreID = ar_storage_module:id(Module),
	StorageModules = [ Module ],
	%% 1. Run node with no replica_2_9 workers so that it only syncs chunks
	Config2 = Config#config{
		peers = [ar_test_node:peer_ip(SourceNode)],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true,
		mining_addr = SinkAddr,
		replica_2_9_workers = 0
	},
	?assertEqual(ar_test_node:peer_name(SinkNode),
		ar_test_node:start_other_node(SinkNode, B0, Config2, true)
	),
	RangeStart = ar_block:partition_size(),
	RangeEnd = 2*ar_block:partition_size() + ar_storage_module:get_overlap(SinkPacking),
	%% Chunks were synced but not yet packed: they sit in unpacked_padded.
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	ar_e2e:assert_partition_size(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	%% 2. Run node with sync jobs so that it syncs and packs data
	ar_test_node:restart_with_config(SinkNode, Config2#config{
		replica_2_9_workers = 8
	}),
	ar_e2e:assert_has_entropy(SinkNode, RangeStart, RangeEnd, StoreID),
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, Chunks),
	%% 3. Make sure the data is minable
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
	ok.

%% @doc A half-partition storage module whose range is aligned with the start
%% of partition 1 (bucket 2 of half-partition-sized modules). Verify sync,
%% entropy bounds, and mining.
test_small_module_aligned_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_small_module_aligned_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	Wallet = ar_test_node:remote_call(SinkNode, ar_e2e, load_wallet_fixture, [wallet_b]),
	SinkAddr = ar_wallet:to_address(Wallet),
	SinkPacking = ar_e2e:packing_type_to_packing(SinkPackingType, SinkAddr),
	{ok, Config} = ar_test_node:get_config(SinkNode),
	Module = {floor(0.5 * ar_block:partition_size()), 2, SinkPacking},
	StoreID = ar_storage_module:id(Module),
	StorageModules = [ Module ],
	%% Sync the second half of partition 1
	Config2 = Config#config{
		peers = [ar_test_node:peer_ip(SourceNode)],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true,
		mining_addr = SinkAddr
	},
	?assertEqual(ar_test_node:peer_name(SinkNode),
		ar_test_node:start_other_node(SinkNode, B0, Config2, true)
	),
	RangeStart = floor(ar_block:partition_size()),
	RangeEnd = floor(1.5 * ar_block:partition_size()),
	Partition = ar_node:get_partition_number(RangeStart),
	RangeSize = ar_e2e:aligned_partition_size(SinkNode, Partition, SinkPacking),
	%% Make sure the expected data was synced
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking, RangeSize),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, lists:sublist(Chunks, 1, 4)),
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	%% Make sure no extra entropy was generated
	AlignedStart = ar_util:floor_int(RangeStart, ?DATA_CHUNK_SIZE),
	AlignedEnd = ar_util:ceil_int(RangeEnd, ?DATA_CHUNK_SIZE) + ?DATA_CHUNK_SIZE,
	ar_e2e:assert_has_entropy(SinkNode, AlignedStart, AlignedEnd, StoreID),
	ar_e2e:assert_no_entropy(SinkNode, AlignedEnd, AlignedEnd + ar_block:partition_size(), StoreID),
	%% Make sure the data is minable
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
	ok.

%% @doc A half-partition storage module whose range starts in the MIDDLE of
%% partition 1 (bucket 3 of half-partition-sized modules). Verify sync,
%% entropy bounds, and mining.
test_small_module_unaligned_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_small_module_unaligned_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	Wallet = ar_test_node:remote_call(SinkNode, ar_e2e, load_wallet_fixture, [wallet_b]),
	SinkAddr = ar_wallet:to_address(Wallet),
	SinkPacking = ar_e2e:packing_type_to_packing(SinkPackingType, SinkAddr),
	{ok, Config} = ar_test_node:get_config(SinkNode),
	Module = {floor(0.5 * ar_block:partition_size()), 3, SinkPacking},
	StoreID = ar_storage_module:id(Module),
	StorageModules = [ Module ],
	%% Sync the second half of partition 1
	Config2 = Config#config{
		peers = [ar_test_node:peer_ip(SourceNode)],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true,
		mining_addr = SinkAddr
	},
	?assertEqual(ar_test_node:peer_name(SinkNode),
		ar_test_node:start_other_node(SinkNode, B0, Config2, true)
	),
	RangeStart = floor(1.5 * ar_block:partition_size()),
	RangeEnd = floor(2 * ar_block:partition_size()),
	Partition = ar_node:get_partition_number(RangeStart),
	RangeSize = ar_e2e:aligned_partition_size(SinkNode, Partition, SinkPacking),
	%% Make sure the expected data was synced
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking, RangeSize),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, lists:sublist(Chunks, 5, 4)),
	%% Even though the packing type is replica_2_9, the data will still exist in the
	%% default partition as unpacked - and so will exist in the global sync record.
	ar_e2e:assert_syncs_range(SinkNode, RangeStart, RangeEnd),
	%% Make sure no extra entropy was generated
	AlignedStart = ar_util:floor_int(RangeStart, ?DATA_CHUNK_SIZE),
	AlignedEnd = ar_util:ceil_int(RangeEnd, ?DATA_CHUNK_SIZE) + ?DATA_CHUNK_SIZE,
	ar_e2e:assert_has_entropy(SinkNode, AlignedStart, AlignedEnd, StoreID),
	ar_e2e:assert_no_entropy(SinkNode, 0, AlignedStart, StoreID),
	%% Make sure the data is minable
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
	ok.

%% @doc A two-partition storage module aligned with partition 0. Verify sync,
%% entropy coverage of the whole module range, and mining.
test_large_module_aligned_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_large_module_aligned_sync_pack_mine}, {module, ?MODULE},
		{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	Wallet = ar_test_node:remote_call(SinkNode, ar_e2e, load_wallet_fixture, [wallet_b]),
	SinkAddr = ar_wallet:to_address(Wallet),
	SinkPacking = ar_e2e:packing_type_to_packing(SinkPackingType, SinkAddr),
	{ok, Config} = ar_test_node:get_config(SinkNode),
	ModuleSize = floor(2 * ar_block:partition_size()),
	Module = {ModuleSize, 0, SinkPacking},
	StoreID = ar_storage_module:id(Module),
	StorageModules = [ Module ],
	Config2 = Config#config{
		peers = [ar_test_node:peer_ip(SourceNode)],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true,
		mining_addr = SinkAddr
	},
	?assertEqual(ar_test_node:peer_name(SinkNode),
		ar_test_node:start_other_node(SinkNode, B0, Config2, true)
	),
	RangeStart = 0,
	RangeEnd = ModuleSize,
	%% Make sure the expected data was synced
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	%% The assert_partition_size logic uses ar_mining_stats as the data source.
	%% It does not currently handle large storage modules well - it attributes all
	%% chunks in a large storage module to the first partition covered. So we'll
	%% just assert on partition 0 and ignore partition 1. It is worth keeping
	%% assert_partition_size in addition to assert_syncs_range despite this limitation
	%% as it provides some coverage of the v2_index_data_size_by_packing metric (which
	%% is relied on by miners).
	ar_e2e:assert_partition_size(SinkNode, 0, SinkPacking, 4456448),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, lists:sublist(Chunks, 7, 2)),
	%% Make sure entropy was generated for the module range
	ar_e2e:assert_has_entropy(SinkNode, RangeStart, RangeEnd, StoreID),
	%% Make sure the data is minable
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
	ok.

%% @doc Test a large storage module that is unaligned with the partition (it starts in the
%% middle of partition 1 and covers through the end of partition 2).
test_large_module_unaligned_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_large_module_unaligned_sync_pack_mine}, {module, ?MODULE},
			{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	[B0 | _] = Blocks,
	SourceNode = peer1,
	SinkNode = peer2,
	%% The sink node mines to its own address, derived from the wallet_b fixture.
	Wallet = ar_test_node:remote_call(SinkNode, ar_e2e, load_wallet_fixture, [wallet_b]),
	SinkAddr = ar_wallet:to_address(Wallet),
	SinkPacking = ar_e2e:packing_type_to_packing(SinkPackingType, SinkAddr),
	{ok, Config} = ar_test_node:get_config(SinkNode),
	%% A single 1.5-partition storage module at bucket index 1, i.e. unaligned:
	%% it begins in the middle of partition 1 and runs to the end of partition 2.
	ModuleSize = floor(1.5 * ar_block:partition_size()),
	Module = {ModuleSize, 1, SinkPacking},
	StoreID = ar_storage_module:id(Module),
	StorageModules = [ Module ],
	Config2 = Config#config{
		peers = [ar_test_node:peer_ip(SourceNode)],
		start_from_latest_state = true,
		storage_modules = StorageModules,
		auto_join = true,
		mining_addr = SinkAddr
	},
	?assertEqual(ar_test_node:peer_name(SinkNode),
		ar_test_node:start_other_node(SinkNode, B0, Config2, true)
	),
	%% With bucket index 1 and size ModuleSize, the module covers the absolute
	%% weave offsets [ModuleSize, 2 * ModuleSize).
	RangeStart = ModuleSize,
	RangeEnd = 2 * ModuleSize,
	%% Make sure the expected data was synced
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking, RangeStart, RangeEnd),
	%% The assert_partition_size logic uses ar_mining_stats as the data source.
	%% It does not currently handle large storage modules well - it attributes all
	%% chunks in a large storage module to the first partition covered. So we'll
	%% just assert on partition 1 and ignore partition 2. It is worth keeping
	%% assert_partition_size in addition to assert_syncs_range despite this limitation
	%% as it provides some coverage of the v2_index_data_size_by_packing metric (which
	%% is relied on by miners).
	%%
	%% Also note: this test assumes that it runs after the
	%% test_large_module_aligned_sync_pack_mine test - and therefore the blockchain is
	%% at height 6 rather than the default 5. This shifts the disk pool threshold and
	%% allows peer2 to fully sync its large storage module.
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking, 3407872),
	ar_e2e:assert_empty_partition(SinkNode, 0, SinkPacking),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
	ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, lists:sublist(Chunks, 7, 2)),
	%% Make sure entropy was generated for the module range
	ar_e2e:assert_has_entropy(SinkNode, RangeStart, RangeEnd, StoreID),
	%% Make sure the data is minable
	ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
	ok.

%% @doc Verify syncing behavior around the disk pool threshold: chunks below
%% the threshold must be synced, chunks above it must not be - until mining a
%% block moves the threshold forward.
test_disk_pool_threshold({SourcePackingType, SinkPackingType}) ->
	ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]),
	?LOG_INFO([{event, test_disk_pool_threshold}, {module, ?MODULE},
			{from_packing_type, SourcePackingType}, {to_packing_type, SinkPackingType}]),
	SourceNode = peer1,
	SinkNode = peer2,
	%% When the source packing type is unpacked, this setup process performs some
	%% extra disk pool checks:
	%% 1. spin up a spora_2_6 node and mine some blocks
	%% 2. some chunks are below the disk pool threshold and some above
	%% 3. spin up an unpacked node and sync from spora_2_6
	%% 4. shut down the spora_2_6 node
	%% 5. now the unpacked node should have synced all of the chunks, both above and below
	%%    the disk pool threshold
	%% 6. proceed with test and spin up the sink node and confirm it too can sink all chunks
	%%    from the unpacked source node - both above and below the disk pool threshold
	{Blocks, Chunks, SourcePackingType} = setup_source_node(SourcePackingType),
	[B0 | _] = Blocks,
	SinkPacking = start_sink_node(SinkNode, SourceNode, B0, SinkPackingType),
	%% The half-partition range [2, 2.5) partitions, expanded outward to whole
	%% chunk boundaries when computing the expected synced byte count.
	RangeStart = floor(2 * ar_block:partition_size()),
	RangeEnd = floor(2.5 * ar_block:partition_size()),
	RangeSize = ar_util:ceil_int(RangeEnd, ?DATA_CHUNK_SIZE)
			- ar_util:floor_int(RangeStart, ?DATA_CHUNK_SIZE),
	%% Partition 1 and half of partition 2 are below the disk pool threshold
	ar_e2e:assert_syncs_range(SinkNode, SinkPacking,
			ar_block:partition_size(), 4*ar_block:partition_size()),
	ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking),
	ar_e2e:assert_partition_size(SinkNode, 2, SinkPacking, RangeSize),
	ar_e2e:assert_empty_partition(SinkNode, 3, SinkPacking),
	ar_e2e:assert_does_not_sync_range(SinkNode, 0, ar_block:partition_size()),
	ar_e2e:assert_chunks(SinkNode, SinkPacking, Chunks),
	case SinkPackingType of
		unpacked ->
			ok;
		_ ->
			ar_e2e:assert_mine_and_validate(SinkNode, SourceNode, SinkPacking),
			%% Now that we mined a block, the rest of partition 2 is below the disk pool
			%% threshold
			ar_e2e:assert_syncs_range(SinkNode, SinkPacking,
					ar_block:partition_size(), 4*ar_block:partition_size()),
			ar_e2e:assert_partition_size(SinkNode, 2, SinkPacking),
			%% All of partition 3 is still above the disk pool threshold,
			%% except for two chunks, both are below the disk pool threshold = 6291456.
			%% The chunk ending at 6029312 crosses the beginning of the partition 3 so
			%% it is also synced by this partition.
			ar_e2e:assert_partition_size(SinkNode, 3, SinkPacking, 2 * ?DATA_CHUNK_SIZE),
			ar_e2e:assert_does_not_sync_range(SinkNode, 0, ar_block:partition_size()),
			ok
	end.
start_sink_node(Node, SourceNode, B0, PackingType) -> Wallet = ar_test_node:remote_call(Node, ar_e2e, load_wallet_fixture, [wallet_b]), SinkAddr = ar_wallet:to_address(Wallet), SinkPacking = ar_e2e:packing_type_to_packing(PackingType, SinkAddr), {ok, Config} = ar_test_node:get_config(Node), StorageModules = [ {ar_block:partition_size(), 1, SinkPacking}, {ar_block:partition_size(), 2, SinkPacking}, {ar_block:partition_size(), 3, SinkPacking}, {ar_block:partition_size(), 4, SinkPacking}, {ar_block:partition_size(), 5, SinkPacking}, {ar_block:partition_size(), 6, SinkPacking}, {ar_block:partition_size(), 10, SinkPacking} ], ?assertEqual(ar_test_node:peer_name(Node), ar_test_node:start_other_node(Node, B0, Config#config{ peers = [ar_test_node:peer_ip(SourceNode)], start_from_latest_state = true, storage_modules = StorageModules, auto_join = true, mining_addr = SinkAddr }, true) ), SinkPacking. start_sink_node(Node, SourceNode, B0, PackingType1, PackingType2) -> Wallet = ar_test_node:remote_call(Node, ar_e2e, load_wallet_fixture, [wallet_b]), SinkAddr = ar_wallet:to_address(Wallet), SinkPacking1 = ar_e2e:packing_type_to_packing(PackingType1, SinkAddr), SinkPacking2 = ar_e2e:packing_type_to_packing(PackingType2, SinkAddr), {ok, Config} = ar_test_node:get_config(Node), StorageModules = [ {ar_block:partition_size(), 1, SinkPacking1}, {ar_block:partition_size(), 1, SinkPacking2} ], ?assertEqual(ar_test_node:peer_name(Node), ar_test_node:start_other_node(Node, B0, Config#config{ peers = [ar_test_node:peer_ip(SourceNode)], start_from_latest_state = true, storage_modules = StorageModules, auto_join = true, mining_addr = SinkAddr }, true) ), {SinkPacking1, SinkPacking2}. 
================================================ FILE: apps/arweave/src/e2e/fixtures/wallets/wallet_a.json ================================================ {"kty":"RSA","ext":true,"e":"AQAB","n":"qi9ZdgEE_uoA804tHIeyiHdCpjZ608K3qX8cU-SrVMmyegXn3rTjGf29rwb2YqQuIohBVv--FSTDf4tQykCuJpKO5EHKH7qi7Hy1sxrJkHR13YbqX99vx4qAIQ-H8Zik4KdD8lKeaAmaOZ0lUt4Y48dxU51HHn06Bxg1HD7SRNnsFDt6juIPREn5pCzFd54braeIxexOM-0DekLR2dh8TjNAYfHy3tkDy64oSt_T5e4HzRLSGccMiGOo-6HFKj7ChxPiuFkDKBsBWcr9opK9TRaTzfQbCWWUasD9Hs5EtFPrS1HYTGpD4SuM33RPNXQZSTJp6BDRPP7bm-xH012uXoQNLe0Y9LUSphNrJZ1eDJ_pyXkS0NXgY9ggYIoJkjLxUCnAV_DO0e7w5BX1dpQv1N8gst-RvfDAYmIUYiDcMRMbmc8NqcJNITW81HqbSWhCLQ87w1UwwdLnB3vQ6dkcFSbzJjI2afqH-DieWvA1Pc9j8A9dQO3ac_4ZyvLo6EW_xAleM37x9jl7x3eSamGubi5IIYvaEVpyxqAHTxifVms-y8P9YiJEQbScdXtHKDEEFe8fZorPQppFscO-BSIzY6lrh_DbOdzTuytPOgMELeQ3bgAL4CVsizFoIMGo-61sEQwVbpCw3XHy-_TLfrn4crVJYn7WxX4SHNwpdfljeu8","d":"G18PL-P9FjSzn24w4jhO9hTcUthLS_iyyl-HwlRyW-ouuuJtRwvnxLvjQJ3JjdbjFqm8fI4YV9U4XjCdd1IM0GZc9ghAxnahkpCCNsK1rXaVqGH1GyNYGotDjU2uqyRGTF2Kl5RDJu94bxC_uoK_FQ90QiL3F8fDR_XUQO03q1wzVJO2Y_mmw_Bz5rxOrCzxPa5G2LJnZ4GUwBq0HqnrYDZtAfPEgKP9sMobb-Ns9LuiZJDE2uGBOgRxXrtHd0JtzgTcP5MNZ2tkfbkgrv-T06ywa_z5RjsgskTE0SoSscAXhV8t_yhOL45uE1hlDu9Ty8qAbxMZXAqPbpYDfVLBYu3EiHduGoyrGhxElprxt7BCJ_CzyobZfGQGwelvPL_zYhu2EwzFQGKgrhPgLiW5mru6Jlq6CK86J7KNuFFRXzhjqjNcfupC1XWI9X7SFVKCi4mNf_ZM4ZmagJUTP-hyPowrnw4FnKOjJBadWaAdCVC5uWydHYQPFToraatJXWA5QUl2tktOaK8DPkd1KhIak5pxNaLhihqCdJurLrpTQ638r_jA5wBcLmt6SoYpWm3b7JngiuemlNk-G7B2f5y79_GT-t3jKbyaPClet6fOa2FnqDOT9eqR_ZU6h2h-kWlVSadXzSGGCmqfru57rAxpmJZ5SbuKSBEKnjx8Z-6HbOE","p":"5McQNMYpbXEoaid0vFdWl6HOkOn0bSd1XeNDIxISkOsCO2G4kR5faxfA4rEZneXcQ4C_PhilRlgpjtkEG-5B9hgtWeeUWkoTTgBbuHqzv52R3Xc5L_VYtayHGjPi6wiJ5YYaDy4rVJv65gWGpO-6vj__DSnN4tvBcRryogzNH0BDvFyFKIbkqzp0a_psOXG3StxHv5Rue0ySAzvVHsgRxM0UJG06sH9KDstrhQRzQ6UGRWhV9uBPz-xMZYmFQC603kgWUEYRfF10eN7xdD9kM-NW_h4vzDqT6hdfOueZ7I0EUaLTqdb3QuX2UzvsZWCya37m4qW3a6t7zXky6nHaDw","q":"vm91oPH5LRficcNfSP4AN-mL3nBn3Pkd8lkL9ZP6G-tR4CIB3-T6FusOeD-v8RwRtoBjbZYDM5X
lYZHI1XF1-WYc4TXQp6mvF0S13tU62KyesHxVTRp6v0IyL8F1WOmQReDqoQrsr0ccIxF7jJmxgo-djKAVTU8U1dFGGgSvpxuuI0G7gDYyB5ieL2s38TX1H8FjfhAPEf5DIhzdmiYzoX4qXrT93tCYHCB7h8ffJT9tj_eTuXUitqc9A1FKT8hcbA4-DnUhxr-hI-lkK_dcjuVpAf9VbaKbSQNJ03SG7W8ALkZXmAXMw17t8mw7nFV4nWQIvEly7dgUk8eBwe6xIQ","dp":"Du_sX_W8SLgFsoCm_5EYR0g6S33rBqF36UxoWsbYTXv6plPoEBmSk1R2tJZpnMSgUAv88Jn9WI1zES-cNBKnXeEQPPmA1zBU-FfPpUjlqZIpLvOU2UvEogAExjIzE7N4BXNvCiSykZCpnhEoTGaWo8tb5Mkg9znv9GmVA_2f-vVgNtE3pIDCN2fWqCIupMWG-S1OxfR0DjreobVrYdogRuA4-3PiTBnThQnFGGdE-1qwASIh0r-sll_QUSTcfWdPSeAdDNq2U49qhmXQEA3_hd_HE0p3Rndgpv0lq5vpkedXK9lcxo8Rj92h6qdT9P6OR7R-cLfvNOl6aN0L9QDAAw","dq":"LOuXwJ4zW7qtlI40VMBthsLVVmQHa-1rbfYpRwVf0uQgTRFYhdq6T1uk7yJ-uw4W84i3a2seWDW8hNZhnE-GN40ptMn_7Pyuq3tutyBvIBsf15uMd4KOf7z6n58vsghuGr2iOtib2gCZF4CRNyot4BFGZZyBSdoknQcfVRXT5UQ3QGPJ-cVO6dHLRn4xFPnYV2RDtsHM_D6Q0WQjta_bL_XVwr9Ivx1PNBtJaE7ySRP8ISCSPQXvaUxrrPOo5sbpXifB5aEllX8wYIs2MNTJhX-B1JHJMfJQVNmsuW9cQHeVgFThZp-_nDoxQKTdLtROfjnRgbCFpqr4t58w8XD_YQ","qi":"ImkdM0kwMvIQynehAma2kpjq72Wn54nr1TspB1CTrCTu9swcrgxhHbrd0CELrB5L1xhSBUzzRS05_Jx23fnKjVohV_Zm86QoaY8OAPKwVqVzA77PFfNChBoQ68NQhpXsFYKPU_5bhohhGMytkFKOqA3V1YDhyr8hStlcWLmIkt7MQMzPRK5s2z8uFmZ2KdSynfwr53pqr0UJwq9eCdzKGfL4aLiCEpL60yOk4ZGNCmKSl6pwG-cJQ9s-3cak5aiR54PxdjE0uJ3VUKQDq0lRAETM5lk8cEdINLwVlQ6g3Laxbh-BZIQl2WepmddH8ufAbZMaUgtYq_rzThM68qHLYw"} ================================================ FILE: apps/arweave/src/e2e/fixtures/wallets/wallet_b.json ================================================ 
{"kty":"RSA","ext":true,"e":"AQAB","n":"2xxp5KmsGIvR8Y6oSQJJNPgblXNX9vqKnE9P3CoujCnXdaYWsfbpBczZrSnaNV5w3_28Ph4fQHx4JWg6IqegSCB0tpZdnApRvLg406Ho8SWsc0d5QNtEZCvDb4RqWFwVmb7s6cXKtLw41Mq0rk_wI5pKurKpc5Kg_etw3K7TaSWOcsOH6Q42tiKAohN1tkwYnD0-nfRms6pZMVbLsgAMrA5-hojyPF3hSbtYEDYkMk9LOaXh2CXihlRKmpvWlZUo4jjJLUwVwB4k1YiMbLbnBcXpWKcCZtyU3_B_Q5-V1cPBHTMlG1AHrZGtkchRPTyozWvAPGzuyOpbD48k-RjrWkfzXOgsLfCYq5nCBWmOxoP0EgWm7fcgkKK0Q6LJ1CpIa-TtYzFn1Fph6ZrbYG9vxbkI5DP294txGxxjxK-f6gStQj3BG444q_NKfwfpf-sc2v--x50DG_LH3RnGshlcEdHml-107U8M4nHUBjEovtV92MPX0L9j0xLFYA27nWAHyNQhcuWkQbtXYxxk9WoSHM65o-_heDw7BAXc5NxVWXESy-Y_Zpm7i_fyAPiAiHMorqnR_x1ZaXbr7YnSD3e3eEWU0IomCjqcrEyF28kFhWTrlN0UbbCUMSRMTHq8jvTDL3aTggVWABw53DuneCoy87X06x5nBfHQet5DVeCqRdM","d":"Led5Nt2qjka8vcUt8_ugOJJKzrfsnFGt2poYfu4TJ8G-e6uuFYmtnW_ebxwZ1uFQKvKOylKnW5-E4aa4qWfbV0EPEIcJrydHGrF9vtjPDj8A-65bjjJh1N7v6EJ3wOkjez0Vy8ITaOwoLxc3oSsvPd_ZlO5QxSGEIYxLh85L4sQEC22FJWf_luSjluqv-laQjRoDYjZi53RZ9xXaMJs4ctOFe0OtFdZzIMOfHnEGJFDv8aPoxnmP0QwtHZ13cQFO7UlNqUlhAOdx8jl-qWowyR5xzTqFX-VOGCUOyM9IajOyW0EPBRjHtjeCLiI4fIJBY7bzl9dct_9QNraK3ccwD7SkMJwMlkycIR0V3rvn1nvg7MrJOaDZCiKOod_6M1NIYuKHTFcnvQu67E147YGnWVX2BVMlIiB6M9IUymjxfpxhdR-iu_t-i94NoLcxaaK8BaaVV9N2qbZhCmEXjQGF8qo-lEPRpMHlhJKN7I68L30kMkGexz9OjiOT0WAgpmxOxRhvRsl0bI8A26jvg4amkY_nc48lmRF9xdhYeArBZm0S3NFO2qVcyc4CaF1XAx6GkAd7k4MQi1TqEMz5hENEzFMQ516CIJAN3IzqEh0R-1B66uWm3Lm-XYsUUssKJ2BUPWm1toplfEguXVSk9eYhy_MG03LHDej-YUeLAa9X0Q","p":"7Vu-VFXsiTjBG9rxWWnriVDA65fWq3TSa3-x7DCaKpAOOV7yl5Mw0t5Jnp37qJJCPTU_-F_inST5a7Uhp07i_StQNoL9WrIQh7io8zk2GuvuvJx5h85zq6VYq9pC-jbf3ymO-H05jsjKL_ixX0UxaH4ng8wtUgsVyGhWF5CFk-AHiyugsIbMzoG_jVVPMkLiT0gd8yjI1fwVGFp2ZY-_KHiVT5pF33uRu3_DP_i3isMwUMFlnSb7sfZutCG0riVgZveyiKyj-PNcVpZ9kXm82ZVw5bA_pm_FG05kvHU5XT28iQLdIm09zMedrrdhJ7CGMebUtBC0pbApDXa222e5sQ","q":"7FHLM7XQXgB5u7lVhYU_lV5hydFGo_G-TDkFY3_jHWTHyd_7fRiEqHCFk5INNgJSDVwzp3eesNT7MkWS2JTxnBIWH--4myAqUfSp1ttFYWNm2AfDndht14-R1YE2fXWasgTmEXTgkpID4WmWeSTl7tv28vWs-CgzSWl--C3Vr7fHUOTFiPkg2pDTdznHVgzT-SFFFqnvIMS0C1VWf5C85cOigNQy7RWmtBqslPtv
48LQ0hYkbMAVAZ3XYnatBmsj1bc8EVO1eFLNeP5hvkV7SuwGKdCQyvMkgBtW7gb9sJUfhAANfvIuSWaREegtPJ5KwspEhBhpsv9F2hOmDAYUww","dp":"qryBrlyYZyTCE-1sCqtcWEwUWePA8Vh5PAaAz6suWkuBT9dynYGtbyGix0xRCDMdHrY9K8adVfiQyd9jM9xU_1O2wV98K09HALneHgcbWkY4Vsgfy4bAQcoQfJ3l6-KpKvfT9f7t9j2M4vD7ddJp9gY5Gl82gnui0aPruculqndONdfOIOz2Sd2fEmU5MKhX7jur_4to3DQWYIxB-lBqawxCKx6IAHf8nmkK4-te65v4Fz7mfyLZjmv7ues88r_EFo06iYHV-W_lDgv2izyMkd8jdLVRM8HWgQvk_oM8HkwYYF4E_4yhFbrJPDKA2nHqNd8bReN2bnDHNv4cDrsQIQ","dq":"lymm4nfdRhPlyme9xb-7MU-DG7ZLCll7EYSz5raKT2YEyiQE2TsSuC_pscCNxMttMvCUdf31O0WxPLH2QaXceqmzD1Cm9Et55pyq-y2dTrNnuK4WueQUNvu2HC0f7taIUnEBvY7Wi8rswoZo4yrwDX8UkssFjmMgk0fxGM0wz8qtqxf7Jye8lTJooe4KjQd9m_FlIR8oP_yy8kDvKIAr5IjkbKXPwYnE7ZXWaSIAq18Vdh0Fxa6EgVk2ydwBx4ZHENC5kpfKD6JfnpKRcUU-nWkmdB7eT4OCCJP0YiOEqSxqUWQ7PcWqR_dcumiabxkN11XMx_ZZvk69nsZMw4osQw","qi":"FI3Yl18HjFsJhUuxh-WynnSIK9gSegwVK_EEdtYIvOOpKkjJDMQlnRYzNDnVbkFk6LB_dCZfmahQh5z193zRzAXRklE03d_WECc_UrCXhqjrwXkF33ZMQVZtXpjvB3krCXhnOmSNFwQZQDlJcUcPcqbTLrUukMm1_kgJxfch5PFANLUPKL-2_A8W4llpKvTeRZlED0BIKeJOMgDSsZvqbzHZif6ahSDOFO6D_SwkTjZ_qZ0IU2gU5wXNa6QhjcZX80Y05ROJrrT1O-K6rc6SJMYL7TW2xHZ2YmC2GX54rLUtuMiXWxllP1dhMDHIoZv3K8yDKi6-0ciOmWiuQJKNbA"} ================================================ FILE: apps/arweave/src/e2e/fixtures/wallets/wallet_c.json ================================================ 
{"kty":"RSA","ext":true,"e":"AQAB","n":"qmUNnwxgCkuOlHxaDy89YKmh1tUPg3yID0O0ZSGGJJz-E_gQkLQuigq5zZZCLKnL5KGekpPeq66SNQHZmu2x2O_VbsMJ2rH2WlPqBU9NOgSYh-4MCm56sDeTPzYwGcN5RkTJqvrfvy3E0Ej1V2tPCITJYUINmMoaY9zUuhFQ2Az0sCLRnpyRu8pLmn5ccwmLEYXl3ncvzvgZT6wm-uhcVFRfCIBMe_3ccy7-i3uhS_3R1usq1CoDkyULKYSBeD3UcQwcmSyDaP8kU5bG0NQIzP10cX2_qwsUbm9f27e-Mxs7OPYLWZFW1Xl4WOkXEWYrmtSxJue7eDEEp_gBaSGSYy4_XtjPhM6LsOSs59NR-qVRchoT8q4w8PUfYwW1wc6ntgP2B931Z_oTK1Xzg6A8yIs8knJDCw0o78i6B6ersMoOHuPVt-NroxAIb3B_Ui97MUPb2UwGj1Lw4ige1R6XZy3rOL0IHaaVnaJQObXBCjDiurYqna7tHACANb7xlVXsVvhi9EliLBe5JCDphpmh2dfDj3nyMI0Ab7d5_BuefchnQwkw_kna3iykxnH5poftKEBFnF7EmjzABm4Xvh8YEVloYO18e19B9VUYJQcZbCOUtq9oD5xBeVt0X1u9waeAQUWLr9HIBg4CkgGv5BdQ5A50mUfVO8uinSh5O0VKau8","d":"p86ow9GbgRRYSiYJ6hRwAO4Ro5fm16pXSgHK56PJ5lZv6dFlIEqtrbGqWF8Jt5dy2-02cGNPlTR4XAVHDrRqO3v9jZBSPmfTuUf44Zl4h-ugE9TfPuh5r6hVZ08-PU7E1SAohicNEB_zKXoPNhhhZ0ow0OLtSPLnIwUowOheOToY7V3bR0mINUd6LWfQlimLdfrE0fH4Mxz_n0fGUFqlJme424wf_vHQxUq8nFCr3tTp8ONmBg3Kk4Z5T12vgm_1LAE6ixLV_KSuhcazGw56rd3qwxXaXe5forACHuPXfGS5BQnlFZFKM9Yr5bgRVanQK57z23qXqQ56KnpKJPrCsc6oroV3U-RvM9Nj_yLStwyeZCsGt0bd_TOxuTkwG9M04KRNM2qx4RuFhBQHiXH7yBe_QZHlxY1ou7K8jBmkNpp73gMOA01szjkOO9KAQI1WbA4BPyIzW428a9Or0H0W3x99Q6u2uMMt9mld3DK_vkRHc8JAXfTEfo43CeS4Gr7RtFel8WZ2n8tXKaAYcM8sGUoifiL-iI1dGKGysBsR8uMafOl4pOJLy7LouzNIvhBu0n-hDxOLbXETQ3y_GCZiuMWKdwqXqJ_BMCOQA1hcr9_sxcpalCy48dtEiQXtKDLt7qFtR8LXgBAiW4yjlkdkwGMx1rkwJMfwPcXNzCqWLQ","p":"vM5SLTPsnVilEwgXs5YUhHGd2fOb8xdQIHPl2_3mn2YntxenS1qDoCYs6FO8hEhE_OEVCf7xaJ0TAEfX_0WgX_-Tptb3XW2GBX6vHTLs-zyO-XulKKyDpKxNTT4W7f4QW486rdDsDG40xxNZkv-lDShFe8h2WuIZ9_EhzHZSrbDOBBg4yM2ERR1h0wv8yM2JjTB5G-tpuhrwdJ_6jeiXsuuO85rVuXuveiSAMIH2AYz1Z0STIGhqgL93uxw29JHg63Q1PaqpyCo9Td0kxyqQX3tN4ZRDZAKj4nO5j_u1Sx2n_TLBzpgkH_Xy1F9y9_IcXbm2AiqnJWnJuVS5viQenQ","q":"5wlUW8FjI3RvwL1QedfFp15ZVPaYc8N8sWH9U5tITK6G6IQ8UWtKphPO_gzxJFtP77sB_70tcf71u3UWY85vzuyQiQAIyjhJAWMupgYxvXn6Up8zUqgIMEUJxzhgVQ5319sWgpAL47YE7zKcy8BHdhesdaPvzIRQxlTHA57kXWur6X6f7QZiP9IySJwaWaXtjPxI07xx1l_D4dbPK6EFSrmW0Q92kC6koqNHiuNK
QVhSpSnZ7EkT4VqNjsIvojPEBmTj-4bxEEObV-b-DGDppNXBnTBm2-gKpwsV4GdyGdK8wP1gZTIQtRBm_1GHBe9v8O-ZIGOaQ_NPiYZoQanT-w","dp":"IKEPdpxoofC15ooZfoHLXfA8tXPyWZqH0HP3H4PLnXSMHIpL8SvdX4n5bNU72SicM4-6kRWsJsYuiHfiDk28H5sNq2GvMkhBRyXToZoxdmHK27bQniziO01DtruqPssPjKM-ItfeU2-gU182tb7UiWeSSogkXCSDFGRp0OoJ89aAZBjDh4BtAXzIcS67KwDKasobxAV1KiKJt74GEQxHWzZ2aAc0NG_5rYQtWzS6jR4NMyGYw5sH_OQaDw4bOT0Uv9w_bz7VRLB4E8LKHlluxfGLThbPZrNGG1aglQ-ND0Q6yflBoTCN3bAlnSo5tjvzRwdXOxyf8klMAWlxCDk5yQ","dq":"zgHGo65TvPiE8UKdcJeSmcOKOjVMCOVF2VE7toIevKleiBPpSNw3itDc4DEgEEAPjf6dMLE5xY0HBijIVyRrFAJiepZ6P_5iMoeCv-2ECqSqLWPhOpG0A3570pUVaKJnACVN9AuHXnsd-T-TCicgUU-YqqkMGLve3ooXjsXucNKiTqhm582qa6f8yDvRTyCiKfWG5q4Af5uSqVyGDCwe8Nt9fFqiaLv-dzrKfzBeNNgRkU45D_S1clrxIFtMaABqiR0LIGvZpZvy9zV0UAtWKnGjm4reHLXSUdKTpi33UslTH26Oto0m0pyWipDiqcsvcJHkYzoNAwwAXutnKS3KYw","qi":"F_9EOpmQ3q4-NliFmm0XFUNSp4nNeEJOwWcfCTrlF2ktMtojlt7__7PuBN6uILWQwqfgDsaDjSYQxiCSICMYQve3TqJxOToMv10EAjcQgyZkNiOmuJKYuG8GhRkB9PWRPeATCSuUJrL2D7zFm-ROcfEo6TblktmyEt1bRMIVqaFDuu8tFA31HKvKQxr3N8umkOHBa_RSxhn0dRmT3horCWwBKbPy7RV1JM7h5qSKbOJxZf2PgzaTZ3KTkoPRov4_Po-anTg2yOsIdqh92PrbN82WO_IJNhhlXWsWCYvNBlOTPjmkCQhUQy77qtMwpVOi60XoITIyq0b9D8LO1c6d7Q"} ================================================ FILE: apps/arweave/src/e2e/fixtures/wallets/wallet_d.json ================================================ 
{"kty":"RSA","ext":true,"e":"AQAB","n":"y3dvwHO8Bclzz58uwX5BMRjbB3i9S3ODEGW8b8mrsnWjm0m104ymNVXEtfgcaRfxit4Odt4MtQd5TrNzVQdmYruOZ_P7frXXCc8kqjGfjNLm96RqTDqVqpl5s8SuTlNOjnDwEdNrEewq2kCWR4roUxHTCc3DUOM6PXyZhlmr2zmdjCmn8L0eTxije7p5QdObW_64kycDbwXcyhOQrZ_588aXFFfTMogXCXk0lmRu_YvLieRmT-nIw7uLKhSjsMwCkEO4DD5lGb9_BE2kl8BGp9kb_Sqebh75kE9IL3b6bU5iABqDRwcPyiIzjKYrSDYpm_8dq6rT2ylw8TMombh9HlLSb-nijFftxW34nC5ue-g5pien-Cwk9Bfwsd-4Gj1nx03sBMbOwdPUmZ6gY0vqW65TOJVhSrD2GO9fIX8b9KdoU15rybCKsiDEWBiaq045vtyv1W78JYonZ5p7nmPzCRieyJuuHRCT3TnLRyobG6eguVQbEdhCp5FWmITT4lDLl5TqHPoqJPxINGHwFtZvqmKw5p1whM4hWCs3A7QMUMaJqZ0vAEOUBTEIph1c17tVuJYADNgKRwZGkXz-JvDH5o8vB01P-BRU5CWlGjzsCfHUvND-zuvlhkoCWwaUrlLSwQZ2ZqIKuAFs3hA-dRYQRcx7OMSJUYiACtXeSfaQAxs","d":"Em3PAW96KEwG4VdZtMzqure1nwegnaToyiNs3fM2SgO9veL_RRoIM-yA1LqUWDCDAED8rmeOXxc-NZKrb5gr_eVfEKtYrDFsOMc6WvADs42mved2eVEVHU6pZ075Or7w7pXsKLEtkYICn6IZ-oDqahvDMbAhcMIkFE2k2jZlCoY9buSXAYcfp6pjpGFPelbgS4TW0v1Foli1ltgO0qsayKnEJWOPDZSmAYWo7bZLF0wCM4sseTCDrrbd9AHKkcjosohvsywznBFsP8eIkPYpcCqKDnQ9xVuo3xlPQH1WUXA4ECpWmahaFcTjRmoGoZPGUQradSIT7lXilPY9Ry8epecbaMdsu4yKcxLl5hF3_zTogtTi1VUnVaDHsDkxu3l3HBvly0OrchVPt44vmyvmsrhHrUUQCvqrKE3CV_00PzRBTzsUSuJLCQeDchTmh2azKoerojk4dqYA0I276mo8B19kEI1G8LZVp1sCaSWAtHzOyQn_bdZnl2o7tjTNzflqmzFtcvl-p8ocvvLCjfMXzY7aC8lNgqNhvTH-lcgLdSIj2x2lnJ2ThgHwjILSSk_R6WC6t1CE96l2akwCkIWsfLnCoxt_XpPw14OxEB4efD_dVI-KuzS-l5CfHAe225GgHFVAB2n-Zetwbr6bL3fflt5rZMf3VWQKdWiwcPuijYE","p":"7L8bLfK8h0ZbJA0Q2A5AltWj5cMbYf5r9AYAiMs8-bo7RnPMT02dKB0KKmCbMJFe8uXTZ6d24-bKBYSjHFJAoONkbe35IdKG_6mUrYRN__N9-__p08YoEE2FdiSDgG6XqlK7DzpPfhPS5a4fM5g8V-hLFXqJb69LfPPARmlaaQoHJEZ05XDaTQwxjtuWFUJ8HASiwvtdFE_uU1i3gfNJswKgfKTbB614-DpNRHtdDMeuExM6_WMmqnwzUnvgPvMWbojHcXpkd0XrCh8Wdz-TIaprvQtHO1NO-WdNPL3wGvceC_6aoNRHdAP5XJIL2wibgBzdmCNCuMq2RRCp6xsNBw","q":"3AN3HP4skZpSQJAS9Hm7y-HzMGYllorPjZtL3hYQiJG0nG7w_GPp_F9fPh5Bvs2pSG852Pftj2H7uchOGn9V0KzN-U9TYOwfSffZI6SrZfg6Mzl_wLOUqHN6Aq5jE4qVgoEv_fvTNcMSlI4kK-iQwW8JHzXuR-kEXhWK8EUmTNHtExzpVYQascyGpBfsJ74O8BdzUh8TrTVxRcO9w6PYQg_Hgn347001CvtEAEO
WMSIADBBar1dro6LfyVm20FJLX3gnGSPOH3-kuozAL07dAh_GhHTKpRuwyySDfR8N5T21YL-0HJAlmu-zD7V25jKASuQVVffEfOldZnWTGnwoTQ","dp":"zADtkc1-SW8F8G3V2ueFHrSf08gpW2raaV-WrEm9lE-27kGwh5GQ39UOQnAWqmZKFDKY1dQHbeEcql6eEzSJfloT22pZ6Jw6OipN9KtybyDJqhHe0t8I_OtgGurh6hTiWiGKEVgk0baRX9uIBXSkYvfHY43Ayl2aReThBYuZHbRHbSnNZzy0z_m25qwvishMm_QesLfbgDpUWruy_abAFiIoWt_P4bDI8dWDaYSILRAP314N0fTTh8sYinY2SOg9pyfz_MQDuIemPoWFXWKKDVOGHVOPoP5rqhwrATGGqiXRXXKamgXyQHWANhWfY7HqFR5KkOOphgUfxSnT0cTwlw","dq":"CYRw26U3IllNo5NX7pFxiUFN9tMEXz3D-rk0D_heYLoE2RuHezOLRKqPgS1n5Kwa3ZJKK1OWSDSR4hiDIGxPtwYyps1CqxerxtRc5UjTTUbupZagKyLZlGviZElM6eR90TZrcA47tcCphhmcAPY_hM6b02jO1PeEg9lkuD4ViQ8vtTrz8QoU6YoSbPjH83QqS0KIb43-mOiN7Nmp1NO6oCj0lXWDlj59w-rYpzZFQfzZiawPcDRU6LA8BAbIfLyCnC-jaVf-K6im5JcAHUvJDbV4LfSra3cGL9N1iK0WOctwlC3WycGGjuw9j7lm2lBm8lZpgd2E925U5wDBC01BpQ","qi":"ImTCUAjhh2zfP8JGgRmcftSQ-sqn1jchpNpT548HBO4Bcm4rAlRyOQDCV3Jjqyb3UnbHfdx278f0g4jWHOeHsfw5VZ1BKyJ0uDLnYlUuXQ9WO4Rig0j7iUJjR7xV5y6tQFXbZTqSHnEPp9UQJFKhjoBdNmO7IC-24RfA2aahonsRFbvqDOLdGqYKHJhajCa0IFPqBSkkOhPshMHDdKn5iX0AyT2-YTeYXL1u17H_57Hbry3xjxQsdnAcB8uZy4vwMLiac2-Yy9GmSKP-PoP1O2jCeLbBHDbHQW_MdVw0dpbqwLHDwMpU66e8TJeU5-uzn33qZSNOc0nZCYeoAGoxUw"} ================================================ FILE: apps/arweave/src/rsa_pss.erl ================================================ %%%------------------------------------------------------------------- %%% @author Andrew Bennett %%% @copyright 2014-2015, Andrew Bennett %%% @doc %%% Distributed under the Mozilla Public License v2.0. %%% Original available at: %%% https://github.com/potatosalad/erlang-crypto_rsassa_pss %%% @end %%% Created : 20 Jul 2015 by Andrew Bennett %%% Modified: 17 Nov 2017 by The Arweave Team %%%------------------------------------------------------------------- -module(rsa_pss). -include_lib("public_key/include/public_key.hrl"). %% API -export([sign/3]). -export([sign/4]). -export([verify/4]). %% Types -type rsa_public_key() :: #'RSAPublicKey'{}. -type rsa_private_key() :: #'RSAPrivateKey'{}. 
-type rsa_digest_type() :: 'md5' | 'sha' | 'sha224' | 'sha256' | 'sha384' | 'sha512'.

%% The fixed trailer byte (0xBC) appended to every EMSA-PSS encoded message.
-define(PSS_TRAILER_FIELD, 16#BC).

-export([verify_legacy/4]).

%%====================================================================
%% API functions
%%====================================================================

%% @doc Sign Message with RSASSA-PSS using a fresh random salt whose length
%% equals the output size of DigestType.
-spec sign(Message, DigestType, PrivateKey) -> Signature
	when
		Message :: binary() | {digest, binary()},
		DigestType :: rsa_digest_type() | atom(),
		PrivateKey :: rsa_private_key(),
		Signature :: binary().
sign(Message, DigestType, PrivateKey) when is_binary(Message) ->
	%% Hash the plain message first, then sign the digest.
	sign({digest, crypto:hash(DigestType, Message)}, DigestType, PrivateKey);
sign(Message={digest, _}, DigestType, PrivateKey) ->
	%% Salt length convention: equal to the digest size of DigestType.
	SaltLen = byte_size(crypto:hash(DigestType, <<>>)),
	Salt = crypto:strong_rand_bytes(SaltLen),
	sign(Message, DigestType, Salt, PrivateKey).

%% @doc Sign Message with RSASSA-PSS using the caller-supplied salt
%% (deterministic for a fixed salt).
-spec sign(Message, DigestType, Salt, PrivateKey) -> Signature
	when
		Message :: binary() | {digest, binary()},
		DigestType :: rsa_digest_type() | atom(),
		Salt :: binary(),
		PrivateKey :: rsa_private_key(),
		Signature :: binary().
sign(Message, DigestType, Salt, PrivateKey) when is_binary(Message) ->
	sign({digest, crypto:hash(DigestType, Message)}, DigestType, Salt, PrivateKey);
sign({digest, Digest}, DigestType, Salt, PrivateKey=#'RSAPrivateKey'{modulus=N}) ->
	DigestLen = byte_size(Digest),
	SaltLen = byte_size(Salt),
	PublicBitSize = int_to_bit_size(N),
	%% Modulus size in bytes, rounded up to cover a partial leading byte.
	PrivateByteSize = (PublicBitSize + 7) div 8,
	PublicByteSize = int_to_byte_size(N),
	%% The key must be large enough for digest + salt + 2 bytes of framing.
	case PublicByteSize < (DigestLen + SaltLen + 2) of
		false ->
			DBLen = PrivateByteSize - DigestLen - 1,
			%% M' = 8 zero bytes || digest || salt.
			M = << 0:64, Digest/binary, Salt/binary >>,
			H = crypto:hash(DigestType, M),
			%% DB = PS (zero padding) || 0x01 || salt.
			DB = << 0:((DBLen - SaltLen - 1) * 8), 1, Salt/binary >>,
			DBMask = mgf1(DigestType, H, DBLen),
			%% Clear the excess leftmost bits so the encoded message fits the modulus.
			MaskedDB = normalize_to_key_size(PublicBitSize, crypto:exor(DB, DBMask)),
			EM = << MaskedDB/binary, H/binary, ?PSS_TRAILER_FIELD >>,
			%% RSA private-key operation, left-padded with zeros to the key size.
			DM = pad_to_key_size(PublicByteSize, dp(EM, PrivateKey)),
			DM;
		true ->
			erlang:error(badarg, [{digest, Digest}, DigestType, Salt, PrivateKey])
	end.

%% @doc Verify an RSASSA-PSS signature; true only when the encoded message
%% decodes consistently and the recomputed hash matches.
-spec verify(Message, DigestType, Signature, PublicKey) -> boolean()
	when
		Message :: binary() | {digest, binary()},
		DigestType :: rsa_digest_type() | atom(),
		Signature :: binary(),
		PublicKey :: rsa_public_key().
verify(Message, DigestType, Signature, PublicKey) when is_binary(Message) -> verify({digest, crypto:hash(DigestType, Message)}, DigestType, Signature, PublicKey); verify({digest, Digest}, DigestType, Signature, PublicKey=#'RSAPublicKey'{modulus=N}) -> DigestLen = byte_size(Digest), PublicBitSize = int_to_bit_size(N), PrivateByteSize = (PublicBitSize + 7) div 8, PublicByteSize = int_to_byte_size(N), SignatureSize = byte_size(Signature), case PublicByteSize < DigestLen + 2 of true -> false; false -> case PublicByteSize =:= SignatureSize of true -> SignatureNumber = binary:decode_unsigned(Signature, big), case SignatureNumber >= 0 andalso SignatureNumber < N of true -> DBLen = PrivateByteSize - DigestLen - 1, EM = pad_to_key_size(PrivateByteSize, ep(Signature, PublicKey)), case binary:last(EM) of ?PSS_TRAILER_FIELD -> MaskedDB = binary:part(EM, 0, byte_size(EM) - DigestLen - 1), H = binary:part(EM, byte_size(MaskedDB), DigestLen), DBMask = mgf1(DigestType, H, DBLen), DB = normalize_to_key_size(PublicBitSize, crypto:exor(MaskedDB, DBMask)), case binary:match(DB, << 1 >>) of {Pos, Len} -> PS = binary:decode_unsigned(binary:part(DB, 0, Pos)), case PS =:= 0 of true -> Salt = binary:part(DB, Pos + Len, byte_size(DB) - Pos - Len), M = << 0:64, Digest/binary, Salt/binary >>, HOther = crypto:hash(DigestType, M), H =:= HOther; false -> false end; nomatch -> false end; _BadTrailer -> false end; _ -> false end; false -> false end end. 
verify_legacy(Message, DigestType, Signature, PublicKey) when is_binary(Message) -> verify_legacy({digest, crypto:hash(DigestType, Message)}, DigestType, Signature, PublicKey); verify_legacy({digest, Digest}, DigestType, Signature, PublicKey=#'RSAPublicKey'{modulus=N}) -> DigestLen = byte_size(Digest), PublicBitSize = int_to_bit_size(N), PrivateByteSize = PublicBitSize div 8, PublicByteSize = int_to_byte_size(N), SignatureSize = byte_size(Signature), case PublicByteSize < DigestLen + 2 of true -> false; false -> case PublicByteSize =:= SignatureSize of true -> SignatureNumber = binary:decode_unsigned(Signature, big), case SignatureNumber >= 0 andalso SignatureNumber < N of true -> DBLen = PrivateByteSize - DigestLen - 1, EM = pad_to_key_size(PrivateByteSize, ep(Signature, PublicKey)), case binary:last(EM) of ?PSS_TRAILER_FIELD -> MaskedDB = binary:part(EM, 0, byte_size(EM) - DigestLen - 1), H = binary:part(EM, byte_size(MaskedDB), DigestLen), DBMask = mgf1(DigestType, H, DBLen), DB = normalize_to_key_size(PublicBitSize, crypto:exor(MaskedDB, DBMask)), case binary:match(DB, << 1 >>) of {Pos, Len} -> PS = binary:decode_unsigned(binary:part(DB, 0, Pos)), case PS =:= 0 of true -> Salt = binary:part(DB, Pos + Len, byte_size(DB) - Pos - Len), M = << 0:64, Digest/binary, Salt/binary >>, HOther = crypto:hash(DigestType, M), H =:= HOther; false -> false end; nomatch -> false end; _BadTrailer -> false end; _ -> false end; false -> false end end. %%%------------------------------------------------------------------- %%% Internal functions %%%------------------------------------------------------------------- %% @private dp(B, #'RSAPrivateKey'{modulus=N, privateExponent=E}) -> crypto:mod_pow(B, E, N). %% @private ep(B, #'RSAPublicKey'{modulus=N, publicExponent=E}) -> crypto:mod_pow(B, E, N). %% @private int_to_bit_size(I) -> int_to_bit_size(I, 0). %% @private int_to_bit_size(0, B) -> B; int_to_bit_size(I, B) -> int_to_bit_size(I bsr 1, B + 1). 
%% @private int_to_byte_size(I) -> int_to_byte_size(I, 0). %% @private int_to_byte_size(0, B) -> B; int_to_byte_size(I, B) -> int_to_byte_size(I bsr 8, B + 1). %% @private mgf1(DigestType, Seed, Len) -> mgf1(DigestType, Seed, Len, <<>>, 0). %% @private mgf1(_DigestType, _Seed, Len, T, _Counter) when byte_size(T) >= Len -> binary:part(T, 0, Len); mgf1(DigestType, Seed, Len, T, Counter) -> CounterBin = << Counter:8/unsigned-big-integer-unit:4 >>, NewT = << T/binary, (crypto:hash(DigestType, << Seed/binary, CounterBin/binary >>))/binary >>, mgf1(DigestType, Seed, Len, NewT, Counter + 1). %% @private normalize_to_key_size(_, <<>>) -> <<>>; normalize_to_key_size(Bits, _A = << C, Rest/binary >>) -> SH = (Bits - 1) band 16#7, Mask = case SH > 0 of false -> 16#FF; true -> 16#FF bsr (8 - SH) end, B = << (C band Mask), Rest/binary >>, B. %% @private pad_to_key_size(Bytes, Data) when byte_size(Data) < Bytes -> pad_to_key_size(Bytes, << 0, Data/binary >>); pad_to_key_size(_Bytes, Data) -> Data. ================================================ FILE: apps/arweave/src/secp256k1_nif.erl ================================================ -module(secp256k1_nif). -export([sign/2, ecrecover/2]). -on_load(init/0). -define(SigUpperBound, binary:decode_unsigned(<<16#7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0:256>>)). -define(SigDiv, binary:decode_unsigned(<<16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141:256>>)). init() -> PrivDir = code:priv_dir(arweave), ok = erlang:load_nif(filename:join([PrivDir, "secp256k1_arweave"]), 0). sign_recoverable(_Digest, _PrivateBytes) -> erlang:nif_error(nif_not_loaded). recover_pk_and_verify(_Digest, _Signature) -> erlang:nif_error(nif_not_loaded). sign(Msg, PrivBytes) -> Digest = crypto:hash(sha256, Msg), {ok, Signature} = sign_recoverable(Digest, PrivBytes), Signature. 
%% @doc Recover and verify the public key from a recoverable signature over
%% sha256(Msg). Returns {true, PubKey} on success; {false, <<>>} when the
%% signature fails verification or the NIF reports an error.
ecrecover(Msg, Signature) ->
	Digest = crypto:hash(sha256, Msg),
	case recover_pk_and_verify(Digest, Signature) of
		{ok, true, PubKey} -> {true, PubKey};
		{ok, false, _PubKey} -> {false, <<>>};
		{error, _Reason} -> {false, <<>>}
	end.

================================================
FILE: apps/arweave/src/user_default.erl
================================================

%%
%% This file is loaded upon starting Erlang REPL, and loads all the records
%% from user_default.hrl file.
%% Another possibility is to add some broadly-user functions here: these
%% functions will be useable from the REPL as first-class commands. As an
%% example, running the `config().` in the REPL will return current node config.
%%
-module(user_default).
-include_lib("arweave/include/user_default.hrl").
-compile([export_all, nowarn_export_all]).

%% Return the node's current configuration record (REPL convenience command).
config() ->
	{ok, Config} = arweave_config:get_env(),
	Config.

================================================
FILE: apps/arweave/test/ar_base64_compatibility_tests.erl
================================================

-module(ar_base64_compatibility_tests).

%%% The compatibility tests to assert the used
%%% Base64URL encoding and decoding functions are
%%% compatible with base64url 1.0.1.

-include_lib("eunit/include/eunit.hrl").

%% Encode a mix of valid and invalid inputs with both the local reference
%% implementation and ar_util:encode/1 and assert they agree.
back_to_back_encode_test() ->
	Inputs = [
		42,
		foo,
		"foo",
		{foo},
		<< "zany" >>,
		<< "zan" >>,
		<< "za" >>,
		<< "z" >>,
		<< >>,
		binary:copy(<<"0123456789">>, 100000)
	],
	lists:foreach(
		fun(Input) ->
			io:format("Running input: ~p...~n", [Input]),
			assert_encode(Input)
		end,
		Inputs
	).

%% Generator wrapper extending the eunit timeout to 10 seconds for the
%% large decode corpus below.
back_to_back_decode_test_() ->
	{timeout, 10, fun test_back_to_back_decode/0}.

%% Decode a corpus of valid, padded, whitespace-polluted and plainly invalid
%% inputs with both implementations and assert they agree.
test_back_to_back_decode() ->
	Inputs = [
		42, foo, "foo", {foo},
		<< "." >>, << "+" >>, << "!" >>, << "/" >>, << "Σ">>,
		<< "a" >>, << "aa" >>, << "aaa" >>, << "aaaa" >>, << "aaaaa" >>,
		<< "aaaaaa" >>, << "aaaaaaa" >>, << "aaaaaaaa" >>, << "aaaaaaaaa" >>,
		<< "aaaaaaaaaa" >>,
		<< "!~[]" >>,
		<< "emFueQ==" >>, << "emFu" >>, << "emE=" >>, << "eg==" >>, << >>,
		<< " emFu" >>, << "em Fu" >>, << "emFu " >>,
		<< " " >>, << " =" >>, << " ==" >>, << "= " >>, << "== " >>,
		<< "\temFu">>, << "\tem F u">>, << "em F \t u">>, << "em F \tu">>,
		<< "e\nm\nF\nu\n" >>, << "e\nm\nF\nu" >>, << "e\nm\nF\nu " >>,
		<< "AAAA" >>, << "AAA=" >>, << "AAAA=" >>, << "AAA" >>,
		<< "AA==" >>, << "AA=" >>, << "AA" >>,
		<< "A==" >>, << "A=" >>, << "A" >>,
		<< "==" >>, << "=" >>,
		<< "=a" >>, << "==a" >>, << "===a" >>, << "====a" >>, << "=====a" >>,
		<< "=======a" >>, << "========a" >>, << >>,
		<<"PDw/Pz8+Pg==">>, <<"PDw:Pz8.Pg==">>,
		binary:copy(<<"a">>, 1000000),
		binary:copy(<<"a">>, 1000001),
		binary:copy(<<"a">>, 1000002),
		binary:copy(<<"a">>, 1000003),
		binary:copy(<<"a">>, 1000004),
		binary:copy(<<"a">>, 1000005),
		binary:copy(<<"a">>, 1000006),
		binary:copy(<<"a">>, 1000007),
		binary:copy(<<"a">>, 1000008),
		binary:copy(<<"a">>, 1000009),
		binary:copy(<<"0123456789">>, 100000),
		binary:copy(<<"0123456789_-">>, 100000)
	],
	lists:foreach(
		fun(Input) ->
			io:format("Running input: ~p...~n", [Input]),
			assert_decode(Input)
		end,
		Inputs
	).

%% Assert ar_util:encode/1 matches the reference encode/1, including the
%% error class raised for invalid inputs (all mapped to badarg).
assert_encode(Input) ->
	case catch encode(Input) of
		{'EXIT', {badarg, _}} ->
			?assertException(error, badarg, ar_util:encode(Input));
		{'EXIT', {function_clause, _}} ->
			?assertException(error, badarg, ar_util:encode(Input));
		{'EXIT', {badarith, _}} ->
			?assertException(error, badarg, ar_util:encode(Input));
		{'EXIT', {missing_padding, _}} ->
			?assertException(error, badarg, ar_util:encode(Input));
		Output ->
			?assertEqual(Output, ar_util:encode(Input))
	end.
assert_decode(Input) -> case catch decode(Input) of {'EXIT', {badarg, _}} -> ?assertException(error, badarg, ar_util:decode(Input)); {'EXIT', {function_clause, _}} -> ?assertException(error, badarg, ar_util:decode(Input)); {'EXIT', {badarith, _}} -> ?assertException(error, badarg, ar_util:decode(Input)); {'EXIT', {missing_padding, _}} -> ?assertException(error, badarg, ar_util:decode(Input)); Output -> ?assertEqual(Output, ar_util:decode(Input)) end. encode(Bin) when is_binary(Bin) -> << << (urlencode_digit(D)) >> || <> <= base64:encode(Bin), D =/= $= >>; encode(L) when is_list(L) -> encode(iolist_to_binary(L)); encode(_) -> error(badarg). decode(Bin) when is_binary(Bin) -> Bin2 = case byte_size(Bin) rem 4 of 2 -> << Bin/binary, "==" >>; 3 -> << Bin/binary, "=" >>; _ -> Bin end, base64:decode(<< << (urldecode_digit(D)) >> || <> <= Bin2 >>); decode(L) when is_list(L) -> decode(iolist_to_binary(L)); decode(_) -> error(badarg). urlencode_digit($/) -> $_; urlencode_digit($+) -> $-; urlencode_digit(D) -> D. urldecode_digit($_) -> $/; urldecode_digit($-) -> $+; urldecode_digit(D) -> D. ================================================ FILE: apps/arweave/test/ar_canary.erl ================================================ %%%=================================================================== %%% @doc A test that always fail. %%% @end %%%=================================================================== -module(ar_canary). -include_lib("eunit/include/eunit.hrl"). canary_test_() -> ?assert(4 =:= 5). ================================================ FILE: apps/arweave/test/ar_config_tests.erl ================================================ -module(ar_config_tests). -include_lib("ar_consensus.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("eunit/include/eunit.hrl"). parse_test_() -> {timeout, 60, fun test_parse_config/0}. 
%% Register the validation tests, each with a 60 second timeout.
validate_test_() ->
	[
		{timeout, 60, fun test_validate_repack_in_place/0},
		{timeout, 60, fun test_validate_cm_pool/0},
		{timeout, 60, fun test_validate_storage_modules/0},
		{timeout, 60, fun test_validate_cm/0}
	].

%% @doc Parse the JSON fixture (see config_fixture/0) and assert that every
%% field of the resulting #config{} record matches the fixture's values.
%% The #{ K := V } syntax inside ?assertMatch is map *match* syntax: the
%% listed keys must be present with the given values.
test_parse_config() ->
	ExpectedMiningAddr = ar_util:decode(<<"LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw">>),
	{ok, ParsedConfig} = ar_config:parse(config_fixture()),
	ExpectedBlockHash = ar_util:decode(
		<<"lfoR_PyKV6t7Z6Xi2QJZlZ0JWThh0Ke7Zc5Q82CSshUhFGcjiYufP234ph1mVofX">>),
	PartitionSize = ar_block:partition_size(),
	?assertMatch(#config{
		init = true,
		port = 1985,
		mine = true,
		peers = [
			{188,166,200,45,1984},
			{188,166,192,169,1984},
			{163,47,11,64,1984},
			{159,203,158,108,1984},
			{159,203,49,13,1984},
			{139,59,51,59,1984},
			{138,197,232,192,1984},
			{46,101,67,172,1984}
		],
		local_peers = [
			{192, 168, 2, 3, 1984},
			{172, 16, 10, 11, 1985}
		],
		sync_from_local_peers_only = true,
		block_gossip_peers = [{159,203,158,108,1984}, {150,150,150,150, 1983}],
		data_dir = "some_data_dir",
		log_dir = "log_dir",
		storage_modules = [
			{PartitionSize, 0, unpacked},
			{PartitionSize, 2, {spora_2_6, ExpectedMiningAddr}},
			{PartitionSize, 100, unpacked},
			{1, 0, unpacked},
			{1000000000000, 14, {spora_2_6, ExpectedMiningAddr}},
			{PartitionSize, 0, {replica_2_9, ExpectedMiningAddr}}],
		repack_in_place_storage_modules = [
			{{PartitionSize, 1, unpacked}, {spora_2_6, ExpectedMiningAddr}},
			{{1, 1, {spora_2_6, ExpectedMiningAddr}}, unpacked},
			{{PartitionSize,8, {replica_2_9, ExpectedMiningAddr}}, unpacked}],
		repack_batch_size = 200,
		polling = 10,
		block_pollers = 100,
		%% The fixture sets "no_auto_join": true.
		auto_join = false,
		join_workers = 9,
		diff = 42,
		mining_addr = ExpectedMiningAddr,
		hashing_threads = 17,
		data_cache_size_limit = 10000,
		packing_cache_size_limit = 20000,
		mining_cache_size_mb = 3,
		max_propagation_peers = 8,
		max_block_propagation_peers = 60,
		post_tx_timeout = 50,
		max_emitters = 4,
		replica_2_9_workers = 16,
		disable_replica_2_9_device_limit = true,
		replica_2_9_entropy_cache_size_mb = 2000,
		packing_workers = 25,
		sync_jobs = 10,
		header_sync_jobs = 1,
		disk_pool_jobs = 2,
		requests_per_minute_limit = 2500,
		requests_per_minute_limit_by_ip = #{
			{127, 0, 0, 1} := #{
				chunk := 100000,
				data_sync_record := 1,
				recent_hash_list_diff := 200000,
				default := 100
			}
		},
		%% The fixture value (10) is in seconds; the parsed value is ms.
		disk_space_check_frequency = 10 * 1000,
		start_from_latest_state = true,
		start_from_block = ExpectedBlockHash,
		internal_api_secret = <<"some_very_very_long_secret">>,
		enable = [feature_1, feature_2],
		disable = [feature_3, feature_4],
		transaction_blacklist_files = ["some_blacklist_1", "some_blacklist_2"],
		transaction_blacklist_urls = ["http://some_blacklist_1", "http://some_blacklist_2/x"],
		transaction_whitelist_files = ["some_whitelist_1", "some_whitelist_2"],
		transaction_whitelist_urls = ["http://some_whitelist"],
		webhooks = [
			#config_webhook{
				events = [transaction, block],
				url = <<"https://example.com/hook">>,
				headers = [{<<"Authorization">>, <<"Bearer 123456">>}]
			}
		],
		'http_api.tcp.max_connections' = 512,
		disk_pool_data_root_expiration_time = 10000,
		max_disk_pool_buffer_mb = 100000,
		max_disk_pool_data_root_buffer_mb = 100000000,
		max_duplicate_data_roots = 7,
		disk_cache_size = 1024,
		semaphores = #{
			get_chunk := 1,
			get_and_pack_chunk := 2,
			get_tx_data := 3,
			post_chunk := 999,
			get_block_index := 1,
			get_wallet_list := 2,
			arql := 3,
			gateway_arql := 3,
			%% Not set in the fixture; presumably the default — TODO confirm.
			get_sync_record := 10
		},
		vdf = hiopt_m4,
		max_nonce_limiter_validation_thread_count = 2,
		max_nonce_limiter_last_step_validation_thread_count = 3,
		%% Merged from "vdf_server_trusted_peer" and "vdf_server_trusted_peers".
		nonce_limiter_server_trusted_peers = ["127.0.0.1", "2.3.4.5", "6.7.8.9:1982"],
		nonce_limiter_client_peers = [<<"2.3.6.7:1984">>, <<"4.7.3.1:1983">>, <<"3.3.3.3">>],
		run_defragmentation = true,
		defragmentation_trigger_threshold = 1_000,
		defragmentation_modules = [
			{PartitionSize, 0, unpacked},
			{PartitionSize, 2, {spora_2_6, ExpectedMiningAddr}},
			{PartitionSize, 100, unpacked},
			{1, 0, unpacked},
			{1000000000000, 14, {spora_2_6, ExpectedMiningAddr}}
		],
		block_throttle_by_ip_interval = 5_000,
		block_throttle_by_solution_interval = 12_000,
		http_api_transport_idle_timeout = 15_000
	}, ParsedConfig).

%% @doc Read the JSON config fixture from the repository checkout
%% (relative to the current working directory).
config_fixture() ->
	{ok, Cwd} = file:get_cwd(),
	Path = filename:join(Cwd, "./apps/arweave/test/ar_config_tests_config_fixture.json"),
	{ok, FileData} = file:read_file(Path),
	FileData.

%% @doc Validate the repack-in-place rules: a repack-in-place module may not
%% occupy the same {size, index, packing} slot as a regular storage module,
%% and various source/target packing combinations are accepted.
test_validate_repack_in_place() ->
	Addr1 = crypto:strong_rand_bytes(32),
	Addr2 = crypto:strong_rand_bytes(32),
	PartitionSize = ar_block:partition_size(),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = []})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [{PartitionSize, 0, {spora_2_6, Addr1}}],
		repack_in_place_storage_modules = []})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [{PartitionSize, 0, {spora_2_6, Addr1}}],
		repack_in_place_storage_modules = [
			{{PartitionSize, 1, {spora_2_6, Addr1}}, {replica_2_9, Addr2}}]})),
	%% Same module configured both as storage and repack-in-place: invalid.
	?assertEqual(false, ar_config:validate_config(#config{
		storage_modules = [{PartitionSize, 0, {spora_2_6, Addr1}}],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, {spora_2_6, Addr1}}, {replica_2_9, Addr2}}]})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, {replica_2_9, Addr1}}, {replica_2_9, Addr2}}]})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, {replica_2_9, Addr1}}, {spora_2_6, Addr2}}]})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, {replica_2_9, Addr2}}, unpacked}]})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, unpacked}, {replica_2_9, Addr2}}]})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, {spora_2_6, Addr1}}, {replica_2_9, Addr2}}]})),
	?assertEqual(true, ar_config:validate_config(#config{
		storage_modules = [],
		repack_in_place_storage_modules = [
			{{PartitionSize, 0, unpacked}, {spora_2_6, Addr2}}]})).

%% @doc Validate the coordinated-mining / pool interaction rules:
%% CM + pool server is invalid, pool server + pool client is invalid,
%% and a pool client must be mining.
test_validate_cm_pool() ->
	?assertEqual(false, ar_config:validate_config(
		#config{ coordinated_mining = true, is_pool_server = true, mine = true,
			cm_api_secret = <<"secret">>})),
	?assertEqual(true, ar_config:validate_config(
		#config{ coordinated_mining = true, is_pool_server = false, mine = true,
			cm_api_secret = <<"secret">>})),
	?assertEqual(true, ar_config:validate_config(
		#config{ coordinated_mining = false, is_pool_server = true, mine = true,
			cm_api_secret = <<"secret">>})),
	?assertEqual(true, ar_config:validate_config(
		#config{ coordinated_mining = false, is_pool_server = false, mine = true,
			cm_api_secret = <<"secret">>})),
	?assertEqual(false, ar_config:validate_config(
		#config{is_pool_server = true, is_pool_client = true, mine = true})),
	?assertEqual(true, ar_config:validate_config(
		#config{is_pool_server = true, is_pool_client = false})),
	?assertEqual(true, ar_config:validate_config(
		#config{is_pool_server = false, is_pool_client = true, mine = true})),
	?assertEqual(true, ar_config:validate_config(
		#config{is_pool_server = false, is_pool_client = false, mine = true})),
	?assertEqual(false, ar_config:validate_config(
		#config{is_pool_client = true, mine = false})),
	?assertEqual(true, ar_config:validate_config(
		#config{is_pool_client = true, mine = true})),
	?assertEqual(true, ar_config:validate_config(
		#config{is_pool_client = false, mine = true})),
	?assertEqual(true, ar_config:validate_config(
		#config{is_pool_client = false, mine = false})).
%% @doc Validate the coordinated-mining rules: CM requires both mine = true
%% and a configured cm_api_secret.
test_validate_cm() ->
	?assertEqual(true, ar_config:validate_config(
		#config{coordinated_mining = true, mine = true, cm_api_secret = <<"secret">>})),
	?assertEqual(true, ar_config:validate_config(
		#config{coordinated_mining = false, mine = false, cm_api_secret = not_set})),
	%% CM enabled without mining: invalid.
	?assertEqual(false, ar_config:validate_config(
		#config{coordinated_mining = true, mine = false, cm_api_secret = <<"secret">>})),
	%% CM enabled without a secret: invalid.
	?assertEqual(false, ar_config:validate_config(
		#config{coordinated_mining = true, mine = true, cm_api_secret = not_set})).

%% @doc Validate storage-module packing combinations against the mining
%% address. The only rejected combination below is mining with both a legacy
%% (spora_2_6) and a replica_2_9 module for the same mining address without
%% an unpacked module.
test_validate_storage_modules() ->
	Addr1 = crypto:strong_rand_bytes(32),
	Addr2 = crypto:strong_rand_bytes(32),
	LegacyPacking = {spora_2_6, Addr1},
	PartitionSize = ar_block:partition_size(),
	Unpacked = {PartitionSize, 0, unpacked},
	Legacy = {PartitionSize, 1, LegacyPacking},
	Replica29 = {PartitionSize, 2, {replica_2_9, Addr1}},
	?assertEqual(true, ar_config:validate_config(
		#config{ storage_modules = [Unpacked, Legacy, Replica29],
			mining_addr = Addr1, mine = false})),
	?assertEqual(true, ar_config:validate_config(
		#config{ storage_modules = [Unpacked, Legacy, Replica29],
			mining_addr = Addr2, mine = true})),
	?assertEqual(true, ar_config:validate_config(
		#config{ storage_modules = [Unpacked, Legacy],
			mining_addr = Addr1, mine = true})),
	?assertEqual(true, ar_config:validate_config(
		#config{ storage_modules = [Unpacked, Replica29],
			mining_addr = Addr1, mine = true})),
	?assertEqual(false, ar_config:validate_config(
		#config{ storage_modules = [Legacy, Replica29],
			mining_addr = Addr1, mine = true})).
================================================ FILE: apps/arweave/test/ar_config_tests_config_fixture.json ================================================ { "peers": [ "188.166.200.45", "188.166.192.169", "163.47.11.64", "159.203.158.108", "159.203.49.13", "139.59.51.59", "138.197.232.192", "46.101.67.172" ], "sync_from_local_peers_only": true, "block_gossip_peers": ["159.203.158.108", "150.150.150.150:1983"], "local_peers": ["192.168.2.3", "172.16.10.11:1985"], "start_from_latest_state": true, "start_from_block": "lfoR_PyKV6t7Z6Xi2QJZlZ0JWThh0Ke7Zc5Q82CSshUhFGcjiYufP234ph1mVofX", "mine": true, "port": 1985, "data_dir": "some_data_dir", "log_dir": "log_dir", "storage_modules": [ "0,unpacked", "2,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw", "100,unpacked", "1,unpacked,repack_in_place,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw", "0,1,unpacked", "14,1000000000000,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw", "1,1,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw,repack_in_place,unpacked", "0,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw.replica.2.9", "8,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw.replica.2.9,repack_in_place,unpacked" ], "repack_batch_size": 200, "polling": 10, "block_pollers": 100, "no_auto_join": true, "join_workers": 9, "diff": 42, "mining_addr": "LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw", "hashing_threads": 17, "data_cache_size_limit": 10000, "packing_cache_size_limit": 20000, "mining_cache_size_mb": 3, "max_propagation_peers": 8, "max_block_propagation_peers": 60, "post_tx_timeout": 50, "max_emitters": 4, "sync_jobs": 10, "header_sync_jobs": 1, "disk_pool_jobs": 2, "requests_per_minute_limit": 2500, "requests_per_minute_limit_by_ip": { "127.0.0.1": { "chunk": 100000, "data_sync_record": 1, "recent_hash_list_diff": 200000, "default": 100 } }, "transaction_blacklists": ["some_blacklist_1", "some_blacklist_2"], "transaction_blacklist_urls": [ "http://some_blacklist_1", "http://some_blacklist_2/x" ], "transaction_whitelists": ["some_whitelist_1", 
"some_whitelist_2"], "transaction_whitelist_urls": ["http://some_whitelist"], "disk_space_check_frequency": 10, "init": true, "internal_api_secret": "some_very_very_long_secret", "enable": ["feature_1", "feature_2"], "disable": ["feature_3", "feature_4"], "webhooks": [ { "events": ["transaction", "block"], "url": "https://example.com/hook", "headers": { "Authorization": "Bearer 123456" } } ], "max_connections": 512, "disk_pool_data_root_expiration_time": 10000, "max_disk_pool_buffer_mb": 100000, "max_disk_pool_data_root_buffer_mb": 100000000, "max_duplicate_data_roots": 7, "disk_cache_size_mb": 1024, "semaphores": { "get_chunk": 1, "get_and_pack_chunk": 2, "get_tx_data": 3, "post_chunk": 999, "get_block_index": 1, "get_wallet_list": 2, "arql": 3, "gateway_arql": 3 }, "replica_2_9_workers": 16, "disable_replica_2_9_device_limit": true, "replica_2_9_entropy_cache_size_mb": 2000, "packing_workers": 25, "max_nonce_limiter_validation_thread_count": 2, "max_nonce_limiter_last_step_validation_thread_count": 3, "vdf": "hiopt_m4", "vdf_server_trusted_peer": "127.0.0.1", "vdf_server_trusted_peers": ["2.3.4.5", "6.7.8.9:1982"], "vdf_client_peers": ["2.3.6.7:1984", "4.7.3.1:1983", "3.3.3.3"], "run_defragmentation": true, "defragmentation_trigger_threshold": 1000, "defragment_modules": [ "0,unpacked", "2,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw", "100,unpacked", "0,1,unpacked", "14,1000000000000,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw" ], "block_throttle_by_ip_interval": 5000, "block_throttle_by_solution_interval": 12000, "http_client.http.closing_timeout": 5000, "http_client.http.keepalive": "infinity", "http_client.tcp.delay_send": true, "http_client.tcp.keepalive": false, "http_client.tcp.linger": true, "http_client.tcp.linger_timeout": 0, "http_client.tcp.nodelay": true, "http_client.tcp.send_timeout_close": true, "http_client.tcp.send_timeout": 5000, "http_api.http.active_n": 2, "http_api.http.inactivity_timeout": 15000, "http_api.http.linger_timeout": 0, 
"http_api.http.request_timeout": 5000,
"http_api.tcp.delay_send": true,
"http_api.tcp.idle_timeout_seconds": 15,
"http_api.tcp.keepalive": false,
"http_api.tcp.linger": true,
"http_api.tcp.linger_timeout": 0,
"http_api.tcp.listener_shutdown": 5000,
"http_api.tcp.nodelay": true,
"http_api.tcp.num_acceptors": 10,
"http_api.tcp.send_timeout_close": true,
"http_api.tcp.send_timeout": 5000,
"network.socket.backend": "socket",
"network.tcp.shutdown.mode": "shutdown"
}

================================================
FILE: apps/arweave/test/ar_coordinated_mining_tests.erl
================================================
-module(ar_coordinated_mining_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [http_get_block/2]).

%% How long to wait for a node to reach a target height while the CM
%% cluster mines (ms).
-define(COORDINATED_MINING_WAIT_TIMEOUT, 900_000).

%% --------------------------------------------------------------------
%% Test registration
%% --------------------------------------------------------------------
mining_test_() ->
	[
		{timeout, ?TEST_NODE_TIMEOUT, fun test_single_node_one_chunk/0},
		%% Force invalid h1 and freeze difficulty so a two-chunk solution is
		%% required.
		ar_test_node:test_with_mocked_functions(
			[
				ar_test_node:mock_to_force_invalid_h1(),
				{ar_retarget, is_retarget_height, fun(_Height) -> false end},
				{ar_retarget, is_retarget_block, fun(_Block) -> false end}
			],
			fun test_single_node_two_chunk/0, ?TEST_NODE_TIMEOUT),
		ar_test_node:test_with_mocked_functions(
			[
				ar_test_node:mock_to_force_invalid_h1(),
				{ar_retarget, is_retarget_height, fun(_Height) -> false end},
				{ar_retarget, is_retarget_block, fun(_Block) -> false end}
			],
			fun test_cross_node/0, ?TEST_NODE_TIMEOUT),
		%% Allow exactly one difficulty adjustment (at ?RETARGET_BLOCKS).
		ar_test_node:test_with_mocked_functions(
			[
				ar_test_node:mock_to_force_invalid_h1(),
				mock_for_single_difficulty_adjustment_height(),
				mock_for_single_difficulty_adjustment_block()
			],
			fun test_cross_node_retarget/0, 2 * ?TEST_NODE_TIMEOUT),
		{timeout, ?TEST_NODE_TIMEOUT, fun test_two_node_retarget/0},
		ar_test_node:test_with_mocked_functions(
			[
				{ar_retarget, is_retarget_height, fun(_Height) -> false end},
				{ar_retarget, is_retarget_block, fun(_Block) -> false end}
			],
			fun test_three_node/0, 2 * ?TEST_NODE_TIMEOUT),
		{timeout, ?TEST_NODE_TIMEOUT, fun test_no_exit_node/0}
	].

api_test_() ->
	[
		{timeout, ?TEST_NODE_TIMEOUT, fun test_no_secret/0},
		{timeout, ?TEST_NODE_TIMEOUT, fun test_bad_secret/0},
		{timeout, ?TEST_NODE_TIMEOUT, fun test_partition_table/0}
	].

refetch_partitions_test_() ->
	[
		{timeout, ?TEST_NODE_TIMEOUT, fun test_peers_by_partition/0}
	].

%% --------------------------------------------------------------------
%% Tests
%% --------------------------------------------------------------------

%% @doc One-node coordinated mining cluster mining a block with one
%% or two chunks.
test_single_node_one_chunk() ->
	[Node, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(1),
	ar_test_node:mine(Node),
	BI = ar_test_node:wait_until_height(ValidatorNode, 1, false),
	{ok, B} = http_get_block(element(1, hd(BI)), ValidatorNode),
	%% A non-empty poa data path shows the block carries a first-chunk proof.
	?assert(byte_size((B#block.poa)#poa.data_path) > 0),
	assert_empty_cache(Node).

%% @doc One-node coordinated mining cluster mining a block with two chunks.
test_single_node_two_chunk() ->
	[Node, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(1),
	ar_test_node:mine(Node),
	BI = ar_test_node:wait_until_height(ValidatorNode, 1, false),
	{ok, B} = http_get_block(element(1, hd(BI)), ValidatorNode),
	%% poa2 is only populated for two-chunk solutions.
	?assert(byte_size((B#block.poa2)#poa.data_path) > 0),
	assert_empty_cache(Node).

%% @doc Two-node coordinated mining cluster mining until a difficulty retarget.
test_two_node_retarget() ->
	[Node1, Node2, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(2),
	lists:foreach(
		fun(Height) ->
			mine_in_parallel([Node1, Node2], ValidatorNode, Height)
		end,
		lists:seq(0, ?RETARGET_BLOCKS)),
	assert_empty_cache(Node1),
	assert_empty_cache(Node2).
%% @doc Three-node coordinated mining cluster mining until all nodes have contributed
%% to a solution. This test does not force cross-node solutions.
test_three_node() ->
	[Node1, Node2, Node3, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(3),
	%% Each miner owns one of partitions 0, 2, 4; mine until every one of
	%% those partitions appears in a winning solution.
	wait_for_each_node([Node1, Node2, Node3], ValidatorNode, 0, [0, 2, 4]),
	assert_empty_cache(Node1),
	assert_empty_cache(Node2),
	assert_empty_cache(Node3).

%% @doc Two-node, mine until a block is found that incorporates hashes from each node.
test_cross_node() ->
	[Node1, Node2, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(2),
	wait_for_cross_node([Node1, Node2], ValidatorNode, 0, [0, 2]),
	assert_empty_cache(Node1),
	assert_empty_cache(Node2).

%% @doc Two-node, mine through difficulty retarget, then mine until a block is found that
%% incorporates hashes from each node.
test_cross_node_retarget() ->
	[Node1, Node2, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(2),
	lists:foreach(
		fun(H) ->
			mine_in_parallel([Node1, Node2], ValidatorNode, H)
		end,
		lists:seq(0, ?RETARGET_BLOCKS)),
	wait_for_cross_node([Node1, Node2], ValidatorNode, ?RETARGET_BLOCKS, [0, 2]),
	assert_empty_cache(Node1),
	assert_empty_cache(Node2).

test_no_exit_node() ->
	%% Assert that when the exit node is down, CM miners don't share their solution with any
	%% other peers.
	[Node, ExitNode, ValidatorNode] = ar_test_node:start_coordinated(1),
	ar_test_node:stop(ExitNode),
	ar_test_node:mine(Node),
	timer:sleep(5000),
	%% The validator must still only have the genesis block.
	BI = ar_test_node:get_blocks(ValidatorNode),
	?assertEqual(1, length(BI)).
%% @doc Without a CM API secret configured on the caller side, every CM
%% endpoint must answer 421 with the canned rejection message.
test_no_secret() ->
	[Node, _ExitNode, _ValidatorNode] = ar_test_node:start_coordinated(1),
	Peer = ar_test_node:peer_ip(Node),
	lists:foreach(
		fun(Request) ->
			?assertMatch(
				{error, {ok, {{<<"421">>, _}, _,
					<<"CM API disabled or invalid CM API secret in request.">>, _, _}}},
				Request())
		end,
		[
			fun() -> ar_http_iface_client:get_cm_partition_table(Peer) end,
			fun() -> ar_http_iface_client:cm_h1_send(Peer, dummy_candidate()) end,
			fun() -> ar_http_iface_client:cm_h2_send(Peer, dummy_candidate()) end,
			fun() -> ar_http_iface_client:cm_publish_send(Peer, dummy_solution()) end
		]).

%% @doc With a *wrong* CM API secret configured, every CM endpoint must
%% likewise answer 421. The original config is always restored afterwards.
test_bad_secret() ->
	[Node, _ExitNode, _ValidatorNode] = ar_test_node:start_coordinated(1),
	Peer = ar_test_node:peer_ip(Node),
	{ok, Config} = arweave_config:get_env(),
	try
		ok = arweave_config:set_env(Config#config{
			cm_api_secret = <<"this_is_not_the_actual_secret">> }),
		lists:foreach(
			fun(Request) ->
				?assertMatch(
					{error, {ok, {{<<"421">>, _}, _,
						<<"CM API disabled or invalid CM API secret in request.">>, _, _}}},
					Request())
			end,
			[
				fun() -> ar_http_iface_client:get_cm_partition_table(Peer) end,
				fun() -> ar_http_iface_client:cm_h1_send(Peer, dummy_candidate()) end,
				fun() -> ar_http_iface_client:cm_h2_send(Peer, dummy_candidate()) end,
				fun() -> ar_http_iface_client:cm_publish_send(Peer, dummy_solution()) end
			])
	after
		ok = arweave_config:set_env(Config)
	end.
%% @doc Assert that GET /cm_partition_table only reports partitions that are
%% (a) packed for the node's own mining address and (b) fully covered by
%% storage modules below the largest seen partition upper bound.
test_partition_table() ->
	[B0] = ar_weave:init([], ar_test_node:get_difficulty_for_invalid_hash(),
		5 * ar_block:partition_size()),
	Config = ar_test_node:base_cm_config([]),
	MiningAddr = Config#config.mining_addr,
	RandomAddress = crypto:strong_rand_bytes(32),
	Peer = ar_test_node:peer_ip(main),
	%% No partitions
	ar_test_node:start_node(B0, Config, false),
	?assertEqual(
		{ok, []},
		ar_http_iface_client:get_cm_partition_table(Peer)
	),
	%% Partition jumble with 2 addresses
	ar_test_node:start_node(B0, Config#config{
		storage_modules = [
			{ar_block:partition_size(), 0, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 0, {spora_2_6, RandomAddress}},
			{1000, 2, {spora_2_6, MiningAddr}},
			{1000, 2, {spora_2_6, RandomAddress}},
			{1000, 10, {spora_2_6, MiningAddr}},
			{1000, 10, {spora_2_6, RandomAddress}},
			{ar_block:partition_size() * 2, 4, {spora_2_6, MiningAddr}},
			{ar_block:partition_size() * 2, 4, {spora_2_6, RandomAddress}},
			{(ar_block:partition_size() div 10), 18, {spora_2_6, MiningAddr}},
			{(ar_block:partition_size() div 10), 18, {spora_2_6, RandomAddress}},
			{(ar_block:partition_size() div 10), 19, {spora_2_6, MiningAddr}},
			{(ar_block:partition_size() div 10), 19, {spora_2_6, RandomAddress}},
			{(ar_block:partition_size() div 10), 20, {spora_2_6, MiningAddr}},
			{(ar_block:partition_size() div 10), 20, {spora_2_6, RandomAddress}},
			{(ar_block:partition_size() div 10), 21, {spora_2_6, MiningAddr}},
			{(ar_block:partition_size() div 10), 21, {spora_2_6, RandomAddress}},
			{ar_block:partition_size()+1, 30, {spora_2_6, MiningAddr}},
			{ar_block:partition_size()+1, 30, {spora_2_6, RandomAddress}},
			{ar_block:partition_size(), 40, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 40, {spora_2_6, RandomAddress}}
		]}, false),
	%% get_cm_partition_table returns the currently minable partitions - which is [] if the
	%% node is not mining.
	?assertEqual(
		{ok, []},
		ar_http_iface_client:get_cm_partition_table(Peer)
	),
	%% Simulate mining start
	PartitionUpperBound = 35 * ar_block:partition_size(), %% less than the highest configured partition
	ar_mining_io:set_largest_seen_upper_bound(PartitionUpperBound),
	%% Only MiningAddr partitions below the upper bound are reported.
	?assertEqual(
		{ok, [
			{0, ar_block:partition_size(), MiningAddr, 0},
			{1, ar_block:partition_size(), MiningAddr, 0},
			{2, ar_block:partition_size(), MiningAddr, 0},
			{8, ar_block:partition_size(), MiningAddr, 0},
			{9, ar_block:partition_size(), MiningAddr, 0},
			{30, ar_block:partition_size(), MiningAddr, 0},
			{31, ar_block:partition_size(), MiningAddr, 0}
		]},
		ar_http_iface_client:get_cm_partition_table(Peer)
	).

%% @doc Three CM peers with overlapping partition coverage: assert each
%% node's ar_coordination:get_peers/1 view per partition, then stop peer1,
%% re-assert, restart peer1 with different partitions and assert again.
test_peers_by_partition() ->
	PartitionUpperBound = 6 * ar_block:partition_size(),
	[B0] = ar_weave:init([], ar_test_node:get_difficulty_for_invalid_hash(),
		PartitionUpperBound),
	Peer1 = ar_test_node:peer_ip(peer1),
	Peer2 = ar_test_node:peer_ip(peer2),
	Peer3 = ar_test_node:peer_ip(peer3),
	BaseConfig = ar_test_node:base_cm_config([]),
	Config = BaseConfig#config{ cm_exit_peer = Peer1 },
	MiningAddr = Config#config.mining_addr,
	%% peer1 covers partitions 0-2, peer2 covers 1-3, peer3 covers 2-4.
	ar_test_node:remote_call(peer1, ar_test_node, start_node, [B0, Config#config{
		cm_exit_peer = not_set,
		cm_peers = [Peer2, Peer3],
		local_peers = [Peer2, Peer3],
		storage_modules = [
			{ar_block:partition_size(), 0, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 1, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 2, {spora_2_6, MiningAddr}}
		]}, false]),
	ar_test_node:remote_call(peer2, ar_test_node, start_node, [B0, Config#config{
		cm_peers = [Peer1, Peer3],
		local_peers = [Peer1, Peer3],
		storage_modules = [
			{ar_block:partition_size(), 1, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 2, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 3, {spora_2_6, MiningAddr}}
		]}, false]),
	ar_test_node:remote_call(peer3, ar_test_node, start_node, [B0, Config#config{
		cm_peers = [Peer1, Peer2],
		local_peers = [Peer1, Peer2],
		storage_modules = [
			{ar_block:partition_size(), 2, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 3, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 4, {spora_2_6, MiningAddr}}
		]}, false]),
	ar_test_node:remote_call(peer1, ar_mining_io, set_largest_seen_upper_bound,
		[PartitionUpperBound]),
	ar_test_node:remote_call(peer2, ar_mining_io, set_largest_seen_upper_bound,
		[PartitionUpperBound]),
	ar_test_node:remote_call(peer3, ar_mining_io, set_largest_seen_upper_bound,
		[PartitionUpperBound]),
	%% Let the peers exchange partition tables.
	timer:sleep(3000),
	assert_peers([], peer1, 0),
	assert_peers([Peer2], peer1, 1),
	assert_peers([Peer2, Peer3], peer1, 2),
	assert_peers([Peer2, Peer3], peer1, 3),
	assert_peers([Peer3], peer1, 4),
	assert_peers([], peer1, 5),
	assert_peers([Peer1], peer2, 0),
	assert_peers([Peer1], peer2, 1),
	assert_peers([Peer1, Peer3], peer2, 2),
	assert_peers([Peer3], peer2, 3),
	assert_peers([Peer3], peer2, 4),
	assert_peers([], peer2, 5),
	assert_peers([Peer1], peer3, 0),
	assert_peers([Peer1, Peer2], peer3, 1),
	assert_peers([Peer1, Peer2], peer3, 2),
	assert_peers([Peer2], peer3, 3),
	assert_peers([], peer3, 4),
	assert_peers([], peer3, 5),
	%% Stop peer1; peer2 and peer3 keep their last known view of it.
	ar_test_node:remote_call(peer1, ar_test_node, stop, []),
	timer:sleep(3000),
	assert_peers([Peer1], peer2, 0),
	assert_peers([Peer1], peer2, 1),
	assert_peers([Peer1, Peer3], peer2, 2),
	assert_peers([Peer3], peer2, 3),
	assert_peers([Peer3], peer2, 4),
	assert_peers([Peer1], peer3, 0),
	assert_peers([Peer1, Peer2], peer3, 1),
	assert_peers([Peer1, Peer2], peer3, 2),
	assert_peers([Peer2], peer3, 3),
	assert_peers([], peer3, 4),
	%% Restart peer1 with partitions 0, 4, 5; the others must refetch.
	ar_test_node:remote_call(peer1, ar_test_node, start_node, [B0, Config#config{
		cm_exit_peer = not_set,
		cm_peers = [Peer2, Peer3],
		local_peers = [Peer2, Peer3],
		storage_modules = [
			{ar_block:partition_size(), 0, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 4, {spora_2_6, MiningAddr}},
			{ar_block:partition_size(), 5, {spora_2_6, MiningAddr}}
		]}, false]),
	ar_test_node:remote_call(peer1, ar_mining_io, set_largest_seen_upper_bound,
		[PartitionUpperBound]),
	timer:sleep(3000),
	assert_peers([], peer1, 0),
	assert_peers([Peer2], peer1, 1),
	assert_peers([Peer2, Peer3], peer1, 2),
	assert_peers([Peer2, Peer3], peer1, 3),
	assert_peers([Peer3], peer1, 4),
	assert_peers([], peer1, 5),
	assert_peers([Peer1], peer2, 0),
	assert_peers([], peer2, 1),
	assert_peers([Peer3], peer2, 2),
	assert_peers([Peer3], peer2, 3),
	assert_peers([Peer1, Peer3], peer2, 4),
	assert_peers([Peer1], peer2, 5),
	assert_peers([Peer1], peer3, 0),
	assert_peers([Peer2], peer3, 1),
	assert_peers([Peer2], peer3, 2),
	assert_peers([Peer2], peer3, 3),
	assert_peers([Peer1], peer3, 4),
	assert_peers([Peer1], peer3, 5),
	ok.

%% --------------------------------------------------------------------
%% Helpers
%% --------------------------------------------------------------------

%% Poll (every 200 ms, up to 5 s) until Node reports exactly ExpectedPeers
%% for Partition; order-insensitive comparison.
assert_peers(ExpectedPeers, Node, Partition) ->
	?assert(ar_util:do_until(
		fun() ->
			Peers = ar_test_node:remote_call(Node, ar_coordination, get_peers, [Partition]),
			lists:sort(ExpectedPeers) == lists:sort(Peers)
		end,
		200,
		5000
	)).

%% Mine until every partition in ExpectedPartitions has appeared in a
%% winning solution; at most 40 blocks.
wait_for_each_node(Miners, ValidatorNode, CurrentHeight, ExpectedPartitions) ->
	wait_for_each_node(
		Miners, ValidatorNode, CurrentHeight, sets:from_list(ExpectedPartitions), 40).

wait_for_each_node(
		_Miners, _ValidatorNode, _CurrentHeight, _ExpectedPartitions, 0) ->
	?assert(false, "Timed out waiting for all mining nodes to win a solution");
wait_for_each_node(
		Miners, ValidatorNode, CurrentHeight, ExpectedPartitions, RetryCount) ->
	Partitions = mine_in_parallel(Miners, ValidatorNode, CurrentHeight),
	%% Remove the partitions that contributed to this block; done when empty.
	ExpectedPartitions2 = sets:subtract(ExpectedPartitions, sets:from_list(Partitions)),
	case sets:is_empty(ExpectedPartitions2) of
		true -> CurrentHeight+1;
		false ->
			wait_for_each_node(
				Miners, ValidatorNode, CurrentHeight+1, ExpectedPartitions2, RetryCount-1)
	end.

%% Mine until a single block's solution spans exactly ExpectedPartitions
%% (a cross-node, two-chunk solution); at most 20 blocks.
wait_for_cross_node(Miners, ValidatorNode, CurrentHeight, ExpectedPartitions) ->
	wait_for_cross_node(
		Miners, ValidatorNode, CurrentHeight, sets:from_list(ExpectedPartitions), 20).
wait_for_cross_node(_Miners, _ValidatorNode, _CurrentHeight, _ExpectedPartitions, 0) ->
	?assert(false, "Timed out waiting for a cross-node solution");
wait_for_cross_node(Miners, ValidatorNode, CurrentHeight, ExpectedPartitions, RetryCount) ->
	%% Sanity check: a cross-node solution spans exactly 2 partitions.
	%% BUG FIX: the previous version guarded a separate clause with
	%% `when length(ExpectedPartitions) /= 2`, but ExpectedPartitions is a set
	%% (built via sets:from_list/1 in wait_for_cross_node/4), so length/1
	%% always failed the guard silently and the check never ran. sets:size/1
	%% is not allowed in guards, so the check is done in the body instead.
	?assert(sets:size(ExpectedPartitions) == 2,
		"Cross-node solutions can only have 2 partitions."),
	A = mine_in_parallel(Miners, ValidatorNode, CurrentHeight),
	Partitions = sets:from_list(A),
	%% Mutual subset check == set equality: the block's recall partitions
	%% must be exactly the expected pair.
	MinedCrossNodeBlock = sets:is_subset(Partitions, ExpectedPartitions) andalso
		sets:is_subset(ExpectedPartitions, Partitions),
	case MinedCrossNodeBlock of
		true -> CurrentHeight+1;
		false ->
			wait_for_cross_node(
				Miners, ValidatorNode, CurrentHeight+1, ExpectedPartitions, RetryCount-1)
	end.

%% @doc Have all Miners mine concurrently, wait until ValidatorNode and every
%% miner reach (at least) CurrentHeight + 1 with consistent block hashes, and
%% return the list of partition numbers the latest block's recall byte(s)
%% fall into (one element for a one-chunk solution, two for a two-chunk one).
mine_in_parallel(Miners, ValidatorNode, CurrentHeight) ->
	report_miners(Miners),
	CurrentB = ar_test_node:remote_call(ValidatorNode, ar_node, get_current_block, []),
	ar_util:pmap(fun(Node) -> ar_test_node:mine(Node) end, Miners),
	?debugFmt(
		"Waiting until the validator node (port ~B) advances to height ~B. "
		"Current block hash: ~s, solution hash: ~s.",
		[
			ar_test_node:peer_port(ValidatorNode),
			CurrentHeight + 1,
			ar_util:encode(CurrentB#block.indep_hash),
			ar_util:encode(CurrentB#block.hash)
		]
	),
	BIValidator = ar_test_node:wait_until_height(
		ValidatorNode, CurrentHeight + 1, false, ?COORDINATED_MINING_WAIT_TIMEOUT),
	%% Since multiple nodes are mining in parallel it's possible that multiple blocks
	%% were mined. Get the Validator's current height in case it's more than
	%% CurrentHeight+1.
	NewHeight = ar_test_node:remote_call(ValidatorNode, ar_node, get_height, []),
	Hashes = [Hash || {Hash, _, _} <- lists:sublist(BIValidator, NewHeight - CurrentHeight)],
	lists:foreach(
		fun(Node) ->
			?LOG_DEBUG([{test, ar_coordinated_mining_tests},
				{waiting_for_height, NewHeight}, {node, Node}]),
			%% Make sure the miner contains all of the new validator hashes, it's okay if
			%% the miner contains *more* hashes since it's possible concurrent blocks were
			%% mined between when the Validator checked and now.
			BIMiner = ar_test_node:wait_until_height(
				Node, NewHeight, false, ?COORDINATED_MINING_WAIT_TIMEOUT),
			MinerHashes = [Hash || {Hash, _, _} <- BIMiner],
			Message = lists:flatten(io_lib:format(
				"Node ~p did not mine the same block as the validator node", [Node])),
			?assert(lists:all(fun(Hash) -> lists:member(Hash, MinerHashes) end, Hashes),
				Message)
		end,
		Miners
	),
	LatestHash = lists:last(Hashes),
	{ok, Block} = ar_test_node:http_get_block(LatestHash, ValidatorNode),
	case Block#block.recall_byte2 of
		undefined ->
			[ ar_node:get_partition_number(Block#block.recall_byte) ];
		RecallByte2 ->
			[ ar_node:get_partition_number(Block#block.recall_byte),
				ar_node:get_partition_number(RecallByte2) ]
	end.

%% Log each miner with its 1-based index and HTTP port, for debugging.
report_miners(Miners) ->
	report_miners(Miners, 1).

report_miners([], _I) ->
	ok;
report_miners([Miner | Miners], I) ->
	?debugFmt("Miner ~B: ~p, port: ~B.", [I, Miner, ar_test_node:peer_port(Miner)]),
	report_miners(Miners, I + 1).

assert_empty_cache(_Node) ->
	%% wait until the mining has stopped, then assert that the cache is empty
	timer:sleep(10000),
	ok.
	% [{_, Size}] = ar_test_node:remote_call(Node, ets, lookup, [ar_mining_server, chunk_cache_size]),
	%% We should assert that the size is 0, but there is a lot of concurrency in these tests
	%% so it's been hard to guarantee the cache is always empty by the time this check runs.
	%% It's possible there is a bug in the cache management code, but that code is pretty complex.
	%% In the future, if cache size ends up being a problem we can revisit - but for now, not
	%% worth the time for a test failure that may not have any real-world implications.
	% ?assertEqual(0, Size, Node).

%% Build a #mining_candidate{} with random field values, for exercising the
%% CM HTTP endpoints.
dummy_candidate() ->
	#mining_candidate{
		cm_diff = {rand:uniform(1024), rand:uniform(1024)},
		h0 = crypto:strong_rand_bytes(32),
		h1 = crypto:strong_rand_bytes(32),
		mining_address = crypto:strong_rand_bytes(32),
		next_seed = crypto:strong_rand_bytes(32),
		next_vdf_difficulty = rand:uniform(1024),
		nonce_limiter_output = crypto:strong_rand_bytes(32),
		partition_number = rand:uniform(1024),
		partition_number2 = rand:uniform(1024),
		partition_upper_bound = rand:uniform(1024),
		seed = crypto:strong_rand_bytes(32),
		session_key = dummy_session_key(),
		start_interval_number = rand:uniform(1024),
		step_number = rand:uniform(1024)
	}.

%% Build a #mining_solution{} with random field values, for exercising the
%% CM HTTP endpoints.
dummy_solution() ->
	#mining_solution{
		last_step_checkpoints = [],
		merkle_rebase_threshold = rand:uniform(1024),
		mining_address = crypto:strong_rand_bytes(32),
		next_seed = crypto:strong_rand_bytes(32),
		next_vdf_difficulty = rand:uniform(1024),
		nonce = rand:uniform(1024),
		nonce_limiter_output = crypto:strong_rand_bytes(32),
		partition_number = rand:uniform(1024),
		partition_upper_bound = rand:uniform(1024),
		poa1 = dummy_poa(),
		poa2 = dummy_poa(),
		preimage = crypto:strong_rand_bytes(32),
		recall_byte1 = rand:uniform(1024),
		seed = crypto:strong_rand_bytes(32),
		solution_hash = crypto:strong_rand_bytes(32),
		start_interval_number = rand:uniform(1024),
		step_number = rand:uniform(1024),
		steps = []
	}.

%% Random proof-of-access record with a full-size chunk.
dummy_poa() ->
	#poa{
		option = rand:uniform(1024),
		tx_path = crypto:strong_rand_bytes(32),
		data_path = crypto:strong_rand_bytes(32),
		chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)
	}.

%% Random VDF session key tuple.
dummy_session_key() ->
	{crypto:strong_rand_bytes(32), rand:uniform(100), rand:uniform(10000)}.

%% Mock: is_retarget_height/1 is true only at exactly ?RETARGET_BLOCKS.
mock_for_single_difficulty_adjustment_height() ->
	{ar_retarget, is_retarget_height, fun(Height) ->
		case Height of
			?RETARGET_BLOCKS -> true;
			_ -> false
		end
	end}.
%% Mock: is_retarget_block/1 is true only for the block at height
%% ?RETARGET_BLOCKS.
mock_for_single_difficulty_adjustment_block() ->
	{ar_retarget, is_retarget_block, fun(Block) ->
		case Block#block.height of
			?RETARGET_BLOCKS -> true;
			_ -> false
		end
	end}.

================================================
FILE: apps/arweave/test/ar_data_roots_sync_tests.erl
================================================
-module(ar_data_roots_sync_tests).

-include("ar.hrl").
-include("ar_data_sync.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%%% Group: data roots metadata only (GET/POST /data_roots, sync from chain — no /chunk roundtrip).
%%% ---------------------------------------------------------------------------

%% Data roots sync from a peer via ar_data_root_sync when main joins with partial storage (not header sync).
%% All generators below shrink the consensus window and anchor depth to 5 and
%% zero out the storage-module overlap to keep the test weave small.
data_roots_sync_from_peer_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_consensus_window_size, fun() -> 5 end},
			{ar_block, get_max_tx_anchor_depth, fun() -> 5 end},
			{ar_storage_module, get_overlap, fun(_Packing) -> 0 end}],
		fun test_data_roots_sync_from_peer/0).

%% Data roots pushed with HTTP: GET /data_roots from miner, POST /data_roots to peer; assert metadata via GET.
data_roots_http_post_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_consensus_window_size, fun() -> 5 end},
			{ar_block, get_max_tx_anchor_depth, fun() -> 5 end},
			{ar_storage_module, get_overlap, fun(_Packing) -> 0 end}],
		fun test_data_roots_http_post/0).

%%% Group: chunk POST/GET after data roots are available on the peer.
%%% ---------------------------------------------------------------------------

%% HTTP share of roots then chunk roundtrip: per block, GET roots from miner, POST to main, POST/GET /chunk.
chunk_after_data_roots_http_post_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_consensus_window_size, fun() -> 5 end},
			{ar_block, get_max_tx_anchor_depth, fun() -> 5 end},
			{ar_storage_module, get_overlap, fun(_Packing) -> 0 end}],
		fun test_chunk_after_data_roots_http_post/0).

%% Background ar_data_root_sync + header_sync_jobs > 0; then POST/GET /chunk (regression: POST 200, GET 404).
chunk_after_data_roots_background_sync_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_consensus_window_size, fun() -> 5 end},
			{ar_block, get_max_tx_anchor_depth, fun() -> 5 end},
			{ar_storage_module, get_overlap, fun(_Packing) -> 0 end}],
		fun test_chunk_after_data_roots_background_sync/0).

%% Two blocks share one data root; only one copy of the roots is synced — see the test body.
chunk_skipped_with_duplicate_data_root_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_consensus_window_size, fun() -> 5 end},
			{ar_block, get_max_tx_anchor_depth, fun() -> 5 end},
			{ar_storage_module, get_overlap, fun(_Packing) -> 0 end}],
		fun test_chunk_skipped_with_depth_exhaustion/0) =:= ignore_this_line.

%% NOTE: the two fixtures below are kept byte-for-byte; see each test body for details.
chunk_skipped_with_depth_exhaustion_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_block, get_consensus_window_size, fun() -> 5 end},
			{ar_block, get_max_tx_anchor_depth, fun() -> 5 end},
			{ar_storage_module, get_overlap, fun(_Packing) -> 0 end}],
		fun test_chunk_skipped_with_depth_exhaustion/0).

test_data_roots_sync_from_peer() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2_000_000_000_000_000), <<>>}]),
	%% Peer1 will mine while main stays disconnected until join_on later.
	start_peers_then_disconnect(main, peer1, B0),
	%% Mine blocks with transactions with data on peer1 BEFORE main joins.
	BlocksBeforeJoin = lists:map(
		fun(_) ->
			Data = generate_random_txs(Wallet),
			TXs = [TX || {TX, _} <- Data],
			ar_test_node:post_and_mine(#{ miner => peer1, await_on => peer1 }, TXs)
		end,
		lists:seq(1, 3)
	),
	%% The node fetches this many latest blocks after joining the network.
	%% We want all our data blocks be older so that the node has to use
	%% the data root syncing mechanism to fetch data roots (we explicitly
	%% assert the unexpected data roots are not synced further down here).
	?assertEqual(10, 2 * ar_block:get_max_tx_anchor_depth()),
	Blocks = BlocksBeforeJoin ++ lists:map(
		fun(_) ->
			Data = generate_random_txs(Wallet),
			TXs = [TX || {TX, _} <- Data],
			ar_test_node:post_and_mine(#{ miner => peer1, await_on => peer1 }, TXs)
		end,
		lists:seq(1, 11)
	),
	%% Now start main (node A) with header syncing disabled and storage modules covering PART of the range.
	{ok, BaseConfig} = arweave_config:get_env(),
	MainRewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	%% Cover only the first partition and half of the second one to ensure partial coverage.
	MainConfig = BaseConfig#config{
		mine = false,
		header_sync_jobs = 0,
		storage_modules = [
			%% The first MB of the weave.
			{?MiB, 0, {replica_2_9, MainRewardAddr}},
			%% The second 3 MB of the weave (skipping 1-2 MB).
			{3 * ?MiB, 1, {replica_2_9, MainRewardAddr}}
		]
	},
	%% Byte ranges the storage modules above actually cover: [0, 1 MiB) and [3 MiB, 6 MiB).
	ConfiguredRanges = ar_intervals:from_list([{?MiB, 0}, {6 * ?MiB, 3 * ?MiB}]),
	ar_test_node:join_on(#{ node => main, join_on => peer1, config => MainConfig }, true),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:wait_until_joined(main),
	LastConsensusWindowHeight = 4,
	%% For every mined block, assert the expected data-root availability on main:
	%% - empty blocks: nothing to check;
	%% - blocks above the disk pool threshold: skipped (data roots handled via disk pool);
	%% - recent blocks (within the mocked consensus window): roots must be stored even
	%%   outside the configured storage modules;
	%% - older blocks: roots present iff the block range intersects ConfiguredRanges.
	lists:foreach(
		fun	(#block{ block_size = 0 }) ->
				ok;
			(B) ->
				BlockStart = block_start(B),
				case BlockStart >= ar_data_sync:get_disk_pool_threshold() of
					true ->
						ok;
					false ->
						BlockEnd = B#block.weave_size,
						BlockRange = ar_intervals:from_list([{BlockEnd, BlockStart}]),
						Intersection = ar_intervals:intersection(BlockRange, ConfiguredRanges),
						case B#block.height >= LastConsensusWindowHeight of
							true ->
								?debugFmt("Asserting data roots synced during consensus "
										"are stored, even outside the configured storage modules, "
										"height: ~B, configured ranges: ~0p, intersection: ~0p",
									[B#block.height, ConfiguredRanges, Intersection]),
								wait_for_data_roots(main, B);
							false ->
								case ar_intervals:is_empty(Intersection) of
									false ->
										?debugFmt("Asserting data roots synced for partitions "
												"we configured, range intersection: ~0p",
											[Intersection]),
										wait_for_data_roots(main, B);
									true ->
										?debugFmt("Asserting no data roots for partitions "
												"we did not configure, block range: ~0p",
											[BlockRange]),
										assert_no_data_roots(main, B)
								end
						end
				end
		end,
		Blocks
	).

test_data_roots_http_post() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2_000_000_000_000_000), <<>>}]),
	start_peers_then_disconnect(peer1, main, B0),
	{B, _} = mine_block_with_small_fixed_data_tx(peer1, Wallet),
	%% Mine some empty blocks to push the data block out of the recent window.
	mine_empty_blocks_on_peer_after(peer1, B, 11),
	join_main_on_peer1(B#block.height + 11, false),
	ar_test_node:disconnect_from(peer1),
	%% Background syncing is off, so main must have no roots for B yet.
	assert_no_data_roots(main, B),
	%% Manually transfer the roots over HTTP, then verify they become visible on main.
	{ok, Body} = get_data_roots(peer1, B),
	post_data_roots(main, B, Body),
	wait_for_data_roots(main, B),
	ok.

test_chunk_after_data_roots_http_post() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2_000_000_000_000_000), <<>>}]),
	%% Main must not sync while peer1 builds the chain we will POST later.
	start_peers_then_disconnect(main, peer1, B0),
	%% Mine blocks with data on peer1. "Guaranteed" is a block with a fixed small data tx that
	%% is guaranteed to trigger a POST /data_roots in the test loop. The remaining random data
	%% provides some good additional coverage, but aren't guaranteed to always
	%% trigger a POST /data_roots.
	Guaranteed = mine_block_with_small_fixed_data_tx(peer1, Wallet),
	Random = lists:map(
		fun(_) ->
			TXData0 = generate_random_txs(Wallet),
			TXs = [TX || {TX, _} <- TXData0],
			B = ar_test_node:post_and_mine(#{ miner => peer1, await_on => peer1 }, TXs),
			{B, filter_mined_tx_data(B, TXData0)}
		end,
		lists:seq(1, 4)
	),
	Data = [Guaranteed | Random],
	%% Mine some empty blocks to push the data blocks out of the recent window.
	{LastB, _} = lists:last(Data),
	mine_empty_blocks_on_peer_after(peer1, LastB, 11),
	join_main_on_peer1(LastB#block.height + 11, false),
	ar_test_node:disconnect_from(peer1),
	%% GET/POST /data_roots only apply below each peer's disk pool bound (see
	%% ar_data_sync:get_data_roots_for_offset/1 and POST handler in ar_http_iface_middleware).
	Peer1PoolBound = ar_test_node:remote_call(peer1, ar_data_sync, get_disk_pool_threshold, []),
	MainPoolBound = ar_test_node:remote_call(main, ar_data_sync, get_disk_pool_threshold, []),
	%% Verify POST /data_roots and POST /chunk for each non-empty block in range on both peers.
	lists:foreach(
		fun({B, TXData}) ->
			BlockStart = block_start(B),
			case B#block.block_size > 0
					andalso BlockStart < Peer1PoolBound
					andalso BlockStart < MainPoolBound of
				true ->
					assert_no_data_roots(main, B),
					%% Fetch data roots from peer1 and POST to main.
					{ok, Body} = get_data_roots(peer1, B),
					post_data_roots(main, B, Body),
					%% For each transaction with data, POST its chunks to main and verify GET /chunk.
					lists:foreach(
						fun({TX, Chunks}) ->
							case TX#tx.data_size > 0 of
								true -> post_then_get_chunks(main, B, TX, Chunks);
								false -> ok
							end
						end,
						TXData
					);
				false ->
					ok
			end
		end,
		Data
	),
	ok.

test_chunk_after_data_roots_background_sync() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2_000_000_000_000_000), <<>>}]),
	start_peers_then_disconnect(peer1, main, B0),
	{B, [{TX, Chunks}]} = mine_block_with_small_fixed_data_tx(peer1, Wallet),
	mine_empty_blocks_on_peer_after(peer1, B, 11),
	%% Join with background data-root syncing ENABLED (header_sync_jobs > 0).
	join_main_on_peer1(B#block.height + 11, true),
	true = B#block.block_size > 0,
	%% Background sync should deliver B's data roots without any manual POST.
	wait_until_data_roots_synced(main, B),
	ar_test_node:disconnect_from(peer1),
	%% Regression check: the chunk POST must 200 and the chunk must become GETtable.
	post_then_get_chunks(main, B, TX, Chunks),
	ok.
test_chunk_skipped_with_duplicate_data_root() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2_000_000_000_000_000), <<>>}]),
	start_peers_then_disconnect(peer1, main, B0),
	%% Mine two consecutive blocks on peer1 with the SAME data root (identical chunk data).
	{B1, [{TX1, Chunks1}]} = mine_block_with_small_fixed_data_tx(peer1, Wallet),
	{B2, [{TX2, Chunks2}]} = mine_block_with_small_fixed_data_tx(peer1, Wallet),
	?assertEqual(TX1#tx.data_root, TX2#tx.data_root),
	%% Mine empty blocks to push the data blocks out of the recent window.
	mine_empty_blocks_on_peer_after(peer1, B2, 11),
	%% Join main with background data_roots syncing turned off.
	%% We manually sync only B2's data roots (simulating incomplete background sync where
	%% B2 was processed but B1 was not).
	join_main_on_peer1(B2#block.height + 11, false),
	%% Pre-fetch B1's data roots while still connected to peer1. We'll post them to main
	%% immediately after posting B1's chunk to beat the disk pool scan.
	{ok, Body1} = get_data_roots(peer1, B1),
	{ok, Body2} = get_data_roots(peer1, B2),
	post_data_roots(main, B2, Body2),
	wait_for_data_roots(main, B2),
	ar_test_node:disconnect_from(peer1),
	[{AbsEnd2, Proof2}] = ar_test_data_sync:build_proofs(B2, TX2, Chunks2),
	post_chunk(main, Proof2),
	wait_for_sync_record_update(main, AbsEnd2),
	%% POST TX1's chunk. Since we've only synced one copy of the data_roots, and we've already
	%% posted one chunk matching that data_root (TX2's chunk), we should get a 200 when
	%% posting Chunk1 - *but* we will not see Chunk1 persisted. This is not ideal but it is
	%% by design.
	%% For more context, and to track the state of any improvements to the process, see:
	%% https://github.com/ArweaveTeam/arweave-dev/issues/1112
	[{AbsEnd1, Proof1}] = ar_test_data_sync:build_proofs(B1, TX1, Chunks1),
	post_chunk(main, Proof1),
	%% Allow time for the chunk to be persisted (it shouldn't be)
	timer:sleep(10_000),
	?assertEqual(not_found, get_chunk(main, AbsEnd1)),
	%% Now we post B1's data roots, which should allow Chunk1 to be persisted
	post_data_roots(main, B1, Body1),
	wait_for_data_roots(main, B1),
	%% Re-POST the chunk — now B1's index entry exists so the chunk can be promoted.
	post_chunk(main, Proof1),
	ok = wait_for_chunk_to_persist(main, AbsEnd1).

test_chunk_skipped_with_depth_exhaustion() ->
	MaxDuplicateDataRoots = 3,
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2_000_000_000_000_000), <<>>}]),
	start_peers_then_disconnect(peer1, main, B0),
	%% Mine one more block than the configured duplicate-depth limit. This ensures the oldest
	%% chunk falls outside the configured duplicate data-root window.
	Blocks = lists:map(
		fun(_) -> mine_block_with_small_fixed_data_tx(peer1, Wallet) end,
		lists:seq(1, MaxDuplicateDataRoots + 1)
	),
	{LastB, _} = lists:last(Blocks),
	%% Mine enough empty blocks to push the chunks out of the disk pool. The later duplicates
	%% should still be persistable, but the oldest should now be outside the configured window.
	mine_empty_blocks_on_peer_after(peer1, LastB, 10),
	%% Join with the max_duplicate_data_roots override applied on main.
	join_main_on_peer1(LastB#block.height + 10, false, MaxDuplicateDataRoots),
	ar_test_node:disconnect_from(peer1),
	[{B1, [{TX1, Chunks1}]} | HigherBlocks] = Blocks,
	%% Sync data roots for every block EXCEPT the oldest (B1).
	lists:foreach(
		fun({B, _}) ->
			{ok, Body} = get_data_roots(peer1, B),
			post_data_roots(main, B, Body),
			wait_for_data_roots(main, B)
		end,
		HigherBlocks
	),
	%% POST chunks for the later duplicate blocks and wait for promotion. The oldest block B1
	%% is outside the configured duplicate-offset window, so it should not become queryable via
	%% duplicate fanout.
	HigherAbsEnds = lists:map(
		fun({B, [{TX, Chunks}]}) ->
			[{AbsEnd, Proof}] = ar_test_data_sync:build_proofs(B, TX, Chunks),
			post_chunk(main, Proof),
			AbsEnd
		end,
		HigherBlocks
	),
	lists:foreach(
		fun(AbsEnd) -> wait_for_chunk_to_persist(main, AbsEnd) end,
		HigherAbsEnds
	),
	timer:sleep(10_000),
	%% POST chunk for block 1 (lowest offset). chunk_offsets_synced/5 only checks the configured
	%% number of synced duplicate offsets, so B1 is treated as already synced and skipped.
	[{AbsEnd1, Proof1}] = ar_test_data_sync:build_proofs(B1, TX1, Chunks1),
	post_chunk(main, Proof1),
	timer:sleep(10_000),
	?assertEqual(not_found, get_chunk(main, AbsEnd1)),
	{ok, Body1} = get_data_roots(peer1, B1),
	%% This test confirms that doing a POST /data_roots before posting a chunk is not a reliable
	%% workaround to force the chunk to be persisted. We will either need to change the
	%% duplicate data_roots logic, or implement a new endpoint.
	%% For more context, and to track the state of any improvements to the process, see:
	%% https://github.com/ArweaveTeam/arweave-dev/issues/1112
	post_data_roots(main, B1, Body1),
	wait_for_data_roots(main, B1),
	post_chunk(main, Proof1),
	timer:sleep(10_000),
	?assertEqual(not_found, get_chunk(main, AbsEnd1)),
	% ok = wait_for_chunk_to_persist(main, AbsEnd1),
	ok.

%% Start PeerA and PeerB from the same genesis, wait until both joined, then have PeerA
%% disconnect from PeerB so they stop syncing while tests extend the chain on one side.
start_peers_then_disconnect(PeerA, PeerB, B0) ->
	ar_test_node:start_peer(PeerA, B0),
	ar_test_node:start_peer(PeerB, B0),
	ar_test_node:wait_until_joined(PeerA),
	ar_test_node:wait_until_joined(PeerB),
	ar_test_node:remote_call(PeerA, ar_test_node, disconnect_from, [PeerB]),
	ok.

%% Mine Count empty blocks on Peer immediately after Block (height advances from Block#block.height).
mine_empty_blocks_on_peer_after(Peer, Block, Count) ->
	%% Fold over 1..Count, mining one block at a time and waiting for the peer to
	%% reach each successive height; returns the final height.
	lists:foldl(
		fun(_, Height) ->
			ar_test_node:mine(Peer),
			ar_test_node:assert_wait_until_height(Peer, Height + 1),
			Height + 1
		end,
		Block#block.height,
		lists:seq(1, Count)
	).

%% Rejoin main onto peer1 with mine disabled. When EnableBackgroundSync is true, enable the
%% normal background sync setup; when false, disable both header syncing and background
%% data_roots syncing. Does not disconnect — caller may call
%% ar_test_node:remote_call(main, ar_test_node, disconnect_from, [peer1]) when needed.
join_main_on_peer1(ExpectedHeight, EnableBackgroundSync) ->
	join_main_on_peer1(ExpectedHeight, EnableBackgroundSync, undefined).

%% As join_main_on_peer1/2, but additionally overrides max_duplicate_data_roots in the
%% node config when the third argument is not 'undefined'.
join_main_on_peer1(ExpectedHeight, EnableBackgroundSync, MaxDuplicateDataRoots) ->
	{ok, BaseConfig} = arweave_config:get_env(),
	Config = BaseConfig#config{
		mine = false,
		sync_jobs = 0,
		%% header_sync_jobs > 0 is what actually drives the background data-root sync.
		header_sync_jobs = case EnableBackgroundSync of true -> 2; false -> 0 end,
		enable_data_roots_syncing = EnableBackgroundSync
	},
	MainConfig = case MaxDuplicateDataRoots of
		undefined -> Config;
		Value -> Config#config{ max_duplicate_data_roots = Value }
	end,
	ar_test_node:join_on(#{ node => main, join_on => peer1, config => MainConfig }, true),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:wait_until_joined(main),
	ar_test_node:assert_wait_until_height(main, ExpectedHeight),
	ok.

%% Poll (every 500 ms, up to 120 s) until the remote node reports the data roots for
%% B's weave range [Start, End) as synced against B's tx_root.
wait_until_data_roots_synced(Peer, B) ->
	Start = block_start(B),
	End = B#block.weave_size,
	true = ar_util:do_until(
		fun() ->
			ar_test_node:remote_call(Peer, ar_data_sync, are_data_roots_synced,
				[Start, End, B#block.tx_root])
		end, 500, 120_000),
	ok.

%% Mine one block on peer1 with a single small v2 data tx. TXData matches generate_random_txs/1 entries.
mine_block_with_small_fixed_data_tx(Peer, Wallet) ->
	%% A single 4 KiB all-zero chunk: the merkle root is deterministic, so repeated
	%% calls yield transactions with IDENTICAL data roots (used by the duplicate tests).
	Chunks = [<< 0:(4096 * 8) >>],
	{DataRoot, _DataTree} = ar_merkle:generate_tree(
		ar_tx:sized_chunks_to_sized_chunk_ids(
			ar_tx:chunks_to_size_tagged_chunks(Chunks)
		)
	),
	{TX, _} = ar_test_data_sync:tx(
		#{ wallet => Wallet,
			split_type => {fixed_data, DataRoot, Chunks},
			format => v2,
			reward => ?AR(10_000_000_000),
			tx_anchor_peer => Peer,
			get_fee_peer => Peer }),
	B = ar_test_node:post_and_mine(#{ miner => Peer, await_on => Peer }, [TX]),
	{B, [{TX, Chunks}]}.

%% Recursively build a random batch of test transactions. Each roll of Coin (1..12)
%% appends one of three tx groups (each with probability 3/12) and recurses; the
%% remaining 3/12 terminates the list. Expected list length is therefore finite
%% but unbounded.
generate_random_txs(Wallet) ->
	Coin = rand:uniform(12),
	case Coin of
		Val when Val =< 3 ->
			%% Add three data txs with different data roots.
			[ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => original_split, format => v2,
					reward => ?AR(10_000_000_000), tx_anchor_peer => peer1,
					get_fee_peer => peer1 }),
			ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => original_split, format => v2,
					reward => ?AR(10_000_000_000), tx_anchor_peer => peer1,
					get_fee_peer => peer1 }),
			ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => original_split, format => v2,
					reward => ?AR(10_000_000_000), tx_anchor_peer => peer1,
					get_fee_peer => peer1 })
			| generate_random_txs(Wallet)];
		Val when Val =< 6 ->
			%% A bit smaller than 256 KiB to provoke padding.
			Chunks = [<< 0:(262140 * 8) >>],
			{DataRoot, _DataTree} = ar_merkle:generate_tree(
				ar_tx:sized_chunks_to_sized_chunk_ids(
					ar_tx:chunks_to_size_tagged_chunks(Chunks)
				)
			),
			%% Two transactions with the same data root.
			[ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => {fixed_data, DataRoot, Chunks},
					format => v2, reward => ?AR(10_000_000_000),
					tx_anchor_peer => peer1, get_fee_peer => peer1 }),
			ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => {fixed_data, DataRoot, Chunks},
					format => v2, reward => ?AR(10_000_000_000),
					tx_anchor_peer => peer1, get_fee_peer => peer1 })
			| generate_random_txs(Wallet)];
		Val when Val =< 9 ->
			%% Add empty tx and two transactions with different data roots.
			[ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => {fixed_data, <<>>, []}, format => v2,
					reward => ?AR(10_000_000_000), tx_anchor_peer => peer1,
					get_fee_peer => peer1 }),
			ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => original_split, format => v2,
					reward => ?AR(10_000_000_000), tx_anchor_peer => peer1,
					get_fee_peer => peer1 }),
			ar_test_data_sync:tx(
				#{ wallet => Wallet, split_type => original_split, format => v2,
					reward => ?AR(10_000_000_000), tx_anchor_peer => peer1,
					get_fee_peer => peer1 })
			| generate_random_txs(Wallet)];
		_ ->
			[]
	end.

%% Absolute weave offset where block B's data begins.
block_start(B) ->
	B#block.weave_size - B#block.block_size.

%% Keep only the {TX, Chunks} pairs whose tx actually made it into block B.
%% NOTE(review): assumes B#block.txs holds tx IDs (compared against TX#tx.id) —
%% confirm it never holds full #tx{} records in this test path.
filter_mined_tx_data(B, TXData) ->
	MinedTXIDs = sets:from_list(B#block.txs),
	[{TX, Chunks} || {TX, Chunks} <- TXData, sets:is_element(TX#tx.id, MinedTXIDs)].

%% Build the /data_roots/<BlockStart> request path.
data_roots_path(BlockStart) ->
	"/data_roots/" ++ integer_to_list(BlockStart).

%% GET /data_roots for B on Peer. Returns {ok, Body} | not_found.
get_data_roots(Peer, B) ->
	Start = block_start(B),
	case ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(Peer),
			path => data_roots_path(Start) }) of
		{ok, {{<<"200">>, _}, _, Body, _, _}} ->
			{ok, Body};
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			not_found;
		Other ->
			?assert(false, lists:flatten(io_lib:format(
				"GET /data_roots/~B: unexpected reply ~p", [Start, Other])))
	end.

%% POST /data_roots for B to Peer. Asserts 200.
post_data_roots(Peer, B, Body) ->
	Start = block_start(B),
	case ar_http:req(#{ method => post, peer => ar_test_node:peer_ip(Peer),
			path => data_roots_path(Start), body => Body }) of
		{ok, {{<<"200">>, _}, _, <<>>, _, _}} ->
			ok;
		Other ->
			?assert(false, lists:flatten(io_lib:format(
				"POST /data_roots/~B: expected 200, got ~p", [Start, Other])))
	end.

%% POST /chunk with a proof map. Asserts 200.
post_chunk(Node, Proof) ->
	case ar_test_node:post_chunk(Node, ar_serialize:jsonify(Proof)) of
		{ok, {{<<"200">>, _}, _, <<>>, _, _}} ->
			ok;
		Other ->
			?assert(false, lists:flatten(io_lib:format(
				"POST /chunk: expected 200, got ~p", [Other])))
	end.
%% Poll until AbsoluteEndOffset appears in the sync record for the given node.
%% Polls every 500 ms for up to 60 s; crashes (badmatch) on timeout.
wait_for_sync_record_update(Node, AbsoluteEndOffset) ->
	true = ar_util:do_until(
		fun() ->
			case ar_test_node:remote_call(Node, ar_sync_record, is_recorded,
					[AbsoluteEndOffset, ar_data_sync]) of
				{{true, _}, _} -> true;
				_ -> false
			end
		end, 500, 60_000).

%% GET /chunk at the given offset with any packing. Returns ok | not_found.
%% Any reply other than 200/404 fails the test.
get_chunk(Node, GlobalEndOffset) ->
	case ar_test_node:get_chunk(Node, GlobalEndOffset, any) of
		{ok, {{<<"200">>, _}, _, _, _, _}} ->
			ok;
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			not_found;
		Other ->
			?assert(false, lists:flatten(io_lib:format(
				"GET /chunk/~B: unexpected reply ~p", [GlobalEndOffset, Other])))
	end.

%% POST /chunk for each proof from get_records_with_proofs/3, then poll GET /chunk until
%% queryable (disk pool → sync_record promotion is asynchronous).
post_then_get_chunks(Node, B, TX, Chunks) ->
	Records = ar_test_data_sync:get_records_with_proofs(B, TX, Chunks),
	lists:foreach(
		fun({_, _, _, {GlobalChunkEndOffset, Proof}}) ->
			post_chunk(Node, Proof),
			ok = wait_for_chunk_to_persist(Node, GlobalChunkEndOffset)
		end,
		Records
	).

%% Poll GET /chunk until HTTP 200 (disk-pool → sync_record promotion is asynchronous).
%% Default timeout: 15 s.
wait_for_chunk_to_persist(Node, GlobalEndOffset) ->
	wait_for_chunk_to_persist(Node, GlobalEndOffset, 15_000).

%% As above with an explicit timeout in milliseconds; polls every 100 ms and fails
%% the test with a descriptive message on timeout.
wait_for_chunk_to_persist(Node, GlobalEndOffset, TimeoutMs) ->
	case ar_util:do_until(
		fun() ->
			case get_chunk(Node, GlobalEndOffset) of
				ok -> true;
				not_found -> false
			end
		end, 100, TimeoutMs
	) of
		true ->
			ok;
		{error, timeout} ->
			?assert(false, lists:flatten(io_lib:format(
				"Timeout waiting for chunk to persist: ~p", [GlobalEndOffset])))
	end.
wait_for_data_roots(Peer, B) -> Start = block_start(B), End = B#block.weave_size, Height = B#block.height, true = ar_util:do_until( fun() -> case get_data_roots(Peer, B) of {ok, Body} -> case ar_serialize:binary_to_data_roots(Body) of {ok, {_TXRoot, BlockSize, _Entries}} when Start + BlockSize == End -> true; {ok, {_TXRoot, BlockSize2, _Entries}} -> ?debugFmt("Unexpected block size: ~B, expected: ~B, height: ~B", [BlockSize2, End - Start, Height]), ?assert(false); {error, Error} -> ?debugFmt("Unexpected error: ~p, height: ~B", [Error, Height]), ?assert(false) end; not_found -> false end end, 200, 120_000 ), ok. assert_no_data_roots(Peer, B) -> case get_data_roots(Peer, B) of not_found -> ok; {ok, Body} -> ?debugFmt("Expected 404 but got body: ~p ~p", [Body, ar_serialize:binary_to_data_roots(Body)]), ?assert(false) end. ================================================ FILE: apps/arweave/test/ar_data_sync_disk_pool_rotation_test.erl ================================================ -module(ar_data_sync_disk_pool_rotation_test). -include_lib("eunit/include/eunit.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include("ar.hrl"). -include("ar_consensus.hrl"). -import(ar_test_node, [assert_wait_until_height/2]). disk_pool_rotation_test_() -> {timeout, 120, fun test_disk_pool_rotation/0}. test_disk_pool_rotation() -> ?LOG_DEBUG([{event, test_disk_pool_rotation_start}]), Addr = ar_wallet:to_address(ar_wallet:new_keyfile()), %% Will store the three genesis chunks. 
%% The third one falls inside the "overlap" (see ar_storage_module.erl) StorageModules = [{2 * ?DATA_CHUNK_SIZE, 0, ar_test_node:get_default_storage_module_packing(Addr, 0)}], Wallet = ar_test_data_sync:setup_nodes( #{ addr => Addr, storage_modules => StorageModules }), Chunks = [crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)], {DataRoot, DataTree} = ar_merkle:generate_tree( ar_tx:sized_chunks_to_sized_chunk_ids( ar_tx:chunks_to_size_tagged_chunks(Chunks) ) ), {TX, Chunks} = ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot, Chunks}), ar_test_node:assert_post_tx_to_peer(main, TX), Offset = ?DATA_CHUNK_SIZE, DataSize = ?DATA_CHUNK_SIZE, DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), Proof = #{ data_root => ar_util:encode(DataRoot), data_path => ar_util:encode(DataPath), chunk => ar_util:encode(hd(Chunks)), offset => integer_to_binary(Offset), data_size => integer_to_binary(DataSize) }, ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), ar_test_node:mine(main), assert_wait_until_height(main, 1), timer:sleep(2_000), Options = #{ format => etf, random_subset => false }, {ok, Binary1} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global1} = ar_intervals:safe_from_etf(Binary1), %% 3 genesis chunks are packed with the replica 2.9 format and therefore stored %% in the footprint record and not here. ?assertEqual([{1048576, 786432}], ar_intervals:to_list(Global1)), ar_test_node:mine(main), assert_wait_until_height(main, 2), {ok, Binary2} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global2} = ar_intervals:safe_from_etf(Binary2), ?assertEqual([{1048576, 786432}], ar_intervals:to_list(Global2)), ar_test_node:mine(main), assert_wait_until_height(main, 3), ar_test_node:mine(main), assert_wait_until_height(main, 4), %% The new chunk has been confirmed but there is not storage module to take it. 
?assertEqual(3, ?SEARCH_SPACE_UPPER_BOUND_DEPTH), true = ar_util:do_until( fun() -> {ok, Binary3} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global3} = ar_intervals:safe_from_etf(Binary3), [] == ar_intervals:to_list(Global3) end, 200, 5000 ). ================================================ FILE: apps/arweave/test/ar_data_sync_enqueue_intervals_test.erl ================================================ -module(ar_data_sync_enqueue_intervals_test). -include_lib("eunit/include/eunit.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include("ar.hrl"). enqueue_intervals_test() -> test_enqueue_intervals([], 2, [], [], [], "Empty Intervals"), Peer1 = {1, 2, 3, 4, 1984}, Peer2 = {101, 102, 103, 104, 1984}, Peer3 = {201, 202, 203, 204, 1984}, test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none} ], 5, [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, {none, 6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1}, {none, 7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer1}, {none, 8*?DATA_CHUNK_SIZE, 9*?DATA_CHUNK_SIZE, Peer1} ], "Single peer, full intervals, all chunks. Non-overlapping QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none} ], 2, [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1} ], "Single peer, full intervals, 2 chunks. 
Non-overlapping QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none}, {Peer2, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} ]), none}, {Peer3, ar_intervals:from_list([ {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} ]), none} ], 2, [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {8*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, {none, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, {none, 6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer2}, {none, 7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer3} ], "Multiple peers, overlapping, full intervals, 2 chunks. Non-overlapping QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none}, {Peer2, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} ]), none}, {Peer3, ar_intervals:from_list([ {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} ]), none} ], 3, [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {8*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, {none, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, {none, 6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1}, {none, 7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer3} ], "Multiple peers, overlapping, full intervals, 3 chunks. 
Non-overlapping QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none} ], 5, [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE}], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {7*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, {none, 6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1} ], "Single peer, full intervals, all chunks. Overlapping QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none}, {Peer2, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} ]), none}, {Peer3, ar_intervals:from_list([ {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} ]), none} ], 2, [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE}], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, {none, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, {none, 6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer2} ], "Multiple peers, overlapping, full intervals, 2 chunks. Overlapping QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, trunc(5.75*?DATA_CHUNK_SIZE)} ]), none} ], 2, [ {20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {trunc(8.5*?DATA_CHUNK_SIZE), trunc(6.5*?DATA_CHUNK_SIZE)} ], [ {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, trunc(3.25*?DATA_CHUNK_SIZE), Peer1} ], "Single peer, partial intervals, 2 chunks. 
Overlapping partial QIntervals."), test_enqueue_intervals( [ {Peer1, ar_intervals:from_list([ {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, trunc(5.75*?DATA_CHUNK_SIZE)} ]), none}, {Peer2, ar_intervals:from_list([ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {7*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ]), none}, {Peer3, ar_intervals:from_list([ {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} ]), none} ], 2, [ {20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {trunc(8.5*?DATA_CHUNK_SIZE), trunc(6.5*?DATA_CHUNK_SIZE)} ], [ {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, {8*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} ], [ {none, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, {none, 3*?DATA_CHUNK_SIZE, trunc(3.25*?DATA_CHUNK_SIZE), Peer1}, {none, trunc(3.25*?DATA_CHUNK_SIZE), 4*?DATA_CHUNK_SIZE, Peer2}, {none, 6*?DATA_CHUNK_SIZE, trunc(6.5*?DATA_CHUNK_SIZE), Peer2} ], "Multiple peers, overlapping, full intervals, 2 chunks. Overlapping QIntervals."). test_enqueue_intervals(Intervals, ChunksPerPeer, QIntervalsRanges, ExpectedQIntervalRanges, ExpectedChunks, Label) -> QIntervals = ar_intervals:from_list(QIntervalsRanges), Q = gb_sets:new(), {QResult, QIntervalsResult} = ar_data_sync:enqueue_intervals(Intervals, ChunksPerPeer, {Q, QIntervals}), ExpectedQIntervals = lists:foldl(fun({End, Start}, Acc) -> ar_intervals:add(Acc, End, Start) end, QIntervals, ExpectedQIntervalRanges), ?assertEqual(ar_intervals:to_list(ExpectedQIntervals), ar_intervals:to_list(QIntervalsResult), Label), ?assertEqual(ExpectedChunks, gb_sets:to_list(QResult), Label). ================================================ FILE: apps/arweave/test/ar_data_sync_mines_off_only_last_chunks_test.erl ================================================ -module(ar_data_sync_mines_off_only_last_chunks_test). -include_lib("eunit/include/eunit.hrl"). -include("ar.hrl"). -include("ar_consensus.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -import(ar_test_node, [test_with_mocked_functions/2]). 
%% Test generator: run the last-chunk mining test with fork 2.6 active from
%% height 0 and a shortened nonce limiter reset frequency (see
%% mock_reset_frequency/0) so the entropy reset occurs quickly in the test.
mines_off_only_last_chunks_test_() ->
	test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end},
			mock_reset_frequency()],
		fun test_mines_off_only_last_chunks/0).

%% Mock spec {Module, Function, Fun} pinning the nonce limiter reset
%% frequency to 5 steps to speed the test up.
mock_reset_frequency() ->
	{ar_nonce_limiter, get_reset_frequency, fun() -> 5 end}.

%% Submit only the last chunks (smaller than 256 KiB) of transactions.
%% Assert the nodes construct correct proofs of access from them.
test_mines_off_only_last_chunks() ->
	?LOG_DEBUG([{event, test_mines_off_only_last_chunks_start}]),
	Wallet = ar_test_data_sync:setup_nodes(),
	lists:foreach(
		fun(Height) ->
			%% A two-leaf Merkle tree: the first (never uploaded) chunk is
			%% referenced only by a random id; only the small last chunk is posted.
			RandomID = crypto:strong_rand_bytes(32),
			Chunk = crypto:strong_rand_bytes(1023),
			ChunkID = ar_tx:generate_chunk_id(Chunk),
			DataSize = ?DATA_CHUNK_SIZE + 1023,
			{DataRoot, DataTree} = ar_merkle:generate_tree(
					[{RandomID, ?DATA_CHUNK_SIZE}, {ChunkID, DataSize}]),
			TX = ar_test_node:sign_tx(Wallet, #{
					last_tx => ar_test_node:get_tx_anchor(main),
					data_size => DataSize, data_root => DataRoot }),
			ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX]),
			Offset = ?DATA_CHUNK_SIZE + 1,
			DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree),
			Proof = #{ data_root => ar_util:encode(DataRoot),
					data_path => ar_util:encode(DataPath),
					chunk => ar_util:encode(Chunk),
					offset => integer_to_binary(Offset),
					data_size => integer_to_binary(DataSize) },
			?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
					ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))),
			case Height - ?SEARCH_SPACE_UPPER_BOUND_DEPTH of
				-1 ->
					%% Make sure we waited enough to have the next block use
					%% the new entropy reset source.
					[{_, Info}] = ets:lookup(node_state, nonce_limiter_info),
					PrevStepNumber = Info#nonce_limiter_info.global_step_number,
					true = ar_util:do_until(
						fun() ->
							ar_nonce_limiter:get_current_step_number()
								> PrevStepNumber + ar_nonce_limiter:get_reset_frequency()
						end,
						100,
						60000
					);
				0 ->
					%% Wait until the new chunks fall below the new upper bound and
					%% remove the original big chunks. The protocol will increase the upper
					%% bound based on the nonce limiter entropy reset, but ar_data_sync waits
					%% for ?SEARCH_SPACE_UPPER_BOUND_DEPTH confirmations before packing the
					%% chunks.
					{ok, Config} = arweave_config:get_env(),
					lists:foreach(
						fun(O) ->
							[ar_chunk_storage:delete(O, ar_storage_module:id(Module))
								|| Module <- Config#config.storage_modules]
						end,
						lists:seq(?DATA_CHUNK_SIZE, ar_block:strict_data_split_threshold(),
								?DATA_CHUNK_SIZE)
					);
				_ ->
					ok
			end
		end,
		lists:seq(1, 6)
	).

================================================
FILE: apps/arweave/test/ar_data_sync_mines_off_only_second_last_chunks_test.erl
================================================
-module(ar_data_sync_mines_off_only_second_last_chunks_test).

-include_lib("eunit/include/eunit.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-import(ar_test_node, [test_with_mocked_functions/2]).

%% Test generator: same fork/reset-frequency mocks as the last-chunk variant.
mines_off_only_second_last_chunks_test_() ->
	test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end},
			mock_reset_frequency()],
		fun test_mines_off_only_second_last_chunks/0).

%% Mock spec pinning the nonce limiter reset frequency to 5 steps.
mock_reset_frequency() ->
	{ar_nonce_limiter, get_reset_frequency, fun() -> 5 end}.

%% Submit only the second last chunks (smaller than 256 KiB) of transactions.
%% Assert the nodes construct correct proofs of access from them.
test_mines_off_only_second_last_chunks() ->
	?LOG_DEBUG([{event, test_mines_off_only_second_last_chunks_start}]),
	Wallet = ar_test_data_sync:setup_nodes(),
	lists:foreach(
		fun(Height) ->
			%% Only the first (second-to-last) chunk is posted; the tail leaf
			%% is referenced by a random id and never uploaded.
			RandomID = crypto:strong_rand_bytes(32),
			Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE div 2),
			ChunkID = ar_tx:generate_chunk_id(Chunk),
			DataSize = (?DATA_CHUNK_SIZE) div 2 + (?DATA_CHUNK_SIZE) div 2 + 3,
			{DataRoot, DataTree} = ar_merkle:generate_tree(
					[{ChunkID, ?DATA_CHUNK_SIZE div 2}, {RandomID, DataSize}]),
			TX = ar_test_node:sign_tx(Wallet, #{
					last_tx => ar_test_node:get_tx_anchor(main),
					data_size => DataSize, data_root => DataRoot }),
			ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX]),
			Offset = 0,
			DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree),
			Proof = #{ data_root => ar_util:encode(DataRoot),
					data_path => ar_util:encode(DataPath),
					chunk => ar_util:encode(Chunk),
					offset => integer_to_binary(Offset),
					data_size => integer_to_binary(DataSize) },
			?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
					ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))),
			case Height - ?SEARCH_SPACE_UPPER_BOUND_DEPTH >= 0 of
				true ->
					%% Wait until the new chunks fall below the new upper bound and
					%% remove the original big chunks. The protocol will increase the upper
					%% bound based on the nonce limiter entropy reset, but ar_data_sync waits
					%% for ?SEARCH_SPACE_UPPER_BOUND_DEPTH confirmations before packing the
					%% chunks.
					{ok, Config} = arweave_config:get_env(),
					lists:foreach(
						fun(O) ->
							[ar_chunk_storage:delete(O, ar_storage_module:id(Module))
								|| Module <- Config#config.storage_modules]
						end,
						lists:seq(?DATA_CHUNK_SIZE, ar_block:strict_data_split_threshold(),
								?DATA_CHUNK_SIZE)
					);
				_ ->
					ok
			end
		end,
		lists:seq(1, 6)
	).

================================================
FILE: apps/arweave/test/ar_data_sync_records_footprints_test.erl
================================================
-module(ar_data_sync_records_footprints_test).

-include_lib("eunit/include/eunit.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").

%% Test generator with a generous timeout for node startup.
records_footprints_test_() ->
	{timeout, 120, fun test_records_footprints/0}.
%% Start a node with a single 3-chunk replica_2_9 storage module in partition 0
%% and verify the /footprints endpoint behavior for configured, unconfigured
%% and invalid partition/footprint numbers.
test_records_footprints() ->
	Wallet = ar_wallet:new_keyfile(),
	Addr = ar_wallet:to_address(Wallet),
	[B0] = ar_weave:init([{Addr, ?AR(1000), <<>>}]),
	StorageModules = [{262144 * 3, 0, {replica_2_9, Addr}}],
	ar_test_node:start(#{ b0 => B0, addr => Addr,
			storage_modules => StorageModules }),
	MainPeer = ar_test_node:peer_ip(main),
	%% The partition 1 is not configured.
	?assertEqual(not_found, ar_http_iface_client:get_footprints(MainPeer, 1, 0)),
	{ok, FirstFootprint} = ar_http_iface_client:get_footprints(MainPeer, 0, 0),
	?assertEqual(ar_intervals:from_list([{2, 0}]), FirstFootprint),
	{ok, SecondFootprint} = ar_http_iface_client:get_footprints(MainPeer, 0, 1),
	?assertEqual(ar_intervals:from_list([{5, 4}]), SecondFootprint),
	%% We have 2 footprints with 4 chunks in each in partition 0.
	?assertEqual({error, footprint_number_too_large},
			ar_http_iface_client:get_footprints(MainPeer, 0, 2)),
	?assertEqual({error, negative_footprint_number},
			ar_http_iface_client:get_footprints(MainPeer, 0, -1)),
	?assertEqual({error, negative_partition_number},
			ar_http_iface_client:get_footprints(MainPeer, -1, 0)).

================================================
FILE: apps/arweave/test/ar_data_sync_recovers_from_corruption_test.erl
================================================
-module(ar_data_sync_recovers_from_corruption_test).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").

-import(ar_test_node, [assert_wait_until_height/2]).

%% Test generator with a generous timeout for the recovery to complete.
recovers_from_corruption_test_() ->
	{timeout, 300, fun test_recovers_from_corruption/0}.

%% Overwrite the first three stored chunks with zeros and assert the node can
%% still mine and advance the chain.
test_recovers_from_corruption() ->
	?LOG_DEBUG([{event, test_recovers_from_corruption_start}]),
	ar_test_data_sync:setup_nodes(),
	StoreID = ar_storage_module:id(hd(ar_storage_module:get_all(262144 * 3))),
	?debugFmt("Corrupting ~s...", [StoreID]),
	ZeroChunk = << 0:(262144*8) >>,
	lists:foreach(
		fun(PaddedEndOffset) ->
			ar_chunk_storage:write_chunk(PaddedEndOffset, ZeroChunk, #{}, StoreID)
		end,
		lists:seq(262144, 262144 * 3, 262144)
	),
	ar_test_node:mine(),
	ar_test_node:assert_wait_until_height(main, 1).
================================================
FILE: apps/arweave/test/ar_data_sync_syncs_after_joining_test.erl
================================================
-module(ar_data_sync_syncs_after_joining_test).

-include_lib("eunit/include/eunit.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-import(ar_test_node, [assert_wait_until_height/2, test_with_mocked_functions/2]).

%% Test generator: fork 2.5 active from height 0, 240-second timeout.
syncs_after_joining_test_() ->
	ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> 0 end}],
		fun test_syncs_after_joining/0, 240).

test_syncs_after_joining() ->
	test_syncs_after_joining(original_split).

%% Fork the two nodes, rejoin peer1 on main and assert peer1 re-syncs the
%% chunks of the winning (main) fork as well as the shared history.
test_syncs_after_joining(Split) ->
	?LOG_DEBUG([{event, test_syncs_after_joining}, {split, Split}]),
	Wallet = ar_test_data_sync:setup_nodes(),
	{TX1, Chunks1} = ar_test_data_sync:tx(Wallet, {Split, 1}, v2, ?AR(1)),
	B1 = ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX1]),
	Proofs1 = ar_test_data_sync:post_proofs(main, B1, TX1, Chunks1),
	UpperBound = ar_node:get_partition_upper_bound(ar_node:get_block_index()),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, UpperBound),
	ar_test_data_sync:wait_until_syncs_chunks(Proofs1),
	ar_test_node:disconnect_from(peer1),
	%% Build two blocks on main while disconnected.
	{MainTX2, MainChunks2} = ar_test_data_sync:tx(Wallet, {Split, 3}, v2, ?AR(1)),
	MainB2 = ar_test_node:post_and_mine(#{ miner => main, await_on => main },
			[MainTX2]),
	MainProofs2 = ar_test_data_sync:post_proofs(main, MainB2, MainTX2, MainChunks2),
	{MainTX3, MainChunks3} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(1)),
	MainB3 = ar_test_node:post_and_mine(#{ miner => main, await_on => main },
			[MainTX3]),
	MainProofs3 = ar_test_data_sync:post_proofs(main, MainB3, MainTX3, MainChunks3),
	%% Build a competing block on peer1.
	{PeerTX2, PeerChunks2} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(1)),
	PeerB2 = ar_test_node:post_and_mine(
			#{ miner => peer1, await_on => peer1 },
			[PeerTX2]
	),
	PeerProofs2 = ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX2,
			PeerChunks2),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, PeerProofs2, infinity),
	%% Rejoin peer1 on main's (longer) fork and assert it syncs main's chunks.
	_Peer2 = ar_test_node:rejoin_on(#{ node => peer1, join_on => main }),
	assert_wait_until_height(peer1, 3),
	ar_test_node:connect_to_peer(peer1),
	UpperBound2 = ar_node:get_partition_upper_bound(ar_node:get_block_index()),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs2, UpperBound2),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs3, UpperBound2),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, infinity).

================================================
FILE: apps/arweave/test/ar_data_sync_syncs_data_test.erl
================================================
-module(ar_data_sync_syncs_data_test).

-include_lib("eunit/include/eunit.hrl").
-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-import(ar_test_node, [assert_wait_until_height/2]).

syncs_data_test_() ->
	{timeout, 240, fun test_syncs_data/0}.

%% Post random blocks with data, upload every chunk proof to main (twice, to
%% check the endpoint is idempotent), and assert peer1 syncs the data and
%% serves correct tx offsets and tx data.
test_syncs_data() ->
	?LOG_DEBUG([{event, test_syncs_data_start}]),
	Wallet = ar_test_data_sync:setup_nodes(),
	Records = ar_test_data_sync:post_random_blocks(Wallet),
	RecordsWithProofs = lists:flatmap(
		fun({B, TX, Chunks}) ->
			ar_test_data_sync:get_records_with_proofs(B, TX, Chunks)
		end, Records),
	lists:foreach(
		fun({_, _, _, {_, Proof}}) ->
			?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
					ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))),
			%% Posting the same proof again must also succeed.
			?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
					ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof)))
		end,
		RecordsWithProofs
	),
	Proofs = [Proof || {_, _, _, Proof} <- RecordsWithProofs],
	ar_test_data_sync:wait_until_syncs_chunks(Proofs),
	DiskPoolThreshold = ar_node:get_partition_upper_bound(ar_node:get_block_index()),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs, DiskPoolThreshold),
	lists:foreach(
		fun({B, #tx{ id = TXID }, Chunks, {_, Proof}}) ->
			TXSize = byte_size(binary:list_to_bin(Chunks)),
			TXOffset = ar_merkle:extract_note(ar_util:decode(maps:get(tx_path, Proof))),
			AbsoluteTXOffset = B#block.weave_size - B#block.block_size + TXOffset,
			ExpectedOffsetInfo = ar_serialize:jsonify(#{
					offset => integer_to_binary(AbsoluteTXOffset),
					size => integer_to_binary(TXSize) }),
			true = ar_util:do_until(
				fun() ->
					case ar_test_data_sync:get_tx_offset(peer1, TXID) of
						{ok, {{<<"200">>, _}, _, ExpectedOffsetInfo, _, _}} ->
							true;
						_ ->
							false
					end
				end,
				100,
				120 * 1000
			),
			ExpectedData = ar_util:encode(binary:list_to_bin(Chunks)),
			ar_test_node:assert_get_tx_data(main, TXID, ExpectedData),
			%% Data above the disk pool threshold is not guaranteed on peer1 yet.
			case AbsoluteTXOffset > DiskPoolThreshold of
				true ->
					ok;
				false ->
					ar_test_node:assert_get_tx_data(peer1, TXID, ExpectedData)
			end
		end,
		RecordsWithProofs
	).

================================================
FILE: apps/arweave/test/ar_difficulty_tests.erl
================================================
-module(ar_difficulty_tests).

-include_lib("eunit/include/eunit.hrl").

%% Check next_cumulative_diff/3 against the formula
%% OldCDiff + trunc(2^256 / (2^256 - NewDiff)).
next_cumul_diff_test() ->
	OldCDiff = 10,
	NewDiff = 25,
	Expected = OldCDiff
			+ erlang:trunc(math:pow(2, 256) / (math:pow(2, 256) - NewDiff)),
	Actual = ar_difficulty:next_cumulative_diff(OldCDiff, NewDiff, 0),
	?assertEqual(Expected, Actual).

================================================
FILE: apps/arweave/test/ar_ecdsa_tests.erl
================================================
-module(ar_ecdsa_tests).

-include("ar.hrl").

-include_lib("eunit/include/eunit.hrl").

%% Exercise the secp256k1 NIF: deterministic recoverable signing, public key
%% recovery, and failure on an out-of-range recovery id.
sign_ecrecover_test() ->
	{{_, PrivBytes, PubBytes}, _} = ar_wallet:new({?ECDSA_SIGN_ALG, secp256k1}),
	%% Just call. It should not fail.
	ar_wallet:hash_pub_key(PubBytes),
	Msg = <<"This is a test message!">>,
	SigRecoverable = secp256k1_nif:sign(Msg, PrivBytes),
	%% A recoverable signature is 64 signature bytes plus a trailing recid byte.
	?assertEqual(byte_size(SigRecoverable), 65),
	%% recid byte
	%% NOTE(review): the bit-syntax patterns in this test were corrupted during
	%% extraction (the contents of the "<<...>>" expressions were stripped);
	%% they are reconstructed here from context - confirm against upstream.
	<<Sig64:64/binary, RecId:8>> = SigRecoverable,
	?assert(lists:member(RecId, [0, 1, 2, 3])),
	%% deterministic Sig
	NewSig = secp256k1_nif:sign(Msg, PrivBytes),
	?assertEqual(NewSig, SigRecoverable),
	%% Recover pk
	{true, RecoveredBytes} = secp256k1_nif:ecrecover(Msg, SigRecoverable),
	io:format("Prv ~p~n", [PrivBytes]),
	io:format("Pub ~p~n", [PubBytes]),
	?assertEqual(RecoveredBytes, PubBytes),
	%% An invalid recovery id (> 3) must make recovery fail.
	BadRecidSig = <<Sig64/binary, 4:8>>,
	{false, <<>>} = secp256k1_nif:ecrecover(Msg, BadRecidSig),
	BadMsg = <<"This is a bad test message!">>,
	{true, ArbitraryPubBytes1} = secp256k1_nif:ecrecover(BadMsg, SigRecoverable),
	?assertNotEqual(PubBytes, ArbitraryPubBytes1),
	%% recover and verify returns true for arbitrary message, but non matching PK
	{true, ArbitraryPubBytes2} =
		secp256k1_nif:ecrecover(crypto:strong_rand_bytes(100), SigRecoverable),
	?assertNotEqual(PubBytes, ArbitraryPubBytes2),
	ok.

================================================
FILE: apps/arweave/test/ar_forced_validation_tests.erl
================================================
-module(ar_forced_validation_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-import(ar_test_node, [wait_until_height/2, post_block/2, sign_block/3,
		send_new_block/2]).

%% Run the undersized-RSA injection with fork 2.8 active from height 0.
avde3_post_2_8_test_() ->
	{setup, fun setup_all_post_2_8/0, fun cleanup_all_post_fork/1,
		{foreach, fun reset_node/0, [
			instantiator(fun inject_undersized_rsa/1)
		]}
	}.

%% Run the undersized-RSA injection with fork 2.9 active from height 0.
avde3_post_2_9_height_test_() ->
	{setup, fun setup_all_post_2_9_height/0, fun cleanup_all_post_fork/1,
		{foreach, fun reset_node/0, [
			instantiator(fun inject_undersized_rsa/1)
		]}
	}.

%% Start the main node and peer1 from the same genesis block and connect them.
start_node() ->
	%% Set difficulty to 0 to speed up tests
	[B0] = ar_weave:init([], 0),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1).
%% Per-test fixture: reset blacklist state on both nodes, realign main with
%% peer1, then mine one block on peer1 while disconnected. Returns the
%% {Key, B, PrevB} fixture handed to each test case, where B is the freshly
%% mined peer1 block and PrevB its predecessor.
reset_node() ->
	ar_blacklist_middleware:reset(),
	ar_test_node:remote_call(peer1, ar_blacklist_middleware, reset, []),
	ar_test_node:connect_to_peer(peer1),
	Height = height(peer1),
	[{PrevH, _, _} | _] = wait_until_height(main, Height),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	[{H, _, _} | _] = ar_test_node:assert_wait_until_height(peer1, Height + 1),
	B = ar_test_node:remote_call(peer1, ar_block_cache, get, [block_cache, H]),
	PrevB = ar_test_node:remote_call(peer1, ar_block_cache, get,
			[block_cache, PrevH]),
	{ok, Config} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	Key = ar_test_node:remote_call(peer1, ar_wallet, load_key,
			[Config#config.mining_addr]),
	{Key, B, PrevB}.

%% Suite setup: mock fork 2.8 activation at height 0 and start the nodes.
%% Returns {Cleanup, Functions} for cleanup_all_post_fork/1.
setup_all_post_2_8() ->
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ar_fork, height_2_8, fun() -> 0 end}
	]),
	Functions = Setup(),
	start_node(),
	{Cleanup, Functions}.

%% Suite setup: mock fork 2.9 activation at height 0 and start the nodes.
setup_all_post_2_9_height() ->
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ar_fork, height_2_9, fun() -> 0 end}
	]),
	Functions = Setup(),
	start_node(),
	{Cleanup, Functions}.

%% Suite teardown: undo the mocks installed by the setup functions.
cleanup_all_post_fork({Cleanup, Functions}) ->
	Cleanup(Functions).

%% Wrap a test fun into an eunit {timeout, {with, ...}} instantiator so the
%% foreach fixture value is passed to it.
instantiator(TestFun) ->
	fun (Fixture) -> {timeout, 180, {with, Fixture, [TestFun]}} end.

%% Round-trip a block through binary serialization and report the outcome.
debug_decode_block(B) ->
	%% TEST ENCODE/DECODE - This is to help to determine where
	%% encode/decode errors occur, as the HTTP API can be
	%% a bit opaque.
	BinaryB = ar_serialize:block_to_binary(B),
	try
		ar_serialize:binary_to_block(BinaryB),
		?debugFmt("============== DECODE PASSED ================== ~n", [])
	catch Exception:Reason:Stacktrace ->
		?debugFmt("==>>>decode error: ~p:~p -->>>>>stack:~p~n",
				[Exception, Reason, Stacktrace]),
		{error, {Exception, Reason}}
	end.
%% Send an otherwise valid block carrying a transaction with an undersized
%% (1-byte) RSA signature/owner to the victim node, then assert the node
%% still serves /info (i.e. it neither crashed nor banned the sender).
inject_undersized_rsa({Key, BIn, PrevB}) ->
	Victim = main,
	Peer = ar_test_node:peer_ip(Victim),
	ReqRes = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/info",
		headers => p2p_headers()
	}),
	?assertMatch({ok, {{<<"200">>, _}, _, _Body, _Start, _End}}, ReqRes),
	TX = poc07_tx(),
	B = block_with_undersized_rsa(Key, PrevB, BIn, TX),
	debug_decode_block(B),
	post_block(B, valid),
	timer:sleep(1000),
	ReqRes2 = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/info",
		headers => p2p_headers()
	}),
	?assertMatch({ok, {{<<"200">>, _}, _, _Body, _Start, _End}}, ReqRes2),
	ok.

%% Build and sign a block on top of PrevB that includes the given transaction,
%% recomputing the size- and tx-root-dependent fields.
block_with_undersized_rsa(Key, PrevB, BIn, TX) ->
	ok = ar_events:subscribe(block),
	Height = BIn#block.height,
	BlockSize = ar_tx:get_weave_size_increase(TX, Height),
	WeaveSize = PrevB#block.weave_size + BlockSize,
	TxRoot = ar_block:generate_tx_root_for_block([TX], Height),
	SizeTagged = ar_block:generate_size_tagged_list_from_txs([TX], Height),
	B1 = BIn#block{
		txs = [TX],
		block_size = BlockSize,
		weave_size = WeaveSize,
		tx_root = TxRoot,
		size_tagged_txs = SizeTagged
	},
	B2 = sign_block(B1, PrevB, Key),
	B2.

%% A format-1 transaction with a deliberately undersized RSA signature and
%% owner (one byte each) - the attack payload for inject_undersized_rsa/1.
poc07_tx() ->
	Sig = <<71>>,
	#tx{
		format = 1,
		id = crypto:hash(sha256, << Sig/binary >>),
		last_tx = <<>>,
		owner = <<191>>,
		owner_address = not_set,
		tags = [],
		target = <<>>,
		quantity = 0,
		data = <<>>,
		data_size = 0,
		data_root = <<>>,
		signature = Sig,
		reward = 0,
		denomination = 0,
		signature_type = {rsa, 65537}
	}.

%% HTTP headers identifying this process as a peer node to the HTTP API.
p2p_headers() ->
	{ok, Config} = arweave_config:get_env(),
	[{<<"x-p2p-port">>, integer_to_binary(Config#config.port)},
		{<<"x-release">>, integer_to_binary(?RELEASE_NUMBER)}].

%% ------------------------------------------------------------------------------------------
%% Helper functions
%% ------------------------------------------------------------------------------------------

%% Current height of the given node.
height(Node) ->
	ar_test_node:remote_call(Node, ar_node, get_height, []).
================================================
FILE: apps/arweave/test/ar_fork_recovery_tests.erl
================================================
-module(ar_fork_recovery_tests).

-include_lib("arweave/include/ar.hrl").

-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [
	assert_wait_until_height/2, wait_until_height/2, read_block_when_stored/1]).

height_plus_one_fork_recovery_test_() ->
	{timeout, 240, fun test_height_plus_one_fork_recovery/0}.

test_height_plus_one_fork_recovery() ->
	%% Mine on two nodes until they fork. Mine an extra block on one of them.
	%% Expect the other one to recover.
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(20), <<>>}]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 1),
	ar_test_node:mine(),
	wait_until_height(main, 1),
	ar_test_node:mine(),
	MainBI = wait_until_height(main, 2),
	%% main is one block ahead; peer1 must adopt main's branch.
	ar_test_node:connect_to_peer(peer1),
	?assertEqual(MainBI, ar_test_node:wait_until_height(peer1, 2)),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(),
	wait_until_height(main, 3),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 3),
	%% Now rejoin main on peer1's branch and let peer1 get ahead.
	ar_test_node:rejoin_on(#{ node => main, join_on => peer1 }),
	ar_test_node:mine(peer1),
	PeerBI = ar_test_node:wait_until_height(peer1, 4),
	?assertEqual(PeerBI, wait_until_height(main, 4)).

height_plus_three_fork_recovery_test_() ->
	{timeout, 240, fun test_height_plus_three_fork_recovery/0}.

test_height_plus_three_fork_recovery() ->
	%% Mine on two nodes until they fork. Mine three extra blocks on one of them.
	%% Expect the other one to recover.
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(20), <<>>}]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 1),
	ar_test_node:mine(),
	wait_until_height(main, 1),
	ar_test_node:mine(),
	wait_until_height(main, 2),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 2),
	ar_test_node:mine(),
	wait_until_height(main, 3),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 3),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(),
	MainBI = wait_until_height(main, 4),
	?assertEqual(MainBI, ar_test_node:wait_until_height(peer1, 4)).

missing_txs_fork_recovery_test_() ->
	{timeout, 240, fun test_missing_txs_fork_recovery/0}.

test_missing_txs_fork_recovery() ->
	%% Mine a block with a transaction on the peer1 node
	%% but do not gossip the transaction. The main node
	%% is expected fetch the missing transaction and apply the block.
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(20), <<>>}]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:disconnect_from(peer1),
	TX1 = ar_test_node:sign_tx(Key, #{}),
	ar_test_node:assert_post_tx_to_peer(peer1, TX1),
	%% Wait to make sure the tx will not be gossiped upon reconnect.
	timer:sleep(2000), % == 2 * ?CHECK_MEMPOOL_FREQUENCY
	ar_test_node:rejoin_on(#{ node => main, join_on => peer1 }),
	?assertEqual([], ar_mempool:get_all_txids()),
	ar_test_node:mine(peer1),
	[{H1, _, _} | _] = wait_until_height(main, 1),
	?assertEqual(1, length((read_block_when_stored(H1))#block.txs)).

orphaned_txs_are_remined_after_fork_recovery_test_() ->
	{timeout, 240, fun test_orphaned_txs_are_remined_after_fork_recovery/0}.

test_orphaned_txs_are_remined_after_fork_recovery() ->
	%% Mine a transaction on peer1, mine two blocks on main to
	%% make the transaction orphaned. Mine a block on peer1 and
	%% assert the transaction is re-mined.
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(20), <<>>}]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:disconnect_from(peer1),
	TX = #tx{ id = TXID } = ar_test_node:sign_tx(Key, #{ denomination => 1,
			reward => ?AR(1) }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX),
	ar_test_node:mine(peer1),
	[{H1, _, _} | _] = ar_test_node:wait_until_height(peer1, 1),
	H1TXIDs = (ar_test_node:remote_call(peer1, ar_test_node,
			read_block_when_stored, [H1]))#block.txs,
	?assertEqual([TXID], H1TXIDs),
	ar_test_node:mine(),
	[{H2, _, _} | _] = wait_until_height(main, 1),
	ar_test_node:mine(),
	[{H3, _, _}, {H2, _, _}, {_, _, _}] = wait_until_height(main, 2),
	%% main's two-block branch wins; peer1's block (and TX) is orphaned.
	ar_test_node:connect_to_peer(peer1),
	?assertMatch([{H3, _, _}, {H2, _, _}, {_, _, _}],
			ar_test_node:wait_until_height(peer1, 2)),
	ar_test_node:mine(peer1),
	[{H4, _, _} | _] = ar_test_node:wait_until_height(peer1, 3),
	H4TXIDs = (ar_test_node:remote_call(peer1, ar_test_node,
			read_block_when_stored, [H4]))#block.txs,
	?debugFmt("Expecting ~s to be re-mined.~n", [ar_util:encode(TXID)]),
	?assertEqual([TXID], H4TXIDs).

invalid_block_with_high_cumulative_difficulty_test_() ->
	ar_test_node:test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}],
		fun() -> test_invalid_block_with_high_cumulative_difficulty() end).

test_invalid_block_with_high_cumulative_difficulty() ->
	%% Submit an alternative fork with valid blocks weaker than the tip and
	%% an invalid block on top, much stronger than the tip. Make sure the node
	%% ignores the invalid block and continues to build on top of the valid fork.
	RewardKey = ar_wallet:new_keyfile(),
	RewardAddr = ar_wallet:to_address(RewardKey),
	WalletName = ar_util:encode(RewardAddr),
	Path = ar_wallet:wallet_filepath(WalletName),
	PeerPath = ar_test_node:remote_call(peer1, ar_wallet, wallet_filepath,
			[WalletName]),
	%% Copy the key because we mine blocks on both nodes using the same key in this test.
	{ok, _} = file:copy(Path, PeerPath),
	[B0] = ar_weave:init(),
	ar_test_node:start(B0, RewardAddr),
	ar_test_node:start_peer(peer1, B0, RewardAddr),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	[{H1, _, _} | _] = ar_test_node:wait_until_height(peer1, 1),
	ar_test_node:mine(),
	[{H2, _, _} | _] = wait_until_height(main, 1),
	ar_test_node:connect_to_peer(peer1),
	?assertNotEqual(H2, H1),
	B1 = read_block_when_stored(H2),
	B2 = fake_block_with_strong_cumulative_difficulty(B1, B0, 10000000000000000),
	B2H = B2#block.indep_hash,
	?debugFmt("Fake block: ~s.", [ar_util:encode(B2H)]),
	ok = ar_events:subscribe(block),
	?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
			ar_http_iface_client:send_block_binary(ar_test_node:peer_ip(main),
					B2#block.indep_hash, ar_serialize:block_to_binary(B2))),
	%% The fake block must be rejected for its unjustified cumulative difficulty.
	receive
		{event, block, {rejected, invalid_cumulative_difficulty, B2H, _Peer2}} ->
			ok;
		{event, block, {new, #block{ indep_hash = B2H }, _Peer3}} ->
			?assert(false, "Unexpected block acceptance");
		{event, block, Other} ->
			?debugFmt("Unexpected block event: ~p", [Other]),
			?assert(false, "Unexpected block event")
	after 60_000 ->
		?assert(false, "Timed out waiting for the node to pre-validate the fake "
				"block.")
	end,
	[{H1, _, _} | _] = ar_test_node:wait_until_height(peer1, 1),
	ar_test_node:mine(),
	%% Assert the nodes have continued building on the original fork.
	[{H3, _, _} | _] = ar_test_node:wait_until_height(peer1, 2),
	?assertNotEqual(B2#block.indep_hash, H3),
	{_Peer, B3, _Time, _Size} = ar_http_iface_client:get_block_shadow(1,
			ar_test_node:peer_ip(peer1), binary, #{}),
	?assertEqual(H2, B3#block.indep_hash).
%% Build a correctly signed block whose cumulative difficulty is set to the
%% given (unjustified) CDiff. The solution must still satisfy the per-block
%% difficulty, so the function retries (each attempt packs the recall chunk
%% for a fresh reward address, yielding a new h1) until h1 exceeds Diff.
%% Used to test that nodes reject blocks whose claimed cumulative difficulty
%% is not backed by work.
fake_block_with_strong_cumulative_difficulty(B, PrevB, CDiff) ->
	#block{
		height = Height,
		partition_number = PartitionNumber,
		previous_solution_hash = PrevSolutionH,
		nonce_limiter_info = #nonce_limiter_info{
				partition_upper_bound = PartitionUpperBound },
		diff = Diff
	} = B,
	B2 = B#block{ cumulative_diff = CDiff },
	Wallet = ar_wallet:new(),
	RewardAddr2 = ar_wallet:to_address(Wallet),
	H0 = ar_block:compute_h0(B, PrevB),
	{RecallByte, _RecallRange2Start} = ar_block:get_recall_range(H0,
			PartitionNumber, PartitionUpperBound),
	{ok, #{ data_path := DataPath, tx_path := TXPath, chunk := Chunk } } =
		ar_data_sync:get_chunk(RecallByte + 1, #{ pack => true,
				packing => {spora_2_6, RewardAddr2}, origin => test }),
	{H1, Preimage} = ar_block:compute_h1(H0, 0, Chunk),
	case binary:decode_unsigned(H1) > Diff of
		true ->
			PoA = #poa{ chunk = Chunk, data_path = DataPath, tx_path = TXPath },
			B3 = B2#block{
				hash = H1,
				hash_preimage = Preimage,
				reward_addr = RewardAddr2,
				reward_key = element(2, Wallet),
				recall_byte = RecallByte,
				nonce = 0,
				recall_byte2 = undefined,
				poa2 = #poa{},
				unpacked_chunk2_hash = undefined,
				poa = #poa{ chunk = Chunk, data_path = DataPath,
						tx_path = TXPath },
				chunk_hash = crypto:hash(sha256, Chunk)
			},
			%% With fork 2.8 active, the block additionally carries the
			%% unpacked recall chunk and its hash.
			B4 =
				case ar_fork:height_2_8() of
					0 ->
						{ok, #{ chunk := UnpackedChunk } } =
							ar_data_sync:get_chunk(RecallByte + 1,
									#{ pack => true, packing => unpacked,
											origin => test }),
						B3#block{
							packing_difficulty = 1,
							poa = PoA#poa{ unpacked_chunk = UnpackedChunk },
							unpacked_chunk_hash = crypto:hash(sha256, UnpackedChunk)
						};
					_ ->
						B3
				end,
			PrevCDiff = PrevB#block.cumulative_diff,
			SignedH = ar_block:generate_signed_hash(B4),
			SignaturePreimage = ar_block:get_block_signature_preimage(CDiff,
					PrevCDiff, << PrevSolutionH/binary, SignedH/binary >>, Height),
			Signature = ar_wallet:sign(element(1, Wallet), SignaturePreimage),
			B4#block{ indep_hash = ar_block:indep_hash2(SignedH, Signature),
					signature = Signature };
		false ->
			%% h1 too weak - retry with a fresh wallet / packing.
			fake_block_with_strong_cumulative_difficulty(B, PrevB, CDiff)
	end.
fork_recovery_test_() ->
	%% Allow headroom for many sequential wait_until_syncs_chunks calls on slow CI.
	{timeout, 480, fun test_fork_recovery/0}.

test_fork_recovery() ->
	test_fork_recovery(original_split).

%% Fork main and peer1, mine competing blocks with data on both, reconnect,
%% and assert data from both branches eventually becomes available: the
%% winning (main) branch is synced by peer1 and the orphaned peer1
%% transactions are re-mined on main.
test_fork_recovery(Split) ->
	Wallet = ar_test_data_sync:setup_nodes(#{ packing => {composite, 1} }),
	{TX1, Chunks1} = ar_test_data_sync:tx(Wallet, {Split, 3}, v2, ?AR(10)),
	?debugFmt("Posting tx to main ~s.~n", [ar_util:encode(TX1#tx.id)]),
	B1 = ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX1]),
	?debugFmt("Mined block ~s, height ~B.~n", [ar_util:encode(B1#block.indep_hash),
			B1#block.height]),
	Proofs1 = ar_test_data_sync:post_proofs(main, B1, TX1, Chunks1),
	ar_test_data_sync:wait_until_syncs_chunks(Proofs1),
	UpperBound = ar_node:get_partition_upper_bound(ar_node:get_block_index()),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, UpperBound),
	ar_test_node:disconnect_from(peer1),
	%% Build a two-block branch on peer1 while disconnected.
	{PeerTX2, PeerChunks2} = ar_test_data_sync:tx(Wallet, {Split, 5}, v2, ?AR(10)),
	{PeerTX3, PeerChunks3} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(10)),
	?debugFmt("Posting tx to peer1 ~s.~n", [ar_util:encode(PeerTX2#tx.id)]),
	?debugFmt("Posting tx to peer1 ~s.~n", [ar_util:encode(PeerTX3#tx.id)]),
	PeerB2 = ar_test_node:post_and_mine(#{ miner => peer1, await_on => peer1 },
			[PeerTX2, PeerTX3]),
	?debugFmt("Mined block ~s, height ~B.~n",
			[ar_util:encode(PeerB2#block.indep_hash), PeerB2#block.height]),
	{MainTX2, MainChunks2} = ar_test_data_sync:tx(Wallet, {Split, 1}, v2, ?AR(10)),
	?debugFmt("Posting tx to main ~s.~n", [ar_util:encode(MainTX2#tx.id)]),
	MainB2 = ar_test_node:post_and_mine(#{ miner => main, await_on => main },
			[MainTX2]),
	?debugFmt("Mined block ~s, height ~B.~n",
			[ar_util:encode(MainB2#block.indep_hash), MainB2#block.height]),
	_PeerProofs2 = ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX2,
			PeerChunks2),
	_PeerProofs3 = ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX3,
			PeerChunks3),
	{PeerTX4, PeerChunks4} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(10)),
	?debugFmt("Posting tx to peer1 ~s.~n", [ar_util:encode(PeerTX4#tx.id)]),
	PeerB3 = ar_test_node:post_and_mine(#{ miner => peer1, await_on => peer1 },
			[PeerTX4]),
	?debugFmt("Mined block ~s, height ~B.~n",
			[ar_util:encode(PeerB3#block.indep_hash), PeerB3#block.height]),
	_PeerProofs4 = ar_test_data_sync:post_proofs(peer1, PeerB3, PeerTX4,
			PeerChunks4),
	ar_test_node:post_and_mine(#{ miner => main, await_on => main }, []),
	MainProofs2 = ar_test_data_sync:post_proofs(main, MainB2, MainTX2,
			MainChunks2),
	{MainTX3, MainChunks3} = ar_test_data_sync:tx(Wallet, {Split, 1}, v2, ?AR(10)),
	?debugFmt("Posting tx to main ~s.~n", [ar_util:encode(MainTX3#tx.id)]),
	MainB3 = ar_test_node:post_and_mine(#{ miner => main, await_on => main },
			[MainTX3]),
	?debugFmt("Mined block ~s, height ~B.~n",
			[ar_util:encode(MainB3#block.indep_hash), MainB3#block.height]),
	%% Reconnect: main's branch is longer, peer1 must recover onto it.
	ar_test_node:connect_to_peer(peer1),
	MainProofs3 = ar_test_data_sync:post_proofs(main, MainB3, MainTX3,
			MainChunks3),
	UpperBound2 = ar_node:get_partition_upper_bound(ar_node:get_block_index()),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs2, UpperBound2),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs3, UpperBound2),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, infinity),
	%% The peer1 node will return the orphaned transactions to the mempool
	%% and gossip them.
	?debugFmt("Posting tx to main ~s.~n", [ar_util:encode(PeerTX2#tx.id)]),
	?debugFmt("Posting tx to main ~s.~n", [ar_util:encode(PeerTX4#tx.id)]),
	ar_test_node:post_tx_to_peer(main, PeerTX2),
	ar_test_node:post_tx_to_peer(main, PeerTX4),
	ar_test_node:assert_wait_until_receives_txs([PeerTX2, PeerTX4]),
	MainB4 = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, []),
	?debugFmt("Mined block ~s, height ~B.~n",
			[ar_util:encode(MainB4#block.indep_hash), MainB4#block.height]),
	Proofs4 = ar_test_data_sync:post_proofs(main, MainB4, PeerTX4, PeerChunks4),
	%% We did not submit proofs for PeerTX4 to main - they are supposed to be still stored
	%% in the disk pool.
	ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs4, infinity),
	UpperBound3 = ar_node:get_partition_upper_bound(ar_node:get_block_index()),
	ar_test_data_sync:wait_until_syncs_chunks(Proofs4, UpperBound3),
	ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX2, PeerChunks2).

================================================
FILE: apps/arweave/test/ar_get_chunk_tests.erl
================================================
-module(ar_get_chunk_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").

%% Generator: threshold mocked far above the test data (all offsets below it).
get_chunk_below_strict_threshold_test_() ->
	ar_test_node:test_with_mocked_functions(
		[strict_data_split_threshold_mock(10 * ?DATA_CHUNK_SIZE)],
		fun test_get_chunk_below_strict_threshold/0,
		120
	).

get_chunk_below_strict_threshold_small_tail_test_() ->
	ar_test_node:test_with_mocked_functions(
		[strict_data_split_threshold_mock(10 * ?DATA_CHUNK_SIZE)],
		fun test_get_chunk_below_strict_threshold_small_tail/0,
		120
	).

%% Generator: threshold mocked low so the test data lands above it.
get_chunk_above_strict_threshold_test_() ->
	ar_test_node:test_with_mocked_functions(
		[strict_data_split_threshold_mock(?DATA_CHUNK_SIZE)],
		fun test_get_chunk_above_strict_threshold/0,
		180
	).
%% Generator: low threshold plus a small tail chunk above it.
get_chunk_above_strict_threshold_small_tail_test_() ->
	ar_test_node:test_with_mocked_functions(
		[strict_data_split_threshold_mock(?DATA_CHUNK_SIZE)],
		fun test_get_chunk_above_strict_threshold_small_tail/0,
		180
	).

%% Any offset inside a chunk that lies at or below the strict data split
%% threshold must resolve to the same chunk response.
test_get_chunk_below_strict_threshold() ->
	Wallet = ar_test_data_sync:setup_nodes(),
	ChunkList = [
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)
	],
	{TX, _} = tx_with_chunks(Wallet, ChunkList),
	Block = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [TX]),
	[{EndOffset, HeadProof} | _] = ar_test_data_sync:build_proofs(Block, TX,
			ChunkList),
	post_and_wait_for_chunks([{EndOffset, HeadProof}]),
	?assert(EndOffset =< ar_block:strict_data_split_threshold()),
	assert_chunk_offsets_same(EndOffset, HeadProof).

%% Same as above, but the transaction ends with a small (non-full) tail chunk.
test_get_chunk_below_strict_threshold_small_tail() ->
	TailSize = 12345,
	Wallet = ar_test_data_sync:setup_nodes(),
	ChunkList = [
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
		crypto:strong_rand_bytes(TailSize)
	],
	{TX, _} = tx_with_chunks(Wallet, ChunkList),
	Block = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [TX]),
	[{EndOffset, HeadProof} | _] = ar_test_data_sync:build_proofs(Block, TX,
			ChunkList),
	post_and_wait_for_chunks([{EndOffset, HeadProof}]),
	?assert(byte_size(lists:last(ChunkList)) < ?DATA_CHUNK_SIZE),
	?assert(EndOffset =< ar_block:strict_data_split_threshold()),
	assert_chunk_offsets_same(EndOffset, HeadProof).
%% Mine a two-chunk transaction with the threshold mocked at one chunk and
%% verify chunk retrieval for the offsets that landed above the threshold.
test_get_chunk_above_strict_threshold() ->
	Wallet = ar_test_data_sync:setup_nodes(),
	Chunks = [
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)
	],
	{TX, _} = tx_with_chunks(Wallet, Chunks),
	B = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [TX]),
	[{FirstEndOffset, FirstProof}, {SecondEndOffset, SecondProof}] =
		ar_test_data_sync:build_proofs(B, TX, Chunks),
	post_and_wait_for_chunks([{FirstEndOffset, FirstProof},
			{SecondEndOffset, SecondProof}]),
	Threshold = ar_block:strict_data_split_threshold(),
	%% Keep only the proofs whose absolute end offset is above the threshold.
	AboveThreshold = [{AbsoluteEndOffset, Proof}
			|| {AbsoluteEndOffset, Proof}
				<- [{FirstEndOffset, FirstProof}, {SecondEndOffset, SecondProof}],
			AbsoluteEndOffset > Threshold],
	%% At least one proof must be above the threshold for the test to be
	%% meaningful.
	?assertMatch([_ | _], AboveThreshold),
	lists:foreach(
		fun({AbsoluteEndOffset, Proof}) ->
			assert_chunk_offsets_same(AbsoluteEndOffset, Proof)
		end,
		AboveThreshold
	).

%% Above-threshold retrieval where the transaction ends with a chunk
%% smaller than ?DATA_CHUNK_SIZE.
test_get_chunk_above_strict_threshold_small_tail() ->
	Wallet = ar_test_data_sync:setup_nodes(),
	SmallChunkSize = 12345,
	FirstChunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	LastChunk = crypto:strong_rand_bytes(SmallChunkSize),
	?assert(byte_size(LastChunk) < ?DATA_CHUNK_SIZE),
	Chunks = [FirstChunk, LastChunk],
	{TX, _} = tx_with_chunks(Wallet, Chunks),
	B = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [TX]),
	[{FirstEndOffset, FirstProof}, {SecondEndOffset, SecondProof}] =
		ar_test_data_sync:build_proofs(B, TX, Chunks),
	post_and_wait_for_chunks([{FirstEndOffset, FirstProof},
			{SecondEndOffset, SecondProof}]),
	Threshold = ar_block:strict_data_split_threshold(),
	%% With the threshold mocked at one chunk, both offsets must be above it.
	?assert(FirstEndOffset > Threshold),
	?assert(SecondEndOffset > Threshold),
	assert_chunk_offsets_same(FirstEndOffset, FirstProof),
	assert_chunk_offsets_same(SecondEndOffset, SecondProof).
%% Assert that several offsets falling inside the same chunk all return the
%% same response from GET /chunk, and that this response matches the proof
%% we originally posted.
assert_chunk_offsets_same(EndOffset, ExpectedProof) ->
	Size = byte_size(ar_util:decode(maps:get(chunk, ExpectedProof))),
	Start = EndOffset - Size,
	Candidates = [EndOffset, EndOffset - 1, EndOffset - max(1, Size div 2)],
	Offsets = unique_offsets(Candidates, Start),
	[First | Others] = lists:map(fun fetch_chunk_response/1, Offsets),
	%% Every probed offset must resolve to an identical response.
	[?assertEqual(First, Other) || Other <- Others],
	assert_chunk_response(First, EndOffset, ExpectedProof).

%% Fetch the chunk at the given absolute offset from main and decode the
%% JSON body into a map.
fetch_chunk_response(Offset) ->
	{ok, {{<<"200">>, _}, _, ProofJSON, _, _}} = ar_test_node:get_chunk(main, Offset),
	{ok, Decoded} = ar_serialize:json_decode(ProofJSON, [return_maps]),
	Decoded.

%% Field-by-field comparison of a decoded GET /chunk response against the
%% proof that was posted.
assert_chunk_response(Response, EndOffset, ExpectedProof) ->
	ExpectedChunk = maps:get(chunk, ExpectedProof),
	?assertEqual(ExpectedChunk, maps:get(<<"chunk">>, Response)),
	?assertEqual(maps:get(data_path, ExpectedProof), maps:get(<<"data_path">>, Response)),
	?assertEqual(maps:get(tx_path, ExpectedProof), maps:get(<<"tx_path">>, Response)),
	?assertEqual(integer_to_binary(EndOffset),
			maps:get(<<"absolute_end_offset">>, Response)),
	ExpectedSize = byte_size(ar_util:decode(ExpectedChunk)),
	?assertEqual(integer_to_binary(ExpectedSize), maps:get(<<"chunk_size">>, Response)),
	ExpectedPacking = iolist_to_binary(ar_serialize:encode_packing(unpacked, true)),
	?assertEqual(ExpectedPacking, maps:get(<<"packing">>, Response)).

%% Mock returning a fixed strict data split threshold.
strict_data_split_threshold_mock(Threshold) ->
	{ar_block, strict_data_split_threshold, fun() -> Threshold end}.

%% Build a transaction committing to Chunks via a Merkle data root.
tx_with_chunks(Wallet, Chunks) ->
	SizeTagged = ar_tx:chunks_to_size_tagged_chunks(Chunks),
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(SizeTagged),
	{DataRoot, _Tree} = ar_merkle:generate_tree(SizedChunkIDs),
	ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot, Chunks}).
%% Post each proof to main and then wait until all the corresponding chunks
%% are synced (two passes so posting is not serialized with waiting).
post_and_wait_for_chunks(Proofs) ->
	lists:foreach(
		fun({_EndOffset, Proof}) ->
			?assertMatch(
				{ok, {{<<"200">>, _}, _, _, _, _}},
				ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))
			)
		end,
		Proofs
	),
	lists:foreach(
		fun({AbsoluteEndOffset, Proof}) ->
			Expected = #{
				chunk => maps:get(chunk, Proof),
				data_path => maps:get(data_path, Proof),
				tx_path => maps:get(tx_path, Proof)
			},
			ar_test_data_sync:wait_until_syncs_chunk(AbsoluteEndOffset, Expected)
		end,
		Proofs
	).

%% Deduplicate and sort offsets, keeping only those strictly inside the
%% chunk (greater than StartOffset) and non-negative.
unique_offsets(Offsets, StartOffset) ->
	lists:usort([Offset || Offset <- Offsets, Offset > StartOffset, Offset >= 0]).



================================================
FILE: apps/arweave/test/ar_header_sync_tests.erl
================================================
%% Tests for block header syncing and the disk-space cleanup procedure.
-module(ar_header_sync_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [sign_v1_tx/3, wait_until_height/2, assert_wait_until_height/2,
		read_block_when_stored/1, random_v1_data/1]).

syncs_headers_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_fork, height_2_8, fun() -> 10 end},
			{ar_retarget, is_retarget_height, fun(_Height) -> false end},
			{ar_retarget, is_retarget_block, fun(_Block) -> false end}],
		fun test_syncs_headers/0).
%% Mine a chain on main, join peer1 on it, verify peer1 syncs every block
%% header and transaction, then simulate running out of disk space and
%% verify the cleanup removes old blocks but never the latest one.
test_syncs_headers() ->
	Wallet = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(2000), <<>>}]),
	ar_test_node:start(B0),
	post_random_blocks(Wallet, ar_block:get_max_tx_anchor_depth() + 5, B0),
	ar_test_node:join_on(#{ node => peer1, join_on => main }),
	BI = assert_wait_until_height(peer1, ar_block:get_max_tx_anchor_depth() + 5),
	%% Check every height from the tip down to the genesis block.
	lists:foreach(
		fun(Height) ->
			{ok, B} = ar_util:do_until(
				fun() ->
					case ar_test_node:remote_call(peer1, ar_storage, read_block,
							[Height, BI]) of
						unavailable ->
							%% NOTE(review): presumably do_until treats this
							%% non-{ok, _} value as "keep retrying" — confirm
							%% against ar_util:do_until.
							unavailable;
						B2 ->
							{ok, B2}
					end
				end,
				200,
				30000
			),
			MainB = ar_storage:read_block(Height, ar_node:get_block_index()),
			?assertEqual(B, MainB),
			TXs = ar_test_node:remote_call(peer1, ar_storage, read_tx, [B#block.txs]),
			MainTXs = ar_storage:read_tx(B#block.txs),
			?assertEqual(TXs, MainTXs)
		end,
		lists:reverse(lists:seq(0, ar_block:get_max_tx_anchor_depth() + 5))
	),
	%% Throw the event to simulate running out of disk space.
	ar_disksup:pause(),
	ar_events:send(disksup, {remaining_disk_space, ?DEFAULT_MODULE, true, 0, 0}),
	NoSpaceHeight = ar_block:get_max_tx_anchor_depth() + 6,
	NoSpaceTX = sign_v1_tx(main, Wallet, #{ data => random_v1_data(10 * 1024),
			last_tx => ar_test_node:get_tx_anchor(peer1) }),
	ar_test_node:assert_post_tx_to_peer(main, NoSpaceTX),
	ar_test_node:mine(),
	[{NoSpaceH, _, _} | _] = wait_until_height(main, NoSpaceHeight),
	timer:sleep(1000),
	%% The cleanup is not expected to kick in yet.
	NoSpaceB = read_block_when_stored(NoSpaceH),
	?assertMatch(#block{}, NoSpaceB),
	?assertMatch(#tx{}, ar_storage:read_tx(NoSpaceTX#tx.id)),
	?assertMatch({ok, _}, ar_storage:read_wallet_list(NoSpaceB#block.wallet_list)),
	%% Track the next height to mine in a named ETS table so the do_until
	%% closure can advance it across retries.
	ets:new(test_syncs_header, [set, named_table]),
	ets:insert(test_syncs_header, {height, NoSpaceHeight + 1}),
	true = ar_util:do_until(
		fun() ->
			%% Keep mining blocks. At some point the cleanup procedure will
			%% kick in and remove the oldest files.
			TX = sign_v1_tx(main, Wallet, #{ data => random_v1_data(200 * 1024),
					last_tx => ar_test_node:get_tx_anchor(peer1) }),
			ar_test_node:assert_post_tx_to_peer(main, TX),
			ar_test_node:mine(),
			[{_, Height}] = ets:lookup(test_syncs_header, height),
			[_ | _] = wait_until_height(main, Height),
			ets:insert(test_syncs_header, {height, Height + 1}),
			%% Succeed once the oldest block and its transaction are gone.
			unavailable == ar_storage:read_block(NoSpaceH)
				andalso ar_storage:read_tx(NoSpaceTX#tx.id) == unavailable
		end,
		100,
		20000
	),
	timer:sleep(1000),
	[{LatestH, _, _} | _] = ar_node:get_block_index(),
	%% The latest block must not be cleaned up.
	LatestB = read_block_when_stored(LatestH),
	?assertMatch(#block{}, LatestB),
	?assertMatch(#tx{}, ar_storage:read_tx(lists:nth(1, LatestB#block.txs))),
	?assertMatch({ok, _}, ar_storage:read_wallet_list(LatestB#block.wallet_list)),
	ar_disksup:resume().

%% Mine TargetHeight blocks, each containing 0-2 random data transactions
%% anchored on the previous block's hash; returns nothing meaningful
%% (folds the anchor through).
post_random_blocks(Wallet, TargetHeight, B0) ->
	lists:foldl(
		fun(Height, Anchor) ->
			?LOG_INFO([{event, post_random_blocks}, {height, Height}]),
			TXs = lists:foldl(
				fun(_, Acc) ->
					%% Coin flip: include a transaction or not.
					case rand:uniform(2) == 1 of
						true ->
							TX = ar_test_node:sign_tx(main, Wallet, #{
									last_tx => Anchor,
									data => crypto:strong_rand_bytes(10 * ?MiB) }),
							ar_test_node:assert_post_tx_to_peer(main, TX),
							[TX | Acc];
						false ->
							Acc
					end
				end,
				[],
				lists:seq(1, 2)
			),
			?LOG_INFO([{event, post_random_blocks},
					{transactions_posted, length(TXs)}, {height, Height}]),
			ar_test_node:mine(),
			[{H, _, _} | _] = wait_until_height(main, Height),
			?LOG_INFO([{event, post_random_blocks},
					{block_mined, ar_util:encode(H)}, {height, Height}]),
			?assertEqual(length(TXs), length((read_block_when_stored(H))#block.txs)),
			H
		end,
		B0#block.indep_hash,
		lists:seq(1, TargetHeight)
	).



================================================
FILE: apps/arweave/test/ar_http_iface_tests.erl
================================================
%% Tests for the node's public HTTP interface.
-module(ar_http_iface_tests).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").
-import(ar_test_node, [wait_until_height/2, wait_until_receives_txs/1,
		read_block_when_stored/1, read_block_when_stored/2, assert_wait_until_height/2]).

%% Start main and peer1 from a common genesis block; returns the genesis
%% block and the three wallets used by the batch tests.
start_node() ->
	%% Starting a node is slow so we'll run it once for the whole test module
	Wallet1 = {_, Pub1} = ar_wallet:new(),
	Wallet2 = {_, Pub2} = ar_wallet:new(),
	%% This wallet is never spent from or deposited to, so the balance is predictable
	StaticWallet = {_, Pub3} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub1), ?AR(10000), <<>>},
		{ar_wallet:to_address(Pub2), ?AR(10000), <<>>},
		{ar_wallet:to_address(Pub3), ?AR(10), <<"TEST_ID">>}
	], 0),
	%% Set difficulty to 0 to speed up tests
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	{B0, Wallet1, Wallet2, StaticWallet}.

%% Per-test reset: clear throttling state on both nodes and reconnect.
reset_node() ->
	ar_blacklist_middleware:reset(),
	arweave_limiter_sup:reset_all(),
	ar_test_node:remote_call(peer1, ar_blacklist_middleware, reset, []),
	ar_test_node:connect_to_peer(peer1).

%% One-time setup for the batch: install mocks, start the nodes.
setup_all_batch() ->
	%% Never retarget the difficulty - this ensures the tests are always
	%% run against difficulty 0. Because of this we also have to hardcode
	%% the TX fee, otherwise it can jump pretty high.
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ar_retarget, is_retarget_height, fun(_Height) -> false end},
		{ar_retarget, is_retarget_block, fun(_Block) -> false end},
		{ar_tx, get_tx_fee, fun(_Args) -> ?AR(1) end}
	]),
	Functions = Setup(),
	GenesisData = start_node(),
	{GenesisData, {Cleanup, Functions}}.

%% Undo the mocks installed by setup_all_batch/0.
cleanup_all_batch({_GenesisData, {Cleanup, Functions}}) ->
	Cleanup(Functions).

%% Wrap a test fun with a fixture and a per-test timeout.
test_register(TestFun, Fixture) ->
	{timeout, 120, {with, Fixture, [TestFun]}}.

%% -------------------------------------------------------------------
%% The spammer tests must run first. All the other tests will call
%% ar_blacklist_middleware:reset() periodically and this will restart
%% the 30-second throttle counter using timer:apply_after(30000, ...).
%% However since most tests are less than 30 seconds, we end up with
%% several pending timer:apply_after that can hit, and reset the
%% throttle counter at any moment. This has caused these spammer tests
%% to fail randomly in the past depending on whether or not the
%% throttle counter was reset before the test finished.
%% -------------------------------------------------------------------

%% @doc Test that nodes sending too many requests are temporarily blocked: (a) GET.
node_blacklisting_get_spammer_test_() ->
	{timeout, 30, fun test_node_blacklisting_get_spammer/0}.

%% @doc Test that nodes sending too many requests are temporarily blocked: (b) POST.
node_blacklisting_post_spammer_test_() ->
	{timeout, 30, fun test_node_blacklisting_post_spammer/0}.

%% All the batch tests share one node pair (see setup_all_batch/0);
%% reset_node/0 runs before each individual test.
batch_test_() ->
	{setup, fun setup_all_batch/0, fun cleanup_all_batch/1,
		fun ({GenesisData, _MockData}) ->
			{foreach, fun reset_node/0, [
				%% ---------------------------------------------------------
				%% The following tests must be run at a block height of 0.
				%% ---------------------------------------------------------
				test_register(fun test_get_total_supply/1, GenesisData),
				test_register(fun test_get_current_block/1, GenesisData),
				test_register(fun test_get_height/1, GenesisData),
				%% ---------------------------------------------------------
				%% The following tests are read-only and will not modify
				%% state. They assume that the blockchain state
				%% is fixed (and set by start_node and test_get_height).
				%% ---------------------------------------------------------
				test_register(fun test_get_wallet_list_in_chunks/1, GenesisData),
				test_register(fun test_get_info/1, GenesisData),
				test_register(fun test_get_last_tx_single/1, GenesisData),
				test_register(fun test_get_block_by_hash/1, GenesisData),
				test_register(fun test_get_block_by_height/1, GenesisData),
				test_register(fun test_get_non_existent_block/1, GenesisData),
				%% ---------------------------------------------------------
				%% The following tests are *not* read-only and may modify
				%% state. They can *not* assume a fixed blockchain state.
				%% ---------------------------------------------------------
				test_register(fun test_addresses_with_checksum/1, GenesisData),
				test_register(fun test_single_regossip/1, GenesisData),
				test_register(fun test_get_balance/1, GenesisData),
				test_register(fun test_get_format_2_tx/1, GenesisData),
				test_register(fun test_get_format_1_tx/1, GenesisData),
				test_register(fun test_add_external_tx_with_tags/1, GenesisData),
				test_register(fun test_find_external_tx/1, GenesisData),
				test_register(fun test_add_tx_and_get_last/1, GenesisData),
				test_register(fun test_get_subfields_of_tx/1, GenesisData),
				test_register(fun test_get_pending_tx/1, GenesisData),
				test_register(fun test_get_tx_body/1, GenesisData),
				test_register(fun test_get_tx_status/1, GenesisData),
				test_register(fun test_post_unsigned_tx/1, GenesisData),
				test_register(fun test_get_error_of_data_limit/1, GenesisData),
				test_register(fun test_send_missing_tx_with_the_block/1, GenesisData),
				test_register(
					fun test_fallback_to_block_endpoint_if_cannot_send_tx/1, GenesisData),
				test_register(fun test_get_recent_hash_list_diff/1, GenesisData),
				test_register(fun test_get_total_supply/1, GenesisData)
			]}
		end
	}.

%% @doc Check that we can quickly get the local time from the peer.
get_time_test() ->
	Now = os:system_time(second),
	{ok, {Min, Max}} = ar_http_iface_client:get_time(ar_test_node:peer_ip(main), 10 * 1000),
	?assert(Min < Now),
	?assert(Now < Max).
%% Verify that wallet addresses with a CRC32 checksum suffix
%% ("<address>:<checksum>") are accepted where valid and rejected where
%% malformed, across the tx-posting and wallet/price endpoints.
test_addresses_with_checksum({_, Wallet1, {_, Pub2}, _}) ->
	LocalHeight = ar_node:get_height(),
	RemoteHeight = height(peer1),
	Address19 = crypto:strong_rand_bytes(19),
	Address65 = crypto:strong_rand_bytes(65),
	Address20 = crypto:strong_rand_bytes(20),
	Address32 = ar_wallet:to_address(Pub2),
	TX = ar_test_node:sign_tx(Wallet1, #{ last_tx => ar_test_node:get_tx_anchor(peer1) }),
	{JSON} = ar_serialize:tx_to_json_struct(TX),
	%% JSON2 has no target; the invalid payloads below inject bad ones.
	JSON2 = proplists:delete(<<"target">>, JSON),
	TX2 = ar_test_node:sign_tx(Wallet1, #{ last_tx => ar_test_node:get_tx_anchor(peer1),
			target => Address32 }),
	{JSON3} = ar_serialize:tx_to_json_struct(TX2),
	InvalidPayloads = [
		%% Checksum separator with no address.
		[{<<"target">>, <<":">>} | JSON2],
		%% Empty address with a checksum.
		[{<<"target">>, << <<":">>/binary,
				(ar_util:encode(<< 0:32 >>))/binary >>} | JSON2],
		%% Addresses of invalid length (19 and 65 bytes), checksums correct.
		[{<<"target">>, << (ar_util:encode(Address19))/binary, <<":">>/binary,
				(ar_util:encode(<< (erlang:crc32(Address19)):32 >> ))/binary >>} | JSON2],
		[{<<"target">>, << (ar_util:encode(Address65))/binary, <<":">>/binary,
				(ar_util:encode(<< (erlang:crc32(Address65)):32 >>))/binary >>} | JSON2],
		%% Valid address with a wrong checksum.
		[{<<"target">>, << (ar_util:encode(Address32))/binary, <<":">>/binary,
				(ar_util:encode(<< 0:32 >>))/binary >>} | JSON2],
		[{<<"target">>, << (ar_util:encode(Address20))/binary, <<":">>/binary,
				(ar_util:encode(<< 1:32 >>))/binary >>} | JSON2],
		%% Trailing separator after a correct checksum.
		[{<<"target">>, << (ar_util:encode(Address32))/binary, <<":">>/binary,
				(ar_util:encode(<< (erlang:crc32(Address32)):32 >>))/binary,
				<<":">>/binary >>} | JSON2],
		%% Separator with no checksum on an otherwise valid signed target.
		[{<<"target">>, << (ar_util:encode(Address32))/binary,
				<<":">>/binary >>} | JSON3]
	],
	lists:foreach(
		fun(Struct) ->
			Payload = ar_serialize:jsonify({Struct}),
			?assertMatch({ok, {{<<"400">>, _}, _, <<"Invalid JSON.">>, _, _}},
					ar_test_node:post_tx_json(main, Payload))
		end,
		InvalidPayloads
	),
	ValidPayloads = [
		%% Correct address:checksum on the signed target.
		[{<<"target">>, << (ar_util:encode(Address32))/binary, <<":">>/binary,
				(ar_util:encode(<< (erlang:crc32(Address32)):32 >>))/binary >>} | JSON3],
		JSON
	],
	lists:foreach(
		fun(Struct) ->
			Payload = ar_serialize:jsonify({Struct}),
			?assertMatch({ok, {{<<"200">>, _}, _, <<"OK">>, _, _}},
					ar_test_node:post_tx_json(main, Payload))
		end,
		ValidPayloads
	),
	ar_test_node:assert_wait_until_receives_txs(main, [TX, TX2]),
	ar_test_node:assert_wait_until_receives_txs(peer1, [TX, TX2]),
	ar_test_node:mine(),
	[{H, _, _} | _] = ar_test_node:wait_until_height(main, LocalHeight + 1),
	ar_test_node:assert_wait_until_height(peer1, RemoteHeight + 1),
	B = read_block_when_stored(H, true),
	ChecksumAddr = << (ar_util:encode(Address32))/binary, <<":">>/binary,
			(ar_util:encode(<< (erlang:crc32(Address32)):32 >>))/binary >>,
	?assertEqual(2, length(B#block.txs)),
	%% The checksummed and plain forms of the address must be equivalent
	%% on the read endpoints.
	Balance = get_balance(ar_util:encode(Address32)),
	?assertEqual(Balance, get_balance(ChecksumAddr)),
	LastTX = get_last_tx(ar_util:encode(Address32)),
	?assertEqual(LastTX, get_last_tx(ChecksumAddr)),
	Price = get_price(ar_util:encode(Address32)),
	?assertEqual(Price, get_price(ChecksumAddr)),
	ServeTXTarget = maps:get(<<"target">>, jiffy:decode(get_tx(TX2#tx.id), [return_maps])),
	?assertEqual(ar_util:encode(TX2#tx.target), ServeTXTarget).

%% GET /wallet/[addr]/balance on main, returned as an integer.
get_balance(EncodedAddr) ->
	Peer = ar_test_node:peer_ip(main),
	{_, _, _, _, Port} = Peer,
	{ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/wallet/" ++ binary_to_list(EncodedAddr) ++ "/balance",
		headers => [{<<"x-p2p-port">>, integer_to_binary(Port)}]
	}),
	binary_to_integer(Reply).

%% GET /wallet/[addr]/last_tx on main, returned as the raw body.
get_last_tx(EncodedAddr) ->
	Peer = ar_test_node:peer_ip(main),
	{_, _, _, _, Port} = Peer,
	{ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/wallet/" ++ binary_to_list(EncodedAddr) ++ "/last_tx",
		headers => [{<<"x-p2p-port">>, integer_to_binary(Port)}]
	}),
	Reply.

%% GET /price/0/[addr] on main, returned as an integer.
get_price(EncodedAddr) ->
	Peer = ar_test_node:peer_ip(main),
	{_, _, _, _, Port} = Peer,
	{ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/price/0/" ++ binary_to_list(EncodedAddr),
		headers => [{<<"x-p2p-port">>, integer_to_binary(Port)}]
	}),
	binary_to_integer(Reply).
%% GET /tx/[ID] on main, returned as the raw JSON body.
get_tx(ID) ->
	Peer = ar_test_node:peer_ip(main),
	{_, _, _, _, Port} = Peer,
	{ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/tx/" ++ binary_to_list(ar_util:encode(ID)),
		headers => [{<<"x-p2p-port">>, integer_to_binary(Port)}]
	}),
	Reply.

%% @doc Ensure that server info can be retrieved via the HTTP interface.
test_get_info(_) ->
	?assertEqual(info_unavailable,
			ar_http_iface_client:get_info(ar_test_node:peer_ip(main), bad_key)),
	%% Fix: the expected value was a syntactically invalid empty binary
	%% expression (`<>`); the network key must return the configured
	%% network name, i.e. the ?NETWORK_NAME macro wrapped in a binary.
	?assertEqual(<<?NETWORK_NAME>>,
			ar_http_iface_client:get_info(ar_test_node:peer_ip(main), network)),
	?assertEqual(?RELEASE_NUMBER,
			ar_http_iface_client:get_info(ar_test_node:peer_ip(main), release)),
	?assertEqual(
		?CLIENT_VERSION,
		ar_http_iface_client:get_info(ar_test_node:peer_ip(main), version)),
	?assertEqual(1, ar_http_iface_client:get_info(ar_test_node:peer_ip(main), peers)),
	%% The block count may lag briefly; poll until it reaches 1.
	ar_util:do_until(
		fun() ->
			1 == ar_http_iface_client:get_info(ar_test_node:peer_ip(main), blocks)
		end,
		100,
		2000
	),
	?assertEqual(1, ar_http_iface_client:get_info(ar_test_node:peer_ip(main), height)).

%% @doc Ensure that transactions are only accepted once.
test_single_regossip(_) ->
	ar_test_node:disconnect_from(peer1),
	TX = ar_tx:new(),
	%% First submission (JSON) is accepted.
	?assertMatch(
		{ok, {{<<"200">>, _}, _, _, _, _}},
		ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), TX#tx.id,
				ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX)))
	),
	%% First submission to peer1 (binary) is accepted.
	?assertMatch(
		{ok, {{<<"200">>, _}, _, _, _, _}},
		ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_binary,
				[ar_test_node:peer_ip(peer1), TX#tx.id, ar_serialize:tx_to_binary(TX)])
	),
	%% Re-submissions are answered with 208 Already Reported, regardless of
	%% the serialization format.
	?assertMatch(
		{ok, {{<<"208">>, _}, _, _, _, _}},
		ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_binary,
				[ar_test_node:peer_ip(peer1), TX#tx.id, ar_serialize:tx_to_binary(TX)])
	),
	?assertMatch(
		{ok, {{<<"208">>, _}, _, _, _, _}},
		ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_json,
				[ar_test_node:peer_ip(peer1), TX#tx.id,
					ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))])
	).
%% Spam GET /info; the blacklist should reject roughly everything beyond
%% the configured limit (sliding window + leaky bucket burst).
test_node_blacklisting_get_spammer() ->
	{ok, Config} = arweave_config:get_env(),
	{RequestFun, ErrorResponse} = get_fun_msg_pair(get_info),
	LimitWithBursts = Config#config.'http_api.limiter.general.sliding_window_limit'
			+ Config#config.'http_api.limiter.general.leaky_limit',
	node_blacklisting_test_frame(RequestFun, ErrorResponse, LimitWithBursts, 1).

%% Spam POST /tx with an invalid transaction; expect NErrors rejections.
test_node_blacklisting_post_spammer() ->
	{ok, Config} = arweave_config:get_env(),
	LimitWithBursts = Config#config.'http_api.limiter.general.sliding_window_limit'
			+ Config#config.'http_api.limiter.general.leaky_limit',
	{RequestFun, ErrorResponse} = get_fun_msg_pair(send_tx_binary),
	NErrors = 11,
	NRequests = LimitWithBursts + NErrors,
	node_blacklisting_test_frame(RequestFun, ErrorResponse, NRequests, NErrors).

%% @doc Given a label, return a fun and a message.
-spec get_fun_msg_pair(atom()) -> {fun(), any()}.
get_fun_msg_pair(get_info) ->
	{ fun(_) ->
			ar_http_iface_client:get_info(ar_test_node:peer_ip(main))
		end
	, info_unavailable};
get_fun_msg_pair(send_tx_binary) ->
	{ fun(Index) ->
			%% Deliberately unverifiable transaction.
			InvalidTX = (ar_tx:new())#tx{ owner = <<"key">>, signature = <<"invalid">> },
			send_tx_binary(Index, InvalidTX)
		end
	, too_many_requests}.

%% Post the invalid transaction, mapping a 429 response to the atom
%% too_many_requests; transport errors are retried indefinitely.
send_tx_binary(Index, InvalidTX) ->
	case ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), InvalidTX#tx.id,
			ar_serialize:tx_to_binary(InvalidTX)) of
		{ok, {{<<"429">>, <<"Too Many Requests">>}, _, <<"Too Many Requests">>, _, _}} ->
			too_many_requests;
		{ok, _} ->
			ok;
		{error, Error} ->
			?debugFmt("Unexpected response on call ~p: ~p. Trying again...~n",
					[Index, Error]),
			send_tx_binary(Index, InvalidTX)
	end.

%% @doc Frame to test spamming an endpoint.
-spec node_blacklisting_test_frame(fun(), any(), non_neg_integer(), non_neg_integer()) -> ok.
%% Fire NRequests in parallel at the endpoint and verify that roughly
%% ExpectedErrors of them were rejected by the blacklist/limiter.
node_blacklisting_test_frame(RequestFun, ErrorResponse, NRequests, ExpectedErrors) ->
	ar_blacklist_middleware:reset(),
	arweave_limiter_sup:reset_all(),
	ar_rate_limiter:off(),
	Results = ar_util:batch_pmap(RequestFun, lists:seq(1, NRequests), 50, 60_000),
	?assertEqual(length(Results), NRequests),
	ar_blacklist_middleware:reset(),
	arweave_limiter_sup:reset_all(),
	Counts = count_by_response_type(ErrorResponse, Results),
	ErrorCount = maps:get(error_responses, Counts, 0),
	OkCount = maps:get(ok_responses, Counts, 0),
	%% Other test nodes may occasionally make some requests in the background disturbing the stats.
	Tolerance = 5,
	?debugFmt("Requests sent: ~p, ExpectedErrors: ~p, Tolerance: ~p, Got: ~p~n",
			[NRequests, ExpectedErrors, Tolerance, ErrorCount]),
	?assert(ErrorCount =< ExpectedErrors + Tolerance),
	?assert(ErrorCount >= ExpectedErrors - Tolerance),
	?assertEqual(NRequests - ErrorCount, OkCount),
	ar_rate_limiter:on().

%% @doc Count the number of successful and error responses.
count_by_response_type(ErrorResponse, Responses) ->
	count_by(
		fun(Response) ->
			if
				Response == ErrorResponse -> error_responses;
				true -> ok_responses
			end
		end,
		Responses).

%% @doc Count the occurrences in the list based on the predicate.
count_by(Pred, List) ->
	maps:map(fun(_Key, Items) -> length(Items) end, group(Pred, List)).

%% @doc Group the list based on the key generated by Grouper.
group(Grouper, Values) ->
	lists:foldl(
		fun(Item, Acc) ->
			maps:update_with(Grouper(Item), fun(Tail) -> [Item | Tail] end, [Item], Acc)
		end,
		maps:new(),
		Values).

%% @doc Check that balances can be retrieved over the network.
%% Fetch the static wallet's balance via /wallet/.../balance and via the
%% explicit wallet-list root hash, before and after mining a block.
test_get_balance({B0, _, _, {_, Pub1}}) ->
	LocalHeight = ar_node:get_height(),
	Addr = binary_to_list(ar_util:encode(ar_wallet:to_address(Pub1))),
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet/" ++ Addr ++ "/balance"
	}),
	?assertEqual(?AR(10), binary_to_integer(Body)),
	RootHash = binary_to_list(ar_util:encode(B0#block.wallet_list)),
	%% Body is already bound, so this match also asserts the two replies
	%% are byte-identical.
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet_list/" ++ RootHash ++ "/" ++ Addr ++ "/balance"
	}),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	%% The old root hash must still resolve to the same balance after
	%% a new block is mined (Body is bound here too).
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet_list/" ++ RootHash ++ "/" ++ Addr ++ "/balance"
	}).

%% Page through /wallet_list/[root_hash] with a cursor and check the two
%% chunks together cover the genesis accounts.
test_get_wallet_list_in_chunks({B0, {_, Pub1}, {_, Pub2}, {_, StaticPub}}) ->
	Addr1 = ar_wallet:to_address(Pub1),
	Addr2 = ar_wallet:to_address(Pub2),
	StaticAddr = ar_wallet:to_address(StaticPub),
	NonExistentRootHash = binary_to_list(ar_util:encode(crypto:strong_rand_bytes(32))),
	{ok, {{<<"404">>, _}, _, <<"Root hash not found.">>, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet_list/" ++ NonExistentRootHash
	}),
	[TX] = B0#block.txs,
	GenesisAddr = ar_wallet:to_address(TX#tx.owner, {?RSA_SIGN_ALG, 65537}),
	TXID = TX#tx.id,
	ExpectedWallets = lists:sort([
		{Addr1, {?AR(10000), <<>>}},
		{Addr2, {?AR(10000), <<>>}},
		{StaticAddr, {?AR(10), <<"TEST_ID">>}},
		{GenesisAddr, {0, TXID}}]),
	{ExpectedWallets1, ExpectedWallets2} = lists:split(2, ExpectedWallets),
	RootHash = binary_to_list(ar_util:encode(B0#block.wallet_list)),
	{ok, {{<<"200">>, _}, _, Body1, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet_list/" ++ RootHash
	}),
	Cursor = maps:get(next_cursor, binary_to_term(Body1)),
	%% The endpoint returns wallets in reverse order within each chunk.
	?assertEqual(#{ next_cursor => Cursor, wallets => lists:reverse(ExpectedWallets1) },
			binary_to_term(Body1)),
	{ok, {{<<"200">>, _}, _, Body2, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet_list/" ++ RootHash ++ "/" ++ ar_util:encode(Cursor)
	}),
	?assertEqual(#{ next_cursor => last, wallets => lists:reverse(ExpectedWallets2) },
			binary_to_term(Body2)).

%% @doc Test that heights are returned correctly.
test_get_height(_) ->
	0 = ar_http_iface_client:get_height(ar_test_node:peer_ip(main)),
	ar_test_node:mine(),
	wait_until_height(main, 1),
	1 = ar_http_iface_client:get_height(ar_test_node:peer_ip(main)).

%% @doc Test that last tx associated with a wallet can be fetched.
test_get_last_tx_single({_, _, _, {_, StaticPub}}) ->
	Addr = binary_to_list(ar_util:encode(ar_wallet:to_address(StaticPub))),
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/wallet/" ++ Addr ++ "/last_tx"
	}),
	?assertEqual(<<"TEST_ID">>, ar_util:decode(Body)).

%% @doc Ensure that blocks can be received via a hash.
test_get_block_by_hash({B0, _, _, _}) ->
	{_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(
			B0#block.indep_hash, ar_test_node:peer_ip(main), binary, #{}),
	TXIDs = [TX#tx.id || TX <- B0#block.txs],
	%% The shadow carries TX IDs only and drops the locally-derived fields.
	?assertEqual(B0#block{ size_tagged_txs = unset, account_tree = undefined,
			txs = TXIDs, reward_history = [], block_time_history = [] }, B1).

%% @doc Ensure that blocks can be received via a height.
test_get_block_by_height({B0, _, _, _}) ->
	{_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(0,
			ar_test_node:peer_ip(main), binary, #{}),
	TXIDs = [TX#tx.id || TX <- B0#block.txs],
	?assertEqual(B0#block{ size_tagged_txs = unset, account_tree = undefined,
			txs = TXIDs, reward_history = [], block_time_history = [] }, B1).
%% Fetch the current block both as a shadow (via the block index) and via
%% the /block/current JSON endpoint; both must match the genesis block.
test_get_current_block({B0, _, _, _}) ->
	Peer = ar_test_node:peer_ip(main),
	{ok, BI} = ar_http_iface_client:get_block_index(Peer, 0, 100),
	{_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(hd(BI), Peer,
			binary, #{}),
	TXIDs = [TX#tx.id || TX <- B0#block.txs],
	%% The shadow carries TX IDs only and drops the locally-derived fields.
	?assertEqual(B0#block{ size_tagged_txs = unset, txs = TXIDs, reward_history = [],
			block_time_history = [], account_tree = undefined }, B1),
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/block/current"
	}),
	{JSONStruct} = jiffy:decode(Body),
	?assertEqual(ar_util:encode(B0#block.indep_hash),
			proplists:get_value(<<"indep_hash">>, JSONStruct)).

%% @doc Test that the various different methods of GETing a block all perform
%% correctly if the block cannot be found.
test_get_non_existent_block(_) ->
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block/height/100" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block2/height/100" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block/hash/abcd" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block2/hash/abcd" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block/height/101/wallet_list" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block/hash/abcd/wallet_list" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block/height/101/hash_list" }),
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get, peer => ar_test_node:peer_ip(main),
		path => "/block/hash/abcd/hash_list" }).
%% @doc A test for retrieving format=2 transactions from HTTP API.
test_get_format_2_tx(_) ->
	LocalHeight = ar_node:get_height(),
	DataRoot = (ar_tx:generate_chunk_tree(#tx{ data = <<"DATA">> }))#tx.data_root,
	%% A valid tx with a proper data root, one with a missing data root,
	%% and one with no data at all.
	ValidTX = #tx{ id = TXID } = (ar_tx:new(<<"DATA">>))#tx{ format = 2,
			data_root = DataRoot },
	InvalidDataRootTX = #tx{ id = InvalidTXID } = (ar_tx:new(<<"DATA">>))#tx{ format = 2 },
	EmptyTX = #tx{ id = EmptyTXID } = (ar_tx:new())#tx{ format = 2 },
	EncodedTXID = binary_to_list(ar_util:encode(TXID)),
	EncodedInvalidTXID = binary_to_list(ar_util:encode(InvalidTXID)),
	EncodedEmptyTXID = binary_to_list(ar_util:encode(EmptyTXID)),
	ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), ValidTX#tx.id,
			ar_serialize:jsonify(ar_serialize:tx_to_json_struct(ValidTX))),
	%% Posting data whose split does not match the data root is rejected.
	{ok, {{<<"400">>, _}, _, <<"The attached data is split in an unknown way.">>, _, _}} =
		ar_http:req(#{
			method => post,
			peer => ar_test_node:peer_ip(main),
			path => "/tx",
			body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(InvalidDataRootTX))
		}),
	%% The same tx is accepted when submitted without the data.
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main),
			InvalidDataRootTX#tx.id,
			ar_serialize:tx_to_binary(InvalidDataRootTX#tx{ data = <<>> })),
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), EmptyTX#tx.id,
			ar_serialize:tx_to_binary(EmptyTX)),
	wait_until_receives_txs([ValidTX, EmptyTX, InvalidDataRootTX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	%% Ensure format=2 transactions can be retrieved over the HTTP
	%% interface with no populated data, while retaining info on all other fields.
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/tx/" ++ EncodedTXID
	}),
	?assertEqual(ValidTX#tx{ data = <<>>, data_size = 4 },
			(ar_serialize:json_struct_to_tx(Body))#tx{ owner_address = not_set }),
	%% Ensure data can be fetched for format=2 transactions via /tx/[ID]/data.
	{ok, Data} = wait_until_syncs_tx_data(TXID),
	?assertEqual(ar_util:encode(<<"DATA">>), Data),
	%% The tx whose data was never accepted has no data to serve.
	{ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/tx/" ++ EncodedInvalidTXID ++ "/data"
	}),
	%% Ensure /tx/[ID]/data works for format=2 transactions when the data is empty.
	{ok, {{<<"200">>, _}, _, <<>>, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/tx/" ++ EncodedEmptyTXID ++ "/data"
	}),
	%% Ensure data can be fetched for format=2 transactions via /tx/[ID]/data.html.
	{ok, {{<<"200">>, _}, Headers, HTMLData, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/tx/" ++ EncodedTXID ++ "/data.html"
	}),
	?assertEqual(<<"DATA">>, HTMLData),
	?assertEqual(
		[{<<"content-type">>, <<"text/html">>}],
		proplists:lookup_all(<<"content-type">>, Headers)
	).

%% Retrieve a mined format=1 transaction over /tx/[ID], polling until the
%% node has it stored.
test_get_format_1_tx(_) ->
	LocalHeight = ar_node:get_height(),
	TX = #tx{ id = TXID } = ar_tx:new(<<"DATA">>),
	EncodedTXID = binary_to_list(ar_util:encode(TXID)),
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id,
			ar_serialize:tx_to_binary(TX)),
	wait_until_receives_txs([TX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	{ok, Body} = ar_util:do_until(
		fun() ->
			case ar_http:req(#{
				method => get,
				peer => ar_test_node:peer_ip(main),
				path => "/tx/" ++ EncodedTXID
			}) of
				{ok, {{<<"404">>, _}, _, _, _, _}} ->
					false;
				{ok, {{<<"200">>, _}, _, Payload, _, _}} ->
					{ok, Payload}
			end
		end,
		100,
		2000
	),
	?assertEqual(TX, (ar_serialize:json_struct_to_tx(Body))#tx{ owner_address = not_set }).

%% @doc Test adding transactions to a block.
%% @doc Test adding a tagged transaction to a block via the JSON /tx endpoint
%% and reading it back from block storage with the tags intact.
test_add_external_tx_with_tags(_) ->
	LocalHeight = ar_node:get_height(),
	TX = ar_tx:new(<<"DATA">>),
	TaggedTX =
		TX#tx {
			tags =
				[
					{<<"TEST_TAG1">>, <<"TEST_VAL1">>},
					{<<"TEST_TAG2">>, <<"TEST_VAL2">>}
				]
		},
	ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), TaggedTX#tx.id,
			ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TaggedTX))),
	wait_until_receives_txs([TaggedTX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	[B1Hash | _] = ar_node:get_blocks(),
	B1 = read_block_when_stored(B1Hash, true),
	TXID = TaggedTX#tx.id,
	?assertEqual([TXID], [TX2#tx.id || TX2 <- B1#block.txs]),
	%% The stored transaction must round-trip with its tags preserved.
	?assertEqual(TaggedTX, (ar_storage:read_tx(hd(B1#block.txs)))#tx{ owner_address = not_set }).

%% @doc Test getting transactions
test_find_external_tx(_) ->
	LocalHeight = ar_node:get_height(),
	TX = ar_tx:new(<<"DATA">>),
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id,
			ar_serialize:tx_to_binary(TX)),
	wait_until_receives_txs([TX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	%% Poll until the mined transaction becomes retrievable over HTTP.
	{ok, FoundTXID} = ar_util:do_until(
		fun() ->
			case ar_http_iface_client:get_tx(ar_test_node:peer_ip(main), TX#tx.id) of
				not_found ->
					false;
				TX2 ->
					case TX2#tx.id == TX#tx.id of
						true ->
							{ok, TX#tx.id};
						false ->
							false
					end
			end
		end,
		100,
		5000
	),
	?assertEqual(FoundTXID, TX#tx.id).

%% @doc Post a tx to the network and ensure that last_tx call returns the ID of last tx.
%% @doc Post a signed transfer and check /wallet/[address]/last_tx returns its ID.
test_add_tx_and_get_last({_B0, Wallet1, Wallet2, _StaticWallet}) ->
	LocalHeight = ar_node:get_height(),
	ar_test_node:disconnect_from(peer1),
	{_Priv1, Pub1} = Wallet1,
	{_Priv2, Pub2} = Wallet2,
	SignedTX = ar_test_node:sign_tx(Wallet1, #{ target => ar_wallet:to_address(Pub2),
			quantity => ?AR(2), reward => ?AR(1)}),
	ID = SignedTX#tx.id,
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), SignedTX#tx.id,
			ar_serialize:tx_to_binary(SignedTX)),
	wait_until_receives_txs([SignedTX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	{ok, {{<<"200">>, _}, _, Body, _, _}} =
		ar_http:req(#{
			method => get,
			peer => ar_test_node:peer_ip(main),
			path => "/wallet/" ++ binary_to_list(ar_util:encode(ar_wallet:to_address(Pub1)))
					++ "/last_tx"
		}),
	?assertEqual(ID, ar_util:decode(Body)).

%% @doc Post a tx to the network and ensure that its subfields can be gathered
test_get_subfields_of_tx(_) ->
	LocalHeight = ar_node:get_height(),
	TX = ar_tx:new(<<"DATA">>),
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id,
			ar_serialize:tx_to_binary(TX)),
	wait_until_receives_txs([TX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	{ok, Body} = wait_until_syncs_tx_data(TX#tx.id),
	Orig = TX#tx.data,
	?assertEqual(Orig, ar_util:decode(Body)).

%% @doc Correctly check the status of pending is returned for a pending transaction
test_get_pending_tx(_) ->
	TX = ar_tx:new(<<"DATA1">>),
	ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), TX#tx.id,
			ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))),
	wait_until_receives_txs([TX]),
	%% No block is mined, so the transaction must still be reported as pending (202).
	{ok, {{<<"202">>, _}, _, Body, _, _}} =
		ar_http:req(#{
			method => get,
			peer => ar_test_node:peer_ip(main),
			path => "/tx/" ++ binary_to_list(ar_util:encode(TX#tx.id))
		}),
	?assertEqual(<<"Pending">>, Body).

%% @doc Mine a transaction into a block and retrieve its binary body via HTTP.
%% @doc Mine a transaction into a block and retrieve its data via /tx/[ID]/data.
test_get_tx_body(_) ->
	ar_test_node:disconnect_from(peer1),
	LocalHeight = ar_node:get_height(),
	TX = ar_tx:new(<<"TEST DATA">>),
	ar_test_node:assert_post_tx_to_peer(main, TX),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	{ok, Data} = wait_until_syncs_tx_data(TX#tx.id),
	?assertEqual(<<"TEST DATA">>, ar_util:decode(Data)).

%% @doc Check the /tx/[ID]/status endpoint through the transaction lifecycle:
%% pending (202), confirmed with growing confirmation counts (200), and back to
%% pending after the containing block is orphaned by a longer fork.
test_get_tx_status(_) ->
	ar_test_node:connect_to_peer(peer1),
	Height = ar_node:get_height(),
	assert_wait_until_height(peer1, Height),
	ar_test_node:disconnect_from(peer1),
	TX = (ar_tx:new())#tx{ tags = [{<<"TestName">>, <<"TestVal">>}] },
	ar_test_node:assert_post_tx_to_peer(main, TX),
	FetchStatus = fun() ->
		ar_http:req(#{
			method => get,
			peer => ar_test_node:peer_ip(main),
			path => "/tx/" ++ binary_to_list(ar_util:encode(TX#tx.id)) ++ "/status"
		})
	end,
	?assertMatch({ok, {{<<"202">>, _}, _, <<"Pending">>, _, _}}, FetchStatus()),
	ar_test_node:mine(),
	wait_until_height(main, Height + 1),
	%% Wait for the status to flip from pending to confirmed.
	ar_util:do_until(
		fun() ->
			case FetchStatus() of
				{ok, {{<<"200">>, _}, _, _, _, _}} ->
					true;
				_ ->
					false
			end
		end,
		200,
		5000
	),
	{ok, {{<<"200">>, _}, _, Body, _, _}} = FetchStatus(),
	{Res} = ar_serialize:dejsonify(Body),
	BI = ar_node:get_block_index(),
	?assertEqual(
		#{
			<<"block_height">> => length(BI) - 1,
			<<"block_indep_hash">> => ar_util:encode(element(1, hd(BI))),
			<<"number_of_confirmations">> => 1
		},
		maps:from_list(Res)
	),
	ar_test_node:mine(),
	wait_until_height(main, Height + 2),
	%% After one more block, the confirmation count should reach 2; the block
	%% fields still refer to BI as captured before the second mine.
	ar_util:do_until(
		fun() ->
			{ok, {{<<"200">>, _}, _, Body2, _, _}} = FetchStatus(),
			{Res2} = ar_serialize:dejsonify(Body2),
			#{
				<<"block_height">> => length(BI) - 1,
				<<"block_indep_hash">> => ar_util:encode(element(1, hd(BI))),
				<<"number_of_confirmations">> => 2
			} == maps:from_list(Res2)
		end,
		200,
		5000
	),
	%% Create a fork which returns the TX to mempool.
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, Height + 1),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, Height + 2),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(peer1),
	wait_until_height(main, Height + 3),
	%% The transaction is back in the mempool: status is 202 again.
	?assertMatch({ok, {{<<"202">>, _}, _, _, _, _}}, FetchStatus()).

%% @doc Exercise the internal-API wallet creation and /unsigned_tx signing flow,
%% including rejection (421) when the internal API secret is missing or wrong.
%% The original config is restored in the after clause regardless of outcome.
test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) ->
	LocalHeight = ar_node:get_height(),
	{_, Pub} = Wallet = Wallet1,
	%% Generate a wallet and receive a wallet access code.
	{ok, {{<<"421">>, _}, _, _, _, _}} =
		ar_http:req(#{
			method => post,
			peer => ar_test_node:peer_ip(main),
			path => "/wallet"
		}),
	{ok, Config} = arweave_config:get_env(),
	try
		arweave_config:set_env(Config#config{ internal_api_secret = <<"correct_secret">> }),
		%% A wrong secret is still rejected.
		{ok, {{<<"421">>, _}, _, _, _, _}} =
			ar_http:req(#{
				method => post,
				peer => ar_test_node:peer_ip(main),
				path => "/wallet",
				headers => [{<<"X-Internal-Api-Secret">>, <<"incorrect_secret">>}]
			}),
		{ok, {{<<"200">>, <<"OK">>}, _, CreateWalletBody, _, _}} =
			ar_http:req(#{
				method => post,
				peer => ar_test_node:peer_ip(main),
				path => "/wallet",
				headers => [{<<"X-Internal-Api-Secret">>, <<"correct_secret">>}]
			}),
		arweave_config:set_env(Config#config{ internal_api_secret = not_set }),
		{CreateWalletRes} = ar_serialize:dejsonify(CreateWalletBody),
		[WalletAccessCode] =
			proplists:get_all_values(<<"wallet_access_code">>, CreateWalletRes),
		[Address] = proplists:get_all_values(<<"wallet_address">>, CreateWalletRes),
		%% Top up the new wallet.
		TopUpTX = ar_test_node:sign_tx(Wallet, #{
			owner => Pub,
			target => ar_util:decode(Address),
			quantity => ?AR(100),
			reward => ?AR(1)
		}),
		{ok, {{<<"200">>, _}, _, _, _, _}} =
			ar_http:req(#{
				method => post,
				peer => ar_test_node:peer_ip(main),
				path => "/tx",
				body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TopUpTX))
			}),
		wait_until_receives_txs([TopUpTX]),
		ar_test_node:mine(),
		wait_until_height(main, LocalHeight + 1),
		%% Send an unsigned transaction to be signed with the generated key.
		TX = (ar_tx:new())#tx{reward = ?AR(1), last_tx = TopUpTX#tx.id},
		UnsignedTXProps = [
			{<<"last_tx">>, <<>>},
			{<<"target">>, TX#tx.target},
			{<<"quantity">>, integer_to_binary(TX#tx.quantity)},
			{<<"data">>, TX#tx.data},
			{<<"reward">>, integer_to_binary(TX#tx.reward)},
			{<<"denomination">>, integer_to_binary(TopUpTX#tx.denomination)},
			{<<"wallet_access_code">>, WalletAccessCode}
		],
		%% /unsigned_tx is also protected by the internal API secret.
		{ok, {{<<"421">>, _}, _, _, _, _}} =
			ar_http:req(#{
				method => post,
				peer => ar_test_node:peer_ip(main),
				path => "/unsigned_tx",
				body => ar_serialize:jsonify({UnsignedTXProps})
			}),
		arweave_config:set_env(Config#config{ internal_api_secret = <<"correct_secret">> }),
		{ok, {{<<"421">>, _}, _, _, _, _}} =
			ar_http:req(#{
				method => post,
				peer => ar_test_node:peer_ip(main),
				path => "/unsigned_tx",
				headers => [{<<"X-Internal-Api-Secret">>, <<"incorrect_secret">>}],
				body => ar_serialize:jsonify({UnsignedTXProps})
			}),
		{ok, {{<<"200">>, <<"OK">>}, _, Body, _, _}} =
			ar_http:req(#{
				method => post,
				peer => ar_test_node:peer_ip(main),
				path => "/unsigned_tx",
				headers => [{<<"X-Internal-Api-Secret">>, <<"correct_secret">>}],
				body => ar_serialize:jsonify({UnsignedTXProps})
			}),
		arweave_config:set_env(Config#config{ internal_api_secret = not_set }),
		{Res} = ar_serialize:dejsonify(Body),
		TXID = proplists:get_value(<<"id">>, Res),
		timer:sleep(200),
		ar_test_node:mine(),
		wait_until_height(main, LocalHeight + 2),
		timer:sleep(200),
		{ok, {{<<"200">>, <<"OK">>}, _, GetTXBody, _, _}} =
			ar_http:req(#{
				method => get,
				peer => ar_test_node:peer_ip(main),
				path => "/tx/" ++ binary_to_list(TXID) ++ "/status"
			}),
		{GetTXRes} = ar_serialize:dejsonify(GetTXBody),
		?assertMatch(
			#{
				<<"number_of_confirmations">> := 1
			},
			maps:from_list(GetTXRes)
		)
	after
		%% Always restore the original configuration.
		ok = arweave_config:set_env(Config)
	end.

%% @doc Ensure the HTTP client stops fetching data from an endpoint when its data size
%% limit is exceeded.
%% @doc The client must abort with {error, too_much_data} when the response body
%% exceeds the configured byte limit.
test_get_error_of_data_limit(_) ->
	LocalHeight = ar_node:get_height(),
	Limit = 1460,
	%% The transaction data is twice the limit, guaranteeing an overflow.
	TX = ar_tx:new(<< <<0>> || _ <- lists:seq(1, Limit * 2) >>),
	ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id,
			ar_serialize:tx_to_binary(TX)),
	wait_until_receives_txs([TX]),
	ar_test_node:mine(),
	wait_until_height(main, LocalHeight + 1),
	{ok, _} = wait_until_syncs_tx_data(TX#tx.id),
	Resp = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(main),
		path => "/tx/" ++ binary_to_list(ar_util:encode(TX#tx.id)) ++ "/data",
		limit => Limit
	}),
	?assertEqual({error, too_much_data}, Resp).

%% @doc A peer that is missing some of a block's transactions should still accept
%% the block when the missing transactions are sent along with it.
test_send_missing_tx_with_the_block({_B0, Wallet1, _Wallet2, _StaticWallet}) ->
	ar_test_node:disconnect_from(peer1),
	LocalHeight = ar_node:get_height(),
	RemoteHeight = height(peer1),
	TXs = [ar_test_node:sign_tx(Wallet1, #{ last_tx => ar_test_node:get_tx_anchor(peer1) })
			|| _ <- lists:seq(1, 10)],
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end, TXs),
	%% Give peer1 only every second transaction so the rest are missing there.
	EverySecondTX = element(2, lists:foldl(
		fun	(TX, {N, Acc}) when N rem 2 /= 0 ->
				{N + 1, [TX | Acc]};
			(_TX, {N, Acc}) ->
				{N + 1, Acc}
		end, {0, []}, TXs)),
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX) end, EverySecondTX),
	ar_test_node:mine(),
	BI = wait_until_height(main, LocalHeight + 1),
	B = ar_storage:read_block(hd(BI)),
	%% Embed the full transaction records in the block before gossiping it.
	B2 = B#block{ txs = ar_storage:read_tx(B#block.txs) },
	ar_test_node:connect_to_peer(peer1),
	ar_bridge ! {event, block, {new, B2, #{ recall_byte => undefined }}},
	assert_wait_until_height(peer1, RemoteHeight + 1).
%% @doc When a block is gossiped with only TX identifiers, the receiving peer
%% should fall back to fetching the missing transactions via the block endpoint.
test_fallback_to_block_endpoint_if_cannot_send_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) ->
	ar_test_node:disconnect_from(peer1),
	LocalHeight = ar_node:get_height(),
	RemoteHeight = height(peer1),
	TXs = [ar_test_node:sign_tx(Wallet1, #{ last_tx => ar_test_node:get_tx_anchor(peer1) })
			|| _ <- lists:seq(1, 10)],
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end, TXs),
	%% Give peer1 only every second transaction so the rest are missing there.
	EverySecondTX = element(2, lists:foldl(
		fun	(TX, {N, Acc}) when N rem 2 /= 0 ->
				{N + 1, [TX | Acc]};
			(_TX, {N, Acc}) ->
				{N + 1, Acc}
		end, {0, []}, TXs)),
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX) end, EverySecondTX),
	ar_test_node:mine(),
	BI = wait_until_height(main, LocalHeight + 1),
	%% Note: B keeps only TX identifiers; the peer must fetch the bodies itself.
	B = ar_storage:read_block(hd(BI)),
	ar_test_node:connect_to_peer(peer1),
	ar_bridge ! {event, block, {new, B, #{ recall_byte => undefined }}},
	assert_wait_until_height(peer1, RemoteHeight + 1).

%% @doc Exercise the /recent_hash_list_diff endpoint: input validation (404 for
%% empty/unknown hashes, 400 for a malformed 47-byte body) and the binary diff
%% format <<Hash:48/binary, TXCount:16, TXIDs...>> as blocks are mined.
test_get_recent_hash_list_diff({_B0, Wallet1, _Wallet2, _StaticWallet}) ->
	LocalHeight = ar_node:get_height(),
	BTip = ar_node:get_current_block(),
	ar_test_node:disconnect_from(peer1),
	%% An empty body: no common hash can be found.
	{ok, {{<<"404">>, _}, _, <<>>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [], body => <<>> }),
	%% A body that is not a multiple of 48 bytes is malformed.
	{ok, {{<<"400">>, _}, _, <<>>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [],
				body => crypto:strong_rand_bytes(47) }),
	%% A well-formed but unknown hash yields 404.
	{ok, {{<<"404">>, _}, _, <<>>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [],
				body => crypto:strong_rand_bytes(48) }),
	B0H = BTip#block.indep_hash,
	%% The tip itself is the entire diff when nothing new was mined.
	{ok, {{<<"200">>, _}, _, B0H, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [], body => B0H }),
	ar_test_node:mine(),
	BI1 = wait_until_height(main, LocalHeight + 1),
	{B1H, _, _} = hd(BI1),
	%% One new block with zero transactions.
	{ok, {{<<"200">>, _}, _, << B0H:48/binary, B1H:48/binary, 0:16 >>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [], body => B0H }),
	TXs = [ar_test_node:sign_tx(main, Wallet1, #{ last_tx => ar_test_node:get_tx_anchor(peer1) })
			|| _ <- lists:seq(1, 3)],
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end, TXs),
	ar_test_node:mine(),
	BI2 = wait_until_height(main, LocalHeight + 2),
	{B2H, _, _} = hd(BI2),
	[TXID1, TXID2, TXID3] = [TX#tx.id || TX <- (ar_node:get_current_block())#block.txs],
	%% Two new blocks; the second carries the three transactions.
	{ok, {{<<"200">>, _}, _, << B0H:48/binary, B1H:48/binary, 0:16, B2H:48/binary, 3:16,
			TXID1:32/binary, TXID2:32/binary, TXID3/binary >>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [], body => B0H }),
	%% Unknown trailing hashes after a known one are ignored.
	{ok, {{<<"200">>, _}, _, << B0H:48/binary, B1H:48/binary, 0:16, B2H:48/binary, 3:16,
			TXID1:32/binary, TXID2:32/binary, TXID3/binary >>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [],
				body => << B0H/binary, (crypto:strong_rand_bytes(48))/binary >>}),
	%% The diff starts after the deepest known hash (here B1H).
	{ok, {{<<"200">>, _}, _, << B1H:48/binary, B2H:48/binary, 3:16, TXID1:32/binary,
			TXID2:32/binary, TXID3/binary >>, _, _}} =
		ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
				path => "/recent_hash_list_diff", headers => [],
				body => << B0H/binary, B1H/binary,
						(crypto:strong_rand_bytes(48))/binary >>}).
%% @doc The /total_supply endpoint must report the sum of all wallet balances,
%% redenominated to the current block denomination.
test_get_total_supply(_Args) ->
	BlockDenomination = (ar_node:get_current_block())#block.denomination,
	%% Fold over the wallet tree; wallet entries come in two shapes — with and
	%% without an explicit denomination.
	TotalSupply =
		ar_patricia_tree:foldr(
			fun	(_, {B, _}, Acc) ->
					Acc + ar_pricing:redenominate(B, 1, BlockDenomination);
				(_, {B, _, Denomination, _}, Acc) ->
					Acc + ar_pricing:redenominate(B, Denomination, BlockDenomination)
			end,
			0,
			ar_diff_dag:get_sink(sys:get_state(ar_wallets))
		),
	TotalSupplyBin = integer_to_binary(TotalSupply),
	?assertMatch({ok, {{<<"200">>, _}, _, TotalSupplyBin, _, _}},
			ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main),
					path => "/total_supply" })).

%% @doc Poll /tx/[ID]/data until a non-empty payload is returned; gives up after
%% 10 seconds (100ms interval).
wait_until_syncs_tx_data(TXID) ->
	ar_util:do_until(
		fun() ->
			case ar_http:req(#{
					method => get,
					peer => ar_test_node:peer_ip(main),
					path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/data"
				}) of
				{ok, {{<<"404">>, _}, _, _, _, _}} ->
					false;
				{ok, {{<<"200">>, _}, _, <<>>, _, _}} ->
					false;
				{ok, {{<<"200">>, _}, _, Payload, _, _}} ->
					{ok, Payload}
			end
		end,
		100,
		10000
	).

%% @doc Fetch the current height of a remote test node.
height(Node) ->
	ar_test_node:remote_call(Node, ar_node, get_height, []).


================================================
FILE: apps/arweave/test/ar_http_util_tests.erl
================================================
-module(ar_http_util_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").

%% @doc Validate Content-Type extraction from transaction tags, including the
%% rejection of header-injection attempts (CRLF in the value).
get_tx_content_type_test() ->
	?assertEqual(
		none,
		content_type_from_tags([])
	),
	?assertEqual(
		{valid, <<"text/plain">>},
		content_type_from_tags([
			{<<"Content-Type">>, <<"text/plain">>}
		])
	),
	?assertEqual(
		{valid, <<"text/html; charset=utf-8">>},
		content_type_from_tags([
			{<<"Content-Type">>, <<"text/html; charset=utf-8">>}
		])
	),
	?assertEqual(
		{valid, <<"application/x.arweave-manifest+json">>},
		content_type_from_tags([
			{<<"Content-Type">>, <<"application/x.arweave-manifest+json">>}
		])
	),
	%% CRLF sequences must be rejected to prevent HTTP response splitting.
	?assertEqual(
		invalid,
		content_type_from_tags([
			{<<"Content-Type">>, <<"application/javascript\r\nSet-Cookie: foo=bar">>}
		])
	).

%% @doc Helper: build a #tx with the given tags and extract its content type.
content_type_from_tags(Tags) ->
	ar_http_util:get_tx_content_type(#tx { tags = Tags }).
================================================
FILE: apps/arweave/test/ar_info_tests.erl
================================================
-module(ar_info_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_chain_stats.hrl").

recent_blocks_test_() ->
	[
		{timeout, 300, fun test_recent_blocks_post/0},
		{timeout, 300, fun test_recent_blocks_announcement/0}
	].

recent_forks_test_() ->
	[
		{timeout, 300, fun test_get_recent_forks/0},
		{timeout, 300, fun test_recent_forks/0}
	].

%% -------------------------------------------------------------------------------------------
%% Recent blocks tests
%% -------------------------------------------------------------------------------------------
test_recent_blocks_post() ->
	test_recent_blocks(post).

test_recent_blocks_announcement() ->
	test_recent_blocks(announcement).

%% @doc Verify the "blocks" section of the /recent endpoint. Type selects how
%% blocks are shared back to the peer: a full POST or a block announcement.
test_recent_blocks(Type) ->
	[B0] = ar_weave:init(),
	ar_test_node:start_peer(peer1, B0),
	GenesisBlock = [#{
		<<"id">> => ar_util:encode(B0#block.indep_hash),
		<<"received">> => <<"pending">>,
		<<"height">> => 0
	}],
	?assertEqual(GenesisBlock, get_recent(ar_test_node:peer_ip(peer1), blocks)),
	TargetHeight = ?CHECKPOINT_DEPTH+2,
	PeerBI = lists:foldl(
		fun(Height, _Acc) ->
			ar_test_node:mine(peer1),
			ar_test_node:wait_until_height(peer1, Height)
		end,
		ok,
		lists:seq(1, TargetHeight)
	),
	%% Peer1 recent has no timestamps since it hasn't received any of its own blocks
	%% gossipped back
	?assertEqual(expected_blocks(peer1, PeerBI, true),
		get_recent(ar_test_node:peer_ip(peer1), blocks)),
	%% Share blocks to peer1
	lists:foreach(
		fun({H, _WeaveSize, _TXRoot}) ->
			timer:sleep(1000),
			B = ar_test_node:remote_call(peer1, ar_block_cache, get, [block_cache, H]),
			case Type of
				post ->
					ar_test_node:send_new_block(ar_test_node:peer_ip(peer1), B);
				announcement ->
					Announcement = #block_announcement{
						indep_hash = H,
						previous_block = B#block.previous_block,
						recall_byte = B#block.recall_byte,
						recall_byte2 = B#block.recall_byte2,
						solution_hash = B#block.hash,
						tx_prefixes = []
					},
					ar_http_iface_client:send_block_announcement(
						ar_test_node:peer_ip(peer1), Announcement)
			end
		end,
		%% Reverse the list so that the peer receives the blocks in the same order they
		%% were mined.
		lists:reverse(lists:sublist(PeerBI, TargetHeight))
	),
	%% Peer1 recent should now have timestamps, but also black out the most recent
	%% ones.
	?assertEqual(expected_blocks(peer1, PeerBI),
		get_recent(ar_test_node:peer_ip(peer1), blocks)).

expected_blocks(Node, BI) ->
	expected_blocks(Node, BI, false).

%% @doc Build the JSON-shaped block list the /recent endpoint is expected to
%% return. When ForcePending is true every timestamp reads <<"pending">>.
expected_blocks(Node, BI, ForcePending) ->
	%% There are a few list reversals that happen here:
	%% 1. BI has the blocks in reverse chronological order (latest block first)
	%% 2. [Element | Acc] reverses the list into chronological order (latest block last)
	%% 3. The final lists:reverse puts the list back into reverse chronological order
	%% (latest block first)
	Blocks = lists:foldl(
		fun({H, _WeaveSize, _TXRoot}, Acc) ->
			B = ar_test_node:remote_call(Node, ar_block_cache, get, [block_cache, H]),
			Timestamp = case ForcePending of
				true ->
					<<"pending">>;
				false ->
					%% The most recent blocks are intentionally reported without
					%% a timestamp.
					case length(Acc) < ?RECENT_BLOCKS_WITHOUT_TIMESTAMP of
						true ->
							<<"pending">>;
						false ->
							ar_util:timestamp_to_seconds(B#block.receive_timestamp)
					end
			end,
			[#{
				<<"id">> => ar_util:encode(H),
				<<"received">> => Timestamp,
				<<"height">> => B#block.height
			} | Acc]
		end,
		[],
		lists:sublist(BI, ?CHECKPOINT_DEPTH)
	),
	lists:reverse(Blocks).
%% -------------------------------------------------------------------------------------------
%% Recent forks tests
%% -------------------------------------------------------------------------------------------
%% @doc Unit-style test for fork accounting: logs synthetic forks directly via
%% ar_chain_stats:log_fork/2 and checks the "forks" section of /recent.
test_get_recent_forks() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ForkRootB1 = #block{ indep_hash = <<"1">>, height = 1 },
	ForkRootB2 = #block{ indep_hash = <<"2">>, height = 2 },
	ForkRootB3 = #block{ indep_hash = <<"3">>, height = 3 },
	Orphans1 = [<<"a">>],
	%% Small sleeps keep the fork timestamps strictly ordered.
	timer:sleep(5),
	ar_chain_stats:log_fork(Orphans1, ForkRootB1),
	ExpectedFork1 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans1)),
		height = 2,
		block_ids = Orphans1
	},
	assert_forks_json_equal([ExpectedFork1]),
	Orphans2 = [<<"b">>, <<"c">>],
	timer:sleep(5),
	ar_chain_stats:log_fork(Orphans2, ForkRootB1),
	ExpectedFork2 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans2)),
		height = 2,
		block_ids = Orphans2
	},
	assert_forks_json_equal([ExpectedFork2, ExpectedFork1]),
	Orphans3 = [<<"b">>, <<"c">>, <<"d">>],
	timer:sleep(5),
	ar_chain_stats:log_fork(Orphans3, ForkRootB1),
	ExpectedFork3 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans3)),
		height = 2,
		block_ids = Orphans3
	},
	assert_forks_json_equal([ExpectedFork3, ExpectedFork2, ExpectedFork1]),
	Orphans4 = [<<"e">>, <<"f">>, <<"g">>],
	timer:sleep(5),
	ar_chain_stats:log_fork(Orphans4, ForkRootB2),
	ExpectedFork4 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans4)),
		height = 3,
		block_ids = Orphans4
	},
	assert_forks_json_equal([ExpectedFork4, ExpectedFork3, ExpectedFork2, ExpectedFork1]),
	%% Same fork seen again - not sure this is possible, but since we're just tracking
	%% forks based on when they occur, it should be handled.
	timer:sleep(5),
	ar_chain_stats:log_fork(Orphans3, ForkRootB1),
	assert_forks_json_equal(
		[ExpectedFork3, ExpectedFork4, ExpectedFork3, ExpectedFork2, ExpectedFork1]),
	%% If the fork is empty, ignore it.
	timer:sleep(5),
	ar_chain_stats:log_fork([], ForkRootB2),
	assert_forks_json_equal(
		[ExpectedFork3, ExpectedFork4, ExpectedFork3, ExpectedFork2, ExpectedFork1]),
	%% Confirm that limiting the number of forks returned is handled correctly (e.g.
	%% the oldest fork is not returned)
	Orphans5 = [<<"h">>, <<"i">>, <<"j">>],
	timer:sleep(5),
	ar_chain_stats:log_fork(Orphans5, ForkRootB3),
	ExpectedFork5 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans5)),
		height = 4,
		block_ids = Orphans5
	},
	assert_forks_json_equal(
		[ExpectedFork5, ExpectedFork3, ExpectedFork4, ExpectedFork3, ExpectedFork2]),
	ok.

%% @doc End-to-end fork test: creates real forks by mining on disconnected peers
%% and reconnecting them, then checks the forks reported by /recent.
test_recent_forks() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:start_peer(peer2, B0),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:connect_to_peer(peer2),
	ar_test_node:connect_peers(peer1, peer2),
	%% Mine a few blocks, shared by both peers
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 1),
	ar_test_node:wait_until_height(peer2, 1),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer1, 2),
	ar_test_node:wait_until_height(peer2, 2),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 3),
	ar_test_node:wait_until_height(peer2, 3),
	%% Disconnect peers, and have peer1 mine 1 block, and peer2 mine 3
	ar_test_node:disconnect_from(peer1),
	ar_test_node:disconnect_from(peer2),
	ar_test_node:disconnect_peers(peer1, peer2),
	ar_test_node:mine(peer1),
	BI1 = ar_test_node:wait_until_height(peer1, 4),
	Orphans1 = [ID || {ID, _, _} <- lists:sublist(BI1, 1)],
	Fork1 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans1)),
		height = 4,
		block_ids = Orphans1
	},
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 4),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 5),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 6),
	%% Reconnect the peers. This will orphan peer1's block
	ar_test_node:connect_to_peer(peer2),
	ar_test_node:wait_until_height(main, 6),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:wait_until_height(peer1, 6),
	ar_test_node:connect_peers(peer1, peer2),
	ar_test_node:wait_until_height(peer2, 6),
	%% Disconnect peers, and have peer1 mine 2 blocks, and peer2 mine 3
	ar_test_node:disconnect_from(peer1),
	ar_test_node:disconnect_from(peer2),
	ar_test_node:disconnect_peers(peer1, peer2),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 7),
	ar_test_node:mine(peer1),
	BI2 = ar_test_node:wait_until_height(peer1, 8),
	Orphans2 = [ID || {ID, _, _} <- lists:reverse(lists:sublist(BI2, 2))],
	Fork2 = #fork{
		id = crypto:hash(sha256, list_to_binary(Orphans2)),
		height = 7,
		block_ids = Orphans2
	},
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 7),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 8),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 9),
	%% Reconnect the peers. This will create a second fork as peer1's blocks are orphaned
	ar_test_node:connect_to_peer(peer2),
	ar_test_node:wait_until_height(main, 9),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:wait_until_height(peer1, 9),
	ar_test_node:connect_peers(peer1, peer2),
	ar_test_node:wait_until_height(peer2, 9),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:disconnect_from(peer2),
	ar_test_node:disconnect_peers(peer1, peer2),
	assert_forks_json_equal([Fork2, Fork1],
		get_recent(ar_test_node:peer_ip(peer1), forks)),
	ok.

%% @doc Compare expected forks with what the main node's /recent reports.
assert_forks_json_equal(ExpectedForks) ->
	assert_forks_json_equal(ExpectedForks, get_recent(ar_test_node:peer_ip(main), forks)).
%% @doc Compare #fork records with the JSON fork maps returned by /recent,
%% ignoring the <<"timestamp">> field which is not deterministic.
assert_forks_json_equal(ExpectedForks, ActualForks) ->
	ExpectedForksStripped = [
		#{
			<<"id">> => ar_util:encode(Fork#fork.id),
			<<"height">> => Fork#fork.height,
			<<"blocks">> => [ ar_util:encode(BlockID) || BlockID <- Fork#fork.block_ids ]
		}
	|| Fork <- ExpectedForks],
	ActualForksStripped = [
		maps:remove(<<"timestamp">>, Fork)
	|| Fork <- ActualForks ],
	?assertEqual(ExpectedForksStripped, ActualForksStripped).

%% @doc Fetch one section (blocks | forks) of a peer's /recent response.
get_recent(Peer, Type) ->
	case get_recent(Peer) of
		info_unavailable ->
			info_unavailable;
		Info ->
			maps:get(atom_to_binary(Type), Info)
	end.

%% @doc Fetch and decode a peer's /recent response; returns info_unavailable on
%% any HTTP or JSON failure.
get_recent(Peer) ->
	case ar_http:req(#{
				method => get,
				peer => Peer,
				path => "/recent",
				connect_timeout => 1000,
				timeout => 2 * 1000
			}) of
		{ok, {{<<"200">>, _}, _, JSON, _, _}} ->
			case ar_serialize:json_decode(JSON, [return_maps]) of
				{ok, JsonMap} ->
					JsonMap;
				{error, _} ->
					info_unavailable
			end;
		_ ->
			info_unavailable
	end.


================================================
FILE: apps/arweave/test/ar_mempool_tests.erl
================================================
-module(ar_mempool_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

%% @doc Start the main node and one peer with two funded wallets; returns the
%% data shared by every test case in this module.
start_node() ->
	%% Starting a node is slow so we'll run it once for the whole test module
	Key = ar_wallet:new(),
	OtherKey = ar_wallet:new(),
	LastTXID = crypto:strong_rand_bytes(32),
	[B0] = ar_weave:init([
		wallet(Key, 1000, LastTXID),
		wallet(OtherKey, 800, crypto:strong_rand_bytes(32))
	]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	ets:insert(node_state, [{wallet_list, B0#block.wallet_list}]),
	{Key, LastTXID, OtherKey, B0}.

%% @doc Clear the mempool and the related ETS state between test cases so each
%% case starts from an empty mempool.
reset_node_state() ->
	ar_mempool:reset(),
	ets:delete_all_objects(ar_tx_emitter_recently_emitted),
	ets:match_delete(node_state, {{tx, '_'}, '_'}),
	ets:match_delete(node_state, {{tx_prefixes, '_'}, '_'}).
%% @doc EUnit fixture: one slow node start for the whole module, with the
%% mempool reset before each case.
add_tx_test_() ->
	Timeout = 30,
	{setup, fun start_node/0,
		fun (GenesisData) ->
			{foreach, fun reset_node_state/0,
				[
					{timeout, Timeout, {with, GenesisData, [fun test_mempool_sorting/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_drop_low_priority_txs_header/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_drop_low_priority_txs_data/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_drop_low_priority_txs_data_and_header/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_clashing_last_tx/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_overspent_tx/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_mixed_deposit_spend_tx_old_address/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_mixed_deposit_spend_tx_new_address/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_clash_and_overspend_tx/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_clash_and_low_priority_tx/1]}},
					{timeout, Timeout, {with, GenesisData, [fun test_load_from_disk_denomination/1]}}
				]
			}
		end
	}.

%% @doc Test that mempool transactions are correctly sorted in priority order
test_mempool_sorting({{_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	%% Transactions are named with their expected, prioritized order.
	%% The sorting is assumed to consider, in order:
	%% 1. Reward (higher reward is higher priority)
	%% 2. Timestamp (lower timestamp is higher priority)
	%% 3. Format 1 transactions with a lot of data are deprioritized
	%%
	%% Only the above criteria are expected to impact sort order
	TX9 = tx(1, Owner, 15, crypto:strong_rand_bytes(200)),
	ar_mempool:add_tx(TX9, waiting),
	TX8 = tx(1, Owner, 20, crypto:strong_rand_bytes(200)),
	ar_mempool:add_tx(TX8, waiting),
	TX5 = tx(2, Owner, 1, <<>>),
	ar_mempool:add_tx(TX5, waiting),
	TX6 = tx(2, Owner, 1, <<"abc">>),
	ar_mempool:add_tx(TX6, waiting),
	TX7 = tx(1, Owner, 1, <<>>),
	ar_mempool:add_tx(TX7, waiting),
	TX1 = tx(1, Owner, 10, <<>>),
	ar_mempool:add_tx(TX1, waiting),
	TX2 = tx(1, Owner, 10, <<"abcdef">>),
	ar_mempool:add_tx(TX2, waiting),
	TX3 = tx(2, Owner, 10, <<>>),
	ar_mempool:add_tx(TX3, waiting),
	TX4 = tx(1, Owner, 10, <<>>),
	ar_mempool:add_tx(TX4, waiting),
	%% {HeaderSize, DataSize}
	%% HeaderSize: TX_SIZE_BASE per transaction plus the data size of all
	%% format 1 transactions.
	%% DataSize: the total data size of all format 2 transactions
	ExpectedMempoolSize = {(9 * ?TX_SIZE_BASE) + 200 + 200 + 6, 3},
	ExpectedTXIDs = [
		TX1#tx.id, TX2#tx.id, TX3#tx.id, TX4#tx.id, TX5#tx.id, TX6#tx.id,
		TX7#tx.id, TX8#tx.id, TX9#tx.id
	],
	assertMempoolTXIDs(ExpectedTXIDs, "Sorted mempool transactions"),
	assertMempoolSize(ExpectedMempoolSize).

%% @doc Test dropping transactions when the mempool max header size is exceeded
test_drop_low_priority_txs_header({{_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	%% Add 9x Format 1 transactions each with a data size equal to
	%% 1/10 the MEMPOOL_HEADER_SIZE_LIMIT. This puts us close to exceeding the
	%% header size limit
	NumTransactions = 9,
	DataSize = ?MEMPOOL_HEADER_SIZE_LIMIT div (NumTransactions + 1),
	{ExpectedTXIDs, HighestReward, LowestReward} =
		add_transactions(NumTransactions, 1, Owner, DataSize),
	ExpectedMempoolSize = {NumTransactions * (?TX_SIZE_BASE + DataSize),0},
	assertMempoolTXIDs(ExpectedTXIDs, "Mempool is below the header size limit"),
	assertMempoolSize(ExpectedMempoolSize),
	%% Add multiple low priority transactions to push us over the header size
	%% limit. All of these new transactions should be dropped
	ar_mempool:add_tx(
		tx(1, Owner, LowestReward-2, crypto:strong_rand_bytes(500)), waiting),
	ar_mempool:add_tx(
		tx(1, Owner, LowestReward-2, crypto:strong_rand_bytes(500)), waiting),
	ar_mempool:add_tx(
		tx(1, Owner, LowestReward-2, crypto:strong_rand_bytes(500)), waiting),
	ar_mempool:add_tx(
		tx(1, Owner, LowestReward-1, crypto:strong_rand_bytes(DataSize)), waiting),
	assertMempoolTXIDs(
		ExpectedTXIDs,
		"Multiple low priority TX pushed Mempool over the header size limit"),
	assertMempoolSize(ExpectedMempoolSize),
	%% Add a high priority transaction to push us over the header size limit.
	%% This newest transaction should *not* be dropped, instead a lower priority
	%% transaction should be dropped
	TXHigh = tx(1, Owner, HighestReward+1, crypto:strong_rand_bytes(DataSize)),
	ar_mempool:add_tx(TXHigh, waiting),
	ExpectedTXIDs2 =
		[TXHigh#tx.id | lists:delete(lists:last(ExpectedTXIDs), ExpectedTXIDs)],
	assertMempoolTXIDs(
		ExpectedTXIDs2,
		"High priority TX pushed Mempool over the header size limit"),
	assertMempoolSize(ExpectedMempoolSize).

%% @doc Test dropping transactions when the mempool max data size is exceeded
test_drop_low_priority_txs_data({{_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	%% Add 9x Format 2 transactions each with a data size slightly larger than
	%% 1/10 the MEMPOOL_DATA_SIZE_LIMIT. This puts us close to exceeding the
	%% data size limit
	NumTransactions = 9,
	DataSize = (?MEMPOOL_DATA_SIZE_LIMIT div (NumTransactions+1)) + 1,
	{ExpectedTXIDs, HighestReward, LowestReward} =
		add_transactions(NumTransactions, 2, Owner, DataSize),
	ExpectedMempoolSize = {NumTransactions * ?TX_SIZE_BASE, NumTransactions * DataSize},
	assertMempoolTXIDs(ExpectedTXIDs, "Mempool is below the data size limit"),
	assertMempoolSize(ExpectedMempoolSize),
	%% Add multiple low priority transactions to push us over the data size
	%% limit. All of these new transactions should be dropped
	ar_mempool:add_tx(
		tx(2, Owner, LowestReward-2, crypto:strong_rand_bytes(500)), waiting),
	ar_mempool:add_tx(
		tx(2, Owner, LowestReward-2, crypto:strong_rand_bytes(500)), waiting),
	ar_mempool:add_tx(
		tx(2, Owner, LowestReward-2, crypto:strong_rand_bytes(500)), waiting),
	ar_mempool:add_tx(
		tx(2, Owner, LowestReward-1, crypto:strong_rand_bytes(DataSize)), waiting),
	assertMempoolTXIDs(
		ExpectedTXIDs,
		"Low priority TX pushed Mempool over the data size limit"),
	assertMempoolSize(ExpectedMempoolSize),
	%% Add a high priority transaction to push us over the data size limit.
	%% This newest transaction should *not* be dropped, instead a lower priority
	%% transaction should be dropped
	TXHigh = tx(2, Owner, HighestReward+1, crypto:strong_rand_bytes(DataSize)),
	ar_mempool:add_tx(TXHigh, waiting),
	ExpectedTXIDs2 =
		[TXHigh#tx.id | lists:delete(lists:last(ExpectedTXIDs), ExpectedTXIDs)],
	assertMempoolTXIDs(
		ExpectedTXIDs2,
		"High priority TX pushed Mempool over the data size limit"),
	assertMempoolSize(ExpectedMempoolSize).
%% @doc Test dropping transactions when both the mempool data size and header
%% size are exceeded
test_drop_low_priority_txs_data_and_header({{_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	NumTransactions = 9,
	%% Format 1 TXs count towards the header size limit.
	Format1DataSize = ?MEMPOOL_HEADER_SIZE_LIMIT div (NumTransactions+1),
	{Format1ExpectedTXIDs, _Format1HighestReward, Format1LowestReward} =
		add_transactions(NumTransactions, 1, Owner, Format1DataSize),
	%% Format 2 TXs count towards the data size limit.
	Format2DataSize = (?MEMPOOL_DATA_SIZE_LIMIT div (NumTransactions+1)) + 1,
	{Format2ExpectedTXIDs, _Format2HighestReward, Format2LowestReward} =
		add_transactions(NumTransactions, 2, Owner, Format2DataSize),
	ExpectedTXIDs = Format2ExpectedTXIDs ++ Format1ExpectedTXIDs,
	%% Header size: the TX base size of all TXs (both formats) plus the format 1 data.
	%% Data size: only the format 2 data.
	{ExpectedHeaderSize, ExpectedDataSize} = {
		(2 * NumTransactions * ?TX_SIZE_BASE) + (NumTransactions * Format1DataSize),
		NumTransactions * Format2DataSize},
	assertMempoolTXIDs(ExpectedTXIDs,
		"Mempool is below both the data and header size limits"),
	assertMempoolSize({ExpectedHeaderSize, ExpectedDataSize}),
	%% Fill (almost) all the remaining header space with a low priority format 1 TX.
	RemainingHeaderSpace = ?MEMPOOL_HEADER_SIZE_LIMIT - ExpectedHeaderSize,
	ar_mempool:add_tx(
		tx(
			1, Owner, Format1LowestReward-2,
			crypto:strong_rand_bytes(RemainingHeaderSpace - ?TX_SIZE_BASE)
		),
		waiting),
	%% This transaction will cause both the header and data size limits to be
	%% exceeded simultaneously
	ar_mempool:add_tx(
		tx(
			2, Owner, Format2LowestReward-1,
			crypto:strong_rand_bytes(Format2DataSize)
		),
		waiting),
	%% Last two transactions should be dropped
	assertMempoolTXIDs(
		ExpectedTXIDs,
		"TX pushed the Mempool over both the header and data size limits").

%% @doc Test that only 1 TX with a given last_tx can exist in the mempool.
test_clashing_last_tx({{_, {_, Owner}}, LastTXID, _OtherKey, B0}) ->
	%% 31 random bytes; each TXID below prepends one distinguishing byte so the
	%% alphanumeric order of the IDs is controlled by the test.
	BaseID = crypto:strong_rand_bytes(31),
	%% Add some extra, non-clashing, transactions to test that only clashing
	%% transactions are dropped
	NumTransactions = 2,
	Format1DataSize = 200,
	{Format1ExpectedTXIDs, Format1HighestReward, _} =
		add_transactions(NumTransactions, 1, Owner, Format1DataSize),
	Format2DataSize = 50,
	{Format2ExpectedTXIDs, Format2HighestReward, _} =
		add_transactions(NumTransactions, 2, Owner, Format2DataSize),
	ExpectedTXIDs = Format2ExpectedTXIDs ++ Format1ExpectedTXIDs,
	Test0 = "Test 0: Transactions with empty last_tx can never clash",
	assertMempoolTXIDs(ExpectedTXIDs, Test0),
	Test1 = "Test 1: Lower reward TX is dropped",
	TX1 = tx(2, Owner, Format2HighestReward+2, <<>>, <<"c", BaseID/binary>>, LastTXID),
	TX2 = tx(2, Owner, Format2HighestReward+1, <<>>, <<"d", BaseID/binary>>, LastTXID),
	ar_mempool:add_tx(TX1, waiting),
	ar_mempool:add_tx(TX2, waiting),
	assertMempoolTXIDs([TX1#tx.id | ExpectedTXIDs], Test1),
	Test2 = "Test 2: Higher reward TX replace existing TX with lower reward",
	TX3 = tx(2, Owner, Format2HighestReward+3, <<>>, <<"e", BaseID/binary>>, LastTXID),
	ar_mempool:add_tx(TX3, waiting),
	assertMempoolTXIDs([TX3#tx.id | ExpectedTXIDs], Test2),
	%% Tests 3 and 4: equal rewards, so the TXID order breaks the tie.
	Test3 = "Test 3: Higher TXID alphanumeric order replaces lower",
	TX4 = tx(2, Owner, Format2HighestReward+3, <<>>, <<"f", BaseID/binary>>, LastTXID),
	ar_mempool:add_tx(TX4, waiting),
	assertMempoolTXIDs([TX4#tx.id | ExpectedTXIDs], Test3),
	Test4 = "Test 4: Lower TXID alphanumeric order is dropped",
	TX5 = tx(2, Owner, Format2HighestReward+3, <<>>, <<"b", BaseID/binary>>, LastTXID),
	ar_mempool:add_tx(TX5, waiting),
	assertMempoolTXIDs([TX4#tx.id | ExpectedTXIDs], Test4),
	Test5 = "Test 5: Deprioritized format 1 TX is dropped",
	TX6 = tx(
		1, Owner, Format1HighestReward+3, crypto:strong_rand_bytes(200),
		<<"g", BaseID/binary>>, LastTXID
	),
	ar_mempool:add_tx(TX6, waiting),
	assertMempoolTXIDs([TX4#tx.id | ExpectedTXIDs], Test5),
	Test6 = "Test 6: High priority format 1 replaces a low priority format 2",
	TX7 = tx(1, Owner, Format1HighestReward+3, <<>>, <<"h", BaseID/binary>>, LastTXID),
	ar_mempool:add_tx(TX7, waiting),
	assertMempoolTXIDs([TX7#tx.id | ExpectedTXIDs], Test6),
	Test7 = "Test 7: TX last_tx set to block hash can not clash",
	TX8 = tx(
		2, Owner, Format2HighestReward+4, <<>>,
		<<"i", BaseID/binary>>, B0#block.indep_hash
	),
	TX9 = tx(
		2, Owner, Format2HighestReward+5, <<>>,
		<<"j", BaseID/binary>>, B0#block.indep_hash
	),
	ar_mempool:add_tx(TX8, waiting),
	ar_mempool:add_tx(TX9, waiting),
	ExpectedTXIDs2 = [TX9#tx.id | [TX8#tx.id | [TX7#tx.id | ExpectedTXIDs]]],
	assertMempoolTXIDs(ExpectedTXIDs2, Test7),
	Test8 = "Test 8: TX last_tx set to <<>> can not clash",
	TX10 = tx(2, Owner, Format2HighestReward+6, <<>>, <<"k", BaseID/binary>>, <<>>),
	TX11 = tx(2, Owner, Format2HighestReward+7, <<>>, <<"l", BaseID/binary>>, <<>>),
	ar_mempool:add_tx(TX10, waiting),
	ar_mempool:add_tx(TX11, waiting),
	assertMempoolTXIDs([TX11#tx.id | [TX10#tx.id | ExpectedTXIDs2]], Test8).

%% @doc Test that TXs that would overspend an account are dropped from the
%% mempool.
test_overspent_tx({{_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	%% 31 random bytes; each TXID below prepends one distinguishing byte so the
	%% alphanumeric order of the IDs is controlled by the test.
	%% NOTE(review): the account balance funds 2 TXs of quantity 400 but not 3 —
	%% confirm against the fixture wallet balance set up outside this view.
	BaseID = crypto:strong_rand_bytes(31),
	Test1 = "Test 1: Lower reward TX is dropped",
	TX1 = tx(2, Owner, 3, <<>>, <<"c", BaseID/binary>>, <<>>, 400),
	TX2 = tx(2, Owner, 2, <<>>, <<"d", BaseID/binary>>, <<>>, 400),
	TX3 = tx(2, Owner, 1, <<>>, <<"e", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX1, waiting),
	ar_mempool:add_tx(TX2, waiting),
	ar_mempool:add_tx(TX3, waiting),
	assertMempoolTXIDs([TX1#tx.id, TX2#tx.id], Test1),
	Test2 = "Test 2: Higher reward TX replace existing TX with lower reward",
	TX4 = tx(2, Owner, 4, <<>>, <<"f", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX4, waiting),
	assertMempoolTXIDs([TX4#tx.id, TX1#tx.id], Test2),
	%% Tests 3 and 4: rewards tie with TX1/TX5, so TXID order breaks the tie.
	Test3 = "Test 3: Higher TXID alphanumeric order replaces lower",
	TX5 = tx(2, Owner, 3, <<>>, <<"g", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX5, waiting),
	assertMempoolTXIDs([TX4#tx.id, TX5#tx.id], Test3),
	Test4 = "Test 4: Lower TXID alphanumeric order is dropped",
	TX6 = tx(2, Owner, 3, <<>>, <<"b", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX6, waiting),
	assertMempoolTXIDs([TX4#tx.id, TX5#tx.id], Test4),
	Test5 = "Test 5: Deprioritized format 1 TX is dropped",
	TX7 = tx(1, Owner, 3, crypto:strong_rand_bytes(200), <<"h", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX7, waiting),
	assertMempoolTXIDs([TX4#tx.id, TX5#tx.id], Test5),
	Test6 = "Test 6: High priority format 1 replaces a low priority format 2",
	TX8 = tx(1, Owner, 3, <<>>, <<"i", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX8, waiting),
	assertMempoolTXIDs([TX4#tx.id, TX8#tx.id], Test6),
	%% The reward alone (400) can overspend even when quantity is 0.
	Test7 = "Test 7: 0 quantity TX can still overspend",
	TX9 = tx(2, Owner, 400, <<>>, <<"j", BaseID/binary>>, <<>>, 0),
	ar_mempool:add_tx(TX9, waiting),
	assertMempoolTXIDs([TX9#tx.id, TX4#tx.id], Test7).

%% @doc Test that unconfirmed deposit TXs are ignored when determining whether an
%% account is overspent. In this case the deposit comes from an address which
%% has an on-chain balance.
test_mixed_deposit_spend_tx_old_address({
		{_, Pub} = {_, {_, Owner}}, _LastTXID, {_, {_, OtherOwner}}, _B0}) ->
	BaseID = crypto:strong_rand_bytes(31),
	Origin = ar_wallet:to_address(Pub),
	Test1 = "Test 1: Unconfirmed deposits from old addresses are not considered for overspend",
	%% TX2 deposits 500 into Origin from the other (funded) account, but the
	%% deposit is unconfirmed, so it must not fund TX3/TX4.
	TX2 = tx(2, OtherOwner, 9, <<>>, <<"d", BaseID/binary>>, <<>>, 500, Origin),
	TX3 = tx(2, Owner, 8, <<>>, <<"c", BaseID/binary>>, <<>>, 600),
	TX4 = tx(2, Owner, 7, <<>>, <<"b", BaseID/binary>>, <<>>, 600),
	ar_mempool:add_tx(TX2, waiting),
	ar_mempool:add_tx(TX3, waiting),
	ar_mempool:add_tx(TX4, waiting),
	%% TX4 (the lowest reward spend) is dropped as an overspend.
	assertMempoolTXIDs([TX2#tx.id, TX3#tx.id], Test1).

%% @doc Test that unconfirmed deposit TXs are ignored when determining whether an
%% account is overspent. In this case the deposit comes from an address which
%% has not made it on-chain yet (deposit and spend are both in the mempool).
test_mixed_deposit_spend_tx_new_address({
		{_, Pub} = {_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	BaseID = crypto:strong_rand_bytes(31),
	Origin = ar_wallet:to_address(Pub),
	%% A brand new wallet with no on-chain balance at all.
	{_, NewPub} = {_, {_, NewOwner}} = ar_wallet:new(),
	NewAddr = ar_wallet:to_address(NewPub),
	Test1 = "Test 1: Unconfirmed deposits from new addresses are not considered for overspend",
	%% TX1 funds the new address; TX2 spends from it before the deposit confirms.
	TX1 = tx(2, Owner, 10, <<>>, <<"e", BaseID/binary>>, <<>>, 400, NewAddr),
	TX2 = tx(2, NewOwner, 9, <<>>, <<"d", BaseID/binary>>, <<>>, 400, Origin),
	TX3 = tx(2, Owner, 8, <<>>, <<"c", BaseID/binary>>, <<>>, 400),
	TX4 = tx(2, Owner, 7, <<>>, <<"b", BaseID/binary>>, <<>>, 400),
	ar_mempool:add_tx(TX1, waiting),
	ar_mempool:add_tx(TX2, waiting),
	ar_mempool:add_tx(TX3, waiting),
	ar_mempool:add_tx(TX4, waiting),
	%% TX2 (spend from the unfunded new address) and TX4 (overspend) are dropped.
	assertMempoolTXIDs([TX1#tx.id, TX3#tx.id], Test1).

%% @doc Test a TX that has a last_tx clash and overspends an account is
%% handled correctly.
test_clash_and_overspend_tx({{_, {_, Owner}}, LastTXID, _OtherKey, _B0}) ->
	BaseID = crypto:strong_rand_bytes(31),
	Test1 = "Test 1: Clashing TXs are dropped before overspend is calculated",
	%% TX2/TX3/TX4 all share the same last_tx anchor; only the highest reward
	%% one (TX4) survives the clash, and only then is overspend considered.
	TX1 = tx(2, Owner, 3, <<>>, <<"d", BaseID/binary>>, <<>>, 400),
	TX2 = tx(2, Owner, 2, <<>>, <<"c", BaseID/binary>>, LastTXID, 400),
	TX3 = tx(2, Owner, 1, <<>>, <<"b", BaseID/binary>>, LastTXID, 400),
	TX4 = tx(2, Owner, 4, <<>>, <<"e", BaseID/binary>>, LastTXID, 400),
	ar_mempool:add_tx(TX1, waiting),
	ar_mempool:add_tx(TX2, waiting),
	ar_mempool:add_tx(TX3, waiting),
	ar_mempool:add_tx(TX4, waiting),
	assertMempoolTXIDs([TX4#tx.id, TX1#tx.id], Test1).

%% @doc Test that the right TXs are dropped when the mempool max data size is reached due to
%% clashing TXs. Only the clashing TXs should be dropped.
test_clash_and_low_priority_tx({{_, {_, Owner}}, LastTXID, _OtherKey, _B0}) ->
	%% Add 9x Format 2 transactions each with a data size slightly larger than
	%% 1/10 the MEMPOOL_DATA_SIZE_LIMIT. This puts us close to exceeding the
	%% data size limit
	NumTransactions = 9,
	DataSize = (?MEMPOOL_DATA_SIZE_LIMIT div (NumTransactions+1)) + 1,
	{ExpectedTXIDs, HighestReward, LowestReward} =
		add_transactions(NumTransactions, 2, Owner, DataSize),
	%% An empty-data TX anchored on LastTXID; it will be the clash winner.
	TX1 = tx(2, Owner, HighestReward+1, <<>>, crypto:strong_rand_bytes(32), LastTXID),
	ar_mempool:add_tx(TX1, waiting),
	ExpectedMempoolSize =
		{(NumTransactions+1) * ?TX_SIZE_BASE, NumTransactions * DataSize},
	assertMempoolTXIDs([TX1#tx.id] ++ ExpectedTXIDs,
		"Mempool is below the data size limit"),
	assertMempoolSize(ExpectedMempoolSize),
	%% A lower-reward TX with the same anchor: it loses the clash and must be
	%% the only TX dropped, even though the pool is near the data size limit.
	ClashTX = tx(
		2, Owner, LowestReward-1, crypto:strong_rand_bytes(DataSize),
		crypto:strong_rand_bytes(32), LastTXID),
	ar_mempool:add_tx(ClashTX, waiting),
	assertMempoolTXIDs([TX1#tx.id] ++ ExpectedTXIDs, "Clashing TX dropped"),
	assertMempoolSize(ExpectedMempoolSize).
%% @doc Add NumTransactions TXs of the given format/owner/data size to the
%% mempool, with rewards descending from NumTransactions+2 down to 3.
%% Returns {TXIDs in descending reward (priority) order, HighestReward,
%% LowestReward}.
add_transactions(NumTransactions, Format, Owner, DataSize) ->
	HighestReward = NumTransactions+2,
	LowestReward = 3,
	TXs = [
		tx(Format, Owner, Reward, crypto:strong_rand_bytes(DataSize))
		|| Reward <- lists:seq(HighestReward, LowestReward, -1)
	],
	lists:foreach(
		fun(TX) -> ar_mempool:add_tx(TX, waiting) end,
		TXs),
	ExpectedTXIDs = lists:map(
		fun(#tx{id = TXID}) -> TXID end,
		TXs),
	{ExpectedTXIDs, HighestReward, LowestReward}.

%% @doc Test that load_from_disk computes origin_spent_total_map using the maximum
%% denomination found among stored TXs, not denomination 0.
%% TX1 has denomination 1, TX2 has denomination 2. The spent totals must
%% be computed in denomination 2 (the max). With the bug (denomination 0),
%% the denomination-1 TX cost is not scaled up by 1000x.
test_load_from_disk_denomination({{_, {_, Owner}}, _LastTXID, _OtherKey, _B0}) ->
	BaseID = crypto:strong_rand_bytes(31),
	TX1 = (tx(2, Owner, 3000, <<>>, <<"a", BaseID/binary>>, <<>>))#tx{
		denomination = 1
	},
	TX2 = (tx(2, Owner, 5, <<>>, <<"b", BaseID/binary>>, <<>>))#tx{
		denomination = 2
	},
	%% Persist the mempool the way the node does, then reload it from disk.
	SerializedTXs = #{
		TX1#tx.id => {ar_serialize:tx_to_binary(TX1), waiting},
		TX2#tx.id => {ar_serialize:tx_to_binary(TX2), waiting}
	},
	ar_storage:write_term(mempool, {SerializedTXs, {0, 0}}),
	reset_node_state(),
	ar_mempool:load_from_disk(),
	[{origin_spent_total_denomination, LoadedDenomination}] =
		ets:lookup(node_state, origin_spent_total_denomination),
	?assertEqual(2, LoadedDenomination,
		"load_from_disk should use the max TX denomination, not 0"),
	[{origin_spent_total_map, SpentTotalMap}] =
		ets:lookup(node_state, origin_spent_total_map),
	Addr = ar_wallet:to_address(Owner, ?DEFAULT_KEY_TYPE),
	%% Both TX costs redenominated to denomination 2 before summing.
	TX1Cost = ar_pricing:redenominate(3000, 1, 2),
	TX2Cost = ar_pricing:redenominate(5, 2, 2),
	ExpectedTotal = TX1Cost + TX2Cost,
	?assertEqual(ExpectedTotal, maps:get(Addr, SpentTotalMap),
		"Spent totals should be redenominated to the max TX denomination").
%% @doc Build a {Address, Balance, LastTXID} wallet tuple for genesis block data.
wallet({_, Pub}, Balance, LastTXID) ->
	{ar_wallet:to_address(Pub), Balance, LastTXID}.

%% @doc Build a test #tx{} record. The shorter arities default the TXID to a
%% random 32-byte binary, the anchor to <<>>, the quantity to 0 and the
%% target to <<>>.
tx(Format, Owner, Reward, Data) ->
	tx(Format, Owner, Reward, Data, crypto:strong_rand_bytes(32), <<>>).

tx(Format, Owner, Reward, Data, TXID, Anchor) ->
	tx(Format, Owner, Reward, Data, TXID, Anchor, 0).

tx(Format, Owner, Reward, Data, TXID, Anchor, Quantity) ->
	tx(Format, Owner, Reward, Data, TXID, Anchor, Quantity, <<>>).

tx(Format, Owner, Reward, Data, TXID, Anchor, Quantity, Target) ->
	#tx{
		id = TXID,
		format = Format,
		reward = Reward,
		data = Data,
		data_size = byte_size(Data),
		owner = Owner,
		target = Target,
		last_tx = Anchor,
		quantity = Quantity
	}.

%% @doc Assert the {HeaderSize, DataSize} pair stored under mempool_size in
%% the node_state ETS table.
assertMempoolSize(ExpectedMempoolSize) ->
	[{mempool_size, MempoolSize}] = ets:lookup(node_state, mempool_size),
	?assertEqual(ExpectedMempoolSize, MempoolSize).

%% @doc Assert that the mempool contains exactly ExpectedTXIDs (in priority
%% order, all with status 'waiting') across the ETS table, the priority set,
%% the last_tx map and the origin_tx map.
assertMempoolTXIDs(ExpectedTXIDs, Title) ->
	%% Unordered list of all TXIDs in the mempool
	TXIDs = lists:map(
		fun([TXID]) -> TXID end,
		ets:match(node_state, {{tx, '$1'}, '_'})
	),
	%% Ordered list of expected TXIDs paired with their expected status
	ExpectedTXIDsStatuses = lists:map(
		fun(ExpectedTXID) -> {ExpectedTXID, waiting} end,
		ExpectedTXIDs
	),
	%% gb_sets:to_list returns elements in ascending order of utility
	%% (lowest reward, latest TX first), so we need to reverse the list to get
	%% the true priority order (highest reward, oldest TX first)
	MempoolInPriorityOrder = lists:reverse(gb_sets:to_list(ar_mempool:get_priority_set())),
	%% Ordered list of actual TXIDs paired with their actual status
	ActualTXIDsStatuses = lists:map(
		fun({_, ActualTXID, ActualStatus}) -> {ActualTXID, ActualStatus} end,
		MempoolInPriorityOrder
	),
	%% If we're adding and removing TX to/from the last_tx_map and origin_tx_map
	%% correctly, this list of TXIDs should match the total set of TXIDs in the
	%% mempool
	LastTXMapTXIDs = lists:foldl(
		fun({_Priority, TXID}, Acc) -> [TXID | Acc] end,
		[],
		lists:flatten(
			lists:foldl(
				fun(Set, Acc) -> [gb_sets:to_list(Set) | Acc] end,
				[],
				maps:values(ar_mempool:get_last_tx_map())
			)
		)
	),
	OriginTXMapTXIDs = lists:foldl(
		fun({_Priority, TXID}, Acc) -> [TXID | Acc] end,
		[],
		lists:flatten(
			lists:foldl(
				fun(Set, Acc) -> [gb_sets:to_list(Set) | Acc] end,
				[],
				maps:values(ar_mempool:get_origin_tx_map())
			)
		)
	),
	%% Only this first test will assert the ordering of TXIDs in the mempools
	?assertEqual(ExpectedTXIDsStatuses, ActualTXIDsStatuses, Title),
	%% These remaining tests only assert that the unordered set of TXIDs is correct
	?assertEqual(lists:sort(ExpectedTXIDs), lists:sort(TXIDs), Title),
	?assertEqual(lists:sort(ExpectedTXIDs), lists:sort(LastTXMapTXIDs), Title),
	?assertEqual(lists:sort(ExpectedTXIDs), lists:sort(OriginTXMapTXIDs), Title).


================================================
FILE: apps/arweave/test/ar_mine_randomx_tests.erl
================================================
-module(ar_mine_randomx_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

-define(ENCODED_RX512_HASH, <<"NcXUtn7gA42QoM8MtaS-vgVy8gJ21EE2YxV18mHndmM">>).
-define(ENCODED_RX4096_HASH, <<"HqbpuoVNu8u4l4slkwnP3fvX9Q-mgjFH-3LgCyhMPPk">>).
-define(ENCODED_NONCE, <<"f_z7RLug8etm3SrmRf-xPwXEL0ZQ_xHng2A5emRDQBw">>).
-define(ENCODED_SEGMENT,
	<<"7XM3fgTCAY2GFpDjPZxlw4yw5cv8jNzZSZawywZGQ6_Ca-JDy2nX_MC2vjrIoDGp">>
).

%% @doc Uniform-arity wrapper around the rx512 encrypt NIF so the spora_2_6 and
%% composite code paths can share the same test helpers. _ExtraArgs is unused
%% for rx512.
encrypt_chunk({rx512, RandomXState}, Key, Chunk, PackingRounds, JIT, LargePages, HardwareAES,
		_ExtraArgs) ->
	ar_rx512_nif:rx512_encrypt_chunk_nif(
		RandomXState, Key, Chunk, PackingRounds, JIT, LargePages, HardwareAES).

%% @doc Uniform-arity wrapper around the rx512 decrypt NIF. The decrypted size
%% is taken from the chunk itself.
decrypt_chunk({rx512, RandomXState}, Key, Chunk, PackingRounds, JIT, LargePages, HardwareAES,
		_ExtraArgs) ->
	ar_rx512_nif:rx512_decrypt_chunk_nif(
		RandomXState, Key, Chunk, byte_size(Chunk), PackingRounds, JIT, LargePages,
		HardwareAES).
%% @doc Uniform-arity wrapper around the rx512 reencrypt NIF (unpack with Key1,
%% repack with Key2 in a single call).
reencrypt_chunk({rx512, RandomXState}, Key1, Key2, Chunk, PackingRounds1, PackingRounds2,
		JIT, LargePages, HardwareAES, _ExtraArgs) ->
	ar_rx512_nif:rx512_reencrypt_chunk_nif(
		RandomXState, Key1, Key2, Chunk, byte_size(Chunk), PackingRounds1, PackingRounds2,
		JIT, LargePages, HardwareAES).

%% @doc Uniform-arity wrapper around the rx4096 composite encrypt NIF.
%% ExtraArgs carries [IterationCount, SubChunkCount].
encrypt_composite_chunk({rx4096, RandomXState}, Key, Chunk, PackingRounds, JIT, LargePages,
		HardwareAES, [IterationCount, SubChunkCount] = _ExtraArgs) ->
	ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState, Key, Chunk, JIT, LargePages, HardwareAES, PackingRounds,
		IterationCount, SubChunkCount).

%% @doc Uniform-arity wrapper around the rx4096 composite decrypt NIF.
%% ExtraArgs carries [IterationCount, SubChunkCount].
decrypt_composite_chunk({rx4096, RandomXState}, Key, Chunk, PackingRounds, JIT, LargePages,
		HardwareAES, [IterationCount, SubChunkCount] = _ExtraArgs) ->
	ar_rx4096_nif:rx4096_decrypt_composite_chunk_nif(
		RandomXState, Key, Chunk, byte_size(Chunk), JIT, LargePages, HardwareAES,
		PackingRounds, IterationCount, SubChunkCount).

%% @doc Uniform-arity wrapper around the rx4096 composite reencrypt NIF.
%% ExtraArgs carries the source and target iteration and sub-chunk counts.
reencrypt_composite_chunk({rx4096, RandomXState}, Key1, Key2, Chunk, PackingRounds1,
		PackingRounds2, JIT, LargePages, HardwareAES,
		[IterationCount1, IterationCount2, SubChunkCount1, SubChunkCount2] = _ExtraArgs) ->
	ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(
		RandomXState, Key1, Key2, Chunk, JIT, LargePages, HardwareAES,
		PackingRounds1, PackingRounds2, IterationCount1, IterationCount2,
		SubChunkCount1, SubChunkCount2).

%% @doc EUnit setup: initialize fast (full dataset) and light (cache only)
%% RandomX states for both the 512 MiB and 4096 MiB dataset variants.
setup() ->
	FastState512 = ar_mine_randomx:init_fast2(rx512, ?RANDOMX_PACKING_KEY, 0, 0,
		erlang:system_info(dirty_cpu_schedulers_online)),
	LightState512 = ar_mine_randomx:init_light2(rx512, ?RANDOMX_PACKING_KEY, 0, 0),
	FastState4096 = ar_mine_randomx:init_fast2(rx4096, ?RANDOMX_PACKING_KEY, 0, 0,
		erlang:system_info(dirty_cpu_schedulers_online)),
	LightState4096 = ar_mine_randomx:init_light2(rx4096, ?RANDOMX_PACKING_KEY, 0, 0),
	{FastState512, LightState512, FastState4096, LightState4096}.

%% @doc Wrap a test fun in a 120-second EUnit timeout, binding the shared fixture.
test_register(TestFun, Fixture) ->
	{timeout, 120, {with, Fixture, [TestFun]}}.
%% @doc Top-level EUnit generator: one shared RandomX setup, many test cases.
randomx_suite_test_() ->
	{setup, fun setup/0,
		fun (SetupData) ->
			[
				test_register(fun test_state/1, SetupData),
				test_register(fun test_bad_state/1, SetupData),
				test_register(fun test_regression/1, SetupData),
				test_register(fun test_empty_chunk_fails/1, SetupData),
				test_register(fun test_nif_wrappers/1, SetupData),
				test_register(fun test_pack_unpack/1, SetupData),
				test_register(fun test_repack/1, SetupData),
				test_register(fun test_input_changes_packing/1, SetupData),
				test_register(fun test_composite_packing/1, SetupData),
				test_register(fun test_composite_packs_incrementally/1, SetupData),
				test_register(fun test_composite_unpacked_sub_chunks/1, SetupData),
				test_register(fun test_composite_repack/1, SetupData),
				test_register(fun test_hash/1, SetupData)
			]
		end
	}.

%% -------------------------------------------------------------------------------------------
%% spora_2_6 and composite packing tests
%% -------------------------------------------------------------------------------------------
test_state({ FastState512, LightState512, FastState4096, LightState4096}) ->
	%% The legacy dataset size is 568,433,920 bytes. Roughly 30 MiB more than 512 MiB.
	%% Our nifs don't have access to the raw dataset size used in the RandomX C code, but
	%% they have access to the dataset item count - which is just the size divided by 64.
	%% So the expected dataset size is 568,433,920 / 64 = 8,881,780 items.
	%%
	%% The new dataset size is 4,326,530,304 bytes. Roughly 30 MiB more than 4 GiB.
	%% So the expected dataset size is 4,326,530,304 / 64 = 67,602,036 items.
	?assertEqual(
		{ok, {rx512, fast, 8881780, 2097152}},
		ar_mine_randomx:info(FastState512)
	),
	?assertEqual(
		{ok, {rx4096, fast, 67602036, 2097152}},
		ar_mine_randomx:info(FastState4096)
	),
	%% Unfortunately we don't have access to the cache size. The randomx_info_nif will check
	%% that in fast mode the cache is not initialized, and in light mode the dataset is not
	%% initialized and return an error if either check fails.
	?assertEqual(
		{ok, {rx512, light, 0, 2097152}},
		ar_mine_randomx:info(LightState512)
	),
	?assertEqual(
		{ok, {rx4096, light, 0, 2097152}},
		ar_mine_randomx:info(LightState4096)
	).

%% @doc Every ar_mine_randomx entry point must reject a state whose mode tag is
%% not a known RandomX mode.
test_bad_state(_) ->
	BadState = {bad_mode, bad_state},
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:info(BadState)),
	?assertEqual({error, invalid_randomx_mode},
		ar_mine_randomx:hash(BadState, crypto:strong_rand_bytes(32))),
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:randomx_encrypt_chunk(
		{spora_2_6, crypto:strong_rand_bytes(32)}, BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(?DATA_CHUNK_SIZE))),
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:randomx_encrypt_chunk(
		{composite, 2, crypto:strong_rand_bytes(32)}, BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(?DATA_CHUNK_SIZE))),
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:randomx_decrypt_chunk(
		{spora_2_6, crypto:strong_rand_bytes(32)}, BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
		?DATA_CHUNK_SIZE)),
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:randomx_decrypt_chunk(
		{composite, 2, crypto:strong_rand_bytes(32)}, BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
		?DATA_CHUNK_SIZE)),
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:randomx_decrypt_sub_chunk(
		{composite, 2, crypto:strong_rand_bytes(32)}, BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(?DATA_CHUNK_SIZE), 0)),
	?assertEqual({error, invalid_randomx_mode}, ar_mine_randomx:randomx_reencrypt_chunk(
		{spora_2_6, crypto:strong_rand_bytes(32)},
		{spora_2_6, crypto:strong_rand_bytes(32)},
		BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(32),
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE)),
	%% NOTE(review): the 3-element {composite, 2, Addr} tuples here are rejected
	%% as invalid_reencrypt_packing before the bad state is inspected — confirm
	%% the intended packing tuple shape against ar_mine_randomx.
	?assertEqual({error, invalid_reencrypt_packing}, ar_mine_randomx:randomx_reencrypt_chunk(
		{composite, 2, crypto:strong_rand_bytes(32)},
		{composite, 2, crypto:strong_rand_bytes(32)},
		BadState,
		crypto:strong_rand_bytes(32), crypto:strong_rand_bytes(32),
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE)).

test_regression({FastState512, LightState512, FastState4096, LightState4096}) ->
	%% Test all permutations of:
	%% 1. Light vs. fast state
	%% 2. spora_2_6 vs. depth-1 composite vs. depth-2 composite packing
	%% 3. JIT vs. no JIT
	%% 4. RandomX dataset size 512 vs. 4096
	%%    (this is handled implicitly by the legacy packing vs. composite packing)
	test_regression(FastState512, "ar_mine_randomx_tests/packed.spora26.bin",
		0, [], fun encrypt_chunk/8, fun decrypt_chunk/8),
	test_regression(FastState512, "ar_mine_randomx_tests/packed.spora26.bin",
		1, [], fun encrypt_chunk/8, fun decrypt_chunk/8),
	test_regression(FastState4096, "ar_mine_randomx_tests/packed.composite.1.bin",
		0, [1, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(FastState4096, "ar_mine_randomx_tests/packed.composite.1.bin",
		1, [1, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(FastState4096, "ar_mine_randomx_tests/packed.composite.2.bin",
		0, [2, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(FastState4096, "ar_mine_randomx_tests/packed.composite.2.bin",
		1, [2, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(LightState512, "ar_mine_randomx_tests/packed.spora26.bin",
		0, [], fun encrypt_chunk/8, fun decrypt_chunk/8),
	test_regression(LightState512, "ar_mine_randomx_tests/packed.spora26.bin",
		1, [], fun encrypt_chunk/8, fun decrypt_chunk/8),
	test_regression(LightState4096, "ar_mine_randomx_tests/packed.composite.1.bin",
		0, [1, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(LightState4096, "ar_mine_randomx_tests/packed.composite.1.bin",
		1, [1, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(LightState4096, "ar_mine_randomx_tests/packed.composite.2.bin",
		0, [2, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_regression(LightState4096, "ar_mine_randomx_tests/packed.composite.2.bin",
		1, [2, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	ok.

%% @doc Pack and unpack against committed binary fixtures so any change in the
%% packing output across releases is caught byte-for-byte.
test_regression(State, Fixture, JIT, ExtraArgs, EncryptFun, DecryptFun) ->
	Key = ar_test_node:load_fixture("ar_mine_randomx_tests/key.bin"),
	UnpackedFixture = ar_test_node:load_fixture("ar_mine_randomx_tests/unpacked.bin"),
	PackedFixture = ar_test_node:load_fixture(Fixture),
	{ok, Packed} = EncryptFun(State, Key, UnpackedFixture, 8, JIT, 0, 0, ExtraArgs),
	?assertEqual(PackedFixture, Packed, Fixture),
	{ok, Unpacked} = DecryptFun(State, Key, PackedFixture, 8, JIT, 0, 0, ExtraArgs),
	?assertEqual(UnpackedFixture, Unpacked, Fixture).

test_empty_chunk_fails({FastState512, _LightState512, FastState4096, _LightState4096}) ->
	test_empty_chunk_fails(FastState512, [], fun encrypt_chunk/8),
	test_empty_chunk_fails(FastState4096, [1, 32], fun encrypt_composite_chunk/8),
	test_empty_chunk_fails(FastState512, [], fun decrypt_chunk/8),
	test_empty_chunk_fails(FastState4096, [1, 32], fun decrypt_composite_chunk/8).

%% @doc The NIFs must raise badarg for an empty chunk rather than succeed.
test_empty_chunk_fails(State, ExtraArgs, Fun) ->
	try
		Fun(State, crypto:strong_rand_bytes(32), <<>>, 1, 0, 0, 0, ExtraArgs),
		?assert(false, "Encrypt/Decrypt with an empty chunk should have failed")
	catch error:badarg ->
		ok
	end.

test_nif_wrappers({FastState512, _LightState512, FastState4096, _LightState4096}) ->
	%% Exercise both an unpadded and a full-size chunk.
	test_nif_wrappers(FastState512, FastState4096,
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 12)),
	test_nif_wrappers(FastState512, FastState4096,
		crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)).
%% @doc Check that the high-level ar_mine_randomx wrappers produce exactly the
%% same results as calling the underlying NIFs directly.
test_nif_wrappers(State512, State4096, Chunk) ->
	AddrA = crypto:strong_rand_bytes(32),
	AddrB = crypto:strong_rand_bytes(32),
	KeyA = crypto:strong_rand_bytes(32),
	KeyB = crypto:strong_rand_bytes(32),
	%% spora_26 randomx_encrypt_chunk
	{ok, Packed_2_6A} = ar_rx512_nif:rx512_encrypt_chunk_nif(
		element(2, State512), KeyA, Chunk, ?RANDOMX_PACKING_ROUNDS_2_6,
		ar_mine_randomx:jit(), ar_mine_randomx:large_pages(),
		ar_mine_randomx:hardware_aes()),
	?assertEqual({ok, Packed_2_6A},
		ar_mine_randomx:randomx_encrypt_chunk({spora_2_6, AddrA}, State512, KeyA, Chunk)),
	%% composite randomx_encrypt_composite_chunk
	{ok, PackedCompositeA2} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		element(2, State4096), KeyA, Chunk,
		ar_mine_randomx:jit(), ar_mine_randomx:large_pages(),
		ar_mine_randomx:hardware_aes(),
		?COMPOSITE_PACKING_ROUND_COUNT, 2, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT),
	?assertEqual({ok, PackedCompositeA2},
		ar_mine_randomx:randomx_encrypt_chunk({composite, AddrA, 2}, State4096, KeyA, Chunk)),
	%% spora_2_6 randomx_decrypt_chunk
	?assertEqual({ok, Chunk}, ar_mine_randomx:randomx_decrypt_chunk(
		{spora_2_6, AddrA}, State512, KeyA, Packed_2_6A, byte_size(Chunk))),
	%% composite randomx_decrypt_composite_chunk
	?assertEqual({ok, Chunk}, ar_mine_randomx:randomx_decrypt_chunk(
		{composite, AddrA, 2}, State4096, KeyA, PackedCompositeA2, byte_size(Chunk))),
	%% Prepare data for the reencryption tests
	{ok, Packed_2_6B} = ar_rx512_nif:rx512_encrypt_chunk_nif(
		element(2, State512), KeyB, Chunk, ?RANDOMX_PACKING_ROUNDS_2_6,
		ar_mine_randomx:jit(), ar_mine_randomx:large_pages(),
		ar_mine_randomx:hardware_aes()),
	{ok, PackedCompositeA3} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		element(2, State4096), KeyA, Chunk,
		ar_mine_randomx:jit(), ar_mine_randomx:large_pages(),
		ar_mine_randomx:hardware_aes(),
		?COMPOSITE_PACKING_ROUND_COUNT, 3, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT),
	{ok, PackedCompositeB3} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		element(2, State4096), KeyB, Chunk,
		ar_mine_randomx:jit(), ar_mine_randomx:large_pages(),
		ar_mine_randomx:hardware_aes(),
		?COMPOSITE_PACKING_ROUND_COUNT, 3, ?COMPOSITE_PACKING_SUB_CHUNK_COUNT),
	%% spora_2_6 -> spora_2_6 randomx_reencrypt_chunk
	?assertEqual({ok, Packed_2_6B, Chunk},
		ar_mine_randomx:randomx_reencrypt_chunk(
			{spora_2_6, AddrA}, {spora_2_6, AddrB},
			State512, KeyA, KeyB, Packed_2_6A, byte_size(Chunk))),
	%% composite -> composite randomx_reencrypt_chunk
	?assertEqual({ok, PackedCompositeB3, Chunk},
		ar_mine_randomx:randomx_reencrypt_chunk(
			{composite, AddrA, 2}, {composite, AddrB, 3},
			State4096, KeyA, KeyB, PackedCompositeA2, byte_size(Chunk))),
	%% Same address, deeper packing: the unpacked chunk is not returned (none).
	?assertEqual({ok, PackedCompositeA3, none},
		ar_mine_randomx:randomx_reencrypt_chunk(
			{composite, AddrA, 2}, {composite, AddrA, 3},
			State4096, KeyA, KeyA, PackedCompositeA2, byte_size(Chunk))),
	%% spora_2_6 -> composite randomx_reencrypt_chunk
	?assertEqual({error, invalid_reencrypt_packing},
		ar_mine_randomx:randomx_reencrypt_chunk(
			{spora_2_6, AddrA}, {composite, AddrB, 3},
			State512, KeyA, KeyB, Packed_2_6A, byte_size(Chunk))).

test_pack_unpack({FastState512, _LightState512, FastState4096, _LightState4096}) ->
	test_pack_unpack(FastState512, [], fun encrypt_chunk/8, fun decrypt_chunk/8),
	test_pack_unpack(
		FastState4096, [1, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	test_pack_unpack(
		FastState4096, [2, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8).

%% @doc Round-trip a chunk through encrypt/decrypt, checking determinism and
%% that a short chunk is padded to the same packed output as its padded form.
test_pack_unpack(State, ExtraArgs, EncryptFun, DecryptFun) ->
	%% Add 3 0-bytes at the end to test automatic padding.
	ChunkWithoutPadding = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 3),
	Chunk = << ChunkWithoutPadding/binary, 0:24 >>,
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed1} = EncryptFun(State, Key, Chunk, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Packed1)),
	?assertEqual({ok, Packed1}, EncryptFun(State, Key, Chunk, 8, 0, 0, 0, ExtraArgs)),
	%% Run the decryption twice to test that the nif isn't corrupting any state.
	{ok, Unpacked1} = DecryptFun(State, Key, Packed1, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(Unpacked1, Chunk),
	?assertEqual({ok, Unpacked1}, DecryptFun(State, Key, Packed1, 8, 0, 0, 0, ExtraArgs)),
	%% Run the encryption twice to test that the nif isn't corrupting any state.
	{ok, Packed2} = EncryptFun(State, Key, ChunkWithoutPadding, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(Packed2, Packed1),
	?assertEqual({ok, Packed2},
		EncryptFun(State, Key, ChunkWithoutPadding, 8, 0, 0, 0, ExtraArgs)).

test_repack({FastState512, _LightState512, FastState4096, _LightState4096}) ->
	test_repack(FastState512, [], [], fun encrypt_chunk/8, fun reencrypt_chunk/10),
	test_repack(
		FastState4096, [1, 32], [1, 1, 32, 32],
		fun encrypt_composite_chunk/8, fun reencrypt_composite_chunk/10),
	test_repack(
		FastState4096, [2, 32], [2, 2, 32, 32],
		fun encrypt_composite_chunk/8, fun reencrypt_composite_chunk/10).

%% @doc The single-call reencrypt must match a decrypt-then-encrypt round trip
%% and also expose the intermediate unpacked data.
test_repack(State, EncryptArgs, ReencryptArgs, EncryptFun, ReencryptFun) ->
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 12),
	Key1 = crypto:strong_rand_bytes(32),
	Key2 = crypto:strong_rand_bytes(32),
	{ok, Packed1} = EncryptFun(State, Key1, Chunk, 8, 0, 0, 0, EncryptArgs),
	{ok, Packed2} = EncryptFun(State, Key2, Chunk, 8, 0, 0, 0, EncryptArgs),
	{ok, Repacked, RepackInput} =
		ReencryptFun(State, Key1, Key2, Packed1, 8, 8, 0, 0, 0, ReencryptArgs),
	%% RepackInput is the unpacked (possibly padded) chunk; compare the prefix.
	?assertEqual(Chunk, binary:part(RepackInput, 0, byte_size(Chunk))),
	?assertEqual(Packed2, Repacked),
	%% Reencrypt with different RandomX rounds.
	{ok, Repacked2, RepackInput2} =
		ReencryptFun(State, Key1, Key2, Packed1, 8, 10, 0, 0, 0, ReencryptArgs),
	?assertEqual(Chunk, binary:part(RepackInput2, 0, byte_size(Chunk))),
	?assertNotEqual(Packed2, Repacked2),
	?assertEqual({ok, Repacked2, RepackInput2},
		ReencryptFun(State, Key1, Key2, Packed1, 8, 10, 0, 0, 0, ReencryptArgs)).
test_input_changes_packing({FastState512, _LightState512, FastState4096, _LightState4096}) ->
	test_input_changes_packing(FastState512, [], fun encrypt_chunk/8, fun decrypt_chunk/8),
	test_input_changes_packing(
		FastState4096, [1, 32], fun encrypt_composite_chunk/8, fun decrypt_composite_chunk/8),
	%% Also check arguments specific to composite packing:
	%%
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed} = encrypt_composite_chunk(FastState4096, Key, Chunk, 8, 0, 0, 0, [1, 32]),
	%% A different iterations count.
	{ok, Packed2} = encrypt_composite_chunk(FastState4096, Key, Chunk, 8, 0, 0, 0, [2, 32]),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Packed2)),
	?assertNotEqual(Packed2, Packed),
	{ok, Unpacked2} = decrypt_composite_chunk(
		FastState4096, Key, Packed, 8, 0, 0, 0, [2, 32]),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Unpacked2)),
	?assertNotEqual(Unpacked2, Chunk),
	%% A different sub-chunk count.
	{ok, Packed3} = encrypt_composite_chunk(FastState4096, Key, Chunk, 8, 0, 0, 0, [1, 64]),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Packed3)),
	?assertNotEqual(Packed3, Packed),
	{ok, Unpacked3} = decrypt_composite_chunk(FastState4096, Key, Packed, 8, 0, 0, 0, [1, 64]),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Unpacked3)),
	?assertNotEqual(Unpacked3, Chunk).

%% @doc Flipping any single input (chunk byte, key byte, round count) must
%% change the packed/unpacked output.
test_input_changes_packing(State, ExtraArgs, EncryptFun, DecryptFun) ->
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed} = EncryptFun(State, Key, Chunk, 8, 0, 0, 0, ExtraArgs),
	{ok, Unpacked} = DecryptFun(State, Key, Packed, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(Unpacked, Chunk),
	%% Pack a slightly different chunk to assert the packing is different for different data.
	<< ChunkPrefix:262143/binary, LastChunkByte:8 >> = Chunk,
	Chunk2 = << ChunkPrefix/binary, (LastChunkByte + 1):8 >>,
	{ok, Packed2} = EncryptFun(State, Key, Chunk2, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Packed2)),
	?assertNotEqual(Packed2, Packed),
	%% Unpack a slightly different chunk to assert the packing is different for different data.
	<< PackedPrefix:262143/binary, LastPackedByte:8 >> = Packed,
	Packed3 = << PackedPrefix/binary, (LastPackedByte + 1):8 >>,
	{ok, Unpacked2} = DecryptFun(State, Key, Packed3, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Unpacked2)),
	?assertNotEqual(Unpacked2, Chunk),
	%% Pack with a slightly different key.
	<< Prefix:31/binary, LastByte:8 >> = Key,
	Key2 = << Prefix/binary, (LastByte + 1):8 >>,
	{ok, Packed4} = EncryptFun(State, Key2, Chunk, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Packed4)),
	?assertNotEqual(Packed4, Packed),
	%% Unpack with a slightly different key.
	{ok, Unpacked3} = DecryptFun(State, Key2, Packed, 8, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Unpacked3)),
	?assertNotEqual(Unpacked3, Chunk),
	%% Pack with a different RX program count.
	{ok, Packed5} = EncryptFun(State, Key, Chunk, 7, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Packed5)),
	?assertNotEqual(Packed5, Packed),
	%% Unpack with a different RX program count.
	{ok, Unpacked4} = DecryptFun(State, Key, Packed, 7, 0, 0, 0, ExtraArgs),
	?assertEqual(?DATA_CHUNK_SIZE, byte_size(Unpacked4)),
	?assertNotEqual(Unpacked4, Chunk).
%% -------------------------------------------------------------------------------------------
%% Composite packing tests
%% -------------------------------------------------------------------------------------------

%% Assert the composite (rx4096) packing differs from the legacy rx512 packing
%% even when the rx512 key is derived the same way (sha256 of key + chunk size),
%% with and without the zero padding appended to the chunk.
test_composite_packing({FastState512, _LightState512, FastState4096, _LightState4096}) ->
	{rx512, RandomXState512} = FastState512,
	{rx4096, RandomXState4096} = FastState4096,
	ChunkWithoutPadding = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 5),
	Chunk = << ChunkWithoutPadding/binary, 0:(5 * 8) >>,
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomXState4096, Key,
		Chunk, 0, 0, 0, 8, 1, 1),
	Key2 = crypto:hash(sha256, << Key/binary, ?DATA_CHUNK_SIZE:24 >>),
	{ok, Packed2} = ar_rx512_nif:rx512_encrypt_chunk_nif(RandomXState512, Key2, Chunk,
		8, % RANDOMX_PACKING_ROUNDS
		0, 0, 0),
	?assertNotEqual(Packed, Packed2),
	{ok, Packed3} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomXState4096, Key,
		ChunkWithoutPadding, 0, 0, 0, 8, 1, 1),
	{ok, Packed4} = ar_rx512_nif:rx512_encrypt_chunk_nif(RandomXState512, Key2,
		ChunkWithoutPadding,
		8, % RANDOMX_PACKING_ROUNDS
		0, 0, 0),
	?assertNotEqual(Packed3, Packed4).
%% Composite packing must be incremental in the iterations count: packing an
%% already-packed chunk for N more iterations equals packing the raw chunk for
%% the combined iterations count.
test_composite_packs_incrementally(
		{_FastState512, _LightState512, FastState4096, _LightState4096}) ->
	{rx4096, RandomXState4096} = FastState4096,
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 3),
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed1} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Chunk, 0, 0, 0, 8, 1, 32),
	{ok, Packed2} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Packed1, 0, 0, 0, 8, 1, 32),
	{ok, Packed3} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Chunk, 0, 0, 0, 8, 2, 32),
	%% pack(pack(chunk, 1), 1) == pack(chunk, 2).
	?assertEqual(Packed2, Packed3),
	{ok, Packed4} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Chunk, 0, 0, 0, 8, 3, 32),
	{ok, Packed5} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Packed1, 0, 0, 0, 8, 2, 32),
	{ok, Packed6} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Packed2, 0, 0, 0, 8, 1, 32),
	%% pack(chunk, 3) == pack(pack(chunk, 1), 2) == pack(pack(chunk, 2), 1).
	?assertEqual(Packed4, Packed5),
	?assertEqual(Packed4, Packed6).
%% A composite-packed chunk must be decryptable sub-chunk by sub-chunk: the
%% concatenation of the 32 independently decrypted sub-chunks equals the
%% original (padded) chunk. Each sub-chunk decryption is run twice to check
%% determinism. Verified for 1 and 3 iterations.
test_composite_unpacked_sub_chunks(
		{_FastState512, _LightState512, FastState4096, _LightState4096}) ->
	{rx4096, RandomXState4096} = FastState4096,
	ChunkWithoutPadding = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 3),
	Chunk = << ChunkWithoutPadding/binary, 0:24 >>,
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Chunk, 0, 0, 0, 8, 1, 32),
	SubChunks = split_chunk_into_sub_chunks(Packed, ?DATA_CHUNK_SIZE div 32, 0),
	UnpackedInSubChunks = iolist_to_binary(lists:reverse(lists:foldl(
		fun({SubChunk, Offset}, Acc) ->
			{ok, Unpacked} = ar_rx4096_nif:rx4096_decrypt_composite_sub_chunk_nif(
				RandomXState4096, Key, SubChunk, byte_size(SubChunk),
				0, 0, 0, 8, 1, Offset),
			{ok, Unpacked2} = ar_rx4096_nif:rx4096_decrypt_composite_sub_chunk_nif(
				RandomXState4096, Key, SubChunk, byte_size(SubChunk),
				0, 0, 0, 8, 1, Offset),
			?assertEqual(Unpacked, Unpacked2),
			[Unpacked | Acc]
		end,
		[],
		SubChunks
	))),
	?assertEqual(UnpackedInSubChunks, Chunk),
	Chunk2 = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 3),
	{ok, Packed2} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(
		RandomXState4096, Key, Chunk2, 0, 0, 0, 8, 3, 32),
	SubChunks2 = split_chunk_into_sub_chunks(Packed2, ?DATA_CHUNK_SIZE div 32, 0),
	UnpackedInSubChunks2 = iolist_to_binary(lists:reverse(lists:foldl(
		fun({SubChunk, Offset}, Acc) ->
			{ok, Unpacked} = ar_rx4096_nif:rx4096_decrypt_composite_sub_chunk_nif(
				RandomXState4096, Key, SubChunk, byte_size(SubChunk),
				0, 0, 0, 8, 3, Offset),
			{ok, Unpacked2} = ar_rx4096_nif:rx4096_decrypt_composite_sub_chunk_nif(
				RandomXState4096, Key, SubChunk, byte_size(SubChunk),
				0, 0, 0, 8, 3, Offset),
			?assertEqual(Unpacked, Unpacked2),
			[Unpacked | Acc]
		end,
		[],
		SubChunks2
	))),
	%% The NIF pads the short input chunk with zeros; expect the padded form back.
	?assertEqual(UnpackedInSubChunks2, << Chunk2/binary, 0:24 >>).

%% Split Bin into {SubChunk, ByteOffset} pairs of the given Size.
%% Bin is assumed to be an exact multiple of Size.
split_chunk_into_sub_chunks(Bin, Size, Offset) ->
	case Bin of
		<<>> ->
			[];
		<< SubChunk:Size/binary, Rest/binary >> ->
			[{SubChunk, Offset} | split_chunk_into_sub_chunks(Rest, Size, Offset + Size)]
	end.
%% Reencrypting a composite chunk to a higher iterations count (same or
%% different key) must match a fresh encryption at that count; reencrypting to
%% the same or a lower count must fail with badarg.
test_composite_repack({_FastState512, _LightState512, FastState4096, _LightState4096}) ->
	{rx4096, RandomXState4096} = FastState4096,
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE - 12),
	Key = crypto:strong_rand_bytes(32),
	{ok, Packed2} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomXState4096, Key,
		Chunk, 0, 0, 0, 8, 2, 32),
	{ok, Packed3} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomXState4096, Key,
		Chunk, 0, 0, 0, 8, 3, 32),
	{ok, Repacked_2_3, RepackInput} =
		ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(RandomXState4096,
			Key, Key, Packed2, 0, 0, 0, 8, 8, 2, 3, 32, 32),
	%% Same key: the NIF works incrementally, so the input it reports is the
	%% already-packed chunk, not the decrypted data.
	?assertEqual(Packed2, RepackInput),
	?assertEqual(Packed3, Repacked_2_3),
	%% Repacking a composite chunk to same-key higher-diff composite chunk...
	{ok, Repacked_2_5, RepackInput} =
		ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(RandomXState4096,
			Key, Key, Packed2, 0, 0, 0, 8, 8, 2, 5, 32, 32),
	{ok, Packed5} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomXState4096, Key,
		Chunk, 0, 0, 0, 8, 5, 32),
	?assertEqual(Packed5, Repacked_2_5),
	Key2 = crypto:strong_rand_bytes(32),
	%% Repacking a composite chunk to different-key higher-diff composite chunk...
	{ok, RepackedDiffKey_2_3, RepackInput2} =
		ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(RandomXState4096,
			Key, Key2, Packed2, 0, 0, 0, 8, 8, 2, 3, 32, 32),
	%% Different key: the chunk must be fully decrypted first, so the reported
	%% input is the original chunk with its zero padding.
	?assertEqual(<< Chunk/binary, 0:(12 * 8) >>, RepackInput2),
	{ok, Packed2_3} = ar_rx4096_nif:rx4096_encrypt_composite_chunk_nif(RandomXState4096, Key2,
		Chunk, 0, 0, 0, 8, 3, 32),
	?assertNotEqual(Packed2, Packed2_3),
	?assertEqual(Packed2_3, RepackedDiffKey_2_3),
	try
		ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(RandomXState4096,
			Key, Key, Packed2, 0, 0, 0, 8, 8, 2, 2, 32, 32),
		?assert(false, "ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif to reencrypt "
				"to same diff should have failed")
	catch error:badarg ->
		ok
	end,
	try
		ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif(RandomXState4096,
			Key, Key, Packed2, 0, 0, 0, 8, 8, 2, 1, 32, 32),
		?assert(false, "ar_rx4096_nif:rx4096_reencrypt_composite_chunk_nif to reencrypt "
				"to lower diff should have failed")
	catch error:badarg ->
		ok
	end.

%% Check the RandomX hash values against known fixtures; fast and light modes
%% of the same dataset must agree.
test_hash({ FastState512, LightState512, FastState4096, LightState4096}) ->
	ExpectedHash512 = ar_util:decode(?ENCODED_RX512_HASH),
	ExpectedHash4096 = ar_util:decode(?ENCODED_RX4096_HASH),
	Nonce = ar_util:decode(?ENCODED_NONCE),
	Segment = ar_util:decode(?ENCODED_SEGMENT),
	Input = << Nonce/binary, Segment/binary >>,
	?assertEqual(ExpectedHash512, ar_mine_randomx:hash(FastState512, Input, 0, 0, 0)),
	?assertEqual(ExpectedHash512, ar_mine_randomx:hash(LightState512, Input, 0, 0, 0)),
	?assertEqual(ExpectedHash4096, ar_mine_randomx:hash(FastState4096, Input, 0, 0, 0)),
	?assertEqual(ExpectedHash4096, ar_mine_randomx:hash(LightState4096, Input, 0, 0, 0)).



================================================
FILE: apps/arweave/test/ar_mine_vdf_tests.erl
================================================
-module(ar_mine_vdf_tests).

-include_lib("eunit/include/eunit.hrl").

%% Fixed VDF input and parameters used by the fixture-based tests below.
-define(ENCODED_PREV_STATE, <<"f_z7RLug8etm3SrmRf-xPwXEL0ZQ_xHng2A5emRDQBw">>).
-define(ITERATIONS_SHA, 10).
-define(CHECKPOINT_COUNT, 4).
-define(CHECKPOINT_SKIP_COUNT, 9).
%% Expected checkpoint/result fixtures (base64url), produced by the reference
%% implementation below and cross-checked against the NIFs.
-define(ENCODED_SHA_CHECKPOINT, <<"hewn3qrpFlGUPVexAqyqrb72v7dvhAMd26Cwih7R8w9WPNxZmSZSSqGcibtalnKnwHruGcFvEqzSWq2ySAMsRxzC57qaktI6gV6dAcPQ41ZLzpw9i3AJUPCFsShzNbV8EVx29JpHdMZ0VzlViPUrgbfS_EVSWAqiZKhJcJmUcbI">>).
-define(ENCODED_SHA_CHECKPOINT_FULL, <<"hewn3qrpFlGUPVexAqyqrb72v7dvhAMd26Cwih7R8w9WPNxZmSZSSqGcibtalnKnwHruGcFvEqzSWq2ySAMsRxzC57qaktI6gV6dAcPQ41ZLzpw9i3AJUPCFsShzNbV8EVx29JpHdMZ0VzlViPUrgbfS_EVSWAqiZKhJcJmUcbJlfDXdCy38G1r-6tB_OLCVTYcIg7if-g6ejTQERJ2AXg">>).
-define(ENCODED_SHA_CHECKPOINT_SKIP, <<"_Wl7xvFvX_XOHfnaHxtv17b_UCYIx1fOU2SeuM7VV-_5xuzpESPJCM2wh0ry9sjpDqLPjKmI3hkdWm3CZalD5lSdAOJp62vJXdKbTOstTDi__oL1OIPFOnDxawC_TIxLO-YebYbMN6hGk7bz7le65Bciat3ahUEIM_GmriHJVh4">>).
-define(ENCODED_SHA_RES, <<"ZXw13Qst_Bta_urQfziwlU2HCIO4n_oOno00BESdgF4">>).
-define(ENCODED_SHA_RES_SKIP, <<"Sobef2mx_AgxJ4ubzi2FLDYhouKqojyTzUXASCXrSZ0">>).

%%%===================================================================
%%% utils
%%%===================================================================

%% Reference (slow) implementation of the SHA-256 VDF chain, used as the ground
%% truth for the NIF implementations.
%%
%% Iterations exhausted: return the accumulated state.
soft_implementation_vdf_sha(_Salt, PrevState, 0, _CheckpointCount) ->
	PrevState;
%% Last checkpoint segment: hash Iterations times under the current salt.
%% FIX: the hash input was garbled to `<>` (an empty binary) by extraction;
%% the VDF step hashes the salt concatenated with the previous state.
soft_implementation_vdf_sha(Salt, PrevState, Iterations, 0) ->
	NextState = crypto:hash(sha256, << Salt/binary, PrevState/binary >>),
	soft_implementation_vdf_sha(Salt, NextState, Iterations-1, 0);
%% Run one full segment, then advance the 256-bit salt counter by one and
%% recurse into the next checkpoint segment.
soft_implementation_vdf_sha(Salt, PrevState, Iterations, CheckpointCount) ->
	NextState = soft_implementation_vdf_sha(Salt, PrevState, Iterations, 0),
	<< SaltValue:256 >> = Salt,
	NextSalt = << (SaltValue+1):256 >>,
	soft_implementation_vdf_sha(NextSalt, NextState, Iterations, CheckpointCount-1).

%%%===================================================================
%%%
%%% no skip iterations (needed for last 1 sec has more checkpoints)
%%%
%%%===================================================================

%%%===================================================================
%%% SHA.
%%%===================================================================

vdf_sha_test_() ->
	{timeout, 500, fun test_vdf_sha/0}.
%% Validate the three VDF NIF implementations (plain, fused, hiopt) against the
%% reference soft implementation and stored fixtures, with no skip checkpoints.
test_vdf_sha() ->
	PrevState = ar_util:decode(?ENCODED_PREV_STATE),
	OutCheckpointSha3 = ar_util:decode(?ENCODED_SHA_CHECKPOINT),
	OutCheckpointSha3Full = ar_util:decode(?ENCODED_SHA_CHECKPOINT_FULL),
	RealSha3 = ar_util:decode(?ENCODED_SHA_RES),
	Salt1 = << (1):256 >>,
	Salt2 = << (2):256 >>,
	{ok, Real1, _OutCheckpointSha} = ar_vdf_nif:vdf_sha2_nif(Salt1, PrevState, 0, 0,
		?ITERATIONS_SHA),
	ExpectedHash = soft_implementation_vdf_sha(Salt1, PrevState, ?ITERATIONS_SHA, 0),
	?assertEqual(ExpectedHash, Real1),
	%% Computing the remaining checkpoints from Real1 must land on the same
	%% final state as computing everything in one call (note: the fixture
	%% values are pattern-matched against the NIF results here).
	{ok, RealSha2, OutCheckpointSha2} = ar_vdf_nif:vdf_sha2_nif(Salt2, Real1,
		?CHECKPOINT_COUNT-1, 0, ?ITERATIONS_SHA),
	{ok, RealSha3, OutCheckpointSha3} = ar_vdf_nif:vdf_sha2_nif(Salt1, PrevState,
		?CHECKPOINT_COUNT, 0, ?ITERATIONS_SHA),
	ExpectedSha3 = soft_implementation_vdf_sha(Salt1, PrevState, ?ITERATIONS_SHA,
		?CHECKPOINT_COUNT),
	?assertEqual(ExpectedSha3, RealSha2),
	?assertEqual(ExpectedSha3, RealSha3),
	ExpedctedOutCheckpoint3 = << Real1/binary, OutCheckpointSha2/binary >>,
	?assertEqual(ExpedctedOutCheckpoint3, OutCheckpointSha3),
	ExpectedOutCheckpointSha3Full = << OutCheckpointSha3/binary, RealSha3/binary >>,
	?assertEqual(ExpectedOutCheckpointSha3Full, OutCheckpointSha3Full),
	ok = test_vdf_sha_verify_break1(Salt1, PrevState, ?CHECKPOINT_COUNT, 0, ?ITERATIONS_SHA,
		OutCheckpointSha3, RealSha3),
	ok = test_vdf_sha_verify_break2(Salt1, PrevState, ?CHECKPOINT_COUNT, 0, ?ITERATIONS_SHA,
		OutCheckpointSha3, RealSha3),
	% test vdf_fused
	{ok, Real1, _OutCheckpointSha} = ar_vdf_nif:vdf_sha2_fused_nif(Salt1, PrevState, 0, 0,
		?ITERATIONS_SHA),
	{ok, RealSha2, OutCheckpointSha2} = ar_vdf_nif:vdf_sha2_fused_nif(Salt2, Real1,
		?CHECKPOINT_COUNT-1, 0, ?ITERATIONS_SHA),
	{ok, RealSha3, OutCheckpointSha3} = ar_vdf_nif:vdf_sha2_fused_nif(Salt1, PrevState,
		?CHECKPOINT_COUNT, 0, ?ITERATIONS_SHA),
	% test vdf_hiopt
	{ok, Real1, _OutCheckpointSha} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt1, PrevState, 0, 0,
		?ITERATIONS_SHA),
	{ok, RealSha2, OutCheckpointSha2} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt2, Real1,
		?CHECKPOINT_COUNT-1, 0, ?ITERATIONS_SHA),
	{ok, RealSha3, OutCheckpointSha3} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt1, PrevState,
		?CHECKPOINT_COUNT, 0, ?ITERATIONS_SHA),
	ok.

%% Iterate over every byte position of OutCheckpoint.
%% NOTE(review): the recursive clause only decrements BreakPos and performs no
%% verification call — the byte-corruption + verify logic appears to have been
%% lost in extraction. Confirm against the upstream file before relying on this.
test_vdf_sha_verify_break1(Salt, PrevState, CheckpointCount, SkipCheckpointCount, Iterations,
		OutCheckpoint, Hash) ->
	test_vdf_sha_verify_break1(Salt, PrevState, CheckpointCount, SkipCheckpointCount,
		Iterations, OutCheckpoint, Hash, size(OutCheckpoint)-1).

test_vdf_sha_verify_break1(_Salt, _PrevState, _CheckpointCount, _SkipCheckpointCount,
		_Iterations, _OutCheckpoint, _Hash, 0) ->
	ok;
test_vdf_sha_verify_break1(Salt, PrevState, CheckpointCount, SkipCheckpointCount, Iterations,
		OutCheckpoint, Hash, BreakPos) ->
	test_vdf_sha_verify_break1(Salt, PrevState, CheckpointCount, SkipCheckpointCount,
		Iterations, OutCheckpoint, Hash, BreakPos-1).

%% Iterate over every byte position of Hash.
%% NOTE(review): same caveat as break1 — no verification call is visible here.
test_vdf_sha_verify_break2(Salt, PrevState, CheckpointCount, SkipCheckpointCount, Iterations,
		OutCheckpoint, Hash) ->
	test_vdf_sha_verify_break2(Salt, PrevState, CheckpointCount, SkipCheckpointCount,
		Iterations, OutCheckpoint, Hash, size(Hash)-1).

test_vdf_sha_verify_break2(_Salt, _PrevState, _CheckpointCount, _SkipCheckpointCount,
		_Iterations, _OutCheckpoint, _Hash, 0) ->
	ok;
test_vdf_sha_verify_break2(Salt, PrevState, CheckpointCount, SkipCheckpointCount, Iterations,
		OutCheckpoint, Hash, BreakPos) ->
	test_vdf_sha_verify_break2(Salt, PrevState, CheckpointCount, SkipCheckpointCount,
		Iterations, OutCheckpoint, Hash, BreakPos-1).

%%%===================================================================
%%%
%%% with skip iterations
%%%
%%%===================================================================

%%%===================================================================
%%% SHA.
%%%===================================================================

vdf_sha_skip_iterations_test_() ->
	{timeout, 500, fun test_vdf_sha_skip_iterations/0}.
%% Same as test_vdf_sha/0 but with ?CHECKPOINT_SKIP_COUNT hidden checkpoints
%% between recorded ones; the salt of the continuation call jumps accordingly.
test_vdf_sha_skip_iterations() ->
	PrevState = ar_util:decode(?ENCODED_PREV_STATE),
	OutCheckpointSha3 = ar_util:decode(?ENCODED_SHA_CHECKPOINT_SKIP),
	RealSha3 = ar_util:decode(?ENCODED_SHA_RES_SKIP),
	Salt1 = << (1):256 >>,
	SaltJump = << (1+?CHECKPOINT_SKIP_COUNT+1):256 >>,
	{ok, Real1, _OutCheckpointSha} = ar_vdf_nif:vdf_sha2_nif(Salt1, PrevState, 0,
		?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	ExpectedHash = soft_implementation_vdf_sha(Salt1, PrevState, ?ITERATIONS_SHA,
		?CHECKPOINT_SKIP_COUNT),
	?assertEqual(ExpectedHash, Real1),
	{ok, RealSha2, OutCheckpointSha2} = ar_vdf_nif:vdf_sha2_nif(SaltJump, Real1,
		?CHECKPOINT_COUNT-1, ?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	{ok, RealSha3, OutCheckpointSha3} = ar_vdf_nif:vdf_sha2_nif(Salt1, PrevState,
		?CHECKPOINT_COUNT, ?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	%% Total segments = (1+CheckpointCount)*(1+SkipCount); minus one because the
	%% reference implementation counts the first segment itself.
	ExpectedSha3 = soft_implementation_vdf_sha(Salt1, PrevState, ?ITERATIONS_SHA,
		(1+?CHECKPOINT_COUNT)*(1+?CHECKPOINT_SKIP_COUNT)-1),
	?assertEqual(ExpectedSha3, RealSha2),
	?assertEqual(ExpectedSha3, RealSha3),
	ExpedctedOutCheckpoint3 = << Real1/binary, OutCheckpointSha2/binary >>,
	?assertEqual(ExpedctedOutCheckpoint3, OutCheckpointSha3),
	ok = test_vdf_sha_verify_break1(Salt1, PrevState, ?CHECKPOINT_COUNT,
		?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA, OutCheckpointSha3, RealSha3),
	ok = test_vdf_sha_verify_break2(Salt1, PrevState, ?CHECKPOINT_COUNT,
		?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA, OutCheckpointSha3, RealSha3),
	% test vdf_fused
	{ok, Real1, _OutCheckpointSha} = ar_vdf_nif:vdf_sha2_fused_nif(Salt1, PrevState, 0,
		?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	{ok, RealSha2, OutCheckpointSha2} = ar_vdf_nif:vdf_sha2_fused_nif(SaltJump, Real1,
		?CHECKPOINT_COUNT-1, ?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	{ok, RealSha3, OutCheckpointSha3} = ar_vdf_nif:vdf_sha2_fused_nif(Salt1, PrevState,
		?CHECKPOINT_COUNT, ?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	% test vdf_hiopt
	{ok, Real1, _OutCheckpointSha} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt1, PrevState, 0,
		?CHECKPOINT_SKIP_COUNT,
		?ITERATIONS_SHA),
	{ok, RealSha2, OutCheckpointSha2} = ar_vdf_nif:vdf_sha2_hiopt_nif(SaltJump, Real1,
		?CHECKPOINT_COUNT-1, ?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	{ok, RealSha3, OutCheckpointSha3} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt1, PrevState,
		?CHECKPOINT_COUNT, ?CHECKPOINT_SKIP_COUNT, ?ITERATIONS_SHA),
	ok.



================================================
FILE: apps/arweave/test/ar_mining_io_tests.erl
================================================
-module(ar_mining_io_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("eunit/include/eunit.hrl").

%% 2.5 partitions of weave so the tests can cross a partition boundary.
-define(WEAVE_SIZE, trunc(2.5 * ar_block:partition_size())).

%% Mock for ar_mining_worker:chunks_read/5 — records every read in the test
%% ETS table instead of feeding the mining pipeline.
chunks_read(_Worker, WhichChunk, Candidate, RangeStart, ChunkOffsets) ->
	ets:insert(?MODULE, {WhichChunk, Candidate, RangeStart, ChunkOffsets}).

%% Start a test node with 9 small (8-chunk) storage modules and mock
%% partition_size/0 so the whole weave fits the fixture.
setup_all() ->
	[B0] = ar_weave:init([], 1, ?WEAVE_SIZE),
	RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	{ok, Config} = arweave_config:get_env(),
	StorageModules = lists:flatten(
		[[{8 * 262144, N, {spora_2_6, RewardAddr}}] || N <- lists:seq(0, 8)]),
	ar_test_node:start(B0, RewardAddr, Config, StorageModules),
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ar_mining_worker, chunks_read, fun chunks_read/5},
		{ar_block, partition_size, fun() -> 8 * 262144 end}
	]),
	Functions = Setup(),
	{Cleanup, Functions}.

cleanup_all({Cleanup, Functions}) ->
	Cleanup(Functions).

%% Fresh ETS table per test; duplicate_bag keeps every recorded read.
setup_one() ->
	ets:new(?MODULE, [named_table, duplicate_bag, public]).

cleanup_one(_) ->
	ets:delete(?MODULE).

read_recall_range_test_() ->
	{setup, fun setup_all/0, fun cleanup_all/1,
		{foreach, fun setup_one/0, fun cleanup_one/1,
		[
			{timeout, 30, fun test_read_recall_range/0},
			{timeout, 30, fun test_partitions/0}
		]}
	}.
%% Exercise ar_mining_io:read_recall_range/4 at range starts aligned and
%% misaligned to chunk boundaries, at a partition boundary, and at the weave
%% edge; beyond the weave it must return false.
test_read_recall_range() ->
	Candidate = default_candidate(),
	?assertEqual(true, ar_mining_io:read_recall_range(chunk1, self(), Candidate, 0)),
	wait_for_io(1),
	[Chunk1, Chunk2] = get_recall_chunks(),
	assert_chunks_read([{chunk1, Candidate, 0, [
		{?DATA_CHUNK_SIZE, Chunk1},
		{?DATA_CHUNK_SIZE*2, Chunk2}]}]),
	%% A range start inside a chunk picks up the extra trailing chunk.
	?assertEqual(true, ar_mining_io:read_recall_range(chunk1, self(), Candidate,
		?DATA_CHUNK_SIZE div 2)),
	wait_for_io(1),
	[Chunk1, Chunk2, Chunk3] = get_recall_chunks(),
	assert_chunks_read([{chunk1, Candidate, ?DATA_CHUNK_SIZE div 2, [
		{?DATA_CHUNK_SIZE, Chunk1},
		{?DATA_CHUNK_SIZE*2, Chunk2},
		{?DATA_CHUNK_SIZE*3, Chunk3}]}]),
	?assertEqual(true, ar_mining_io:read_recall_range(chunk1, self(), Candidate,
		?DATA_CHUNK_SIZE)),
	wait_for_io(1),
	[Chunk2, Chunk3] = get_recall_chunks(),
	assert_chunks_read([{chunk1, Candidate, ?DATA_CHUNK_SIZE, [
		{?DATA_CHUNK_SIZE*2, Chunk2},
		{?DATA_CHUNK_SIZE*3, Chunk3}]}]),
	%% Straddle the partition boundary.
	?assertEqual(true, ar_mining_io:read_recall_range(chunk2, self(), Candidate,
		ar_block:partition_size() - ?DATA_CHUNK_SIZE)),
	wait_for_io(1),
	[Chunk4, Chunk5] = get_recall_chunks(),
	assert_chunks_read([{chunk2, Candidate, ar_block:partition_size() - ?DATA_CHUNK_SIZE, [
		{ar_block:partition_size(), Chunk4},
		{ar_block:partition_size() + ?DATA_CHUNK_SIZE, Chunk5}]}]),
	?assertEqual(true, ar_mining_io:read_recall_range(chunk2, self(), Candidate,
		ar_block:partition_size())),
	wait_for_io(1),
	[Chunk5, Chunk6] = get_recall_chunks(),
	assert_chunks_read([{chunk2, Candidate, ar_block:partition_size(), [
		{ar_block:partition_size() + ?DATA_CHUNK_SIZE, Chunk5},
		{ar_block:partition_size() + (2*?DATA_CHUNK_SIZE), Chunk6}]}]),
	%% The last chunk of the weave is readable; past the end is not.
	?assertEqual(true, ar_mining_io:read_recall_range(chunk1, self(), Candidate,
		?WEAVE_SIZE - ?DATA_CHUNK_SIZE)),
	wait_for_io(1),
	[Chunk7] = get_recall_chunks(),
	assert_chunks_read([{chunk1, Candidate, ?WEAVE_SIZE - ?DATA_CHUNK_SIZE, [
		{?WEAVE_SIZE, Chunk7}]}]),
	?assertEqual(false, ar_mining_io:read_recall_range(chunk1, self(), Candidate,
		?WEAVE_SIZE)).
%% get_partitions/0,1 must track the largest seen upper bound: partitions are
%% reported only when fully below the bound (or below the explicit argument).
test_partitions() ->
	Candidate = default_candidate(),
	MiningAddress = Candidate#mining_candidate.mining_address,
	ar_mining_io:set_largest_seen_upper_bound(0),
	?assertEqual([], ar_mining_io:get_partitions()),
	ar_mining_io:set_largest_seen_upper_bound(ar_block:partition_size()),
	?assertEqual([], ar_mining_io:get_partitions(0)),
	?assertEqual([
		{0, MiningAddress, 0}],
		ar_mining_io:get_partitions()),
	ar_mining_io:set_largest_seen_upper_bound(trunc(2.5 * ar_block:partition_size())),
	?assertEqual([
		{0, MiningAddress, 0}],
		ar_mining_io:get_partitions(ar_block:partition_size())),
	%% Partition 2 is only half-covered by the 2.5-partition bound, so excluded.
	?assertEqual([
		{0, MiningAddress, 0},
		{1, MiningAddress, 0}],
		ar_mining_io:get_partitions()),
	ar_mining_io:set_largest_seen_upper_bound(trunc(5 * ar_block:partition_size())),
	?assertEqual([
		{0, MiningAddress, 0},
		{1, MiningAddress, 0}],
		ar_mining_io:get_partitions(trunc(2.5 * ar_block:partition_size()))),
	?assertEqual([
		{0, MiningAddress, 0},
		{1, MiningAddress, 0},
		{2, MiningAddress, 0},
		{3, MiningAddress, 0},
		{4, MiningAddress, 0}],
		ar_mining_io:get_partitions()),
	?assertEqual([
		{0, MiningAddress, 0},
		{1, MiningAddress, 0},
		{2, MiningAddress, 0},
		{3, MiningAddress, 0},
		{4, MiningAddress, 0}],
		ar_mining_io:get_partitions(trunc(5 * ar_block:partition_size()))).

%% Only mineable packings (spora_2_6, replica_2_9) are returned; unpacked
%% modules are filtered out. The config is restored even if the assertion fails.
%% NOTE(review): "storge" in the function name is a typo, but eunit discovers it
%% by the _test suffix, so renaming would change the registered test name.
get_minable_storge_modules_test() ->
	{ok, Config} = arweave_config:get_env(),
	Addr = Config#config.mining_addr,
	try
		Input = [
			{100, 0, {spora_2_6, Addr}},
			{200, 0, unpacked},
			{300, 0, {replica_2_9, Addr}}
		],
		Expected = [
			{100, 0, {spora_2_6, Addr}},
			{300, 0, {replica_2_9, Addr}}
		],
		arweave_config:set_env(Config#config{storage_modules = Input}),
		?assertEqual(Expected, ar_mining_io:get_minable_storage_modules())
	after
		arweave_config:set_env(Config)
	end.
%% get_packing/0 must return the packing of the first mineable storage module,
%% skipping unpacked ones. Config is restored afterwards.
get_packing_test() ->
	{ok, Config} = arweave_config:get_env(),
	Addr = Config#config.mining_addr,
	try
		Input = [
			{100, 0, unpacked},
			{200, 0, {spora_2_6, Addr}},
			{300, 0, {replica_2_9, Addr}}
		],
		Expected = {spora_2_6, Addr},
		arweave_config:set_env(Config#config{storage_modules = Input}),
		?assertEqual(Expected, ar_mining_io:get_packing())
	after
		arweave_config:set_env(Config)
	end.

%% A minimal candidate carrying only the configured mining address.
default_candidate() ->
	{ok, Config} = arweave_config:get_env(),
	MiningAddr = Config#config.mining_addr,
	#mining_candidate{
		mining_address = MiningAddr
	}.

%% Poll (every 100 ms, up to 60 s) until NumChunks read events were recorded.
wait_for_io(NumChunks) ->
	Result = ar_util:do_until(
		fun() ->
			NumChunks == length(ets:tab2list(?MODULE))
		end,
		100,
		60000),
	?assertEqual(true, Result, "Timeout while waiting to read chunks").

%% Return the chunk binaries of the single recorded read event (or []).
get_recall_chunks() ->
	case ets:tab2list(?MODULE) of
		[] ->
			[];
		[{_WhichChunk, _Candidate, _RangeStart, ChunkOffsets}] ->
			lists:map(
				fun({_Offset, Chunk}) -> Chunk end,
				ChunkOffsets
			)
	end.

%% Compare the recorded read events against the expectation, then clear them.
assert_chunks_read(ExpectedChunks) ->
	?assertEqual(ExpectedChunks, ets:tab2list(?MODULE)),
	ets:delete_all_objects(?MODULE).



================================================
FILE: apps/arweave/test/ar_mining_server_tests.erl
================================================
-module(ar_mining_server_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("eunit/include/eunit.hrl").

-define(ALIGNED_PARTITION_SIZE, 2_097_152).
-define(WEAVE_SIZE, (3 * ?ALIGNED_PARTITION_SIZE)).
%% RECALL_RANGE_1 and SYNCED_RECALL_RANGE_2 must be different partitions so that different io
%% threads are used.
%% ?RECALL_RANGE_1 is set so 1 chunk is synced and one is missing.
-define(RECALL_RANGE_1, (3*?PARTITION_SIZE-?DATA_CHUNK_SIZE)).
-define(SYNCED_RECALL_RANGE_2, ?PARTITION_SIZE).
-define(UNSYNCED_RECALL_RANGE_2, 0).
%% ------------------------------------------------------------------------------------------------
%% Fixtures
%% ------------------------------------------------------------------------------------------------

%% Start a test node with storage modules for partitions 1 and 2 only
%% (partition 0 is deliberately left unsynced). Returns the prior config so
%% cleanup_all/1 can restore it.
setup_all() ->
	[B0] = ar_weave:init([], ar_test_node:get_difficulty_for_invalid_hash(), ?WEAVE_SIZE),
	RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	{ok, Config} = arweave_config:get_env(),
	%% We'll use partition 0 for any unsynced ranges.
	StorageModules = [
		{ar_block:partition_size(), 1, {spora_2_6, RewardAddr}},
		{ar_block:partition_size(), 2, {spora_2_6, RewardAddr}}
	],
	ar_test_node:start(B0, RewardAddr, Config, StorageModules),
	Config.

cleanup_all(Config) ->
	ok = arweave_config:set_env(Config).

%% @doc Setup the environment so we can control VDF step generation.
%% Same node as setup_all/0 but configured as a pool client with a trusted
%% VDF server, so pool jobs drive mining instead of the local VDF.
setup_pool_client() ->
	[B0] = ar_weave:init([], ar_test_node:get_difficulty_for_invalid_hash(), ?WEAVE_SIZE),
	RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	{ok, Config} = arweave_config:get_env(),
	%% We'll use partition 0 for any unsynced ranges.
	StorageModules = [
		{ar_block:partition_size(), 1, {spora_2_6, RewardAddr}},
		{ar_block:partition_size(), 2, {spora_2_6, RewardAddr}}
	],
	ar_test_node:start(B0, RewardAddr, Config#config{
			nonce_limiter_server_trusted_peers = [ ar_util:format_peer(vdf_server()) ],
			is_pool_client=true,
			pool_server_address= <<"http://localhost:2002">>,
			pool_api_key = <<"pool_secret">>
		}, StorageModules),
	Config.

cleanup_pool_client(Config) ->
	ok = arweave_config:set_env(Config).

%% Per-test ETS tables: mock_counter tracks mock invocation counts,
%% add_task records tasks handed to the mining workers.
setup_one() ->
	ets:new(mock_counter, [set, public, named_table]),
	ets:new(add_task, [named_table, bag, public]).

cleanup_one(_) ->
	ets:delete(add_task),
	ets:delete(mock_counter).
%% ------------------------------------------------------------------------------------------------
%% Test Registration
%% ------------------------------------------------------------------------------------------------
chunk_cache_size_test_() ->
	{setup, fun setup_all/0, fun cleanup_all/1,
		{foreach, fun setup_one/0, fun cleanup_one/1,
		[
			{timeout, 30, fun test_h2_solution_chunk1_first/0},
			{timeout, 30, fun test_h2_solution_chunk2_first/0},
			{timeout, 30, fun test_h1_solution_h2_synced_chunk1_first/0},
			{timeout, 30, fun test_h1_solution_h2_synced_chunk2_first/0},
			{timeout, 30, fun test_h1_solution_h2_unsynced/0},
			{timeout, 30, fun test_no_solution_then_h2_solution/0},
			{timeout, 30, fun test_no_solution_then_h1_solution_h2_synced/0},
			{timeout, 30, fun test_no_solution_then_h1_solution_h2_unsynced/0}
		]}
	}.

pool_job_test_() ->
	{setup, fun setup_pool_client/0, fun cleanup_pool_client/1,
		{foreach, fun setup_one/0, fun cleanup_one/1,
		[
			ar_test_node:test_with_mocked_functions([mock_add_task(),
					mock_get_current_sesssion()],
				fun test_pool_job_no_cached_sessions/0, 120)
		]}
	}.

%% ------------------------------------------------------------------------------------------------
%% chunk_cache_size_test_
%% ------------------------------------------------------------------------------------------------
%% Each scenario wires a sequence of mocked h1/h2 results, a recall-range-2
%% location (synced or unsynced), and which chunk read completes first; the
%% shared driver then asserts the chunk cache drains to zero.
test_h2_solution_chunk1_first() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:invalid_solution()], [ar_test_node:valid_solution()],
		[?SYNCED_RECALL_RANGE_2], [chunk1]
	).

test_h2_solution_chunk2_first() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:invalid_solution()], [ar_test_node:valid_solution()],
		[?SYNCED_RECALL_RANGE_2], [chunk2]
	).

test_h1_solution_h2_synced_chunk1_first() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:valid_solution()], [ar_test_node:invalid_solution()],
		[?SYNCED_RECALL_RANGE_2], [chunk1]
	).

test_h1_solution_h2_synced_chunk2_first() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:valid_solution()], [ar_test_node:invalid_solution()],
		[?SYNCED_RECALL_RANGE_2], [chunk2]
	).

test_h1_solution_h2_unsynced() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:valid_solution()], [],
		[?UNSYNCED_RECALL_RANGE_2], [chunk1]
	).

test_no_solution_then_h2_solution() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:invalid_solution()],
		[ar_test_node:invalid_solution(), ar_test_node:invalid_solution(),
			ar_test_node:valid_solution()],
		[?SYNCED_RECALL_RANGE_2], [chunk1]
	).

test_no_solution_then_h1_solution_h2_synced() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:invalid_solution(), ar_test_node:invalid_solution(),
			ar_test_node:valid_solution()],
		[ar_test_node:invalid_solution()],
		[?SYNCED_RECALL_RANGE_2], [chunk1]
	).

test_no_solution_then_h1_solution_h2_unsynced() ->
	do_test_chunk_cache_size_with_mocks(
		[ar_test_node:invalid_solution(), ar_test_node:invalid_solution(),
			ar_test_node:valid_solution()],
		[],
		[?UNSYNCED_RECALL_RANGE_2], [chunk1]
	).

%% ------------------------------------------------------------------------------------------------
%% pool_job_test_
%% ------------------------------------------------------------------------------------------------

%% we have to wait to let the ar_events get processed whenever we add a pool job
-define(WAIT_TIME, 1000).
%% Pool jobs create mining sessions on the fly; at most two sessions stay
%% active (oldest evicted), and a job for an evicted session mines nothing.
%% Each step appears twice in mined_steps() (one task per storage partition).
test_pool_job_no_cached_sessions() ->
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	SessionKey3 = {<<"session3">>, 3, 1},
	Output = crypto:strong_rand_bytes(32),
	PartitionUpperBound = ?WEAVE_SIZE,
	Seed1 = crypto:strong_rand_bytes(32),
	Seed2 = crypto:strong_rand_bytes(32),
	Seed3 = crypto:strong_rand_bytes(32),
	PartialDiff = {1, 1},
	ar_mining_server:add_pool_job(
		SessionKey1, 1, Output, PartitionUpperBound, Seed1, PartialDiff),
	ar_mining_server:add_pool_job(
		SessionKey1, 2, Output, PartitionUpperBound, Seed1, PartialDiff),
	timer:sleep(?WAIT_TIME),
	?assertEqual(sets:from_list([SessionKey1]), ar_mining_server:active_sessions()),
	?assertEqual([1, 1, 2, 2], lists:sort(mined_steps())),
	ar_mining_server:add_pool_job(
		SessionKey2, 5, Output, PartitionUpperBound, Seed2, PartialDiff),
	ar_mining_server:add_pool_job(
		SessionKey2, 6, Output, PartitionUpperBound, Seed2, PartialDiff),
	timer:sleep(?WAIT_TIME),
	?assertEqual(sets:from_list([SessionKey1, SessionKey2]),
		ar_mining_server:active_sessions()),
	?assertEqual([5, 5, 6, 6], lists:sort(mined_steps())),
	%% Adding a third session evicts the oldest (SessionKey1).
	ar_mining_server:add_pool_job(
		SessionKey3, 10, Output, PartitionUpperBound, Seed3, PartialDiff),
	ar_mining_server:add_pool_job(
		SessionKey3, 12, Output, PartitionUpperBound, Seed3, PartialDiff),
	timer:sleep(?WAIT_TIME),
	?assertEqual(sets:from_list([SessionKey2, SessionKey3]),
		ar_mining_server:active_sessions()),
	?assertEqual([10, 10, 12, 12],lists:sort(mined_steps())),
	%% A job for the evicted session must be ignored.
	ar_mining_server:add_pool_job(
		SessionKey1, 4, Output, PartitionUpperBound, Seed1, PartialDiff),
	timer:sleep(?WAIT_TIME),
	?assertEqual(sets:from_list([SessionKey2, SessionKey3]),
		ar_mining_server:active_sessions()),
	?assertEqual([], mined_steps()).
%% ------------------------------------------------------------------------------------------------
%% Helpers
%% ------------------------------------------------------------------------------------------------

%% Shared driver for the chunk-cache scenarios. The H1s/H2s/RecallRange2s/
%% FirstChunks lists are consumed per mock invocation (the last element repeats
%% — see get_mock_value/2). get_range is delayed via timer:sleep to force the
%% desired chunk1/chunk2 completion order. After mining a block, the chunk
%% cache size must drain to zero.
do_test_chunk_cache_size_with_mocks(H1s, H2s, RecallRange2s, FirstChunks) ->
	Height = ar_node:get_height() + 1,
	ets:insert(mock_counter, {compute_h1, 0}),
	ets:insert(mock_counter, {compute_h2, 0}),
	ets:insert(mock_counter, {get_recall_range, 0}),
	ets:insert(mock_counter, {get_range, 0}),
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ ar_retarget, is_retarget_height, fun (_Height) -> false end },
		{ ar_block, compute_h1,
			fun (_H0, _Nonce, _Chunk) ->
				Count = increment_mock_counter(compute_h1),
				Solution = get_mock_value(Count, H1s),
				{Solution, Solution}
			end },
		{ ar_block, compute_h2,
			fun (_H0, _Nonce, _Chunk) ->
				Count = increment_mock_counter(compute_h2),
				Solution = get_mock_value(Count, H2s),
				{Solution, Solution}
			end },
		{ ar_block, get_recall_range,
			fun (_H0, _PartitionNumber, _PartitionUpperBound) ->
				Count = increment_mock_counter(get_recall_range),
				RecallRange2 = get_mock_value(Count, RecallRange2s),
				{?RECALL_RANGE_1, RecallRange2}
			end },
		{ ar_chunk_storage, get_range,
			fun (RangeStart, Size, StoreID) ->
				Count = increment_mock_counter(get_range),
				FirstChunk = get_mock_value(Count, FirstChunks),
				%% Delay the read that should finish second.
				case FirstChunk == chunk1 andalso RangeStart /= ?RECALL_RANGE_1 of
					true -> timer:sleep(100);
					_ -> ok
				end,
				case FirstChunk == chunk2 andalso RangeStart == ?RECALL_RANGE_1 of
					true -> timer:sleep(100);
					_ -> ok
				end,
				meck:passthrough([RangeStart, Size, StoreID])
			end }
	]),
	Functions = Setup(),
	try
		ar_test_node:mine(),
		ar_test_node:wait_until_height(main, Height),
		%% wait until the mining has stopped
		?assert(ar_util:do_until(fun() -> get_chunk_cache_size() == 0 end, 200, 10000))
	after
		Cleanup(Functions)
	end.
%% @doc Sum the per-partition chunk cache sizes maintained by the mining
%% server. Returns 0 when no partition has any cached chunks.
get_chunk_cache_size() ->
	%% The {chunk_cache_size, PartitionNumber} counters live in the
	%% ar_mining_server ETS table — the same table the lookup below reads.
	%% (Previously this matched the mock_counter table, which never holds
	%% such keys, so Entries was always [] and the cache-drain assertion in
	%% do_test_chunk_cache_size_with_mocks/4 was vacuous.)
	Pattern = {{chunk_cache_size, '$1'}, '_'}, % '$1' matches any PartitionNumber
	Entries = ets:match(ar_mining_server, Pattern),
	lists:foldl(
		%% ets:match/2 returns one *list* of bound variables per matching
		%% object, so each entry is [PartitionNumber], not a bare value.
		fun([PartitionNumber], Acc) ->
			case ets:lookup(ar_mining_server, {chunk_cache_size, PartitionNumber}) of
				[] -> Acc;
				[{_, Size}] -> Acc + Size
			end
		end,
		0,
		Entries
	).

%% @doc Return the Index-th scripted value, sticking to the last one once the
%% script is exhausted. Indices start at 1 (see increment_mock_counter/1).
get_mock_value(Index, Values) when Index < length(Values) ->
	lists:nth(Index, Values);
get_mock_value(_, Values) ->
	lists:last(Values).

%% @doc Atomically bump the named counter in mock_counter and return the new
%% value (first call returns 1).
increment_mock_counter(Mock) ->
	ets:update_counter(mock_counter, Mock, {2, 1}),
	[{_, Count}] = ets:lookup(mock_counter, Mock),
	Count.

%% The fake VDF server peer used by the surrounding tests.
vdf_server() ->
	{127,0,0,1,2001}.

%% @doc Drain and return the step numbers recorded by the mocked
%% ar_mining_worker:add_task/3 (see mock_add_task/0).
%% NOTE(review): ets:foldl/3 traversal order is unspecified for most table
%% types, so callers sort the result before asserting on it.
mined_steps() ->
	Steps = lists:reverse(ets:foldl(
		fun({_Worker, _Task, Step}, Acc) ->
			[Step | Acc]
		end, [], add_task)),
	ets:delete_all_objects(add_task),
	Steps.

%% Mock for ar_mining_worker:add_task/3: records the worker, task type and
%% candidate step number into the add_task ETS table instead of mining.
mock_add_task() ->
	{ ar_mining_worker, add_task,
		fun(Worker, TaskType, Candidate) ->
			ets:insert(add_task,
				{Worker, TaskType, Candidate#mining_candidate.step_number})
		end }.

%% Mock for ar_nonce_limiter:get_current_session/0 returning "no session".
%% NOTE(review): the name carries a long-standing typo ("sesssion"); renaming
%% would break its callers, so it is kept as-is.
mock_get_current_sesssion() ->
	{ ar_nonce_limiter, get_current_session,
		fun() ->
			{undefined, not_found}
		end }.


================================================
FILE: apps/arweave/test/ar_mining_worker_tests.erl
================================================
-module(ar_mining_worker_tests).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_mining.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

-define(PARTITION, 0).
-define(TESTER_REGISTER_NAME, ?MODULE).

%% Per-test state: the worker under test, its mining session key, and a
%% template candidate for that session.
-record(state, {
	worker_pid :: pid(),
	session_key :: binary(),
	candidate :: #mining_candidate{}
}).

%% Message shapes exchanged between the mocked modules and the tester process.
-define(compute_h0(WorkerPid, Candidate), {compute_h0, WorkerPid, Candidate}).
-define(compute_h1(WorkerPid, Candidate), {compute_h1, WorkerPid, Candidate}).
-define(compute_h2(WorkerPid, Candidate), {compute_h2, WorkerPid, Candidate}).
-define(get_recall_range(H0, Partition, PartitionUpperBound), {get_recall_range, H0, Partition, PartitionUpperBound}).
-define(get_recall_range_response(RecallRange1, RecallRange2), {get_recall_range_response, RecallRange1, RecallRange2}).
-define(is_recall_range_readable(H0, RecallRange), {is_recall_range_readable, H0, RecallRange}).
-define(is_recall_range_readable_response(IsReadable), {is_recall_range_readable_response, IsReadable}).
-define(read_recall_range(Kind, WorkerPid, Candidate2, RecallRangeStart), {read_recall_range, Kind, WorkerPid, Candidate2, RecallRangeStart}).
-define(read_recall_range_response(), {read_recall_range_response}).
-define(passes_diff_check(SolutionHash, IsPoA1, DiffPair, PackingDifficulty), {passes_diff_check, SolutionHash, IsPoA1, DiffPair, PackingDifficulty}).
-define(passes_diff_check_response(IsPassed), {passes_diff_check_response, IsPassed}).
-define(prepare_and_post_solution(Candidate), {prepare_and_post_solution, Candidate}).

%% Wrap a test function with per-test setup/cleanup. On error, log and
%% rethrow; cleanup happens exactly once.
-define(with_setup_each(F), fun() ->
	State = setup_each(),
	try
		F(State)
	catch C:E:S ->
		?debugFmt("Error: ~p:~p:~p", [C, E, S]),
		%% Do NOT call cleanup_each/1 here: the `after` clause below also
		%% runs on the exception path, and cleanup_each/1 is not idempotent
		%% (the second unregister/1 raises badarg, masking the original
		%% test failure).
		throw({C, E})
	after
		cleanup_each(State)
	end
end).

%% ------------------------------------------------------------------------------------------------
%% Fixtures
%% ------------------------------------------------------------------------------------------------

%% This function is called for all tests.
%% It mocks the necessary modules and functions.
%% Mocked functions are sending messages to the ?TESTER_REGISTER_NAME process (the test).
%% @doc Install meck mocks (once for the whole suite) that redirect the
%% worker's collaborators to the tester process. Returns the list of mocked
%% modules so cleanup_all/1 can unload them.
setup_all() ->
	Mocks = [
		%% The cache session always "exists" so tasks are never dropped for
		%% missing sessions.
		{ar_mining_cache, session_exists, fun(_, _) -> true end},
		{ar_mining_hash, compute_h0, fun ar_mining_hash__compute_h0/2},
		{ar_mining_hash, compute_h1, fun ar_mining_hash__compute_h1/2},
		{ar_mining_hash, compute_h2, fun ar_mining_hash__compute_h2/2},
		{ar_block, get_recall_range, fun ar_block__get_recall_range/3},
		{ar_mining_io, is_recall_range_readable, fun ar_mining_io__is_recall_range_readable/2},
		{ar_mining_io, read_recall_range, fun ar_mining_io__read_recall_range/4},
		{ar_node_utils, passes_diff_check, fun ar_node_utils__passes_diff_check/4},
		{ar_mining_server, prepare_and_post_solution, fun ar_mining_server__prepare_and_post_solution/1}
	],
	MockedModules = lists:usort([Module || {Module, _, _} <- Mocks]),
	meck:new(MockedModules, [passthrough]),
	[meck:expect(M, F, Fun) || {M, F, Fun} <- Mocks],
	MockedModules.

%% This function is called for each test.
%% It creates a new worker and registers itself as a tester.
%% It also sets up the worker with the necessary cache limits and sessions.
setup_each() ->
	StepNumber = 1,
	MiningSession = <<"mining_session">>,
	Candidate = #mining_candidate{
		packing_difficulty = ?REPLICA_2_9_PACKING_DIFFICULTY,
		session_key = MiningSession,
		step_number = StepNumber
	},
	{ok, Pid} = ar_mining_worker:start_link(?PARTITION, ?REPLICA_2_9_PACKING_DIFFICULTY),
	ar_mining_worker:set_cache_limits(Pid, 10 * ?MiB, 1_000),
	ar_mining_worker:set_sessions(Pid, [MiningSession]),
	%% The mocked modules send their messages to this registered name, i.e.
	%% to the test process itself.
	register(?TESTER_REGISTER_NAME, self()),
	#state{
		worker_pid = Pid,
		session_key = MiningSession,
		candidate = Candidate
	}.

%% This function is called after each test.
%% It unregisters the tester and exits the worker.
%% NOTE: not idempotent — unregister/1 raises badarg on a second call, so it
%% must run exactly once per test.
cleanup_each(State) ->
	cleanup_each_dump_messages(),
	unregister(?TESTER_REGISTER_NAME),
	erlang:unlink(State#state.worker_pid),
	erlang:exit(State#state.worker_pid, kill),
	ok.

%% Drain (and log) anything left in the test process mailbox so a leftover
%% message cannot leak into the next test.
cleanup_each_dump_messages() ->
	receive
		Msg ->
			?debugFmt("Unexpected message in queue: ~p", [Msg]),
			cleanup_each_dump_messages()
	after 0 ->
		ok
	end.
%% This function is called after all tests.
%% It unloads the mocked modules.
cleanup_all(MockedModules) ->
	meck:unload(MockedModules).

%% The mock bridge functions below run *inside the worker's process* (they are
%% the meck replacements). Fire-and-forget mocks just notify the tester;
%% request/response mocks notify the tester and then block (up to 1s) in a
%% selective receive for the scripted response the test sends back.

ar_mining_hash__compute_h0(WorkerPid, Candidate) ->
	?TESTER_REGISTER_NAME ! ?compute_h0(WorkerPid, Candidate).

ar_mining_hash__compute_h1(WorkerPid, Candidate) ->
	?TESTER_REGISTER_NAME ! ?compute_h1(WorkerPid, Candidate).

ar_mining_hash__compute_h2(WorkerPid, Candidate) ->
	?TESTER_REGISTER_NAME ! ?compute_h2(WorkerPid, Candidate).

ar_block__get_recall_range(H0, Partition, PartitionUpperBound) ->
	?TESTER_REGISTER_NAME ! ?get_recall_range(H0, Partition, PartitionUpperBound),
	receive
		?get_recall_range_response(RecallRange1, RecallRange2) ->
			{RecallRange1, RecallRange2}
	after 1000 ->
		exit(no_get_recall_range_response_received)
	end.

ar_mining_io__is_recall_range_readable(Candidate, RecallRange) ->
	?TESTER_REGISTER_NAME ! ?is_recall_range_readable(Candidate, RecallRange),
	receive
		?is_recall_range_readable_response(IsReadable) ->
			IsReadable
	after 1000 ->
		exit(no_is_recall_range_readable_response_received)
	end.

ar_mining_io__read_recall_range(Kind, WorkerPid, Candidate, RecallRangeStart) ->
	?TESTER_REGISTER_NAME ! ?read_recall_range(Kind, WorkerPid, Candidate, RecallRangeStart),
	receive
		?read_recall_range_response() ->
			ok
	after 1000 ->
		exit(no_read_recall_range_response_received)
	end.

ar_node_utils__passes_diff_check(SolutionHash, IsPoA1, DiffPair, PackingDifficulty) ->
	?TESTER_REGISTER_NAME ! ?passes_diff_check(SolutionHash, IsPoA1, DiffPair, PackingDifficulty),
	receive
		?passes_diff_check_response(IsPassed) ->
			IsPassed
	after 1000 ->
		exit(no_passes_diff_check_response_received)
	end.

%% ar_mining_server:prepare_and_post_solution(Candidate)
ar_mining_server__prepare_and_post_solution(Candidate) ->
	?TESTER_REGISTER_NAME ! ?prepare_and_post_solution(Candidate).
%% ------------------------------------------------------------------------------------------------
%% Helpers
%% ------------------------------------------------------------------------------------------------

%% @doc Expect a compute_h0 request from the worker for the given step and
%% reply with the scripted H0 value.
handle_compute_h0(StepNumber, H0, State) ->
	?debugFmt("Handling compute_h0 for step ~p (H0 ~p)", [StepNumber, H0]),
	receive
		?compute_h0(Pid, #mining_candidate{step_number = StepNumber} = Candidate)
				when Pid == State#state.worker_pid ->
			ar_mining_worker:computed_hash(State#state.worker_pid, computed_h0, H0,
				undefined, Candidate)
	after 1000 ->
		exit(no_compute_h0_message_received)
	end.

%% @doc Collect one compute_h1 request per scripted H1 value, then feed all
%% responses back to the worker in the order the requests arrived.
handle_compute_h1s(H1s, State) ->
	handle_compute_h1s(H1s, [], State).

%% The order of calculations might be different, but it does not really matter:
%% the "subchunks" we used to represent the recall range are all zeroes and
%% therefore are all equal.
%% In the test we only rely on the fake hash value to mark the hash valid or not,
%% we do not rely on the order in which these hashes will arrive later for
%% difficulty checks.
%% The same stands for H2s below.
handle_compute_h1s([], Acc, State) ->
	handle_send_computed_h1s(lists:reverse(Acc), State);
handle_compute_h1s([H1 | H1s], Acc, State) ->
	?debugFmt("Handling compute_h1 for H1 ~p", [H1]),
	Acc1 =
		receive
			?compute_h1(Pid, Candidate) when Pid == State#state.worker_pid ->
				[{H1, Candidate} | Acc]
		after 1000 ->
			exit(no_compute_h1_message_received)
		end,
	handle_compute_h1s(H1s, Acc1, State).

%% Deliver the collected {H1, Candidate} pairs back to the worker.
handle_send_computed_h1s([], _State) ->
	ok;
handle_send_computed_h1s([{H1, Candidate} | Acc], State) ->
	ar_mining_worker:computed_hash(State#state.worker_pid, computed_h1, H1,
		<<"Preimage1">>, Candidate),
	handle_send_computed_h1s(Acc, State).

%% Same collect-then-reply scheme as handle_compute_h1s/2, for H2s.
handle_compute_h2s(H2s, State) ->
	handle_compute_h2s(H2s, [], State).
handle_compute_h2s([], Acc, State) ->
	handle_send_computed_h2s(lists:reverse(Acc), State);
handle_compute_h2s([H2 | H2s], Acc, State) ->
	?debugFmt("Handling compute_h2 for H2 ~p", [H2]),
	Acc1 =
		receive
			?compute_h2(Pid, Candidate) when Pid == State#state.worker_pid ->
				[{H2, Candidate} | Acc]
		after 1000 ->
			exit(no_compute_h2_message_received)
		end,
	handle_compute_h2s(H2s, Acc1, State).

%% Deliver the collected {H2, Candidate} pairs back to the worker.
handle_send_computed_h2s([], _State) ->
	ok;
handle_send_computed_h2s([{H2, Candidate} | Acc], State) ->
	ar_mining_worker:computed_hash(State#state.worker_pid, computed_h2, H2,
		<<"Preimage2">>, Candidate),
	handle_send_computed_h2s(Acc, State).

%% @doc Expect a get_recall_range request for H0 and reply with the scripted
%% pair of recall range start offsets. The reply is sent to the worker pid:
%% the mocked ar_block:get_recall_range/3 executes inside the worker process
%% and is blocked in a selective receive for exactly this message.
handle_get_recall_range(H0, RecallRange1, RecallRange2, State) ->
	?debugFmt("Handling get_recall_range for H0 ~p (~p ~p)", [H0, RecallRange1, RecallRange2]),
	receive
		?get_recall_range(H0, _Partition1, _PartitionUpperBound) ->
			State#state.worker_pid ! ?get_recall_range_response(RecallRange1, RecallRange2)
	after 1000 ->
		exit(no_recall_range_message_received)
	end.

%% @doc Expect a readability probe for the given range start and answer with
%% the scripted boolean.
handle_is_recall_range_readable(RecallRangeStart, IsReadable, State) ->
	?debugFmt("Handling is_recall_range_readable for RecallRangeStart ~p (IsReadable ~p)",
		[RecallRangeStart, IsReadable]),
	receive
		?is_recall_range_readable(_Candidate, RecallRangeStart) ->
			State#state.worker_pid ! ?is_recall_range_readable_response(IsReadable)
	after 1000 ->
		exit(no_is_recall_range_readable_message_received)
	end.

%% @doc Expect a read request of the given Kind (chunk1 | chunk2) for the
%% given range start; acknowledge it and return the candidate the worker sent,
%% so the test can echo it back via ar_mining_worker:chunks_read/5.
handle_read_recall_range(Kind, RecallRangeStart, State) ->
	?debugFmt("Handling read_recall_range for Kind ~p (RecallRangeStart ~p)",
		[Kind, RecallRangeStart]),
	receive
		?read_recall_range(Kind, _WorkerPid, Candidate, RecallRangeStart) ->
			State#state.worker_pid ! ?read_recall_range_response(),
			Candidate
	after 1000 ->
		exit(no_read_recall_range_message_received)
	end.
%% @doc Consume one passes_diff_check request per entry in HashesMap (a map of
%% Hash => IsPassed) and reply with the scripted verdict. Order-independent:
%% requests are matched by hash, not by arrival position.
handle_passes_diff_checks(HashesMap, _State) when map_size(HashesMap) == 0 ->
	ok;
handle_passes_diff_checks(HashesMap, State) ->
	HashesMap1 =
		receive
			?passes_diff_check(Hash, _IsPoA1, _DiffPair, _PackingDifficulty) ->
				%% badmatch here means the worker checked a hash the test did
				%% not script — a test failure by design.
				{IsPassed, HashesMap_} = maps:take(Hash, HashesMap),
				?debugFmt("Checking difficulty for ~p (IsPassed ~p)", [Hash, IsPassed]),
				State#state.worker_pid ! ?passes_diff_check_response(IsPassed),
				HashesMap_
		after 1000 ->
			exit(no_passes_diff_check_message_received)
		end,
	handle_passes_diff_checks(HashesMap1, State).

%% @doc Expect the worker to post a solution whose h1 matches.
handle_prepare_and_post_solution_h1(H1) ->
	?debugFmt("Handling prepare_and_post_solution for H1 ~p", [H1]),
	receive
		?prepare_and_post_solution(#mining_candidate{h1 = H1}) ->
			ok
	after 1000 ->
		exit(no_prepare_and_post_solution_message_received)
	end.

%% @doc Expect the worker to post a solution whose h2 matches.
handle_prepare_and_post_solution_h2(H2) ->
	?debugFmt("Handling prepare_and_post_solution for H2 ~p", [H2]),
	receive
		?prepare_and_post_solution(#mining_candidate{h2 = H2}) ->
			ok
	after 1000 ->
		exit(no_prepare_and_post_solution_message_received)
	end.

%% @doc Assert that the worker sends nothing further within one second.
assert_no_messages() ->
	?debugMsg("Asserting no significant messages left"),
	receive
		Msg ->
			?debugFmt("Unexpected message received: ~p", [Msg]),
			error({unexpected_message_received, Msg})
	after 1000 ->
		ok
	end.

%% @doc Build a fake recall range as a single {EndOffset, ZeroBytes} entry
%% sized for the given packing difficulty.
generate_recall_range(RecallRangeStart, Difficulty) ->
	RecallRangeSize = ar_block:get_recall_range_size(Difficulty),
	[{RecallRangeStart + RecallRangeSize, <<0:RecallRangeSize/unit:8>>}].

%% @doc Produce one distinct fake hash per nonce in a recall range.
%% The original binary construction was lost to extraction garbling
%% ("[<> || ...]", which is not valid Erlang); it is reconstructed here as the
%% prefix followed by the nonce index. A fixed 32-bit suffix keeps the hashes
%% unique even when a recall range holds more than 255 nonces — uniqueness is
%% required because the hashes serve as map keys in handle_passes_diff_checks/2.
generate_hashes_for_recall_range(Prefix, Difficulty) ->
	[<<Prefix/binary, N:32>>
		|| N <- lists:seq(1, ar_block:get_nonces_per_recall_range(Difficulty))].
%% ------------------------------------------------------------------------------------------------
%% Tests
%% ------------------------------------------------------------------------------------------------
mining_worker_test_() ->
	{setup, fun setup_all/0, fun cleanup_all/1, [
		?with_setup_each(fun test_no_available_ranges/1),
		?with_setup_each(fun test_only_second_range_available/1),
		?with_setup_each(fun test_only_first_range_available_no_solutions/1),
		?with_setup_each(fun test_both_ranges_available_no_solutions/1),
		?with_setup_each(fun test_both_ranges_available_in_reverse_order_no_solutions/1),
		?with_setup_each(fun test_both_ranges_available_h1_solution/1),
		?with_setup_each(fun test_both_ranges_available_h2_solution/1)
	]}.

%% This test checks the worker behavior when it does not have any available
%% recall ranges for a VDF step.
test_no_available_ranges(State) ->
	H0 = <<"H0">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, false, State),
	handle_is_recall_range_readable(RecallRange2Start, false, State),
	%% 5. No more messages expected, recall ranges are not readable
	assert_no_messages().

%% This test checks the worker behavior when it has only second recall range
%% available for a VDF step. (Without the first range there are no H1s, and
%% H2s require H1s, so the worker must do nothing.)
test_only_second_range_available(State) ->
	H0 = <<"H0">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, false, State),
	handle_is_recall_range_readable(RecallRange2Start, true, State),
	%% 5. No more messages expected, only second recall range is readable
	assert_no_messages().

%% This test checks the worker behavior when it has only first recall range
%% available for a VDF step, but no valid solutions produced.
test_only_first_range_available_no_solutions(State) ->
	H0 = <<"H0">>,
	H1Prefix = <<"H1-">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, true, State),
	handle_is_recall_range_readable(RecallRange2Start, false, State),
	%% 5. Worker asks to read only first recall range
	Candidate2 = handle_read_recall_range(chunk1, RecallRange1Start, State),
	%% 6. Providing the first recall range
	RecallRange1 = generate_recall_range(RecallRange1Start,
		Candidate2#mining_candidate.packing_difficulty),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk1, Candidate2,
		RecallRange1Start, RecallRange1),
	%% 7. Worker asks to compute H1s for the first recall range.
	%% The number of H1s is equal to the number of nonces in the recall range.
	H1s = generate_hashes_for_recall_range(H1Prefix,
		Candidate2#mining_candidate.packing_difficulty),
	handle_compute_h1s(H1s, State),
	%% 8. Worker checks if H1s pass the diff check, rejecting all of them
	Hashes1Map = maps:from_list([{H1, false} || H1 <- H1s]),
	handle_passes_diff_checks(Hashes1Map, State),
	%% 9. No more messages expected
	assert_no_messages().

%% This test checks the worker behavior when it has both recall ranges
%% available for a VDF step, but no valid solutions produced.
test_both_ranges_available_no_solutions(State) ->
	H0 = <<"H0">>,
	H1Prefix = <<"H1-">>,
	H2Prefix = <<"H2-">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, true, State),
	handle_is_recall_range_readable(RecallRange2Start, true, State),
	%% 5. Worker asks to read both recall ranges
	Candidate2 = handle_read_recall_range(chunk1, RecallRange1Start, State),
	Candidate3 = handle_read_recall_range(chunk2, RecallRange2Start, State),
	%% 6. Providing the recall ranges
	RecallRange1 = generate_recall_range(RecallRange1Start,
		Candidate3#mining_candidate.packing_difficulty),
	RecallRange2 = generate_recall_range(RecallRange2Start,
		Candidate3#mining_candidate.packing_difficulty),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk1, Candidate2,
		RecallRange1Start, RecallRange1),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk2, Candidate3,
		RecallRange2Start, RecallRange2),
	%% 7. Worker asks to compute H1s for the recall ranges.
	%% The number of H1s is equal to the number of nonces in the recall range.
	H1s = generate_hashes_for_recall_range(H1Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h1s(H1s, State),
	%% 8. Worker checks if H1s pass the diff check, rejecting all of them
	Hashes1Map = maps:from_list([{H1, false} || H1 <- H1s]),
	handle_passes_diff_checks(Hashes1Map, State),
	%% 9. Worker asks to compute H2s for the recall ranges.
	%% The number of H2s is equal to the number of nonces in the recall range.
	H2s = generate_hashes_for_recall_range(H2Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h2s(H2s, State),
	%% 10. Worker checks if H2s pass the diff check, rejecting all of them
	Hashes2Map = maps:from_list([{H2, false} || H2 <- H2s]),
	handle_passes_diff_checks(Hashes2Map, State),
	%% 11. No more messages expected
	assert_no_messages().

%% This test checks the worker behavior when it has both recall ranges
%% available for a VDF step, but no valid solutions produced.
%% In this test the second recall range is available first.
test_both_ranges_available_in_reverse_order_no_solutions(State) ->
	H0 = <<"H0">>,
	H1Prefix = <<"H1-">>,
	H2Prefix = <<"H2-">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, true, State),
	handle_is_recall_range_readable(RecallRange2Start, true, State),
	%% 5. Worker asks to read both recall ranges
	Candidate2 = handle_read_recall_range(chunk1, RecallRange1Start, State),
	Candidate3 = handle_read_recall_range(chunk2, RecallRange2Start, State),
	%% 6. Providing the recall ranges (chunk2 delivered before chunk1)
	RecallRange1 = generate_recall_range(RecallRange1Start,
		Candidate3#mining_candidate.packing_difficulty),
	RecallRange2 = generate_recall_range(RecallRange2Start,
		Candidate3#mining_candidate.packing_difficulty),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk2, Candidate3,
		RecallRange2Start, RecallRange2),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk1, Candidate2,
		RecallRange1Start, RecallRange1),
	%% 7. Worker asks to compute H1s for the recall ranges.
	%% The number of H1s is equal to the number of nonces in the recall range.
	H1s = generate_hashes_for_recall_range(H1Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h1s(H1s, State),
	%% 8. Worker checks if H1s pass the diff check, rejecting all of them
	Hashes1Map = maps:from_list([{H1, false} || H1 <- H1s]),
	handle_passes_diff_checks(Hashes1Map, State),
	%% 9. Worker asks to compute H2s for the recall ranges.
	%% The number of H2s is equal to the number of nonces in the recall range.
	H2s = generate_hashes_for_recall_range(H2Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h2s(H2s, State),
	%% 10. Worker checks if H2s pass the diff check, rejecting all of them
	Hashes2Map = maps:from_list([{H2, false} || H2 <- H2s]),
	handle_passes_diff_checks(Hashes2Map, State),
	%% 11. No more messages expected
	assert_no_messages().

%% This test checks the worker behavior when it has both recall ranges
%% available for a VDF step, only one valid H1 solution is produced.
test_both_ranges_available_h1_solution(State) ->
	H0 = <<"H0">>,
	H1Prefix = <<"H1-">>,
	H2Prefix = <<"H2-">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, true, State),
	handle_is_recall_range_readable(RecallRange2Start, true, State),
	%% 5. Worker asks to read both recall ranges
	Candidate2 = handle_read_recall_range(chunk1, RecallRange1Start, State),
	Candidate3 = handle_read_recall_range(chunk2, RecallRange2Start, State),
	%% 6. Providing the recall ranges
	RecallRange1 = generate_recall_range(RecallRange1Start,
		Candidate3#mining_candidate.packing_difficulty),
	RecallRange2 = generate_recall_range(RecallRange2Start,
		Candidate3#mining_candidate.packing_difficulty),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk1, Candidate2,
		RecallRange1Start, RecallRange1),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk2, Candidate3,
		RecallRange2Start, RecallRange2),
	%% 7. Worker asks to compute H1s for the recall ranges.
	%% The number of H1s is equal to the number of nonces in the recall range.
	[ValidH1 | RestH1s] = H1s = generate_hashes_for_recall_range(H1Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h1s(H1s, State),
	%% 8. Worker checks if H1s pass the diff check, rejecting all of them but the very first one
	Hashes1Map = maps:from_list([{ValidH1, true} | [{H1, false} || H1 <- RestH1s]]),
	handle_passes_diff_checks(Hashes1Map, State),
	%% 8.1. Worker posts the valid H1 solution
	handle_prepare_and_post_solution_h1(ValidH1),
	%% 9. Worker asks to compute H2s for the recall ranges.
	%% The number of H2s is equal to the number of nonces in the recall range.
	%% The very first H2 will never be called to be computed, because the
	%% corresponding H1 passes the difficulty check; therefore, we expect one less
	%% H2 to be computed.
	[_ | RestH2s] = _H2s = generate_hashes_for_recall_range(H2Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h2s(RestH2s, State),
	%% 10. Worker checks if H2s pass the diff check, rejecting all of them
	Hashes2Map = maps:from_list([{H2, false} || H2 <- RestH2s]),
	handle_passes_diff_checks(Hashes2Map, State),
	%% 11. No more messages expected
	assert_no_messages().

%% This test checks the worker behavior when it has both recall ranges
%% available for a VDF step, only one valid H2 solution is produced.
test_both_ranges_available_h2_solution(State) ->
	H0 = <<"H0">>,
	H1Prefix = <<"H1-">>,
	H2Prefix = <<"H2-">>,
	RecallRange1Start = 100,
	RecallRange2Start = 200,
	Candidate1 = State#state.candidate,
	%% 1. Add compute_h0 task
	?debugMsg("Adding compute_h0 task"),
	ar_mining_worker:add_task(State#state.worker_pid, compute_h0, Candidate1),
	%% 2. Worker asks to compute H0
	handle_compute_h0(Candidate1#mining_candidate.step_number, H0, State),
	%% 3. Worker asks for recall ranges for the given H0
	handle_get_recall_range(H0, RecallRange1Start, RecallRange2Start, State),
	%% 4. Worker checks if recall ranges are readable
	handle_is_recall_range_readable(RecallRange1Start, true, State),
	handle_is_recall_range_readable(RecallRange2Start, true, State),
	%% 5. Worker asks to read both recall ranges
	Candidate2 = handle_read_recall_range(chunk1, RecallRange1Start, State),
	Candidate3 = handle_read_recall_range(chunk2, RecallRange2Start, State),
	%% 6. Providing the recall ranges
	RecallRange1 = generate_recall_range(RecallRange1Start,
		Candidate3#mining_candidate.packing_difficulty),
	RecallRange2 = generate_recall_range(RecallRange2Start,
		Candidate3#mining_candidate.packing_difficulty),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk1, Candidate2,
		RecallRange1Start, RecallRange1),
	ar_mining_worker:chunks_read(State#state.worker_pid, chunk2, Candidate3,
		RecallRange2Start, RecallRange2),
	%% 7. Worker asks to compute H1s for the recall ranges.
	%% The number of H1s is equal to the number of nonces in the recall range.
	H1s = generate_hashes_for_recall_range(H1Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h1s(H1s, State),
	%% 8. Worker checks if H1s pass the diff check, rejecting all of them
	%% (so that the solution can only come from an H2).
	Passes1Map = maps:from_list([{H1, false} || H1 <- H1s]),
	handle_passes_diff_checks(Passes1Map, State),
	%% 9. Worker asks to compute H2s for the recall ranges.
	%% The number of H2s is equal to the number of nonces in the recall range.
	[ValidH2 | RestH2s] = H2s = generate_hashes_for_recall_range(H2Prefix,
		Candidate3#mining_candidate.packing_difficulty),
	handle_compute_h2s(H2s, State),
	%% 9.1. Worker checks if H2s pass the diff check, rejecting all of them but the very first one
	Hashes2Map = maps:from_list([{ValidH2, true} | [{H2, false} || H2 <- RestH2s]]),
	handle_passes_diff_checks(Hashes2Map, State),
	%% 10. Worker posts the valid H2 solution
	handle_prepare_and_post_solution_h2(ValidH2),
	%% 11. No more messages expected
	assert_no_messages().


================================================
FILE: apps/arweave/test/ar_node_tests.erl
================================================
-module(ar_node_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_pricing.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [sign_v1_tx/3, read_block_when_stored/1]).
ar_node_interface_test_() ->
	{timeout, 300, fun test_ar_node_interface/0}.

%% @doc Sanity-check the basic node interface: height and current block hash
%% before and after mining one block.
test_ar_node_interface() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	?assertEqual(0, ar_node:get_height()),
	?assertEqual(B0#block.indep_hash, ar_node:get_current_block_hash()),
	ar_test_node:mine(),
	B0H = B0#block.indep_hash,
	%% The block index is newest-first: the mined block, then the genesis.
	[{H, _, _}, {B0H, _, _}] = ar_test_node:wait_until_height(main, 1),
	?assertEqual(1, ar_node:get_height()),
	?assertEqual(H, ar_node:get_current_block_hash()).

mining_reward_test_() ->
	{timeout, 120, fun test_mining_reward/0}.

%% @doc Verify that mining rewards stay locked for ?LOCKED_REWARDS_BLOCKS
%% blocks, accumulate in the reward history, and are then released to the
%% miner's balance one block's worth at a time.
test_mining_reward() ->
	{_Priv1, Pub1} = ar_wallet:new_keyfile(),
	[B0] = ar_weave:init(),
	ar_test_node:start(B0, MiningAddr = ar_wallet:to_address(Pub1)),
	ar_test_node:mine(),
	ar_test_node:wait_until_height(main, 1),
	B1 = ar_node:get_current_block(),
	[{MiningAddr, _, Reward, 1}, _] = B1#block.reward_history,
	{_, TotalLocked} = lists:foldl(
		fun(Height, {PrevB, TotalLocked}) ->
			%% While locked, the balance stays 0 but the locked total grows.
			?assertEqual(0, ar_node:get_balance(Pub1)),
			?assertEqual(TotalLocked,
				ar_rewards:get_total_reward_for_address(MiningAddr, PrevB)),
			ar_test_node:mine(),
			ar_test_node:wait_until_height(main, Height + 1),
			B = ar_node:get_current_block(),
			{B, TotalLocked + B#block.reward}
		end,
		{B1, Reward},
		lists:seq(1, ?LOCKED_REWARDS_BLOCKS)
	),
	?assertEqual(Reward, ar_node:get_balance(Pub1)),
	%% Unlock one more reward.
	ar_test_node:mine(),
	ar_test_node:wait_until_height(main, ?LOCKED_REWARDS_BLOCKS + 2),
	FinalB = ar_node:get_current_block(),
	%% NOTE(review): the extra 10 Winston presumably comes from the fixed
	%% reward of the second block in this setup — TODO confirm against the
	%% test weave's reward schedule.
	?assertEqual(Reward + 10, ar_node:get_balance(Pub1)),
	?assertEqual(
		TotalLocked - Reward - 10 + FinalB#block.reward,
		ar_rewards:get_total_reward_for_address(MiningAddr, FinalB)).

% @doc Check that other nodes accept a new block and associated mining reward.
multi_node_mining_reward_test_() ->
	ar_test_node:test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}],
		fun test_multi_node_mining_reward/0, 120).
%% @doc A peer mines a block; the main node must accept it and track the
%% peer's (locked, then unlocked) mining reward.
test_multi_node_mining_reward() ->
	{_Priv1, Pub1} = ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, []),
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0, MiningAddr = ar_wallet:to_address(Pub1)),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(main, 1),
	B1 = ar_node:get_current_block(),
	[{MiningAddr, _, Reward, 1}, _] = B1#block.reward_history,
	?assertEqual(0, ar_node:get_balance(Pub1)),
	%% Mine out the lock period on the main node; the peer's reward stays
	%% locked (balance 0) until the last block of the period.
	lists:foreach(
		fun(Height) ->
			?assertEqual(0, ar_node:get_balance(Pub1)),
			ar_test_node:mine(),
			ar_test_node:wait_until_height(main, Height + 1)
		end,
		lists:seq(1, ?LOCKED_REWARDS_BLOCKS)
	),
	?assertEqual(Reward, ar_node:get_balance(Pub1)).

%% @doc Ensure that TX replay attack mitigation works.
replay_attack_test_() ->
	{timeout, 120, fun() ->
		Key1 = {_Priv1, Pub1} = ar_wallet:new(),
		{_Priv2, Pub2} = ar_wallet:new(),
		[B0] = ar_weave:init([{ar_wallet:to_address(Pub1), ?AR(10000), <<>>}]),
		ar_test_node:start(B0),
		ar_test_node:start_peer(peer1, B0),
		ar_test_node:connect_to_peer(peer1),
		SignedTX = sign_v1_tx(main, Key1, #{ target => ar_wallet:to_address(Pub2),
				quantity => ?AR(1000), reward => ?AR(1), last_tx => <<>> }),
		ar_test_node:assert_post_tx_to_peer(main, SignedTX),
		ar_test_node:mine(),
		ar_test_node:assert_wait_until_height(peer1, 1),
		%% 10000 - 1000 (quantity) - 1 (reward) = 8999.
		?assertEqual(?AR(8999),
			ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub1])),
		?assertEqual(?AR(1000),
			ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub2])),
		%% Resubmit the already-mined transaction: it must be ignored, so
		%% the balances stay unchanged after the next block.
		ar_events:send(tx, {ready_for_mining, SignedTX}),
		ar_test_node:wait_until_receives_txs([SignedTX]),
		ar_test_node:mine(),
		ar_test_node:assert_wait_until_height(peer1, 2),
		?assertEqual(?AR(8999),
			ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub1])),
		?assertEqual(?AR(1000),
			ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub2]))
	end}.

%% @doc Create two new wallets and a blockweave with a wallet balance.
%% Create and verify execution of a signed exchange of value tx.
wallet_transaction_test_() ->
	ar_test_node:test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}],
		fun test_wallet_transaction/0, 120).

%% @doc Run the same value-transfer scenario for each supported signature
%% scheme (RSA-PSS, ECDSA secp256k1, Ed25519). Returns a list of named eunit
%% test tuples, one per key type.
test_wallet_transaction() ->
	TestWalletTransaction = fun(KeyType) ->
		fun() ->
			{Priv1, Pub1} = ar_wallet:new_keyfile(KeyType),
			{_Priv2, Pub2} = ar_wallet:new(),
			TX = ar_tx:new(ar_wallet:to_address(Pub2), ?AR(1), ?AR(9000), <<>>),
			SignedTX = ar_tx:sign(TX#tx{ format = 2 }, Priv1, Pub1),
			[B0] = ar_weave:init([{ar_wallet:to_address(Pub1), ?AR(10000), <<>>}]),
			ar_test_node:start(B0,
				ar_wallet:to_address(ar_wallet:new_keyfile({eddsa, ed25519}))),
			ar_test_node:start_peer(peer1, B0),
			ar_test_node:connect_to_peer(peer1),
			ar_test_node:assert_post_tx_to_peer(main, SignedTX),
			ar_test_node:mine(),
			ar_test_node:wait_until_height(main, 1),
			ar_test_node:assert_wait_until_height(peer1, 1),
			%% 10000 - 9000 (quantity) - 1 (reward) = 999.
			?assertEqual(?AR(999),
				ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub1])),
			?assertEqual(?AR(9000),
				ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub2]))
		end
	end,
	[
		{"PS256_65537", timeout, 60, TestWalletTransaction({?RSA_SIGN_ALG, 65537})},
		{"ES256K", timeout, 60, TestWalletTransaction({?ECDSA_SIGN_ALG, secp256k1})},
		{"Ed25519", timeout, 60, TestWalletTransaction({?EDDSA_SIGN_ALG, ed25519})}
	].

%% @doc Ensure that TX Id threading functions correctly (in the positive case).
%% @doc Ensure that TX Id threading functions correctly (in the positive case):
%% a transaction anchored (via last_tx) on a previous transaction of the same
%% wallet is accepted and applied.
tx_threading_test_() ->
	{timeout, 120, fun() ->
		Key1 = {_Priv1, Pub1} = ar_wallet:new(),
		{_Priv2, Pub2} = ar_wallet:new(),
		[B0] = ar_weave:init([{ar_wallet:to_address(Pub1), ?AR(10000), <<>>}]),
		ar_test_node:start(B0),
		ar_test_node:start_peer(peer1, B0),
		ar_test_node:connect_to_peer(peer1),
		SignedTX = sign_v1_tx(main, Key1, #{ target => ar_wallet:to_address(Pub2),
				quantity => ?AR(1000), reward => ?AR(1), last_tx => <<>> }),
		%% The second transaction is anchored on the first one's id.
		SignedTX2 = sign_v1_tx(main, Key1, #{ target => ar_wallet:to_address(Pub2),
				quantity => ?AR(1000), reward => ?AR(1),
				last_tx => SignedTX#tx.id }),
		ar_test_node:assert_post_tx_to_peer(main, SignedTX),
		ar_test_node:mine(),
		ar_test_node:wait_until_height(main, 1),
		ar_test_node:assert_post_tx_to_peer(main, SignedTX2),
		ar_test_node:mine(),
		ar_test_node:assert_wait_until_height(peer1, 2),
		%% Both transfers went through: 2 x (1000 quantity + 1 reward) deducted.
		?assertEqual(?AR(7998),
				ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub1])),
		?assertEqual(?AR(2000),
				ar_test_node:remote_call(peer1, ar_node, get_balance, [Pub2]))
	end}.

persisted_mempool_test_() ->
	%% Make the propagation delay noticeable so that the submitted transactions do not
	%% become ready for mining before the node is restarted and we assert that waiting
	%% transactions found in the persisted mempool are (re-)submitted to peers.
	ar_test_node:test_with_mocked_functions([{ar_node_worker, calculate_delay,
			fun(_Size) -> 5000 end}],
		fun test_persisted_mempool/0).

test_persisted_mempool() ->
	{_, Pub} = Wallet = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	%% Disconnect so the transaction cannot propagate before the restart.
	ar_test_node:disconnect_from(peer1),
	SignedTX = ar_test_node:sign_tx(Wallet, #{
			last_tx => ar_test_node:get_tx_anchor(main) }),
	{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} =
		ar_test_node:post_tx_to_peer(main, SignedTX, false),
	%% Wait until the transaction lands in the mempool.
	true = ar_util:do_until(
		fun() -> maps:is_key(SignedTX#tx.id, ar_mempool:get_map()) end,
		100,
		30000
	),
	Config = ar_test_node:stop(),
	try
		%% Rejoin the network.
		%% Expect the pending transactions to be picked up and distributed.
		ok = arweave_config:set_env(Config#config{
			start_from_latest_state = false,
			peers = [ar_test_node:peer_ip(peer1)] }),
		ar:start_dependencies(),
		ar_test_node:wait_until_joined(),
		ar_test_node:connect_to_peer(peer1),
		ar_test_node:assert_wait_until_receives_txs(peer1, [SignedTX]),
		ar_test_node:mine(),
		[{H, _, _} | _] = ar_test_node:assert_wait_until_height(peer1, 1),
		B = read_block_when_stored(H),
		?assertEqual([SignedTX#tx.id], B#block.txs)
	after
		%% Restore the original configuration even if the test fails.
		ok = arweave_config:set_env(Config)
	end.

================================================
FILE: apps/arweave/test/ar_nonce_limiter_tests.erl
================================================
-module(ar_nonce_limiter_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% @doc Reset the state and stop computing steps automatically. Used in tests.
reset_and_pause() ->
	gen_server:cast(ar_nonce_limiter, reset_and_pause).

%% @doc Do not emit the initialized event. Used in tests.
turn_off_initialized_event() ->
	gen_server:cast(ar_nonce_limiter, turn_off_initialized_event).

%% @doc Get all steps starting from the latest on the current tip. Used in tests.
get_steps() ->
	gen_server:call(ar_nonce_limiter, get_steps).

%% @doc Compute a single step. Used in tests. Blocks the caller until the
%% nonce limiter reports a computed output.
step() ->
	Self = self(),
	spawn(
		fun() ->
			ok = ar_events:subscribe(nonce_limiter),
			gen_server:cast(ar_nonce_limiter, compute_step),
			receive
				{event, nonce_limiter, {computed_output, _}} ->
					Self ! done
			end
		end
	),
	receive
		done ->
			ok
	end.

%% vdf_difficulty and next_vdf_difficulty in cached VDF sessions should be
%% updated whenever a new block is validated.
assert_session(B, PrevB) ->
	#nonce_limiter_info{ vdf_difficulty = PrevBVDFDifficulty,
			next_vdf_difficulty = PrevBNextVDFDifficulty } =
		PrevB#block.nonce_limiter_info,
	#nonce_limiter_info{ vdf_difficulty = BVDFDifficulty,
			next_vdf_difficulty = BNextVDFDifficulty } = B#block.nonce_limiter_info,
	PrevBSessionKey = ar_nonce_limiter:session_key(PrevB#block.nonce_limiter_info),
	BSessionKey = ar_nonce_limiter:session_key(B#block.nonce_limiter_info),
	BSession = ar_nonce_limiter:get_session(BSessionKey),
	?assertEqual(BVDFDifficulty, BSession#vdf_session.vdf_difficulty),
	?assertEqual(BNextVDFDifficulty, BSession#vdf_session.next_vdf_difficulty),
	case PrevBSessionKey == BSessionKey of
		true ->
			ok;
		false ->
			%% B opened a new session; the previous session must still carry
			%% the difficulties of the previous block.
			PrevBSession = ar_nonce_limiter:get_session(PrevBSessionKey),
			?assertEqual(PrevBVDFDifficulty, PrevBSession#vdf_session.vdf_difficulty),
			?assertEqual(PrevBNextVDFDifficulty,
					PrevBSession#vdf_session.next_vdf_difficulty)
	end.

%% Request validation of B against PrevB and assert the outcome matches
%% ExpectedResult (valid | {invalid, Code}). Fails on a 2 s timeout.
assert_validate(B, PrevB, ExpectedResult) ->
	ar_nonce_limiter:request_validation(B#block.indep_hash,
			B#block.nonce_limiter_info, PrevB#block.nonce_limiter_info),
	BH = B#block.indep_hash,
	receive
		{event, nonce_limiter, {valid, BH}} ->
			case ExpectedResult of
				valid ->
					assert_session(B, PrevB),
					ok;
				_ ->
					?assert(false, iolist_to_binary(io_lib:format("Unexpected "
							"validation success. Expected: ~p.", [ExpectedResult])))
			end;
		{event, nonce_limiter, {invalid, BH, Code}} ->
			case ExpectedResult of
				{invalid, Code} ->
					ok;
				_ ->
					?assert(false, iolist_to_binary(io_lib:format("Unexpected "
							"validation failure: ~p. Expected: ~p.",
							[Code, ExpectedResult])))
			end
	after 2000 ->
		?assert(false, "Validation timeout.")
	end.

%% Assert the nonce limiter settles on step number N.
assert_step_number(N) ->
	timer:sleep(200),
	?assert(ar_util:do_until(
			fun() -> ar_nonce_limiter:get_current_step_number() == N end, 100, 1000)).
%% Build a minimal #block{} carrying the given nonce limiter parameters —
%% just enough for exercising ar_nonce_limiter validation.
test_block(StepNumber, Output, Seed, NextSeed, LastStepCheckpoints, Steps,
		VDFDifficulty, NextVDFDifficulty) ->
	#block{
		indep_hash = crypto:strong_rand_bytes(48),
		nonce_limiter_info = #nonce_limiter_info{
			output = Output,
			global_step_number = StepNumber,
			seed = Seed,
			next_seed = NextSeed,
			last_step_checkpoints = LastStepCheckpoints,
			steps = Steps,
			vdf_difficulty = VDFDifficulty,
			next_vdf_difficulty = NextVDFDifficulty
		}
	}.

%% Mock: entropy reset line every 5 steps instead of the mainnet frequency.
mock_reset_frequency() ->
	{ar_nonce_limiter, get_reset_frequency, fun() -> 5 end}.

applies_validated_steps_test_() ->
	ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
		fun test_applies_validated_steps/0, 60).

test_applies_validated_steps() ->
	reset_and_pause(),
	Seed = crypto:strong_rand_bytes(48),
	NextSeed = crypto:strong_rand_bytes(48),
	NextSeed2 = crypto:strong_rand_bytes(48),
	InitialOutput = crypto:strong_rand_bytes(32),
	B1VDFDifficulty = 3,
	B1NextVDFDifficulty = 3,
	B1 = test_block(1, InitialOutput, Seed, NextSeed, [], [], B1VDFDifficulty,
			B1NextVDFDifficulty),
	turn_off_initialized_event(),
	ar_nonce_limiter:account_tree_initialized([B1]),
	true = ar_util:do_until(
		fun() -> ar_nonce_limiter:get_current_step_number() == 1 end, 100, 1000),
	assert_session(B1, B1),
	{ok, Output2, _} = ar_nonce_limiter:compute(2, InitialOutput, B1VDFDifficulty),
	B2VDFDifficulty = 3,
	B2NextVDFDifficulty = 4,
	B2 = test_block(2, Output2, Seed, NextSeed, [], [Output2], B2VDFDifficulty,
			B2NextVDFDifficulty),
	ok = ar_events:subscribe(nonce_limiter),
	%% Validating the same block twice must succeed both times.
	assert_validate(B2, B1, valid),
	assert_validate(B2, B1, valid),
	%% An empty nonce limiter info is rejected.
	assert_validate(B2#block{ nonce_limiter_info = #nonce_limiter_info{} }, B1,
			{invalid, 1}),
	N2 = B2#block.nonce_limiter_info,
	%% Missing steps are rejected.
	assert_validate(B2#block{
			nonce_limiter_info = N2#nonce_limiter_info{ steps = [] } }, B1,
			{invalid, 4}),
	%% A step count inconsistent with the step numbers is rejected.
	assert_validate(B2#block{
			nonce_limiter_info = N2#nonce_limiter_info{
					steps = [Output2, Output2] } }, B1,
			{invalid, 2}),
	assert_step_number(2),
	[step() || _ <- lists:seq(1, 3)],
	assert_step_number(5),
	ar_events:send(node_state, {new_tip, B2, B1}),
	%% We have just applied B2 with a VDF difficulty update => a new session has
	%% to be opened.
	assert_step_number(2),
	assert_session(B2, B1),
	{ok, Output3, _} = ar_nonce_limiter:compute(3, Output2, B2VDFDifficulty),
	{ok, Output4, _} = ar_nonce_limiter:compute(4, Output3, B2VDFDifficulty),
	B3VDFDifficulty = 3,
	B3NextVDFDifficulty = 4,
	B3 = test_block(4, Output4, Seed, NextSeed, [], [Output4, Output3],
			B3VDFDifficulty, B3NextVDFDifficulty),
	assert_validate(B3, B2, valid),
	assert_validate(B3, B1, valid),
	%% Entropy reset line crossed at step 5, add entropy and apply next_vdf_difficulty.
	{ok, Output5, _} = ar_nonce_limiter:compute(5,
			ar_nonce_limiter:mix_seed(Output4, NextSeed), B3NextVDFDifficulty),
	B4VDFDifficulty = 4,
	B4NextVDFDifficulty = 5,
	B4 = test_block(5, Output5, NextSeed, NextSeed2, [], [Output5],
			B4VDFDifficulty, B4NextVDFDifficulty),
	[step() || _ <- lists:seq(1, 6)],
	assert_step_number(10),
	assert_validate(B4, B3, valid),
	ar_events:send(node_state, {new_tip, B4, B3}),
	assert_step_number(9),
	assert_session(B4, B3),
	%% A block cannot be validated against itself.
	assert_validate(B4, B4, {invalid, 1}),
	%% Steps 5, 6, 7, 8, 9, 10.
	B5VDFDifficulty = 5,
	B5NextVDFDifficulty = 6,
	B5 = test_block(10, <<>>, NextSeed, NextSeed2, [], [<<>>], B5VDFDifficulty,
			B5NextVDFDifficulty),
	assert_validate(B5, B4, {invalid, 3}),
	B6VDFDifficulty = 5,
	B6NextVDFDifficulty = 6,
	B6 = test_block(10, <<>>, NextSeed, NextSeed2, [],
			% Steps 10, 9, 8, 7, 6.
			[<<>> | lists:sublist(get_steps(), 4)],
			B6VDFDifficulty, B6NextVDFDifficulty),
	assert_validate(B6, B4, {invalid, 3}),
	Invalid = crypto:strong_rand_bytes(32),
	B7VDFDifficulty = 5,
	B7NextVDFDifficulty = 6,
	B7 = test_block(10, Invalid, NextSeed, NextSeed2, [],
			% Steps 10, 9, 8, 7, 6.
			[Invalid | lists:sublist(get_steps(), 4)],
			B7VDFDifficulty, B7NextVDFDifficulty),
	assert_validate(B7, B4, {invalid, 3}),
	%% Last valid block was B4, so that's the vdf_difficulty to use (not
	%% next_vdf_difficulty cause the next entropy reset line isn't until step 10).
	{ok, Output6, _} = ar_nonce_limiter:compute(6, Output5, B4VDFDifficulty),
	{ok, Output7, _} = ar_nonce_limiter:compute(7, Output6, B4VDFDifficulty),
	{ok, Output8, _} = ar_nonce_limiter:compute(8, Output7, B4VDFDifficulty),
	B8VDFDifficulty = 4,
	%% Change the next_vdf_difficulty to confirm that apply_tip2 handles updating an
	%% existing VDF session.
	B8NextVDFDifficulty = 6,
	B8 = test_block(8, Output8, NextSeed, NextSeed2, [],
			[Output8, Output7, Output6], B8VDFDifficulty, B8NextVDFDifficulty),
	ar_events:send(node_state, {new_tip, B8, B4}),
	timer:sleep(1000),
	assert_session(B8, B4),
	assert_validate(B8, B4, valid),
	ok.

================================================
FILE: apps/arweave/test/ar_packing_tests.erl
================================================
-module(ar_packing_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

%% Byte offset used for all pack/unpack calls in these tests.
-define(CHUNK_OFFSET, 10*256*1024).
-define(ENCODED_TX_ROOT, <<"9d857DmXbSyhX6bgF7CDMDCl0f__RUjryMMvueFN9wE">>).
%% Generous timeouts: packing can be slow on shared CI machines.
-define(REQUEST_REPACK_TIMEOUT, 50_000).
-define(REQUEST_UNPACK_TIMEOUT, 50_000).

% request_test() ->
% 	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
% 	[B0] = ar_weave:init(),
% 	ar_test_node:start(B0, RewardAddress),
% 	test_full_chunk(),
% 	test_partial_chunk(),
% 	test_full_chunk_repack(),
% 	test_partial_chunk_repack(),
% 	test_invalid_pad(),
% 	test_request_repack(RewardAddress),
% 	test_request_unpack(RewardAddress).
%% Fixture-based packing tests. setup/0 starts a node so that
%% ar_packing_server (and the RandomX state it needs) is available.
packing_test_() ->
	{setup, fun setup/0, fun teardown/1, [
		fun test_feistel/0,
		fun test_full_chunk/0,
		fun test_partial_chunk/0,
		fun test_full_chunk_repack/0,
		fun test_partial_chunk_repack/0,
		fun test_invalid_pad/0,
		fun test_request_repack/0,
		fun test_request_unpack/0
	]}.

%% Start a node with the fixed reward address so packed fixtures match.
setup() ->
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	[B0] = ar_weave:init(),
	ar_test_node:start(B0, RewardAddress),
	RewardAddress.

teardown(_) ->
	% optional cleanup code
	ok.

%% Round-trip the Feistel cipher NIF on two 2 MiB payloads and compare the
%% encrypted output against known-good SHA-256 digests.
test_feistel() ->
	Unpacked = << 1:(8*2097152) >>,
	Entropy = << 2:(8*2097152) >>,
	{ok, Packed} = ar_rxsquared_nif:rsp_feistel_encrypt_nif(Unpacked, Entropy),
	PackedHashReal = crypto:hash(sha256, Packed),
	%% Known-good digest of the encrypted payload.
	PackedHashExpd = << 73,123,99,202,146,24,95,220,127,228,210,8,106,220,94,
			251,234,166,63,206,16,213,64,208,35,104,15,144,215,
			139,183,59 >>,
	?assertEqual(PackedHashExpd, PackedHashReal),
	{ok, UnpackedReal} = ar_rxsquared_nif:rsp_feistel_decrypt_nif(Packed, Entropy),
	?assertEqual(Unpacked, UnpackedReal),
	Unpacked2 = << 3:(8*2097152) >>,
	Entropy2 = << 4:(8*2097152) >>,
	{ok, Packed2} = ar_rxsquared_nif:rsp_feistel_encrypt_nif(Unpacked2, Entropy2),
	PackedHashReal2 = crypto:hash(sha256, Packed2),
	PackedHashExpd2 = << 226,95,254,246,118,154,133,215,229,243,245,255,18,48,
			130,246,98,240,207,197,188,161,222,66,140,47,110,18,
			193,145,96,210 >>,
	?assertEqual(PackedHashExpd2, PackedHashReal2),
	{ok, UnpackedReal2} = ar_rxsquared_nif:rsp_feistel_decrypt_nif(Packed2, Entropy2),
	?assertEqual(Unpacked2, UnpackedReal2),
	ok.
%% Pack and unpack a full 256 KiB chunk under every supported packing scheme
%% and compare the results against the recorded fixtures.
test_full_chunk() ->
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.256kb"),
	Spora25Data = ar_test_node:load_fixture("ar_packing_tests/spora25.256kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.256kb"),
	ChunkSize = 256*1024,
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	%% {Packing, packed fixture} for each scheme, in fixture order.
	Cases = [
		{unpacked, UnpackedData},
		{spora_2_5, Spora25Data},
		{{spora_2_6, RewardAddress}, Spora26Data}
	],
	%% Packing the unpacked fixture must reproduce the packed fixture bytes.
	lists:foreach(
		fun({Packing, PackedFixture}) ->
			?assertEqual({ok, PackedFixture},
				ar_packing_server:pack(Packing, ?CHUNK_OFFSET, TXRoot, UnpackedData))
		end,
		Cases),
	%% Unpacking each packed fixture must return the original data.
	lists:foreach(
		fun({Packing, PackedFixture}) ->
			?assertEqual({ok, UnpackedData},
				ar_packing_server:unpack(Packing, ?CHUNK_OFFSET, TXRoot,
					PackedFixture, ChunkSize))
		end,
		Cases).
%% Same as test_full_chunk/0 but with a partial (100 KiB) chunk, exercising
%% the padding path of each packing scheme.
test_partial_chunk() ->
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.100kb"),
	Spora25Data = ar_test_node:load_fixture("ar_packing_tests/spora25.100kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.100kb"),
	ChunkSize = 100*1024,
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	%% {Packing, packed fixture} for each scheme, in fixture order.
	Cases = [
		{unpacked, UnpackedData},
		{spora_2_5, Spora25Data},
		{{spora_2_6, RewardAddress}, Spora26Data}
	],
	%% Packing the unpacked fixture must reproduce the packed fixture bytes.
	lists:foreach(
		fun({Packing, PackedFixture}) ->
			?assertEqual({ok, PackedFixture},
				ar_packing_server:pack(Packing, ?CHUNK_OFFSET, TXRoot, UnpackedData))
		end,
		Cases),
	%% Unpacking each packed fixture must return the original data.
	lists:foreach(
		fun({Packing, PackedFixture}) ->
			?assertEqual({ok, UnpackedData},
				ar_packing_server:unpack(Packing, ?CHUNK_OFFSET, TXRoot,
					PackedFixture, ChunkSize))
		end,
		Cases).
%% Repack a full 256 KiB chunk across the 3x3 matrix of packing schemes.
%% When source and target packing are identical (and not unpacked), the
%% unpacked component of the result is none.
test_full_chunk_repack() ->
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.256kb"),
	Spora25Data = ar_test_node:load_fixture("ar_packing_tests/spora25.256kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.256kb"),
	ChunkSize = 256*1024,
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	Spora26 = {spora_2_6, RewardAddress},
	%% {Target, Source, Input, ExpectedPacked, ExpectedUnpacked}.
	Cases = [
		{unpacked, unpacked, UnpackedData, UnpackedData, UnpackedData},
		{spora_2_5, unpacked, UnpackedData, Spora25Data, UnpackedData},
		{Spora26, unpacked, UnpackedData, Spora26Data, UnpackedData},
		{unpacked, spora_2_5, Spora25Data, UnpackedData, UnpackedData},
		{spora_2_5, spora_2_5, Spora25Data, Spora25Data, none},
		{Spora26, spora_2_5, Spora25Data, Spora26Data, UnpackedData},
		{unpacked, Spora26, Spora26Data, UnpackedData, UnpackedData},
		{spora_2_5, Spora26, Spora26Data, Spora25Data, UnpackedData},
		{Spora26, Spora26, Spora26Data, Spora26Data, none}
	],
	lists:foreach(
		fun({Target, Source, Input, ExpectedPacked, ExpectedUnpacked}) ->
			?assertEqual({ok, ExpectedPacked, ExpectedUnpacked},
				ar_packing_server:repack(Target, Source, ?CHUNK_OFFSET, TXRoot,
					Input, ChunkSize))
		end,
		Cases).
%% Same as test_full_chunk_repack/0 but with a partial (100 KiB) chunk.
test_partial_chunk_repack() ->
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.100kb"),
	Spora25Data = ar_test_node:load_fixture("ar_packing_tests/spora25.100kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.100kb"),
	ChunkSize = 100*1024,
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	Spora26 = {spora_2_6, RewardAddress},
	%% {Target, Source, Input, ExpectedPacked, ExpectedUnpacked}.
	Cases = [
		{unpacked, unpacked, UnpackedData, UnpackedData, UnpackedData},
		{spora_2_5, unpacked, UnpackedData, Spora25Data, UnpackedData},
		{Spora26, unpacked, UnpackedData, Spora26Data, UnpackedData},
		{unpacked, spora_2_5, Spora25Data, UnpackedData, UnpackedData},
		{spora_2_5, spora_2_5, Spora25Data, Spora25Data, none},
		{Spora26, spora_2_5, Spora25Data, Spora26Data, UnpackedData},
		{unpacked, Spora26, Spora26Data, UnpackedData, UnpackedData},
		{spora_2_5, Spora26, Spora26Data, Spora25Data, UnpackedData},
		{Spora26, Spora26, Spora26Data, Spora26Data, none}
	],
	lists:foreach(
		fun({Target, Source, Input, ExpectedPacked, ExpectedUnpacked}) ->
			?assertEqual({ok, ExpectedPacked, ExpectedUnpacked},
				ar_packing_server:repack(Target, Source, ?CHUNK_OFFSET, TXRoot,
					Input, ChunkSize))
		end,
		Cases).
%% Feed 256 KiB packed fixtures with a declared chunk size of 100 KiB, so the
%% padding is invalid. SPoRA 2.6 must reject it on unpack/repack; SPoRA 2.5
%% does not check padding and accepts it.
test_invalid_pad() ->
	ChunkSize = 100*1024,
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.256kb"),
	Spora25Data = ar_test_node:load_fixture("ar_packing_tests/spora25.256kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.256kb"),
	%% The first 100 KiB of the unpacked fixture — what 2.5 unpacking yields.
	ShortUnpackedData = binary:part(UnpackedData, 0, ChunkSize),
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	?assertEqual(
		{ok, ShortUnpackedData},
		ar_packing_server:unpack(
			spora_2_5, ?CHUNK_OFFSET, TXRoot, Spora25Data, ChunkSize),
		"We don't check the pad when unpacking SPoRA 2.5"),
	?assertEqual(
		{error, invalid_padding},
		ar_packing_server:unpack(
			{spora_2_6, RewardAddress}, ?CHUNK_OFFSET, TXRoot, Spora26Data,
			ChunkSize),
		"We do check the pad when unpacking SPoRA 2.6"),
	?assertEqual(
		{ok, ShortUnpackedData, ShortUnpackedData},
		ar_packing_server:repack(
			unpacked, spora_2_5, ?CHUNK_OFFSET, TXRoot, Spora25Data, ChunkSize),
		"We don't check the pad when repacking from SPoRA 2.5"),
	?assertMatch(
		{ok, _, ShortUnpackedData},
		ar_packing_server:repack(
			{spora_2_6, RewardAddress}, spora_2_5, ?CHUNK_OFFSET, TXRoot,
			Spora25Data, ChunkSize),
		"We don't check the pad when repacking from SPoRA 2.5"),
	?assertEqual(
		{error, invalid_padding},
		ar_packing_server:repack(
			unpacked, {spora_2_6, RewardAddress}, ?CHUNK_OFFSET, TXRoot,
			Spora25Data, ChunkSize),
		"We do check the pad when repacking from SPoRA 2.6"),
	?assertMatch(
		{error, invalid_padding},
		ar_packing_server:repack(
			spora_2_5, {spora_2_6, RewardAddress}, ?CHUNK_OFFSET, TXRoot,
			Spora25Data, ChunkSize),
		"We do check the pad when repacking from SPoRA 2.6").
%% Exercise the asynchronous repack API: the server replies to the caller
%% with a {chunk, {packed, ...}} message for each request.
test_request_repack() ->
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.256kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.256kb"),
	ChunkSize = 256*1024,
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	%% unpacked -> unpacked
	ar_packing_server:request_repack(?CHUNK_OFFSET, {
		unpacked, unpacked, UnpackedData, ?CHUNK_OFFSET, TXRoot, ChunkSize}),
	receive
		{chunk, {packed, _, {unpacked, Unpacked1, _, _, _}}} ->
			?assertEqual(UnpackedData, Unpacked1)
	after ?REQUEST_REPACK_TIMEOUT ->
		erlang:error(timeout)
	end,
	%% unpacked -> packed
	ar_packing_server:request_repack(?CHUNK_OFFSET, {
		{spora_2_6, RewardAddress}, unpacked, UnpackedData, ?CHUNK_OFFSET,
		TXRoot, ChunkSize}),
	receive
		{chunk, {packed, _, {{spora_2_6, RewardAddress}, Packed, _, _, _}}} ->
			?assertEqual(Spora26Data, Packed)
	after ?REQUEST_REPACK_TIMEOUT ->
		erlang:error(timeout)
	end,
	%% packed -> unpacked
	ar_packing_server:request_repack(?CHUNK_OFFSET, {
		unpacked, {spora_2_6, RewardAddress}, Spora26Data, ?CHUNK_OFFSET,
		TXRoot, ChunkSize}),
	receive
		{chunk, {packed, _, {unpacked, Unpacked2, _, _, _}}} ->
			?assertEqual(UnpackedData, Unpacked2)
	after ?REQUEST_REPACK_TIMEOUT ->
		erlang:error(timeout)
	end,
	%% packed -> packed
	ar_packing_server:request_repack(?CHUNK_OFFSET, {
		{spora_2_6, RewardAddress}, {spora_2_6, RewardAddress}, Spora26Data,
		?CHUNK_OFFSET, TXRoot, ChunkSize}),
	receive
		{chunk, {packed, _, {{spora_2_6, RewardAddress}, Packed2, _, _, _}}} ->
			?assertEqual(Spora26Data, Packed2)
	after ?REQUEST_REPACK_TIMEOUT ->
		erlang:error(timeout)
	end.
%% Exercise the asynchronous unpack API, including the error reply produced
%% when the declared chunk size makes the padding invalid.
test_request_unpack() ->
	UnpackedData = ar_test_node:load_fixture("ar_packing_tests/unpacked.256kb"),
	Spora26Data = ar_test_node:load_fixture("ar_packing_tests/spora26.256kb"),
	ChunkSize = 256*1024,
	TXRoot = ar_util:decode(?ENCODED_TX_ROOT),
	RewardAddress = ar_test_node:load_fixture("ar_packing_tests/address.bin"),
	%% unpacked -> unpacked
	ar_packing_server:request_unpack(?CHUNK_OFFSET, {
		unpacked, UnpackedData, ?CHUNK_OFFSET, TXRoot, ChunkSize}),
	receive
		{chunk, {unpacked, _, {unpacked, Unpacked1, _, _, _}}} ->
			?assertEqual(UnpackedData, Unpacked1)
	after ?REQUEST_UNPACK_TIMEOUT ->
		erlang:error(timeout)
	end,
	%% packed -> unpacked
	ar_packing_server:request_unpack(?CHUNK_OFFSET, {
		{spora_2_6, RewardAddress}, Spora26Data, ?CHUNK_OFFSET, TXRoot,
		ChunkSize}),
	receive
		{chunk, {unpacked, _, {{spora_2_6, RewardAddress}, Unpacked2, _, _, _}}} ->
			?assertEqual(UnpackedData, Unpacked2)
	after ?REQUEST_UNPACK_TIMEOUT ->
		erlang:error(timeout)
	end,
	%% invalid padding
	ar_packing_server:request_unpack(?CHUNK_OFFSET, {
		{spora_2_6, RewardAddress}, Spora26Data, ?CHUNK_OFFSET, TXRoot,
		ChunkSize - 10}), % reduce chunk size to create invalid padding
	receive
		{chunk, {unpack_error, _, {{spora_2_6, RewardAddress}, Spora26Data,
				_, _, _}, invalid_padding}} ->
			ok
	after ?REQUEST_UNPACK_TIMEOUT ->
		erlang:error(timeout)
	end.

packs_chunks_depending_on_packing_threshold_test_() ->
	ar_test_node:test_with_mocked_functions([
			{ar_fork, height_2_9, fun() -> 10 end},
			{ar_retarget, is_retarget_height, fun(_Height) -> false end},
			{ar_retarget, is_retarget_block, fun(_Block) -> false end}],
		fun test_packs_chunks_depending_on_packing_threshold/0).
%% Post a mix of v1/v2 transactions over 20 blocks mined alternately on both
%% nodes, then verify each mined block's PoA against the recall byte and
%% assert all posted data and chunks eventually sync on both nodes.
test_packs_chunks_depending_on_packing_threshold() ->
	MainWallet = ar_wallet:new_keyfile(),
	PeerWallet = ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, []),
	MainAddr = ar_wallet:to_address(MainWallet),
	PeerAddr = ar_wallet:to_address(PeerWallet),
	%% Pre-generate the random data for every height.
	DataMap =
		lists:foldr(
			fun(Height, Acc) ->
				ChunkCount = 3,
				{DR1, Chunks1} = ar_test_data_sync:generate_random_split(ChunkCount),
				{DR2, Chunks2} =
					ar_test_data_sync:generate_random_original_split(ChunkCount),
				{DR3, Chunks3} =
					ar_test_data_sync:generate_random_original_v1_split(),
				maps:put(Height, {{DR1, Chunks1}, {DR2, Chunks2}, {DR3, Chunks3}}, Acc)
			end,
			#{},
			lists:seq(1, 20)
		),
	Wallet = ar_test_data_sync:setup_nodes(
			#{ addr => MainAddr, peer_addr => PeerAddr }),
	%% Mine 20 blocks, each with 2 of the 3 prepared transactions, picked at
	%% random; collect the proofs for the included transactions.
	{_LegacyProofs, StrictProofs, V1Proofs} =
		lists:foldl(
			fun(Height, {Acc1, Acc2, Acc3}) ->
				{{DR1, Chunks1}, {DR2, Chunks2}, {DR3, Chunks3}} =
					maps:get(Height, DataMap),
				{#tx{ id = TXID1 } = TX1, Chunks1} =
					ar_test_data_sync:tx(Wallet, {fixed_data, DR1, Chunks1}),
				{#tx{ id = TXID2 } = TX2, Chunks2} =
					ar_test_data_sync:tx(Wallet, {fixed_data, DR2, Chunks2}),
				{#tx{ id = TXID3 } = TX3, Chunks3} =
					ar_test_data_sync:tx(Wallet, {fixed_data, DR3, Chunks3}, v1),
				%% Alternate the miner/receiver roles at random.
				{Miner, Receiver} =
					case rand:uniform(2) == 1 of
						true ->
							{main, peer1};
						false ->
							{peer1, main}
					end,
				?debugFmt("miner: ~p, receiver: ~p~n", [Miner, Receiver]),
				?debugFmt("Mining block ~B.~n", [Height]),
				TXs = ar_util:pick_random([TX1, TX2, TX3], 2),
				B = ar_test_node:post_and_mine(
						#{ miner => Miner, await_on => Receiver }, TXs),
				Acc1_2 =
					case lists:member(TX1, TXs) of
						true ->
							ar_test_data_sync:post_proofs(main, B, TX1, Chunks1),
							maps:put(TXID1,
								ar_test_data_sync:get_records_with_proofs(B, TX1,
									Chunks1), Acc1);
						false ->
							Acc1
					end,
				Acc2_2 =
					case lists:member(TX2, TXs) of
						true ->
							ar_test_data_sync:post_proofs(peer1, B, TX2, Chunks2),
							maps:put(TXID2,
								ar_test_data_sync:get_records_with_proofs(B, TX2,
									Chunks2), Acc2);
						false ->
							Acc2
					end,
				Acc3_2 =
					case lists:member(TX3, TXs) of
						true ->
							maps:put(TXID3,
								ar_test_data_sync:get_records_with_proofs(B, TX3,
									Chunks3), Acc3);
						false ->
							Acc3
					end,
				{Acc1_2, Acc2_2, Acc3_2}
			end,
			{#{}, #{}, #{}},
			lists:seq(1, 20)
		),
	%% Mine some empty blocks on top to force all submitted data to fall below
	%% the disk pool threshold so that the non-default storage modules can sync it.
	lists:foreach(
		fun(_) ->
			{Miner, Receiver} =
				case rand:uniform(2) == 1 of
					true ->
						{main, peer1};
					false ->
						{peer1, main}
				end,
			ar_test_node:post_and_mine(#{ miner => Miner, await_on => Receiver }, [])
		end,
		lists:seq(1, 5)
	),
	BILast = ar_node:get_block_index(),
	LastB = ar_test_node:read_block_when_stored(
			element(1, lists:nth(10, lists:reverse(BILast)))),
	%% Re-validate the PoA of each block from height 10 to 20.
	lists:foldl(
		fun(Height, PrevB) ->
			H = element(1, lists:nth(Height + 1, lists:reverse(BILast))),
			B = ar_test_node:read_block_when_stored(H),
			PoA = B#block.poa,
			NonceLimiterInfo = B#block.nonce_limiter_info,
			PartitionUpperBound =
				NonceLimiterInfo#nonce_limiter_info.partition_upper_bound,
			H0 = ar_block:compute_h0(B, PrevB),
			{RecallRange1Start, _} = ar_block:get_recall_range(H0,
					B#block.partition_number, PartitionUpperBound),
			%% The nonce-to-byte mapping depends on the packing difficulty.
			RecallByte =
				case B#block.packing_difficulty of
					0 ->
						RecallRange1Start + B#block.nonce * ?DATA_CHUNK_SIZE;
					_ ->
						RecallRange1Start + (B#block.nonce div 32) * ?DATA_CHUNK_SIZE
				end,
			{BlockStart, BlockEnd, TXRoot} =
				ar_block_index:get_block_bounds(RecallByte),
			?debugFmt("Mined a block. "
					"Computed recall byte: ~B, block's recall byte: ~p. "
					"Height: ~B. Previous block: ~s. "
					"Computed search space upper bound: ~B. "
					"Block start: ~B. Block end: ~B. TX root: ~s.",
					[RecallByte, B#block.recall_byte, Height,
						ar_util:encode(PrevB#block.indep_hash),
						PartitionUpperBound, BlockStart, BlockEnd,
						ar_util:encode(TXRoot)]),
			?assertEqual(RecallByte, B#block.recall_byte),
			SubChunkIndex = ar_block:get_sub_chunk_index(
					B#block.packing_difficulty, B#block.nonce),
			%% Reconstruct the packing and PoA expected for this block.
			{Packing, PoA2} =
				case B#block.packing_difficulty of
					0 ->
						{{spora_2_6, B#block.reward_addr}, PoA};
					?REPLICA_2_9_PACKING_DIFFICULTY ->
						{ok, #{ chunk := UnpackedChunk }} =
							ar_data_sync:get_chunk(RecallByte + 1,
								#{ packing => unpacked, pack => true,
									origin => test }),
						UnpackedChunk2 = ar_packing_server:pad_chunk(UnpackedChunk),
						{{replica_2_9, B#block.reward_addr},
							PoA#poa{ unpacked_chunk = UnpackedChunk2 }};
					_ ->
						{ok, #{ chunk := UnpackedChunk }} =
							ar_data_sync:get_chunk(RecallByte + 1,
								#{ packing => unpacked, pack => true,
									origin => test }),
						UnpackedChunk2 = ar_packing_server:pad_chunk(UnpackedChunk),
						{{composite, B#block.reward_addr,
								B#block.packing_difficulty},
							PoA#poa{ unpacked_chunk = UnpackedChunk2 }}
				end,
			?assertMatch({true, _}, ar_poa:validate({BlockStart, RecallByte,
					TXRoot, BlockEnd - BlockStart, PoA2, Packing, SubChunkIndex,
					not_set})),
			B
		end,
		LastB,
		lists:seq(10, 20)
	),
	?debugMsg("Asserting synced data with the strict splits."),
	maps:map(
		fun(TXID, [{_, _, Chunks, _} | _]) ->
			ExpectedData = ar_util:encode(binary:list_to_bin(Chunks)),
			ar_test_node:assert_get_tx_data(main, TXID, ExpectedData),
			ar_test_node:assert_get_tx_data(peer1, TXID, ExpectedData)
		end,
		StrictProofs
	),
	?debugMsg("Asserting synced v1 data."),
	maps:map(
		fun(TXID, [{_, _, Chunks, _} | _]) ->
			ExpectedData = ar_util:encode(binary:list_to_bin(Chunks)),
			ar_test_node:assert_get_tx_data(main, TXID, ExpectedData),
			ar_test_node:assert_get_tx_data(peer1, TXID, ExpectedData)
		end,
		V1Proofs
	),
	?debugMsg("Asserting synced chunks."),
	ar_test_data_sync:wait_until_syncs_chunks([P || {_, _, _, P} <-
			lists:flatten(maps:values(StrictProofs))]),
	ar_test_data_sync:wait_until_syncs_chunks([P || {_, _, _, P} <-
			lists:flatten(maps:values(V1Proofs))]),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, [P || {_, _, _, P} <-
			lists:flatten(maps:values(StrictProofs))], infinity),
	ar_test_data_sync:wait_until_syncs_chunks(peer1, [P || {_, _, _, P} <-
			lists:flatten(maps:values(V1Proofs))], infinity).

================================================
FILE: apps/arweave/test/ar_peer_intervals_discovery_test.erl
================================================
-module(ar_peer_intervals_discovery_test).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_data_discovery.hrl").
-include("ar.hrl").

%% Everything is already synced locally — no intervals should be requested.
no_unsynced_intervals_test_() ->
	TestCase = #{
		synced => [{0, 10}],
		peer1 => [{0, 5}],
		peer2 => [{3, 8}]
	},
	test_interval_discovery(TestCase, footprint, "No unsynced intervals").

%% Nothing synced locally; a single peer advertises three chunks.
basic_interval_discovery_test_() ->
	TestCase = #{
		synced => [],
		peer1 => [{0, 3}]
	},
	test_interval_discovery(TestCase, footprint, "Three chunks").

%% Several peers with overlapping intervals, partially synced locally.
overlapping_intervals_test_() ->
	TestCase = #{
		synced => [{8, 12}],
		peer1 => [{0, 1}],
		peer2 => [{3, 10}],
		peer3 => [{6, 13}]
	},
	test_interval_discovery(TestCase, footprint, "Overlapping intervals").
%% Drive ar_peer_intervals:fetch/5 against a synthetic scenario. TestCase maps
%% 'synced' to the locally synced chunk intervals and each peer to the
%% intervals it advertises (in chunk units); Mode is the discovery mode.
%% Asserts the enqueued intervals match the expected per-peer intersections.
test_interval_discovery(TestCase, Mode, Title) ->
	SyncedChunks = maps:get(synced, TestCase, []),
	PeerChunksData = maps:remove(synced, TestCase),
	%% Convert to bytes
	SyncedBytes = chunks_to_bytes(SyncedChunks),
	PeerBytesData = maps:map(fun(_K, V) -> chunks_to_bytes(V) end, PeerChunksData),
	TestRangeEnd = 50 * ?DATA_CHUNK_SIZE,
	UnsyncedBytes = calculate_unsynced_from_synced(SyncedBytes, TestRangeEnd),
	Peers = maps:keys(PeerBytesData),
	ExpectedIntervals = calculate_expected_intervals(UnsyncedBytes, PeerBytesData),
	Mocks = create_test_mocks(Peers),
	TestConfig = #config{
		sync_from_local_peers_only = false,
		local_peers = []
	},
	arweave_config:set_env(TestConfig),
	setup_sync_record_servers(SyncedBytes, PeerBytesData),
	ar_test_node:test_with_mocked_functions(Mocks, fun() ->
		Start = 0,
		End = TestRangeEnd,
		StoreID = test_store_id,
		ar_peer_intervals:fetch(Start, Start, End, StoreID, Mode),
		%% Verify we get the expected intervals enqueued
		case maps:size(ExpectedIntervals) == 0 of
			true ->
				%% Nothing should be enqueued; tolerate an explicit empty cast.
				receive
					{'$gen_cast', {enqueue_intervals, []}} ->
						ok
				after 100 ->
					ok
				end;
			false ->
				AllEnqueueIntervals = collect_enqueue_intervals(#{}, StoreID, Mode),
				FlattenedIntervals = lists:flatten(AllEnqueueIntervals),
				verify_enqueued_intervals(FlattenedIntervals, ExpectedIntervals,
					Title)
		end
	end).

%% Accumulate enqueue_intervals casts (keyed by peer) until the
%% collect_peer_intervals cursor passes the end of the range; re-issue
%% fetch/5 for each intermediate cursor position.
collect_enqueue_intervals(Acc, StoreID, Mode) ->
	receive
		{'$gen_cast', {enqueue_intervals, EnqueueIntervals}} ->
			Acc2 = update_peer_intervals(EnqueueIntervals, Acc),
			collect_enqueue_intervals(Acc2, StoreID, Mode);
		{'$gen_cast', {collect_peer_intervals, Offset, _Start, End, _}}
				when Offset >= End ->
			maps:to_list(Acc);
		{'$gen_cast', {collect_peer_intervals, Offset, Start, End, _}} ->
			ar_peer_intervals:fetch(Offset, Start, End, StoreID, Mode),
			collect_enqueue_intervals(Acc, StoreID, Mode)
	after 10_000 ->
		?assert(false, "No enqueue_intervals messages received")
	end.
%% Fold a list of {Peer, Intervals, FootprintKey} entries into the per-peer
%% accumulator map, unioning each peer's intervals into what was collected.
update_peer_intervals(Entries, Acc) ->
	lists:foldl(
		fun({Peer, Intervals, _FootprintKey}, Map) ->
			Existing = maps:get(Peer, Map, ar_intervals:new()),
			maps:put(Peer, ar_intervals:union(Existing, Intervals), Map)
		end,
		Acc,
		Entries).

%% Build the mock set used by the interval-discovery tests: peer discovery
%% returns the given Peers, blacklists and rate limits are disabled, and
%% footprint queries are served from ar_footprint_record.
create_test_mocks(Peers) ->
	FootprintPeers = {ar_data_discovery, get_footprint_bucket_peers,
		fun(_FootprintBucket) -> Peers end},
	NoBlacklist = {ar_tx_blacklist, get_blacklisted_intervals,
		fun(_Start, _End) -> ar_intervals:new() end},
	Footprints = {ar_http_iface_client, get_footprints,
		fun(Peer, Partition, Footprint) ->
			Intervals = ar_footprint_record:get_intervals(Partition, Footprint,
				Peer),
			{ok, Intervals}
		end},
	SelfAsDataSync = {ar_data_sync, name, fun(_StoreID) -> self() end},
	PeerRelease = {ar_peers, get_peer_release,
		fun(_Peer) -> ?GET_FOOTPRINT_SUPPORT_RELEASE end},
	NoCooldown = {ar_rate_limiter, is_on_cooldown,
		fun(_Peer, _Key) -> false end},
	NoThrottle = {ar_rate_limiter, is_throttled,
		fun(_Peer, _Path) -> false end},
	[FootprintPeers, NoBlacklist, Footprints, SelfAsDataSync, PeerRelease,
		NoCooldown, NoThrottle].

%% Assert that every expected peer appears among the enqueued intervals and
%% that its intervals match exactly.
verify_enqueued_intervals(EnqueueIntervals, ExpectedIntervals, Title) ->
	?assert(is_list(EnqueueIntervals)),
	EnqueuedByPeer = maps:from_list(EnqueueIntervals),
	maps:foreach(
		fun(Peer, ExpectedPeerIntervals) ->
			?assert(maps:is_key(Peer, EnqueuedByPeer),
				lists:flatten(io_lib:format(
					"Expected peer ~p not found in enqueued intervals", [Peer]))),
			ActualPeerIntervals = maps:get(Peer, EnqueuedByPeer),
			?assertEqual(ar_intervals:to_list(ExpectedPeerIntervals),
				ar_intervals:to_list(ActualPeerIntervals), Title)
		end,
		ExpectedIntervals).

%% Convert {Start, End} intervals expressed in chunk counts into
%% {EndBytes, StartBytes} pairs (the ar_intervals representation).
chunks_to_bytes(ChunkIntervals) ->
	[{trunc(End * ?DATA_CHUNK_SIZE), trunc(Start * ?DATA_CHUNK_SIZE)}
		|| {Start, End} <- ChunkIntervals].
%% @doc Calculate unsynced intervals as the gaps in the synced intervals
%% within [0, TestRangeEnd). Returns a list of {End, Start} byte pairs.
calculate_unsynced_from_synced(SyncedBytes, TestRangeEnd) ->
	case SyncedBytes of
		[] ->
			%% Nothing synced, everything is unsynced.
			[{TestRangeEnd, 0}];
		_ ->
			%% Find gaps in synced intervals: outerjoin against the full range.
			SyncedIntervals = ar_intervals:from_list(SyncedBytes),
			TestRange = ar_intervals:from_list([{TestRangeEnd, 0}]),
			UnsyncedIntervals = ar_intervals:outerjoin(SyncedIntervals, TestRange),
			ar_intervals:to_list(UnsyncedIntervals)
	end.

%% @doc For each peer, compute the intersection of that peer's advertised
%% intervals with the locally unsynced intervals; peers whose intersection is
%% empty are dropped, since nothing would be enqueued for them.
calculate_expected_intervals(UnsyncedBytes, PeerBytesData) ->
	%% For each peer, calculate intersection with unsynced intervals.
	UnsyncedIntervals = ar_intervals:from_list(UnsyncedBytes),
	ExpectedByPeer = maps:map(fun(_Peer, PeerIntervals) ->
		PeerIntervalsObj = ar_intervals:from_list(PeerIntervals),
		Intersection = ar_intervals:intersection(UnsyncedIntervals, PeerIntervalsObj),
		Intersection
	end, PeerBytesData),
	maps:filter(fun(_Peer, Intervals) -> not ar_intervals:is_empty(Intervals) end,
		ExpectedByPeer).

%% @doc Prepare the sync-record state the code under test reads: (re)create
%% the shared `sync_records' ETS table, start an ar_sync_record server for the
%% local store and one per peer (peers double as store IDs here), and register
%% the corresponding byte intervals in the footprint records.
setup_sync_record_servers(SyncedBytes, PeerBytesData) ->
	case ets:info(sync_records) of
		undefined ->
			ets:new(sync_records, [named_table, public, {read_concurrency, true}]);
		_ ->
			%% Table survives between test cases - wipe it instead.
			ets:delete_all_objects(sync_records)
	end,
	Packing = unpacked,
	SyncedStoreID = test_store_id,
	ProcessName = ar_sync_record:name(SyncedStoreID),
	case whereis(ProcessName) of
		undefined -> ar_sync_record:start_link(ProcessName, SyncedStoreID);
		_ -> ok
	end,
	add_bytes_to_footprint(SyncedBytes, Packing, SyncedStoreID),
	maps:foreach(
		fun(Peer, PeerBytes) ->
			PeerProcessName = ar_sync_record:name(Peer),
			case whereis(PeerProcessName) of
				undefined -> ar_sync_record:start_link(PeerProcessName, Peer);
				_ -> ok
			end,
			add_bytes_to_footprint(PeerBytes, Packing, Peer)
		end,
		PeerBytesData
	).
%% @doc Register every chunk-aligned offset of the given {End, Start} byte
%% intervals in the footprint record for StoreID. Offsets are the chunk end
%% offsets, i.e. Start + ?DATA_CHUNK_SIZE up to and including End.
add_bytes_to_footprint([], _Packing, _StoreID) ->
	ok;
add_bytes_to_footprint([{End, Start} | Rest], Packing, StoreID) ->
	lists:foreach(
		fun(Offset) ->
			ar_footprint_record:add(Offset, Packing, StoreID)
		end,
		lists:seq(Start + ?DATA_CHUNK_SIZE, End, ?DATA_CHUNK_SIZE)
	),
	add_bytes_to_footprint(Rest, Packing, StoreID).



================================================
FILE: apps/arweave/test/ar_poa_tests.erl
================================================
-module(ar_poa_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [wait_until_height/2, assert_wait_until_height/2,
		read_block_when_stored/1]).

v1_transactions_after_2_0_test_() ->
	{timeout, 420, fun test_v1_transactions_after_2_0/0}.

%% @doc Post a mix of v1 transactions (empty, small, and multi-chunk data)
%% from two wallets, mine 20 blocks on peer1, and verify the transactions are
%% mined into the expected blocks and both nodes stay in sync.
test_v1_transactions_after_2_0() ->
	Key = {_, Pub1} = ar_wallet:new(),
	Key2 = {_, Pub2} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub1), ?AR(100), <<>>},
		{ar_wallet:to_address(Pub2), ?AR(100), <<>>}
	]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	TXs = generate_txs(Key, fun ar_test_node:sign_v1_tx/2),
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX)
		end,
		TXs
	),
	ar_test_node:assert_wait_until_receives_txs(TXs),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(peer1),
			BI = wait_until_height(main, Height),
			%% The first batch must be mined into block 1.
			case Height of
				1 -> assert_txs_mined(TXs, BI);
				_ -> noop
			end,
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(1, 10)
	),
	MoreTXs = generate_txs(Key2, fun ar_test_node:sign_v1_tx/2),
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX)
		end,
		MoreTXs
	),
	ar_test_node:assert_wait_until_receives_txs(MoreTXs),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(peer1),
			BI = wait_until_height(main, Height),
			%% The second batch must be mined into block 11.
			case Height of
				11 -> assert_txs_mined(MoreTXs, BI);
				_ -> noop
			end,
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(11, 20)
	).
v2_transactions_after_2_0_test_() ->
	{timeout, 420, fun test_v2_transactions_after_2_0/0}.

%% @doc Same as test_v1_transactions_after_2_0/0 but with v2 (format-2)
%% transactions signed via ar_test_node:sign_tx/2.
test_v2_transactions_after_2_0() ->
	Key = {_, Pub1} = ar_wallet:new(),
	Key2 = {_, Pub2} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub1), ?AR(100), <<>>},
		{ar_wallet:to_address(Pub2), ?AR(100), <<>>}
	]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	TXs = generate_txs(Key, fun ar_test_node:sign_tx/2),
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX)
		end,
		TXs
	),
	ar_test_node:assert_wait_until_receives_txs(TXs),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(peer1),
			BI = wait_until_height(main, Height),
			case Height of
				1 -> assert_txs_mined(TXs, BI);
				_ -> noop
			end,
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(1, 10)
	),
	MoreTXs = generate_txs(Key2, fun ar_test_node:sign_tx/2),
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX)
		end,
		MoreTXs
	),
	ar_test_node:assert_wait_until_receives_txs(MoreTXs),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(peer1),
			BI = wait_until_height(main, Height),
			case Height of
				11 -> assert_txs_mined(MoreTXs, BI);
				_ -> noop
			end,
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(11, 20)
	).

recall_byte_on_the_border_test_() ->
	{timeout, 420, fun test_recall_byte_on_the_border/0}.

%% @doc Mine blocks over tiny transactions so the recall byte frequently
%% falls exactly on the boundary between two transactions' data.
test_recall_byte_on_the_border() ->
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(100), <<>>}
	]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	%% Generate one-byte transactions so that the recall byte is often on
	%% the border between two transactions.
	TXs = [
		ar_test_node:sign_tx(Key, #{ data => <<"A">>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
		ar_test_node:sign_tx(Key, #{ data => <<"B">>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
		ar_test_node:sign_tx(Key, #{ data => <<"B">>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
		ar_test_node:sign_tx(Key, #{ data => <<"C">>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1) })
	],
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX)
		end,
		TXs
	),
	ar_test_node:assert_wait_until_receives_txs(TXs),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(peer1),
			BI = wait_until_height(main, Height),
			case Height of
				1 -> assert_txs_mined(TXs, BI);
				_ -> noop
			end,
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(1, 10)
	).

ignores_transactions_with_invalid_data_root_test_() ->
	{timeout, 420, fun test_ignores_transactions_with_invalid_data_root/0}.

%% @doc Post interleaved valid transactions and transactions whose data_root
%% does not match their (stripped) data, and verify mining still proceeds and
%% block 1 contains the full posted set.
test_ignores_transactions_with_invalid_data_root() ->
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(100), <<>>}
	]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	%% Generate transactions where half of them are valid and the other
	%% half has an invalid data_root.
	GenerateTXParams =
		fun
			(valid) ->
				#{ data => <<"DATA">>, tags => [random_nonce()],
					last_tx => ar_test_node:get_tx_anchor(peer1) };
			(invalid) ->
				#{ data_root => crypto:strong_rand_bytes(32), data => <<"DATA">>,
					tags => [random_nonce()],
					last_tx => ar_test_node:get_tx_anchor(peer1) }
		end,
	%% The invalid transactions are posted with their data stripped so the
	%% bogus data_root cannot be recomputed and rejected up front.
	TXs = [
		ar_test_node:sign_tx(Key, GenerateTXParams(valid)),
		(ar_test_node:sign_tx(Key, GenerateTXParams(invalid)))#tx{ data = <<>> },
		ar_test_node:sign_tx(Key, GenerateTXParams(valid)),
		(ar_test_node:sign_tx(Key, GenerateTXParams(invalid)))#tx{ data = <<>> },
		ar_test_node:sign_tx(Key, GenerateTXParams(valid)),
		(ar_test_node:sign_tx(Key, GenerateTXParams(invalid)))#tx{ data = <<>> },
		ar_test_node:sign_tx(Key, GenerateTXParams(valid)),
		(ar_test_node:sign_tx(Key, GenerateTXParams(invalid)))#tx{ data = <<>> },
		ar_test_node:sign_tx(Key, GenerateTXParams(valid)),
		(ar_test_node:sign_tx(Key, GenerateTXParams(invalid)))#tx{ data = <<>> }
	],
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX)
		end,
		TXs
	),
	ar_test_node:assert_wait_until_receives_txs(TXs),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(peer1),
			BI = wait_until_height(main, Height),
			case Height of
				1 -> assert_txs_mined(TXs, BI);
				_ -> noop
			end,
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(1, 10)
	).
%% @doc Produce a representative set of transactions signed with SignFun:
%% empty data, sub-chunk data, and data spanning 1, 2, 3, and 13 chunks.
generate_txs(Key, SignFun) ->
	[
		SignFun(Key, #{ data => <<>>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
		SignFun(Key, #{ data => <<"B">>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
		SignFun(
			Key,
			#{
				data => <<"DATA">>, tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1)
			}
		),
		SignFun(
			Key,
			#{
				data => << <<"B">> || _ <- lists:seq(1, ?DATA_CHUNK_SIZE) >>,
				tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1)
			}
		),
		SignFun(
			Key,
			#{
				data => << <<"B">> || _ <- lists:seq(1, ?DATA_CHUNK_SIZE * 2) >>,
				tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1)
			}
		),
		SignFun(
			Key,
			#{
				data => << <<"B">> || _ <- lists:seq(1, ?DATA_CHUNK_SIZE * 3) >>,
				tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1)
			}
		),
		SignFun(
			Key,
			#{
				data => << <<"B">> || _ <- lists:seq(1, ?DATA_CHUNK_SIZE * 13) >>,
				tags => [random_nonce()],
				last_tx => ar_test_node:get_tx_anchor(peer1)
			}
		)
	].

%% @doc A random tag so that otherwise-identical transactions get unique IDs.
random_nonce() ->
	{<<"nonce">>, integer_to_binary(rand:uniform(1000000))}.

%% @doc Assert that the block at the head of the given block index contains
%% exactly the given transactions (order-insensitive).
assert_txs_mined(TXs, [{H, _, _} | _]) ->
	B = read_block_when_stored(H),
	TXIDs = [TX#tx.id || TX <- TXs],
	?assertEqual(length(TXIDs), length(B#block.txs)),
	?assertEqual(lists:sort(TXIDs), lists:sort(B#block.txs)).



================================================
FILE: apps/arweave/test/ar_poller_tests.erl
================================================
-module(ar_poller_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [assert_wait_until_height/2, read_block_when_stored/1]).

polling_test_() ->
	ar_test_node:test_with_mocked_functions([
		{ar_retarget, is_retarget_height, fun(_Height) -> false end},
		{ar_retarget, is_retarget_block, fun(_Block) -> false end}],
		fun test_polling/0).
%% @doc Mine 9 blocks on a disconnected peer, reconnect, and verify the main
%% node polls and applies them. Then force the two nodes to diverge for three
%% blocks and verify the node on the lighter fork adopts the heavier one
%% (or, on an exact cumulative-difficulty tie, the next mined block resolves it).
test_polling() ->
	{_, Pub} = Wallet = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:disconnect_from(peer1),
	TXs = lists:map(
		fun(Height) ->
			SignedTX = ar_test_node:sign_tx(Wallet, #{
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
			ar_test_node:assert_post_tx_to_peer(peer1, SignedTX),
			ar_test_node:mine(peer1),
			assert_wait_until_height(peer1, Height),
			SignedTX
		end,
		lists:seq(1, 9)
	),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:wait_until_height(main, 9),
	%% Each of the 9 polled blocks must carry exactly its one transaction.
	lists:foreach(
		fun(Height) ->
			{H, _, _} = ar_node:get_block_index_entry(Height),
			B = read_block_when_stored(H),
			TX = lists:nth(Height, TXs),
			?assertEqual([TX#tx.id], B#block.txs)
		end,
		lists:seq(1, 9)
	),
	%% Make the nodes diverge. Expect one of them to fetch and apply the blocks
	%% from the winning fork.
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(),
	ar_test_node:mine(peer1),
	[{MH11, _, _} | _] = ar_test_node:wait_until_height(main, 10),
	[{SH11, _, _} | _] = ar_test_node:wait_until_height(peer1, 10),
	?assertNotEqual(SH11, MH11),
	ar_test_node:mine(),
	ar_test_node:mine(peer1),
	[{MH12, _, _} | _] = ar_test_node:wait_until_height(main, 11),
	[{SH12, _, _} | _] = ar_test_node:wait_until_height(peer1, 11),
	?assertNotEqual(SH12, MH12),
	ar_test_node:mine(),
	ar_test_node:mine(peer1),
	[{MH13, _, _} | _] = MBI12 = ar_test_node:wait_until_height(main, 12),
	[{SH13, _, _} | _] = SBI12 = ar_test_node:wait_until_height(peer1, 12),
	?assertNotEqual(SH13, MH13),
	BM13 = ar_block_cache:get(block_cache, MH13),
	BS13 = ar_test_node:remote_call(peer1, ar_block_cache, get, [block_cache, SH13]),
	CDiffM13 = BM13#block.cumulative_diff,
	CDiffS13 = BS13#block.cumulative_diff,
	ar_test_node:connect_to_peer(peer1),
	case CDiffM13 > CDiffS13 of
		true ->
			%% Main has the heavier fork - peer1 must adopt it.
			?debugFmt("Case 1.", []),
			?assertEqual(ok, ar_test_node:wait_until_block_index(peer1, MBI12)),
			?assertMatch([{MH13, _, _} | _], ar_node:get_block_index());
		false ->
			case CDiffM13 < CDiffS13 of
				true ->
					%% Peer1 has the heavier fork - main must adopt it.
					?debugFmt("Case 2.", []),
					?assertEqual(ok, ar_test_node:wait_until_block_index(SBI12)),
					?assertMatch([{SH13, _, _} | _],
						ar_test_node:remote_call(peer1, ar_node, get_block_index, []));
				false ->
					%% Exact tie - mining one more block on peer1 breaks it and
					%% main must reorganize onto peer1's branch.
					?debugFmt("Case 3.", []),
					ar_test_node:mine(peer1),
					[{MH14, _, _}, {MH13_1, _, _}, {MH12_1, _, _}, {MH11_1, _, _} | _] =
						ar_test_node:wait_until_height(main, 13),
					[{SH14, _, _} | _] = ar_test_node:wait_until_height(peer1, 13),
					?assertEqual(MH14, SH14),
					?assertEqual(SH13, MH13_1),
					?assertEqual(SH12, MH12_1),
					?assertEqual(SH11, MH11_1)
			end
	end.



================================================
FILE: apps/arweave/test/ar_post_block_tests.erl
================================================
-module(ar_post_block_tests).

-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar_consensus.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [
	wait_until_height/2, post_block/2, send_new_block/2,
	sign_block/3, read_block_when_stored/2, assert_wait_until_height/2,
	test_with_mocked_functions/2]).

%% @doc Start a main node and a connected peer1 from a fresh genesis block.
start_node() ->
	[B0] = ar_weave:init([], 0), %% Set difficulty to 0 to speed up tests
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1).

%% @doc Per-test fixture: clear bans, resync both nodes, then mine one block
%% on a disconnected peer1. Returns {Key, B, PrevB} where B is the freshly
%% mined block (unknown to main), PrevB its parent, and Key peer1's mining key,
%% so each test can tamper with B and re-sign it.
reset_node() ->
	ar_blacklist_middleware:reset(),
	ar_test_node:remote_call(peer1, ar_blacklist_middleware, reset, []),
	ar_test_node:connect_to_peer(peer1),
	Height = height(peer1),
	[{PrevH, _, _} | _] = wait_until_height(main, Height),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	[{H, _, _} | _] = ar_test_node:assert_wait_until_height(peer1, Height + 1),
	B = ar_test_node:remote_call(peer1, ar_block_cache, get, [block_cache, H]),
	PrevB = ar_test_node:remote_call(peer1, ar_block_cache, get, [block_cache, PrevH]),
	{ok, Config} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	Key = ar_test_node:remote_call(peer1, ar_wallet, load_key, [Config#config.mining_addr]),
	{Key, B, PrevB}.
%% @doc Suite-level setup: pin the 2.7 fork height to 0 and start the nodes.
%% Returns the cleanup fun and the mocked functions for cleanup_all_post_fork/1.
setup_all_post_2_7() ->
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ar_fork, height_2_7, fun() -> 0 end}
	]),
	Functions = Setup(),
	start_node(),
	{Cleanup, Functions}.

%% @doc Suite-level setup: pin the 2.8 fork height to 0 and start the nodes.
setup_all_post_2_8() ->
	{Setup, Cleanup} = ar_test_node:mock_functions([
		{ar_fork, height_2_8, fun() -> 0 end}
	]),
	Functions = Setup(),
	start_node(),
	{Cleanup, Functions}.

%% @doc Suite-level teardown: undo the fork-height mocks installed by setup.
cleanup_all_post_fork({Cleanup, Functions}) ->
	Cleanup(Functions).

%% @doc Wrap a 1-arity test fun into a {timeout, with, Fixture} EUnit spec.
instantiator(TestFun) ->
	fun (Fixture) -> {timeout, 120, {with, Fixture, [TestFun]}} end.

post_2_7_test_() ->
	{setup, fun setup_all_post_2_7/0, fun cleanup_all_post_fork/1,
		{foreach, fun reset_node/0, [
			instantiator(fun test_reject_block_invalid_miner_reward/1),
			instantiator(fun test_reject_block_invalid_denomination/1),
			instantiator(fun test_reject_block_invalid_kryder_plus_rate_multiplier/1),
			instantiator(fun test_reject_block_invalid_kryder_plus_rate_multiplier_latch/1),
			instantiator(fun test_reject_block_invalid_endowment_pool/1),
			instantiator(fun test_reject_block_invalid_debt_supply/1),
			instantiator(fun test_reject_block_invalid_wallet_list/1),
			instantiator(fun test_mitm_poa_chunk_tamper_warn/1),
			instantiator(fun test_mitm_poa2_chunk_tamper_warn/1),
			instantiator(fun test_reject_block_invalid_proof_size/1),
			instantiator(fun test_cached_poa/1)
		]}
	}.
post_2_8_test_() ->
	{setup, fun setup_all_post_2_8/0, fun cleanup_all_post_fork/1,
		{foreach, fun reset_node/0, [
			instantiator(fun test_reject_block_invalid_packing_difficulty/1),
			instantiator(fun test_reject_block_invalid_replica_format/1),
			instantiator(fun test_reject_block_invalid_denomination/1),
			instantiator(fun test_reject_block_invalid_kryder_plus_rate_multiplier/1),
			instantiator(fun test_reject_block_invalid_kryder_plus_rate_multiplier_latch/1),
			instantiator(fun test_reject_block_invalid_endowment_pool/1),
			instantiator(fun test_reject_block_invalid_debt_supply/1),
			instantiator(fun test_reject_block_invalid_wallet_list/1),
			instantiator(fun test_mitm_poa_chunk_tamper_warn/1),
			instantiator(fun test_mitm_poa2_chunk_tamper_warn/1),
			instantiator(fun test_reject_block_invalid_proof_size/1),
			instantiator(fun test_cached_poa/1)
		]}
	}.

%% ------------------------------------------------------------------------------------------
%% post_2_7_test_
%% ------------------------------------------------------------------------------------------

test_mitm_poa_chunk_tamper_warn({_Key, B, _PrevB}) ->
	%% Verify that, in 2.7, we don't ban a peer if the poa.chunk is tampered with.
	ok = ar_events:subscribe(block),
	assert_not_banned(ar_test_node:peer_ip(main)),
	B2 = B#block{ poa = #poa{ chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE) } },
	post_block(B2, invalid_first_chunk),
	assert_not_banned(ar_test_node:peer_ip(main)).

test_mitm_poa2_chunk_tamper_warn({Key, B, PrevB}) ->
	%% Verify that, in 2.7, we don't ban a peer if the poa2.chunk is tampered with.
	%% For this test we have to re-sign the block with the new poa2.chunk - but that's just a
	%% test limitation. In the wild the poa2 chunk could be modified without resigning.
	ok = ar_events:subscribe(block),
	assert_not_banned(ar_test_node:peer_ip(main)),
	B2 = sign_block(B#block{
		recall_byte2 = 100000000,
		poa2 = #poa{ chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE) } }, PrevB, Key),
	post_block(B2, invalid_second_chunk),
	assert_not_banned(ar_test_node:peer_ip(main)).

%% @doc Each of the six proof fields (tx_path, data_path, chunk for poa and
%% poa2) one byte over its maximum must be rejected with invalid_proof_size.
test_reject_block_invalid_proof_size({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	MaxDataPathSize = 349504,
	MaxTxPathSize = 2176,
	post_block(sign_block(
		B#block{ poa = #poa{ tx_path = crypto:strong_rand_bytes(MaxTxPathSize + 1) } },
		PrevB, Key), invalid_proof_size),
	post_block(sign_block(
		B#block{ poa = #poa{ data_path = crypto:strong_rand_bytes(MaxDataPathSize + 1) } },
		PrevB, Key), invalid_proof_size),
	post_block(sign_block(
		B#block{ poa = #poa{ chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE+1) } },
		PrevB, Key), invalid_proof_size),
	post_block(sign_block(
		B#block{ poa2 = #poa{ tx_path = crypto:strong_rand_bytes(MaxTxPathSize + 1) } },
		PrevB, Key), invalid_proof_size),
	post_block(sign_block(
		B#block{ poa2 = #poa{ data_path = crypto:strong_rand_bytes(MaxDataPathSize + 1) } },
		PrevB, Key), invalid_proof_size),
	post_block(sign_block(
		B#block{ poa2 = #poa{ chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE+1) } },
		PrevB, Key), invalid_proof_size).

test_cached_poa({Key, B, PrevB}) ->
	%% Verify that comparing against a cached poa works.
	ok = ar_events:subscribe(block),
	B2 = sign_block(B, PrevB, Key),
	post_block(B2, valid),
	B3 = sign_block(B, PrevB, Key),
	post_block(B3, valid).

%% The banning process is asynchronous now so we may have to wait a little until
%% the peer gets banned.
assert_banned(Peer) ->
	case ar_util:do_until(
		fun() ->
			banned == ar_blacklist_middleware:is_peer_banned(Peer)
		end,
		200,
		2000
	) of
		true ->
			true;
		false ->
			?assert(false, "Expected the peer to be banned but the peer was not banned.")
	end.
%% The banning process is asynchronous now so we should wait a little to gain
%% some confidence the peer is not banned.
assert_not_banned(Peer) ->
	timer:sleep(2000),
	?assertEqual(not_banned, ar_blacklist_middleware:is_peer_banned(Peer)).

%% ------------------------------------------------------------------------------------------
%% post_2_6_test_
%% ------------------------------------------------------------------------------------------

%% @doc A zeroed reward must first break the reward history hash; with the
%% history hash "fixed up" to match, the reward itself must then be rejected.
test_reject_block_invalid_miner_reward({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ reward = 0 }, PrevB, Key),
	post_block(B2, invalid_reward_history_hash),
	HashRate = ar_difficulty:get_hash_rate_fixed_ratio(B2),
	RewardHistory = tl(B2#block.reward_history),
	Addr = B2#block.reward_addr,
	B3 = sign_block(B2#block{
		reward_history_hash = ar_rewards:reward_history_hash(
			B2#block.height, PrevB#block.reward_history_hash,
			[{Addr, HashRate, 0, 1} | RewardHistory]) }, PrevB, Key),
	post_block(B3, invalid_miner_reward).

test_reject_block_invalid_denomination({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ denomination = 0 }, PrevB, Key),
	post_block(B2, invalid_denomination).

test_reject_block_invalid_kryder_plus_rate_multiplier({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ kryder_plus_rate_multiplier = 0 }, PrevB, Key),
	post_block(B2, invalid_kryder_plus_rate_multiplier).

test_reject_block_invalid_kryder_plus_rate_multiplier_latch({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ kryder_plus_rate_multiplier_latch = 2 }, PrevB, Key),
	post_block(B2, invalid_kryder_plus_rate_multiplier_latch).

test_reject_block_invalid_endowment_pool({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ reward_pool = 2 }, PrevB, Key),
	post_block(B2, invalid_reward_pool).

test_reject_block_invalid_debt_supply({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ debt_supply = 100000000 }, PrevB, Key),
	post_block(B2, invalid_debt_supply).
test_reject_block_invalid_wallet_list({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	B2 = sign_block(B#block{ wallet_list = crypto:strong_rand_bytes(32) }, PrevB, Key),
	post_block(B2, invalid_wallet_list).

%% ------------------------------------------------------------------------------------------
%% post_2_8_test_
%% ------------------------------------------------------------------------------------------

%% @doc An out-of-range packing_difficulty (33) must be rejected; with chunk
%% hashes made internally consistent it is still rejected and the sender is
%% banned.
test_reject_block_invalid_packing_difficulty({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	assert_not_banned(ar_test_node:peer_ip(main)),
	B2 = sign_block(B#block{ unpacked_chunk_hash = <<>>, packing_difficulty = 33 },
		PrevB, Key),
	post_block(B2, invalid_first_unpacked_chunk),
	assert_not_banned(ar_test_node:peer_ip(main)),
	C = crypto:strong_rand_bytes(262144),
	PackedC = crypto:strong_rand_bytes(262144 div 32),
	UH = crypto:hash(sha256, C),
	H = crypto:hash(sha256, PackedC),
	PoA = B#block.poa,
	B3 = sign_block(B#block{ packing_difficulty = 33,
		poa = PoA#poa{ unpacked_chunk = C, chunk = PackedC },
		unpacked_chunk_hash = UH, chunk_hash = H }, PrevB, Key),
	post_block(B3, invalid_packing_difficulty),
	assert_banned(ar_test_node:peer_ip(main)).

%% @doc A replica_format of 2 with otherwise-consistent chunk hashes must be
%% rejected (reported as invalid_packing_difficulty) and lead to a ban.
test_reject_block_invalid_replica_format({Key, B, PrevB}) ->
	ok = ar_events:subscribe(block),
	assert_not_banned(ar_test_node:peer_ip(main)),
	C = crypto:strong_rand_bytes(262144),
	PackedC = crypto:strong_rand_bytes(262144 div 32),
	UH = crypto:hash(sha256, C),
	H = crypto:hash(sha256, PackedC),
	PoA = B#block.poa,
	B2 = sign_block(B#block{ replica_format = 2,
		poa = PoA#poa{ unpacked_chunk = C, chunk = PackedC },
		unpacked_chunk_hash = UH, chunk_hash = H }, PrevB, Key),
	post_block(B2, invalid_packing_difficulty),
	assert_banned(ar_test_node:peer_ip(main)).
%% ------------------------------------------------------------------------------------------
%% Others tests
%% ------------------------------------------------------------------------------------------

add_external_block_with_invalid_timestamp_test_() ->
	ar_test_node:test_with_mocked_functions([{ar_fork, height_2_7, fun() -> 0 end}],
		fun test_add_external_block_with_invalid_timestamp/0).

%% @doc Block timestamps just inside the clock tolerance window (future and
%% past) must be accepted; timestamps just outside it must be rejected.
test_add_external_block_with_invalid_timestamp() ->
	start_node(),
	{Key, B, PrevB} = reset_node(),
	%% Expect the timestamp too far from the future to be rejected.
	FutureTimestampTolerance = ?JOIN_CLOCK_TOLERANCE * 2 + ?CLOCK_DRIFT_MAX,
	TooFarFutureTimestamp = os:system_time(second) + FutureTimestampTolerance + 3,
	B2 = sign_block(B#block{ timestamp = TooFarFutureTimestamp }, PrevB, Key),
	ok = ar_events:subscribe(block),
	post_block(B2, invalid_timestamp),
	%% Expect the timestamp from the future within the tolerance interval to be accepted.
	OkFutureTimestamp = os:system_time(second) + FutureTimestampTolerance - 3,
	B3 = sign_block(B#block{ timestamp = OkFutureTimestamp }, PrevB, Key),
	post_block(B3, valid),
	%% Expect the timestamp too far behind the previous timestamp to be rejected.
	PastTimestampTolerance = lists:sum([?JOIN_CLOCK_TOLERANCE * 2, ?CLOCK_DRIFT_MAX]),
	TooFarPastTimestamp = PrevB#block.timestamp - PastTimestampTolerance - 1,
	B4 = sign_block(B#block{ timestamp = TooFarPastTimestamp }, PrevB, Key),
	post_block(B4, invalid_timestamp),
	OkPastTimestamp = PrevB#block.timestamp - PastTimestampTolerance + 1,
	B5 = sign_block(B#block{ timestamp = OkPastTimestamp }, PrevB, Key),
	post_block(B5, valid).

rejects_invalid_blocks_test_() ->
	{timeout, 120, fun test_rejects_invalid_blocks/0}.
%% @doc Exhaustively post tampered variants of a valid block and check each is
%% rejected with the expected validation error; most tampering categories must
%% also get the sender's IP banned (the blacklist is reset between cases).
test_rejects_invalid_blocks() ->
	[B0] = ar_weave:init([], ar_retarget:switch_to_linear_diff(2)),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	BI = ar_test_node:assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	%% Try to post an invalid block.
	InvalidH = crypto:strong_rand_bytes(48),
	ok = ar_events:subscribe(block),
	post_block(B1#block{ indep_hash = InvalidH }, invalid_hash),
	%% Verify the IP address of self is NOT banned in ar_blacklist_middleware.
	InvalidH2 = crypto:strong_rand_bytes(48),
	post_block(B1#block{ indep_hash = InvalidH2 }, invalid_hash),
	%% The valid block with the ID from the failed attempt can still go through.
	post_block(B1, valid),
	%% Try to post the same block again.
	Peer = ar_test_node:peer_ip(main),
	?assertMatch({ok, {{<<"208">>, _}, _, _, _, _}}, send_new_block(Peer, B1)),
	%% Correct hash, but invalid signature.
	B2Preimage = B1#block{ signature = <<>> },
	B2 = B2Preimage#block{ indep_hash = ar_block:indep_hash(B2Preimage) },
	post_block(B2, invalid_signature),
	%% Nonce limiter output too far in the future.
	Info1 = B1#block.nonce_limiter_info,
	{ok, Config} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	Key = ar_test_node:remote_call(peer1, ar_wallet, load_key, [Config#config.mining_addr]),
	B3 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		nonce_limiter_info = Info1#nonce_limiter_info{ global_step_number = 100000 } },
		B0, Key),
	post_block(B3, invalid_nonce_limiter_global_step_number),
	%% Nonce limiter output lower than that of the previous block.
	B4 = sign_block(B1#block{
		previous_block = B1#block.indep_hash,
		previous_cumulative_diff = B1#block.cumulative_diff,
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		height = B1#block.height + 1,
		nonce_limiter_info = Info1#nonce_limiter_info{ global_step_number = 1 } },
		B1, Key),
	post_block(B4, invalid_nonce_limiter_global_step_number),
	B1SolutionH = B1#block.hash,
	B1SolutionNum = binary:decode_unsigned(B1SolutionH),
	B5 = sign_block(B1#block{
		previous_block = B1#block.indep_hash,
		previous_cumulative_diff = B1#block.cumulative_diff,
		height = B1#block.height + 1,
		hash = binary:encode_unsigned(B1SolutionNum - 1) }, B1, Key),
	post_block(B5, invalid_nonce_limiter_global_step_number),
	%% Correct hash, but invalid PoW.
	InvalidKey = ar_wallet:new(),
	InvalidAddr = ar_wallet:to_address(InvalidKey),
	B6 = sign_block(B1#block{
		reward_addr = InvalidAddr,
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		reward_key = element(2, InvalidKey) }, B0, InvalidKey),
	timer:sleep(100 * 2), % ?THROTTLE_BY_IP_INTERVAL_MS * 2
	post_block(B6, [invalid_hash_preimage, invalid_pow]),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B7 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		%% Also, here it changes the block hash (the previous one would be ignored),
		%% because the poa field does not explicitly go in there (the motivation is to
		%% have a "quick pow" step which is quick to validate and somewhat expensive to
		%% forge).
		hash = crypto:strong_rand_bytes(32),
		poa = (B1#block.poa)#poa{ chunk = <<"a">> } }, B0, Key),
	post_block(B7, invalid_first_chunk),
	B7_1 = sign_block(B7#block{ chunk_hash = crypto:hash(sha256, <<"a">>) }, B0, Key),
	post_block(B7_1, invalid_pow),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B8 = sign_block(B1#block{ last_retarget = 100000 }, B0, Key),
	post_block(B8, invalid_last_retarget),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B9 = sign_block(B1#block{ diff = 100000 }, B0, Key),
	post_block(B9, invalid_difficulty),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B10 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		nonce = 100 }, B0, Key),
	post_block(B10, invalid_nonce),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B11_1 = sign_block(B1#block{ partition_number = 1 }, B0, Key),
	%% We might get invalid_hash_preimage occasionally, because the partition number
	%% changes H0 which changes the solution hash which may happen to be lower than
	%% the difficulty.
	post_block(B11_1, [invalid_resigned_solution_hash, invalid_hash_preimage]),
	B11 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		partition_number = 1 }, B0, Key),
	post_block(B11, [invalid_partition_number, invalid_hash_preimage]),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B12 = sign_block(B1#block{
		nonce_limiter_info = (B1#block.nonce_limiter_info)#nonce_limiter_info{
			last_step_checkpoints = [crypto:strong_rand_bytes(32)] } }, B0, Key),
	%% Reset the node to the genesis block.
	ar_test_node:start(B0),
	ok = ar_events:subscribe(block),
	post_block(B12, invalid_nonce_limiter),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B13 = sign_block(B1#block{ poa = (B1#block.poa)#poa{ data_path = <<>> } }, B0, Key),
	post_block(B13, invalid_poa),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B14 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		nonce_limiter_info = (B1#block.nonce_limiter_info)#nonce_limiter_info{
			next_seed = crypto:strong_rand_bytes(48) } }, B0, Key),
	post_block(B14, invalid_nonce_limiter_seed_data),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B15 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		nonce_limiter_info = (B1#block.nonce_limiter_info)#nonce_limiter_info{
			partition_upper_bound = 10000000 } }, B0, Key),
	post_block(B15, invalid_nonce_limiter_seed_data),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset(),
	B16 = sign_block(B1#block{
		%% Change the solution hash so that the validator does not go down
		%% the path comparing the resigned solution with the cached solution.
		hash = crypto:strong_rand_bytes(32),
		nonce_limiter_info = (B1#block.nonce_limiter_info)#nonce_limiter_info{
			next_partition_upper_bound = 10000000 } }, B0, Key),
	post_block(B16, invalid_nonce_limiter_seed_data),
	assert_banned(Peer),
	?assertMatch({ok, {{<<"403">>, _}, _,
			<<"IP address blocked due to previous request.">>, _, _}},
		send_new_block(Peer, B1#block{ indep_hash = crypto:strong_rand_bytes(48) })),
	ar_blacklist_middleware:reset().

rejects_blocks_with_invalid_double_signing_proof_test_() ->
	test_with_mocked_functions([{ar_fork, height_2_9, fun() -> 0 end}],
		fun test_reject_block_invalid_double_signing_proof/0).

rejects_blocks_with_small_rsa_keys_test_() ->
	{timeout, 60, fun test_rejects_blocks_with_small_rsa_keys/0}.
%% @doc Blocks signed with undersized RSA keys must be rejected with invalid_signature.
%% A normally-sized (512-byte) key that merely changes the reward address is rejected
%% for a different reason (invalid_resigned_solution_hash), which this test pins first.
test_rejects_blocks_with_small_rsa_keys() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ok = ar_events:subscribe(block),
	ar_test_node:mine(main),
	BI = ar_test_node:assert_wait_until_height(main, 1),
	B1 = ar_storage:read_block(hd(BI)),
	Key2 = ar_test_node:new_custom_size_rsa_wallet(512), % normal 512-byte key
	B2 = sign_block(B1, B0, Key2),
	post_block(B2, invalid_resigned_solution_hash), % because reward_addr changed
	Key3 = ar_test_node:new_custom_size_rsa_wallet(66), % 66-byte key
	B3 = sign_block(B1, B0, Key3),
	post_block(B3, invalid_signature),
	Key4 = ar_test_node:new_custom_size_rsa_wallet(511),
	B4 = sign_block(B1, B0, Key4),
	post_block(B4, invalid_signature).

%% @doc Run the invalid-double-signing-proof scenario for both supported key types.
test_reject_block_invalid_double_signing_proof() ->
	[test_reject_block_invalid_double_signing_proof(KeyType)
			|| KeyType <- [?RSA_KEY_TYPE, ?ECDSA_KEY_TYPE]].

%% @doc Exercise every rejection path for a malformed double-signing proof
%% (same signature twice, inconsistent cumulative diffs, proof against our own
%% address, address absent from the reward history), then submit two genuinely
%% conflicting blocks and verify the offender's account ends up banned while it
%% can still spend and upload.
test_reject_block_invalid_double_signing_proof(KeyType) ->
	?debugFmt("KeyType: ~p~n", [KeyType]),
	FullKey = ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, [KeyType]),
	MiningAddr = ar_wallet:to_address(FullKey),
	Key0 = ar_wallet:new(),
	Addr0 = ar_wallet:to_address(Key0),
	[B0] = ar_weave:init([{Addr0, ?AR(1000), <<>>}],
			ar_retarget:switch_to_linear_diff(2)),
	?debugFmt("Genesis address: ~s, initial balance: ~B AR.~n",
			[ar_util:encode(Addr0), 1000]),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0, MiningAddr),
	ar_test_node:disconnect_from(peer1),
	ok = ar_events:subscribe(block),
	{Priv, _} = Key = ar_test_node:remote_call(peer1, ar_wallet, load_key, [MiningAddr]),
	%% Fund the mining address so it can transact after being banned below.
	TX0 = ar_test_node:sign_tx(Key0, #{ target => ar_wallet:to_address(Key),
			quantity => ?AR(10) }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX0),
	ar_test_node:assert_post_tx_to_peer(main, TX0),
	ar_test_node:mine(peer1),
	BI = ar_test_node:assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	Random512 = crypto:strong_rand_bytes(512),
	Random64 = crypto:strong_rand_bytes(64),
	%% Both halves of the proof carry the same signature - must be rejected.
	InvalidProof = {Random512, Random512, 2, 1,
			Random64, Random512, 3, 2, Random64},
	B2 = sign_block(B1#block{ double_signing_proof = InvalidProof }, B0, Key),
	post_block(B2, invalid_double_signing_proof_same_signature),
	Random512_2 = crypto:strong_rand_bytes(512),
	%% Distinct signatures but inconsistent cumulative-diff claims.
	InvalidProof_2 = {Random512, Random512, 2, 1,
			Random64, Random512_2, 3, 2, Random64},
	B2_2 = sign_block(B1#block{ double_signing_proof = InvalidProof_2 }, B0, Key),
	post_block(B2_2, invalid_double_signing_proof_cdiff),
	CDiff = B1#block.cumulative_diff,
	PrevCDiff = B0#block.cumulative_diff,
	SignedH = ar_block:generate_signed_hash(B1),
	Preimage1 = << (B0#block.hash)/binary, SignedH/binary >>,
	Preimage2 = << (B0#block.hash)/binary, (crypto:strong_rand_bytes(32))/binary >>,
	SignaturePreimage = ar_block:get_block_signature_preimage(CDiff, PrevCDiff,
			Preimage2, 0),
	Signature2 = ar_wallet:sign(Priv, SignaturePreimage),
	%% We cannot ban ourselves.
	InvalidProof2 = {element(3, Priv), B1#block.signature, CDiff, PrevCDiff, Preimage1,
			Signature2, CDiff, PrevCDiff, Preimage2},
	B3 = sign_block(B1#block{ double_signing_proof = InvalidProof2 }, B0, Key),
	post_block(B3, invalid_double_signing_proof_same_address),
	ar_test_node:mine(peer1),
	BI2 = ar_test_node:assert_wait_until_height(peer1, 2),
	{ok, MainConfig} = arweave_config:get_env(),
	Key2 = element(1, ar_wallet:load_key(MainConfig#config.mining_addr)),
	Preimage3 = << (B0#block.hash)/binary, (crypto:strong_rand_bytes(32))/binary >>,
	Preimage4 = << (B0#block.hash)/binary, (crypto:strong_rand_bytes(32))/binary >>,
	SignaturePreimage3 = ar_block:get_block_signature_preimage(CDiff, PrevCDiff,
			Preimage3, 0),
	SignaturePreimage4 = ar_block:get_block_signature_preimage(CDiff, PrevCDiff,
			Preimage4, 0),
	Signature3 = ar_wallet:sign(Key2, SignaturePreimage3),
	Signature4 = ar_wallet:sign(Key2, SignaturePreimage4),
	%% The account address is not in the reward history.
	InvalidProof3 = {element(3, Key2), Signature3, CDiff, PrevCDiff, Preimage3,
			Signature4, CDiff, PrevCDiff, Preimage4},
	B5 = sign_block(B1#block{ double_signing_proof = InvalidProof3 }, B0, Key),
	post_block(B5, invalid_double_signing_proof_not_in_reward_history),
	B6 = ar_test_node:remote_call(peer1, ar_storage, read_block, [lists:nth(2, BI2)]),
	B7 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI2)]),
	%% ECDSA signatures are deterministic - we add a new tag to get a new signature here.
	B7_2 = sign_block(B7#block{ tags = [<<"new_tag">>] }, B6, Key),
	post_block(B6, valid),
	post_block(B7, valid),
	post_block(B7_2, valid),
	%% Wait until the node records conflicting proofs.
	true = ar_util:do_until(
		fun() ->
			map_size(maps:get(double_signing_proofs,
					sys:get_state(ar_node_worker), #{})) > 0
		end,
		200,
		30000
	),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(),
	BI3 = assert_wait_until_height(peer1, 3),
	B8 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI3)]),
	?assertNotEqual(undefined, B8#block.double_signing_proof),
	RewardAddr = B8#block.reward_addr,
	BannedAddr = ar_wallet:to_address(Key),
	Accounts = ar_wallets:get(B8#block.wallet_list, [BannedAddr, RewardAddr]),
	%% The 4-tuple with `false` marks the banned (mining-disabled) account.
	?assertMatch(#{ BannedAddr := {_, _, 1, false}, RewardAddr := {_, _} }, Accounts),
	%% The banned address may still use their accounts for transfers/uploads.
	Key3 = ar_wallet:new(),
	Target = ar_wallet:to_address(Key3),
	TX1 = ar_test_node:sign_tx(FullKey, #{ last_tx => <<>>, quantity => 1,
			target => Target }),
	TX2 = ar_test_node:sign_tx(FullKey, #{ last_tx => ar_test_node:get_tx_anchor(peer1),
			data => <<"a">> }),
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end,
			[TX1, TX2]),
	ar_test_node:mine(),
	BI4 = assert_wait_until_height(peer1, 4),
	B9 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI4)]),
	Accounts2 = ar_wallets:get(B9#block.wallet_list, [BannedAddr, Target]),
	TXID = TX2#tx.id,
	?assertEqual(2, length(B9#block.txs)),
	?assertMatch(#{ Target := {1, <<>>}, BannedAddr := {_, TXID, 1, false} }, Accounts2).

%% @doc Test generator for the /block2 and /block_announcement HTTP endpoints.
send_block2_test_() ->
	test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}],
		fun() -> test_send_block2() end).

%% @doc End-to-end test of block propagation via POST /block_announcement and
%% POST /block2, including the missing-transaction-indices negotiation, the 418
%% (missing txs) and 412 (unknown previous block) responses, and GET
%% /block2/height/N with a transaction-inclusion bitmask in the request body.
test_send_block2() ->
	{_, Pub} = Wallet = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(100), <<>>}]),
	MainWallet = ar_wallet:new_keyfile(),
	MainAddress = ar_wallet:to_address(MainWallet),
	PeerWallet = ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, []),
	PeerAddress = ar_wallet:to_address(PeerWallet),
	ar_test_node:start(B0, MainAddress),
	ar_test_node:start_peer(peer1, B0, PeerAddress),
	ar_test_node:disconnect_from(peer1),
	TXs = [ar_test_node:sign_tx(Wallet, #{ last_tx => ar_test_node:get_tx_anchor(peer1) })
			|| _ <- lists:seq(1, 10)],
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end, TXs),
	ar_test_node:mine(),
	[{H, _, _}, _] = wait_until_height(main, 1),
	B = ar_storage:read_block(H),
	TXs2 = sort_txs_by_block_order(TXs, B),
	%% Give peer1 every transaction at an odd 0-based position, so the even
	%% indices [0, 2, 4, 6, 8] remain missing on its side.
	EverySecondTX = element(2, lists:foldl(fun(TX, {N, Acc}) when N rem 2 /= 0 ->
			{N + 1, [TX | Acc]}; (_TX, {N, Acc}) -> {N + 1, Acc} end, {0, []}, TXs2)),
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX) end,
			EverySecondTX),
	Announcement = #block_announcement{ indep_hash = B#block.indep_hash,
			previous_block = B0#block.indep_hash,
			tx_prefixes = [binary:part(TX#tx.id, 0, 8) || TX <- TXs2] },
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(Announcement) }),
	Response = ar_serialize:binary_to_block_announcement_response(Body),
	?assertEqual({ok, #block_announcement_response{ missing_chunk = true,
			missing_tx_indices = [0, 2, 4, 6, 8] }}, Response),
	Announcement2 = Announcement#block_announcement{ recall_byte = 0 },
	{ok, {{<<"200">>, _}, _, Body2, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(Announcement2) }),
	Response2 = ar_serialize:binary_to_block_announcement_response(Body2),
	%% We always report missing chunk currently.
	?assertEqual({ok, #block_announcement_response{ missing_chunk = true,
			missing_tx_indices = [0, 2, 4, 6, 8] }}, Response2),
	Announcement3 = Announcement#block_announcement{ recall_byte = 100000000000000 },
	%% Re-matching the already-bound Body asserts this response is byte-identical
	%% to the first one (Erlang single-assignment semantics).
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(Announcement3) }),
	%% 418 response lists the transaction identifiers peer1 does not have.
	{ok, {{<<"418">>, _}, _, Body3, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block2",
			body => ar_serialize:block_to_binary(B) }),
	?assertEqual(iolist_to_binary(lists:foldl(fun(#tx{ id = TXID }, Acc) ->
			[TXID | Acc] end, [], TXs2 -- EverySecondTX)), Body3),
	B2 = B#block{ txs = [lists:nth(1, TXs2) | tl(B#block.txs)] },
	{ok, {{<<"418">>, _}, _, Body4, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block2",
			body => ar_serialize:block_to_binary(B2) }),
	?assertEqual(iolist_to_binary(lists:foldl(fun(#tx{ id = TXID }, Acc) ->
			[TXID | Acc] end, [],
			(TXs2 -- EverySecondTX) -- [lists:nth(1, TXs2)])), Body4),
	TXs3 = [ar_test_node:sign_tx(main, Wallet, #{
			last_tx => ar_test_node:get_tx_anchor(peer1),
			data => crypto:strong_rand_bytes(10 * 1024) }) || _ <- lists:seq(1, 10)],
	lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end, TXs3),
	ar_test_node:mine(),
	[{H2, _, _}, _, _] = wait_until_height(main, 2),
	%% Announcing a block whose previous block peer1 has not accepted yet -> 412.
	{ok, {{<<"412">>, _}, _, <<>>, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(#block_announcement{
					indep_hash = H2, previous_block = B#block.indep_hash }) }),
	BTXs = ar_storage:read_tx(B#block.txs),
	B3 = B#block{ txs = BTXs },
	{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block2",
			body => ar_serialize:block_to_binary(B3) }),
	{ok, {{<<"200">>, _}, _, SerializedB, _, _}} = ar_http:req(#{ method => get,
			peer => ar_test_node:peer_ip(main), path => "/block2/height/1" }),
	?assertEqual({ok, B}, ar_serialize:binary_to_block(SerializedB)),
	Map = element(2, lists:foldl(fun(TX, {N, M}) -> {N + 1, maps:put(TX#tx.id, N, M)} end,
			{0, #{}}, TXs2)),
	%% The request body is a bitmask of block positions whose full transactions
	%% the caller already has; here only position 0 is set.
	{ok, {{<<"200">>, _}, _, Serialized2B, _, _}} = ar_http:req(#{ method => get,
			peer => ar_test_node:peer_ip(main), path => "/block2/height/1",
			body => << 1:1, 0:(8 * 125 - 1) >> }),
	?assertEqual({ok, B#block{ txs = [case maps:get(TX#tx.id, Map) == 0 of true -> TX;
			_ -> TX#tx.id end || TX <- BTXs] }},
			ar_serialize:binary_to_block(Serialized2B)),
	%% A short (1-byte) bitmask must produce the same response - matched against
	%% the bound Serialized2B.
	{ok, {{<<"200">>, _}, _, Serialized2B, _, _}} = ar_http:req(#{ method => get,
			peer => ar_test_node:peer_ip(main), path => "/block2/height/1",
			body => << 1:1, 0:7 >> }),
	{ok, {{<<"200">>, _}, _, Serialized3B, _, _}} = ar_http:req(#{ method => get,
			peer => ar_test_node:peer_ip(main), path => "/block2/height/1",
			body => << 0:1, 1:1, 0:1, 1:1, 0:4 >> }),
	?assertEqual({ok, B#block{ txs = [case lists:member(maps:get(TX#tx.id, Map), [1, 3])
			of true -> TX; _ -> TX#tx.id end || TX <- BTXs] }},
			ar_serialize:binary_to_block(Serialized3B)),
	B4 = read_block_when_stored(H2, true),
	timer:sleep(500),
	{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block2",
			body => ar_serialize:block_to_binary(B4) }),
	ar_test_node:connect_to_peer(peer1),
	lists:foreach(
		fun(Height) ->
			ar_test_node:mine(),
			assert_wait_until_height(peer1, Height)
		end,
		lists:seq(3, 3 + ?SEARCH_SPACE_UPPER_BOUND_DEPTH)
	),
	B5 = ar_storage:read_block(ar_node:get_current_block_hash()),
	%% Announcing a block peer1 already has -> 208.
	{ok, {{<<"208">>, _}, _, _, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(#block_announcement{
					indep_hash = B5#block.indep_hash,
					previous_block = B5#block.previous_block }) }),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(),
	[_ | _] = wait_until_height(main, 3 + ?SEARCH_SPACE_UPPER_BOUND_DEPTH + 1),
	B6 = ar_storage:read_block(ar_node:get_current_block_hash()),
	{ok, {{<<"200">>, _}, _, Body5, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(#block_announcement{
					indep_hash = B6#block.indep_hash,
					previous_block = B6#block.previous_block,
					recall_byte = 0 }) }),
	%% We always report missing chunk currently.
	?assertEqual({ok, #block_announcement_response{ missing_chunk = true,
			missing_tx_indices = [] }},
			ar_serialize:binary_to_block_announcement_response(Body5)),
	{ok, {{<<"200">>, _}, _, Body6, _, _}} = ar_http:req(#{ method => post,
			peer => ar_test_node:peer_ip(peer1), path => "/block_announcement",
			body => ar_serialize:block_announcement_to_binary(#block_announcement{
					indep_hash = B6#block.indep_hash,
					previous_block = B6#block.previous_block,
					recall_byte = 1024 }) }),
	%% We always report missing chunk currently.
	?assertEqual({ok, #block_announcement_response{ missing_chunk = true,
			missing_tx_indices = [] }},
			ar_serialize:binary_to_block_announcement_response(Body6)).
%% @doc Test generator: a miner may legitimately re-sign the same mining solution.
resigned_solution_test_() ->
	test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}],
		fun() -> test_resigned_solution() end).

%% @doc Re-sign one solution twice (different tags), confirm both resigned blocks
%% are accepted, then build on top of the second-accepted branch and verify both
%% nodes converge on it after reconnection.
test_resigned_solution() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:mine(peer1),
	wait_until_height(main, 1),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:mine(peer1),
	B = ar_node:get_current_block(),
	{ok, Config} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	Key = ar_test_node:remote_call(peer1, ar_wallet, load_key,
			[Config#config.mining_addr]),
	ok = ar_events:subscribe(block),
	%% Two blocks carrying the same solution, distinguished only by their tags.
	B2 = sign_block(B#block{ tags = [<<"tag1">>] }, B0, Key),
	post_block(B2, [valid]),
	B3 = sign_block(B#block{ tags = [<<"tag2">>] }, B0, Key),
	post_block(B3, [valid]),
	assert_wait_until_height(peer1, 2),
	B4 = ar_test_node:remote_call(peer1, ar_node, get_current_block, []),
	?assertEqual(B#block.indep_hash, B4#block.previous_block),
	B2H = B2#block.indep_hash,
	?assertNotEqual(B2#block.indep_hash, B4#block.previous_block),
	PrevStepNumber = ar_block:vdf_step_number(B),
	PrevInterval = PrevStepNumber div ar_nonce_limiter:get_reset_frequency(),
	Info4 = B4#block.nonce_limiter_info,
	StepNumber = Info4#nonce_limiter_info.global_step_number,
	Interval = StepNumber div ar_nonce_limiter:get_reset_frequency(),
	%% If a VDF reset boundary was crossed, the next_seed must be updated to the
	%% new previous block when re-parenting B4 onto B2.
	B5 = case Interval == PrevInterval of
		true ->
			sign_block(B4#block{
					hash_list_merkle = ar_block:compute_hash_list_merkle(B2),
					previous_block = B2H }, B2, Key);
		false ->
			sign_block(B4#block{ previous_block = B2H,
					hash_list_merkle = ar_block:compute_hash_list_merkle(B2),
					nonce_limiter_info = Info4#nonce_limiter_info{
							next_seed = B2H } }, B2, Key)
	end,
	B5H = B5#block.indep_hash,
	post_block(B5, [valid]),
	[{B5H, _, _}, {B2H, _, _}, _] = wait_until_height(main, 2),
	ar_test_node:mine(),
	[{B6H, _, _}, _, _, _] = wait_until_height(main, 3),
	ar_test_node:connect_to_peer(peer1),
	[{B6H, _, _}, {B5H, _, _}, {B2H, _, _}, _] = assert_wait_until_height(peer1, 3).
%% ------------------------------------------------------------------------------------------
%% Helper functions
%% ------------------------------------------------------------------------------------------

%% @doc Return TXs reordered to match the order in which they appear in B's tx list.
sort_txs_by_block_order(TXs, B) ->
	TXByID = lists:foldl(fun(TX, Acc) -> maps:put(tx_id(TX), TX, Acc) end, #{}, TXs),
	lists:foldr(fun(TX, Acc) -> [maps:get(tx_id(TX), TXByID) | Acc] end, [],
			B#block.txs).

%% @doc Accept either a #tx{} record or a raw transaction identifier.
tx_id(#tx{ id = ID }) ->
	ID;
tx_id(ID) ->
	ID.

%% @doc Current height of the given test node.
height(Node) ->
	ar_test_node:remote_call(Node, ar_node, get_height, []).



================================================
FILE: apps/arweave/test/ar_pricing_tests.erl
================================================
-module(ar_pricing_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_pricing.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-include_lib("eunit/include/eunit.hrl").

-define(DISTANT_FUTURE_BLOCK_HEIGHT, 262800000). %% 1,000 years from genesis

%% @doc Test generator covering the static, transitional, and v2 pricing regimes.
%% Each scenario mocks the fork heights / transition bounds it depends on.
get_price_per_gib_minute_test_() ->
	[
		{timeout, 30, fun test_price_per_gib_minute_pre_block_time_history/0},
		ar_test_node:test_with_mocked_functions(
			[
				{ar_fork, height_2_7_2, fun() -> 10 end},
				{ar_pricing_transition, transition_start_2_6_8, fun() -> 5 end},
				{ar_pricing_transition, transition_start_2_7_2, fun() -> 15 end},
				{ar_pricing_transition, transition_length_2_6_8, fun() -> 20 end},
				{ar_pricing_transition, transition_length_2_7_2, fun() -> 40 end},
				%% This test uses specific price constants computed for this partition size.
				{ar_block, partition_size, fun() -> 2097152 end}
			],
			fun test_price_per_gib_minute_transition_phases/0),
		ar_test_node:test_with_mocked_functions(
			[
				{ar_block, partition_size, fun() -> 2097152 end}
			],
			fun test_v2_price/0),
		ar_test_node:test_with_mocked_functions(
			[
				{ar_block, partition_size, fun() -> 2097152 end},
				{ar_difficulty, poa1_diff_multiplier, fun(_) -> 2 end}
			],
			fun test_v2_price_with_poa1_diff_multiplier/0)
	].
%% @doc This test verifies an edge case code path that probably shouldn't ever be triggered.
%% ar_fork:height_2_7() and ar_fork:height_2_6_8() are 0
%% ?BLOCK_TIME_HISTORY_BLOCKS is 3
%% ?PRICE_2_6_8_TRANSITION_START is 2
%% So when the price transition starts we don't have enough block time history to apply the
%% new algorithm.
test_price_per_gib_minute_pre_block_time_history() ->
	Start = ar_pricing_transition:transition_start_2_6_8(),
	B = #block{
		reward_history = reward_history(1, 1),
		block_time_history = block_time_history(1, 1)
	},
	%% With too little history the static price must be used as a fallback.
	?assertEqual(ar_pricing_transition:static_price(),
			ar_pricing:get_price_per_gib_minute(Start, B),
			"Before we have enough block time history").

%% @doc Walk the price through every phase of the 2.6.8 and 2.7.2 transitions
%% (mocked bounds: 2.6.8 starts at 5 for 20 blocks, 2.7.2 starts at 15 for 40,
%% fork 2.7.2 activates at 10). The expected values are linear interpolations
%% between the static price (8162), the 2.7.2 cap (30000), and the v2 price.
test_price_per_gib_minute_transition_phases() ->
	%% V2 price when calculated with:
	%% - reward_history(1, 1)
	%% - block_time_history(1, 1)
	%% - PoA1 difficulty multiplier of 1
	B = #block{
		reward_history = reward_history(1, 1),
		block_time_history = block_time_history(1, 1)
	},
	V2Price = 61440,
	?assertEqual(V2Price, ar_pricing:get_v2_price_per_gib_minute(10, B), "V2 Price"),
	%% Static price
	?assertEqual(ar_pricing_transition:static_price(),
			ar_pricing:get_price_per_gib_minute(4, B), "Static price"),
	%% 2.6.8 start
	?assertEqual(ar_pricing_transition:static_price(),
			ar_pricing:get_price_per_gib_minute(5, B), "2.6.8 start price"),
	%% 2.6.8 transition, pre-2.7.2
	%% 1/20 interpolation from 8162 to 61440
	?assertEqual(10825, ar_pricing:get_price_per_gib_minute(6, B),
			"2.6.8 transition, pre-2.7.2 activation price"),
	%% 2.6.8 transition, post-2.7.2, pre-cap
	%% 5/20 interpolation from 8162 to 61440
	?assertEqual(21481, ar_pricing:get_price_per_gib_minute(10, B),
			"2.6.8 transition, at 2.7.2 activation price"),
	%% 6/20 interpolation from 8162 to 61440
	?assertEqual(24145, ar_pricing:get_price_per_gib_minute(11, B),
			"2.6.8 transition, post 2.7.2 activation, pre-cap price"),
	%% 2.6.8 transition, post-2.7.2, post-cap
	?assertEqual(30_000, ar_pricing:get_price_per_gib_minute(14, B),
			"2.6.8 transition, post 2.7.2 activation, post-cap price"),
	%% 2.7.2 start
	?assertEqual(30_000, ar_pricing:get_price_per_gib_minute(15, B),
			"2.7.2 start price"),
	%% 2.7.2 transition, before 2.6.8 end
	%% 5/40 interpolation from 30000 to 61440
	?assertEqual(33930, ar_pricing:get_price_per_gib_minute(20, B),
			"2.7.2 transition price, before 2.6.8 end"),
	%% 2.7.2 transition, at 2.6.8 end
	%% 10/40 interpolation from 30000 to 61440
	?assertEqual(37860, ar_pricing:get_price_per_gib_minute(25, B),
			"2.7.2 transition price, at 2.6.8 end"),
	%% 2.7.2 transition, after 2.6.8 end
	%% 11/40 interpolation from 30000 to 61440
	?assertEqual(38646, ar_pricing:get_price_per_gib_minute(26, B),
			"2.7.2 transition price, after 2.6.8 end"),
	%% 2.7.2 end
	?assertEqual(V2Price, ar_pricing:get_price_per_gib_minute(55, B),
			"2.7.2 end price"),
	%% v2 price
	?assertEqual(V2Price, ar_pricing:get_price_per_gib_minute(56, B),
			"After 2.7.2 transition end").

%% @doc Verify the v2 price both exactly at and beyond the 2.7.2 transition end,
%% with the default PoA1 difficulty multiplier of 1.
test_v2_price() ->
	AtTransitionEnd = ar_pricing_transition:transition_start_2_7_2() +
			ar_pricing_transition:transition_length(
					ar_pricing_transition:transition_start_2_7_2()),
	%% 2 chunks per partition when running tests
	%% If we get 1 solution per chunk (or 2 per partition), then we expect a price of 61440
	%% that's our "baseline" for the purposes of this explanation
	%% AllOneChunkBaseline: 1x baseline
	%% - 3 1-chunk blocks, 0 2-chunk blocks
	%% => 3/3 solutions per chunk
	%% => 2 per partition
	%% AllTwoChunkBaseline: 2x baseline
	%% - 0 1-chunk blocks, 3 2-chunk blocks
	%% => max, 2 solutions per chunk
	%% => 4 per partition
	%% MixedChunkBaseline: 1.5x baseline
	%% - 2 1-chunk blocks, 1 2-chunk blocks
	%% => 3/2 solutions per chunk
	%% => 3 per partition
	do_price_per_gib_minute_post_transition(AtTransitionEnd, 61440, 122880, 92160),
	BeyondTransition = AtTransitionEnd + 1000,
	do_price_per_gib_minute_post_transition(BeyondTransition, 61440, 122880, 92160).
%% @doc Same scenario as test_v2_price/0, but run with the PoA1 difficulty
%% multiplier mocked to 2 (see get_price_per_gib_minute_test_/0), which halves
%% the effective number of solutions per chunk and therefore the baselines.
%%
%% With 2 chunks per partition in tests, 1 solution per chunk (2 per partition)
%% corresponds to a price of 61440 - the "baseline" referenced below.
%%
%% AllOneChunkBaseline: 0.5x baseline
%% - 3 1-chunk blocks, 0 2-chunk blocks
%% => 3/(3*2) solutions per chunk => 1 per partition
%% AllTwoChunkBaseline: 1.5x baseline
%% - 0 1-chunk blocks, 3 2-chunk blocks
%% => max, (2+1)/2 solutions per chunk => 3 per partition
%% MixedChunkBaseline: 0.5x baseline
%% - 2 1-chunk blocks, 1 2-chunk blocks
%% => 3/4 solutions per chunk => 1.5 per partition,
%%    truncated to 1 per partition by integer arithmetic
test_v2_price_with_poa1_diff_multiplier() ->
	TransitionStart = ar_pricing_transition:transition_start_2_7_2(),
	TransitionEnd =
		TransitionStart + ar_pricing_transition:transition_length(TransitionStart),
	%% Check the expected prices exactly at the transition end...
	do_price_per_gib_minute_post_transition(TransitionEnd, 30720, 92160, 30720),
	%% ...and well beyond it - the v2 price applies in both cases.
	do_price_per_gib_minute_post_transition(TransitionEnd + 1000, 30720, 92160, 30720).
%% @doc Assert the v2 price at Height for a matrix of reward-history and
%% block-time-history shapes. The baselines are the expected prices for the
%% all-one-chunk, all-two-chunk, and mixed chunk-count histories respectively;
%% each assertion message records the scenario and the PoA1 multiplier in force.
do_price_per_gib_minute_post_transition(Height, AllOneChunkBaseline,
		AllTwoChunkBaseline, MixedChunkBaseline) ->
	PoA1DiffMultiplier = ar_difficulty:poa1_diff_multiplier(Height),
	B0 = #block{
		reward_history = reward_history(1, 1),
		block_time_history = block_time_history(1, 1)
	},
	?assertEqual(AllOneChunkBaseline,
			ar_pricing:get_price_per_gib_minute(Height, B0),
			io_lib:format(
					"hash_rate: low, reward: low, vdf: perfect, chunks: all_one, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	%% Reward scales the price linearly.
	B1 = #block{
		reward_history = reward_history(1, 10),
		block_time_history = block_time_history(1, 1)
	},
	?assertEqual(AllOneChunkBaseline * 10,
			ar_pricing:get_price_per_gib_minute(Height, B1),
			io_lib:format(
					"hash_rate: low, reward: high, vdf: perfect, chunks: all_one, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	%% Hash rate scales the price inversely.
	B2 = #block{
		reward_history = reward_history(10, 1),
		block_time_history = block_time_history(1, 1)
	},
	?assertEqual(AllOneChunkBaseline div 10,
			ar_pricing:get_price_per_gib_minute(Height, B2),
			io_lib:format(
					"hash_rate: high, reward: low, vdf: perfect, chunks: all_one, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	%% Reward and hash rate scaled together cancel out.
	B3 = #block{
		reward_history = reward_history(10, 10),
		block_time_history = block_time_history(1, 1)
	},
	?assertEqual(AllOneChunkBaseline,
			ar_pricing:get_price_per_gib_minute(Height, B3),
			io_lib:format(
					"hash_rate: high, reward: high, vdf: perfect, chunks: all_one, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	%% Slow VDF (long block intervals) lowers the price.
	B4 = #block{
		reward_history = reward_history(1, 1),
		block_time_history = block_time_history(10, 1)
	},
	?assertEqual(AllOneChunkBaseline div 10,
			ar_pricing:get_price_per_gib_minute(Height, B4),
			io_lib:format(
					"hash_rate: low, reward: low, vdf: slow, chunks: all_one, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	%% Fast VDF (many steps per interval) raises the price.
	B5 = #block{
		reward_history = reward_history(1, 1),
		block_time_history = block_time_history(1, 10)
	},
	?assertEqual(AllOneChunkBaseline * 10,
			ar_pricing:get_price_per_gib_minute(Height, B5),
			io_lib:format(
					"hash_rate: low, reward: low, vdf: fast, chunks: all_one, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	B6 = #block{
		reward_history = reward_history(1, 1),
		block_time_history = all_two_chunks()
	},
	?assertEqual(AllTwoChunkBaseline,
			ar_pricing:get_price_per_gib_minute(Height, B6),
			io_lib:format(
					"hash_rate: low, reward: low, vdf: perfect, chunks: all_two, poa1_diff: ~B",
					[PoA1DiffMultiplier])),
	B7 = #block{
		reward_history = reward_history(1, 1),
		block_time_history = mix_chunks()
	},
	?assertEqual(MixedChunkBaseline,
			ar_pricing:get_price_per_gib_minute(Height, B7),
			io_lib:format(
					"hash_rate: low, reward: low, vdf: perfect, chunks: mix, poa1_diff: ~B",
					[PoA1DiffMultiplier])).

%% @doc Three reward-history entries {Addr, HashRate, Reward, Denomination}
%% with hash rate and reward scaled by the given factors.
reward_history(HashRate, Reward) ->
	[
		{crypto:strong_rand_bytes(32), 1*HashRate, 1*Reward, 0},
		{crypto:strong_rand_bytes(32), 2*HashRate, 2*Reward, 0},
		{crypto:strong_rand_bytes(32), 3*HashRate, 3*Reward, 0}
	].

%% @doc Three block-time-history entries {BlockInterval, VDFSteps, ChunkCount}
%% with a chunk count of 1 per block.
block_time_history(BlockInterval, VDFSteps) ->
	[
		{BlockInterval*10, VDFSteps*10, 1},
		{BlockInterval*20, VDFSteps*20, 1},
		{BlockInterval*30, VDFSteps*30, 1}
	].

%% @doc Block time history where every block has two chunks.
all_two_chunks() ->
	[
		{10, 10, 2},
		{20, 20, 2},
		{30, 30, 2}
	].

%% @doc Block time history mixing one-chunk and two-chunk blocks.
mix_chunks() ->
	[
		{10, 10, 1},
		{20, 20, 2},
		{30, 30, 1}
	].

%% @doc A block positioned right before a price-adjustment boundary, used by the
%% recalculate_price_per_gib_minute tests below.
recalculate_price_per_gib_minute_test_block() ->
	#block{
		height = ?PRICE_ADJUSTMENT_FREQUENCY-1,
		denomination = 1,
		reward_history = [ {<<>>, 10000, 10, 1} ],
		block_time_history = [ {129, 135, 1} ],
		price_per_gib_minute = 10000,
		scheduled_price_per_gib_minute = 15000
	}.

%% @doc Pre-2.7.1 behavior: the scheduled price is promoted and the new
%% scheduled price is recomputed without the EMA smoothing (expected 8162).
recalculate_price_per_gib_minute_2_7_test_() ->
	ar_test_node:test_with_mocked_functions(
		[{ar_fork, height_2_6, fun() -> -1 end},
		{ar_fork, height_2_7, fun() -> -1 end},
		{ar_fork, height_2_7_1, fun() -> infinity end}],
		fun() ->
			B = recalculate_price_per_gib_minute_test_block(),
			?assertEqual({15000, 8162},
					ar_pricing:recalculate_price_per_gib_minute(B)),
			ok
		end).
recalculate_price_per_gib_minute_2_7_1_ema_test_() -> ar_test_node:test_with_mocked_functions( [{ar_fork, height_2_6, fun() -> -1 end}, {ar_fork, height_2_7, fun() -> -1 end}, {ar_fork, height_2_7_1, fun() -> -1 end}], fun() -> B = recalculate_price_per_gib_minute_test_block(), ?assertEqual({15000, 14316}, ar_pricing:recalculate_price_per_gib_minute(B)), ok end). auto_redenomination_and_endowment_debt_test_() -> %% Set some weird mocks to preserve the existing behavior of this test ar_test_node:test_with_mocked_functions([ {ar_pricing_transition, transition_start_2_7_2, fun() -> 3 end}, {ar_pricing_transition, transition_length_2_7_2, fun() -> 1 end} ], fun test_auto_redenomination_and_endowment_debt/0). test_auto_redenomination_and_endowment_debt() -> Key1 = {_, Pub1} = ar_wallet:new(), {_, Pub2} = ar_wallet:new(), Key3 = {_, Pub3} = ar_wallet:new(), [B0] = ar_weave:init([ {ar_wallet:to_address(Pub1), 20000000000000, <<>>}, {ar_wallet:to_address(Pub2), 2000000000, <<>>}, {ar_wallet:to_address(Pub3), ?AR(1000000000000000000), <<>>} ]), ar_test_node:start(B0), ar_test_node:start_peer(peer1, B0), ar_test_node:connect_to_peer(peer1), ?assert(ar_pricing_transition:transition_start_2_6_8() == 2), ?assert(ar_pricing_transition:transition_length_2_6_8() == 2), ?assert(?PRICE_ADJUSTMENT_FREQUENCY == 2), ?assert(?REDENOMINATION_DELAY_BLOCKS == 2), ?assert(?REWARD_HISTORY_BLOCKS == 3), ?assert(?DOUBLE_SIGNING_REWARD_SAMPLE_SIZE == 2), ?assertEqual(262144 * 3, B0#block.weave_size), {ok, Config} = arweave_config:get_env(), {_, MinerPub} = ar_wallet:load_key(Config#config.mining_addr), ?assertEqual(0, get_balance(MinerPub)), ?assertEqual(0, get_reserved_balance(Config#config.mining_addr)), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 1), ar_test_node:assert_wait_until_height(peer1, 1), ?assertEqual(0, get_balance(MinerPub)), B1 = ar_node:get_current_block(), ?assertEqual(10, B1#block.reward), ?assertEqual(10, get_reserved_balance(B1#block.reward_addr)), 
?assertEqual(1, ar_difficulty:get_hash_rate_fixed_ratio(B1)), MinerAddr = ar_wallet:to_address(MinerPub), ?assertEqual([{MinerAddr, 1, 10, 1}, {B0#block.reward_addr, 1, 10, 1}], B1#block.reward_history), ?assertEqual([{time_diff(B1, B0), vdf_diff(B1, B0), chunk_count(B1)}, {1, 1, 1}], B1#block.block_time_history), %% The price is recomputed every two blocks. ?assertEqual(B1#block.price_per_gib_minute, B1#block.scheduled_price_per_gib_minute), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 2), ar_test_node:assert_wait_until_height(peer1, 2), ?assertEqual(0, get_balance(MinerPub)), B2 = ar_node:get_current_block(), ?assertEqual(2, B2#block.height), ?assertEqual(10, B2#block.reward), ?assertEqual(20, get_reserved_balance(B2#block.reward_addr)), ?assertEqual(B1#block.price_per_gib_minute, B2#block.price_per_gib_minute), ?assertEqual(B2#block.scheduled_price_per_gib_minute, B2#block.price_per_gib_minute), ?assertEqual([{MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {B0#block.reward_addr, 1, 10, 1}], B2#block.reward_history), ?assertEqual([{time_diff(B2, B1), vdf_diff(B2, B1), chunk_count(B2)}, {time_diff(B1, B0), vdf_diff(B1, B0), chunk_count(B1)}, {1, 1, 1}], B2#block.block_time_history), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 3), ar_test_node:assert_wait_until_height(peer1, 3), ?assertEqual(0, get_balance(MinerPub)), B3 = ar_node:get_current_block(), ?assertEqual(10, B3#block.reward), ?assertEqual(30, get_reserved_balance(B3#block.reward_addr)), ?assertEqual(B3#block.price_per_gib_minute, B2#block.price_per_gib_minute), ?assertEqual(B3#block.scheduled_price_per_gib_minute, B2#block.scheduled_price_per_gib_minute), ?assertEqual(0, B3#block.kryder_plus_rate_multiplier_latch), ?assertEqual(1, B3#block.kryder_plus_rate_multiplier), ?assertEqual([{MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {B0#block.reward_addr, 1, 10, 1}], B3#block.reward_history), Fee = ar_test_node:get_optimistic_tx_price(main, 1024), 
ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 4), ar_test_node:assert_wait_until_height(peer1, 4), B4 = ar_node:get_current_block(), %% We are at the height ?PRICE_2_6_8_TRANSITION_START + ?PRICE_2_6_8_TRANSITION_BLOCKS %% so the new algorithm kicks in which estimates the expected block reward and takes %% the missing amount from the endowment pool or takes on debt. AvgBlockTime4 = ar_block_time_history:compute_block_interval(B3), ExpectedReward4 = max(ar_inflation:calculate(4), B3#block.price_per_gib_minute * ?N_REPLICATIONS(B4#block.height) * AvgBlockTime4 div 60 * 3 div (4 * 1024)), % weave_size / GiB ?assertEqual(ExpectedReward4, B4#block.reward), ?assertEqual(ExpectedReward4 + 20, get_reserved_balance(B4#block.reward_addr)), ?assertEqual(10, get_balance(MinerPub)), ?assertEqual(ExpectedReward4 - 10, B4#block.debt_supply), ?assertEqual(1, B4#block.kryder_plus_rate_multiplier_latch), ?assertEqual(2, B4#block.kryder_plus_rate_multiplier), ?assertEqual(B4#block.price_per_gib_minute, B3#block.scheduled_price_per_gib_minute), PricePerGiBMinute3 = B3#block.price_per_gib_minute, ?assertEqual(max(PricePerGiBMinute3 div 2, min(PricePerGiBMinute3 * 2, ar_pricing:get_price_per_gib_minute(B3#block.height, B3))), B4#block.scheduled_price_per_gib_minute), %% The Kryder+ rate multiplier is 2 now so the fees should have doubled. 
?assert(lists:member(ar_test_node:get_optimistic_tx_price(main, 1024), [Fee * 2, Fee * 2 + 1])), ?assertEqual([{MinerAddr, 1, ExpectedReward4, 1}, {MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {B0#block.reward_addr, 1, 10, 1}], B4#block.reward_history), ?assertEqual( ar_rewards:reward_history_hash(B4#block.height, B3#block.reward_history_hash, [{MinerAddr, 1, ExpectedReward4, 1}, {MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}]), B4#block.reward_history_hash), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 5), ar_test_node:assert_wait_until_height(peer1, 5), B5 = ar_node:get_current_block(), ?assertEqual(20, get_balance(MinerPub)), AvgBlockTime5 = ar_block_time_history:compute_block_interval(B4), ExpectedReward5 = max(B4#block.price_per_gib_minute * ?N_REPLICATIONS(B5#block.height) * AvgBlockTime5 div 60 * 3 div (4 * 1024), % weave_size / GiB ar_inflation:calculate(5)), ?assertEqual(ExpectedReward5, B5#block.reward), ?assertEqual([{MinerAddr, 1, ExpectedReward5, 1}, {MinerAddr, 1, ExpectedReward4, 1}, {MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {MinerAddr, 1, 10, 1}, {B0#block.reward_addr, 1, 10, 1}], B5#block.reward_history), ?assertEqual( ar_rewards:reward_history_hash(B5#block.height, B4#block.reward_history_hash, [{MinerAddr, 1, ExpectedReward5, 1}, {MinerAddr, 1, ExpectedReward4, 1}, {MinerAddr, 1, 10, 1}]), B5#block.reward_history_hash), ?assertEqual(1, B5#block.kryder_plus_rate_multiplier_latch), ?assertEqual(2, B5#block.kryder_plus_rate_multiplier), %% The price per GiB minute recalculation only happens every two blocks. 
?assertEqual(B5#block.scheduled_price_per_gib_minute, B4#block.scheduled_price_per_gib_minute), ?assertEqual(B4#block.price_per_gib_minute, B5#block.price_per_gib_minute), ?assertEqual(20000000000000, get_balance(Pub1)), ?assert(lists:member(ar_test_node:get_optimistic_tx_price(main, 1024), [Fee * 2, Fee * 2 + 1])), HalfKryderLatchReset = ?RESET_KRYDER_PLUS_LATCH_THRESHOLD div 2, TX1 = ar_test_node:sign_tx(main, Key1, #{ denomination => 0, reward => HalfKryderLatchReset }), ar_test_node:assert_post_tx_to_peer(main, TX1), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 6), ar_test_node:assert_wait_until_height(peer1, 6), B6 = ar_node:get_current_block(), {MinerShareDividend, MinerShareDivisor} = ?MINER_FEE_SHARE, ?assertEqual(30, get_balance(MinerPub)), ?assertEqual(10 + % inflation HalfKryderLatchReset, B6#block.reward + B6#block.reward_pool - B5#block.reward_pool - (B6#block.debt_supply - B5#block.debt_supply)), ?assertEqual(20000000000000 - HalfKryderLatchReset, get_balance(Pub1)), ?assertEqual(1, B6#block.kryder_plus_rate_multiplier_latch), ?assertEqual(2, B6#block.kryder_plus_rate_multiplier), ?assertEqual(B6#block.price_per_gib_minute, B5#block.scheduled_price_per_gib_minute), ScheduledPricePerGiBMinute5 = B5#block.scheduled_price_per_gib_minute, ?assertEqual( max(ScheduledPricePerGiBMinute5 div 2, min(ar_pricing:get_price_per_gib_minute(B5#block.height, B5), ScheduledPricePerGiBMinute5 * 2)), B6#block.scheduled_price_per_gib_minute), assert_new_account_fee(), ?assertEqual(1, B6#block.denomination), ?assertEqual(?TOTAL_SUPPLY + B6#block.debt_supply - B6#block.reward_pool, prometheus_gauge:value(available_supply)), ?assertEqual([{MinerAddr, 1, B6#block.reward, 1}, {MinerAddr, 1, ExpectedReward5, 1}, {MinerAddr, 1, ExpectedReward4, 1}], lists:sublist(B6#block.reward_history, 3)), TX2 = ar_test_node:sign_tx(main, Key1, #{ denomination => 0, reward => HalfKryderLatchReset * 2 }), ar_test_node:assert_post_tx_to_peer(main, TX2), ar_test_node:mine(), 
ar_test_node:assert_wait_until_height(main, 7), ar_test_node:assert_wait_until_height(peer1, 7), B7 = ar_node:get_current_block(), ?assertEqual(10 + % inflation HalfKryderLatchReset * 2, B7#block.reward + B7#block.reward_pool - B6#block.reward_pool - (B7#block.debt_supply - B6#block.debt_supply)), ?assertEqual(30 + ExpectedReward4, get_balance(MinerPub)), ?assertEqual(20000000000000 - HalfKryderLatchReset * 3, get_balance(Pub1)), ?assert(B7#block.reward_pool > ?RESET_KRYDER_PLUS_LATCH_THRESHOLD), ?assertEqual(1, B7#block.kryder_plus_rate_multiplier_latch), ?assertEqual(2, B7#block.kryder_plus_rate_multiplier), ?assertEqual(B6#block.price_per_gib_minute, B7#block.price_per_gib_minute), ?assert(ar_test_node:get_optimistic_tx_price(main, 1024) > Fee), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 8), ar_test_node:assert_wait_until_height(peer1, 8), B8 = ar_node:get_current_block(), ?assertEqual(30 + ExpectedReward5 + ExpectedReward4, get_balance(MinerPub)), %% Release because at the previous block the endowment pool exceeded the threshold. ?assert(B8#block.reward_pool < B7#block.reward_pool), ?assertEqual(0, B8#block.kryder_plus_rate_multiplier_latch), ?assertEqual(2, B8#block.kryder_plus_rate_multiplier), ?assertEqual(1, B8#block.denomination), ?assert(prometheus_gauge:value(available_supply) > ?REDENOMINATION_THRESHOLD), ?assert(B8#block.scheduled_price_per_gib_minute > B8#block.price_per_gib_minute), %% A transaction with explicitly set denomination. 
TX3 = ar_test_node:sign_tx(main, Key3, #{ denomination => 1 }), {Reward3, _} = ar_test_node:get_tx_price(main, 0, ar_wallet:to_address(Pub2)), {Reward4, _} = ar_test_node:get_tx_price(main, 0), ar_test_node:assert_post_tx_to_peer(main, TX3), TX4 = ar_test_node:sign_tx(main, Key3, #{ denomination => 2 }), ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX4)), ?assertEqual({ok, ["invalid_denomination"]}, ar_tx_db:get_error_codes(TX4#tx.id)), TX5 = ar_test_node:sign_tx(main, Key3, #{ denomination => 0, target => ar_wallet:to_address(Pub2), quantity => 10 }), ar_test_node:assert_post_tx_to_peer(main, TX5), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 9), ar_test_node:assert_wait_until_height(peer1, 9), B9 = ar_node:get_current_block(), ?assertEqual(0, B9#block.kryder_plus_rate_multiplier_latch), ?assertEqual(2, B9#block.kryder_plus_rate_multiplier), ?assertEqual(1, B9#block.denomination), ?assertEqual(2, length(B9#block.txs)), ?assertEqual(0, B9#block.redenomination_height), ?assert(prometheus_gauge:value(available_supply) < ?REDENOMINATION_THRESHOLD), ?assertEqual(?AR(1000000000000000000) - Reward3 - Reward4 - 10, get_balance(Pub3)), ?assertEqual(2000000000 + 10, get_balance(Pub2)), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 10), ar_test_node:assert_wait_until_height(peer1, 10), B10 = ar_node:get_current_block(), ?assertEqual(9 + ?REDENOMINATION_DELAY_BLOCKS, B10#block.redenomination_height), ?assertEqual(1, B10#block.denomination), TX6 = ar_test_node:sign_tx(main, Key3, #{ denomination => 0 }), %% Transactions without explicit denomination are not accepted now until %% the redenomination height. ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX6)), ?assertEqual({ok, ["invalid_denomination"]}, ar_tx_db:get_error_codes(TX6#tx.id)), %% The redenomination did not start yet. 
TX7 = ar_test_node:sign_tx(main, Key3, #{ denomination => 2 }), ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX7)), ?assertEqual({ok, ["invalid_denomination"]}, ar_tx_db:get_error_codes(TX7#tx.id)), {_, Pub4} = ar_wallet:new(), TX8 = ar_test_node:sign_tx(main, Key3, #{ denomination => 1, target => ar_wallet:to_address(Pub4), quantity => 3 }), ar_test_node:assert_post_tx_to_peer(main, TX8), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 11), ar_test_node:assert_wait_until_height(peer1, 11), B11 = ar_node:get_current_block(), ?assertEqual(1, length(B11#block.txs)), ?assertEqual(1, B11#block.denomination), ?assertEqual(3, get_balance(Pub4)), Balance11 = get_balance(Pub3), {_, Pub5} = ar_wallet:new(), TX9 = ar_test_node:sign_v1_tx(main, Key3, #{ denomination => 0, target => ar_wallet:to_address(Pub5), quantity => 100 }), ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX9)), ?assertEqual({ok, ["invalid_denomination"]}, ar_tx_db:get_error_codes(TX9#tx.id)), %% The redenomination did not start just yet. 
TX10 = ar_test_node:sign_v1_tx(main, Key3, #{ denomination => 2 }), ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX10)), ?assertEqual({ok, ["invalid_denomination"]}, ar_tx_db:get_error_codes(TX10#tx.id)), {Reward11, _} = ar_test_node:get_tx_price(main, 0, ar_wallet:to_address(Pub5)), ?assert(ar_difficulty:get_hash_rate_fixed_ratio(B11) > 1), ?assertEqual(lists:sublist([{MinerAddr, ar_difficulty:get_hash_rate_fixed_ratio(B11), B11#block.reward, 1} | B10#block.reward_history], ?REWARD_HISTORY_BLOCKS + ar_block:get_consensus_window_size()), B11#block.reward_history), TX11 = ar_test_node:sign_tx(main, Key3, #{ denomination => 1, target => ar_wallet:to_address(Pub5), quantity => 100 }), ar_test_node:assert_post_tx_to_peer(main, TX11), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 12), ar_test_node:assert_wait_until_height(peer1, 12), ?assertEqual(100 * 1000, get_balance(Pub5)), ?assertEqual(3 * 1000, get_balance(Pub4)), ?assertEqual((Balance11 - Reward11 - 100) * 1000, get_balance(Pub3)), B12 = ar_node:get_current_block(), ?assertEqual(1, length(B12#block.txs)), ?assertEqual(2, B12#block.denomination), ?assertEqual(10 * 1000 + % inflation Reward11 * 1000, % fees B12#block.reward + B12#block.reward_pool - B11#block.reward_pool * 1000 - (B12#block.debt_supply - B11#block.debt_supply * 1000)), ?assertEqual(B11#block.debt_supply * 1000, B12#block.debt_supply), %% Setting the price scheduled on height=10. 
?assertEqual(B11#block.scheduled_price_per_gib_minute * 1000, B12#block.price_per_gib_minute), ?assertEqual([{MinerAddr, ar_difficulty:get_hash_rate_fixed_ratio(B12), B12#block.reward, 2} | B11#block.reward_history], B12#block.reward_history), TX12 = ar_test_node:sign_tx(main, Key3, #{ denomination => 0, quantity => 10, target => ar_wallet:to_address(Pub5) }), {Reward12, 2} = ar_test_node:get_tx_price(main, 0), ar_test_node:assert_post_tx_to_peer(main, TX12), TX13 = ar_test_node:sign_v1_tx(main, Key3, #{ denomination => 0, reward => ar_test_node:get_optimistic_tx_price(main, 0), target => ar_wallet:to_address(Pub4), quantity => 4 }), ar_test_node:assert_post_tx_to_peer(main, TX13), Reward14 = ar_test_node:get_optimistic_tx_price(main, 0), TX14 = ar_test_node:sign_v1_tx(main, Key3, #{ denomination => 2, reward => Reward14, quantity => 5, target => ar_wallet:to_address(Pub4) }), ar_test_node:assert_post_tx_to_peer(main, TX14), TX15 = ar_test_node:sign_v1_tx(main, Key3, #{ denomination => 2, reward => erlang:ceil(Reward14 / 1000) }), ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX15)), ?assertEqual({ok, ["tx_too_cheap"]}, ar_tx_db:get_error_codes(TX15#tx.id)), TX16 = ar_test_node:sign_v1_tx(main, Key3, #{ denomination => 3 }), ?assertMatch({ok, {{<<"400">>, _}, _, _, _, _}}, ar_test_node:post_tx_to_peer(main, TX16)), ?assertEqual({ok, ["invalid_denomination"]}, ar_tx_db:get_error_codes(TX16#tx.id)), {_, Pub6} = ar_wallet:new(), %% Divide the reward by 1000 and specify the previous denomination. 
Reward17 = ar_test_node:get_optimistic_tx_price(main, 0, ar_wallet:to_address(Pub6)), TX17 = ar_test_node:sign_tx(main, Key3, #{ denomination => 1, reward => erlang:ceil(Reward17 / 1000), target => ar_wallet:to_address(Pub6), quantity => 7 }), ar_test_node:assert_post_tx_to_peer(main, TX17), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 13), ar_test_node:assert_wait_until_height(peer1, 13), B13 = ar_node:get_current_block(), Reward17_2 = erlang:ceil(Reward17 / 1000) * 1000, AvgBlockTime13 = ar_block_time_history:compute_block_interval(B12), BaseReward13 = B12#block.price_per_gib_minute * ?N_REPLICATIONS(B13#block.height) * AvgBlockTime13 div 60 % minutes * B13#block.weave_size div (1024 * 1024 * 1024), % weave_size / GiB FeeSum13 = Reward12 * MinerShareDividend div MinerShareDivisor % TX12 + Reward14 * MinerShareDividend div MinerShareDivisor % TX13 + Reward14 * MinerShareDividend div MinerShareDivisor % TX14 + Reward17_2 * MinerShareDividend div MinerShareDivisor, % TX17 ?debugFmt("B12#block.reward_pool: ~B, fees: ~B, fees received by the reward pool:~B, " "expected reward: ~B, miner fee share: ~B~n", [B12#block.reward_pool, Reward12 + Reward14 * 2 + Reward17_2, Reward12 + Reward14 * 2 + Reward17_2 - FeeSum13, BaseReward13, FeeSum13]), ?assertEqual(B12#block.reward_pool + Reward12 + Reward14 * 2 + Reward17_2 - FeeSum13 - max(0, (BaseReward13 - (10 * 1000 + FeeSum13))), B13#block.reward_pool), ?assertEqual(4, length(B13#block.txs)), ?assertEqual(2, B13#block.denomination), ?assertEqual(100 * 1000 + 10, get_balance(Pub5)), ?assertEqual(7 * 1000, get_balance(Pub6)), ?assertEqual(3 * 1000 + 4 + 5, get_balance(Pub4)), assert_new_account_fee(), ar_test_node:mine(), ar_test_node:assert_wait_until_height(main, 14), ar_test_node:assert_wait_until_height(peer1, 14), B14 = ar_node:get_current_block(), ScheduledPricePerGiBMinute13 = B13#block.scheduled_price_per_gib_minute, ?assertEqual( max(ScheduledPricePerGiBMinute13 div 2, min( 
		ar_pricing:get_price_per_gib_minute(B13#block.height, B13),
		ScheduledPricePerGiBMinute13 * 2
	)),
	B14#block.scheduled_price_per_gib_minute).

%% @doc Return the wall-clock interval, in seconds, between a block and its
%% predecessor, clamped below at 1 so callers never see a zero or negative
%% interval.
time_diff(#block{ timestamp = TS }, #block{ timestamp = PrevTS }) ->
	max(1, TS - PrevTS).

%% @doc Return the number of VDF steps computed between the two given blocks.
vdf_diff(B, PrevB) ->
	ar_block:vdf_step_number(B) - ar_block:vdf_step_number(PrevB).

%% @doc Return the number of recall chunks referenced by the block: 1 when
%% recall_byte2 is unset, 2 otherwise (presumably a two-chunk proof of
%% access — confirm against the PoA validation code).
chunk_count(#block{ recall_byte2 = undefined }) -> 1;
chunk_count(_) -> 2.

%% @doc Assert the new-account fee behaves like an extra
%% ?NEW_ACCOUNT_FEE_DATA_SIZE_EQUIVALENT bytes of upload: sending 0 bytes to a
%% non-existent address costs less than uploading 262144 bytes more than the
%% equivalent, and more than uploading 262144 bytes less than the equivalent.
assert_new_account_fee() ->
	?assert(ar_test_node:get_optimistic_tx_price(main,
			262144 + ?NEW_ACCOUNT_FEE_DATA_SIZE_EQUIVALENT)
			> ar_test_node:get_optimistic_tx_price(main, 0,
					<<"non-existent-address">>)),
	?assert(ar_test_node:get_optimistic_tx_price(main,
			?NEW_ACCOUNT_FEE_DATA_SIZE_EQUIVALENT - 262144)
			< ar_test_node:get_optimistic_tx_price(main, 0,
					<<"non-existent-address">>)).

%% @doc Return the current balance of the given account.
%% The balance is fetched twice — via /wallet/<addr>/balance and via
%% /wallet_list/<root>/<addr>/balance — and the test fails if they disagree.
get_balance(Pub) ->
	Address = ar_wallet:to_address(Pub),
	Peer = ar_test_node:peer_ip(main),
	{ok, {{<<"200">>, _}, _, Reply, _, _}} =
		ar_http:req(#{
			method => get,
			peer => Peer,
			path => "/wallet/" ++ binary_to_list(ar_util:encode(Address))
					++ "/balance"
		}),
	Balance = binary_to_integer(Reply),
	B = ar_node:get_current_block(),
	{ok, {{<<"200">>, _}, _, Reply2, _, _}} =
		ar_http:req(#{
			method => get,
			peer => Peer,
			path => "/wallet_list/"
					++ binary_to_list(ar_util:encode(B#block.wallet_list))
					++ "/" ++ binary_to_list(ar_util:encode(Address))
					++ "/balance"
		}),
	case binary_to_integer(Reply2) of
		Balance ->
			Balance;
		Balance2 ->
			%% The two endpoints must agree on the balance.
			?assert(false, io_lib:format("Expected: ~B, got: ~B.~n",
					[Balance, Balance2]))
	end.

%% @doc Return the total reserved mining rewards of the given address, as
%% reported by the /wallet/<addr>/reserved_rewards_total endpoint.
get_reserved_balance(Address) ->
	Peer = ar_test_node:peer_ip(main),
	{ok, {{<<"200">>, _}, _, Reply, _, _}} =
		ar_http:req(#{
			method => get,
			peer => Peer,
			path => "/wallet/" ++ binary_to_list(ar_util:encode(Address))
					++ "/reserved_rewards_total"
		}),
	binary_to_integer(Reply).


================================================
FILE: apps/arweave/test/ar_reject_chunks_tests.erl
================================================
-module(ar_reject_chunks_tests).
-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_data_sync.hrl").

-import(ar_test_node, [sign_v1_tx/2, wait_until_height/2, assert_wait_until_height/2,
		read_block_when_stored/1, test_with_mocked_functions/2]).

rejects_invalid_chunks_test_() ->
	{timeout, 180, fun test_rejects_invalid_chunks/0}.

%% @doc Post a series of structurally invalid chunk proofs to the main node
%% and assert each is rejected with HTTP 400 and the expected JSON error
%% (or HTTP 413 when the whole payload is oversized).
test_rejects_invalid_chunks() ->
	ar_test_data_sync:setup_nodes(),
	%% A chunk one byte bigger than ?DATA_CHUNK_SIZE.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"chunk_too_big\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(#{
			chunk => ar_util:encode(crypto:strong_rand_bytes(?DATA_CHUNK_SIZE + 1)),
			data_path => <<>>,
			offset => <<"0">>,
			data_size => <<"0">>
		}))
	),
	%% A Merkle proof path one byte bigger than ?MAX_PATH_SIZE.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"data_path_too_big\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(#{
			data_path => ar_util:encode(crypto:strong_rand_bytes(?MAX_PATH_SIZE + 1)),
			chunk => <<>>,
			offset => <<"0">>,
			data_size => <<"0">>
		}))
	),
	%% An offset that does not fit into 256 bits.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"offset_too_big\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(#{
			offset => integer_to_binary(trunc(math:pow(2, 256))),
			data_path => <<>>,
			chunk => <<>>,
			data_size => <<"0">>
		}))
	),
	%% A data size that does not fit into 256 bits.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"data_size_too_big\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(#{
			data_size => integer_to_binary(trunc(math:pow(2, 256))),
			data_path => <<>>,
			chunk => <<>>,
			offset => <<"0">>
		}))
	),
	%% A proof path longer than the chunk it proves.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"chunk_proof_ratio_not_attractive\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(#{
			chunk => ar_util:encode(<<"a">>),
			data_path => ar_util:encode(<<"bb">>),
			offset => <<"0">>,
			data_size => <<"0">>
		}))
	),
	ar_test_data_sync:setup_nodes(),
	%% A well-formed proof whose data root was never announced on chain.
	Chunk = crypto:strong_rand_bytes(500),
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
		ar_tx:chunks_to_size_tagged_chunks([Chunk])
	),
	{DataRoot, DataTree} = ar_merkle:generate_tree(SizedChunkIDs),
	DataPath = ar_merkle:generate_path(DataRoot, 0, DataTree),
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"data_root_not_found\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(#{
			data_root => ar_util:encode(DataRoot),
			chunk => ar_util:encode(Chunk),
			data_path => ar_util:encode(DataPath),
			offset => <<"0">>,
			data_size => <<"500">>
		}))
	),
	%% A body exceeding ?MAX_SERIALIZED_CHUNK_PROOF_SIZE is rejected with 413.
	?assertMatch(
		{ok, {{<<"413">>, _}, _, <<"Payload too large">>, _, _}},
		ar_test_node:post_chunk(main, << <<0>> || _ <- lists:seq(1, ?MAX_SERIALIZED_CHUNK_PROOF_SIZE + 1) >>)
	).

does_not_store_small_chunks_after_2_5_test_() ->
	{timeout, 600, fun test_does_not_store_small_chunks_after_2_5/0}.

%% @doc For several three-chunk split layouts, post every chunk at many
%% candidate offsets (all accepted while offsets are unknown), then, once the
%% transaction is mined above the strict data split threshold, assert which
%% chunks were actually kept and which are gone (404).
test_does_not_store_small_chunks_after_2_5() ->
	Size = ?DATA_CHUNK_SIZE,
	Third = Size div 3,
	%% Each split is {Title, DataSize, FirstSize, SecondSize, ThirdSize,
	%% FirstMerkleOffset, SecondMerkleOffset, ThirdMerkleOffset,
	%% FirstPublishOffsets, SecondPublishOffsets, ThirdPublishOffsets,
	%% Expectations} where Expectations maps query offsets to the chunk
	%% expected there (first | second | third) or to 404.
	Splits = [
		{"Even split", Size * 3, Size, Size, Size, Size, Size * 2, Size * 3,
			lists:seq(0, Size - 1, 2048),
			lists:seq(Size, Size * 2 - 1, 2048),
			lists:seq(Size * 2, Size * 3 + 2048, 2048),
			[{O, first} || O <- lists:seq(1, Size - 1, 2048)]
			++ [{O, second} || O <- lists:seq(Size + 1, 2 * Size, 2048)]
			++ [{O, third} || O <- lists:seq(2 * Size + 1, 3 * Size, 2048)]
			++ [{3 * Size + 1, 404}, {4 * Size, 404}]},
		{"Small last chunk", 2 * Size + Third, Size, Size, Third, Size, 2 * Size,
			2 * Size + Third,
			lists:seq(3, Size - 1, 2048),
			lists:seq(Size, 2 * Size - 1, 2048),
			lists:seq(2 * Size, 2 * Size + Third, 2048),
			%% The chunk is expected to be returned by any offset of the 256 KiB
			%% bucket where it ends.
			[{O, first} || O <- lists:seq(1, Size - 1, 2048)]
			++ [{O, second} || O <- lists:seq(Size + 1, 2 * Size, 2048)]
			++ [{O, third} || O <- lists:seq(2 * Size + 1, 3 * Size, 2048)]
			++ [{3 * Size + 1, 404}, {4 * Size, 404}]},
		{"Small chunks crossing the bucket", 2 * Size + Third, Size, Third + 1,
			Size - 1, Size, Size + Third + 1, 2 * Size + Third,
			lists:seq(0, Size - 1, 2048),
			lists:seq(Size, Size + Third, 1024),
			lists:seq(Size + Third + 1, 2 * Size + Third, 2048),
			[{O, first} || O <- lists:seq(1, Size, 2048)]
			++ [{O, second} || O <- lists:seq(Size + 1, 2 * Size, 2048)]
			++ [{O, third} || O <- lists:seq(2 * Size + 1, 3 * Size, 2048)]
			++ [{3 * Size + 1, 404}, {4 * Size, 404}]},
		{"Small chunks in one bucket", 2 * Size, Size, Third, Size - Third, Size,
			Size + Third, 2 * Size,
			lists:seq(0, Size - 1, 2048),
			lists:seq(Size, Size + Third - 1, 2048),
			lists:seq(Size + Third, 2 * Size, 2048),
			[{O, first} || O <- lists:seq(1, Size - 1, 2048)]
			%% The second and third chunks must not be accepted.
			%% The second chunk does not precede a chunk crossing a bucket border,
			%% the third chunk ends at a multiple of 256 KiB.
			++ [{O, 404} || O <- lists:seq(Size + 1, 2 * Size + 4096, 2048)]},
		{"Small chunk preceding 256 KiB chunk", 2 * Size + Third, Third, Size,
			Size, Third, Size + Third, 2 * Size + Third,
			lists:seq(0, Third - 1, 2048),
			lists:seq(Third, Third + Size - 1, 2048),
			lists:seq(Third + Size, Third + 2 * Size, 2048),
			%% The first chunk must not be accepted, the first bucket stays empty.
			[{O, 404} || O <- lists:seq(1, Size - 1, 2048)]
			%% The other chunks are rejected too - their start offsets
			%% are not aligned with the buckets.
			++ [{O, 404} || O <- lists:seq(Size + 1, 4 * Size, 2048)]}],
	lists:foreach(
		fun({Title, DataSize, FirstSize, SecondSize, ThirdSize, FirstMerkleOffset,
				SecondMerkleOffset, ThirdMerkleOffset, FirstPublishOffsets,
				SecondPublishOffsets, ThirdPublishOffsets, Expectations}) ->
			?debugFmt("Running [~s]", [Title]),
			Wallet = ar_test_data_sync:setup_nodes(),
			{FirstChunk, SecondChunk, ThirdChunk} = {crypto:strong_rand_bytes(FirstSize),
					crypto:strong_rand_bytes(SecondSize),
					crypto:strong_rand_bytes(ThirdSize)},
			{FirstChunkID, SecondChunkID, ThirdChunkID} =
					{ar_tx:generate_chunk_id(FirstChunk),
					ar_tx:generate_chunk_id(SecondChunk),
					ar_tx:generate_chunk_id(ThirdChunk)},
			{DataRoot, DataTree} = ar_merkle:generate_tree(
					[{FirstChunkID, FirstMerkleOffset},
					{SecondChunkID, SecondMerkleOffset},
					{ThirdChunkID, ThirdMerkleOffset}]),
			TX = ar_test_node:sign_tx(Wallet, #{
					last_tx => ar_test_node:get_tx_anchor(main),
					data_size => DataSize, data_root => DataRoot }),
			ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [TX]),
			lists:foreach(
				fun({Chunk, Offset}) ->
					DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree),
					Proof = #{ data_root => ar_util:encode(DataRoot),
							data_path => ar_util:encode(DataPath),
							chunk => ar_util:encode(Chunk),
							offset => integer_to_binary(Offset),
							data_size => integer_to_binary(DataSize) },
					%% All chunks are accepted because we do not know their offsets yet -
					%% in theory they may end up below the strict data split threshold.
					?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
							ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof)),
							Title)
				end,
				[{FirstChunk, O} || O <- FirstPublishOffsets]
				++ [{SecondChunk, O} || O <- SecondPublishOffsets]
				++ [{ThirdChunk, O} || O <- ThirdPublishOffsets]
			),
			%% In practice the chunks are above the strict data split threshold so those
			%% which do not pass strict validation will not be stored.
			timer:sleep(2000),
			GenesisOffset = ar_block:strict_data_split_threshold(),
			lists:foreach(
				fun	({Offset, 404}) ->
						?assertMatch({ok, {{<<"404">>, _}, _, _, _, _}},
								ar_test_node:get_chunk(main, GenesisOffset + Offset),
								Title);
					({Offset, first}) ->
						{ok, {{<<"200">>, _}, _, ProofJSON, _, _}} =
								ar_test_node:get_chunk(main, GenesisOffset + Offset),
						?assertEqual(FirstChunk, ar_util:decode(maps:get(<<"chunk">>,
								jiffy:decode(ProofJSON, [return_maps]))), Title);
					({Offset, second}) ->
						{ok, {{<<"200">>, _}, _, ProofJSON, _, _}} =
								ar_test_node:get_chunk(main, GenesisOffset + Offset),
						?assertEqual(SecondChunk, ar_util:decode(maps:get(<<"chunk">>,
								jiffy:decode(ProofJSON, [return_maps]))), Title);
					({Offset, third}) ->
						{ok, {{<<"200">>, _}, _, ProofJSON, _, _}} =
								ar_test_node:get_chunk(main, GenesisOffset + Offset),
						?assertEqual(ThirdChunk, ar_util:decode(maps:get(<<"chunk">>,
								jiffy:decode(ProofJSON, [return_maps]))), Title)
				end,
				Expectations
			)
		end,
		Splits
	).

rejects_chunks_with_merkle_tree_borders_exceeding_max_chunk_size_test_() ->
	{timeout, 120, fun test_rejects_chunks_with_merkle_tree_borders_exceeding_max_chunk_size/0}.
%% @doc Build a single-chunk transaction whose Merkle tree declares an end
%% offset of ?DATA_CHUNK_SIZE + 1 for a ?DATA_CHUNK_SIZE-byte chunk and
%% verify the node rejects the proof with {"error":"invalid_proof"}.
test_rejects_chunks_with_merkle_tree_borders_exceeding_max_chunk_size() ->
	Wallet = ar_test_data_sync:setup_nodes(),
	Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
	ChunkID = ar_tx:generate_chunk_id(Chunk),
	%% The tree-declared chunk border exceeds the maximum chunk size by one byte.
	{DataRoot, DataTree} = ar_merkle:generate_tree([{ChunkID, ?DATA_CHUNK_SIZE + 1}]),
	TXParams = #{
		last_tx => ar_test_node:get_tx_anchor(main),
		data_size => ?DATA_CHUNK_SIZE,
		data_root => DataRoot
	},
	TX = ar_test_node:sign_tx(Wallet, TXParams),
	ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [TX]),
	DataPath = ar_merkle:generate_path(DataRoot, 0, DataTree),
	Proof = #{
		data_root => ar_util:encode(DataRoot),
		data_path => ar_util:encode(DataPath),
		chunk => ar_util:encode(Chunk),
		offset => <<"0">>,
		data_size => integer_to_binary(?DATA_CHUNK_SIZE)
	},
	Response = ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof)),
	?assertMatch({ok, {{<<"400">>, _}, _, <<"{\"error\":\"invalid_proof\"}">>, _, _}},
			Response).

rejects_chunks_exceeding_disk_pool_limit_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_rejects_chunks_exceeding_disk_pool_limit/0}.
%% @doc Fill the disk pool up to its per-data-root and global limits with
%% three transactions' chunks, assert the overflow chunks are rejected with
%% exceeds_disk_pool_size_limit, then mine blocks and assert space is freed
%% as chunks are assigned absolute offsets and mature out of the pool.
test_rejects_chunks_exceeding_disk_pool_limit() ->
	Wallet = ar_test_data_sync:setup_nodes(),
	%% Data root 1: one byte over the per-data-root disk pool limit.
	Data1 = crypto:strong_rand_bytes(
		(?DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB * ?MiB) + 1
	),
	Chunks1 = ar_test_data_sync:imperfect_split(Data1),
	{DataRoot1, _} = ar_merkle:generate_tree(
		ar_tx:sized_chunks_to_sized_chunk_ids(
			ar_tx:chunks_to_size_tagged_chunks(Chunks1)
		)
	),
	{TX1, Chunks1} = ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot1, Chunks1}),
	ar_test_node:assert_post_tx_to_peer(main, TX1),
	[{_, FirstProof1} | Proofs1] = ar_test_data_sync:build_proofs(TX1, Chunks1, [TX1], 0, 0),
	lists:foreach(
		fun({_, Proof}) ->
			?assertMatch(
				{ok, {{<<"200">>, _}, _, _, _, _}},
				ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))
			)
		end,
		Proofs1
	),
	%% The one remaining chunk of data root 1 would push it over its limit.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"exceeds_disk_pool_size_limit\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(FirstProof1))
	),
	%% Data root 2: sized to stay within both the remaining global space and
	%% the per-data-root limit.
	Data2 = crypto:strong_rand_bytes(
		min(
			?DEFAULT_MAX_DISK_POOL_BUFFER_MB - ?DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB,
			?DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB - 1
		) * ?MiB
	),
	Chunks2 = ar_test_data_sync:imperfect_split(Data2),
	{DataRoot2, _} = ar_merkle:generate_tree(
		ar_tx:sized_chunks_to_sized_chunk_ids(
			ar_tx:chunks_to_size_tagged_chunks(Chunks2)
		)
	),
	{TX2, Chunks2} = ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot2, Chunks2}),
	ar_test_node:assert_post_tx_to_peer(main, TX2),
	Proofs2 = ar_test_data_sync:build_proofs(TX2, Chunks2, [TX2], 0, 0),
	lists:foreach(
		fun({_, Proof}) ->
			%% The very last chunk will be dropped later because it starts and ends
			%% in the bucket of the previous chunk (the chunk sizes are 131072).
			?assertMatch(
				{ok, {{<<"200">>, _}, _, _, _, _}},
				ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))
			)
		end,
		Proofs2
	),
	%% Data root 3: one byte over the space left in the global disk pool.
	Left = ?DEFAULT_MAX_DISK_POOL_BUFFER_MB * ?MiB
			- lists:sum([byte_size(Chunk) || Chunk <- tl(Chunks1)])
			- byte_size(Data2),
	?assert(Left < ?DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB * ?MiB),
	Data3 = crypto:strong_rand_bytes(Left + 1),
	Chunks3 = ar_test_data_sync:imperfect_split(Data3),
	{DataRoot3, _} = ar_merkle:generate_tree(
		ar_tx:sized_chunks_to_sized_chunk_ids(
			ar_tx:chunks_to_size_tagged_chunks(Chunks3)
		)
	),
	{TX3, Chunks3} = ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot3, Chunks3}),
	ar_test_node:assert_post_tx_to_peer(main, TX3),
	[{_, FirstProof3} | Proofs3] = ar_test_data_sync:build_proofs(TX3, Chunks3, [TX3], 0, 0),
	lists:foreach(
		fun({_, Proof}) ->
			%% The very last chunk will be dropped later because it starts and ends
			%% in the bucket of the previous chunk (the chunk sizes are 131072).
			?assertMatch(
				{ok, {{<<"200">>, _}, _, _, _, _}},
				ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))
			)
		end,
		Proofs3
	),
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"exceeds_disk_pool_size_limit\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(FirstProof3))
	),
	ar_test_node:mine(main),
	assert_wait_until_height(main, 1),
	true = ar_util:do_until(
		fun() ->
			%% After a block is mined, the chunks receive their absolute offsets, which
			%% end up above the strict data split threshold and so the node discovers
			%% the very last chunks of the last two transactions are invalid under these
			%% offsets and frees up 131072 + 131072 bytes in the disk pool => we can submit
			%% a 262144-byte chunk. Also, expect 303 instead of 200 because the last block
			%% was large such that the configured partitions do not cover at least two
			%% times as much space ahead of the current weave size.
			case ar_test_node:post_chunk(main, ar_serialize:jsonify(FirstProof3)) of
				{ok, {{<<"303">>, _}, _, _, _, _}} ->
					true;
				Response ->
					?debugFmt("post_chunk response (offset: ~p, data_root: ~p): ~p",
							[maps:get(offset, FirstProof3),
							maps:get(data_root, FirstProof3), Response]),
					false
			end
		end,
		2000,
		30 * 1000
	),
	%% Now we do not have free space again.
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"{\"error\":\"exceeds_disk_pool_size_limit\"}">>, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(FirstProof1))
	),
	%% Mine two more blocks to make the chunks mature so that we can remove them from the
	%% disk pool (they will stay in the corresponding storage modules though, if any).
	ar_test_node:mine(main),
	assert_wait_until_height(main, 2),
	ar_test_node:mine(main),
	true = ar_util:do_until(
		fun() ->
			case ar_test_node:post_chunk(main, ar_serialize:jsonify(FirstProof1)) of
				{ok, {{<<"200">>, _}, _, _, _, _}} ->
					true;
				_ ->
					false
			end
		end,
		2000,
		30 * 1000
	).

accepts_chunks_test_() ->
	ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> 0 end}],
			fun test_accepts_chunks/0, 120).

test_accepts_chunks() ->
	test_accepts_chunks(original_split).

%% @doc Post the three chunks of a transaction out of order and assert each
%% becomes retrievable exactly at the offsets of the 256 KiB bucket it ends
%% in, and that /tx/<id>/data only returns data once all chunks are synced.
test_accepts_chunks(Split) ->
	Wallet = ar_test_data_sync:setup_nodes(),
	{TX, Chunks} = ar_test_data_sync:tx(Wallet, {Split, 3}),
	ar_test_node:assert_post_tx_to_peer(peer1, TX),
	ar_test_node:assert_wait_until_receives_txs([TX]),
	[{Offset, FirstProof}, {_, SecondProof}, {_, ThirdProof}] =
		ar_test_data_sync:build_proofs(TX, Chunks, [TX], 0, 0),
	EndOffset = Offset + ar_block:strict_data_split_threshold(),
	%% Post the third proof to the disk pool.
	?assertMatch(
		{ok, {{<<"200">>, _}, _, _, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(ThirdProof))
	),
	ar_test_node:mine(peer1),
	[{BH, _, _} | _] = wait_until_height(main, 1),
	B = read_block_when_stored(BH),
	%% The first chunk is not synced yet.
	?assertMatch(
		{ok, {{<<"404">>, _}, _, _, _, _}},
		ar_test_node:get_chunk(main, EndOffset)
	),
	?assertMatch(
		{ok, {{<<"200">>, _}, _, _, _, _}},
		ar_test_node:post_chunk(main, ar_serialize:jsonify(FirstProof))
	),
	%% Expect the chunk to be retrieved by any offset within
	%% (EndOffset - ChunkSize, EndOffset], but not outside of it.
	FirstChunk = ar_util:decode(maps:get(chunk, FirstProof)),
	FirstChunkSize = byte_size(FirstChunk),
	ExpectedProof = #{
		data_path => maps:get(data_path, FirstProof),
		tx_path => maps:get(tx_path, FirstProof),
		chunk => ar_util:encode(FirstChunk)
	},
	ar_test_data_sync:wait_until_syncs_chunk(EndOffset, ExpectedProof),
	ar_test_data_sync:wait_until_syncs_chunk(
		EndOffset - rand:uniform(FirstChunkSize - 2), ExpectedProof),
	ar_test_data_sync:wait_until_syncs_chunk(EndOffset - FirstChunkSize + 1,
			ExpectedProof),
	?assertMatch({ok, {{<<"404">>, _}, _, _, _, _}}, ar_test_node:get_chunk(main, 0)),
	?assertMatch({ok, {{<<"404">>, _}, _, _, _, _}},
			ar_test_node:get_chunk(main, EndOffset + 1)),
	TXSize = byte_size(binary:list_to_bin(Chunks)),
	ExpectedOffsetInfo = ar_serialize:jsonify(#{
			offset => integer_to_binary(TXSize + ar_block:strict_data_split_threshold()),
			size => integer_to_binary(TXSize) }),
	?assertMatch({ok, {{<<"200">>, _}, _, ExpectedOffsetInfo, _, _}},
			ar_test_data_sync:get_tx_offset(main, TX#tx.id)),
	%% Expect no transaction data because the second chunk is not synced yet.
	?assertMatch({ok, {{<<"404">>, _}, _, _Binary, _, _}},
			ar_test_data_sync:get_tx_data(TX#tx.id)),
	?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}},
			ar_test_node:post_chunk(main, ar_serialize:jsonify(SecondProof))),
	ExpectedSecondProof = #{
		data_path => maps:get(data_path, SecondProof),
		tx_path => maps:get(tx_path, SecondProof),
		chunk => maps:get(chunk, SecondProof)
	},
	SecondChunk = ar_util:decode(maps:get(chunk, SecondProof)),
	SecondChunkOffset = ar_block:strict_data_split_threshold() + FirstChunkSize
			+ byte_size(SecondChunk),
	ar_test_data_sync:wait_until_syncs_chunk(SecondChunkOffset, ExpectedSecondProof),
	%% All chunks are synced now, so the full transaction data is served.
	true = ar_util:do_until(
		fun() ->
			{ok, {{<<"200">>, _}, _, Data, _, _}} = ar_test_data_sync:get_tx_data(TX#tx.id),
			ar_util:encode(binary:list_to_bin(Chunks)) == Data
		end,
		500,
		10 * 1000
	),
	ExpectedThirdProof = #{
		data_path => maps:get(data_path, ThirdProof),
		tx_path => maps:get(tx_path, ThirdProof),
		chunk => maps:get(chunk, ThirdProof)
	},
	ar_test_data_sync:wait_until_syncs_chunk(B#block.weave_size, ExpectedThirdProof),
	?assertMatch({ok, {{<<"404">>, _}, _, _, _, _}},
			ar_test_node:get_chunk(main, B#block.weave_size + 1)).


================================================
FILE: apps/arweave/test/ar_replica_2_9_nif_tests.erl
================================================
-module(ar_replica_2_9_nif_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar_consensus.hrl").

%% @doc Initialize a fast (dirty-CPU-scheduler-parallel) and a light
%% RandomX-squared state pair with ?RANDOMX_PACKING_KEY, shared as the
%% fixture by all tests in this module.
setup_replica_2_9() ->
	FastState = ar_mine_randomx:init_fast2(rxsquared, ?RANDOMX_PACKING_KEY, 0, 0,
			erlang:system_info(dirty_cpu_schedulers_online)),
	LightState = ar_mine_randomx:init_light2(rxsquared, ?RANDOMX_PACKING_KEY, 0, 0),
	{FastState, LightState}.

%% Wrap TestFun so it runs with the shared fixture under a 120-second timeout.
test_register(TestFun, Fixture) ->
	{timeout, 120, {with, Fixture, [TestFun]}}.
randomx_replica_2_9_suite_test_() ->
	{setup, fun setup_replica_2_9/0,
		fun (SetupData) ->
			[
				test_register(fun test_vectors/1, SetupData), % TODO move bottom
				test_register(fun test_state/1, SetupData),
				test_register(fun test_pack_unpack_sub_chunks/1, SetupData)
			]
		end
	}.

%% -------------------------------------------------------------------------------------------
%% replica_2_9 tests
%% -------------------------------------------------------------------------------------------

%% @doc Check the metadata reported for both states: mode (fast/light),
%% dataset size, and a scratchpad size matching ?RANDOMX_SCRATCHPAD_SIZE.
test_state({FastState, LightState}) ->
	?assertEqual(
		{ok, {rxsquared, fast, 34047604, 2097152}},
		ar_mine_randomx:info(FastState)
	),
	?assertEqual(
		{ok, {rxsquared, light, 0, 2097152}},
		ar_mine_randomx:info(LightState)
	),
	{ok, {_, _, _, ScratchpadSize}} = ar_mine_randomx:info(FastState),
	?assertEqual(?RANDOMX_SCRATCHPAD_SIZE, ScratchpadSize).

%% @doc Known-answer tests: the SHA-256 digests of generated replica 2.9
%% entropies and of an encrypted sub-chunk must match the pinned vectors,
%% and decryption must round-trip the sub-chunk.
test_vectors({FastState, _LightState}) ->
	Key = << 1 >>,
	Entropy = ar_mine_randomx:randomx_generate_replica_2_9_entropy(FastState, Key),
	EntropyHash = crypto:hash(sha256, Entropy),
	EntropyHashExpd = << 56,199,231,119,170,151,220,154,45,204,70,193,80,68,
			46,50,136,31,35,102,141,77,19,66,191,127,97,183,230,
			119,243,151 >>,
	?assertEqual(EntropyHashExpd, EntropyHash),
	Key2 = << 2 >>,
	Entropy2 = ar_mine_randomx:randomx_generate_replica_2_9_entropy(FastState, Key2),
	EntropyHash2 = crypto:hash(sha256, Entropy2),
	EntropyHashExpd2 = << 206,47,133,111,139,20,31,64,185,33,107,29,14,10,252,
			76,201,75,203,186,131,32,20,45,34,125,76,248,64,90,
			220,196 >>,
	?assertEqual(EntropyHashExpd2, EntropyHash2),
	%% An 8192-byte all-ones sub-chunk packed at entropy sub-chunk index 1.
	SubChunk = << 255:(8*8192) >>,
	EntropySubChunkIndex = 1,
	{ok, Packed} = ar_mine_randomx:randomx_encrypt_replica_2_9_sub_chunk(
			{FastState, Entropy, SubChunk, EntropySubChunkIndex}),
	PackedHashReal = crypto:hash(sha256, Packed),
	PackedHashExpd = << 15,46,184,11,124,31,150,77,199,107,221,0,136,154,61,
			146,193,198,126,52,19,7,211,28,121,108,176,15,124,33,
			48,99 >>,
	?assertEqual(PackedHashExpd, PackedHashReal),
	{ok, Unpacked} =
		ar_mine_randomx:randomx_decrypt_replica_2_9_sub_chunk(
			{FastState, Entropy, Packed, EntropySubChunkIndex}),
	?assertEqual(SubChunk, Unpacked),
	ok.

%% @doc Pack the same zeroed 8192-byte sub-chunk under all 1024 entropy
%% indices, assert each packed output is 8192 bytes, and unpack each back.
test_pack_unpack_sub_chunks({State, _LightState}) ->
	Key = << 0:256 >>,
	SubChunk = << 0:(8192 * 8) >>,
	Entropy = ar_mine_randomx:randomx_generate_replica_2_9_entropy(State, Key),
	?assertEqual(8388608, byte_size(Entropy)),
	PackedSubChunks = pack_sub_chunks(SubChunk, Entropy, 0, SubChunk, State),
	?assert(lists:all(fun(PackedSubChunk) -> byte_size(PackedSubChunk) == 8192 end,
			PackedSubChunks)),
	unpack_sub_chunks(PackedSubChunks, 0, SubChunk, Entropy, State).

%% @doc Pack SubChunk at every entropy index in [Index, 1024), asserting each
%% packed sub-chunk differs from the previously produced one.
pack_sub_chunks(_SubChunk, _Entropy, Index, _PreviousSubChunk, _State) when Index == 1024 ->
	[];
pack_sub_chunks(SubChunk, Entropy, Index, PreviousSubChunk, State) ->
	{ok, PackedSubChunk} = ar_mine_randomx:randomx_encrypt_replica_2_9_sub_chunk(
			{State, Entropy, SubChunk, Index}),
	Note = io_lib:format("Packed a sub-chunk, index=~B.~n", [Index]),
	?assertNotEqual(PackedSubChunk, PreviousSubChunk, Note),
	[PackedSubChunk | pack_sub_chunks(SubChunk, Entropy, Index + 1, PackedSubChunk, State)].

%% @doc Decrypt each packed sub-chunk at its index and assert it round-trips
%% to the original SubChunk.
unpack_sub_chunks([], _Index, _SubChunk, _Entropy, _State) ->
	ok;
unpack_sub_chunks([PackedSubChunk | PackedSubChunks], Index, SubChunk, Entropy, State) ->
	{ok, UnpackedSubChunk} = ar_mine_randomx:randomx_decrypt_replica_2_9_sub_chunk(
			{State, Entropy, PackedSubChunk, Index}),
	Note = io_lib:format("Unpacked a sub-chunk, index=~B.~n", [Index]),
	?assertEqual(SubChunk, UnpackedSubChunk, Note),
	unpack_sub_chunks(PackedSubChunks, Index + 1, SubChunk, Entropy, State).


================================================
FILE: apps/arweave/test/ar_semaphore_tests.erl
================================================
-module(ar_semaphore_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("eunit/include/eunit.hrl").
one_wait_per_process_test_() -> with_semaphore_(one_wait_per_process_sem, 4, fun() -> ?assertEqual(ok, ar_semaphore:acquire(one_wait_per_process_sem, ?DEFAULT_CALL_TIMEOUT)), ?assertEqual({error, process_already_waiting}, ar_semaphore:acquire(one_wait_per_process_sem, ?DEFAULT_CALL_TIMEOUT)) end). wait_for_one_process_at_a_time_test_() -> with_semaphore_(wait_for_one_process_at_a_time_sem, 1, fun() -> TestPid = self(), SleepMs = 500, NoMessageMs = 250, DoneTimeoutMs = 3000, spawn_worker(wait_for_one_process_at_a_time_sem, SleepMs, TestPid, p1), spawn_worker(wait_for_one_process_at_a_time_sem, SleepMs, TestPid, p2), spawn_worker(wait_for_one_process_at_a_time_sem, SleepMs, TestPid, p3), ?assert(receive _ -> false after NoMessageMs -> true end), Done1 = receive_done(DoneTimeoutMs), ?assert(receive _ -> false after NoMessageMs -> true end), Done2 = receive_done(DoneTimeoutMs), ?assertNotEqual(Done1, Done2), ?assert(receive _ -> false after NoMessageMs -> true end), Done3 = receive_done(DoneTimeoutMs), ?assertNotEqual(Done1, Done3), ?assertNotEqual(Done2, Done3) end). wait_for_two_processes_at_a_time_test_() -> with_semaphore_(wait_for_two_processes_at_a_time_sem, 2, fun() -> TestPid = self(), SleepMs = 400, DoneTimeoutMs = 3000, spawn_worker(wait_for_two_processes_at_a_time_sem, SleepMs, TestPid, p1), spawn_worker(wait_for_two_processes_at_a_time_sem, SleepMs, TestPid, p2), spawn_worker(wait_for_two_processes_at_a_time_sem, SleepMs, TestPid, p3), spawn_worker(wait_for_two_processes_at_a_time_sem, SleepMs, TestPid, p4), Done1 = receive_done(DoneTimeoutMs), Done2 = receive_done(DoneTimeoutMs), ?assertNotEqual(Done1, Done2), ?assert(receive _ -> false after 250 -> true end), Done3 = receive_done(DoneTimeoutMs), ?assertNotEqual(Done1, Done3), ?assertNotEqual(Done2, Done3), Done4 = receive_done(DoneTimeoutMs), ?assertNotEqual(Done1, Done4), ?assertNotEqual(Done2, Done4), ?assertNotEqual(Done3, Done4) end). 
spawn_worker(SemaphoreName, SleepMs, TestPid, WorkerID) -> spawn_link(fun() -> ok = ar_semaphore:acquire(SemaphoreName, ?DEFAULT_CALL_TIMEOUT), timer:sleep(SleepMs), TestPid ! {done, WorkerID} end). receive_done(TimeoutMs) -> receive {done, WorkerID} -> WorkerID after TimeoutMs -> ?assert(false) end. with_semaphore_(Name, Value, Fun) -> {setup, fun() -> {ok, _} = ar_semaphore:start_link(Name, Value) end, fun(_) -> _ = ar_semaphore:stop(Name) end, [Fun] }. ================================================ FILE: apps/arweave/test/ar_serialize_tests.erl ================================================ -module(ar_serialize_tests). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave/include/ar_mining.hrl"). -include_lib("arweave/include/ar_pool.hrl"). -include_lib("eunit/include/eunit.hrl"). block_to_binary_test_() -> %% Set the mainnet values here because we are using the mainnet fixtures. ar_test_node:test_with_mocked_functions([ {ar_fork, height_1_6, fun() -> 95000 end}, {ar_fork, height_1_7, fun() -> 235200 end}, {ar_fork, height_1_8, fun() -> 269510 end}, {ar_fork, height_1_9, fun() -> 315700 end}, {ar_fork, height_2_0, fun() -> 422250 end}, {ar_fork, height_2_2, fun() -> 552180 end}, {ar_fork, height_2_3, fun() -> 591140 end}, {ar_fork, height_2_4, fun() -> 633720 end}, {ar_fork, height_2_5, fun() -> 812970 end}, {ar_fork, height_2_6, fun() -> infinity end}], fun test_block_to_binary/0). test_block_to_binary() -> {ok, Cwd} = file:get_cwd(), BlockFixtureDir = filename:join(Cwd, "./apps/arweave/test/fixtures/blocks"), TXFixtureDir = filename:join(Cwd, "./apps/arweave/test/fixtures/txs"), {ok, BlockFixtures} = file:list_dir(BlockFixtureDir), test_block_to_binary([filename:join(BlockFixtureDir, Name) || Name <- BlockFixtures], TXFixtureDir). 
test_block_to_binary([], _TXFixtureDir) -> ok; test_block_to_binary([Fixture | Fixtures], TXFixtureDir) -> {ok, Bin} = file:read_file(Fixture), B = ar_storage:migrate_block_record(binary_to_term(Bin)), ?debugFmt("Block ~s, height ~B.~n", [ar_util:encode(B#block.indep_hash), B#block.height]), test_block_to_binary(B), RandomTags = [crypto:strong_rand_bytes(rand:uniform(2)) || _ <- lists:seq(1, rand:uniform(1024))], B2 = B#block{ tags = RandomTags }, test_block_to_binary(B2), B3 = B#block{ reward_addr = unclaimed }, test_block_to_binary(B3), {ok, TXFixtures} = file:list_dir(TXFixtureDir), TXs = lists:foldl( fun(TXFixture, Acc) -> {ok, TXBin} = file:read_file(filename:join(TXFixtureDir, TXFixture)), TX = ar_storage:migrate_tx_record(binary_to_term(TXBin)), maps:put(TX#tx.id, TX, Acc) end, #{}, TXFixtures), BlockTXs = [maps:get(TXID, TXs) || TXID <- B#block.txs], B4 = B#block{ txs = BlockTXs }, test_block_to_binary(B4), BlockTXs2 = [case rand:uniform(2) of 1 -> TX#tx.id; _ -> TX end || TX <- BlockTXs], B5 = B#block{ txs = BlockTXs2 }, test_block_to_binary(B5), TXIDs = [TX#tx.id || TX <- BlockTXs], B6 = B#block{ txs = TXIDs }, test_block_to_binary(B6), test_block_to_binary(Fixtures, TXFixtureDir). test_block_to_binary(B) -> {ok, B2} = ar_serialize:binary_to_block(ar_serialize:block_to_binary(B)), ?assertEqual(B#block{ txs = [] }, B2#block{ txs = [] }), ?assertEqual(true, compare_txs(B#block.txs, B2#block.txs)), lists:foreach( fun (TX) when is_record(TX, tx)-> ?assertEqual({ok, TX}, ar_serialize:binary_to_tx(ar_serialize:tx_to_binary(TX))); (_TXID) -> ok end, B#block.txs ). compare_txs([TXID | TXs], [#tx{ id = TXID } | TXs2]) -> compare_txs(TXs, TXs2); compare_txs([#tx{ id = TXID } | TXs], [TXID | TXs2]) -> compare_txs(TXs, TXs2); compare_txs([TXID | TXs], [TXID | TXs2]) -> compare_txs(TXs, TXs2); compare_txs([], []) -> true; compare_txs(_TXs, _TXs2) -> false. 
block_announcement_to_binary_test() -> A = #block_announcement{ indep_hash = crypto:strong_rand_bytes(48), previous_block = crypto:strong_rand_bytes(48) }, ?assertEqual({ok, A}, ar_serialize:binary_to_block_announcement( ar_serialize:block_announcement_to_binary(A))), A2 = A#block_announcement{ recall_byte = 0 }, ?assertEqual({ok, A2}, ar_serialize:binary_to_block_announcement( ar_serialize:block_announcement_to_binary(A2))), A3 = A#block_announcement{ recall_byte = 1000000000000000000000 }, ?assertEqual({ok, A3}, ar_serialize:binary_to_block_announcement( ar_serialize:block_announcement_to_binary(A3))), A4 = A3#block_announcement{ tx_prefixes = [crypto:strong_rand_bytes(8) || _ <- lists:seq(1, 1000)] }, ?assertEqual({ok, A4}, ar_serialize:binary_to_block_announcement( ar_serialize:block_announcement_to_binary(A4))), A5 = A#block_announcement{ recall_byte2 = 1, solution_hash = crypto:strong_rand_bytes(32) }, ?assertEqual({ok, A5}, ar_serialize:binary_to_block_announcement( ar_serialize:block_announcement_to_binary(A5))), A6 = A#block_announcement{ recall_byte2 = 1, recall_byte = 2, solution_hash = crypto:strong_rand_bytes(32) }, ?assertEqual({ok, A6}, ar_serialize:binary_to_block_announcement( ar_serialize:block_announcement_to_binary(A6))). 
block_announcement_response_to_binary_test() -> A = #block_announcement_response{}, ?assertEqual({ok, A}, ar_serialize:binary_to_block_announcement_response( ar_serialize:block_announcement_response_to_binary(A))), A2 = A#block_announcement_response{ missing_chunk = true, missing_tx_indices = lists:seq(0, 999) }, ?assertEqual({ok, A2}, ar_serialize:binary_to_block_announcement_response( ar_serialize:block_announcement_response_to_binary(A2))), A3 = A#block_announcement_response{ missing_chunk = true, missing_chunk2 = false, missing_tx_indices = lists:seq(0, 1) }, ?assertEqual({ok, A3}, ar_serialize:binary_to_block_announcement_response( ar_serialize:block_announcement_response_to_binary(A3))), A4 = A#block_announcement_response{ missing_chunk2 = true, missing_tx_indices = [731] }, ?assertEqual({ok, A4}, ar_serialize:binary_to_block_announcement_response( ar_serialize:block_announcement_response_to_binary(A4))). poa_map_to_json(Map) -> jiffy:encode(ar_serialize:poa_map_to_json_map(Map)). json_to_poa_map(Body) -> {ok, JSON} = ar_serialize:json_decode(Body, [return_maps]), {ok, ar_serialize:json_map_to_poa_map(JSON)}. poa_map_test() -> test_poa_map(fun ar_serialize:poa_map_to_binary/1, fun ar_serialize:binary_to_poa/1, #{}), test_poa_map(fun poa_map_to_json/1, fun json_to_poa_map/1, #{ data_size => 0, data_root => <<>> }). 
test_poa_map(Serialize, Deserialize, BaseProof) -> Proof = BaseProof#{ chunk => crypto:strong_rand_bytes(1), data_path => <<>>, tx_path => <<>>, packing => unpacked }, ?assertEqual({ok, Proof}, Deserialize(Serialize(Proof))), Proof2 = Proof#{ chunk => crypto:strong_rand_bytes(256 * 1024) }, ?assertEqual({ok, Proof2}, Deserialize(Serialize(Proof2))), Proof3 = Proof2#{ data_path => crypto:strong_rand_bytes(1024), packing => spora_2_5, tx_path => crypto:strong_rand_bytes(1024) }, ?assertEqual({ok, Proof3}, Deserialize(Serialize(Proof3))), Proof4 = Proof3#{ packing => {spora_2_6, crypto:strong_rand_bytes(33)} }, ?assertEqual({ok, Proof4}, Deserialize(Serialize(Proof4))), Proof5 = Proof3#{ packing => {composite, crypto:strong_rand_bytes(33), 2} }, ?assertEqual({ok, Proof5}, Deserialize(Serialize(Proof5))). poa_no_chunk_map_test() -> test_poa_no_chunk_map(fun ar_serialize:poa_no_chunk_map_to_binary/1, fun ar_serialize:binary_to_no_chunk_map/1). test_poa_no_chunk_map(Serialize, Deserialize) -> Proof = #{ data_path => crypto:strong_rand_bytes(500), tx_path => crypto:strong_rand_bytes(250) }, ?assertEqual({ok, Proof}, Deserialize(Serialize(Proof))). block_index_to_binary_test() -> lists:foreach( fun(BI) -> ?assertEqual({ok, BI}, ar_serialize:binary_to_block_index( ar_serialize:block_index_to_binary(BI))) end, [[], [{crypto:strong_rand_bytes(48), rand:uniform(1000), crypto:strong_rand_bytes(32)}], [{crypto:strong_rand_bytes(48), 0, <<>>}], [{crypto:strong_rand_bytes(48), rand:uniform(1000), crypto:strong_rand_bytes(32)} || _ <- lists:seq(1, 1000)]]). %% @doc Convert a new block into JSON and back, ensure the result is the same. block_roundtrip_test_() -> ar_test_node:test_with_mocked_functions([ {ar_fork, height_2_6, fun() -> infinity end}, {ar_fork, height_2_6_8, fun() -> infinity end}, {ar_fork, height_2_7, fun() -> infinity end}], fun test_block_roundtrip/0). 
test_block_roundtrip() -> [B] = ar_weave:init(), TXIDs = [TX#tx.id || TX <- B#block.txs], JSONStruct = ar_serialize:jsonify(ar_serialize:block_to_json_struct(B)), BRes = ar_serialize:json_struct_to_block(JSONStruct), ?assertEqual(B#block{ txs = TXIDs, size_tagged_txs = [], account_tree = undefined }, BRes#block{ hash_list = B#block.hash_list, size_tagged_txs = [] }). %% @doc Convert a new TX into JSON and back, ensure the result is the same. tx_roundtrip_test() -> TXBase = ar_tx:new(<<"test">>), TX = TXBase#tx{ format = 2, tags = [{<<"Name1">>, <<"Value1">>}], data_root = << 0:256 >>, signature_type = ?DEFAULT_KEY_TYPE, owner_address = ar_wallet:to_address(TXBase#tx.owner, ?DEFAULT_KEY_TYPE) }, JsonTX = ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX)), ?assertEqual( TX, ar_serialize:json_struct_to_tx(JsonTX) ). wallet_list_roundtrip_test_() -> {timeout, 30, fun test_wallet_list_roundtrip/0}. test_wallet_list_roundtrip() -> [B] = ar_weave:init(), WL = B#block.account_tree, JSONWL = ar_serialize:jsonify( ar_serialize:wallet_list_to_json_struct(B#block.reward_addr, false, WL)), ExpectedWL = ar_patricia_tree:foldr(fun(K, V, Acc) -> [{K, V} | Acc] end, [], WL), ActualWL = ar_patricia_tree:foldr( fun(K, V, Acc) -> [{K, V} | Acc] end, [], ar_serialize:json_struct_to_wallet_list(JSONWL) ), ?assertEqual(ExpectedWL, ActualWL). block_index_roundtrip_test_() -> {timeout, 10, fun test_block_index_roundtrip/0}. test_block_index_roundtrip() -> [B] = ar_weave:init(), HL = [B#block.indep_hash, B#block.indep_hash], JSONHL = ar_serialize:jsonify(ar_serialize:block_index_to_json_struct(HL)), HL = ar_serialize:json_struct_to_block_index(ar_serialize:dejsonify(JSONHL)), BI = [{B#block.indep_hash, 1, <<"Root">>}, {B#block.indep_hash, 2, <<>>}], JSONBI = ar_serialize:jsonify(ar_serialize:block_index_to_json_struct(BI)), BI = ar_serialize:json_struct_to_block_index(ar_serialize:dejsonify(JSONBI)). 
query_roundtrip_test() -> Query = {'equals', <<"TestName">>, <<"TestVal">>}, QueryJSON = ar_serialize:jsonify( ar_serialize:query_to_json_struct( Query ) ), ?assertEqual({ok, Query}, ar_serialize:json_struct_to_query(QueryJSON)). data_roots_roundtrip_test() -> %% TXRoot must be empty or 32 bytes: ?assertEqual({error, invalid_input1}, ar_serialize:binary_to_data_roots({<<"a">>, 0, []})), %% The number of entries must not exceed the transaction count limit: ?assertEqual({error, invalid_input1}, ar_serialize:binary_to_data_roots({<<>>, 0, make_entries(1001)})), TXRoot = crypto:strong_rand_bytes(32), BlockSizes = [0, 1, 255, 256, 65535, 65536, 123456789], Cases = lists:flatten([ {<<>>, 0, []}, [{TXRoot, BS, []} || BS <- BlockSizes], [{TXRoot, BS, make_entries(1)} || BS <- BlockSizes], [{TXRoot, BS, make_entries(2)} || BS <- BlockSizes], [{TXRoot, BS, make_entries(1000)} || BS <- BlockSizes] ]), lists:foreach( fun({TR, BS, Entries}) -> Bin = ar_serialize:data_roots_to_binary({TR, BS, Entries}), ?assertMatch({ok, {TR, BS, _}}, ar_serialize:binary_to_data_roots(Bin)), ?assertEqual({ok, {TR, BS, Entries}}, ar_serialize:binary_to_data_roots(Bin)) end, Cases ). make_entries(N) -> lists:map( fun(I) -> DataRoot = crypto:strong_rand_bytes(32), TXSize = case I rem 2 of 0 -> 0; _ -> rand:uniform(1000000) end, TXStartOffset = case I rem 2 of 0 -> 0; _ -> rand:uniform(1000000) end, TXPath = case I rem 2 of 0 -> <<>>; _ -> crypto:strong_rand_bytes(200) end, {DataRoot, TXSize, TXStartOffset, TXPath} end, lists:seq(1, N) ). 
%% Round-trip a mining candidate through JSON; the fields that are never
%% serialized (cache_ref, chunk1, chunk2, cm_lead_peer) must come back not_set.
candidate_to_json_struct_test() ->
	Test = fun(Candidate) ->
		JSON = ar_serialize:jsonify(ar_serialize:candidate_to_json_struct(Candidate)),
		{ok, JSONStruct} = ar_serialize:json_decode(JSON, [return_maps]),
		CandidateAfter = ar_serialize:json_map_to_candidate(JSONStruct),
		ExpectedCandidate = Candidate#mining_candidate{
			cache_ref = not_set,
			chunk1 = not_set,
			chunk2 = not_set,
			cm_lead_peer = not_set
		},
		?assertEqual(ExpectedCandidate, CandidateAfter)
	end,
	DefaultCandidate = #mining_candidate{
		cm_diff = {rand:uniform(1024), rand:uniform(1024)},
		cm_h1_list = [
			{crypto:strong_rand_bytes(32), rand:uniform(100)},
			{crypto:strong_rand_bytes(32), rand:uniform(100)},
			{crypto:strong_rand_bytes(32), rand:uniform(100)}
		],
		h0 = crypto:strong_rand_bytes(32),
		h1 = crypto:strong_rand_bytes(32),
		h2 = crypto:strong_rand_bytes(32),
		mining_address = crypto:strong_rand_bytes(32),
		next_seed = crypto:strong_rand_bytes(32),
		next_vdf_difficulty = rand:uniform(100),
		nonce = rand:uniform(100),
		nonce_limiter_output = crypto:strong_rand_bytes(32),
		partition_number = rand:uniform(100),
		partition_number2 = rand:uniform(100),
		partition_upper_bound = rand:uniform(100),
		poa2 = #poa{
			chunk = crypto:strong_rand_bytes(256 * 1024),
			data_path = crypto:strong_rand_bytes(1024),
			tx_path = crypto:strong_rand_bytes(1024)
		},
		preimage = crypto:strong_rand_bytes(32),
		seed = crypto:strong_rand_bytes(32),
		session_key = {crypto:strong_rand_bytes(32), rand:uniform(100),
				rand:uniform(10000)},
		start_interval_number = rand:uniform(100),
		step_number = rand:uniform(100)
	},
	Test(DefaultCandidate),
	%% clear optional fields
	Test(DefaultCandidate#mining_candidate{
		cm_h1_list = [], h1 = not_set, h2 = not_set, nonce = not_set,
		poa2 = not_set, preimage = not_set}),
	%% set unserialized fields
	Test(DefaultCandidate#mining_candidate{
		cache_ref = {rand:uniform(100), rand:uniform(100), rand:uniform(100),
				make_ref()},
		chunk1 = crypto:strong_rand_bytes(256 * 1024),
		chunk2 = crypto:strong_rand_bytes(256 * 1024),
		cm_lead_peer = ar_test_node:peer_ip(main)}).

%% Round-trip a mining solution through JSON and back unchanged.
solution_to_json_struct_test() ->
	Test = fun(Solution) ->
		JSON = ar_serialize:jsonify(ar_serialize:solution_to_json_struct(Solution)),
		{ok, JSONStruct} = ar_serialize:json_decode(JSON, [return_maps]),
		SolutionAfter = ar_serialize:json_map_to_solution(JSONStruct),
		?assertEqual(Solution, SolutionAfter)
	end,
	DefaultSolution = #mining_solution{
		last_step_checkpoints = [
			crypto:strong_rand_bytes(32),
			crypto:strong_rand_bytes(32),
			crypto:strong_rand_bytes(32)],
		mining_address = crypto:strong_rand_bytes(32),
		next_seed = crypto:strong_rand_bytes(32),
		next_vdf_difficulty = rand:uniform(100),
		nonce = rand:uniform(100),
		nonce_limiter_output = crypto:strong_rand_bytes(32),
		partition_number = rand:uniform(100),
		partition_upper_bound = rand:uniform(100),
		poa1 = #poa{
			chunk = crypto:strong_rand_bytes(256 * 1024),
			data_path = crypto:strong_rand_bytes(1024),
			tx_path = crypto:strong_rand_bytes(1024)
		},
		poa2 = #poa{
			chunk = crypto:strong_rand_bytes(256 * 1024),
			data_path = crypto:strong_rand_bytes(1024),
			tx_path = crypto:strong_rand_bytes(1024)
		},
		preimage = crypto:strong_rand_bytes(32),
		recall_byte1 = rand:uniform(100),
		recall_byte2 = rand:uniform(100),
		seed = crypto:strong_rand_bytes(32),
		solution_hash = crypto:strong_rand_bytes(32),
		start_interval_number = rand:uniform(100),
		step_number = rand:uniform(100),
		steps = [
			crypto:strong_rand_bytes(32),
			crypto:strong_rand_bytes(32),
			crypto:strong_rand_bytes(32)]
	},
	Test(DefaultSolution),
	%% clear optional fields
	Test(DefaultSolution#mining_solution{ recall_byte2 = undefined}).
partial_solution_to_json_struct_test() -> TestCases = [ #mining_solution{ mining_address = <<"a">>, next_seed = <<"s">>, seed = <<"s">>, next_vdf_difficulty = 1, nonce = 2, partition_number = 10, partition_upper_bound = 5001, solution_hash = <<"h">>, nonce_limiter_output = <<"output">>, preimage = <<"pr">>, poa1 = #poa{ chunk = <<"c">>, tx_path = <<"t">>, data_path = <<"dpath">> }, poa2 = #poa{}, recall_byte1 = 123234234234, recall_byte2 = undefined, start_interval_number = 23, step_number = 1113423423423423423423423432342342342344 }, #mining_solution{ mining_address = <<"a">>, next_seed = <<"s">>, seed = <<"s">>, next_vdf_difficulty = 1, nonce = 2, partition_number = 10, partition_upper_bound = 5001, solution_hash = <<"h">>, nonce_limiter_output = <<"output">>, preimage = <<"pr">>, poa1 = #poa{ chunk = <<"c">>, tx_path = <<"t">>, data_path = <<"dpath">> }, poa2 = #poa{ chunk = <<"chunk2">>, tx_path = <<"t2">>, data_path = <<"d2">> }, recall_byte1 = 123234234234, recall_byte2 = 2, start_interval_number = 23, step_number = 1113423423423423423423423432342342342344 } ], lists:foreach( fun(Solution) -> ?assertEqual(Solution, ar_serialize:json_map_to_solution(jiffy:decode(ar_serialize:jsonify( ar_serialize:solution_to_json_struct(Solution)), [return_maps]))) end, TestCases ). partial_solution_response_to_json_struct_test() -> TestCases = [ {#partial_solution_response{}, <<>>, <<>>}, {#partial_solution_response{ indep_hash = <<"H">>, status = <<"S">>}, <<"H">>, <<"S">>} ], lists:foreach( fun({Case, ExpectedH, ExpectedStatus}) -> {Struct} = ar_serialize:dejsonify(ar_serialize:jsonify( ar_serialize:partial_solution_response_to_json_struct(Case))), ?assertEqual(ExpectedH, ar_util:decode(proplists:get_value(<<"indep_hash">>, Struct))), ?assertEqual(ExpectedStatus, proplists:get_value(<<"status">>, Struct)) end, TestCases ). 
jobs_to_json_struct_test() -> TestCases = [ #jobs{} #jobs{ seed = <<"a">> }, #jobs{ jobs = [#job{ output = <<"o">>, global_step_number = 1, partition_upper_bound = 100 }] }, #jobs{ jobs = [#job{ output = <<"o2">>, global_step_number = 2, partition_upper_bound = 100 }, #job{ output = <<"o1">>, global_step_number = 1, partition_upper_bound = 99 }], partial_diff = {12345, 6789}, seed = <<"gjhgjkghjhg">>, next_seed = <<"dfdgfdg">>, interval_number = 23, next_vdf_difficulty = 32434 } ], lists:foreach( fun(Jobs) -> ?assertEqual(Jobs, ar_serialize:json_struct_to_jobs( ar_serialize:dejsonify(ar_serialize:jsonify( ar_serialize:jobs_to_json_struct(Jobs))))) end, TestCases ). footprint_to_json_map_test() -> Addr = crypto:strong_rand_bytes(32), TestCases = [ {ar_intervals:new()}, {ar_intervals:from_list([{3, 0}, {2048, 1024}])}, {ar_intervals:from_list([{1024, 0}])}, {ar_intervals:from_list([{3, 0}, {10000, 500}, {200000, 100000}])} ], lists:foreach( fun(TestCase) -> {Intervals} = TestCase, ?assertEqual(Intervals, ar_serialize:json_map_to_footprint(jiffy:decode( jiffy:encode(ar_serialize:footprint_to_json_map(Intervals)), [return_maps]))) end, TestCases ). ================================================ FILE: apps/arweave/test/ar_start_from_block_tests.erl ================================================ -module(ar_start_from_block_tests). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("eunit/include/eunit.hrl"). start_from_block_test_() -> [ {timeout, ?TEST_NODE_TIMEOUT, fun test_start_from_block/0} ]. 
%% End-to-end test of the start_from_block restart path: build a shared chain,
%% fork it, reconverge, then restart peers from chosen block hashes and verify
%% the block index and reward history after each restart.
test_start_from_block() ->
	[B0] = ar_weave:init(),
	ar_test_node:start(B0),
	ar_test_node:start_peer(peer1, B0),
	ar_test_node:start_peer(peer2, B0),
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:connect_to_peer(peer2),
	%% Mine a few blocks, shared by both peers
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 1),
	ar_test_node:wait_until_height(peer2, 1),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer1, 2),
	ar_test_node:wait_until_height(peer2, 2),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 3),
	ar_test_node:wait_until_height(peer2, 3),
	%% Disconnect peers, and have peer1 mine 1 block, and peer2 mine 3
	ar_test_node:disconnect_from(peer1),
	ar_test_node:disconnect_from(peer2),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 4),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 4),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 5),
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 6),
	%% Reconnect the peers. This will orphan peer1's block.
	%% (Previously the second sentence of this comment was bare text outside
	%% the comment, which is a syntax error.)
	ar_test_node:connect_to_peer(peer1),
	ar_test_node:connect_to_peer(peer2),
	ar_test_node:wait_until_height(peer1, 6),
	ar_test_node:wait_until_height(peer2, 6),
	ar_test_node:wait_until_height(main, 6),
	ar_test_node:disconnect_from(peer1),
	ar_test_node:disconnect_from(peer2),
	MainBI = ar_node:get_blocks(),
	StartFrom = get_block_hash(4, MainBI),
	StartMinus1 = get_block_hash(3, MainBI),
	assert_block_index(peer1, 6, MainBI),
	assert_block_index(peer2, 6, MainBI),
	assert_reward_history(main, peer1, StartFrom),
	assert_reward_history(main, peer2, StartFrom),
	assert_reward_history(main, peer1, StartMinus1),
	assert_reward_history(main, peer2, StartMinus1),
	%% Have peer1 start_from_block
	restart_from_block(peer1, StartFrom),
	assert_start_from(main, peer1, 4),
	restart_from_block(peer1, StartMinus1),
	assert_start_from(main, peer1, 3),
	%% Restart peer2 off of peer1
	ar_test_node:start_peer(peer2, B0),
	ar_test_node:remote_call(peer2, ar_test_node, connect_to_peer, [peer1]),
	ar_test_node:wait_until_height(peer2, 3),
	assert_start_from(main, peer1, 3),
	assert_start_from(main, peer2, 3),
	%% disconnect peer2 and mine a block on peer1
	ar_test_node:remote_call(peer2, ar_test_node, disconnect_from, [peer1]),
	ar_test_node:mine(peer1),
	ar_test_node:wait_until_height(peer1, 4),
	%% Confirm legacy block index still matches
	assert_start_from(main, peer1, 3),
	%% Restart peer2 off of peer1
	ar_test_node:start_peer(peer2, B0),
	ar_test_node:remote_call(peer2, ar_test_node, connect_to_peer, [peer1]),
	ar_test_node:wait_until_height(peer2, 4),
	assert_start_from(peer1, peer2, 4),
	%% Mine a block on peer2
	ar_test_node:mine(peer2),
	ar_test_node:wait_until_height(peer2, 5),
	ar_test_node:wait_until_height(peer1, 5),
	assert_start_from(peer2, peer1, 5),
	%% Have peer1 start_from_block one last time
	Peer1BI = get_block_index(peer1),
	restart_from_block(peer1, get_block_hash(4, Peer1BI)),
	assert_start_from(peer2, peer1, 4),
	ok.
restart_from_block(Peer, BH) -> {ok, Config} = ar_test_node:get_config(Peer), ok = ar_test_node:set_config(Peer, Config#config{ start_from_latest_state = false, start_from_block = BH, block_pollers = 0 }), ar_test_node:restart(Peer), ar_test_node:remote_call(Peer, ar_test_node, wait_until_syncs_genesis_data, []). assert_start_from(ExpectedPeer, Peer, Height) -> BI = get_block_index(Peer), StartFrom = get_block_hash(Height, BI), StartMinus1 = get_block_hash(Height-1, BI), assert_block_index(Peer, Height, BI), assert_reward_history(ExpectedPeer, Peer, StartFrom), assert_reward_history(ExpectedPeer, Peer, StartMinus1). assert_block_index(Peer, Height, ExpectedBI) -> BI = get_block_index(Peer), BITail = lists:nthtail(length(BI)-Height-1, BI), ExpectedBITail = lists:nthtail(length(ExpectedBI)-Height-1, ExpectedBI), ?assertEqual(ExpectedBITail, BITail, io:format("Block Index mismatch for peer ~s", [Peer])). assert_reward_history(ExpectedPeer, Peer, H) -> RewardHistory = get_reward_history(Peer, H), {B, _} = ar_test_node:remote_call(ExpectedPeer, ar_block_cache, get_block_and_status, [block_cache, H]), ExpectedRewardHistory = B#block.reward_history, ?assertEqual(ExpectedRewardHistory, RewardHistory). get_block_hash(Height, BI) -> {H, _, _} = lists:nth(length(BI) - Height, BI), H. get_block_index(Peer) -> ar_test_node:remote_call(Peer, ar_node, get_blocks, []). get_reward_history(Peer, H) -> PeerIP = ar_test_node:peer_ip(Peer), case ar_http:req(#{ peer => PeerIP, method => get, path => "/reward_history/" ++ binary_to_list(ar_util:encode(H)), timeout => 30000 }) of {ok, {{<<"200">>, _}, _, Body, _, _}} -> case ar_serialize:binary_to_reward_history(Body) of {ok, RewardHistory} -> RewardHistory; {error, Error} -> Error end; Reply -> Reply end. ================================================ FILE: apps/arweave/test/ar_sync_record_tests.erl ================================================ -module(ar_sync_record_tests). -include("ar.hrl"). -include("ar_consensus.hrl"). 
-include_lib("eunit/include/eunit.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). sync_record_test_() -> [ {timeout, 120, fun test_sync_record/0} ]. test_sync_record() -> SleepTime = 1000, DiskPoolStart = ar_block:partition_size(), PartitionStart = ar_block:partition_size() - ?DATA_CHUNK_SIZE, WeaveSize = 4 * ?DATA_CHUNK_SIZE, [B0] = ar_weave:init([], 1, WeaveSize), RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()), {ok, Config} = arweave_config:get_env(), try Partition = {ar_block:partition_size(), 0, {composite, RewardAddr, 1}}, PartitionID = ar_storage_module:id(Partition), StorageModules = [Partition], ar_test_node:start(B0, RewardAddr, Config, StorageModules), Options = #{ format => etf, random_subset => false }, %% Genesis data only {ok, Binary1} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global1} = ar_intervals:safe_from_etf(Binary1), ?assertEqual([{1048576, 0}], ar_intervals:to_list(Global1)), ?assertEqual(not_found, ar_sync_record:get_interval(DiskPoolStart+1, ar_data_sync, ?DEFAULT_MODULE)), ?assertEqual({1048576, 0}, ar_sync_record:get_interval(1, ar_data_sync, PartitionID)), %% Add a diskpool chunk ar_sync_record:add( DiskPoolStart+?DATA_CHUNK_SIZE, DiskPoolStart, unpacked, ar_data_sync, ?DEFAULT_MODULE), timer:sleep(SleepTime), {ok, Binary2} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global2} = ar_intervals:safe_from_etf(Binary2), ?assertEqual([{1048576, 0},{DiskPoolStart+?DATA_CHUNK_SIZE,DiskPoolStart}], ar_intervals:to_list(Global2)), ?assertEqual({DiskPoolStart+?DATA_CHUNK_SIZE,DiskPoolStart}, ar_sync_record:get_interval(DiskPoolStart+1, ar_data_sync, ?DEFAULT_MODULE)), ?assertEqual({1048576, 0}, ar_sync_record:get_interval(1, ar_data_sync, PartitionID)), %% Remove the diskpool chunk ar_sync_record:delete( DiskPoolStart+?DATA_CHUNK_SIZE, DiskPoolStart, ar_data_sync, ?DEFAULT_MODULE), timer:sleep(SleepTime), {ok, Binary3} = 
ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global3} = ar_intervals:safe_from_etf(Binary3), ?assertEqual([{1048576, 0},{DiskPoolStart+?DATA_CHUNK_SIZE,DiskPoolStart}], ar_intervals:to_list(Global3)), %% We need to explicitly declare global removal ar_events:send(sync_record, {global_remove_range, DiskPoolStart, DiskPoolStart+?DATA_CHUNK_SIZE}), true = ar_util:do_until( fun() -> {ok, Binary4} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global4} = ar_intervals:safe_from_etf(Binary4), [{1048576, 0}] == ar_intervals:to_list(Global4) end, 200, 5000), %% Add a storage module chunk ar_sync_record:add( PartitionStart+?DATA_CHUNK_SIZE, PartitionStart, unpacked, ar_data_sync, PartitionID), timer:sleep(SleepTime), {ok, Binary5} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global5} = ar_intervals:safe_from_etf(Binary5), ?assertEqual([{1048576, 0},{PartitionStart+?DATA_CHUNK_SIZE,PartitionStart}], ar_intervals:to_list(Global5)), ?assertEqual(not_found, ar_sync_record:get_interval(DiskPoolStart+1, ar_data_sync, ?DEFAULT_MODULE)), ?assertEqual({1048576, 0}, ar_sync_record:get_interval(1, ar_data_sync, PartitionID)), ?assertEqual({PartitionStart+?DATA_CHUNK_SIZE, PartitionStart}, ar_sync_record:get_interval(PartitionStart+1, ar_data_sync, PartitionID)), %% Remove the storage module chunk ar_sync_record:delete( PartitionStart+?DATA_CHUNK_SIZE, PartitionStart, ar_data_sync, PartitionID), timer:sleep(SleepTime), ?assertEqual([{1048576, 0},{PartitionStart+?DATA_CHUNK_SIZE,PartitionStart}], ar_intervals:to_list(Global5)), ar_events:send(sync_record, {global_remove_range, PartitionStart, PartitionStart+?DATA_CHUNK_SIZE}), true = ar_util:do_until( fun() -> {ok, Binary6} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global6} = ar_intervals:safe_from_etf(Binary6), [{1048576, 0}] == ar_intervals:to_list(Global6) end, 200, 1000), ?assertEqual(not_found, ar_sync_record:get_interval(DiskPoolStart+1, 
ar_data_sync, ?DEFAULT_MODULE)), ?assertEqual({1048576, 0}, ar_sync_record:get_interval(1, ar_data_sync, PartitionID)), ?assertEqual(not_found, ar_sync_record:get_interval(PartitionStart+1, ar_data_sync, PartitionID)), %% Add chunk to both diskpool and storage module ar_sync_record:add( PartitionStart+?DATA_CHUNK_SIZE, PartitionStart, unpacked, ar_data_sync, ?DEFAULT_MODULE), ar_sync_record:add( PartitionStart+?DATA_CHUNK_SIZE, PartitionStart, unpacked, ar_data_sync, PartitionID), timer:sleep(SleepTime), {ok, Binary6} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global6} = ar_intervals:safe_from_etf(Binary6), ?assertEqual([{1048576, 0}, {PartitionStart+?DATA_CHUNK_SIZE,PartitionStart}], ar_intervals:to_list(Global6)), ?assertEqual({PartitionStart+?DATA_CHUNK_SIZE,PartitionStart}, ar_sync_record:get_interval(PartitionStart+1, ar_data_sync, ?DEFAULT_MODULE)), ?assertEqual({1048576, 0}, ar_sync_record:get_interval(1, ar_data_sync, PartitionID)), ?assertEqual({PartitionStart+?DATA_CHUNK_SIZE, PartitionStart}, ar_sync_record:get_interval(PartitionStart+1, ar_data_sync, PartitionID)), %% Now remove it from just the diskpool ar_sync_record:delete( PartitionStart+?DATA_CHUNK_SIZE, PartitionStart, ar_data_sync, ?DEFAULT_MODULE), timer:sleep(SleepTime), {ok, Binary7} = ar_global_sync_record:get_serialized_sync_record(Options), {ok, Global7} = ar_intervals:safe_from_etf(Binary7), ?assertEqual([{1048576, 0}, {PartitionStart+?DATA_CHUNK_SIZE,PartitionStart}], ar_intervals:to_list(Global7)), ?assertEqual(not_found, ar_sync_record:get_interval(DiskPoolStart+1, ar_data_sync, ?DEFAULT_MODULE)), ?assertEqual({1048576, 0}, ar_sync_record:get_interval(1, ar_data_sync, PartitionID)), ?assertEqual({PartitionStart+?DATA_CHUNK_SIZE, PartitionStart}, ar_sync_record:get_interval(PartitionStart+1, ar_data_sync, PartitionID)), ar_test_node:stop() after ok = arweave_config:set_env(Config) end. 
================================================
FILE: apps/arweave/test/ar_test_data_sync.erl
================================================
%%% @doc Shared helpers for data-sync tests: node setup, transaction
%%% generation with various chunk-split strategies, proof construction,
%%% and polling helpers that wait for chunks to sync.
-module(ar_test_data_sync).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-export([setup_nodes/0, setup_nodes/1, imperfect_split/1, build_proofs/3, build_proofs/5,
		tx/1, tx/2, tx/3, tx/4, wait_until_syncs_chunk/2, wait_until_syncs_chunks/1,
		wait_until_syncs_chunks/2, wait_until_syncs_chunks/3, get_tx_offset/2, get_tx_data/1,
		post_random_blocks/1, get_records_with_proofs/3, post_proofs/4, post_proofs/5,
		generate_random_split/1, generate_random_original_split/1,
		generate_random_standard_split/0, generate_random_original_v1_split/0]).

%% Polling interval (ms) used by wait_until_syncs_chunks/3.
-define(SYNC_CHUNKS_CHECK, 1000).
%% Chunk sync can exceed 60s on slow CI (fork recovery, composite packing, many peers).
-define(SYNC_CHUNKS_TIMEOUT, 120_000).

%% @doc Return one {Block, TX, Chunks, Proof} record per chunk proof of TX in B.
get_records_with_proofs(B, TX, Chunks) ->
	[{B, TX, Chunks, Proof} || Proof <- build_proofs(B, TX, Chunks)].

%% @doc Start a main node and peer1 with default options. Returns the wallet
%% funded in the genesis block.
setup_nodes() ->
	setup_nodes(#{}).

%% @doc Start a main node and peer1. Options may carry addr / peer_addr /
%% b0 overrides; missing addresses get fresh keyfiles.
setup_nodes(Options) ->
	Addr = maps:get(addr, Options, ar_wallet:to_address(ar_wallet:new_keyfile())),
	PeerAddr = maps:get(peer_addr, Options, ar_wallet:to_address(
			ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, []))),
	setup_nodes2(Options#{ addr => Addr, peer_addr => PeerAddr }).
%% @doc Create a funded wallet, initialize (or reuse) the genesis block, start
%% the main node and peer1 with pack_served_chunks enabled, and connect them.
%% Returns the funded wallet.
setup_nodes2(#{ peer_addr := PeerAddr } = Options) ->
	Wallet = {_, Pub} = ar_wallet:new(),
	{B0, Options2} =
		case maps:get(b0, Options, not_set) of
			not_set ->
				%% Fund the fresh wallet with 200,000 AR in a new genesis block.
				[Genesis] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(200000), <<>>}],
						ar_retarget:switch_to_linear_diff(2)),
				{Genesis, Options#{ b0 => Genesis }};
			Value ->
				{Value, Options}
		end,
	{ok, Config} = arweave_config:get_env(),
	Options3 = Options2#{ config => Config#config{
			enable = Config#config.enable ++ [pack_served_chunks] } },
	ar_test_node:start(Options3),
	{ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	%% NOTE(review): the peer's enable list is built from the MAIN node's
	%% Config#config.enable rather than PeerConfig#config.enable — confirm
	%% this is intentional (the fetched PeerConfig enable list is discarded).
	ar_test_node:start_peer(peer1, B0, PeerAddr, PeerConfig#config{
			enable = Config#config.enable ++ [pack_served_chunks] }),
	ar_test_node:connect_to_peer(peer1),
	Wallet.

%% @doc Sign a v2 transaction with the given split type; fee fetched from peer1.
tx(Wallet, SplitType) ->
	tx(Wallet, SplitType, v2, fetch).

%% @doc Sign a v1 transaction carrying randomly generated original-split data.
v1_tx(Wallet) ->
	tx(Wallet, original_split, v1, fetch).

tx(Wallet, SplitType, Format) ->
	tx(Wallet, SplitType, Format, fetch).

%% Reward is either the atom fetch (ask the fee peer) or an explicit amount.
tx(Wallet, SplitType, Format, Reward) ->
	tx(#{ wallet => Wallet, split_type => SplitType, format => Format, reward => Reward }).
%% @doc Build and sign a transaction according to the requested split type
%% and format. Returns {SignedTX, Chunks}. Split types:
%%   {fixed_data, DataRoot, Chunks} — caller supplies the data,
%%   original_split               — random data split like v1 transactions,
%%   {original_split, N}          — same, with exactly N chunks,
%%   {custom_split, N | random}   — random chunk sizes, N (or 1..5) chunks,
%%   standard_split               — arweave-js-style split.
tx(Params) when is_map(Params) ->
	#{ wallet := Wallet, split_type := SplitType, format := Format,
			reward := Reward } = Params,
	TXAnchorPeer = maps:get(tx_anchor_peer, Params, main),
	TXAnchor = ar_test_node:get_tx_anchor(TXAnchorPeer),
	GetFeePeer = maps:get(get_fee_peer, Params, peer1),
	case {SplitType, Format} of
		{{fixed_data, DataRoot, Chunks}, v2} ->
			Data = binary:list_to_bin(Chunks),
			Args = #{ data_size => byte_size(Data), data_root => DataRoot,
					last_tx => TXAnchor },
			%% reward == fetch means: let the signing helper query the fee.
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			{ar_test_node:sign_tx(GetFeePeer, Wallet, Args2), Chunks};
		{{fixed_data, DataRoot, Chunks}, v1} ->
			Data = binary:list_to_bin(Chunks),
			%% v1 transactions embed the data itself.
			Args = #{ data_size => byte_size(Data), data_root => DataRoot,
					last_tx => TXAnchor, data => Data },
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			{ar_test_node:sign_v1_tx(GetFeePeer, Wallet, Args2), Chunks};
		{original_split, v1} ->
			{_, Chunks} = generate_random_original_v1_split(),
			Data = binary:list_to_bin(Chunks),
			Args = #{ data => Data, last_tx => TXAnchor },
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			{ar_test_node:sign_v1_tx(GetFeePeer, Wallet, Args2), Chunks};
		{original_split, v2} ->
			{DataRoot, Chunks} = generate_random_original_split(),
			Data = binary:list_to_bin(Chunks),
			Args = #{ data_size => byte_size(Data), data_root => DataRoot,
					last_tx => TXAnchor },
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			{ar_test_node:sign_tx(GetFeePeer, Wallet, Args2), Chunks};
		{{custom_split, ChunkNumber}, v2} ->
			{DataRoot, Chunks} = generate_random_split(ChunkNumber),
			Args = #{ data_size => byte_size(binary:list_to_bin(Chunks)),
					last_tx => TXAnchor, data_root => DataRoot },
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			TX = ar_test_node:sign_tx(GetFeePeer, Wallet, Args2),
			{TX, Chunks};
		{standard_split, v2} ->
			{DataRoot, Chunks} = generate_random_standard_split(),
			Data = binary:list_to_bin(Chunks),
			Args = #{ data_size => byte_size(Data), data_root => DataRoot,
					last_tx => TXAnchor },
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			TX = ar_test_node:sign_tx(GetFeePeer, Wallet, Args2),
			{TX, Chunks};
		{{original_split, ChunkNumber}, v2} ->
			{DataRoot, Chunks} = generate_random_original_split(ChunkNumber),
			Data = binary:list_to_bin(Chunks),
			Args = #{ data_size => byte_size(Data), data_root => DataRoot,
					last_tx => TXAnchor },
			Args2 = case Reward of fetch -> Args; _ -> Args#{ reward => Reward } end,
			TX = ar_test_node:sign_tx(GetFeePeer, Wallet, Args2),
			{TX, Chunks}
	end.

%% @doc Generate ChunkCount (or 1..5 when the atom random is given) chunks of
%% random sizes and the merkle data root over them.
generate_random_split(ChunkCount) ->
	Chunks = lists:foldl(
		fun(_, Chunks) ->
			RandomSize =
				case rand:uniform(3) of
					1 ->
						?DATA_CHUNK_SIZE;
					_ ->
						%% A random size in [DATA_CHUNK_SIZE/3, DATA_CHUNK_SIZE).
						OneThird = ?DATA_CHUNK_SIZE div 3,
						OneThird + rand:uniform(?DATA_CHUNK_SIZE - OneThird) - 1
				end,
			Chunk = crypto:strong_rand_bytes(RandomSize),
			[Chunk | Chunks]
		end,
		[],
		lists:seq(1, case ChunkCount of random -> rand:uniform(5); _ -> ChunkCount end)),
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
			ar_tx:chunks_to_size_tagged_chunks(Chunks)),
	{DataRoot, _} = ar_merkle:generate_tree(SizedChunkIDs),
	{DataRoot, Chunks}.

generate_random_original_v1_split() ->
	%% Make sure v1 data does not end with a digit, otherwise it's malleable.
	Data = << (crypto:strong_rand_bytes(rand:uniform(?MiB)))/binary, <<"a">>/binary >>,
	original_split(Data).

%% @doc Random data (up to 1 MiB) split the way v1 transactions are split.
generate_random_original_split() ->
	Data = << (crypto:strong_rand_bytes(rand:uniform(?MiB)))/binary >>,
	original_split(Data).

%% @doc Random data (up to 3 chunks) split the arweave-js way.
generate_random_standard_split() ->
	Data = crypto:strong_rand_bytes(rand:uniform(3 * ?DATA_CHUNK_SIZE)),
	v2_standard_split(Data).

%% @doc Random data producing exactly ChunkCount v1-style chunks.
generate_random_original_split(ChunkCount) ->
	RandomSize = (ChunkCount - 1) * ?DATA_CHUNK_SIZE + rand:uniform(?DATA_CHUNK_SIZE),
	Data = crypto:strong_rand_bytes(RandomSize),
	original_split(Data).

%% @doc Split the way v1 transactions are split.
original_split(Data) ->
	%% Fixed-size chunking followed by the standard size-tagged merkle tree.
	Chunks = ar_tx:chunk_binary(?DATA_CHUNK_SIZE, Data),
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
		ar_tx:chunks_to_size_tagged_chunks(Chunks)
	),
	{DataRoot, _} = ar_merkle:generate_tree(SizedChunkIDs),
	{DataRoot, Chunks}.

%% @doc Split the way v2 transactions are usually split (arweave-js does it
%% this way as of the time this was written).
v2_standard_split(Data) ->
	Chunks = v2_standard_split_get_chunks(Data),
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
		ar_tx:chunks_to_size_tagged_chunks(Chunks)
	),
	{DataRoot, _} = ar_merkle:generate_tree(SizedChunkIDs),
	{DataRoot, Chunks}.

v2_standard_split_get_chunks(Data) ->
	%% 32 KiB is the minimum size allowed for the trailing chunk.
	v2_standard_split_get_chunks(Data, [], 32 * 1024).

%% Base case: the remainder fits in a single 256 KiB chunk.
v2_standard_split_get_chunks(Chunk, Chunks, _MinSize) when byte_size(Chunk) =< 262144 ->
	lists:reverse([Chunk | Chunks]);
%% The remainder would leave a tail smaller than MinSize: split it in half
%% instead so neither of the last two chunks is tiny.
v2_standard_split_get_chunks(<< _:262144/binary, LastChunk/binary >> = Rest,
		Chunks, MinSize) when byte_size(LastChunk) < MinSize ->
	FirstSize = round(math:ceil(byte_size(Rest) / 2)),
	<< Chunk1:FirstSize/binary, Chunk2/binary >> = Rest,
	lists:reverse([Chunk2, Chunk1 | Chunks]);
%% Peel off a full 256 KiB chunk and recurse.
v2_standard_split_get_chunks(<< Chunk:262144/binary, Rest/binary >>, Chunks, MinSize) ->
	v2_standard_split_get_chunks(Rest, [Chunk | Chunks], MinSize).

%% @doc Split Data into ?DATA_CHUNK_SIZE chunks, but split the final stretch
%% in half whenever the tail would be smaller than half a chunk — producing
%% "imperfect" (non-chunk-aligned) splits for tests.
imperfect_split(Data) ->
	imperfect_split(?DATA_CHUNK_SIZE, Data).

imperfect_split(_ChunkSize, Bin) when byte_size(Bin) == 0 ->
	[];
imperfect_split(ChunkSize, Bin) when byte_size(Bin) < ChunkSize ->
	[Bin];
imperfect_split(ChunkSize, Bin) ->
	%% NOTE(review): the extracted source had both binary match patterns
	%% stripped ("<> = Bin"); reconstructed from the surviving variable
	%% names (ChunkBin/Rest, ChunkBin2/Rest2) and the comment below.
	<< ChunkBin:ChunkSize/binary, Rest/binary >> = Bin,
	HalfSize = ChunkSize div 2,
	case byte_size(Rest) < HalfSize of
		true ->
			%% Split the remaining stretch (one full chunk plus a short
			%% tail) into two roughly equal halves.
			HalfSize2 = byte_size(Bin) div 2,
			<< ChunkBin2:HalfSize2/binary, Rest2/binary >> = Bin,
			%% If Rest is <<>>, both chunks are HalfSize - the chunks are invalid
			%% after the strict data split threshold.
			[ChunkBin2, Rest2];
		false ->
			[ChunkBin | imperfect_split(ChunkSize, Rest)]
	end.

%% @doc Build chunk proofs for TX as included in block B.
build_proofs(B, TX, Chunks) ->
	build_proofs(TX, Chunks, B#block.txs, B#block.weave_size - B#block.block_size,
			B#block.height).
%% @doc Build the merkle proofs for every chunk of TX: the tx_path proving the
%% transaction's data root within the block's tx tree and the data_path proving
%% each chunk within the transaction's data tree. Returns a list of
%% {AbsoluteChunkEndOffset, ProofMap} pairs; zero-size chunks are skipped.
build_proofs(TX, Chunks, TXs, BlockStartOffset, Height) ->
	SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs, Height),
	SizeTaggedDataRoots = [{Root, Offset} || {{_, Root}, Offset} <- SizeTaggedTXs],
	%% Locate the end offset of our transaction within the block.
	{value, {_, TXOffset}} =
		lists:search(fun({{TXID, _}, _}) -> TXID == TX#tx.id end, SizeTaggedTXs),
	{TXRoot, TXTree} = ar_merkle:generate_tree(SizeTaggedDataRoots),
	TXPath = ar_merkle:generate_path(TXRoot, TXOffset - 1, TXTree),
	SizeTaggedChunks = ar_tx:chunks_to_size_tagged_chunks(Chunks),
	{DataRoot, DataTree} = ar_merkle:generate_tree(
		ar_tx:sized_chunks_to_sized_chunk_ids(SizeTaggedChunks)
	),
	DataSize = byte_size(binary:list_to_bin(Chunks)),
	lists:foldl(
		fun	({<<>>, _}, Proofs) ->
				%% Skip empty chunks — they have no proof.
				Proofs;
			({Chunk, ChunkOffset}, Proofs) ->
				%% TXOffset is the tx end offset, so subtracting the data size
				%% yields the tx start offset within the block.
				TXStartOffset = TXOffset - DataSize,
				AbsoluteChunkEndOffset = BlockStartOffset + TXStartOffset + ChunkOffset,
				Proof = #{
					tx_path => ar_util:encode(TXPath),
					data_root => ar_util:encode(DataRoot),
					data_path => ar_util:encode(
						ar_merkle:generate_path(DataRoot, ChunkOffset - 1, DataTree)
					),
					chunk => ar_util:encode(Chunk),
					offset => integer_to_binary(ChunkOffset - 1),
					data_size => integer_to_binary(DataSize)
				},
				Proofs ++ [{AbsoluteChunkEndOffset, Proof}]
		end,
		[],
		SizeTaggedChunks
	).

%% @doc GET /tx/<id>/offset from the given node's HTTP API.
get_tx_offset(Node, TXID) ->
	Peer = ar_test_node:peer_ip(Node),
	ar_http:req(#{
		method => get,
		peer => Peer,
		path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/offset"
	}).

%% @doc GET /tx/<id>/data from the local node's HTTP API.
get_tx_data(TXID) ->
	{ok, Config} = arweave_config:get_env(),
	ar_http:req(#{
		method => get,
		peer => {127, 0, 0, 1, Config#config.port},
		path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/data"
	}).
%% @doc Post a fixed scenario of blocks mixing v1/v2 transactions, empty
%% blocks, data-less v2 transactions, and repeated fixed-data transactions.
post_random_blocks(Wallet) ->
	post_blocks(Wallet,
		[
			[v1],
			empty,
			[v2, v1, fixed_data, v2_no_data],
			[v2, v2_standard_split, v1, v2],
			empty,
			[v1, v2, v2, empty_tx, v2_standard_split],
			[v2, v2_no_data, v2_no_data, v1, v2_no_data],
			[empty_tx],
			empty,
			[v2_standard_split, v2_no_data, v2, v1, v2],
			empty,
			[fixed_data, fixed_data],
			empty,
			[fixed_data, fixed_data] % same tx_root as in the block before the previous one
		]
	).

%% @doc Mine one block per entry of BlockMap (the atom empty mines an empty
%% block; a list describes the txs to include). Waits for peer1 to reach each
%% height. Returns [{B, TX, Chunks}] for txs whose data is expected to sync
%% (v2_no_data and empty_tx entries are excluded).
post_blocks(Wallet, BlockMap) ->
	%% One shared 4-chunk payload reused by every fixed_data transaction.
	FixedChunks = [crypto:strong_rand_bytes(256 * 1024) || _ <- lists:seq(1, 4)],
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
			ar_tx:chunks_to_size_tagged_chunks(FixedChunks)),
	{DataRoot, _} = ar_merkle:generate_tree(SizedChunkIDs),
	lists:foldl(
		fun	({empty, Height}, Acc) ->
				ar_test_node:mine(),
				ar_test_node:assert_wait_until_height(peer1, Height),
				Acc;
			({TXMap, Height}, Acc) ->
				TXsWithChunks = lists:map(
					fun	(v1) ->
							{v1_tx(Wallet), v1};
						(v2) ->
							{tx(Wallet, original_split), v2};
						(v2_no_data) -> % same as v2 but its data won't be submitted
							{tx(Wallet, {custom_split, random}), v2_no_data};
						(v2_standard_split) ->
							{tx(Wallet, standard_split), v2_standard_split};
						(empty_tx) ->
							{tx(Wallet, {custom_split, 0}), empty_tx};
						(fixed_data) ->
							{tx(Wallet, {fixed_data, DataRoot, FixedChunks}), fixed_data}
					end,
					TXMap
				),
				B = ar_test_node:post_and_mine(
					#{ miner => main, await_on => main },
					[TX || {{TX, _}, _} <- TXsWithChunks]
				),
				ar_test_node:assert_wait_until_height(peer1, Height),
				Acc ++ [{B, TX, C} || {{TX, C}, Type} <- lists:sort(TXsWithChunks),
						Type /= v2_no_data, Type /= empty_tx]
		end,
		[],
		lists:zip(BlockMap, lists:seq(1, length(BlockMap)))
	).

%% @doc Post all chunk proofs of TX to Peer, expecting HTTP 200 for each.
post_proofs(Peer, B, TX, Chunks) ->
	post_proofs(Peer, B, TX, Chunks, infinity).
%% @doc Post all chunk proofs of TX to Peer. Proofs whose offset lies past
%% DiskPoolThreshold are expected to be answered with HTTP 303, others with 200.
post_proofs(Peer, B, TX, Chunks, DiskPoolThreshold) ->
	Proofs = build_proofs(B, TX, Chunks),
	lists:foreach(
		fun({_, Proof}) ->
			Offset = binary_to_integer(maps:get(offset, Proof)),
			%% With DiskPoolThreshold == infinity (an atom), Offset > infinity
			%% is always false under Erlang term ordering (number < atom),
			%% so every proof is expected to return 200.
			HttpStatus = case Offset > DiskPoolThreshold of
				true -> <<"303">>;
				false -> <<"200">>
			end,
			{ok, {{HttpStatus, _}, _, _, _, _}} =
				ar_test_node:post_chunk(Peer, ar_serialize:jsonify(Proof))
		end,
		Proofs
	),
	Proofs.

%% @doc Poll the main node until the chunk at Offset is served and every field
%% of ExpectedProof matches the served proof. Also asserts that the no-chunk
%% proof endpoint returns the same data_path/tx_path.
wait_until_syncs_chunk(Offset, ExpectedProof) ->
	true = ar_util:do_until(
		fun() ->
			case ar_test_node:get_chunk(main, Offset) of
				{ok, {{<<"200">>, _}, _, ProofJSON, _, _}} ->
					Proof = jiffy:decode(ProofJSON, [return_maps]),
					{ok, {{<<"200">>, _}, _, NoChunkProofJSON, _, _}} =
						ar_test_node:get_chunk_proof(main, Offset),
					NoChunkProof = jiffy:decode(NoChunkProofJSON, [return_maps]),
					?assertEqual(maps:get(<<"data_path">>, Proof),
							maps:get(<<"data_path">>, NoChunkProof)),
					?assertEqual(maps:get(<<"tx_path">>, Proof),
							maps:get(<<"tx_path">>, NoChunkProof)),
					%% Every key present in ExpectedProof must match the
					%% served proof (keys are atoms; the JSON uses binaries).
					maps:fold(
						fun	(_Key, _Value, false) ->
								false;
							(Key, Value, true) ->
								maps:get(atom_to_binary(Key), Proof, not_set) == Value
						end,
						true,
						ExpectedProof
					);
				_ ->
					false
			end
		end,
		1000,
		20_000
	).

wait_until_syncs_chunks(Proofs) ->
	wait_until_syncs_chunks(main, Proofs, infinity).

wait_until_syncs_chunks(Proofs, UpperBound) ->
	wait_until_syncs_chunks(main, Proofs, UpperBound).
%% @doc For every {EndOffset, Proof} with EndOffset =< UpperBound, poll Node
%% until the chunk is served and its chunk/tx_path/data_path match Proof.
%% Offsets past UpperBound are skipped (treated as synced).
wait_until_syncs_chunks(Node, Proofs, UpperBound) ->
	lists:foreach(
		fun({EndOffset, Proof}) ->
			true = ar_util:do_until(
				fun() ->
					case EndOffset > UpperBound of
						true ->
							true;
						false ->
							case ar_test_node:get_chunk(Node, EndOffset) of
								{ok, {{<<"200">>, _}, _, EncodedProof, _, _}} ->
									FetchedProof = ar_serialize:json_map_to_poa_map(
										jiffy:decode(EncodedProof, [return_maps])
									),
									ExpectedProof = #{
										chunk => ar_util:decode(maps:get(chunk, Proof)),
										tx_path => ar_util:decode(maps:get(tx_path, Proof)),
										data_path => ar_util:decode(
												maps:get(data_path, Proof))
									},
									compare_proofs(FetchedProof, ExpectedProof, EndOffset);
								_ ->
									false
							end
					end
				end,
				?SYNC_CHUNKS_CHECK,
				?SYNC_CHUNKS_TIMEOUT
			)
		end,
		Proofs
	).

%% @doc Return true when the fetched proof's chunk, data_path, and tx_path all
%% equal the expected ones; otherwise log a diagnostic and return false.
compare_proofs(#{ chunk := C, data_path := D, tx_path := T },
		#{ chunk := C, data_path := D, tx_path := T }, _EndOffset) ->
	true;
compare_proofs(#{ chunk := C1, data_path := D1, tx_path := T1 } = FetchedProof,
		#{ chunk := C2, data_path := D2, tx_path := T2 }, EndOffset) ->
	?debugFmt("Proof mismatch for ~B data_path: ~p tx_path: ~p chunk: ~p "
			"expected chunk size :~B chunk size: ~B fetched proof packing: ~p.~n",
			[EndOffset, D1 == D2, T1 == T2, C1 == C2, byte_size(C2), byte_size(C1),
					maps:get(packing, FetchedProof, not_set)]),
	false.

================================================
FILE: apps/arweave/test/ar_test_inet_mock.erl
================================================
%%%===================================================================
%%% @doc a module to mock `inet'.
%%% @end
%%%===================================================================
-module(ar_test_inet_mock).
-export([getaddrs/2]).

%%--------------------------------------------------------------------
%% @doc a function to mock `inet:getaddrs/2'. mostly used to test
%% internal resolver feature in `ar_peers' and `ar_util'.
%% @end
%%--------------------------------------------------------------------
getaddrs("single.record.local", _) -> {ok, [{127, 0, 0, 1}]};
getaddrs("multi.record.local", _) ->
	{ok, [
		{127,0,0,2},
		{127,0,0,3},
		{127,0,0,4},
		{127,0,0,5}
	]};
getaddrs("error.record.local", _) -> {error, not_found};
getaddrs(_, _) -> {error, invalid}.

================================================
FILE: apps/arweave/test/ar_test_node.erl
================================================
-module(ar_test_node).

%% The new, more flexible, and more user-friendly interface.
-export([boot_peers/1, wait_for_peers/1, get_config/1,set_config/2,
		wait_until_joined/0, wait_until_joined/1,
		restart/0, restart/1, restart_with_config/1, restart_with_config/2,
		start_other_node/4, start_node/2, start_node/3, start_coordinated/1,
		base_cm_config/1, mine/1,
		wait_until_height/1, wait_until_height/2, wait_until_height/3,
		wait_until_height/4, do_wait_until_height/2, assert_wait_until_height/2,
		wait_until_mining_paused/1, http_get_block/2, get_blocks/1,
		mock_to_force_invalid_h1/0, mainnet_packing_mocks/0,
		get_difficulty_for_invalid_hash/0, invalid_solution/0, valid_solution/0,
		new_mock/2, mock_function/3, unmock_module/1, remote_call/4, load_fixture/1,
		get_default_storage_module_packing/2, get_genesis_chunk/1,
		all_nodes/1, new_custom_size_rsa_wallet/1]).
%% The "legacy" interface.
-export([start/0, start/1, start/2, start/3, start/4, stop/0, stop/1,
		start_peer/2, start_peer/3, start_peer/4, peer_name/1, peer_port/1,
		stop_peers/1, stop_peer/1,
		connect_peers/2, connect_to_peer/1, disconnect_peers/2, disconnect_from/1,
		join/2, join/3, join_on/1, join_on/2, rejoin_on/1,
		generate_join_config/0, generate_join_config/1,
		peer_ip/1, get_node_namespace/0, get_unused_port/0,
		with_gossip_paused/2,
		mine/0, get_tx_anchor/1, get_tx_confirmations/2,
		get_tx_price/2, get_tx_price/3,
		get_optimistic_tx_price/2, get_optimistic_tx_price/3,
		sign_tx/1, sign_tx/2, sign_tx/3, sign_v1_tx/1, sign_v1_tx/2, sign_v1_tx/3,
		wait_until_block_index/1, wait_until_block_index/2,
		wait_until_receives_txs/1, assert_wait_until_receives_txs/1,
		assert_wait_until_receives_txs/2,
		post_tx_to_peer/2, post_tx_to_peer/3,
		assert_post_tx_to_peer/2, assert_post_tx_to_peer/3,
		post_and_mine/2, post_block/2, post_block/3, send_new_block/2,
		await_post_block/2, await_post_block/3, sign_block/3,
		read_block_when_stored/1, read_block_when_stored/2,
		get_chunk/2, get_chunk/3, get_chunk_proof/2, post_chunk/2, random_v1_data/1,
		assert_get_tx_data/3, assert_data_not_found/2, post_tx_json/2,
		wait_until_syncs_genesis_data/0, wait_until_syncs_genesis_data/1,
		mock_functions/1, test_with_mocked_functions/2, test_with_mocked_functions/3]).

-include("ar.hrl").
-include("ar_consensus.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

%% May occasionally take quite long on a slow CI server, especially in tests
%% with height >= 20 (2 difficulty retargets).
-define(WAIT_UNTIL_BLOCK_HEIGHT_TIMEOUT, 500_000).
-define(WAIT_UNTIL_RECEIVES_TXS_TIMEOUT, 500_000).
%% Sometimes takes a while on a slow machine
-define(PEER_START_TIMEOUT, 500_000).
%% Set the maximum number of retry attempts
-define(MAX_BOOT_RETRIES, 3).
-define(MAX_MINERS, 3).
% define check timeout and interval, used with ar_util:do_until/3.
-define(NODE_READY_CHECK_INTERVAL, 200).
-define(NODE_READY_CHECK_TIMEOUT, 500_000).
-define(REMOTE_CALL_TIMEOUT, 500_000).
-define(CONNECT_TO_PEER_TIMEOUT, 500_000).
-define(BLOCK_INDEX_TIMEOUT, 500_000).
-define(TEST_MOCKED_FUNCTIONS_TIMEOUT, 500). %% in seconds
-define(POST_AND_MINE_TIMEOUT, 500_000).
-define(READ_BLOCK_TIMEOUT, 500_000).
-define(GET_TX_DATA_TIMEOUT, 200_000).
-define(WAIT_UNTIL_JOINED_TIMEOUT, 200_000).
-define(WAIT_SYNCS_DATA_TIMEOUT, 500_000).
-define(WAIT_UNTIL_MINING_PAUSED_TIMEOUT, 60_000).
-define(TEST_HTTP_CLIENT_KEEPALIVE, 4_000).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc The fixed {TestType, Node} peer set for each test flavor.
all_peers(test) ->
	[{test, peer1}, {test, peer2}, {test, peer3}, {test, peer4}];
all_peers(e2e) ->
	[{e2e, peer1}, {e2e, peer2}].

%% @doc All peers plus the main node.
all_nodes(TestType) ->
	[{TestType, main} | all_peers(TestType)].

%% @doc Generate an RSA keypair of Size bytes, write it to a wallet keyfile in
%% JWK format, and return {{KeyType, Priv, Pub}, {KeyType, Pub}}.
new_custom_size_rsa_wallet(Size) ->
	KeyType = ?RSA_KEY_TYPE,
	PublicExpnt = 65537,
	%% The repeated Expnt/Pub bindings double as a consistency assertion
	%% between the public and private key components.
	{[Expnt, Pub], [Expnt, Pub, Priv, P1, P2, E1, E2, C]} =
		crypto:generate_key(rsa, {Size * 8, PublicExpnt}),
	Key = ar_serialize:jsonify(
		{
			[
				{kty, <<"RSA">>},
				{ext, true},
				{e, ar_util:encode(Expnt)},
				{n, ar_util:encode(Pub)},
				{d, ar_util:encode(Priv)},
				{p, ar_util:encode(P1)},
				{q, ar_util:encode(P2)},
				{dp, ar_util:encode(E1)},
				{dq, ar_util:encode(E2)},
				{qi, ar_util:encode(C)}
			]
		}
	),
	%% NOTE(review): the first argument is the literal atom wallet_address —
	%% confirm ar_wallet:wallet_filepath/3 expects an atom tag here.
	Filename = ar_wallet:wallet_filepath(wallet_address, Pub, KeyType),
	case filelib:ensure_dir(Filename) of
		ok ->
			case ar_storage:write_file_atomic(Filename, Key) of
				ok ->
					{{KeyType, Priv, Pub}, {KeyType, Pub}};
				Error2 ->
					Error2
			end;
		Error ->
			Error
	end.

%% @doc Boot every peer OS process in the list (or for the test flavor).
boot_peers([]) ->
	ok;
boot_peers([{TestType, Node} | Peers]) ->
	boot_peer(TestType, Node),
	boot_peers(Peers);
boot_peers(TestType) ->
	boot_peers(all_peers(TestType)).

boot_peer(TestType, Node) ->
	try_boot_peer(TestType, Node, ?MAX_BOOT_RETRIES).

try_boot_peer(_TestType, _Node, 0) ->
	%% You might log an error or handle this case specifically
	%% as per your application logic.
	{error, max_retries_exceeded};
%% Launch a fresh `erl` OS process for the peer and wait for it to become
%% reachable; retry with a new port on failure.
try_boot_peer(TestType, Node, Retries) ->
	NodeName = peer_name(Node),
	Port = get_unused_port(),
	Cookie = erlang:get_cookie(),
	Paths = code:get_path(),
	%% Best-effort; the return value is deliberately ignored.
	filelib:ensure_dir("./.tmp"),
	Schedulers = erlang:system_info(schedulers_online),
	RawCommand = string:join([
		"erl +S ~B:~B",
		"-pa", "~s",
		"-config", "config/sys.config",
		"-noshell",
		"-name", "~s",
		"-setcookie", "~s",
		"-run ar main",
		"debug",
		"port", "~p",
		"data_dir", ".tmp/data_~s_~s",
		"no_auto_join",
		"disable_replica_2_9_device_limit",
		"> ~s-~s.out 2>&1"
	], " "),
	CommandParams = [
		Schedulers, Schedulers,
		string:join(Paths, " "),
		NodeName,
		Cookie,
		Port,
		atom_to_list(TestType),
		NodeName,
		Node,
		get_node_namespace()
	],
	Cmd = io_lib:format(RawCommand, CommandParams),
	run_command(Node, Cmd),
	case wait_until_node_is_ready(NodeName) of
		{ok, _Node} ->
			io:format("~s started at port ~p.~n", [NodeName, Port]),
			{node(), NodeName};
		{error, Reason} ->
			io:format("Error starting ~s: ~p. Retries left: ~p~n",
					[NodeName, Reason, Retries]),
			try_boot_peer(TestType, Node, Retries - 1)
	end.

%%--------------------------------------------------------------------
%% @doc run a command in asynchronous way using `spawn/1' instead of
%% using `&' from shell feature.
%% @end
%%--------------------------------------------------------------------
run_command(Node, Command) ->
	spawn(fun() -> run_command_init(Node, Command) end).

%% @hidden
run_command_init(Node, Command) ->
	io:format("Launching peer (~p) ~p: ~s~n", [self(), Node, Command]),
	try
		Result = os:cmd(Command),
		io:format("command result: ~p~n", [Result])
	catch E:R:S ->
		io:format("failed command: ~p:~p:~p~n", [E,R,S])
	end.

%% @doc Wait until every peer's arweave application is fully started.
wait_for_peers([]) ->
	ok;
wait_for_peers([{_TestType, Node} | Peers]) ->
	wait_for_peer(Node),
	wait_for_peers(Peers);
wait_for_peers(TestType) ->
	wait_for_peers(all_peers(TestType)).

wait_for_peer(Node) ->
	remote_call(Node, application, ensure_all_started, [arweave, permanent], 60000).

self_node() ->
	list_to_atom(get_node()).
%% @doc The distributed-Erlang name of the given peer, namespaced per test run.
peer_name(Node) ->
	list_to_atom(
		atom_to_list(Node) ++ "-" ++ get_node_namespace() ++ "@127.0.0.1"
	).

%% @doc The HTTP port of the peer; cached in the process dictionary after the
%% first (remote) lookup.
peer_port(Node) ->
	case get({peer_port, Node}) of
		undefined ->
			{ok, Config} = ar_test_node:remote_call(Node, arweave_config, get_env, []),
			Port = Config#config.port,
			put({peer_port, Node}, Port),
			Port;
		Port ->
			Port
	end.

stop_peers([]) ->
	ok;
stop_peers([{_TestType, Node} | Peers]) ->
	stop_peer(Node),
	stop_peers(Peers);
stop_peers(TestType) ->
	stop_peers(all_peers(TestType)).

stop_peer(Node) ->
	try
		rpc:call(peer_name(Node), init, stop, [], 30000)
	catch E:R:S ->
		io:format("stop_peer error: ~p:~p:~p~n", [E,R,S]),
		%% we don't care if the node is already stopped
		ok
	end.

%% @doc The {IP..., Port} tuple of a peer; {external, Peer} passes through.
peer_ip({external, Peer}) -> Peer;
peer_ip(Node) ->
	{127, 0, 0, 1, peer_port(Node)}.

wait_until_joined(Node) ->
	remote_call(Node, ar_test_node, wait_until_joined, []).

%% @doc Wait until the node joins the network (initializes the state).
wait_until_joined() ->
	ar_util:do_until(
		fun() -> ar_node:is_joined() end,
		100,
		?WAIT_UNTIL_JOINED_TIMEOUT
	).

get_config(Node) ->
	remote_call(Node, arweave_config, get_env, []).

set_config(Node, Config) ->
	remote_call(Node, arweave_config, set_env, [Config]).
%% @doc Merge the test-relevant fields of Config into the node's base config,
%% set it as the environment, log it, and return the merged record.
update_config(Config) ->
	{ok, BaseConfig} = arweave_config:get_env(),
	Config2 = BaseConfig#config{
		start_from_latest_state = Config#config.start_from_latest_state,
		auto_join = Config#config.auto_join,
		mining_addr = Config#config.mining_addr,
		sync_jobs = Config#config.sync_jobs,
		replica_2_9_workers = Config#config.replica_2_9_workers,
		disk_pool_jobs = Config#config.disk_pool_jobs,
		header_sync_jobs = Config#config.header_sync_jobs,
		%% Feature flags are additive: test flags prepend the base ones.
		enable = Config#config.enable ++ BaseConfig#config.enable,
		mining_cache_size_mb = Config#config.mining_cache_size_mb,
		debug = Config#config.debug,
		coordinated_mining = Config#config.coordinated_mining,
		cm_api_secret = Config#config.cm_api_secret,
		cm_poll_interval = Config#config.cm_poll_interval,
		peers = Config#config.peers,
		cm_exit_peer = Config#config.cm_exit_peer,
		cm_peers = Config#config.cm_peers,
		local_peers = Config#config.local_peers,
		mine = Config#config.mine,
		storage_modules = Config#config.storage_modules,
		repack_in_place_storage_modules =
				Config#config.repack_in_place_storage_modules,
		allow_rebase = Config#config.allow_rebase,
		'http_client.http.keepalive' = ?TEST_HTTP_CLIENT_KEEPALIVE
	},
	ok = arweave_config:set_env(Config2),
	?LOG_INFO("Updated Config:"),
	ar_config:log_config(Config2),
	Config2.

%% @doc Start a node on a remote peer via RPC (90s timeout).
start_other_node(Node, B0, Config, WaitUntilSync) ->
	remote_call(Node, ar_test_node, start_node, [B0, Config, WaitUntilSync], 90000).

%% @doc Start a node with the given genesis block and configuration.
start_node(B0, Config) ->
	start_node(B0, Config, true).

%% @doc Wipe state, write the genesis files, apply Config, boot the arweave
%% application, and (optionally) wait until genesis data is synced.
%% Returns the Erlang node name.
start_node(B0, Config, WaitUntilSync) ->
	?LOG_INFO("Starting node"),
	clean_up_and_stop(),
	prometheus:start(),
	arweave_config:start(),
	ok = arweave_limiter:start(),
	{ok, BaseConfig} = arweave_config:get_env(),
	write_genesis_files(BaseConfig#config.data_dir, B0),
	update_config(Config),
	start_dependencies(),
	wait_until_joined(),
	case WaitUntilSync of
		true -> wait_until_syncs_genesis_data();
		false -> ok
	end,
	?LOG_INFO("Node started"),
	erlang:node().
%% @doc Launch the given number (>= 1, =< ?MAX_MINERS) of the mining nodes in the coordinated
%% mode plus an exit node and a validator node.
%% Return [Node1, ..., NodeN, ExitNode, ValidatorNode].
start_coordinated(MiningNodeCount) when MiningNodeCount >= 1,
		MiningNodeCount =< ?MAX_MINERS ->
	%% Set weave larger than what we'll cover with the 3 nodes so that every node can find
	%% a solution.
	[B0] = ar_weave:init([], get_difficulty_for_invalid_hash(),
			ar_block:partition_size() * 5),
	ExitPeer = peer_ip(peer1),
	ValidatorPeer = peer_ip(main),
	MinerNodes = lists:sublist([peer2, peer3, peer4], MiningNodeCount),
	BaseCMConfig = base_cm_config([ValidatorPeer]),
	RewardAddr = BaseCMConfig#config.mining_addr,
	ExitNodeConfig = BaseCMConfig#config{
		mine = true,
		local_peers = [peer_ip(Peer) || Peer <- MinerNodes]
	},
	ValidatorNodeConfig = BaseCMConfig#config{
		mine = false,
		peers = [ExitPeer],
		coordinated_mining = false,
		cm_api_secret = not_set
	},
	%% Start the validator first so that its HTTP server is available when
	%% other nodes validate it as a trusted peer during startup.
	%% Use peers=[] here because the exit node isn't configured yet.
	remote_call(main, ar_test_node, start_node,
			[B0, ValidatorNodeConfig#config{ peers = [] }]),
	%% Exit node.
	remote_call(peer1, ar_test_node, start_node, [B0, ExitNodeConfig]),
	lists:foreach(
		fun(I) ->
			MinerNode = lists:nth(I, MinerNodes),
			MinerPeers = lists:filter(fun(Peer) -> Peer /= MinerNode end, MinerNodes),
			MinerPeerIPs = [peer_ip(Peer) || Peer <- MinerPeers],
			MinerConfig = BaseCMConfig#config{
				cm_exit_peer = ExitPeer,
				cm_peers = MinerPeerIPs,
				local_peers = MinerPeerIPs ++ [ExitPeer],
				storage_modules = get_cm_storage_modules(RewardAddr, I, MiningNodeCount)
			},
			remote_call(MinerNode, ar_test_node, start_node, [B0, MinerConfig])
		end,
		lists:seq(1, MiningNodeCount)
	),
	MinerNodes ++ [peer1, main].
%% @doc A baseline #config{} for coordinated-mining tests; the mining address
%% is a fresh keyfile created on peer1.
base_cm_config(Peers) ->
	RewardAddr = ar_wallet:to_address(remote_call(peer1, ar_wallet, new_keyfile, [])),
	#config{
		mining_cache_size_mb = 128,
		start_from_latest_state = true,
		auto_join = true,
		mining_addr = RewardAddr,
		hashing_threads = 1,
		sync_jobs = 2,
		disk_pool_jobs = 2,
		header_sync_jobs = 2,
		enable = [search_in_rocksdb_when_mining, serve_tx_data_without_limits,
				serve_wallet_lists, pack_served_chunks, public_vdf_server],
		debug = true,
		peers = Peers,
		coordinated_mining = true,
		cm_api_secret = <<"test_coordinated_mining_secret">>,
		cm_poll_interval = 2000,
		disable_replica_2_9_device_limit = true,
		%% Disable rebasing by default to make the tests more reliable.
		allow_rebase = false
	}.

mine() ->
	ar_node_worker:mine_one_block().

%% @doc Start mining on the given node. The node will be mining until it finds a block.
mine(Node) ->
	remote_call(Node, ar_test_node, mine, []).

%% @doc Fetch and decode a binary-encoded block by hash H from the HTTP API of the
%% given node. Return {ok, B} | {error, Reason}.
http_get_block(H, Node) ->
	{ok, Config} = remote_call(Node, arweave_config, get_env, []),
	Port = Config#config.port,
	Peer = {127, 0, 0, 1, Port},
	case ar_http:req(#{
			peer => Peer,
			method => get,
			path => "/block2/hash/" ++ binary_to_list(ar_util:encode(H))
	}) of
		{ok, {{<<"200">>, _}, _, BlockBin, _, _}} ->
			ar_serialize:binary_to_block(BlockBin);
		{error, Reason} ->
			{error, Reason};
		{ok, {{StatusCode, _}, _, Body, _, _}} ->
			{error, {StatusCode, Body}}
	end.

get_blocks(Node) ->
	remote_call(Node, ar_node, get_blocks, []).

%% @doc A solution hash guaranteed to be below the test difficulty.
invalid_solution() ->
	<<0:256>>.

%% @doc A solution hash guaranteed to be above the test difficulty.
valid_solution() ->
	<<255:256>>.

%% @doc A meck mock spec making ar_block:compute_h1/3 always return an
%% invalid solution (the passthrough result is intentionally discarded).
mock_to_force_invalid_h1() ->
	{
		ar_block, compute_h1,
		fun(H0, Nonce, Chunk1) ->
			%% First call the original compute_h1 function
			meck:passthrough([H0, Nonce, Chunk1]),
			%% Then return invalid solutions
			{invalid_solution(), invalid_solution()}
		end
	}.

%% @doc Mock out packing-related constants to replicate mainnet behavior.
%% Mainnet packing constants as {Module, Function, Fun} meck mock specs.
mainnet_packing_mocks() ->
	[
		{ar_block, partition_size, fun() -> 3_600_000_000_000 end},
		{ar_block, strict_data_split_threshold, fun() -> 30_607_159_107_830 end},
		{ar_storage_module, get_overlap, fun(_) -> 104_857_600 end},
		{ar_block, get_sub_chunks_per_replica_2_9_entropy, fun() -> 1024 end},
		{ar_block, get_replica_2_9_entropy_sector_size, fun() -> 3_515_875_328 end}
	].

get_difficulty_for_invalid_hash() ->
	%% Set the difficulty just high enough to exclude the invalid_solution(), this lets
	%% us selectively disable one- or two-chunk mining in tests.
	binary:decode_unsigned(invalid_solution(), big) + 1.

%% @doc Read a fixture file from the test "fixtures" directory.
load_fixture(Fixture) ->
	Dir = filename:dirname(?FILE),
	FixtureFilename = filename:join([Dir, "fixtures", Fixture]),
	{ok, Data} = file:read_file(FixtureFilename),
	Data.

%%%===================================================================
%%% Private functions.
%%%===================================================================

start_dependencies() ->
	ok = arweave_limiter:start(),
	{ok, _} = application:ensure_all_started(arweave, temporary),
	ok.

%% @doc Stop the node and wipe its data_dir, preserving only the "wallets"
%% subdirectory so keyfiles survive across tests.
clean_up_and_stop() ->
	Config = stop(),
	?LOG_DEBUG([{event, clean_up_and_stop}, {data_dir, Config#config.data_dir}]),
	ok = filelib:ensure_dir(Config#config.data_dir),
	{ok, Entries} = file:list_dir_all(Config#config.data_dir),
	lists:foreach(
		fun	("wallets") ->
				ok;
			(Entry) ->
				?LOG_DEBUG([{event, clean_up_and_stop},
						{delete, filename:join(Config#config.data_dir, Entry)}]),
				ok = file:del_dir_r(filename:join(Config#config.data_dir, Entry))
		end,
		Entries
	).
%% @doc Persist the genesis block B0 into DataDir: the block binary, its
%% transactions as JSON, the block index / reward history / block time history
%% RocksDB entries, and the genesis wallet list.
write_genesis_files(DataDir, B0) ->
	BH = B0#block.indep_hash,
	%% Write the serialized genesis block.
	BlockDir = filename:join(DataDir, ?BLOCK_DIR),
	ok = filelib:ensure_dir(BlockDir ++ "/"),
	BlockFilepath = filename:join(BlockDir, binary_to_list(ar_util:encode(BH)) ++ ".bin"),
	ok = file:write_file(BlockFilepath, ar_serialize:block_to_binary(B0)),
	%% Write each genesis transaction as JSON.
	TXDir = filename:join(DataDir, ?TX_DIR),
	ok = filelib:ensure_dir(TXDir ++ "/"),
	lists:foreach(
		fun(TX) ->
			TXID = TX#tx.id,
			TXFilepath = filename:join(TXDir,
					binary_to_list(ar_util:encode(TXID)) ++ ".json"),
			TXJSON = ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX)),
			ok = file:write_file(TXFilepath, TXJSON)
		end,
		B0#block.txs
	),
	%% Seed the RocksDB databases with the height-0 entries; ar_kv is started
	%% temporarily just for this and stopped again below.
	_ = ar_kv:create_ets(),
	{ok, _} = ar_kv:start_link(),
	ok = ar_kv:open(#{ path => filename:join([DataDir, ?ROCKS_DB_DIR,
			"reward_history_db"]), name => reward_history_db}),
	ok = ar_kv:open(#{ path => filename:join([DataDir, ?ROCKS_DB_DIR,
			"block_time_history_db"]), name => block_time_history_db}),
	ok = ar_kv:open(#{ path => filename:join([DataDir, ?ROCKS_DB_DIR,
			"block_index_db"]), name => block_index_db}),
	H = B0#block.indep_hash,
	WeaveSize = B0#block.weave_size,
	TXRoot = B0#block.tx_root,
	ok = ar_kv:put(block_index_db, << 0:256 >>,
			term_to_binary({H, WeaveSize, TXRoot, <<>>})),
	ok = ar_kv:put(reward_history_db, H, term_to_binary(hd(B0#block.reward_history))),
	%% The block time history only exists from fork 2.7 onwards.
	case ar_fork:height_2_7() of
		0 ->
			ok = ar_kv:put(block_time_history_db, H,
					term_to_binary(hd(B0#block.block_time_history)));
		_ ->
			ok
	end,
	ok = gen_server:stop(ar_kv),
	_ = ets:delete(ar_kv),
	%% Write the genesis wallet list.
	WalletListDir = filename:join(DataDir, ?WALLET_LIST_DIR),
	ok = filelib:ensure_dir(WalletListDir ++ "/"),
	RootHash = B0#block.wallet_list,
	WalletListFilepath = filename:join(WalletListDir,
			binary_to_list(ar_util:encode(RootHash)) ++ ".json"),
	WalletListJSON = ar_serialize:jsonify(
		ar_serialize:wallet_list_to_json_struct(B0#block.reward_addr, false,
				B0#block.account_tree)
	),
	ok = file:write_file(WalletListFilepath, WalletListJSON).
%% @doc Wait until every chunk in [Left, Right) (capped by WeaveSize) is
%% recorded in the ar_data_sync sync record, chunk by chunk. Packing `any'
%% accepts any recorded packing; otherwise the exact packing must match.
wait_until_syncs_data(Left, Right, WeaveSize, _Packing)
		when Left >= Right orelse Left >= WeaveSize
			orelse (Right - Left < ?DATA_CHUNK_SIZE)
			orelse (WeaveSize - Left < ?DATA_CHUNK_SIZE) ->
	%% Less than one full chunk remains — nothing more to wait for.
	ok;
wait_until_syncs_data(Left, Right, WeaveSize, Packing) ->
	true = ar_util:do_until(
		fun() ->
			case Packing of
				any ->
					%% Any packing will do — only check presence.
					case ar_sync_record:is_recorded(Left + 1, ar_data_sync) of
						false ->
							false;
						_ ->
							true
					end;
				_ ->
					%% Require the chunk to be recorded with this exact packing.
					case ar_sync_record:is_recorded(Left + 1, {ar_data_sync, Packing}) of
						{{true, _}, _} ->
							true;
						_ ->
							false
					end
			end
		end,
		1000,
		?WAIT_SYNCS_DATA_TIMEOUT
	),
	wait_until_syncs_data(Left + ?DATA_CHUNK_SIZE, Right, WeaveSize, Packing).

%% @doc Return the storage modules assigned to coordinated-mining node N out
%% of MiningNodeCount nodes.
get_cm_storage_modules(RewardAddr, 1, 1) ->
	%% When there's only 1 node it covers all 3 storage modules.
	get_cm_storage_modules(RewardAddr, 1, 3)
		++ get_cm_storage_modules(RewardAddr, 2, 3)
		++ get_cm_storage_modules(RewardAddr, 3, 3);
get_cm_storage_modules(RewardAddr, N, MiningNodeCount)
		when MiningNodeCount == 2 orelse MiningNodeCount == 3 ->
	%% skip partitions so that no two nodes can mine the same range even accounting for ?OVERLAP
	%% Note that replica_2_9 modules do not have ?OVERLAP.
	RangeNumber = lists:nth(N, [0, 2, 4]),
	[{ar_block:partition_size(), RangeNumber, get_default_storage_module_packing(RewardAddr, 0)}].

%% @doc Call {Module, Function, Args} on Node with the default timeout.
remote_call(Node, Module, Function, Args) ->
	remote_call(Node, Module, Function, Args, ?REMOTE_CALL_TIMEOUT).

%% @doc Call {Module, Function, Args} on Node. When Node is the local node,
%% apply directly; otherwise issue an async rpc and poll for the reply until
%% Timeout. Asserts that a reply arrived and returns it.
remote_call(Node, Module, Function, Args, Timeout) ->
	NodeName = peer_name(Node),
	case node() == NodeName of
		true ->
			apply(Module, Function, Args);
		false ->
			Key = rpc:async_call(NodeName, Module, Function, Args),
			Result = ar_util:do_until(
				fun() ->
					case rpc:nb_yield(Key) of
						timeout ->
							false;
						{value, Reply} ->
							{ok, Reply}
					end
				end,
				200,
				Timeout
			),
			case Result of
				{error, timeout} ->
					?LOG_ERROR("Timed out (~pms) waiting for the rpc reply; module: ~p, function: ~p, "
							"args: ~p, node: ~p.~n", [Timeout, Module, Function, Args, Node]);
				_ ->
					ok
			end,
			?assertMatch({ok, _}, Result),
			element(2, Result)
	end.
%%%===================================================================
%%% Legacy public interface.
%%%===================================================================

%% @doc Start a fresh node.
start() ->
	start(#{}).

%% @doc Start a fresh node from an options map. Recognized keys: b0 (genesis
%% block), addr (mining address), config (#config{}), storage_modules.
%% Missing keys get freshly generated defaults. A bare genesis block (second
%% clause) is also accepted for backwards compatibility.
start(Options) when is_map(Options) ->
	prometheus:start(),
	arweave_config:start(),
	ok = arweave_limiter:start(),
	B0 = case maps:get(b0, Options, not_set) of
		not_set -> hd(ar_weave:init());
		Value -> Value
	end,
	RewardAddr = case maps:get(addr, Options, not_set) of
		not_set -> ar_wallet:to_address(ar_wallet:new_keyfile());
		Addr -> Addr
	end,
	Config = case maps:get(config, Options, not_set) of
		not_set -> element(2, arweave_config:get_env());
		Value2 -> Value2
	end,
	StorageModules = case maps:get(storage_modules, Options, not_set) of
		not_set ->
			%% Cover the first 9 ranges of 10 partitions each by default.
			[{10 * ar_block:partition_size(), N,
					get_default_storage_module_packing(RewardAddr, N, Options)}
				|| N <- lists:seq(0, 8)];
		Value3 -> Value3
	end,
	start(B0, RewardAddr, Config, StorageModules);
start(B0) ->
	start(#{ b0 => B0 }).

%% @doc Start a fresh node with the given genesis block and mining address.
start(B0, RewardAddr) ->
	start(#{ b0 => B0, addr => RewardAddr }).

%% @doc Start a fresh node with the given genesis block, mining address, and config.
start(B0, RewardAddr, Config) ->
	StorageModules = [{10 * ar_block:partition_size(), N,
			get_default_storage_module_packing(RewardAddr, N)} || N <- lists:seq(0, 8)],
	start(B0, RewardAddr, Config, StorageModules).

%% @doc Start a fresh node with the given genesis block, mining address, config,
%% and storage modules.
%%
%% Note: the Config provided here is written to disk. This is fine if it's the default Config,
%% but if you've modified any of the Config fields for your test, please restore the default
%% Config after the test is done. Otherwise the tests that run after yours may fail.
start(B0, RewardAddr, Config, StorageModules) ->
	%% Wipe the previous state, write the genesis files, then boot with a
	%% test-friendly configuration.
	clean_up_and_stop(),
	prometheus:start(),
	arweave_config:start(),
	write_genesis_files(Config#config.data_dir, B0),
	ok = arweave_config:set_env(Config#config{
		start_from_latest_state = true,
		auto_join = true,
		peers = [],
		cm_exit_peer = not_set,
		cm_peers = [],
		mining_addr = RewardAddr,
		storage_modules = StorageModules,
		disk_space_check_frequency = 1000,
		disable_replica_2_9_device_limit = true,
		sync_jobs = 2,
		disk_pool_jobs = 2,
		header_sync_jobs = 2,
		enable = [search_in_rocksdb_when_mining, serve_tx_data_without_limits,
			double_check_nonce_limiter, serve_wallet_lists | Config#config.enable],
		%% Disable rebasing by default to make the tests more reliable.
		allow_rebase = false,
		'http_client.http.keepalive' = ?TEST_HTTP_CLIENT_KEEPALIVE,
		debug = true
	}),
	ok = arweave_limiter:start(),
	start_dependencies(),
	wait_until_joined(),
	wait_until_syncs_genesis_data().

%% @doc Restart the local node keeping its current configuration and state.
restart() ->
	?LOG_INFO("Restarting node"),
	stop(),
	start_dependencies(),
	wait_until_joined().

%% @doc Restart the local node with the given configuration.
restart_with_config(Config) ->
	?LOG_INFO("Restarting node with new config"),
	stop(),
	update_config(Config),
	start_dependencies(),
	wait_until_joined().

%% @doc Restart the given (possibly remote) node.
restart(Node) ->
	remote_call(Node, ?MODULE, restart, [], 90000).

%% @doc Restart the given (possibly remote) node with the given configuration.
restart_with_config(Node, Config) ->
	remote_call(Node, ?MODULE, restart_with_config, [Config], 90000).

%% @doc Start a fresh peer node from an options map (see start/1) and wait
%% until it has joined and synced the genesis data.
start_peer(Node, Args) when is_map(Args) ->
	?LOG_DEBUG([{event, start_peer}, {peer, Node}]),
	remote_call(Node, ?MODULE, start, [Args], ?PEER_START_TIMEOUT),
	wait_until_joined(Node),
	wait_until_syncs_genesis_data(Node);
%% @doc Start a fresh peer node with the given genesis block.
start_peer(Node, B0) ->
	start_peer(Node, #{ b0 => B0 }).

%% @doc Start a fresh peer node with the given genesis block and mining address.
start_peer(Node, B0, RewardAddr) ->
	start_peer(Node, #{ b0 => B0, addr => RewardAddr }).

%% @doc Start a fresh peer node with the given genesis block, mining address, and config.
start_peer(Node, B0, RewardAddr, Config) ->
	start_peer(Node, #{ b0 => B0, addr => RewardAddr, config => Config }).

%% @doc Fetch the fee estimation and the denomination (call GET /price2/[size])
%% from the given node.
get_tx_price(Node, DataSize) ->
	get_tx_price(Node, DataSize, <<>>).

%% @doc Fetch the fee estimation and the denomination (call GET /price2/[size]/[addr])
%% from the given node. Also queries the legacy /price endpoint and asserts
%% that both endpoints agree on the fee.
get_tx_price(Node, DataSize, Target) ->
	Peer = peer_ip(Node),
	Path = "/price/" ++ integer_to_list(DataSize) ++ "/"
			++ binary_to_list(ar_util:encode(Target)),
	{ok, {{<<"200">>, _}, _, Reply, _, _}} =
		ar_http:req(#{ method => get, peer => Peer, path => Path }),
	Fee = binary_to_integer(Reply),
	Path2 = "/price2/" ++ integer_to_list(DataSize) ++ "/"
			++ binary_to_list(ar_util:encode(Target)),
	{ok, {{<<"200">>, _}, _, Reply2, _, _}} =
		ar_http:req(#{ method => get, peer => Peer, path => Path2 }),
	Map = jiffy:decode(Reply2, [return_maps]),
	case binary_to_integer(maps:get(<<"fee">>, Map)) of
		Fee ->
			{Fee, maps:get(<<"denomination">>, Map)};
		Fee2 ->
			%% The /price and /price2 endpoints disagree — fail the test.
			?assert(false, io_lib:format("Fee mismatch, expected: ~B, got: ~B.",
					[Fee, Fee2]))
	end.

%% @doc Fetch the optimistic fee estimation (call GET /price/[size]) from the given node.
get_optimistic_tx_price(Node, DataSize) ->
	get_optimistic_tx_price(Node, DataSize, <<>>).

%% @doc Fetch the optimistic fee estimation (call GET /price/[size]/[addr]) from the given
%% node.
get_optimistic_tx_price(Node, DataSize, Target) ->
	Path = "/optimistic_price/" ++ integer_to_list(DataSize) ++ "/"
			++ binary_to_list(ar_util:encode(Target)),
	{ok, {{<<"200">>, _}, _, Reply, _, _}} =
		ar_http:req(#{ method => get, peer => peer_ip(Node), path => Path }),
	binary_to_integer(maps:get(<<"fee">>, jiffy:decode(Reply, [return_maps]))).

%% @doc Return a signed format=2 transaction with the minimum required fee fetched from
%% GET /price/0 on the peer1 node.
sign_tx(Wallet) ->
	sign_tx(peer1, Wallet, #{ format => 2 }, fun ar_tx:sign/2).
%% @doc Return a signed format=2 transaction with properties from the given Args map.
%% If the fee is not in Args, fetch it from GET /price/{data_size}
%% or GET /price/{data_size}/{target} (if the target is specified) on the peer1 node.
sign_tx(Wallet, Args) ->
	sign_tx(peer1, Wallet, insert_root(Args#{ format => 2 }), fun ar_tx:sign/2).

%% @doc Like sign_tx/2, but use the given Node to fetch the fee estimation and
%% block anchor from.
sign_tx(Node, Wallet, Args) ->
	sign_tx(Node, Wallet, insert_root(Args#{ format => 2 }), fun ar_tx:sign/2).

%% @doc Like sign_tx/1 but return a format=1 transaction.
sign_v1_tx(Wallet) ->
	sign_tx(peer1, Wallet, #{}, fun ar_tx:sign_v1/2).

%% @doc Like sign_tx/2 but return a format=1 transaction.
sign_v1_tx(Wallet, TXParams) ->
	sign_tx(peer1, Wallet, TXParams, fun ar_tx:sign_v1/2).

%% @doc Like sign_tx/3 but return a format=1 transaction.
sign_v1_tx(Node, Wallet, Args) ->
	sign_tx(Node, Wallet, Args, fun ar_tx:sign_v1/2).

%%%===================================================================
%%% Legacy private functions.
%%%===================================================================

%% @doc When Args contain data but no data_root, compute the data_root from
%% the data's chunk tree and insert it; otherwise return Args unchanged.
insert_root(Params) ->
	case {maps:get(data, Params, <<>>), maps:get(data_root, Params, <<>>)} of
		{<<>>, _} ->
			Params;
		{Data, <<>>} ->
			TX = ar_tx:generate_chunk_tree(#tx{ data = Data }),
			Params#{ data_root => TX#tx.data_root };
		_ ->
			Params
	end.

%% @doc Build and sign a transaction from the Args map using SignFun
%% (ar_tx:sign/2 or ar_tx:sign_v1/2). The fee and denomination are fetched
%% from Node unless an explicit reward is provided in Args.
sign_tx(Node, Wallet, Args, SignFun) ->
	{_, {_, Pub}} = Wallet,
	Data = maps:get(data, Args, <<>>),
	DataSize = maps:get(data_size, Args, byte_size(Data)),
	Format = maps:get(format, Args, 1),
	{Fee, Denomination} = get_tx_price(Node, DataSize, maps:get(target, Args, <<>>)),
	Fee2 = case {Format, maps:get(reward, Args, none)} of
		{1, none} ->
			%% Make sure the v1 tx is not malleable by assigning a fee with only
			%% the first digit being non-zero.
			FirstDigit = binary_to_integer(binary:part(integer_to_binary(Fee), {0, 1})),
			Len = length(integer_to_list(Fee)),
			Fee3 = trunc((FirstDigit + 1) * math:pow(10, Len - 1)),
			Fee3;
		{_, none} ->
			Fee;
		{_, AssignedFee} ->
			AssignedFee
	end,
	SignFun(
		(ar_tx:new())#tx{
			owner = Pub,
			reward = Fee2,
			data = Data,
			target = maps:get(target, Args, <<>>),
			quantity = maps:get(quantity, Args, 0),
			tags = maps:get(tags, Args, []),
			last_tx = maps:get(last_tx, Args, get_tx_anchor(Node)),
			data_size = DataSize,
			data_root = maps:get(data_root, Args, <<>>),
			format = Format,
			denomination = maps:get(denomination, Args, Denomination)
		},
		Wallet
	).

%% @doc Stop the arweave application (force-killing it on timeout) and its
%% dependencies. Returns the configuration that was in effect before the stop.
stop() ->
	{ok, Config} = arweave_config:get_env(),
	case stop_application(arweave, 60000) of
		ok -> ok;
		{error, {not_started, arweave}} -> ok;
		{error, timeout} ->
			?LOG_WARNING([{event, application_stop_timeout}, {app, arweave}]),
			force_stop_application(arweave)
	end,
	ar:stop_dependencies(),
	Config.

%% @doc Run application:stop(App) in a separate process and give up after
%% Timeout milliseconds, killing the stopper process.
stop_application(App, Timeout) ->
	Parent = self(),
	Ref = make_ref(),
	Pid = spawn(fun() -> Parent ! {Ref, application:stop(App)} end),
	receive
		{Ref, Result} -> Result
	after Timeout ->
		exit(Pid, kill),
		{error, timeout}
	end.

%% @doc Kill the application master of App outright. Used as a last resort
%% when a graceful application:stop/1 timed out.
force_stop_application(App) ->
	case application_controller:get_master(App) of
		Master when is_pid(Master) ->
			exit(Master, kill),
			timer:sleep(1000);
		_ ->
			ok
	end.

%% @doc Stop the given (possibly remote) node.
stop(Node) ->
	remote_call(Node, ar_test_node, stop, []).

%% @doc Re-join Node to JoinOnNode, keeping Node's on-disk state.
rejoin_on(#{ node := Node, join_on := JoinOnNode } = Options) ->
	Config = maps:get(config, Options, generate_join_config(Node)),
	join_on(#{ node => Node, join_on => JoinOnNode, config => Config }, true).

%% @doc Generate a join configuration on the given (possibly remote) node.
generate_join_config(Node) ->
	remote_call(Node, ar_test_node, generate_join_config, []).

%% @doc Build a config for joining: a fresh mining address and 5 storage
%% modules of one partition each with the default packing.
generate_join_config() ->
	{ok, Config} = arweave_config:get_env(),
	RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	StorageModules = [{ar_block:partition_size(), N,
			get_default_storage_module_packing(RewardAddr, N)} || N <- lists:seq(0, 4)],
	Config#config{ mining_addr = RewardAddr, storage_modules = StorageModules }.
%% @doc Join the node from the Params map onto another node (fresh join).
join_on(Params) ->
	join_on(Params, false).

%% @doc Join Node onto JoinOnNode. When Rejoin is true, keep Node's state;
%% otherwise wipe it first (see join/3).
join_on(#{ node := Node, join_on := JoinOnNode } = Params, Rejoin) ->
	Config = maps:get(config, Params, generate_join_config(Node)),
	remote_call(Node, ar_test_node, join, [JoinOnNode, Rejoin, Config],
			?REMOTE_CALL_TIMEOUT).

join(JoinOnNode, Rejoin) ->
	join(JoinOnNode, Rejoin, generate_join_config()).

%% @doc Stop (and, unless rejoining, wipe) the local node, then start it
%% configured to join the network via JoinOnNode. Returns the pid of
%% ar_node_worker.
join(JoinOnNode, Rejoin, Config) ->
	Peer = peer_ip(JoinOnNode),
	case Rejoin of
		true -> stop();
		false -> clean_up_and_stop()
	end,
	prometheus:start(),
	arweave_config:start(),
	ok = arweave_config:set_env(Config#config{
		start_from_latest_state = false,
		auto_join = true,
		peers = [Peer],
		'http_client.http.keepalive' = ?TEST_HTTP_CLIENT_KEEPALIVE
	}),
	start_dependencies(),
	wait_until_joined(),
	whereis(ar_node_worker).

get_default_storage_module_packing(RewardAddr, Index) ->
	get_default_storage_module_packing(RewardAddr, Index, #{}).

%% @doc Pick the packing for a default storage module, depending on which
%% forks (2.8/2.9) are active and an optional explicit `packing' option.
%% When both forks are active at a positive height and no packing is
%% requested, rotate between the three packing types by Index.
get_default_storage_module_packing(RewardAddr, Index, Options) ->
	case {ar_fork:height_2_9(), ar_fork:height_2_8()} of
		{infinity, infinity} ->
			{spora_2_6, RewardAddr};
		{infinity, 0} ->
			{composite, RewardAddr, 1};
		{0, 0} ->
			case maps:get(packing, Options, not_set) of
				spora_2_6 -> {spora_2_6, RewardAddr};
				{composite, PackingDiff} -> {composite, RewardAddr, PackingDiff};
				replica_2_9 -> {replica_2_9, RewardAddr};
				not_set -> {replica_2_9, RewardAddr}
			end;
		_ ->
			case maps:get(packing, Options, not_set) of
				spora_2_6 -> {spora_2_6, RewardAddr};
				{composite, PackingDiff} -> {composite, RewardAddr, PackingDiff};
				replica_2_9 -> {replica_2_9, RewardAddr};
				not_set ->
					case Index rem 3 of
						0 -> {spora_2_6, RewardAddr};
						1 -> {composite, RewardAddr, 1};
						_ -> {replica_2_9, RewardAddr}
					end
			end
	end.

%% @doc Make Node connect to Peer (remote-dispatched connect_to_peer/1).
connect_peers(Node, Peer) ->
	remote_call(Node, ar_test_node, connect_to_peer, [Peer]).

%% @doc Make the local node and Node discover each other and wait until both
%% list the other among their lifetime peers.
connect_to_peer(Node) ->
	%% Unblock connections possibly blocked in the prior test code.
	ar_http:unblock_peer_connections(),
	remote_call(Node, ar_http, unblock_peer_connections, []),
	Peer = peer_ip(Node),
	Self = self_node(),
	%% Make requests to the nodes to make them discover each other.
	{ok, {{<<"200">>, <<"OK">>}, _, _, _, _}} =
		ar_http:req(#{ method => get, peer => Peer, path => "/info",
				headers => p2p_headers(Self) }),
	true = ar_util:do_until(
		fun() ->
			Peers = remote_call(Node, ar_peers, get_peers, [lifetime]),
			lists:member(peer_ip(Self), Peers)
		end,
		100,
		?CONNECT_TO_PEER_TIMEOUT
	),
	{ok, {{<<"200">>, <<"OK">>}, _, _, _, _}} =
		ar_http:req(#{ method => get, peer => peer_ip(Self), path => "/info",
				headers => p2p_headers(Node) }),
	%% NOTE(review): unlike the first wait above, this result is returned,
	%% not matched against true — a timeout here is reported to the caller.
	ar_util:do_until(
		fun() ->
			lists:member(Peer, ar_peers:get_peers(lifetime))
		end,
		100,
		?CONNECT_TO_PEER_TIMEOUT
	).

%% @doc Make Node block its connections to Peer (remote disconnect_from/1).
disconnect_peers(Node, Peer) ->
	remote_call(Node, ar_test_node, disconnect_from, [Peer]).

%% @doc Block HTTP peer connections both locally and on Node.
disconnect_from(Node) ->
	ar_http:block_peer_connections(),
	remote_call(Node, ar_http, block_peer_connections, []).

%% @doc Run Fun with block gossip stopped on Node, restarting gossip afterwards.
with_gossip_paused(Node, Fun) when is_function(Fun, 0) ->
	ok = remote_call(Node, ar_bridge, stop_gossip, []),
	try
		Fun()
	after
		ok = remote_call(Node, ar_bridge, start_gossip, [])
	end.

wait_until_syncs_genesis_data(Node) ->
	ok = remote_call(Node, ar_test_node, wait_until_syncs_genesis_data, [], 100_000).

%% @doc Wait until the node has joined and every configured storage module
%% has synced its range of the genesis data, first with any packing and then
%% with each module's target packing.
wait_until_syncs_genesis_data() ->
	{ok, Config} = arweave_config:get_env(),
	ar_util:do_until(
		fun() ->
			case ar_node:get_current_block() of
				not_joined -> false;
				_ -> true
			end
		end,
		1000,
		10_000
	),
	B = ar_node:get_current_block(),
	WeaveSize = B#block.weave_size,
	?LOG_INFO([{event, wait_until_syncs_genesis_data}, {status, initial_sync_started},
			{weave_size, WeaveSize}]),
	[wait_until_syncs_data(N * Size, (N + 1) * Size, WeaveSize, any)
		|| {Size, N, _Packing} <- Config#config.storage_modules],
	?LOG_INFO([{event, wait_until_syncs_genesis_data}, {status, initial_sync_complete}]),
	%% Once the data is stored in the disk pool, make the storage modules
	%% copy the missing data over from each other. This procedure is executed on startup
	%% but the disk pool did not have any data at the time.
	[
		gen_server:cast(
			list_to_atom("ar_data_sync_"
					++ ar_storage_module:label(ar_storage_module:id(M))),
			sync_data
		)
		|| M <- Config#config.storage_modules
	],
	[wait_until_syncs_data(N * Size, (N + 1) * Size, WeaveSize, Packing)
		|| {Size, N, Packing} <- Config#config.storage_modules],
	?LOG_INFO([{event, wait_until_syncs_genesis_data},
			{status, cross_module_sync_complete}]),
	ok.

wait_until_height(Node, TargetHeight) ->
	wait_until_height(Node, TargetHeight, true, ?WAIT_UNTIL_BLOCK_HEIGHT_TIMEOUT).

wait_until_height(Node, TargetHeight, Strict) ->
	wait_until_height(Node, TargetHeight, Strict, ?WAIT_UNTIL_BLOCK_HEIGHT_TIMEOUT).

%% @doc Wait until Node's block index reaches TargetHeight and return the
%% block index. When Strict, assert the height was actually reached.
wait_until_height(Node, TargetHeight, Strict, Timeout) ->
	BI = case Node of
		main ->
			do_wait_until_height(TargetHeight, Timeout);
		_ ->
			remote_call(Node, ?MODULE, do_wait_until_height, [TargetHeight, Timeout],
					Timeout + 500)
	end,
	case Strict of
		true ->
			Height = length(BI) - 1,
			?assert(Height >= TargetHeight, iolist_to_binary(io_lib:format(
					"Node ~p not at the expected height. Expected: ~B, got: ~B",
					[Node, TargetHeight, Height])));
		false ->
			ok
	end,
	BI.

wait_until_height(TargetHeight) ->
	do_wait_until_height(TargetHeight, ?WAIT_UNTIL_BLOCK_HEIGHT_TIMEOUT).

%% @doc Poll the local block index until it covers TargetHeight; crashes with
%% badmatch if the timeout elapses first.
do_wait_until_height(TargetHeight, Timeout) ->
	{ok, BI} = ar_util:do_until(
		fun() ->
			case ar_node:get_blocks() of
				BI when length(BI) - 1 >= TargetHeight ->
					{ok, BI};
				_ ->
					false
			end
		end,
		100,
		Timeout
	),
	BI.

assert_wait_until_height(Node, TargetHeight) ->
	BI = wait_until_height(Node, TargetHeight),
	?assert(is_list(BI), iolist_to_binary(io_lib:format("Got ~p.", [BI]))),
	BI.

wait_until_block_index(Node, BI) ->
	remote_call(Node, ?MODULE, wait_until_block_index, [BI]).

%% @doc Wait until the local node's block index equals BI exactly.
wait_until_block_index(BI) ->
	ar_util:do_until(
		fun() ->
			case ar_node:get_blocks() of
				BI -> ok;
				_ -> false
			end
		end,
		100,
		?BLOCK_INDEX_TIMEOUT
	).
%% @doc Block until the mining server on Node reports it is paused, or time out.
wait_until_mining_paused(Node) ->
	IsPaused =
		fun() ->
			case Node of
				main -> ar_mining_server:is_paused();
				_ -> remote_call(Node, ar_mining_server, is_paused, [])
			end
		end,
	ar_util:do_until(IsPaused, 1000, ?WAIT_UNTIL_MINING_PAUSED_TIMEOUT).

%% @doc Safely perform an rpc:call/4 and return the result in a tagged tuple,
%% turning exceptions into {error, _} instead of propagating them.
safe_remote_call(Node, Module, Function, Args) ->
	try rpc:call(Node, Module, Function, Args, 30000) of
		Value ->
			{ok, Value}
	catch
		error:Why:Stack ->
			io:format("Remote call error: ~p:~p~n", [Why, Stack]),
			{error, Why};
		Class:Why:Stack ->
			io:format("Remote call error: ~p:~p:~p~n", [Class, Why, Stack]),
			{error, unknown}
	end.

%% @doc Poll NodeName until it answers pings and the arweave application is
%% listed among its running applications; returns {ok, ready} on success.
wait_until_node_is_ready(NodeName) ->
	ar_util:do_until(
		fun() ->
			case net_adm:ping(NodeName) of
				pang ->
					%% The node is not reachable yet.
					false;
				pong ->
					%% Reachable; additionally confirm the arweave
					%% application has been started on the remote node.
					RunningApps =
						case safe_remote_call(NodeName, application,
								which_applications, []) of
							{ok, Apps} when is_list(Apps) -> Apps;
							_ -> []
						end,
					case lists:keyfind(arweave, 1, RunningApps) of
						{arweave, _, _} -> {ok, ready};
						_ -> false
					end
			end
		end,
		?NODE_READY_CHECK_INTERVAL,
		?NODE_READY_CHECK_TIMEOUT
	).

%% @doc Assert that the local node accepted the given transactions for mining.
assert_wait_until_receives_txs(TXs) ->
	?assertEqual(ok, wait_until_receives_txs(TXs)).

%% @doc Assert that Node accepted the given transactions for mining.
assert_wait_until_receives_txs(Node, TXs) ->
	?assertEqual(ok, wait_until_receives_txs(Node, TXs)).

wait_until_receives_txs(Node, TXs) ->
	remote_call(Node, ?MODULE, wait_until_receives_txs, [TXs],
			?WAIT_UNTIL_RECEIVES_TXS_TIMEOUT + 500).

%% @doc Poll until every given tx id appears in the local ready-for-mining set.
wait_until_receives_txs(TXs) ->
	ar_util:do_until(
		fun() ->
			ReadyTXIDs = ar_node:get_ready_for_mining_txs(),
			Missing = [TX || TX <- TXs, not lists:member(TX#tx.id, ReadyTXIDs)],
			case Missing of
				[] -> ok;
				_ -> false
			end
		end,
		100,
		?WAIT_UNTIL_RECEIVES_TXS_TIMEOUT
	).

assert_post_tx_to_peer(Node, TX) ->
	assert_post_tx_to_peer(Node, TX, true).

assert_post_tx_to_peer(Node, TX, Wait) ->
	assert_post_tx_to_peer(Node, TX, Wait, 3).
%% @doc Post TX to Node and assert the HTTP reply was 200 "OK".
assert_post_tx_to_peer(Node, TX, Wait, Retries) ->
	{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = post_tx_to_peer(Node, TX, Wait, Retries).

post_tx_to_peer(Node, TX) ->
	post_tx_to_peer(Node, TX, true).

post_tx_to_peer(Node, TX, Wait) ->
	post_tx_to_peer(Node, TX, Wait, 3).

%% @doc POST the JSON-serialized TX to Node's /tx endpoint. On success,
%% optionally wait until the node accepts the tx for mining. On failure,
%% retry up to Retries times; when retries are exhausted, print detailed
%% diagnostics (including the node-side error codes). Returns the last
%% HTTP reply in all cases.
post_tx_to_peer(Node, TX, Wait, Retries) ->
	Reply = post_tx_json(Node, ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))),
	case Reply of
		{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} ->
			case Wait of
				true ->
					assert_wait_until_receives_txs(Node, [TX]);
				false ->
					ok
			end;
		_ when Retries > 0 ->
			?debugFmt("Failed to post transaction, retrying. Error: ~p~nRetries: ~p~n",
					[Reply, Retries]),
			timer:sleep(3000),
			post_tx_to_peer(Node, TX, Wait, Retries - 1);
		_ ->
			ErrorInfo = case Reply of
				{ok, {{StatusCode, _}, _, Text, _, _}} -> {StatusCode, Text};
				Other -> Other
			end,
			%% Resolve the owner address; an empty owner means the key must be
			%% recovered from the signature (ECDSA-style txs).
			Addr = case TX#tx.owner of
				<<>> ->
					DataSegment = ar_tx:generate_signature_data_segment(TX),
					ar_wallet:to_address(
						ar_wallet:recover_key(DataSegment, TX#tx.signature, TX#tx.signature_type),
						TX#tx.signature_type);
				_ ->
					ar_wallet:to_address(TX#tx.owner, TX#tx.signature_type)
			end,
			?debugFmt(
				"Failed to post transaction.~nTX: ~s.~nTX format: ~B.~nTX fee: ~B.~n"
				"TX size: ~B.~nTX last_tx: ~s.~nTX owner: ~s.~nTX owner address: ~s.~n"
				"Error(s): ~p.~nReply: ~p.~n",
				[ar_util:encode(TX#tx.id), TX#tx.format, TX#tx.reward, TX#tx.data_size,
					ar_util:encode(TX#tx.last_tx), ar_util:encode(TX#tx.owner),
					ar_util:encode(Addr),
					remote_call(Node, ar_tx_db, get_error_codes, [TX#tx.id]), ErrorInfo]),
			noop
	end,
	Reply.

%% @doc POST raw JSON to Node's /tx endpoint.
post_tx_json(Node, JSON) ->
	ar_http:req(#{ method => post, peer => peer_ip(Node), path => "/tx", body => JSON }).

%% @doc Fetch a block anchor for new transactions from Node (GET /tx_anchor).
get_tx_anchor(Node) ->
	{ok, {{<<"200">>, _}, _, Reply, _, _}} =
		ar_http:req(#{ method => get, peer => peer_ip(Node), path => "/tx_anchor" }),
	ar_util:decode(Reply).
%% @doc Fetch the confirmation count of TXID from Node's /tx/.../status
%% endpoint. Returns the {<<"number_of_confirmations">>, N} pair, or -1 when
%% the tx status is not found (404).
get_tx_confirmations(Node, TXID) ->
	Response = ar_http:req(#{ method => get, peer => peer_ip(Node),
			path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/status" }),
	case Response of
		{ok, {{<<"200">>, _}, _, Reply, _, _}} ->
			{Status} = ar_serialize:dejsonify(Reply),
			lists:keyfind(<<"number_of_confirmations">>, 1, Status);
		{ok, {{<<"404">>, _}, _, _, _, _}} ->
			-1
	end.

new_mock(Module, Options) ->
	new_mock(Module, Options, 5).

%% @doc Create a meck mock for Module (always with no_link), retrying up to
%% Retries times on errors; treats an already-started mock as success.
new_mock(_Module, _Options, 0) ->
	ok;
new_mock(Module, Options, Retries) ->
	Options2 = lists:usort([no_link | Options]),
	try
		meck:new(Module, Options2)
	catch
		%% If the mock is already started, treat as success
		error:{already_started, _Pid} -> ok;
		%% Retry on other errors
		error:E ->
			?debugFmt("ar_test_node (retries left ~p): Error creating mock for ~p: ~p",
					[Retries - 1, Module, E]),
			timer:sleep(1000),
			new_mock(Module, Options, Retries - 1);
		exit:E ->
			?debugFmt("ar_test_node (retries left ~p): Exit creating mock for ~p: ~p",
					[Retries - 1, Module, E]),
			timer:sleep(1000),
			new_mock(Module, Options, Retries - 1)
	end.

mock_function(Module, Fun, Mock) ->
	mock_function(Module, Fun, Mock, 5).

%% @doc Install Mock as the implementation of Module:Fun via meck:expect,
%% retrying up to Retries times on errors or exits.
mock_function(_Module, _Fun, _Mock, 0) ->
	ok;
mock_function(Module, Fun, Mock, Retries) ->
	try
		meck:expect(Module, Fun, Mock)
	catch
		error:E ->
			?debugFmt("ar_test_node (retries left ~p): Error setting mock for ~p: ~p",
					[Retries - 1, Module, E]),
			timer:sleep(1000),
			mock_function(Module, Fun, Mock, Retries - 1);
		exit:E ->
			?debugFmt("ar_test_node (retries left ~p): Exit setting mock for ~p: ~p",
					[Retries - 1, Module, E]),
			timer:sleep(1000),
			mock_function(Module, Fun, Mock, Retries - 1)
	end.

unmock_module(Module) ->
	unmock_module(Module, 5).
%% @doc Unload the meck mock for Module, retrying up to Retries times. The
%% mocked process (if any) is suspended for the duration of the unload so it
%% cannot call into the mock while it is being torn down.
unmock_module(_Module, 0) ->
	ok;
unmock_module(Module, Retries) ->
	Pid = erlang:whereis(Module),
	case is_pid(Pid) of
		true -> catch sys:suspend(Pid, 5000);
		false -> ok
	end,
	try
		timed_meck_unload(Module, 10000)
	catch
		%% Not mocked in the first place — nothing to do.
		error:{not_mocked, Module} -> ok;
		error:E ->
			?debugFmt("ar_test_node (retries left ~p): Error unloading mock for ~p: ~p",
					[Retries - 1, Module, E]),
			resume_if_alive(Pid),
			timer:sleep(1000),
			unmock_module(Module, Retries - 1);
		exit:E ->
			?debugFmt("ar_test_node (retries left ~p): Exit unloading mock for ~p: ~p",
					[Retries - 1, Module, E]),
			resume_if_alive(Pid),
			timer:sleep(1000),
			unmock_module(Module, Retries - 1)
	after
		resume_if_alive(Pid)
	end.

%% @doc Resume a process previously suspended with sys:suspend, if it is
%% still alive; best-effort (errors are swallowed).
resume_if_alive(Pid) ->
	case is_pid(Pid) andalso erlang:is_process_alive(Pid) of
		true -> catch sys:resume(Pid);
		false -> ok
	end.

%% meck:unload internally uses gen_server:call(..., infinity), so if the meck
%% process is stuck handling a call from a blocked process, it will hang forever
%% and the catch/retry logic above never fires. Wrap it with a finite timeout
%% and kill the stuck meck process if needed.
%%
%% After killing the meck process we must restore the original module from the
%% beam file on disk, because meck's terminate (which normally does this) did
%% not run.
timed_meck_unload(Module, Timeout) ->
	Caller = self(),
	Ref = make_ref(),
	Worker = spawn(fun() ->
		try
			Result = meck:unload(Module),
			Caller ! {Ref, {ok, Result}}
		catch
			Class:Reason ->
				Caller ! {Ref, {Class, Reason}}
		end
	end),
	receive
		{Ref, {ok, Result}} ->
			Result;
		{Ref, {error, Reason}} ->
			error(Reason);
		{Ref, {exit, Reason}} ->
			exit(Reason);
		{Ref, {throw, Reason}} ->
			%% Fix: the worker forwards any caught class, including throw.
			%% Without this clause a throw from meck:unload was silently
			%% dropped, so we waited out the full Timeout and then wrongly
			%% ran the destructive force-restore path below.
			throw(Reason)
	after Timeout ->
		exit(Worker, kill),
		%% meck registers its controller process as <module>_meck; kill it if
		%% it is still around so the retry can start from a clean slate.
		MeckProcName = list_to_atom(atom_to_list(Module) ++ "_meck"),
		case erlang:whereis(MeckProcName) of
			undefined -> ok;
			MeckPid ->
				exit(MeckPid, kill),
				timer:sleep(100)
		end,
		force_restore_module(Module),
		exit(timed_meck_unload_timeout)
	end.

%% After force-killing the meck process, the module is left with meck-generated
%% stub code and no backing ETS tables. Restore the original beam from disk so
%% processes don't crash in an infinite meck stub loop.
force_restore_module(Module) ->
	OrigName = list_to_atom(atom_to_list(Module) ++ "_meck_original"),
	code:purge(Module),
	code:delete(Module),
	code:purge(OrigName),
	code:delete(OrigName),
	code:load_file(Module).

%% @doc Build {Setup, Cleanup} funs (for an EUnit fixture) that install the
%% given {Module, Fun, Mock} triples on the local node and on all test peers,
%% and unmock every touched module again on cleanup. All meck operations run
%% under the global meck lock.
mock_functions(Functions) ->
	{
		fun() ->
			with_meck_lock(fun() ->
				lists:foldl(
					fun({Module, Fun, Mock}, Mocked) ->
						%% Create the mock once per module, locally and on
						%% every test peer.
						NewMocked = case maps:get(Module, Mocked, false) of
							false ->
								new_mock(Module, [passthrough]),
								lists:foreach(
									fun({_TestType, Node}) ->
										remote_call(Node, ar_test_node, new_mock,
												[Module, [no_link, passthrough]])
									end,
									all_peers(test)),
								maps:put(Module, true, Mocked);
							true ->
								Mocked
						end,
						mock_function(Module, Fun, Mock),
						lists:foreach(
							fun({_TestType, Node}) ->
								remote_call(Node, ar_test_node, mock_function,
										[Module, Fun, Mock])
							end,
							all_peers(test)),
						NewMocked
					end,
					maps:new(),
					Functions
				)
			end)
		end,
		fun(Mocked) ->
			with_meck_lock(fun() ->
				maps:fold(
					fun(Module, _, _) ->
						unmock_module(Module),
						lists:foreach(
							fun({_TestType, Node}) ->
								remote_call(Node, ar_test_node, unmock_module, [Module])
							end,
							all_peers(test))
					end,
					noop,
					Mocked
				)
			end)
		end
	}.

%% @doc Execute Fun under a distributed lock to avoid concurrent meck operations.
with_meck_lock(Fun) when is_function(Fun, 0) ->
	global:trans({arweave, meck_lock}, Fun).

test_with_mocked_functions(Functions, TestFun) ->
	test_with_mocked_functions(Functions, TestFun, ?TEST_MOCKED_FUNCTIONS_TIMEOUT).

%% @doc Return an EUnit `foreach' fixture that installs the given mocks
%% around TestFun with the given per-test timeout.
test_with_mocked_functions(Functions, TestFun, Timeout) ->
	{Setup, Cleanup} = mock_functions(Functions),
	{
		foreach,
		Setup, Cleanup,
		[{timeout, Timeout, TestFun}]
	}.
%% @doc Post the given transactions to the miner Node, mine a block, then wait
%% for AwaitOnNode to reach the next height and return the stored block
%% (with its transactions read back in).
post_and_mine(#{ miner := Node, await_on := AwaitOnNode }, TXs) ->
	StartHeight = remote_call(Node, ar_node, get_height, []),
	[assert_post_tx_to_peer(Node, TX) || TX <- TXs],
	mine(Node),
	[{H, _, _} | _] = wait_until_height(AwaitOnNode, StartHeight + 1),
	remote_call(AwaitOnNode, ar_test_node, read_block_when_stored, [H, true],
			?POST_AND_MINE_TIMEOUT).

%% @doc Post the block to the main node and await the expected validation
%% result(s); a single non-list result is wrapped in a list.
post_block(B, ExpectedResult) when not is_list(ExpectedResult) ->
	post_block(B, [ExpectedResult], peer_ip(main));
post_block(B, ExpectedResults) ->
	post_block(B, ExpectedResults, peer_ip(main)).

post_block(B, ExpectedResults, Peer) ->
	Response = send_new_block_with_retry(Peer, B, 2),
	?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, Response),
	await_post_block(B, ExpectedResults, Peer).

%% @doc Serialize the block and POST it to the peer over the binary endpoint.
send_new_block(Peer, B) ->
	Serialized = ar_serialize:block_to_binary(B),
	ar_http_iface_client:send_block_binary(Peer, B#block.indep_hash, Serialized).

%% @doc Like send_new_block/2, but retry a few times when the connection is
%% torn down mid-request.
send_new_block_with_retry(Peer, B, RetriesLeft) ->
	case send_new_block(Peer, B) of
		{error, {stream_error, closed}} when RetriesLeft > 0 ->
			timer:sleep(50),
			send_new_block_with_retry(Peer, B, RetriesLeft - 1);
		Response ->
			Response
	end.

await_post_block(B, ExpectedResults) ->
	await_post_block(B, ExpectedResults, peer_ip(main)).
%% @doc Wait for a block validation event for block B (by indep_hash) and check
%% it against the expected results. Rejections whose reason belongs to
%% PostGossipFailureCodes happen after gossip, so they arrive with no_peer
%% instead of the posting peer. A `new' event is a validation success; it is
%% only acceptable if [valid] was expected, or all remaining expectations are
%% post-gossip failures (in which case keep waiting for the rejection).
await_post_block(#block{ indep_hash = H } = B, ExpectedResults, Peer) ->
	PostGossipFailureCodes = [invalid_denomination,
			invalid_double_signing_proof_same_signature,
			invalid_double_signing_proof_cdiff,
			invalid_double_signing_proof_same_address,
			invalid_double_signing_proof_not_in_reward_history,
			invalid_double_signing_proof_already_banned,
			invalid_double_signing_proof_invalid_signature,
			mining_address_banned, invalid_account_anchors, invalid_reward_pool,
			invalid_miner_reward, invalid_debt_supply, invalid_reward_history_hash,
			invalid_kryder_plus_rate_multiplier_latch, invalid_kryder_plus_rate_multiplier,
			invalid_wallet_list],
	receive
		{event, block, {rejected, Reason, H, Peer2}} ->
			case lists:member(Reason, PostGossipFailureCodes) of
				true ->
					?assertEqual(no_peer, Peer2);
				false ->
					?assertEqual(Peer, Peer2)
			end,
			case lists:member(Reason, ExpectedResults) of
				true ->
					ok;
				_ ->
					?assert(false, iolist_to_binary(io_lib:format("Unexpected "
							"validation failure: ~p. Expected: ~p.",
							[Reason, ExpectedResults])))
			end;
		{event, block, {new, #block{ indep_hash = H }, #{ source := {peer, Peer} }}} ->
			case ExpectedResults of
				[valid] ->
					ok;
				_ ->
					case lists:any(fun(FailureCode) ->
							not lists:member(FailureCode, PostGossipFailureCodes) end,
							ExpectedResults) of
						true ->
							?assert(false, iolist_to_binary(io_lib:format("Unexpected "
									"validation success. Expected: ~p.",
									[ExpectedResults])));
						false ->
							%% All expected failures are post-gossip; keep
							%% waiting for the rejection event.
							await_post_block(B, ExpectedResults)
					end
			end
	after 60_000 ->
		?assert(false, iolist_to_binary(io_lib:format("Timed out. Expected: ~p.",
				[ExpectedResults])))
	end.
%% @doc Sign block B with the given key pair: set the reward key/address,
%% compute the signed hash and the signature preimage (which binds the
%% previous solution hash and cumulative difficulties), sign it, and derive
%% the new independent hash.
sign_block(#block{ cumulative_diff = CDiff } = B, PrevB, {Priv, Pub}) ->
	B2 = B#block{ reward_key = Pub, reward_addr = ar_wallet:to_address(Pub) },
	SignedH = ar_block:generate_signed_hash(B2),
	PrevCDiff = PrevB#block.cumulative_diff,
	SignaturePreimage = ar_block:get_block_signature_preimage(CDiff, PrevCDiff,
			<< (B#block.previous_solution_hash)/binary, SignedH/binary >>, B#block.height),
	Signature = ar_wallet:sign(Priv, SignaturePreimage),
	H = ar_block:indep_hash2(SignedH, Signature),
	B2#block{ indep_hash = H, signature = Signature }.

read_block_when_stored(H) ->
	read_block_when_stored(H, false).

%% @doc Poll the local storage until the block H (and, inner loop, all of its
%% transactions) can be read. When IncludeTXs, return the block with the tx
%% records substituted for the tx ids.
read_block_when_stored(H, IncludeTXs) ->
	{ok, B} = ar_util:do_until(
		fun() ->
			case ar_storage:read_block(H) of
				unavailable ->
					%% NOTE(review): returns `unavailable' rather than
					%% `false' here — presumably ar_util:do_until treats any
					%% non-{ok, _} value as "retry"; confirm against ar_util.
					unavailable;
				B2 ->
					ar_util:do_until(
						fun() ->
							TXs = ar_storage:read_tx(B2#block.txs),
							case lists:any(fun(TX) -> TX == unavailable end, TXs) of
								true ->
									false;
								false ->
									case IncludeTXs of
										true ->
											{ok, B2#block{ txs = TXs }};
										false ->
											{ok, B2}
									end
							end
						end,
						100,
						?READ_BLOCK_TIMEOUT
					)
			end
		end,
		200,
		?READ_BLOCK_TIMEOUT
	),
	B.

get_chunk(Node, Offset) ->
	get_chunk(Node, Offset, undefined).

%% @doc GET the chunk at the given (bucket-based) offset from Node, optionally
%% requesting a specific packing via the x-packing header.
get_chunk(Node, Offset, Packing) ->
	Headers = case Packing of
		undefined ->
			[];
		_ ->
			PackingBinary = iolist_to_binary(ar_serialize:encode_packing(Packing, false)),
			[{<<"x-packing">>, PackingBinary}]
	end,
	ar_http:req(#{ method => get, peer => peer_ip(Node),
			path => "/chunk/" ++ integer_to_list(Offset),
			headers => [{<<"x-bucket-based-offset">>, <<"true">>} | Headers] }).

%% @doc GET the chunk proof at the given (bucket-based) offset from Node.
get_chunk_proof(Node, Offset) ->
	ar_http:req(#{ method => get, peer => peer_ip(Node),
			path => "/chunk_proof/" ++ integer_to_list(Offset),
			headers => [{<<"x-bucket-based-offset">>, <<"true">>}] }).

%% @doc POST a chunk proof to Node's /chunk endpoint.
post_chunk(Node, Proof) ->
	Peer = peer_ip(Node),
	ar_http:req(#{ method => post, peer => Peer, path => "/chunk", body => Proof }).

random_v1_data(Size) ->
	%% Make sure v1 txs do not end with a digit, otherwise they are malleable.
	<< (crypto:strong_rand_bytes(Size - 1))/binary, <<"a">>/binary >>.
%% @doc Poll Node's /tx/.../data endpoint until it serves ExpectedData
%% (base64url-encoded), then additionally fetch the same data chunk by chunk —
%% both backwards from the end offset and forwards from the start — and
%% assert the reassembled data matches.
assert_get_tx_data(Node, TXID, ExpectedData) ->
	?debugFmt("Polling for data of ~s.", [ar_util:encode(TXID)]),
	Peer = peer_ip(Node),
	true = ar_util:do_until(
		fun() ->
			case ar_http:req(#{ method => get, peer => Peer,
					path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/data" }) of
				{ok, {{<<"200">>, _}, _, ExpectedData, _, _}} ->
					true;
				{ok, {{<<"404">>, _}, _, _, _, _}} ->
					%% Not served yet; keep polling.
					false;
				{ok, {{<<"200">>, _}, _, OtherData, _, _}} ->
					%% Got 200 with different data: fail fast on a size
					%% mismatch rather than polling until the timeout.
					?assertEqual(byte_size(ExpectedData), byte_size(OtherData),
							lists:flatten(io_lib:format(
									"TX data size mismatch. TXID: ~s. Peer: ~s.",
									[ar_util:encode(TXID), ar_util:format_peer(Peer)])));
				UnexpectedResponse ->
					?debugFmt("Got unexpected tx data response. TXID: ~s. Peer: ~s. "
							" response: ~p.~n",
							[ar_util:encode(TXID), ar_util:format_peer(Peer),
								UnexpectedResponse]),
					false
			end
		end,
		200,
		?GET_TX_DATA_TIMEOUT
	),
	{ok, {{<<"200">>, _}, _, OffsetJSON, _, _}} = ar_http:req(#{ method => get,
			peer => Peer,
			path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/offset" }),
	Map = jiffy:decode(OffsetJSON, [return_maps]),
	Offset = binary_to_integer(maps:get(<<"offset">>, Map)),
	Size = binary_to_integer(maps:get(<<"size">>, Map)),
	?assertEqual(ExpectedData, get_tx_data_in_chunks(Offset, Size, Peer)),
	?assertEqual(ExpectedData, get_tx_data_in_chunks_traverse_forward(Offset, Size, Peer)).

%% @doc Fetch the tx data chunk by chunk, walking backwards from the end
%% offset, and return the base64url-encoded concatenation.
get_tx_data_in_chunks(Offset, Size, Peer) ->
	get_tx_data_in_chunks(Offset, Offset - Size, Peer, []).

get_tx_data_in_chunks(Offset, Start, _Peer, Bin) when Offset =< Start ->
	ar_util:encode(iolist_to_binary(Bin));
get_tx_data_in_chunks(Offset, Start, Peer, Bin) ->
	{ok, {{<<"200">>, _}, _, JSON, _, _}} = ar_http:req(#{ method => get, peer => Peer,
			path => "/chunk/" ++ integer_to_list(Offset) }),
	Map = jiffy:decode(JSON, [return_maps]),
	Chunk = ar_util:decode(maps:get(<<"chunk">>, Map)),
	%% Prepending while walking backwards keeps the chunks in weave order.
	get_tx_data_in_chunks(Offset - byte_size(Chunk), Start, Peer, [Chunk | Bin]).
%% @doc Reassemble the transaction data by fetching chunks from the start
%% offset forwards. Returns the Base64URL-encoded concatenation.
get_tx_data_in_chunks_traverse_forward(Offset, Size, Peer) ->
	get_tx_data_in_chunks_traverse_forward(Offset, Offset - Size, Peer, []).

get_tx_data_in_chunks_traverse_forward(End, Cursor, _Peer, Acc) when End =< Cursor ->
	%% All chunks collected; they were accumulated in reverse order.
	ar_util:encode(iolist_to_binary(lists:reverse(Acc)));
get_tx_data_in_chunks_traverse_forward(End, Cursor, Peer, Acc) ->
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, peer => Peer,
			path => "/chunk/" ++ integer_to_list(Cursor + 1) }),
	Decoded = jiffy:decode(Body, [return_maps]),
	Chunk = ar_util:decode(maps:get(<<"chunk">>, Decoded)),
	get_tx_data_in_chunks_traverse_forward(End, Cursor + byte_size(Chunk), Peer,
			[Chunk | Acc]).

%% @doc Assert that Node's /tx/<id>/data endpoint replies 404 for TXID.
assert_data_not_found(Node, TXID) ->
	EncodedTXID = binary_to_list(ar_util:encode(TXID)),
	Response = ar_http:req(#{ method => get, peer => peer_ip(Node),
			path => "/tx/" ++ EncodedTXID ++ "/data" }),
	?assertMatch({ok, {{<<"404">>, _}, _, _Body, _, _}}, Response).

%% @doc Return the namespace part of the node name (after the first '-',
%% before the '@').
get_node_namespace() ->
	element(2, split_node_name()).

%% @doc Return the name part of the node name (before the first '-').
get_node() ->
	element(1, split_node_name()).

%% @doc Split node() into {Name, Namespace}. The node name is expected to
%% look like 'name-namespace@host'; the namespace is "" when no '-' is present.
split_node_name() ->
	NodeName = atom_to_list(node()),
	%% Drop the host part after '@' first.
	[NamePart, _Host] = string:split(NodeName, "@"),
	%% Then split on the first '-' only.
	case string:split(NamePart, "-", leading) of
		[Name, Namespace] ->
			{Name, Namespace};
		[OnlyName] ->
			{OnlyName, ""}
	end.

%% @doc Return a currently unused TCP port by briefly binding an ephemeral
%% one. NOTE(review): inherently racy — the port may be taken again before
%% the caller binds it; acceptable for tests.
get_unused_port() ->
	{ok, Socket} = gen_tcp:listen(0, [{port, 0}]),
	{ok, Port} = inet:port(Socket),
	gen_tcp:close(Socket),
	Port.

%% @doc Build the HTTP headers identifying a test peer on the P2P network.
p2p_headers(Node) ->
	PortHeader = {<<"x-p2p-port">>, integer_to_binary(peer_port(Node))},
	ReleaseHeader = {<<"x-release">>, integer_to_binary(?RELEASE_NUMBER)},
	[PortHeader, ReleaseHeader].

%% @doc: get the genesis chunk between a given start and end offset.
-spec get_genesis_chunk(integer()) -> binary().
-spec get_genesis_chunk(integer(), integer()) -> binary().
%% @doc Return the genesis chunk ending at EndOffset. The start offset is
%% the previous chunk boundary; when EndOffset sits exactly on a boundary,
%% the chunk covers the full preceding ?DATA_CHUNK_SIZE bytes.
get_genesis_chunk(EndOffset) ->
	Remainder = EndOffset rem ?DATA_CHUNK_SIZE,
	StartOffset =
		case Remainder of
			0 -> EndOffset - ?DATA_CHUNK_SIZE;
			_ -> EndOffset - Remainder
		end,
	get_genesis_chunk(StartOffset, EndOffset).

%% @doc Return the deterministic genesis data covering [StartOffset, EndOffset).
get_genesis_chunk(StartOffset, EndOffset) ->
	%% ar_weave:generate_data/3 takes the start value (offset div 4) and
	%% the number of bytes to produce.
	ar_weave:generate_data(StartOffset div 4, EndOffset - StartOffset, <<>>).

================================================
FILE: apps/arweave/test/ar_test_runner.erl
================================================
%%%
%%% @doc Test runner utilities for running EUnit tests with various granularity.
%%% Supports running all tests, specific modules, or specific test functions.
%%%
-module(ar_test_runner).

-export([run/1, run/2]).
-export([start_shell/1, stop_shell/1]).
-export([list_tests/1, list_tests_json/1]).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%% @doc Run all tests for a given test type.
%% TestType is 'test' or 'e2e'.
run(TestType) ->
	run_tests(TestType, {modules, default_modules(TestType)}).

%% @doc Run tests based on CLI arguments.
%% Supports:
%% - module run all tests in module
%% - module:test run specific test from module
%% - module1 module2 run all tests in multiple modules
%% - module1 module2:test mixed mode
run(TestType, Args) when is_list(Args) ->
	run_tests(TestType, {mixed, [parse_arg(Arg) || Arg <- Args]}).

%% @doc Start the test environment for interactive shell use (without running tests).
start_shell(TestType) ->
	ensure_started(TestType).

%% @doc Stop the test environment started by start_shell/1.
stop_shell(TestType) ->
	ar_test_node:stop_peers(TestType),
	init:stop().

%% Parse a CLI argument into either {module, Mod} or {test, Mod, Test}.
%% Atoms are taken as module names; strings may carry a "module:test" form.
parse_arg(Arg) when is_atom(Arg) ->
	{module, Arg};
parse_arg(Arg) when is_list(Arg) ->
	case string:split(Arg, ":") of
		[ModuleName, TestName] ->
			{test, list_to_atom(ModuleName), list_to_atom(TestName)};
		[ModuleName] ->
			{module, list_to_atom(ModuleName)}
	end.
%% @doc List all tests in a module.
%% Returns a sorted list of {Module, Test} tuples. Includes both EUnit test
%% generators ("..._test_") and simple zero-arity tests ("..._test"),
%% consistent with what spec_to_eunit/1 is able to run. Previously only
%% generators were listed, so plain _test functions never showed up in the
%% CI listing. The two suffixes are disjoint, so no export matches twice.
list_tests(Mod) when is_atom(Mod) ->
	Exports = Mod:module_info(exports),
	Tests = lists:filtermap(
		fun({Name, 0}) ->
			NameStr = atom_to_list(Name),
			case lists:suffix("_test_", NameStr)
					orelse lists:suffix("_test", NameStr) of
				true -> {true, {Mod, Name}};
				false -> false
			end;
		(_) -> false
		end,
		Exports
	),
	lists:sort(Tests);
list_tests(Mods) when is_list(Mods) ->
	lists:flatmap(fun list_tests/1, Mods).

%% @doc Output tests as JSON for CI systems.
%% Prints a JSON array of {"module": ..., "test": ...} objects to stdout.
%% Module and test names are atoms, so no JSON string escaping is needed.
list_tests_json(Mods) ->
	Tests = list_tests(Mods),
	JsonItems = lists:map(
		fun({Mod, Test}) ->
			io_lib:format("{\"module\":\"~s\",\"test\":\"~s\"}", [Mod, Test])
		end,
		Tests
	),
	JsonArray = "[" ++ string:join(JsonItems, ",") ++ "]",
	io:format("~s~n", [JsonArray]).

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% The default module set per test type: e2e has a fixed list; the full
%% test suite is read from a checked-in manifest file.
default_modules(e2e) ->
	[ar_sync_pack_mine_tests, ar_repack_mine_tests, ar_repack_in_place_mine_tests];
default_modules(test) ->
	load_default_modules("scripts/full_test_modules.txt").

%% Boot the peers, run the EUnit spec, and always stop the peers afterwards.
%% A non-ok EUnit result exits the VM with status 1 (for CI).
run_tests(TestType, TestSpec) ->
	ensure_started(TestType),
	TotalTimeout =
		case TestType of
			e2e -> ?E2E_TEST_SUITE_TIMEOUT;
			_ -> ?TEST_SUITE_TIMEOUT
		end,
	Result =
		try
			EunitSpec = build_eunit_spec(TotalTimeout, TestSpec),
			eunit:test(EunitSpec, [verbose, {print_depth, 100}])
		after
			ar_test_node:stop_peers(TestType)
		end,
	case Result of
		ok -> ok;
		_ -> init:stop(1)
	end.

%% Start the config, limiter, main node and peers. On failure, report and
%% request VM shutdown. NOTE(review): init:stop/1 is asynchronous, so the
%% caller may briefly continue after a failed start before the VM goes down.
ensure_started(TestType) ->
	try
		arweave_config:start(),
		ok = arweave_limiter:start(),
		start_for_tests(TestType),
		ar_test_node:boot_peers(TestType),
		ar_test_node:wait_for_peers(TestType)
	catch Type:Reason:S ->
		io:format("Failed to start the peers due to ~p:~p:~p~n", [Type, Reason, S]),
		init:stop(1)
	end.
%% Wrap the requested modules or mixed specs in a single EUnit timeout spec.
%% An empty module list simply yields {timeout, Timeout, []}.
build_eunit_spec(Timeout, {modules, Mods}) ->
	{timeout, Timeout, Mods};
build_eunit_spec(Timeout, {mixed, Specs}) ->
	{timeout, Timeout, [spec_to_eunit(Spec) || Spec <- Specs]}.

%% Translate a parsed CLI spec into an EUnit test representation.
spec_to_eunit({module, Mod}) ->
	Mod;
spec_to_eunit({test, Mod, Test}) ->
	%% Check if it's a generator (_test_) or simple test (_test).
	case lists:suffix("_test_", atom_to_list(Test)) of
		true ->
			%% Generator - returns a test spec.
			{generator, fun() -> Mod:Test() end};
		false ->
			%% Simple test function - run directly.
			{Mod, Test}
	end.

%% Read the module manifest file; crash with a descriptive error otherwise.
load_default_modules(Path) ->
	case file:read_file(Path) of
		{ok, Bin} ->
			parse_default_modules(binary_to_list(Bin));
		{error, Reason} ->
			erlang:error({failed_to_load_test_modules, Path, Reason})
	end.

%% One module name per line; blank lines and '#' comments are skipped.
parse_default_modules(Content) ->
	lists:filtermap(fun parse_default_module_line/1,
			string:split(Content, "\n", all)).

parse_default_module_line(Line) ->
	case string:trim(Line) of
		"" -> false;
		[$# | _] -> false;
		ModuleName -> {true, list_to_atom(ModuleName)}
	end.

%% Start the main node with a throwaway, namespaced data dir and a free port.
start_for_tests(TestType) ->
	Namespace = ar_test_node:get_node_namespace(),
	DataDir = ".tmp/data_" ++ atom_to_list(TestType) ++ "_main_" ++ Namespace,
	TestConfig = #config{
		debug = true,
		peers = [],
		data_dir = DataDir,
		port = ar_test_node:get_unused_port(),
		disable = [randomx_jit],
		'http_client.http.keepalive' = 4_000,
		auto_join = false
	},
	ar:start(TestConfig).

================================================
FILE: apps/arweave/test/ar_tx_blacklist_tests.erl
================================================
-module(ar_tx_blacklist_tests).

-export([init/2]).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include("ar.hrl").

-import(ar_test_node, [ sign_v1_tx/2, random_v1_data/1, wait_until_height/2,
		assert_wait_until_height/2]).

%% Cowboy handler entry point for the test blacklist HTTP endpoints.
init(Req, State) ->
	PathSegments = ar_http_iface_server:split_path(cowboy_req:path(Req)),
	handle(PathSegments, Req, State).
%% Test HTTP endpoints backing the transaction_blacklist_urls config:
%% /empty serves nothing, /good serves one blacklisted TX ID, and
%% /bad/and/good serves valid IDs mixed with an invalid Base64URL line.
handle([<<"empty">>], Req, State) ->
	{ok, cowboy_req:reply(200, #{}, <<>>, Req), State};
handle([<<"good">>], Req, State) ->
	{ok, cowboy_req:reply(200, #{}, ar_util:encode(hd(State)), Req), State};
handle([<<"bad">>, <<"and">>, <<"good">>], Req, State) ->
	Reply = list_to_binary(
		io_lib:format(
			"~s\nbad base64url \n~s\n",
			lists:map(fun ar_util:encode/1, State)
		)
	),
	{ok, cowboy_req:reply(200, #{}, Reply, Req), State}.

uses_blacklists_test_() ->
	{timeout, 300, fun test_uses_blacklists/0}.

%% @doc End-to-end blacklist/whitelist test: sync a weave with blacklisted
%% transactions (from files and HTTP endpoints), assert blacklisted data is
%% erased and rejected while everything else is kept, then mutate the
%% blacklists at runtime and finally fork the weave to check that offsets
%% are re-evaluated after a reorg.
test_uses_blacklists() ->
	{
		BlacklistFiles, B0, Wallet, TXs, GoodTXIDs, BadTXIDs, V1TX,
		GoodOffsets, BadOffsets, DataTrees
	} = setup(),
	WhitelistFile = random_filename(),
	ok = file:write_file(WhitelistFile, <<>>),
	RewardAddr = ar_wallet:to_address(ar_wallet:new_keyfile()),
	{ok, Config} = arweave_config:get_env(),
	StorageModule = {30 * ?MiB, 0, {composite, RewardAddr, 1}},
	try
		ar_test_node:start(#{
			b0 => B0,
			addr => RewardAddr,
			config => Config#config{
				transaction_blacklist_files = BlacklistFiles,
				transaction_whitelist_files = [WhitelistFile],
				sync_jobs = 10,
				transaction_blacklist_urls = [
					%% Serves empty body.
					"http://localhost:1985/empty",
					%% Serves a valid TX ID (one from the BadTXIDs list).
					"http://localhost:1985/good",
					%% Serves some valid TX IDs (from the BadTXIDs list) and a line
					%% with invalid Base64URL.
					"http://localhost:1985/bad/and/good"
				],
				enable = [pack_served_chunks | Config#config.enable]},
			storage_modules => [StorageModule]
		}),
		ar_test_node:connect_to_peer(peer1),
		BadV1TXIDs = [V1TX#tx.id],
		%% Post one TX per block via peer1; the v1 TX rides along in the
		%% last block. The main node syncs the chunks as blocks arrive.
		lists:foreach(
			fun({TX, Height}) ->
				ar_test_node:assert_post_tx_to_peer(peer1, TX),
				ar_test_node:assert_wait_until_receives_txs([TX]),
				case Height == length(TXs) of
					true ->
						ar_test_node:assert_post_tx_to_peer(peer1, V1TX),
						ar_test_node:assert_wait_until_receives_txs([V1TX]);
					_ ->
						ok
				end,
				ar_test_node:mine(peer1),
				upload_data([TX], DataTrees),
				wait_until_height(main, Height)
			end,
			lists:zip(TXs, lists:seq(1, length(TXs)))
		),
		assert_present_txs(GoodTXIDs),
		assert_present_txs(BadTXIDs), % V2 headers must not be removed.
		assert_removed_txs(BadV1TXIDs),
		assert_present_offsets(GoodOffsets),
		assert_removed_offsets(BadOffsets),
		%% Verify at the chunk-storage level too: no blacklisted (padded)
		%% offset may remain in the configured storage module.
		StoreID = ar_storage_module:id(StorageModule),
		Chunks = ar_chunk_storage:get_range(0, 30 * ?MiB, StoreID),
		ChunkOffsets = [Offset || {Offset, _Chunk} <- Chunks],
		?debugFmt("chunk offsets: ~p ~n good offsets: ~p ~n bad offsets: ~p~n",
			[ChunkOffsets, GoodOffsets, BadOffsets]),
		?assert(lists:all(fun(BadOffset) ->
				not lists:member(ar_block:get_chunk_padded_offset(BadOffset), ChunkOffsets)
			end, lists:flatten(BadOffsets))),
		assert_does_not_accept_offsets(BadOffsets),
		%% Add a new transaction to the blacklist, add a blacklisted transaction to whitelist.
		ok = file:write_file(lists:nth(3, BlacklistFiles), <<>>),
		ok = file:write_file(WhitelistFile, ar_util:encode(lists:nth(2, BadTXIDs))),
		ok = file:write_file(lists:nth(4, BlacklistFiles), io_lib:format("~s~n~s",
			[ar_util:encode(hd(GoodTXIDs)), ar_util:encode(V1TX#tx.id)])),
		[UnblacklistedOffsets, WhitelistOffsets | BadOffsets2] = BadOffsets,
		RestoredOffsets = [UnblacklistedOffsets, WhitelistOffsets]
			++ [lists:nth(6, lists:reverse(BadOffsets))],
		BadOffsets3 = BadOffsets2 -- [lists:nth(6, lists:reverse(BadOffsets))],
		[_UnblacklistedTXID, _WhitelistTXID | BadTXIDs2] = BadTXIDs,
		%% Expect the transaction data to be resynced.
		assert_present_offsets(RestoredOffsets),
		%% Expect the freshly blacklisted transaction to be erased.
		assert_present_txs([hd(GoodTXIDs)]), % V2 headers must not be removed.
		assert_removed_offsets([hd(GoodOffsets)]),
		assert_does_not_accept_offsets([hd(GoodOffsets)]),
		%% Expect the previously blacklisted transactions to stay blacklisted.
		assert_present_txs(BadTXIDs2), % V2 headers must not be removed.
		assert_removed_txs(BadV1TXIDs),
		assert_removed_offsets(BadOffsets3),
		assert_does_not_accept_offsets(BadOffsets3),
		%% Blacklist the last transaction. Fork the weave. Assert the blacklisted offsets are moved.
		ar_test_node:disconnect_from(peer1),
		TX = ar_test_node:sign_tx(Wallet, #{
			data => crypto:strong_rand_bytes(?DATA_CHUNK_SIZE),
			last_tx => ar_test_node:get_tx_anchor(peer1) }),
		ar_test_node:assert_post_tx_to_peer(main, TX),
		ar_test_node:mine(),
		[{_, WeaveSize, _} | _] = wait_until_height(main, length(TXs) + 1),
		assert_present_offsets([[WeaveSize]]),
		ok = file:write_file(lists:nth(3, BlacklistFiles), ar_util:encode(TX#tx.id)),
		assert_removed_offsets([[WeaveSize]]),
		%% Build a longer fork on peer1 where TX lands at a different offset.
		TX2 = sign_v1_tx(Wallet, #{ data => random_v1_data(2 * ?DATA_CHUNK_SIZE),
				last_tx => ar_test_node:get_tx_anchor(peer1) }),
		ar_test_node:assert_post_tx_to_peer(peer1, TX2),
		ar_test_node:mine(peer1),
		assert_wait_until_height(peer1, length(TXs) + 1),
		ar_test_node:assert_post_tx_to_peer(peer1, TX),
		ar_test_node:mine(peer1),
		assert_wait_until_height(peer1, length(TXs) + 2),
		ar_test_node:connect_to_peer(peer1),
		[{_, WeaveSize2, _} | _] = wait_until_height(main, length(TXs) + 2),
		%% After the reorg, the blacklisted TX occupies the new tip offset
		%% while the old offset (now TX2's data) must be restored.
		assert_removed_offsets([[WeaveSize2]]),
		assert_present_offsets([[WeaveSize]])
	after
		teardown(Config)
	end.

%% @doc Build the full test fixture: a genesis block, ten 10-chunk v2
%% transactions, one v1 transaction, the blacklist files, the HTTP endpoint
%% listener on peer1, and the derived good/bad TXID and offset sets.
setup() ->
	{B0, Wallet} = setup(peer1),
	{TXs, DataTrees} = create_txs(Wallet),
	TXIDs = [TX#tx.id || TX <- TXs],
	BadTXIDs = [lists:nth(1, TXIDs), lists:nth(3, TXIDs)],
	V1TX = sign_v1_tx(Wallet, #{
		data => random_v1_data(3 * ?DATA_CHUNK_SIZE),
		last_tx => ar_test_node:get_tx_anchor(peer1),
		reward => ?AR(10000)
	}),
	DataSizes = [TX#tx.data_size || TX <- TXs],
	S0 = B0#block.block_size,
	[S1, S2, S3, S4, S5, S6, S7, S8 | _] = DataSizes,
	%% Absolute end offsets of the blacklisted transactions.
	BadOffsets = [S0 + O || O <- [S1, S1 + S2 + S3, % Blacklisted in the file.
		S1 + S2 + S3 + S4 + S5,
		S1 + S2 + S3 + S4 + S5 + S6 + S7]], % Blacklisted in the endpoint.
	BlacklistFiles = create_files([V1TX#tx.id | BadTXIDs],
		[{S0 + S1 + S2 + S3 + ?DATA_CHUNK_SIZE,
				S0 + S1 + S2 + S3 + ?DATA_CHUNK_SIZE * 2},
			{S0 + S1 + S2 + S3 + S4 + S5,
				S0 + S1 + S2 + S3 + S4 + S5 + ?DATA_CHUNK_SIZE * 5},
			% This one just repeats the range of a blacklisted tx:
			{S0 + S1 + S2 + S3 + S4 + S5 + S6,
				S0 + S1 + S2 + S3 + S4 + S5 + S6 + S7}
		]),
	BadTXIDs2 = [lists:nth(5, TXIDs), lists:nth(7, TXIDs)], % The endpoint.
	BadTXIDs3 = [lists:nth(4, TXIDs), lists:nth(6, TXIDs)], % Ranges.
	%% Serve BadTXIDs2 from the local cowboy listener used as a
	%% transaction_blacklist_urls source.
	Routes = [{"/[...]", ar_tx_blacklist_tests, BadTXIDs2}],
	{ok, _PID} = ar_test_node:remote_call(peer1, cowboy, start_clear, [
		ar_tx_blacklist_test_listener,
		[{port, 1985}],
		#{ env => #{ dispatch => cowboy_router:compile([{'_', Routes}]) } }
	]),
	GoodTXIDs = TXIDs -- (BadTXIDs ++ BadTXIDs2 ++ BadTXIDs3),
	BadOffsets2 = lists:map(
		fun(TXOffset) ->
			%% Every TX in this test consists of 10 chunks.
			%% Only every second chunk is uploaded in this test
			%% for (originally) blacklisted transactions.
			%% NOTE(review): upload_data/2 appears to post all 10 chunks —
			%% confirm whether this comment is stale.
			[TXOffset - ?DATA_CHUNK_SIZE * I || I <- lists:seq(0, 9, 2)]
		end,
		BadOffsets
	),
	BadOffsets3 = BadOffsets2 ++ [S0 + O || O <- [S1 + S2 + S3 + ?DATA_CHUNK_SIZE * 2,
		S1 + S2 + S3 + S4 + S5 + ?DATA_CHUNK_SIZE,
		S1 + S2 + S3 + S4 + S5 + ?DATA_CHUNK_SIZE * 2,
		S1 + S2 + S3 + S4 + S5 + ?DATA_CHUNK_SIZE * 3,
		S1 + S2 + S3 + S4 + S5 + ?DATA_CHUNK_SIZE * 4,
		S1 + S2 + S3 + S4 + S5 + ?DATA_CHUNK_SIZE * 5]], % Blacklisted as a range.
	GoodOffsets = [S0 + O || O <- [S1 + S2, S1 + S2 + S3 + S4,
		S1 + S2 + S3 + S4 + S5 + S6, S1 + S2 + S3 + S4 + S5 + S6 + S7 + S8]],
	GoodOffsets2 = lists:map(
		fun(TXOffset) ->
			%% Every TX in this test consists of 10 chunks.
			%% The -- only removes the flat (integer) members of BadOffsets3;
			%% its nested lists never match an integer element.
			[TXOffset - ?DATA_CHUNK_SIZE * I || I <- lists:seq(0, 9)] -- BadOffsets3
		end,
		GoodOffsets
	),
	{
		BlacklistFiles, B0, Wallet, TXs, GoodTXIDs,
		BadTXIDs ++ BadTXIDs2 ++ BadTXIDs3, V1TX,
		GoodOffsets2, BadOffsets3, DataTrees
	}.
%% @doc Start the given peer node from a fresh genesis block and return
%% {GenesisBlock, Wallet}. Chunk serving with packing is enabled.
setup(Node) ->
	{ok, Config} = ar_test_node:get_config(Node),
	Wallet = {_, Pub} = ar_test_node:remote_call(Node, ar_wallet, new_keyfile, []),
	RewardAddr = ar_wallet:to_address(Pub),
	[B0] = ar_weave:init([{RewardAddr, ?AR(100000000), <<>>}]),
	ar_test_node:start_peer(Node, B0, RewardAddr, Config#config{
		enable = [pack_served_chunks | Config#config.enable] }),
	{B0, Wallet}.

%% @doc Create ten format-2 transactions, each carrying 10 chunks of random
%% data. Returns {TXs, DataTrees} where DataTrees maps TXID to the
%% {MerkleTree, Chunks} needed later to build upload proofs.
create_txs(Wallet) ->
	lists:foldl(
		fun (_, {TXs, DataTrees}) ->
			Chunks = lists:sublist(
				ar_tx:chunk_binary(?DATA_CHUNK_SIZE,
					crypto:strong_rand_bytes(10 * ?DATA_CHUNK_SIZE)),
				10 ), % Exclude empty chunk created by chunk_to_binary.
			SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(
				ar_tx:chunks_to_size_tagged_chunks(Chunks)
			),
			{DataRoot, DataTree} = ar_merkle:generate_tree(SizedChunkIDs),
			TX = ar_test_node:sign_tx(Wallet, #{
				format => 2,
				data_root => DataRoot,
				data_size => 10 * ?DATA_CHUNK_SIZE,
				last_tx => ar_test_node:get_tx_anchor(peer1),
				reward => ?AR(10000),
				denomination => 1
			}),
			{[TX | TXs], maps:put(TX#tx.id, {DataTree, Chunks}, DataTrees)}
		end,
		{[], #{}},
		lists:seq(1, 10)
	).

%% @doc Write the five blacklist files exercised by the test: an empty file,
%% a file with only garbage, a file with one TX ID, a file mixing TX IDs,
%% garbage and one offset range, and a file with two offset ranges.
%% Returns the list of file names in that order.
create_files(BadTXIDs, [{Start1, End1}, {Start2, End2}, {Start3, End3}]) ->
	Files = [
		{random_filename(), <<>>},
		{random_filename(), <<"bad base64url ">>},
		{random_filename(), ar_util:encode(lists:nth(2, BadTXIDs))},
		{random_filename(), list_to_binary(
			io_lib:format(
				"~s\nbad base64url \n~s\n~s\n~B,~B\n",
				%% Three ~s slots consume the three encoded TX IDs;
				%% the trailing ~B,~B consume Start1/End1.
				lists:map(fun ar_util:encode/1, BadTXIDs) ++ [Start1, End1]
			)
		)},
		{random_filename(),
			list_to_binary(io_lib:format("~B,~B\n~B,~B",
				[Start2, End2, Start3, End3]))}
	],
	lists:foreach(
		fun ({Filename, Binary}) ->
			ok = file:write_file(Filename, Binary)
		end,
		Files
	),
	[Filename || {Filename, _} <- Files].

%% @doc Generate a unique blacklist file name inside peer1's data directory.
random_filename() ->
	{ok, Config} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	filename:join(Config#config.data_dir,
		"ar-tx-blacklist-tests-transaction-blacklist-"
		++ binary_to_list(ar_util:encode(crypto:strong_rand_bytes(32)))).
%% @doc JSON-encode a chunk proof map for POSTing to the /chunk endpoint.
encode_chunk(Proof) ->
	ar_serialize:jsonify(#{
		chunk => ar_util:encode(maps:get(chunk, Proof)),
		data_path => ar_util:encode(maps:get(data_path, Proof)),
		data_root => ar_util:encode(maps:get(data_root, Proof)),
		data_size => integer_to_binary(maps:get(data_size, Proof)),
		offset => integer_to_binary(maps:get(offset, Proof))
	}).

%% @doc Upload every chunk of each TX to peer1, generating a Merkle proof per
%% chunk from the tree stored in DataTrees. All 10 chunks are posted for every
%% TX. (The redundant UploadChunks alias of ChunkOffsets was removed; it was a
%% leftover suggesting some chunks were once filtered out before upload.)
upload_data(TXs, DataTrees) ->
	lists:foreach(
		fun(TX) ->
			#tx{ id = TXID, data_root = DataRoot, data_size = DataSize } = TX,
			{DataTree, Chunks} = maps:get(TXID, DataTrees),
			%% Pair each chunk with its end offset within the TX data.
			ChunkOffsets = lists:zip(Chunks,
				lists:seq(?DATA_CHUNK_SIZE, 10 * ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE)),
			lists:foreach(
				fun({Chunk, Offset}) ->
					%% generate_path/post take a 0-based offset inside the chunk,
					%% hence Offset - 1.
					DataPath = ar_merkle:generate_path(DataRoot, Offset - 1, DataTree),
					{ok, {{<<"200">>, _}, _, _, _, _}} =
						ar_test_node:post_chunk(peer1, encode_chunk(#{
							data_root => DataRoot,
							chunk => Chunk,
							data_path => DataPath,
							offset => Offset - 1,
							data_size => DataSize
						}))
				end,
				ChunkOffsets
			)
		end,
		TXs
	).

%% @doc Poll until every TXID is readable from ar_storage, then assert the
%% confirmation data is also present.
assert_present_txs(GoodTXIDs) ->
	?debugFmt("Waiting until these txids are stored: ~p.",
			[[ar_util:encode(TXID) || TXID <- GoodTXIDs]]),
	true = ar_util:do_until(
		fun() ->
			lists:all(
				fun(TXID) ->
					is_record(ar_storage:read_tx(TXID), tx)
				end,
				GoodTXIDs
			)
		end,
		500,
		10000
	),
	lists:foreach(
		fun(TXID) ->
			?assertMatch({ok, {_, _}}, ar_storage:get_tx_confirmation_data(TXID))
		end,
		GoodTXIDs
	).

%% @doc Poll until every TXID's data and header are gone, while the
%% confirmation data is still kept (it must survive blacklisting).
assert_removed_txs(BadTXIDs) ->
	?debugFmt("Waiting until these txids are removed: ~p.",
			[[ar_util:encode(TXID) || TXID <- BadTXIDs]]),
	true = ar_util:do_until(
		fun() ->
			lists:all(
				fun(TXID) ->
					{error, not_found} == ar_data_sync:get_tx_data(TXID)
						%% Do not use ar_storage:read_tx because the
						%% transaction is temporarily kept in the disk cache,
						%% even when blacklisted.
						andalso ar_kv:get(tx_db, TXID) == not_found
				end,
				BadTXIDs
			)
		end,
		500,
		30000
	),
	%% We have to keep the confirmation data even for blacklisted transactions.
	lists:foreach(
		fun(TXID) ->
			?assertMatch({ok, {_, _}}, ar_storage:get_tx_confirmation_data(TXID))
		end,
		BadTXIDs
	).
%% @doc Poll the main node until every (possibly nested) offset in
%% GoodOffsets serves a chunk with HTTP 200.
assert_present_offsets(GoodOffsets) ->
	true = ar_util:do_until(
		fun() ->
			lists:all(
				fun(Offset) ->
					case ar_test_node:get_chunk(main, Offset) of
						{ok, {{<<"200">>, _}, _, _, _, _}} ->
							true;
						_ ->
							?debugFmt("Waiting until the end offset ~B is stored.",
									[Offset]),
							false
					end
				end,
				lists:flatten(GoodOffsets)
			)
		end,
		500,
		120000
	).

%% @doc Poll the main node until every offset in BadOffsets answers 404.
assert_removed_offsets(BadOffsets) ->
	true = ar_util:do_until(
		fun() ->
			lists:all(
				fun(Offset) ->
					case ar_test_node:get_chunk(main, Offset) of
						{ok, {{<<"404">>, _}, _, _, _, _}} ->
							true;
						_ ->
							?debugFmt("Waiting until the end offset ~B is removed.",
									[Offset]),
							false
					end
				end,
				lists:flatten(BadOffsets)
			)
		end,
		500,
		60000
	).

%% @doc For every blacklisted offset: fetch the chunk from peer1 (which still
%% has it), re-post it to the main node, and assert that although the POST is
%% accepted with a 200, the main node still refuses to store or serve it.
assert_does_not_accept_offsets(BadOffsets) ->
	true = ar_util:do_until(
		fun() ->
			lists:all(
				fun(Offset) ->
					case ar_test_node:get_chunk(main, Offset) of
						{ok, {{<<"404">>, _}, _, _, _, _}} ->
							{ok, {{<<"200">>, _}, _, EncodedProof, _, _}} =
								ar_test_node:get_chunk(peer1, Offset),
							Proof = decode_chunk(EncodedProof),
							DataPath = maps:get(data_path, Proof),
							%% Rebuild the proof relative to the TX data root
							%% extracted from the data path.
							{ok, DataRoot} = ar_merkle:extract_root(DataPath),
							RelativeOffset = ar_merkle:extract_note(DataPath),
							Proof2 = Proof#{
								offset => RelativeOffset - 1,
								data_root => DataRoot,
								data_size => 10 * ?DATA_CHUNK_SIZE
							},
							EncodedProof2 = encode_chunk(Proof2),
							%% The node returns 200 but does not store the chunk.
							case ar_test_node:post_chunk(main, EncodedProof2) of
								{ok, {{<<"200">>, _}, _, _, _, _}} ->
									case ar_test_node:get_chunk(main, Offset) of
										{ok, {{<<"404">>, _}, _, _, _, _}} ->
											true;
										_ ->
											false
									end;
								_ ->
									false
							end;
						_ ->
							false
					end
				end,
				lists:flatten(BadOffsets)
			)
		end,
		500,
		60000
	).

%% @doc Decode a JSON chunk proof into the internal proof-of-access map.
decode_chunk(EncodedProof) ->
	ar_serialize:json_map_to_poa_map(
		jiffy:decode(EncodedProof, [return_maps])
	).

%% @doc Stop the test blacklist HTTP listener and restore the saved config.
teardown(Config) ->
	ok = ar_test_node:remote_call(peer1, cowboy, stop_listener,
			[ar_tx_blacklist_test_listener]),
	arweave_config:set_env(Config).
================================================
FILE: apps/arweave/test/ar_tx_replay_pool_tests.erl
================================================
-module(ar_tx_replay_pool_tests).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_pricing.hrl").
-include_lib("eunit/include/eunit.hrl").

verify_block_txs_test_() ->
	{timeout, 30, fun test_verify_block_txs/0}.

%% @doc Table-driven test of ar_tx_replay_pool:verify_block_txs/1 around the
%% 2.0 fork height: block anchors, wallet-list anchors, outdated anchors,
%% conflicting/chained anchors, overspending, duplicates and weave replays.
%% For every case, also checks that pick_txs_to_mine/1 yields a set that
%% verifies as valid.
test_verify_block_txs() ->
	Key1 = ar_wallet:new(),
	Key2 = ar_wallet:new(),
	%% Enough random anchors to push a real block anchor out of the
	%% accepted depth window.
	RandomBlockAnchors =
		[crypto:strong_rand_bytes(32)
			|| _ <- lists:seq(1, ar_block:get_max_tx_anchor_depth())],
	BlockAnchorTXAtForkHeight = tx(Key1, fee(ar_fork:height_2_0()), <<"hash">>),
	BlockAnchorTXAfterForkHeight = tx(Key1, fee(ar_fork:height_2_0() + 1), <<"hash">>),
	Timestamp = os:system_time(second),
	TestCases = [
		#{
			title => "Fork height 2.0 accepts block anchors",
			txs => [tx(Key1, fee(ar_fork:height_2_0()), <<"hash">>)],
			height => ar_fork:height_2_0(),
			block_anchors => [<<"hash">>],
			recent_txs_map => #{},
			wallet_list => [wallet(Key1, fee(ar_fork:height_2_0()))],
			expected_result => valid
		},
		#{
			title => "After fork height 2.0 accepts block anchors",
			txs => [tx(Key1, fee(ar_fork:height_2_0() + 1), <<"hash">>)],
			height => ar_fork:height_2_0() + 1,
			block_anchors => [<<"hash">>],
			recent_txs_map => #{},
			wallet_list => [wallet(Key1, fee(ar_fork:height_2_0() + 1))],
			expected_result => valid
		},
		#{
			title => "Fork height 2.0 rejects outdated block anchors",
			txs => [
				tx(
					Key1,
					fee(ar_fork:height_2_0()),
					crypto:strong_rand_bytes(32)
				)
			],
			block_anchors => RandomBlockAnchors,
			recent_txs_map => #{},
			height => ar_fork:height_2_0(),
			wallet_list => [wallet(Key1, fee(ar_fork:height_2_0()))],
			expected_result => invalid
		},
		#{
			title => "Fork height 2.0 accepts wallet list anchors",
			txs => [
				tx(Key1, fee(ar_fork:height_2_0()), <<>>),
				tx(Key2, fee(ar_fork:height_2_0()), <<>>)
			],
			height => ar_fork:height_2_0(),
			wallet_list => [
				wallet(Key1, fee(ar_fork:height_2_0())),
				wallet(Key2, fee(ar_fork:height_2_0()))
			],
			block_anchors => [],
			recent_txs_map => #{},
			expected_result => valid
		},
		#{
			title => "After fork height 2.0 accepts wallet list anchors",
			txs => [
				tx(Key1, fee(ar_fork:height_2_0() + 1), <<>>),
				tx(Key2, fee(ar_fork:height_2_0() + 1), <<>>)
			],
			height => ar_fork:height_2_0() + 1,
			wallet_list => [
				wallet(Key1, fee(ar_fork:height_2_0() + 1)),
				wallet(Key2, fee(ar_fork:height_2_0() + 1))
			],
			block_anchors => [],
			recent_txs_map => #{},
			expected_result => valid
		},
		#{
			title => "Fork height 2.0 rejects conflicting wallet list anchors",
			txs => [
				tx(Key1, fee(ar_fork:height_2_0()), <<>>),
				tx(Key1, fee(ar_fork:height_2_0()), <<>>)
			],
			height => ar_fork:height_2_0(),
			block_anchors => [],
			recent_txs_map => #{},
			wallet_list => [wallet(Key1, 2 * fee(ar_fork:height_2_0()))],
			expected_result => invalid
		},
		#{
			title => "Fork height 2.0 rejects chained wallet list anchors",
			txs => make_tx_chain(Key1, ar_fork:height_2_0()),
			height => ar_fork:height_2_0(),
			block_anchors => [],
			recent_txs_map => #{},
			wallet_list => [wallet(Key1, 2 * fee(ar_fork:height_2_0()))],
			expected_result => invalid
		},
		#{
			title => "Fork height 2.0 rejects conflicting balances",
			txs => [
				tx(Key1, fee(ar_fork:height_2_0()), <<>>),
				tx(Key1, fee(ar_fork:height_2_0()), <<>>)
			],
			height => ar_fork:height_2_0(),
			wallet_list => [wallet(Key1, erlang:trunc(1.5 * fee(ar_fork:height_2_0())))],
			block_anchors => [],
			recent_txs_map => #{},
			expected_result => invalid
		},
		#{
			title => "Fork height 2.0 rejects duplicates",
			txs => [BlockAnchorTXAtForkHeight, BlockAnchorTXAtForkHeight],
			height => ar_fork:height_2_0(),
			block_anchors => [],
			recent_txs_map => #{},
			wallet_list => [wallet(Key1, 2 * fee(ar_fork:height_2_0()))],
			expected_result => invalid
		},
		#{
			title => "After fork height 2.0 rejects duplicates",
			txs => [BlockAnchorTXAfterForkHeight, BlockAnchorTXAfterForkHeight],
			height => ar_fork:height_2_0() + 1,
			block_anchors => [],
			recent_txs_map => #{},
			wallet_list => [wallet(Key1, 2 * fee(ar_fork:height_2_0() + 1))],
			expected_result => invalid
		},
		#{
			title => "Fork height 2.0 rejects txs from the weave",
			txs => [BlockAnchorTXAtForkHeight],
			height => ar_fork:height_2_0(),
			block_anchors => [<<"hash">>, <<"otherhash">>],
			recent_txs_map => #{
				<<"txid">> => ok,
				<<"txid2">> => ok,
				BlockAnchorTXAtForkHeight#tx.id => ok
			},
			wallet_list => [wallet(Key1, fee(ar_fork:height_2_0()))],
			expected_result => invalid
		},
		#{
			title => "After fork height 2.0 rejects txs from the weave",
			txs => [BlockAnchorTXAfterForkHeight],
			height => ar_fork:height_2_0() + 1,
			block_anchors => [<<"hash">>, <<"otherhash">>],
			recent_txs_map => #{
				<<"txid">> => ok,
				<<"txid2">> => ok,
				BlockAnchorTXAfterForkHeight#tx.id => ok
			},
			wallet_list => [wallet(Key1, fee(ar_fork:height_2_0() + 1))],
			expected_result => invalid
		}
	],
	lists:foreach(
		fun(#{
			title := Title,
			txs := TXs,
			height := Height,
			wallet_list := WL,
			block_anchors := BlockAnchors,
			recent_txs_map := RecentTXMap,
			expected_result := ExpectedResult
		}) ->
			%% Fixed pricing parameters shared by all cases.
			Rate = {1, 4},
			PricePerGiBMinute = 2000,
			KryderPlusRateMultiplier = 1,
			Denomination = 1,
			RedenominationHeight = 0,
			Wallets = maps:from_list([{A, {B, LTX}} || {A, B, LTX} <- WL]),
			?debugFmt("~s:~n", [Title]),
			?assertEqual(
				ExpectedResult,
				ar_tx_replay_pool:verify_block_txs({TXs, Rate, PricePerGiBMinute,
						KryderPlusRateMultiplier, Denomination, Height,
						RedenominationHeight, Timestamp, Wallets, BlockAnchors,
						RecentTXMap}),
				Title),
			%% Whatever subset the miner picks must itself verify as valid.
			PickedTXs = ar_tx_replay_pool:pick_txs_to_mine({BlockAnchors, RecentTXMap,
					Height, RedenominationHeight, Rate, PricePerGiBMinute,
					KryderPlusRateMultiplier, Denomination, Timestamp, Wallets, TXs}),
			?assertEqual(
				valid,
				ar_tx_replay_pool:verify_block_txs({PickedTXs, Rate, PricePerGiBMinute,
						KryderPlusRateMultiplier, Denomination, Height,
						RedenominationHeight, Timestamp, Wallets, BlockAnchors,
						RecentTXMap}),
				lists:flatten(
					io_lib:format("Verifying after picking_txs_to_mine: ~s:", [Title])
				)
			)
		end,
		TestCases
	).

%% @doc Two transactions from the same wallet where the second anchors the
%% first (a chained wallet-list anchor, which must be rejected in one block).
make_tx_chain(Key, Height) ->
	TX1 = tx(Key, fee(Height), <<>>),
	TX2 = tx(Key, fee(Height), TX1#tx.id),
	[TX1, TX2].
%% @doc Build and sign a format-2 transaction with the given reward (fee)
%% and anchor (last_tx).
tx(Key = {_, {_, Owner}}, Reward, Anchor) ->
	ar_tx:sign(
		#tx{
			format = 2,
			owner = Owner,
			reward = Reward,
			last_tx = Anchor
		},
		Key
	).

%% @doc Build a wallet-list entry {Address, Balance, LastTX} for the key.
wallet({_, Pub}, Balance) ->
	{ar_wallet:to_address(Pub), Balance, <<>>}.

%% @doc The minimum fee for a zero-size transaction at the given height.
fee(Height) ->
	ar_tx:get_tx_fee({0, 2000, 1, <<>>, #{}, Height + 1}).

================================================
FILE: apps/arweave/test/ar_tx_tests.erl
================================================
-module(ar_tx_tests).

-include("ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").

-import(ar_test_node, [wait_until_height/2, assert_wait_until_height/2,
		read_block_when_stored/1, random_v1_data/1]).

%% -------------------------------------------------------------------------------------------
%% Test registration
%% -------------------------------------------------------------------------------------------

accepts_gossips_and_mines_test_() ->
	PrepareTestFor = fun(BuildTXSetFun, KeyType) ->
		fun() ->
			%% The weave has to be initialised under the fork so that
			%% we can get the correct price estimations according
			%% to the new pricing model.
			Key = {_, Pub} = ar_wallet:new(KeyType),
			Wallets = [{ar_wallet:to_address(Pub), ?AR(5), <<>>}],
			[B0] = ar_weave:init(Wallets),
			accepts_gossips_and_mines(B0, BuildTXSetFun(Key, B0))
		end
	end,
	[
		{timeout, ?TEST_NODE_TIMEOUT, {
			"One RSA transaction with wallet list anchor followed by one with block anchor",
			PrepareTestFor(fun one_wallet_list_one_block_anchored_txs/2, ?RSA_KEY_TYPE)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"One ECDSA transaction with wallet list anchor followed by one with block anchor",
			PrepareTestFor(fun one_wallet_list_one_block_anchored_txs/2, ?ECDSA_KEY_TYPE)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"Two RSA transactions with block anchor",
			PrepareTestFor(fun two_block_anchored_txs/2, ?RSA_KEY_TYPE)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"Two ECDSA transactions with block anchor",
			PrepareTestFor(fun two_block_anchored_txs/2, ?ECDSA_KEY_TYPE)
		}}
	].
polls_for_transactions_and_gossips_and_mines_test_() ->
	PrepareTestFor = fun(BuildTXSetFun, KeyType) ->
		fun() ->
			%% The weave has to be initialised under the fork so that
			%% we can get the correct price estimations according
			%% to the new pricing model.
			Key = {_, Pub} = ar_wallet:new(KeyType),
			Wallets = [{ar_wallet:to_address(Pub), ?AR(5), <<>>}],
			[B0] = ar_weave:init(Wallets),
			polls_for_transactions_and_gossips_and_mines(B0, BuildTXSetFun(Key, B0))
		end
	end,
	[
		{timeout, ?TEST_NODE_TIMEOUT, {
			"Two RSA transactions with block anchor",
			PrepareTestFor(fun two_block_anchored_txs/2, ?RSA_KEY_TYPE)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"Two ECDSA transactions with block anchor",
			PrepareTestFor(fun two_block_anchored_txs/2, ?ECDSA_KEY_TYPE)
		}}
	].

keeps_txs_after_new_block_test_() ->
	PrepareTestFor = fun(BuildFirstTXSetFun, BuildSecondTXSetFun) ->
		fun() ->
			Key = {_, Pub} = ar_wallet:new(),
			%% A non-standard (66-byte) RSA wallet for the second TX set.
			Key2 = {_, Pub2} = ar_test_node:new_custom_size_rsa_wallet(66),
			Wallets = [{ar_wallet:to_address(Pub), ?AR(5), <<>>},
					{ar_wallet:to_address(Pub2), ?AR(5), <<>>}],
			[B0] = ar_weave:init(Wallets),
			keeps_txs_after_new_block(
				B0,
				BuildFirstTXSetFun(Key, B0),
				BuildSecondTXSetFun(Key2, B0)
			)
		end
	end,
	[
		%% Main node receives the second set then the first set. Peer node only
		%% receives the second set.
		{timeout, ?TEST_NODE_TIMEOUT, {
			"First set: two block anchored txs, second set: empty",
			PrepareTestFor(fun two_block_anchored_txs/2, fun empty_tx_set/2)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"First set: empty, second set: two block anchored txs",
			PrepareTestFor(fun empty_tx_set/2, fun two_block_anchored_txs/2)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"First set: two block anchored txs, second set: two block anchored txs",
			PrepareTestFor(fun two_block_anchored_txs/2, fun two_block_anchored_txs/2)
		}}
	].
%% @doc Test generator: posting TXs whose total cost exceeds the wallet balance
%% must eject the overspending TX from the mempool.
returns_error_when_txs_exceed_balance_test_() ->
	PrepareTestFor = fun(BuildTXSetFun) ->
		fun() -> returns_error_when_txs_exceed_balance(BuildTXSetFun) end
	end,
	[
		{timeout, ?TEST_NODE_TIMEOUT, {
			"Three transactions with block anchor",
			PrepareTestFor(fun block_anchor_txs_spending_balance_plus_one_more/2)
		}},
		{timeout, ?TEST_NODE_TIMEOUT, {
			"Five transactions with mixed anchors",
			PrepareTestFor(fun mixed_anchor_txs_spending_balance_plus_one_more/2)
		}}
	].

%% @doc Test generator: TXs grouped by block size must be mined into separate
%% blocks so that each block stays under the size limit.
mines_blocks_under_the_size_limit_test_() ->
	PrepareTestFor = fun(BuildTXSetFun) ->
		fun() ->
			{B0, TXGroups} = BuildTXSetFun(),
			mines_blocks_under_the_size_limit(B0, TXGroups)
		end
	end,
	[
		{
			"Five transactions with block anchors",
			{timeout, ?TEST_NODE_TIMEOUT, PrepareTestFor(fun() -> grouped_txs() end)}
		}
	].

joins_network_successfully_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun joins_network_successfully/0}.

recovers_from_forks_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun() -> recovers_from_forks(7) end}.

rejects_transactions_above_the_size_limit_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_rejects_transactions_above_the_size_limit/0}.

accepts_at_most_one_wallet_list_anchored_tx_per_block_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT,
			fun test_accepts_at_most_one_wallet_list_anchored_tx_per_block/0}.

does_not_allow_to_spend_mempool_tokens_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_does_not_allow_to_spend_mempool_tokens/0}.

does_not_allow_to_replay_empty_wallet_txs_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_does_not_allow_to_replay_empty_wallet_txs/0}.

rejects_txs_with_outdated_anchors_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun() ->
		%% Post a transaction anchoring the block at
		%% ar_block:get_max_tx_anchor_depth() + 1.
		%%
		%% Expect the transaction to be rejected.
		Key = {_, Pub} = ar_wallet:new(),
		[B0] = ar_weave:init([
			{ar_wallet:to_address(Pub), ?AR(20), <<>>}
		]),
		_ = ar_test_node:start_peer(peer1, B0),
		mine_blocks(peer1, ar_block:get_max_tx_anchor_depth()),
		assert_wait_until_height(peer1, ar_block:get_max_tx_anchor_depth()),
		%% B0 is now deeper than the maximum anchor depth.
		TX1 = ar_test_node:sign_v1_tx(Key, #{ last_tx => B0#block.indep_hash }),
		{ok, {{<<"400">>, _}, _, <<"Invalid anchor (last_tx).">>, _, _}} =
				ar_test_node:post_tx_to_peer(peer1, TX1)
	end}.

drops_v1_txs_exceeding_mempool_limit_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun test_drops_v1_txs_exceeding_mempool_limit/0}.

drops_v2_txs_exceeding_mempool_limit_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun drops_v2_txs_exceeding_mempool_limit/0}.

mines_format_2_txs_without_size_limit_test_() ->
	{timeout, ?TEST_NODE_TIMEOUT, fun mines_format_2_txs_without_size_limit/0}.

%% -------------------------------------------------------------------------------------------
%% Test functions
%% -------------------------------------------------------------------------------------------

%% @doc Post TXs (built by the given funs) to peer1, expect them to be gossiped
%% to main, mined into a block by peer1, and the block accepted by main.
accepts_gossips_and_mines(B0, TXFuns) ->
	%% Post the given transactions made from the given wallets to a node.
	%%
	%% Expect them to be accepted, gossiped to the peer and included into the block.
	%% Expect the block to be accepted by the peer.
	_ = ar_test_node:start(B0),
	_ = ar_test_node:start_peer(peer1, B0),
	%% Sign here after the node has started to get the correct price
	%% estimation from it.
	TXs = lists:map(fun(TXFun) -> TXFun() end, TXFuns),
	ar_test_node:connect_to_peer(peer1),
	%% Post the transactions to peer1.
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX),
			%% Expect transactions to be gossiped to main.
			ar_test_node:assert_wait_until_receives_txs([TX])
		end,
		TXs
	),
	%% Mine a block.
	ar_test_node:mine(peer1),
	%% Expect both transactions to be included into block.
	PeerBI = assert_wait_until_height(peer1, 1),
	TXIDs = lists:map(fun(TX) -> TX#tx.id end, TXs),
	?assertEqual(
		lists:sort(TXIDs),
		lists:sort((ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
				[hd(PeerBI)]))#block.txs)
	),
	lists:foreach(
		fun(TX) ->
			?assertEqual(TX, ar_test_node:remote_call(peer1, ar_storage, read_tx, [TX#tx.id]))
		end,
		TXs
	),
	%% Expect the block to be accepted by main.
	BI = wait_until_height(main, 1),
	?assertEqual(
		lists:sort(TXIDs),
		lists:sort((read_block_when_stored(hd(BI)))#block.txs)
	),
	lists:foreach(
		fun(TX) -> ?assertEqual(TX, ar_storage:read_tx(TX#tx.id)) end,
		TXs
	).

%% @doc Same as accepts_gossips_and_mines/2 but with push propagation disabled
%% (max_propagation_peers = 0) so main must poll the TXs from peer1.
polls_for_transactions_and_gossips_and_mines(B0, TXFuns) ->
	%% Post the given transactions made from the given wallets to a node.
	%%
	%% Expect them to be accepted, fetched by the peer we did not push them to
	%% and included into the block.
	%% Expect the block to be accepted by the peer.
	{ok, MainConfig} = arweave_config:get_env(),
	{ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	try
		MainConfig2 = MainConfig#config{ max_propagation_peers = 0 },
		_ = ar_test_node:start(#{ b0 => B0, config => MainConfig2 }),
		PeerConfig2 = PeerConfig#config{ max_propagation_peers = 0 },
		_ = ar_test_node:start_peer(peer1, #{ b0 => B0, config => PeerConfig2 }),
		%% Sign here after the node has started to get the correct price
		%% estimation from it.
		TXs = lists:map(fun(TXFun) -> TXFun() end, TXFuns),
		ar_test_node:connect_to_peer(peer1),
		%% Post the transactions to peer1.
		lists:foreach(
			fun(TX) ->
				ar_test_node:assert_post_tx_to_peer(peer1, TX),
				%% Expect transactions to be fetched by main.
				ar_test_node:assert_wait_until_receives_txs([TX])
			end,
			TXs
		),
		%% Mine a block.
		ar_test_node:mine(peer1),
		%% Expect both transactions to be included into block.
		PeerBI = assert_wait_until_height(peer1, 1),
		TXIDs = lists:map(fun(TX) -> TX#tx.id end, TXs),
		?assertEqual(
			lists:sort(TXIDs),
			lists:sort((ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
					[hd(PeerBI)]))#block.txs)
		),
		lists:foreach(
			fun(TX) ->
				?assertEqual(TX,
						ar_test_node:remote_call(peer1, ar_storage, read_tx, [TX#tx.id]))
			end,
			TXs
		),
		%% Expect the block to be accepted by main.
		BI = wait_until_height(main, 1),
		?assertEqual(
			lists:sort(TXIDs),
			lists:sort((read_block_when_stored(hd(BI)))#block.txs)
		),
		lists:foreach(
			fun(TX) -> ?assertEqual(TX, ar_storage:read_tx(TX#tx.id)) end,
			TXs
		)
	after
		%% Restore the original configuration even if the test fails.
		arweave_config:set_env(MainConfig),
		ar_test_node:set_config(peer1, PeerConfig)
	end.

%% @doc Verify the mempool keeps TXs a freshly received block did not include,
%% and that the set difference is mined into the next block on main.
keeps_txs_after_new_block(B0, FirstTXSetFuns, SecondTXSetFuns) ->
	%% Post the transactions from the first set to a node but do not gossip them.
	%% Post transactions from the second set to both nodes.
	%% Mine a block with transactions from the second set on a different node
	%% and gossip it to the node with transactions.
	%%
	%% Expect the block to be accepted.
	%% Expect transactions from the difference between the two sets to be kept in the mempool.
	%% Mine a block on the first node, expect the difference to be included into the block.
	{ok, MainConfig} = arweave_config:get_env(),
	{ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	try
		%% Disable the TX poller on both nodes so TXs only move when we post them.
		MainConfig2 = MainConfig#config{ disable = [tx_poller | MainConfig#config.disable] },
		_ = ar_test_node:start(#{ b0 => B0, config => MainConfig2 }),
		PeerConfig2 = PeerConfig#config{ disable = [tx_poller | PeerConfig#config.disable] },
		_ = ar_test_node:start_peer(peer1, #{ b0 => B0, config => PeerConfig2 }),
		%% Sign here after the node has started to get the correct price
		%% estimation from it.
		FirstTXSet = lists:map(fun(TXFun) -> TXFun() end, FirstTXSetFuns),
		SecondTXSet = lists:map(fun(TXFun) -> TXFun() end, SecondTXSetFuns),
		%% Disconnect the nodes so that peer1 does not receive txs.
		ar_test_node:disconnect_from(peer1),
		%% Post both sets to main (peer1 is disconnected so it receives nothing).
		lists:foreach(
			fun(TX) -> ar_test_node:post_tx_to_peer(main, TX) end,
			SecondTXSet ++ FirstTXSet
		),
		?assertEqual([], ar_test_node:remote_call(peer1, ar_mempool, get_all_txids, [])),
		%% Post transactions from the second set to peer1.
		lists:foreach(
			fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX) end,
			SecondTXSet
		),
		%% Wait to make sure the tx will not be gossiped upon reconnect.
		timer:sleep(2000), % == 2 * ?CHECK_MEMPOOL_FREQUENCY
		%% Connect the nodes and mine a block on peer1.
		ar_test_node:connect_to_peer(peer1),
		ar_test_node:mine(peer1),
		%% Expect main to receive the block.
		BI = wait_until_height(main, 1),
		SecondSetTXIDs = lists:map(fun(TX) -> TX#tx.id end, SecondTXSet),
		?assertEqual(lists:sort(SecondSetTXIDs),
				lists:sort((read_block_when_stored(hd(BI)))#block.txs)),
		%% Expect main to have the set difference in the mempool.
		ar_test_node:assert_wait_until_receives_txs(FirstTXSet -- SecondTXSet),
		%% Mine a block on main and expect both transactions to be included.
		ar_test_node:mine(),
		BI2 = wait_until_height(main, 2),
		SetDifferenceTXIDs = lists:map(fun(TX) -> TX#tx.id end, FirstTXSet -- SecondTXSet),
		?assertEqual(
			lists:sort(SetDifferenceTXIDs),
			lists:sort((read_block_when_stored(hd(BI2)))#block.txs)
		)
	after
		%% Restore the original configuration even if the test fails.
		arweave_config:set_env(MainConfig),
		ar_test_node:set_config(peer1, PeerConfig)
	end.

%% @doc Post a set of TXs whose total cost exceeds the wallet balance; expect
%% the lowest-priority TX to be ejected from the mempool and rejected with an
%% "overspend" error on resubmission.
returns_error_when_txs_exceed_balance(BuildTXSetFun) ->
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(20), <<>>}]),
	_ = ar_test_node:start(B0),
	_ = ar_test_node:start_peer(peer1, B0),
	TXs = BuildTXSetFun(Key, B0),
	ar_test_node:connect_to_peer(peer1),
	%% Expect the post for all TXs (including the balance exceeding one) to
	%% succeed. However immediately after adding each TX to the mempool,
	%% we'll check whether any balances are exceeded and eject the TXs that
	%% exceed the balance.
	%% The ordering used is {Utility, TXID} - so TXs with
	%% the same Utility but with a lower alphanumeric ID will be ejected first.
	SortedTXs = lists:sort(
		fun (TX1, TX2) ->
			% Sort in reverse order - "biggest" first.
			{ar_tx:utility(TX1), TX1#tx.id} > {ar_tx:utility(TX2), TX2#tx.id}
		end,
		TXs
	),
	ExceedBalanceTX = lists:last(SortedTXs),
	BelowBalanceTXs = lists:droplast(SortedTXs),
	lists:foreach(
		fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX, false) end,
		TXs
	),
	ar_test_node:assert_wait_until_receives_txs(BelowBalanceTXs),
	%% Expect only the first two to be included into the block.
	ar_test_node:mine(peer1),
	PeerBI = assert_wait_until_height(peer1, 1),
	TXIDs = lists:map(fun(TX) -> TX#tx.id end, BelowBalanceTXs),
	?assertEqual(
		lists:sort(TXIDs),
		lists:sort((ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
				[hd(PeerBI)]))#block.txs)
	),
	BI = wait_until_height(main, 1),
	?assertEqual(
		lists:sort(TXIDs),
		lists:sort((read_block_when_stored(hd(BI)))#block.txs)
	),
	%% Post the balance exceeding transaction again
	%% and expect the balance exceeded error.
	ar_test_node:remote_call(peer1, ets, delete, [ignored_ids, ExceedBalanceTX#tx.id]),
	{ok, {{<<"400">>, _}, _, _Body, _, _}} = ar_http:req(#{
		method => post,
		peer => ar_test_node:peer_ip(peer1),
		path => "/tx",
		body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(ExceedBalanceTX))
	}),
	?assertEqual({ok, ["overspend"]},
			ar_test_node:remote_call(peer1, ar_tx_db, get_error_codes,
					[ExceedBalanceTX#tx.id])).

%% @doc A v1 TX with data above ?TX_DATA_SIZE_LIMIT must fail verification.
test_rejects_transactions_above_the_size_limit() ->
	%% Create a genesis block with a wallet.
	Key1 = {_, Pub1} = ar_wallet:new(),
	Key2 = {_, Pub2} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub1), ?AR(20), <<>>},
		{ar_wallet:to_address(Pub2), ?AR(20), <<>>}
	]),
	%% Start the node.
	_ = ar_test_node:start_peer(peer1, B0),
	_ = ar_test_node:connect_to_peer(peer1),
	SmallData = random_v1_data(?TX_DATA_SIZE_LIMIT),
	BigData = random_v1_data(?TX_DATA_SIZE_LIMIT + 1),
	GoodTX = ar_test_node:sign_v1_tx(Key1, #{ data => SmallData }),
	ar_test_node:assert_post_tx_to_peer(peer1, GoodTX),
	BadTX = ar_test_node:sign_v1_tx(Key2, #{ data => BigData }),
	?assertMatch(
		{ok, {{<<"400">>, _}, _, <<"Transaction verification failed.">>, _, _}},
		ar_test_node:post_tx_to_peer(peer1, BadTX)
	),
	?assertMatch(
		{ok, ["tx_fields_too_large"]},
		ar_test_node:remote_call(peer1, ar_tx_db, get_error_codes, [BadTX#tx.id])
	).

%% @doc Only one wallet-list-anchored TX per wallet may sit in the mempool;
%% a second one chained to a mempool TX is rejected, while a block-anchored
%% TX is still accepted and mined.
test_accepts_at_most_one_wallet_list_anchored_tx_per_block() ->
	%% Post a TX, mine a block.
	%% Post another TX referencing the first one.
	%% Post the third TX referencing the second one.
	%%
	%% Expect the third to be rejected.
	%%
	%% Post the fourth TX referencing the block.
	%%
	%% Expect the fourth TX to be accepted and mined into a block.
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(20), <<>>}
	]),
	_ = ar_test_node:start_peer(peer1, B0),
	_ = ar_test_node:connect_to_peer(peer1),
	TX1 = ar_test_node:sign_v1_tx(Key),
	ar_test_node:assert_post_tx_to_peer(peer1, TX1),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 1),
	TX2 = ar_test_node:sign_v1_tx(Key, #{ last_tx => TX1#tx.id }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX2),
	TX3 = ar_test_node:sign_v1_tx(Key, #{ last_tx => TX2#tx.id }),
	{ok, {{<<"400">>, _}, _, <<"Invalid anchor (last_tx from mempool).">>, _, _}} =
			ar_test_node:post_tx_to_peer(peer1, TX3),
	TX4 = ar_test_node:sign_v1_tx(Key, #{ last_tx => B0#block.indep_hash }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX4),
	ar_test_node:mine(peer1),
	PeerBI = assert_wait_until_height(peer1, 2),
	B2 = ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
			[hd(PeerBI)]),
	?assertEqual([TX2#tx.id, TX4#tx.id], B2#block.txs).
%% @doc Tokens sent by a mempool (unconfirmed) TX cannot be spent until the
%% sending TX is mined into a block.
test_does_not_allow_to_spend_mempool_tokens() ->
	%% Post a transaction sending tokens to a wallet with few tokens.
	%% Post the second transaction spending the new tokens.
	%%
	%% Expect the second transaction to be rejected.
	%%
	%% Mine a block.
	%% Post another transaction spending the rest of tokens from the new wallet.
	%%
	%% Expect the transaction to be accepted.
	Key1 = {_, Pub1} = ar_wallet:new(),
	Key2 = {_, Pub2} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub1), ?AR(20), <<>>},
		{ar_wallet:to_address(Pub2), ?AR(0), <<>>}
	]),
	_ = ar_test_node:start_peer(peer1, B0),
	_ = ar_test_node:connect_to_peer(peer1),
	TX1 = ar_test_node:sign_v1_tx(Key1, #{
		target => ar_wallet:to_address(Pub2),
		reward => ?AR(1),
		quantity => ?AR(2)
	}),
	ar_test_node:assert_post_tx_to_peer(peer1, TX1),
	%% Attempt to spend the not-yet-confirmed tokens from the empty wallet.
	TX2 = ar_test_node:sign_v1_tx(
		Key2,
		#{
			target => ar_wallet:to_address(Pub1),
			reward => ?AR(1),
			quantity => ?AR(1),
			last_tx => B0#block.indep_hash,
			tags => [{<<"nonce">>, <<"1">>}]
		}
	),
	{ok, {{<<"400">>, _}, _, _, _, _}} = ar_test_node:post_tx_to_peer(peer1, TX2),
	?assertEqual({ok, ["overspend"]},
			ar_test_node:remote_call(peer1, ar_tx_db, get_error_codes, [TX2#tx.id])),
	ar_test_node:mine(peer1),
	PeerBI = assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
			[hd(PeerBI)]),
	?assertEqual([TX1#tx.id], B1#block.txs),
	%% After TX1 is confirmed, the received tokens are spendable.
	TX3 = ar_test_node:sign_v1_tx(
		Key2,
		#{
			target => ar_wallet:to_address(Pub1),
			reward => ?AR(1),
			quantity => ?AR(1),
			last_tx => B1#block.indep_hash,
			tags => [{<<"nonce">>, <<"3">>}]
		}
	),
	ar_test_node:assert_post_tx_to_peer(peer1, TX3),
	ar_test_node:mine(peer1),
	PeerBI2 = assert_wait_until_height(peer1, 2),
	B2 = ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
			[hd(PeerBI2)]),
	?assertEqual([TX3#tx.id], B2#block.txs).

%% @doc A TX that emptied a wallet cannot be replayed after the wallet is
%% re-funded to the same balance.
test_does_not_allow_to_replay_empty_wallet_txs() ->
	%% Create a new wallet by sending some tokens to it. Mine a block.
	%% Send the tokens back so that the wallet balance is back to zero. Mine a block.
	%% Send the same amount of tokens to the same wallet again. Mine a block.
	%% Try to replay the transaction which sent the tokens back (before and after mining).
	%%
	%% Expect the replay to be rejected.
	Key1 = {_, Pub1} = ar_wallet:new(),
	Key2 = {_, Pub2} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub1), ?AR(50), <<>>}
	]),
	_ = ar_test_node:start_peer(peer1, B0),
	TX1 = ar_test_node:sign_v1_tx(Key1, #{
		target => ar_wallet:to_address(Pub2),
		reward => ?AR(6),
		quantity => ?AR(2),
		last_tx => <<>>
	}),
	ar_test_node:assert_post_tx_to_peer(peer1, TX1),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 1),
	GetBalancePath = binary_to_list(ar_util:encode(ar_wallet:to_address(Pub2))),
	{ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(peer1),
		path => "/wallet/" ++ GetBalancePath ++ "/balance"
	}),
	Balance = binary_to_integer(Body),
	%% Send everything back, spending the whole balance on quantity + reward.
	TX2 = ar_test_node:sign_v1_tx(Key2, #{
		target => ar_wallet:to_address(Pub1),
		reward => Balance - ?AR(1),
		quantity => ?AR(1),
		last_tx => <<>>
	}),
	ar_test_node:assert_post_tx_to_peer(peer1, TX2),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 2),
	{ok, {{<<"200">>, _}, _, Body2, _, _}} = ar_http:req(#{
		method => get,
		peer => ar_test_node:peer_ip(peer1),
		path => "/wallet/" ++ GetBalancePath ++ "/balance"
	}),
	?assertEqual(0, binary_to_integer(Body2)),
	%% Re-fund the emptied wallet with the same amount.
	TX3 = ar_test_node:sign_v1_tx(Key1, #{
		target => ar_wallet:to_address(Pub2),
		reward => ?AR(6),
		quantity => ?AR(2),
		last_tx => TX1#tx.id
	}),
	ar_test_node:assert_post_tx_to_peer(peer1, TX3),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 3),
	%% Remove the replay TX from the ignore list (to simulate e.g. a node restart).
	ar_test_node:remote_call(peer1, ets, delete, [ignored_ids, TX2#tx.id]),
	{ok, {{<<"400">>, _}, _, <<"Invalid anchor (last_tx).">>, _, _}} =
			ar_test_node:post_tx_to_peer(peer1, TX2).
%% @doc Post all TXs from the given groups, then mine one block per group and
%% assert each block contains exactly its group's TXs.
mines_blocks_under_the_size_limit(B0, TXGroups) ->
	%% Post the given transactions grouped by block size to a node.
	%%
	%% Expect them to be mined into the corresponding number of blocks so that
	%% each block fits under the limit.
	_ = ar_test_node:start(B0),
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	lists:foreach(
		fun(TX) ->
			ar_test_node:assert_post_tx_to_peer(peer1, TX),
			ar_test_node:assert_wait_until_receives_txs([TX])
		end,
		lists:flatten(TXGroups)
	),
	%% Mine blocks, expect the transactions there.
	lists:foldl(
		fun(Group, Height) ->
			ar_test_node:mine(peer1),
			PeerBI = assert_wait_until_height(peer1, Height),
			GroupTXIDs = lists:map(fun(TX) -> TX#tx.id end, Group),
			?assertEqual(
				lists:sort(GroupTXIDs),
				lists:sort(
					(ar_test_node:remote_call(peer1, ar_test_node, read_block_when_stored,
							[hd(PeerBI)]))#block.txs
				),
				io_lib:format("Height ~B", [Height])
			),
			assert_wait_until_txs_are_stored(GroupTXIDs),
			Height + 1
		end,
		1,
		TXGroups
	).

%% @doc Poll (every 200 ms, up to 60 s) until every given TXID reads back from
%% local storage as a #tx{} record.
assert_wait_until_txs_are_stored(TXIDs) ->
	ar_util:do_until(
		fun() ->
			lists:all(fun(TX) -> is_record(TX, tx) end, ar_storage:read_tx(TXIDs))
		end,
		200,
		60_000
	).
%% @doc Format-2 TX data does not count towards the block data size limit:
%% a full block (?BLOCK_TX_COUNT_LIMIT TXs) may carry total data above
%% ?BLOCK_TX_DATA_SIZE_LIMIT.
mines_format_2_txs_without_size_limit() ->
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(20), <<>>}
	]),
	_ = ar_test_node:start(B0),
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	%% Size each TX so that all of them together stay within the mempool limit.
	ChunkSize = ?MEMPOOL_DATA_SIZE_LIMIT div (?BLOCK_TX_COUNT_LIMIT + 1),
	lists:foreach(
		fun(N) ->
			TX = ar_test_node:sign_tx(
				Key,
				#{
					last_tx => B0#block.indep_hash,
					data => << <<1>> || _ <- lists:seq(1, ChunkSize) >>,
					tags => [{<<"nonce">>, integer_to_binary(N)}]
				}
			),
			ar_test_node:assert_post_tx_to_peer(peer1, TX),
			ar_test_node:assert_wait_until_receives_txs([TX])
		end,
		lists:seq(1, ?BLOCK_TX_COUNT_LIMIT + 1)
	),
	ar_test_node:mine(),
	[{H, _, _} | _] = wait_until_height(main, 1),
	B = read_block_when_stored(H),
	%% One TX over the count limit is left out; the count limit still applies.
	?assertEqual(?BLOCK_TX_COUNT_LIMIT, length(B#block.txs)),
	TotalSize = lists:sum([(ar_storage:read_tx(TXID))#tx.data_size
			|| TXID <- B#block.txs]),
	?assert(TotalSize > ?BLOCK_TX_DATA_SIZE_LIMIT).

%% @doc A v1 TX that does not fit into the mempool data size limit is dropped
%% (post still returns 200 but the TX never enters the mempool).
test_drops_v1_txs_exceeding_mempool_limit() ->
	%% Post transactions which exceed the mempool size limit.
	%%
	%% Expect the exceeding transaction to be dropped.
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(20), <<>>}
	]),
	_ = ar_test_node:start_peer(peer1, B0),
	BigChunk = random_v1_data(?TX_DATA_SIZE_LIMIT - ?TX_SIZE_BASE),
	TXs = lists:map(
		fun(N) ->
			ar_test_node:sign_v1_tx(Key, #{
				last_tx => B0#block.indep_hash,
				data => BigChunk,
				tags => [{<<"nonce">>, integer_to_binary(N)}]
			})
		end,
		lists:seq(1, 6)
	),
	lists:foreach(
		fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX) end,
		lists:sublist(TXs, 5)
	),
	Peer1 = ar_test_node:peer_ip(peer1),
	{{ok, Mempool1}, Peer1} = ar_http_iface_client:get_mempool(Peer1),
	%% The transactions have the same utility therefore they are sorted in the
	%% order of submission.
	?assertEqual([TX#tx.id || TX <- lists:sublist(TXs, 5)], Mempool1),
	Last = lists:last(TXs),
	{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} =
			ar_test_node:post_tx_to_peer(peer1, Last, false),
	{{ok, Mempool2}, Peer1} = ar_http_iface_client:get_mempool(Peer1),
	%% There is no place for the last transaction in the mempool.
	?assertEqual([TX#tx.id || TX <- lists:sublist(TXs, 5)], Mempool2).

%% @doc A v2 TX that overflows the mempool evicts lower-priority TXs; a
%% data-stripped header is always accepted since it carries no data weight.
drops_v2_txs_exceeding_mempool_limit() ->
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(20), <<>>}
	]),
	_ = ar_test_node:start_peer(peer1, B0),
	BigChunk = crypto:strong_rand_bytes(?TX_DATA_SIZE_LIMIT div 2),
	TXs = lists:map(
		fun(N) ->
			ar_test_node:sign_tx(Key, #{
				last_tx => B0#block.indep_hash,
				%% The 11th TX is twice the size of the others.
				data => case N of
					11 -> << BigChunk/binary, BigChunk/binary >>;
					_ -> BigChunk
				end,
				tags => [{<<"nonce">>, integer_to_binary(N)}]
			})
		end,
		lists:seq(1, 11)
	),
	lists:foreach(
		fun(TX) -> ar_test_node:assert_post_tx_to_peer(peer1, TX) end,
		lists:sublist(TXs, 10)
	),
	Peer1 = ar_test_node:peer_ip(peer1),
	{{ok, Mempool1}, Peer1} = ar_http_iface_client:get_mempool(Peer1),
	%% The transactions have the same utility therefore they are sorted in the
	%% order of submission.
	?assertEqual([TX#tx.id || TX <- lists:sublist(TXs, 10)], Mempool1),
	Last = lists:last(TXs),
	{ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} =
			ar_test_node:post_tx_to_peer(peer1, Last, false),
	{{ok, Mempool2}, Peer1} = ar_http_iface_client:get_mempool(Peer1),
	%% The last TX is twice as big and twice as valuable so it replaces two
	%% other transactions in the memory pool.
	?assertEqual([Last#tx.id | [TX#tx.id || TX <- lists:sublist(TXs, 8)]], Mempool2),
	%% Strip the data out. Expect the header to be accepted.
	StrippedTX = ar_test_node:sign_tx(Key, #{
		last_tx => B0#block.indep_hash,
		data => BigChunk,
		tags => [{<<"nonce">>, integer_to_binary(12)}]
	}),
	ar_test_node:assert_post_tx_to_peer(peer1, StrippedTX#tx{ data = <<>> }),
	{{ok, Mempool3}, Peer1} = ar_http_iface_client:get_mempool(Peer1),
	?assertEqual([Last#tx.id] ++ [TX#tx.id || TX <- lists:sublist(TXs, 8)]
			++ [StrippedTX#tx.id], Mempool3).

%% @doc Join a node onto a chain of max-anchor-depth blocks, verify anchor
%% validation on the joined node, then exercise fork recovery across a
%% deliberately orphaned branch.
joins_network_successfully() ->
	%% Start a node and mine ar_block:get_max_tx_anchor_depth() blocks, some of them
	%% with transactions.
	%%
	%% Join this node by another node.
	%% Post a transaction with an outdated anchor to the new node.
	%% Expect it to be rejected.
	%%
	%% Expect all the transactions to be present on the new node.
	%%
	%% Isolate the nodes. Mine 1 block with a transaction anchoring the
	%% oldest block possible on peer1. Mine a block on main so that it stops
	%% tracking the block just referenced by peer1. Reconnect the nodes, mine another
	%% block with transactions anchoring the oldest block possible on peer1.
	%% Expect main to fork recover successfully.
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(200000000), <<>>},
		{Addr = crypto:strong_rand_bytes(32), ?AR(200000000), <<>>},
		{crypto:strong_rand_bytes(32), ?AR(200000000), <<>>}
	]),
	ar_test_node:start(B0),
	_ = ar_test_node:start_peer(peer1, B0),
	%% Build the chain, randomly alternating between wallet-list (tx_anchor)
	%% and block (block_anchor) anchored v1/v2 transactions.
	{TXs, _} = lists:foldl(
		fun(Height, {TXs, LastTX}) ->
			{TX, AnchorType} = case rand:uniform(4) of
				1 ->
					{ar_test_node:sign_v1_tx(Key, #{ last_tx => LastTX,
							reward => ?AR(10000) }), tx_anchor};
				2 ->
					{ar_test_node:sign_v1_tx(Key, #{
							last_tx => ar_test_node:get_tx_anchor(peer1),
							reward => ?AR(10000),
							tags => [{<<"nonce">>,
									integer_to_binary(rand:uniform(100))}] }),
							block_anchor};
				3 ->
					{ar_test_node:sign_tx(Key, #{ last_tx => LastTX, target => Addr,
							reward => ?AR(10000) }), tx_anchor};
				4 ->
					{ar_test_node:sign_tx(Key, #{
							last_tx => ar_test_node:get_tx_anchor(peer1),
							reward => ?AR(10000),
							tags => [{<<"nonce">>,
									integer_to_binary(rand:uniform(100))}]}),
							block_anchor}
			end,
			ar_test_node:assert_post_tx_to_peer(peer1, TX),
			ar_test_node:mine(peer1),
			assert_wait_until_height(peer1, Height),
			%% Wait for peer1's mempool to drain before the next iteration.
			ar_util:do_until(
				fun() ->
					ar_test_node:remote_call(peer1, ar_mempool, get_all_txids, []) == []
				end,
				200,
				1000
			),
			{TXs ++ [{TX, AnchorType}], TX#tx.id}
		end,
		{[], <<>>},
		lists:seq(1, ar_block:get_max_tx_anchor_depth())
	),
	ar_test_node:join_on(#{ node => main, join_on => peer1 }),
	BI = ar_test_node:remote_call(peer1, ar_node, get_block_index, []),
	?assertEqual(ok, ar_test_node:wait_until_block_index(BI)),
	%% A TX anchored one block deeper than the maximum depth is rejected.
	TX1 = ar_test_node:sign_tx(Key, #{
		last_tx => element(1, lists:nth(ar_block:get_max_tx_anchor_depth() + 1, BI)) }),
	{ok, {{<<"400">>, _}, _, <<"Invalid anchor (last_tx).">>, _, _}} =
			ar_test_node:post_tx_to_peer(main, TX1),
	%% Expect transactions to be on main.
	lists:foreach(
		fun({TX, _}) ->
			?assert(
				ar_util:do_until(
					fun() ->
						ar_test_node:get_tx_confirmations(main, TX#tx.id) > 0
					end,
					100,
					20000
				)
			)
		end,
		TXs
	),
	%% Replaying any mined TX must fail; the exact error depends on the anchor.
	lists:foreach(
		fun({TX, AnchorType}) ->
			Reply = ar_test_node:post_tx_to_peer(main, TX),
			case AnchorType of
				tx_anchor ->
					?assertMatch({ok, {{<<"400">>, _}, _,
							<<"Invalid anchor (last_tx).">>, _, _}}, Reply);
				block_anchor ->
					RecentBHL = lists:sublist(?BI_TO_BHL(BI),
							ar_block:get_max_tx_anchor_depth()),
					case lists:member(TX#tx.last_tx, RecentBHL) of
						true ->
							?assertMatch({ok, {{<<"400">>, _}, _,
									<<"Transaction is already on the weave.">>, _, _}},
									Reply);
						false ->
							?assertMatch({ok, {{<<"400">>, _}, _,
									<<"Invalid anchor (last_tx).">>, _, _}}, Reply)
					end
			end
		end,
		TXs
	),
	ar_test_node:disconnect_from(peer1),
	%% Mine the block on main first to ensure that it can't be rebased after the 2-block
	%% fork from peer1 wins.
	TX2 = ar_test_node:sign_tx(main, Key, #{
		last_tx => element(1, lists:nth(ar_block:get_max_tx_anchor_depth(), BI)) }),
	ar_test_node:assert_post_tx_to_peer(main, TX2),
	ar_test_node:mine(),
	wait_until_height(main, ar_block:get_max_tx_anchor_depth() + 1),
	%% Mine two blocks on peer to ensure that the main branch is orphaned.
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, ar_block:get_max_tx_anchor_depth() + 1),
	%% lists:nth(ar_block:get_max_tx_anchor_depth() - 1, BI) since we'll be at
	%% ar_block:get_max_tx_anchor_depth() + 2.
	TX3 = ar_test_node:sign_tx(peer1, Key, #{
		last_tx => element(1, lists:nth(ar_block:get_max_tx_anchor_depth() - 1, BI)) }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX3),
	ar_test_node:mine(peer1),
	BI2 = assert_wait_until_height(peer1, ar_block:get_max_tx_anchor_depth() + 2),
	ar_test_node:connect_to_peer(peer1),
	wait_until_height(main, ar_block:get_max_tx_anchor_depth() + 2),
	TX4 = ar_test_node:sign_tx(peer1, Key, #{
		last_tx => element(1, lists:nth(ar_block:get_max_tx_anchor_depth(), BI2)) }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX4),
	ar_test_node:assert_wait_until_receives_txs([TX4]),
	ar_test_node:mine(peer1),
	BI3 = assert_wait_until_height(peer1, ar_block:get_max_tx_anchor_depth() + 3),
	BI3 = wait_until_height(main, ar_block:get_max_tx_anchor_depth() + 3),
	?assertEqual([TX4#tx.id], (read_block_when_stored(hd(BI3)))#block.txs),
	?assertEqual([TX3#tx.id], (read_block_when_stored(hd(BI2)))#block.txs).

%% @doc Fork at the given height, let peer1's branch win, and verify main
%% recovers: confirmed TXs cannot be replayed and the orphaned fork's TXs
%% return to the mempool.
recovers_from_forks(ForkHeight) ->
	%% Mine a number of blocks with transactions on peer1 and main in sync,
	%% then mine another bunch independently.
	%%
	%% Mine an extra block on peer1 to make main fork recover to it.
	%% Expect the fork recovery to be successful.
	%%
	%% Try to replay all the past transactions on main. Expect the transactions to be rejected.
	%%
	%% Resubmit all the transactions from the orphaned fork. Expect them to be accepted
	%% and successfully mined into a block.
	Key = {_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([
		{ar_wallet:to_address(Pub), ?AR(20), <<>>}
	]),
	_ = ar_test_node:start(B0),
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:connect_to_peer(peer1),
	{ok, Config} = arweave_config:get_env(),
	MainPort = Config#config.port,
	%% Shared history: both nodes mine the same blocks up to ForkHeight.
	PreForkTXs = lists:foldl(
		fun(Height, TXs) ->
			TX = ar_test_node:sign_v1_tx(Key,
					#{ last_tx => ar_test_node:get_tx_anchor(peer1),
					tags => [{<<"nonce">>, random_nonce()}] }),
			ar_test_node:assert_post_tx_to_peer(peer1, TX),
			ar_test_node:assert_wait_until_receives_txs([TX]),
			ar_test_node:mine(peer1),
			BI = assert_wait_until_height(peer1, Height),
			BI = wait_until_height(main, Height),
			assert_block_txs(peer1, [TX], BI),
			assert_block_txs(main, [TX], BI),
			TXs ++ [TX]
		end,
		[],
		lists:seq(1, ForkHeight)
	),
	PostTXToMain = fun() ->
		UnsignedTX = #{ last_tx => ar_test_node:get_tx_anchor(main),
				tags => [{<<"nonce">>, random_nonce()}], reward => ?AR(1) },
		TX = case rand:uniform(2) of
			1 -> ar_test_node:sign_tx(main, Key, UnsignedTX);
			2 -> ar_test_node:sign_v1_tx(main, Key, UnsignedTX)
		end,
		ar_test_node:assert_post_tx_to_peer(main, TX),
		[TX]
	end,
	PostTXToPeer = fun() ->
		UnsignedTX = #{ last_tx => ar_test_node:get_tx_anchor(peer1),
				tags => [{<<"nonce">>, random_nonce()}] },
		TX = case rand:uniform(2) of
			1 -> ar_test_node:sign_tx(Key, UnsignedTX);
			2 -> ar_test_node:sign_v1_tx(Key, UnsignedTX)
		end,
		ar_test_node:assert_post_tx_to_peer(peer1, TX),
		[TX]
	end,
	%% Diverge: the nodes mine independent blocks while disconnected.
	ar_test_node:disconnect_from(peer1),
	{MainPostForkTXs, PeerPostForkTXs} = lists:foldl(
		fun(Height, {MainTXs, PeerTXs}) ->
			UpdatedMainTXs = MainTXs ++ ([NewMainTX] = PostTXToMain()),
			ar_test_node:mine(),
			BI = wait_until_height(main, Height),
			assert_block_txs(main, [NewMainTX], BI),
			UpdatedPeerTXs = PeerTXs ++ ([NewPeerTX] = PostTXToPeer()),
			ar_test_node:mine(peer1),
			PeerBI = assert_wait_until_height(peer1, Height),
			assert_block_txs(peer1, [NewPeerTX], PeerBI),
			{UpdatedMainTXs, UpdatedPeerTXs}
		end,
		{[], []},
		lists:seq(ForkHeight + 1, 9)
	),
	%% Reconnect; the extra block mined on peer1 makes its branch heavier.
	ar_test_node:connect_to_peer(peer1),
	TX2 = ar_test_node:sign_tx(Key, #{ last_tx => ar_test_node:get_tx_anchor(peer1),
			tags => [{<<"nonce">>, random_nonce()}] }),
	ar_test_node:assert_post_tx_to_peer(peer1, TX2),
	ar_test_node:assert_wait_until_receives_txs([TX2]),
	ar_test_node:mine(peer1),
	assert_wait_until_height(peer1, 10),
	wait_until_height(main, 10),
	forget_txs(
		PreForkTXs ++ MainPostForkTXs ++ PeerPostForkTXs ++ [TX2]
	),
	%% Assert pre-fork transactions, the transactions which came during
	%% fork recovery, and the freshly created transaction are in the
	%% weave.
	lists:foreach(
		fun(TX) ->
			?assert(
				ar_util:do_until(
					fun() ->
						ar_test_node:get_tx_confirmations(main, TX#tx.id) > 0
					end,
					100,
					1000
				)
			),
			{ok, {{<<"400">>, _}, _, _, _, _}} = ar_test_node:post_tx_to_peer(main, TX)
		end,
		PreForkTXs ++ PeerPostForkTXs ++ [TX2]
	),
	%% Assert the block anchored transactions from the abandoned fork are
	%% back in the memory pool.
	lists:foreach(
		fun(TX) ->
			{ok, {{<<"208">>, _}, _, <<"Transaction already processed.">>, _, _}} =
					ar_http:req(#{
						method => post,
						peer => {127, 0, 0, 1, MainPort},
						path => "/tx",
						headers => [{<<"x-p2p-port">>, integer_to_binary(MainPort, 10)}],
						body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))
					})
		end,
		MainPostForkTXs
	).

%% @doc Build two signing funs: one wallet-list anchored TX and one block
%% anchored TX (v1 for RSA keys, v2 for ECDSA keys).
one_wallet_list_one_block_anchored_txs(Key, B0) ->
	%% Sign only after the node has started to get the correct price
	%% estimation from it.
	{_, {KeyType, _}} = Key,
	TX1Fun = fun() ->
		case KeyType of
			?RSA_KEY_TYPE ->
				ar_test_node:sign_v1_tx(Key, #{ reward => ?AR(1) });
			?ECDSA_KEY_TYPE ->
				ar_test_node:sign_tx(Key, #{ reward => ?AR(1), last_tx => <<>> })
		end
	end,
	TX2Fun = fun() ->
		case KeyType of
			?RSA_KEY_TYPE ->
				ar_test_node:sign_v1_tx(Key,
						#{ reward => ?AR(1), last_tx => B0#block.indep_hash });
			?ECDSA_KEY_TYPE ->
				ar_test_node:sign_tx(Key,
						#{ reward => ?AR(1), last_tx => B0#block.indep_hash })
		end
	end,
	[TX1Fun, TX2Fun].
%% @doc Build two signing funs producing block-anchored transactions: v1 for
%% RSA keys, v2 for ECDSA keys. The second ECDSA TX carries a tag so the two
%% deterministic signatures do not collide.
two_block_anchored_txs(Key, B0) ->
	%% Sign only after the node has started to get the correct price
	%% estimation from it.
	{_, {KeyType, _}} = Key,
	Anchor = B0#block.indep_hash,
	SignFirst = fun() ->
		case KeyType of
			?RSA_KEY_TYPE ->
				ar_test_node:sign_v1_tx(Key, #{ reward => ?AR(1), last_tx => Anchor });
			?ECDSA_KEY_TYPE ->
				ar_test_node:sign_tx(Key, #{ reward => ?AR(1), last_tx => Anchor })
		end
	end,
	SignSecond = fun() ->
		case KeyType of
			?RSA_KEY_TYPE ->
				ar_test_node:sign_v1_tx(Key, #{ reward => ?AR(1), last_tx => Anchor });
			?ECDSA_KEY_TYPE ->
				ar_test_node:sign_tx(Key, #{
					reward => ?AR(1),
					last_tx => Anchor,
					%% A tag to distinguish deterministic ECDSA transactions.
					tags => [{<<"id">>, <<>>}]
				})
		end
	end,
	[SignFirst, SignSecond].

%% @doc Build an empty transaction set (used as a degenerate case).
empty_tx_set(_Key, _B0) ->
	[].

%% @doc Three block-anchored v1 TXs whose rewards sum above the ?AR(20)
%% balance: the cheapest one is expected to be ejected.
block_anchor_txs_spending_balance_plus_one_more(Key, B0) ->
	Anchor = B0#block.indep_hash,
	[ar_test_node:sign_v1_tx(Key,
			#{ denomination => 1, reward => Reward, last_tx => Anchor })
		|| Reward <- [?AR(10), ?AR(10), ?AR(1)]].

%% @doc Five v1 TXs (one wallet-list anchored, four block anchored) whose
%% rewards sum above the ?AR(20) balance: the cheapest one is expected to be
%% ejected.
mixed_anchor_txs_spending_balance_plus_one_more(Key, B0) ->
	Anchor = B0#block.indep_hash,
	[ar_test_node:sign_v1_tx(Key,
			#{ denomination => 1, reward => Reward, last_tx => LastTX })
		|| {Reward, LastTX} <- [
			{?AR(10), <<>>},
			{?AR(5), Anchor},
			{?AR(2), Anchor},
			{?AR(3), Anchor},
			{?AR(1), Anchor}
		]].
grouped_txs() -> Key1 = {_, Pub1} = ar_wallet:new(), Key2 = {_, Pub2} = ar_wallet:new(), Wallets = [ {ar_wallet:to_address(Pub1), ?AR(100), <<>>}, {ar_wallet:to_address(Pub2), ?AR(100), <<>>} ], [B0] = ar_weave:init(Wallets), Chunk1 = random_v1_data(?TX_DATA_SIZE_LIMIT), Chunk2 = <<"a">>, TX1 = ar_test_node:sign_v1_tx(Key1, #{ reward => ?AR(1), data => Chunk1, last_tx => <<>> }), TX2 = ar_test_node:sign_v1_tx(Key2, #{ reward => ?AR(1), data => Chunk2, last_tx => B0#block.indep_hash }), %% TX1 is expected to be mined first because wallet list anchors are mined first while %% the price per byte should be the same since we assigned the minimum required fees. {B0, [[TX1], [TX2]]}. mine_blocks(Node, TargetHeight) -> mine_blocks(Node, 1, TargetHeight). mine_blocks(_Node, Height, TargetHeight) when Height == TargetHeight + 1 -> ok; mine_blocks(Node, Height, TargetHeight) -> ar_test_node:mine(Node), assert_wait_until_height(Node, Height), mine_blocks(Node, Height + 1, TargetHeight). forget_txs(TXs) -> lists:foreach( fun(TX) -> ets:delete(ignored_ids, TX#tx.id) end, TXs ). assert_block_txs(Node, TXs, BI) -> TXIDs = lists:map(fun(TX) -> TX#tx.id end, TXs), B = ar_test_node:remote_call(Node, ar_test_node, read_block_when_stored, [hd(BI)]), ?assertEqual(lists:sort(TXIDs), lists:sort(B#block.txs)). random_nonce() -> integer_to_binary(rand:uniform(1000000)). ================================================ FILE: apps/arweave/test/ar_vdf_block_validation_tests.erl ================================================ -module(ar_vdf_block_validation_tests). -include_lib("eunit/include/eunit.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -define(TEST_RESET_FREQUENCY, 400). -define(BLOCK_DELIVERY_TIMEOUT, 120000). fork_at_entropy_reset_point_test_() -> [ {timeout, ?TEST_NODE_TIMEOUT, fun test_fork_checkpoints_not_found/0}, {timeout, ?TEST_NODE_TIMEOUT, fun test_fork_refuse_validation/0} ]. %% Scenario: %% 1. VDF server applies a block that opens a new VDF session %% 2. 
%% VDF client mines a solution at that same height
%%    (i.e. it mines a fork before receiving the other block)
%% 3. That solution fails because it is mined off VDF steps from the
%%    server which are in the new session, but the block being mined
%%    is an entropy reset block.
%%
%% The failure in this case (`step_checkpoints_not_found' error) is
%% unavoidable in this specific scenario. So this test will just assert
%% that the block is rejected and that the VDF client can later get on
%% the correct chain and then mine a solution there.
test_fork_checkpoints_not_found() ->
	mock_reset_frequency_and_block_propagation_parallelization(),
	try
		[B0] = ar_weave:init(),
		%% Start nodes in such way that they will not gossip blocks to
		%% each other. This lets us control when blocks are shared.
		%% Note: also relies on `mock_block_propagation_parallelization()`.
		{ok, Config} = arweave_config:get_env(),
		%% main acts as the VDF server pushing steps to peer1.
		ar_test_node:start(#{ b0 => B0, config => Config#config{
			nonce_limiter_client_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(peer1)) ],
			block_pollers = 0 } }),
		mock_reset_frequency_and_block_propagation_parallelization(main),
		{ok, PeerConfig} = ar_test_node:get_config(peer1),
		%% peer1 is the VDF client trusting main.
		ar_test_node:start_peer(peer1, #{ b0 => B0, config => PeerConfig#config{
			nonce_limiter_server_trusted_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(main)) ],
			block_pollers = 0 } }),
		mock_reset_frequency_and_block_propagation_parallelization(peer1),
		H2 = ar_test_node:with_gossip_paused(main, fun() ->
			%% Still need to connect to make sure VDF is shared
			ar_test_node:connect_to_peer(peer1),
			ar_test_node:mine(main),
			[H1 | _] = ar_test_node:wait_until_height(main, 1),
			send_block(H1, main, peer1),
			ar_test_node:wait_until_height(peer1, 1),
			ar_test_node:disconnect_from(peer1),
			%% Make sure that we are deep into the new session before we try to mine.
			%% Suspend peer1's nonce limiter so it cannot advance to the new session
			%% while isolated.
			[H2Local | _] = with_nonce_limiter_paused(peer1, fun() ->
				wait_until_step_number(main, ?TEST_RESET_FREQUENCY + 101),
				ar_test_node:mine(main),
				ar_test_node:wait_until_height(main, 2)
			end),
			ar_test_node:connect_to_peer(peer1),
			%% Wait until peer1 has transitioned to the new VDF session.
			wait_until_step_number(peer1, ?TEST_RESET_FREQUENCY + 1),
			with_vdf_pull_and_push_disabled(peer1, fun() ->
				ar_test_node:mine(peer1),
				%% Assert that peer1 is unable to mine a block.
				%% With blocks [genesis, H1] the chain length stays 2.
				timer:sleep(10000),
				BI = ar_test_node:remote_call(peer1, ar_node, get_blocks, []),
				?assertEqual(2, length(BI))
			end),
			H2Local
		end),
		%% Get peer1 on the main chain
		send_block(H2, main, peer1),
		ar_test_node:wait_until_height(peer1, 2),
		%% Now that we're on the main chain and still mining, we should eventually
		%% mine a block.
		ar_test_node:mine(peer1),
		ar_test_node:wait_until_height(peer1, 3)
	after
		%% Always undo the meck mocks, even if an assertion above failed.
		disable_mocks(main),
		disable_mocks(peer1)
	end.

%% Scenario:
%% 1. There's a chain fork on a block that opens a new VDF session.
%%    The "winning" block has a higher VDF step than the "losing" block.
%%    Both blocks need to be validated using the current VDF session
%%    (not the new one)
%% 2. VDF server applies the "winning" block, validates with the current
%%    VDF session, and opens a new VDF session.
%% 3. VDF client applies the "losing" block, is able to get the VDF steps
%%    it needs to validate because the steps are before the new session that
%%    was opened on the VDF server so they still belong to the "old" session
%%    (or perhaps it just validates the "losing" block before the VDF server
%%    opens a new session)
%% 4. Later the VDF client tries to apply the winning block. However when it
%%    queries the steps it needs to validate the block, the VDF server which is
%%    now on the new session returns the steps for that new session - which won't
%%    validate.
%% 5. VDF client is stuck trying to validate the winning block and can't proceed.
%%
%% We built in a fix for this scenario before 2.9.5-alpha1, but it relied on
%% VDF Pull being enabled (in which case the VDF client would explicitly ask
%% the server for the full current and previous sessions). In 2.9.5-alpha1 we
%% broke this fix for nodes using `disable vdf_server_pull`. We've now
%% re-applied the fix and added this test.
test_fork_refuse_validation() ->
	mock_reset_frequency_and_block_propagation_parallelization(),
	try
		[B0] = ar_weave:init(),
		%% Start nodes in such way that they will not gossip blocks to
		%% each other. This lets us control when blocks are shared.
		%% Note: also relies on `mock_block_propagation_parallelization()`.
		{ok, Config} = arweave_config:get_env(),
		%% main is the VDF server pushing steps to peer1.
		ar_test_node:start(#{ b0 => B0, config => Config#config{
			nonce_limiter_client_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(peer1)) ],
			block_pollers = 0 } }),
		mock_reset_frequency_and_block_propagation_parallelization(main),
		{ok, PeerConfig} = ar_test_node:get_config(peer1),
		%% peer1 is the VDF client; VDF pull is explicitly disabled to
		%% exercise the push-only code path this test protects.
		ar_test_node:start_peer(peer1, #{ b0 => B0, config => PeerConfig#config{
			nonce_limiter_server_trusted_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(main)) ],
			block_pollers = 0,
			disable = [vdf_server_pull | PeerConfig#config.disable] } }),
		mock_reset_frequency_and_block_propagation_parallelization(peer1),
		ar_test_node:with_gossip_paused(main, fun() ->
			%% Still need to connect to make sure VDF is shared
			ar_test_node:connect_to_peer(peer1),
			ar_test_node:mine(main),
			[H1 | _] = ar_test_node:wait_until_height(main, 1),
			send_block(H1, main, peer1),
			ar_test_node:assert_wait_until_height(peer1, 1),
			%% peer1 mines the "losing" fork block just past the reset line.
			wait_until_step_number(peer1, ?TEST_RESET_FREQUENCY + 1),
			ar_test_node:mine(peer1),
			ar_test_node:wait_until_height(peer1, 2),
			ar_test_node:disconnect_from(peer1),
			%% main mines the "winning" fork at a higher VDF step, then extends it.
			wait_until_step_number(main, ?TEST_RESET_FREQUENCY + 100),
			ar_test_node:mine(main),
			[H2 | _] = ar_test_node:wait_until_height(main, 2),
			ar_test_node:mine(main),
			[H3 | _] = ar_test_node:wait_until_height(main, 3),
			%% Just avoids some errors if the test finishes before the mining
			%% server is paused.
			ar_test_node:wait_until_mining_paused(main),
			ar_test_node:connect_to_peer(peer1),
			ensure_block_applied(H2, main, peer1, 2),
			ensure_block_applied(H3, main, peer1, 3)
		end),
		ar_test_node:wait_until_height(peer1, 3)
	after
		%% Always undo the meck mocks, even if an assertion above failed.
		disable_mocks(main),
		disable_mocks(peer1)
	end.

%% @doc Mock a short VDF reset frequency and disable block propagation
%% (parallelization 0) on the local node.
mock_reset_frequency_and_block_propagation_parallelization() ->
	ar_test_node:new_mock(ar_nonce_limiter, [passthrough]),
	ar_test_node:new_mock(ar_bridge, [passthrough]),
	ar_test_node:mock_function(ar_nonce_limiter, get_reset_frequency,
		fun() -> ?TEST_RESET_FREQUENCY end),
	ar_test_node:mock_function(ar_bridge, block_propagation_parallelization,
		fun() -> 0 end).

%% @doc Same as above but installed on a remote test node.
mock_reset_frequency_and_block_propagation_parallelization(Node) ->
	ar_test_node:remote_call(Node, ar_test_node, new_mock,
		[ar_nonce_limiter, [passthrough]]),
	ar_test_node:remote_call(Node, ar_test_node, new_mock,
		[ar_bridge, [passthrough]]),
	ar_test_node:remote_call(Node, ar_test_node, mock_function,
		[ar_nonce_limiter, get_reset_frequency,
			fun() -> ?TEST_RESET_FREQUENCY end]),
	ar_test_node:remote_call(Node, ar_test_node, mock_function,
		[ar_bridge, block_propagation_parallelization, fun() -> 0 end]).

%% @doc Remove the mocks installed by
%% mock_reset_frequency_and_block_propagation_parallelization/1.
disable_mocks(Node) ->
	ok = ar_test_node:remote_call(Node, ar_test_node, unmock_module, [ar_bridge]),
	ok = ar_test_node:remote_call(Node, ar_test_node, unmock_module, [ar_nonce_limiter]).

%% @doc Read block H from FromNode's storage and POST it to ToNode.
%% 200 (accepted) and 208 (already processed) both count as success.
send_block(H, FromNode, ToNode) ->
	Block = ar_test_node:remote_call(FromNode, ar_storage, read_block, [H]),
	case ar_test_node:send_new_block(ar_test_node:peer_ip(ToNode), Block) of
		{ok, {{<<"200">>, _}, _, _, _, _}} -> ok;
		{ok, {{<<"208">>, _}, _, _, _, _}} -> ok;
		Error ->
			?assert(false, io_lib:format("Got unexpected error: ~p", [Error]))
	end.

%% @doc Repeatedly send block H to ToNode until its height reaches TargetHeight.
%% Fix: match on `true =' so a delivery timeout crashes here with a clear error
%% instead of being silently ignored (the original discarded do_until's result,
%% letting the test continue and fail later in a confusing place). This also
%% makes the function consistent with wait_until_step_number/2 below.
ensure_block_applied(H, FromNode, ToNode, TargetHeight) ->
	true = ar_util:do_until(
		fun() ->
			send_block(H, FromNode, ToNode),
			Height = ar_test_node:remote_call(ToNode, ar_node, get_height, []),
			Height >= TargetHeight
		end,
		1000,
		?BLOCK_DELIVERY_TIMEOUT).
%% @doc Poll Node until its nonce limiter reports a step number >= StepNumber.
%% Crashes (badmatch) if the step is not reached within 120s.
wait_until_step_number(Node, StepNumber) ->
	true = ar_util:do_until(
		fun() ->
			try
				CurrentStepNumber = ar_test_node:remote_call(
					Node, ar_nonce_limiter, get_current_step_number, []),
				CurrentStepNumber >= StepNumber
			catch
				%% meck's internal gen_server proxy uses gen_server:call/2
				%% with the default 5s timeout, which can fire under load.
				exit:{timeout, _} ->
					false
			end
		end,
		500,
		120000).

%% @doc Run Fun() while Node's ar_nonce_limiter process is suspended;
%% always resume it afterwards.
with_nonce_limiter_paused(Node, Fun) when is_function(Fun, 0) ->
	Pid = suspend_nonce_limiter(Node),
	try
		Fun()
	after
		resume_nonce_limiter(Node, Pid)
	end.

%% @doc Run Fun() with both VDF pull and VDF push effectively disabled on Node,
%% restoring the original configuration and resuming the client loop afterwards.
with_vdf_pull_and_push_disabled(Node, Fun) when is_function(Fun, 0) ->
	{ok, Config} = ar_test_node:remote_call(Node, arweave_config, get_env, []),
	DisableFlags = Config#config.disable,
	%% Update config so that ar_http_iface_middleware
	%% responds to POST /vdf with #nonce_limiter_update_response{ postpone = 120 }.
	ok = ar_test_node:remote_call(
		Node, arweave_config, set_env,
		[Config#config{ disable = lists:delete(vdf_server_pull, DisableFlags) }]
	),
	%% Also suspend the pull loop so peer1 cannot fetch full sessions.
	Pid = suspend_nonce_limiter_client(Node),
	try
		Fun()
	after
		%% Restore the untouched Config captured above, then resume the loop.
		ok = ar_test_node:remote_call(Node, arweave_config, set_env, [Config]),
		resume_nonce_limiter_client(Node, Pid)
	end.

%% @doc sys:suspend/1 the ar_nonce_limiter process on Node; returns its pid.
suspend_nonce_limiter(Node) ->
	Pid = ar_test_node:remote_call(Node, erlang, whereis, [ar_nonce_limiter]),
	?assert(is_pid(Pid)),
	ok = ar_test_node:remote_call(Node, sys, suspend, [Pid]),
	Pid.

%% @doc sys:suspend/1 the ar_nonce_limiter_client process on Node; returns its pid.
suspend_nonce_limiter_client(Node) ->
	Pid = ar_test_node:remote_call(Node, erlang, whereis, [ar_nonce_limiter_client]),
	?assert(is_pid(Pid)),
	ok = ar_test_node:remote_call(Node, sys, suspend, [Pid]),
	Pid.

%% @doc Resume a previously suspended nonce limiter; tolerates a dead process.
resume_nonce_limiter(_Node, undefined) ->
	ok;
resume_nonce_limiter(Node, Pid) ->
	case ar_test_node:remote_call(Node, erlang, is_process_alive, [Pid]) of
		true -> ok = ar_test_node:remote_call(Node, sys, resume, [Pid]);
		false -> ok
	end.
%% @doc Resume a previously suspended nonce limiter client; tolerates a dead process.
resume_nonce_limiter_client(_Node, undefined) ->
	ok;
resume_nonce_limiter_client(Node, Pid) ->
	case ar_test_node:remote_call(Node, erlang, is_process_alive, [Pid]) of
		true -> ok = ar_test_node:remote_call(Node, sys, resume, [Pid]);
		false -> ok
	end.



================================================
FILE: apps/arweave/test/ar_vdf_external_update_tests.erl
================================================
-module(ar_vdf_external_update_tests).

-export([init/2]).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_mining.hrl").

-import(ar_test_node, [assert_wait_until_height/2, post_block/2, send_new_block/2]).

%% we have to wait to let the ar_events get processed whenever we apply a VDF step
-define(WAIT_TIME, 1000).

%% -------------------------------------------------------------------------------------------------
%% Test Fixtures
%% -------------------------------------------------------------------------------------------------

%% @doc Start a node configured with two (nonexistent) trusted VDF servers and
%% set up the ETS tables and event listener the tests record into.
%% Returns {ListenerPid, OriginalConfig} for cleanup_external_update/1.
setup_external_update() ->
	{ok, Config} = arweave_config:get_env(),
	[B0] = ar_weave:init(),
	%% Start the testnode with a configured VDF server so that it doesn't compute its own VDF -
	%% this is necessary so that we can test the behavior of apply_external_update without any
	%% auto-computed VDF steps getting in the way.
	_ = ar_test_node:start(
		B0,
		ar_wallet:to_address(ar_wallet:new_keyfile()),
		Config#config{
			nonce_limiter_server_trusted_peers = [
				ar_util:format_peer(vdf_server_1()),
				ar_util:format_peer(vdf_server_2())
			],
			mine = true
		}
	),
	%% computed_output is ordered_set so insertion-order keys replay in order;
	%% add_task is a bag recording every mining task pushed to workers.
	ets:new(computed_output, [named_table, ordered_set, public]),
	ets:new(add_task, [named_table, bag, public]),
	Pid = spawn(
		fun() ->
			ok = ar_events:subscribe(nonce_limiter),
			computed_output()
		end
	),
	{Pid, Config}.

%% @doc Kill the event listener, restore the original config, drop the tables.
cleanup_external_update({Pid, Config}) ->
	exit(Pid, kill),
	ok = arweave_config:set_env(Config),
	ets:delete(add_task),
	ets:delete(computed_output).
%% -------------------------------------------------------------------------------------------------
%% Test Registration
%% -------------------------------------------------------------------------------------------------

external_update_test_() ->
	{foreach, fun setup_external_update/0, fun cleanup_external_update/1,
		[
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_session_overlap/0, 120),
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_client_ahead/0, 120),
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_skip_ahead/0, 120),
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_2_servers_switching/0, 120),
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_backtrack/0, 120),
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_2_servers_backtrack/0, 120)
		]
	}.

mining_session_test_() ->
	{foreach, fun setup_external_update/0, fun cleanup_external_update/1,
		[
			ar_test_node:test_with_mocked_functions(
				[mock_add_task(), mock_reset_frequency()],
				fun test_mining_session/0, 120)
		]
	}.

%% -------------------------------------------------------------------------------------------------
%% Tests
%% -------------------------------------------------------------------------------------------------

%%
%% external_update_test_
%%

%% @doc The VDF session key is only updated when a block is processed by the VDF server. Until that
%% happens the server will push all VDF steps under the same session key - even if those steps
%% cross an entropy reset line. When a block comes in the server will update the session key
%% *and* move all appropriate steps to that session. Prior to 2.7 this caused VDF clients to
%% process some steps twice - once under the old session key, and once under the new session key.
%% This test asserts that this behavior has been fixed and that VDF clients only process each
%% step once.
test_session_overlap() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	%% A partial update for an unknown session must be rejected...
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey1, [], 8, true, SessionKey0),
		"Partial session1, session not found"),
	%% ...after which the server would push the full session.
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [7, 6, 5], 8, false, SessionKey0),
		"Full session1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 9, true, SessionKey0),
		"Partial session1"),
	%% Steps 10 and 11 cross into interval 2 but are still pushed under session1.
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 10, true, SessionKey0),
		"Partial session1, interval2"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 11, true, SessionKey0),
		"Partial session1, interval2"),
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey2, [], 12, true, SessionKey1),
		"Partial session2, interval2"),
	%% Re-pushing already-seen steps reports the client's current step instead.
	?assertEqual(
		#nonce_limiter_update_response{ session_found = true, step_number = 11 },
		apply_external_update(SessionKey1, [8, 7, 6, 5], 9, false, SessionKey1),
		"Full session1, all steps already seen"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey2, [11, 10], 12, false, SessionKey1),
		"Full session2, some steps already seen"),
	timer:sleep(?WAIT_TIME),
	%% Each step was computed exactly once, in push order.
	?assertEqual(
		[<<"8">>, <<"7">>, <<"6">>, <<"5">>, <<"9">>, <<"10">>, <<"11">>, <<"12">>],
		computed_steps()),
	?assertEqual(SessionKey0, get_current_session_key()),
	?assertEqual(
		[10, 10, 10, 10, 10, 20, 20, 20],
		computed_upper_bounds()).

%% @doc This test asserts that the client responds correctly when it is ahead of the VDF server.
test_client_ahead() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [7, 6, 5], 8, false, SessionKey0),
		"Full session"),
	%% Once the client is at step 8, any update at a lower step is answered
	%% with the client's current step number and not re-processed.
	?assertEqual(
		#nonce_limiter_update_response{ step_number = 8 },
		apply_external_update(SessionKey1, [], 7, true, SessionKey0),
		"Partial session, client ahead"),
	?assertEqual(
		#nonce_limiter_update_response{ step_number = 8 },
		apply_external_update(SessionKey1, [6, 5], 7, false, SessionKey0),
		"Full session, client ahead"),
	timer:sleep(?WAIT_TIME),
	?assertEqual(SessionKey0, get_current_session_key()),
	%% Only the initial full session was computed; the behind updates were not.
	?assertEqual(
		[<<"8">>, <<"7">>, <<"6">>, <<"5">>],
		computed_steps()),
	?assertEqual(
		[10, 10, 10, 10],
		computed_upper_bounds()).

%% @doc
%% Test case:
%% 1. VDF server pushes a partial update that skips too far ahead of the client
%% 2. Simulate the updates that the server would then push (i.e. full session updates of the
%%    current session and maybe previous session)
%%
%% Assert that the client responds correctly and only processes each step once (even though it may
%% see the same step several times as part of the full session updates).
test_skip_ahead() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [5], 6, false, SessionKey0),
		"Full session1"),
	%% A partial update skipping from 6 straight to 8 is refused with the
	%% client's current step so the server knows to send the full session.
	?assertEqual(
		#nonce_limiter_update_response{ session_found = true, step_number = 6 },
		apply_external_update(SessionKey1, [], 8, true, SessionKey0),
		"Partial session1, server ahead"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [7, 6, 5], 8, false, SessionKey0),
		"Full session1"),
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey2, [], 12, true, SessionKey1),
		"Partial session2, server ahead"),
	%% The full-session retries overlap steps already seen; each step must
	%% still only be computed once (asserted below).
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [8, 7, 6, 5], 9, false, SessionKey0),
		"Full session1, all steps already seen"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey2, [11, 10], 12, false, SessionKey1),
		"Full session2, some steps already seen"),
	timer:sleep(?WAIT_TIME),
	?assertEqual(SessionKey0, get_current_session_key()),
	?assertEqual(
		[<<"6">>, <<"5">>, <<"8">>, <<"7">>, <<"9">>, <<"12">>, <<"11">>, <<"10">>],
		computed_steps()),
	?assertEqual(
		[10, 10, 10, 10, 10, 20, 20, 20],
		computed_upper_bounds()).
%% @doc Exercise a client fed by two VDF servers that alternate pushing the
%% same sessions; updates from either server must be deduplicated.
test_2_servers_switching() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [6, 5], 7, false, SessionKey0,
			vdf_server_1()),
		"Full session1 from vdf_server_1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 8, true, SessionKey0,
			vdf_server_2()),
		"Partial session1 from vdf_server_2"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 9, true, SessionKey0,
			vdf_server_2()),
		"Partial session1 from vdf_server_2"),
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey2, [], 11, true, SessionKey1,
			vdf_server_1()),
		"Partial session2 from vdf_server_1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey2, [10], 11, false, SessionKey1,
			vdf_server_1()),
		"Full session2 from vdf_server_1"),
	%% vdf_server_2 is still pushing session1; the client keeps accepting
	%% those steps without switching its current session.
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 10, true, SessionKey0,
			vdf_server_2()),
		"Partial session1 from vdf_server_2 (should not change current session)"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 11, true, SessionKey0,
			vdf_server_2()),
		"Partial session1 from vdf_server_2 (should not change current session)"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 12, true, SessionKey0,
			vdf_server_2()),
		"Partial session1 from vdf_server_2 (should not change current session)"),
	?assertEqual(
		ok,
		apply_external_update(
			SessionKey2, [11, 10], 12, false, SessionKey1, vdf_server_2()),
		"Full session2 from vdf_server_2"),
	?assertEqual(
		#nonce_limiter_update_response{ step_number = 12 },
		apply_external_update(SessionKey2, [], 12, true, SessionKey1,
			vdf_server_1()),
		"Partial (repeat) session2 from vdf_server_1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey2, [], 13, true, SessionKey1,
			vdf_server_1()),
		"Partial (new) session2 from vdf_server_1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey2, [], 14, true, SessionKey1,
			vdf_server_2()),
		"Partial (new) session2 from vdf_server_2"),
	timer:sleep(?WAIT_TIME),
	?assertEqual(SessionKey0, get_current_session_key()),
	%% Note: <<"10">> and <<"11">> appear twice - once per session key - but
	%% never twice within the same session.
	?assertEqual([
			<<"7">>, <<"6">>, <<"5">>, <<"8">>, <<"9">>, <<"11">>, <<"10">>,
			<<"10">>, <<"11">>, <<"12">>, <<"12">>, <<"13">>, <<"14">>
		], computed_steps()),
	?assertEqual(
		[10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 20, 20, 20],
		computed_upper_bounds()).

%% @doc Push a long session, then re-push earlier (backtracked) full sessions
%% and assert the client only processes the genuinely new steps.
test_backtrack() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [
			16, 15,             %% interval 3
			14, 13, 12, 11, 10, %% interval 2
			9, 8, 7, 6, 5       %% interval 1
		], 17, false, SessionKey0),
		"Full session1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 18, true, SessionKey0),
		"Partial session1"),
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey2, [], 15, true, SessionKey1),
		"Partial session2"),
	%% Backtracked full session1: all steps seen, client reports step 18.
	?assertEqual(
		#nonce_limiter_update_response{ step_number = 18 },
		apply_external_update(
			SessionKey1, [8, 7, 6, 5], 9, false, SessionKey0),
		"Backtrack. Send full session1."),
	?assertEqual(
		ok,
		apply_external_update(
			SessionKey2, [14, 13, 12, 11, 10], 15, false, SessionKey1),
		"Backtrack. Send full session2"),
	timer:sleep(?WAIT_TIME),
	?assertEqual(SessionKey0, get_current_session_key()),
	?assertEqual([
			<<"17">>, <<"16">>, <<"15">>, <<"14">>, <<"13">>, <<"12">>,
			<<"11">>, <<"10">>, <<"9">>, <<"8">>, <<"7">>, <<"6">>,
			<<"5">>, <<"18">>, <<"15">>
		], computed_steps()),
	?assertEqual(
		[20, 20, 20, 20, 20, 20, 20, 20, 10, 10, 10, 10, 10, 20, 30],
		computed_upper_bounds()).
%% @doc Like test_backtrack/0, but the backtracked session2 arrives from a
%% second VDF server; the dedup behavior must be identical.
test_2_servers_backtrack() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [
			16, 15,             %% interval 3
			14, 13, 12, 11, 10, %% interval 2
			9, 8, 7, 6, 5       %% interval 1
		], 17, false, SessionKey0, vdf_server_1()),
		"Full session1 from vdf_server_1"),
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [], 18, true, SessionKey0,
			vdf_server_1()),
		"Partial session1 from vdf_server_1"),
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey2, [], 15, true, SessionKey1,
			vdf_server_2()),
		"Partial session2 from vdf_server_2"),
	?assertEqual(
		ok,
		apply_external_update(
			SessionKey2, [14, 13, 12, 11, 10], 15, false, SessionKey1,
			vdf_server_2()),
		"Backtrack in session2 from vdf_server_2"),
	timer:sleep(?WAIT_TIME),
	?assertEqual([
			<<"17">>, <<"16">>, <<"15">>, <<"14">>, <<"13">>, <<"12">>,
			<<"11">>, <<"10">>, <<"9">>, <<"8">>, <<"7">>, <<"6">>,
			<<"5">>, <<"18">>, <<"15">>
		], computed_steps()),
	?assertEqual(SessionKey0, get_current_session_key()),
	?assertEqual(
		[20, 20, 20, 20, 20, 20, 20, 20, 10, 10, 10, 10, 10, 20, 30],
		computed_upper_bounds()).
%% @doc Assert which VDF sessions the mining server treats as active, and that
%% mining tasks are only dispatched for steps in a known (active) session.
test_mining_session() ->
	SessionKey0 = get_current_session_key(),
	SessionKey1 = {<<"session1">>, 1, 1},
	SessionKey2 = {<<"session2">>, 2, 1},
	SessionKey3 = {<<"session3">>, 3, 1},
	ar_test_node:mine(),
	?assertEqual(
		ok,
		apply_external_update(SessionKey0, [], 2, true, undefined),
		"Partial session0, should mine"),
	timer:sleep(?WAIT_TIME),
	?assertEqual([SessionKey0], sets:to_list(ar_mining_server:active_sessions())),
	?assertEqual([2], mined_steps()),
	?assertEqual(
		ok,
		apply_external_update(SessionKey0, [3], 4, false, undefined),
		"Full session0, should mine"),
	timer:sleep(?WAIT_TIME),
	?assertEqual([SessionKey0], sets:to_list(ar_mining_server:active_sessions())),
	?assertEqual([4, 3], mined_steps()),
	%% Re-sending the latest step must not trigger new mining tasks.
	?assertEqual(
		#nonce_limiter_update_response{ step_number = 4 },
		apply_external_update(SessionKey0, [], 4, true, undefined),
		"Repeat step, should not mine"),
	timer:sleep(?WAIT_TIME),
	?assertEqual([SessionKey0], sets:to_list(ar_mining_server:active_sessions())),
	?assertEqual([], mined_steps()),
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey1, [], 6, true, SessionKey0),
		"Partial session1, should not mine"),
	timer:sleep(?WAIT_TIME),
	?assertEqual([SessionKey0], sets:to_list(ar_mining_server:active_sessions())),
	?assertEqual([], mined_steps()),
	%% session1 links back to session0, so applying it activates it for mining.
	?assertEqual(
		ok,
		apply_external_update(SessionKey1, [5], 6, false, SessionKey0),
		"Full session1, should mine"),
	timer:sleep(?WAIT_TIME),
	assert_sessions_equal([SessionKey0, SessionKey1],
		ar_mining_server:active_sessions()),
	?assertEqual([6, 5], mined_steps()),
	%% session3's previous session (session2) was never applied, so session3
	%% can never become active.
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey3, [], 16, true, SessionKey2),
		"Partial session3, should not mine"),
	timer:sleep(?WAIT_TIME),
	assert_sessions_equal([SessionKey0, SessionKey1],
		ar_mining_server:active_sessions()),
	?assertEqual([], mined_steps()),
	%% NOTE(review): the message says "Full" but IsPartial is true here -
	%% possibly intended to pass false; confirm against the other tests.
	?assertEqual(
		#nonce_limiter_update_response{ session_found = false },
		apply_external_update(SessionKey3, [15], 16, true, SessionKey2),
		"Full session3, should not mine"),
	timer:sleep(?WAIT_TIME),
	assert_sessions_equal([SessionKey0, SessionKey1],
		ar_mining_server:active_sessions()),
	?assertEqual([], mined_steps()),
	%% Current session is only updated when applying a new tip block, not when applying a VDF
	%% step from a VDF server.
	?assertEqual(SessionKey0, get_current_session_key()).

%% -------------------------------------------------------------------------------------------------
%% Helper Functions
%% -------------------------------------------------------------------------------------------------

%% @doc Cowboy handler entry point: this test module doubles as the HTTP
%% endpoint the node under test pushes VDF updates to.
init(Req, State) ->
	SplitPath = ar_http_iface_server:split_path(cowboy_req:path(Req)),
	handle(SplitPath, Req, State).

%% @doc Handle POST /vdf: deserialize the update (format 2) and record it.
%% On a deserialization error, reply 202 with a format-2 response record.
handle([<<"vdf">>], Req, State) ->
	{ok, Body, _} = ar_http_req:body(Req, ?MAX_BODY_SIZE),
	case ar_serialize:binary_to_nonce_limiter_update(2, Body) of
		{ok, Update} ->
			handle_update(Update, Req, State);
		{error, _} ->
			Response = #nonce_limiter_update_response{ format = 2 },
			Bin = ar_serialize:nonce_limiter_update_response_to_binary(Response),
			{ok, cowboy_req:reply(202, #{}, Bin, Req), State}
	end.
%% @doc Record a pushed VDF update in the computed_output table, emulating a
%% VDF client: partial updates for an unknown session are refused with
%% session_found = false (202); known sessions must advance by exactly 1 step.
handle_update(Update, Req, State) ->
	{Seed, _, _} = Update#nonce_limiter_update.session_key,
	IsPartial = Update#nonce_limiter_update.is_partial,
	Session = Update#nonce_limiter_update.session,
	StepNumber = Session#vdf_session.step_number,
	NSteps = length(Session#vdf_session.steps),
	Checkpoints = maps:get(StepNumber, Session#vdf_session.step_checkpoints_map),
	UpdateOutput = hd(Checkpoints),
	SessionOutput = hd(Session#vdf_session.steps),
	?assertNotEqual(Checkpoints, Session#vdf_session.steps),
	%% #nonce_limiter_update.checkpoints should be the checkpoints of the last step so
	%% the head of checkpoints should match the head of the session's steps
	?assertEqual(UpdateOutput, SessionOutput),
	case ets:lookup(computed_output, Seed) of
		[{Seed, FirstStepNumber, LatestStepNumber}] ->
			%% Known session: a partial update must be exactly one step ahead.
			?assert(not IsPartial orelse StepNumber == LatestStepNumber + 1,
				lists:flatten(io_lib:format(
					"Partial VDF update did not increase by 1, "
					"StepNumber: ~p, LatestStepNumber: ~p",
					[StepNumber, LatestStepNumber]))),
			ets:insert(computed_output, {Seed, FirstStepNumber, StepNumber}),
			{ok, cowboy_req:reply(200, #{}, <<>>, Req), State};
		_ ->
			case IsPartial of
				true ->
					%% Unknown session: ask the server for the full session.
					Response = #nonce_limiter_update_response{
						session_found = false },
					Bin = ar_serialize:nonce_limiter_update_response_to_binary(
						Response),
					{ok, cowboy_req:reply(202, #{}, Bin, Req), State};
				false ->
					%% First full session: derive the first step from the count.
					ets:insert(computed_output,
						{Seed, StepNumber - NSteps + 1, StepNumber}),
					{ok, cowboy_req:reply(200, #{}, <<>>, Req), State}
			end
	end.

%% Addresses of the two fake trusted VDF servers configured in setup.
vdf_server_1() ->
	{127,0,0,1,2001}.

vdf_server_2() ->
	{127,0,0,1,2002}.

%% @doc All recorded step outputs, in arrival order (keys are insertion-ordered
%% integers in an ordered_set, so foldl traverses them in order).
computed_steps() ->
	lists:reverse(ets:foldl(fun({_, Step, _}, Acc) -> [Step | Acc] end, [],
		computed_output)).

%% @doc All recorded upper bounds, in arrival order.
computed_upper_bounds() ->
	lists:reverse(ets:foldl(fun({_, _, UpperBound}, Acc) -> [UpperBound | Acc] end,
		[], computed_output)).

%% @doc Drain and return the step numbers of all mining tasks recorded by
%% mock_add_task(); clears the table so each assertion sees only new tasks.
%% NOTE(review): add_task is a bag and ets:foldl order over a bag is
%% unspecified - the order-sensitive assertions on this may be fragile.
mined_steps() ->
	Steps = lists:reverse(ets:foldl(
		fun({_Worker, _Task, Step}, Acc) -> [Step | Acc] end, [], add_task)),
	ets:delete_all_objects(add_task),
	Steps.
%% Event-listener loop: appends every computed_output nonce-limiter event to
%% the computed_output ETS table, keyed by arrival order.
computed_output() ->
	receive
		{event, nonce_limiter, {computed_output, Args}} ->
			{_SessionKey, _StepNumber, Output, UpperBound} = Args,
			%% The table grows by one row per event, so size + 1 is a fresh,
			%% monotonically increasing key that preserves arrival order.
			NextKey = ets:info(computed_output, size) + 1,
			ets:insert(computed_output, {NextKey, Output, UpperBound}),
			computed_output()
	end.

%% Convenience wrapper: push an update as if it came from vdf_server_1().
apply_external_update(SessionKey, ExistingSteps, StepNumber, IsPartial, PrevSessionKey) ->
	apply_external_update(SessionKey, ExistingSteps, StepNumber, IsPartial,
		PrevSessionKey, vdf_server_1()).

%% Build a #nonce_limiter_update{} for SessionKey whose step outputs are the
%% step numbers rendered as binaries, and apply it as coming from Peer.
apply_external_update(SessionKey, ExistingSteps, StepNumber, IsPartial,
		PrevSessionKey, Peer) ->
	{Seed, Interval, _Difficulty} = SessionKey,
	StepBinaries = [integer_to_binary(N) || N <- [StepNumber | ExistingSteps]],
	FakeSession = #vdf_session{
		upper_bound = Interval * 10,
		next_upper_bound = (Interval + 1) * 10,
		prev_session_key = PrevSessionKey,
		step_number = StepNumber,
		seed = Seed,
		steps = StepBinaries },
	ar_nonce_limiter:apply_external_update(
		#nonce_limiter_update{
			session_key = SessionKey,
			is_partial = IsPartial,
			session = FakeSession },
		Peer).

%% The session key of the node's current VDF session.
get_current_session_key() ->
	{Key, _Session} = ar_nonce_limiter:get_current_session(),
	Key.

%% Mock spec: capture every mining task's step number in the add_task table.
mock_add_task() ->
	{
		ar_mining_worker, add_task,
		fun(Worker, TaskType, Candidate) ->
			ets:insert(add_task,
				{Worker, TaskType, Candidate#mining_candidate.step_number})
		end
	}.

%% Mock spec: shrink the entropy reset frequency to 5 steps.
mock_reset_frequency() ->
	{
		ar_nonce_limiter, get_reset_frequency,
		fun() -> 5 end
	}.

%% Assert that the set of session keys equals the given list (order-free).
assert_sessions_equal(List, Set) ->
	?assertEqual(lists:sort(List), lists:sort(sets:to_list(Set))).



================================================
FILE: apps/arweave/test/ar_vdf_server_tests.erl
================================================
-module(ar_vdf_server_tests).

-export([init/2]).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

-import(ar_test_node, [assert_wait_until_height/2, post_block/2, send_new_block/2]).
%% -------------------------------------------------------------------------------------------------
%% Test Fixtures
%% -------------------------------------------------------------------------------------------------

%% @doc Create the computed_output table and snapshot both nodes' configs so
%% cleanup/1 can restore them after each test.
setup() ->
	ets:new(computed_output, [named_table, set, public]),
	{ok, Config} = arweave_config:get_env(),
	{ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	{Config, PeerConfig}.

%% @doc Restore both configs and drop the table created by setup/0.
cleanup({Config, PeerConfig}) ->
	arweave_config:set_env(Config),
	ar_test_node:remote_call(peer1, arweave_config, set_env, [PeerConfig]),
	ets:delete(computed_output).

%% -------------------------------------------------------------------------------------------------
%% Test Registration
%% -------------------------------------------------------------------------------------------------

%% @doc All vdf_server_push_test_ tests test a few things
%% 1. VDF server posts regular VDF updates to the client
%% 2. For partial updates (session doesn't change), each step number posted is 1 greater than
%%    the one before
%% 3. When the client responds that it doesn't have the session in a partial update, server
%%    should post the full session
%%
%% test_vdf_server_push_fast_block tests that the VDF server can handle receiving
%% a block that is ahead in the VDF chain: specifically:
%% When a block comes in that starts a new VDF session, the server should first post the
%% full previous session which should include all steps up to and including the
%% global_step_number of the block (it may also include additional "overflow" steps that
%% were computed before the block arrived). The server should not post the new session
%% until it has computed a step in that session.
%%
%% test_vdf_server_push_slow_block tests that the VDF server can handle receiving
%% a block that is behind in the VDF chain: specifically:
vdf_server_push_test_() ->
	{foreach, fun setup/0, fun cleanup/1,
		[
			ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
				fun test_vdf_server_push_fast_block/0, ?TEST_NODE_TIMEOUT),
			ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
				fun test_vdf_server_push_slow_block/0, ?TEST_NODE_TIMEOUT)
		]
	}.

%% @doc Similar to the vdf_server_push_test_ tests except we test the full end-to-end
%% flow where a VDF client has to validate a block with VDF information provided by
%% the VDF server.
vdf_client_test_() ->
	{foreach, fun setup/0, fun cleanup/1,
		[
			ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
				fun test_vdf_client_fast_block/0, ?TEST_NODE_TIMEOUT),
			ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
				fun test_vdf_client_fast_block_pull_interface/0, ?TEST_NODE_TIMEOUT),
			ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
				fun test_vdf_client_slow_block/0, ?TEST_NODE_TIMEOUT),
			ar_test_node:test_with_mocked_functions([mock_reset_frequency()],
				fun test_vdf_client_slow_block_pull_interface/0, ?TEST_NODE_TIMEOUT)
		]
	}.

serialize_test_() ->
	[
		{timeout, 120, fun test_serialize_update_format_2/0},
		{timeout, 120, fun test_serialize_update_format_3/0},
		{timeout, 120, fun test_serialize_update_format_4/0},
		{timeout, 120, fun test_serialize_response/0},
		{timeout, 120, fun test_serialize_response_compatibility/0}
	].
%% -------------------------------------------------------------------------------------------------
%% Tests
%% -------------------------------------------------------------------------------------------------

%%
%% vdf_server_push_test_
%%

%% @doc The VDF server (main) receives a block that is AHEAD of it in the VDF chain.
%% It must finish posting the old session up to the block's step number before
%% switching to the new session. This test module itself acts as the VDF client:
%% the cowboy listener below routes pushes into handle_update/3, which records
%% {Seed, FirstStepNumber, LatestStepNumber} per session in the computed_output table.
test_vdf_server_push_fast_block() ->
	VDFPort = ar_test_node:get_unused_port(),
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	%% Let peer1 get ahead of main in the VDF chain
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []),
	timer:sleep(3000),
	{ok, Config} = arweave_config:get_env(),
	_ = ar_test_node:start(
		B0,
		ar_wallet:to_address(ar_wallet:new_keyfile()),
		Config#config{
			nonce_limiter_client_peers = [ "127.0.0.1:" ++ integer_to_list(VDFPort) ]
		}
	),
	%% Setup a server to listen for VDF pushes
	Routes = [{"/[...]", ar_vdf_server_tests, []}],
	{ok, _} = cowboy:start_clear(
		ar_vdf_server_test_listener,
		[{port, VDFPort}],
		#{ env => #{ dispatch => cowboy_router:compile([{'_', Routes}]) } }
	),
	%% Mine a block that will be ahead of main in the VDF chain
	ar_test_node:mine(peer1),
	BI = assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	%% Post the block to main which will cause it to validate VDF for the block under
	%% the B0 session and then begin using the (later) B1 VDF session going forward
	ok = ar_events:subscribe(block),
	post_block(B1, valid),
	Seed0 = B0#block.nonce_limiter_info#nonce_limiter_info.next_seed,
	Seed1 = B1#block.nonce_limiter_info#nonce_limiter_info.next_seed,
	StepNumber1 = ar_block:vdf_step_number(B1),
	ar_util:do_until(
		fun() ->
			%% Wait until both VDF sessions are present and we apply VDF up to the
			%% block's step number.
			case {ets:lookup(computed_output, Seed0), ets:lookup(computed_output, Seed1)} of
				{[{Seed0, _, LatestStepNumber}], [{Seed1, _, _}]} ->
					LatestStepNumber >= StepNumber1;
				_ ->
					false
			end
		end,
		200,
		20_000
	),
	[{Seed0, _, LatestStepNumber0}] = get_computed_output(Seed0),
	[{Seed1, _FirstStepNumber1, _}] = get_computed_output(Seed1),
	?assertEqual(2, ets:info(computed_output, size), "VDF server did not post 2 sessions"),
	?assert(LatestStepNumber0 >= StepNumber1,
		"VDF server did not post the full Session0 when starting Session1"),
	cowboy:stop_listener(ar_vdf_server_test_listener).

%% @doc The VDF server (main) receives a block that is BEHIND it in the VDF chain.
%% After switching, only the block's (B1) session should keep progressing; the old
%% (B0) session must be frozen. Checked by sampling the computed_output table twice.
test_vdf_server_push_slow_block() ->
	VDFPort = ar_test_node:get_unused_port(),
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	{ok, Config} = arweave_config:get_env(),
	_ = ar_test_node:start(
		B0,
		ar_wallet:to_address(ar_wallet:new_keyfile()),
		Config#config{
			nonce_limiter_client_peers = [ "127.0.0.1:" ++ integer_to_list(VDFPort) ]
		}
	),
	%% Let main get ahead of peer1 in the VDF chain
	timer:sleep(3000),
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []),
	%% Setup a server to listen for VDF pushes
	Routes = [{"/[...]", ar_vdf_server_tests, []}],
	{ok, _} = cowboy:start_clear(
		ar_vdf_server_test_listener,
		[{port, VDFPort}],
		#{ env => #{ dispatch => cowboy_router:compile([{'_', Routes}]) } }
	),
	%% Mine a block that will be behind main in the VDF chain
	ar_test_node:mine(peer1),
	BI = assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	%% Post the block to main which will cause it to validate VDF for the block under
	%% the B0 session and then begin using the (earlier) B1 VDF session going forward
	ok = ar_events:subscribe(block),
	post_block(B1, valid),
	timer:sleep(3000),
	Seed0 = B0#block.nonce_limiter_info#nonce_limiter_info.next_seed,
	Seed1 = B1#block.nonce_limiter_info#nonce_limiter_info.next_seed,
	[{Seed0, _, LatestStepNumber0}] = get_computed_output(Seed0),
	[{Seed1, FirstStepNumber1, LatestStepNumber1}] = get_computed_output(Seed1),
	?assert(LatestStepNumber0 > FirstStepNumber1,
		"Session0 should have started later than Session1"),
	timer:sleep(3000),
	[{Seed0, _, NewLatestStepNumber0}] = get_computed_output(Seed0),
	[{Seed1, _, NewLatestStepNumber1}] = get_computed_output(Seed1),
	?assertEqual(LatestStepNumber0, NewLatestStepNumber0, "Session0 should not have progressed"),
	?assert(NewLatestStepNumber1 > LatestStepNumber1, "Session1 should have progressed"),
	cowboy:stop_listener(ar_vdf_server_test_listener).

%%
%% vdf_client_test_
%%

%% @doc End-to-end push flow: peer1 is restarted as a VDF client of main. A block ahead
%% of main's VDF chain must be un-validatable by the client until main (the VDF server)
%% learns about the new session and pushes it.
test_vdf_client_fast_block() ->
	ar_test_node:stop(),
	{ok, Config} = arweave_config:get_env(),
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	PeerAddress = ar_wallet:to_address(
		ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, [])),
	%% Let peer1 get ahead of main in the VDF chain
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []),
	timer:sleep(5_000),
	%% Mine a block that will be ahead of main in the VDF chain
	ar_test_node:mine(peer1),
	BI = assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	ar_test_node:stop(peer1),
	%% Restart peer1 as a VDF client
	{ok, PeerConfig} = ar_test_node:get_config(peer1),
	_ = ar_test_node:start_peer(peer1, B0, PeerAddress, PeerConfig#config{
		nonce_limiter_server_trusted_peers = [
			ar_util:format_peer(ar_test_node:peer_ip(main))
		] }),
	%% Isolate the client-path assertion below: when B1 is posted directly to peer1,
	%% peer1 must not relay it to main before we explicitly post it to main.
	ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []),
	%% Start main as a VDF server
	ar_test_node:stop(),
	_ = ar_test_node:start(B0, ar_wallet:to_address(ar_wallet:new_keyfile()),
		Config#config{
			nonce_limiter_client_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(peer1))
			] }),
	%% Post the block to the VDF client. It won't be able to validate it since the VDF server
	%% isn't aware of the new VDF session yet. Also, it cannot gossip it to main because
	%% we disabled gossip.
	send_new_block(ar_test_node:peer_ip(peer1), B1),
	timer:sleep(5_000),
	?assertEqual(1, length(ar_test_node:remote_call(peer1, ar_node, get_blocks, [])),
		"VDF client shouldn't be able to validate the block until the VDF server posts a "
		"new VDF session"),
	%% Re-enable p2p communication - main will receive B1 and peer1 is
	%% expected to sync and validate it.
	ar_test_node:connect_to_peer(peer1),
	%% After the VDF server receives the block, it should push the old and new VDF sessions
	%% to the VDF client allowing it to validate the block.
	send_new_block(ar_test_node:peer_ip(main), B1),
	%% If all is right, the VDF server should push the old and new VDF sessions allowing
	%% the VDF client to finally validate the block.
	BI = assert_wait_until_height(peer1, 1).
%% @doc Same fast-block scenario as test_vdf_client_fast_block/0 but the client is
%% started with the vdf_server_pull feature enabled, i.e. peer1 polls main for VDF
%% updates instead of relying solely on pushes.
test_vdf_client_fast_block_pull_interface() ->
	{ok, Config} = arweave_config:get_env(),
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	PeerAddress = ar_wallet:to_address(
		ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, [])),
	%% Let peer1 get ahead of main in the VDF chain
	_ = ar_test_node:start_peer(peer1, B0),
	_ = ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []),
	timer:sleep(20000),
	%% Mine a block that will be ahead of main in the VDF chain
	ar_test_node:mine(peer1),
	BI = assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	ar_test_node:stop(peer1),
	%% Restart peer1 as a VDF client
	{ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	_ = ar_test_node:start_peer(peer1, B0, PeerAddress,
		PeerConfig#config{
			nonce_limiter_server_trusted_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(main))
			],
			enable = [vdf_server_pull | PeerConfig#config.enable]
		}
	),
	%% Start the main as a VDF server
	_ = ar_test_node:start(
		B0,
		ar_wallet:to_address(ar_wallet:new_keyfile()),
		Config#config{
			nonce_limiter_client_peers = [
				ar_util:format_peer(ar_test_node:peer_ip(peer1))
			]
		}
	),
	ar_test_node:connect_to_peer(peer1),
	%% Post the block to the VDF client. It won't be able to validate it since the VDF server
	%% isn't aware of the new VDF session yet.
	send_new_block(ar_test_node:peer_ip(peer1), B1),
	timer:sleep(10000),
	?assertEqual(1, length(ar_test_node:remote_call(peer1, ar_node, get_blocks, [])),
		"VDF client shouldn't be able to validate the block until the VDF server posts a "
		"new VDF session"),
	%% After the VDF server receives the block, it should push the old and new VDF sessions
	%% to the VDF client allowing it to validate the block.
	send_new_block(ar_test_node:peer_ip(main), B1),
	%% If all is right, the VDF server should push the old and new VDF sessions allowing
	%% the VDF client to finally validate the block.
	BI = assert_wait_until_height(peer1, 1).

%% @doc End-to-end slow-block flow: main (the VDF server) is already ahead of the
%% mined block, so the VDF client should be able to validate it immediately.
test_vdf_client_slow_block() ->
	{ok, Config} = arweave_config:get_env(),
	{_, Pub} = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	PeerAddress = ar_wallet:to_address(
		ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, [])),
	%% Let peer1 get ahead of main in the VDF chain
	_ = ar_test_node:start_peer(peer1, B0),
	ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []),
	%% Mine a block that will be ahead of main in the VDF chain
	ar_test_node:mine(peer1),
	BI = assert_wait_until_height(peer1, 1),
	B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]),
	ar_test_node:stop(peer1),
	%% Restart peer1 as a VDF client
	{ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []),
	_ = ar_test_node:start_peer(peer1, B0, PeerAddress,
		PeerConfig#config{
			nonce_limiter_server_trusted_peers = [
				"127.0.0.1:" ++ integer_to_list(Config#config.port)
			]
		}
	),
	%% Start the main as a VDF server
	_ = ar_test_node:start(
		B0,
		ar_wallet:to_address(ar_wallet:new_keyfile()),
		Config#config{
			nonce_limiter_client_peers = [
				"127.0.0.1:" ++ integer_to_list(ar_test_node:peer_port(peer1))
			]
		}
	),
	ar_test_node:connect_to_peer(peer1),
	timer:sleep(10000),
	%% Post the block to the VDF client, it should validate it "immediately" since the
	%% VDF server is ahead of the block in the VDF chain.
	send_new_block(ar_test_node:peer_ip(peer1), B1),
	BI = assert_wait_until_height(peer1, 1).
test_vdf_client_slow_block_pull_interface() -> {ok, Config} = arweave_config:get_env(), {_, Pub} = ar_wallet:new(), [B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]), PeerAddress = ar_wallet:to_address(ar_test_node:remote_call(peer1, ar_wallet, new_keyfile, [])), %% Let peer1 get ahead of main in the VDF chain _ = ar_test_node:start_peer(peer1, B0), ar_test_node:remote_call(peer1, ar_http, block_peer_connections, []), %% Mine a block that will be ahead of main in the VDF chain ar_test_node:mine(peer1), BI = assert_wait_until_height(peer1, 1), B1 = ar_test_node:remote_call(peer1, ar_storage, read_block, [hd(BI)]), ar_test_node:stop(peer1), %% Restart peer1 as a VDF client {ok, PeerConfig} = ar_test_node:remote_call(peer1, arweave_config, get_env, []), _ = ar_test_node:start_peer(peer1, B0, PeerAddress, PeerConfig#config{ nonce_limiter_server_trusted_peers = [ "127.0.0.1:" ++ integer_to_list(Config#config.port) ], enable = [vdf_server_pull | PeerConfig#config.enable] } ), %% Start the main as a VDF server {ok, Config} = arweave_config:get_env(), _ = ar_test_node:start( B0, ar_wallet:to_address(ar_wallet:new_keyfile()), Config#config{ nonce_limiter_client_peers = [ "127.0.0.1:" ++ integer_to_list(ar_test_node:peer_port(peer1)) ] } ), ar_test_node:connect_to_peer(peer1), timer:sleep(10000), %% Post the block to the VDF client, it should validate it "immediately" since the %% VDF server is ahead of the block in the VDF chain. send_new_block(ar_test_node:peer_ip(peer1), B1), BI = assert_wait_until_height(peer1, 1). 
%% %% serialize_test_ %% test_serialize_update_format_2() -> SessionKey0 = {crypto:strong_rand_bytes(48), 0, 1}, SessionKey1 = {crypto:strong_rand_bytes(48), 1, 1}, Checkpoints = [crypto:strong_rand_bytes(32) || _ <- lists:seq(1, 25)], Update = #nonce_limiter_update{ session_key = SessionKey1, is_partial = true, session = #vdf_session{ step_checkpoints_map = #{ 1 => Checkpoints }, upper_bound = 1, next_upper_bound = 1, prev_session_key = SessionKey0, step_number = 1, seed = element(1, SessionKey1), steps = [crypto:strong_rand_bytes(32)] } }, Binary = ar_serialize:nonce_limiter_update_to_binary(2, Update), ?assertEqual({ok, Update}, ar_serialize:binary_to_nonce_limiter_update(2, Binary)). test_serialize_update_format_3() -> SessionKey0 = {crypto:strong_rand_bytes(48), 0, 1}, SessionKey1 = {crypto:strong_rand_bytes(48), 1, 1}, Checkpoints = [crypto:strong_rand_bytes(32) || _ <- lists:seq(1, 25)], Update = #nonce_limiter_update{ session_key = SessionKey1, is_partial = true, session = #vdf_session{ step_checkpoints_map = #{ 1 => Checkpoints }, upper_bound = 1, next_upper_bound = 1, prev_session_key = SessionKey0, step_number = 1, seed = element(1, SessionKey1), steps = [crypto:strong_rand_bytes(32)] } }, Binary = ar_serialize:nonce_limiter_update_to_binary(3, Update), ?assertEqual({ok, Update}, ar_serialize:binary_to_nonce_limiter_update(3, Binary)). 
test_serialize_update_format_4() -> SessionKey0 = {crypto:strong_rand_bytes(48), 0, 1}, SessionKey1 = {crypto:strong_rand_bytes(48), 1, 1}, Checkpoints = [crypto:strong_rand_bytes(32) || _ <- lists:seq(1, 25)], Update = #nonce_limiter_update{ session_key = SessionKey1, is_partial = true, session = #vdf_session{ step_checkpoints_map = #{ 1 => Checkpoints }, upper_bound = 1, next_upper_bound = 1, prev_session_key = SessionKey0, vdf_difficulty = 10000, next_vdf_difficulty = 1, step_number = 1, seed = element(1, SessionKey1), steps = [crypto:strong_rand_bytes(32)] } }, Binary = ar_serialize:nonce_limiter_update_to_binary(4, Update), ?assertEqual({ok, Update}, ar_serialize:binary_to_nonce_limiter_update(4, Binary)). %% @doc test serializing and deserializing a #nonce_limiter_update_response when the client %% is running the same node version as the server. test_serialize_response() -> ResponseA = #nonce_limiter_update_response{}, BinaryA = ar_serialize:nonce_limiter_update_response_to_binary(ResponseA), ?assertEqual({ok, ResponseA}, ar_serialize:binary_to_nonce_limiter_update_response(BinaryA)), ResponseB = #nonce_limiter_update_response{ session_found = false, step_number = 8589934593, postpone = 255, format = 2 }, BinaryB = ar_serialize:nonce_limiter_update_response_to_binary(ResponseB), ?assertEqual({ok, ResponseB}, ar_serialize:binary_to_nonce_limiter_update_response(BinaryB)). %% @doc test serializing and deserializing a #nonce_limiter_update_response when the client %% is running an older node version than the server. 
test_serialize_response_compatibility() -> BinaryA = << 0:8, 1:8, 5:8 >>, ResponseA = #nonce_limiter_update_response{ session_found = false, step_number = 5, postpone = 0, format = 1 }, ?assertEqual({ok, ResponseA}, ar_serialize:binary_to_nonce_limiter_update_response(BinaryA)), BinaryB = << 1:8, 2:8, 511:16, 120:8 >>, ResponseB = #nonce_limiter_update_response{ session_found = true, step_number = 511, postpone = 120, format = 1 }, ?assertEqual({ok, ResponseB}, ar_serialize:binary_to_nonce_limiter_update_response(BinaryB)). %% ------------------------------------------------------------------------------------------------- %% Helper Functions %% ------------------------------------------------------------------------------------------------- init(Req, State) -> SplitPath = ar_http_iface_server:split_path(cowboy_req:path(Req)), handle(SplitPath, Req, State). handle([<<"vdf">>], Req, State) -> {ok, Body, _} = ar_http_req:body(Req, ?MAX_BODY_SIZE), case ar_serialize:binary_to_nonce_limiter_update(2, Body) of {ok, Update} -> handle_update(Update, Req, State); {error, _} -> Response = #nonce_limiter_update_response{ format = 2 }, Bin = ar_serialize:nonce_limiter_update_response_to_binary(Response), {ok, cowboy_req:reply(202, #{}, Bin, Req), State} end. 
handle_update(Update, Req, State) -> {Seed, _, _} = Update#nonce_limiter_update.session_key, IsPartial = Update#nonce_limiter_update.is_partial, Session = Update#nonce_limiter_update.session, StepNumber = Session#vdf_session.step_number, NSteps = length(Session#vdf_session.steps), Checkpoints = maps:get(StepNumber, Session#vdf_session.step_checkpoints_map), UpdateOutput = hd(Checkpoints), SessionOutput = hd(Session#vdf_session.steps), ?assertNotEqual(Checkpoints, Session#vdf_session.steps), %% #nonce_limiter_update.checkpoints should be the checkpoints of the last step so %% the head of checkpoints should match the head of the session's steps ?assertEqual(UpdateOutput, SessionOutput), case ets:lookup(computed_output, Seed) of [{Seed, FirstStepNumber, LatestStepNumber}] -> %% Normally a partial VDF update should always increase by 1, but the VDF_DIFFICULTY %% is so low in tests that there can be a race condition which causes a partial %% update to repeat a VDF step. This assertion allows for that scenario in order %% to improve test reliability. ?assert( not IsPartial orelse StepNumber == LatestStepNumber + 1 orelse StepNumber == LatestStepNumber, lists:flatten(io_lib:format( "Partial VDF update has step gap, " "StepNumber: ~p, LatestStepNumber: ~p", [StepNumber, LatestStepNumber]))), ets:insert(computed_output, {Seed, FirstStepNumber, StepNumber}), {ok, cowboy_req:reply(200, #{}, <<>>, Req), State}; _ -> case IsPartial of true -> Response = #nonce_limiter_update_response{ session_found = false }, Bin = ar_serialize:nonce_limiter_update_response_to_binary(Response), {ok, cowboy_req:reply(202, #{}, Bin, Req), State}; false -> ets:insert(computed_output, {Seed, StepNumber - NSteps + 1, StepNumber}), {ok, cowboy_req:reply(200, #{}, <<>>, Req), State} end end. get_computed_output(Seed) -> ar_util:do_until( fun() -> case ets:lookup(computed_output, Seed) of [] -> false; _ -> true end end, 1000, 10_000 ), ets:lookup(computed_output, Seed). 
mock_reset_frequency() -> { ar_nonce_limiter, get_reset_frequency, fun() -> 5 end }. ================================================ FILE: apps/arweave/test/ar_vdf_tests.erl ================================================ -module(ar_vdf_tests). -include_lib("eunit/include/eunit.hrl"). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave/include/ar_vdf.hrl"). -include_lib("arweave/include/ar_pricing.hrl"). -define(ENCODED_PREV_OUTPUT, <<"f_z7RLug8etm3SrmRf-xPwXEL0ZQ_xHng2A5emRDQBw">>). -define(RESET_SEED, <<"f_z7RLug8etm3SrmRf-xPwXEL0ZQ_xHng2A5emRDQBw">>). -define(MAX_THREAD_COUNT, 4). %-define(TEST_VDF_DIFFICULTY, 15000000 div 25). -define(TEST_VDF_DIFFICULTY, 10). %%%=================================================================== %%% utils %%%=================================================================== break_byte(Buf, Pos)-> Head = binary:part(Buf, 0, Pos), Tail = binary:part(Buf, Pos+1, size(Buf)-Pos-1), ChangedByte = binary:at(Buf,Pos) bxor 1, <>. reset_mix(PrevOutput, ResetSeed) -> crypto:hash(sha256, << PrevOutput/binary, ResetSeed/binary >>). %%%=================================================================== vdf_basic_test_() -> {timeout, 1000, fun test_vdf_basic_compute_verify_/0}. 
% no reset test_vdf_basic_compute_verify_() -> StartStepNumber1 = 2, StartStepNumber2 = 3, StartSalt1 = ar_vdf:step_number_to_salt_number(StartStepNumber1-1), StartSalt2 = ar_vdf:step_number_to_salt_number(StartStepNumber2-1), PrevOutput = ar_util:decode(?ENCODED_PREV_OUTPUT), ResetSeed = ar_util:decode(?RESET_SEED), ResetSalt = -1, {ok, Output1, Checkpoints1} = ar_vdf:compute2(StartStepNumber1, PrevOutput, ?TEST_VDF_DIFFICULTY), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse(Checkpoints1)), {ok, _Output2, Checkpoints2} = ar_vdf:compute2(StartStepNumber2, Output1, ?TEST_VDF_DIFFICULTY), assert_verify(StartSalt2, ResetSalt, Output1, 1, lists:reverse(Checkpoints2)), Hashes = lists:reverse(Checkpoints1) ++ lists:reverse(Checkpoints2), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, Hashes), % test damage on any byte, arg (aka negative tests) ok = test_vdf_basic_compute_verify_break_(StartSalt1, PrevOutput, 1, Hashes, ResetSalt, ResetSeed), ok. test_vdf_basic_compute_verify_break_(StartSalt, PrevOutput, StepBetweenHashCount, Hashes, ResetSalt, ResetSeed)-> test_vdf_basic_compute_verify_break_(StartSalt, PrevOutput, StepBetweenHashCount, Hashes, ResetSalt, ResetSeed, size(iolist_to_binary(Hashes))-1). test_vdf_basic_compute_verify_break_(_StartSalt, _PrevOutput, _StepBetweenHashCount, _Hashes, _ResetSalt, _ResetSeed, 0)-> ok; test_vdf_basic_compute_verify_break_(StartSalt, PrevOutput, StepBetweenHashCount, Hashes, ResetSalt, ResetSeed, BreakPos)-> BufferHash = iolist_to_binary(Hashes), BufferHashBroken = break_byte(BufferHash, BreakPos), HashesBroken = ar_vdf:checkpoint_buffer_to_checkpoints(BufferHashBroken), false = ar_vdf:verify(StartSalt, PrevOutput, StepBetweenHashCount, HashesBroken, ResetSalt, ResetSeed, ?MAX_THREAD_COUNT, ?TEST_VDF_DIFFICULTY), test_vdf_basic_compute_verify_break_(StartSalt, PrevOutput, StepBetweenHashCount, Hashes, ResetSalt, ResetSeed, BreakPos-1). 
assert_verify(StartSalt, ResetSalt, Output, NumCheckpointsBetweenHashes, Checkpoints) -> ResetSeed = ar_util:decode(?RESET_SEED), ?assertEqual( {true, iolist_to_binary(Checkpoints)}, ar_vdf:verify( StartSalt, Output, NumCheckpointsBetweenHashes, Checkpoints, ResetSalt, ResetSeed, ?MAX_THREAD_COUNT, ?TEST_VDF_DIFFICULTY) ). vdf_reset_test_() -> {timeout, 1000, fun test_vdf_reset_verify_/0}. test_vdf_reset_verify_() -> ok = test_vdf_reset_0_(), ok = test_vdf_reset_1_(), ok = test_vdf_reset_mid_checkpoint_(), ok. test_vdf_reset_0_() -> StartStepNumber1 = 2, StartStepNumber2 = 3, StartSalt1 = ar_vdf:step_number_to_salt_number(StartStepNumber1-1), StartSalt2 = ar_vdf:step_number_to_salt_number(StartStepNumber2-1), PrevOutput = ar_util:decode(?ENCODED_PREV_OUTPUT), ResetSeed = ar_util:decode(?RESET_SEED), ResetSalt = StartSalt1, MixOutput = reset_mix(PrevOutput, ResetSeed), {ok, Output1, Checkpoints1} = ar_vdf:compute2(StartStepNumber1, MixOutput, ?TEST_VDF_DIFFICULTY), {ok, _Output2, Checkpoints2} = ar_vdf:compute2(StartStepNumber2, Output1, ?TEST_VDF_DIFFICULTY), % partial verify should work assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse(Checkpoints1)), assert_verify(StartSalt2, ResetSalt, Output1, 1, lists:reverse(Checkpoints2)), Hashes3 = lists:sublist(lists:reverse(Checkpoints2), 1, ?VDF_CHECKPOINT_COUNT_IN_STEP + 1), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse(Checkpoints1) ++ Hashes3), Hashes4 = lists:reverse(Checkpoints1) ++ lists:reverse(Checkpoints2), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, Hashes4), ok. 
test_vdf_reset_1_() -> StartStepNumber1 = 2, StartStepNumber2 = 3, StartSalt1 = ar_vdf:step_number_to_salt_number(StartStepNumber1-1), StartSalt2 = ar_vdf:step_number_to_salt_number(StartStepNumber2-1), PrevOutput = ar_util:decode(?ENCODED_PREV_OUTPUT), ResetSeed = ar_util:decode(?RESET_SEED), ResetSalt = StartSalt2, {ok, Output1, Checkpoints1} = ar_vdf:compute2(StartStepNumber1, PrevOutput, ?TEST_VDF_DIFFICULTY), MixOutput = reset_mix(Output1, ResetSeed), {ok, _Output2, Checkpoints2} = ar_vdf:compute2(StartStepNumber2, MixOutput, ?TEST_VDF_DIFFICULTY), % partial verify should work assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse(Checkpoints1)), assert_verify(StartSalt2, ResetSalt, Output1, 1, lists:reverse(Checkpoints2)), Hash1 = lists:last(Checkpoints2), Hash2 = lists:nth(length(Checkpoints2) - 1, Checkpoints2), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse([Hash1 | Checkpoints1])), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse([Hash2, Hash1 | Checkpoints1])), Hashes5 = lists:reverse(Checkpoints1) ++ lists:reverse(Checkpoints2), assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, Hashes5), ok. 
%% @doc Reset falls INSIDE the first step's checkpoint sequence (10 checkpoints in).
%% The step is computed in two NIF calls around the mix, the checkpoint buffer is
%% reassembled by hand, and verification is exercised as in the other reset tests.
%% The same two-part computation is then repeated with the fused and hiopt NIF
%% variants, asserting (by re-matching the already-bound outputs) that all three
%% implementations produce identical results.
test_vdf_reset_mid_checkpoint_() ->
	StartStepNumber1 = 2,
	StartStepNumber2 = 3,
	StartSalt1 = ar_vdf:step_number_to_salt_number(StartStepNumber1-1),
	StartSalt2 = ar_vdf:step_number_to_salt_number(StartStepNumber2-1),
	PrevOutput = ar_util:decode(?ENCODED_PREV_OUTPUT),
	ResetSeed = ar_util:decode(?RESET_SEED),
	% means inside 1 iteration
	ResetSaltFlat = 10,
	ResetSalt = StartSalt1 + ResetSaltFlat,
	Salt1 = << StartSalt1:256 >>,
	{ok, Output1Part1, LastStepCheckpoints1Part1} = ar_vdf_nif:vdf_sha2_nif(Salt1,
		PrevOutput, ResetSaltFlat-1, 0, ?TEST_VDF_DIFFICULTY),
	MixOutput = reset_mix(Output1Part1, ResetSeed),
	Salt2 = << ResetSalt:256 >>,
	{ok, Output1Part2, LastStepCheckpoints1Part2} = ar_vdf_nif:vdf_sha2_nif(Salt2,
		MixOutput, ?VDF_CHECKPOINT_COUNT_IN_STEP-ResetSaltFlat-1, 0, ?TEST_VDF_DIFFICULTY),
	Output1 = Output1Part2,
	%% Re-assemble the full step's checkpoint buffer: each part's checkpoints are
	%% followed by that part's final output.
	LastStepCheckpoints1 = << LastStepCheckpoints1Part1/binary, Output1Part1/binary,
		LastStepCheckpoints1Part2/binary, Output1Part2/binary >>,
	Checkpoints1 = ar_vdf:checkpoint_buffer_to_checkpoints(LastStepCheckpoints1),
	{ok, _Output2, Checkpoints2} = ar_vdf:compute2(StartStepNumber2, Output1,
		?TEST_VDF_DIFFICULTY),
	% partial verify should work
	assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse(Checkpoints1)),
	assert_verify(StartSalt2, ResetSalt, Output1, 1, lists:reverse(Checkpoints2)),
	Hash1 = lists:last(Checkpoints2),
	Hash2 = lists:nth(length(Checkpoints2) - 1, Checkpoints2),
	assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, lists:reverse([Hash1 | Checkpoints1])),
	assert_verify(StartSalt1, ResetSalt, PrevOutput, 1,
		lists:reverse([Hash2, Hash1 | Checkpoints1])),
	Hashes5 = lists:reverse(Checkpoints1) ++ lists:reverse(Checkpoints2),
	assert_verify(StartSalt1, ResetSalt, PrevOutput, 1, Hashes5),
	% test vdf_fused
	{ok, Output1Part1, LastStepCheckpoints1Part1} = ar_vdf_nif:vdf_sha2_fused_nif(Salt1,
		PrevOutput, ResetSaltFlat-1, 0, ?TEST_VDF_DIFFICULTY),
	{ok, Output1Part2, LastStepCheckpoints1Part2} = ar_vdf_nif:vdf_sha2_fused_nif(Salt2,
		MixOutput, ?VDF_CHECKPOINT_COUNT_IN_STEP-ResetSaltFlat-1, 0, ?TEST_VDF_DIFFICULTY),
	% test vdf_hiopt
	{ok, Output1Part1, LastStepCheckpoints1Part1} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt1,
		PrevOutput, ResetSaltFlat-1, 0, ?TEST_VDF_DIFFICULTY),
	{ok, Output1Part2, LastStepCheckpoints1Part2} = ar_vdf_nif:vdf_sha2_hiopt_nif(Salt2,
		MixOutput, ?VDF_CHECKPOINT_COUNT_IN_STEP-ResetSaltFlat-1, 0, ?TEST_VDF_DIFFICULTY),
	ok.

%% @doc Build a synthetic #block positioned one block before a VDF difficulty
%% retarget boundary, with flat reward/block-time histories, for the
%% compute_next_vdf_difficulty tests below.
compute_next_vdf_difficulty_test_block() ->
	Height1 = max(ar_block_time_history:history_length(), ?REWARD_HISTORY_BLOCKS),
	Height2 = Height1 + ?VDF_DIFFICULTY_RETARGET - Height1 rem ?VDF_DIFFICULTY_RETARGET,
	#block{
		height = Height2-1,
		nonce_limiter_info = #nonce_limiter_info{
			vdf_difficulty = 10000,
			next_vdf_difficulty = 10000
		},
		reward_history = lists:duplicate(?REWARD_HISTORY_BLOCKS, {<<>>, 10000, 10, 1}),
		block_time_history = lists:duplicate(ar_block_time_history:history_length(),
			{129, 135, 1}),
		price_per_gib_minute = 10000,
		scheduled_price_per_gib_minute = 15000
	}.

%% @doc Pin the pre-2.7.1 retarget result for the synthetic block (expects 10465).
compute_next_vdf_difficulty_2_7_test_()->
	ar_test_node:test_with_mocked_functions(
		[{ar_fork, height_2_6, fun() -> -1 end},
			{ar_fork, height_2_7, fun() -> -1 end},
			{ar_fork, height_2_7_1, fun() -> infinity end}],
		fun() ->
			B = compute_next_vdf_difficulty_test_block(),
			10465 = ar_block:compute_next_vdf_difficulty(B),
			ok
		end).

%% @doc Pin the post-2.7.1 retarget result for the same block (expects 10046).
compute_next_vdf_difficulty_2_7_1_test_()->
	ar_test_node:test_with_mocked_functions(
		[{ar_fork, height_2_6, fun() -> -1 end},
			{ar_fork, height_2_7, fun() -> -1 end},
			{ar_fork, height_2_7_1, fun() -> -1 end}],
		fun() ->
			B = compute_next_vdf_difficulty_test_block(),
			10046 = ar_block:compute_next_vdf_difficulty(B),
			ok
		end).



================================================
FILE: apps/arweave/test/ar_wallet_tests.erl
================================================
-module(ar_wallet_tests).

-include_lib("eunit/include/eunit.hrl").

wallet_sign_verify_test_() ->
	{timeout, 30, fun test_wallet_sign_verify/0}.
test_wallet_sign_verify() -> TestWalletSignVerify = fun(KeyTypeEnc) -> fun() -> KeyType = ar_serialize:binary_to_signature_type(KeyTypeEnc), {Priv, Pub} = ar_wallet:new(KeyType), TestData = <<"TEST DATA">>, Signature = ar_wallet:sign(Priv, TestData), true = ar_wallet:verify(Pub, TestData, Signature) end end, [ {"PS256_65537", TestWalletSignVerify(<<"PS256_65537">>)}, {"ES256K", TestWalletSignVerify(<<"ES256K">>)}, {"Ed25519", TestWalletSignVerify(<<"Ed25519">>)} ]. invalid_signature_test_() -> TestInvalidSignature = fun(KeyTypeEnc) -> fun() -> KeyType = ar_serialize:binary_to_signature_type(KeyTypeEnc), {Priv, Pub} = ar_wallet:new(KeyType), TestData = <<"TEST DATA">>, << _:32, Signature/binary >> = ar_wallet:sign(Priv, TestData), false = ar_wallet:verify(Pub, TestData, << 0:32, Signature/binary >>) end end, [ {"PS256_65537", TestInvalidSignature(<<"PS256_65537">>)}, {"ES256K", TestInvalidSignature(<<"ES256K">>)}, {"Ed25519", TestInvalidSignature(<<"Ed25519">>)} ]. %% @doc Check generated keyfiles can be retrieved. generate_keyfile_test_() -> GenerateKeyFile = fun(KeyTypeEnc) -> fun() -> KeyType = ar_serialize:binary_to_signature_type(KeyTypeEnc), {Priv, Pub} = ar_wallet:new_keyfile(KeyType), FileName = ar_wallet:wallet_filepath(ar_util:encode(ar_wallet:to_address(Pub))), {Priv, Pub} = ar_wallet:load_keyfile(FileName) end end, [ {"PS256_65537", GenerateKeyFile(<<"PS256_65537">>)}, {"ES256K", GenerateKeyFile(<<"ES256K">>)}, {"Ed25519", GenerateKeyFile(<<"Ed25519">>)} ]. load_keyfile_test_() -> TestLoadKeyfile = fun(KeyTypeEnc) -> fun() -> {Priv, Pub = {KeyType, _}} = ar_wallet:load_keyfile(wallet_fixture_path(KeyTypeEnc)), KeyType = ar_serialize:binary_to_signature_type(KeyTypeEnc), TestData = <<"TEST DATA">>, Signature = ar_wallet:sign(Priv, TestData), true = ar_wallet:verify(Pub, TestData, Signature) end end, [ {"PS256_65537", TestLoadKeyfile(<<"PS256_65537">>)}, {"ES256K", TestLoadKeyfile(<<"ES256K">>)}, {"Ed25519", TestLoadKeyfile(<<"Ed25519">>)} ]. 
wallet_fixture_path(KeyTypeEnc) -> {ok, Cwd} = file:get_cwd(), filename:join(Cwd, "./apps/arweave/test/ar_wallet_tests_" ++ binary_to_list(KeyTypeEnc) ++ "_fixture.json"). ================================================ FILE: apps/arweave/test/ar_wallet_tests_ES256K_fixture.json ================================================ { "kty":"EC", "crv":"secp256k1", "x":"dWCvM4fTdeM0KmloF57zxtBPXTOythHPMm1HCLrdd3A", "y":"36uMVGM7hnw-N6GnjFcihWE3SkrhMLzzLCdPMXPEXlA", "d":"rhYFsBPF9q3-uZThy7B3c4LDF_8wnozFUAEm5LLC4Zw" } ================================================ FILE: apps/arweave/test/ar_wallet_tests_Ed25519_fixture.json ================================================ { "kty":"OKP", "alg": "EdDSA", "crv":"Ed25519", "x":"11qYAYKxCrfVS_7TyWQHOg7hcvPapiMlrwIaaPcHURo", "d":"nWGxne_9WmC6hEr0kuwsxERJxWl7MmkZcDusAxyuf2A" } ================================================ FILE: apps/arweave/test/ar_wallet_tests_PS256_65537_fixture.json ================================================ { "kty":"RSA", "e":"AQAB", "n":"kmM4O08BJB85RbxfQ2nkka9VNO6Czm2Tc_IGQNYCTSXRzOc6W9bHRrlZ_eDhWO0OdfaRalgLeuYCXx9DV-n1djeerKHdFo2ZAjRv5WjL_b4IxbQnPFnHOSNHVg49yp7CUWUgDQOKtylt3x0YENIW37RQPZJ-Fvyk7Z0jvibj2iZ0K3K8yNenJ4mWswyQdyPaJcbP6AMWvUWT62giWHa3lDgBZNhXqakkYdoaM157kRUfrZDRSWXbilr-4f40PQF1DV5YSj81Fl72N7j30r0vL1yoj0bZn74WRquQ5j3QsiAA-SzhAxpecWniljj1wvZlyIgJpCYCvCrKZCcCq_JW1nYP6to5YM3fAqcYRadbTNdQ3oH0Sjy8vyvLYNe48Ur_TFTTAwZxJV70BgZfkJ00BxiNTb8EhSchejabeExUkCNlOrQsCHDxOig-WXOrjX5fb4NeR3jedeYWbhN922ORLuEwVLeyjc7hBfQXU2-mYraFAVTc0QST201P7rRu-UGtZ4gRavFuOvAyYrMimFVW9dTwTrcYXFK2zKCEv2aRRQAHZanKjBv0Xq9m3BqvxKy-_3Cj1O6ft7FT21drPoDRDzfnkyOeUjlXzRJzn-iQ0nqgHAQr9WBWPzLEcaTFpw3KmwDYHW_6JOkUWDyMW9anuS8cyqt_2O29SK_rHHuucD8", 
"d":"Bq6C13vknF6Ln1MrKI3Ilq-83IuSvQpe7NRAuT69u7i8sv4XwsHOJAV7qpGvp37NXT5R1G3ehEZ6qoSxJbcN4IVrQMKq5mMiCY6DBv5C6fHZGoNZE2gxXV7uydf8I1Vnnw4xYIj5oyC_5nSJlFAc3U-MAcbkfJuvrhGxLVGsrqHmjoqQPGG_hTxCjuAOlOBs-9cmWTujbm1-OyjaAQwfTbXYbUy7hC1TCE05SxLPmTUwaJxY8AXJigpbYqjpWsc15HjRlv38A44tEnIwHjHda_3JpmbSsffSslRej2vPCCgSPHHyLeO437Nc7DraogKStugisRfhoe89yY4QSBVXbtvWJeF1LxPtg8uPtfoKt3wdnGWKaLDqYNDeA3AckbKrPp50kHEMR7hnNHq3lAoMAXTz8BbI_Czo5n9-f9DQpvJC8kpM7gCGG8DptA2nTPuQG02MOx7AsEE99EN8ltD_dA0l0MgG7CDsaQC5IPMHcRs1wyvZMBGA8fvZdURiVv9YSnCddndXjBJuetf5KdES-1EmrSLzo5hobQbkc7dkHMS5dmLm5YtK-aLYXZi31nRIGkA1UfZhf2TtfRxP6uKlRT106EtDX1rT3RgsLqg06y_xoS4SFQ6u-8wHqgbIHBmKdsWVtBkC4SGUlYDPgrJe2V9CaPFAcoSDFK1D_IPvU2U", "p":"zhyauK0ISMg9Wk7iZK2ifW2cj5KSr-_k_Em0nfUrtsaKp0iXOsCKxOH__zcAVj7oLxaEP2l8i2Pdi7CzhVRiqrjgVwA1JuLPgxtryuVqwRCYbO_Y_2Xutk404iKmDX6_LQ7BeIzUI8GD6rQCeLq1HBd3Yvok9bPvbbMZjFtUmBf_Kfb0cYP8tewMmV_USGpqwJXdB_4aHF6qBrZBtd1KLoO1E7MNkAPk7pbiA1-KO2Xa6oY6fy2pztNe1MO7tz_QywqlDdymfhnpk41arY3A6US-ZFXOinqXKdh9uEfxiyZwzaLMNWVKEaWxRxbqSUOLV3uZS05N6B2ZqvOp2h9Csw", "q":"tdHmYcbJpZ0U27d_j0YvUeb6sdcuFLg2vmDgKwUamC5_vvV41LSm8LIkLuY2DAN5MKg6-HTWetKWQhgbCIbubLtX5164MFrES1YVZI-aggrYohhH8MRn_hwMZZQndv9H07WUVgQ1GZ2ZDvhO7XxPDIXyBNQ46x6V1AikHtyTmqARjgrkgs-1XN55S9rhcffixOlJ-egIDPVei_Z6YNdSpLlhtiqHOp_lX37mrPSYGxjgIZVxevpPgBhVFlnAMqC2iRd87XupmWgiluSos8I7i1VESBzwlFZGk5hRb8och4zwmDBDwx65XWngg6LneSXTWcKjKKGM2NnX7wHrZBuyRQ", "dp":"Gfo49fW5CZNTSEKQ_id0R2K9TMsoecw-jB2uCgqQi-TSLOtVRC5oTxA896my_SvIj8bCvEtLSzY3AhgvSCqulN3gSJbaHCCSDvAx0czAe7zfuTsxml76izeoKqg7TZAgAEnP0KXPRwJo4ff2J8lAcl3yyiLE7cLT9nuQSMRqERFVM7DQdk4wV618mQge9VGUStmYlh1MpS65N0dZWNafNuWauPTkTLZw8DFMIyizf3EC-nQYg1b6A_tYBHD3A82jPzQEQY8B3PrfGZ3DRASNv9jONk8qTQHOc5O5pLRMmUErDn_qRQCTKU483bzhooJE2a3WUEt6Pjsc1xMG4Vr3SQ", 
"dq":"cCVai36Yi-06m1cwd8fbkhH9GUpXIvKI2Z5ZRk-smqc7piY0dEZFHftS9BaMyZYu3wM09GDklfdkNLo3mmfXkftv-cbjpvelUa50HYWx0HouKrT9UpVia0sTnmfme7BztjKunuuTcQxTBvfDfxoIi_nmUHIx9Vv1IEaALITzChGnIky3q7O_8ttKR65nFevG1JvsRBeJN6z0tzG9RBQr5mxtx3Wt2Uwcp21XjOCFHVmXjT9nMmpINQNNIC8VrGSSkjaJmNWIw5WGmDnLkKzCG2vpZO1suqIIgCsYN_Ka7ETTdZt3gFdoECUpFSiay4-4MAospvgWLv8XAFXXwfSPXQ", "qi":"n-R81MpbwfWfqRSVgD8nDk7D8zlJ-tpMaojfTwNNqDt34Cr-BpMjxaQyEfMnzOd2dY4OV0rKhd29DIuwFEb2UERHdVWF3gM8f2byYGj4357CRkiwq6I050bUxd1ODgAXjVGNpOK_fmaNHDWfe5v3wVIcCmwH0mJxEu9kuz7fr9TJNxGJBGUphpGS6NQZDCbDXg9-FPafMeNV-Jdo0NQaKMwm8uZyW7YGSNpUXYnksrWt4Fa-B9H2KoC4PPSWESPxNooXdxK7Y0J1KbzNyrUmOl4dT6p_oFKcU-1unuDCZ11e6EmMKyUGjpDzTIAZ2XxmyWUJ06yzEw7oLo8noiCE_Q" } ================================================ FILE: apps/arweave/test/ar_webhook_tests.erl ================================================ -module(ar_webhook_tests). -export([init/2]). -include_lib("eunit/include/eunit.hrl"). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -import(ar_test_node, [ wait_until_height/2, read_block_when_stored/1]). init(Req, State) -> SplitPath = ar_http_iface_server:split_path(cowboy_req:path(Req)), handle(SplitPath, Req, State). 
%% @doc Cowboy handler for the webhook endpoints exercised by the test.
%% Each clause decodes the JSON payload POSTed by the node and records it
%% in the ?MODULE ETS table so test_webhooks/0 can assert on it later.
handle([<<"tx">>], Req, State) ->
	Payload = read_json_body(Req),
	TX = maps:get(<<"transaction">>, Payload),
	ets:insert(?MODULE, {{tx, maps:get(<<"id">>, TX)}, TX}),
	reply_ok(Req, State);
handle([<<"block">>], Req, State) ->
	Payload = read_json_body(Req),
	Block = maps:get(<<"block">>, Payload),
	ets:insert(?MODULE, {{block, maps:get(<<"height">>, Block)}, Block}),
	reply_ok(Req, State);
handle([<<"txdata">>], Req, State) ->
	Payload = read_json_body(Req),
	ets:insert(?MODULE, {{tx_data_payload, maps:get(<<"txid">>, Payload)}, Payload}),
	reply_ok(Req, State);
handle([<<"solution">>], Req, State) ->
	Payload = read_json_body(Req),
	case maps:get(<<"event">>, Payload, not_found) of
		<<"solution_accepted">> ->
			%% Count accepted solutions; the default object seeds the
			%% counter at 0 on the first update.
			ets:update_counter(?MODULE, accepted_solutions,
					{2, 1}, {accepted_solutions, 0});
		_ ->
			ok
	end,
	reply_ok(Req, State).

%% @doc Read the full request body and decode it as a JSON map.
read_json_body(Req) ->
	{ok, Body, _} = cowboy_req:read_body(Req),
	jiffy:decode(Body, [return_maps]).

%% @doc Reply 200 with an empty body, keeping the handler state unchanged.
reply_ok(Req, State) ->
	{ok, cowboy_req:reply(200, #{}, <<>>, Req), State}.

%% @doc EUnit generator: run the webhook scenario with a 120-second timeout.
webhooks_test_() ->
	{timeout, 120, fun test_webhooks/0}.
%% @doc End-to-end test of the webhook subsystem. Starts a node configured
%% with four webhooks (transaction, block, transaction_data, solution), all
%% pointing at a local cowboy listener served by this module, then mines ten
%% blocks carrying a mix of v1 and v2 transactions and checks that every
%% webhook payload matches the node's own serialization of the corresponding
%% block/transaction. Finally exercises the transaction blacklist file:
%% blacklisted data must produce "transaction_data_removed" events and
%% clearing the blacklist must allow the data to be re-synced.
test_webhooks() ->
	{_, Pub} = Wallet = ar_wallet:new(),
	[B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(10000), <<>>}]),
	{ok, Config} = arweave_config:get_env(),
	try
		Port = ar_test_node:get_unused_port(),
		PortBinary = integer_to_binary(Port),
		TXBlacklistFilename = random_tx_blacklist_filename(),
		Addr = ar_wallet:to_address(ar_wallet:new_keyfile()),
		%% One webhook per event type, all delivered to the local listener
		%% started below on the chosen free port.
		Config2 = Config#config{
			webhooks = [
				#config_webhook{
					url = <<"http://127.0.0.1:", PortBinary/binary, "/tx">>,
					events = [transaction]
				},
				#config_webhook{
					url = <<"http://127.0.0.1:", PortBinary/binary, "/block">>,
					events = [block]
				},
				#config_webhook{
					url = <<"http://127.0.0.1:", PortBinary/binary, "/txdata">>,
					events = [transaction_data]
				},
				#config_webhook{
					url = <<"http://127.0.0.1:", PortBinary/binary, "/solution">>,
					events = [solution]
				}
			],
			transaction_blacklist_files = [TXBlacklistFilename]
		},
		ar_test_node:start(#{ b0 => B0, addr => Addr, config => Config2,
				%% Replica 2.9 modules do not support updates.
				storage_modules => [{10 * ?MiB, 0, {composite, Addr, 1}}] }),
		%% Set up a server listening for the webhooks and registering them
		%% in the ETS table.
		ets:new(?MODULE, [named_table, set, public]),
		Routes = [{"/[...]", ar_webhook_tests, []}],
		cowboy:start_clear(
			ar_webhook_test_listener,
			[{port, Port}],
			#{ env => #{ dispatch => cowboy_router:compile([{'_', Routes}]) } }
		),
		{V2TX, Proofs} = create_v2_tx(Wallet),
		%% Mine 10 blocks: odd heights carry a v1 tx with ~2 chunks of data,
		%% height 2 carries the v2 tx, the other even heights carry a plain tx.
		TXs = lists:map(
			fun(Height) ->
				SignedTX = case Height rem 2 == 1 of
					true ->
						Data = crypto:strong_rand_bytes(262144 * 2 + 10),
						ar_test_node:sign_v1_tx(main, Wallet, #{ data => Data });
					false ->
						case Height == 2 of
							true -> V2TX;
							false -> ar_test_node:sign_tx(main, Wallet, #{})
						end
				end,
				ar_test_node:assert_post_tx_to_peer(main, SignedTX),
				ar_test_node:mine(),
				wait_until_height(main, Height),
				%% Every mined block implies at least one accepted solution.
				%% NOTE(review): assumes the solution webhook has been
				%% delivered by the time the height is observed — confirm.
				[{_, AcceptedSolutionCount}] = ets:lookup(?MODULE, accepted_solutions),
				?assert(AcceptedSolutionCount >= Height),
				SignedTX
			end,
			lists:seq(1, 10)
		),
		%% A pending (not yet mined) transaction must still trigger the
		%% transaction webhook.
		UnconfirmedTX = ar_test_node:sign_tx(main, Wallet, #{}),
		ar_test_node:assert_post_tx_to_peer(main, UnconfirmedTX),
		lists:foreach(
			fun(Height) ->
				TX = lists:nth(Height, TXs),
				%% The block webhook payload must equal the node's own JSON
				%% serialization of the stored block, minus the wallet list.
				true = ar_util:do_until(
					fun() ->
						case ets:lookup(?MODULE, {block, Height}) of
							[{_, B}] ->
								{H, _, _} = ar_node:get_block_index_entry(Height),
								B2 = read_block_when_stored(H),
								Struct = ar_serialize:block_to_json_struct(B2),
								Expected = maps:remove(
									<<"wallet_list">>,
									jiffy:decode(ar_serialize:jsonify(Struct), [return_maps])
								),
								?assertEqual(Expected, B),
								true;
							_ ->
								false
						end
					end,
					200,
					10000
				),
				%% The tx webhook payload must equal the serialized tx,
				%% minus the (potentially large) data field.
				true = ar_util:do_until(
					fun() ->
						case ets:lookup(?MODULE, {tx, ar_util:encode(TX#tx.id)}) of
							[{_, TX2}] ->
								Struct = ar_serialize:tx_to_json_struct(TX),
								Expected = maps:remove(
									<<"data">>,
									jiffy:decode(ar_serialize:jsonify(Struct), [return_maps])
								),
								?assertEqual(Expected, TX2),
								true;
							_ ->
								false
						end
					end,
					200,
					10000
				),
				case Height < 8 andalso Height rem 2 == 1 of
					false ->
						%% Do not expect events about data from the latest
						%% blocks because it stays in the disk pool.
						ok;
					true ->
						assert_transaction_data_synced(TX#tx.id)
				end
			end,
			lists:seq(1, 10)
		),
		true = ar_util:do_until(
			fun() ->
				case ets:lookup(?MODULE, {tx, ar_util:encode(UnconfirmedTX#tx.id)}) of
					[{_, TX}] ->
						Struct = ar_serialize:tx_to_json_struct(UnconfirmedTX),
						Expected = maps:remove(
							<<"data">>,
							jiffy:decode(ar_serialize:jsonify(Struct), [return_maps])
						),
						?assertEqual(Expected, TX),
						true;
					_ ->
						false
				end
			end,
			200,
			2000
		),
		%% Upload the v2 chunks and expect a "synced" event, then blacklist
		%% transactions (whole tx by id, or a byte range for a single chunk)
		%% and expect "removed" events.
		V2TXID = (V2TX)#tx.id,
		upload_chunks(Proofs),
		assert_transaction_data_synced(V2TXID),
		FirstTXID = (hd(TXs))#tx.id,
		append_txid_to_file(FirstTXID, TXBlacklistFilename),
		assert_transaction_data_removed(FirstTXID),
		SecondTXID = (lists:nth(3, TXs))#tx.id, % The second v1 transaction with data.
		append_second_chunk_to_file(SecondTXID, TXBlacklistFilename),
		assert_transaction_data_removed(SecondTXID),
		append_second_chunk_to_file(V2TXID, TXBlacklistFilename),
		assert_transaction_data_removed(V2TXID),
		empty_file(TXBlacklistFilename),
		%% Wait until the new blacklisting policy (=no blacklisting) takes effect.
		timer:sleep(3000),
		upload_chunks(Proofs),
		assert_transaction_data_synced(V2TXID),
		cowboy:stop_listener(ar_webhook_test_listener)
	after
		%% Restore a webhook-free config so later tests are unaffected.
		%% NOTE(review): transaction_blacklist_files is not reset here —
		%% confirm whether that is intentional.
		arweave_config:set_env(Config#config{ webhooks = [] })
	end.

%% @doc Build a signed format=2 transaction whose data is 3 full chunks plus
%% an 11-byte tail, together with the JSON-encoded upload proofs (one per
%% chunk) suitable for POST /chunk.
create_v2_tx(Wallet) ->
	DataSize = 3 * ?DATA_CHUNK_SIZE + 11,
	Chunks = ar_tx:chunk_binary(?DATA_CHUNK_SIZE, crypto:strong_rand_bytes(DataSize)),
	SizeTaggedChunks = ar_tx:chunks_to_size_tagged_chunks(Chunks),
	SizedChunkIDs = ar_tx:sized_chunks_to_sized_chunk_ids(SizeTaggedChunks),
	{DataRoot, DataTree} = ar_merkle:generate_tree(SizedChunkIDs),
	TX = ar_test_node:sign_tx(main, Wallet, #{ format => 2, data_root => DataRoot,
			data_size => DataSize, reward => ?AR(1) }),
	%% The size-tagged offsets are end offsets; Offset - 1 addresses the last
	%% byte of each chunk. NOTE(review): presumed from usage — confirm
	%% against ar_merkle:generate_path/3.
	Proofs = [encode_proof(#{ data_root => DataRoot, chunk => Chunk,
			data_path => ar_merkle:generate_path(DataRoot, Offset - 1, DataTree),
			offset => Offset - 1, data_size => DataSize })
			|| {Chunk, Offset} <- SizeTaggedChunks],
	{TX, Proofs}.
%% @doc JSON-encode a chunk upload proof map for POST /chunk. Binary fields
%% are base64url-encoded; integer fields are rendered as decimal binaries.
encode_proof(Proof) ->
	ar_serialize:jsonify(#{
		chunk => ar_util:encode(maps:get(chunk, Proof)),
		data_path => ar_util:encode(maps:get(data_path, Proof)),
		data_root => ar_util:encode(maps:get(data_root, Proof)),
		data_size => integer_to_binary(maps:get(data_size, Proof)),
		offset => integer_to_binary(maps:get(offset, Proof))
	}).

%% @doc Block until the webhook listener has recorded a
%% "transaction_data_synced" event for the given transaction ID.
%% Polls every 1000 ms for up to 30 s; a timeout fails the enclosing test
%% via the `true = ...` match.
assert_transaction_data_synced(TXID) ->
	EncodedTXID = ar_util:encode(TXID),
	true = ar_util:do_until(
		fun() ->
			case ets:lookup(?MODULE, {tx_data_payload, EncodedTXID}) of
				[{_, JSON}] ->
					maps:get(<<"event">>, JSON) == <<"transaction_data_synced">>;
				_ ->
					false
			end
		end,
		1000,
		30000
	).

%% @doc POST every encoded proof to the main test node. Each upload must
%% return HTTP 200 or the match fails.
upload_chunks([]) ->
	ok;
upload_chunks([Proof | Proofs]) ->
	{ok, {{<<"200">>, _}, _, _, _, _}} = ar_test_node:post_chunk(main, Proof),
	upload_chunks(Proofs).

%% @doc Return a unique transaction-blacklist file path under the configured
%% data_dir (random base64url suffix avoids collisions between test runs).
random_tx_blacklist_filename() ->
	{ok, Config} = arweave_config:get_env(),
	filename:join(Config#config.data_dir,
		"ar-webhook-tests-transaction-blacklist-"
			++ binary_to_list(ar_util:encode(crypto:strong_rand_bytes(32)))).

%% @doc Append the base64url-encoded transaction ID, on its own line, to the
%% blacklist file.
append_txid_to_file(TXID, Filename) ->
	{ok, F} = file:open(Filename, [append]),
	ok = file:write(F, io_lib:format("~s~n", [ar_util:encode(TXID)])),
	file:close(F).

%% @doc Block until the webhook listener has recorded a
%% "transaction_data_removed" event for the given transaction ID.
%% Polls every 100 ms for up to 60 s.
%% Fix: tolerate the ETS entry being absent by returning false (so do_until
%% retries) instead of crashing on a badmatch — consistent with
%% assert_transaction_data_synced/1. The previous direct match
%% `[{_, JSON}] = ets:lookup(...)` crashed the polling fun whenever the
%% webhook had not been delivered yet, defeating the retry loop.
assert_transaction_data_removed(TXID) ->
	EncodedTXID = ar_util:encode(TXID),
	true = ar_util:do_until(
		fun() ->
			case ets:lookup(?MODULE, {tx_data_payload, EncodedTXID}) of
				[{_, JSON}] ->
					maps:get(<<"event">>, JSON) == <<"transaction_data_removed">>;
				_ ->
					false
			end
		end,
		100,
		60000
	).

%% @doc Append the absolute weave offset range of the transaction's second
%% chunk, formatted "Start,End", to the blacklist file.
append_second_chunk_to_file(TXID, Filename) ->
	{ok, {EndOffset, Size}} = ar_data_sync:get_tx_offset(TXID),
	SecondChunkStart = EndOffset - Size + ?DATA_CHUNK_SIZE,
	SecondChunkEnd = SecondChunkStart + ?DATA_CHUNK_SIZE,
	{ok, F} = file:open(Filename, [append]),
	ok = file:write(F, io_lib:format("~B,~B~n", [SecondChunkStart, SecondChunkEnd])),
	file:close(F).

%% @doc Overwrite the file with a single space, effectively clearing the
%% blacklist (the file must stay present for the node to keep watching it).
empty_file(Filename) ->
	{ok, F} = file:open(Filename, [write]),
	ok = file:write(F, <<" ">>),
	file:close(F).
================================================ FILE: apps/arweave_config/README.md ================================================ # Arweave Configuration Application `arweave_config` application is in charge of dealing with arweave configuration and contains all the modules, functions and processes to manage it. ## Getting Started ## Features ## Usage ## Test `arweave_config` is using [`eunit`](https://www.erlang.org/doc/apps/eunit) and [`common_test`](https://www.erlang.org/doc/apps/common_test/). ```sh # execute eunit test suite rebar3 eunit -c # execute common test test suite rebar3 ct -c # check the coverage rebar3 cover -v ``` ## FAQ ## References and Resources ================================================ FILE: apps/arweave_config/include/arweave_config.hrl ================================================ -ifndef(AR_CONFIG_HRL). -define(AR_CONFIG_HRL, true). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave/include/ar_verify_chunks.hrl"). -record(config_webhook, { events = [], url = undefined, headers = [] }). %% The polling frequency in seconds. -define(DEFAULT_POLLING_INTERVAL, 2). %% The number of processes periodically searching for the latest blocks. -define(DEFAULT_BLOCK_POLLERS, 10). %% The number of processes fetching the recent blocks and transactions on join. -define(DEFAULT_JOIN_WORKERS, 10). %% The number of data sync jobs to run. Each job periodically picks a range %% and downloads it from peers. -ifdef(AR_TEST). -define(DEFAULT_SYNC_JOBS, 10). -else. -define(DEFAULT_SYNC_JOBS, 100). -endif. %% The number of disk pool jobs to run. Disk pool jobs scan the disk pool to index %% no longer pending or orphaned chunks, pack chunks with a sufficient number of confirmations, %% or remove the abandoned ones. -define(DEFAULT_DISK_POOL_JOBS, 20). %% The number of header sync jobs to run. Each job picks the latest not synced %% block header and downloads it from peers. -define(DEFAULT_HEADER_SYNC_JOBS, 1). 
%% The default expiration time for a data root in the disk pool. -define(DEFAULT_DISK_POOL_DATA_ROOT_EXPIRATION_TIME_S, 30 * 60). %% The default size limit for unconfirmed and seeded chunks, per data root. -ifdef(AR_TEST). -define(DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB, 50). -else. -define(DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB, 10000). -endif. %% The default number of duplicate data roots checked for a posted chunk. -define(DEFAULT_MAX_DUPLICATE_DATA_ROOTS, 5). %% The default total size limit for unconfirmed and seeded chunks. -ifdef(AR_TEST). -define(DEFAULT_MAX_DISK_POOL_BUFFER_MB, 100). -else. -define(DEFAULT_MAX_DISK_POOL_BUFFER_MB, 100000). -endif. %% The default frequency of checking for the available disk space. -ifdef(AR_TEST). -define(DISK_SPACE_CHECK_FREQUENCY_MS, 1000). -else. -define(DISK_SPACE_CHECK_FREQUENCY_MS, 30 * 1000). -endif. -define(NUM_HASHING_PROCESSES, max(1, (erlang:system_info(schedulers_online) - 1))). -define(MAX_PARALLEL_BLOCK_INDEX_REQUESTS, 1). -define(MAX_PARALLEL_GET_CHUNK_REQUESTS, 100). -define(MAX_PARALLEL_GET_AND_PACK_CHUNK_REQUESTS, 1). -define(MAX_PARALLEL_GET_TX_DATA_REQUESTS, 1). -define(MAX_PARALLEL_WALLET_LIST_REQUESTS, 1). -define(MAX_PARALLEL_POST_CHUNK_REQUESTS, 100). -define(MAX_PARALLEL_GET_SYNC_RECORD_REQUESTS, 10). -define(MAX_PARALLEL_REWARD_HISTORY_REQUESTS, 1). -define(MAX_PARALLEL_GET_TX_REQUESTS, 20). -define(MAX_PARALLEL_GET_DATA_ROOTS_REQUESTS, 1). %% The number of parallel tx validation processes. -define(MAX_PARALLEL_POST_TX_REQUESTS, 20). %% The time in seconds to wait for the available tx validation process before dropping the %% POST /tx request. -define(DEFAULT_POST_TX_TIMEOUT, 20). %% The default value for the maximum number of threads used for nonce limiter chain %% validation. -define(DEFAULT_MAX_NONCE_LIMITER_VALIDATION_THREAD_COUNT, max(1, (erlang:system_info(schedulers_online) div 2))). 
%% The default value for the maximum number of threads used for nonce limiter chain %% last step validation. -define(DEFAULT_MAX_NONCE_LIMITER_LAST_STEP_VALIDATION_THREAD_COUNT, max(1, (erlang:system_info(schedulers_online) - 1))). %% Accept a block from the given IP only once in so many milliseconds. -ifdef(AR_TEST). -define(DEFAULT_BLOCK_THROTTLE_BY_IP_INTERVAL_MS, 10). -else. -define(DEFAULT_BLOCK_THROTTLE_BY_IP_INTERVAL_MS, 1000). -endif. %% Accept a block with the given solution hash only once in so many milliseconds. -ifdef(AR_TEST). -define(DEFAULT_BLOCK_THROTTLE_BY_SOLUTION_INTERVAL_MS, 10). -else. -define(DEFAULT_BLOCK_THROTTLE_BY_SOLUTION_INTERVAL_MS, 2000). -endif. -define(DEFAULT_CM_POLL_INTERVAL_MS, 60000). -define(DEFAULT_CM_BATCH_TIMEOUT_MS, 20). -define(CHUNK_GROUP_SIZE, (256 * 1024 * 8000)). % 2 GiB. %% The number of consecutive chunks to read at a time during in-place repacking. -ifdef(AR_TEST). -define(DEFAULT_REPACK_BATCH_SIZE, 2). -else. -define(DEFAULT_REPACK_BATCH_SIZE, 100). -endif. -define(DEFAULT_REPACK_CACHE_SIZE_MB, 4000). %% default filtering value for the peer list (30days) -define(CURRENT_PEERS_LIST_FILTER, 30*60*60*24). %% The default rocksdb databases flush interval, 30 minutes. -define(DEFAULT_ROCKSDB_FLUSH_INTERVAL_S, 1800). %% The default rocksdb WAL sync interval, 1 minute. -define(DEFAULT_ROCKSDB_WAL_SYNC_INTERVAL_S, 60). %% The number of 2.9 storage modules allowed to prepare the storage at a time. -ifdef(AR_TEST). -define(DEFAULT_REPLICA_2_9_WORKERS, 2). -else. -define(DEFAULT_REPLICA_2_9_WORKERS, 8). -endif. %% The default maximum number of replica 2.9 entropies to cache at a time %% while syncing data. Each entropy is 256 MiB. -define(DEFAULT_REPLICA_2_9_ENTROPY_CACHE_SIZE_MB, 4000). %% The number of packing workers. -define(DEFAULT_PACKING_WORKERS, erlang:system_info(dirty_cpu_schedulers_online)). %% The default connection tcp delay when arweave is shutting down -define(SHUTDOWN_TCP_CONNECTION_TIMEOUT, 30). 
-define(SHUTDOWN_TCP_MODE, shutdown). %% Global socket configuration -define(DEFAULT_SOCKET_BACKEND, inet). %% Default Gun HTTP/TCP parameters -define(DEFAULT_GUN_HTTP_CLOSING_TIMEOUT, 15_000). -define(DEFAULT_GUN_HTTP_KEEPALIVE, 60_000). -define(DEFAULT_GUN_TCP_DELAY_SEND, false). -define(DEFAULT_GUN_TCP_KEEPALIVE, true). -define(DEFAULT_GUN_TCP_LINGER, false). -define(DEFAULT_GUN_TCP_LINGER_TIMEOUT, 0). -define(DEFAULT_GUN_TCP_NODELAY, true). -define(DEFAULT_GUN_TCP_SEND_TIMEOUT_CLOSE, true). -define(DEFAULT_GUN_TCP_SEND_TIMEOUT, 15_000). %% The time the cowboy loop handler waits before killing a request handler process. -define(DEFAULT_HTTP_HANDLER_TIMEOUT_MS, 55000). %% Per-chunk HTTP body read period passed to cowboy_req:read_body/2. -define(DEFAULT_HTTP_READ_BODY_PERIOD_MS, 15000). %% Total wall-clock limit for reading the complete request body. -define(DEFAULT_HTTP_MAX_BODY_READ_TIME_MS, ?DEFAULT_HTTP_HANDLER_TIMEOUT_MS - 2000). %% Default Cowboy HTTP/TCP parameters -define(DEFAULT_COWBOY_HTTP_ACTIVE_N, 100). -define(DEFAULT_COWBOY_HTTP_IDLE_TIMEOUT, 60_000). -define(DEFAULT_COWBOY_HTTP_INACTIVITY_TIMEOUT, 300_000). -define(DEFAULT_COWBOY_HTTP_LINGER_TIMEOUT, 1000). -define(DEFAULT_COWBOY_HTTP_REQUEST_TIMEOUT, 5000). -define(DEFAULT_COWBOY_TCP_BACKLOG, 1024). -define(DEFAULT_COWBOY_TCP_DELAY_SEND, false). -define(DEFAULT_COWBOY_TCP_IDLE_TIMEOUT_SECOND, 10). -define(DEFAULT_COWBOY_TCP_KEEPALIVE, true). -define(DEFAULT_COWBOY_TCP_LINGER, false). -define(DEFAULT_COWBOY_TCP_LINGER_TIMEOUT, 0). -define(DEFAULT_COWBOY_TCP_MAX_CONNECTIONS, 5000). -define(DEFAULT_COWBOY_TCP_NODELAY, true). -define(DEFAULT_COWBOY_TCP_NUM_ACCEPTORS, 500). -define(DEFAULT_COWBOY_TCP_SEND_TIMEOUT_CLOSE, true). -define(DEFAULT_COWBOY_TCP_SEND_TIMEOUT, 15_000). -define(DEFAULT_COWBOY_TCP_LISTENER_SHUTDOWN, 5000). %% Common RLG Settings -define(DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 120000). -define(DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 120000). 
-define(DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, false). %% General RLG -define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). -define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, 45000). -else. -define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, 450). -endif. -define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION, 450). -define(DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT, 150). %% Chunk RLG -define(DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_LIMIT, 100). -define(DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_DURATION, 1000). -define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_LIMIT, 6000). -define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_REDUCTION, 30). -define(DEFAULT_HTTP_API_LIMITER_CHUNK_CONCURRENCY_LIMIT, 200). %% Data Sync RLG -define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_DURATION, 1000). -define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_LIMIT, 20). -define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_REDUCTION, 20). -define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_CONCURRENCY_LIMIT, 40). %% Recent Hash List Diff RLG -define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_DURATION, 1000). -define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_LIMIT, 120). -define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_REDUCTION, 120). -define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_CONCURRENCY_LIMIT, 240). 
%% Block Index RLG -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_LIMIT, 10). -else. -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_LIMIT, 1). -endif. -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_REDUCTION, 1). -define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_CONCURRENCY_LIMIT, 2). %% Wallet list RLG -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_LIMIT, 10). -else. -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_LIMIT, 1). -endif. -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_REDUCTION, 1). -define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_CONCURRENCY_LIMIT, 2). %% Get VDF RLG -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_LIMIT, 4500). -else. -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_LIMIT, 90). -endif. -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_REDUCTION, 90). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_CONCURRENCY_LIMIT, 90). %% VDF Session RLG -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 50000). -else. -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 30). -endif. 
-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_REDUCTION, 30). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_CONCURRENCY_LIMIT, 30). %% Previous VDF Session RLG -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 50000). -else. -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 30). -endif. -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_INTERVAL, 30000). -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_REDUCTION, 30). -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_CONCURRENCY_LIMIT, 30). %% Metrics RLG -define(DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_DURATION, 1000). -define(DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_LIMIT, 2). -define(DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_INTERVAL, 1000). -define(DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_REDUCTION, 2). -define(DEFAULT_HTTP_API_LIMITER_METRICS_CONCURRENCY_LIMIT, 2). %% @doc Startup options with default values. -record(config, { init = false, port = ?DEFAULT_HTTP_IFACE_PORT, mine = false, verify = false, verify_samples = ?SAMPLE_CHUNK_COUNT, peers = [], block_gossip_peers = [], local_peers = [], sync_from_local_peers_only = false, data_dir = "./data", log_dir = ?LOG_DIR, polling = ?DEFAULT_POLLING_INTERVAL, % Polling frequency in seconds. 
block_pollers = ?DEFAULT_BLOCK_POLLERS, auto_join = true, join_workers = ?DEFAULT_JOIN_WORKERS, diff = ?DEFAULT_DIFF, mining_addr = not_set, hashing_threads = ?NUM_HASHING_PROCESSES, mining_cache_size_mb, packing_cache_size_limit, data_cache_size_limit, post_tx_timeout = ?DEFAULT_POST_TX_TIMEOUT, max_emitters = ?NUM_EMITTER_PROCESSES, sync_jobs = ?DEFAULT_SYNC_JOBS, header_sync_jobs = ?DEFAULT_HEADER_SYNC_JOBS, enable_data_roots_syncing = true, data_sync_request_packed_chunks = false, disk_pool_jobs = ?DEFAULT_DISK_POOL_JOBS, load_key = not_set, disk_space_check_frequency = ?DISK_SPACE_CHECK_FREQUENCY_MS, storage_modules = [], repack_in_place_storage_modules = [], repack_batch_size = ?DEFAULT_REPACK_BATCH_SIZE, repack_cache_size_mb = ?DEFAULT_REPACK_CACHE_SIZE_MB, start_from_latest_state = false, start_from_state = not_set, start_from_block = not_set, internal_api_secret = not_set, enable = [], disable = [], transaction_blacklist_files = [], transaction_blacklist_urls = [], transaction_whitelist_files = [], transaction_whitelist_urls = [], requests_per_minute_limit = ?DEFAULT_REQUESTS_PER_MINUTE_LIMIT, requests_per_minute_limit_by_ip = #{}, max_propagation_peers = ?DEFAULT_MAX_PROPAGATION_PEERS, max_block_propagation_peers = ?DEFAULT_MAX_BLOCK_PROPAGATION_PEERS, webhooks = [], disk_pool_data_root_expiration_time = ?DEFAULT_DISK_POOL_DATA_ROOT_EXPIRATION_TIME_S, max_disk_pool_buffer_mb = ?DEFAULT_MAX_DISK_POOL_BUFFER_MB, max_disk_pool_data_root_buffer_mb = ?DEFAULT_MAX_DISK_POOL_DATA_ROOT_BUFFER_MB, max_duplicate_data_roots = ?DEFAULT_MAX_DUPLICATE_DATA_ROOTS, semaphores = #{ get_chunk => ?MAX_PARALLEL_GET_CHUNK_REQUESTS, get_and_pack_chunk => ?MAX_PARALLEL_GET_AND_PACK_CHUNK_REQUESTS, get_tx_data => ?MAX_PARALLEL_GET_TX_DATA_REQUESTS, post_chunk => ?MAX_PARALLEL_POST_CHUNK_REQUESTS, get_block_index => ?MAX_PARALLEL_BLOCK_INDEX_REQUESTS, get_wallet_list => ?MAX_PARALLEL_WALLET_LIST_REQUESTS, %% The get_sync_record semaphore is shared with GET /sync_buckets, %% GET 
/footprints, and GET /footprint_buckets. get_sync_record => ?MAX_PARALLEL_GET_SYNC_RECORD_REQUESTS, post_tx => ?MAX_PARALLEL_POST_TX_REQUESTS, get_reward_history => ?MAX_PARALLEL_REWARD_HISTORY_REQUESTS, get_tx => ?MAX_PARALLEL_GET_TX_REQUESTS, get_data_roots => ?MAX_PARALLEL_GET_DATA_ROOTS_REQUESTS }, disk_cache_size = ?DISK_CACHE_SIZE, max_nonce_limiter_validation_thread_count = ?DEFAULT_MAX_NONCE_LIMITER_VALIDATION_THREAD_COUNT, max_nonce_limiter_last_step_validation_thread_count = ?DEFAULT_MAX_NONCE_LIMITER_LAST_STEP_VALIDATION_THREAD_COUNT, nonce_limiter_server_trusted_peers = [], nonce_limiter_client_peers = [], debug = false, run_defragmentation = false, defragmentation_trigger_threshold = 1_500_000_000, defragmentation_modules = [], block_throttle_by_ip_interval = ?DEFAULT_BLOCK_THROTTLE_BY_IP_INTERVAL_MS, block_throttle_by_solution_interval = ?DEFAULT_BLOCK_THROTTLE_BY_SOLUTION_INTERVAL_MS, tls_cert_file = not_set, %% required to enable TLS tls_key_file = not_set, %% required to enable TLS http_api_transport_idle_timeout = ?DEFAULT_COWBOY_TCP_IDLE_TIMEOUT_SECOND*1000, coordinated_mining = false, cm_api_secret = not_set, cm_exit_peer = not_set, cm_peers = [], cm_poll_interval = ?DEFAULT_CM_POLL_INTERVAL_MS, cm_out_batch_timeout = ?DEFAULT_CM_BATCH_TIMEOUT_MS, is_pool_server = false, is_pool_client = false, pool_server_address = not_set, pool_api_key = not_set, pool_worker_name = not_set, packing_workers = ?DEFAULT_PACKING_WORKERS, replica_2_9_workers = ?DEFAULT_REPLICA_2_9_WORKERS, disable_replica_2_9_device_limit = false, replica_2_9_entropy_cache_size_mb = ?DEFAULT_REPLICA_2_9_ENTROPY_CACHE_SIZE_MB, %% Undocumented/unsupported options chunk_storage_file_size = ?CHUNK_GROUP_SIZE, rocksdb_flush_interval_s = ?DEFAULT_ROCKSDB_FLUSH_INTERVAL_S, rocksdb_wal_sync_interval_s = ?DEFAULT_ROCKSDB_WAL_SYNC_INTERVAL_S, %% openssl (will be removed), fused, hiopt_m4 vdf = openssl, %% Turn on/off the rebasing check. Only disabled in tests. 
allow_rebase = true, % Shutdown procedures shutdown_tcp_connection_timeout = ?SHUTDOWN_TCP_CONNECTION_TIMEOUT, shutdown_tcp_mode = ?SHUTDOWN_TCP_MODE, % global socket configuration 'socket.backend' = ?DEFAULT_SOCKET_BACKEND, % gun network stack configuration. % these parameters are mainly configured using default % values from inet module 'http_client.http.closing_timeout' = ?DEFAULT_GUN_HTTP_CLOSING_TIMEOUT, 'http_client.http.keepalive' = ?DEFAULT_GUN_HTTP_KEEPALIVE, 'http_client.tcp.delay_send' = ?DEFAULT_GUN_TCP_DELAY_SEND, 'http_client.tcp.keepalive' = ?DEFAULT_GUN_TCP_KEEPALIVE, 'http_client.tcp.linger' = ?DEFAULT_GUN_TCP_LINGER, 'http_client.tcp.linger_timeout' = ?DEFAULT_GUN_TCP_LINGER_TIMEOUT, 'http_client.tcp.nodelay' = ?DEFAULT_GUN_TCP_NODELAY, 'http_client.tcp.send_timeout_close' = ?DEFAULT_GUN_TCP_SEND_TIMEOUT_CLOSE, 'http_client.tcp.send_timeout' = ?DEFAULT_GUN_TCP_SEND_TIMEOUT, % cowboy network stack configuration. % these parameters are mainly configured using default % values from inet module 'http_api.http.active_n' = ?DEFAULT_COWBOY_HTTP_ACTIVE_N, 'http_api.http.inactivity_timeout' = ?DEFAULT_COWBOY_HTTP_INACTIVITY_TIMEOUT, 'http_api.http.linger_timeout' = ?DEFAULT_COWBOY_HTTP_LINGER_TIMEOUT, 'http_api.http.request_timeout' = ?DEFAULT_COWBOY_HTTP_REQUEST_TIMEOUT, 'http_api.tcp.backlog' = ?DEFAULT_COWBOY_TCP_BACKLOG, 'http_api.tcp.delay_send' = ?DEFAULT_COWBOY_TCP_DELAY_SEND, 'http_api.tcp.keepalive' = ?DEFAULT_COWBOY_TCP_KEEPALIVE, 'http_api.tcp.linger' = ?DEFAULT_COWBOY_TCP_LINGER, 'http_api.tcp.linger_timeout' = ?DEFAULT_COWBOY_TCP_LINGER_TIMEOUT, 'http_api.tcp.listener_shutdown' = ?DEFAULT_COWBOY_TCP_LISTENER_SHUTDOWN, 'http_api.tcp.max_connections' = ?DEFAULT_COWBOY_TCP_MAX_CONNECTIONS, 'http_api.tcp.nodelay' = ?DEFAULT_COWBOY_TCP_NODELAY, 'http_api.tcp.num_acceptors' = ?DEFAULT_COWBOY_TCP_NUM_ACCEPTORS, 'http_api.tcp.send_timeout_close' = ?DEFAULT_COWBOY_TCP_SEND_TIMEOUT_CLOSE, 'http_api.tcp.send_timeout' = ?DEFAULT_COWBOY_TCP_SEND_TIMEOUT, 
'http_api.limiter.general.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT, 'http_api.limiter.general.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION, 'http_api.limiter.general.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.general.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, 'http_api.limiter.general.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL, 'http_api.limiter.general.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION, 'http_api.limiter.general.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT, 'http_api.limiter.general.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.chunk.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_LIMIT, 'http_api.limiter.chunk.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_DURATION, 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.chunk.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_LIMIT, 'http_api.limiter.chunk.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL, 'http_api.limiter.chunk.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_REDUCTION, 'http_api.limiter.chunk.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_CHUNK_CONCURRENCY_LIMIT, 'http_api.limiter.chunk.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.data_sync_record.sliding_window_limit' = 
?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_LIMIT, 'http_api.limiter.data_sync_record.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_DURATION, 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.data_sync_record.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_LIMIT, 'http_api.limiter.data_sync_record.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_INTERVAL, 'http_api.limiter.data_sync_record.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_REDUCTION, 'http_api.limiter.data_sync_record.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_CONCURRENCY_LIMIT, 'http_api.limiter.data_sync_record.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.recent_hash_list_diff.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_LIMIT, 'http_api.limiter.recent_hash_list_diff.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_DURATION, 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.recent_hash_list_diff.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_LIMIT, 'http_api.limiter.recent_hash_list_diff.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_INTERVAL, 'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_REDUCTION, 
'http_api.limiter.recent_hash_list_diff.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_CONCURRENCY_LIMIT, 'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.block_index.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_LIMIT, 'http_api.limiter.block_index.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_DURATION, 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.block_index.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_LIMIT, 'http_api.limiter.block_index.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_INTERVAL, 'http_api.limiter.block_index.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_REDUCTION, 'http_api.limiter.block_index.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_CONCURRENCY_LIMIT, 'http_api.limiter.block_index.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.wallet_list.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_LIMIT, 'http_api.limiter.wallet_list.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_DURATION, 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.wallet_list.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_LIMIT, 'http_api.limiter.wallet_list.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_INTERVAL, 
'http_api.limiter.wallet_list.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_REDUCTION, 'http_api.limiter.wallet_list.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_CONCURRENCY_LIMIT, 'http_api.limiter.wallet_list.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.get_vdf.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_LIMIT, 'http_api.limiter.get_vdf.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_DURATION, 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.get_vdf.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_LIMIT, 'http_api.limiter.get_vdf.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_INTERVAL, 'http_api.limiter.get_vdf.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_REDUCTION, 'http_api.limiter.get_vdf.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_CONCURRENCY_LIMIT, 'http_api.limiter.get_vdf.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.get_vdf_session.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_LIMIT, 'http_api.limiter.get_vdf_session.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_DURATION, 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.get_vdf_session.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 'http_api.limiter.get_vdf_session.leaky_tick_interval' = 
?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_INTERVAL, 'http_api.limiter.get_vdf_session.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_REDUCTION, 'http_api.limiter.get_vdf_session.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_CONCURRENCY_LIMIT, 'http_api.limiter.get_vdf_session.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.get_previous_vdf_session.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_LIMIT, 'http_api.limiter.get_previous_vdf_session.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_DURATION, 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.get_previous_vdf_session.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 'http_api.limiter.get_previous_vdf_session.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_INTERVAL, 'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_REDUCTION, 'http_api.limiter.get_previous_vdf_session.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_CONCURRENCY_LIMIT, 'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, 'http_api.limiter.metrics.sliding_window_limit' = ?DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_LIMIT, 'http_api.limiter.metrics.sliding_window_duration' = ?DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_DURATION, 'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 
'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry' = ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 'http_api.limiter.metrics.leaky_limit' = ?DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_LIMIT, 'http_api.limiter.metrics.leaky_tick_interval' = ?DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_INTERVAL, 'http_api.limiter.metrics.leaky_tick_reduction' = ?DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_REDUCTION, 'http_api.limiter.metrics.concurrency_limit' = ?DEFAULT_HTTP_API_LIMITER_METRICS_CONCURRENCY_LIMIT, 'http_api.limiter.metrics.is_manual_reduction_disabled' = ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED }). -endif. ================================================ FILE: apps/arweave_config/include/arweave_config_spec.hrl ================================================ -import(arweave_config_spec, [is_function_exported/3]). ================================================ FILE: apps/arweave_config/priv/.gitkeep ================================================ ================================================ FILE: apps/arweave_config/src/arweave_config.app.src ================================================ {application, arweave_config, [ {id, "arweave_config"}, {description, "Arweave Configuration"}, {vsn, "0.0.1"}, {mod, {arweave_config, []}}, {env, []}, {applications, [ kernel, stdlib, sasl, cowboy, tomerl, yamerl ]}, {modules, [ arweave_config, arweave_config_environment, arweave_config_http_server, arweave_config_legacy, arweave_config_parameters, arweave_config_parser, arweave_config_signal_handler, arweave_config_spec, arweave_config_spec_default, arweave_config_spec_deprecated, arweave_config_spec_enabled, arweave_config_spec_environment, arweave_config_spec_handle_get, arweave_config_spec_handle_set, arweave_config_spec_inherit, arweave_config_spec_legacy, arweave_config_spec_long_argument, arweave_config_spec_long_description, arweave_config_spec_nargs, arweave_config_spec_parameter_key, arweave_config_spec_runtime, 
arweave_config_spec_short_argument, arweave_config_spec_short_description, arweave_config_spec_type, arweave_config_store, arweave_config_sup, arweave_config_type ]}, {registered, [ arweave_config, arweave_config_sup, arweave_config_legacy, arweave_config_environment, arweave_config_spec, arweave_config_store, arweave_config_signal_handler ]} ]}. ================================================ FILE: apps/arweave_config/src/arweave_config.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Arweave Configuration Interface. %%% %%% `arweave_config' module is an interface to the Arweave %%% configuration data store where all configuration parameters are %%% stored and specified. %%% %%% WARNING: this module/application is in active development, the %%% interfaces can change. %%% %%% == Usage == %%% %%% `arweave_config' application needs to be started to work %%% correctly, many processes are mandatory and will be in charge to %%% deal with stored configuration. %%% %%% ``` %%% % start arweave_config %%% arweave_config:start(). %%% ''' %%% %%% Parameters keys are defined as list and can be retrieve using %%% `arweave_config:get/1' or `arweave_config:get/2'. %%% %%% ``` %%% % get debug parameter. %%% arweave_config:get([debug]). %%% %%% % get debug parameter, if undefined, use false instead. %%% arweave_config:get([debug], false). %%% ''' %%% %%% Parameters keys can be dynamically set using %%% `arweave_config:set/2'. %%% %%% ``` %%% % set debug parameter to true %%% arweave_config:set([debug], true). %%% %%% % set debug parameter to false %%% arweave_config:set([debug], false). 
%%% '''
%%%
%%% Parameters are defined in parameter specification, defined as
%%% callback modules or as map. If a specification does not contain
%%% the parameter key, the interface will return an error.
%%%
%%% @end
%%%===================================================================
-module(arweave_config).
-compile(warnings_as_errors).
-vsn(1).
-behavior(application).
-behavior(gen_server).
-export([
	get/1,
	get/2,
	get_env/0,
	is_runtime/0,
	runtime/0,
	set/2,
	set_env/1,
	start/0,
	start_link/0,
	stop/0
]).
% application behavior callbacks.
-export([start/2, stop/1]).
% gen_server behavior callbacks
-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2]).
-compile({no_auto_import,[get/1]}).
-include("arweave_config.hrl").
-include_lib("kernel/include/logger.hrl").

%%--------------------------------------------------------------------
%% @doc helper function to start the `arweave_config' application.
%% @end
%%--------------------------------------------------------------------
-spec start() -> ok | {error, term()}.
start() ->
	case application:ensure_all_started(?MODULE, permanent) of
		{ok, Dependencies} ->
			% BUGFIX: logger format arguments must be wrapped in a
			% list. `Dependencies' is itself a list of started
			% applications, so passing it bare would raise a format
			% error whenever it does not contain exactly one element.
			?LOG_DEBUG("arweave_config started dependencies: ~p", [Dependencies]),
			ok;
		Elsewise ->
			Elsewise
	end.

%%--------------------------------------------------------------------
%% @doc helper function to stop `arweave_config' application.
%% @end
%%--------------------------------------------------------------------
-spec stop() -> ok.
stop() ->
	application:stop(?MODULE).

%%--------------------------------------------------------------------
%% @doc A wrapper for `application:get_env/2'.
%% @deprecated this function is a temporary interface and will be
%% replaced by `arweave_config:get/1' function.
%% @see application:get_env/2
%% @end
%%--------------------------------------------------------------------
-spec get_env() -> {ok, #config{}}.
get_env() ->
	arweave_config_legacy:get_env().
%%--------------------------------------------------------------------
%% @doc A wrapper for `application:set_env/3'.
%% @deprecated this function is a temporary interface and will be
%% replaced by `arweave_config:set/2' function.
%% @see application:set_env/3
%% @end
%%--------------------------------------------------------------------
-spec set_env(term()) -> ok.
set_env(Value) ->
	arweave_config_legacy:set_env(Value).

%%--------------------------------------------------------------------
%% @doc Get a value from the configuration.
%%
%% Note: the behavior of this function differs depending on the kind
%% of parameter requested. To ease the transition to the new
%% configuration format, when an `atom' is given as first argument,
%% `arweave_config' acts as a proxy to the old configuration method
%% (using a record).
%%
%% == Examples ==
%%
%% ```
%% > get(<<"global.debug">>).
%% {ok, false}
%%
%% > get([global, debug]).
%% {ok, false}
%%
%% > get([test]).
%% {error, #{ reason => not_found }}.
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec get(ParameterKey) -> Return when
	ParameterKey :: atom() | string() | binary() | list(),
	Return :: {ok, term()} | {error, term()}.
get(Key) when is_atom(Key) ->
	% TODO: pattern to remove.
	% this pattern is ONLY for legacy purpose, it should be
	% removed after the full migration to the new arweave
	% configuration format.
	?LOG_DEBUG([
		{function, ?FUNCTION_NAME},
		{module, ?MODULE},
		{key, Key}
	]),
	arweave_config_legacy:get(Key);
get(Key) ->
	case arweave_config_parser:key(Key) of
		{ok, ParsedKey} ->
			arweave_config_spec:get(ParsedKey);
		Error ->
			Error
	end.

%%--------------------------------------------------------------------
%% @doc Get a value from the configuration, if not defined, a default
%% value can be returned instead.
%%
%% == Examples ==
%%
%% ```
%% > get(<<"global.debug">>, true).
%% false
%%
%% > get([global, debug], true).
%% false
%%
%% > get([test], true).
%% true
%% '''
%% @end
%%--------------------------------------------------------------------
-spec get(ParameterKey, Default) -> Return when
	ParameterKey :: atom() | string() | binary() | list(),
	Default :: term(),
	Return :: term().
get(Key, Default) ->
	% any failure (unknown key, parser error, crash in get/1) falls
	% back to the caller-supplied default.
	try get(Key) of
		{ok, Value} -> Value;
		_Else -> Default
	catch
		_:_ -> Default
	end.

%%--------------------------------------------------------------------
%% @doc Set a configuration value using a key.
%%
%% == Examples==
%%
%% ```
%% > set(<<"global.debug">>, <<"true">>).
%% {ok, true}
%%
%% > set([global, debug], true).
%% {ok, true}
%%
%% > set("global.debug", "true").
%% {ok, true}
%%
%% > set("global.debug", 1234).
%% {error, #{ reason => not_boolean }}
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec set(ParameterKey, Value) -> Return when
	ParameterKey :: atom() | string() | iolist() | binary() | list(),
	Value :: term(),
	Return :: {ok, term()} | {error, term()}.
set(Key, Value) when is_atom(Key) ->
	% TODO: pattern to remove.
	% this pattern is ONLY for legacy purpose and should be
	% removed after the migration to the new arweave configuration
	% format.
	?LOG_DEBUG([
		{function, ?FUNCTION_NAME},
		{module, ?MODULE},
		{key, Key},
		{value, Value}
	]),
	case arweave_config_legacy:set(Key, Value) of
		{ok, V} ->
			% mirror the legacy write into the new configuration
			% store when a mapping exists for this legacy key; the
			% result of the mirrored write is deliberately ignored.
			case arweave_config_spec:get_legacy(Key) of
				{ok, PK} ->
					_ = set(PK, Value),
					{ok, V};
				Else ->
					Else
			end;
		Else ->
			Else
	end;
set(Key, Value) ->
	case arweave_config_parser:key(Key) of
		{ok, Parameter} ->
			arweave_config_spec:set(Parameter, Value);
		Elsewise ->
			Elsewise
	end.

%%--------------------------------------------------------------------
%%
%% == Examples ==
%%
%% ```
%% 10 = getm(#{}, logdir, [logging,default,path], 10).
%% parameter_value = getm(#{}, logdir, [logging,default,path], 10).
%% 1 = getm(#{ logdir => 1 }, logdir, [logging,default,path], 10).
%% '''
%%
%%--------------------------------------------------------------------
% getm(MapKey, Map, Parameter, Default) ->
%%--------------------------------------------------------------------
%% @doc Start arweave_config process.
%% @end
%%--------------------------------------------------------------------
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%--------------------------------------------------------------------
%% @doc Switch to runtime mode. No rollback is possible there, this is
%% a one time operation to announce arweave config is ready to deal
%% with dynamic configuration.
%% @end
%%--------------------------------------------------------------------
-spec runtime() -> ok.
runtime() ->
	gen_server:call(?MODULE, runtime, 10_000).

%%--------------------------------------------------------------------
%% @doc Returns whether arweave config is in runtime mode.
%% @end
%%--------------------------------------------------------------------
-spec is_runtime() -> boolean().
is_runtime() ->
	% the runtime flag lives in the ets table owned by this process;
	% anything other than an exact single-entry match means "not
	% runtime".
	ets:lookup(?MODULE, runtime) =:= [{runtime, true}].

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' callback.
%% @end
%%--------------------------------------------------------------------
init(_) ->
	ets:new(?MODULE, [named_table, protected]),
	{ok, ?MODULE}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' callback.
%% @end
%%--------------------------------------------------------------------
terminate(_, _) ->
	?LOG_INFO("arweave_config process stopped").

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' callback.
%% @end
%%--------------------------------------------------------------------
handle_call(runtime, _From, State) ->
	% set the runtime flag; the insert can only fail if the table is
	% missing, in which case the call still succeeds (best effort).
	try ets:insert(?MODULE, {runtime, true}) of
		true -> ok;
		_ -> ok
	catch
		_:_ -> ok
	end,
	{reply, ok, State};
handle_call(Msg, _, State) ->
	% BUGFIX: reply to unknown calls instead of returning `noreply':
	% a `noreply' with no subsequent gen_server:reply/2 leaves the
	% caller blocked until its call timeout expires. This also
	% matches the fallback clause style used by the other
	% arweave_config processes.
	?LOG_WARNING("~p (~p) received: ~p", [?MODULE, self(), Msg]),
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' callback.
%% @end
%%--------------------------------------------------------------------
handle_cast(_, State) ->
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' callback.
%% @end
%%--------------------------------------------------------------------
handle_info(_, State) ->
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `application' callback.
%% @end
%%--------------------------------------------------------------------
start(_StartType, _StartArgs) ->
	?LOG_INFO("arweave_config application starting"),
	% start application supervisor
	arweave_config_sup:start_link().

%%--------------------------------------------------------------------
%% @hidden
%% @doc `application' callback.
%% @end
%%--------------------------------------------------------------------
stop(_Args) ->
	?LOG_INFO("arweave_config application stopped"),
	ok.



================================================
FILE: apps/arweave_config/src/arweave_config_arguments.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration CLI Arguments Parser.
%%%
%%% This module is in charge of parsing the arguments from the command
%%% line, usually defined at startup. The parser will use the
%%% specifications from `arweave_config_spec' (this means, for now,
%%% `arweave_config' must be started to correctly parse something).
%%%
%%% The main idea is to have all arguments flags directly available
%%% from the specifications and parse the arguments from CLI using
%%% them. It should then return a list of actions to be executed.
%%%
%%% This module is also a process called `arweave_config_arguments',
%%% keeping the original command line passed by the user.
%%%
%%% @todo add support for more than one parameter taken from the flags
%%%
%%% @todo add support for short string flags (e.g. -def will look for
%%% -d, -e and -f flags if they are boolean).
%%%
%%% @todo returns a comprehensive error message.
%%%
%%% @todo when parsing fails, the documentation of the last parameter
%%% should be displayed, or the documentation of the whole
%%% application.
%%%
%%% @todo sub-arguments parsing, a more complex way to parse certain
%%% kind of value will be required in some situation, for example for
%%% peers and storage modules. see section below.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_arguments).
-behavior(gen_server).
-compile(warnings_as_errors).
-compile({no_auto_import,[get/0]}).
-export([
	start_link/0,
	load/0,
	set/1,
	get/0,
	get_args/0,
	parse/1,
	parse/2
]).
-export([init/1]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
-include_lib("kernel/include/logger.hrl").

%%--------------------------------------------------------------------
%% @doc Parses command line arguments.
%% @see parse/2
%% @end
%%--------------------------------------------------------------------
-spec parse(Args) -> Return when
	Args :: [binary()],
	Return :: {ok, [{Spec, Values}]} | {error, Reason},
	Spec :: map(),
	Values :: [term()],
	Reason :: map().
parse(Args) ->
	% no options: argument specifications are fetched from the
	% running arweave_config_spec process.
	parse(Args, #{}).
%%--------------------------------------------------------------------
%% @doc Parses an argument from command line. Erlang is usually giving
%% us these arguments as a `[string()]', but we want it to be a
%% `[binary()]', to make our life easier when displaying this
%% information somewhere else (e.g. JSON).
%%
%% Custom specifications can be set using `long_arguments' and
%% `short_arguments' options. Those are mostly used for testing and
%% debugging purpose, by default, this function will fetch
%% specifications from `arweave_config_spec' process.
%% @end
%%--------------------------------------------------------------------
-spec parse(Args, Opts) -> Return when
	Args :: [binary()],
	Opts :: #{
		long_arguments => #{},
		short_arguments => #{}
	},
	Return :: {ok, [{Spec, Values}]} | {error, Reason},
	Spec :: map(),
	Values :: [term()],
	Reason :: map().
parse(Args, Opts) ->
	parse_converter(Args, Opts).

%%--------------------------------------------------------------------
%% @hidden
%% @doc type converter, the parser only check binary data.
%% @end
%%--------------------------------------------------------------------
parse_converter(Args, Opts) ->
	% start the conversion loop with an empty accumulator.
	parse_converter(Args, [], Opts).
% The accumulator holds already-converted arguments in reverse order;
% it is reversed once before handing over to parse_final/2.
parse_converter([], Acc, Opts) ->
	parse_final(lists:reverse(Acc), Opts);
parse_converter([Arg | Rest], Acc, Opts) when is_binary(Arg) ->
	parse_converter(Rest, [Arg | Acc], Opts);
parse_converter([Arg | Rest], Acc, Opts) when is_list(Arg) ->
	parse_converter(Rest, [list_to_binary(Arg) | Acc], Opts);
parse_converter([Arg | Rest], Acc, Opts) when is_integer(Arg) ->
	parse_converter(Rest, [integer_to_binary(Arg) | Acc], Opts);
parse_converter([Arg | Rest], Acc, Opts) when is_float(Arg) ->
	parse_converter(Rest, [float_to_binary(Arg) | Acc], Opts);
parse_converter([Arg | Rest], Acc, Opts) when is_atom(Arg) ->
	parse_converter(Rest, [atom_to_binary(Arg) | Acc], Opts).

%%--------------------------------------------------------------------
%% @hidden
%% @doc builds the parser state (argument specifications and position
%% counter) and starts the parsing loop; any crash while fetching the
%% specifications or parsing is converted into an error map.
%% @end
%%--------------------------------------------------------------------
parse_final(Args, Opts) ->
	try
		% @todo it's annoying to convert these values, longs/short
		% args from specifications should be returned as map directly.
		% note: maps:get/3 evaluates its default eagerly, so the
		% specification process is queried even when the option is
		% present in Opts.
		Long = maps:get(long_arguments, Opts,
			maps:from_list(arweave_config_spec:get_long_arguments())),
		Short = maps:get(short_arguments, Opts,
			maps:from_list(arweave_config_spec:get_short_arguments())),
		InitState = #{
			args => Args,
			la => Long,
			sa => Short,
			pos => 1,
			actions => []
		},
		parse(Args, InitState, Opts)
	catch
		_:Reason ->
			{error, #{ reason => Reason } }
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc loop over the arguments and check them.
%% @end
%%--------------------------------------------------------------------
-spec parse(Args, State, Opts) -> Return when
	Args :: [binary()],
	State :: map(),
	Opts :: map(),
	Return :: {ok, [{Spec, Values}]} | {error, Reason},
	Spec :: map(),
	Values :: [term()],
	Reason :: map().
% all arguments consumed: return the collected actions in the order
% they appeared on the command line.
parse([], #{ actions := Buffer }, _Opts) ->
	{ok, lists:reverse(Buffer)};
% anything starting with three dashes is rejected outright.
parse([Arg = <<"---",_/binary>>|_], State, _Opts) ->
	Pos = maps:get(pos, State),
	{error, #{
		reason => <<"bad_argument">>,
		argument => Arg,
		position => Pos
	} };
parse([Arg = <<"--",_/binary>>|Rest], State = #{la := LA}, Opts)
		when is_map_key(Arg, LA) ->
	% by default, we assume the argument is a long
	% arguments and we try to find it.
	Spec = maps:get(Arg, LA),
	Pos = maps:get(pos, State),
	case apply_spec(Rest, Spec, State#{ pos => Pos+1 }) of
		{ok, NewRest, NewState} ->
			parse(NewRest, NewState, Opts);
		Else ->
			Else
	end;
% NOTE: <<"-", Arg>> matches exactly two bytes, binding Arg to the
% single character following the dash; multi-character short flags
% (e.g. -def) therefore fall through to the unknown-argument clause.
parse([<<"-", Arg>>|Rest], State = #{sa := SA}, Opts)
		when is_map_key(Arg, SA), Arg =/= $- ->
	Spec = maps:get(Arg, SA),
	Pos = maps:get(pos, State),
	case apply_spec(Rest, Spec, State#{ pos => Pos+1 }) of
		{ok, NewRest, NewState} ->
			parse(NewRest, NewState, Opts);
		Else ->
			Else
	end;
parse([Unknown|_], #{ pos := Pos }, _Opts) ->
	{error, #{
		reason => <<"unknown argument">>,
		argument => Unknown,
		position => Pos
	} }.

%%--------------------------------------------------------------------
%% @hidden
%% @doc Take a value and check its type.
%% @end
%%--------------------------------------------------------------------
% a boolean flag at the very end of the command line: no explicit
% value follows, so the flag defaults to true.
apply_spec([], Spec = #{type := boolean}, State) ->
	Buffer = maps:get(actions, State),
	NewBuffer = [{Spec, [true]}|Buffer],
	NewState = State#{ actions => NewBuffer },
	{ok, [], NewState};
% a boolean flag followed by more arguments: consume the next token
% only when it parses as a boolean, otherwise record the flag as a
% bare `true' and leave the token for the next parsing iteration.
apply_spec([Value|Rest], Spec = #{type := boolean}, State) ->
	Buffer = maps:get(actions, State),
	case arweave_config_type:boolean(Value) of
		{ok, Return} ->
			Pos = maps:get(pos, State),
			NewBuffer = [{Spec, [Return]}|Buffer],
			NewState = State#{ actions => NewBuffer, pos => Pos+1 },
			{ok, Rest, NewState};
		_ ->
			NewBuffer = [{Spec, [true]}|Buffer],
			NewState = State#{ actions => NewBuffer },
			{ok, [Value|Rest], NewState}
	end;
% any other type: the next token is mandatory and must convert via
% the matching arweave_config_type callback.
apply_spec([Value|Rest], Spec = #{type := Type}, State) ->
	Buffer = maps:get(actions, State),
	Pos = maps:get(pos, State),
	case arweave_config_type:Type(Value) of
		{ok, Return} ->
			NewBuffer = [{Spec, [Return]}|Buffer],
			NewState = State#{ actions => NewBuffer, pos => Pos+1 },
			{ok, Rest, NewState};
		_ ->
			{error, #{
				reason => <<"bad value">>,
				value => Value,
				type => Type,
				position => Pos
			} }
	end;
% non-boolean flag with no remaining argument to consume.
apply_spec(_, Spec, State) ->
	Type = maps:get(type, Spec),
	Pos = maps:get(pos, State),
	{error, #{
		reason => <<"missing value">>,
		type => Type,
		position => Pos
	} }.

%%--------------------------------------------------------------------
%% @doc starts arweave_config_arguments server.
%% @end
%%--------------------------------------------------------------------
-spec start_link() -> {ok, pid()}.
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, #{}, []).

%%--------------------------------------------------------------------
%% @doc load arguments.
%% @end
%%--------------------------------------------------------------------
-spec load() -> ok | {error, term()}.
load() ->
	gen_server:call(?MODULE, load, 10_000).

%%--------------------------------------------------------------------
%% @doc set arguments from command line.
%% @end
%%--------------------------------------------------------------------
-spec set(Args) -> Return when
	Args :: [string() | binary()],
	Return :: ok.
set(Args) ->
	gen_server:call(?MODULE, {set, Args}, 10_000).

%%--------------------------------------------------------------------
%% @doc returns parsed arguments from process state.
%% @end
%%--------------------------------------------------------------------
-spec get() -> {ok, [map()]}.
get() ->
	gen_server:call(?MODULE, get, 10_000).

%%--------------------------------------------------------------------
%% @doc returns raw arguments from process state.
%% @end
%%--------------------------------------------------------------------
-spec get_args() -> [string() | binary()].
get_args() ->
	gen_server:call(?MODULE, {get, args}, 10_000).

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec init(Args) -> Return when
	Args :: #{ args => [string() | binary()] },
	Return :: {ok, State},
	State :: #{ init_args => Args, args => Args, params => [] }.
init(InitArgs) ->
	init_args(InitArgs, #{ params => [] }).

%%--------------------------------------------------------------------
%% @hidden
%% @doc get arguments directly from the command line.
%%
%% ```
%% arweave_config_arguments:start(#{ args => [] }).
%% '''
%%
%% @end
%%--------------------------------------------------------------------
init_args(InitArgs, State) ->
	Raw = maps:get(args, InitArgs, []),
	init_final(InitArgs, State#{ init_args => InitArgs, args => Raw }).

%%--------------------------------------------------------------------
%% @hidden
%% @doc returns the final state, ready to be used by the process.
%% @end
%%--------------------------------------------------------------------
init_final(_, State) ->
	{ok, State}.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_call
	(get, From, State) -> Return when
		From :: term(),
		State :: map(),
		Return :: {reply, Reply, State},
		Reply :: [string()];
	({get, args}, From, State) -> Return when
		From :: term(),
		State :: map(),
		Return :: {reply, Reply, State},
		Reply :: [string()];
	({set, Args}, From, State) -> Return when
		Args :: [string() | binary()],
		From :: term(),
		State :: map(),
		Return :: {reply, Reply, State},
		Reply :: {ok, [map()]} | {error, term()};
	(load, From, State) -> Return when
		From :: term(),
		State :: map(),
		Return :: {reply, Reply, State},
		Reply :: ok | {error, term()};
	(any(), From, State) -> Return when
		From :: term(),
		State :: map(),
		Return :: {reply, ok, State}.
% return the parsed parameters.
handle_call(get, _From, State) ->
	Args = maps:get(params, State, []),
	{reply, Args, State};
% return the raw (unparsed) command line arguments.
handle_call({get, args}, _From, State) ->
	RawArgs = maps:get(args, State, []),
	{reply, RawArgs, State};
% parse and store a new argument list; on failure the previous state
% is kept untouched.
handle_call({set, RawArgs}, _From, State) ->
	try parse(RawArgs) of
		{ok, Parsed} ->
			NewState = State#{
				args => RawArgs,
				params => Parsed
			},
			{reply, {ok, Parsed}, NewState};
		Else ->
			{reply, Else, State}
	catch
		_Error:Reason ->
			{reply, {error, Reason}, State}
	end;
% push every stored parameter into arweave_config; stops at the first
% crashing parameter and returns its reason.
handle_call(load, _From, State = #{ params := Params}) ->
	try
		lists:map(fun load_fun/1, Params),
		{reply, ok, State}
	catch
		_Error:Reason ->
			{reply, {error, Reason}, State}
	end;
handle_call(Msg, _, State) ->
	?LOG_WARNING("~p (~p) received: ~p", [?MODULE, self(), Msg]),
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_cast(Msg, State) -> Return when
	Msg :: any(),
	State :: #{},
	Return :: {noreply, State}.
handle_cast(Msg, State) ->
	?LOG_WARNING("~p (~p) received: ~p", [?MODULE, self(), Msg]),
	{noreply, State}.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_info(Msg, State) -> Return when
	Msg :: any(),
	State :: #{},
	Return :: {noreply, State}.
handle_info(Msg, State) ->
	?LOG_WARNING("~p (~p) received: ~p", [?MODULE, self(), Msg]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc applies one parsed argument by writing its value under the
%% parameter key declared in the argument specification.
%%--------------------------------------------------------------------
load_fun({Spec, [Value]}) ->
	ParameterKey = maps:get(parameter_key, Spec),
	arweave_config:set(ParameterKey, Value).


================================================
FILE: apps/arweave_config/src/arweave_config_arguments_legacy.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @deprecated this module is a legacy compat layer.
%%% @doc Support for legacy arweave configuration.
%%%
%%% This module has been created to deal with legacy arguments parser
%%% from `ar.erl'. The goal is to slowly migrate to the new
%%% parser without breaking everything.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_arguments_legacy).
-behavior(gen_server).
-compile(warnings_as_errors).
-compile({no_auto_import,[get/0]}).
-export([
	get/0,
	get_args/0,
	load/0,
	parse/1,
	set/1,
	start_link/0
]).
-export([init/1]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").
%%--------------------------------------------------------------------
%% local type definition.
%%--------------------------------------------------------------------
% `args' holds the raw argument list as given by the caller,
% `config' the last successfully parsed `#config{}' record.
-type state() :: #{
	args => [string()],
	config => #config{}
}.

%%--------------------------------------------------------------------
%% @doc Uses `ar_cli_parser:parse/2' legacy parser to parse a list of
%% arguments. This is mostly an helper to get rid of the extra data
%% produced by this function: every failure shape coming out of the
%% legacy parser is normalized to `{error, term()}'.
%% @end
%%--------------------------------------------------------------------
-spec parse(Args) -> Return when
	Args :: [string()],
	Return :: {ok, #config{}} | {error, term()}.

parse(Args) ->
	try
		Config = #config{},
		ar_cli_parser:parse(Args, Config)
	of
		Result = {ok, _} -> Result;
		% the legacy parser has several error shapes; collapse
		% them all into a single `badarg' error.
		{error, _, _} -> {error, badarg};
		{error, _} -> {error, badarg};
		Else -> {error, Else}
	catch
		_Error:Reason -> {error, Reason}
	end.

%%--------------------------------------------------------------------
%% @doc Load parsed arguments into `arweave_config' process.
%%
%% ```
%% ok = arweave_config_arguments_legacy:load().
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec load() -> ok | {error, term()}.

load() ->
	gen_server:call(?MODULE, load, 10_000).

%%--------------------------------------------------------------------
%% @doc Set a new list of arguments, overwriting the old one if
%% present.
%%
%% ```
%% {ok, #config{}} = arweave_config_arguments_legacy:set([
%%     "debug"
%% ]).
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec set(Args) -> Return when
	Args :: [string()],
	Return :: {ok, #config{}} | {error, term()}.

set(Args) ->
	gen_server:call(?MODULE, {set, Args}, 10_000).

%%--------------------------------------------------------------------
%% @doc Returns the parsed arguments as `#config{}' record.
%%
%% ```
%% #config{} = arweave_config_arguments_legacy:get().
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec get() -> #config{}.

get() ->
	gen_server:call(?MODULE, get, 10_000).

%%--------------------------------------------------------------------
%% @doc Returns the raw arguments stored in the
%% `arweave_config_arguments_legacy' process.
%%
%% ```
%% ["debug"] = arweave_config_arguments_legacy:get_args().
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec get_args() -> [string()].

get_args() ->
	gen_server:call(?MODULE, {get, args}, 10_000).

%%--------------------------------------------------------------------
%% @doc Start `arweave_config_arguments_legacy' process.
%% @end
%%--------------------------------------------------------------------
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%--------------------------------------------------------------------
%% @hidden
%% @doc initializes the process with an empty argument list and a
%% default `#config{}' record.
%%--------------------------------------------------------------------
-spec init(any()) -> {ok, state()}.

init(_) ->
	?LOG_INFO("start ~p process", [?MODULE]),
	State = #{
		args => [],
		config => #config{}
	},
	{ok, State}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' call handler for the legacy arguments process.
%%
%% `{set, Args}' parses and stores a new argument list; `get' returns
%% the stored `#config{}'; `{get, args}' returns the raw arguments;
%% `load' pushes the stored configuration into `arweave_config' (see
%% `handle_load/1'). Anything else is logged and acknowledged.
%%
%% Spec fixes: the `{get, args}' clause now wraps its reply in
%% `{reply, ..., State}' like the implementation actually does, and
%% the previously advertised `{merge, Config}' request was dropped --
%% no clause ever implemented it (it fell through to the catch-all).
%% @end
%%--------------------------------------------------------------------
-spec handle_call
	({set, Args}, From, State) -> Return when
		Args :: [string()],
		From :: term(),
		State :: state(),
		Return :: {reply, Reply, State},
		Reply :: {ok, #config{}} | {error, term()};
	(get, From, State) -> Return when
		From :: term(),
		State :: state(),
		Return :: {reply, Reply, State},
		Reply :: #config{};
	({get, args}, From, State) -> Return when
		From :: term(),
		State :: state(),
		Return :: {reply, Reply, State},
		Reply :: [string()];
	(load, From, State) -> Return when
		From :: term(),
		State :: state(),
		Return :: {reply, Reply, State},
		Reply :: ok | {error, term()};
	(term(), From, State) -> Return when
		From :: term(),
		State :: state(),
		Return :: {reply, ok, State}.

handle_call(Msg = {set, Args}, From, State) ->
	?LOG_DEBUG([{message, Msg}, {from, From}]),
	case parse(Args) of
		{ok, Config} ->
			NewState = State#{ args => Args, config => Config },
			{reply, {ok, Config}, NewState};
		Else ->
			% `parse/1' already returns `{error, term()}'.
			{reply, Else, State}
	end;
handle_call(Msg = get, From, State = #{ config := Config }) ->
	?LOG_DEBUG([{message, Msg}, {from, From}]),
	{reply, Config, State};
handle_call(Msg = {get,args}, From, State = #{ args := Args }) ->
	?LOG_DEBUG([{message, Msg}, {from, From}]),
	{reply, Args, State};
handle_call(Msg = load, From, State) ->
	?LOG_DEBUG([{message, Msg}, {from, From}]),
	handle_load(State);
handle_call(Msg, From, State) ->
	?LOG_WARNING([{process, self()}, {message, Msg}, {from, From}]),
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_cast(any(), State) -> Return when
	State :: map(),
	Return :: {noreply, state()}.
handle_cast(Msg, State) ->
	?LOG_WARNING([{process, self()}, {message, Msg}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' info handler; no info message is supported.
%%--------------------------------------------------------------------
-spec handle_info(any(), State) -> Return when
	State :: map(),
	Return :: {noreply, state()}.

handle_info(Msg, State) ->
	?LOG_WARNING([{process, self()}, {message, Msg}]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc this function is mostly a hack around legacy parameters.
%% Indeed, the current process is to let arweave_config set both
%% arweave_config and arweave_config_legacy from a list of parameters.
%% Because legacy parameters need to be loaded (and not all parameters
%% are supported yet), a way to set them is required. Legacy format is
%% "temporary" and will be removed after the complete migration to the
%% new format. The following code should be executed once anyway
%% (during arweave startup).
%%
%% NOTE(review): the final reply (via `handle_load2/1') is
%% `{ok, Config}' while the public `load/0' spec advertises plain
%% `ok' -- confirm which one is the intended contract.
%% @end
%%--------------------------------------------------------------------
handle_load(State = #{ config := Config }) ->
	try
		% get the list of compatible legacy arguments
		SupportedMap = arweave_config_spec:get_legacy(),
		% convert the current configuration to a map
		ConfigMap = maps:from_list(
			arweave_config_legacy:config_to_proplist(Config)
		),
		% set arweave_config parameters one by one; keys absent
		% from the legacy config map are skipped.
		_ = maps:map(fun
			(LegacyKey, ParameterKey) ->
				case maps:get(LegacyKey, ConfigMap, undefined) of
					undefined -> undefined;
					Value ->
						arweave_config:set(ParameterKey, Value),
						Value
				end
			end,
			SupportedMap
		),
		handle_load2(State)
	catch
		_Error:Reason ->
			{reply, {error, Reason}, State}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc second stage of `handle_load/1': pushes the stored record
%% into `arweave_config_legacy' and converts the outcome into a
%% `gen_server' reply tuple.
%%--------------------------------------------------------------------
handle_load2(State = #{ config := Config }) ->
	try
		% to be sure the legacy configuration has been
		% configured, merge the current one.
		arweave_config_legacy:merge(Config)
	of
		{ok, C} ->
			{reply, {ok, C}, State};
		{error, Reason} ->
			{reply, {error, Reason}, State}
	catch
		_Error:Reason ->
			{reply, {error, Reason}, State}
	end.

================================================
FILE: apps/arweave_config/src/arweave_config_bootstrap.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Mathieu Kerjouan
%%% @author Arweave Team
%%% @doc Arweave Configuration Bootstrap module.
%%%
%%% This module is in charge to configure arweave parameters from
%%% different sources and in specific order. The only public interface
%%% is `start/1' function. All function prefixed by `init' are
%%% internal function callbacks.
%%%
%%% == Legacy Mode ==
%%%
%%% The legacy mode has been created to be compatible with the old
%%% static configuration format. The procedure has been a bit
%%% modified, but the execution path is globally the same.
%%%
%%% 1. load environment
%%%
%%% 2. find config_file parameter from arguments and load legacy
%%%    configuration file
%%%
%%% 3. parse arguments and load them
%%%
%%% 4. switch to runtime mode.
%%%
%%% 5. start arweave
%%%
%%% == New Mode ==
%%%
%%% In the new configuration mode, every steps are modifying the
%%% stored configuration file in `arweave_config' step by step. The
%%% final step is to start arweave based on the final parsed
%%% configuration and in runtime mode.
%%%
%%% 1. set environment
%%%
%%% 2. set arguments
%%%
%%% 3. set configuration files if present in arguments
%%%
%%% 4. load configuration into arweave_config
%%%
%%% 5. switch to runtime mode
%%%
%%% 6. start arweave application and features.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_bootstrap).
-compile(warnings_as_errors).

-export([
	start/1,
	init_environment/1,
	init_config_file/1,
	init_arguments/1,
	init_load/1,
	init_runtime/1,
	init_final/1
]).

-include_lib("arweave_config/include/arweave_config.hrl").

%%--------------------------------------------------------------------
%% @doc Configure Arweave parameters from different sources.
%% Entry point of the bootstrap: builds the initial state and hands
%% it over to the `arweave_config_fsm' step loop, starting with
%% `init_environment/1'.
%% @end
%%--------------------------------------------------------------------
-spec start(Args) -> Return when
	Args :: [string() | binary()],
	Return :: {ok, #config{}} | {error, term()}.

start(Args) ->
	% to ensure the compatibility with the legacy parsers, an
	% environment variable called AR_CONFIG_MODE can be set.
	% By default, the legacy format is used for now, but if a
	% user wants to switch to the new mode, this environment
	% variable needs to be set to "new".
	% @todo remove this environment variable when arweave_config
	% is fully operational.
	ArweaveConfigMode = os:getenv("AR_CONFIG_MODE"),
	Config = arweave_config_legacy:get(),
	State = #{
		mode => ArweaveConfigMode,
		config => Config,
		args => Args
	},
	% Let call the fsm loop.
	arweave_config_fsm:init(?MODULE, init_environment, State).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc init arweave configuration with environment variable.
%% In both modes the environment store is reset first; the legacy
%% mode additionally loads it right away, the new mode defers the
%% load to `init_load/1'.
%% @end
%%--------------------------------------------------------------------
init_environment(State = #{ mode := "new" }) ->
	arweave_config_environment:reset(),
	{next, init_arguments, State};
init_environment(State) ->
	arweave_config_environment:reset(),
	arweave_config_environment:load(),
	{next, init_config_file, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc init arweave configuration from configuration file.
%% @end
%%--------------------------------------------------------------------
init_config_file(State = #{ mode := "new" }) ->
	% the configuration file is directly loaded when it has been
	% found in the arguments.
	?LOG_WARNING("arweave_config will use new configuration format."),
	{next, init_load, State};
init_config_file(State = #{ args := Args, config := Config }) ->
	% @todo enable arweave_config_file_legacy.
	case ar_config:parse_config_file(Args, Config) of
		{ok, NewConfig} when is_record(NewConfig, config) ->
			% NOTE(review): the pre-parse `Config' is merged here,
			% not `NewConfig'. The parsed file content only travels
			% on through the returned state -- confirm merging the
			% old record is intentional.
			arweave_config_legacy:merge(Config),
			NewState = State#{ config => NewConfig },
			{next, init_arguments, NewState};
		{error, Reason} ->
			{error, Reason}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc init arweave configuration from command line arguments.
%% @end
%%--------------------------------------------------------------------
init_arguments(State = #{ args := Args, mode := "new" }) ->
	?LOG_WARNING("arweave_config will use new argument format."),
	case arweave_config_arguments:set(Args) of
		{ok, _} ->
			{next, init_config_file, State};
		Else ->
			% NOTE(review): if `set/1' already returned
			% `{error, R}', this wraps it as `{error, {error, R}}'
			% -- confirm the intended error shape.
			{error, Else}
	end;
init_arguments(State = #{ config := Config, args := Args }) ->
	case ar_cli_parser:parse(Args, Config) of
		{ok, NewConfig} ->
			arweave_config_legacy:set(NewConfig),
			NewState = State#{ config => NewConfig },
			{next, init_load, NewState};
		{error, Reason} ->
			{error, Reason};
		Else ->
			Else
	end.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_load(State = #{ mode := "new" }) ->
	% in the new mode, this is where we define which part of the
	% configuration is loaded before the others, between
	% environment, arguments and configuration files. Indeed,
	% every part of the configuration is stored in individual
	% processes. We can merge them now.
	arweave_config_environment:load(),
	arweave_config_file:load(),
	arweave_config_arguments:load(),
	{next, init_runtime, State};
init_load(State) ->
	{next, init_runtime, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc set arweave configuration in runtime mode to avoid setting
%% static parameters. Only dynamic parameters will be allowed to be
%% configured in this mode.
%% @end
%%--------------------------------------------------------------------
init_runtime(State) ->
	% deliberately only matches `ok': any other return from
	% `runtime/0' crashes the bootstrap (fail fast).
	case arweave_config:runtime() of
		ok ->
			{next, init_final, State}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc finalize arweave configuration initialization.
%% @end
%%--------------------------------------------------------------------
init_final(_State = #{ mode := "new" })->
	% @todo this part of the code should not work like that.
	% there, we should retrieve all configuration using
	% Module:get/0 using the same format and them merging them
	% based on a specific order. The problem though, is to deal
	% with complex variable (like list).
	ok = arweave_config_environment:load(),
	ok = arweave_config_file:load(),
	ok = arweave_config_arguments:load(),
	LegacyConfig = arweave_config_legacy:get(),
	{ok, LegacyConfig};
init_final(_State = #{ config := Config }) ->
	% parse the arguments from command line and check if a
	% configuration file is defined, returns #config{} record.
	% Note: this function will halt the node and print helps if
	% the arguments or configuration file are wrong.
	% @todo: re-enable legacy parser
	% Config = ar_cli_parser:parse_config_file(Args)
	arweave_config_legacy:set(Config),
	{ok, Config}.
================================================ FILE: apps/arweave_config/src/arweave_config_environment.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Manage and store local environment variable. %%% %%% This module has been created to be a frontend around the local %%% system environment variable. Environment variables are set %%% read-only after a program is started. In this case, there is no %%% point to call `os:getenv/0' and parse all values everytime. This %%% module is getting environment variables, parses them and store %%% them in an ETS table called `arweave_config_environment'. %%% %%% All environment variables are stored as binary to display them %%% easily in debug mode or in JSON/YAML format. %%% %%% ``` %%% _____________ %%% | | %%% | os:getenv/0 | %%% |_____________| %%% /_ _\ %%% | | %%% | | [arweave_config_environment:init/0] %%% | | [arweave_config_environment:reset/0] %%% ____| |_____________________ _____ %%% | \ ( ) %%% | arweave_config_environment |--[state]-->| ets | %%% \____________________________| (_____) %%% | | %%% | | [arweave_config_environment:load/0] %%% _| |_ %%% ___\___/________ %%% | | %%% | arweave_config | %%% |________________| %%% %%% ''' %%% %%% == TODO == %%% %%% @todo store the configuration spec in the process and modify the %%% `get/0' function to return it. %%% %%% @todo creates `get_environment/0' and `get_environment/1' to %%% retrieve one environment value. %%% %%% @end %%%=================================================================== -module(arweave_config_environment). -behavior(gen_server). -compile(warnings_as_errors). 
-compile({no_auto_import,[get/0]}).

-export([load/0, get/0, get/1, reset/0]).
-export([start_link/0]).
-export([init/1]).
-export([handle_call/3, handle_cast/2, handle_info/2]).

-include_lib("kernel/include/logger.hrl").

%%--------------------------------------------------------------------
%% @doc start `arweave_config_environment' process.
%% @end
%%--------------------------------------------------------------------
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%--------------------------------------------------------------------
%% @doc load environment variable into `arweave_config'.
%% @end
%%--------------------------------------------------------------------
-spec load() -> ok.

load() ->
	gen_server:call(?MODULE, load, 10_000).

%%--------------------------------------------------------------------
%% @doc returns the environment variables stored.
%% Reads the (protected) ETS table directly, without going through
%% the server.
%% @end
%%--------------------------------------------------------------------
-spec get() -> [{binary(), binary()}].

get() ->
	ets:tab2list(?MODULE).

%%--------------------------------------------------------------------
%% @doc returns one stored environment variable by its (binary) key.
%%--------------------------------------------------------------------
-spec get(Key) -> Return when
	Key :: binary(),
	Return :: {ok, binary()} | {error, term()}.

get(Key) ->
	case ets:lookup(?MODULE, Key) of
		[{Key, Value}] -> {ok, Value};
		_ -> {error, not_found}
	end.

%%--------------------------------------------------------------------
%% @doc reset the environment variable. Remove all environment
%% variables set and reload them from the environment. Mostly used for
%% development and testing purpose.
%% @end
%%--------------------------------------------------------------------
-spec reset() -> {ok, [{binary(), binary()}]}.

reset() ->
	gen_server:call(?MODULE, reset, 1000).
%%--------------------------------------------------------------------
%% @hidden
%% @doc creates the `arweave_config_environment' ETS table and fills
%% it with the parsed system environment (see `handle_reset/0').
%%--------------------------------------------------------------------
-spec init(any()) -> {ok, reference() | atom()}.

init(_) ->
	% list environment variables available on the system
	% when arweave is started. These variables will need
	% to be stored.
	Ets = ets:new(?MODULE, [named_table, protected]),
	handle_reset(),
	{ok, Ets}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' call handler.
%%
%% `load' joins the stored environment variables against the spec
%% returned by `arweave_config_spec:get_environments/0' and pushes
%% every match into `arweave_config'. `reset' reloads the table from
%% the system environment.
%% @end
%%--------------------------------------------------------------------
handle_call(Msg = load, From, State) ->
	% fix: the format string previously supplied one `~p'
	% placeholder for two arguments ([Msg, From]), which is a
	% format/args mismatch for the logger.
	?LOG_DEBUG("received: ~p from: ~p", [Msg, From]),
	Spec = arweave_config_spec:get_environments(),
	Mapping = [
		begin
			?LOG_DEBUG("found environment ~p=~p", [EnvKey,EnvValue]),
			{Parameter, EnvValue}
		end
		|| {EnvKey, EnvValue} <- get(),
		   {EnvSpec, Parameter} <- Spec,
		   EnvSpec =:= EnvKey
	],
	% side-effect only loop: `lists:foreach/2' instead of
	% `lists:map/2' whose result was discarded.
	lists:foreach(
		fun({Parameter, Value}) ->
			arweave_config:set(Parameter, Value)
		end,
		Mapping
	),
	{reply, ok, State};
handle_call(Msg = reset, From, State) ->
	% fix: same format/args mismatch as in the `load' clause.
	?LOG_DEBUG("received: ~p from: ~p", [Msg, From]),
	Result = handle_reset(),
	{reply, {ok, Result}, State};
handle_call(Msg, From, State) ->
	?LOG_WARNING([
		{module, ?MODULE},
		% fix: log metadata was mislabeled `handle_cast'.
		{function, handle_call},
		{from, From},
		{message, Msg}
	]),
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' cast handler; no cast is supported.
%%--------------------------------------------------------------------
handle_cast(Msg, State) ->
	?LOG_WARNING([
		{module, ?MODULE},
		{function, handle_cast},
		{message, Msg}
	]),
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' info handler; no info message is supported.
%%--------------------------------------------------------------------
handle_info(Msg, State) ->
	?LOG_WARNING([
		{module, ?MODULE},
		% fix: log metadata was mislabeled `handle_cast'.
		{function, handle_info},
		{message, Msg}
	]),
	{noreply, State}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc wipes the ETS table and repopulates it from `os:getenv/0'.
%%--------------------------------------------------------------------
handle_reset() ->
	% Environments are list of string. They must be at least
	% splitted in half using '=' separator. the left part is the
	% key, the right part is the value. Environments are static,
	% they can't be modified during runtime, then, keeping them
	% inside an ETS already parsed to be reused later will avoid
	% some friction in the future.
	ets:delete_all_objects(?MODULE),
	_Environment = [
		begin
			% NOTE(review): assumes every entry contains at least
			% one '=' (true for `os:getenv/0' output); an entry
			% without one would fail this `[K,V]' binding.
			[K,V] = re:split(E, "=", [{parts, 2}, {return, list}]),
			BK = list_to_binary(K),
			VK = list_to_binary(V),
			ets:insert(?MODULE, {BK, VK}),
			{BK,VK}
		end
		|| E <- os:getenv()
	].

================================================
FILE: apps/arweave_config/src/arweave_config_file.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configure File Interface.
%%%
%%% This process is in charge of managing the configuration files. All
%%% configuration files are stored in memory with their parsed
%%% content.
%%%
%%% == Usage ==
%%%
%%% ```
%%% % add a new path, if the path/file is valid, the parsed version is
%%% % returned.
%%% {ok, ValidConfig1} = arweave_config_file:add(Path1).
%%% {ok, ValidConfig2} = arweave_config_file:add(Path2).
%%%
%%% % get the merged configuration, if more than one path is present
%%% % all of them will be merged by alphanumeric order
%%% {ok, Merged} = arweave_config_file:get().
%%%
%%% % returns the parsed configuration from the path
%%% {ok, ValidConfig1} = arweave_config_file:get_by_path(Path1).
%%% %%% % returns the paths currently stored and merged. %%% {ok, Paths} = arweave_config_file:get_paths(). %%% %%% % reset the configuration, the last merged configuration is %%% % returned to the caller %%% {ok, Merged} = arweave_config_file:reset(). %%% %%% % load the configuration into arweave_config %%% ok = arweave_config_file:load(). %%% ''' %%% %%% == TODO == %%% %%% @todo create `check/0' function. It will ensure all files are still %%% present and with the same values. If it's not the case, the files %%% are reloaded and merged together. will be used with sighup. %%% %%% @todo create `delete/1' function. A configuration file can be removed %%% from the store, in this case, the remaining files are merged %%% together after the file has been removed. %%% %%% @todo create `get_by_format/1' function. It will return the %%% configuration files by their format. %%% %%% @todo store the raw value and the parsed value in the store. %%% Useful for debugging and analysis. %%% %%% @todo find a way to deal with a transition when a file is modified %%% locally. %%% %%% @todo what to do if a configuration file can't be loaded? %%% %%% @todo should we store the parsed configuration files? %%% %%% @todo should we follow the files in case of modification %%% (e.g. inotify)? %%% %%% @todo add support for glob pattern %%% %%% @end %%%=================================================================== -module(arweave_config_file). -compile(warnings_as_errors). -behavior(gen_server). -export([ start_link/0, add/1, get/0, get_by_path/1, get_paths/0, load/0, load/1, reset/0, parsers/0 ]). -export([ parse/1, parse/2, check_path/1, identify_parser/1, parse_data/1 ]). -export([init/1]). -export([handle_call/3, handle_cast/2, handle_info/2]). %%-------------------------------------------------------------------- %% @doc start arweave_config_file process. 
%% @end
%%--------------------------------------------------------------------
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%--------------------------------------------------------------------
%% @doc Returns the list of supported parsers based on the file
%% extension. If another extension needs to be supported, this parsers
%% list can be modified (ensure the test suite is working though).
%% @end
%%--------------------------------------------------------------------
-spec parsers() -> #{ binary() => atom() }.

parsers() ->
	#{
		<<".json">> => arweave_config_format_json,
		<<".yaml">> => arweave_config_format_yaml,
		<<".toml">> => arweave_config_format_toml,
		<<".ljson">> => arweave_config_format_legacy
	}.

%%--------------------------------------------------------------------
%% @doc Add a new configuration file path. The configuration must be
%% valid. The format of the file is identified using the postfix (e.g.
%% json, yaml, toml, ljson). List paths are normalized to binaries
%% before reaching the server.
%% @end
%%--------------------------------------------------------------------
-spec add(Path) -> Return when
	Path :: string() | binary(),
	Return :: {ok, [map()]}.

add(Path) when is_list(Path) ->
	add(list_to_binary(Path));
add(Path) ->
	gen_server:call(?MODULE, {add, Path}, 1000).

%%--------------------------------------------------------------------
%% @doc Get the final merged configuration.
%% @end
%%--------------------------------------------------------------------
-spec get() -> Return when
	Return :: map().

get() ->
	gen_server:call(?MODULE, get, 1000).

%%--------------------------------------------------------------------
%% @doc Get the configuration file from a stored path.
%% @end
%%--------------------------------------------------------------------
-spec get_by_path(Path) -> Return when
	Path :: string() | binary(),
	Return :: {ok, {Timestamp, map()}} | {error, term()},
	Timestamp :: pos_integer().
get_by_path(Path) when is_list(Path) ->
	% normalize list paths to binaries (the store only uses binaries).
	get_by_path(list_to_binary(Path));
get_by_path(Path) ->
	gen_server:call(?MODULE, {get, Path}, 1000).

%%--------------------------------------------------------------------
%% @doc Get the list of configuration file stored.
%% @end
%%--------------------------------------------------------------------
-spec get_paths() -> [binary()].

get_paths() ->
	gen_server:call(?MODULE, get_paths, 1000).

%%--------------------------------------------------------------------
%% @doc Load the merged configuration in arweave_config.
%% @end
%%--------------------------------------------------------------------
-spec load() -> Return when
	Return :: ok | timeout.

load() ->
	gen_server:call(?MODULE, load, 1000).

%%--------------------------------------------------------------------
%% @doc Load a specific stored configuration file in arweave_config.
%% @end
%%--------------------------------------------------------------------
-spec load(Path) -> Return when
	Path :: string() | binary(),
	Return :: {ok, map()}.

load(Path) when is_list(Path) ->
	load(list_to_binary(Path));
load(Path) ->
	gen_server:call(?MODULE, {load, Path}, 1000).

%%--------------------------------------------------------------------
%% @doc Remove every stored path and the merged configuration from
%% the store.
%% @end
%%--------------------------------------------------------------------
-spec reset() -> ok.

reset() ->
	gen_server:call(?MODULE, reset, 1000).

%%--------------------------------------------------------------------
%% @hidden
%% @doc creates the backing ETS store. `ordered_set' keeps the
%% `{config, Path}' keys sorted, which drives the merge order.
%%--------------------------------------------------------------------
init(_) ->
	Store = ets:new(?MODULE, [
		named_table,
		ordered_set,
		protected
	]),
	{ok, Store}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' call handler: thin dispatcher to the `handle_*'
%% helpers below; unknown requests are acknowledged with `ok'.
%%--------------------------------------------------------------------
handle_call({add, Path}, _From, Store) ->
	handle_add(Path, Store);
handle_call(get, _From, Store) ->
	handle_get(Store);
handle_call({get, Path}, _From, Store) ->
	handle_get_path(Path, Store);
handle_call(get_paths, _From, Store) ->
	handle_get_paths(Store);
handle_call(load, _From, Store) ->
	handle_load(Store);
handle_call({load, Path}, _From, Store) ->
	handle_load(Path, Store);
handle_call(reset, _From, Store) ->
	handle_reset(Store);
handle_call(_Msg, _From, State) ->
	{reply, ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' info handler; all messages are ignored.
%%--------------------------------------------------------------------
handle_info(_Msg, State) ->
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc `gen_server' cast handler; all messages are ignored.
%%--------------------------------------------------------------------
handle_cast(_Msg, State) ->
	{noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc returns the merged configuration (empty map when no file has
%% been added yet).
%%--------------------------------------------------------------------
handle_get(Store) ->
	case ets:lookup(?MODULE, merge) of
		[] ->
			{reply, #{}, Store};
		[{merge, Config}] ->
			{reply, Config, Store}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc selects every stored `{config, Path}' key and returns the
%% paths.
%%--------------------------------------------------------------------
handle_get_paths(Store) ->
	Pattern = {{config, '$1'}, '_'},
	Guard = [],
	Format = ['$1'],
	Return = ets:select(?MODULE, [{Pattern, Guard, Format}]),
	{reply, Return, Store}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc looks up one stored configuration by its path; the stored
%% value is a `{Timestamp, Config}' pair.
%%--------------------------------------------------------------------
handle_get_path(Path, Store) ->
	Pattern = {{config, Path}, '$2'},
	Guard = [],
	Format = ['$2'],
	case ets:select(?MODULE, [{Pattern, Guard, Format}]) of
		[Config] ->
			{reply, {ok, Config}, Store};
		_ ->
			{reply, {error, not_found}, Store}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc pushes the merged configuration into `arweave_config',
%% key by key.
%%--------------------------------------------------------------------
handle_load(Store) ->
	{reply, Config, _} = handle_get(Store),
	% NOTE(review): `maps:map/2' is used purely for its side
	% effect and its result is discarded -- consider
	% `maps:foreach/2' (OTP 24+) if the minimum OTP allows it.
	maps:map(fun
		(K, V) ->
			arweave_config:set(K, V)
		end, Config),
	{reply, ok, Store}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc pushes one stored configuration file into `arweave_config';
%% a lookup failure is forwarded unchanged to the caller.
%%--------------------------------------------------------------------
handle_load(Path, Store) ->
	case handle_get_path(Path, Store) of
		{reply, {ok, {_, Config}}, _} ->
			maps:map(fun
				(K, V) ->
					arweave_config:set(K, V)
				end, Config),
			{reply, ok, Store};
		Else ->
			Else
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc parses a new path; when valid, stores `{Timestamp, Config}'
%% under `{config, Path}' and refreshes the merged view.
%%--------------------------------------------------------------------
handle_add(Path, Store) ->
	case parse(Path) of
		{ok, {ValidPath, Config}} ->
			Key = {config, ValidPath},
			Value = {erlang:system_time(), Config},
			ets:insert(?MODULE, {Key, Value}),
			handle_merge(Store);
		Else ->
			{reply, Else, Store}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc folds over all stored files and merges them. Since the table
%% is an `ordered_set', files are visited in path order, so later
%% (alphanumerically greater) paths win on conflicting keys. The
%% result is cached under the `merge' key.
%%--------------------------------------------------------------------
handle_merge(Store) ->
	Merged = ets:foldl(fun
		({{config, _}, {_, Config}}, Acc) ->
			maps:merge(Acc, Config);
		(_, Acc) ->
			Acc
		end,
		#{},
		?MODULE
	),
	ets:insert(?MODULE, {merge, Merged}),
	{reply, {ok, Merged}, Store}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc empties the whole configuration store (paths and cached
%% merged configuration alike).
%%--------------------------------------------------------------------
handle_reset(Table) ->
	ets:delete_all_objects(Table),
	{reply, ok, Table}.

%%--------------------------------------------------------------------
%% @doc parses a path with default (empty) options.
%% @see parse/2
%% @end
%%--------------------------------------------------------------------
-spec parse(Path) -> Return when
	Path :: binary() | string(),
	Return :: {ok, {Path, map()}} | {error, term()}.

parse(Path) ->
	parse(Path, #{}).

%%--------------------------------------------------------------------
%% @doc parses a path by running the small FSM pipeline starting at
%% `check_path/1'.
%% @end
%%--------------------------------------------------------------------
-spec parse(Path, Opts) -> Return when
	Path :: binary() | string(),
	Opts :: map(),
	Return :: {ok, {Path, map()}} | {error, term()},

parse(Path, Opts) ->
	InitialState = #{ opts => Opts, path => Path },
	arweave_config_fsm:init(?MODULE, check_path, InitialState).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc check the path used for the configuration file; on success
%% the file content is carried forward under the `data' key.
%% @end
%%--------------------------------------------------------------------
check_path(_State = #{ path := Path }) ->
	Checked = arweave_config_file_path:check(Path),
	case Checked of
		{ok, Data, CheckedState} ->
			{next, identify_parser, CheckedState#{ data => Data }};
		Failure ->
			Failure
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc picks a parser module from the file extension (computed by
%% the path check) using the `parsers/0' table.
%% @end
%%--------------------------------------------------------------------
identify_parser(State = #{ file_extension := Extension }) ->
	case maps:find(Extension, parsers()) of
		{ok, Parser} when is_atom(Parser) ->
			{next, parse_data, State#{ parser => Parser }};
		{ok, _NotAModule} ->
			{error, "unsupported extension or parser"};
		error ->
			{error, "unsupported file"}
	end.
%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc run the selected parser on the file content and return the
%% (absolute) path together with the parsed configuration.
%% @end
%%--------------------------------------------------------------------
parse_data(_State = #{ path := Path, data := Data, parser := Parser }) ->
    case Parser:parse(Data) of
        {ok, Config} ->
            {ok, {Path, Config}};
        Else ->
            Else
    end.


================================================
FILE: apps/arweave_config/src/arweave_config_file_path.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration File Checker.
%%%
%%% This module contains the code to check configuration files used by
%%% arweave and is used by all `arweave_config_file_*' modules. The
%%% goal is to have understandable and flexible rules that are easy to
%%% reuse in other parts of the code.
%%%
%%% At the end of the pipeline, the path must have been fully checked
%%% and its content stored in the `data' map key.
%%%
%%% Here are the check steps:
%%%
%%% 1. check if the path has the right type (binary).
%%%
%%% 2. check if the path is absolute (or convert it).
%%%
%%% 3. check if the file exists.
%%%
%%% 4. check if the file is readable.
%%%
%%% 5. check if the file extension is correct.
%%%
%%% 6. read the content of the file.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_file_path).
-compile(warnings_as_errors).
-export([
    check/1,
    init/1,
    check_path_type/1,
    check_path/1,
    check_relative_path/1,
    check_file_mode/1,
    extract_directory/1,
    extract_extension/1,
    read_file/1
]).
-include_lib("kernel/include/file.hrl").
%%--------------------------------------------------------------------
%% @doc check a configuration file path and read its content. Runs
%% the full pipeline described in the module documentation; on success
%% returns `{ok, Data, State}' where `Data' is the file content.
%% @end
%%--------------------------------------------------------------------
check(Path) ->
    arweave_config_fsm:init(
        ?MODULE,
        init,
        #{ path => Path }
    ).

%%--------------------------------------------------------------------
%% @private
%% @doc init the file checker: capture the current working directory,
%% needed later to resolve relative paths safely.
%% @end
%%--------------------------------------------------------------------
-spec init(State) -> Return when
    State :: map(),
    Return :: term().

init(State) ->
    case file:get_cwd() of
        {ok, Cwd} ->
            NewState = State#{ cwd => Cwd },
            {next, check_path_type, NewState};
        _ ->
            {error, "can't find current working directory"}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Check the Erlang path type. To avoid type confusion, all
%% configuration file paths are encoded using the binary type and not
%% a list; lists are converted, anything else is rejected.
%% @end
%%--------------------------------------------------------------------
check_path_type(State = #{ path := Path }) when is_list(Path) ->
    NewState = State#{ path => list_to_binary(Path) },
    {next, check_path, NewState};
check_path_type(State = #{ path := Path }) when is_binary(Path) ->
    {next, check_path, State};
check_path_type(_State) ->
    {error, "bad path type"}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Check if a path is relative or absolute. Relative paths are
%% validated (and made absolute) by check_relative_path/1; absolute
%% paths go straight to the file-mode check.
%% NOTE(review): filename:pathtype/1 can also return `volumerelative'
%% on Windows, which would crash this case — confirm Windows is out of
%% scope.
%% @end
%%--------------------------------------------------------------------
check_path(State = #{ path := Path }) ->
    case filename:pathtype(Path) of
        relative ->
            {next, check_relative_path, State};
        absolute ->
            {next, check_file_mode, State}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Check if the relative file path is safe (without "../").
%% We don't want the application to load unwanted files; this is a
%% first protection against potential file injections.
%% @end
%%--------------------------------------------------------------------
check_relative_path(State = #{ path := Path, cwd := Cwd }) ->
    case filelib:safe_relative_path(Path, Cwd) of
        unsafe ->
            {error, "unsafe path"};
        _ ->
            % the path is safe: make it absolute and keep the original
            % around in `origin_path' for reference.
            AbsolutePath = filename:absname(Path),
            NewState = State#{
                origin_path => Path,
                path => AbsolutePath
            },
            {next, check_file_mode, NewState}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Check file mode. The file must be a regular file and readable
%% at least (`read' or `read_write' access); anything else — including
%% a missing file — yields "bad path".
%% @end
%%--------------------------------------------------------------------
check_file_mode(State = #{ path := Path }) ->
    case file:read_file_info(Path) of
        {ok, #file_info{ type = regular, access = read } } ->
            {next, extract_extension, State};
        {ok, #file_info{ type = regular, access = read_write } } ->
            {next, extract_extension, State};
        _Else ->
            {error, "bad path"}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Extract the file extension and store it in `file_extension'.
%% The extension keeps its leading dot (filename:extension/1).
%% @end
%%--------------------------------------------------------------------
extract_extension(State = #{ path := Path }) ->
    Extension = filename:extension(Path),
    NewState = State#{ file_extension => Extension },
    {next, extract_directory, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Extract the directory where the file is stored into
%% `file_directory'.
%% @end
%%--------------------------------------------------------------------
extract_directory(State = #{ path := Path }) ->
    Dir = filename:dirname(Path),
    NewState = State#{ file_directory => Dir },
    {next, read_file, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc read file content.
%% @end
%%--------------------------------------------------------------------
read_file(State = #{ path := Path }) ->
    % At this step, the application must be able to read the file,
    % except if a race condition appears and the file is removed or
    % its ownership/mode changed — in that case the match below
    % crashes, and the fsm's do_loop converts the crash to an error.
    {ok, Data} = file:read_file(Path),
    {ok, Data, State}.


================================================
FILE: apps/arweave_config/src/arweave_config_format_json.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration JSON Format Support.
%%%
%%% @reference https://www.json.org/json-en.html
%%% @reference https://github.com/davisp/jiffy
%%% @end
%%%===================================================================
-module(arweave_config_format_json).
-compile(warnings_as_errors).
-export([
    parse/1,
    parse/2
]).
-export([
    decode_data/1,
    parse_config/1
]).

%%--------------------------------------------------------------------
%% @doc Parses JSON data.
%% @see parse/2
%% @end
%%--------------------------------------------------------------------
-spec parse(Data) -> Return when
    Data :: string() | binary(),
    Return :: {ok, map()} | {error, term()}.

parse(Data) ->
    parse(Data, #{}).

%%--------------------------------------------------------------------
%% @doc Parses JSON data through an arweave_config_fsm pipeline
%% starting at decode_data/1.
%% @end
%%--------------------------------------------------------------------
-spec parse(Data, Opts) -> Return when
    Data :: string() | binary(),
    Opts :: map(),
    Return :: {ok, map()} | {error, term()}.

parse(Data, Opts) ->
    State = #{
        data => Data,
        opts => Opts
    },
    arweave_config_fsm:init(?MODULE, decode_data, State).
%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc parses a string or a binary with the JSON decoder (jiffy).
%% Empty input short-circuits to an empty configuration map.
%% NOTE(review): the catch only matches errors shaped `{Position,
%% Reason}'; any other exception escapes this try and is caught by the
%% fsm's do_loop instead — confirm that is intended.
%% @end
%%--------------------------------------------------------------------
decode_data(_State = #{ data := <<>> }) ->
    {ok, #{}};
decode_data(_State = #{ data := [] }) ->
    {ok, #{}};
decode_data(State = #{ data := Data }) ->
    try
        Json = jiffy:decode(Data, [return_maps]),
        NewState = State#{ json => Json },
        {next, parse_config, NewState}
    catch
        _Error:{Position, Reason} ->
            {error, #{
                reason => Reason,
                position => Position
            } }
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc converts the decoded JSON into the configuration
%% representation via the serializer.
%% @end
%%--------------------------------------------------------------------
parse_config(_State = #{ json := Json }) ->
    arweave_config_serializer:encode(Json).


================================================
FILE: apps/arweave_config/src/arweave_config_format_legacy.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration Legacy Format Support.
%%%
%%% @see ar_config
%%% @end
%%% @todo convert config record to arweave_config spec.
%%%===================================================================
-module(arweave_config_format_legacy).
-compile(warnings_as_errors).
-export([
    parse/1,
    parse/2
]).
-export([
    decode_data/1
]).
-include_lib("kernel/include/file.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%--------------------------------------------------------------------
%% @doc Parses a JSON using the legacy parser.
%% @see parse/2
%% @end
%%--------------------------------------------------------------------
-spec parse(Data) -> Return when
    Data :: string() | binary(),
    Return :: {ok, map()} | {error, term()}.

parse(Data) ->
    parse(Data, #{}).

%%--------------------------------------------------------------------
%% @doc Parses a JSON using the legacy parser (ar_config).
%% NOTE(review): on success this returns a `#config{}' record, not a
%% map as the spec says — confirm which one callers expect.
%% @end
%%--------------------------------------------------------------------
-spec parse(Data, Opts) -> Return when
    Data :: string() | binary(),
    Opts :: map(),
    Return :: {ok, map()} | {error, term()}.

parse(Data, Opts) ->
    State = #{
        opts => Opts,
        data => Data
    },
    arweave_config_fsm:init(?MODULE, decode_data, State).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc decode legacy configuration data. Empty input yields a default
%% `#config{}' record; list input is converted to binary first; any
%% other input is handed to ar_config:parse/1 and its 2- or 3-tuple
%% errors are normalized to `{error, Reason}'.
%% @end
%%--------------------------------------------------------------------
decode_data(_State = #{ data := ""}) ->
    {ok, #config{}};
decode_data(_State = #{ data := <<>>}) ->
    {ok, #config{}};
decode_data(State = #{ data := Data }) when is_list(Data) ->
    NewState = State#{ data => list_to_binary(Data) },
    decode_data(NewState);
decode_data(_State = #{ data := Data }) ->
    case ar_config:parse(Data) of
        {ok, LegacyConfig} ->
            {ok, LegacyConfig};
        {error, Reason, _} ->
            {error, Reason};
        {error, Reason} ->
            {error, Reason}
    end.


================================================
FILE: apps/arweave_config/src/arweave_config_format_toml.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration TOML Format Support.
%%%
%%% @reference https://toml.io/en/
%%% @reference https://github.com/filmor/tomerl
%%% @end
%%%===================================================================
-module(arweave_config_format_toml).
-compile(warnings_as_errors).
-export([
    parse/1,
    parse/2
]).
-export([
    decode_data/1,
    parse_config/1
]).
-include_lib("kernel/include/file.hrl").

%%--------------------------------------------------------------------
%% @doc Parse TOML data.
%% @see parse/2
%% @end
%%--------------------------------------------------------------------
-spec parse(Data) -> Return when
    Data :: string() | binary(),
    Return :: {ok, map()} | {error, term()}.

parse(Data) ->
    parse(Data, #{}).

%%--------------------------------------------------------------------
%% @doc Parse TOML data through an arweave_config_fsm pipeline
%% starting at decode_data/1.
%% @end
%%--------------------------------------------------------------------
-spec parse(Data, Opts) -> Return when
    Data :: string() | binary(),
    Opts :: map(),
    Return :: {ok, map()} | {error, term()}.

parse(Data, Opts) ->
    State = #{
        opts => Opts,
        data => Data
    },
    arweave_config_fsm:init(?MODULE, decode_data, State).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc decode TOML data with tomerl. The final `Else' clause forwards
%% any return shape tomerl might produce beyond ok/error tuples.
%% @end
%%--------------------------------------------------------------------
decode_data(State = #{ data := Data }) ->
    case tomerl:parse(Data) of
        {ok, Parsed} ->
            {next, parse_config, State#{ config => Parsed }};
        {error, Reason} ->
            {error, Reason};
        Else ->
            Else
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc convert the parsed TOML into the configuration representation
%% via the serializer.
%% @end
%%--------------------------------------------------------------------
parse_config(#{ config := Config }) ->
    arweave_config_serializer:encode(Config).
================================================
FILE: apps/arweave_config/src/arweave_config_format_yaml.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration YAML Format Support.
%%%
%%% @reference https://yaml.org/
%%% @reference https://github.com/yakaz/yamerl/
%%% @end
%%%===================================================================
-module(arweave_config_format_yaml).
-compile(warnings_as_errors).
-export([
    parse/1,
    parse/2,
    proplist_to_map/1
]).
-export([
    decode_data/1,
    decode_proplist/1,
    parse_config/1
]).
-include_lib("kernel/include/file.hrl").
-include_lib("eunit/include/eunit.hrl").

%%--------------------------------------------------------------------
%% @doc Parse YAML data.
%% @see parse/2
%% @end
%%--------------------------------------------------------------------
-spec parse(Data) -> Return when
    Data :: string() | binary(),
    Return :: {ok, map()} | {error, term()}.

parse(Data) ->
    parse(Data, #{}).

%%--------------------------------------------------------------------
%% @doc Parse YAML data through an arweave_config_fsm pipeline
%% starting at decode_data/1.
%% @end
%%--------------------------------------------------------------------
-spec parse(Data, Opts) -> Return when
    Data :: string() | binary(),
    Opts :: map(),
    Return :: {ok, map()} | {error, term()}.

parse(Data, Opts) ->
    State = #{
        opts => Opts,
        data => Data
    },
    arweave_config_fsm:init(?MODULE, decode_data, State).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc convert yaml data to erlang terms.
%% @end
%%--------------------------------------------------------------------
decode_data(State = #{ data := Data }) ->
    try yamerl:decode(Data) of
        [] ->
            % empty document: continue with an empty proplist.
            NewState = State#{ proplist => [] },
            {next, decode_proplist, NewState};
        [Proplist] ->
            NewState = State#{ proplist => Proplist },
            {next, decode_proplist, NewState};
        _Else ->
            % yamerl returns one element per YAML document; more than
            % one document in a single file is not supported.
            {error, multi_yaml_unsupported}
    catch
        _:Reason ->
            {error, Reason}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc convert the proplist produced by yamerl to a map; conversion
%% errors are caught and returned as `{error, Reason}'.
%% @end
%%--------------------------------------------------------------------
decode_proplist(State = #{ proplist := Proplist }) ->
    try
        Parsed = proplist_to_map(Proplist),
        NewState = State#{ config => Parsed },
        {next, parse_config, NewState}
    catch
        _:Reason ->
            {error, Reason}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc returns the final configuration spec via the serializer.
%% @end
%%--------------------------------------------------------------------
parse_config(#{ config := Config }) ->
    arweave_config_serializer:encode(Config).

%%--------------------------------------------------------------------
%% @doc recursively convert a proplist to a map. `yamerl' does not
%% return a map, so the proplist it returns needs to be converted
%% recursively; keys and scalar values end up as binaries.
%% @end
%%--------------------------------------------------------------------
-spec proplist_to_map(Proplist) -> Return when
    Proplist :: proplists:proplist(),
    Return :: map().

proplist_to_map(Proplist) ->
    proplist_to_map(Proplist, #{}).

% eunit test for proplist_to_map/1 (enabled by the eunit include).
proplist_to_map_test() ->
    ?assertEqual(
        #{},
        proplist_to_map([])
    ),
    ?assertEqual(
        #{ <<"1">> => 2 },
        proplist_to_map([{1,2}])
    ),
    ?assertEqual(
        #{ <<"test">> =>
            #{ <<"a">> => <<"b">>,
               <<"c">> => <<"d">>
            }
        },
        proplist_to_map([
            {test,[
                {"a", "b"},
                {c, d}
            ]}
        ])
    ).
%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc recursive worker behind proplist_to_map/1: accumulates
%% converted pairs into `Buffer'. The previous version had a third
%% clause guarded by `is_list(V)' whose body was identical to the
%% fallback clause (converter_value/1 already turns lists into
%% binaries), so it was dead weight and has been removed — behavior is
%% unchanged.
%% NOTE(review): the object detection only inspects the first element
%% of `V'; a mixed list like `[{a,b}, c]' would crash in the recursive
%% call — confirm yamerl never produces such lists.
%% @end
%%--------------------------------------------------------------------
proplist_to_map([], Buffer) ->
    Buffer;
proplist_to_map([{K,V = [{_,_}|_]}|Rest], Buffer) ->
    % if a value is made of tuples, we assume this is an object,
    % and convert it to a map recursively.
    Recurse = proplist_to_map(V),
    Key = converter_key(K),
    proplist_to_map(Rest, Buffer#{ Key => Recurse });
proplist_to_map([{K,V}|Rest], Buffer) ->
    % scalar (or string) value: normalize key and value to binaries
    % where applicable.
    Key = converter_key(K),
    Value = converter_value(V),
    proplist_to_map(Rest, Buffer#{ Key => Value }).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc normalize a proplist key to a binary; integers, atoms and
%% strings are converted, anything else passes through untouched.
%% @end
%%--------------------------------------------------------------------
converter_key(Key) when is_integer(Key) -> integer_to_binary(Key);
converter_key(Key) when is_atom(Key) -> atom_to_binary(Key);
converter_key(Key) when is_list(Key) -> list_to_binary(Key);
converter_key(Key) -> Key.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc normalize a scalar value: atoms and strings become binaries,
%% every other term (integers, booleans from yamerl, …) passes
%% through untouched.
%% @end
%%--------------------------------------------------------------------
converter_value(Value) when is_atom(Value) -> atom_to_binary(Value);
converter_value(Value) when is_list(Value) -> list_to_binary(Value);
converter_value(Value) -> Value.


================================================
FILE: apps/arweave_config/src/arweave_config_fsm.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc An internal FSM implementation for Arweave Config.
%%%
%%% This kind of abstraction helps to create tests on individual parts
%%% of functions, based on the returned values. With it, one controls
%%% both the input and the output. Side-effect functions can also be
%%% isolated easily.
%%%
%%% The state passed to all callback functions is not enforced, and can
%%% be of any Erlang type.
%%%
%%% == Examples ==
%%%
%%% `arweave_config_fsm' is a really simple finite state machine,
%%% inspired by `gen_statem' and `gen_fsm'. Each function returns a
%%% tuple with the next function to call. The state is passed to the
%%% next function, until one function returns `{ok, term()}' to
%%% end the pipeline.
%%%
%%% ```
%%% -module(fsm_test).
%%% -export([start/1]).
%%% -export([first/1, second/1, third/1]).
%%%
%%% % starter
%%% start(_Opts) ->
%%%     State = #{},
%%%     arweave_config_fsm:init(?MODULE, first, State).
%%%
%%% % callback transition
%%% first(State) ->
%%%     {next, second, State#{ first => ok }}.
%%%
%%% % module callback transition and error return
%%% second(State = #{ first := ok }) ->
%%%     {next, ?MODULE, third, State#{ second => ok }};
%%% second(#{ first := error }) ->
%%%     {error, "first function failed"}.
%%%
%%% % loop (dangerous) support and final return value
%%% third(#{ reset := true }) ->
%%%     {next, first, #{ state => loop }};
%%% third(_State) ->
%%%     {ok, final_result}.
%%% '''
%%%
%%% == Metadata for Tracing and Debugging Purpose ==
%%%
%%% Debugging and tracing an execution pipeline can sometimes be
%%% complex, even with Erlang tooling. This fsm implements a way to
%%% collect this information on demand, by setting the flag `meta' to
%%% `true' during the initialization phase.
%%%
%%% ```
%%% -module(t).
%%% -export([start/0, my_function/1]).
%%%
%%% start() ->
%%%     {ok, value, Metadata} = arweave_config_fsm:init(
%%%         ?MODULE,
%%%         my_function,
%%%         #{ meta => true },
%%%         my_state
%%%     ).
%%%
%%% my_function(_State) ->
%%%     {ok, value}.
%%% '''
%%%
%%% The `Metadata' variable will contain the execution history with the
%%% timestamp when each step was executed and the module/function
%%% callback. A counter is also available holding the number of steps
%%% executed.
%%%
%%% Note: this will have a small impact on performance, but it should
%%% be negligible. In case of a long and complex pipeline, the size of
%%% the history can grow dangerously and use a lot of memory.
%%%
%%% == TODO ==
%%%
%%% Better error management (e.g. specific error message when a
%%% callback module or a callback function are not atoms).
%%%
%%% Custom options (e.g. return metadata or send them to another
%%% process during execution).
%%%
%%% Enforce behavior.
%%%
%%% Add delay/timeout support.
%%%
%%% Permit filtering what we want to store in meta-data, and/or allow
%%% a lambda function to collect custom data.
%%%
%%% Add a debug state where the returned values are stored.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_fsm).
-compile(warnings_as_errors).
-export([init/3, init/4]).
-include_lib("kernel/include/logger.hrl").

%%--------------------------------------------------------------------
%% type definitions, useful for the behavior feature and DRY.
%%--------------------------------------------------------------------
% a module callback as atom.
-type fsm_module() :: atom().

% a function callback as atom.
-type fsm_callback() :: atom().

% available fsm options/parameters.
-type fsm_opts() :: #{ meta => boolean() }.

% fsm state defined by the developer during initialization or during
% execution.
-type fsm_state() :: term().

% fsm metadata used to store and collect stats and information about
% the fsm execution.
-type fsm_meta_history() :: #{
    timestamp => pos_integer(),
    module => fsm_module(),
    callback => fsm_callback(),
    process_info => map(),
    pid => pid()
}.
-type fsm_metadata() :: #{
    meta => boolean(),
    history => [fsm_meta_history()],
    counter => pos_integer()
}.
% values returned by the callbacks defined by the developer.
-type fsm_callback_return() ::
    meta
    | {ok, term()}
    | {next, fsm_callback(), fsm_state()}
    | {next, fsm_module(), fsm_callback(), fsm_state()}
    | {error, term()}
    | {error, term(), fsm_state()}.

% values always returned by the fsm after the final callback.
-type fsm_return() ::
    {ok, term()}
    | {ok, term(), fsm_state() | fsm_metadata()}
    | {ok, term(), fsm_state(), fsm_metadata()}
    | {error, term()}
    | {error, term(), fsm_state()}.

%%--------------------------------------------------------------------
%% if one wants to use it as behavior, callback_name is an example
%% function, and no errors/warnings will be reported if this one is
%% not explicitly defined.
%%--------------------------------------------------------------------
-callback(
    callback_name(fsm_state()) -> fsm_callback_return()
).
-optional_callbacks(callback_name/1).

%%--------------------------------------------------------------------
%% @doc `arweave_config_fsm' initialize/starter function. Runs with
%% metadata collection disabled (empty options).
%% @see init/4
%% @end
%%--------------------------------------------------------------------
-spec init(Module, Callback, State) -> Return when
    Module :: fsm_module(),
    Callback :: fsm_callback(),
    State :: fsm_state(),
    Return :: fsm_return().

init(Module, Callback, State) ->
    init(Module, Callback, #{}, State).

%%--------------------------------------------------------------------
%% @doc `arweave_config_fsm' initialize/starter function. A meta
%% parameter can be configured during initialization, storing
%% information about the pipelined functions and offering a way to
%% trace the execution of the fsm.
%% @end
%%--------------------------------------------------------------------
-spec init(Module, Callback, Opts, State) -> Return when
    Module :: fsm_module(),
    Callback :: fsm_callback(),
    Opts :: fsm_opts(),
    State :: fsm_state(),
    Return :: fsm_return().

init(Module, Callback, Opts, State) ->
    init_meta(Module, Callback, Opts, State).
%%--------------------------------------------------------------------
%% @hidden
%% @doc initialize metadata. When `meta => true' is set in the
%% options, the first history entry is recorded before the loop
%% starts; otherwise the metadata map stays empty.
%% @end
%%--------------------------------------------------------------------
init_meta(Module, Callback, Opts = #{ meta := true }, State) ->
    Meta = #{
        opts => Opts,
        % always true here (the clause head matched meta := true);
        % kept as a lookup for symmetry with the options map.
        meta => maps:get(meta, Opts, false),
        history => [meta_history(Module, Callback)],
        counter => 0
    },
    do_loop(Module, Callback, State, Meta);
init_meta(Module, Callback, _Opts, State) ->
    do_loop(Module, Callback, State, #{}).

%%--------------------------------------------------------------------
%% @hidden
%% @doc main loop where all callbacks are executed. Any exception
%% raised by a callback is converted into an `{error, Reason}' return
%% and routed through do_eval/5 like a normal error.
%% @end
%%--------------------------------------------------------------------
-spec do_loop(Module, Callback, State, Meta) -> Return when
    Module :: fsm_module(),
    Callback :: fsm_callback(),
    State :: fsm_state(),
    Meta :: fsm_metadata(),
    Return :: fsm_return().

do_loop(Module, Callback, State, Meta) ->
    try
        Return = erlang:apply(Module, Callback, [State]),
        do_eval(
            Module,
            Callback,
            Return,
            State,
            update_meta(Module, Callback, Meta)
        )
    catch
        _Error:Reason ->
            do_eval(
                Module,
                Callback,
                {error, Reason},
                State,
                update_meta(Module, Callback, Meta)
            )
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc evaluate the return of a callback and do the routing part.
%% Clause order matters: the meta-enabled `ok' clauses must come
%% before the plain `ok' clauses, and the catch-all clause must stay
%% last.
%% NOTE(review): the `meta' clause returns `{meta, Meta}', which is
%% not listed in fsm_return() — confirm the type or the clause.
%% @end
%%--------------------------------------------------------------------
-spec do_eval(Module, Callback, CallbackReturn, State, Meta) -> Return when
    Module :: fsm_module(),
    Callback :: fsm_callback(),
    CallbackReturn :: fsm_callback_return(),
    State :: fsm_state(),
    Meta :: fsm_metadata(),
    Return :: fsm_return().

% return latest value with meta information and stop the fsm.
do_eval(_Module, _Callback, {ok, Return}, _State, Meta = #{ meta := true }) ->
    {ok, Return, Meta};
% return latest value with meta information and state then stop the
% fsm.
do_eval(_Module, _Callback, {ok, Return, NewState}, _State, Meta = #{ meta := true}) ->
    {ok, Return, NewState, Meta};
% final evaluation, return the value from the callback.
do_eval(_Module, _Callback, {ok, Return}, _State, _Meta) ->
    {ok, Return};
% final evaluation, return the value from the callback and its last
% state.
do_eval(_Module, _Callback, {ok, Return, NewState}, _State, _Meta) ->
    {ok, Return, NewState};
% return meta information and stop the fsm.
do_eval(_Module, _Callback, meta, _State, Meta) ->
    {meta, Meta};
% fsm transition with a new callback and a new state on the same
% module.
do_eval(Module, _Callback, {next, NextCallback, NewState}, _State, Meta)
        when is_atom(NextCallback) ->
    do_loop(
        Module,
        NextCallback,
        NewState,
        Meta
    );
% fsm transition with a new state. this clause switches to another
% module and another callback with a new state.
do_eval(_Module, _Callback, {next, NextModule, NextCallback, NewState}, _State, Meta)
        when is_atom(NextModule), is_atom(NextCallback) ->
    do_loop(
        NextModule,
        NextCallback,
        NewState,
        Meta
    );
% fsm error management with debugging feature for traceability.
do_eval(Module, Callback, {error, Reason}, State, Meta) ->
    {error, #{
        debug => #{
            module => ?MODULE,
            function => ?FUNCTION_NAME,
            function_arity => ?FUNCTION_ARITY,
            line => ?LINE
        },
        reason => Reason,
        module => Module,
        callback => Callback,
        state => State,
        meta => Meta
    } };
% same as above, but the callback also returned a new state which is
% carried in the error tuple.
do_eval(Module, Callback, {error, Reason, NewState}, State, Meta) ->
    {error, #{
        debug => #{
            module => ?MODULE,
            function => ?FUNCTION_NAME,
            function_arity => ?FUNCTION_ARITY,
            line => ?LINE
        },
        reason => Reason,
        module => Module,
        callback => Callback,
        state => State,
        new_state => NewState,
        meta => Meta
    }, NewState };
% catch-all: the callback returned an unsupported value.
do_eval(Module, Callback, Return, State, Meta) ->
    {error, #{
        debug => #{
            module => ?MODULE,
            function => ?FUNCTION_NAME,
            function_arity => ?FUNCTION_ARITY,
            line => ?LINE
        },
        reason => unsupported_return,
        return => Return,
        module => Module,
        callback => Callback,
        state => State,
        meta => Meta
    } }.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc update metadata information if enabled: prepend a new history
%% entry (newest first) and bump the step counter.
%% @end
%%--------------------------------------------------------------------
-spec update_meta(Module, Callback, Meta) -> Return when
    Module :: fsm_module(),
    Callback :: fsm_callback(),
    Meta :: fsm_metadata(),
    Return :: Meta.

update_meta(Module, Callback, Meta = #{ meta := true }) ->
    History = maps:get(history, Meta),
    Counter = maps:get(counter, Meta),
    NewHistory= [meta_history(Module, Callback)|History],
    Meta#{
        history => NewHistory,
        counter => Counter+1
    };
update_meta(_, _, Meta) ->
    Meta.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc generate a meta history item: timestamp, callback identity and
%% a snapshot of the current process statistics.
%% @end
%%--------------------------------------------------------------------
meta_history(Module, Callback) ->
    #{
        timestamp => erlang:system_time(),
        module => Module,
        callback => Callback,
        process_info => meta_process_info(),
        pid => self()
    }.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc wrapper around erlang:process_info/2 returning a map of a few
%% selected process statistics.
%% @end
%%--------------------------------------------------------------------
meta_process_info() ->
    maps:from_list(
        erlang:process_info(
            self(),
            [
                heap_size,
                message_queue_len,
                reductions,
                stack_size,
                status,
                total_heap_size,
                trap_exit
            ]
        )
    ).
================================================
FILE: apps/arweave_config/src/arweave_config_http_server.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Configuration HTTP Server Interface.
%%%
%%% This module uses cowboy to handle configuration requests. The
%%% goal is to configure some values dynamically using a web
%%% interface. The default TCP port used is `4891'.
%%%
%%% The interface follows a RESTful-like model, where a path
%%% represents an object.
%%%
%%% A configuration path is converted to a parameter:
%%%
%%% ```
%%% v1/config/debug
%%%
%%% % becomes
%%%
%%% [debug]
%%% '''
%%%
%%% This API is also versioned; the `v0' version is mostly a draft to
%%% see how the different methods behave.
%%%
%%% The JSON data returned tries to follow the JSend format.
%%%
%%% see: https://github.com/omniti-labs/jsend
%%%
%%% == Examples ==
%%%
%%% By default, the values being used and returned are raw:
%%%
%%% ```
%%% # get the value of global.debug parameter
%%% $ curl localhost:4891/v1/config/debug
%%% {"status":"success","data":true}
%%%
%%% # set the value of global.debug parameter
%%% $ curl localhost:4891/v1/config/global/debug -d false
%%% {"status":"success","data":false}
%%% '''
%%%
%%% === Unix Socket Support ===
%%%
%%% The Arweave Configuration HTTP API can listen on a Unix socket
%%% instead of an IP address. If a valid path is given instead of an
%%% IP address, cowboy will listen on this file. When the server is
%%% stopped, this file should be removed.
%%%
%%% One can then use an HTTP client (e.g.
curl) to send HTTP requests, %%% here an example %%% %%% ``` %%% curl \ %%% --unix-socket ${WORKDIR}/arweave.sock \ %%% http://localhost/v1/config/... %%% ''' %%% %%% Enabling the usage of an unix socket restrict the surface attack, %%% and limit the configuration access to only the user with %%% read/write access to it. The "authentication" is then based on %%% UNIX credentials. %%% %%% Note: it can also be a good way to offer an interface for a GUI. %%% %%% @end %%%=================================================================== -module(arweave_config_http_server). -export([start_link/0, stop/0]). -export([start_as_child/0, stop_as_child/0]). -export([init/2]). -include_lib("kernel/include/logger.hrl"). -include_lib("eunit/include/eunit.hrl"). %%-------------------------------------------------------------------- %% @doc start cowboy as `arweave_config_sup' child. %% @end %%-------------------------------------------------------------------- start_as_child() -> Spec = #{ id => ?MODULE, start => {?MODULE, start_link, []}, type => worker, restart => temporary }, supervisor:start_child(arweave_config_sup, Spec). %%-------------------------------------------------------------------- %% @doc stop cowboy from `arweave_config_sup'. %% @end %%-------------------------------------------------------------------- stop_as_child() -> stop(), supervisor:terminate_child(arweave_config_sup, ?MODULE), supervisor:delete_child(arweave_config_sup, ?MODULE). %%-------------------------------------------------------------------- %% @doc start arweave config http api interface. 
%% @end
%%--------------------------------------------------------------------
start_link() ->
    {ok, Host} = arweave_config:get([config,http,api,listen,address]),
    {ok, Port} = arweave_config:get([config,http,api,listen,port]),
    ProtocolOpts = #{ env => #{ dispatch => dispatch() } },
    cowboy:start_clear(?MODULE, transport_opts(Host, Port), ProtocolOpts).

%%--------------------------------------------------------------------
%% @hidden
%% Build ranch transport options: a parseable IP address yields a
%% TCP listener, anything else is treated as a Unix socket path.
%%--------------------------------------------------------------------
transport_opts(Host, Port) ->
    case inet:parse_address(binary_to_list(Host)) of
        {ok, Ip} ->
            [{port, Port}, {ip, Ip}];
        {error, _} ->
            % not an IP address: assume a Unix socket path
            [{ip, {local, Host}}]
    end.

%%--------------------------------------------------------------------
%% @doc stop the listener; if it was bound to a Unix socket, remove
%% the socket file from the filesystem.
%% @end
%%--------------------------------------------------------------------
stop() ->
    Addr = ranch:get_addr(?MODULE),
    cowboy:stop_listener(?MODULE),
    cleanup_socket(Addr).

%% @hidden delete the socket file left behind by a local listener.
cleanup_socket({local, Path}) ->
    ?LOG_DEBUG("remove ~p", [Path]),
    file:delete(Path),
    ok;
cleanup_socket(_) ->
    ok.

%%--------------------------------------------------------------------
%% @hidden
%% compile the cowboy routes for any host.
%%--------------------------------------------------------------------
dispatch() ->
    cowboy_router:compile([{'_', router()}]).

%%--------------------------------------------------------------------
%% @hidden
%% default response headers (all replies are JSON).
%%--------------------------------------------------------------------
headers() ->
    #{ <<"content-type">> => <<"application/json">> }.

%%--------------------------------------------------------------------
%% @hidden
%% every route is handled by this module's init/2.
%%--------------------------------------------------------------------
router() ->
    Paths = [
        "/v0",
        "/v0/config",
        "/v0/config/[...]",
        "/v0/environment"
    ],
    [ {Path, ?MODULE, #{}} || Path <- Paths ].
%%--------------------------------------------------------------------
%% @hidden
%% cowboy handler entry point: one clause per supported path. Clause
%% order matters — more specific paths must stay above the catch-all.
%%--------------------------------------------------------------------
% @todo returns API specifications
% init(Req = #{ path := <<"/v0">> }, State) ->
%     Reply = cowboy_req:reply(200, #{}, <<>>, Req),
%     {ok, Reply, State};
% @todo returns arweave and beam arguments
% init(Req = #{ path := <<"/v0/arguments">> }, State) -> ok;
% GET /v0/environment: dump the process environment as a JSON map.
init(Req = #{ path := <<"/v0/environment">>, method := <<"GET">> }, State) ->
    Environment = arweave_config_environment:get(),
    AsMap = maps:from_list(Environment),
    Headers = headers(),
    Body = encode(
        jsend(
            success, AsMap
        )
    ),
    Reply = cowboy_req:reply(200, Headers, Body, Req),
    {ok, Reply, State};
% /v0/config (any method): return the whole configuration store.
init(Req = #{ path := <<"/v0/config">> }, State) ->
    % @todo: add the configuration from spec (with default value)
    % should return the full configuration using different format.
    Config = arweave_config_store:to_map(),
    Headers = headers(),
    Body = encode(
        jsend(
            success, Config
        )
    ),
    Reply = cowboy_req:reply(200, Headers, Body, Req),
    {ok, Reply, State};
% trailing slash: treat "/v0/config/" like "/v0/config".
init(Req = #{ path := <<"/v0/config/">> }, State) ->
    init(Req#{ path => <<"/v0/config">> }, State);
% /v0/config/<key...>: read or write a single parameter. The guard
% rejects an empty key (already covered by the clause above, kept as
% a safety net).
init(Req = #{ path := <<"/v0/config/", Key/binary>> }, State)
        when Key =/= <<>> ->
    apply_config(Key, Req, State);
% anything else: 404.
% NOTE(review): this logs every unmatched request at info level —
% consider debug level if this endpoint is exposed widely.
init(Req, State) ->
    ?LOG_INFO("~p", [{Req, State}]),
    Headers = headers(),
    Body = encode(
        jsend(
            error, <<"not found">>
        )
    ),
    Reply = cowboy_req:reply(404, Headers, Body, Req),
    {ok, Reply, State}.

%%--------------------------------------------------------------------
%% Translate the result of config/3 into a cowboy reply. Any result
%% that is not {ok, Map} is mapped to a generic 400 error.
%%--------------------------------------------------------------------
apply_config(Key, Req, State) ->
    case config(Key, Req, State) of
        {ok, #{ status := Status, body := Body, req := NewReq }} ->
            Reply = cowboy_req:reply(
                Status,
                headers(),
                encode(Body),
                NewReq
            ),
            {ok, Reply, State};
        _Else ->
            Reply = cowboy_req:reply(
                400,
                headers(),
                encode(
                    jsend(
                        error, <<"configuration error">>
                    )
                ),
                Req
            ),
            {ok, Reply, State}
    end.
%%--------------------------------------------------------------------
%% @hidden
%% config end-point: translate the URL key into a configuration
%% parameter, then dispatch on the HTTP method.
%%
%% Fixes over the previous version:
%%   - removed a leftover io:format/2 debug print executed on every
%%     request;
%%   - re:replace/4 now uses {return, binary}, removing the manual
%%     iodata-to-binary conversion;
%%   - added a catch-all clause in config1/3: unsupported methods now
%%     answer 405 instead of crashing with function_clause (which
%%     cowboy reported as a 500).
%%--------------------------------------------------------------------
config(Key, Req, State) ->
    % the path "a/b/c" maps to the dotted key "a.b.c" expected by the
    % parser
    DottedKey = re:replace(Key, <<"/">>, <<".">>, [global, {return, binary}]),
    case arweave_config_parser:key(DottedKey) of
        {ok, Parameter} ->
            config1(Parameter, Req, State);
        _ ->
            % unparseable key
            NewState = #{
                status => 400,
                headers => headers(),
                body => jsend(
                    error, <<"bad data">>
                ),
                req => Req
            },
            {ok, NewState}
    end.

%% @hidden GET: read the current value of the parameter.
config1(Parameter, Req = #{ method := <<"GET">> }, State) ->
    case arweave_config:get(Parameter) of
        {ok, Value} ->
            NewState = State#{
                status => 200,
                headers => headers(),
                body => jsend(
                    success, Value
                ),
                req => Req
            },
            {ok, NewState};
        _ ->
            NewState = State#{
                status => 404,
                headers => headers(),
                body => jsend(
                    error, <<"not_found">>
                ),
                req => Req
            },
            {ok, NewState}
    end;
%% @hidden POST: set a new value; the request must carry a body.
config1(Parameter, Req = #{ method := <<"POST">> }, State) ->
    case cowboy_req:has_body(Req) of
        true ->
            config_post(Parameter, Req, State);
        false ->
            NewState = State#{
                status => 400,
                headers => headers(),
                body => jsend(
                    error, <<"missing body">>
                ),
                req => Req
            },
            {ok, NewState}
    end;
%% @hidden any other method is not allowed on this resource.
config1(_Parameter, Req, State) ->
    NewState = State#{
        status => 405,
        headers => headers(),
        body => jsend(
            error, <<"method not allowed">>
        ),
        req => Req
    },
    {ok, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% read the request body and forward it to config_post1/4.
%%--------------------------------------------------------------------
config_post(Parameter, Req, State) ->
    case cowboy_req:read_body(Req) of
        {ok, Data, Req0} ->
            config_post1(Data, Parameter, Req0, State);
        _ ->
            NewState = State#{
                status => 400,
                headers => headers(),
                body => jsend(
                    error, <<"bad data">>
                ),
                req => Req
            },
            {ok, NewState}
    end.
%% @hidden apply the new value through the configuration spec and
%% report both the new and the previous value on success.
config_post1(Data, Parameter, Req, State) ->
    case arweave_config_spec:set(Parameter, Data) of
        {ok, NewValue, OldValue} ->
            NewState = State#{
                status => 200,
                headers => headers(),
                body => jsend(
                    success, #{
                        new => NewValue,
                        old => OldValue
                    }
                ),
                req => Req
            },
            {ok, NewState};
        _ ->
            NewState = State#{
                status => 400,
                headers => headers(),
                body => jsend(
                    error, <<"bad data">>
                ),
                req => Req
            },
            {ok, NewState}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% build a jsend-style response payload.
%% see: https://github.com/omniti-labs/jsend
%%--------------------------------------------------------------------
-spec jsend(Status, Data) -> Return when
    Status :: success | fail | error,
    Data :: binary() | map() | list() | integer(),
    Return :: #{
        status => success | fail | error,
        data => Data,
        message => Data
    }.

jsend(success, Data) ->
    #{ status => success, data => Data };
jsend(fail, Data) ->
    #{ status => fail, data => Data };
jsend(error, Message) ->
    #{ status => error, message => Message }.

%%--------------------------------------------------------------------
%% @hidden
%% JSON serialization wrapper (jiffy).
%%--------------------------------------------------------------------
encode(Data) ->
    jiffy:encode(Data).

================================================
FILE: apps/arweave_config/src/arweave_config_legacy.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @deprecated This module is a legacy compat layer.
%%% @doc temporary interface to arweave legacy configuration.
%%%
%%% This module is mainly used as a process to deal with arweave
%%% legacy configuration. Indeed, the previous implementation was
%%% using a record to store parameters as record's keys;
%%% unfortunately, this is not flexible enough to do everything. This
%%% process is a direct interface to `application:set_env(arweave,
%%% config, _)' function and to the `#config{}' record.
%%%
%%% The record needs to be converted to a proplist, which introduces
%%% slower answers, but at this time the configuration is not dynamic
%%% at all; this means this performance issue will only impact
%%% arweave during startup.
%%%
%%% @TODO TO REMOVE when legacy configuration will be dropped.
%%%
%%% == Examples ==
%%%
%%% ```
%%% % get the configuration as #config{} record from
%%% % arweave_config_legacy process state. Similar to
%%% % application:get_env/2
%%% {ok, #config{}} = arweave_config_legacy:get_env().
%%%
%%% % overwrite the configuration present in `arweave_config_legacy'
%%% % process state, similar to application:set_env/3.
%%% arweave_config_legacy:set_env(#config{}).
%%%
%%% % get value's key.
%%% Init = arweave_config_legacy:get(init).
%%%
%%% % set a value's key.
%%% arweave_config_legacy:set(init, false).
%%%
%%% % reset the configuration with the default state (default values
%%% % from `#config{}'.
%%% arweave_config_legacy:reset().
%%% '''
%%%
%%% @end
%%%===================================================================
-module(arweave_config_legacy).
-behavior(gen_server).
-compile(warnings_as_errors).
% this module defines its own get/0 and get/1
-compile({no_auto_import,[get/0, get/1]}).
-export([start_link/0, stop/0]).
-export([
    get/0, get/1,
    get_config_value/2,
    get_env/0,
    has_key/1,
    keys/0,
    merge/1,
    reset/0,
    set/1, set/2,
    set_env/1,
    config_merge/2,
    config_to_proplist/1,
    proplist_to_config/1
]).
-export([init/1, terminate/2]).
-export([handle_call/3, handle_info/2, handle_cast/2]).
-include("arweave_config.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("eunit/include/eunit.hrl").

% process state: the configuration kept both as a proplist (for
% key-based access) and as the #config{} record (for get_env/0).
-record(?MODULE, {proplist, record}).
%%--------------------------------------------------------------------
%% @doc Returns the complete list of all keys from configuration
%% process state.
%% @end
%%--------------------------------------------------------------------
-spec keys() -> [atom()].

keys() ->
    gen_server:call(?MODULE, keys, 1000).

%%--------------------------------------------------------------------
%% @doc Check if a key is present in the process record state.
%% @end
%%--------------------------------------------------------------------
-spec has_key(atom()) -> boolean().

has_key(Key) when is_atom(Key) ->
    gen_server:call(?MODULE, {has_key, Key}, 1000).

%%--------------------------------------------------------------------
%% @doc Returns process state configuration as `#config{}' record.
%%
%% Returns the bare record (not wrapped in `{ok, _}'), or `undefined'
%% if the server replies anything unexpected. If the call itself
%% fails (e.g. timeout), `{error, {Reason, Stacktrace}}' is thrown.
%% @end
%%--------------------------------------------------------------------
% spec fix: the function returns the record itself, the previous spec
% wrongly advertised {ok, #config{}}.
-spec get() -> Return when
    Return :: undefined | #config{}.

get() ->
    try gen_server:call(?MODULE, get, 1000) of
        {ok, Value} -> Value;
        _Elsewise -> undefined
    catch
        _E:R:S -> throw({error, {R, S}})
    end.

%%--------------------------------------------------------------------
%% @doc Returns the value of a key from process state configuration.
%%
%% Throws `{error, {Reason, Stacktrace}}' if the call fails.
%% @end
%%--------------------------------------------------------------------
-spec get(Key) -> Return when
    Key :: atom(),
    Return :: undefined | term().

get(Key) when is_atom(Key) ->
    try gen_server:call(?MODULE, {get, Key}, 1000) of
        {ok, Value} -> Value;
        _Elsewise -> undefined
    catch
        _E:R:S -> throw({error, {R, S}})
    end.

%%--------------------------------------------------------------------
%% @doc Set a new config file.
%% @end
%%--------------------------------------------------------------------
% spec fix: gen_server:call/3 never returns `timeout' (it exits on
% timeout), so the previous `| timeout' alternative was unreachable.
-spec set(Config) -> Return when
    Config :: #config{},
    Return :: ok | {error, term()}.

set(Config) when is_record(Config, config) ->
    gen_server:call(?MODULE, {set, Config}, 1000);
set(_) ->
    {error, badarg}.
%%--------------------------------------------------------------------
%% @doc Set a value to a key.
%% @end
%%--------------------------------------------------------------------
-spec set(Key, Value) -> Return when
    Key :: atom(),
    Value :: term(),
    Return :: {ok, Value} | {error, term()}.

set(Key, Value) ->
    set(Key, Value, #{}).

%%--------------------------------------------------------------------
%% @doc Set a value to a key (custom options). When setting `set_env'
%% to `true', the environment application arweave/config is configured
%% with the content of the store from this process.
%%
%% Warning: this part is not protected against race condition, if
%% another process is setting application environment variable with
%% `application:set_env/2' function, the state present in this process
%% will not have the correct information.
%% @end
%%--------------------------------------------------------------------
-spec set(Key, Value, Opts) -> Return when
    Key :: atom(),
    Value :: term(),
    Opts :: #{ set_env => boolean() },
    Return :: {ok, Value} | {error, term()}.

set(Key, Value, Opts) when is_atom(Key), is_map(Opts) ->
    Request = {set, Key, Value, Opts},
    try gen_server:call(?MODULE, Request, 1000) of
        {ok, New, _Old} ->
            {ok, New};
        Unexpected ->
            {error, Unexpected}
    catch
        _Class:Reason:Stacktrace ->
            throw({error, {Reason, Stacktrace}})
    end.

%%--------------------------------------------------------------------
%% @doc import #config{} record and set it as new state.
%% @end
%%--------------------------------------------------------------------
set_env(Config) when is_record(Config, config) ->
    gen_server:call(?MODULE, {set_env, Config}, 1000).

%%--------------------------------------------------------------------
%% @doc reset the legacy configuration by using the default values.
%% @end
%%--------------------------------------------------------------------
reset() ->
    gen_server:call(?MODULE, reset, 1000).
%%--------------------------------------------------------------------
%% @doc export the current configuration as `#config{}' record.
%% @end
%%--------------------------------------------------------------------
-spec get_env() -> {ok, #config{}}.

get_env() ->
    gen_server:call(?MODULE, get_env, 1000).

%%--------------------------------------------------------------------
%% @doc merge a configuration file (set only modified values).
%% @end
%%--------------------------------------------------------------------
-spec merge(Config) -> Return when
    Config :: #config{},
    Return :: {ok, Config} | {error, term()}.

merge(Config) when is_record(Config, config) ->
    gen_server:call(?MODULE, {merge, Config}, 1000);
merge(_) ->
    {error, badarg}.

%%--------------------------------------------------------------------
%% @doc start `arweave_config_legacy' process.
%% @end
%%--------------------------------------------------------------------
start_link() ->
    ?LOG_INFO("start ~p process (~p)", [?MODULE, self()]),
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%--------------------------------------------------------------------
%% @doc stop `arweave_config_legacy' process.
%% @end
%%--------------------------------------------------------------------
stop() ->
    gen_server:stop(?MODULE).

%%--------------------------------------------------------------------
%% @hidden
%% initialise the state with the #config{} defaults and push them to
%% the application environment.
%%--------------------------------------------------------------------
init(_) ->
    ?LOG_INFO("start ~p process", [?MODULE]),
    Defaults = config_to_proplist(#config{}),
    ?LOG_DEBUG([{configuration, Defaults}]),
    set_environment(Defaults),
    State = #?MODULE{
        proplist = Defaults,
        record = #config{}
    },
    {ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
terminate(_, _) ->
    ?LOG_INFO("stop ~p process (~p)", [?MODULE, self()]),
    ok.
%%--------------------------------------------------------------------
%% @hidden
%% gen_server call handler. The state always keeps the configuration
%% in both forms (proplist + record) in sync.
%%--------------------------------------------------------------------
handle_call({merge, Config}, _From, State = #?MODULE{ proplist = P })
        when is_record(Config, config) ->
    try
        MergedProplist = config_merge(P, Config),
        MergedConfig = proplist_to_config(MergedProplist),
        NewState = State#?MODULE{
            proplist = MergedProplist,
            record = MergedConfig
        },
        {reply, {ok, MergedConfig}, NewState}
    catch
        _Error:Reason ->
            {reply, {error, Reason}, State}
    end;
handle_call({has_key, Key}, _From, State = #?MODULE{ proplist = P }) ->
    {reply, proplists:is_defined(Key, P), State};
handle_call(keys, _From, State = #?MODULE{ proplist = P }) ->
    {reply, [ K || {K,_} <- P ], State};
handle_call(get, _From, State = #?MODULE{ record = R }) ->
    {reply, {ok, R}, State};
handle_call({get, Key}, _From, State = #?MODULE{ proplist = P })
        when is_atom(Key) ->
    Return = {ok, proplists:get_value(Key, P)},
    {reply, Return, State};
handle_call({set, Config}, _From, State) when is_record(Config, config) ->
    try
        Proplist = config_to_proplist(Config),
        NewState = #?MODULE{
            proplist = Proplist,
            record = Config
        },
        set_environment(Config),
        {reply, ok, NewState}
    catch
        _Error:Reason ->
            {reply, {error, Reason}, State}
    end;
% NOTE(review): Opts is currently ignored here — the documented
% `set_env => true' option has no effect; the application environment
% is updated unconditionally. Also, lists:keyreplace/4 silently does
% nothing when Key is not a known field. Confirm intended behavior
% before changing either.
handle_call({set, Key, Value, Opts}, _From, State = #?MODULE{ proplist = P })
        when is_atom(Key), is_map(Opts) ->
    OldValue = proplists:get_value(Key, P),
    NewP = lists:keyreplace(Key, 1, P, {Key, Value}),
    set_environment(NewP),
    Return = {ok, Value, OldValue},
    NewState = State#?MODULE{
        proplist = NewP,
        record = proplist_to_config(NewP)
    },
    {reply, Return, NewState};
handle_call(get_env, _From, State = #?MODULE{ record = R }) ->
    {reply, {ok, R}, State};
handle_call({set_env, Config}, _From, State) ->
    case import_config(Config) of
        {ok, NewP} ->
            set_environment(NewP),
            NewState = State#?MODULE{
                proplist = NewP,
                record = proplist_to_config(NewP)
            },
            {reply, ok, NewState};
        _ ->
            {reply, error, State}
    end;
handle_call(reset, _From, State) ->
    case reset_config() of
        {ok, NewP} ->
            NewState = State#?MODULE{
                proplist = NewP,
                record = proplist_to_config(NewP)
            },
            {reply, ok, NewState};
        _ ->
            {reply, error, State}
    end;
% unexpected message: log and answer with an error.
handle_call(Message, From, State) ->
    % fix: the error proplist previously contained {from, From} twice.
    Error = [
        {from, From},
        {message, Message},
        {pid, self()}
    ],
    ?LOG_ERROR(Error),
    {reply, {error, Error}, State}.

%%--------------------------------------------------------------------
%% @hidden
%% no casts are expected; log and ignore.
%%--------------------------------------------------------------------
handle_cast(Msg, State) ->
    ?LOG_ERROR("received: ~p", [Msg]),
    {noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%% no direct messages are expected; log and ignore.
%%--------------------------------------------------------------------
handle_info(Msg, State) ->
    ?LOG_ERROR("received: ~p", [Msg]),
    {noreply, State}.

%%--------------------------------------------------------------------
%% @doc Converts `#config{}' records to `proplists'.
%%
%% The resulting proplist preserves the record field order, which
%% proplist_to_config/1 relies on for the reverse conversion.
%% @end
%%--------------------------------------------------------------------
-spec config_to_proplist(Config) -> Return when
    Config :: #config{},
    Return :: [{atom(), term()}].

config_to_proplist(Config) when is_record(Config, config) ->
    Fields = record_info(fields, config),
    % drop the record tag, keep the values in field order
    Values = erlang:delete_element(1, Config),
    List = erlang:tuple_to_list(Values),
    lists:zip(Fields, List).

% eunit: every field of the record must survive the conversion.
config_to_proplist_test() ->
    Config = #config{},
    Keys = record_info(fields, config),
    Proplist = config_to_proplist(Config),
    [ begin
        {ok, VC} = get_config_value(Key, Config),
        VP = proplists:get_value(Key, Proplist),
        ?assertEqual(VC, VP)
      end || Key <- Keys ].

%%--------------------------------------------------------------------
%% @doc Converts a proplists to a `#config{}' record.
%% @end
%%--------------------------------------------------------------------
-spec proplist_to_config(Proplist) -> Return when
    Proplist :: [{atom(), term()}],
    Return :: #config{}.
proplist_to_config(Proplist) when is_list(Proplist) ->
    Fields = record_info(fields, config),
    proplist_to_config2(Proplist, Fields, Proplist, 1).

% eunit: round-trip record -> proplist -> record keeps every value.
proplist_to_config_test() ->
    Config = #config{},
    Proplist = config_to_proplist(Config),
    NewConfig = proplist_to_config(Proplist),
    [ begin
        {ok, FromRecord} = get_config_value(Key, NewConfig),
        FromList = proplists:get_value(Key, Proplist),
        ?assertEqual(FromRecord, FromList)
      end || Key <- proplists:get_keys(Proplist) ].

%%--------------------------------------------------------------------
%% @hidden
%% private: validate that the proplist keys appear in exactly the
%% record field order; any mismatch throws with the failing position.
%%--------------------------------------------------------------------
proplist_to_config2([], [], Proplist, _Pos) ->
    proplist_to_config3(Proplist);
proplist_to_config2([{Key,_}|RestProps], [Key|RestFields], Proplist, Pos) ->
    proplist_to_config2(RestProps, RestFields, Proplist, Pos+1);
proplist_to_config2([{K1, _V1}|_], [K2|_], _, Pos) ->
    throw({error, #{
        reason => {badkey, K1, K2},
        position => Pos
    }});
proplist_to_config2(_, _, _, Pos) ->
    throw({error, #{
        reason => badvalue,
        position => Pos
    }}).

%%--------------------------------------------------------------------
%% @hidden
%% private: rebuild the record tuple from the validated proplist.
%%--------------------------------------------------------------------
proplist_to_config3(Proplist) ->
    Values = [ V || {_, V} <- Proplist ],
    erlang:list_to_tuple([config|Values]).

%%--------------------------------------------------------------------
%% @hidden
%% private: import a config record as proplist
%%--------------------------------------------------------------------
import_config(Config) when is_record(Config, config) ->
    {ok, config_to_proplist(Config)};
import_config(Config) ->
    {error, Config}.

%%--------------------------------------------------------------------
%% @hidden
%% private: reset internal configuration using #config{} record.
%%--------------------------------------------------------------------
reset_config() ->
    {ok, config_to_proplist(#config{})}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc wrapper for `application:set_env/3'.
%% @end
%% @TODO: to remove, arweave_legacy_config will set the environment
%% for compatibility. If another application/module is still using
%% application:get_env, it will not be impacted and will have an
%% updated configuration.
%%
%% NOTE(review): the module doc talks about the `arweave' application
%% environment but this sets `arweave_config' — confirm which one the
%% legacy readers use.
%%--------------------------------------------------------------------
set_environment(Config) when is_list(Config) ->
    % a proplist must first be rebuilt into a #config{} record
    set_environment(proplist_to_config(Config));
set_environment(Config) when is_record(Config, config) ->
    application:set_env(arweave_config, config, Config).

%%--------------------------------------------------------------------
%% @hidden
%% @doc
%% helper function to extract config value using a key, similar to
%% `proplists:get_value/2'.
%% @end
%%--------------------------------------------------------------------
-spec get_config_value(Key, Config) -> Return when
    Key :: atom(),
    Config :: #config{},
    Return :: {error, undefined} | {ok, term()}.

get_config_value(Key, Config) when is_atom(Key), is_record(Config, config) ->
    Fields = record_info(fields, config),
    % first element is the record tag
    [_Tag|Values] = tuple_to_list(Config),
    Pairs = lists:zip(Fields, Values),
    case lists:keyfind(Key, 1, Pairs) of
        {Key, Value} -> {ok, Value};
        false -> {error, undefined}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc Merge configuration as records or proplists, return the
%% configuration as proplist.
%% @end
%%--------------------------------------------------------------------
-spec config_merge(OldConfig, NewConfig) -> Return when
    OldConfig :: #config{} | proplists:proplist(),
    NewConfig :: #config{} | proplists:proplist(),
    Return :: proplists:proplist().
%% Normalise both arguments to proplists, then merge pairwise. The
%% two lists must contain the same keys in the same order (which
%% holds for anything produced by config_to_proplist/1); any mismatch
%% throws {error, {badconfig, _}}.
config_merge(OldConfig, NewConfig) when is_record(OldConfig, config) ->
    OldProplist = config_to_proplist(OldConfig),
    config_merge(OldProplist, NewConfig);
config_merge(OldConfig, NewConfig) when is_record(NewConfig, config) ->
    NewProplist = config_to_proplist(NewConfig),
    config_merge(OldConfig, NewProplist);
config_merge(OldConfig, NewConfig)
        when is_list(OldConfig), is_list(NewConfig) ->
    Zipped = lists:zip(NewConfig, OldConfig),
    lists:foldr(
        fun
            % keys agree: take the new value (when both values are
            % equal this is the same term, so the previous separate
            % equal/unequal clauses were redundant)
            ({{K, NV}, {K, _OV}}, Acc) ->
                [{K, NV}|Acc];
            % key mismatch: the configuration is malformed
            (Else, _Acc) ->
                throw({error, {badconfig, Else}})
        end,
        [],
        Zipped
    ).

% eunit: merging keeps the new value whether or not it differs.
config_merge_test() ->
    Merged1 = proplist_to_config(
        config_merge(
            #config{ init = false },
            #config{ init = true }
        )
    ),
    #config{ init = Init1 } = Merged1,
    ?assertEqual(true, Init1),
    Merged2 = proplist_to_config(
        config_merge(
            #config{ init = true },
            #config{ init = true }
        )
    ),
    #config{ init = Init2 } = Merged2,
    ?assertEqual(true, Init2).

================================================
FILE: apps/arweave_config/src/arweave_config_parameters.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration Parameters.
%%%
%%% == TODO ==
%%%
%%% @todo create an `include' parameter to include files from
%%% other places.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_parameters).
-compile(warnings_as_errors).
-export([init/0]).
% fix: this was `-include_(...)', which Erlang accepts as a wild
% module attribute and therefore silently never included the header.
-include("arweave_config.hrl").
%%-------------------------------------------------------------------- %% @doc returns a list of map containing arweave parameters. %% @end %%-------------------------------------------------------------------- init() -> [ % set configuration file. The configuration parameter % is a list of binary, stored in arweave_config_file % process. #{ enabled => true, parameter_key => [configuration], default => [], type => path, runtime => true, deprecated => false, required => false, environment => <<"AR_CONFIGURATION">>, short_argument => $c, long_argument => <<"--configuration">>, handle_set => fun (_, V, _ ,_) -> {ok, _} = arweave_config_file:add(V), P = arweave_config_file:get_paths(), {store, P} end, handle_get => fun (_, _) -> {ok, arweave_config_file:get_paths()} end }, % set data directory #{ enabled => true, parameter_key => [data,directory], default => "./data", runtime => false, type => path, deprecated => false, legacy => data_dir, required => true, short_description => "", long_description => "", environment => <<"AR_DATA_DIRECTORY">>, short_argument => $D, long_argument => <<"--data.directory">>, handle_get => fun legacy_get/2, handle_set => fun legacy_set/4 }, #{ parameter_key => [start,from,state], default => not_set, runtime => false, type => path, deprecated => false, legacy => start_from_state, required => false, short_description => "", long_description => "", environment => <<"AR_START_FROM_STATE">>, long_argument => <<"--start-from-state">>, handle_get => fun legacy_get/2, handle_set => fun legacy_set/4 }, #{ enabled => true, parameter_key => [debug], default => false, runtime => true, type => boolean, deprecated => false, legacy => debug, required => false, short_description => "", long_description => "", environment => <<"AR_DEBUG">>, short_argument => $d, long_argument => <<"--debug">>, handle_get => fun legacy_get/2, handle_set => fun (K, V, S = #{ config := #{ debug := Old }}, _) -> case {V, Old} of {true, true} -> ignore; {false, false} -> 
ignore; {false, true} -> logger:set_application_level(arweave_config, info), logger:set_application_level(arweave, info), ar_logger:stop_handler(arweave_debug), legacy_set(K, V, S, []); {true, false} -> logger:set_application_level(arweave_config, debug), logger:set_application_level(arweave, debug), ar_logger:start_handler(arweave_debug), legacy_set(K, V, S, []) end; (K, V, S, _) -> logger:set_application_level(arweave_config, debug), logger:set_application_level(arweave, debug), ar_logger:start_handler(arweave_debug), legacy_set(K, V, S, []) end }, %----------------------------------------------------- % arweave logging feature %----------------------------------------------------- #{ enabled => true, % parse a string and convert it to valid % logger template. parameter_key => [logging,formatter,template], default => [time," [",level,"] ",mfa,":",line," ",msg,"\n"], type => logging_template, environment => false, runtime => false }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger.html % config.path must be a string (list of % integer). By default, type path will convert % its input in binary. So, one can convert the % value stored using handle_get, or converting % the value when using handle_set. parameter_key => [logging,path], default => "./logs", type => path, environment => true, long_argument => true, runtime => false, handle_set => fun (_K, Path, _S, _) when is_list(Path) -> {store, Path}; (_K, Path, _S, _) when is_binary(Path) -> {store, binary_to_list(Path)} end }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger_formatter.html % TODO: this parameter can also be an atom % (unlimited). 
parameter_key => [logging,formatter,max_size], default => 8128, type => pos_integer, environment => true, long_argument => true, runtime => true, handle_set => { fun logger_set/4, [arweave_info,formatter,max_size] } }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger_formatter.html % TODO: this parameter can also be an atom % (unlimited) parameter_key => [logging,formatter,depth], default => 256, type => pos_integer, environment => true, long_argument => true, runtime => true, handle_set => { fun logger_set/4, [arweave_info,formatter,depth] } }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger_formatter.html % TODO: this parameter can also be an atom % (unlimited) parameter_key => [logging,formatter,chars_limit], default => 16256, type => pos_integer, environment => true, long_argument => true, runtime => true, handle_set => { fun logger_set/4, [arweave_info,formatter,chars_limit] } }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger_std_h.html parameter_key => [logging,max_no_files], default => 20, type => pos_integer, environment => true, long_argument => true, runtime => true, handle_set => { fun logger_set/4, [arweave_info,config,max_no_files] } }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger_std_h.html % TODO: this parameter can also be an atom % (infinity) parameter_key => [logging,max_no_bytes], default => 51418800, type => pos_integer, environment => true, long_argument => true, runtime => true, handle_set => { fun logger_set/4, [arweave_info,config,max_no_bytes] } }, #{ enabled => true, % see: https://www.erlang.org/doc/apps/kernel/logger_std_h.html parameter_key => [logging,compress_on_rotate], default => false, type => boolean, environment => true, long_argument => true, runtime => true, handle_set => { fun logger_set/4, [arweave_info,config,compress_on_rotate] } }, #{ enabled => true, parameter_key => [logging,sync_mode_qlen], runtime => false, type => 
pos_integer, default => 10, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,sync_mode_qlen] } }, #{ enabled => true, parameter_key => [logging,drop_mode_qlen], runtime => true, type => pos_integer, default => 200, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,drop_mode_qlen] } }, #{ enabled => true, parameter_key => [logging,flush_qlen], runtime => true, type => pos_integer, default => 1000, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,flush_qlen] } }, #{ enabled => true, parameter_key => [logging,burst_limit_enable], runtime => true, type => boolean, default => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,burst_limit_enable] } }, #{ enabled => true, parameter_key => [logging,burst_limit_max_count], runtime => true, type => pos_integer, default => 500, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,burst_limit_max_count] } }, #{ enabled => true, parameter_key => [logging,burst_limit_window_time], runtime => true, type => pos_integer, default => 1000, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,burst_limit_window_time] } }, #{ enabled => true, parameter_key => [logging,overload_kill_enable], runtime => true, type => boolean, default => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,overload_kill_enable] } }, #{ enabled => true, parameter_key => [logging,overload_kill_qlen], runtime => true, type => pos_integer, default => 20_000, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,overload_kill_qlen] } }, #{ enabled => true, parameter_key => [logging,overload_kill_mem_size], runtime => true, type => pos_integer, default => 3_000_000, 
environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,overload_kill_mem_size] } }, #{ enabled => true, parameter_key => [logging,overload_kill_restart_after], runtime => true, type => pos_integer, default => 5000, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_info,config,overload_kill_restart_after] } }, % debug logs #{ enabled => true, parameter_key => [logging,handlers,debug], default => false, type => boolean, environment => true, long_argument => true, runtime => true, handle_set => fun (_,true,_,_) -> ar_logger:start_handler(arweave_debug), {store, true}; (_,false,_,_) -> ar_logger:stop_handler(arweave_debug), {store, false} end }, #{ enabled => true, parameter_key => [logging,handlers,debug,max_no_files], inherit => [logging,max_no_files], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,max_no_files] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,max_no_bytes], inherit => [logging,max_no_bytes], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,max_no_bytes] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,sync_mode_qlen], inherit => [logging,sync_mode_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,sync_mode_qlen] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,drop_mode_qlen], inherit => [logging,drop_mode_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,drop_mode_qlen] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,flush_qlen], inherit => [logging,flush_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,flush_qlen] } }, #{ enabled => true, 
parameter_key => [logging,handlers,debug,burst_limit_enable], inherit => [logging,burst_limit_enable], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,burst_limit_enable] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,burst_limit_max_count], inherit => [logging,burst_limit_max_count], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,burst_limit_max_count] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,burst_limit_window_time], inherit => [logging,burst_limit_window_time], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,burst_limit_window_time] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,overload_kill_enable], inherit => [logging,overload_kill_enable], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,overload_kill_enable] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,overload_kill_qlen], inherit => [logging,overload_kill_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,overload_kill_qlen] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,overload_kill_mem_size], inherit => [logging,overload_kill_mem_size], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,overload_kill_mem_size] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,overload_kill_restart_after], inherit => [logging,overload_kill_restart_after], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,config,overload_kill_restart_after] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,formatter,chars_limit], inherit => 
[logging,formatter,chars_limit], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,formatter,chars_limit] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,formatter,depth], inherit => [logging,formatter,depth], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,formatter,depth] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,formatter,max_size], inherit => [logging,formatter,max_size], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,formatter,max_size] } }, #{ enabled => true, parameter_key => [logging,handlers,debug,formatter,template], inherit => [logging,formatter,template], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_debug,formatter,template] } }, % http api logs #{ enabled => true, parameter_key => [logging,handlers,http,api], default => false, type => boolean, environment => true, long_argument => true, runtime => true, handle_set => fun (_K,true,_S,_) -> ar_logger:start_handler(arweave_http_api), {store, true}; (_K,false,_S,_) -> ar_logger:stop_handler(arweave_http_api), {store, false} end }, #{ enabled => true, parameter_key => [logging,handlers,http,api,max_no_files], inherit => [logging,max_no_files], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,max_no_files] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,max_no_bytes], inherit => [logging,max_no_bytes], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,max_no_bytes] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,sync_mode_qlen], inherit => [logging,sync_mode_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun 
logger_set/4, [arweave_http_api,config,sync_mode_qlen] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,drop_mode_qlen], inherit => [logging,drop_mode_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,drop_mode_qlen] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,flush_qlen], inherit => [logging,flush_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,flush_qlen] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,burst_limit_enable], inherit => [logging,burst_limit_enable], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,burst_limit_enable] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,burst_limit_max_count], inherit => [logging,burst_limit_max_count], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,burst_limit_max_count] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,burst_limit_window_time], inherit => [logging,burst_limit_window_time], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,burst_limit_window_time] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,overload_kill_enable], inherit => [logging,overload_kill_enable], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,overload_kill_enable] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,overload_kill_qlen], inherit => [logging,overload_kill_qlen], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,overload_kill_qlen] } }, #{ enabled => true, parameter_key => 
[logging,handlers,http,api,overload_kill_mem_size], inherit => [logging,overload_kill_mem_size], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,overload_kill_mem_size] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,overload_kill_restart_after], inherit => [logging,overload_kill_restart_after], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,overload_kill_restart_after] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,compress_on_rotate], inherit => {[logging,compress_on_rotate], [type, default]}, runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,config,compress_on_rotate] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,formatter,chars_limit], inherit => [logging,formatter,chars_limit], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,formatter,chars_limit] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,formatter,max_size], inherit => [logging,formatter,max_size], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,formatter,max_size] } }, #{ enabled => true, parameter_key => [logging,handlers,http,api,formatter,depth], inherit => [logging,formatter,depth], runtime => true, environment => true, long_argument => true, handle_set => { fun logger_set/4, [arweave_http_api,formatter,depth] } }, %----------------------------------------------------- % arweave_config http api parameters %----------------------------------------------------- #{ parameter_key => [config,http,api,enabled], environment => true, long_argument => true, short_description => <<"enable arweave configuration http api interface">>, % @todo enable it by default after testing default => 
false, type => boolean, required => false, runtime => false }, #{ parameter_key => [config,http,api,listen,port], environment => true, long_argument => true, short_description => "set arweave configuration http api interface port", default => 4891, type => tcp_port, required => false, runtime => false }, #{ parameter_key => [config,http,api,listen,address], environment => true, long_argument => true, short_description => "set arweave configuration http api listen address", type => [ipv4, file], required => false, % can be an ip address or an unix socket path, % the configuration should be transparent % though and we should avoid using % {local, socket_path} % the rule is probably to say if the value % start with / then this is an unix socket, % else this is an ip address or an hostname. default => <<"127.0.0.1">>, runtime => false } % @todo implement read, write and token parameters % #{ % parameter => [config,http,api,read], % environment => <<"AR_CONFIG_HTTP_API_READ">>, % short_description => "allow read (get method) on arweave configuration http api", % type => boolean, % required => false, % default => true % }, % #{ % parameter => [config,http,api,write], % environment => <<"AR_CONFIG_HTTP_API_WRITE">>, % short_description => "allow write (post method) on arweave configuration http api", % type => boolean, % required => false, % default => true % }, % #{ % parameter => [config,http,api,token], % environment => <<"AR_CONFIG_HTTP_API_TOKEN">>, % short_description => "set an access token for arweave configuration http api interface", % type => string, % required => false, % default => <<>> % } ]. %%-------------------------------------------------------------------- %% @hidden %% @doc function helper to deal with legacy configuration. %% @end %%-------------------------------------------------------------------- legacy_get(_K, #{ spec := #{ legacy := L }}) -> V = arweave_config_legacy:get(L), {ok, V}; legacy_get(_K, _) -> {error, not_found}. 
%%--------------------------------------------------------------------
%% @hidden
%% @doc function helper to deal with legacy configuration.
%% Mirrors the value into the legacy configuration store when the
%% parameter specification carries a `legacy' key, and always asks the
%% caller to store the value unchanged.
%% @end
%%--------------------------------------------------------------------
legacy_set(_K, V, #{ spec := #{ legacy := L }},_) ->
	arweave_config_legacy:set(L, V),
	{store, V};
legacy_set(_K, V, _,_) ->
	{store, V}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc helper function to dynamically set logging handlers.
%%
%% The first clause updates a `logger_formatter' option of the handler
%% identified by `HandlerId'; the second clause updates a key inside a
%% sub-map (e.g. `config') of the handler configuration. In both
%% cases the value is stored in the configuration even when the
%% handler is not currently installed (or uses another formatter), so
%% the setting can be picked up when the handler starts.
%% @end
%%--------------------------------------------------------------------
logger_set(_I, Value, _S, [HandlerId, formatter, Key]) ->
	case logger:get_handler_config(HandlerId) of
		{ok, #{formatter := {logger_formatter, Config}}} ->
			NewConfig = Config#{ Key => Value },
			logger:update_handler_config(
				HandlerId, formatter, {logger_formatter, NewConfig}
			),
			{store, Value};
		_Else ->
			% handler absent or using another formatter:
			% only store the value.
			{store, Value}
	end;
logger_set(_K, Value, _S, [HandlerId, ConfigKey, Key]) ->
	case logger:get_handler_config(HandlerId) of
		{ok, HandlerConfig} ->
			% default to an empty map so a handler configuration
			% without the ConfigKey sub-map does not crash with
			% badkey.
			C = maps:get(ConfigKey, HandlerConfig, #{}),
			logger:update_handler_config(
				HandlerId, ConfigKey, C#{ Key => Value }
			),
			{store, Value};
		_Else ->
			{store, Value}
	end.


================================================
FILE: apps/arweave_config/src/arweave_config_parser.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc arweave configuration parser.
%%%
%%% This module exports all function required to parse keys and values
%%% for arweave configuration.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_parser).
-export([
	separator/0,
	key/1,
	is_parameter/1
]).

-include_lib("eunit/include/eunit.hrl").

-define(SEPARATOR, $.).

%%--------------------------------------------------------------------
%% @doc default separator used.
%% @end
%%--------------------------------------------------------------------
separator() -> ?SEPARATOR.

%%--------------------------------------------------------------------
%% @doc parses a string and converts it to a configuration key.
%% At this time, only ASCII characters are supported.
%%
%% == Examples ==
%%
%% ```
%% > arweave_config_parser:key("test.2.3.[127.0.0.1:1984].data").
%% {ok,[test,2,3,<<"127.0.0.1:1984">>,data]}
%% '''
%%
%% @TODO check indepth list.
%% @end
%%--------------------------------------------------------------------
-spec key(Key) -> Return when
	Key :: atom() | binary() | string(),
	Return :: {ok, [atom() | binary()]} | {error, map()}.
key(Key) ->
	% a key already in parameter form is returned untouched.
	case is_parameter(Key) of
		true -> {ok, Key};
		false -> key2(Key)
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc normalizes the input into a binary and hands it over to the
%% parser. Any conversion failure yields an `invalid_data' error.
%% @end
%%--------------------------------------------------------------------
key2(Binary) when is_binary(Binary) ->
	key_parse(Binary, <<>>, [], 1);
key2(Atom) when is_atom(Atom) ->
	key2(atom_to_binary(Atom));
key2(List) when is_list(List) ->
	try
		key2(list_to_binary(List))
	catch
		_:_ -> {error, #{ reason => invalid_data }}
	end;
key2(_Elsewise) ->
	{error, #{ reason => invalid_data }}.

%%--------------------------------------------------------------------
%% @doc returns `true' when the argument is already a parsed parameter
%% (a non-empty list of atoms, integers or non-empty binaries), and
%% `false' for anything else, including flat strings.
%% @end
%%--------------------------------------------------------------------
is_parameter([]) ->
	false;
is_parameter(List) when is_list(List) ->
	% a list convertible to a binary is a plain (unparsed) string,
	% not a parameter.
	case is_flat_string(List) of
		true -> false;
		false -> is_parameter_list(List)
	end;
is_parameter(_) ->
	false.

% returns true when List is a flat string/iolist.
is_flat_string(List) ->
	try
		_ = list_to_binary(List),
		true
	catch
		_:_ -> false
	end.
%%--------------------------------------------------------------------
%% checks every element of the list is a valid parameter segment:
%% atoms, integers or non-empty binaries.
%%--------------------------------------------------------------------
is_parameter_list([]) -> true;
is_parameter_list([<<>>|_]) -> false;
is_parameter_list([Item|Rest]) when is_atom(Item) ->
	is_parameter_list(Rest);
is_parameter_list([Item|Rest]) when is_integer(Item) ->
	is_parameter_list(Rest);
is_parameter_list([Item|Rest]) when is_binary(Item) ->
	is_parameter_list(Rest);
is_parameter_list(_) -> false.

%%--------------------------------------------------------------------
%% @hidden
%% @doc parse the key and convert it to parameter format.
%%
%% Arguments: remaining input, current segment buffer, accumulated
%% (reversed) segments, and the 1-based character position used in
%% error reports. Segments are separated by `?SEPARATOR'; a segment
%% enclosed in `[' and `]' is parsed verbatim by key_parse_string/4.
%%
%% NOTE(review): the binary match patterns below were reconstructed
%% from the behavior pinned by key_test/0 (this module's eunit tests);
%% the original patterns were corrupted in transit.
%% @end
%%--------------------------------------------------------------------
key_parse(<<>>, <<>>, [], 1) ->
	{error, #{
		position => 1,
		reason => empty_key
	}};
key_parse(<<?SEPARATOR>>, _Buffer, _Key, Pos) ->
	% a trailing separator is invalid.
	{error, #{
		position => Pos,
		reason => separator_ending
	}};
key_parse(<<?SEPARATOR, ?SEPARATOR, _/binary>>, _Buffer, _Key, Pos) ->
	% two consecutive separators are invalid.
	{error, #{
		position => Pos,
		reason => multi_separators
	}};
key_parse(<<?SEPARATOR, Rest/binary>>, Buffer, [], 1) ->
	% strip one optional leading separator.
	key_parse(Rest, Buffer, [], 2);
key_parse(<<>>, <<>>, Key, _Pos) ->
	key_convert(Key, []);
key_parse(<<>>, Buffer, Key, Pos) ->
	% flush the last buffered segment before conversion.
	key_parse(<<>>, <<>>, [Buffer|Key], Pos);
key_parse(<<?SEPARATOR, $[, Rest/binary>>, <<>>, Key, Pos) ->
	key_parse_string(Rest, <<>>, Key, Pos+2);
key_parse(<<?SEPARATOR, $[, Rest/binary>>, Buffer, Key, Pos) ->
	key_parse_string(Rest, <<>>, [Buffer|Key], Pos+2);
key_parse(<<?SEPARATOR, Rest/binary>>, Buffer, Key, Pos) ->
	key_parse(Rest, <<>>, [Buffer|Key], Pos+1);
key_parse(<<C, Rest/binary>>, Buffer, Key, Pos)
		when C >= $0, C =< $9;
		     C >= $A, C =< $Z;
		     C >= $a, C =< $z;
		     C =:= $_ ->
	% plain segments only accept ASCII alphanumerics and underscore.
	key_parse(Rest, <<Buffer/binary, C>>, Key, Pos+1);
key_parse(<<C, Rest/binary>>, Buffer, Key, Pos) ->
	{error, #{
		char => <<C>>,
		rest => Rest,
		buffer => Buffer,
		position => Pos,
		key => Key,
		reason => bad_char
	}}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc parse a string enclosed by `[' and `]'.
%% The closing `]' ends the string; the raw content is kept as a
%% binary (tagged `{string, _}' until key_convert/2).
%% @end
%%--------------------------------------------------------------------
key_parse_string(<<"]">>, Buffer, Key, Pos) ->
	key_parse(<<>>, <<>>, [{string, Buffer}|Key], Pos+1);
key_parse_string(<<"]", ?SEPARATOR, Rest/binary>>, Buffer, Key, Pos) ->
	key_parse(Rest, <<>>, [{string, Buffer}|Key], Pos+2);
key_parse_string(<<C, Rest/binary>>, Buffer, Key, Pos)
		when C >= $!, C =< $/;
		     C >= $0, C =< $9;
		     C >= $?, C =< $Z;
		     C >= $a, C =< $z;
		     C =:= $:;
		     C =:= $=;
		     C =:= $_;
		     C =:= $~ ->
	key_parse_string(Rest, <<Buffer/binary, C>>, Key, Pos+1);
key_parse_string(Binary, Buffer, Key, Pos) ->
	{error, #{
		rest => Binary,
		buffer => Buffer,
		position => Pos,
		key => Key,
		reason => bad_string
	}}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc convert a key to its final parameter form.
%% The accumulated (reversed) segments are turned into the final
%% ordered list: `{string, _}' segments stay binaries, numeric
%% segments become integers, and anything else must map to an
%% already-existing atom.
%% @end
%%--------------------------------------------------------------------
key_convert([], Buffer) ->
	{ok, Buffer};
key_convert([{string, Value}|Rest], Buffer) ->
	key_convert(Rest, [Value|Buffer]);
key_convert([Item|Rest], Buffer) ->
	try
		Integer = binary_to_integer(Item),
		key_convert(Rest, [Integer|Buffer])
	catch
		_:_ -> key_convert_to_atom(Item, Rest, Buffer)
	end;
key_convert(Rest, Buffer) ->
	% defensive clause: non-list input.
	{error, #{
		rest => Rest,
		buffer => Buffer,
		reason => bad_key
	}}.

% converts a segment to an existing atom; never creates new atoms so
% parsing untrusted keys cannot exhaust the atom table.
key_convert_to_atom(Item, Rest, Buffer) ->
	try
		Atom = binary_to_existing_atom(Item),
		key_convert(Rest, [Atom|Buffer])
	catch
		_:_ ->
			{error, #{
				reason => invalid_key,
				key => Item
			}}
	end.
%% eunit: exercises key/1 over the supported input shapes (binary and
%% string keys, integer segments, bracketed host strings) and over the
%% documented error reasons.
key_test() ->
	% binary key with atom segments
	?assertEqual(
		{ok, [global, debug]},
		key(<< ?SEPARATOR, "global", ?SEPARATOR, "debug" >>)
	),
	% same key given as a string
	?assertEqual(
		{ok, [global, debug]},
		key( [?SEPARATOR] ++ "global" ++ [?SEPARATOR] ++ "debug" )
	),
	% numeric segments are converted to integers
	?assertEqual(
		{ok, [storage,3,unpacked,state]},
		key(<< ?SEPARATOR, "storage", ?SEPARATOR, "3",
			?SEPARATOR, "unpacked", ?SEPARATOR, "state" >>)
	),
	% bracketed segments are kept as raw binaries
	?assertEqual(
		{ok, [peers,<<"127.0.0.1:1984">>,trusted]},
		key(<< ?SEPARATOR, "peers", ?SEPARATOR, "[127.0.0.1:1984]",
			?SEPARATOR, "trusted" >>)
	),
	% characters outside [0-9A-Za-z_] are rejected
	?assertEqual(
		{error, #{
			reason => bad_char,
			buffer => <<>>,
			char => <<"~">>,
			rest => <<>>,
			key => [],
			position => 1
		}},
		key(<<"~">>)
	),
	% empty input is rejected
	?assertEqual(
		{error, #{
			reason => empty_key,
			position => 1
		}},
		key("")
	),
	% segments must resolve to already-existing atoms
	?assertEqual(
		{error, #{
			reason => invalid_key,
			key => <<"totally_random_key">>
		}},
		key("totally_random_key")
	),
	% consecutive separators are rejected (leading and mid-key)
	?assertEqual(
		{error, #{
			reason => multi_separators,
			position => 1
		}},
		key(<< ?SEPARATOR, ?SEPARATOR, "test" >>)
	),
	?assertEqual(
		{error, #{
			reason => multi_separators,
			position => 6
		}},
		key(<< ?SEPARATOR, "test", ?SEPARATOR, ?SEPARATOR, "test" >>)
	),
	% a trailing separator is rejected
	?assertEqual(
		{error, #{
			reason => separator_ending,
			position => 5
		}},
		key(<< "test", ?SEPARATOR >>)
	).


================================================
FILE: apps/arweave_config/src/arweave_config_serializer.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration File Serializer.
%%%
%%% This module is in charge of serializing a map and convert it to a
%%% specification compatible format.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_serializer).
-compile(warnings_as_errors).

-export([encode/1, encode/2, decode/1, decode/2]).
-export([map_merge/1]).
-export([encode_enter/1, encode_iterate/1, encode_merge/1]).
-export([decode_enter/1, decode_iterate/1, decode_merge/1]).

-include_lib("kernel/include/logger.hrl").

%%--------------------------------------------------------------------
%% @doc convert a map to arweave_config specification format.
%% @see encode/2
%% @end
%%--------------------------------------------------------------------
-spec encode(Map) -> Return when
	Map :: map(),
	Return :: {ok, map()} | {error, term()}.
encode(Map) ->
	encode(Map, #{}).

%%--------------------------------------------------------------------
%% @doc convert a map to arweave_config specification format.
%% @end
%%--------------------------------------------------------------------
-spec encode(Map, Opts) -> Return when
	Map :: map(),
	Opts :: map(),
	Return :: {ok, map()} | {error, term()}.
encode(Map, Opts) ->
	encode(Map, Opts, []).

%%--------------------------------------------------------------------
%% @doc convert a map to arweave_config specification format.
%% `Level' is the (reversed) key path accumulated while descending
%% into nested maps.
%%
%% The previous spec advertised a bare `map()' return, but the
%% function actually yields `{ok, Map}' on success (this is the shape
%% matched by encode_iterate/1 on recursive calls) and an error tuple
%% otherwise; the specs were corrected accordingly.
%% @end
%%--------------------------------------------------------------------
-spec encode(Map, Opts, Level) -> Return when
	Map :: map(),
	Opts :: map(),
	Level :: list(),
	Return :: {ok, map()} | {error, term()}.
encode(Map, Opts, Level) when is_map(Map), is_list(Level) ->
	Iterator = maps:iterator(Map),
	State = #{
		opts => Opts,
		map => Map,
		iterator => Iterator,
		level => Level
	},
	arweave_config_fsm:init(?MODULE, encode_enter, State);
encode(_, _, _) ->
	{error, badarg}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc initialize the buffer and start the iteration.
%% @end
%%--------------------------------------------------------------------
encode_enter(State = #{ iterator := Iterator }) ->
	NewState = State#{
		iterator => maps:next(Iterator),
		buffer => #{}
	},
	{next, encode_iterate, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc iterate over the iterator and route the data when needed.
%% Nested maps are encoded recursively and their result is merged
%% into the buffer; plain values are delegated to encode_merge/1.
%% @end
%%--------------------------------------------------------------------
encode_iterate(#{ iterator := none, buffer := Buffer }) ->
	{ok, Buffer};
encode_iterate(State = #{ iterator := {RawKey, Value, NextIterator},
		opts := Opts, level := Level, buffer := Buffer })
		when is_map(Value) ->
	Key = encode_convert_key(RawKey),
	case encode(Value, Opts, [Key|Level]) of
		{ok, Encoded} ->
			NewState = State#{
				iterator => NextIterator,
				buffer => maps:merge(Buffer, Encoded)
			},
			{next, encode_iterate, NewState};
		Failure ->
			{error, Failure}
	end;
encode_iterate(State = #{ iterator := {_, _, _} }) ->
	{next, encode_merge, State}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc merge key/values into one unique map.
%% The full (reversed-then-restored) key path becomes the flat key of
%% the buffer entry.
%% @end
%%--------------------------------------------------------------------
encode_merge(State = #{ iterator := {RawKey, Value, NextIterator},
		buffer := Buffer, level := Level }) ->
	FullKey = lists:reverse([encode_convert_key(RawKey)|Level]),
	NewState = State#{
		iterator => maps:next(NextIterator),
		buffer => Buffer#{ FullKey => Value }
	},
	{next, encode_iterate, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc convert the key as existing atoms.
%% Unconvertible keys are returned unchanged; no new atoms are
%% created.
%% @end
%%--------------------------------------------------------------------
encode_convert_key(Key) when is_list(Key) ->
	try list_to_existing_atom(Key) of
		Atom -> Atom
	catch
		_:_ -> Key
	end;
encode_convert_key(Key) when is_binary(Key) ->
	try binary_to_existing_atom(Key) of
		Atom -> Atom
	catch
		_:_ -> Key
	end;
encode_convert_key(Key) ->
	Key.

%%--------------------------------------------------------------------
%% @doc
%% @see decode/2
%% @end
%%--------------------------------------------------------------------
-spec decode(Map) -> Return when
	Map :: #{ [term()] => term() },
	Return :: #{ term() => term() }.
decode(Map) ->
	decode(Map, #{}).
%%--------------------------------------------------------------------
%% @doc decode arweave config serialized map. A similar implementation
%% was present in `arweave_config_store', this one is using
%% `arweave_config_fsm'.
%% @end
%%--------------------------------------------------------------------
-spec decode(Map, Opts) -> Return when
	Map :: #{ [term()] => term() },
	Opts :: map(),
	Return :: #{ term() => term() }.
decode(Map, Opts) ->
	State = #{
		opts => Opts,
		map => Map,
		iterator => maps:iterator(Map),
		level => []
	},
	arweave_config_fsm:init(?MODULE, decode_enter, State).

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc initialize the decoder and the iterator.
%% @end
%%--------------------------------------------------------------------
decode_enter(State = #{ iterator := Iterator }) ->
	NewState = State#{
		iterator => maps:next(Iterator),
		buffer => []
	},
	{next, decode_iterate, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc loop over the items present in the map.
%% Each flat key (a list of path segments) is rebuilt as a nested map
%% and stacked on the buffer; keys must be lists.
%% @end
%%--------------------------------------------------------------------
decode_iterate(State = #{ iterator := none }) ->
	{next, decode_merge, State};
decode_iterate(State = #{ iterator := {Path, Value, Iterator},
		buffer := Buffer }) when is_list(Path) ->
	[Leaf|Ancestors] = lists:reverse(Path),
	Nested = lists:foldl(
		fun(Segment, Acc) -> #{ Segment => Acc } end,
		#{ Leaf => Value },
		Ancestors
	),
	NewState = State#{
		iterator => maps:next(Iterator),
		buffer => [Nested|Buffer]
	},
	{next, decode_iterate, NewState}.

%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc merge the final values.
%% @end
%%--------------------------------------------------------------------
decode_merge(#{ buffer := Buffer }) ->
	{ok, map_merge(Buffer)}.
%%--------------------------------------------------------------------
%% @hidden
%% @private
%% @doc merge map recursively using arweave config rules.
%%
%% Maps are merged left to right (`lists:foldr/3' over the list): on
%% conflicting scalar values the leftmost map wins and a warning is
%% logged. When a scalar meets an existing map, the scalar is stored
%% under the special `'_'' key of that map.
%% @end
%% @todo this function should use arweave_config_fsm
%%--------------------------------------------------------------------
map_merge(ListOfMap) ->
	lists:foldr(
		fun(X, A) -> map_merge(X, A) end,
		#{},
		ListOfMap
	).

map_merge(A, B) when is_map(A), is_map(B) ->
	% maps:iterator/1 returns an opaque handle that must be advanced
	% with maps:next/1. The previous code pattern-matched the
	% internal `[0|Map]' representation in a dedicated map_merge2/2
	% clause, which is undocumented and may change between OTP
	% releases; advancing here removes that dependency.
	map_merge2(maps:next(maps:iterator(A)), B);
map_merge(A, B) when is_map(A) ->
	A#{ '_' => B }.

map_merge2(none, B) ->
	B;
map_merge2({K, V, I2}, B) when is_map(V), is_map_key(K, B) ->
	% both sides hold a value for K and the new one is a map:
	% recurse.
	BV = maps:get(K, B, #{}),
	Result = map_merge(V, BV),
	map_merge2(I2, B#{ K => Result });
map_merge2({K, V, I2}, B) when is_map_key(K, B) ->
	BV = maps:get(K, B),
	case V =:= BV of
		true -> map_merge2(I2, B#{ K => V });
		false -> map_merge3(I2, B, K, BV, V)
	end;
map_merge2({K, V, I2}, B) ->
	map_merge2(I2, B#{ K => V }).

% conflict resolution: scalar vs map stores the scalar under '_';
% scalar vs scalar keeps the new (leftmost) value and warns.
map_merge3(I2, B, K, BV, V) when is_map(BV) ->
	case is_map_key('_', BV) of
		true ->
			OV = maps:get('_', BV),
			?LOG_WARNING("value ~p will be overwritten.", [OV]),
			map_merge2(I2, B#{ K => BV#{ '_' => V }});
		false ->
			map_merge2(I2, B#{ K => BV#{ '_' => V }})
	end;
map_merge3(I2, B, K, BV, V) ->
	?LOG_WARNING("value ~p will be ignored.", [BV]),
	map_merge2(I2, B#{ K => V }).


================================================
FILE: apps/arweave_config/src/arweave_config_signal_handler.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration Unix Signal Handler.
%%% inspired by: https://github.com/rabbitmq/rabbitmq-server/pull/2227/files
%%% see also: https://www.erlang.org/docs/20/man/os#set_signal-2
%%% see also: https://www.erlang.org/docs/20/man/kernel_app#id63489
%%%
%%% @end
%%%===================================================================
-module(arweave_config_signal_handler).
-compile(warnings_as_errors).
-behavior(gen_event).

-export([
	start_link/0,
	signals/0,
	signal/2,
	sigusr1/0,
	sigusr2/0
]).

-export([
	init/1,
	terminate/2,
	handle_call/2,
	handle_event/2,
	handle_info/2
]).

-include_lib("kernel/include/logger.hrl").

% signals supported by this handler.
-type signal() :: sighup | sigquit | sigterm | sigusr1 | sigusr2.
% per-signal counter of how many times each signal was handled.
-type state() :: #{ signal() => pos_integer() }.

%%--------------------------------------------------------------------
%% @doc Starts unix signal handler.
%% Replaces the default OTP `erl_signal_handler' inside
%% `erl_signal_server' with this module, then subscribes to the
%% supported signals.
%% @end
%%--------------------------------------------------------------------
start_link() ->
	% remove a previous instance of this handler, if any, before
	% swapping in a fresh one.
	_ = gen_event:delete_handler(erl_signal_server, ?MODULE, []),
	ok = gen_event:swap_sup_handler(
		erl_signal_server,
		{erl_signal_handler, []},
		{?MODULE, []}
	),
	set_signals(),
	gen_event:start_link({local, ?MODULE}).

%%--------------------------------------------------------------------
%% @doc Returns supported Unix signals.
%% @end
%%--------------------------------------------------------------------
-spec signals() -> [signal()].
signals() ->
	[
		sighup,
		sigquit,
		sigterm,
		sigusr1,
		sigusr2
	].

%%--------------------------------------------------------------------
%% @doc Executes signal side effect.
%% @end
%%--------------------------------------------------------------------
-spec signal(Signal, State) -> Return when
	Signal :: signal(),
	State :: state(),
	Return :: {ok, state()}.
signal(E = sighup, State) ->
	% 1. reload environment variable
	% 2. reload arguments
	% 3. reload the configuration
	arweave_config_environment:reset(),
	arweave_config_environment:load(),
	arweave_config_legacy:reset(),
	update_state(E, State);
signal(E = sigquit, State) ->
	% stop arweave and generate a core dump
	% NOTE(review): erlang:halt/2 does not return, so the
	% update_state/2 call below is unreachable.
	erlang:halt("sigquit received", [{flush, true}]),
	update_state(E, State);
signal(E = sigterm, State) ->
	% stop arweave, call init:stop/0.
	init:stop(0),
	update_state(E, State);
signal(E = sigusr1, State) ->
	% Custom signal mostly used to print arweave state
	% diagnostic, and debugging information in the logs.
	% In case of odd behavior, this signal can be helpful
	% to extract internal information.
	sigusr1(),
	update_state(E, State);
signal(E = sigusr2, State) ->
	% custom signal mostly used to recover arweave
	% 1. check if arweave is still connected to epmd
	% 2. reconnect to epmd if disconnected
	sigusr2(),
	update_state(E, State);
signal(E, State) ->
	% if the signal is not supported, we use the default
	% behavior from erl_signal_handler.
	% NOTE(review): erl_signal_handler keeps its own internal state
	% shape; passing this handler's map state presumably only works
	% because it ignores the state for these events — confirm
	% against OTP's erl_signal_handler implementation. Exceptions
	% are caught by handle_event/2.
	?LOG_INFO("received signal ~p", [E]),
	erl_signal_handler:handle_event(E, State),
	update_state(E, State).

%%--------------------------------------------------------------------
%% @doc execute sigusr1. prints diagnostic.
%% Runs in a separate process so a slow or crashing diagnostic does
%% not block the signal handler.
%% @end
%%--------------------------------------------------------------------
sigusr1() ->
	spawn(fun () -> arweave_diagnostic:all() end).

%%--------------------------------------------------------------------
%% @doc execute sigusr2. Currently a no-op placeholder (see the
%% recovery steps listed in signal/2).
%% @end
%%--------------------------------------------------------------------
sigusr2() ->
	ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec init(any()) -> {ok, state()}.
init(_) ->
	erlang:process_flag(trap_exit, true),
	{ok, #{}}.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_event(Signal, State) -> Return when
    Signal :: signal(),
    State :: state(),
    Return :: {ok, state()}.

handle_event(Signal, State) when is_atom(Signal) ->
    ?LOG_INFO("received signal ~p", [Signal]),
    % a crash while reacting to a signal must not take the handler
    % down: keep the previous state instead.
    try
        signal(Signal, State)
    catch
        _Class:_Reason -> {ok, State}
    end;
handle_event(Event, State) ->
    ?LOG_DEBUG("received unexpected event: ~p", [Event]),
    {ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_info(Event, State) -> Return when
    Event :: any(),
    State :: state(),
    Return :: {ok, state()}.

handle_info(Event, State) ->
    ?LOG_DEBUG("received unexpected event: ~p", [Event]),
    {ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec handle_call(Event, State) -> Return when
    Event :: any(),
    State :: state(),
    Return :: {ok, ok, state()}.

handle_call(Event, State) ->
    ?LOG_DEBUG("received unexpected event: ~p", [Event]),
    {ok, ok, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
-spec terminate(Reason, State) -> Return when
    Reason :: any(),
    State :: state(),
    Return :: ok.

terminate(_Reason, _State) ->
    ?LOG_INFO("unix signal handler stopped"),
    ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
set_signals() ->
    [
        begin
            ?LOG_DEBUG("catch signal ~p", [Signal]),
            os:set_signal(Signal, handle)
        end
    ||
        Signal <- signals()
    ].

%%--------------------------------------------------------------------
%% @hidden
%% @doc counts how many times each signal was received; used mostly
%% for debugging purposes.
%% @end
%%--------------------------------------------------------------------
-spec update_state(Signal, State) -> Return when
    Signal :: signal(),
    State :: state(),
    Return :: {ok, state()}.

update_state(Signal, State) ->
    % a signal seen for the first time starts counting from 0.
    Value = maps:get(Signal, State, 0),
    {ok, State#{ Signal => Value + 1 }}.



================================================
FILE: apps/arweave_config/src/arweave_config_spec.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2025 (c) Arweave
%%% @doc Arweave configuration specification behavior.
%%%
%%% When used as module, `arweave_config_spec' defines a behavior to
%%% deal with arweave parameters.
%%%
%%% When used as process, `arweave_config_spec' is in charge of
%%% loading and managing arweave configuration specification, stored
%%% in a map.
%%%
%%% == `arweave_config_spec' process ==
%%%
%%% The `arweave_config_spec' process is a frontend for
%%% `arweave_config_store'. The idea is to pass the parameter from
%%% another module/process, check it first based on the specification,
%%% and then forward the valid result to store it.
here an example %%% anwith the environment %%% %%% ``` %%% _____________ %%% | | %%% | system | %%% | environment | %%% |_____________| %%% | %%% | %%% _\_/________________________ %%% | | %%% | arweave_config_environment |<--+ %%% |____________________________| | %%% | / \ | %%% | | [specification | %%% | | errors] | %%% _\ /______|__________ | %%% | | | %%% | arweave_config_spec | | %%% |_____________________| | %%% | | %%% | [specification success] | %%% | | %%% _\_/__________________ | %%% | | | %%% | arweave_config_store |---------+ [valid result] %%% |______________________| %%% %%% ''' %%% %%% == TODO === %%% %%% === Check Parameter Function === %%% %%% A function is required to check manually/on demande a value %%% without setting it. It will be needed for testing. %%% %%% === Variable Parameter Item Specification === %%% %%% Parameter item can be a variable, defined by a `type'. This is %%% helpful when setting different kind of values like storage modules %%% or peers. %%% %%% ``` %%% [peers, {peer}, enabled] %%% ''' %%% %%% === Check for duplicated values === %%% %%% A warning (or an error) should be returned when there is a %%% duplicated specification. Here a quick list of error/warning: %%% %%% - errors (stop execution): %%% - duplicated parameter_key %%% - warnings (last one overwrite the first one): %%% - duplicated environments %%% - duplicated short arguments %%% - duplicated long arguments %%% - duplicated legacy %%% %%% === Improve callback functions === %%% %%% Add support for pre/post actions. %%% %%% Add support for MFA %%% %%% Add support for embedded lambdas %%% %%% @end %%%=================================================================== -module(arweave_config_spec). -behavior(gen_server). -compile(warnings_as_errors). 
-export([ start_link/0, start_link/1, stop/0, spec/0, spec/1, spec_to_argparse/0, get_default/1, get_legacy/0, get_legacy/1, get_environments/0, get_environment/1, get_short_arguments/0, get_short_argument/1, get_long_arguments/0, get_long_argument/1, get/1, set/2 ]). -export([init/1, terminate/2]). -export([handle_call/3, handle_cast/2, handle_info/2]). -export([is_function_exported/3]). -compile({no_auto_import,[get/1]}). -include_lib("kernel/include/logger.hrl"). % A raw key from external sources (cli, api...), non-sanitized, % non-parsed. % -type key() :: term(). % An arweave parameter, parsed and valid, containing only known % terms and specified. -type parameter() :: [atom() | {atom()} | binary()]. % A value associated with a key/parameter, usually any kind of term. -type value() :: term(). %--------------------------------------------------------------------- % REQUIRED: defines the configuration key used to identify arweave % parameter, usually stored in a data store like ETS. %--------------------------------------------------------------------- -callback parameter_key() -> Return when Return :: parameter(). %--------------------------------------------------------------------- % OPTIONAL: defines how to retrieve the value using the parameter key. %--------------------------------------------------------------------- -callback handle_get(ParameterKey, State) -> Return when ParameterKey :: parameter(), State :: map(), Return :: {ok, value()} | {ok, Action} | {ok, MFA} | {error, map()}, CallbackReturn :: {ok, term()} | {error, term()}, Action :: fun ((ParameterKey, State) -> CallbackReturn), MFA :: {atom(), atom(), list()}. %--------------------------------------------------------------------- % OPTIONAL: defines how to set the value Value with the parameter key. % It should be transaction. 
This callback must be improved, instead % of returning directly a value, it should also be possible to return % a list of MFA or lambda functions executed in order like % transactions. % % `ignore' will keep the old value in place. % % `{ok, term()}' will simply returns the term, the side effect is % protected during the execution, and the module can do whatever (even % setting the value anywhere). % % `{store, term()}' will automatically store the returned value into % `arweave_config_store' using `arweave_config_store:set/2' function. % % `{error, map()}' will return an error and the reason. % % == TODO == % % `{ok, action() | actions()}' will execute a list of action in order % first from last. Those are function with side effects, and their % return is not controlled. Those action should be defined as `fun/3': % % ``` % Action = fun (Parameter, NewValue, OldValue) -> % % do some action... % end. % ''' % % `{ok, mfa() | mfas()}' same than the previous action definition. % % ``` % -module(my_module). % export([mfa/3]). % mfa(Parameter, NewValue, OldValue) -> % ok. % ''' % %--------------------------------------------------------------------- -callback handle_set(Parameter, NewValue, State, Args) -> Return when Parameter:: parameter(), NewValue :: value(), State :: map(), Args :: term(), Return :: ignore | {ok, term()} | {store, term()} | {error, map()}. %--------------------------------------------------------------------- % OPTIONAL: defines if the parameter can be set during runtime. if % true, the parameter can be set when arweave is running, else, the % parameter can only be set during startup % DEFAULT: false %--------------------------------------------------------------------- -callback runtime() -> Return when Return :: boolean(). %--------------------------------------------------------------------- % OPTIONAL: short argument used to configure the parameter, usually a % single ASCII letter present in range 0-9, a-z and A-Z. 
% DEFAULT: undefined %--------------------------------------------------------------------- -callback short_argument() -> Return when Return :: undefined | [pos_integer()]. %--------------------------------------------------------------------- % OPTIONAL: a long argument, used to configure the parameter, usually % lower cases words separated by dashes. % DEFAULT: converted parameter key (e.g. --global.debug) %--------------------------------------------------------------------- -callback long_argument() -> Return when Return :: undefined | iolist(). %--------------------------------------------------------------------- % OPTIONAL: the type of the value. % DEFAULT: undefined %--------------------------------------------------------------------- -callback type() -> Return when Return :: undefined | atom() | [atom()]. %--------------------------------------------------------------------- % OPTIONAL: a function returning a string representing an environment % variable. % DEFAULT: false %--------------------------------------------------------------------- -callback environment() -> Return when Return :: undefined | string(). %--------------------------------------------------------------------- % OPTIONAL: a list of legacy references used to previously fetch the % value. % DEFAULT: undefined %--------------------------------------------------------------------- -callback legacy() -> Return when Return :: undefined | atom() | iolist() | [atom() | iolist()]. %--------------------------------------------------------------------- % OPTIONAL: a short description of the parameter. % DEFAULT: undefined %--------------------------------------------------------------------- -callback short_description() -> Return when Return :: undefined | iolist(). %--------------------------------------------------------------------- % OPTIONAL: a long description of the parameter. 
% DEFAULT: undefined %--------------------------------------------------------------------- -callback long_description() -> Return when Return :: undefined | iolist(). %--------------------------------------------------------------------- % OPTIONAL: defines if a parameter is deprecated, can eventually % returns a message. % DEFAULT: false %--------------------------------------------------------------------- -callback deprecated() -> Return when Return :: true | {true, term()} | false. %-------------------------------------------------------------------- % OPTIONAL: defines the number of arguments to take % DEFAULT: 1 % see: argparse %-------------------------------------------------------------------- -callback nargs() -> Return when Return :: pos_integer() | list | nonempty_list | 'maybe' | {'maybe', term()} | all. %-------------------------------------------------------------------- % OPTIONAL: defines if the parameter is enabled or not. % DEFAULT: true %-------------------------------------------------------------------- -callback enabled() -> Return when Return :: boolean() | {false, term()}. %--------------------------------------------------------------------- % @TODO inherit callback % OPTIONAL: defines an inherited parameter. % DEFAULT: undefined %--------------------------------------------------------------------- -callback inherit() -> Return when Return :: Parameter | {Parameter, Fields}, Parameter :: [atom()], Fields :: [atom()]. %--------------------------------------------------------------------- % @TODO: protected callback % OPTIONAL: defines if the value should be public or protected (not % displayed or even encrypted, useful for password) % DEFAULT: false %--------------------------------------------------------------------- % -callback protected() -> Return when % Return :: boolean(). 
%--------------------------------------------------------------------- % @TODO: dependencies callback % OPTIONAL: defines a list of required parameters to be set % DEFAULT: undefined %--------------------------------------------------------------------- % -callback dependencies() -> Return when % Return :: undefined | [atom()|iolist()|tuple()]. %--------------------------------------------------------------------- % @TODO: conflicts callback % OPTIONAL: defines a list of conflicting parameters % DEFAULT: undefined %--------------------------------------------------------------------- % -callback conflicts() -> RETURN when % Return :: undefined | [atom()|iolist()|tuple()]. %--------------------------------------------------------------------- % @TODO: formatter callback % OPTIONAL: defines a function callback to format short or long % help message. % DEFAULT: undefined %--------------------------------------------------------------------- % -callback formatter(Type, Value) when % Type :: short | long, % Value :: iolist(), % Return :: undefined | {ok, FormattedValue}, % FormattedValue :: iolist(). %--------------------------------------------------------------------- % @TODO: positional arguments callback % OPTIONAL: defines if the argument is positional, those are found % after a special separator, usually `--'. %--------------------------------------------------------------------- % -callback positional() -> Return when % Return :: boolean(). %--------------------------------------------------------------------- % @TODO: before/after arguments callback % OPTIONAL: defines if another argument should be set before or after % the current one %--------------------------------------------------------------------- % -callback before() -> Return when % Return :: undefined | [atom()]. % -callback after() -> Return when % Return :: undefined | [atom()]. 
%--------------------------------------------------------------------- % @TODO: dryrun argument % OPTIONAL: instead of executing the set callback, it simply returns % the action and modification would have been applied. %--------------------------------------------------------------------- % -callback dryrun() -> Return when % Return :: term(). %--------------------------------------------------------------------- % @TODO fail callback % OPTIONAL: defines if a wrong value should stop the execution, with a % specific error set. %--------------------------------------------------------------------- % -spec fail() -> Return when % Return :: boolean() % | {true, term()}. -optional_callbacks([ handle_get/2, handle_set/4, runtime/0, short_argument/0, long_argument/0, type/0, environment/0, legacy/0, short_description/0, long_description/0, deprecated/0, nargs/0, enabled/0, inherit/0 ]). %%-------------------------------------------------------------------- %% @doc Start `arweave_config_spec' process. %% %% @end %%-------------------------------------------------------------------- -spec start_link() -> Return when Return :: {ok, pid()}. start_link() -> start_link([]). %%-------------------------------------------------------------------- %% @doc Start `arweave_config_spec' process. %% @end %%-------------------------------------------------------------------- -spec start_link(Specs) -> Return when Specs :: [map() | atom()], Return :: {ok, pid()}. start_link(Specs) -> gen_server:start_link({local, ?MODULE}, ?MODULE, Specs, []). %%-------------------------------------------------------------------- %% @doc Stop `arweave_config_spec' process. %% @end %%-------------------------------------------------------------------- stop() -> gen_server:stop(?MODULE). %%-------------------------------------------------------------------- %% @doc returns the full list of specifications. 
%% @end
%%--------------------------------------------------------------------
spec() ->
    %% dump the whole specification table; every object is a
    %% {Parameter, Spec} pair.
    Pattern = {'$1', '$2'},
    Guard = [],
    Select = [{{'$1', '$2'}}],
    Result = ets:select(?MODULE, [{Pattern, Guard, Select}]),
    maps:from_list(Result).

%%--------------------------------------------------------------------
%% @doc returns a parameter's specification from the specification
%% store.
%% @end
%%--------------------------------------------------------------------
spec(ParameterSpec) ->
    %% the table is a set keyed on the parameter, so a direct key
    %% lookup is equivalent to the previous full-table select with an
    %% `=:=' guard, and O(1) instead of O(n).
    case ets:lookup(?MODULE, ParameterSpec) of
        [{Parameter, Spec}] ->
            {ok, Parameter, Spec};
        [] ->
            {error, not_found}
    end.

%%--------------------------------------------------------------------
%% @doc returns the default parameter value if one is defined.
%% @end
%%--------------------------------------------------------------------
get_default(Parameter) ->
    %% only specs carrying a `default' key can match this pattern.
    %% {const, _} keeps the guard safe even when the parameter key
    %% contains tuples (see the match-spec grammar).
    Pattern = {'$1', #{ default => '$2' }},
    Guard = [{'=:=', '$1', {const, Parameter}}],
    Select = ['$2'],
    case ets:select(?MODULE, [{Pattern, Guard, Select}]) of
        [Default] ->
            {ok, Default};
        _Elsewise ->
            {error, not_found}
    end.

%%--------------------------------------------------------------------
%% @doc returns the list of supported environment variables, as
%% {EnvironmentName, Parameter} pairs.
%% @end
%%--------------------------------------------------------------------
get_environments() ->
    Pattern = {'$1', #{ environment => '$2' }},
    Guard = [],
    Select = [{{'$2', '$1'}}],
    ets:select(?MODULE, [{Pattern, Guard, Select}]).

%%--------------------------------------------------------------------
%% @doc Returns the specification for an environment variable.
%% @end
%%--------------------------------------------------------------------
get_environment(EnvironmentKey) ->
    Pattern = {'$1', #{ environment => '$2' }},
    %% {const, _} protects against keys containing tuples, which a
    %% raw match-spec guard argument would misinterpret.
    Guard = [{'=:=', '$2', {const, EnvironmentKey}}],
    Select = ['$1'],
    case ets:select(?MODULE, [{Pattern, Guard, Select}]) of
        [Parameter] ->
            [{Parameter, Value}] = ets:lookup(?MODULE, Parameter),
            {ok, Parameter, Value};
        _Elsewise ->
            {error, not_found}
    end.

%%--------------------------------------------------------------------
%% @doc Returns the list of short arguments supported, together with
%% their full specification.
%% @end
%%--------------------------------------------------------------------
get_short_arguments() ->
    Pattern = {'$1', #{ short_argument => '$2' }},
    Guard = [],
    Select = [{{'$2', '$_'}}],
    % match specs cannot project map values cleanly, so the whole
    % object is returned and unpacked here.
    [
        {Argument, Spec}
    ||
        {Argument, {_, Spec}} <- ets:select(?MODULE, [{Pattern, Guard, Select}])
    ].

%%--------------------------------------------------------------------
%% @doc returns the specification for a short argument.
%% @end
%%--------------------------------------------------------------------
get_short_argument(ArgumentKey) ->
    Pattern = {'$1', #{ short_argument => '$2' }},
    Guard = [{'=:=', '$2', {const, ArgumentKey}}],
    Select = [{{'$2', '$_'}}],
    % match specs cannot project map values cleanly, so the whole
    % object is returned and unpacked here.
    [
        {Argument, Spec}
    ||
        {Argument, {_, Spec}} <- ets:select(?MODULE, [{Pattern, Guard, Select}])
    ].

%%--------------------------------------------------------------------
%% @doc Returns the list of long arguments supported, together with
%% their full specification.
%% @end
%%--------------------------------------------------------------------
get_long_arguments() ->
    Pattern = {'$1', #{ long_argument => '$2' }},
    Guard = [],
    Select = [{{'$2', '$_'}}],
    % match specs cannot project map values cleanly, so the whole
    % object is returned and unpacked here.
    [
        {Argument, Spec}
    ||
        {Argument, {_, Spec}} <- ets:select(?MODULE, [{Pattern, Guard, Select}])
    ].

%%--------------------------------------------------------------------
%% @doc Returns the specification for a long argument.
%% @end
%%--------------------------------------------------------------------
get_long_argument(ArgumentKey) ->
    Pattern = {'$1', #{ long_argument => '$2' }},
    Guard = [{'=:=', '$2', {const, ArgumentKey}}],
    Select = [{{'$2', '$_'}}],
    % match specs cannot project map values cleanly, so the whole
    % object is returned and unpacked here.
    [
        {Argument, Spec}
    ||
        {Argument, {_, Spec}} <- ets:select(?MODULE, [{Pattern, Guard, Select}])
    ].

%%--------------------------------------------------------------------
%% @doc Returns legacy keys (used for legacy configuration
%% compatibility).
%% @end
%%--------------------------------------------------------------------
get_legacy() ->
    Pattern = {'$1', #{ legacy => '$2' }},
    Guard = [{'=/=', '$2', undefined}],
    Select = [{{'$2', '$1'}}],
    Query = [{Pattern, Guard, Select}],
    maps:from_list(ets:select(?MODULE, Query)).

%%--------------------------------------------------------------------
%% @doc Returns the parameter key (if it exists) matching a legacy
%% parameter from the `#config{}' record.
%% @end
%%--------------------------------------------------------------------
get_legacy(undefined) ->
    % `undefined' marks "no legacy key" in the specs and must never
    % resolve to a parameter.
    {error, undefined};
get_legacy(Key) ->
    % the previous version carried the guard {'=/=', Key, undefined}
    % built from the *literal* Key: constant-true for atoms/binaries
    % and a badarg for tuple keys. The pattern below already embeds
    % Key literally, so no guard is needed.
    Pattern = {'$1', #{ legacy => Key }},
    Query = [{Pattern, [], ['$1']}],
    case ets:select(?MODULE, Query) of
        [V] ->
            {ok, V};
        _ ->
            {error, undefined}
    end.

%%--------------------------------------------------------------------
%% @doc gets a value using a parameter key.
%% @end
%%--------------------------------------------------------------------
get(Parameter) ->
    gen_server:call(?MODULE, {get, Parameter}, 10_000).

%%--------------------------------------------------------------------
%% @doc set a parameters.
The process will be in charge to check both
%% keys and values then if everything is good, it will execute a side
%% effect (to modify the application state) and finally store/update
%% the value in `arweave_config_store'.
%%
%% == Examples ==
%%
%% ```
%% {ok, NewValue = true, OldValue = false} =
%%     set([global, debug], <<"true">>).
%% '''
%%
%% @end
%%--------------------------------------------------------------------
-spec set(Parameter, Value) -> Return when
    Parameter :: [atom() | iolist()],
    Value :: term(),
    Return :: {ok, term()} | {error, term()}.

set(Parameter, Value) ->
    % delegate to the process so checks and side effects are
    % serialized; the 10s timeout guards against a wedged handler.
    gen_server:call(?MODULE, {set, Parameter, Value}, 10_000).

%%--------------------------------------------------------------------
%% @hidden
%% @doc Returns a list of module callbacks to check specifications.
%% This function has been created to avoid having to deal with a very
%% long and complex file. Each callback module only exports an init/2
%% function to initialize the final state corresponding to a spec
%% callback.
%% @end
%%--------------------------------------------------------------------
callbacks_check() ->
    [
        % mandatory callbacks
        {parameter_key, arweave_config_spec_parameter_key},
        % optional callbacks
        {enabled, arweave_config_spec_enabled},
        {default, arweave_config_spec_default},
        {handle_get, arweave_config_spec_handle_get},
        {handle_set, arweave_config_spec_handle_set},
        {type, arweave_config_spec_type},
        {runtime, arweave_config_spec_runtime},
        {deprecated, arweave_config_spec_deprecated},
        {environment, arweave_config_spec_environment},
        {short_argument, arweave_config_spec_short_argument},
        {long_argument, arweave_config_spec_long_argument},
        {legacy, arweave_config_spec_legacy},
        {short_description, arweave_config_spec_short_description},
        {long_description, arweave_config_spec_long_description},
        {nargs, arweave_config_spec_nargs},
        {inherit, arweave_config_spec_inherit}
    ].
%%--------------------------------------------------------------------
%% @hidden
%% @TODO check if the specs are correct (list of maps).
%%--------------------------------------------------------------------
-spec init(Specs) -> Return when
    Specs :: [atom() | map()],
    Return :: {ok, NamedEts},
    NamedEts :: ?MODULE.

init([]) ->
    % no explicit specs were given: load the default arweave
    % parameters.
    Specs = arweave_config_parameters:init(),
    init_process(Specs);
init(Specs) when is_list(Specs) ->
    init_process(Specs).

%%--------------------------------------------------------------------
%% @hidden
%% init process. this one should not crash, then we catch all
%% exceptions.
%%--------------------------------------------------------------------
init_process(Specs) ->
    erlang:process_flag(trap_exit, true),
    init_ets(Specs).

%%--------------------------------------------------------------------
%% @hidden
%% create the ETS table; it is protected so only this process can
%% write, preventing the specification from being tampered with
%% during runtime.
%%--------------------------------------------------------------------
init_ets(Specs) ->
    ets:new(?MODULE, [
        named_table,
        protected
    ]),
    init_parameters(Specs).

%%--------------------------------------------------------------------
%% @hidden
%% parse and load all specifications from each module.
%%--------------------------------------------------------------------
init_parameters(Specs) ->
    case init_loop(Specs, #{}) of
        {ok, MapSpec} ->
            init_inherit(MapSpec);
        Else ->
            % a gen_server init/1 callback may not return {error, _}:
            % that would crash with {bad_return_value, _}. Returning
            % {stop, Reason} reports the failure to the starter
            % cleanly instead.
            {stop, {error, Else}}
    end.

%%--------------------------------------------------------------------
%% @hidden
%% loops over parameters and if `inherit' spec is defined, update the
%% parameters accordingly. At the end of this function, the specs
%% should be ready.
%%--------------------------------------------------------------------
init_inherit(MapSpec) ->
    InheritedMap = inherit_loop(MapSpec),
    init_state(InheritedMap).
%%--------------------------------------------------------------------
%% @hidden
%% loop over the specifications and set inherited values if present.
%%--------------------------------------------------------------------
inherit_loop(Specs) ->
    inherit_loop(maps:iterator(Specs), Specs, #{}).

%%--------------------------------------------------------------------
%% @hidden
%% this loop restricts the inherited values to be a tuple containing
%% the inherited parameter and the list of fields.
%%--------------------------------------------------------------------
inherit_loop(none, _Specs, Buffer) ->
    Buffer;
inherit_loop(Iterator, Specs, Buffer) ->
    case maps:next(Iterator) of
        none ->
            % exhausted iterator (in particular Specs =:= #{}, whose
            % fresh iterator is not the atom `none'): maps:next/1
            % returns `none', a case the previous version did not
            % handle and crashed on with a case_clause.
            Buffer;
        {K, Parameter = #{ inherit := {Parent, Fields} }, Next} ->
            NewParameter = inherit(Parameter, Fields, Parent, Specs),
            NewBuffer = Buffer#{ K => NewParameter },
            inherit_loop(Next, Specs, NewBuffer);
        {K, Parameter, Next} ->
            NewBuffer = Buffer#{ K => Parameter },
            inherit_loop(Next, Specs, NewBuffer)
    end.

%%--------------------------------------------------------------------
%% @hidden
%% loop over inherited fields and search them in the parent parameter.
%% if the original parameter does not have a field set, use the one
%% from the parent.
%%--------------------------------------------------------------------
-spec inherit(Parameter, InheritedFields, ParentKey, Specs) -> Return when
    Parameter :: map(),
    InheritedFields :: [atom()],
    ParentKey :: list(),
    Specs :: map(),
    Return :: map().

inherit(Parameter, _, ParentKey, Specs)
    when is_map_key(ParentKey, Specs) =:= false ->
        ?LOG_WARNING("undefined inherited parent: ~p", [Parameter]),
        Parameter;
inherit(Parameter, Fields, ParentKey, Specs) ->
    Parent = maps:get(ParentKey, Specs),
    inherit(Parameter, Fields, Parent).

%%--------------------------------------------------------------------
%% @hidden
%% check if the parameter is set from the original spec.
%%--------------------------------------------------------------------
-spec inherit(Parameter, Fields, Parent) -> Return when
    Parameter :: map(),
    Fields :: [atom()],
    Parent :: map(),
    Return :: map().

inherit(Parameter, [], _) ->
    Parameter;
inherit(Parameter, [Field|Rest], Parent) ->
    % a field explicitly set on the parameter always wins over the
    % parent; only absent (or `undefined') fields are inherited.
    case maps:get(Field, Parameter, undefined) of
        undefined ->
            inherit2(Parameter, [Field|Rest], Parent);
        _ ->
            inherit(Parameter, Rest, Parent)
    end.

%%--------------------------------------------------------------------
%% @hidden
%% check if the parent spec has the inherited field and set it.
%%--------------------------------------------------------------------
-spec inherit2(Parameter, Fields, Parent) -> Return when
    Parameter :: map(),
    Fields :: [atom()],
    Parent :: map(),
    Return :: map().

inherit2(Parameter, [Field|Rest], Parent) ->
    case maps:get(Field, Parent, undefined) of
        undefined ->
            inherit(Parameter, Rest, Parent);
        Value ->
            NewParameter = Parameter#{ Field => Value },
            inherit(NewParameter, Rest, Parent)
    end.

%%--------------------------------------------------------------------
%% @hidden
%% insert all specifications into the specification store (ETS).
%% @TODO specifications must be unique; when inserting a new
%% specification, if the same key exists, a warning should be
%% displayed in some way.
%%--------------------------------------------------------------------
init_state(MapSpec) ->
    % ets:insert/2 accepts the whole object list at once; no need for
    % a comprehension evaluated only for its side effect.
    ets:insert(?MODULE, maps:to_list(MapSpec)),
    init_final(?MODULE).

%%--------------------------------------------------------------------
%% @hidden
%% everything is loaded; the process can start.
%%--------------------------------------------------------------------
init_final(State) ->
    ?LOG_INFO("~p ready", [?MODULE]),
    {ok, State}.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_loop([], Buffer) ->
    % no more specifications to parse: the buffer now contains the
    % complete list of all supported parameter specifications.
    {ok, Buffer};
init_loop([Map|Rest], Buffer) when is_map(Map) ->
    % the specification is given as a map; every callback is checked
    % as a map key.
    case init_map(Map, #{}) of
        {ok, #{ parameter_key := K } = R} ->
            ?LOG_DEBUG("checked callback from map ~p", [Map]),
            init_loop(Rest, Buffer#{ K => R });
        discard ->
            % the previous message said "from module" in this clause.
            ?LOG_NOTICE("can't load parameter from map ~p", [Map]),
            init_loop(Rest, Buffer);
        {discard, _Message} ->
            ?LOG_NOTICE("can't load parameter from map ~p", [Map]),
            init_loop(Rest, Buffer);
        Elsewise ->
            throw(Elsewise)
    end;
init_loop([Module|Rest], Buffer) when is_atom(Module) ->
    % the specification is given as a callback module; every callback
    % is checked as an exported function.
    case init_module(Module, #{}) of
        {ok, #{ parameter_key := K } = R} ->
            % the previous message said "from map" in this clause
            % (copy-paste from the map clause above).
            ?LOG_DEBUG("checked callback from module ~p", [Module]),
            init_loop(Rest, Buffer#{ K => R });
        discard ->
            ?LOG_NOTICE("can't load parameter from module ~p", [Module]),
            init_loop(Rest, Buffer);
        {discard, _Message} ->
            ?LOG_NOTICE("can't load parameter from module ~p", [Module]),
            init_loop(Rest, Buffer);
        Elsewise ->
            throw(Elsewise)
    end.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_module(Module, State) ->
    CallbacksCheck = callbacks_check(),
    init_module(Module, CallbacksCheck, State).

init_module(Module, [], State) ->
    ?LOG_DEBUG("loaded callback module ~p", [Module]),
    {ok, State};
init_module(Module, [{_Callback, ModuleCallback}|Rest], State) ->
    % a direct remote call replaces erlang:apply/3: same semantics,
    % clearer and analyzable by dialyzer/xref.
    case ModuleCallback:init(Module, State) of
        {ok, NewState} ->
            init_module(Module, Rest, NewState);
        Elsewise ->
            {discard, Elsewise}
    end.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_map(Map, State) ->
    CallbacksCheck = callbacks_check(),
    init_map(Map, CallbacksCheck, State).

init_map(Map, [], State) ->
    ?LOG_DEBUG("loaded callback map ~p", [Map]),
    {ok, State};
init_map(Map, [{_Callback, ModuleCallback}|Rest], State) ->
    % a direct remote call replaces erlang:apply/3: same semantics,
    % clearer and analyzable by dialyzer/xref.
    case ModuleCallback:init(Map, State) of
        {ok, NewState} ->
            init_map(Map, Rest, NewState);
        Elsewise ->
            {discard, Elsewise}
    end.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
terminate(_, _) ->
    ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_call({get, Parameter}, _From, State) ->
    case apply_get(Parameter) of
        {ok, Value} ->
            {reply, {ok, Value}, State};
        Elsewise ->
            {reply, Elsewise, State}
    end;
handle_call({set, Parameter, Value}, _From, State) ->
    case apply_set(Parameter, Value) of
        {ok, _} = Return ->
            % the previous pattern, `Return = {ok, Return}', can
            % never match (a term cannot contain itself), so every
            % successful set fell through to the catch-all clause.
            {reply, Return, State};
        Elsewise ->
            {reply, Elsewise, State}
    end;
handle_call(Msg, From, State) ->
    ?LOG_WARNING([
        {message, Msg},
        {from, From},
        {module, ?MODULE},
        {function, handle_call}
    ]),
    % NOTE(review): {noreply, _} leaves the caller hanging until its
    % gen_server:call timeout fires; kept as-is to preserve behavior.
    {noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_cast(Msg, State) ->
    ?LOG_WARNING([
        {message, Msg},
        {module, ?MODULE},
        {function, handle_cast}
    ]),
    {noreply, State}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_info(Msg, State) ->
    ?LOG_WARNING([
        {message, Msg},
        {module, ?MODULE},
        {function, handle_info}
    ]),
    {noreply, State}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc Check if a function from a module is exported.
%% @end
%%--------------------------------------------------------------------
-spec is_function_exported(Module, Function, Arity) -> Return
	when Module :: atom(),
	     Function :: atom(),
	     Arity :: pos_integer(),
	     Return :: boolean().

is_function_exported(Module, Function, Arity) ->
	% fix: the previous implementation used proplists:get_value/3 on
	% the export list, which returns only the FIRST arity exported
	% under Function. A function exported with several arities (e.g.
	% foo/1 and foo/2) was reported as not exported for every arity
	% but the first one found. Membership of the exact
	% {Function, Arity} pair is correct for any number of arities.
	try
		lists:member({Function, Arity}, Module:module_info(exports))
	catch
		% module not loadable/available: treat as not exported.
		_:_ -> false
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc A pipeline function to check if the value is correct or not.
%% @end
%%--------------------------------------------------------------------
check(Parameter, Value, Spec) ->
	check_type(Parameter, Value, Spec, #{}).

%%--------------------------------------------------------------------
%% @hidden
%% @doc Check the type of the value associated with the parameter.
%% This function will check for a type present in
%% `arweave_config_type' module and execute it. It should return,
%% `ok', `{ok, ConvertedValue}' or `{error, Term}'.
%% @end
%%--------------------------------------------------------------------
check_type(Parameter, Value, Spec = #{ type := Type }, Buffer) ->
	case check_type(Value, Type) of
		% NOTE(review): check_type/2 normalizes `ok' into
		% {ok, Value}, so this bare-`ok' branch looks unreachable;
		% kept for safety.
		ok ->
			NewBuffer = Buffer#{ type => ok },
			check_final(Parameter, Value, Spec, NewBuffer);
		{ok, V} ->
			% the type callback may convert the value; carry the
			% converted value forward.
			NewBuffer = Buffer#{ type => ok },
			check_final(Parameter, V, Spec, NewBuffer);
		Error ->
			NewBuffer = Buffer#{ type => Error },
			check_final(Parameter, Value, Spec, NewBuffer)
	end;
check_type(Parameter, Value, Spec, Buffer) ->
	% no type declared in the spec: nothing to validate.
	check_final(Parameter, Value, Spec, Buffer#{ type => undefined }).

%%--------------------------------------------------------------------
%% @hidden
%% @doc check a single type.
%% @end
%%--------------------------------------------------------------------
-spec check_type(Value, Types) -> Return
	when Value :: term(),
	     Types :: atom() | [atom()],
	     Return :: ok | {ok, Value} | {error, term()}.

% Validates Value against a single type (the name of a function in
% arweave_config_type) or against a list of alternative types.
check_type(Value, Types) when is_list(Types) ->
	check_types(Value, Types);
check_type(Value, Type) when is_atom(Type) ->
	% NOTE(review): a crash inside the type callback is returned as
	% {Class, Reason} (e.g. {error, Reason}), losing the stacktrace
	% and potentially producing {exit, _}/{throw, _} shapes callers
	% may not expect — confirm against arweave_config_type.
	try arweave_config_type:Type(Value) of
		ok -> {ok, Value};
		{ok, V} -> {ok, V};
		{error, Reason} -> {error, Reason}
	catch
		E:R -> {E, R}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc check a list of type.
%% @end
%%--------------------------------------------------------------------
-spec check_types(Value, Types) -> Return
	when Value :: term(),
	     Types :: [atom()],
	     Return :: ok | {ok, Value} | {error, term()}.

% Tries each type in order; the first matching type wins. An empty
% (or exhausted) list means no type accepted the value.
check_types(_Value, []) ->
	{error, <<"value does not match types">>};
check_types(Value, [Type|Rest]) when is_atom(Type) ->
	case check_type(Value, Type) of
		ok -> {ok, Value};
		{ok, V} -> {ok, V};
		{error, _} -> check_types(Value, Rest)
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc final check function, all returned values from the previous
%% call should be present in `Buffer' variable.
%% @end
%%--------------------------------------------------------------------
% Inspects the accumulated check results: `type' set to `ok' or
% `undefined' (no type declared) means success, anything else is the
% recorded error.
check_final(_, Value, _, Buffer) ->
	?LOG_DEBUG("~p", [Buffer]),
	case Buffer of
		#{ type := undefined } -> {ok, Value, Buffer};
		#{ type := ok } -> {ok, Value, Buffer};
		_ -> {error, Value, Buffer}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% 1. get the specification, if they are present, we can continue
%% to execute the transaction
%%--------------------------------------------------------------------
apply_set(Parameter, Value) ->
	case spec(Parameter) of
		{ok, Parameter, Spec} ->
			apply_set_runtime(Parameter, Value, Spec);
		Elsewise ->
			Elsewise
	end.
%%--------------------------------------------------------------------
%% @hidden
%% check if the parameter can be set during runtime or not.
%%--------------------------------------------------------------------
% Only the combination {node is running, parameter is static} is
% rejected; every other combination proceeds.
apply_set_runtime(Parameter, Value, Spec) ->
	RuntimeMode = maps:get(runtime, Spec, false),
	Runtime = arweave_config:is_runtime(),
	case {Runtime, RuntimeMode} of
		{false, false} -> apply_set_parameter(Parameter, Value, Spec);
		{false, true} -> apply_set_parameter(Parameter, Value, Spec);
		{true, true} -> apply_set_parameter(Parameter, Value, Spec);
		{true, false} ->
			{error, #{
					parameter => Parameter,
					reason => not_a_runtime_parameter,
					value => Value
				}
			}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% 2. check if the specification match the parameter/value,
%% if everything is fine, we can continue the execution
%%--------------------------------------------------------------------
% `Return' is the (possibly type-converted) value produced by the
% check pipeline; it is what gets stored, not the raw input.
apply_set_parameter(Parameter, Value, Spec) ->
	case check(Parameter, Value, Spec) of
		{ok, Return, _} ->
			apply_set_value(Parameter, Return, Spec);
		Elsewise ->
			Elsewise
	end.

%%--------------------------------------------------------------------
%% @hidden
%% 3. let retrieve the value (if set) and use the handle_set/3
%% callback to set the value.
%%--------------------------------------------------------------------
apply_set_value(Parameter, Value, Spec = #{ set := Set }) ->
	% if handle_set/4 is present, we execute it.
	Default = maps:get(default, Spec, undefined),
	% @todo: potential race condition — the old value is read here and
	% the store is written later (in the callback or in
	% apply_set_store/4); a concurrent set on the same parameter can
	% interleave between the two.
	OldValue = arweave_config_store:get(Parameter, Default),
	State = local_state(#{ spec => Spec, old_value => OldValue }),
	Args = maps:get(set_args, Spec, []),
	% The callback decides the outcome: `ignore' keeps the old value,
	% {ok, V} adopts V without touching the store (the callback is
	% assumed to have handled persistence itself), {store, V} asks
	% this module to persist V.
	try Set(Parameter, Value, State, Args) of
		ignore -> {ok, OldValue, OldValue};
		{ok, NewValue} -> {ok, NewValue, OldValue};
		{store, NewValue} -> apply_set_store(Parameter,NewValue,OldValue,Spec)
	catch
		% NOTE(review): {Class, Reason} loses the stacktrace and can
		% yield {throw, _}/{exit, _} shapes — confirm callers expect
		% this.
		E:R -> {E,R}
	end;
apply_set_value(Parameter, Value, Spec) ->
	% if no handle_set/3 has been configured, we only store the
	% value by default.
	Default = maps:get(default, Spec, undefined),
	OldValue = arweave_config_store:get(Parameter, Default),
	apply_set_store(Parameter, Value, OldValue, Spec).

%%--------------------------------------------------------------------
%% @hidden
%% 4. the previous callback returned `store', then we store
%% the value into arweave_config_store.
%%--------------------------------------------------------------------
% Returns {ok, NewValue, OldValue} on success. NOTE(review): assumes
% arweave_config_store:set/2 returns {ok, {_, _}} on success — confirm
% against that module.
apply_set_store(Parameter, NewValue, OldValue, _Spec) ->
	try arweave_config_store:set(Parameter, NewValue) of
		{ok, {_, _}} ->
			{ok, NewValue, OldValue};
		Elsewise ->
			Elsewise
	catch
		E:R -> {E, R}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc Resolves the spec for Parameter and delegates the read to
%% apply_get2/2.
%% @end
%%--------------------------------------------------------------------
apply_get(Parameter) ->
	case spec(Parameter) of
		{ok, Parameter, Spec} ->
			apply_get2(Parameter, Spec);
		Elsewise ->
			Elsewise
	end.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
% Read dispatch, most-specific spec first: custom getter with a
% default, custom getter alone, default alone, then a bare store
% lookup.
apply_get2(Parameter, Spec = #{ get := Get, default := Default }) ->
	State = local_state(#{ spec => Spec }),
	% any non-{ok, _} result from the getter silently falls back to
	% the declared default.
	case Get(Parameter, State) of
		{ok, Value} -> {ok, Value};
		_ -> {ok, Default}
	end;
apply_get2(Parameter, Spec = #{ get := Get }) ->
	State = local_state(#{ spec => Spec }),
	case Get(Parameter, State) of
		{ok, Value} -> {ok, Value};
		% NOTE(review): if the getter already returns {error, R} this
		% wraps it into {error, {error, R}} — confirm whether callers
		% rely on the double wrapping.
		Elsewise -> {error, Elsewise}
	end;
apply_get2(Parameter, _Spec = #{ default := Default }) ->
	Value = arweave_config_store:get(Parameter, Default),
	{ok, Value};
apply_get2(Parameter, _Spec) ->
	arweave_config_store:get(Parameter).

%%--------------------------------------------------------------------
%% @doc Converts arweave configuration specification to argparse map.
%% @see argparse
%% @end
%%--------------------------------------------------------------------
spec_to_argparse() ->
	% fetch the specifications and convert them to proplists, it
	% will be easier to loop over.
	Specs = [ maps:to_list(X) || {_, X} <- maps:to_list(spec()) ],
	#{ arguments => spec_to_argparse(Specs, []) }.

% @hidden
% folds every spec proplist into an argparse argument map.
spec_to_argparse([], Buffer) -> Buffer;
spec_to_argparse([Spec|Rest], Buffer) ->
	ArgParse = spec_to_argparse2(Spec, #{}),
	spec_to_argparse(Rest, [ArgParse|Buffer]).

% @hidden
% maps individual spec fields to their argparse counterparts; fields
% without an argparse equivalent are logged and dropped.
spec_to_argparse2([], ArgParse) -> ArgParse;
spec_to_argparse2([{parameter_key, Name}|Rest], ArgParse) ->
	% convert the configuration key parameter to name
	spec_to_argparse2(Rest, ArgParse#{ name => Name });
spec_to_argparse2([{default, Default}|Rest], ArgParse) ->
	spec_to_argparse2(Rest, ArgParse#{ default => Default });
spec_to_argparse2([{nargs, Nargs}|Rest], ArgParse) ->
	spec_to_argparse2(Rest, ArgParse#{ nargs => Nargs });
spec_to_argparse2([{long_argument, Long}|Rest], ArgParse)
		when is_binary(Long) ->
	% arweave config spec uses binary, convert it.
	spec_to_argparse2(Rest, ArgParse#{ long => binary_to_list(Long) });
spec_to_argparse2([{short_argument, Short}|Rest], ArgParse) ->
	spec_to_argparse2(Rest, ArgParse#{ short => Short });
spec_to_argparse2([{required, Required}|Rest], ArgParse) ->
	spec_to_argparse2(Rest, ArgParse#{ required => Required });
spec_to_argparse2([{short_description, SD}|Rest], ArgParse) ->
	spec_to_argparse2(Rest, ArgParse#{ help => SD });
spec_to_argparse2([{type, Type}|Rest], ArgParse) ->
	% At this time, only support boolean type
	case Type of
		boolean ->
			spec_to_argparse2(Rest, ArgParse#{ type => Type });
		_Elsewise ->
			spec_to_argparse2(Rest, ArgParse)
	end;
spec_to_argparse2([Ignore|Rest], ArgParse) ->
	?LOG_DEBUG("ignored value: ~p", [Ignore]),
	spec_to_argparse2(Rest, ArgParse).

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
% base state passed to get/set callbacks: caller-supplied fields plus
% a snapshot of the whole configuration store.
local_state(Map) ->
	maps:merge(Map, #{config => arweave_config_store:to_map()}).

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_default.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc "Default" value specification. Returns a default value if
%%% specified.
%%% @end
%%%===================================================================
-module(arweave_config_spec_default).
-compile(warnings_as_errors).
-export([init/2]).
-include("arweave_config_spec.hrl").
-include_lib("kernel/include/logger.hrl").
%% @doc Extracts the optional default value from a parameter
%% specification, given either as a map or as a callback module.
init(Spec, Acc) when is_map(Spec) ->
	fetch(Spec, Acc);
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, default, 0) of
		false -> {ok, Acc};
		true -> fetch(Mod, Acc)
	end.

%% @doc Pulls the default value out of a spec map, or calls the
%% module's default/0 callback; a crashing callback is logged and the
%% accumulator is returned unchanged.
fetch(#{ default := Value }, Acc) ->
	{ok, Acc#{ default => Value }};
fetch(Spec, Acc) when is_map(Spec) ->
	% map without a `default' key: nothing to record.
	{ok, Acc};
fetch(Mod, Acc) ->
	try
		{ok, Acc#{ default => Mod:default() }}
	catch
		Class:Reason:Stack ->
			?LOG_ERROR([
				{module, ?MODULE},
				{parameter, Mod},
				{state, Acc},
				{error, {Class, Reason, Stack}}
			]),
			{ok, Acc}
	end.

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_deprecated.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Deprecated specification feature. Returns a warning message
%%% when a deprecated flag is present.
%%% @end
%%%===================================================================
-module(arweave_config_spec_deprecated).
-export([default/0, init/2]).
-include("arweave_config_spec.hrl").
-include_lib("kernel/include/logger.hrl").

%% @doc Default for the deprecated flag: not deprecated.
default() -> false.

%% @doc Records the deprecated flag from a spec map, or queries the
%% module's deprecated/0 callback when exported.
init(#{ deprecated := Flag }, Acc) when is_boolean(Flag) ->
	{ok, Acc#{ deprecated => Flag }};
init(Spec, Acc) when is_map(Spec) ->
	{ok, Acc#{ deprecated => default() }};
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, deprecated, 0) of
		false -> {ok, Acc#{ deprecated => default() }};
		true -> fetch(Mod, Acc)
	end.
%% @doc Evaluates the module's deprecated/0 callback. A deprecated
%% parameter is flagged in the accumulator and a warning is logged,
%% optionally with the message supplied by the callback.
fetch(Mod, Acc) ->
	try Mod:deprecated() of
		false ->
			{ok, Acc#{ deprecated => default() }};
		true ->
			?LOG_WARNING("~p~n is deprecated", [Mod]),
			{ok, Acc#{ deprecated => true }};
		{true, Msg} ->
			?LOG_WARNING("~p~n is deprecated: ~p", [Mod, Msg]),
			{ok, Acc#{ deprecated => true }};
		Unexpected ->
			{error, Unexpected}
	catch
		_:Reason ->
			{error, Reason}
	end.

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_enabled.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc "Enabled" value specification
%%% @end
%%%===================================================================
-module(arweave_config_spec_enabled).
-export([init/2]).
-include("arweave_config_spec.hrl").
-include_lib("kernel/include/logger.hrl").

%% @doc Decides whether a parameter is enabled. A disabled parameter
%% makes the whole spec load return `skip' so it is discarded.
init(Spec, Acc) when is_map(Spec) ->
	fetch(Spec, Acc);
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, enabled, 0) of
		false -> {ok, Acc};
		true -> fetch(Mod, Acc)
	end.

%% @doc Anything other than an explicit `false' (or `{false, Reason}')
%% counts as enabled.
fetch(#{ enabled := false }, Acc) ->
	PK = maps:get(parameter_key, Acc),
	?LOG_DEBUG("parameter_key: ~p disabled", [PK]),
	skip;
fetch(#{ enabled := {false, Why} }, Acc) ->
	PK = maps:get(parameter_key, Acc),
	?LOG_DEBUG("parameter_key: ~p disabled (~p)", [PK, Why]),
	skip;
fetch(_SpecOrMod, Acc) ->
	{ok, Acc#{ enabled => true }}.
================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_environment.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc
%%% Define an environment variable to check or generate one
%%% automatically.
%%% @end
%%%===================================================================
-module(arweave_config_spec_environment).
-compile(warnings_as_errors).
-export([init/2]).
-include("arweave_config_spec.hrl").

%%--------------------------------------------------------------------
%% @hidden
%% `environment => true' derives the variable name from the parameter
%% key; a binary names it explicitly; a module may provide an
%% environment/0 callback.
%%--------------------------------------------------------------------
init(#{environment := true}, Acc) ->
	Derived = convert(maps:get(parameter_key, Acc)),
	{ok, Acc#{ environment => Derived }};
init(#{environment := Name}, Acc) when is_binary(Name) ->
	{ok, Acc#{ environment => Name }};
init(Spec, Acc) when is_map(Spec) ->
	{ok, Acc};
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, environment, 0) of
		false -> {ok, Acc};
		true -> fetch(Mod, Acc)
	end.

%%--------------------------------------------------------------------
%% @hidden
%% Calls the environment/0 callback; any crash becomes {error, _}.
%%--------------------------------------------------------------------
fetch(Mod, Acc) ->
	try
		check(Mod, Mod:environment(), Acc)
	catch
		_:Reason -> {error, Reason}
	end.

%%--------------------------------------------------------------------
%% @doc environment check callback.
%% @end
%%--------------------------------------------------------------------
-spec check(Module, Environment, State) -> Return
	when Module :: atom() | map(),
	     Environment :: string() | binary(),
	     State :: map(),
	     Return :: {ok, State} | {error, map()}.

check(Mod, Env, Acc) when is_list(Env) ->
	% normalize strings to binaries before storing.
	check(Mod, list_to_binary(Env), Acc);
check(_Mod, Env, Acc) when is_binary(Env) ->
	{ok, Acc#{ environment => Env }};
check(Mod, Env, Acc) ->
	{error, #{
			reason => {invalid, Env},
			module => Mod,
			callback => environment,
			state => Acc
		}
	}.

%%--------------------------------------------------------------------
%% @hidden
%% Derives an environment variable name (e.g. <<"AR_FOO_1_BAR">>)
%% from a parameter key.
%%--------------------------------------------------------------------
-spec convert(PK) -> Return
	when PK :: [binary()|integer()|atom()],
	     Return :: binary().

convert(PK) ->
	convert(PK, []).

%%--------------------------------------------------------------------
%% @hidden
%% Atoms are uppercased, integers rendered, binaries kept verbatim;
%% all parts are joined with "_" under the "AR" prefix.
%%--------------------------------------------------------------------
-spec convert(PK, Buffer) -> Return
	when PK :: [binary()|integer()|atom()],
	     Buffer :: [binary()],
	     Return :: binary().

convert([], Acc) ->
	Parts = [<<"AR">>|lists:reverse(Acc)],
	list_to_binary(lists:join(<<"_">>, Parts));
convert([Head|Tail], Acc) when is_atom(Head) ->
	convert(Tail, [string:uppercase(atom_to_binary(Head))|Acc]);
convert([Head|Tail], Acc) when is_integer(Head) ->
	convert(Tail, [integer_to_binary(Head)|Acc]);
convert([Head|Tail], Acc) when is_binary(Head) ->
	convert(Tail, [Head|Acc]).
================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_handle_get.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc get specification feature. insert a custom get feature inside
%%% a parameter.
%%% @end
%%%===================================================================
-module(arweave_config_spec_handle_get).
-export([init/2]).
-include("arweave_config_spec.hrl").
-include_lib("eunit/include/eunit.hrl").

%% @doc Registers a custom getter: either an arity-2 fun from the
%% spec map, or the module's exported handle_get/2 callback.
init(#{ handle_get := Getter }, Acc) when is_function(Getter, 2) ->
	{ok, Acc#{ get => Getter }};
init(Spec, Acc) when is_map(Spec) ->
	{ok, Acc};
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, handle_get, 2) of
		false -> {ok, Acc};
		true -> {ok, Acc#{ get => fun Mod:handle_get/2 }}
	end.

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_handle_set.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc set specification feature. adds a custom set specification
%%% inside a parameter.
%%% @end
%%%===================================================================
-module(arweave_config_spec_handle_set).
-export([init/2]).
-include("arweave_config_spec.hrl").
%% @doc Registers a custom setter: an arity-4 fun (optionally paired
%% with extra arguments) from the spec map, or the module's exported
%% handle_set/4 callback plus its optional set_args/0.
init(#{ handle_set := Set }, State) when is_function(Set, 4) ->
	{ok, State#{ set => Set, set_args => [] }};
init(#{ handle_set := {Set, Args}}, State) when is_function(Set, 4) ->
	{ok, State#{ set => Set, set_args => Args }};
init(Map, State) when is_map(Map) ->
	{ok, State};
init(Module, State) when is_atom(Module) ->
	case is_function_exported(Module, handle_set, 4) of
		true ->
			NewState = State#{set => fun Module:handle_set/4},
			init_args(Module, NewState);
		false ->
			{ok, State}
	end.

%% @doc Adds the module's static setter arguments when set_args/0 is
%% exported.
init_args(Module, State) ->
	case is_function_exported(Module, set_args, 0) of
		true ->
			Args = Module:set_args(),
			% fix: previously built a brand-new map
			% `#{ set_args => Args }', discarding every field
			% accumulated in State so far — including the `set'
			% callback registered just above. Merge into State
			% instead.
			% NOTE(review): Module:set_args() is not wrapped in a
			% try like sibling spec modules; a crashing callback
			% propagates — confirm whether that is intended.
			{ok, State#{ set_args => Args }};
		_Else ->
			{ok, State}
	end.

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_inherit.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%%------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc inherit specific values from another parameter.
%%% @end
%%%===================================================================
-module(arweave_config_spec_inherit).
-compile(warnings_as_errors).
-export([inherited_fields/0, init/2]).
-include("arweave_config_spec.hrl").
-include_lib("kernel/include/logger.hrl").

%% @doc Fields copied from the inherited parameter by default.
inherited_fields() ->
	[
		default,
		enabled,
		required,
		runtime,
		type
	].

%% @doc Entry point: maps and modules are inspected, anything else is
%% passed through unchanged.
init(Map, State) when is_map(Map); is_atom(Map) ->
	fetch(Map, State);
init(_, State) ->
	{ok, State}.
%% @doc Extracts the inherit declaration from a spec map or a module
%% callback. An inherit is `{ParameterKey, Fields}' or just a
%% ParameterKey (which inherits the default field set).
fetch(Map = #{ inherit := {Parameter, Fields}}, State)
		when is_list(Parameter), is_list(Fields) ->
	% fix: the guard previously used `;' (OR), so a tuple where only
	% one of the two elements was a list — e.g. an atom parameter
	% with a list of fields — was accepted and stored as-is. Both
	% the parameter key and the field list must be lists.
	check(Map, {Parameter, Fields}, State);
fetch(Map = #{ inherit := Parameter }, State) when is_list(Parameter) ->
	Fields = inherited_fields(),
	check(Map, {Parameter, Fields}, State);
fetch(Map, State) when is_map(Map) ->
	% map without a valid `inherit' key: nothing to record.
	{ok, State};
fetch(Module, State) ->
	try Module:inherit() of
		{Parameter, Fields} when is_list(Parameter), is_list(Fields) ->
			% fix: mirror the map-path validation; a malformed tuple
			% now falls through to the catch-all below instead of
			% being stored unvalidated.
			check(Module, {Parameter, Fields}, State);
		Parameter when is_list(Parameter) ->
			check(Module, {Parameter, inherited_fields()}, State);
		_Else ->
			{ok, State}
	catch
		E:R:S ->
			?LOG_ERROR([
				{module, ?MODULE},
				{parameter, Module},
				{state, State},
				{error, {E,R,S}}
			]),
			{ok, State}
	end.

%% @doc Records the validated inherit declaration in the state.
check(_, Inherit, State) ->
	{ok, State#{ inherit => Inherit }}.

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_legacy.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc legacy specification feature. set the key used by the legacy
%%% configuration.
%%% @todo legacy configuration is kinda special, and instead of an
%%% atom, a function should probably be used sometimes.
%%% @end
%%%===================================================================
-module(arweave_config_spec_legacy).
-export([init/2]).
-include("arweave_config_spec.hrl").
-include("arweave_config.hrl").
-include_lib("eunit/include/eunit.hrl").
% Extracts the legacy configuration key (a field of the #config{}
% record) from a spec map or a module's legacy/0 callback.
init(Map = #{ legacy := Legacy }, State) when is_map(Map) ->
	check(Map, Legacy, State);
init(Map, State) when is_map(Map) ->
	{ok, State};
init(Module, State) when is_atom(Module) ->
	case is_function_exported(Module, legacy, 0) of
		true -> fetch(Module, State);
		false -> {ok, State}
	end.

% eunit: valid atoms must name an actual #config{} record field;
% non-atoms and unknown fields are rejected.
init_test() ->
	?assertEqual(
		{ok, #{ legacy => init }},
		init(#{ legacy => init }, #{})
	),
	?assertMatch(
		{error, _},
		init(#{ legacy => 123 }, #{})
	),
	?assertMatch(
		{error, _},
		init(#{ legacy => does_not_exist }, #{})
	).

% Calls the legacy/0 callback; any crash becomes {error, Reason}.
fetch(Module, State) when is_atom(Module) ->
	try
		L = erlang:apply(Module, legacy, []),
		check(Module, L, State)
	catch
		_:R -> {error, R}
	end.

check(Module, Legacy, State) when is_atom(Legacy) ->
	% ensure the presence of the field in config record.
	Fields = record_info(fields, config),
	case [ X || X <- Fields, X =:= Legacy ] of
		[Legacy] ->
			{ok, State#{ legacy => Legacy }};
		_ ->
			{error, #{
					reason => "does not exists",
					legacy_key => Legacy,
					module => Module
				}
			}
	end;
check(Module, Legacy, State) ->
	{error, #{
			reason => {invalid, Legacy},
			module => Module,
			callback => legacy,
			state => State
		}
	}.

================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_long_argument.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Long argument specification feature. It should be compatible
%%% with arguments modules in OTP.
%%%
%%% ```
%%% % example
%%% Parameter = [global, debug].
%%% LongArgumentDefault = <<"--global.debug">>.
%%%
%%% % example
%%% Spec = [peers, {peer}, enabled].
%%% Parameter = [peers, <<"127.0.0.1:1984">>, enabled].
%%% Key = <<"peers.[127.0.0.1:1984].enabled">>.
%%% LongArgumentDefault = <<"--peers.[peer].enabled">>.
%%% '''
%%%
%%% @end
%%%===================================================================
-module(arweave_config_spec_long_argument).
-compile(warnings_as_errors).
-export([init/2]).
-include("arweave_config_spec.hrl").

%% @doc Resolves the long command-line argument for a parameter, from
%% an explicit spec entry, a module's long_argument/0 callback, or —
%% by default — the parameter key itself.
init(Spec = #{ long_argument := Arg }, Acc) ->
	check(Spec, Arg, Acc);
init(Spec, Acc) when is_map(Spec) ->
	{ok, Acc};
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, long_argument, 0) of
		false -> check(Mod, undefined, Acc);
		true -> fetch(Mod, Acc)
	end.

%% @doc Calls long_argument/0; any crash becomes {error, _}.
fetch(Mod, Acc) ->
	try
		check(Mod, Mod:long_argument(), Acc)
	catch
		_:Reason -> {error, Reason}
	end.

%% @doc `false' disables the long argument; `true'/`undefined' derive
%% it from the parameter key; binaries and lists are converted
%% directly; anything else is rejected.
check(_Mod, false, Acc) ->
	{ok, Acc};
check(_Mod, Flag, Acc = #{ parameter_key := Key })
		when Flag =:= true; Flag =:= undefined ->
	{ok, Acc#{ long_argument => convert(Key) }};
check(_Mod, Arg, Acc) when is_binary(Arg) orelse is_list(Arg) ->
	{ok, Acc#{ long_argument => convert(Arg) }};
check(Mod, Arg, Acc) ->
	{error, #{
			reason => {invalid, Arg},
			state => Acc,
			module => Mod,
			callback => long_argument
		}
	}.

%% Parameter keys (lists) are joined into a "--foo.bar" binary;
%% binaries get a leading dash unless they already carry one.
convert(Key) when is_list(Key) ->
	convert(Key, []);
convert(<<"-", _/binary>> = Bin) ->
	Bin;
convert(Bin) when is_binary(Bin) ->
	<<"-", Bin/binary>>.

convert([], Acc) ->
	Sep = application:get_env(arweave_config, long_argument_separator, "."),
	Joined = list_to_binary(lists:join(Sep, lists:reverse(Acc))),
	<<"--", Joined/binary>>;
convert([Head|Tail], Acc) when is_integer(Head) ->
	convert([integer_to_binary(Head)|Tail], Acc);
convert([Head|Tail], Acc) when is_atom(Head) ->
	convert([atom_to_binary(Head)|Tail], Acc);
convert([Head|Tail], Acc) when is_list(Head) ->
	convert([list_to_binary(Head)|Tail], Acc);
convert([Head|Tail], Acc) when is_binary(Head) ->
	convert(Tail, [Head|Acc]).
================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_long_description.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc long description specification feature.
%%% @end
%%%===================================================================
-module(arweave_config_spec_long_description).
-export([init/2]).
-include("arweave_config_spec.hrl").

%% @doc Records the long description from a spec map, or from a
%% module's long_description/0 callback when exported.
init(#{ long_description := Text }, Acc) ->
	{ok, Acc#{ long_description => Text }};
init(Spec, Acc) when is_map(Spec) ->
	{ok, Acc};
init(Mod, Acc) when is_atom(Mod) ->
	case is_function_exported(Mod, long_description, 0) of
		false -> {ok, Acc};
		true -> fetch(Mod, Acc)
	end.

%% @doc Calls long_description/0; any crash becomes {error, _}.
fetch(Mod, Acc) ->
	try
		check(Mod, Mod:long_description(), Acc)
	catch
		_:Reason -> {error, Reason}
	end.

%% @doc Accepts `undefined', a binary or a string; anything else is
%% rejected with a descriptive error map.
check(_Mod, undefined, Acc) ->
	{ok, Acc#{ long_description => undefined }};
check(_Mod, Text, Acc) when is_binary(Text); is_list(Text) ->
	{ok, Acc#{ long_description => Text }};
check(Mod, Text, Acc) ->
	{error, #{
			reason => {invalid, Text},
			module => Mod,
			state => Acc,
			callback => long_description
		}
	}.
================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_nargs.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc nargs parameter interface from `argparse' module. Only
%%% available for command line arguments.
%%% @see argparse
%%% @end
%%%===================================================================
-module(arweave_config_spec_nargs).
-export([init/2]).
-include("arweave_config_spec.hrl").

%% @doc Records the argparse `nargs' setting from a spec map or from
%% a module's nargs/0 callback when exported.
init(Map = #{ nargs := Nargs }, State) ->
	check(Map, Nargs, State);
init(Map, State) when is_map(Map) ->
	{ok, State};
init(Module, State) when is_atom(Module) ->
	case is_function_exported(Module, nargs, 0) of
		true -> fetch(Module, State);
		false -> {ok, State}
	end.

%% @doc Calls nargs/0.
%% fix: wrap the callback in a try like the sibling spec modules so a
%% crashing nargs/0 yields {error, Reason} instead of crashing the
%% whole specification load.
fetch(Module, State) ->
	try
		Nargs = Module:nargs(),
		check(Module, Nargs, State)
	catch
		_:R -> {error, R}
	end.

%% @doc Validates the nargs value against the set accepted by
%% argparse: nonempty_list | list | all | 'maybe' | {'maybe', Term}
%% | non-negative integer.
%% fix: unused `Module' bindings renamed to `_Module' (compiler
%% warnings), and a catch-all clause added — an invalid nargs value
%% previously crashed with function_clause instead of returning an
%% {error, Map} like the other spec modules.
check(_Module, nonempty_list, State) ->
	{ok, State#{ nargs => nonempty_list }};
check(_Module, list, State) ->
	{ok, State#{ nargs => list }};
check(_Module, all, State) ->
	{ok, State#{ nargs => all }};
check(_Module, 'maybe', State) ->
	{ok, State#{ nargs => 'maybe'}};
check(_Module, {'maybe', Term}, State) ->
	{ok, State#{ nargs => {'maybe', Term}}};
check(_Module, Nargs, State) when is_integer(Nargs), Nargs >= 0 ->
	{ok, State#{ nargs => Nargs }};
check(Module, Nargs, State) ->
	{error, #{
			reason => {invalid, Nargs},
			module => Module,
			callback => nargs,
			state => State
		}
	}.
================================================
FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_parameter_key.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Configuration Specification Configuration Key.
%%%
%%% A specification for a specification key is a way to describe a
%%% parameter in arweave_config. The idea is to have something similar
%%% like a path/uri that can be checked before updated. A simple
%%% example with the debug parameter:
%%%
%%% ```
%%% [global,debug].
%%% '''
%%%
%%% How to configure a "dynamic" key, for example, with a peer or a
%%% storage module? It can be done by inserting a special term to
%%% define what kind of type is accepted.
%%%
%%% ```
%%% [peers,{peer},enabled].
%%% '''
%%%
%%% What if the variable parameter can have many types?
%%%
%%% ```
%%% [peers, {[peer,ipv4,ipv6]}, enabled].
%%% '''
%%%
%%% Now, how it's possible to match quickly the content of a parameter
%%% and this kind of key?
%%%
%%% ```
%%% RawKey = <<"peers.[127.0.0.1].enabled">>.
%%% Value = <<"true">>.
%%% FormattedKey = [peers, <<"127.0.0.1">>, enabled].
%%% Specification = [peers, {[peer,ipv4,ipv6]}, enabled].
%%%
%%% % an idea for an internal representation
%%% % InternalSpec = [peers, fun param/1, enabled].
%%%
%%% {ok, Spec} = find(FormattedKey).
%%% true = is_valid(FormattedKey, Spec).
%%% '''
%%%
%%% @todo if the provided key is a binary, it should probably be good
%%% to convert it.
%%%
%%% @end
%%%===================================================================
-module(arweave_config_spec_parameter_key).
-export([init/2]).
-include("arweave_config_spec.hrl").
-include_lib("eunit/include/eunit.hrl").

%% @doc Extracts and validates the mandatory parameter key from a
%% spec map or from a module's parameter_key/0 callback. A missing
%% key is a hard error: every parameter must be addressable.
%% fix: the first clause previously bound `CK' without using it
%% (unused-variable compiler warning); the binding is anonymous now.
init(Map = #{ parameter_key := _ }, State) ->
	fetch(Map, State);
init(Map, State) when is_map(Map) ->
	{error, #{
			module => Map,
			reason => missing_parameter_key,
			state => State
		}
	};
init(Module, State) when is_atom(Module) ->
	case is_function_exported(Module, parameter_key, 0) of
		true ->
			fetch(Module, State);
		false ->
			{error, #{
					callback => parameter_key,
					reason => parameter_key_not_defined,
					module => Module,
					state => State
				}
			}
	end.

% eunit: binaries are parsed into key lists; lists of atoms/binaries
% pass through; empty keys, bare atoms and invalid items are rejected.
init_test() ->
	?assertEqual(
		{ok, #{ parameter_key => [test]}},
		init(#{ parameter_key => <<"test">> }, #{})
	),
	?assertEqual(
		{ok, #{ parameter_key => [test,1,2,3]}},
		init(#{ parameter_key => <<"test.1.2.3">> }, #{})
	),
	?assertEqual(
		{ok, #{ parameter_key => [test]}},
		init(#{ parameter_key => [test]}, #{})
	),
	?assertEqual(
		{ok, #{ parameter_key => [<<"test">>] }},
		init(#{ parameter_key => [<<"test">>] }, #{})
	),
	?assertMatch(
		{error, _},
		init(#{}, #{})
	),
	?assertMatch(
		{error, _},
		init(#{ parameter_key => []}, #{})
	),
	?assertMatch(
		{error, _},
		init(#{ parameter_key => test}, #{})
	),
	?assertMatch(
		{error, _},
		init(#{ parameter_key => [test, #{}]}, #{})
	).

%% @doc Reads the key from the map or calls parameter_key/0; any
%% crash becomes {error, Reason}.
fetch(Map = #{ parameter_key := CK }, State) ->
	check(Map, CK, State);
fetch(Module, State) when is_atom(Module) ->
	try
		CK = Module:parameter_key(),
		check(Module, CK, State)
	catch
		_:Reason -> {error, Reason}
	end.

%% @doc Binaries are delegated to arweave_config_parser:key/1; lists
%% are validated item by item by check2/4; anything else is invalid.
check(Module, CK, State) when is_binary(CK) ->
	case arweave_config_parser:key(CK) of
		{ok, Value} ->
			{ok, State#{ parameter_key => Value }};
		{error, Reason} ->
			{error, #{
					module => Module,
					callback => parameter_key,
					reason => {Reason, CK},
					state => State
				}
			}
	end;
check(Module, CK, State) when is_list(CK) ->
	check2(Module, CK, CK, State);
check(Module, CK, State) ->
	{error, #{
			callback => parameter_key,
			reason => {invalid, CK},
			module => Module,
			state => State
		}
	}.
check2(Module, [], [], State) -> {error, #{ reason => {invalid, []}, module => Module, state => State, callback => parameter_key } }; check2(_Module, [], CK, State) -> {ok, State#{ parameter_key => CK }}; check2(Module, [Item|Rest], CK, State) when is_atom(Item) -> check2(Module, Rest, CK, State); check2(Module, [Item|Rest], CK, State) when is_binary(Item) -> check2(Module, Rest, CK, State); check2(Module, [{Variable}|Rest], CK, State) when is_atom(Variable) -> check2(Module, Rest, CK, State); check2(Module, [Item|Rest], _CK, State) -> {error, #{ callback => parameter_key, reason => {invalid, Item}, module => Module, state => State, rest => Rest } }. ================================================ FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_runtime.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Runtime Specification Definition. %%% %%% Runtime callback has been created to deal with different kind of %%% parameters. Some are static and can be set only at startup. Others %%% are dynamic and can be set during runtime. %%% %%% @end %%%=================================================================== -module(arweave_config_spec_runtime). -export([init/2]). -include("arweave_config_spec.hrl"). default() -> false. init(_Map = #{ runtime := Runtime }, State) -> {ok, State#{ runtime => Runtime }}; init(Map, State) when is_map(Map) -> {ok, State#{ runtime => default() }}; init(Module, State) when is_atom(Module) -> case is_function_exported(Module, runtime, 0) of true -> init2(Module, State); false -> {ok, State#{ runtime => default() }} end. 
init2(Module, State) -> try Module:runtime() of false -> NewState = State#{ runtime => false }, {ok, NewState}; true -> NewState = State#{ runtime => true }, {ok, NewState}; Elsewise -> {error, Elsewise} catch E:R:S -> {error, #{ module => Module, state => State, error => E, reason => R, stack => S } } end. ================================================ FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_short_argument.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc short argument specification feature. %%% @end %%%=================================================================== -module(arweave_config_spec_short_argument). -export([init/2]). -include("arweave_config_spec.hrl"). -include_lib("kernel/include/logger.hrl"). init(Map = #{ short_argument := SA }, State) -> check(Map, SA, State); init(Map, State) when is_map(Map) -> {ok, State}; init(Module, State) when is_atom(Module) -> case is_function_exported(Module, short_argument, 0) of true -> ?LOG_DEBUG("~p is defined", [{Module, short_argument, []}]), fetch(Module, State); false -> ?LOG_DEBUG("~p is undefined", [{Module, short_argument, []}]), {ok, State} end. fetch(Module, State) -> try SA = erlang:apply(Module, short_argument, []), check(Module, SA, State) catch _:R -> {error, R} end. 
%% @doc Validate a short argument. Accepted values: `undefined' /
%% `false' (no short argument), a one-element charlist, or a single
%% alphanumeric character code (`0-9', `a-z', `A-Z').
check(_Module, undefined, State) ->
	{ok, State};
check(_Module, false, State) ->
	{ok, State};
check(Module, [SA], State) when is_integer(SA), SA > 0 ->
	%% unwrap a one-character string like "d" to its char code
	check(Module, SA, State);
check(_Module, SA, State)
		when is_integer(SA) andalso
		     (( SA >= $0 andalso SA =< $9 ) orelse
		      ( SA >= $a andalso SA =< $z ) orelse
		      ( SA >= $A andalso SA =< $Z )) ->
	%% NOTE(review): the previous guard used the removed old-style
	%% `integer/1' guard BIF, and its `;'-separated alternatives only
	%% applied the integer check to the first range, so e.g. a float
	%% such as 97.5 could slip through the `$a..$z' alternative. The
	%% guard is now a single `is_integer' conjoined with the ranges.
	{ok, State#{ short_argument => SA }};
check(Module, SA, State) ->
	{error, #{
		reason => {invalid, SA},
		callback => short_argument,
		module => Module,
		state => State
	}}.
================================================ FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_short_description.erl ================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Specification callback module defining the short description
%%% field.
%%% @end
%%%===================================================================
-module(arweave_config_spec_short_description).
-export([init/2]).
-include("arweave_config_spec.hrl").

%% @doc Extract the short description either from a specification map
%% or from a callback module exporting `short_description/0'.
init(Map = #{ short_description := SD }, State) ->
	check(Map, SD, State);
init(Map, State) when is_map(Map) ->
	{ok, State};
init(Module, State) when is_atom(Module) ->
	case is_function_exported(Module, short_description, 0) of
		true -> fetch(Module, State);
		false -> {ok, State}
	end.

%% @hidden call the callback, trapping any crash as an error tuple.
fetch(Module, State) ->
	try
		SD = erlang:apply(Module, short_description, []),
		check(Module, SD, State)
	catch
		_:R -> {error, R}
	end.

%% @hidden a short description is optional (`undefined') or textual
%% (binary or string).
check(_Module, undefined, State) ->
	{ok, State};
check(_Module, SD, State) when is_binary(SD); is_list(SD) ->
	{ok, State#{ short_description => SD }};
check(Module, SD, State) ->
	{error, #{
		reason => {invalid, SD},
		module => Module,
		callback => short_description,
		state => State
	}}.
================================================ FILE: apps/arweave_config/src/arweave_config_specs/arweave_config_spec_type.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Configuration specification type. %%% %%% A type MUST BE defined somewhere. This is a way to reuse already %%% used format on the system. %%% %%% A type CAN CONVERT a value to an internal Erlang term. %%% %%% ``` %%% % type boolean: %%% boolean(<<"true">>) -> {ok, true}. %%% boolean("true") -> {ok, true}. %%% ''' %%% %%% == TODO == %%% %%% 1. Configures a default generic type (e.g. any) returning always, %%% `ok' it should be defined in this module or any other. At this, %%% time , he default value is set to `undefined', but this is not %%% coherent with the rest of the code. %%% %%% ``` %%% any(_, _) -> ok. %%% ''' %%% %%% 2. Configures a default generic type (e.g. none) returning always %%% `error'. %%% %%% ``` %%% none(_, _) -> {error, none}. %%% ''' %%% %%% 3. Configure a custom Module/Function type callback: %%% %%% ``` %%% {Module, Function} %%% ''' %%% %%% @end %%%=================================================================== -module(arweave_config_spec_type). -export([init/2]). -include("arweave_config_spec.hrl"). -include_lib("kernel/include/logger.hrl"). -include_lib("eunit/include/eunit.hrl"). init(#{ type := Type }, State) -> {ok, State#{ type => Type }}; init(Map, State) when is_map(Map) -> {ok, State}; init(Module, State) when is_atom(Module) -> case is_function_exported(Module, type, 0) of true -> fetch(Module, State); false -> {ok, State} end. 
fetch(Module, State) -> try erlang:apply(Module, type, []) of T when is_atom(T) -> case is_function_exported(arweave_config_type, T, 1) of true -> {ok, State#{ type => T }}; false -> ?LOG_WARNING("non existing type ~p", [T]), {ok, State#{ type => T }} end; Elsewise -> {error, Elsewise} catch E:R:S -> {error, #{ module => Module, state => State, error => E, reason => R, stack => S } } end. ================================================ FILE: apps/arweave_config/src/arweave_config_store.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Arweave Configuration Data Store Interface. %%% %%% This module/process is in charge to store the configuration. %%% Usually, `arweave_config_spec' is the only process allowed to do %%% so, but this rule is not enforced at the moment. %%% %%% == Features == %%% %%% === Dealing with map root value === %%% %%% While creating new parameter, a problem will probably arise very %%% soon. What if a leaf is also a branch? Let imagine we want to %%% create a more flexible way to configure the debugging parameter, %%% and permit users to configure debug on some part of the code, the %%% implementation would be something like the code below: %%% %%% ``` %%% {[global,debug], true} %%% {[global,debug,arweave_config], true} %%% ''' %%% %%% Unfortunately, this will not work in the current implementation, a %%% map. When a value is already present and is not a `map()', then it %%% will be set as `root' item. The `root' is represented as %%% underscore character (`_'). 
%%% %%% ``` %%% % extracted from arweave_config_store %%% Proplist = [ %%% {[global,debug], true}, %%% {[global,debug,arweave_config, true} %%% ]. %%% %%% % converted as map %%% Maps = [ %%% #{ global => #{ debug => true }}, %%% #{ global => #{ debug => #{ arweave_config => true }}} %%% ] %%% %%% % merged as map %%% MergedMap = #{ %%% global => #{ %%% debug => #{ %%% '_' => true, %%% arweave_config => true %%% } %%% } %%% } %%% %%% % as JSON %%% { %%% "global": { %%% "debug": { %%% "_": true, %%% "arweave_config": true %%% } %%% } %%% } %%% %%% % as YAML %%% global: %%% debug: %%% "_": true %%% arweave_config: true %%% ''' %%% %%% The key `_' then becomes a reserved key. %%% %%% == TODO == %%% %%% === Single Line Format Support === %%% %%% Instead of exporting classic JSON format, an easier one can be %%% created, where one value is attributed on one line: %%% %%% ``` %%% global.debug=true %%% global.data.directory="." %%% peers.[127.0.0.1:1984].enabled=true %%% ''' %%% %%% The separatator could be `=' or a null char (e.g `\t', ` '). One %%% huge advantage is no external module will be required to %%% parse/decode, and the format is pretty close from what we already %%% have in the database. %%% %%% === JSON Support === %%% %%% The key present in the store can have different type, usually %%% `atom()', `binary()' and/or `integer()'. Encoder like `jiffy' will %%% not encode `integer()' key to JSON string directly, then, a %%% conversion step will be required. %%% %%% @end %%%=================================================================== -module(arweave_config_store). -behavior(gen_server). -vsn(1). -export([ start_link/0, stop/0, get/1, get/2, set/2, delete/1, to_map/0, from_map/1 ]). -export([ init/1, handle_call/3, handle_cast/2, handle_info/2 ]). -compile({no_auto_import,[get/1]}). -record(key, {id}). -record(value, {value, meta}). -include_lib("kernel/include/logger.hrl"). -include_lib("eunit/include/eunit.hrl"). 
%%--------------------------------------------------------------------
%% @doc Starts `arweave_config_store' registered process.
%% @end
%%--------------------------------------------------------------------
start_link() ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%%--------------------------------------------------------------------
%% @doc Stops `arweave_config_store' process.
%% @end
%%--------------------------------------------------------------------
stop() ->
	gen_server:stop(?MODULE).

%%--------------------------------------------------------------------
%% @doc Retrieve a key from configuration store using ETS directly.
%% The raw key is first normalized with `arweave_config_parser:key/1';
%% a parse failure is forwarded as is, consistently with `set/2' and
%% `delete/1'.
%% @see lookup/1
%% @end
%%--------------------------------------------------------------------
-spec get(Key) -> Return when
	Key :: term(),
	Return :: {ok, term()} | {error, term()}.

get(Key) ->
	case arweave_config_parser:key(Key) of
		{ok, Id} ->
			lookup(Id);
		Elsewise ->
			%% NOTE(review): the parser already returns an
			%% `{error, Reason}' tuple here; the previous code
			%% wrapped it again as `{error, {error, Reason}}',
			%% unlike set/2 and delete/1 which forward it.
			Elsewise
	end.

%%--------------------------------------------------------------------
%% @doc Retrieve a value from ETS table, if not defined, return the
%% default value from second argument.
%% @end
%%--------------------------------------------------------------------
-spec get(Key, Default) -> Return when
	Key :: term(),
	Default :: term(),
	Return :: term() | Default.

get(Key, Default) ->
	case get(Key) of
		{ok, Value} -> Value;
		_ -> Default
	end.

%%--------------------------------------------------------------------
%% @doc Configure a value using a new key.
%% @todo if the key is already defined, the old value should be
%% returned.
%% @end
%%--------------------------------------------------------------------
-spec set(Key, Value) -> Return when
	Key :: term(),
	Value :: term(),
	Return :: {ok, New} | {ok, New, Old} | {error, term()},
	New :: {Id, Value},
	Old :: {Id, Value},
	Id :: term().

set(Key, Value) ->
	case arweave_config_parser:key(Key) of
		{ok, Id} ->
			gen_server:call(?MODULE, {set, Id, Value});
		Elsewise ->
			Elsewise
	end.
%%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- -spec delete(Key) -> Return when Key :: term(), Return :: {ok, term()} | {error, undefined}. delete(Key) -> case arweave_config_parser:key(Key) of {ok, Id} -> gen_server:call(?MODULE, {delete, Id}); Elsewise -> Elsewise end. %%-------------------------------------------------------------------- %% @doc Converts a map into a valid structure ready to be inserted %% into an ETS table. %% @todo create the import feature. %% %% ``` %% % from: %% #{ 1 => 2, 2 => #{ 3 => 4 }}. %% %% % every keys/values must be valid, and then the final data %% % structure before insert should look like that: %% [{1, 2}, {[2,3], 4}]. %% ''' %% %% @end %%-------------------------------------------------------------------- -spec from_map(Map) -> Return when Map :: map(), Return :: {ok, list()} | {error, term()}. from_map(Data) when is_map(Data) -> todo. %%-------------------------------------------------------------------- %% @doc Converts the content of the ETS table into a map. It will %% be easier to export the database in this case. %% @todo the merger is not finished yet. %% %% ``` %% % the final output should look like that %% #{ key1 => #{ key2 => value } }. %% %% % it can easily be converted into json, yaml or toml. %% ''' %% %% @end %%-------------------------------------------------------------------- -spec to_map() -> Return when Return :: map(). to_map() -> Parameters = ets:tab2list(?MODULE), ListOfMap = to_map(Parameters, []), arweave_config_serializer:map_merge(ListOfMap). to_map([], Buffer) -> Buffer; to_map([{#key{ id = Id }, #value{ value = Value }}|Rest], Buffer) -> to_map(Rest, [map_path(Id, Value)|Buffer]). to_map_test() -> {ok, Pid} = start_link(), set("test.a.b", 1), set(<<"test.a.c">>, 2), ?assertEqual( #{ test => #{ a => #{ b => 1, c => 2 } } }, to_map() ), gen_server:stop(Pid). 
%%--------------------------------------------------------------------
%% @hidden
%% @doc Turn a key path into a nested map holding `Value' at the leaf,
%% e.g. `map_path([1,2,3], data)' returns
%% `#{ 1 => #{ 2 => #{ 3 => data }}}'.
%% @end
%%--------------------------------------------------------------------
map_path(List, Value) ->
	[H|Rest] = lists:reverse(List),
	map_path2(Rest, #{ H => Value }).

%% fold the (already reversed) remaining path segments around Buffer
map_path2([], Buffer) -> Buffer;
map_path2([H|T], Buffer) -> map_path2(T, #{ H => Buffer }).

map_path2_test() ->
	?assertEqual(
		#{ 1 => #{ 2 => #{ 3 => data } } },
		map_path([1,2,3], data)
	).

%%--------------------------------------------------------------------
%% @hidden
%% @doc a wrapper around ets:lookup/2
%% @end
%%--------------------------------------------------------------------
lookup(Id) ->
	case ets:lookup(?MODULE, #key{ id = Id }) of
		[] ->
			{error, undefined};
		[{#key{ id = Id }, #value{ value = Value}}] ->
			{ok, Value};
		Elsewise ->
			{error, Elsewise}
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc Owns the protected named ETS table; reads go through ETS,
%% writes are serialized through this process.
%% @end
%%--------------------------------------------------------------------
init(_Args) ->
	erlang:process_flag(trap_exit, true),
	Ets = ets:new(?MODULE, [named_table, protected]),
	{ok, Ets}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
handle_call({set, Id, Value}, _From, State) ->
	K = #key{ id = Id },
	V = #value{ value = Value, meta = #{} },
	case ets:insert(?MODULE, {K, V}) of
		true -> {reply, {ok, {Id, Value}}, State};
		false -> {reply, {error, {Id, Value}}, State}
	end;
handle_call({delete, Id}, _From, State) ->
	%% NOTE(review): `From' was unused here (compiler warning);
	%% renamed to `_From'.
	case ets:take(?MODULE, #key{ id = Id }) of
		[] ->
			{reply, {error, undefined}, State};
		[{_, #value{ value = Value}}] ->
			{reply, {ok, {Id, Value}}, State}
	end;
handle_call(Msg, From, State) ->
	%% NOTE(review): unknown calls get no reply, so the caller will
	%% block until its gen_server:call timeout — confirm this is the
	%% intended behavior.
	?LOG_ERROR([{message, Msg}, {from, From}, {module, ?MODULE}]),
	{noreply, State}.
%%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- handle_cast(Msg, State) -> ?LOG_ERROR([{message, Msg}, {module, ?MODULE}]), {noreply, State}. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- handle_info(Msg, State) -> ?LOG_ERROR([{message, Msg}, {module, ?MODULE}]), {noreply, State}. ================================================ FILE: apps/arweave_config/src/arweave_config_sup.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Arweave Configuration Application Supervisor. %%% @end %%%=================================================================== -module(arweave_config_sup). -export([start_link/0]). -export([init/1]). %%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- init(_Args) -> {ok, {supervisor(), children()}}. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- supervisor() -> #{ strategy => one_for_all }. 
%%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- children() -> [ #{ id => arweave_config, start => { arweave_config, start_link, [] } }, #{ id => arweave_config_environment, start => { arweave_config_environment, start_link, [] } }, #{ id => arweave_config_arguments, start => { arweave_config_arguments, start_link, [] } }, #{ id => arweave_config_file, start => { arweave_config_file, start_link, [] } }, #{ id => arweave_config_arguments_legacy, start => { arweave_config_arguments_legacy, start_link, [] } }, % @TODO at this time, this process/feature is not % stable enough to be started. % #{ % id => arweave_config_file_legacy, % start => { % arweave_config_file_legacy, % start_link, % [] % } % }, #{ id => arweave_config_store, start => { arweave_config_store, start_link, [] } }, #{ id => arweave_config_spec, start => { arweave_config_spec, start_link, [] } }, #{ id => arweave_config_legacy, start => { arweave_config_legacy, start_link, [] } }, #{ id => arweave_config_signal_handler, start => { arweave_config_signal_handler, start_link, [] } } ]. ================================================ FILE: apps/arweave_config/src/arweave_config_type.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @copyright 2025 (c) Arweave %%% @doc Arweave Configuration Type Definition. %%% @end %%%=================================================================== -module(arweave_config_type). -compile(warnings_as_errors). 
-export([ none/1, any/1, boolean/1, integer/1, pos_integer/1, ipv4/1, file/1, tcp_port/1, path/1, atom/1, string/1, base64/1, base64url/1, logging_template/1 ]). -include_lib("kernel/include/file.hrl"). %%-------------------------------------------------------------------- %% @doc always returns an error. %% @end %%-------------------------------------------------------------------- -spec none(V) -> {error, V}. none(V) -> {error, V}. %%-------------------------------------------------------------------- %% @doc always returns the value. %% @end %%-------------------------------------------------------------------- -spec any(V) -> {ok, V}. any(V) -> {ok, V}. %%-------------------------------------------------------------------- %% @doc check if the data is an atom and convert list/binary to %% existing atoms. %% @end %%-------------------------------------------------------------------- -spec atom(Input) -> Return when Input :: string() | binary() | atom(), Return :: {ok, atom()} | {error, Input}. atom(List) when is_list(List) -> try {ok, list_to_existing_atom(List)} catch _:_ -> {error, List} end; atom(Binary) when is_binary(Binary) -> try {ok, binary_to_existing_atom(Binary)} catch _:_ -> {error, Binary} end; atom(V) when is_atom(V) -> {ok, V}; atom(V) -> {error, V}. %%-------------------------------------------------------------------- %% @doc check booleans from binary, list, integer and atoms. When a %% string is used, a regexp is being used and ignore the case of the %% word. %% %% == Examples == %% %% ``` %% {ok, true} = boolean(true). %% {ok, true} = boolean(<<"true">>). %% {ok, true} = boolean("true"). %% {ok, true} = boolean("on"). %% {ok, true} = boolean(<<"TruE">>). %% ''' %% %% @end %%-------------------------------------------------------------------- -spec boolean(Input) -> Return when Input :: string() | binary() | boolean(), Return :: {ok, boolean()} | {error, Input}. 
boolean(true) -> {ok, true};
boolean(on) -> {ok, true};
boolean(false) -> {ok, false};
boolean(off) -> {ok, false};
boolean(String) when is_list(String); is_binary(String) ->
	%% With `{capture, all_names, binary}' the captures come back in
	%% alphabetical order of the group names: [FalseGroup, TrueGroup].
	%% NOTE(review): the previous pattern "(?false|off)|(?true|on)"
	%% had lost the `<name>' part of its named groups and is not
	%% valid PCRE; the named groups are restored here.
	Regexp = "^(?:(?<false>false|off)|(?<true>true|on))$",
	Opts = [extended, caseless, {capture, all_names, binary}],
	case re:run(String, Regexp, Opts) of
		{match, [<<>>, _True]} -> {ok, true};
		{match, [_False, <<>>]} -> {ok, false};
		_ -> {error, String}
	end;
boolean(V) -> {error, V}.

%%--------------------------------------------------------------------
%% @doc check integers, converting from list or binary forms.
%% @end
%%--------------------------------------------------------------------
-spec integer(Integer) -> Return when
	Integer :: list() | binary() | integer(),
	Return :: {ok, integer()} | {error, term()}.

integer(List) when is_list(List) ->
	try integer(list_to_integer(List))
	catch _:_ -> {error, List}
	end;
integer(Binary) when is_binary(Binary) ->
	try integer(binary_to_integer(Binary))
	catch _:_ -> {error, Binary}
	end;
integer(Integer) when is_integer(Integer) ->
	{ok, Integer};
integer(V) ->
	{error, V}.

%%--------------------------------------------------------------------
%% @doc check non-negative integers.
%% NOTE(review): despite the name, zero is accepted (`>= 0'); callers
%% may rely on that (e.g. port 0), so the behavior is kept as is.
%% @end
%%--------------------------------------------------------------------
-spec pos_integer(Integer) -> Return when
	Integer :: list() | binary() | pos_integer(),
	Return :: {ok, pos_integer()} | {error, term()}.

pos_integer(Data) ->
	case integer(Data) of
		{ok, Integer} when Integer >= 0 -> {ok, Integer};
		_Else -> {error, Data}
	end.

%%--------------------------------------------------------------------
%% @doc check ipv4 addresses.
%% @end
%%--------------------------------------------------------------------
%% NOTE(review): the success value is a binary (see the clauses that
%% call `list_to_binary/1'); the previous spec wrongly said `list()'.
-spec ipv4(IPv4) -> Return when
	IPv4 :: inet:ip4_address() | binary() | list(),
	Return :: {ok, binary()} | {error, term()}.
ipv4(Tuple = {_, _, _, _}) -> case inet:is_ipv4_address(Tuple) of true -> ipv4(inet:ntoa(Tuple)); false -> {error, Tuple} end; ipv4(Binary) when is_binary(Binary) -> ipv4(binary_to_list(Binary)); ipv4(List) when is_list(List) -> case inet:parse_strict_address(List, inet) of {ok, _} -> {ok, list_to_binary(List)}; _Elsewise -> {error, List} end; ipv4(Elsewise) -> {error, Elsewise}. %%-------------------------------------------------------------------- %% @doc Defines file type. %% @todo if an unix socket path length is > 108, it will fail, needs %% to be fixed. %% @end %%-------------------------------------------------------------------- -spec file(File) -> Return when File :: binary() | list(), Return :: {ok, binary()} | {error, term()}. file(List) when is_list(List) -> file(list_to_binary(List)); file(Binary) when is_binary(Binary) -> case filename:pathtype(Binary) of absolute -> file2(Binary); relative -> {ok, Cwd} = file:get_cwd(), Absolute = filename:join(Cwd, Binary), file2(Absolute) end; file(Path) -> type_error( file, <<"unsupported format">>, #{ path => Path } ). % check if the directory is present, arweave_config should not % be in charge of creating it. file2(Path) -> Split = filename:split(Path), [_Filename|Reverse] = lists:reverse(Split), Directory = filename:join(lists:reverse(Reverse)), case filelib:is_dir(Directory) of true -> file3(Path, Directory); false -> type_error( file, <<"directory not found">>, #{ path => Path, directory => Directory } ) end. % check if the directory has a read/write access. file3(Path, Directory) -> Split = filename:split(Path), [_Filename|_] = lists:reverse(Split), case file:read_file_info(Directory) of {ok, #file_info{access = read_write }} -> file4(Path); {error, Reason} -> type_error( file, Reason, #{ path => Path } ) end. % convert a list into path. It should not be the case there, but it's % to avoid having different type format. file4(Path) when is_list(Path) -> {ok, list_to_binary(Path)}; file4(Path) -> {ok, Path}. 
%%-------------------------------------------------------------------- %% @doc check tcp port. %% @end %%-------------------------------------------------------------------- -spec tcp_port(Port) -> Return when Port :: pos_integer(), Return :: {ok, pos_integer()} | {error, term()}. tcp_port(Binary) when is_binary(Binary) -> tcp_port(binary_to_integer(Binary)); tcp_port(List) when is_list(List) -> tcp_port(list_to_integer(List)); tcp_port(Integer) when is_integer(Integer) -> case Integer of _ when Integer >= 0, Integer =< 65535 -> {ok, Integer}; _ -> {error, Integer} end. %%-------------------------------------------------------------------- %% @doc check unix path. %% @end %%-------------------------------------------------------------------- path(List) when is_list(List) -> path(list_to_binary(List)); path(Binary) when is_binary(Binary) -> case filename:validate(Binary) of true -> path_relative(Binary); false -> {error, Binary} end. path_relative(Path) -> case filename:pathtype(Path) of relative -> {ok, filename:absname(Path)}; absolute -> {ok, Path} end. %%-------------------------------------------------------------------- %% @doc a string type. %% @todo to be defined correctly. %% @end %%-------------------------------------------------------------------- -spec string(String) -> Return when String :: list(), Return :: {ok, list()} | {error, term()}. string(String) -> string(String, String). string([], String) -> {ok, String}; string([H|T], String) when is_integer(H) -> string(T, String); string(_, String) -> {error, String}. %%-------------------------------------------------------------------- %% @doc check base64 type. %% @end %%-------------------------------------------------------------------- -spec base64(String) -> Return when String :: binary() | list(), Return :: {ok, binary()} | {error, term()}. base64(List) when is_list(List) -> base64(list_to_binary(List)); base64(Binary) -> try {ok, base64:decode(Binary)} catch _:_ -> {error, Binary} end. 
%%-------------------------------------------------------------------- %% @doc check base64url %% @end %%-------------------------------------------------------------------- -spec base64url(String) -> Return when String :: binary() | list(), Return :: {ok, binary()} | {error, term()}. base64url(List) when is_list(List) -> base64url(list_to_binary(List)); base64url(Binary) -> try {ok, b64fast:decode(Binary)} catch _:_ -> {error, Binary} end. %%-------------------------------------------------------------------- %% @doc Check, parse and convert a logging template from custom %% parser. %% %% The rules are strict, only tab and space as separator, only ASCII %% printable chars as word, only a limited list of chars for %% existing atoms (`[a-zA-Z_]'). An atom is a word starting with a %% null char and '%' symbol. All templates are terminated with "\n". %% %% @see logger_formatter:template/0 %% %% == Examples == %% %% ``` %% {ok, ["test", "\n"]} = logging_template("test"). %% {ok, [test, "\n"]} = logging_template("%test"). %% {ok, ["message:", msg, "\n"]} = logging_template("message: %msg"). %% ''' %% %% @end %%-------------------------------------------------------------------- -spec logging_template(String) -> Return when String :: binary() | list(), Return :: {ok, [atom()|list()]} | {error, term()}. logging_template(List) when is_list(List) -> logging_template_parse(list_to_binary(List)); logging_template(Binary) when is_binary(Binary) -> logging_template_parse(Binary). logging_template_parse(Binary) -> logging_template_tokenizer(Binary, []). 
%%--------------------------------------------------------------------
%% @hidden
%% @doc Tokenizer for logging templates. Produces (reversed) tokens:
%% `{null, Char}' for separators (space/tab), `{atom, Name}' for
%% `%name' placeholders and `{word, Word}' for printable words.
%% NOTE(review): the binary match patterns in this section were
%% corrupted in the previous revision (rendered as `<>'); they have
%% been reconstructed as `<<Char, Rest/binary>>' forms from the
%% surrounding logic.
%% @end
%%--------------------------------------------------------------------
logging_template_tokenizer(<<>>, Buffer) ->
	logging_template_parser(Buffer);
logging_template_tokenizer(<<Char, Rest/binary>>, Buffer)
		when Char =:= $\s; Char =:= $\t ->
	NewBuffer = [{null, Char}|Buffer],
	logging_template_tokenizer(Rest, NewBuffer);
logging_template_tokenizer(<<$%, Rest/binary>>, Buffer) ->
	case logging_template_token_atom(Rest) of
		{ok, Atom, NewRest} ->
			NewBuffer = [{atom, Atom}|Buffer],
			logging_template_tokenizer(NewRest, NewBuffer);
		Else ->
			Else
	end;
logging_template_tokenizer(Bin, Buffer) when is_binary(Bin) ->
	%% NOTE(review): a duplicated, dead second call to
	%% logging_template_token_word/1 was removed from this clause.
	case logging_template_token_word(Bin) of
		{ok, Word, NewRest} ->
			NewBuffer = [{word, Word}|Buffer],
			logging_template_tokenizer(NewRest, NewBuffer);
		Else ->
			Else
	end.

logging_template_token_atom(Binary) ->
	logging_template_token_atom(Binary, <<>>).

%% Accumulates `[a-zA-Z0-9_]' characters until end of input or a
%% separator (space/tab); any other character is an error.
logging_template_token_atom(<<>>, Buffer) ->
	{ok, Buffer, <<>>};
logging_template_token_atom(Rest = <<Char, _/binary>>, Buffer)
		when Char =:= $\s; Char =:= $\t ->
	{ok, Buffer, Rest};
logging_template_token_atom(<<Char, Rest/binary>>, Buffer)
		when Char >= $a, Char =< $z;
		     Char >= $A, Char =< $Z;
		     Char >= $0, Char =< $9;
		     Char =:= $_ ->
	logging_template_token_atom(Rest, <<Buffer/binary, Char>>);
logging_template_token_atom(<<Char, _/binary>>, _Buffer) ->
	{error, {atom, Char}}.

logging_template_token_word(Binary) ->
	logging_template_token_word(Binary, <<>>).

%% Accumulates printable ASCII characters until end of input or a
%% separator (space/tab).
logging_template_token_word(<<>>, Buffer) ->
	{ok, Buffer, <<>>};
logging_template_token_word(Rest = <<Char, _/binary>>, Buffer)
		when Char =:= $\s; Char =:= $\t ->
	{ok, Buffer, Rest};
logging_template_token_word(<<Char, Rest/binary>>, Buffer)
		when Char >= $!, Char =< $~ ->
	%% NOTE(review): the lower bound was `21' (a control character),
	%% while the module doc says "only ASCII printable chars"; the
	%% intended bound was almost certainly 0x21, i.e. `$!' (33).
	logging_template_token_word(Rest, <<Buffer/binary, Char>>);
logging_template_token_word(<<Char, _/binary>>, _Buffer) ->
	{error, {word, Char}}.

logging_template_parser(Tokens) ->
	logging_template_parser(Tokens, []).
logging_template_parser([], Buffer) -> {ok, Buffer ++ ["\n"]}; logging_template_parser([{null, Null}|Rest], Buffer) -> NewBuffer = [[Null]|Buffer], logging_template_parser(Rest, NewBuffer); logging_template_parser([{atom, Atom}|Rest], Buffer) -> try Result = binary_to_existing_atom(Atom), NewBuffer = [Result|Buffer], logging_template_parser(Rest, NewBuffer) catch _:_ -> {error, {atom, Atom}} end; logging_template_parser([{word, Word}|Rest], Buffer) -> NewBuffer = [binary_to_list(Word)|Buffer], logging_template_parser(Rest, NewBuffer). %%-------------------------------------------------------------------- %% common format for all errors %%-------------------------------------------------------------------- type_error(Name, Reason, Data) -> {error, #{ status => error, message => #{ type => Name, reason => Reason, data => Data } } }. ================================================ FILE: apps/arweave_config/test/arweave_config_SUITE.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @copyright 2025 (c) Arweave %%% @doc %%% @end %%%=================================================================== -module(arweave_config_SUITE). -export([suite/0, description/0]). -export([init_per_suite/1, end_per_suite/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([all/0]). -export([arweave_config/1, arweave_config_legacy/1]). -include("arweave_config.hrl"). -include_lib("common_test/include/ct.hrl"). %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- suite() -> [{userdata, [description()]}]. 
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave_config test main interface"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) -> Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.

%%--------------------------------------------------------------------
%% @hidden
%% @doc each case runs against a freshly started arweave_config.
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		arweave_config,
		arweave_config_legacy
	].
%%--------------------------------------------------------------------
%% @doc test `arweave_config' interface
%% @end
%%--------------------------------------------------------------------
arweave_config(_Config) ->
	ct:pal(test, 1, "get an existing parameter"),
	{ok, _} = arweave_config:get([debug]),

	ct:pal(test, 1, "get a missing parameter"),
	{error, _} = arweave_config:get([missing, parameter]),

	ct:pal(test, 1, "get an existing parameter with default value"),
	_ = arweave_config:get([debug], true),

	ct:pal(test, 1, "get a missing parameter with default value"),
	1 = arweave_config:get([missing, parameter], 1),

	ct:pal(test, 1, "set an existing parameter"),
	{ok, DebugValue1, _} = arweave_config:set([debug], true),
	{ok, DebugValue1} = arweave_config:get([debug]),

	ct:pal(test, 1, "unset an existing parameter"),
	{ok, DebugValue2, _} = arweave_config:set([debug], false),
	{ok, DebugValue2} = arweave_config:get([debug]),
	ok.

%%--------------------------------------------------------------------
%% @doc test `arweave_config' legacy interface.
%% @end
%%--------------------------------------------------------------------
arweave_config_legacy(_Config) ->
	% legacy compatible interface, to remove
	ct:pal(test, 1, "init legacy environment"),
	ok = arweave_config:set_env(#config{}),

	% legacy compatible interface, to remove
	ct:pal(test, 1, "get legacy environment"),
	{ok, Config1} = arweave_config:get_env(),
	false = Config1#config.init,

	% legacy compatible interface, to remove
	ct:pal(test, 1, "set legacy environment"),
	ok = arweave_config:set_env(#config{ init = true }),
	{ok, Config2} = arweave_config:get_env(),
	true = Config2#config.init,

	% check runtime mode
	false = arweave_config:is_runtime(),
	ct:pal(test, 1, "switch to runtime mode"),
	ok = arweave_config:runtime(),
	true = arweave_config:is_runtime(),
	{comment, "arweave_config interface tested"}.
================================================
FILE: apps/arweave_config/test/arweave_config_arguments_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2026 (c) Arweave
%%% @doc arweave configuration arguments parser suite.
%%% @end
%%%===================================================================
-module(arweave_config_arguments_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([
	default/1,
	parser/1
]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() -> [{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave config cli arguments interface"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) -> Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	[].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		default,
		parser
	].

%%--------------------------------------------------------------------
%% @doc test `arweave_config_arguments' main interface.
%% @end
%%--------------------------------------------------------------------
default(_Config) ->
	ct:pal(test, 1, "check if arweave_config_arguments is alive"),
	true = is_process_alive(whereis(arweave_config_arguments)),

	ct:pal(test, 1, "send an unsupported message to the process"),
	ok = gen_server:call(arweave_config_arguments, '@random_test', 1000),
	ok = gen_server:cast(arweave_config_arguments, '@random_test'),
	_ = erlang:send(arweave_config_arguments, '@random_test'),

	ct:pal(test, 1, "check the default configuration"),
	[] = arweave_config_arguments:get(),

	ct:pal(test, 1, "by default, no arguments are present"),
	%% fix: this suite tests arweave_config_arguments; the original
	%% called arweave_config_arguments_legacy:get_args() here, a
	%% copy-paste from the legacy suite.
	[] = arweave_config_arguments:get_args(),

	ct:pal(test, 1, "set debug arguments"),
	{ok, _} = arweave_config_arguments:set([ "--debug" ]),

	ct:pal(test, 1, "raw arguments are returned"),
	["--debug"] = arweave_config_arguments:get_args(),

	ct:pal(test, 1, "the configuration has been set"),
	[{#{ parameter_key := [debug] }, [true]}] =
		arweave_config_arguments:get(),

	ct:pal(test, 1, "load into arweave_config"),
	ok = arweave_config_arguments:load(),

	ct:pal(test, 1, "check arweave_config result"),
	{ok, true} = arweave_config:get([debug]),
	{comment, "arguments process tested"}.

%%--------------------------------------------------------------------
%% @doc test `arweave_config_arguments' parser.
%% @end
%%--------------------------------------------------------------------
parser(_Config) ->
	% Create a custom long arguments map
	LongArguments = #{
		<<"--boolean">> => #{ type => boolean },
		<<"--integer">> => #{ type => integer },
		<<"--integer.pos">> => #{ type => pos_integer }
	},

	% create a custom short argmuents map
	ShortArguments = #{
		$b => #{ type => boolean },
		$i => #{ type => integer },
		$I => #{ type => pos_integer }
	},

	Arguments = [
		% long arguments
		<<"--boolean">>,
		<<"--boolean">>, <<"true">>,
		<<"--boolean">>, <<"false">>,
		<<"--boolean">>, <<"True">>,
		<<"--boolean">>, <<"TRUE">>,
		<<"--boolean">>, <<"FALSE">>,
		<<"--integer">>, <<"-65535">>,
		<<"--integer">>, <<"-0">>,
		<<"--integer">>, <<"-65535">>,
		<<"--integer.pos">>, <<"0">>,
		<<"--integer.pos">>, <<"65535">>,

		% short arguments
		<<"-b">>,
		<<"-b">>, <<"true">>,
		<<"-b">>, <<"on">>,
		<<"-b">>, <<"off">>,
		<<"-b">>, <<"false">>,
		<<"-i">>, <<"65535">>,
		<<"-i">>, <<"0">>,
		<<"-i">>, <<"-65535">>,
		<<"-I">>, <<"0">>,
		<<"-I">>, <<"65535">>
	],

	% --peer 127.0.0.1 --vdf --trusted
	% --storage.module 1.unpacked --enabled
	Opts = #{
		long_arguments => LongArguments,
		short_arguments => ShortArguments
	},

	ct:pal(test, 1, "parse ~p", [Arguments]),
	Result = [
		{#{type => boolean},[true]},
		{#{type => boolean},[true]},
		{#{type => boolean},[false]},
		{#{type => boolean},[true]},
		{#{type => boolean},[true]},
		{#{type => boolean},[false]},
		{#{type => integer},[-65535]},
		{#{type => integer},[0]},
		{#{type => integer},[-65535]},
		{#{type => pos_integer},[0]},
		{#{type => pos_integer},[65535]},
		{#{type => boolean},[true]},
		{#{type => boolean},[true]},
		{#{type => boolean},[true]},
		{#{type => boolean},[false]},
		{#{type => boolean},[false]},
		{#{type => integer},[65535]},
		{#{type => integer},[0]},
		{#{type => integer},[-65535]},
		{#{type => pos_integer},[0]},
		{#{type => pos_integer},[65535]}
	],
	{ok, Result} = arweave_config_arguments:parse(Arguments, Opts),

	ct:pal(test, 1, "check bad arguments"),
	{error, #{ reason := <<"bad_argument">> }} =
		arweave_config_arguments:parse([<<"---bad-arg">>]),
	{error, #{ reason := <<"bad_argument">> }} =
		arweave_config_arguments:parse([<<"----bad-arg">>]),

	ct:pal(test, 1, "check unknown argument"),
	{error, #{ reason := <<"unknown argument">> }} =
		arweave_config_arguments:parse([<<"--unknown">>]),

	ct:pal(test, 1, "check missing value"),
	{error, #{ reason := <<"missing value">> }} =
		arweave_config_arguments:parse([<<"--data.directory">>]),
	{comment, "arguments parser tested"}.

================================================
FILE: apps/arweave_config/test/arweave_config_arguments_legacy_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2026 (c) Arweave
%%% @doc arweave configuration legacy parser test suite.
%%% @end
%%%===================================================================
-module(arweave_config_arguments_legacy_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([
	default/1,
	parser/1
]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() -> [{userdata, [description()]}].
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave config cli arguments interface"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) -> Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	[].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		default,
		parser
	].

%%--------------------------------------------------------------------
%% @doc test `arweave_config_arguments_legacy' main interface.
%% @end
%%--------------------------------------------------------------------
default(_Config) ->
	%% fix: the message referred to arweave_config_arguments while the
	%% process checked here is arweave_config_arguments_legacy.
	ct:pal(test, 1, "check if arweave_config_arguments_legacy is alive"),
	true = is_process_alive(whereis(arweave_config_arguments_legacy)),

	ct:pal(test, 1, "send an unsupported message to the process"),
	ok = gen_server:call(arweave_config_arguments_legacy, '@random_test', 1000),
	ok = gen_server:cast(arweave_config_arguments_legacy, '@random_test'),
	_ = erlang:send(arweave_config_arguments_legacy, '@random_test'),

	ct:pal(test, 1, "check the default configuration"),
	#config{} = arweave_config_arguments_legacy:get(),

	ct:pal(test, 1, "by default, no arguments are present"),
	[] = arweave_config_arguments_legacy:get_args(),

	ct:pal(test, 1, "set debug and init arguments"),
	{ok, _} = arweave_config_arguments_legacy:set([ "debug", "init" ]),

	ct:pal(test, 1, "raw arguments are returned"),
	["debug", "init"] = arweave_config_arguments_legacy:get_args(),

	ct:pal(test, 1, "the configuration has been set"),
	#config{ debug = true, init = true } =
		arweave_config_arguments_legacy:get(),

	ct:pal(test, 1, "load into arweave_config_legacy"),
	{ok, _} = arweave_config_arguments_legacy:load(),

	ct:pal(test, 1, "check arweave_config_legacy result"),
	#config{ debug = true, init = true } =
		arweave_config_legacy:get(),

	ct:pal(test, 1, "check arweave_config result"),
	{ok, true} = arweave_config:get([debug]),
	{comment, "arguments process tested"}.

%%--------------------------------------------------------------------
%% @doc test `arweave_config_arguments_legacy' parser interface.
%% @end
%%--------------------------------------------------------------------
parser(_Config) ->
	ct:pal(test, 1, "check parser"),
	{ok, _} = arweave_config_arguments_legacy:parse(["debug"]),
	{error, _} = arweave_config_arguments_legacy:parse(["wrong_arg"]),
	{comment, "arguments parser tested"}.
================================================
FILE: apps/arweave_config/test/arweave_config_bootstrap_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2026 (c) Arweave
%%% @doc arweave configuration bootstrap test suite.
%%% @end
%%%===================================================================
-module(arweave_config_bootstrap_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([
	legacy_mode/1,
	new_mode/1
]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() -> [{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave config parameters bootstrap"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) -> Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.
%%--------------------------------------------------------------------
%% @hidden
%% @doc new_mode runs with AR_CONFIG_MODE=new; every other case runs
%% with the variable cleared.
%%--------------------------------------------------------------------
init_per_testcase(new_mode, _Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	ct:pal(test, 1, "set AR_CONFIG_MODE environment"),
	os:putenv("AR_CONFIG_MODE", "new"),
	[];
init_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	%% fix: the message previously said "ARWEAVE_CONFIG_MODE" while the
	%% variable manipulated here is AR_CONFIG_MODE.
	ct:pal(test, 1, "ensure AR_CONFIG_MODE is unset"),
	%% NOTE(review): os:putenv/2 with "" sets an empty value rather than
	%% unsetting the variable; os:unsetenv/1 may be intended -- confirm
	%% against how arweave_config_bootstrap reads AR_CONFIG_MODE.
	os:putenv("AR_CONFIG_MODE", ""),
	[].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(new_mode, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop(),
	ct:pal(test, 1, "reset AR_CONFIG_MODE environment"),
	os:putenv("AR_CONFIG_MODE", "");
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		legacy_mode,
		new_mode
	].

%%--------------------------------------------------------------------
%% @doc complete legacy mode test.
%% @end
%%--------------------------------------------------------------------
legacy_mode(_Config) ->
	ct:pal(test, 1, "legacy arguments must succeed"),
	{ok, Valid} = arweave_config_bootstrap:start(["init"]),
	% true = arweave_config:get([]),
	#config{ init = true } = Valid,
	{comment, ""}.

%%--------------------------------------------------------------------
%% @doc complete new mode test.
%% @end
%%--------------------------------------------------------------------
new_mode(_Config) ->
	ct:pal(test, 1, "new arguments must succeed (debug true)"),
	{ok, Valid1} = arweave_config_bootstrap:start(["--debug"]),
	{ok, true} = arweave_config:get([debug]),
	#config{ debug = true } = Valid1,

	ct:pal(test, 1, "new arguments must succeed (debug false)"),
	{ok, Valid2} = arweave_config_bootstrap:start(["--debug", "false"]),
	{ok, false} = arweave_config:get([debug]),
	#config{ debug = false } = Valid2,

	ct:pal(test, 1, "legacy arguments must fail"),
	{error, _Invalid} = arweave_config_bootstrap:start(["debug"]),
	{comment, ""}.

================================================
FILE: apps/arweave_config/test/arweave_config_environment_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2025 (c) Arweave
%%% @doc
%%% @end
%%%===================================================================
-module(arweave_config_environment_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([default/1]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() -> [{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave config environment interface"}.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) -> Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.

%%--------------------------------------------------------------------
%% @hidden
%% @doc the OS environment is seeded before arweave_config starts so
%% the variables are visible to the worker; the seeded pairs are
%% passed to the case through the `environment' key.
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	set_environment(),
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	[{environment, environment()}|Config].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop(),
	unset_environment().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		default
	].

%%--------------------------------------------------------------------
%% @doc test `arweave_config_environment' main interface.
%% @end
%%--------------------------------------------------------------------
default(Config) ->
	Environment = proplists:get_value(environment, Config),

	ct:pal(test, 1, "ensure arweave_config_environment is started"),
	true = is_process_alive(whereis(arweave_config_environment)),

	ct:pal(test, 1, "send an unsupported message to the process"),
	ok = gen_server:call(arweave_config_environment, '@random_test', 1000),
	ok = gen_server:cast(arweave_config_environment, '@random_test'),
	_ = erlang:send(arweave_config_environment, '@random_test'),

	ct:pal(test, 1, "reset arweave_config_environment"),
	arweave_config_environment:reset(),

	ct:pal(test, 1, "retrieve environment from arweave_config_environment"),
	ArweaveEnvironment = arweave_config_environment:get(),

	ct:pal(test, 1, "check if variables have been configured"),
	[ begin
		ct:pal(test, 1, "found: ~p", [{K,V}]),
		BK = list_to_binary(K),
		VE = proplists:get_value(BK, ArweaveEnvironment),
		ct:pal(test, 1, "~p", [{BK,VE}]),
		VE = list_to_binary(V)
	  end || {K, V} <- Environment ],

	ct:pal(test, 1, "check all variables one by one"),
	[ begin
		ct:pal(test, 1, "check: ~p", [{K,V}]),
		BK = list_to_binary(K),
		{ok, VE} = arweave_config_environment:get(BK),
		VE = list_to_binary(V)
	  end || {K, V} <- Environment ],

	ct:pal(test, 1, "check current value of debug parameter"),
	{ok, false} = arweave_config:get([debug]),
	#config{ debug = false } = arweave_config_legacy:get(),

	% load the environment, it will lookup in the environment list
	% and set the value, in our case, AR_DEBUG should be
	% configured and set to true instead of false.
	ct:pal(test, 1, "load the environment"),
	ok = arweave_config_environment:load(),

	ct:pal(test, 1, "check [debug] parameter."),
	{ok, true} = arweave_config:get([debug]),

	ct:pal(test, 1, "check [debug] parameter (legacy)."),
	#config{ debug = true } = arweave_config_legacy:get(),

	ct:pal(test, 1, "check unconfigured environment variable"),
	{error, not_found} =
		arweave_config_environment:get(<<"UNKNOWN_VARIABLE">>),
	{comment, "environment feature tested"}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc the key/value pairs seeded into the OS environment.
%%--------------------------------------------------------------------
environment() ->
	[
		{"AR_TEST_ENVIRONMENT_VARIABLE", "test"},
		{"AR_DEBUG", "true"}
	].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
set_environment() ->
	[ begin
		ct:pal(test, 1, "prepare: set ~p=~p",[K,V]),
		os:putenv(K,V),
		V = os:getenv(K)
	  end || {K,V} <- environment() ].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
unset_environment() ->
	[ begin
		ct:pal(test, 1, "cleanup: unset ~p", [K]),
		os:unsetenv(K)
	  end || {K,_} <- environment() ].

================================================
FILE: apps/arweave_config/test/arweave_config_file_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2026 (c) Arweave
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @doc Arweave Config File Support Test Suite.
%%% @end
%%%===================================================================
-module(arweave_config_file_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([
	default/1,
	load/1,
	json/1,
	yaml/1,
	toml/1,
	legacy/1
]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() -> [{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, ""}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) -> Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		default,
		load,
		json,
		toml,
		yaml,
		legacy
	].
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
default(Config) ->
	DataDir = proplists:get_value(data_dir, Config),

	ct:pal(test, 1, "Check post-initialization value"),
	#{} = arweave_config_file:get(),
	[] = arweave_config_file:get_paths(),

	ct:pal(test, 1, "Check presence of non loaded file"),
	{error, not_found} =
		arweave_config_file:get_by_path("/tmp2/not_present.json"),

	ct:pal(test, 1, "check unsupported file"),
	{error, _} = arweave_config_file:add(
		filename:join(DataDir, "config_unsupported.xml")
	),

	{ok, JsonPath} = valid_format_test([
		{format, "json"},
		{filename, "config_valid.json"}
		|Config]),
	{ok, TomlPath} = valid_format_test([
		{format, "toml"},
		{filename, "config_valid.toml"}
		|Config]),
	{ok, YamlPath} = valid_format_test([
		{format, "yaml"},
		{filename, "config_valid.yaml"}
		|Config]),

	% @todo legacy
	% LegacyPath = filename:join(DataDir, "config_valid.ljson"),
	% format_test([
	%	{format, "legacy"},
	%	{filename, "config_valid.ljson"},
	%	{path, LegacyPath}
	% |Config]),

	ct:pal(test, 1, "merge toml, json and yaml files together"),
	{ok, _} = arweave_config_file:add(JsonPath),
	{ok, _} = arweave_config_file:add(TomlPath),
	{ok, _} = arweave_config_file:add(YamlPath),

	ct:pal(test, 1, "check merged configuration"),
	_ = arweave_config_file:get(),

	% it should return the list of files loaded in a list, sorted
	% in alphabetical order.
	ct:pal(test, 1, "check if the paths have been added"),
	MergedPaths = arweave_config_file:get_paths(),
	true = search_path(JsonPath, MergedPaths),
	true = search_path(TomlPath, MergedPaths),
	true = search_path(YamlPath, MergedPaths),

	% finally, reset the configuration
	ct:pal(test, 1, "reset arweave_config_file state"),
	ok = arweave_config_file:reset(),

	{error, _InvalidJsonPath} = invalid_format_test([
		{format, "json"},
		{filename, "config_invalid.json"}
		|Config]),
	{error, _InvalidYamlPath} = invalid_format_test([
		{format, "yaml"},
		{filename, "config_invalid.yaml"}
		|Config]),
	{error, _InvalidTomlPath} = invalid_format_test([
		{format, "toml"},
		{filename, "config_invalid.toml"}
		|Config]),

	ok = arweave_config_file:load(),

	% check unsupported call, the process should not crash.
	ok = erlang:send(arweave_config_file, ok),
	ok = gen_server:cast(arweave_config_file, ok),
	ok = gen_server:call(arweave_config_file, unsupported, 1000),
	{comment, "tested arweave config file worker"}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc test valid common pattern for all format.
%% @end
%%--------------------------------------------------------------------
valid_format_test(Config) ->
	DataDir = proplists:get_value(data_dir, Config),
	Format = proplists:get_value(format, Config),
	Filename = proplists:get_value(filename, Config),
	Path = filename:join(DataDir, Filename),

	ct:pal(test, 1, "~p: Add ~p file", [Format, Filename]),
	{ok, _} = arweave_config_file:add(Path),

	ct:pal(test, 1, "~p: check if the file has been added", [Format]),
	Paths = arweave_config_file:get_paths(),
	ct:pal(test, 1, "~p", [Paths]),
	true = search_path(Path, Paths),

	ct:pal(test, 1, "~p: check if the file has been parsed", [Format]),
	Merged = arweave_config_file:get(),
	true = 0 < map_size(Merged),

	ct:pal(test, 1, "~p: retrieve the configuration (~p)", [Format, Path]),
	{ok, {_Timestamp, _Config}} = arweave_config_file:get_by_path(Path),

	ct:pal(test, 1, "~p: load ~p", [Format, Path]),
	ok = arweave_config_file:load(Path),
	{ok, true} = arweave_config:get([debug]),

	ct:pal(test, 1, "~p: load merged configuration", [Format]),
	ok = arweave_config_file:load(),
	{ok, true} = arweave_config:get([debug]),

	ct:pal(test, 1, "reset arweave_config_file state"),
	ok = arweave_config_file:reset(),

	% return the full path
	{ok, Path}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc test invalid common pattern for all format.
%% @end
%%--------------------------------------------------------------------
invalid_format_test(Config) ->
	DataDir = proplists:get_value(data_dir, Config),
	Format = proplists:get_value(format, Config),
	Filename = proplists:get_value(filename, Config),
	Path = filename:join(DataDir, Filename),

	ct:pal(test, 1, "~p: load invalid file ~p (~p)", [Format, Filename, Path]),
	{error, _} = arweave_config_file:add(Path),

	ct:pal(test, 1, "~p: ensure the file ~p was not loaded", [Format, Filename]),
	Paths = arweave_config_file:get_paths(),
	false = search_path(Path, Paths),

	ct:pal(test, 1, "reset arweave_config_file state"),
	ok = arweave_config_file:reset(),
	{error, Path}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
load(_Config) ->
	ok.

%%--------------------------------------------------------------------
%% @hidden
%% @doc test json file support.
%% @end
%%--------------------------------------------------------------------
json(Config) ->
	{error, _} = file_bad_name([{extension, ".jsn"}|Config]),
	file_checks([{extension, ".json"}|Config]),
	{comment, "json file format tested"}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc test toml file support.
%% @end
%%--------------------------------------------------------------------
toml(Config) ->
	{error, _} = file_bad_name([{extension, ".tml"}|Config]),
	file_checks([{extension, ".toml"}|Config]),
	{comment, "toml file format tested"}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc test yaml file support.
%% @end
%%--------------------------------------------------------------------
yaml(Config) ->
	{error, _} = file_bad_name([{extension, ".yml"}|Config]),
	file_checks([{extension, ".yaml"}|Config]),
	{comment, "yaml file format tested"}.
%%--------------------------------------------------------------------
%% @hidden
%% @doc test legacy file support.
%% @end
%%--------------------------------------------------------------------
legacy(Config) ->
	{error, _} = file_bad_name([{extension, ".lson"}|Config]),
	file_checks([{extension, ".ljson"}|Config]),
	{comment, "legacy file format tested"}.

%%--------------------------------------------------------------------
%% @hidden
%% @doc check all common file test.
%% @end
%%--------------------------------------------------------------------
file_checks(Config) ->
	{error, _} = file_bad_format(Config),
	{ok, _} = file_empty(Config),
	{error, _} = file_norights(Config),
	{ok, _} = file_read(Config),
	{ok, _} = file_readwrite(Config),
	{error, _} = file_unsafe_path(Config),
	{ok, _} = file_relative_path(Config),
	ok.

%%--------------------------------------------------------------------
%% @hidden
%% @doc create a regular file containing bad data.
%% @end
%%--------------------------------------------------------------------
file_bad_format(Config) ->
	%% fix: removed the unused `Module' binding (proplists:get_value(module, ...)).
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["bad_format", Extension], ""),
	Data = proplists:get_value(data, Config, "test::data::bad"),
	Path = filename:join(PrivDir, Filename),
	ct:pal(test, 1, "create file ~p", [Path]),
	%% fix: assert the write succeeded instead of silently ignoring it.
	ok = file:write_file(Path, Data),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc creates a regular file containing a bad name.
%% @end
%%--------------------------------------------------------------------
file_bad_name(Config) ->
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["bad_name", Extension], ""),
	Path = filename:join(PrivDir, Filename),
	ct:pal(test, 1, "create file ~p", [Path]),
	% fail fast if the fixture cannot be created.
	ok = file:write_file(Path, ""),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc creates an empty file and returns the parser result.
%% @end
%%--------------------------------------------------------------------
file_empty(Config) ->
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["empty", Extension], ""),
	Path = filename:join(PrivDir, Filename),
	ct:pal(test, 1, "create file ~p", [Path]),
	ok = file:write_file(Path, ""),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc creates an empty file with no rights (mode 000), parses it,
%% then restores a usable mode so the priv dir can be cleaned up.
%% @end
%%--------------------------------------------------------------------
file_norights(Config) ->
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["norights", Extension], ""),
	Path = filename:join(PrivDir, Filename),
	ct:pal(test, 1, "create file ~p", [Path]),
	ok = file:write_file(Path, ""),
	% NOTE(review): when running as root the parse below may still
	% succeed despite mode 000 — confirm the CI user is unprivileged.
	file:change_mode(Path, 8#000),
	Return = arweave_config_file:parse(Path),
	file:change_mode(Path, 8#600),
	Return.

%%--------------------------------------------------------------------
%% @hidden
%% @doc creates an empty file in read-only.
%% @end
%%--------------------------------------------------------------------
file_read(Config) ->
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["read", Extension], ""),
	Path = filename:join(PrivDir, Filename),
	ct:pal(test, 1, "create file ~p", [Path]),
	% fail fast if the fixture cannot be created.
	ok = file:write_file(Path, ""),
	file:change_mode(Path, 8#400),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc creates an empty file in read/write and returns the parser
%% result.
%% @end
%%--------------------------------------------------------------------
file_readwrite(Config) ->
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["readwrite", Extension], ""),
	Path = filename:join(PrivDir, Filename),
	ct:pal(test, 1, "create file ~p", [Path]),
	ok = file:write_file(Path, ""),
	file:change_mode(Path, 8#600),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc check unsafe path (containing "../"). The file is never
%% created on purpose: the parser must reject the path itself.
%% @end
%%--------------------------------------------------------------------
file_unsafe_path(Config) ->
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["unsafe_path", Extension], ""),
	Path = filename:join(["../..", Filename]),
	ct:pal(test, 1, "check unsafe path ~p", [Path]),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc check safe relative path.
%% @end
%%--------------------------------------------------------------------
file_relative_path(Config) ->
	PrivDir = proplists:get_value(priv_dir, Config),
	Extension = proplists:get_value(extension, Config),
	Filename = string:join(["relative_path", Extension], ""),
	{ok, Cwd} = file:get_cwd(),
	% strip the cwd prefix from the priv dir to build a path that is
	% relative to the current working directory.
	Relative =
		case PrivDir -- Cwd of
			[$/ | R] -> R;
			[$., $/ | R] -> R;
			E -> E
		end,
	Path = filename:join(["./", Relative, Filename]),
	ct:pal(test, 1, "check relative path ~p", [Path]),
	% fail fast if the fixture cannot be created.
	ok = file:write_file(Path, ""),
	file:change_mode(Path, 8#644),
	arweave_config_file:parse(Path).

%%--------------------------------------------------------------------
%% @hidden
%% @doc search a term in a list. A string path is normalized to a
%% binary before the exact-equality lookup.
%% @see lists:member/2
%% @end
%%--------------------------------------------------------------------
search_path(Path, List) when is_list(Path) ->
	search_path(list_to_binary(Path), List);
search_path(Path, List) ->
	% lists:member/2 uses exact (term) equality, matching the
	% original `P =:= Path' predicate.
	lists:member(Path, List).
================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_invalid.json ================================================ -- ================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_invalid.toml ================================================ ~~[] ================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_invalid.yaml ================================================ --[ ================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_unsupported.xml ================================================ ================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_valid.json ================================================ { "debug": true, "data": { "directory": "/tmp/data" } } ================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_valid.toml ================================================ # global parameters debug = true data.directory = "/tmp/data" # local http api server [config.http.api] enabled = true listen.port = 4891 listen.address = "127.0.0.1" # default logging parameter, inherited by other logging parameters. 
[logging] max_no_bytes = 51418800 compress_on_rotate = false sync_mode_qlen = 10 drop_mode_qlen = 200 flush_qlen = 1000 burst_limit_enable = true burst_limit_max_count = 500 burst_limit_window_time = 1000 overload_kill_enable = true overload_kill_qlen = 20_000 overload_kill_mem_size = 3_000_000 # debug logging handlers parameters [logging.handlers.debug] # http api logging handlers parameters [logging.handlers.http.api] ###################################################################### # draft: features ###################################################################### # [features.feature_1] # enabled = true # # [features.feature_2] # enabled = false ###################################################################### # draft: network ###################################################################### ###################################################################### # draft: rate limiter ###################################################################### ###################################################################### # draft: peers # Could it be helpful to create some helpers to group easily vdf or # trusted peers? # peers.vdf = [...] # peers.trusted = [...] ###################################################################### # [peers.default] # ... # # [peers."127.0.0.1:1984"] # vdf = true # ... # # [peers."127.0.0.2:1984"] # trusted = true # ... # # [peers."mypeers.arweave.xyz"] # ... # ###################################################################### # draft: storage modules # format: [storage.{type}.{index}] ###################################################################### # [storage.default] # address = "LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw" # # [storage.unpacked.0] # enabled = true # path = ... # size = ... 
# # [storage.spora_2_6.10] # enabled = true # address = "LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw" # # [storage.replica_2_9.0] # enabled = true # address = address_value # repack_in_place = true # # [storage.s # ================================================ FILE: apps/arweave_config/test/arweave_config_file_SUITE_data/config_valid.yaml ================================================ # global parameters debug: true data: directory: "/tmp/data" # local http api server config: http: api: enabled: true listen: port: 4891 address: "127.0.0.1" # default logging parameter, inherited by other logging parameters. logging: max_no_bytes: 51418800 compress_on_rotate: false sync_mode_qlen: 10 drop_mode_qlen: 200 flush_qlen: 1000 burst_limit_enable: true burst_limit_max_count: 500 burst_limit_window_time: 1000 overload_kill_enable: true overload_kill_qlen: 20000 overload_kill_mem_size: 3000000 ================================================ FILE: apps/arweave_config/test/arweave_config_format_SUITE.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2026 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Arweave Config Format Test Suite. %%% @end %%%=================================================================== -module(arweave_config_format_SUITE). -export([suite/0, description/0]). -export([init_per_suite/1, end_per_suite/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([all/0]). -export([ json/1, toml/1, yaml/1, legacy/1 ]). -include("arweave_config.hrl"). -include_lib("common_test/include/ct.hrl"). 
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() ->
	[{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave_config format"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) ->
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) ->
	ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[json, toml, yaml, legacy].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
json(_Config) ->
	% every "empty" representation must decode to an empty map.
	Valid = ["", <<"">>, <<"{}">>, "{}"],
	[{ok, #{}} = arweave_config_format_json:parse(V) || V <- Valid],
	{error, _} = arweave_config_format_json:parse("--"),
	{comment, "tested json format"}.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
toml(_Config) ->
	% both empty input and a simple assignment decode to a map.
	Valid = ["", <<"">>, <<"test = 1">>, "test = 1"],
	[{ok, #{}} = arweave_config_format_toml:parse(V) || V <- Valid],
	{error, _} = arweave_config_format_toml:parse("--[]"),
	{comment, "tested toml format"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
yaml(_Config) ->
	{ok, #{}} = arweave_config_format_yaml:parse(""),
	{ok, #{}} = arweave_config_format_yaml:parse(<<"">>),
	{ok, _} = arweave_config_format_yaml:parse(<<"test: 1\n">>),
	{ok, _} = arweave_config_format_yaml:parse("test: 1\n"),
	{error, _} = arweave_config_format_yaml:parse("--[]"),
	% multi-document yaml streams are rejected.
	{error, _} = arweave_config_format_yaml:parse(<<"---\n---\n">>),
	{comment, "tested yaml format"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
legacy(_Config) ->
	Valid = ["", <<"">>, <<"{}">>, "{}"],
	[{ok, _} = arweave_config_format_legacy:parse(V) || V <- Valid],
	{error, _} = arweave_config_format_legacy:parse("--"),
	{comment, "tested legacy format"}.
================================================
FILE: apps/arweave_config/test/arweave_config_fsm_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2025 (c) Arweave
%%% @doc Arweave Configuration Finite State Machine Test Suite.
%%% @end
%%%===================================================================
-module(arweave_config_fsm_SUITE).
-compile(warnings_as_errors).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([default/1]).
-export([
	fsm_callback_ok/1,
	fsm_callback_ok_state/1,
	fsm_callback_function_transition_state/1,
	fsm_callback_module_transition_state/1,
	fsm_callback_error/1,
	fsm_callback_error_state/1,
	fsm_callback_error_wildcard/1,
	fsm_callback_meta/1,
	fsm_callback_ok_meta/1,
	fsm_callback_ok_meta_state/1
]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() ->
	[{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave_config_fsm test suite"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) ->
	Config.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) ->
	ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[default].

%%--------------------------------------------------------------------
%% @doc exercises every return shape supported by
%% arweave_config_fsm:init/3,4 using the fsm_callback_* fixtures
%% defined below.
%% @end
%%--------------------------------------------------------------------
default(_Config) ->
	ct:pal(test, 1, "set debug"),
	logger:set_module_level(arweave_config_fsm, debug),
	ct:pal(test, 1, "check return without state"),
	{ok, value} =
		arweave_config_fsm:init(?MODULE, fsm_callback_ok, state),
	ct:pal(test, 1, "check return with state"),
	{ok, value, state} =
		arweave_config_fsm:init(?MODULE, fsm_callback_ok_state, state),
	ct:pal(test, 1, "check function transition with state"),
	{ok, value, #{state := test, return := ok}} =
		arweave_config_fsm:init(
			?MODULE,
			fsm_callback_function_transition_state,
			#{state => test}
		),
	ct:pal(test, 1, "check module/function transition with state"),
	{ok, value, #{state := test, return := ok}} =
		arweave_config_fsm:init(
			?MODULE,
			fsm_callback_module_transition_state,
			#{state => test}
		),
	ct:pal(test, 1, "check error without state"),
	{error, _} =
		arweave_config_fsm:init(
			?MODULE, fsm_callback_error, #{state => test}
		),
	ct:pal(test, 1, "check error with state"),
	{error, _, #{state := test}} =
		arweave_config_fsm:init(
			?MODULE, fsm_callback_error_state, #{state => test}
		),
	ct:pal(test, 1, "check evaluation error"),
	{error, _} =
		arweave_config_fsm:init(
			?MODULE, fsm_callback_error_wildcard, #{state => test}
		),
	ct:pal(test, 1, "check evaluation error"),
	{error, _} =
		arweave_config_fsm:init(
			"not_module", <<"bad_callback">>, #{state => test}
		),
	ct:pal(test, 1, "return metadata"),
	{meta, MetaA} =
		arweave_config_fsm:init(
			?MODULE, fsm_callback_meta, #{meta => true}, state
		),
	#{counter := 1, history := _, meta := true} = MetaA,
	ct:pal(test, 1, "return metadata with value"),
	{ok, value, MetaB} =
		arweave_config_fsm:init(
			?MODULE, fsm_callback_ok_meta, #{meta => true}, state
		),
	#{counter := 1, history := _, meta := true} = MetaB,
	ct:pal(test, 1, "return metadata with value and state"),
	{ok, value, NewState, MetaC} =
		arweave_config_fsm:init(
			?MODULE, fsm_callback_ok_meta_state, #{meta => true}, state
		),
	state = NewState,
	#{counter := 1, history := _, meta := true} = MetaC,
	ct:pal(test, 1, "unset debug"),
	logger:set_module_level(arweave_config_fsm, none),
	{comment, "tested"}.

%%--------------------------------------------------------------------
%% @doc fixture: plain ok return without state.
%% @end
%%--------------------------------------------------------------------
fsm_callback_ok(_) ->
	{ok, value}.

%%--------------------------------------------------------------------
%% @doc fixture: ok return carrying the state back.
%% @end
%%--------------------------------------------------------------------
fsm_callback_ok_state(State) ->
	{ok, value, State}.

%%--------------------------------------------------------------------
%% @doc fixture: transition to another callback in the same module.
%% @end
%%--------------------------------------------------------------------
fsm_callback_function_transition_state(State) ->
	{next, fsm_callback_ok_state, State#{return => ok}}.
%%--------------------------------------------------------------------
%% @doc fixture: transition with an explicit module name.
%% @end
%%--------------------------------------------------------------------
fsm_callback_module_transition_state(State) ->
	{next, ?MODULE, fsm_callback_ok_state, State#{return => ok}}.

%%--------------------------------------------------------------------
%% @doc fixture: error return without state.
%% @end
%%--------------------------------------------------------------------
fsm_callback_error(_) ->
	{error, test}.

%%--------------------------------------------------------------------
%% @doc fixture: error return carrying the state back.
%% @end
%%--------------------------------------------------------------------
fsm_callback_error_state(State) ->
	{error, test, State#{return => error}}.

%%--------------------------------------------------------------------
%% @doc fixture: unsupported return shape, must be rejected.
%% @end
%%--------------------------------------------------------------------
fsm_callback_error_wildcard(_) ->
	{unsupported_return}.

%%--------------------------------------------------------------------
%% @doc fixture: bare `meta' return.
%% @end
%%--------------------------------------------------------------------
fsm_callback_meta(_) ->
	meta.

%%--------------------------------------------------------------------
%% @doc fixture: ok return used with metadata enabled.
%% @end
%%--------------------------------------------------------------------
fsm_callback_ok_meta(_) ->
	{ok, value}.

%%--------------------------------------------------------------------
%% @doc fixture: ok return with state, used with metadata enabled.
%% @end
%%--------------------------------------------------------------------
fsm_callback_ok_meta_state(State) ->
	{ok, value, State}.
================================================
FILE: apps/arweave_config/test/arweave_config_http_server_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2025 (c) Arweave
%%% @doc
%%% @end
%%%===================================================================
-module(arweave_config_http_server_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([
	default/1,
	unix_socket/1
]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() ->
	[{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave_config http api interface"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) ->
	% the gun http client is used by the helpers below.
	application:ensure_all_started(gun),
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) ->
	application:stop(gun),
	ok.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	ct:pal(info, 1, "start arweave_config"),
	ok = arweave_config:start(),
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config"),
	ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[
		default,
		unix_socket
	].

%%--------------------------------------------------------------------
%% @doc test `arweave_config' main interface.
%% @end
%%--------------------------------------------------------------------
default(_Config) ->
	% the server can be started as child (under
	% arweave_config_sup). The goal is to enable it on demand only
	% if a specific parameter or environment variable is present.
	ct:pal(test, 1, "start arweave config http server"),
	arweave_config_http_server:start_as_child(),
	% the whole configuration can be seen using /v0/config
	% end-point
	ct:pal(test, 1, "fetch the whole configuration"),
	{ok, 200, D1} = get_path("/v0/config"),
	{true, {success, _}} = is_jsend(D1),
	% any parameters can be fetched using /v0/config/${parameter},
	% they are separated by '/'
	ct:pal(test, 1, "fetch debug parameter"),
	{ok, 200, D2} = get_path("/v0/config/debug"),
	{true, {success, false}} = is_jsend(D2),
	% parameters can be set using a POST method and following the
	% same pattern. At this time, the data sent is untyped (no
	% json support)
	ct:pal(test, 1, "set debug parameter"),
	{ok, 200, D3} = post_path("/v0/config/debug", <<"true">>),
	{true, {success, #{
		<<"new">> := true,
		<<"old">> := false
	}}} = is_jsend(D3),
	% when a parameter was set, the new value should be present.
	ct:pal(test, 1, "fetch debug parameter"),
	{ok, 200, D4} = get_path("/v0/config/debug"),
	{true, {success, true}} = is_jsend(D4),
	% if a bad value is given by the client, an error must be
	% returned, if possible with a message containing the reason.
	ct:pal(test, 1, "set bad value on parameter"),
	{ok, 400, D5} = post_path("/v0/config/debug", <<"random">>),
	{true, {error, _}} = is_jsend(D5),
	% if a parameter is not present, an error should be returned
	% with the reason
	ct:pal(test, 1, "check unknown parameter"),
	{ok, 404, D6} = get_path("/v0/config/parameter/not/found"),
	{true, {error, _}} = is_jsend(D6),
	% arweave environment should be available to the client, at
	% this time, all environment variables are displayed.
	ct:pal(test, 1, "fetch arweave config environment"),
	{ok, 200, D7} = get_path("/v0/environment"),
	{true, {success, _}} = is_jsend(D7),
	ct:pal(test, 1, "stop config http server"),
	arweave_config_http_server:stop_as_child(),
	{comment, "arweave_config_http_server tested "}.

%%--------------------------------------------------------------------
%% @doc checks the http server can listen on a unix domain socket:
%% the socket file must exist while the server runs and be removed
%% once it stops.
%% @end
%%--------------------------------------------------------------------
unix_socket(_Config) ->
	SocketPath = filename:join("/tmp", "./arweave.sock"),
	ct:pal(test, 1, "set socket to ~p", [SocketPath]),
	arweave_config:set([config, http, api, listen, address], SocketPath),
	ct:pal(test, 1, "start arweave config http server"),
	arweave_config_http_server:start_link(),
	timer:sleep(500),
	{ok, _} = file:read_file_info(SocketPath),
	ct:pal(test, 1, "stop arweave config http server"),
	arweave_config_http_server:stop(),
	timer:sleep(500),
	{error, enoent} = file:read_file_info(SocketPath),
	% fixed: was `{command, ...}', which common_test does not
	% recognize as a result annotation; testcases return
	% `{comment, ...}' everywhere else in this file.
	{comment, "unix socket feature tested"}.

%%--------------------------------------------------------------------
%% simple http client for get request.
%%--------------------------------------------------------------------
get_path(Path) ->
	get_path(Path, #{}).

get_path(Path, Opts) ->
	Host = maps:get(host, Opts, "127.0.0.1"),
	Port = maps:get(port, Opts, 4891),
	% @todo host and port should be defined as macros
	{ok, Pid} = gun:open(Host, Port),
	StreamRef = gun:get(Pid, Path),
	body(Pid, StreamRef).
%%--------------------------------------------------------------------
%% simple http client for post request.
%%--------------------------------------------------------------------
post_path(Path, Data) ->
	post_path(Path, Data, #{}).

post_path(Path, Data, Opts) ->
	Host = maps:get(host, Opts, "127.0.0.1"),
	Port = maps:get(port, Opts, 4891),
	% @todo host and port should be defined as macros
	{ok, Pid} = gun:open(Host, Port),
	StreamRef = gun:post(Pid, Path, #{}, Data),
	body(Pid, StreamRef).

%%--------------------------------------------------------------------
%% adapted from https://ninenines.eu/docs/en/gun/2.1/guide/http/
%% Waits for the response head on StreamRef; on `nofin' keeps
%% collecting the body chunks. Returns {ok, Status, Body},
%% {error, Reason} or `timeout' after 1s of silence.
%% fixed: the stream reference passed by the caller is now matched in
%% the gun_response clauses instead of being shadowed by a fresh
%% binding, so messages from unrelated streams are ignored.
%%--------------------------------------------------------------------
body(ConnPid, StreamRef) ->
	receive
		{gun_response, ConnPid, StreamRef, fin, Status, _Headers} ->
			{ok, Status, no_data};
		{gun_response, ConnPid, StreamRef, nofin, Status, _Headers} ->
			receive_data(ConnPid, StreamRef, Status, <<>>);
		{'DOWN', _MRef, process, ConnPid, Reason} ->
			{error, Reason}
	after 1000 ->
		timeout
	end.

%%--------------------------------------------------------------------
%% adapted from https://ninenines.eu/docs/en/gun/2.1/guide/http/
%% Accumulates gun_data chunks for StreamRef until the `fin' flag.
%% fixed: the binary accumulation was garbled in the previous
%% revision (`<>'); restored as `<<Acc/binary, Data/binary>>'.
%%--------------------------------------------------------------------
receive_data(ConnPid, StreamRef, Status, Acc) ->
	receive
		{gun_data, ConnPid, StreamRef, nofin, Data} ->
			receive_data(ConnPid, StreamRef, Status,
				<<Acc/binary, Data/binary>>);
		{gun_data, ConnPid, StreamRef, fin, Data} ->
			{ok, Status, <<Acc/binary, Data/binary>>};
		{'DOWN', _MRef, process, ConnPid, Reason} ->
			{error, Reason}
	after 1000 ->
		timeout
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc decodes a JSend payload; returns {true, {success|fail|error, _}}
%% or false when the payload is not valid JSend/JSON.
%%--------------------------------------------------------------------
is_jsend(Data) ->
	try jiffy:decode(Data, [return_maps]) of
		#{<<"status">> := <<"success">>, <<"data">> := D} ->
			{true, {success, D}};
		#{<<"status">> := <<"fail">>, <<"data">> := D} ->
			{true, {fail, D}};
		#{<<"status">> := <<"error">>, <<"message">> := M} ->
			{true, {error, M}};
		_ ->
			false
	catch
		_:_ -> false
	end.
================================================
FILE: apps/arweave_config/test/arweave_config_legacy_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2025 (c) Arweave
%%% @doc
%%% @end
%%%===================================================================
-module(arweave_config_legacy_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([arweave_config_legacy/1]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() ->
	[{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave_config_legacy test"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) ->
	Config.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) ->
	ok.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
	ct:pal(info, 1, "start arweave_config_legacy"),
	{ok, Pid} = arweave_config_legacy:start_link(),
	[{arweave_config_legacy, Pid} | Config].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
	ct:pal(info, 1, "stop arweave_config_legacy"),
	ok = arweave_config_legacy:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
	[arweave_config_legacy].

%%--------------------------------------------------------------------
%% @doc test `arweave_config' main interface.
%% @end
%%--------------------------------------------------------------------
arweave_config_legacy(_Config) ->
	ct:pal(test, 1, "config keys should be the same"),
	Keys = arweave_config_legacy:keys(),
	ConfigKeys = record_info(fields, config),
	% same length and same elements, in order.
	true = length(Keys) =:= length(ConfigKeys),
	ConfigKeys = Keys,
	ct:pal(test, 1, "check if config keys are present"),
	[true = arweave_config_legacy:has_key(K) || K <- ConfigKeys],
	ct:pal(test, 1, "all values should be set with default"),
	#config{} = arweave_config_legacy:get(),
	Defaults = #config{},
	[
		begin
			{ok, FromRecord} =
				arweave_config_legacy:get_config_value(K, Defaults),
			FromRecord = arweave_config_legacy:get(K)
		end
	 || K <- ConfigKeys
	],
	ct:pal(test, 1, "set init value to true"),
	arweave_config_legacy:set(init, true),
	true = arweave_config_legacy:get(init),
	ct:pal(test, 1, "reset the configuration to default value"),
	arweave_config_legacy:reset(),
	false = arweave_config_legacy:get(init),
	ct:pal(test, 1, "check legacy application env interface"),
	arweave_config_legacy:set_env(#config{init = true}),
	{ok, #config{init = true}} = arweave_config_legacy:get_env(),
	{ok, #config{init = true}} =
		application:get_env(arweave_config, config),
	{comment, "arweave_config_legacy tested"}.

================================================
FILE: apps/arweave_config/test/arweave_config_serializer_SUITE.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @author Arweave Team
%%% @author Mathieu Kerjouan
%%% @copyright 2026 (c) Arweave
%%% @doc Arweave Configuration File Serializer Test Suite.
%%% @end
%%%===================================================================
-module(arweave_config_serializer_SUITE).
-export([suite/0, description/0]).
-export([init_per_suite/1, end_per_suite/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([all/0]).
-export([encoder/1, decoder/1, map_merge/1]).
-include("arweave_config.hrl").
-include_lib("common_test/include/ct.hrl").

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
suite() ->
	[{userdata, [description()]}].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
description() ->
	{description, "arweave config parameters bootstrap"}.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
init_per_suite(Config) ->
	Config.
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_suite(_Config) -> ok.

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
%% The serializer relies on a running arweave_config application.
init_per_testcase(_TestCase, _Config) ->
    ct:pal(info, 1, "start arweave_config"),
    ok = arweave_config:start(),
    [].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
end_per_testcase(_TestCase, _Config) ->
    ct:pal(info, 1, "stop arweave_config"),
    ok = arweave_config:stop().

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() -> [ encoder, decoder, map_merge ].

%%--------------------------------------------------------------------
%% @doc Checks encode/1: a nested map is flattened into a map whose
%% keys are paths (lists of atoms or integers). Binary and string keys
%% appear to be converted to atoms when possible.
%% @end
%%--------------------------------------------------------------------
encoder(_Config) ->
    ct:pal(test, 1, "check encoder with empty value"),
    {ok, #{}} = arweave_config_serializer:encode(#{}),
    ct:pal(test, 1, "check encoder with 1 key"),
    {ok, #{ [1] := 1 }} = arweave_config_serializer:encode(#{ 1 => 1 }),
    ct:pal(test, 1, "check encoder with binary key"),
    {ok, #{ [test] := 2 }} = arweave_config_serializer:encode(#{ <<"test">> => 2 }),
    ct:pal(test, 1, "check encoder with list key"),
    {ok, #{ [test] := 3 }} = arweave_config_serializer:encode(#{ "test" => 3 }),
    ct:pal(test, 1, "check encoder with recursive map"),
    {ok, #{ [1,2,3] := 4 }} =
        arweave_config_serializer:encode(#{ 1 => #{ 2 => #{ 3 => 4 }}}),
    ct:pal(test, 1, "full encoding test"),
    % note: keys with no existing atom (the random binaries below) are
    % kept as binaries in the resulting path.
    Map = #{
        <<"data">> => #{
            <<"directory">> => <<"/path/to/data">>,
            1 => 2,
            a => b
        },
        <<"logging">> => #{
            <<"debug">> => #{ <<"enabled">> => true }
        },
        <<"random_uRxsNKiM">> => #{
            <<"random_gblL5sdA">> => []
        },
        "test" => #{ <<"test">> => #{ test => #{ data => test } } }
    },
    Result = #{
        [data,directory] => <<"/path/to/data">>,
        [data,1] => 2,
        [data,a] => b,
        [logging,debug,enabled] => true,
        [<<"random_uRxsNKiM">>, <<"random_gblL5sdA">>] => [],
        [test,test,test,data] => test
    },
    {ok, Result} = arweave_config_serializer:encode(Map),
    {comment, "encoder tested"}.

%%--------------------------------------------------------------------
%% @doc Checks decode/1, the inverse of encode/1: path keys are turned
%% back into nested maps; a value attached to a prefix of a longer path
%% is stored under the '_' key of the nested map.
%% @end
%%--------------------------------------------------------------------
decoder(_Config) ->
    ct:pal(test, 1, "check decoder with empty value"),
    {ok, #{}} = arweave_config_serializer:decode(#{}),
    ct:pal(test, 1, "check decoder with complex value"),
    {ok, #{ 1 := #{ '_' := 1, 2 := test, 3 := data },
            t := #{ 1 := #{ test := data } } }} =
        arweave_config_serializer:decode(#{
            [1] => 1,
            [1,2] => test,
            [1,3] => data,
            [t,1,test] => data
        }),
    ct:pal(test, 1, "full decoding test"),
    Result = #{ 1 => #{ 2 => #{ 3 => #{ '_' => 4, a => b } } } },
    {ok, Result} = arweave_config_serializer:decode(#{
        [1,2,3] => 4,
        [1,2,3,a] => b
    }),
    {comment, "decoder tested"}.

%%--------------------------------------------------------------------
%% @doc Checks map_merge/1: a list of nested maps is deep-merged into
%% one map, with scalar-vs-map conflicts resolved via the '_' key.
%% @end
%%--------------------------------------------------------------------
map_merge(_Config) ->
    ct:pal(test, 1, "test merge map"),
    Result = #{ 1 => #{ '_' => 1, 2 => test, 3 => data },
                t => #{ 1 => #{ test => data } } },
    Result = arweave_config_serializer:map_merge([
        #{ 1 => 1 },
        #{ 1 => #{ 2 => test } },
        #{ 1 => #{ 3 => data } },
        #{ t => #{ 1 => #{ test => data }}}
    ]),
    {comment, "map merger tested"}.
================================================ FILE: apps/arweave_config/test/arweave_config_spec_SUITE.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @copyright 2025 (c) Arweave %%% @doc %%% @end %%%=================================================================== -module(arweave_config_spec_SUITE). -export([suite/0, description/0]). -export([init_per_suite/1, end_per_suite/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([all/0]). -export([specs/1]). -export([ default/1, default_value/1, default_type/1, default_get/1, default_set/1, default_set_state/1, default_multi/1, default_runtime/1, default_multi_types/1, default_inherit/1, default_environment/1 ]). -include("arweave_config.hrl"). -include_lib("common_test/include/ct.hrl"). %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- suite() -> [{userdata, [description()]}]. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- description() -> {description, "arweave configuration spec interface"}. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- init_per_suite(Config) -> Config. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- end_per_suite(_Config) -> ok. 
%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
%% Each test case gets its own arweave_config, arweave_config_store
%% and arweave_config_spec processes; the spec server is seeded with
%% the per-testcase specification returned by specs/1.
init_per_testcase(TestCase, Config) ->
    % required for runtime parameter
    ct:pal(info, 1, "start arweave_config"),
    {ok, _} = arweave_config:start_link(),
    % required for configuration storage
    ct:pal(info, 1, "start arweave_config_store"),
    {ok, PidStore} = arweave_config_store:start_link(),
    % required for specification
    ct:pal(info, 1, "Start arweave_config_spec"),
    Specs = specs(TestCase),
    {ok, PidSpec} = arweave_config_spec:start_link(Specs),
    [ {arweave_config_store, PidStore},
      {arweave_config_spec, PidSpec}
      | Config ].

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
%% Stop the three processes in reverse dependency order.
end_per_testcase(_TestCase, _Config) ->
    ct:pal(info, 1, "stop arweave_config_spec"),
    ok = arweave_config_spec:stop(),
    ct:pal(info, 1, "stop arweave_config_store"),
    ok = arweave_config_store:stop(),
    ct:pal(info, 1, "stop arweave_config"),
    ok = gen_server:stop(arweave_config).

%%--------------------------------------------------------------------
%% @hidden
%%--------------------------------------------------------------------
all() ->
    [
        default,
        default_value,
        default_type,
        default_get,
        default_set,
        default_set_state,
        default_multi,
        default_runtime,
        default_multi_types,
        default_inherit,
        default_environment
    ].

%%--------------------------------------------------------------------
%% @doc A bare parameter spec: no default value, get/1 fails until a
%% value has been set, and set/2 returns both new and old values.
%% @end
%%--------------------------------------------------------------------
default(_Config) ->
    % default parameter only have all value by default, present
    % from the spec. In this case, it should return an error.
    {error, undefined} = arweave_config_spec:get([default]),
    % when setting a value, we should see the new value and the
    % old value. The value should also be present in the store
    {ok, test, undefined} = arweave_config_spec:set([default], test),
    {ok, test} = arweave_config_store:get([default]).

%%--------------------------------------------------------------------
%% @doc A spec with a default value: the default is visible through
%% the spec but absent from the store until explicitly set.
%% @end
%%--------------------------------------------------------------------
default_value(_Config) ->
    % a parameter with a default value
    {ok, true} = arweave_config_spec:get([default_value]),
    {error, undefined} = arweave_config_store:get([default_value]),
    {ok, false, true} = arweave_config_spec:set([default_value], false),
    {ok, false} = arweave_config_store:get([default_value]).

%%--------------------------------------------------------------------
%% @doc A spec with a declared type: setting a value of the wrong type
%% is rejected.
%% @end
%%--------------------------------------------------------------------
default_type(_Config) ->
    % a parameter with a defined type
    {ok, true, undefined} = arweave_config_spec:set([default_type], true),
    {error, _, _} = arweave_config_spec:set([default_type], "not a boolean").

%%--------------------------------------------------------------------
%% @doc A spec with a custom handle_get callback.
%% @end
%%--------------------------------------------------------------------
default_get(_Config) ->
    % a parameter with a specific get
    {ok, valid} = arweave_config_spec:get([default_get]).

%%--------------------------------------------------------------------
%% @doc A spec with a custom handle_set callback; the callback sends a
%% message back to the test process, proving it was invoked.
%% @end
%%--------------------------------------------------------------------
default_set(_Config) ->
    % a parameter with a specific set
    {ok, ok, undefined} = arweave_config_spec:set([default_set], self()),
    ok = receive ok -> ok after 10 -> error end.

%%--------------------------------------------------------------------
%% @doc A handle_set callback that inspects the current configuration
%% state and alternates the stored value between `empty' and `full'.
%% @end
%%--------------------------------------------------------------------
default_set_state(_Config) ->
    % set state
    {ok, empty, undefined} = arweave_config_spec:set([default_set_state], ok),
    {ok, full, empty} = arweave_config_spec:set([default_set_state], ok),
    {comment, "set state tested"}.
%%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- default_multi(_Config) -> % default value to 1 ct:pal(test, 1, "set value to 1"), {ok, 1} = arweave_config_spec:get([one]), {ok, one, 1} = arweave_config_spec:set([one], one), % on default value, but always return 3 ct:pal(test, 1, "get value"), {ok, 3} = arweave_config_spec:get([three]), {ok, 3, undefined} = arweave_config_spec:set([three], any), {comment, "multi spec tested"}. %%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- default_runtime(_Config) -> ct:pal(test, 1, "initial state not in runtime"), false = arweave_config:is_runtime(), {ok, 1, undefined} = arweave_config_spec:set([default], 1), {ok, 1, undefined} = arweave_config_spec:set([runtime], 1), {ok, 1, undefined} = arweave_config_spec:set([not_runtime], 1), ct:pal(test, 1, "swith to runtime"), ok = arweave_config:runtime(), true = arweave_config:is_runtime(), % by default, a parameter without runtime feature is not % allowed to be set during runtime {error, _} = arweave_config_spec:set([default], 2), % runtime parameter can be set during runtime {ok, 2, 1} = arweave_config_spec:set([runtime], 2), % not_runtime parameter can't be set during runtime {error, _} = arweave_config_spec:set([not_runtime], 2). %%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- default_multi_types(_Config) -> ct:pal(test, 1, "set a boolean"), {ok, true, undefined} = arweave_config_spec:set([default], <<"true">>), ct:pal(test, 1, "set an integer"), {ok, 1, true} = arweave_config_spec:set([default], 1), ct:pal(test, 1, "set an ipv4"), {ok, <<"127.0.0.1">>, 1} = arweave_config_spec:set([default], <<"127.0.0.1">>), {comment, "multi types tested"}. 
%%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- default_inherit(_Config) -> ct:pal(test, 1, "check inheritance"), {ok, true} = arweave_config:get([default]), {ok, true} = arweave_config:get([inherit,all]), {ok, 1} = arweave_config:get([inherit,nothing]), ct:pal(test, 1, "check specs from ets"), [{_, #{ type := boolean, default := true}}] = ets:lookup(arweave_config_spec, [default]), [{_, #{ type := boolean, default := true}}] = ets:lookup(arweave_config_spec, [inherit, all]), [{_, #{ type := boolean }}] = ets:lookup(arweave_config_spec, [inherit, type]), [{_, #{ default := true }}] = ets:lookup(arweave_config_spec, [inherit, default]), [{_, #{ default := 1, type := pos_integer }}] = ets:lookup(arweave_config_spec, [inherit, nothing]), {comment, "inherit feature tested"}. %%-------------------------------------------------------------------- %% @doc %% @end %%-------------------------------------------------------------------- default_environment(_Config) -> ct:pal(test, 1, "get the list of environment variables"), Env = arweave_config_spec:get_environments(), ct:pal(test, 1, "check disabled environment"), false = lists:search(fun ({_, [environment,disabled]}) -> true; (_) -> false end, Env ), ct:pal(test, 1, "check enabled environment (generated)"), {value, {<<"AR_ENVIRONMENT_ENABLED">>, [environment,enabled]}} = lists:search(fun ({_,[environment,enabled]}) -> true; (_) -> false end, Env ), ct:pal(test, 1, "check custom enabled environment"), {value, {<<"CUSTOM">>, [environment,custom]}} = lists:search(fun ({_,[environment,custom]}) -> true; (_) -> false end, Env ), {comment, "environment feature tested"}. %%-------------------------------------------------------------------- %% @doc defines custom parameters for tests. 
%% @end %%-------------------------------------------------------------------- specs(default) -> [ #{ parameter_key => [default] } ]; specs(default_value) -> [ #{ parameter_key => [default_value], default => true } ]; specs(default_type) -> [ #{ parameter_key => [default_type], type => boolean } ]; specs(default_get) -> [ #{ parameter_key => [default_get], handle_get => fun (K, _S) -> {ok, valid} end } ]; specs(default_set) -> [ #{ parameter_key => [default_set], handle_set => fun (K, V, S, _) -> V ! ok, {ok, ok} end } ]; specs(default_set_state) -> [ #{ parameter_key => [default_set_state], handle_set => fun (_K, _V, #{ config := Config }, _) -> case Config of #{ default_set_state := empty } -> {store, full}; _ -> {store, empty} end end } ]; specs(default_multi) -> [ #{ parameter_key => [one], default => 1 }, #{ parameter_key => [three], handle_get => fun (_K, _S) -> {ok, 3} end, handle_set => fun (_K, _V, _S, _) -> {ok, 3} end } ]; specs(default_runtime) -> [ #{ parameter_key => [default] }, #{ parameter_key => [runtime], runtime => true }, #{ parameter_key => [not_runtime], runtime => false } ]; specs(default_multi_types) -> [ #{ parameter_key => [default], type => [boolean, integer, ipv4] } ]; specs(default_inherit) -> [ #{ parameter_key => [default], type => boolean, default => true }, #{ parameter_key => [inherit,all], inherit => [default] }, #{ parameter_key => [inherit,type], inherit => {[default], [type]} }, #{ parameter_key => [inherit,default], inherit => {[default], [default]} }, #{ parameter_key => [inherit,nothing], inherit => [default], type => pos_integer, default => 1 } ]; specs(default_environment) -> [ #{ parameter_key => [environment,disabled], environment => false }, #{ parameter_key => [environment,enabled], environment => true }, #{ parameter_key => [environment,custom], environment => <<"CUSTOM">> } ]. 
================================================ FILE: apps/arweave_config/test/arweave_config_store_SUITE.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @copyright 2025 (c) Arweave %%% @doc %%% @end %%%=================================================================== -module(arweave_config_store_SUITE). -export([suite/0, description/0]). -export([init_per_suite/1, end_per_suite/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([all/0]). -export([arweave_config_store/1]). -include("arweave_config.hrl"). -include_lib("common_test/include/ct.hrl"). %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- suite() -> [{userdata, [description()]}]. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- description() -> {description, "arweave configuration store interface"}. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- init_per_suite(Config) -> Config. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- end_per_suite(_Config) -> ok. 
%%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- init_per_testcase(_TestCase, Config) -> ct:pal(info, 1, "start arweave_config_store"), {ok, Pid} = arweave_config_store:start_link(), [{arweave_config_store, Pid}|Config]. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- end_per_testcase(_TestCase, _Config) -> ct:pal(info, 1, "stop arweave_config_store"), ok = arweave_config_store:stop(). %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- all() -> [ arweave_config_store ]. %%-------------------------------------------------------------------- %% @doc test `arweave_config_store' storage interface. %% @end %%-------------------------------------------------------------------- arweave_config_store(_Config) -> ct:pal(test, 1, "check undefined parameter"), {error, undefined} = arweave_config_store:get("test"), ct:pal(test, 1, "try to delete an undefined parameter"), {error, undefined} = arweave_config_store:delete("test"), ct:pal(test, 1, "ensure default parameter is working"), default = arweave_config_store:get("test", default), ct:pal(test, 1, "set a new parameter"), {ok, {[test], data}} = arweave_config_store:set("test", data), ct:pal(test, 1, "get an existing parameter"), {ok, data} = arweave_config_store:get("test"), ct:pal(test, 1, "delete an existing parameter"), {ok, {[test], data}} = arweave_config_store:delete("test"), ct:pal(test, 1, "ensure the paramater was removed"), {error, undefined} = arweave_config_store:get("test"), {comment, "arweave_config_store process tested"}. 
================================================ FILE: apps/arweave_config/test/arweave_config_type_SUITE.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @copyright 2025 (c) Arweave %%% @doc %%% @end %%%=================================================================== -module(arweave_config_type_SUITE). -export([suite/0, description/0]). -export([init_per_suite/1, end_per_suite/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([all/0]). -export([ none/1, any/1, boolean/1, atom/1, integer/1, pos_integer/1, ipv4/1, path/1, base64/1, base64url/1, tcp_port/1, file/1, logging_template/1 ]). -include_lib("common_test/include/ct.hrl"). %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- suite() -> [{userdata, [description()]}]. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- description() -> {description, "arweave_config_type test interface"}. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- init_per_suite(Config) -> Config. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- end_per_suite(_Config) -> ok. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- init_per_testcase(_TestCase, Config) -> Config. 
%%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- end_per_testcase(_TestCase, _Config) -> ok. %%-------------------------------------------------------------------- %% @hidden %%-------------------------------------------------------------------- all() -> [ none, any, atom, integer, boolean, integer, pos_integer, ipv4, path, base64, base64url, tcp_port, file, logging_template ]. none(_Config) -> {error, 1} = arweave_config_type:none(1). any(_Config) -> {ok, 1} = arweave_config_type:any(1). atom(_Config) -> {ok, atom} = arweave_config_type:atom(atom), {ok, atom} = arweave_config_type:atom(<<"atom">>), {ok, atom} = arweave_config_type:atom("atom"). boolean(_Config) -> [ {ok, true} = arweave_config_type:boolean(X) || X <- [<<"true">>, "true", true] ], [ {ok, false} = arweave_config_type:boolean(X) || X <- [<<"false">>, "false", false] ], {error, not_boolean} = arweave_config_type:boolean(not_boolean). integer(_Config) -> {ok, 1} = arweave_config_type:integer(1), {ok, 1} = arweave_config_type:integer("1"), {ok, 1} = arweave_config_type:integer(<<"1">>), {error, a} = arweave_config_type:integer(a). pos_integer(_Config) -> {ok, 1} = arweave_config_type:pos_integer(1), {error, -1} = arweave_config_type:pos_integer(-1). ipv4(_Config) -> {ok, <<"127.0.0.1">>} = arweave_config_type:ipv4("127.0.0.1"), {ok, <<"127.0.0.1">>} = arweave_config_type:ipv4({127,0,0,1}), {ok, <<"127.0.0.1">>} = arweave_config_type:ipv4(<<"127.0.0.1">>), {error, _ } = arweave_config_type:ipv4(test). path(Config) -> _PrivDir = proplists:get_value(priv_dir, Config), {ok, Cwd} = file:get_cwd(), % absolute path {ok, <<"/">>} = arweave_config_type:path(<<"/">>), {ok, <<"/">>} = arweave_config_type:path("/"), % relative path: convert automatically in absolute path CwdBinary = list_to_binary(Cwd), {ok, CwdBinary} = arweave_config_type:path(<<"./">>), {ok, CwdBinary} = arweave_config_type:path("./"). 
base64(_Config) -> {ok, <<"test">>} = arweave_config_type:base64("dGVzdA=="), {ok, <<"test">>} = arweave_config_type:base64(<<"dGVzdA==">>). base64url(_Config) -> {ok, <<"test">>} = arweave_config_type:base64url("dGVzdA"), {ok, <<"test">>} = arweave_config_type:base64url(<<"dGVzdA">>). tcp_port(_Config) -> {ok, 0} = arweave_config_type:tcp_port(0), {ok, 65535} = arweave_config_type:tcp_port(65535), {ok, 1234} = arweave_config_type:tcp_port(1234), {ok, 1234} = arweave_config_type:tcp_port("1234"), {ok, 1234} = arweave_config_type:tcp_port(<<"1234">>), {error, 78912} = arweave_config_type:tcp_port(<<"78912">>). file(_Config) -> ct:pal(test, 1, "test absolute path and path as binary"), {ok, <<"/tmp/arweave.sock">>} = arweave_config_type:file(<<"/tmp/arweave.sock">>), ct:pal(test, 1, "test relative path and path as list"), {ok, P1} = arweave_config_type:file("./arweave.sock"), true = is_binary(P1), ct:pal(test, 1, "test a wrong path"), {error, _} = arweave_config_type:file("/random/t/a/b/c.sock"), ct:pal(test, 1, "test a file without write access"), {error, _} = arweave_config_type:file("/root/data/arweave.sock"), ct:pal(test, 1, "test a wrong erlang type"), {error, _} = arweave_config_type:file(1234), ok. 
logging_template(_Config) -> ct:pal(test, 1, "valid template can be string"), {ok, ["test", "\n"]} = arweave_config_type:logging_template("test"), ct:pal(test, 1, "valid template can be a binary"), {ok, ["test","\n"]} = arweave_config_type:logging_template(<<"test">>), ct:pal(test, 1, "An atom start with %"), {ok, [test,"\n"]} = arweave_config_type:logging_template(<<"%test">>), ct:pal(test, 1, "a string and an atom can be part of the same template"), {ok, ["message:", " ", test, "\n"]} = arweave_config_type:logging_template("message: %test"), ct:pal(test, 1, "an atom must start with a null char"), {ok, ["message:%test","\n"]} = arweave_config_type:logging_template("message:%test"), ct:pal(test, 1, "an atom must only use [a-zA-Z_] chars"), {error, _} = arweave_config_type:logging_template("%test!#&"), ct:pal(test, 1, "an atom must exist"), {error, _} = arweave_config_type:logging_template("%total_random_atom"), ok. ================================================ FILE: apps/arweave_diagnostic/README.md ================================================ ================================================ FILE: apps/arweave_diagnostic/include/.gitkeep ================================================ ================================================ FILE: apps/arweave_diagnostic/priv/.gitkeep ================================================ ================================================ FILE: apps/arweave_diagnostic/src/arweave_diagnostic.app.src ================================================ {application, arweave_diagnostic, [ {id, "arweave_diagnostic"}, {description, "Arweave Diagnostic Tool"}, {vsn, "0.0.1"}, {mod, {arweave_diagnostic, []}}, {env, []}, {applications, [ kernel, stdlib, sasl ]}, {modules, [ arweave_diagnostic ]}, {registered, [ ]} ]}. 
================================================ FILE: apps/arweave_diagnostic/src/arweave_diagnostic.erl ================================================ %%%=================================================================== %%% GNU General Public License, version 2 (GPL-2.0) %%% The GNU General Public License (GPL-2.0) %%% Version 2, June 1991 %%% %%% ------------------------------------------------------------------ %%% %%% @copyright 2025 (c) Arweave %%% @author Arweave Team %%% @author Mathieu Kerjouan %%% @doc Arweave Diagnostic Module. %%% %%% This module has been created to display detailed information about %%% an Arweave application, including information from the BEAM as %%% well. %%% %%% @todo create diagnostic for epmd %%% @todo create diagnostic for timers %%% @end %%%=================================================================== -module(arweave_diagnostic). -compose(warnings_as_errors). -compile({no_auto_import,[processes/0]}). -compile({no_auto_import,[process_info/1]}). -export([ all/0, select/1, cpu/0, memory/0, memory_worst/0, network/0, processes/0, sockets/0, arweave/0, ets/0, dets/0, rocksdb/0 ]). -include_lib("kernel/include/logger.hrl"). -type diagnostic() :: cpu | memory | memory_worst | processes | arweave | ets | dets. %%-------------------------------------------------------------------- %% @doc returns all diagnostic available. %% @end %%-------------------------------------------------------------------- -spec all() -> proplists:proplist(). all() -> select([ cpu, memory, memory_worst, network, processes, arweave, ets, dets, rocksdb ]). %%-------------------------------------------------------------------- %% @doc returns a subset of supported diagnostics. %% @end %%-------------------------------------------------------------------- -spec select([diagnostic()]) -> proplists:proplist(). select(List) -> select(List, []). 
select([], Buffer) -> Buffer; select([cpu|Rest], Buffer) -> select(Rest,[{cpu,cpu()}|Buffer]); select([memory|Rest], Buffer) -> select(Rest,[{memory,memory()}|Buffer]); select([memory_worst|Rest], Buffer) -> select(Rest,[{memory_worst,memory_worst()}|Buffer]); select([network|Rest], Buffer) -> select(Rest,[{network,network()}|Buffer]); select([processes|Rest], Buffer) -> select(Rest,[{processes,processes()}|Buffer]); select([arweave|Rest], Buffer) -> select(Rest,[{arweave,arweave()}|Buffer]); select([ets|Rest], Buffer) -> select(Rest,[{ets_tables,ets()}|Buffer]); select([dets|Rest], Buffer) -> select(Rest,[{dets_tables,dets()}|Buffer]); select([rocksdb|Rest], Buffer) -> select(Rest,[{rocksdb,rocksdb()}|Buffer]); select([_|Rest], Buffer) -> select(Rest, Buffer). %%-------------------------------------------------------------------- %% @doc Returns cpu diagnostic. Most of the cpu information are being %% collected using `cpu_sup' module, this is then, a dependency. %% @end %%-------------------------------------------------------------------- -spec cpu() -> proplists:proplist(). cpu() -> try display([ {nprocs, cpu_sup:nprocs()}, {avg1, cpu_sup:avg1()}, {avg5, cpu_sup:avg5()}, {avg15, cpu_sup:avg15()}, {util, cpu_sup:util()}, {ping, cpu_sup:ping()} ], cpu) catch _:_ -> ?LOG_WARNING("cpu_sup not started"), [] end. %%-------------------------------------------------------------------- %% @doc Returns memory diagnostic. Most of the memory information are %% collected using `memsup' module. This is then a dependency. %% @end %%-------------------------------------------------------------------- -spec memory() -> proplists:proplist(). memory() -> try Memory = memsup:get_system_memory_data(), display(Memory, memory) catch _:_ -> ?LOG_WARNING("memsup not started"), [] end. %%-------------------------------------------------------------------- %% @doc Returns the pid using the greatest amount of memory. This %% function is using `memsup' module. 
%% @end %%-------------------------------------------------------------------- -spec memory_worst() -> proplists:proplist(). memory_worst() -> try memsup:get_memory_data() of {Total, Allocated, {Pid, PidAllocated}} -> Info = process_info(Pid), Name = proplists:get_value(registered_name, Info), HeapSize = proplists:get_value(heap_size, Info), TotalHeapSize = proplists:get_value(total_heap_size, Info), StackSize = proplists:get_value(stack_size, Info), Reductions = proplists:get_value(reductions, Info), MsgQueue = proplists:get_value(message_queue_len, Info), Status = proplists:get_value(status, Info), display([ {total, Total}, {allocated, Allocated}, {worst_pid, Pid}, {worst_pid_allocated, PidAllocated}, {worst_pid_name, Name}, {worst_pid_total_heap_size, TotalHeapSize}, {worst_pid_heap_size, HeapSize}, {worst_pid_stack_size, StackSize}, {worst_pid_reductions, Reductions}, {worst_pid_message_queue, MsgQueue}, {worst_pid_status, Status} ], memory_worst) catch _:_ -> ?LOG_WARNING("memsup is not started"), [] end. %%-------------------------------------------------------------------- %% @doc Returns network diagnostic. %% @end %%-------------------------------------------------------------------- -spec network() -> proplists:proplist(). network() -> Sockets = sockets(), display([ {sockets, length(Sockets)} ], network). %%-------------------------------------------------------------------- %% @doc Returns sockets (network ports) diagnostic. %% @end %%-------------------------------------------------------------------- sockets() -> Ports = erlang:ports(), Sockets = lists:filter(fun is_network_port/1, Ports), [ {socket, socket_info(S)} || S <- Sockets ]. %%-------------------------------------------------------------------- %% @doc Returns processes diagnostic. %% @end %%-------------------------------------------------------------------- -spec processes() -> proplists:proplist(). processes() -> Processes = erlang:processes(), [ process_info(P) || P <- Processes ]. 
%%--------------------------------------------------------------------
%% @hidden
%% @doc wrapper around `erlang:process_info/1'.
%% Returns [] when the process is dead (undefined) or when the call
%% raises, so callers never have to guard against failures.
%% @end
%%--------------------------------------------------------------------
-spec process_info(Pid) -> Return when
	Pid :: pid(),
	Return :: proplists:proplist().
process_info(Pid) ->
	try erlang:process_info(Pid) of
		undefined -> [];
		Info -> process_info2(Pid, Info)
	catch
		_:_ -> []
	end.

%% @hidden
%% @doc Flattens the raw process_info proplist into the diagnostic
%% shape consumed by display/2. Any field absent from Info comes back
%% as undefined.
process_info2(Pid, Info) ->
	RegisteredName = proplists:get_value(registered_name, Info),
	Status = proplists:get_value(status, Info),
	MsgQueueLen = proplists:get_value(message_queue_len, Info),
	TrapExit = proplists:get_value(trap_exit, Info),
	Priority = proplists:get_value(priority, Info),
	GroupLeader = proplists:get_value(group_leader, Info),
	TotalHeapSize = proplists:get_value(total_heap_size, Info),
	HeapSize = proplists:get_value(heap_size, Info),
	StackSize = proplists:get_value(stack_size, Info),
	Reductions = proplists:get_value(reductions, Info),
	GC = proplists:get_value(garbage_collection, Info),
	GC_MinBinVHeapSize = proplists:get_value(min_bin_vheap_size, GC),
	GC_MinHeapSize = proplists:get_value(min_heap_size, GC),
	GC_FullsweepAfter = proplists:get_value(fullsweep_after, GC),
	GC_MinorGCS = proplists:get_value(minor_gcs, GC),
	%% The formatted location is an iolist (io_lib:format result), not a
	%% flat string; the process may also die between the two
	%% process_info calls, hence the extra try.
	CurrentFunction =
		try erlang:process_info(Pid, current_location) of
			{current_location,{M,F,A,_}} ->
				io_lib:format("~s:~s/~b", [M,F,A]);
			_ -> undefined
		catch
			_:_ -> undefined
		end,
	Memory =
		try erlang:process_info(Pid, memory) of
			{memory, Mem} -> Mem;
			_ -> undefined
		catch
			_:_ -> undefined
		end,
	display([
		{pid, Pid},
		{status, Status},
		{location, CurrentFunction},
		{memory, Memory},
		{group_leader, GroupLeader},
		{heap_size, HeapSize},
		{message_queue_len, MsgQueueLen},
		{priority, Priority},
		{reductions, Reductions},
		{registered_name, RegisteredName},
		{stack_size, StackSize},
		{total_heap_size, TotalHeapSize},
		{trap_exit, TrapExit},
		{gc_min_bin_vheap_size, GC_MinBinVHeapSize},
		{gc_min_heap_size, GC_MinHeapSize},
		{gc_fullsweep_after, GC_FullsweepAfter},
		{gc_minor_gcs, GC_MinorGCS}
	], process).

%%--------------------------------------------------------------------
%% @hidden
%% @doc Check if a port is a network socket (tcp, udp, sctp).
%% port_info/1 returns undefined for dead ports, which makes the
%% proplists lookup raise; the catch turns that into false.
%% @end
%%--------------------------------------------------------------------
-spec is_network_port(Port) -> Return when
	Port :: port(),
	Return :: boolean().
is_network_port(Port) ->
	try
		Info = erlang:port_info(Port),
		proplists:get_value(name, Info)
	of
		"tcp_inet" -> true;
		"udp_inet" -> true;
		"sctp_inet" -> true;
		_ -> false
	catch
		_:_ -> false
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc A function helper to gather information about a socket.
%% Returns [] when the socket is gone or inet:info/1 fails.
%% @end
%%--------------------------------------------------------------------
-spec socket_info(Socket) -> Return when
	Socket :: port(),
	Return :: proplists:proplist().
socket_info(Socket) ->
	try inet:info(Socket) of
		undefined -> [];
		Info -> socket_info2(Socket, Info)
	catch
		_:_ -> []
	end.

%% @hidden
%% @doc Extracts the interesting fields and byte/packet counters from
%% the inet:info/1 map; absent entries default to undefined.
socket_info2(Socket, Info) ->
	Counters = maps:get(counters, Info, #{}),
	Active = maps:get(active, Info, undefined),
	Domain = maps:get(domain, Info, undefined),
	NumAcceptors = maps:get(num_acceptors, Info, undefined),
	NumReaders = maps:get(num_readers, Info, undefined),
	NumWriters = maps:get(num_writers, Info, undefined),
	Protocol = maps:get(protocol, Info, undefined),
	RecvAvg = maps:get(recv_avg, Counters, undefined),
	RecvCnt = maps:get(recv_cnt, Counters, undefined),
	RecvDvi = maps:get(recv_dvi, Counters, undefined),
	RecvMax = maps:get(recv_max, Counters, undefined),
	RecvOct = maps:get(recv_oct, Counters, undefined),
	SendAvg = maps:get(send_avg, Counters, undefined),
	SendCnt = maps:get(send_cnt, Counters, undefined),
	SendMax = maps:get(send_max, Counters, undefined),
	SendOct = maps:get(send_oct, Counters, undefined),
	SendPend = maps:get(send_pend, Counters, undefined),
	%% peername/sockname fail for unconnected or closed sockets; fall
	%% back to undefined rather than aborting the whole diagnostic.
	{PeerIP,PeerPort} =
		case inet:peername(Socket) of
			{ok, {PI,PP}} -> {inet:ntoa(PI), PP};
			_ -> {undefined, undefined}
		end,
	{SockIP,SockPort} =
		case inet:sockname(Socket) of
			{ok, {SI, SP}} -> {inet:ntoa(SI), SP};
			_ -> {undefined, undefined}
		end,
	display([
		{socket, Socket},
		{active, Active},
		{domain, Domain},
		{protocol, Protocol},
		{peer_ip, PeerIP},
		{peer_port, PeerPort},
		{sock_ip, SockIP},
		{sock_port, SockPort},
		{num_acceptors, NumAcceptors},
		{num_readers, NumReaders},
		{num_writers, NumWriters},
		{recv_oct, RecvOct},
		{recv_avg, RecvAvg},
		{recv_cnt, RecvCnt},
		{recv_dvi, RecvDvi},
		{recv_max, RecvMax},
		{send_avg, SendAvg},
		{send_cnt, SendCnt},
		{send_max, SendMax},
		{send_oct, SendOct},
		{send_pend, SendPend}
	], socket).

%%--------------------------------------------------------------------
%% @doc Returns arweave diagnostic. This function should display more
%% information than the `processes/0' diagnostic. The applications to
%% be checked are `arweave', `arweave_config'.
%%
%% - check processes (like in `processes/0')
%% - check ETS tables
%% - check workers status
%%
%% This function will become huge, and should probably be migrated
%% into its own module.
%%
%% @end
%%--------------------------------------------------------------------
-spec arweave() -> proplists:proplist().
arweave() ->
	arweave_processes().

%% @hidden
%% @doc Lists processes sharing ar_sup's group leader, i.e. processes
%% presumed to belong to the arweave application.
arweave_processes() ->
	case get_process_group_leader(ar_sup) of
		undefined -> [];
		Leader -> arweave_processes(Leader)
	end.

arweave_processes(Leader) ->
	Processes = erlang:processes(),
	%% Keep only the processes whose group leader matches Leader; each
	%% kept entry is logged under the arweave_processes category.
	[ display(N, arweave_processes) ||
		N <- [ process_info(P) || P <- Processes ],
		proplists:get_value(group_leader, N) =:= Leader ].

%%--------------------------------------------------------------------
%% @hidden
%% @doc extract process group leader of a pid or registered process.
%% @end
%%--------------------------------------------------------------------
get_process_group_leader(undefined) -> undefined;
get_process_group_leader(Atom) when is_atom(Atom) ->
	%% whereis/1 yields undefined for unregistered names, which the
	%% first clause absorbs.
	get_process_group_leader(whereis(Atom));
get_process_group_leader(Pid) when is_pid(Pid) ->
	try erlang:process_info(Pid, group_leader) of
		{group_leader, GL} -> GL;
		_ -> undefined
	catch
		_:_ -> undefined
	end.

%%--------------------------------------------------------------------
%% @doc Returns ets diagnostic: a {Table, Info} pair for every ETS
%% table that can be inspected; tables that vanish mid-scan are
%% silently skipped.
%% @end
%%--------------------------------------------------------------------
-spec ets() -> proplists:proplist().
ets() ->
	Ets = ets:all(),
	ets(Ets, []).

%% @hidden accumulator loop over the table list.
ets([], Buffer) -> Buffer;
ets([Ets|Rest], Buffer) ->
	try
		%% display/2 both logs the table info and returns it.
		Info = display( ets:info(Ets), ets ),
		NewBuffer = [{Ets, Info}|Buffer],
		ets(Rest, NewBuffer)
	catch
		_:_ -> ets(Rest, Buffer)
	end.

%%--------------------------------------------------------------------
%% @doc Returns dets diagnostic, mirroring ets/0 for open DETS tables.
%% @end
%%--------------------------------------------------------------------
-spec dets() -> proplists:proplist().
dets() ->
	Dets = dets:all(),
	dets(Dets, []).

%% @hidden accumulator loop over the DETS table list.
dets([], Buffer) -> Buffer;
dets([Dets|Rest], Buffer) ->
	try
		Info = display( dets:info(Dets), dets ),
		NewBuffer = [{Dets, Info}|Buffer],
		dets(Rest, NewBuffer)
	catch
		_:_ -> dets(Rest, Buffer)
	end.

%% record extracted from ar_kv module. This record is used to store
%% rocksdb information used by arweave. It must stay structurally in
%% sync with the definition in ar_kv or ets:tab2list decoding below
%% will mismatch.
-record(db, {
	name :: term() | undefined,
	filepath :: file:filename_all(),
	db_options :: rocksdb:db_options(),
	db_handle :: rocksdb:db_handle() | undefined,
	cf_names = undefined :: [term()],
	cf_descriptors = undefined :: [rocksdb:cf_descriptor()],
	cf_handle = undefined :: rocksdb:cf_handle()
}).

%%--------------------------------------------------------------------
%% @doc Returns rocksdb diagnostic. This function gets the list of
%% opened databases by checking the content of the `ar_kv' ETS table.
%%--------------------------------------------------------------------
-spec rocksdb() -> proplists:proplist().
rocksdb() ->
	%% If the ar_kv table does not exist (ar_kv not started), there is
	%% nothing to report.
	case ets:info(ar_kv) of
		undefined -> [];
		Info -> rocksdb(Info)
	end.

%% @hidden
%% @doc Dumps a diagnostic entry for every #db{} row found in the
%% ar_kv table.
rocksdb(Info) ->
	Ets = proplists:get_value(id, Info),
	[ {rocksdb, rocksdb_struct(Db)} || Db <- ets:tab2list(Ets) ].

%% @hidden
%% @doc Collects the integer-valued properties of one open database
%% and logs them together with its file path and handle.
rocksdb_struct(#db{filepath = Filepath, db_handle = Handle}) ->
	Properties = rocksdb_properties(),
	Result = rocksdb_properties(Handle, Properties, []),
	display([
		{filepath, Filepath},
		{handle, Handle}
		| Result
	], rocksdb).

%%--------------------------------------------------------------------
%% @hidden
%% @doc These properties should always return an integer formatted
%% as binary.
%% Fixed: the list previously contained
%% <<"rocksdb.num-deletes-active-mem-table">> twice, which queried and
%% reported the same property two times per database.
%% @end
%%--------------------------------------------------------------------
-spec rocksdb_properties() -> [binary()].
rocksdb_properties() ->
	[
		<<"rocksdb.actual-delayed-write-rate">>,
		<<"rocksdb.background-errors">>,
		<<"rocksdb.base-level">>,
		<<"rocksdb.block-cache-capacity">>,
		<<"rocksdb.block-cache-pinned-usage">>,
		<<"rocksdb.block-cache-usage">>,
		<<"rocksdb.compaction-pending">>,
		<<"rocksdb.current-super-version-number">>,
		<<"rocksdb.cur-size-active-mem-table">>,
		<<"rocksdb.cur-size-all-mem-tables">>,
		<<"rocksdb.estimate-live-data-size">>,
		<<"rocksdb.estimate-num-keys">>,
		<<"rocksdb.estimate-pending-compaction-bytes">>,
		<<"rocksdb.estimate-table-readers-mem">>,
		<<"rocksdb.is-file-deletions-enabled">>,
		<<"rocksdb.is-write-stopped">>,
		<<"rocksdb.live-blob-file-size">>,
		<<"rocksdb.live-sst-files-size">>,
		<<"rocksdb.mem-table-flush-pending">>,
		<<"rocksdb.min-log-number-to-keep">>,
		<<"rocksdb.min-obsolete-sst-number-to-keep">>,
		<<"rocksdb.num-blob-files">>,
		<<"rocksdb.num-deletes-active-mem-table">>,
		<<"rocksdb.num-deletes-imm-mem-tables">>,
		<<"rocksdb.num-entries-active-mem-table">>,
		<<"rocksdb.num-entries-imm-mem-tables">>,
		<<"rocksdb.num-files-at-level0">>,
		<<"rocksdb.num-files-at-level1">>,
		<<"rocksdb.num-files-at-level2">>,
		<<"rocksdb.num-files-at-level3">>,
		<<"rocksdb.num-files-at-level4">>,
		<<"rocksdb.num-immutable-mem-table">>,
		<<"rocksdb.num-immutable-mem-table-flushed">>,
		<<"rocksdb.num-live-versions">>,
		<<"rocksdb.num-running-compactions">>,
		<<"rocksdb.num-running-flushes">>,
		<<"rocksdb.num-snapshots">>,
		<<"rocksdb.size-all-mem-tables">>,
		<<"rocksdb.total-blob-file-size">>,
		<<"rocksdb.total-sst-files-size">>
	].

%%--------------------------------------------------------------------
%% @hidden
%% @doc loop over the properties and convert them into a proplist.
%% A property that cannot be fetched or parsed as an integer is simply
%% skipped (best-effort diagnostic).
%% @end
%%--------------------------------------------------------------------
-spec rocksdb_properties(Handle, Properties, Buffer) -> Return when
	Handle :: reference(),
	Properties :: [binary()],
	Buffer :: proplists:proplist(),
	Return :: proplists:proplist().
rocksdb_properties(_, [], Buffer) -> Buffer;
rocksdb_properties(Handle, [Property|Rest], Buffer) when is_binary(Property) ->
	try
		{ok, Raw} = rocksdb:get_property(Handle, Property),
		Value = binary_to_integer(Raw),
		% converting the key for presentation purpose.
		String = binary_to_list(Property),
		NewBuffer = [{String, Value}|Buffer],
		rocksdb_properties(Handle, Rest, NewBuffer)
	catch
		_:_ ->
			rocksdb_properties(Handle, Rest, Buffer)
	end.

%%--------------------------------------------------------------------
%% @hidden
%% @doc display diagnostics via logs. Tags the proplist with its
%% category, logs it, and returns the untouched diagnostic so callers
%% can also accumulate it.
%% @end
%%--------------------------------------------------------------------
display(Diagnostic, Category) ->
	Message = [{diagnostic, Category}|Diagnostic],
	?LOG_INFO(Message),
	Diagnostic.
================================================
FILE: apps/arweave_limiter/include/.gitkeep
================================================



================================================
FILE: apps/arweave_limiter/priv/.gitkeep
================================================



================================================
FILE: apps/arweave_limiter/src/arweave_limiter.app.src
================================================
{application, arweave_limiter, [
	{id, "arweave_limiter"},
	{description, "Arweave Rate Limiter"},
	{vsn, "0.0.1"},
	{mod, {arweave_limiter, []}},
	{env, []},
	{applications, [
		kernel,
		stdlib,
		sasl,
		arweave_config,
		prometheus,
		prometheus_cowboy,
		prometheus_process_collector,
		prometheus_httpd,
		runtime_tools
	]},
	{modules, [
		arweave_limiter,
		arweave_limiter_sup,
		arweave_limiter_time,
		arweave_limiter_metrics,
		arweave_limiter_metrics_collector
	]},
	{registered, [
		arweave_limiter,
		arweave_limiter_sup
	]}
]}.


================================================
FILE: apps/arweave_limiter/src/arweave_limiter.erl
================================================
%%%===================================================================
%%% GNU General Public License, version 2 (GPL-2.0)
%%% The GNU General Public License (GPL-2.0)
%%% Version 2, June 1991
%%%
%%% ------------------------------------------------------------------
%%%
%%% @copyright 2025 (c) Arweave
%%% @author Arweave Team
%%% @author Kristof Hetzl
%%% @doc Arweave Rate Limiter.
%%%
%%% `arweave_limiter' module is an interface to the Arweave
%%% Rate Limiter functionality.
%%%
%%% @end
%%%===================================================================
-module(arweave_limiter).
-vsn(1).
-behavior(application).
-export([ start/0, start/2, stop/0, stop/1 ]).
-export([register_or_reject_call/2, reduce_for_peer/2]).
-include_lib("kernel/include/logger.hrl").

%%--------------------------------------------------------------------
%% @doc helper function to start `arweave_limiter' application.
%% @end
%%--------------------------------------------------------------------
-spec start() -> ok | {error, term()}.
start() ->
	case application:ensure_all_started(?MODULE, permanent) of
		{ok, Dependencies} ->
			?LOG_DEBUG("arweave_limiter started dependencies: ~p", [Dependencies]),
			ok;
		Elsewise ->
			Elsewise
	end.

%%--------------------------------------------------------------------
%% @doc Application API function to start the `arweave_limiter' app
%% (application behaviour callback).
%% @end
%%--------------------------------------------------------------------
-spec start(term(), term()) -> {ok, pid()}.
start(_StartType, _StartArgs) ->
	arweave_limiter_sup:start_link().

%%--------------------------------------------------------------------
%% @doc helper function to stop the `arweave_limiter' application.
%% @end
%%--------------------------------------------------------------------
-spec stop() -> ok.
stop() ->
	application:stop(?MODULE).

%%--------------------------------------------------------------------
%% @doc application behaviour stop callback for `arweave_limiter'.
%% @end
%%--------------------------------------------------------------------
-spec stop(term()) -> ok.
stop(_State) ->
	ok.

%%--------------------------------------------------------------------
%% @doc Rate limit request. Delegates to the limiter group registered
%% under LimiterRef.
%% @end
%%--------------------------------------------------------------------
register_or_reject_call(LimiterRef, Peer) ->
	arweave_limiter_group:register_or_reject_call(LimiterRef, Peer).

%%--------------------------------------------------------------------
%% @doc Reduce leaky tokens for peer.
%% @end
%%--------------------------------------------------------------------
reduce_for_peer(LimiterRef, Peer) ->
	arweave_limiter_group:reduce_for_peer(LimiterRef, Peer).
================================================
FILE: apps/arweave_limiter/src/arweave_limiter_group.erl
================================================
%%%
%%% @doc Leaky bucket token rate limiter based on
%%% https://gist.github.com/humaite/21a84c3b3afac07fcebe476580f3a40b
%%% combined with a concurrency limiter similar to Ranch's connection pool.
%%% The leaky bucket limiter sits on top of a sliding window limiter.
%%%
%%% Concurrency is validated first, then sliding window, followed by leaky
%%% bucket. If sliding windows passes, the call is accepted, otherwise it
%%% burns leaky tokens, if those are exhausted as well, the call will be
%%% marked as rejected.
%%% It only stores data in process memory.
%%%
-module(arweave_limiter_group).
-behaviour(gen_server).

%% API
-export([
	start_link/2,
	info/1,
	config/1,
	register_or_reject_call/2,
	reduce_for_peer/2,
	reset_all/1,
	stop/1
]).

%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
	terminate/2, code_change/3, format_status/2]).

%% Pure helpers exported only for the test build.
-ifdef(AR_TEST).
-export([
	expire_and_get_requests/4,
	drop_expired/3,
	add_and_order_timestamps/2,
	cleanup_expired_sliding_peers/3]).
-endif.

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave_config/include/arweave_config.hrl").

%%% API

%% @doc Starts a limiter group registered locally under LimiterRef.
start_link(LimiterRef, Config) ->
	gen_server:start_link({local, LimiterRef}, ?MODULE, [Config], []).

%% @doc Returns the limiter's mutable bookkeeping (timestamps, tokens,
%% monitors).
info(LimiterRef) ->
	gen_server:call(LimiterRef, get_info).

%% @doc Returns the limiter's static configuration.
config(LimiterRef) ->
	gen_server:call(LimiterRef, get_config).

%% @doc Registers or rejects a call for Peer, recording the round-trip
%% time (microseconds, from timer:tc) in the response-time histogram.
register_or_reject_call(LimiterRef, Peer) ->
	{Time, Value} = timer:tc(fun do_register_or_reject_call/2, [LimiterRef, Peer]),
	prometheus_histogram:observe(ar_limiter_response_time_microseconds,
		[atom_to_list(LimiterRef)], Time),
	Value.
%% @hidden
%% @doc Counts the request, forwards it to the gen_server, and counts
%% the rejection reason when the server refuses it.
do_register_or_reject_call(LimiterRef, Peer) ->
	prometheus_counter:inc(ar_limiter_requests_total, [atom_to_list(LimiterRef)]),
	case gen_server:call(LimiterRef, {register_or_reject, Peer}) of
		{reject, Reason, _Data} = Rejection ->
			prometheus_counter:inc(ar_limiter_rejected_total,
				[atom_to_list(LimiterRef), atom_to_list(Reason)]),
			Rejection;
		Accept ->
			Accept
	end.

%% This function is called when a transaction is accepted. This is how the previous
%% solution dealt with high loads. This will perform double reduction. (as the periodic
%% reduction is still occurring).
reduce_for_peer(LimiterRef, Peer) ->
	Result = gen_server:call(LimiterRef, {reduce_for_peer, Peer}),
	%% Only count reductions the server actually performed (it replies
	%% `disabled' when manual reduction is switched off).
	Result == ok andalso prometheus_counter:inc(ar_limiter_reduce_requests_total,
		[atom_to_list(LimiterRef)]),
	Result.

%% @doc Clears all per-peer state; a no-op when the limiter process is
%% not running.
reset_all(LimiterRef) ->
	whereis(LimiterRef) == undefined orelse gen_server:call(LimiterRef, reset_all).

stop(LimiterRef) ->
	gen_server:stop(LimiterRef).

%% gen_server callbacks

%% @hidden
%% @doc Builds the limiter state from Config, falling back to the
%% compile-time defaults for any missing key, and starts the two
%% periodic ticks (leaky-bucket reduction and timestamp cleanup).
init([Config] = _Args) ->
	Id = maps:get(id, Config),
	IsDisabled = maps:get(no_limit, Config, false),
	IsManualReductionDisabled = maps:get(is_manual_reduction_disabled, Config, false),
	LeakyTickMs = maps:get(leaky_tick_ms, Config,
		?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL),
	TimestampCleanupTickMs = maps:get(timestamp_cleanup_tick_ms, Config,
		?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL),
	TimestampCleanupExpiry = maps:get(timestamp_cleanup_expiry, Config,
		?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY),
	LeakyRateLimit = maps:get(leaky_rate_limit, Config,
		?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT),
	ConcurrencyLimit = maps:get(concurrency_limit, Config,
		?DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT),
	TickReduction = maps:get(tick_reduction, Config,
		?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION),
	SlidingWindowDuration = maps:get(sliding_window_duration, Config,
		?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION),
	SlidingWindowLimit = maps:get(sliding_window_limit, Config,
		?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT),
	{ok, LeakyRef} = timer:send_interval(LeakyTickMs, self(),
		{tick, leaky_bucket_reduction}),
	{ok, TsRef} = timer:send_interval(TimestampCleanupTickMs, self(),
		{tick, sliding_window_timestamp_cleanup}),
	{ok, #{
		id => atom_to_list(Id),
		is_disabled => IsDisabled,
		is_manual_reduction_disabled => IsManualReductionDisabled,
		leaky_tick_timer_ref => LeakyRef,
		timestamp_cleanup_timer_ref => TsRef,
		leaky_tick_ms => LeakyTickMs,
		timestamp_cleanup_tick_ms => TimestampCleanupTickMs,
		timestamp_cleanup_expiry => TimestampCleanupExpiry,
		tick_reduction => TickReduction,
		leaky_rate_limit => LeakyRateLimit,
		concurrency_limit => ConcurrencyLimit,
		concurrent_requests => #{}, %% Peer -> List of {MonitorRef, Pid}
		concurrent_monitors => #{}, %% MonitorRef -> Peer
		leaky_tokens => #{}, %% Peer -> Leaky Bucket tokens
		sliding_window_duration => SlidingWindowDuration,
		sliding_window_limit => SlidingWindowLimit,
		sliding_timestamps => #{} %% Peer -> Ordered list of timestamps
	}}.
%% @hidden
%% @doc Drops every piece of per-peer bookkeeping. Monitors stay armed;
%% their eventual 'DOWN' messages are absorbed by remove_concurrent/5.
handle_call(reset_all, _From, State) ->
	{reply, ok, State#{concurrent_requests => #{}, concurrent_monitors => #{},
		leaky_tokens => #{}, sliding_timestamps => #{}}};
%% When the limiter is disabled, short-circuit before doing any
%% bookkeeping. (Previously the timestamp/token/concurrency lookups
%% were computed even in this case and then discarded.)
handle_call({register_or_reject, _Peer}, _From, State = #{is_disabled := true}) ->
	{reply, {register, no_limiting_applied}, State};
%% Admission control, validated in order:
%%   1. concurrency hard limit,
%%   2. sliding window (the normal accept path),
%%   3. leaky-bucket tokens (burst allowance once the window is full).
handle_call({register_or_reject, Peer}, {FromPid, _}, State = #{
		id := Id,
		leaky_rate_limit := LeakyRateLimit,
		leaky_tokens := LeakyTokens,
		concurrency_limit := ConcurrencyLimit,
		concurrent_requests := ConcurrentRequests,
		concurrent_monitors := ConcurrentMonitors,
		sliding_window_duration := SlidingWindowDuration,
		sliding_window_limit := SlidingWindowLimit,
		sliding_timestamps := SlidingTimestamps}) ->
	Now = arweave_limiter_time:ts_now(),
	%% "+ 1" accounts for the request being admitted right now.
	Tokens = maps:get(Peer, LeakyTokens, 0) + 1,
	Concurrency = length(maps:get(Peer, ConcurrentRequests, [])) + 1,
	WindowTimestamps = expire_and_get_requests(Peer, SlidingTimestamps,
		SlidingWindowDuration, Now),
	if
		Concurrency > ConcurrencyLimit ->
			%% Concurrency Hard Limit
			?LOG_DEBUG([{event, ar_limiter_reject}, {reason, concurrency},
				{peer, Peer}, {id, Id}]),
			{reply, {reject, concurrency, data}, State};
		length(WindowTimestamps) + 1 =< SlidingWindowLimit ->
			%% Sliding window has room: accept and record the timestamp.
			{NewRequests, NewMonitors} = register_concurrent(
				Peer, FromPid, ConcurrentRequests, ConcurrentMonitors),
			NewWindow = add_and_order_timestamps(Now, WindowTimestamps),
			{reply, {register, sliding},
				State#{sliding_timestamps => SlidingTimestamps#{Peer => NewWindow},
					concurrent_requests => NewRequests,
					concurrent_monitors => NewMonitors}};
		Tokens > LeakyRateLimit ->
			%% Burst exhausted with the Leaky Tokens
			?LOG_DEBUG([{event, ar_limiter_reject}, {reason, rate_limit},
				{sliding_window_limit, SlidingWindowLimit},
				{leaky_rate_limit, LeakyRateLimit}, {peer, Peer}, {id, Id}]),
			{reply, {reject, rate_limit, data}, State};
		true ->
			%% Window full but burst tokens remain: accept on the bucket.
			NewLeakyTokens = update_token(Peer, Tokens, LeakyTokens),
			{NewRequests, NewMonitors} = register_concurrent(
				Peer, FromPid, ConcurrentRequests, ConcurrentMonitors),
			{reply, {register, leaky},
				State#{leaky_tokens => NewLeakyTokens,
					concurrent_requests => NewRequests,
					concurrent_monitors => NewMonitors}}
	end;
%% Manual token reduction (see reduce_for_peer/2); replies `disabled'
%% when the feature is switched off so the client side can skip its
%% metrics update.
handle_call({reduce_for_peer, Peer}, _From,
		State = #{is_manual_reduction_disabled := false, leaky_tokens := LeakyTokens}) ->
	NewLeakyTokens = do_reduce_for_peer(Peer, LeakyTokens),
	{reply, ok, State#{leaky_tokens => NewLeakyTokens}};
handle_call({reduce_for_peer, _Peer}, _From,
		State = #{is_manual_reduction_disabled := true}) ->
	{reply, disabled, State};
handle_call(get_info, _From, State = #{
		sliding_timestamps := SlidingTimestamps,
		leaky_tokens := LeakyTokens,
		concurrent_requests := ConcurrentRequests,
		concurrent_monitors := ConcurrentMonitors}) ->
	{reply, #{sliding_timestamps => SlidingTimestamps,
		leaky_tokens => LeakyTokens,
		concurrent_requests => ConcurrentRequests,
		concurrent_monitors => ConcurrentMonitors}, State};
handle_call(get_config, _From, State) ->
	{reply, filter_state_for_config(State), State};
handle_call(Request, From, State = #{id := Id}) ->
	?LOG_WARNING([{event, unhandled_call}, {id, Id}, {module, ?MODULE},
		{request, Request}, {from, From},
		{config, filter_state_for_config(State)}]),
	{reply, ok, State}.

handle_cast(_Request, State) ->
	{noreply, State}.
%% @hidden
%% @doc Periodic cleanup: removes peers whose entire sliding window has
%% expired; the number of dropped peers is recorded as a counter delta.
handle_info({tick, sliding_window_timestamp_cleanup}, State = #{
		id := Id,
		sliding_timestamps := SlidingTimestamps,
		timestamp_cleanup_expiry := CleanupExpiry}) ->
	Now = arweave_limiter_time:ts_now(),
	NewSlidingTimestamps = cleanup_expired_sliding_peers(SlidingTimestamps,
		CleanupExpiry, Now),
	Deleted = maps:size(SlidingTimestamps) - maps:size(NewSlidingTimestamps),
	prometheus_counter:inc(ar_limiter_cleanup_tick_expired_sliding_peers_deleted_total,
		[Id], Deleted),
	{noreply, State#{sliding_timestamps => NewSlidingTimestamps}};
%% @hidden
%% @doc Periodic leaky-bucket drain: every peer's token count is
%% decreased by tick_reduction (peers that reach zero are dropped from
%% the map entirely inside fold_decrease_rate/5).
handle_info({tick, leaky_bucket_reduction}, State = #{
		id := Id,
		tick_reduction := TickReduction,
		leaky_tokens := LeakyTokens}) ->
	%% This is going to be more precise than ar_limiter_leaky_ticks*ar_limiter_peers
	prometheus_counter:inc(ar_limiter_leaky_ticks, [Id]),
	SizeBefore = maps:size(LeakyTokens),
	prometheus_counter:inc(ar_limiter_leaky_tick_reductions_peer, [Id], SizeBefore),
	NewTokens = maps:fold(fun(Key, Value, AccIn) ->
		fold_decrease_rate(Id, Key, Value, AccIn, TickReduction)
	end, #{}, LeakyTokens),
	prometheus_counter:inc(
		ar_limiter_leaky_tick_delete_peer_total,
		[Id],
		SizeBefore - maps:size(NewTokens)),
	{noreply, State#{leaky_tokens => NewTokens}};
%% @hidden
%% @doc A monitored caller terminated: release its concurrency slot.
handle_info({'DOWN', MonitorRef, process, Pid, Reason}, State = #{
		concurrent_requests := ConcurrentRequests,
		concurrent_monitors := ConcurrentMonitors}) ->
	{NewConcurrentRequests, NewConcurrentMonitors} = remove_concurrent(
		MonitorRef, Pid, Reason, ConcurrentRequests, ConcurrentMonitors),
	{noreply, State#{concurrent_requests => NewConcurrentRequests,
		concurrent_monitors => NewConcurrentMonitors}};
handle_info(Info, State = #{id := Id}) ->
	?LOG_WARNING([{event, unhandled_info}, {id, Id}, {module, ?MODULE},
		{info, Info}]),
	{noreply, State}.

%% NOTE(review): the interval timers are matched out of the state but
%% never cancelled here (timer:cancel/1); the timer server keeps firing
%% at a dead pid until it notices — confirm whether explicit
%% cancellation is wanted.
terminate(_Reason, #{leaky_tick_timer_ref := _LeakyRef,
		timestamp_cleanup_timer_ref := _TsRef} = _State) ->
	ok.

code_change(_OldVsn, State, _Extra) ->
	{ok, State}.

format_status(_Opt, Status) ->
	Status.
%%% Internal functions

%% Sliding window manipulation

%% @doc Returns Peer's sliding-window timestamps with every entry older
%% than the window already dropped. Unknown peers yield [].
expire_and_get_requests(Peer, SlidingTimestamps, SlidingWindowDuration, Now) ->
	drop_expired(maps:get(Peer, SlidingTimestamps, []), SlidingWindowDuration, Now).

%% @doc Strips the expired prefix of an ascending timestamp list: an
%% entry is expired once Ts + WindowDuration =< Now.
drop_expired(Timestamps, WindowDuration, Now) ->
	Expired = fun(Ts) -> Ts + WindowDuration =< Now end,
	lists:dropwhile(Expired, Timestamps).

%% @doc Inserts Ts into an ascending list, after any equal entries.
%% Timestamps are monotonic, so in practice Ts lands at the tail.
add_and_order_timestamps(Ts, Timestamps) ->
	{NotLater, Later} = lists:splitwith(fun(T) -> T =< Ts end, Timestamps),
	NotLater ++ [Ts | Later].

%% @doc Rebuilds the peer map keeping only peers that still own at
%% least one unexpired timestamp.
cleanup_expired_sliding_peers(SlidingTimestamps, WindowDuration, Now) ->
	maps:from_list(
		[{Peer, Kept} ||
			{Peer, TsList} <- maps:to_list(SlidingTimestamps),
			Kept <- [drop_expired(TsList, WindowDuration, Now)],
			Kept =/= []]).

%% Token manipulation

%% @doc Stores (or overwrites) Peer's leaky-bucket token count.
update_token(Peer, Token, LeakyToken) ->
	LeakyToken#{Peer => Token}.

%% @doc Decrements Peer's token count by one; a missing peer or a zero
%% count leaves the map untouched.
do_reduce_for_peer(Peer, LeakyTokens) ->
	case LeakyTokens of
		#{Peer := Tokens} when Tokens =/= 0 ->
			LeakyTokens#{Peer := Tokens - 1};
		_ ->
			LeakyTokens
	end.
%% @doc Per-peer step of the leaky-bucket drain fold. A non-positive
%% counter is dropped from the accumulator (the peer disappears from
%% the token map); otherwise the counter loses at most TickReduction
%% tokens, and the amount actually reclaimed is exported as a counter.
fold_decrease_rate(_Id, _Key, Counter, Acc, _TickReduction)
		when is_integer(Counter), Counter =< 0 ->
	Acc;
fold_decrease_rate(Id, Key, Counter, Acc, TickReduction) ->
	Reduction = min(Counter, TickReduction),
	prometheus_counter:inc(ar_limiter_leaky_tick_token_reductions_total,
		[Id], Reduction),
	Acc#{Key => Counter - Reduction}.

%% Concurrency magic

%% @doc Monitors the calling process and files it under Peer in both
%% directions (Peer -> [{Ref, Pid}] and Ref -> Peer).
register_concurrent(Peer, Pid, Requests0, Monitors0) ->
	Ref = erlang:monitor(process, Pid),
	Existing = maps:get(Peer, Requests0, []),
	{Requests0#{Peer => [{Ref, Pid} | Existing]}, Monitors0#{Ref => Peer}}.

%% @doc Releases the slot owned by MonitorRef. An unknown monitor is a
%% no-op: reset_all wipes the maps while monitors are still armed, so
%% late 'DOWN' messages must be absorbed silently.
remove_concurrent(MonitorRef, _Pid, _Reason, Requests, Monitors) ->
	case Monitors of
		#{MonitorRef := Peer} ->
			Remaining = proplists:delete(MonitorRef, maps:get(Peer, Requests)),
			Requests1 =
				case Remaining of
					[] -> maps:remove(Peer, Requests);
					_ -> Requests#{Peer => Remaining}
				end,
			{Requests1, maps:remove(MonitorRef, Monitors)};
		_ ->
			{Requests, Monitors}
	end.
%% @hidden
%% @doc Projects the gen_server state down to its immutable
%% configuration keys (everything except the timer refs and the
%% per-peer bookkeeping maps), for get_config replies and log output.
%% The head pattern requires every key, so a malformed state fails
%% loudly with function_clause.
filter_state_for_config(#{
		id := Id,
		is_disabled := IsDisabled,
		is_manual_reduction_disabled := IsManualReductionDisabled,
		leaky_tick_ms := LeakyTickMs,
		timestamp_cleanup_tick_ms := TimestampCleanupTickMs,
		timestamp_cleanup_expiry := TimestampCleanupExpiry,
		tick_reduction := TickReduction,
		leaky_rate_limit := LeakyRateLimit,
		concurrency_limit := ConcurrencyLimit,
		sliding_window_duration := SlidingWindowDuration,
		sliding_window_limit := SlidingWindowLimit}) ->
	#{id => Id,
		is_disabled => IsDisabled,
		is_manual_reduction_disabled => IsManualReductionDisabled,
		leaky_tick_ms => LeakyTickMs,
		timestamp_cleanup_tick_ms => TimestampCleanupTickMs,
		timestamp_cleanup_expiry => TimestampCleanupExpiry,
		tick_reduction => TickReduction,
		leaky_rate_limit => LeakyRateLimit,
		concurrency_limit => ConcurrencyLimit,
		sliding_window_duration => SlidingWindowDuration,
		sliding_window_limit => SlidingWindowLimit}.


================================================
FILE: apps/arweave_limiter/src/arweave_limiter_metrics.erl
================================================
-module(arweave_limiter_metrics).

-export([register/0]).

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Declare Arweave Rate Limiter metrics.
%% Registers every prometheus metric used by the limiter; called once
%% from the supervisor's init. prometheus_*:new/1 raises if a metric is
%% already registered, so this must not run twice per node.
register() ->
	ok = prometheus_histogram:new([
		{name, ar_limiter_response_time_microseconds},
		{help, "Time it took for the limiter to respond to requests"},
		%% buckets might be reduced for production
		%% NOTE(review): the observed value comes from timer:tc, i.e.
		%% integer microseconds, so buckets below 1 can only ever catch
		%% zero-duration samples — confirm the intended unit/scale.
		{buckets, [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50]},
		{labels, [limiter_id]}]),
	ok = prometheus_counter:new([
		{name, ar_limiter_requests_total},
		{help, "The number of requests the limiter has processed"},
		{labels, [limiter_id]}]),
	ok = prometheus_counter:new([{name, ar_limiter_rejected_total},
		{help, "The number of request were rejected by the limiter"},
		{labels, [limiter_id, reason]}
	]),
	ok = prometheus_counter:new([{name, ar_limiter_reduce_requests_total},
		{help, "The number of reduce request by peer in total"},
		{labels, [limiter_id]}
	]),
	ok = prometheus_gauge:new([
		{name, ar_limiter_peers},
		{help, "The number of peers the limiter is monitoring currently"},
		%% limiting type:
		%% sliding_window -> baseline, leaky_bucket -> burst, concurrency -> concurrency
		{labels, [limiter_id, limiting_type]}]),
	ok = prometheus_gauge:new([
		{name, ar_limiter_tracked_items_total},
		{help, "The number of timestamps, leaky tokens, concurrent processes are tracked"},
		%% limiting type:
		%% sliding_window -> baseline, leaky_bucket -> burst, concurrency -> concurrency
		{labels, [limiter_id, limiting_type]}]),
	ok = prometheus_counter:new([
		{name, ar_limiter_leaky_ticks},
		{help, "The number of leaky bucket ticks the limiter has processed"},
		{labels, [limiter_id]}]),
	ok = prometheus_counter:new([
		{name, ar_limiter_leaky_tick_delete_peer_total},
		{help, "The number of times a peer has been dropped from the leaky bucket token register"},
		{labels, [limiter_id]}]),
	ok = prometheus_counter:new([
		{name, ar_limiter_cleanup_tick_expired_sliding_peers_deleted_total},
		{help, "The number of times a peer has been dropped from the sliding window timestamp register"},
		{labels, [limiter_id]}]),
	ok = prometheus_counter:new([
		%% To show how much tokens clients are burning for bursts.
		{name, ar_limiter_leaky_tick_token_reductions_total},
		{help, "All the consumed leaky bucket tokens that were reduced for all peers in total"},
		{labels, [limiter_id]}]),
	ok = prometheus_counter:new([
		%% To see how many peers bite into their burst tokens. (in a period)
		{name, ar_limiter_leaky_tick_reductions_peer},
		{help, "The times a leaky bucket token reduction had have to be performed for a peer"},
		{labels, [limiter_id]}]),
	ok.


================================================
FILE: apps/arweave_limiter/src/arweave_limiter_metrics_collector.erl
================================================
-module(arweave_limiter_metrics_collector).

-behaviour(prometheus_collector).

-export([
	deregister_cleanup/1,
	collect_mf/2
]).

%% Internal helpers exported only for the test build.
-ifdef(AR_TEST).
-export([
	metrics/0,
	tracked_items/1,
	peers/1
]).
-endif.

-import(prometheus_model_helpers, [create_mf/4]).

-include_lib("prometheus/include/prometheus.hrl").

-define(METRIC_NAME_PREFIX, "arweave_").

%% ===================================================================
%% API
%% ===================================================================

%% called to collect Metric Families
-spec collect_mf(_Registry, Callback) -> ok when
	_Registry :: prometheus_registry:registry(),
	Callback :: prometheus_collector:callback().
collect_mf(_Registry, Callback) ->
	Metrics = metrics(),
	[add_metric_family(Metric, Callback) || Metric <- Metrics],
	ok.

%% called when collector deregistered
deregister_cleanup(_Registry) ->
	ok.

%% ===================================================================
%% Private functions
%% ===================================================================

%% Wraps one {Name, Type, Help, Metrics} tuple into a metric family and
%% hands it to the registry callback; ?METRIC_NAME prepends the
%% "arweave_" prefix defined above.
add_metric_family({Name, Type, Help, Metrics}, Callback) ->
	Callback(create_mf(?METRIC_NAME(Name), Help, Type, Metrics)).
%% Builds the gauge families from a live snapshot of every limiter
%% group's state (arweave_limiter_sup:all_info/0).
metrics() ->
	AllInfo = arweave_limiter_sup:all_info(),
	[
		{ar_limiter_tracked_items_total, gauge,
			"tracked requests, timestamps, leaky tokens", tracked_items(AllInfo)},
		{ar_limiter_peers, gauge,
			"The number of peers the limiter is monitoring currently", peers(AllInfo)}
	].

%% One gauge sample per {limiter, limiting_type}: counts individual
%% tracked items (monitors, token entries, timestamps).
tracked_items(AllInfo) ->
	lists:foldl(fun tracked_items_info/2, [], AllInfo).

tracked_items_info({Id, Info}, Acc) ->
	SlidingTimestamps = count_sliding_timestamps(Info),
	Monitors = maps:get(concurrent_monitors, Info),
	LeakyPeers = maps:get(leaky_tokens, Info),
	Items = [
		{[{limiter_id, Id}, {limiting_type, concurrency}], maps:size(Monitors)},
		{[{limiter_id, Id}, {limiting_type, leaky_bucket_tokens}], maps:size(LeakyPeers)},
		{[{limiter_id, Id}, {limiting_type, sliding_window_timestamps}], SlidingTimestamps}
	],
	Items ++ Acc.

%% Total number of timestamps across all peers of one limiter.
count_sliding_timestamps(Info) ->
	SlidingTimestamps = maps:get(sliding_timestamps, Info),
	maps:fold(fun(_Peer, TimestampList, Acc) ->
		length(TimestampList) + Acc
	end, 0, SlidingTimestamps).

%% One gauge sample per {limiter, limiting_type}: counts distinct peers
%% (map keys), as opposed to the per-item totals above.
peers(AllInfo) ->
	lists:foldl(fun peers_info/2, [], AllInfo).

peers_info({Id, Info}, Acc) ->
	ConcurrentRequests = maps:get(concurrent_requests, Info),
	LeakyPeers = maps:get(leaky_tokens, Info),
	SlidingPeers = maps:get(sliding_timestamps, Info),
	Items = [
		{[{limiter_id, Id}, {limiting_type, concurrency}], maps:size(ConcurrentRequests)},
		{[{limiter_id, Id}, {limiting_type, leaky_bucket_tokens}], maps:size(LeakyPeers)},
		{[{limiter_id, Id}, {limiting_type, sliding_window_timestamps}], maps:size(SlidingPeers)}
	],
	Items ++ Acc.


================================================
FILE: apps/arweave_limiter/src/arweave_limiter_sup.erl
================================================
-module(arweave_limiter_sup).

-behaviour(supervisor).

%% API
-export([start_link/0, all_info/0]).

%% Extra entry points exported only for the test build.
-ifdef(AR_TEST).
-export([start_link/1, child_spec/1, reset_all/0]).
-endif.

%% Supervisor callbacks
-export([init/1]).

-include_lib("arweave_config/include/arweave_config.hrl").
-include_lib("arweave/include/ar_sup.hrl").
-include_lib("kernel/include/logger.hrl").

%% ===================================================================
%% API functions
%% ===================================================================

%% Start the supervisor with the limiter configs derived from the node config.
start_link() ->
	start_link(get_limiter_config()).

%% Start with an explicit list of limiter config maps (used by tests).
start_link(Config) ->
	supervisor:start_link({local, ?MODULE}, ?MODULE, [Config]).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

%% Registers the limiter prometheus counters once, then starts one
%% arweave_limiter_group child per config entry.
init([Config]) ->
	ok = arweave_limiter_metrics:register(),
	{ok, {supervisor_spec(Config), children_spec(Config)}}.

supervisor_spec(_Config) ->
	#{
		strategy => one_for_all,
		intensity => 5,
		period => 10
	}.

%%--------------------------------------------------------------------
%% Child spec generation based on Config.
%%--------------------------------------------------------------------
children_spec(Configs) ->
	[child_spec(Config) || Config <- Configs].

%% One worker per limiter; the limiter's id doubles as the child id.
child_spec(#{id := Id} = Config) ->
	#{
		id => Id,
		start => {arweave_limiter_group, start_link, [Id, Config]},
		type => worker,
		shutdown => ?SHUTDOWN_TIMEOUT}.
%% @doc Build the per-endpoint limiter configurations from the node config.
%% Every limiter (except local_peers) is configured by the same nine
%% 'http_api.limiter.<Id>.<Suffix>' fields of #config{}; resolve the record
%% fields dynamically instead of spelling out 90 record accesses by hand.
%% Returns the config maps in the same order as before, with the unlimited
%% local_peers entry last.
get_limiter_config() ->
	{ok, Config} = arweave_config:get_env(),
	Ids = [chunk, data_sync_record, recent_hash_list_diff, block_index,
			wallet_list, get_vdf, get_vdf_session, get_previous_vdf_session,
			general, metrics],
	[limiter_config(Id, Config) || Id <- Ids]
		%% Local peers
		++ [#{id => local_peers, no_limit => true}].

%% Assemble one limiter config map. The left-hand keys are what
%% arweave_limiter_group expects; the right-hand suffixes name the
%% corresponding #config{} fields.
limiter_config(Id, Config) ->
	Value = fun(Suffix) -> config_field(Id, Suffix, Config) end,
	#{id => Id,
		sliding_window_limit => Value("sliding_window_limit"),
		sliding_window_duration => Value("sliding_window_duration"),
		timestamp_cleanup_tick_ms => Value("sliding_window_timestamp_cleanup_interval"),
		timestamp_cleanup_expiry => Value("sliding_window_timestamp_cleanup_expiry"),
		leaky_rate_limit => Value("leaky_limit"),
		leaky_tick_ms => Value("leaky_tick_interval"),
		tick_reduction => Value("leaky_tick_reduction"),
		concurrency_limit => Value("concurrency_limit"),
		is_manual_reduction_disabled => Value("is_manual_reduction_disabled")}.

%% Read #config{}.'http_api.limiter.<Id>.<Suffix>' dynamically.
%% list_to_existing_atom/1 is safe here: the record definition in
%% arweave_config.hrl already created every one of these atoms, and it
%% doubles as a typo guard (crashes on an unknown field name).
config_field(Id, Suffix, Config) ->
	Field = list_to_existing_atom(
		"http_api.limiter." ++ atom_to_list(Id) ++ "." ++ Suffix),
	%% record_info/2 is expanded at compile time; +1 skips the record tag.
	element(field_index(Field, record_info(fields, config)) + 1, Config).

%% 1-based position of Field in the record field list.
%% Crashes (function_clause) if the field does not exist.
field_index(Field, [Field | _]) -> 1;
field_index(Field, [_ | Rest]) -> 1 + field_index(Field, Rest).

%% Snapshot the internal state of every running limiter group,
%% as [{Id, InfoMap}].
all_info() ->
	Children = supervisor:which_children(?MODULE),
	[{Id, arweave_limiter_group:info(Id)} || {Id, _Child, _Type, _Modules} <- Children].

%% Test helper: reset the state of every running limiter group.
reset_all() ->
	Children = supervisor:which_children(?MODULE),
	[{Id, arweave_limiter_group:reset_all(Id)} || {Id, _Child, _Type, _Modules} <- Children].


================================================
FILE: apps/arweave_limiter/src/arweave_limiter_time.erl
================================================
%%%
%%% @doc Rate limiter clock and time management library
%%%
%%% NOTE: this module seems pretty redundant. However, moving erlang:monotonic_time/1
%%% into this module allows us to test production code without alteration
%%% and mock time related functions, and so manipulate and control time precisely
%%% in tests.
So here it is.
-module(arweave_limiter_time).

-export([
	ts_now/0
]).

%% @doc Current monotonic time in milliseconds. Mocked with meck in the
%% limiter tests to step the clock deterministically.
ts_now() ->
	erlang:monotonic_time(millisecond).


================================================
FILE: apps/arweave_limiter/test/arweave_limiter_group_tests.erl
================================================
-module(arweave_limiter_group_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").

-define(M, arweave_limiter_group).
-define(TABLE, eunit_arweave_limiter_tests_mock).
-define(KEY, ts_now).
-define(TEST_LIMITER, test_limiter).

%% Overwrite the mocked clock value that the meck'd
%% arweave_limiter_time:ts_now/0 reads (see setup/1).
-define(setTsMock(Ts), ets:insert(?TABLE, {?KEY, Ts})).

%% Set the mocked clock to Now, then call register_or_reject_call/2 from a
%% fresh linked process and assert the reply matches Pattern. Returns that
%% pid; it stays alive (keeping its slot registered) until it is sent 'done'.
-define(assertHandlerRegisterOrRejectCall(LimiterRef, Pattern, Peer, Now),
	((fun () ->
		?assert(?setTsMock(Now)),
		spawn_link(fun() ->
			?assertMatch(
				Pattern,
				?M:register_or_reject_call(LimiterRef, Peer)),
			receive done -> ok end
		end)
	end)())).

%% Pure-function tests of the sliding-window expiry helpers
%% (window duration 1000 in all cases; third arg is "now").
expire_test() ->
	IP = {1,2,3,4},
	?assertEqual([], ?M:expire_and_get_requests(IP, #{}, 1000, 1)),
	?assertEqual([1], ?M:drop_expired([1], 1000, 500)),
	?assertEqual([1], ?M:expire_and_get_requests(IP, #{IP => [1]}, 1000, 500)),
	?assertEqual([1, 500], ?M:expire_and_get_requests(IP, #{IP => [1, 500]}, 1000, 501)),
	?assertEqual([500, 501], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1100)),
	?assertEqual([500, 501], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1499)),
	?assertEqual([501], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1500)),
	?assertEqual([], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1501)),
	ok.

%% add_and_order_timestamps/2 inserts keeping ascending order.
add_and_order_test() ->
	?assertEqual([5], ?M:add_and_order_timestamps(5, [])),
	?assertEqual([1,2,3,4,5], ?M:add_and_order_timestamps(5, [1,2,3,4])),
	?assertEqual([1,2,3,4,5,6,7], ?M:add_and_order_timestamps(5, [1,2,3,4,6,7])),
	?assertEqual([5,7,8], ?M:add_and_order_timestamps(5, [7,8])),
	ok.
%% cleanup_expired_sliding_peers/3 drops expired timestamps and removes
%% peers whose timestamp lists become empty.
cleanup_timestamps_map_test() ->
	IP1 = {1,2,3,4},
	IP2 = {2,3,4,5},
	?assertEqual(
		#{IP1 => [1],
			IP2 => [500]
		},
		?M:cleanup_expired_sliding_peers(
			#{IP1 => [1], IP2 => [500]}, 1000, 501)),
	?assertEqual(
		#{%%IP1 => [1], - removed
			IP2 => [500]
		},
		?M:cleanup_expired_sliding_peers(
			#{IP1 => [1], IP2 => [500]}, 1000, 1100)),
	Empty = ?M:cleanup_expired_sliding_peers(
		#{IP1 => [1], IP2 => [500]}, 1000, 2100),
	%% Now it's empty
	?assertEqual(0, maps:size(Empty)),
	ok.

%% Fixture setup: back the mocked clock with a public ETS table, mock the
%% prometheus counters, replace arweave_limiter_time:ts_now/0 with an ETS
%% lookup, then start a limiter group under the given Config.
setup(Config) ->
	?TABLE = ets:new(?TABLE, [named_table, public]),
	?setTsMock(0),
	{module, arweave_limiter_time} = code:ensure_loaded(arweave_limiter_time),
	ok = meck:new(prometheus_counter, [passthrough]),
	ok = meck:expect(prometheus_counter, inc, 2, ok),
	ok = meck:expect(prometheus_counter, inc, 3, ok),
	ok = meck:new(arweave_limiter_time, []),
	ok = meck:expect(arweave_limiter_time, ts_now,
		fun() ->
			[{?KEY, Value}] = ets:lookup(?TABLE, ?KEY),
			Value
		end),
	%% sanity check: the mock starts at the 0 written above
	0 = arweave_limiter_time:ts_now(),
	{ok, LimiterPid} = ?M:start_link(?TEST_LIMITER, Config),
	LimiterPid.

%% Fixture teardown: validate and unload the mocks, stop the limiter,
%% drop the clock table.
cleanup(_Config, _LimiterPid) ->
	true = meck:validate(arweave_limiter_time),
	true = meck:validate(prometheus_counter),
	ok = meck:unload([prometheus_counter, arweave_limiter_time]),
	?M:stop(?TEST_LIMITER),
	true = ets:delete(?TABLE),
	ok.
%% eunit fixture: each pair is {limiter config, test}. The tick intervals
%% (leaky_tick_ms, timestamp_cleanup_tick_ms) are set very high so ticks
%% only happen when a test sends them to the limiter process manually.
rate_limiter_process_test_() ->
	{foreachx,
		fun setup/1,
		fun cleanup/2,
		[{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 0,
				concurrency_limit => 5, sliding_window_limit => 2,
				sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000,
				leaky_tick_ms => 100000},
			fun(_Config, _LimiterPid) -> {"sliding test", fun simple_sliding_happy/0} end},
		{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 5,
				concurrency_limit => 2, sliding_window_limit => 0,
				sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000,
				leaky_tick_ms => 100000},
			fun simple_leaky_happy_path/2},
		{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 5,
				concurrency_limit => 2, sliding_window_limit => 0,
				sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000,
				leaky_tick_ms => 100000},
			fun rate_limiter_rejected_due_concurrency/2},
		{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 2,
				concurrency_limit => 5, sliding_window_limit => 0,
				sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000,
				leaky_tick_ms => 100000},
			fun rejected_due_leaky_rate/2},
		{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 1,
				concurrency_limit => 10, sliding_window_limit => 1,
				sliding_window_duration => 100000, leaky_tick_ms => 10000000,
				timestamp_cleanup_expiry => 1000, timestamp_cleanup_tick_ms => 1000000},
			fun both_exhausted/2},
		{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 1,
				concurrency_limit => 2, sliding_window_limit => 1,
				sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000,
				leaky_tick_ms => 100000},
			fun peer_cleanup/2},
		{#{id => ?TEST_LIMITER, tick_reduction => 1, leaky_rate_limit => 5,
				concurrency_limit => 10, sliding_window_limit => 0,
				sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000,
				leaky_tick_ms => 100000},
			fun leaky_manual_reduction/2},
		{#{id => ?TEST_LIMITER, is_manual_reduction_disabled => true,
				tick_reduction => 1, leaky_rate_limit => 5, concurrency_limit => 10,
				sliding_window_limit => 0, sliding_window_duration => 1000,
				timestamp_cleanup_expiry => 1000, leaky_tick_ms => 100000},
			fun leaky_manual_reduction_disabled/2}
	]}.

%% Sliding-window path only (leaky_rate_limit = 0): two requests fit the
%% window of 2, the third in the same window is rejected with rate_limit.
%% Note: IP is bound before the map patterns, so #{IP := ...} matches on
%% that exact key.
simple_sliding_happy() ->
	IP = {1,2,3,4},
	Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 1),
	Caller1 ! done,
	timer:sleep(100),
	Info1 = ?M:info(?TEST_LIMITER),
	?assertMatch(#{sliding_timestamps := #{IP := [1]}}, Info1),
	#{concurrent_requests := ConcurrentReqs1} = Info1,
	?assertEqual(0, maps:size(ConcurrentReqs1)),
	Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 500),
	Caller2 ! done,
	timer:sleep(100),
	Info2 = ?M:info(?TEST_LIMITER),
	?assertMatch(#{sliding_timestamps := #{IP := [1,500]}}, Info2),
	#{concurrent_requests := ConcurrentReqs2} = Info2,
	?assertEqual(0, maps:size(ConcurrentReqs2)),
	Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 2000),
	Caller3 ! done,
	timer:sleep(100),
	%% 2 previous ts expired due to the time elapsed.
	?assertMatch(#{sliding_timestamps := #{IP := [2000]}}, ?M:info(?TEST_LIMITER)),
	Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 2001),
	Caller4 ! done,
	timer:sleep(100),
	?assertMatch(#{sliding_timestamps := #{IP := [2000, 2001]}}, ?M:info(?TEST_LIMITER)),
	Caller5 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {reject, rate_limit, _} , IP, 2002),
	Caller5 ! done,
	timer:sleep(100),
	%% Wait a bit for surely have request processed, and observe, no new timestamp
	?assertMatch(#{sliding_timestamps := #{IP := [2000, 2001]}}, ?M:info(?TEST_LIMITER)),
	ok.
%% Leaky-bucket path only (sliding_window_limit = 0): tokens accumulate per
%% request and drain one per manual tick; the peer entry is removed one tick
%% after its token count reaches 0.
simple_leaky_happy_path(_Config, LimiterPid) ->
	{"Leaky happy path", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
		IP = {1,2,3,4},
		%% init state, the ip is not blocked
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 0),
		timer:sleep(20),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 2),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		Caller1 ! done,
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{IP := [_]},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		Caller2 ! done,
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% Keys deleted
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger a tick.
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger a tick.
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger a tick.
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		%% Key only deleted from leaky_tokens map, when it reached 0 in the previous tick
		#{concurrent_requests := ConcurrentReqs,
			leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
		?assertEqual(0, maps:size(ConcurrentReqs)),
		?assertMatch(0, maps:size(LeakyTokens)),
		ok
	end}.
%% concurrency_limit = 2: the third simultaneous request is rejected with
%% 'concurrency'; after one handler finishes, registration succeeds again.
rate_limiter_rejected_due_concurrency(_Config, LimiterPid) ->
	{"rejected due concurrency", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
		%% init state, the ip is not blocked
		IP = {1,2,3,4},
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, -1),
		timer:sleep(120),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 10),
		timer:sleep(120),
		Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {reject, concurrency, _Data}, IP, 10),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		Caller1 ! done,
		Caller2 ! done,
		Caller3 ! done,
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% Keys deleted
		%% NOTE: concurrent_requests := #{} matches to any map, so we don't what's in there.
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger a tick.
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
		%% Concurrency reduced, one handler terminated, will register again
		Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 0),
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		Caller4 ! done,
		%% Keys deleted
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger two ticks.
		LimiterPid ! {tick, leaky_bucket_reduction},
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{},
			leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger a tick.
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		%% Key only deleted from leaky_tokens map, when it reached 0 in the previous tick
		#{concurrent_requests := ConcurrentReqs,
			leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
		?assertEqual(0, maps:size(ConcurrentReqs)),
		?assertEqual(0, maps:size(LeakyTokens)),
		ok
	end}.

%% leaky_rate_limit = 2: the third request while the bucket is full is
%% rejected with 'rate_limit'; a tick frees one token and allows another.
rejected_due_leaky_rate(_Config, LimiterPid) ->
	{"rejected due leaky rate", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
		%% init state, the ip is not blocked
		IP = {1,2,3,4},
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 1),
		timer:sleep(20),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 2),
		timer:sleep(20),
		Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {reject, rate_limit, _Data}, IP, 3),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		%% 2 concurrent, 2 token
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		%% Simulate a tick
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% 2 concurrent, but tokens reduced.
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
		%% Tokens reduced, will register again
		Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 10),
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% 3 concurrent, 2 tokens
		?assertMatch(#{concurrent_requests := #{IP := [_,_,_]},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		%% manually trigger two ticks.
		LimiterPid ! {tick, leaky_bucket_reduction},
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{IP := [_,_,_]},
			leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
		%% Clean up
		Caller1 ! done,
		Caller2 ! done,
		Caller3 ! done,
		Caller4 ! done,
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% Key only deleted from leaky_tokens map, when it reached 0 in the previous tick
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		#{concurrent_requests := ConcurrentReqs,
			leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
		?assertEqual(0, maps:size(ConcurrentReqs)),
		?assertEqual(0, maps:size(LeakyTokens)),
		ok
	end}.

%% Both limiting mechanisms active (sliding_window_limit = 1,
%% leaky_rate_limit = 1): the first request goes through the sliding window,
%% the second through the leaky bucket, the third is rejected.
both_exhausted(_Config, LimiterPid) ->
	{"Both exhausted", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
		IP = {1,2,3,4},
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, -1),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		%% 1 concurrent, 0 token
		?assertMatch(#{concurrent_requests := #{IP := [_]},
			sliding_timestamps := #{IP := [_]},
			leaky_tokens := #{}}, ?M:info(?TEST_LIMITER)),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% 2 concurrent, but tokens reduced.
		Info = ?M:info(?TEST_LIMITER),
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			sliding_timestamps := #{IP := [_]},
			leaky_tokens := #{IP := 1}}, Info),
		Caller3 = ?assertHandlerRegisterOrRejectCall(
			?TEST_LIMITER, {reject, rate_limit, _Data}, IP, 130),
		%% Tokens reduced, will register again
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% 2 concurrent, 1 token
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			sliding_timestamps := #{IP := [_]},
			leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
		%% Clean up
		Caller1 ! done,
		Caller2 ! done,
		Caller3 ! done,
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% Key only deleted from leaky_tokens map, when it reached 0 in the previous tick
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		timer:sleep(100),
		#{concurrent_requests := ConcurrentReqs,
			leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
		?assertEqual(0, maps:size(ConcurrentReqs)),
		?assertEqual(0, maps:size(LeakyTokens)),
		ok
	end}.

%% Sliding-window timestamps survive leaky-token cleanup and are only
%% removed by the sliding_window_timestamp_cleanup tick once expired.
peer_cleanup(_Config, LimiterPid) ->
	{"Peer cleanup", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
		%% init state, the ip is not blocked
		IP = {1,2,3,4},
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 1),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		%% 2 concurrent, 2 token
		?assertMatch(#{concurrent_requests := #{IP := [_]},
			sliding_timestamps := #{IP := [_]},
			leaky_tokens := #{}}, ?M:info(?TEST_LIMITER)),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% 2 concurrent, but tokens reduced.
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			sliding_timestamps := #{IP := [_]},
			leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
		%% further requests are rejected
		Caller3 = ?assertHandlerRegisterOrRejectCall(
			?TEST_LIMITER, {reject, concurrency, _Data}, IP, 300),
		%% Tokens reduced, will register again
		%% wait a tiny bit so the logic surely runs.
		timer:sleep(100),
		%% 2 concurrent, 1 token
		?assertMatch(#{concurrent_requests := #{IP := [_,_]},
			sliding_timestamps := #{IP := [_]},
			leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
		%% Clean up
		Caller1 ! done,
		Caller2 ! done,
		Caller3 ! done,
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% Key only deleted from leaky_tokens map, when it reached 0 in the previous tick
		LimiterPid ! {tick, leaky_bucket_reduction},
		%% wait a tiny bit so the tick logic surely runs.
		%% Now we still have timestamps for IP1 in the state.
		timer:sleep(100),
		#{concurrent_requests := ConcurrentReqs,
			sliding_timestamps := SlidingTimestamps,
			leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
		?assertEqual(0, maps:size(ConcurrentReqs)),
		?assertEqual(1, maps:size(SlidingTimestamps)),
		?assertEqual(0, maps:size(LeakyTokens)),
		?setTsMock(20000),
		timer:sleep(500),
		%% Trigger timestamp cleanup.
		LimiterPid ! {tick, sliding_window_timestamp_cleanup},
		%% wait a tiny bit so the tick logic surely runs.
		%% Now we should have all cleaned up.
		timer:sleep(100),
		#{concurrent_requests := ConcurrentReqs,
			sliding_timestamps := SlidingTimestamps2,
			leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
		?assertEqual(0, maps:size(ConcurrentReqs)),
		?assertEqual(0, maps:size(SlidingTimestamps2)),
		?assertEqual(0, maps:size(LeakyTokens)),
		ok
	end}.

%% reduce_for_peer/2 decrements a peer's leaky tokens down to 0 and is a
%% no-op for unknown peers and for peers already at 0.
leaky_manual_reduction(_Config, _LimiterPid) ->
	{"Leaky tokens manual peer reduction", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
		%% init state, the ip is not blocked
		IP = {1,2,3,4},
		NonRecordedIP = {2,3,4,5,1984},
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 1),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
		Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 40),
		Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 60),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		%% 2 concurrent, 2 token
		?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
			leaky_tokens := #{IP := 4}}, ?M:info(?TEST_LIMITER)),
		?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		%% call for one that's surely not in the state
		?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, NonRecordedIP)),
		%% 2 concurrent, but tokens reduced.
		?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
			leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
		?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		%% 4 concurrent, but tokens reduced.
		?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
			leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
		?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		%% 4 concurrent, no change, there is nothing to reduce beyond 0
		?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
			leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
		%% Clean up
		Caller1 ! done,
		Caller2 ! done,
		Caller3 ! done,
		Caller4 ! done,
		ok
	end}.

%% With is_manual_reduction_disabled => true, reduce_for_peer/2 returns
%% 'disabled' and leaves the tokens untouched.
leaky_manual_reduction_disabled(_Config, _LimiterPid) ->
	{"Leaky tokens manual peer reduction", fun() ->
		?assertMatch(#{is_manual_reduction_disabled := true}, ?M:config(?TEST_LIMITER)),
		%% init state, the ip is not blocked
		IP = {1,2,3,4},
		Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 1),
		Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
		Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 40),
		Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 60),
		%% wait a bit so they are surely started.
		timer:sleep(100),
		?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
			leaky_tokens := #{IP := 4}}, ?M:info(?TEST_LIMITER)),
		?assertEqual(disabled, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		%% Didn't reduce anything
		?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
			leaky_tokens := #{IP := 4}}, ?M:info(?TEST_LIMITER)),
		%% We can repeat this, but still disabled
		?assertEqual(disabled, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
		%% Clean up
		Caller1 ! done,
		Caller2 ! done,
		Caller3 ! done,
		Caller4 ! done,
		ok
	end}.
================================================
FILE: apps/arweave_limiter/test/arweave_limiter_metrics_collector_tests.erl
================================================
-module(arweave_limiter_metrics_collector_tests).

-include_lib("eunit/include/eunit.hrl").
-include_lib("arweave/include/ar.hrl").

-define(M, arweave_limiter_metrics_collector).
-define(S, arweave_limiter_sup).
-define(L, arweave_limiter).
-define(ME, arweave_limiter_metrics).
-define(GENERAL, general_test).
-define(METRICS, metrics_test).

%% Very similar but not identical to ar_limiter_tests macro
%% Spawn a linked handler process that registers with the limiter, asserts
%% the reply matches Pattern and stays alive until it is sent 'done'.
-define(assertHandlerRegisterOrRejectCall(LimiterRef, Pattern, Peer),
	((fun () ->
		spawn_link(fun() ->
			?assertMatch(
				Pattern,
				?L:register_or_reject_call(LimiterRef, Peer)),
			receive done -> ok end
		end)
	end)())).

%% Start two dedicated limiter groups with fixed limits under the limiter
%% supervisor, so the tests do not depend on the node's configuration.
do_setup() ->
	%% It would be tempting to just use what the node has started already,
	%% but we need to start new limiters to control the config, and make
	%% sure these tests don't break with only config change.
	%% It is especially important to increase the interval for the tests.
	%% BUG FIX: the tick interval key the limiter group reads is
	%% 'leaky_tick_ms' (see arweave_limiter_sup:get_limiter_config/0 and
	%% arweave_limiter_group_tests); the previous 'leaky_tick_interval_ms'
	%% key was silently ignored, so the interval was never increased.
	Configs = [#{id => ?GENERAL,
			leaky_rate_limit => 50,
			concurrency_limit => 150,
			sliding_window_limit => 100,
			leaky_tick_ms => 1000000},
		#{id => ?METRICS,
			leaky_rate_limit => 50,
			concurrency_limit => 150,
			sliding_window_limit => 100,
			leaky_tick_ms => 1000000}
	],
	LimiterIds = lists:map(fun(Config) ->
		{ok, _LimPid} = supervisor:start_child(?S, ?S:child_spec(Config)),
		maps:get(id, Config)
	end, Configs),
	{LimiterIds, []}.

%% Like do_setup/0, but additionally registers 150 concurrent requests for
%% each of 1000 distinct peers with the ?GENERAL limiter.
do_setup_with_data() ->
	{LimiterIds, _Callers} = do_setup(),
	%% Generate IP tuples (up to like 16k peers), but any term can be a peer ID.
	Port = 1984,
	IPs = [{1,2,X div 128, X rem 128, Port} || X <- lists:seq(1, 1000)],
	Callers = lists:foldl(fun(IP, Acc) ->
		Acc ++ [?assertHandlerRegisterOrRejectCall(?GENERAL, {register, _}, IP)
			|| _ <- lists:seq(1,150)]
	end, [], IPs),
	timer:sleep(500),
	{LimiterIds, Callers},

cleanup({LimiterIds, Callers}) ->
	[Caller !
done || Caller <- Callers], timer:sleep(150), ok = lists:foreach(fun(Id) -> supervisor:terminate_child(?S, Id), supervisor:delete_child(?S, Id), ?debugFmt(">>> Terminated and deleted limiter: ~p ~n", [Id]) end, LimiterIds), ok. empty_limiters_sanity_check_test_() -> { setup, fun do_setup/0, fun cleanup/1, fun({_Sup, _Callers}) -> [fun() -> ?assertMatch( [{ar_limiter_tracked_items_total,gauge, "tracked requests, timestamps, leaky tokens", _}, {ar_limiter_peers,gauge, "The number of peers the limiter is monitoring currently", _}], ?M:metrics()) end] end }. rate_limiter_happy_path_sanity_check_test_() -> { setup, fun do_setup_with_data/0, fun cleanup/1, fun({_Sup, _Callers}) -> [fun() -> ?assertMatch( [{ar_limiter_tracked_items_total,gauge, "tracked requests, timestamps, leaky tokens", _}, {ar_limiter_peers,gauge, "The number of peers the limiter is monitoring currently", _}], ?M:metrics()), Info = arweave_limiter_group:info(?GENERAL), ?assertMatch( [ {[{limiter_id, ?GENERAL}, {limiting_type, concurrency}], 150*1000}, {[{limiter_id, ?GENERAL}, {limiting_type, leaky_bucket_tokens}], 1000}, {[{limiter_id, ?GENERAL}, {limiting_type, sliding_window_timestamps}], 100*1000} ], ?M:tracked_items([{?GENERAL, Info}])), ?assertMatch( [ {[{limiter_id, ?GENERAL}, {limiting_type, concurrency}], 1000}, {[{limiter_id, ?GENERAL}, {limiting_type, leaky_bucket_tokens}], 1000}, {[{limiter_id, ?GENERAL}, {limiting_type, sliding_window_timestamps}], 1000} ], ?M:peers([{?GENERAL, Info}])) end] end}. 
================================================ FILE: apps/randomx_square_latency_tester/.gitignore ================================================ *.o main ================================================ FILE: apps/randomx_square_latency_tester/Makefile ================================================ # Compiler CXX = g++ # Include directories INCLUDES = -I../arweave/c_src/randomx -I ../arweave/lib/RandomX/src # Compiler Flags CXXFLAGS = -msse4.2 -mavx2 -Wall -O2 $(INCLUDES) # Linker Flags LDFLAGS = -L/usr/local/lib LDLIBS = -lssl -lcrypto # Source files in ../arweave/c_src/randomx DEPS_SOURCES := $(wildcard ../arweave/c_src/randomx/*.cpp) DEPS_OBJECTS := $(patsubst ../arweave/c_src/randomx/%.cpp,%.o,$(DEPS_SOURCES)) # Local main.cpp MAIN_SOURCES := main.cpp MAIN_OBJECTS := $(patsubst %.cpp,%.o,$(MAIN_SOURCES)) # All object files OBJECTS := $(DEPS_OBJECTS) $(MAIN_OBJECTS) # Path to RandomX library RANDOMX_LIB = ../arweave/lib/RandomX/build4096/librandomx4096.a # Target executable TARGET = main # Default target all: $(TARGET) # Link object files to create the executable $(TARGET): $(OBJECTS) $(CXX) $(CXXFLAGS) -o $@ $^ $(RANDOMX_LIB) $(LDFLAGS) $(LDLIBS) # Compile source files from ../arweave/c_src/randomx %.o: ../arweave/c_src/randomx/%.cpp $(CXX) $(CXXFLAGS) -c $< -o $@ # Compile local main.cpp %.o: %.cpp $(CXX) $(CXXFLAGS) -c $< -o $@ # Clean up build files clean: rm -f $(OBJECTS) $(TARGET) .PHONY: all clean ================================================ FILE: apps/randomx_square_latency_tester/main.cpp ================================================ #include #include #include #include #include "randomx_squared.h" int main() { return 0; // // Constants // const size_t entropySize = 8 * 1024 * 1024; // 8 MB // const int iterations = 100; // // Allocate memory for entropies // unsigned char* inEntropy = new unsigned char[entropySize]; // unsigned char* keyEntropy = new unsigned char[entropySize]; // unsigned char* outEntropy = new unsigned char[entropySize]; 
// // Seed the random number generator // std::srand(static_cast(std::time(nullptr))); // // Fill entropies with random data // for (size_t i = 0; i < entropySize; ++i) { // inEntropy[i] = std::rand() % 256; // keyEntropy[i] = std::rand() % 256; // } // // Variables to store elapsed time // std::chrono::duration elapsedFeistelShaFull(0); // std::chrono::duration elapsedFeistelAesFull(0); // std::chrono::duration elapsedFeistelCrc32(0); // std::chrono::duration elapsedCrc32(0); // std::chrono::duration elapsedFcrc32w(0); // std::chrono::duration elapsedLcgMmix(0); // std::chrono::duration elapsedSimdLcg(0); // // Benchmark packing_mix_entropy_feistel_sha_full // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_feistel_sha_full(inEntropy, keyEntropy, outEntropy, entropySize); // } // auto endTime = std::chrono::high_resolution_clock::now(); // elapsedFeistelShaFull = endTime - startTime; // } // // Benchmark packing_mix_entropy_feistel_aes_full // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_feistel_aes_full(inEntropy, keyEntropy, outEntropy, entropySize); // } // auto endTime = std::chrono::high_resolution_clock::now(); // elapsedFeistelAesFull = endTime - startTime; // } // // Benchmark packing_mix_entropy_feistel_crc32 // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_feistel_crc32(inEntropy, keyEntropy, outEntropy, entropySize); // } 
// auto endTime = std::chrono::high_resolution_clock::now(); // elapsedFeistelCrc32 = endTime - startTime; // } // // Benchmark packing_mix_entropy_fcrc32w // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_fcrc32w(inEntropy, outEntropy, entropySize); // } // auto endTime = std::chrono::high_resolution_clock::now(); // elapsedFcrc32w = endTime - startTime; // } // // Benchmark packing_mix_entropy_crc32 // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_crc32(inEntropy, outEntropy, entropySize); // } // auto endTime = std::chrono::high_resolution_clock::now(); // elapsedCrc32 = endTime - startTime; // } // // Benchmark packing_mix_entropy_lcg_mmix // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_lcg_mmix(inEntropy, outEntropy, entropySize); // } // auto endTime = std::chrono::high_resolution_clock::now(); // elapsedLcgMmix = endTime - startTime; // } // // Benchmark packing_mix_entropy_simd_lcg // { // auto startTime = std::chrono::high_resolution_clock::now(); // for (int iter = 0; iter < iterations; ++iter) { // // Modify the first byte of inEntropy // inEntropy[0] = static_cast((inEntropy[0] + 1) % 256); // // Call the function // packing_mix_entropy_simd_lcg(inEntropy, outEntropy, entropySize); // } // auto endTime = std::chrono::high_resolution_clock::now(); // elapsedSimdLcg = endTime - startTime; // } // // Output results // 
std::cout << "Benchmark results for " << iterations << " iterations on " // << (entropySize / (1024 * 1024)) << " MB of data:\n\n"; // std::cout << "1. packing_mix_entropy_feistel_sha_full:\n"; // std::cout << " Total time: " << elapsedFeistelShaFull.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedFeistelShaFull.count() / iterations) << " seconds.\n\n"; // std::cout << "2. packing_mix_entropy_feistel_aes_full:\n"; // std::cout << " Total time: " << elapsedFeistelAesFull.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedFeistelAesFull.count() / iterations) << " seconds.\n\n"; // std::cout << "3. packing_mix_entropy_feistel_crc32:\n"; // std::cout << " Total time: " << elapsedFeistelCrc32.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedFeistelCrc32.count() / iterations) << " seconds.\n\n"; // std::cout << "4. packing_mix_entropy_crc32:\n"; // std::cout << " Total time: " << elapsedCrc32.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedCrc32.count() / iterations) << " seconds.\n\n"; // std::cout << "5. packing_mix_entropy_fcrc32w:\n"; // std::cout << " Total time: " << elapsedFcrc32w.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedFcrc32w.count() / iterations) << " seconds.\n\n"; // std::cout << "6. packing_mix_entropy_lcg_mmix:\n"; // std::cout << " Total time: " << elapsedLcgMmix.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedLcgMmix.count() / iterations) << " seconds.\n\n"; // std::cout << "7. 
packing_mix_entropy_simd_lcg:\n"; // std::cout << " Total time: " << elapsedSimdLcg.count() << " seconds.\n"; // std::cout << " Average time per iteration: " << (elapsedSimdLcg.count() / iterations) << " seconds.\n\n"; // // Clean up allocated memory // delete[] inEntropy; // delete[] keyEntropy; // delete[] outEntropy; // return 0; } ================================================ FILE: ar-rebar3 ================================================ #!/bin/bash set -e set -x if [ $# -ne 2 ] then echo "ar-rebar3 " exit 1 fi SYSTEM=$(uname -s) SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" PROFILE=$1 COMMAND=$2 OVERLAY_TARGET="${SCRIPT_DIR}/_vars.config" # helper function to create erlang like value from the shell # and stored them into a specific file. create_overlay_var() { local target="${OVERLAY_TARGET}" local name="${1}" local value="$(eval ${2} 2>/dev/null || echo undefined)" if ! echo "${name}" | grep -E '^[a-z]+[0-9A-Za-z_]+$' >/dev/null then echo "invalid variable ${name}" 1>&2 return 1 fi if ! echo "${value}" | grep -E '^[[:print:]]+$' >/dev/null then echo "invalid value ${value}" 1>&2 return 1 fi if test -e "${target}" then printf '{%s, "%s"}.\n' "${name}" "${value}" >> "${target}" return 0 fi printf '{%s, "%s"}.\n' "${name}" "${value}" > "${target}" return 0 } # create variables required for the overlay, it will contain # various information regarding the build and will be # hardcoded in the final release. rebar3_overlay_variables() { echo "Crafting overlay variables..." if test "${SYSTEM}" = "Linux" then create_overlay_var git_rev "git rev-parse HEAD" create_overlay_var datetime "date -u '+%Y-%m-%dT%H:%M:%SZ'" create_overlay_var cc_version "cc --version | head -n1" create_overlay_var gmake_version "gmake --version | head -n1" create_overlay_var cmake_version "cmake --version | head -n1" else touch $OVERLAY_TARGET fi } # remove old artifacts that must be recreated everytime. 
# The lib and releases symlinks are removed here to prevent infinite loops # when VSCode extensions (like Erlang LS) traverse the project. # bin/arweave recreates them on-demand when running the application. rebar3_clean_artifacts() { echo Removing build artifacts... rm -vf "_vars.config" rm -vf "${SCRIPT_DIR}/lib" rm -vf "${SCRIPT_DIR}/releases" } # execute rebar3 using the profile and the command previously # configured rebar3_invocation() { echo "Executing rebar3 as ${PROFILE} ${COMMAND}" ${SCRIPT_DIR}/rebar3 as ${PROFILE} ${COMMAND} } # create artifacts required to run the code locally, only useful # in case of release. rebar3_create_artifacts() { echo Copying and linking build artifacts } ###################################################################### # main script ###################################################################### rebar3_clean_artifacts rebar3_overlay_variables rebar3_invocation if [ "${COMMAND}" = "release" ] then RELEASE_PATH=$(${SCRIPT_DIR}/rebar3 as ${ARWEAVE_BUILD_TARGET:-default} path --rel) rebar3_create_artifacts fi ================================================ FILE: arweave-server ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR="$(dirname "$0")" export ARWEAVE_DEV=1 "$SCRIPT_DIR/bin/start" "$@" ================================================ FILE: arweave_styleguide.md ================================================ # Arweave code style The main development language of the Arweave client is Erlang, and as the number of developers of the project continues to grow this style guide will act as a means of keeping the codebase clean and comprehensible. ## Code comprehensibility ### Module header comments Each module should have a simplistic comment at the top that encompasses and describes the set of functions that can be found within it. Module description comments should be prefixed with '%%%' . ```erlang %% Example: head of ar_serialize. -module(ar_serialize). 
-export([full_block_to_json_struct/1, block_to_json_struct/1, ...]). -export([tx_to_json_struct/1, json_struct_to_tx/1]). -export([wallet_list_to_json_struct/1, block_index_to_json_struct/1, ...]). -export([jsonify/1, dejsonify/1]). -export([query_to_json_struct/1, json_struct_to_query/1]). -include("ar.hrl"). -include_lib("eunit/include/eunit.hrl"). %%% Module containing serialisation/deserialisation utility functions for use in the HTTP server. ``` ### Function clause comments Function clause comments should be placed above the header. Every function should have a comment describing its purpose, unless the function signature explains it well enough. Function comments should not include implementation details unless absolutely required, the code itself should be the main conveyor of the specific implementation. It is more important to comment exported functions. A specification (`-spec`) may be used to document the function, as an alternative or an addition to the comment. Function description comments should be prefixed with '%% @doc'. ```erlang %% Example %% @doc Takes a list containing tx records and returns the number of those 'not_found'. count_unavailable_txs(TXList) -> length([TX || TX <- TXList, TX == not_found]). ``` ### Sparing use of comments inside function bodies Comments should only need to be used inside functions if the code being described has high complexity and without description would take reasonable time to trace and understand. If the written code does have high complexity consider if descriptive variable names, code abstraction and or basic refactoring could improve the comprehensibility before resorting to commenting the code block. ```erlang %% Bad sign_verify_test(Keypair) -> % Deconstructs keypair into two separate terms, Pub and Priv. {Priv, Pub} = Keypair, % Generates an integer between 1 and 100 to sign and then verify. Data = floor(rand:uniform() * 100), % Sign the data generated above. 
SignedData = sign(Priv, Data), % Verify the signed data. verify(Pub, SignedData). %% Good sign_verify_test({Priv, Pub}) -> Data = floor(rand:uniform() * 100), SignedData = sign(Priv, Data), verify(Pub, SignedData). ``` ### Use the minimal descriptive words for function names Function names should be descriptive enough to explain the high-level purpose of the function whilst remaining short enough as to not hinder the code readability. The name of the module can sometimes be used to help increase clarity without increasing the wordiness of the function name. ```erlang %% Bad function names ar_tx:generate_data_segment_for_signing(TX). ar_util:pretty_print_internal_ip_representation(IPAddr). ar_retarget:is_current_block_retarget_block(Block). %% Good function names ar_tx:to_binary(TX). ar_block:generate_block_from_shadow(BShadow). ar_serialize:block_to_json_struct(Block). ``` ### Tests should be defined at the tail of the module Tests should be defined as the last thing present within a module and should be prefixed with the following comment. ```erlang % Tests: {module name} ``` ### Maximum of eighty characters per line A maximum of eighty characters should be present on any singular line. To help enforce this styling consider using a ruler, most extensible editors will have this functionality by default or a simple plugin should be available to help. ### Do not use if The `true ->` subclause of the `if` clause is confusing because `true` suggests the `if` expression evaluates to `true`, while the clause is executed when the expression is false. Use `case` instead. ### Try to avoid deeply nested code Deeply nested code should be avoided as it can mask a large set of alternative code paths and can become very difficult to debug. Code that uses case or receives structures should aim for a singular level of nesting and at most two levels of depth. 
```erlang %% Bad contains_data_tx([]) -> false; contains_data_tx(TXList) -> [TX|Rest] = TXList, case is_record(TX, tx) of true -> case byte_size(TX#tx.data) > 0 of true -> true; false -> contains_data_tx(Rest) end; false -> error_not_tx. end. %% Better contains_data_tx([]) -> false; contains_data_tx([TX|Rest]) when is_record(TX, tx) -> case byte_size(TX#tx.data) > 0 of true -> true; false -> contains_data_tx(Rest) end; contains_data_tx(_) -> error_not_tx. ``` ### Deconstruct arguments in the function header The maximum number of variables should be deconstructed within the function clause header and not the clause body. This makes the arguments to the function explicit and helps debugging as should the wrong form of data be provided no matching function clause will be found. ```erlang %% Bad server(State, Keypair) -> Keypair = {Priv, Pub}, State#state { peers = Peers, heard = HeardMsg, ignored = IgnoredMsg }, ... %% Good server(State#state { peers = Peers, heard = HeardMsg, ignored = IgnoredMsg }, {Priv, Pub}) -> ... ``` ### Atoms should be lowercase and separated by underscores For easy recognisability the Arweave codebase uses descriptive lowercase atoms where multiple words are separated by the underscore character. ```erlang %% Bad atoms 'iAtom' 'block not found' %% Good atoms unavailable block_not_found ``` ### Record definitions should include descriptions of fields When a new record is defined the information regarding the purpose of each field should be included in a comment inline with the field it pertains to. These comments should be prefixed with a singular '%' character and aim to be as concise as possible. ```erlang -record(tx, { id = <<>>, % TX UID (Hash of signature) last_tx = <<>>, % Wallets last TX hash. owner = <<>>, % Public key of transaction owner. tags = [], % Indexable TX category identifiers. target = <<>>, % Wallet address of target of the tx. quantity = 0, % Amount of Winston to send data = <<>>, % Data body (if data transaction). 
signature = <<>>, % Transaction signature. reward = 0 % Transaction mining reward. }). ``` ### Redundant or deprecated code should be removed Should existing code be made redundant with the implementation of new developments, this old code should be removed. It should not be left cluttering the code base as either code or comment. If reference to these old implementation details is still required they will remain present in the project repositories version control. ### Modules should export the minimal number of functions Modules should export the minimum number of functions in which are externally required and these exports should be logically ordered. This helps show the interface that the module exposes. Via looking at the exports other engineers should be able to identify which functions they need understand and are available for external use. ```erlang %% Bad function exporting -compile(export_all). %% Good function exporting -export([sign/2, verify/3]). -export([to_address/1]). ``` ### Variable names should be descriptive of the data they contain Variable names should be descriptive of the data in which they are representing. This helps in understanding the purpose of a codeblock without the need for verbose comments detailing its purpose. ```erlang %% Bad variables sign_data(X, Y) -> {A, B} = X, sign(A, Y). %% Good variables sign_data(Keypair, Data) -> {Priv, Pub} = Keypair, sign(Priv, Data). ``` ### Use ar:console/1-2 to present information to the end user Writing to the Erlang console is done via the `ar:console/1-2` functions. This will also be written to the log file. ```erlang ar:console("Started mining on block height ~B", [Height]), ``` ```erlang ar:console( [ node_joined_successfully, {height, NewB#block.height} ] ), ``` ### Use `ar:info/1-2`, `ar:warn/1-2`, `ar:err/1-2` to generate log entries. All three types of messages will be written to the one and only log file. Note! Errors (generated by `ar:err/1-2`) will also be displayed in the console. 
```erlang ar:warn("Could not retrieve current block. Will retry in ~B seconds", [?REJOIN_TIMEOUT]), ``` ```erlang ar:err( [ node_not_joining, {reason, cannot_get_full_block_from_peer}, {received_instead, NewB} ] ), ``` ### Don't log huge messages Avoid logging huge messages. Truncate arguments, e.g. with `~P` like this: ```erlang ar:warn("Invalid Block Hash List: ~P", [BI, 100]), ``` ### Tuple construction/deconstruction When constructing or deconstructing tuples ensure a space between each comma separated element. ```erlang %% Bad { one,two,three, A, B, C} %% Good {one, two, three, A, B, C} ``` ### List deconstruction When deconstructing a list into head and tail ensure that a space is placed on either side of the '|' separation character. ```erlang %% Bad [Head|Tail] %% Good [Head | Tail] ``` ### Record construction/deconstruction When constructing or deconstructing a record ensure that a space is placed on either side of each field being handled. ```erlang %% Bad State#state {first="hello", second="world"} %% Good State#state { first = "hello", second = "world" }, ``` ### Function arguments on new lines If the arguments for a given function call exceed the previously stated line length limit (80 characters) or contain an inline function split the arguments each on to new lines. ```erlang %% Bad example() -> TotalTime = lists:foldl(fun(X, Acc) -> X + Acc end, 0, [12, 15, 8, 21, 35, 33, 14]), ... %% Better example() -> TotalTime = lists:foldl( fun(X, Acc) -> X + Acc end, 0, [12, 15, 8, 21, 35, 33, 14] ), ... ``` ## Error handling Functions with side effects (in Erlang it boils down to IO) should return an `{ok, ...}` tuple upon successful execution, and `error_code` or `{error_code, ...}` otherwise. When invoking functions with side effects, failing fast by only pattern matching against `{ok, ...}` is encouraged. In rare cases when even unexpected failures have to be processed, like in the HTTP event loop, `try/catch` may be used. 
## Put tests for the module X into X_tests.erl It is usually very difficult to separate tests from the actual code in the search results unless tests reside in the dedicated files. For instance, using separate files for tests makes it easier to see in how many places a particular function is used. ## Version control The Arweave client codebase is hosted on Github, the below standards define the criteria for committed code. We aim to adhere to these standards as to make it as easy possible for new contributors to get involved. ### All committed code must be commented All committed code should be fully commented and should aim to fit the styling as detailed in this document. Committing uncommented code is unhelpful to all those maintaining or exploring the project. ### Code pushed to master must work All code committed to the master branch of the Arweave project should be fully functioning. This is a **strict** requirement as this is the prime location of where end users will be obtaining the software to join and participate in the network. ### Commits should aim to be as atomic as possible Code commits should aim to be a single logical change or addition to the codebase, though if not possible all logical alterations should be explained in the commit message, each separated by a comma. ``` - Added generic protocol implementation. - Removed ar_deprecated. - Added block shadows, refactored HTTP iface. ``` ### Commit message syntax To keep the repository clean a set structure for commit messages has been decided. - The first character should be capitalized. - The message should be succinct. - The message should be in the imperative mood. - Multiple actions should be comma separated. ### Commit description In addition to a message, a commit should have a description focusing on why the change was made rather than what was made. 
### Commit example ``` Add arweave style guide Inconsistent styling made it hard for us to view, comprehend, and edit the code so we had a discussion and agreed on the common style. ``` ================================================ FILE: bin/arweave ================================================ #!/usr/bin/env bash ###################################################################### # EXTRA_DIST_ARGS environment variable can be set to set extra VM # arguments. ###################################################################### set -e ###################################################################### # Switch to user or dev mode. bin/arweave should be the script used # only by users, and bin/arweave-dev should be the one used only # for developers. ###################################################################### case ${0##*/} in arweave-dev) export ARWEAVE_DEV=1 ;; esac ###################################################################### # EPMD Configuration. force epmd to listen on loopback interface. ###################################################################### export ERL_EPMD_ADDRESS="${ERL_EPMD_ADDRESS:=127.0.0.1,::1}" export ERL_EPMD_PORT="${ERL_EPMD_PORT:=4369}" # http://erlang.org/doc/man/run_erl.html # If defined, disables input and output flow control for the pty # opend by run_erl. Useful if you want to remove any risk of accidentally # blocking the flow control by using Ctrl-S (instead of Ctrl-D to detach), # which can result in blocking of the entire Beam process, and in the case # of running heart as supervisor even the heart process becomes blocked # when writing log message to terminal, leaving the heart process unable # to do its work. 
RUN_ERL_DISABLE_FLOWCNTRL=${RUN_ERL_DISABLE_FLOWCNTRL:-true} export RUN_ERL_DISABLE_FLOWCNTRL RUN_ERL_LOG_GENERATIONS=${RUN_ERL_LOG_GENERATIONS:-1} export RUN_ERL_LOG_GENERATIONS RUN_ERL_LOG_MAXSIZE=${RUN_ERL_LOG_MAXSIZE:-$((100*1024*1024))} export RUN_ERL_LOG_MAXSIZE RUN_ERL_LOG_ALIVE_MINUTES=${RUN_ERL_LOG_ALIVE_MINUTES:-15} export RUN_ERL_LOG_ALIVE_MINUTES if [ "$TERM" = "dumb" ] || [ -z "$TERM" ]; then export TERM=screen fi # OSX does not support readlink '-f' flag, work # around that # shellcheck disable=SC2039,SC3000-SC4000 case $OSTYPE in darwin*) SCRIPT=$(readlink "$0" || true) ;; *) SCRIPT=$(readlink -f "$0" || true) ;; esac [ -z "$SCRIPT" ] && SCRIPT=$0 export SCRIPT_DIR="$(cd "$(dirname "$SCRIPT")" && pwd -P)" export PARENT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd -P)" export SYSTEM_NAME="$(uname -s)" export RELEASE_ROOT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd -P)" export REBAR_CONFIG="${RELEASE_ROOT_DIR}/rebar.config" export BUILD_DIR="${RELEASE_ROOT_DIR}/_build" # let extract release relx information from rebar.config. # the following erlang code will read/parse the file # and extract the information required. In case of issue # it print an error message and return 1, else 0. extract_release_from_rebar_config() { erl -noshell -eval ' try % extract file from REBAR_CONFIG variable C = case os:getenv("REBAR_CONFIG") of false -> throw("REBAR_CONFIG not set"); VRC -> VRC end, % read/parse rebar.config F = case file:consult(C) of {ok, FC} -> FC; {error, EC} -> throw(EC) end, % extract relx section R = case proplists:get_value(relx, F) of undefined -> throw("relx section not found"); RX -> RX end, % extract release section V = case lists:keyfind(release, 1, R) of M = {release, {_, VX}, _} -> VX; _ -> throw("release not found") end, io:format("~s~n", [V]), erlang:halt(0) catch _:E -> io:format(standard_error, "error: ~p~n", [E]), erlang:halt(255) end. ' return $? 
} # Make the value available to variable substitution calls below # Most of The following variables are usually hardcoded by rebar3 # if they are empty, this means the entry-point is used from # the sources. export REL_NAME="" export REL_VSN="" export RELEASE_NAME="" export RELEASE_VSN="" export RELEASE_GIT_REV="" export RELEASE_DATETIME="" export RELEASE_ERTS="" export RELEASE_CC="" export RELEASE_CMAKE="" export RELEASE_GMAKE="" export ERTS_VSN="" export RELEASE_PROG="${SCRIPT}" # ensure REL_NAME and RELEASE_NAME variables are set # by default, if the script is running from sources, # the release name must be arweave. test -z "${REL_NAME}" && export REL_NAME="arweave" test -z "${RELEASE_NAME}" && export RELEASE_NAME="arweave" # check REL_VSN variable content. This one is quite important # to be able to start arweave. if test -z "${REL_VSN}" then REL_VSN=$(extract_release_from_rebar_config) if test $? -ne 0 then echo "error: failed to read rebar file" 1>&2 exit 1 fi if test -z "${REL_VSN}" then echo "error: no release found" 1>&2 exit 1 fi export REL_VSN export REL_PATH="${BUILD_DIR}/default/rel/${REL_NAME}/${REL_VSN}" export REL_PATH_ALT="${BUILD_DIR}/default/rel/${REL_NAME}/releases/${REL_VSN}" if ! test -e ${REL_PATH} then echo "error: ${REL_PATH} does not exist" 1>&2 if ! test "${ARWEAVE_DEV}" then exit 1 fi fi fi # ROOTDIR is used by the Erlang VM for code:root_dir() and relative path resolution. # Keep it as the project root so config files and logs work correctly. export ROOTDIR="$RELEASE_ROOT_DIR" # Track whether we're running from source (need symlinks) or pre-built release (have real dirs) RUNNING_FROM_SOURCE="" # Function to create symlinks to the build release directory on-demand. # Called after arweave_developer_mode (if any) so symlinks aren't deleted by ar-rebar3. # Only needed when running from source; pre-built releases have real lib/ and releases/ dirs. 
ensure_release_symlinks() {
	# If releases dir already exists as a real directory, we're running a pre-built release
	if [ -d "${RELEASE_ROOT_DIR}/releases/${REL_VSN}" ] && [ ! -L "${RELEASE_ROOT_DIR}/releases" ]; then
		RUNNING_FROM_SOURCE=""
		return
	fi
	# Running from source - create symlinks to the build directory
	RUNNING_FROM_SOURCE="1"
	RELEASE_BUILD_DIR="${BUILD_DIR}/default/rel/${REL_NAME}"
	# Link releases/ and lib/ from the rebar3 build output into the project
	# root. The "! -e" guards keep us from clobbering anything a user (or a
	# previous run) already placed at those paths.
	if [ -d "${RELEASE_BUILD_DIR}/releases" ] && [ ! -e "${RELEASE_ROOT_DIR}/releases" ]; then
		ln -sf "${RELEASE_BUILD_DIR}/releases" "${RELEASE_ROOT_DIR}/releases"
	fi
	if [ -d "${RELEASE_BUILD_DIR}/lib" ] && [ ! -e "${RELEASE_ROOT_DIR}/lib" ]; then
		ln -sf "${RELEASE_BUILD_DIR}/lib" "${RELEASE_ROOT_DIR}/lib"
	fi
}

# Schedule symlink cleanup after VM boots. The symlinks are only needed during
# boot to load code; once loaded, they can be removed so VSCode extensions work.
# Default delay is 30 seconds; set ARWEAVE_SYMLINK_CLEANUP_DELAY to override (0 to disable).
# Only runs when running from source (RUNNING_FROM_SOURCE is set).
# Remove the boot-time lib/releases symlinks once the VM has had time to load
# code. No-op unless RUNNING_FROM_SOURCE is set; delay is
# ARWEAVE_SYMLINK_CLEANUP_DELAY seconds (default 30, 0 disables cleanup).
schedule_symlink_cleanup() {
	# Only cleanup if we're running from source and created symlinks
	if [ -z "$RUNNING_FROM_SOURCE" ]; then
		return
	fi
	local delay="${ARWEAVE_SYMLINK_CLEANUP_DELAY:-30}"
	if [ "$delay" = "0" ]; then
		return
	fi
	(
		sleep "$delay"
		# Only remove if they're actually symlinks (safety check)
		[ -L "${RELEASE_ROOT_DIR}/lib" ] && rm -f "${RELEASE_ROOT_DIR}/lib"
		[ -L "${RELEASE_ROOT_DIR}/releases" ] && rm -f "${RELEASE_ROOT_DIR}/releases"
	) &
}

export REL_DIR="${RELEASE_ROOT_DIR}/releases/${REL_VSN}"
export RUNNER_LOG_DIR="${RUNNER_LOG_DIR:-$RELEASE_ROOT_DIR/logs}"
export ESCRIPT_NAME="${ESCRIPT_NAME-$SCRIPT}"

# if RELX_RPC_TIMEOUT is set then use that
# otherwise check for NODETOOL_TIMEOUT and convert to seconds
if [ -z "$RELX_RPC_TIMEOUT" ]; then
	# if NODETOOL_TIMEOUT exists then turn the old nodetool timeout into the rpc timeout
	if [ -n "$NODETOOL_TIMEOUT" ]; then
		# will exit the script if NODETOOL_TIMEOUT isn't a number
		RELX_RPC_TIMEOUT=$((NODETOOL_TIMEOUT / 1000))
	else
		RELX_RPC_TIMEOUT=60
	fi
fi
export RELX_RPC_TIMEOUT

# start/stop/install/upgrade pre/post hooks
PRE_START_HOOKS=""
POST_START_HOOKS=""
PRE_STOP_HOOKS=""
POST_STOP_HOOKS=""
PRE_INSTALL_UPGRADE_HOOKS=""
POST_INSTALL_UPGRADE_HOOKS=""
STATUS_HOOK=""
EXTENSIONS=""

# Print a prefixed warning on stderr.
_warning() {
	printf -- "warning: %s\n" "${*}" 1>&2
}

# Print a prefixed error on stderr.
_error () {
	printf -- "error: %s\n" "${*}" 1>&2
}

######################################################################
# Arweave Section
######################################################################

# Not all systems support the RandomX JIT, so disable it on Darwin.
if test ${SYSTEM_NAME} = "Darwin"
then
	export RANDOMX_JIT="disable randomx_jit"
else
	export RANDOMX_JIT=""
fi

# This variable holds the VM arguments used to start arweave.
# FIX: the variable set above is RANDOMX_JIT; the previous "${RANDOM_JIT}"
# reference expanded an undefined variable, so the "disable randomx_jit"
# option was silently dropped on Darwin.
ARWEAVE_OPTS="-run ar main ${RANDOMX_JIT}"

######################################################################
# Arweave System Check Section
######################################################################
arweave_check() { case "${1}" in help) arweave_check_help ;;
*) arweave_check_nofile arweave_check_hugepages ;; esac } arweave_check_help() { echo "Usage: ${REL_NAME} check" echo "Check system configuration. Examples:" echo " ${REL_NAME} check" exit 1 } arweave_check_nofile() { recommendation="1000000" limit="$(ulimit -n)" if [ "$limit" -lt "$recommendation" ] then _warning "************************************************************************" _warning "Your maximum number of open file descriptors is currently set to $limit." _warning "We recommend setting that limit to $recommendation or higher." _warning "Otherwise, consider setting your max_connections setting to something" _warning "lower than your file descriptor limit. This value can be check with:" _warning " sysctl fs.file-max" _warning "or" _warning " ulimit -n" _warning "see more at https://docs.arweave.org/" _warning "************************************************************************" fi } arweave_check_hugepages() { # execute this check only on linux test ${SYSTEM_NAME} != "Linux" && return 0 recommendation="3500" value=$(sysctl -n vm.nr_hugepages) if test ${value} -lt ${recommendation} then _warning "************************************************************************" _warning "huge pages is not configured on this system." _warning "It should be set to ${recommendation}. 
This value can be check with:" _warning " sysctl vm.nr_hugepages" _warning "see more at https://docs.arweave.org/" _warning "************************************************************************" fi } ###################################################################### # Arweave Benchmark Section ###################################################################### arweave_benchmark() { case "${1}" in hash) shift arweave_benchmark_hash ${*} ;; packing) shift arweave_benchmark_packing ${*} ;; vdf) shift arweave_benchmark_vdf ${*} ;; vdf_exp) shift arweave_benchmark_vdf_exp ${*} ;; *) arweave_benchmark_help ;; esac } arweave_benchmark_help() { echo "Usage: ${REL_NAME} benchmark [hash|packing|vdf]" echo "Execute Arweave benchmarks. Examples:" echo " ${REL_NAME} benchmark hash" echo " ${REL_NAME} benchmark packing" echo " ${REL_NAME} benchmark vdf" exit 1 } arweave_benchmark_hash() { ARWEAVE_OPTS="-run ar benchmark_hash" echo ${*} } arweave_benchmark_packing() { ARWEAVE_OPTS="-run ar benchmark_packing" echo ${*} } arweave_benchmark_vdf() { ARWEAVE_OPTS="-run ar benchmark_vdf" echo ${*} } arweave_benchmark_vdf_exp() { ARWEAVE_OPTS="-run ar benchmark_vdf_exp" echo ${*} } ###################################################################### # Arweave Wallet Management Section ###################################################################### arweave_wallet() { case "${1}" in create) shift arweave_wallet_create ${*} ;; *) arweave_wallet_help ;; esac } arweave_wallet_help() { echo "Usage: ${REL_NAME} wallet [create]" echo "Manage Arweave wallets. Examples:" echo " ${REL_NAME} wallet create rsa" echo " ${REL_NAME} wallet create ecdsa" exit 1 } arweave_wallet_create() { case "${1}" in rsa) shift arweave_wallet_create_rsa ${*} ;; ecdsa) shift arweave_wallet_create_ecdsa ${*} ;; *) arweave_wallet_create_help ;; esac } arweave_wallet_create_help() { echo "Usage: ${REL_NAME} wallet create [rsa|ecdsa]" echo "Create Arweave wallet. 
examples:" echo " ${REL_NAME} wallet create rsa" echo " ${REL_NAME} wallet create ecdsa" exit 1 } arweave_wallet_create_rsa() { ARWEAVE_OPTS="-run ar create_wallet" echo ${*} } arweave_wallet_create_ecdsa() { ARWEAVE_OPTS="-run ar create_ecdsa_wallet" echo ${*} } ###################################################################### # Arweave Data Doctor Section ###################################################################### arweave_doctor() { ARWEAVE_OPTS="-run ar_data_doctor main" echo ${*} } arweave_doctor_help() { echo "Usage: ${REL_NAME} doctor" echo "Execute data doctor analyzer" exit 1 } ###################################################################### # Arweave Developer mode Section ###################################################################### # when ARWEAVE_DEV environment variable is set, the release is rebuild arweave_developer_mode() { ( cd ${PARENT_DIR} \ && ./ar-rebar3 ${ARWEAVE_BUILD_TARGET:-default} release sleep 1 ) } # check if a command (subcommand) is a developer command. 
# return 0 (true) when ${1} is a developer-only command (test, test_e2e)
# AND developer mode (ARWEAVE_DEV) is not enabled; used to block those
# commands on production installs.
is_arweave_developer_command() {
    local commands="test test_e2e"
    local value="${1}"
    if test "${ARWEAVE_DEV}"
    then
        return 1
    fi
    for command in ${commands}
    do
        if test "${command}" = "${value}"
        then
            return 0
        fi
    done
    return 1
}

######################################################################
# Arweave Version Section
######################################################################
arweave_version() {
    case "${1}" in
        *) arweave_version_light ;;
    esac
}

arweave_version_light() {
    echo "${RELEASE_NAME} ${RELEASE_VSN} (${RELEASE_GIT_REV}) ${RELEASE_DATETIME}"
    echo " erts ${RELEASE_ERTS}"
    echo " ${RELEASE_CC}"
    echo " ${RELEASE_GMAKE}"
    echo " ${RELEASE_CMAKE}"
    exit 0
}

arweave_version_help() {
    echo "Usage: ${REL_NAME} version"
    echo "Return Arweave release"
    exit 1
}

######################################################################
# test section
######################################################################
arweave_test() {
    TEST_CONFIG="./config/sys.config"
    TEST_PROFILE="test"
    TEST_NODE_NAME="${NODE_NAME:-main-localtest}"
    TEST_NODE_HOST="${NODE_HOST:-127.0.0.1}"
    TEST_COOKIE="${COOKIE:-test}"
    TEST_MODULE="tests"
    TEST_LOG="main-localtest.out"
    arweave_test_run ${*}
}

arweave_test_help() {
    echo "Usage: ${REL_NAME} test [module | module:test ...]"
    echo "Run Arweave Test Suite"
    echo " test - run all tests"
    echo " test module - run all tests in module"
    echo " test module:test - run specific test from module"
    echo " test mod1 mod2:test mod3 - mixed mode"
}

arweave_e2e() {
    TEST_CONFIG="./config/sys.config"
    TEST_PROFILE="e2e"
    TEST_NODE_NAME="${NODE_NAME:-main-e2e}"
    TEST_NODE_HOST="${NODE_HOST:-127.0.0.1}"
    TEST_COOKIE="${COOKIE:-e2e}"
    TEST_MODULE="e2e"
    TEST_LOG="main-e2e.out"
    arweave_test_run ${*}
}

arweave_e2e_help() {
    echo "Usage: ${REL_NAME} test_e2e [module | module:test ...]"
    echo "Run Arweave e2e Test Suite"
    echo " test_e2e - run all e2e tests"
    echo " test_e2e module - run all tests in module"
    echo " test_e2e module:test - run specific test from module"
    echo " test_e2e mod1 mod2:test mod3 - mixed mode"
}

# test and e2e features are sharing the same procedures.
arweave_test_run() {
    (
        echo -e "\033[0;32m===> Enter into ${PARENT_DIR}\033[0m"
        cd ${PARENT_DIR}
        echo -e "\033[0;32m===> Compile ${TEST_PROFILE} profile\033[0m"
        ./ar-rebar3 "${TEST_PROFILE}" compile
        # if a specific test is specified
        if test "${1}"
        then
            # Replace colons with underscores for valid node name
            SANITIZED_ARG="${1//:/_}"
            TEST_NODE="${TEST_NODE_NAME}-${SANITIZED_ARG}@${TEST_NODE_HOST}"
        else
            TEST_NODE="${TEST_NODE_NAME}@${TEST_NODE_HOST}"
        fi
        TEST_PATH="$(./rebar3 as ${TEST_PROFILE} path)"
        ## TODO: Generate path for all apps -> Should we fetch this from somewhere?
        APPS="arweave arweave_config arweave_limiter arweave_diagnostic"
        PATH_ARGS=""
        for app in $APPS; do
            P="$(./rebar3 as ${TEST_PROFILE} path --base)/lib/${app}/test"
            echo $P
            PATH_ARGS="${PATH_ARGS} ${P}"
        done
        PARAMS="-pa ${TEST_PATH} ${PATH_ARGS} -config ${TEST_CONFIG} -noshell"
        ENTRY_POINT="-run ar ${TEST_MODULE} ${*} -s init stop"
        command="erl ${PARAMS} -name ${TEST_NODE} -setcookie ${TEST_COOKIE} ${ENTRY_POINT}"
        echo -e "\033[0;32m===> Execute command ${command}\033[0m"
        set -xe -o pipefail
        ${command} | tee "${TEST_LOG}"
        exit $?
    )
}

######################################################################
# Relx section
######################################################################
relx_usage() {
    command="$1"
    case "$command" in
        benchmark)
            arweave_benchmark_help
            ;;
        check)
            arweave_check_help
            ;;
        doctor)
            arweave_doctor_help
            ;;
        version)
            arweave_version_help
            ;;
        packing)
            arweave_packing_help
            ;;
        wallet)
            arweave_wallet_help
            ;;
        daemon)
            echo "Usage: ${REL_NAME} daemon"
            echo "Start Arweave as daemon (in background)"
            ;;
        daemon_attach)
            echo "Usage: ${REL_NAME} daemon_attach"
            echo "Attach to a running Arweave daemonized process"
            ;;
        rpc)
            # FIX: bracket nesting was unbalanced ("[Args]]]]")
            echo "Usage: $REL_NAME rpc [Mod [Fun [Args]]]"
            echo "Applies the specified function and returns the result."
            echo "Mod must be specified. However, start and [] are assumed"
            echo "for unspecified Fun and Args, respectively. Args is to "
            echo "be in the same format as for erlang:apply/3 in ERTS."
            ;;
        escript)
            echo "Usage: ${REL_NAME} escript [ESCRIPT]"
            echo "Execute an Erlang script in the Arweave release environment."
            echo "Note: it will not start Arweave."
            ;;
        "eval")
            echo "Usage: $REL_NAME eval [Exprs]"
            echo "Executes a sequence of Erlang expressions, separated by"
            echo "comma (,) and ended with a full stop (.)"
            ;;
        foreground)
            echo "Usage: $REL_NAME foreground"
            echo "Starts the Arweave release in the foreground, meaning all output"
            echo "going to stdout but without an interactive shell."
            echo "The entry point is set to -run ar main"
            ;;
        foreground_clean)
            # FIX: usage line previously printed "foreground" instead of
            # "foreground_clean"
            echo "Usage: $REL_NAME foreground_clean"
            echo "Starts the Arweave release in the foreground, meaning all output"
            echo "going to stdout but without an interactive shell."
            echo "No entry point is configured"
            ;;
        console)
            echo "Usage: $REL_NAME console"
            echo "Starts Arweave with an interactive shell."
            ;;
        console_clean)
            echo "Usage: ${REL_NAME} console_clean"
            # FIX: was "echo:" (command not found) and "interactived"
            echo "Starts an interactive Erlang shell without Arweave started."
            ;;
        remote_console|remote|remsh)
            echo "Usage: $REL_NAME remote"
            echo "Attach a remote shell to an already running Erlang node for this release."
            ;;
        reboot)
            echo "Usage: ${REL_NAME} reboot"
            echo "Reboot the entire Arweave VM."
            ;;
        restart)
            echo "Usage: ${REL_NAME} restart"
            echo "Restart the running applications but not the Arweave VM."
            ;;
        pid)
            echo "Usage: ${REL_NAME} pid"
            echo "Returns the system PID of Arweave release (if running)."
            ;;
        ping)
            echo "Usage: ${REL_NAME} ping"
            echo "Checks if the Arweave node is running."
            ;;
        status)
            echo "Usage: $REL_NAME status"
            echo "Obtains node status information through optionally defined hooks."
            ;;
        stop)
            echo "Usage: ${REL_NAME} stop"
            echo "Stop the Arweave node."
            ;;
        test)
            arweave_test_help
            ;;
        test_e2e)
            arweave_e2e_help
            ;;
        *)
            # check for extension
            IS_EXTENSION=$(relx_is_extension "$command")
            if [ "$IS_EXTENSION" = "1" ]; then
                EXTENSION_SCRIPT=$(relx_get_extension_script "$command")
                relx_run_extension "$EXTENSION_SCRIPT" help
            else
                EXTENSIONS=$(echo $EXTENSIONS | sed -e 's/|undefined//g')
                echo "Usage: ${REL_NAME} [COMMAND] [ARGS]"
                echo ""
                echo "Arweave Commands:"
                echo ""
                echo " benchmark Run Arweave Benchmarks"
                echo " check Check system parameters for Arweave"
                echo " console Start Arweave with an interactive Erlang shell"
                echo " console_clean Start an interactive Erlang shell without the Arweave release's applications"
                echo " daemon Start Arweave in the background with run_erl (named pipes)"
                echo " daemon_attach Connect to Arweave node started as daemon with to_erl (named pipes)"
                echo " doctor Start Arweave Data Analyzer tool"
                echo " escript Run an escript in the same environment as the Arweave release"
                echo " eval [Exprs] Run Erlang expressions on Arweave node"
                echo " foreground Start Arweave with output to stdout"
                echo " foreground_clean Start Arweave VM without any entry-point as arguments"
                echo " pid Print the PID of the Arweave OS process"
                echo " ping Print pong if the Arweave node is alive"
                echo " reboot Reboot the entire Arweave VM"
                echo " reload Restart only Arweave application in the VM"
                echo " remote_console Connect remote shell to the Arweave node"
                echo " restart Restart the running applications but not the Arweave VM"
                echo " rpc [Mod [Fun [Args]]] Run apply(Mod, Fun, Args) on the Arweave node"
                echo " status Verify if the Arweave node is running and then run status hook scripts"
                echo " stop Stop the Arweave node"
                echo " version Print the Arweave version"
                echo " wallet Manage Arweave wallets"
                if test "$EXTENSIONS"
                then
                    echo "$EXTENSIONS"
                fi
                if test "${ARWEAVE_DEV}"
                then
                    echo ""
                    echo "Arweave Commands (developer mode):"
                    echo " test [MODULE [TEST]] Run Arweave test Suite"
                    echo " test_e2e [MODULE [TEST]] Run Arweave e2e Test Suite"
                fi
            fi
            ;;
    esac
}

# locate the ERTS directory: prefer the bundled one, fall back to the
# system-wide Erlang installation (possibly with a different ERTS version)
find_erts_dir() {
    __erts_dir="$RELEASE_ROOT_DIR/erts-$ERTS_VSN"
    if [ -d "$__erts_dir" ]; then
        ERTS_DIR="$__erts_dir";
    else
        __erl="$(command -v erl)"
        code="io:format(\"~s\", [code:root_dir()]), halt()."
        __erl_root="$("$__erl" -boot no_dot_erlang -sasl errlog_type error -noshell -eval "$code")"
        ERTS_DIR="$__erl_root/erts-$ERTS_VSN"
        if [ ! -d "$ERTS_DIR" ]; then
            erts_version_code="io:format(\"~s\", [erlang:system_info(version)]), halt()."
            __erts_version="$("$__erl" -boot no_dot_erlang -sasl errlog_type error -noshell -eval "$erts_version_code")"
            ERTS_DIR="${__erl_root}/erts-${__erts_version}"
            if [ -d "$ERTS_DIR" ]; then
                echo "Exact ERTS version (${ERTS_VSN}) match not found, instead using ${__erts_version}. The release may fail to run." 1>&2
                ERTS_VSN=${__erts_version}
            else
                echo "Can not run the release. There is no ERTS bundled with the release or found on the system."
                exit 1
            fi
        fi
    fi
}

find_erl_call() {
    # users who depend on stdout when running rpc calls must still use nodetool
    # so we have an overload option to force use of nodetool instead of erl_call
    if [ "$USE_NODETOOL" ]; then
        ERL_RPC=relx_nodetool
    else
        # only OTP-23 and above have erl_call in the erts bin directory
        # and only those versions have the features and bug fixes needed
        # to work properly with this script
        __erl_call="$ERTS_DIR/bin/erl_call"
        if [ -f "$__erl_call" ]; then
            ERL_RPC="$__erl_call";
        else
            ERL_RPC=relx_nodetool
        fi
    fi
}

# Get node pid
relx_get_pid() {
    if output="$(erl_rpc os getpid 2>/dev/null)"
    then
        echo "$output" | sed -e 's/"//g'
        return 0
    else
        echo "$output"
        return 1
    fi
}

ping_or_exit() {
    if ! erl_rpc erlang is_alive > /dev/null 2>&1; then
        echo "Node is not running!"
        exit 1
    fi
}

relx_get_nodename() {
    id="longname$(relx_gen_id)-${NAME}"
    if [ -z "$COOKIE" ]; then
        # shellcheck disable=SC2086
        "$BINDIR/erlexec" -boot "$REL_DIR"/start_clean \
            -mode interactive \
            -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \
            -eval '[_,H]=re:split(atom_to_list(node()),"@",[unicode,{return,list}]), io:format("~s~n",[H]), halt()' \
            -dist_listen false \
            ${START_EPMD} \
            -noshell "${NAME_TYPE}" "$id"
    else
        # running with setcookie prevents a ~/.erlang.cookie from being created
        # shellcheck disable=SC2086
        "$BINDIR/erlexec" -boot "$REL_DIR"/start_clean \
            -mode interactive \
            -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \
            -eval '[_,H]=re:split(atom_to_list(node()),"@",[unicode,{return,list}]), io:format("~s~n",[H]), halt()' \
            -setcookie "${COOKIE}" \
            -dist_listen false \
            ${START_EPMD} \
            -noshell "${NAME_TYPE}" "$id"
    fi
}

# Connect to a remote node
relx_rem_sh() {
    # Remove remote_nodename when OTP-23 is the oldest version supported by rebar3/relx.
    # sort the used erts version against 11.0 to see if it is less than 11.0 (OTP-23)
    # if it is then we must generate a node name to use for the remote node.
    # But this feature is only for short names in 23.0 (erts 11.0). It can be used
    # for long names with 23.1 (erts 11.1) and above.
    if [ "${NAME_TYPE}" = "-sname" ] && [ "11.0" = "$(printf "%s\n11.0" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then
        remote_nodename="${NAME_TYPE} undefined@${RELX_HOSTNAME}"
    # if the name type is longnames then make sure this is erts 11.1+
    elif [ "${NAME_TYPE}" = "-name" ] && [ "11.1" = "$(printf "%s\n11.1" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then
        remote_nodename="${NAME_TYPE} undefined@${RELX_HOSTNAME}"
    else
        # Generate a unique id used to allow multiple remsh to the same node transparently
        remote_nodename="${NAME_TYPE} remsh$(relx_gen_id)-${NAME}"
    fi

    # Get the node's ticktime so that we use the same one
    TICKTIME="$(erl_rpc net_kernel get_net_ticktime)"

    # Setup remote shell command to control node
    # -dist_listen is new in OTP-23. It keeps the remote node from binding to a listen port
    # and implies the option -hidden
    # shellcheck disable=SC2086
    exec "$BINDIR/erlexec" ${remote_nodename} -remsh "$NAME" -boot "$REL_DIR"/start_clean -mode interactive \
        -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \
        -setcookie "$COOKIE" -hidden -kernel net_ticktime "$TICKTIME" \
        -dist_listen false \
        $DIST_ARGS \
        $EXTRA_DIST_ARGS
}

erl_rpc() {
    case "$ERL_RPC" in
        "relx_nodetool")
            relx_nodetool rpc "$@"
            ;;
        *)
            command=$*
            # erl_call -R is recommended for generating dynamic node name but is only available in 23.0+
            if [ "11.0" = "$(printf "%s\n11.0" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then
                DYNAMIC_NAME="-R"
            else
                DYNAMIC_NAME="-r"
            fi
            if [ "$ADDRESS" ]; then
                result=$("$ERL_RPC" "${DYNAMIC_NAME}" -c "${COOKIE}" -address "${ADDRESS}" -timeout "${RELX_RPC_TIMEOUT}" -a "${command}")
            else
                result=$("$ERL_RPC" "$NAME_TYPE" "$NAME" "${DYNAMIC_NAME}" -c "${COOKIE}" -timeout "${RELX_RPC_TIMEOUT}" -a "${command}")
            fi
            code=$?
            if [ $code -eq 0 ]; then
                echo "$result"
            else
                return $code
            fi
            ;;
    esac
}

erl_eval() {
    case "$ERL_RPC" in
        "relx_nodetool")
            relx_nodetool eval "$@"
            ;;
        *)
            local command="${*}"
            # FIX: compute DYNAMIC_NAME here as well; it was previously only
            # assigned inside erl_rpc, so a direct eval ran erl_call with an
            # empty flag when erl_rpc had not been called first.
            if [ "11.0" = "$(printf "%s\n11.0" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then
                DYNAMIC_NAME="-R"
            else
                DYNAMIC_NAME="-r"
            fi
            if [ "$ERL_DIST_PORT" ]; then
                result=$(echo "${command}" | eval "$ERL_RPC" "${DYNAMIC_NAME}" -c "${COOKIE}" -address "${ADDRESS}" -timeout "${RELX_RPC_TIMEOUT}" -e)
            else
                result=$(echo "${command}" | eval "$ERL_RPC" "$NAME_TYPE" "$NAME" "${DYNAMIC_NAME}" -c "${COOKIE}" -timeout "${RELX_RPC_TIMEOUT}" -e)
            fi
            code=$?
            if [ $code -eq 0 ]; then
                echo "$result" | sed 's/^{ok, \(.*\)}$/\1/'
            else
                return $code
            fi
            ;;
    esac
}

# Generate a random id
relx_gen_id() {
    # To prevent exhaustion of atoms on target node, optionally avoid
    # generation of random node prefixes, if it is guaranteed calls
    # are entirely sequential.
    if [ -z "${NODETOOL_NODE_PREFIX}" ]; then
        dd count=1 bs=4 if=/dev/urandom 2> /dev/null | od -x | head -n1 | awk '{print $2$3}'
    else
        echo "${NODETOOL_NODE_PREFIX}"
    fi
}

# Control a node with nodetool if erl_call isn't from OTP-23+
relx_nodetool() {
    command="$1"; shift
    # Generate a unique id used to allow multiple nodetool calls to the
    # same node transparently
    nodetool_id="maint$(relx_gen_id)-${NAME}"
    if [ -z "${START_EPMD}" ]; then
        ERL_FLAGS="${ERL_FLAGS} ${DIST_ARGS} ${EXTRA_DIST_ARGS} ${NAME_TYPE} $nodetool_id -setcookie ${COOKIE} -dist_listen false" \
            "$ERTS_DIR/bin/escript" \
            "$ROOTDIR/bin/nodetool" \
            "$NAME_TYPE" "$NAME" \
            "$command" "$@"
    else
        # shellcheck disable=SC2086
        ERL_FLAGS="${ERL_FLAGS} ${DIST_ARGS} ${EXTRA_DIST_ARGS} ${NAME_TYPE} $nodetool_id -setcookie ${COOKIE} -dist_listen false" \
            "$ERTS_DIR/bin/escript" \
            "$ROOTDIR/bin/nodetool" \
            $START_EPMD "$NAME_TYPE" "$NAME" "$command" "$@"
    fi
}

# Run an escript in the node's environment
relx_escript() {
    scriptpath="$1"; shift
    export RELEASE_ROOT_DIR
    "$ERTS_DIR/bin/escript" "$ROOTDIR/$scriptpath" "$@"
}

# Convert {127,0,0,1} to 127.0.0.1 (inet:ntoa/1)
addr_tuple_to_str() {
    addr="$1"
    saved_IFS="$IFS"
    IFS="{,}'\" "
    # shellcheck disable=SC2086
    eval set -- $addr
    IFS="$saved_IFS"
    case $# in
        4) printf '%u.%u.%u.%u' "$@";;
        8) printf '%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x' "$@";;
        *) echo "Cannot parse IP address tuple: '$addr'" 1>&2;;
    esac
}

make_out_file_path() {
    # Use output directory provided in the RELX_OUT_FILE_PATH environment variable
    # (default to the current location of vm.args and sys.config)
    DIR=$(dirname "$1")
    [ -d "${RELX_OUT_FILE_PATH}" ] && DIR="${RELX_OUT_FILE_PATH}"
    FILE=$(basename "$1")
    IN="${DIR}/${FILE}"
    PFX=$(echo "$IN" | awk '{sub(/\.[^.]+$/, "", $0)}1')
    SFX=$(echo "$FILE" | awk -F . '{if (NF>1) print $NF}')
    if [ "$RELX_MULTI_NODE" ]; then
        echo "${PFX}.${NAME}.${SFX}"
    else
        echo "${PFX}.${SFX}"
    fi
}

# Replace environment variables
replace_os_vars() {
    awk '{
        while(match($0,"[$]{[^}]*}")) {
            var=substr($0,RSTART+2,RLENGTH -3)
            slen=split(var,arr,":-")
            v=arr[1]
            e=ENVIRON[v]
            gsub("&","\\\\\\&",e)
            if(slen > 1 && e=="") {
                i=index(var, ":-"arr[2])
                def=substr(var,i+2)
                gsub("[$]{"var"}",def)
            } else {
                gsub("[$]{"var"}",e)
            }
        }
    }1' < "$1" > "$2"
}

add_path() {
    # Use $CWD/$1 if exists, otherwise releases/VSN/$1
    local FILE=${1}; shift
    local IN_FILE_PATH=${1}; shift
    local EXTRA_PATHS=${*}
    if [ "${IN_FILE_PATH}" ]
    then
        echo "${IN_FILE_PATH}"
        return 0
    fi
    for e in "${RELEASE_ROOT_DIR}" "${REL_DIR}" ${EXTRA_PATHS}
    do
        if [ -f "${e}/${FILE}" ]
        then
            echo "${e}/${FILE}"
            return 0
        fi
    done
    return 1
}

# try check_replace_os_vars on each candidate path in turn, echoing the
# first successful result
multi_check_replace_os_vars() {
    local file="${1}"; shift
    while test "${*}"
    do
        local path=${1}; shift
        local ret=$(check_replace_os_vars ${file} ${path})
        if test "${ret}"
        then
            echo ${ret}
            return 0
        fi
    done
    return 1
}

check_replace_os_vars() {
    IN_FILE_PATH=$(add_path "$1" "$2")
    OUT_FILE_PATH="$IN_FILE_PATH"
    SRC_FILE_PATH="$IN_FILE_PATH.src"
    ORIG_FILE_PATH="$IN_FILE_PATH.orig"
    if [ -f "$SRC_FILE_PATH" ]; then
        OUT_FILE_PATH=$(make_out_file_path "$IN_FILE_PATH")
        replace_os_vars "$SRC_FILE_PATH" "$OUT_FILE_PATH"
    elif [ "$RELX_REPLACE_OS_VARS" ]; then
        OUT_FILE_PATH=$(make_out_file_path "$IN_FILE_PATH")
        # If vm.args.orig or sys.config.orig is present then use that
        if [ -f "$ORIG_FILE_PATH" ]; then
            IN_FILE_PATH="$ORIG_FILE_PATH"
        fi
        # apply the environment variable substitution to $IN_FILE_PATH
        # the result is saved to $OUT_FILE_PATH
        # if they are both the same, then ensure that we don't clobber
        # the file by saving a backup with the .orig extension
        if [ "$IN_FILE_PATH" = "$OUT_FILE_PATH" ]; then
            cp "$IN_FILE_PATH" "$ORIG_FILE_PATH"
            replace_os_vars "$ORIG_FILE_PATH" "$OUT_FILE_PATH"
        else
            replace_os_vars "$IN_FILE_PATH" "$OUT_FILE_PATH"
        fi
    else
        # If vm.arg.orig or sys.config.orig is present then use that
        if [ -f "$ORIG_FILE_PATH" ]; then
            OUT_FILE_PATH=$(make_out_file_path "$IN_FILE_PATH")
            cp "$ORIG_FILE_PATH" "$OUT_FILE_PATH"
        fi
    fi
    echo "$OUT_FILE_PATH"
}

relx_run_hooks() {
    HOOKS=$1
    for hook in $HOOKS
    do
        # the scripts arguments at this point are separated
        # from each other by | , we now replace these
        # by empty spaces and give them to the `set`
        # command in order to be able to extract them
        # separately
        # shellcheck disable=SC2046
        set $(echo "$hook" | sed -e 's/|/ /g')
        HOOK_SCRIPT=$1; shift
        # all hook locations are expected to be
        # relative to the start script location
        # shellcheck disable=SC1090,SC2240
        [ -f "$SCRIPT_DIR/$HOOK_SCRIPT" ] && . "$SCRIPT_DIR/$HOOK_SCRIPT" "$@"
    done
}

relx_disable_hooks() {
    PRE_START_HOOKS=""
    POST_START_HOOKS=""
    PRE_STOP_HOOKS=""
    POST_STOP_HOOKS=""
    PRE_INSTALL_UPGRADE_HOOKS=""
    POST_INSTALL_UPGRADE_HOOKS=""
    STATUS_HOOK=""
}

relx_is_extension() {
    EXTENSION=$1
    case "$EXTENSION" in
        # )
        #     echo "1"
        #     ;;
        *)
            echo "0"
            ;;
    esac
}

relx_get_extension_script() {
    EXTENSION=$1
    # below are the extensions declarations
    # of the form:
    # foo_extension="path/to/foo_script";bar_extension="path/to/bar_script"
    # get the command extension (eg. foo) and
    # obtain the actual script filename that it
    # refers to (eg. "path/to/foo_script"
    eval echo "$""${EXTENSION}_extension"
}

relx_run_extension() {
    # drop the first argument which is the name of the
    # extension script
    EXTENSION_SCRIPT=$1
    shift
    # all extension script locations are expected to be
    # relative to the start script location
    # shellcheck disable=SC1090,SC2240
    [ -f "$SCRIPT_DIR/$EXTENSION_SCRIPT" ] && . "$SCRIPT_DIR/$EXTENSION_SCRIPT" "$@"
}

# given a list of arguments, identify the internal ones
# --relx-disable-hooks
# and process them accordingly
process_internal_args() {
    for arg in "$@"
    do
        shift
        case "$arg" in
            --relx-disable-hooks)
                relx_disable_hooks
                ;;
            *)
                ;;
        esac
    done
}

# This function takes a list of terms (usually arguments)
# and split them in two categories, the one before --
# and the one after. The one before is used as Erlang
# VM parameters and should overwrite default configuration,
# The last part (LOCAL_PARAMS) contains arweave parameters.
# This function export LOCAL_PARAMS and VM_PARAMS variables.
parse_args() {
    local separator="--"
    local vm_params=""
    local params=""
    while test "${*}"
    do
        local arg="${1}"
        if test "${arg}" = ${separator}
        then
            test "${vm_params}" \
                && vm_params="${vm_params} ${params}" \
                || vm_params="${params}"
            params=""
        else
            test "${params}" \
                && params="${params} ${arg}" \
                || params="${arg}"
        fi
        # don't forget to shift to remove the previous
        # argument from the list
        shift
    done
    export VM_PARAMS="${vm_params}"
    export LOCAL_PARAMS="${params}"
}

# if ARWEAVE_DEV environment is defined, then
# we start by rebuild a release.
if test "${ARWEAVE_DEV}"
then
    arweave_developer_mode
fi

# Ensure symlinks exist after any rebuild (ar-rebar3 removes them during build)
ensure_release_symlinks

# process internal arguments
process_internal_args "$@"

find_erts_dir
find_erl_call

export BINDIR="$ERTS_DIR/bin"
export EMU="beam"
export PROGNAME="erl"
export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH"
SYSTEM_LIB_DIR="$(dirname "$ERTS_DIR")/lib"

# vm_args configuration, we can use priv/files/vm_args or
# the path from the release.
VMARGS_PATH=$(add_path \
    vm.args \
    "${VMARGS_PATH}" \
    "${REL_DIR}" \
    "${REL_PATH}" \
    "${REL_PATH_ALT}" \
    "${RELEASE_ROOT_DIR}/config" \
    "${RELEASE_ROOT_DIR}/priv/templates")

VMARGS_PATH=$(multi_check_replace_os_vars \
    vm.args \
    "${VMARGS_PATH}"\
    "${REL_DIR}" \
    "${REL_PATH}" \
    "${REL_PATH_ALT}" \
    "${RELEASE_ROOT_DIR}/config")

RELX_CONFIG_PATH=$(multi_check_replace_os_vars \
    sys.config \
    "${RELX_CONFIG_PATH}" \
    "${REL_DIR}" \
    "${REL_PATH}" \
    "${REL_PATH_ALT}" \
    "${RELEASE_ROOT_DIR}/config")

# Check vm.args and other files referenced via -args_file parameters for:
# - nonexisting -args_files
# - circular dependencies of -args_files
# - relative paths in -args_file parameters
# - multiple/mixed occurrences of -name and -sname parameters
# - missing -name or -sname parameters
# If all checks pass, extract the target node name
set +e
TMP_NAME_ARG=$(awk 'function shell_quote(str) {
    gsub(/'\''/,"'\'\\\\\'\''", str);
    return "'\''" str "'\''"
}
function check_name(file) {
    # if file exists, then it should be readable
    if (system("test -f " shell_quote(file)) == 0 && system("test -r " shell_quote(file)) != 0) {
        print file" not readable"
        exit 3
    }
    # FIX: the read-loop condition was corrupted ("getline line0");
    # restored to the standard "read each line of file" idiom
    while ((getline line<file)>0) {
        if (line~/^-args_file +/) {
            gsub(/^-args_file +| *$/, "", line)
            if (line in files) {
                print "circular reference to "line" encountered in "file
                exit 5
            }
            files[line]=line
            check_name(line)
        }
        else if (line~/^-s?name +/) {
            if (name!="") {
                print "\""line"\" parameter found in "file" but already specified as \""name"\""
                exit 2
            }
            name=line
        }
    }
}
BEGIN {
    split("", files)
    name=""
}
{
    files[FILENAME]=FILENAME
    check_name(FILENAME)
    if (name=="") {
        print "need to have exactly one of either -name or -sname parameters but none found"
        exit 1
    }
    print name
    exit 0
}' "$VMARGS_PATH")
TMP_NAME_ARG_RC=$?
# A zero return code means the awk check above succeeded and printed the
# -name/-sname line; any other code carries an error message to display.
case $TMP_NAME_ARG_RC in
    0) NAME_ARG="$TMP_NAME_ARG";;
    *) echo "$TMP_NAME_ARG"
       exit $TMP_NAME_ARG_RC;;
esac
unset TMP_NAME_ARG
unset TMP_NAME_ARG_RC
set -e

# Perform replacement of variables in ${NAME_ARG}
NAME_ARG=$(eval echo "${NAME_ARG}")

# Extract the name type and name from the NAME_ARG for REMSH
# NAME_TYPE is "-name" or "-sname"; NAME is the node name itself.
NAME_TYPE="$(echo "$NAME_ARG" | awk '{print $1}')"
NAME="$(echo "$NAME_ARG" | awk '{print $2}')"

# Extract dist arguments from vm.args; each grep is best-effort
# (|| true) because the flags are optional.
DIST_ARGS=""
PROTO_DIST="$(grep '^-proto_dist' "$VMARGS_PATH" || true)"
if [ "$PROTO_DIST" ]; then
    DIST_ARGS="${PROTO_DIST}"
fi
START_EPMD="$(grep '^-start_epmd' "$VMARGS_PATH" || true)"
if [ "$START_EPMD" ]; then
    DIST_ARGS="${DIST_ARGS} ${START_EPMD}"
fi
EPMD_MODULE="$(grep '^-epmd_module' "$VMARGS_PATH" || true)"
if [ "$EPMD_MODULE" ]; then
    DIST_ARGS="${DIST_ARGS} ${EPMD_MODULE}"
fi
INET_DIST_USE_INTERFACE="$(grep '^-kernel *inet_dist_use_interface' "$VMARGS_PATH" || true)"
if [ "$INET_DIST_USE_INTERFACE" ]; then
    DIST_ARGS="${DIST_ARGS} ${INET_DIST_USE_INTERFACE}"
fi

# When ERL_DIST_PORT is set, pin the distribution port. ADDRESS is later
# consumed by erl_rpc/erl_eval to reach the node without epmd.
if [ "$ERL_DIST_PORT" ]; then
    if [ "$INET_DIST_USE_INTERFACE" ]; then
        # combine the configured interface (an Erlang address tuple) with the port
        ADDRESS="$(addr_tuple_to_str "${INET_DIST_USE_INTERFACE#*inet_dist_use_interface }"):$ERL_DIST_PORT"
    else
        ADDRESS="$ERL_DIST_PORT"
    fi
    # ERTS >= 11.1 (OTP 23.1+) supports -erl_epmd_port directly
    if [ "11.1" = "$(printf "%s\n11.1" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then
        # unless set by the user, set start_epmd to false when ERL_DIST_PORT is used
        if [ ! "$START_EPMD" ]; then
            EXTRA_DIST_ARGS="-erl_epmd_port ${ERL_DIST_PORT} -start_epmd false"
        else
            EXTRA_DIST_ARGS="-erl_epmd_port ${ERL_DIST_PORT}"
        fi
    else
        # older ERTS: constrain the listen port range instead, but warn the
        # user that remsh/rpc will not be able to locate the node
        ERL_DIST_PORT_WARNING="ERL_DIST_PORT is set and used to set the port, but doing so on ERTS version ${ERTS_VSN} means remsh/rpc will not work for this release"
        if ! command -v logger > /dev/null 2>&1
        then
            echo "WARNING: ${ERL_DIST_PORT_WARNING}"
        else
            logger -p warning -t "${REL_NAME}[$$]" "${ERL_DIST_PORT_WARNING}"
        fi
        EXTRA_DIST_ARGS="-kernel inet_dist_listen_min ${ERL_DIST_PORT} -kernel inet_dist_listen_max ${ERL_DIST_PORT}"
    fi
fi

# Force use of nodetool if proto_dist set as erl_call doesn't support proto_dist
if [ "$PROTO_DIST" ]; then
    ERL_RPC=relx_nodetool
fi

# Extract the target cookie
# Do this before relx_get_nodename so we can use it and not create a ~/.erlang.cookie
# Precedence: RELX_COOKIE env var > -setcookie in vm.args > ~/.erlang.cookie
if [ -n "$RELX_COOKIE" ]; then
    COOKIE="$RELX_COOKIE"
else
    COOKIE_ARG="$(grep '^-setcookie' "$VMARGS_PATH" || true)"
    DEFAULT_COOKIE_FILE="$HOME/.erlang.cookie"
    if [ -z "$COOKIE_ARG" ]; then
        if [ -f "$DEFAULT_COOKIE_FILE" ]; then
            COOKIE="$(cat "$DEFAULT_COOKIE_FILE")"
        else
            echo "No cookie is set or found. This limits the scripts functionality, installing, upgrading, rpc and getting a list of versions will not work."
        fi
    else
        # Extract cookie name from COOKIE_ARG
        COOKIE="$(echo "$COOKIE_ARG" | awk '{print $2}')"
    fi
fi

# User can specify an sname without @hostname
# This will fail when creating remote shell
# So here we check for @ and add @hostname if missing
case "${NAME}" in
    *@*) ;;                                  # Nothing to do
    *) NAME=${NAME}@$(relx_get_nodename);;   # Add @hostname
esac

# Export the variable so that it's available in the 'eval' calls
export NAME

# create a variable of just the hostname part of the nodename
RELX_HOSTNAME=$(echo "${NAME}" | cut -d'@' -f2)

# run_erl communicates with the node through named pipes under PIPE_DIR;
# PIPE_BASE_DIR is only set when the user did not supply a PIPE_DIR
test -z "$PIPE_DIR" && PIPE_BASE_DIR='/tmp/erl_pipes/'
PIPE_DIR="${PIPE_DIR:-/tmp/erl_pipes/$NAME/}"

# Change to the project root directory (instead of the release root ROOTDIR)
# so that relative paths (like config files) and logs work as expected.
cd "$RELEASE_ROOT_DIR" if is_arweave_developer_command "${1}" then relx_usage exit 1 fi # Check the first argument for instructions case "$1" in check) shift arweave_check ${*} ;; version) shift arweave_version ${*} ;; daemon|daemon_boot) arweave_check case "$1" in daemon) shift START_OPTION="console" HEART_OPTION="daemon" ;; daemon_boot) shift START_OPTION="console_boot" HEART_OPTION="daemon_boot" ;; esac ARGS="$(printf "'%s' " "$@")" # shellcheck disable=SC2174 test -z "$PIPE_BASE_DIR" || mkdir -m 1777 -p "$PIPE_BASE_DIR" mkdir -p "$PIPE_DIR" if [ ! -w "$PIPE_DIR" ] then echo "failed to start, user '$USER' does not have write privileges on '$PIPE_DIR', either delete it or run node as a different user" exit 1 fi # Make sure log directory exists mkdir -p "$RUNNER_LOG_DIR" relx_run_hooks "$PRE_START_HOOKS" # check system configuration arweave_check "$BINDIR/run_erl" \ -daemon "$PIPE_DIR" \ "$RUNNER_LOG_DIR" \ "exec \"$RELEASE_ROOT_DIR/bin/$REL_NAME\" \"$START_OPTION\" ${ARGS}" # wait for node to be up before running hooks while ! erl_rpc erlang is_alive > /dev/null 2>&1 do sleep 1 done # Clean up symlinks now that VM is running (allows VSCode extensions to work) # Only remove if running from source and they're actually symlinks if [ -n "$RUNNING_FROM_SOURCE" ]; then [ -L "${RELEASE_ROOT_DIR}/lib" ] && rm -f "${RELEASE_ROOT_DIR}/lib" [ -L "${RELEASE_ROOT_DIR}/releases" ] && rm -f "${RELEASE_ROOT_DIR}/releases" fi relx_run_hooks "$POST_START_HOOKS" ;; stop) relx_run_hooks "$PRE_STOP_HOOKS" # Wait for the node to completely stop... PID="$(relx_get_pid)" if ! erl_rpc init stop > /dev/null 2>&1; then exit 1 fi while kill -s 0 "$PID" 2>/dev/null; do sleep 1 done # wait for node to be down before running hooks while erl_rpc erlang is_alive > /dev/null 2>&1 do sleep 1 done relx_run_hooks "$POST_STOP_HOOKS" ;; restart) ## Restart the VM without exiting the process if ! 
erl_rpc init restart > /dev/null; then exit 1 fi ;; reboot) ## Restart the VM completely (uses heart to restart it) if ! erl_rpc init reboot > /dev/null; then exit 1 fi ;; reload) ## Reload only arweave application in the vm RELX_RPC_TIMEOUT=3600 # first arweave and prometheus application must be stopped if erl_eval '[application:stop(A) || A <- [arweave, prometheus]].' then # then arweave application can be restarted erl_eval 'application:ensure_all_started(arweave).' test $? -ne 0 && exit 1 exit $? else exit 1 fi ;; pid) ## Get the VM's pid if ! relx_get_pid; then exit 1 fi ;; ping) ## See if the VM is alive ping_or_exit echo "pong" ;; escript) ## Run an escript under the node's environment shift if ! relx_escript "$@"; then exit 1 fi ;; daemon_attach|attach) case "$1" in attach) # TODO, add here the right annoying message asking users to consider # instead using systemd or some such other init system echo "'attach' has been deprecated, replaced by 'daemon_attach' and will be removed in the short-term, please consult rebar3.org on why you should be"\ "using 'foreground' and an init tool such as 'systemd'" ;; esac # Make sure a node IS running ping_or_exit if [ ! -w "$PIPE_DIR" ] then echo "failed to attach, user '$USER' does not have sufficient privileges on '$PIPE_DIR', please run node as a different user" exit 1 fi shift exec "$BINDIR/to_erl" "$PIPE_DIR" ;; remote_console|remote|remsh) # Make sure a node IS running ping_or_exit shift relx_rem_sh ;; console|console_clean|console_boot|foreground|foreground_clean|benchmark|wallet|doctor) FOREGROUNDOPTIONS="" # .boot file typically just $REL_NAME (ie, the app name) # however, for debugging, sometimes start_clean.boot is useful. # For e.g. 'setup', one may even want to name another boot script. 
subcommand="${1}" case "$1" in console) shift if [ -f "$REL_DIR/$REL_NAME.boot" ]; then BOOTFILE="$REL_DIR/$REL_NAME" else BOOTFILE="$REL_DIR/start" fi ARGS=${*} ;; foreground|foreground_clean|benchmark|wallet|doctor) shift # start up the release in the foreground for use by runit # or other supervision services if [ -f "$REL_DIR/$REL_NAME.boot" ]; then BOOTFILE="$REL_DIR/$REL_NAME" else BOOTFILE="$REL_DIR/start" fi FOREGROUNDOPTIONS="-noinput +Bd" # all these arweave commands are being executed in # foreground mode, ARGS will be modified. case ${subcommand} in benchmark) arweave_benchmark ${*} ARGS=$(arweave_benchmark ${*}) ;; wallet) arweave_wallet ${*} ARGS=$(arweave_wallet ${*}) ;; doctor) arweave_doctor ${*} ARGS=$(arweave_doctor ${*}) ;; foreground_clean) ARWEAVE_OPTS="" ARGS=${*} ;; *) ARGS=${*} ;; esac ;; console_clean) shift # if not set by user use interactive mode for console_clean CODE_LOADING_MODE="${CODE_LOADING_MODE:-interactive}" BOOTFILE="$REL_DIR/start_clean" ARGS=${*} ;; console_boot) shift BOOTFILE="$1" shift ARGS=${*} ;; esac # split the argument in two parts based on the previously # passed args, LOCAL_PARAMS is for arweave, VM_PARAMS is for # the vm. parse_args ${ARGS} ARGS=${LOCAL_PARAMS} # if not set by user or console_clean use embedded CODE_LOADING_MODE="${CODE_LOADING_MODE:-embedded}" # Setup beam-required vars EMU="beam" PROGNAME="${0#*/}" export EMU export PROGNAME # Dump environment info for logging purposes # shellcheck disable=SC2086 echo "Exec: $BINDIR/erlexec" \ ${VM_PARAMS} \ ${EXTRA_DIST_ARGS} \ ${FOREGROUNDOPTIONS} \ -boot "$BOOTFILE" \ -mode "$CODE_LOADING_MODE" \ -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \ -config "$RELX_CONFIG_PATH" \ -args_file "$VMARGS_PATH" \ -- ${ARWEAVE_OPTS} ${ARGS} echo "Root: $ROOTDIR" # Log the startup echo "$RELEASE_ROOT_DIR" if ! 
command -v logger > /dev/null 2>&1 then echo "${REL_NAME}[$$] Starting up" else logger -t "${REL_NAME}[$$]" "Starting up" fi relx_run_hooks "$PRE_START_HOOKS" # check system configuration arweave_check # Schedule cleanup of symlinks after VM boots (allows VSCode extensions to work) schedule_symlink_cleanup # Start the VM # The variabre FOREGROUNDOPTIONS must NOT be quoted. # shellcheck disable=SC2086 exec "$BINDIR/erlexec" \ ${VM_PARAMS} \ ${EXTRA_DIST_ARGS} \ ${FOREGROUNDOPTIONS} \ -boot "$BOOTFILE" \ -mode "$CODE_LOADING_MODE" \ -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \ -config "$RELX_CONFIG_PATH" \ -args_file "$VMARGS_PATH" \ -- ${ARWEAVE_OPTS} ${ARGS} # exec will replace the current image and nothing else gets # executed from this point on, this explains the absence # of the pre start hook ;; rpc) # Make sure a node IS running ping_or_exit shift erl_rpc "$@" ;; eval) # Make sure a node IS running ping_or_exit shift erl_eval "$@" ;; status) # Make sure a node IS running ping_or_exit # shellcheck disable=SC1090,SC2240 [ -n "${STATUS_HOOK}" ] && [ -f "$SCRIPT_DIR/$STATUS_HOOK" ] && . 
"$SCRIPT_DIR/$STATUS_HOOK" "$@"
        ;;
    tunnel)
        # prepare a tunnel to the remote node
        shift
        target="${1}"

        # if epmd is running locally, try to kill it
        pgrep epmd && pkill epmd

        # Fetch the epmd port of the remote arweave node.
        # NOTE(review): the previous version put the awk program inside
        # local double quotes, so $2/$NF were expanded by the LOCAL
        # shell before ssh ran, and it compared $2 with the literal
        # string "^arweave$" using ==, which can never match the node
        # name "arweave". The field references are now escaped so they
        # reach the remote awk intact, and the name is matched with a
        # plain string equality.
        REMOTE_EPMD_PORT=$(ssh "${target}" "epmd -names | sed 1d | awk '\$2 == \"arweave\" {print \$NF}'")

        # Announce the tunnel endpoint BEFORE starting ssh: ssh blocks
        # for the lifetime of the tunnel session, so printing afterwards
        # only happened once the tunnel was already gone.
        echo "epmd tunnel is ready on localhost:${REMOTE_EPMD_PORT}"

        # create a local forward tunnel
        # NOTE(review): the stray "=" in ${REMOTE_EPMD_PORT=} (the
        # assign-default-if-unset expansion) was removed; a plain
        # expansion was intended.
        ssh -L ${ERL_EPMD_PORT}:localhost:${ERL_EPMD_PORT} \
            -L ${REMOTE_EPMD_PORT}:localhost:${REMOTE_EPMD_PORT} \
            ${target}
        ;;
    remote_observer)
        # start observer locally, assuming a tunnel has been previously
        # created
        OBSERVER_ID=$(($(date "+%N")%6421))
        erl -name observer-${OBSERVER_ID}@127.0.0.1 \
            -setcookie ${COOKIE} \
            -hidden -run observer
        ;;
    test)
        shift
        arweave_test ${*}
        ;;
    test_e2e)
        shift
        arweave_e2e ${*}
        ;;
    help)
        if [ -z "$2" ]; then
            relx_usage
            exit 1
        fi
        TOPIC="$2"; shift
        relx_usage "$TOPIC"
        ;;
    *)
        # check for extension
        IS_EXTENSION=$(relx_is_extension "$1")
        if [ "$IS_EXTENSION" = "1" ]; then
            EXTENSION_SCRIPT=$(relx_get_extension_script "$1")
            shift
            relx_run_extension "$EXTENSION_SCRIPT" "$@"
            # all extension scripts are expected to exit
        else
            relx_usage "$1"
        fi
        exit 1
        ;;
esac

exit 0


================================================
FILE: bin/benchmark-hash
================================================
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(dirname ${0})
ARWEAVE=${SCRIPT_DIR}/arweave
${ARWEAVE} benchmark hash ${*}


================================================
FILE: bin/benchmark-packing
================================================
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(dirname ${0})
ARWEAVE=${SCRIPT_DIR}/arweave
${ARWEAVE} benchmark packing ${*}


================================================
FILE: bin/benchmark-vdf
================================================
#!/usr/bin/env bash
set -e
SCRIPT_DIR=$(dirname ${0})
ARWEAVE=${SCRIPT_DIR}/arweave
${ARWEAVE} benchmark vdf ${*}


================================================
FILE: bin/console
================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} remote_console ${*} ================================================ FILE: bin/create-ecdsa-wallet ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} wallet ecdsa create ${*} ================================================ FILE: bin/create-wallet ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} wallet create rsa ${*} ================================================ FILE: bin/data-doctor ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} doctor ${*} ================================================ FILE: bin/debug-logs ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR="$(dirname "$0")" LOGS_DIR="$(cd $SCRIPT_DIR/../logs && pwd -P)" tail -n 500 ${*} ${LOGS_DIR}/*debug.log ================================================ FILE: bin/e2e ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) export ARWEAVE_DEV=1 ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} test_e2e ${*} ================================================ FILE: bin/e2e_shell ================================================ #!/usr/bin/env bash SCRIPT_DIR="$(dirname "$0")" cd "$SCRIPT_DIR/.." ./ar-rebar3 e2e compile if [ "$(uname -s)" == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi export ERL_EPMD_ADDRESS=127.0.0.1 ERL_E2E_OPTS="-pa $(./rebar3 as e2e path) $(./rebar3 as e2e path --base)/lib/arweave/test -config config/sys.config" echo -e "\033[0;32m===> Running e2e shell...\033[0m" if pgrep -f "beam.smp" > /dev/null; then echo "BEAM is already running. Exiting." 
exit 1 else erl $ERL_E2E_OPTS -name main-e2e@127.0.0.1 -setcookie e2e -run ar shell_e2e 2>&1 fi kill 0 ================================================ FILE: bin/gen-dev-certs ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR="$(dirname "$0")" PRIV_DIR="$(cd $SCRIPT_DIR/../apps/arweave/priv && pwd -P)" CERT_FILE="$PRIV_DIR/tls/cert.pem" KEY_FILE="$PRIV_DIR/tls/key.pem" APEX_DOMAIN="${1:-"gateway.localhost"}" mkdir -p "$PRIV_DIR/tls" mkcert -cert-file "$CERT_FILE" \ -key-file "$KEY_FILE" \ "$APEX_DOMAIN" "*.$APEX_DOMAIN" ================================================ FILE: bin/localnet_shell ================================================ #!/usr/bin/env bash SCRIPT_DIR="$(dirname "$0")" cd "$SCRIPT_DIR/.." ./ar-rebar3 localnet compile if [ "$(uname -s)" == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi SNAPSHOT_DIR="${1:-localnet_snapshot}" export ERL_EPMD_ADDRESS=127.0.0.1 ERL_LOCALNET_OPTS="-pa $(./rebar3 as localnet path) $(./rebar3 as localnet path --base)/lib/arweave/test -config config/sys.config" echo -e "\033[0;32m===> Starting localnet shell from ${SNAPSHOT_DIR}...\033[0m" # Check if BEAM process is running if pgrep -f "beam.smp" > /dev/null; then echo "BEAM is already running. Exiting." exit 1 else erl $ERL_LOCALNET_OPTS -name main-localnet@127.0.0.1 -setcookie localnet -noshell -s ar shell_localnet "$SNAPSHOT_DIR" -s shell start_interactive 2>&1 fi kill 0 ================================================ FILE: bin/logs ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR="$(dirname "$0")" LOGS_DIR="$(cd $SCRIPT_DIR/../logs && pwd -P)" tail -n 500 ${*} ${LOGS_DIR}/*info.log ================================================ FILE: bin/shell ================================================ #!/usr/bin/env bash SCRIPT_DIR="$(dirname "$0")" cd "$SCRIPT_DIR/.." 
./ar-rebar3 test compile if [ `uname -s` == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi export ERL_EPMD_ADDRESS=127.0.0.1 ERL_TEST_OPTS="-pa `./rebar3 as test path` `./rebar3 as test path --base`/lib/arweave/test -config config/sys.config" echo -e "\033[0;32m===> Running tests...\033[0m" # Check if BEAM process is running if pgrep -f "beam.smp" > /dev/null; then echo "BEAM is already running. Exiting." exit 1 else erl $ERL_TEST_OPTS -name main-localtest@127.0.0.1 -setcookie test -run ar shell 2>&1 fi kill 0 ================================================ FILE: bin/start ================================================ #!/usr/bin/env bash ###################################################################### # Arweave Heartbeat script, unrelated to heart(3erl). This script will # restart arweave in case of crash. # # The epmd feature is a workaround to deal with a bug. When arweave # stops, in some case, an epmd session leaks and is still registered # in epmd. Two solutions: (1) wait for the timeout, but for some # reason it can take more than 24h (2) kill/restart epmd. This feature # is optional and can be activated by setting # ARWEAVE_EPMD_AUTO_RESTART environment variable. ###################################################################### SCRIPT_DIR=$(dirname ${0}) ARWEAVE=${SCRIPT_DIR}/arweave # set the default before restarting arweave ARWEAVE_RESTART_DELAY=${ARWEAVE_RESTART_DELAY:=15} # set the number of restart allowed. ARWEAVE_RESTART_LIMIT=${ARWEAVE_RESTART_LIMIT:=""} # set epmd auto restart. this is a workaround when arweave crash, an # epmd session can still be present (epmd session leak). If enabled, # a recovery/restart procedure is started automatically. ARWEAVE_EPMD_AUTO_RESTART=${ARWEAVE_EPMD_AUTO_RESTART:=""} # defines the method to restart method to use. At this time, only # kill and systemctl are supported. If systemctl is used, epmd service # must be called "epmd". 
If epmd is running with a different user, # systemctl will be called with sudo and the process' user. ARWEAVE_EPMD_RESTART_METHOD=${ARWEAVE_EPMD_RESTART_METHOD:="kill"} ###################################################################### # function helper to print arweave heartbeat messages. ###################################################################### _msg() { printf -- 'Arweave Heartbeat: %s\n' "${*}" } ###################################################################### # print the signal name instead of its number and return it. ###################################################################### _signal_sys() { local code="${1}" local kill_code if test ${code} -gt 127 then kill_code=$((code-128)) else kill_code=${code} fi case "${kill_code}" in 1) echo SIGHUP;; 2) echo SIGINT;; 3) echo SIGQUIT;; 4) echo SIGILL;; 5) echo SIGTRAP;; 6) echo SIGABRT;; 7) echo SIGBUS;; 9) echo SIGKILL;; 10) echo SIGUSR1;; 11) echo SIGSEGV;; 12) echo SIGUSR2;; 13) echo SIGPIPE;; 14) echo SIGALRM;; 15) echo SIGTERM;; 17) echo SIGCHLD;; 18) echo SIGSTOP;; *) echo "UNKNOWN_${code}";; esac return ${code} } ###################################################################### # this function is a quick and dirty patch to deal with epmd # session leaks. When arweave is stopping, in some situation # epmd keeps its session. It can be annoying. ###################################################################### _epmd_restart() { # only try to restart epmd if ARWEAVE_EPMD_AUTO_RESTART is set # not everyone want to do that. test "${ARWEAVE_EPMD_AUTO_RESTART}" || return 0 _msg "Start epmd restart procedure" # check epmd program existance. local epmd=$(which epmd) test "${epmd}" || return 1 test -x "${epmd}" || return 1 # check how many arweave process is running, if # there is more than one, there is a problem and epmd # should not be restarted. local instances=$(pgrep -f "${ARWEAVE}" | wc -l) if test "${instances}" -gt 1 then _msg "More than one arweave instance is running." 
_msg "epmd can't be restarted, here the nodes:" epmd -names return 1 fi # check if epmd daemon is started. If it's the case, then we # extract some information (e.g. UID, GID, PPID) local epmd_pid=$(pgrep epmd) if ! test "${epmd_pid}" then _msg "epmd is not started, can't restart it." return 1 fi local epmd_pid_user=$(ps -houser -p ${epmd_pid} | xargs echo) local epmd_pid_group=$(ps -hogroup -p ${epmd_pid} | xargs echo) local epmd_pid_ppid=$(ps -hoppid -p ${epmd_pid} | xargs echo) # extract epmd session in better format local epmd_sessions=$(epmd -names \ | sed 1d \ | sed -E "s/name (.+) at port (.+)/\1:\2/") local epmd_sessions_count=$(echo ${epmd_sessions} | wc -w) # small epmd report _msg "epmd (${epmd_pid})" \ "run as ${epmd_pid_user}:${epmd_pid_group}" \ "with ${epmd_sessions_count} sessions" \ "with ppid ${epmd_pid_ppid}." # check if there is an epmd session leak, # an arweave existing session should not be present. # only work if node's name is "arweave". ${epmd} -names | awk 'BEGIN{f=0} $1~/name/ && $2~/arweave/{f=1} END{exit f}' epmd_session_leak="$?" if test "${epmd_session_leak}" -eq 1 then local ret=1 # kill method used. only called if epmd's user is the same # than the one used by this script. if test "${ARWEAVE_EPMD_RESTART_METHOD}" = "kill" \ && test "${epmd_pid_user}" = "${USER}" then kill ${epmd_pid} ret=${?} fi # systemctl method used. invoke systemctl to restart epmd. if test "${ARWEAVE_EPMD_RESTART_METHOD}" = "systemctl" \ && test "${epmd_pid_user}" = "${USER}" then systemctl restart epmd ret=${?} fi # systemctl method (sudo) used. invoke systemctl with # sudo and the pid's user. if test "${ARWEAVE_EPMD_RESTART_METHOD}" = "systemctl" \ && test "${epmd_pid_user}" != "${USER}" then sudo -u "${epmd_pid_user}" systemctl restart epmd ret=${?} fi # if no methods are available, and the user's pid is # not our, then we stop. if test "${epmd_pid_user}" != "${USER}" \ && test "${ret}" != 0 then _msg "epmd can't be restarted (uid:${epmd_pid_user})." 
ret=${ret} fi if test "${ret}" -ne 0 then _msg "epmd (${epmd_pid}) restart failed." ret=${ret} fi return "${ret}" fi return 0 } ###################################################################### # main script ###################################################################### restart_counter=0 while true do # check for epmd presence (if the feature is enabled) _epmd_restart # we would like to avoid restarting arweave too much if test "${ARWEAVE_RESTART_LIMIT}" \ && test "${restart_counter}" -gt "${ARWEAVE_RESTART_LIMIT}" then _msg "Number of restart reached: ${restart_counter}." _msg "Arweave will not be restarted." _msg "Please check the system." exit 1 fi # start arweave _msg "Launching Erlang Virtual Machine..." ${ARWEAVE} foreground ${*} ret="${?}" # arweave terminated normally (0). if test "${ret}" -eq 0 then _msg "Server terminated safely." exit 0 fi # arweave terminated with an error code, it needs to be # restarted. if test "${ret}" -le 127 then _msg "The Arweave server has terminated with an error code (${ret})." fi # arweave terminated with a signal from the system or another # process, it could be an OOM. In this case, we need to # restart epmd and ensure everything is fine. if test "${ret}" -gt 127 then signal=$(_signal_sys ${ret}) _msg "The Arweave server has been terminated by the system (${signal})." fi _msg "It will restart in ${ARWEAVE_RESTART_DELAY} seconds." _msg "If you would like to avoid this, press control+c to kill the server." sleep "${ARWEAVE_RESTART_DELAY}" restart_counter=$((restart_counter+1)) done ================================================ FILE: bin/start-localnet ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR="$(dirname "$0")" ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE}/arweave localnet $* # while true; do # echo Launching Erlang Virtual Machine... 
# if # # -run ar main: call ar:main() on launch # $ARWEAVE $ARWEAVE_COMMAND $ARWEAVE_OPTS -run ar main $RANDOMX_JIT "$@" # then # echo "Arweave Heartbeat: Server terminated safely." # exit 0 # else # echo "Arweave Heartbeat: The Arweave server has terminated. It will restart in 15 seconds." # echo "Arweave Heartbeat: If you would like to avoid this, press control+c to kill the server." # sleep 15 # fi # done ================================================ FILE: bin/stop ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} stop ${*} ================================================ FILE: bin/test ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR=$(dirname ${0}) export ARWEAVE_DEV=1 ARWEAVE=${SCRIPT_DIR}/arweave ${ARWEAVE} test ${*} ================================================ FILE: config/sys.config ================================================ [ {arweave, []}, {kernel, [ {inet_dist_use_interface, {127, 0, 0, 1}}, {logger_level, all}, {logger, [{handler, default, logger_std_h, #{ level => warning, formatter => { logger_formatter, #{ legacy_header => false, single_line => true, chars_limit => 16256, max_size => 8128, depth => 256, template => [time," [",level,"] ",mfa,":",line," ",msg,"\n"] } } } }]} ]}, {sasl, [ {sasl_error_logger, false} ]}, {prometheus, [ {cowboy_instrumenter, [ {duration_buckets, [infinity]}, {request_labels, [http_method, route, reason, status_class, status]}, {error_labels, [http_method, route, reason, error]}, {labels_module, ar_prometheus_cowboy_labels} ]}, {vm_system_info_collector_metrics, []}, {vm_msacc_collector_metrics, []}, {vm_dist_collector_metrics, []} ]} ]. ================================================ FILE: config/vm.args ================================================ ###################################################################### ## Default vm arguments templates used by Arweave. 
## ## Some useful links to configure emulator flags: ## https://www.erlang.org/doc/apps/erts/erl_cmd.html#emulator-flags ## ## Some useful links on Erlang's memory management: ## https://www.erlang-factory.com/static/upload/media/139454517145429lukaslarsson.pdf ## https://www.youtube.com/watch?v=nuCYL0X-8f4 ## ## Note for testing it's sometimes useful to limit the number of ## schedulers that will be used, to do that: +S 16:16 ###################################################################### ## Name of the node -name ${ARNODE:-arweave@127.0.0.1} ## Cookie for distributed erlang -setcookie ${ARCOOKIE:-arweave} ## This is now the default as of OTP-26 ## Multi-time warp mode in combination with time correction is the ## preferred configuration. ## It is only not the default in Erlang itself because it could break ## older systems. # +C multi_time_warp ## Uncomment the following line if running in a container. ## +sbwt none ## Increase number of concurrent ports/sockets ##-env ERL_MAX_PORTS 4096 ## Tweak GC to run more often ##-env ERL_FULLSWEEP_AFTER 10 ## +B [c | d | i] ## Option c makes Ctrl-C interrupt the current shell instead of ## invoking the emulator break ## handler. Option d (same as specifying +B without an extra option) ## disables the break handler. # Option i makes the emulator ignore any ## break signal. ## If option c is used with oldshell on Unix, Ctrl-C will restart the ## shell process rather than ## interrupt it. ## Disable the emulator break handler ## it easy to accidentally type ctrl-c when trying ## to reach for ctrl-d. ctrl-c on a live node can ## have very undesirable results +Bi ## Enables the kernel poll functionality. 
## NOTE(review): erl requires a space between the flag and its value
## ("+K true"); "+Ktrue" is not a recognized emulator flag. Kernel poll
## is always enabled (and this flag ignored) as of OTP 21, the flag is
## kept for older runtimes.
+K true

## +A1024: emulator number of threads in the Async long thread pool for linked
## in drivers, mostly unused
+A1024

## +SDio1024: emulator Scheduler thread count for Dirty I/O, 1024
## threads for file access
+SDio1024

## +MBsbct 103424: binary_alloc singleblock carrier threshold (in KiB)
## (101MiB, default 512KiB). Blocks larger than the threshold are
## placed in singleblock carriers. However multi-block carriers are
## more efficient. Since we have so many 100MiB binary blocks due to
## the recall range, set the threshold so that they are all placed in
## multi-block carriers and not single-block carriers.
+MBsbct 103424

## +MBsmbcs 10240: binary_alloc smallest multi-block carrier size (in
## KiB) (10MiB, default 256KiB).
+MBsmbcs 10240

## MBlmbcs 410629: binary_alloc largest multi-block carrier size (in
## KiB) (~401MiB, default 5MiB). Set so that a single multi-block
## carrier can hold roughly 4 full recall ranges.
+MBlmbcs 410629

## +MBmmsbc 1024: binary_alloc maximum mseg_alloc singleblock carriers
## (1024 carriers, default 256). Once exhausted, the emulator will start
## using sys_alloc rather than mseg_alloc for singleblock carriers.
## This can be slower.
## NOTE(review): this flag previously read "+MBmmmbc 1024" (max mseg
## MULTIBLOCK carriers, a deprecated and ignored option), contradicting
## the comment above; "+MBmmsbc" matches the documented intent.
+MBmmsbc 1024

## +MBas aobf: emulator Memory Binary Allocation Strategy set to Address
## Order Best Fit.
## see: https://www.erlang.org/doc/man/erts_alloc.html#strategy
+MBas aobf

## Sets scheduler busy wait threshold. Defaults to medium. The
## threshold determines how long schedulers are to busy wait when
## running out of work before going to sleep.
+sbwt very_long

## Sets dirty scheduler busy wait threshold.
+sbwtdcpu very_long

## Sets dirty IO scheduler busy wait threshold
+sbwtdio very_long

## Sets scheduler wakeup threshold.
+swt very_low

## Sets dirty scheduler wakeup threshold.
+swtdcpu very_low

## Sets dirty IO scheduler wakeup threshold.
+swtdio very_low ================================================ FILE: config/vm.args.dev ================================================ -name 'arweave@127.0.0.1' -setcookie arweave ================================================ FILE: config/vm.args.src ================================================ ###################################################################### ## Default vm arguments templates used by Arweave. ## ## Some useful links to configure emulator flags: ## https://www.erlang.org/doc/apps/erts/erl_cmd.html#emulator-flags ## ## Some useful links on Erlang's memory management: ## https://www.erlang-factory.com/static/upload/media/139454517145429lukaslarsson.pdf ## https://www.youtube.com/watch?v=nuCYL0X-8f4 ## ## Note for testing it's sometimes useful to limit the number of ## schedulers that will be used, to do that: +S 16:16 ###################################################################### ## Name of the node -name ${ARNODE:-arweave@127.0.0.1} ## Cookie for distributed erlang -setcookie ${ARCOOKIE:-arweave} ## This is now the default as of OTP-26 ## Multi-time warp mode in combination with time correction is the ## preferred configuration. ## It is only not the default in Erlang itself because it could break ## older systems. # +C multi_time_warp ## Uncomment the following line if running in a container. ## +sbwt none ## Increase number of concurrent ports/sockets ##-env ERL_MAX_PORTS 4096 ## Tweak GC to run more often ##-env ERL_FULLSWEEP_AFTER 10 ## +B [c | d | i] ## Option c makes Ctrl-C interrupt the current shell instead of ## invoking the emulator break ## handler. Option d (same as specifying +B without an extra option) ## disables the break handler. # Option i makes the emulator ignore any ## break signal. ## If option c is used with oldshell on Unix, Ctrl-C will restart the ## shell process rather than ## interrupt it. 
## Disable the emulator break handler
## it easy to accidentally type ctrl-c when trying
## to reach for ctrl-d. ctrl-c on a live node can
## have very undesirable results
+Bi

## Enables the kernel poll functionality.
## NOTE(review): erl requires a space between the flag and its value
## ("+K true"); "+Ktrue" is not a recognized emulator flag. Kernel poll
## is always enabled (and this flag ignored) as of OTP 21, the flag is
## kept for older runtimes.
+K true

## +A1024: emulator number of threads in the Async long thread pool for linked
## in drivers, mostly unused
+A1024

## +SDio1024: emulator Scheduler thread count for Dirty I/O, 1024
## threads for file access
+SDio1024

## +MBsbct 103424: binary_alloc singleblock carrier threshold (in KiB)
## (101MiB, default 512KiB). Blocks larger than the threshold are
## placed in singleblock carriers. However multi-block carriers are
## more efficient. Since we have so many 100MiB binary blocks due to
## the recall range, set the threshold so that they are all placed in
## multi-block carriers and not single-block carriers.
+MBsbct 103424

## +MBsmbcs 10240: binary_alloc smallest multi-block carrier size (in
## KiB) (10MiB, default 256KiB).
+MBsmbcs 10240

## MBlmbcs 410629: binary_alloc largest multi-block carrier size (in
## KiB) (~401MiB, default 5MiB). Set so that a single multi-block
## carrier can hold roughly 4 full recall ranges.
+MBlmbcs 410629

## +MBmmsbc 1024: binary_alloc maximum mseg_alloc singleblock carriers
## (1024 carriers, default 256). Once exhausted, the emulator will start
## using sys_alloc rather than mseg_alloc for singleblock carriers.
## This can be slower.
## NOTE(review): this flag previously read "+MBmmmbc 1024" (max mseg
## MULTIBLOCK carriers, a deprecated and ignored option), contradicting
## the comment above; "+MBmmsbc" matches the documented intent.
+MBmmsbc 1024

## +MBas aobf: emulator Memory Binary Allocation Strategy set to Address
## Order Best Fit.
## see: https://www.erlang.org/doc/man/erts_alloc.html#strategy
+MBas aobf

## Sets scheduler busy wait threshold. Defaults to medium. The
## threshold determines how long schedulers are to busy wait when
## running out of work before going to sleep.
+sbwt very_long

## Sets dirty scheduler busy wait threshold.
+sbwtdcpu very_long

## Sets dirty IO scheduler busy wait threshold
+sbwtdio very_long

## Sets scheduler wakeup threshold.
+swt very_low ## Sets dirty scheduler wakeup threshold. +swtdcpu very_low ## Sets dirty IO scheduler wakeup threshold. +swtdio very_low ================================================ FILE: default.nix ================================================ (import ( let lock = with builtins; fromJSON (readFile ./flake.lock); in fetchTarball { url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; sha256 = lock.nodes.flake-compat.locked.narHash; } ) { src = ./.; }).defaultNix ================================================ FILE: deploy/Dockerfile.base.ubuntu20.04 ================================================ FROM ubuntu:20.04 # Set noninteractive installation ENV DEBIAN_FRONTEND=noninteractive # Add add-apt-repository application RUN apt-get update && apt-get install -y software-properties-common # Add rabbitmq erlang R26 repository RUN add-apt-repository -y ppa:rabbitmq/rabbitmq-erlang-26 # Install the necessary software to add a new repository over HTTPS RUN apt-get update && apt-get install -y \ apt-transport-https \ ca-certificates \ curl \ gnupg \ lsb-release \ wget # Install missing dependencies RUN apt-get install -y \ libncurses5 \ libwxbase3.0-0v5 \ libwxgtk3.0-gtk3-0v5 \ libsctp1 ================================================ FILE: deploy/Dockerfile.base.ubuntu22.04 ================================================ FROM ubuntu:22.04 # Set noninteractive installation ENV DEBIAN_FRONTEND=noninteractive # Add add-apt-repository application RUN apt-get update && apt-get install -y software-properties-common # Add rabbitmq erlang R26 repository RUN add-apt-repository -y ppa:rabbitmq/rabbitmq-erlang-26 # Install other dependencies RUN apt-get update && apt-get install -y erlang ================================================ FILE: deploy/Dockerfile.rocky ================================================ # Set the base image using a build argument FROM rockylinux:9 # Update the system and install necessary tools RUN 
dnf update -y && \ dnf install -y wget \ gcc \ gcc-c++ \ glibc-devel \ make \ ncurses-devel \ openssl-devel \ autoconf \ java-1.8.0-openjdk-devel \ m4 # Download and extract Erlang/OTP source WORKDIR /tmp RUN wget https://github.com/erlang/otp/releases/download/OTP-26.2.5.12/otp_src_26.2.5.12.tar.gz RUN tar zxf otp_src_26.2.5.12.tar.gz # Build and install Erlang/OTP WORKDIR /tmp/otp_src_26.2.5.12 RUN ./configure --prefix=/usr/local && \ make && \ make install # Clean up WORKDIR / RUN rm -rf /tmp/otp_src_26.2.5.12 /tmp/otp_src_26.2.5.12.tar.gz # Install other dependencies RUN dnf install -y \ gmp-devel \ cmake \ git # Set the working directory WORKDIR /app # Define the output directory as a volume VOLUME /output # The build steps are executed every time CMD set -x && \ git clone --recursive https://github.com/ArweaveTeam/arweave.git && \ cd arweave && \ git fetch --all && \ git pull --force && \ git checkout --force $GIT_TAG && \ git submodule update && \ ./rebar3 as prod tar && \ cp _build/prod/rel/arweave/arweave-*.tar.gz /output/arweave.tar.gz ================================================ FILE: deploy/Dockerfile.ubuntu ================================================ # Set the base image using a build argument ARG BASE_IMAGE FROM ${BASE_IMAGE} # Install other dependencies RUN apt-get install -y \ libssl-dev \ libgmp-dev \ libsqlite3-dev \ make \ cmake \ gcc \ g++ \ git # Set the working directory WORKDIR /app # Define the output directory as a volume VOLUME /output # The build steps are executed every time CMD set -x && \ git clone --recursive https://github.com/ArweaveTeam/arweave.git && \ cd arweave && \ git fetch --all && \ git pull --force && \ git checkout --force $GIT_TAG && \ git submodule update && \ ./rebar3 as prod tar && \ cp _build/prod/rel/arweave/arweave-*.tar.gz /output/arweave.tar.gz ================================================ FILE: deploy/Makefile ================================================ 
###################################################################### # Arweave Release GNU Makefile for MacOS # # This Makefile was created to build release on Darwin/MacOS system # using homebrew package manage. Every build is created using a # fresh version of the arweave git repository isolated from other # build. # # To install dependencies and create all releases based on Erlang # defined in ERLANG_VERSIONS variables: # # make all # # To install only one release using one erlang version: # # make build-release ERLANG_VERSION=24 # ###################################################################### ARWEAVE_GIT_TAG ?= master ARWEAVE_REPOSITORY ?= https://github.com/ArweaveTeam/arweave.git ERLANG_VERSIONS ?= 24 26 ERLANG_VERSION ?= 24 BUILDDIR ?= ./build RELEASEDIR ?= ./release SYSTEM_NAME = $(shell uname -o) SYSTEM_ARCH = $(shell uname -m) ARWEAVE_RELEASE_NAME ?= $(ARWEAVE_GIT_TAG)-$(SYSTEM_NAME)-$(SYSTEM_ARCH) HOMEBREW_PATH = /opt/homebrew HOMEBREW_COMMAND ?= brew ###################################################################### # default entry-point target ###################################################################### PHONY += help help: @echo "Usage: make [help|install-deps|all|build-release|clean|clean-all]" @echo " help: print help message" @echo " install-deps: install dependencies with homebrew" @echo " all: create all release" @echo " build-release: create release using default erlang version" @echo " clean: remove built artifacts" @echo " clean-all: remove built artifacts and releases" @echo "Variables:" @echo " ARWEAVE_GIT_TAG=$(ARWEAVE_GIT_TAG)" @echo " ARWEAVE_RELEASE_NAME=$(ARWEAVE_RELEASE_NAME)" @echo " ARWEAVE_REPOSITORY=$(ARWEAVE_REPOSITORY)" @echo " BUILDDIR=$(BUILDDIR)" @echo " ERLANG_VERSION=$(ERLANG_VERSION)" @echo " ERLANG_VERSIONS=$(ERLANG_VERSIONS)" @echo " RELEASEDIR=$(RELEASEDIR)" ifneq ($(SYSTEM_NAME), Darwin) @echo "WARNING: this Makefile is not compatible with this system: $(SYSTEM_NAME)" endif 
######################################################################
# template to install/cleanup erlang using brew
#
# Instantiated once per version in ERLANG_VERSIONS via
# $(foreach ... $(eval $(call template_erlang,<v>))) below.
# $(1) is the Erlang major version (e.g. 24). Variables here use a
# single `$` because they may be expanded when the template is eval'd.
######################################################################
define template_erlang
DEPS_$(1) += $(HOMEBREW_PATH)/opt/erlang@$(1)
HOMEBREW_ERLANG_DEPS += $(HOMEBREW_PATH)/opt/erlang@$(1)
# The homebrew keg directory doubles as the "is it installed?" stamp.
$(HOMEBREW_PATH)/opt/erlang@$(1):
	$(HOMEBREW_COMMAND) install erlang@$(1)
HOMEBREW_CLEAN += clean-erlang-$(1)
PHONY += clean-erlang-$(1)
# Leading `-` ignores failures (e.g. formula not installed).
clean-erlang-$(1):
	-$(HOMEBREW_COMMAND) uninstall erlang@$(1)
endef

######################################################################
# template to install/release arweave
#
# $(1) is the Erlang major version. `$$` escapes defer expansion until
# the generated rules are actually evaluated/executed, so BUILDDIR,
# RELEASEDIR etc. pick up their final values.
######################################################################
define template_builder
# Fresh per-version clone, isolated from other builds.
$$(BUILDDIR)/arweave-$(1): $$(BUILDDIR)
	git clone --recursive $$(ARWEAVE_REPOSITORY) $$(BUILDDIR)/arweave-$(1)

RELEASE_$(1) += $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz
RELEASES += $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz
ALL += $$(RELEASES)
# Build the release tarball: sync the clone to ARWEAVE_GIT_TAG, put
# the versioned erlang first on PATH, then let rebar3 produce the tar.
$$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz: $$(RELEASEDIR) $$(BUILDDIR)/arweave-$(1)
	git -C $$(BUILDDIR)/arweave-$(1) fetch --all
	git -C $$(BUILDDIR)/arweave-$(1) pull --force
	git -C $$(BUILDDIR)/arweave-$(1) checkout --force $$(ARWEAVE_GIT_TAG)
	git -C $$(BUILDDIR)/arweave-$(1) submodule update
	cd $$(BUILDDIR)/arweave-$(1) \
		&& export PATH="/opt/homebrew/opt/erlang@$(1)/bin:/opt/homebrew/bin:$${PATH}" \
		&& ./rebar3 as prod tar
	cp $$(BUILDDIR)/arweave-$(1)/_build/prod/rel/arweave/arweave-*.tar.gz $$@

CHECKSUM_$(1) += $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz.sha256
CHECKSUMS += $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz.sha256
ALL += $$(CHECKSUMS)
# NOTE(review): `sha256sum` is a GNU coreutils command; stock macOS
# ships `shasum -a 256` instead — presumably coreutils is expected to
# be installed via homebrew. Verify on a clean host.
$$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz.sha256: $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz
	sha256sum $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz \
		> $$@

ARWEAVE_CLEAN += clean-arweave-$(1)
PHONY += clean-arweave-$(1)
clean-arweave-$(1):
	-rm -rf $$(BUILDDIR)/arweave-$(1)

ARWEAVE_CHECKSUMS_CLEAN += clean-arweave-checksum-$(1)
PHONY += clean-arweave-checksum-$(1)
clean-arweave-checksum-$(1):
	-rm $$(RELEASEDIR)/$$(ARWEAVE_RELEASE_NAME)-R$(1).tar.gz.sha256
endef

######################################################################
# main directories
######################################################################
$(BUILDDIR):
	mkdir -p $@

$(RELEASEDIR):
	mkdir -p $@

######################################################################
# homebrew deps targets
######################################################################
# Instantiate the erlang install/clean rules for every listed version.
$(foreach v, $(ERLANG_VERSIONS), $(eval $(call template_erlang,$(v))))

# gmp dep
HOMEBREW_DEPS += $(HOMEBREW_PATH)/Cellar/gmp
$(HOMEBREW_PATH)/Cellar/gmp:
	$(HOMEBREW_COMMAND) install gmp
HOMEBREW_CLEAN += clean-homebrew-gmp
PHONY += clean-homebrew-gmp
clean-homebrew-gmp:
	-$(HOMEBREW_COMMAND) uninstall gmp

# pkg-config dep (the formula `pkg-config` installs into Cellar/pkgconf)
HOMEBREW_DEPS += $(HOMEBREW_PATH)/Cellar/pkgconf
$(HOMEBREW_PATH)/Cellar/pkgconf:
	$(HOMEBREW_COMMAND) install pkg-config
HOMEBREW_CLEAN += clean-homebrew-pkg-config
PHONY += clean-homebrew-pkg-config
clean-homebrew-pkg-config:
	-$(HOMEBREW_COMMAND) uninstall pkg-config

# cmake dep
HOMEBREW_DEPS += $(HOMEBREW_PATH)/Cellar/cmake
$(HOMEBREW_PATH)/Cellar/cmake:
	$(HOMEBREW_COMMAND) install cmake
HOMEBREW_CLEAN += clean-homebrew-cmake
PHONY += clean-homebrew-cmake
clean-homebrew-cmake:
	-$(HOMEBREW_COMMAND) uninstall cmake

######################################################################
# arweave targets
######################################################################
# Instantiate the clone/build/checksum/clean rules for every version.
$(foreach v, $(ERLANG_VERSIONS), $(eval $(call template_builder,$(v))))

######################################################################
# main targets.
###################################################################### ifneq ($(SYSTEM_NAME), Darwin) all: @echo "This Makefile was created for MacOS/Darwin system only" @exit 1 else PHONY += all all: install-deps $(ALL) PHONY += build-release build-release: $(HOMEBREW_DEPS) \ $(DEPS_$(ERLANG_VERSION)) \ $(RELEASE_$(ERLANG_VERSION)) \ $(CHECKSUM_$(ERLANG_VERSION)) PHONY += build-checksum build-checksum: $(CHECKSUMS) PHONY += install-deps install-deps: deps-update $(HOMEBREW_DEPS) $(HOMEBREW_ERLANG_DEPS) PHONY += deps-update deps-update: $(HOMEBREW_COMMAND) update PHONY += clean-deps clean-deps: $(HOMEBREW_CLEAN) PHONY += clean clean: clean-deps $(ARWEAVE_CLEAN) PHONY += clean-all clean-all: clean $(ARWEAVE_CHECKSUMS_CLEAN) -rm $(RELEASES) .PHONY: $(PHONY) endif ================================================ FILE: deploy/build.sh ================================================ #!/bin/bash ECHO_ONLY=0 PRE_RELEASE=0 BRANCH="" LINUX_VERSION="" # Parse flags while getopts 'eb:l:' flag; do case "${flag}" in e) ECHO_ONLY=1 ;; b) PRE_RELEASE=1 BRANCH="${OPTARG}" ;; l) LINUX_VERSION="${OPTARG}" ;; *) echo "Usage: $0 [-e] [-b ] [-l ] version" exit 1 ;; esac done shift $((OPTIND-1)) # Check if version is supplied if [ "$#" -ne 1 ]; then echo "Usage: $0 [-e] [-b ] [-l ] version" exit 1 fi VERSION=$1 if [ $PRE_RELEASE -eq 1 ]; then GIT_TAG="$BRANCH" else GIT_TAG="N.$VERSION" fi BASE_IMAGES=( "arweave-base:20.04" \ "arweave-base:22.04" \ "" ) LINUX_VERSIONS=("ubuntu20" "ubuntu22" "rocky9") BASE_DOCKERFILES=( "Dockerfile.base.ubuntu20.04" \ "Dockerfile.base.ubuntu22.04" \ "" ) # If specific Linux version is supplied, filter the arrays if [ ! 
-z "$LINUX_VERSION" ]; then
    for i in "${!LINUX_VERSIONS[@]}"; do
        if [ "${LINUX_VERSIONS[$i]}" = "$LINUX_VERSION" ]; then
            BASE_IMAGES=("${BASE_IMAGES[$i]}")
            LINUX_VERSIONS=("${LINUX_VERSIONS[$i]}")
            BASE_DOCKERFILES=("${BASE_DOCKERFILES[$i]}")
            break
        fi
    done
fi

# Function to execute a command, optionally just echoing it.
# $1 - the full command line as a single string.
# Fix: quote "$1" so the command string is not subject to word
# splitting and pathname (glob) expansion before echo/eval — an
# unquoted $1 would mangle commands containing `*` or runs of spaces.
function run_cmd {
    if [ $ECHO_ONLY -eq 1 ]; then
        echo "$1"
    else
        eval "$1"
    fi
}

# Build base images first
for i in "${!BASE_DOCKERFILES[@]}"; do
    BASE_DOCKERFILE=${BASE_DOCKERFILES[$i]}
    BASE_IMAGE=${BASE_IMAGES[$i]}
    # Some targets (rocky9) have no separate base image/dockerfile.
    if [ ! -z "$BASE_DOCKERFILE" ] && [ ! -z "$BASE_IMAGE" ]; then
        echo "Building base image $BASE_IMAGE..."
        # Build the base Docker image
        run_cmd "docker build -f $BASE_DOCKERFILE -t $BASE_IMAGE ."
    fi
done

for i in "${!LINUX_VERSIONS[@]}"; do
    LINUX_VERSION=${LINUX_VERSIONS[$i]}
    IMAGE_NAME="arweave:$VERSION-$LINUX_VERSION"
    OUTPUT_FILE="./output/arweave-$VERSION.$LINUX_VERSION-x86_64.tar.gz"
    DOCKERFILE="Dockerfile.ubuntu"
    if [ "$LINUX_VERSION" == "rocky9" ]; then
        DOCKERFILE="Dockerfile.rocky"
    fi

    echo "Building $IMAGE_NAME..."
    if [ ! -z "${BASE_IMAGES[$i]}" ]; then
        # Build the Docker image on top of the matching base image.
        run_cmd "docker build -f $DOCKERFILE --build-arg BASE_IMAGE=${BASE_IMAGES[$i]} -t $IMAGE_NAME ."
    else
        run_cmd "docker build -f $DOCKERFILE -t $IMAGE_NAME ."
    fi

    echo "Running $IMAGE_NAME..."
    # Run the Docker container; it writes arweave.tar.gz into ./output.
    run_cmd "docker run --rm -e GIT_TAG=$GIT_TAG -v $(pwd)/output:/output $IMAGE_NAME"

    echo "Renaming output file..."
# Rename the output file run_cmd "mv './output/arweave.tar.gz' '$OUTPUT_FILE'" done if [ $PRE_RELEASE -eq 0 ]; then run_cmd "cp './output/arweave-$VERSION.ubuntu22-x86_64.tar.gz' './output/arweave-$VERSION.linux-x86_64.tar.gz'" fi ================================================ FILE: deploy/create_storage_modules.sh ================================================ #!/bin/bash # Script to create storage module directories and symlinks # Usage: ./create_storage_modules.sh [-e] [-u user] set -e # Parse arguments DRY_RUN=false CHOWN_USER="" while [[ $# -gt 0 ]]; do case $1 in -e) DRY_RUN=true shift ;; -u) CHOWN_USER="$2" shift 2 ;; *) break ;; esac done if [ $# -ne 4 ]; then echo "Usage: $0 [-e] [-u user] " echo " -e Dry run mode - only echo what would be done, don't create anything" echo " -u user Set ownership of created directories and symlinks to specified user" echo "Example: $0 /mnt/vol02 1 10 1seRanklLU_1VTGkEk7P0xAwMwGkD8aYi1" echo "Example: $0 -e /mnt/vol02 1 10 1seRanklLU_1VTGkEk7P0xAwMwGkD8aYi1" echo "Example: $0 -u arweave /mnt/vol02 1 10 1seRanklLU_1VTGkEk7P0xAwMwGkD8aYi1" echo "Example: $0 -e -u arweave /mnt/vol02 1 10 1seRanklLU_1VTGkEk7P0xAwMwGkD8aYi1" exit 1 fi DIRECTORY_ROOT="$1" START_PARTITION="$2" END_PARTITION="$3" MINING_ADDRESS="$4" # Validate arguments if ! [[ "$START_PARTITION" =~ ^[0-9]+$ ]] || ! [[ "$END_PARTITION" =~ ^[0-9]+$ ]]; then echo "Error: Start and end partition must be numbers" exit 1 fi if [ "$START_PARTITION" -gt "$END_PARTITION" ]; then echo "Error: Start partition must be <= end partition" exit 1 fi # Validate user exists if specified if [ -n "$CHOWN_USER" ]; then if ! id "$CHOWN_USER" >/dev/null 2>&1; then echo "Error: User '$CHOWN_USER' does not exist on this system" exit 1 fi fi # Function to execute commands with dry run support execute_command() { local description="$1" local command="$2" echo "$description" if [ "$DRY_RUN" = false ]; then if ! 
eval "$command"; then echo "Error: Command failed: $command" >&2 exit 1 fi fi } # Function to count storage modules in a directory count_storage_modules() { local dir="$1" local mining_addr="$2" if [ ! -d "$dir" ]; then echo -1 # Return -1 to indicate directory doesn't exist return fi # Count directories matching storage_module_*_MINING_ADDRESS pattern count=$(find "$dir" -maxdepth 1 -type d -name "storage_module_*_${mining_addr}.replica.2.9" 2>/dev/null | wc -l) echo "$count" } # Function to find the first volume directory with < 4 storage modules find_available_volume() { local root="$1" local mining_addr="$2" local volume_num=1 while true; do local volume_dir="${root}-$(printf "%02d" $volume_num)" local count=$(count_storage_modules "$volume_dir" "$mining_addr") # Skip if directory doesn't exist (count == -1) if [ "$count" -ne -1 ] && [ "$count" -lt 4 ]; then echo "$volume_dir" return fi volume_num=$((volume_num + 1)) # Safety check to prevent infinite loop if [ $volume_num -gt 100 ]; then echo "Error: Could not find available volume directory after checking 100 volumes" >&2 exit 1 fi done } # Function to create storage module directory create_storage_module() { local volume_dir="$1" local partition_num="$2" local mining_addr="$3" local storage_dir="${volume_dir}/storage_module_$(printf "%02d" $partition_num)_${mining_addr}.replica.2.9" # Check if volume directory exists if [ ! 
-d "$volume_dir" ]; then echo "Error: Volume directory $volume_dir does not exist" >&2 return 1 fi # Create storage module directory execute_command "Creating directory: $storage_dir" "mkdir -p '$storage_dir'" # Set ownership if user specified if [ -n "$CHOWN_USER" ]; then execute_command "Setting ownership: $storage_dir -> $CHOWN_USER" "chown '$CHOWN_USER:$CHOWN_USER' '$storage_dir'" fi # Return the path via a global variable to avoid output mixing CREATED_STORAGE_DIR="$storage_dir" } # Function to create symlink in current directory create_symlink() { local target_dir="$1" local partition_num="$2" local mining_addr="$3" local link_name="storage_module_$(printf "%02d" $partition_num)_${mining_addr}.replica.2.9" execute_command "Creating symlink: $link_name -> $target_dir" "ln -sf '$target_dir' '$link_name'" # Set ownership of symlink if user specified if [ -n "$CHOWN_USER" ]; then execute_command "Setting symlink ownership: $link_name -> $CHOWN_USER" "chown -h '$CHOWN_USER:$CHOWN_USER' '$link_name'" fi } # Main logic if [ "$DRY_RUN" = true ]; then echo "=== DRY RUN MODE - No files will be created ===" fi echo "Creating storage modules from partition $START_PARTITION to $END_PARTITION" echo "Directory root: $DIRECTORY_ROOT" echo "Mining address: $MINING_ADDRESS" if [ -n "$CHOWN_USER" ]; then echo "Owner: $CHOWN_USER" fi echo current_volume="" modules_in_current_volume=0 CREATED_STORAGE_DIR="" for partition in $(seq $START_PARTITION $END_PARTITION); do # Find available volume if we don't have one or current is full if [ -z "$current_volume" ] || [ $modules_in_current_volume -ge 4 ]; then current_volume=$(find_available_volume "$DIRECTORY_ROOT" "$MINING_ADDRESS") modules_in_current_volume=$(count_storage_modules "$current_volume" "$MINING_ADDRESS") echo "Using volume directory: $current_volume (current modules: $modules_in_current_volume)" fi # Create storage module directory if create_storage_module "$current_volume" $partition "$MINING_ADDRESS"; then # Create 
symlink if storage module was created successfully create_symlink "$CREATED_STORAGE_DIR" $partition "$MINING_ADDRESS" modules_in_current_volume=$((modules_in_current_volume + 1)) else # If creation failed (volume doesn't exist), reset current volume and retry echo "Retrying with next available volume..." current_volume="" modules_in_current_volume=0 # Retry this partition partition=$((partition - 1)) fi done echo echo "Storage module creation complete!" ================================================ FILE: doc/ar-ipfs-howto.md ================================================ # How to set up and run Arweave+IPFS nodes ## ipfs Download from https://dist.ipfs.io/#go-ipfs From their website: > After downloading, untar the archive, and move the ipfs binary somewhere in your executables $PATH using the install.sh script: > ``` $ tar xvfz go-ipfs.tar.gz $ cd go-ipfs $ ./install.sh ``` The install.sh wants to install the ipfs binary to /usr/local/bin. Rather than run with sudo, I edit the script to change binpaths to, e.g., "/home/ivan/bin" (full path seems to be required). Set up the local ipfs node with ``` $ ipfs init ``` The ipfs node needs to be running as a daemon before app_ipfs is started. Start it in a separate terminal (screen/tmux/etc) session with ``` $ ipfs daemon ``` ## arweave-server When running `arweave-server` with the argument `ipfs_pin`, the server listens for incoming TXs with data and an `{"IPFS_Add", Hash}` tag, and `ipfs add`s the data to the local ipfs node. ### ipfs_pin #### in erlang shell ```erlang $ arweave-server peer ... 1> app_ipfs:start_pinning(). ok ``` #### with commandline argument ``` $ arweave-server peer ... ipfs_pin ``` ### monitoring Here are some functions for basic monitoring: At any time, state of the app_ipfs server can be accessed via either of: ``` > app_ipfs:report(app_ipfs). > app_ipfs:report(IPFSPid). [{adt_pid,<0.208.0>}, % the simple_adt server, for listening. {queue,<0.203.0>}, % the app_queue pid, for sending TXs. 
{wallet,{{<<123,45,67,...}, % used with app_queue to finance sending TXs. {ipfs_name,"my_ipfs_node"}, % used to generate the ipfs key and PeerID. {ipfs_key,<<"QmXYZ...8jk">>}, % identity of the local ipfs node. {blocks,0,[]}, % these last three ... {txs,0,[]}, % ... only used ... {ipfs_hashes,0,[]}] % ... in testing. ``` The status of an IPFS hash can be checked: ``` > app_ipfs:ipfs_hash_status(Hash). [{pinned, true | false}, % whether the hash is pinned by the local ipfs node {tx, list() }]. % IDs of TXs containing the ipfs hash & data (generally only one TX) ``` ================================================ FILE: doc/gateway_setup_guide.md ================================================ # Arweave Gateway Setup Guide ### Certificate files Assuming the gateway will run under the domain name `gateway.example`, you will need to acquire a certificate valid for both `gateway.example` and the wildcard `*.gateway.example`. This certificate's files should be installed at the following location: - `apps/arweave/priv/tls/cert.pem` for the certificate file - `apps/arweave/priv/tls/key.pem` for this certificate's key file In order to allow the gateway to serve transactions under custom domain names, additional files need to be installed. For example, for a given domain name `custom.domain.example`, a certificate for that domain should be acquired and its files installed at the following location: - `apps/arweave/priv/tls/custom.domain.example/cert.pem` for the certificate file - `apps/arweave/priv/tls/custom.domain.example/key.pem` for this certificate's key file ### Custom domain DNS records In order to point a custom domain name to a specific transaction a special DNS record needs to be created in its DNS zone. 
For example, for a given custom domain name `custom.domain.example` and a given target transaction ID `1H0jHTlM6bYFdnrwZ4yMx92EgJITDRakse2YP_sDkBc`, a TXT record should be created with the name `_arweave.custom.domain.example` and the transaction ID as its value (`1H0jHTlM6bYFdnrwZ4yMx92EgJITDRakse2YP_sDkBc`). ### Startup To run a node in gateway node, use the `gateway` command line flag or the `"gateway"` configuration field and specify which domain name should be this gateway's main domain name. For example, with `gateway.example` as the gateway's main domain name: Command line flag: ``` ./arweave-server gateway gateway.example ``` Configuration field: ```jsonc { // ... "gateway": "gateway.example" } ``` To allow a transaction to be served from custom domains, use the command line flag `custom_domain` or the `"custom_domains"` configuration field and specify the custom domain name to serve from **in addition** to the `gateway` flag. For example, given the custom domain names `custom1.domain.example` and `custom2.domain.example`: Command line flag: ``` ./arweave-server gateway gateway.example custom_domain custom1.domain.example custom_domain custom2.domain.example ``` Configuration field: ```jsonc { // ... "gateway": "gateway.example" "custom_domains": [ "custom1.domain.example", "custom2.domain.example" ] } ``` ================================================ FILE: doc/path-manifest-schema.md ================================================ ## Schema Path manifests are JSON objects with the following keys. | Field | Mandatory? | Type | Description | | ---------------- | ---------- | ------ | ----------- | | `manifest` | ✓ | string | The manifest type identifier, this MUST be `arweave/paths`. | | `version` | ✓ | string | The manifest specification version, currently "0.1.0". This will be updated with future updates according to [semver](https://semver.org). | | `index` | | object | The behavior gateways SHOULD follow when the manifest is accessed directly. 
When defined, `index` MUST contain a member describing the behavior to adopt. Currently, the only supported behavior is `path`. `index` MAY be be omitted, in which case gateways SHOULD serve a listing of all paths. | | `index.path` | | string | The default path to load. If defined, the field MUST reference a key in the `paths` object (it MUST NOT reference a transaction ID directly). | | `paths` | ✓ | object | The path mapping between subpaths and the content they resolve to. The object keys represent the subpaths, and the values tell us which content to resolve to. | | `paths[path].id` | ✓ | string | The transaction ID to resolve to for the given path. | A path manifest transaction MUST NOT contain any data other than this JSON object. The `Content-Type` tag for manifest files MUST be `application/x.arweave-manifest+json`, users MAY add other arbitrary user defined tags. **Example manifest** ```json { "manifest": "arweave/paths", "version": "0.1.0", "index": { "path": "index.html" }, "paths": { "index.html": { "id": "cG7Hdi_iTQPoEYgQJFqJ8NMpN4KoZ-vH_j7pG4iP7NI" }, "js/style.css": { "id": "fZ4d7bkCAUiXSfo3zFsPiQvpLVKVtXUKB6kiLNt2XVQ" }, "css/style.css": { "id": "fZ4d7bkCAUiXSfo3zFsPiQvpLVKVtXUKB6kiLNt2XVQ" }, "css/mobile.css": { "id": "fZ4d7bkCAUiXSfo3zFsPiQvpLVKVtXUKB6kiLNt2XVQ" }, "assets/img/logo.png": { "id": "QYWh-QsozsYu2wor0ZygI5Zoa_fRYFc8_X1RkYmw_fU" }, "assets/img/icon.png": { "id": "0543SMRGYuGKTaqLzmpOyK4AxAB96Fra2guHzYxjRGo" } } } ``` ================================================ FILE: doc/transaction_blacklists.md ================================================ # Arweave Transaction Blacklists To support the freedom of individual participants in the network to control what content they store, and to allow the network as a whole to democratically reject content that is widely reviled, the Arweave software provides a blacklisting system. 
Each node maintains an (optional) blacklist containing the identifiers of transactions with data it doesn't wish to store. These blacklists can be built by individuals or collaboratively, or can be imported from other sources. ## Blacklist Sources ### Local Files Specify one or more files containing transaction identifiers in the command line using the `transaction_blacklist` argument or in a config file via the `transaction_blacklists` field. ``` ./bin/start transaction_blacklist my_tx_blacklist.txt transaction_blacklist my_other_tx_blacklist.txt ... ``` Inside a file, every line is a Base64 encoded transaction identifier. For example: ``` K76dxpFF7MJXa3SPG8XnrgXxf05eAz7jz2Vue1Bdw1M cPm9Et8pNCh1Boo1aJ7eLGxywhI06O7DQm84V1orBsw xiQYsaUMtlIq9DvTyucB4gu0BFC-qnFRIDclLv8wUT8 ``` ### HTTP Endpoints Specify one more HTTP endpoints in the command line, using the `transaction_blacklist_url` argument or in a config file via the `transaction_blacklist_urls` field. ``` ./bin/start transaction_blacklist_url http://blacklist.org/blacklist ``` A GET request to a given endpoint has to return a list of transaction identifiers in the same format the blacklist files use. ## Update Content Policy On The Fly If blacklisted transactions are removed from the provided files or stop being served by the specified endpoints, they are automatically un-blacklisted. Added transactions are picked up automatically too. However, the changes may not take effect immediately as it takes time until the node refreshes the list and applies the changes. If you wish to add more files or remote endpoints, restart the miner with the additional command argument(s) or config parameter(s) specifying the file(s). If you restart the node without any of the previously specified files or endpoints, the unique transactions fetched from them will be un-blacklisted. 
## Whitelisting Transactions If you want to whitelist particular transactions, put them into one or more files in the same format used in blacklist files and specify them on startup via the `transaction_whitelist` command argument or via the `transaction_whitelists` field in the configuration file. Also, the node can fetch whitelists from remote endpoints specified via `transaction_whitelist_url` command arguments or `transaction_whitelist_urls` config field. If a transaction is both in blacklist and whitelist, it is whitelisted. If you restart the node without specifying any whitelists, the previously whitelisted transactions can be blacklisted. ## Clean Up Old Data Data already stored at the time a new transaction is blacklisted is removed automatically. ================================================ FILE: erlang_ls.config ================================================ apps_dirs: - "apps/*" - "_build/default/lib/*" deps_dirs: - "_build/default/lib/*" diagnostics: enabled: - crossref include_dirs: - "apps" - "apps/*/include" - "_build/default/lib" - "_build/default/lib/*/include" providers: enabled: - signature-help ================================================ FILE: flake.nix ================================================ { description = "The Arweave server and App Developer Toolkit."; inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; utils.url = "github:numtide/flake-utils"; flake-compat = { url = "github:edolstra/flake-compat"; flake = false; }; }; outputs = { self, nixpkgs, utils, ... 
}: utils.lib.eachDefaultSystem (system: let pkgs = import nixpkgs { inherit system; }; arweave = pkgs.callPackage ./nix/arweave.nix { inherit pkgs; }; in { packages = utils.lib.flattenTree { inherit arweave; }; nixosModules.arweave = { imports = [ ./nix/module.nix ]; nixpkgs.overlays = [ (prev: final: { inherit arweave; }) ]; }; defaultPackage = self.packages."${system}".arweave; devShells = { # for arweave development, made to work with rebar3 builds (not nix) default = with pkgs; mkShellNoCC { name = "arweave-dev"; buildInputs = [ bashInteractive cmake elvis-erlang erlang erlang-ls gmp openssl pkg-config rebar3 rsync ]; PKG_CONFIG_PATH = "${openssl.dev}/lib/pkgconfig"; shellHook = '' ${pkgs.fish}/bin/fish --interactive -C \ '${pkgs.any-nix-shell}/bin/any-nix-shell fish --info-right | source' exit $? ''; }; }; }); } ================================================ FILE: genesis_data/genesis_txs/-M5_EBM4MayX8ZpuLFoANHO00c4pdrSmAQbPYv7fq4U.json ================================================ 
{"id":"-M5_EBM4MayX8ZpuLFoANHO00c4pdrSmAQbPYv7fq4U","last_tx":"","owner":"1ZukhdLqipW_i4TncK8A3S2gMER5ySKwzAtkfACYKiRIh72VgstVYW-h-JF96NlC_ZDrOu-XJvACPIxjVdZr3KPH5RM3EHTk6LCXHsTqOoVkswybbEzZ6gbCSDJjkGvVscpqJsAcVcNqiPjX9qTtFTXzncFUqlYjzBcZQDN5kI8c3Pokg1ToklYEFeH7BnbacXxyyy6n7uTGtzCxnB4gPL_Gn88O6QSLLYfUNT7oH3m-jrU3RB8vZGDMhmdx46a4XeG64BRHSh_ff1KyvYsnB-nDVcq6VqYKrRt4m8S_No5-r8Wy_Mr5Y_092XDnMlu82yMtS2avuAaNF8kDCmPjyWuP-QarherI90wSDfYOjw_-zlhG3clz151bDvYQFYP1o0O5ARC3poDhsEDdJYbikEwJqkuuOOrZTbFaFAAPW8LQlN14vp5DNlKOt5qTKqPMtBjBeSWERrhY0JxGpCmrT1p7Rv25VBL69_RAl6zItJzWxEYNwpP6cavMhhoPCHoU4t6xnyLGPD3wNnNoKJ5UQxaTqxZ_T72vSp6YC_2c1uSw6x_vMForWyKb9R27vj72fgOuswp0sSlVssuJrvE1dS5nflhAnk9-QdTX9aURu8W3CtiZo4acj0n1q_njjtV34w3pAqfUWdzGj-Hyv5vpj9nxu9GG7hxuATMfXWpw2Pc","tags":[],"target":"","quantity":"0","data":"Z3V5cyE","reward":"0","signature":"J0w6nl36VttDwzxr6m9KceZxo5VSnRuj9VoDQ3wZlMxIwk4aTKiuJ8Ouac7okAXecmAtsxRtbEE7XTvhv66Yldc3apo5Wc1blRZJBoN3_kxfUETzaIsyrf-AWmhUL8OuM4euxJ0UEE86P-7ONRAHNtqVpXgJbEA8oTo8lefTFrm_TxxVHtqbkW-VNEGWKdMqoSFDfa8GRujyKfcfQEZdvAoo5FacrYpqrxji3O4rLN8I6DiIr8suxWiTV2lxR6zFVhJiUA7upeg1EBBjJmuMxNHiW6RZ2mqeeE_2tawrjhITLtl6jASv7e2UWCp2Vpxovh8ICDRmynSDZyO8jV6n5k28GkhRH68-UNEZbICPkCe6EcU1Bk4_z5UduRexaCoi6xPowmd5-N4bSPNwLK_isCQ23b4YDuwZAJoNqiMAh82rXYjQ4n4VkOFV90IQGfz_GuIr5coSipqtM6WZADhGFs0-IpZ8byeJdz-erN-eE_rxDZqnHLfEjudKTFdWJwsB00VVYWo1D8o1GUNiHtnZ9GqCyW5ETzEgr8A_f6coJt06PJBTU6FzLHHlH0gsUeprcXk8BkOcn5XZMqxURdtUCl86E-5NucD2CPTWFwrKUuYCKl57hRQKU1z27c8lBodDw0i6deClB_ak6MGTjXcBC1CxWvCEJEthPUY1i7O1cNI"} ================================================ FILE: genesis_data/genesis_txs/-wzIQJ19Hq8Zyf1L85Ga3uGTrdWA2W-UNyr8aH4a4iE.json ================================================ 
{"id":"-wzIQJ19Hq8Zyf1L85Ga3uGTrdWA2W-UNyr8aH4a4iE","last_tx":"","owner":"rpv3hShMP8ZGcEYs-cZZ3AgCLGjNp9MBUl5i1NonHaBfwWWm65wkns4qpUL0rLuA8JTBepnrE_YVhCpdV8NGHJHBsUwOlM9EQg5-NaSgq_GR6kvpTE4vdH-yqBDEzdHL0tDV5OjTEAVrSw9HuIUOqknAnxgcUIxYwTZw0K0Zdluq70Fvcsk77EryMB5AUj8S0POYVJ3s0CUp_4jsalp_x8S4DrV2sM4v-1GtWA7apzNI0X30JlwRJsyJHN7Sgi3HfrIT2izXCQyezT9v_DvzJ0B0sRuwywxxE2jmdY86ilHaQ7txP2tRdJKdF4afjkT1PZuxXkZ2e9Rea7h8QzfKEKUNprHLuTbvagNb5iiRRYy0OXoo62UKqCd3JYxvG7xWb4t4ySloxGm6HLavHtp_T6mCVrJA2e7LPvb05LT0s6U63tTZOHEAnHvfrS1gH9bVybaExWMTzsLQTbwriMaLcS0Hdao-LPs3Ut6--L50XzDcyeeITjFH8vE3RSxkUdHLFJsZFgr66QMPa3_LDCB9A5OPVr9eVSvX0eFPoFgj_TOssUyjj50LMYj333Kgd_KS2dA6jgt3Bl7H0b5iUt_WfjAgVRGHiwdFz2zzisz4oA6d6XQKKFFvahTx4Z6eO6kcCTHzUDOVb2st_EiugQHpHohFf5R5Jj9rVCpAd9_X5u0","tags":[],"target":"","quantity":"0","data":"NA","reward":"0","signature":"IikTtEyeMK-bU-wi_sAZ2rxhjbJn3D1Nj04AfYnxiegJ7HCQgSqhkZRcdpEPT89fgKQSiGXKXV9MNyaeKCP6vcVrqK70Cg-at50kkn6pkyMDZmdxHASYpqorHBg6ev4f8c-gX8jrKVk9-p4_C7No9VDgyKXGVa6ZHVVKH0vB3JhFg9Dolkq6jGzHhLxPs1LaTMCjLoD5UyEbciVRFERYpj8SH5RZgHREvqp0tzlBkBOkGRRAlQs3qWIc-k_CBSsRxJ7Lw5X8DzaxJP34Na9grs7O0ef9WqfChXBlUpnQu2bRIOC6kW3Ey6mPtRntiDM2B3rpjrbXjZ1SFyHPIDheAhMzYK2HMbx6dIJ_Aci0pS7AsyJ2ouZwu3HOvt4TUxZ172vmu0ZgE-0yt-t15VpI9kT3Ywoi62Sipw4ruI17kI9LlxxCRRB5L6WOFrxygzg5wf6EEl4FZ5sNh5LQj_WUtYQZDVxlqjvX1pAt3M0DMUIMbNP3gNgur7IS9hBR9cldYf9f6d_DwL1AECt4czCofDT_OxjOtwV-91QWY6bPAR1rp643C-H2lrBMxEzi_jauzziAkkfWWcNbXMGA5G8-Om0038fvaIUJwNVAygcCgSEuIMiF6GaQ9ok-J4J_r5eSgHbjmIFCR04G3tpgO5llD_G3gq6rI5shI4h-8aaIsTc"} ================================================ FILE: genesis_data/genesis_txs/00nFXThK86Aog_HfLJc9j0nnXzXSlU6VdGC8qZc5ekI.json ================================================ 
{"id":"00nFXThK86Aog_HfLJc9j0nnXzXSlU6VdGC8qZc5ekI","last_tx":"","owner":"0_bqF5ptW5iLV2-aJ6bTZva7d601UncOicbBR6MrvahaZKZQUcZjm3kK7c5eGFbWUGxbU8p7f7rjfmF1L-1bly11--MHSwEkgAh2ypRFLF5TSwziBjGGUUHa8p3sSbANoUphbRyrGx1ZimX3WcxYnnGXjuDvwkxerRnwkYIe_vez5CZVe2fe28ceKaVfm2VtFN7Pb3QG4NwMes8CcLdmaoVNPmSsg5ptwhMV6wQ3k_bg9pNG1B0TwCbHvi3xmbEpjOd1Lv5Bvjyeyjykmd0J98ZbuEaBX_QgYOXGtA9PtCg8lheXbYwcvTKNXYf9fDywlgJ9DtwsN4yrndmWD1U8J5TTOoOgAsCcSfvwJ8hXhdQp7YgdA66LUQv1vaM2ZpvIGHu6hbqBr7lt--a0nT-YKUSpKB9ELWsZCk8ut1X3XNxJMoBi-GpDF9gJ-cUeThsNlik7nuHBcuMqXgfCNqDu6hCQmYB38w6io4DMxi1MXozV-VvPgM6BYcP1LbHQnYb93NMYsN7cbjFnCOMP5frm4qHqcDdYlLLKmJcW2gB84_nRoLjsj4b2FYBh3Jfo9aClmAEQ3o40Qf6_gmP9Hqemhw80lqKgGEKJk0l2-K6fNgHagHw_r3JMzCIOH_zCma9hQdDvWH40-2ggHGGlFvSy4jbcIwvkV2ZtI1bgIxj-Hk0","tags":[],"target":"","quantity":"0","data":"QmF0bWFuIGxpa2VzIHlvdSAtIFVwZW5kcmEgTWVlbmE","reward":"0","signature":"Z_IgNeQLLHNESZeyfF2sYdVAYqbBmKOnVleK7utf5dCsQ0hdxv5TL7DtRzTucixtxua8W0ZQTtI1tKhZRNg8SBKDIP2-TTwmOloWbvDvqpzNNKW2Dx8ViyRbKyZq14B8BLObnRhdkcc1SCmgUvzWiRhWTdQV0sQoHM-mqi-dIYHJKa57zjR-fKvYpIHFCcDlH_H0RtDuJs77MBUxaKTKI4zN54WFc_xiyrLmC-t3JMPBxYdCbjVSZRKEmAWnoBx45AQjE9n7SUup7M2JM0aWPmRem7sQS_Kvbq_0q5iItlwG2HZQahMQbok9YkKhMW6hfyQCcA74xQdq-PbpmGUnnwqiUQeBVT73boTNiyg3skq6RTm2_F3vQr2ZnaIOGAceQv8YfHEuOxhy8KHB0GdDqFqNQLu4Otn7DqbGdxuMIiQzua4FZ1kHgJbmfn8yUiW9wSZTFfcdTZSts76ey-VUO5aST3vvJ-t_wt3MSJu3qfS6CAzPF_r3gqNJHW2R0tHCK_T1JUvK8EdvFP9yN_xxgRRkLlOum5f0WTchCciHMCpmvkpdRcmkGqVut0TXXEopVJknAY2yTjh8OJwMb5DBXUIG9cG12dZWn0OPClr8mYeYOApyvE-btC_83E8aCyDqijpsPVsNDyeQ789Y5kYG_DXuWMw7UljUlmKWNdcB01c"} ================================================ FILE: genesis_data/genesis_txs/06dr4mrXcKlfPbK8t9vWOBCDJznyG-AsKxED-Jr0U88.json ================================================ 
{"id":"06dr4mrXcKlfPbK8t9vWOBCDJznyG-AsKxED-Jr0U88","last_tx":"","owner":"uS4MMcT-67rchYvbu1dVoru4ojmv0EpENu1nCr0oBiQIJDq0hMgHfUEeuIf9sbBJwvBLC4EFWw5lBHmZimAzvOv-0BbPrjBdz3AZHpjifX3WmoGl6zqfkv8m7eLPI9qFZMLzOJJ2avGk6WoDB1FYoUQlyKdcyhd2gScNgpHc2616bEptlzP-uLk_xgjcaA_tdG6hMG0O33fbfWCHfH2ws2zWmqImY9GTboMuk6Z4xjXH9w3OG-EADsSmmolSPhDQuy2UtlOb3h8HP_5ChRnYS18BQ9E0PraxFsgOoqwdjqB5imCZWkhLeXtVjHs__IRbopM_xpbwc4c_c_q3kuh_yMi3g3gJPlGgjSMikzSyY5IfRgXHTQuTB9naPuGr0n28Gjzxt4hO7xoiZHOUFqCstTnc6bHaoOZeVDi0Lm8R6IvWBe39krinElZGkVBagcIYkrExfLlobOgaeqctd9qFoym1C_hf9VBSdATXbIdG_pCXU0pBI51QACn7uAk4sePIUM0crEQXXaZGckkvsL4hnlDAU3_tfRtdLRZA8WqwmF3DWcXOISOXccF2WlzgrjAappkVQallq1EkljGGijrLOR77bnDIUnbV9wrVvdxBpisK1EviStz4_NXjrj5aCd7-N6IdjiSRTo05eNTaY2xhrULsNYHfKiuceg5QapJsfas","tags":[],"target":"","quantity":"0","data":"aGVsbG8","reward":"0","signature":"nz2dhzg87o9coz9I0cdI-FsPxsay10XoH0exW63UvV420ib26A2H5Pjw2N87PNaKcTuXzWRof9Peafl5-e7rj1gRpfKU0kGl3Sk_78iISN_dH8ywX7qVYXN9S_sDvR1HdpaUawBHc9AS-n3mew7Q2pc3bKra-Fr89gGuEHre0L6yqjtmSEMSCf6rwEnsjJpfupg9DaGlIz5RDRTF5NQHOp6H9cCghApYrYn_VFZGxpY6x155-gBrCLIlG9W0OxzE01g0_hsIySg7rHx2iJRq30YJ0ohdUpL4BdbUOCJY9grKvs-L0dXqWWqClYnGMDb_L46misjOicysePR31rSPnhpj9inY7OVhk6VHg6nRg3KGt9AP2tr8_XI1sPcn3D9Be6mjZNwC2j-YovgV4nkeytlBcYzRnx_126EBqNucO0jfUCUCbuImMBEJ4sllplrGBSGRYlmOq9fI3T-9jaEqTSy90qp12TacLRjtG1urbCNU7aNbNtBw38XOyjyQta1jQWlS6TkcDHCJIOXSJoZFCmrBOCGgpm4_8iZHgsfrojj9bnxq2xYhxGF5S13z33uRt-P3dXU7JcmmFd17WRVehbFIhP0mBMtg5llyrPajhqz7Y_BJWT3Jsn4TzcZF-vWwrjI45As9Ly2jSHpNtPcgN7GXwzq4KegfbMSF_E4lWGk"} ================================================ FILE: genesis_data/genesis_txs/07u3F6WH-ohqBclh6UanAQ9Tau089eLJrIYM-8qkAbw.json ================================================ 
{"id":"07u3F6WH-ohqBclh6UanAQ9Tau089eLJrIYM-8qkAbw","last_tx":"","owner":"unjIwZ7-TIlfW_y4jkIbxCr6f-zApT5kko-4zfFvQyfSHdccAMg9jwHM5wNGTFDE6hCbT-g4VDIl45Rsv3tGPdfRG1PtBfrIsYNZ6TbJK1pEOMors9sRmlGweqabeS_RS4T37OJ_nFXfzEbEgI97buA9HlfrSAiaFtB4UP2J8MlwWoU-4h92HksgVIGQ5a0Nq74KduviDj6ysK2bWCteGtspReSn7s3Se5Zc2zxwhx5DSoO8e_w-mV1WaA3A8lyCWojWrGFCCQLiBY8TMFcvUFgzayq6EiS7oNn4uO-FBURF1qT3are1aHKLHUADn7mQgN5FfokAdD8ePixqvL1Yi1ghUV218QAZGTQaBB-GMdskSud7tPxS0UbpKwGdqXE3LvG_P9S-5uCFTMPzEVQSLzXRX_m-VEzYcGn4mbxNl1e0A0zJBJ82BY6YEl8brFw0IDJyaozpUNU2YRzRK7Fc08wemlVNyioJuVhLBefSgtSC8PJG_IixYfF2OtYTHLoeUSDAHbhQAU5ASR0nvhQZsu-aIIYmYfSxQQ9Ub845AQGP1z4j8ccrS5gy3TpDoWtTEtB0cokU14uBt1QPGEKRWUVZR7kFWLa_MzYz978zLe6nN9I_z9dVVAdezqsQfJT5mP52z8PHqCQ3z2iZJUfPUYcwC80S8vEjQ7sMGE9sHyU","tags":[],"target":"","quantity":"0","data":"TWFsaSBQYcWhdGV0ZWs","reward":"0","signature":"UuS6zYGmYMrM6bvVl7IX52RPLlYzciEw2pLN5bGGwaAD0Gx6tY0oorrw-tLuhxSSJAVDmFN8a7DGFaD89UsLns-gH4E1VRAQY8K-dMp2YRbuqF6wfgkaf1MkJ82qoIQACcQu-U7_7mrTL1uHSaM5M3OLqcsAwSXKUptn-JZybmrnImygB0-UdjEgdjFihlQa-6os_iQD_TSjaPbni7fdpc1H65ByByF84xf6ikaFXa9hpVnMNzcPxe1wAP0_TQgH9mO1C7X6rSBOfZJFdhJwqR6EKy2L6mDPAYgZDCXuv78pckJ8uCtiVataMh-efqsoPu-s9b1GP8-OScZTUnXDaYqMvmElAs9BG9y_XvI1dzyQHfxyf0-JB8knlYUXG_kQUjTA9kpsCG-5mxfeI5YFYi-6D2nyw1VG4YSxI4jgWcyJ0Xqt9MHMejL6wuvImzxICcP1otH8uU5nx8vUcsx7Fe0cnbVJBHe84Qir3OEhEbi4oJlfGOs2fce_iHiR5Fqqt6MyIL9GrDF_YLaT6B07xiZgfrBeztSWFgIgsb3ALBnAGbVJXfc0s0Ejt0fvLfsmNt6IgXEwjZA5aKKXzq07hyaNG4_K06EqnoHLI-Z-KWe-tehRC3wGlJlYojKROhOHVNT3kv5cK6jsYtArA8ehHytn1aITl_HSKPt8snU3LL4"} ================================================ FILE: genesis_data/genesis_txs/0EzNUQy_5b7CwNNLVAi7CnameMgnxVh-XyahT2kn74Y.json ================================================ 
{"id":"0EzNUQy_5b7CwNNLVAi7CnameMgnxVh-XyahT2kn74Y","last_tx":"","owner":"vBe1dL5_IwUqidI2ye_rEgSL9QwxniBK1jDpLD3QFoy-QAQ52-Lz9V1s1TuZARF0Ouk6E60mKpDEOoOu-qCN1r505LFSg5l37-DyGYv5ePnbEyeWKzDWMakCFHYzJy8jIhFRJ9PtqWD_XoVQ-t30-TeQPqTB9Knc9CcLiHd3k6E0v1Yzk9f9IIrH0a1FpLIbiSh2kqZjkqe6AuysP8_v4P5Wj5fucdHeJUZhhatj4F802IzaBMnvloeuKoRE5kYLmfpD0BtSTO6f46P0ms9iyGgECCENicSQ8sf_a_y__wicHLDmRIhCH8TVMtDqNbtsprheZNWZpYOajD_eE4bR46zV04cY9ZwCNzb3KZTHqVGanBHVP7PYwM6am1FTAWzzh9UKjMpaaToAUDeyzV9ToDePcM8vajcciNYP-yDv3RruigwU3cH4JOqIP8t4xABooDAQzs8crkFuUpaGoOA2ZpJYzf3kOdMnlYjX4T4opagbv9JntZREeb6mDWJnSFuRABVC3_eZc1-dkrOATTYWHXK3Pn7ULmTAouMc_Q9DffxrKQwTgkABslG57t9ljhbsVu3W_yIJOwkN_72DI6TyN52ywCg_PZX99uEGqhC1eUZml53NV5Z0BF7KsiWijkm36WEgxoDq4OzMBbsoX6QnnfWJvm2dY2tn5GDsBk3niY8","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"XUvEtWBt0IfiJHd2x6qdptqwd6E12GVf7Eb5pOJ4zhk5GuNz97YIXruTSPkfbKbNa_fwuH0nE-Y61zG5AdC2lJscfTcZmuYhCfGgl0F3K9iEmSwkUAv6xlPS5OaktlzfSRiCYhimZJQIgGf-7siByHXe-5FHI1uhMVLTenm9OqzVjNp3kPQObjz_otZXDrxo39hdcESNC7ggaZH5IWv3YAYbN9oTAWGh1Mpf_t09beMoIPkRZJrNQ_lieBmCj_QQtslVcgAz5mgUk0i-OXiPhRMKY20lKZU2caHPZnT5xCxJI5IHjSbmphg7mI5hf6UUH5nKRzVcDn1vmRvo2lQr-ZiKlbrhAQRQyF-Hr50y8UpXQktIYl_CKC6SwB5MiCEDanDuB43yEuQwxSCnWBakOjvR6syPOFqaTjD_XDDD-Tdh9OtPcifrrXf5ayunHSb4mrJDK23hfeRpSXmsZXLt0ijwOtY0Rd8QHdGqlFgLzoOYoDyzVvSlNE8r8uHdwdehNP_kYZaWw8bzzceeSYcDh39dfTQmRE5mBpIsu-qXuRYc81DYicYbE8fvrwGc5K7bbx0rYufkKoZE7SzKwuYvK1NOjw0AB6JK3f3GBAYbxQp8o97YcAQoxRcoA9WiYGt4vnOUOyjbIqmoXpi4HRpsUju608tsVkxY8nWbeq22jAc"} ================================================ FILE: genesis_data/genesis_txs/0FJrLrxrFkVTBwRrzCCh88Gm2tG1xPxg8s_IuRZDVzw.json ================================================ 
{"id":"0FJrLrxrFkVTBwRrzCCh88Gm2tG1xPxg8s_IuRZDVzw","last_tx":"","owner":"ygjBV6EA700C_OwE03ezCj5yKdABrMfipvr6OgUadBtHJ0xvn6BG-sNbCJEz_uI76_tsvL70EJ93thzK2wz7Kf8R4eGYaHytEJnd_r9naKuz_kX5tyiCcvc97iw9pPfX_-vs-S8tDjRQponOZXC3xyhlD_QtcwuWZs-mHMQgOoYE8BCPl018tWULUA_vSnUkIz5w3hLeELBum_8umoEvw3mOE6YQLA9mzpBevkDWkHbUbwxGowpj68Fd3TWwyLC5vxTCtJi4aI18L-m1Ln75yiDPdvNEL0msfoGHxNbj-n100VGRK9INNm7S54LEZ8EF0wxXUrYYpxgp93ZHp0Hc91-XrPW62uGrCefY3T2Q7Y1I6XlH6kOH6Upw5UpNWhD_EGUMX7dWJb12U17QxaI1OZroEEpUp8sZq4VnSsi4Spt-AhrG-Xw51HVHDdtLbDJy7TmUE0_sQF7M8zLj7UkKDEJFEjWgrjzACt2T28YIhsk-OPD7HmKCrASg2JyziBdpvDFh2HeeB59CrDgo2ivW7uW0MOglhVqlfIHltwT4Psx1pVQumYU8A-UmrnAi0VaLn1yp5lmR9RoaCWmhN3y7gzCVx9oKO781rVJYU2M3OV5L4X9oD3GK_KMezRl1-53vqNZx6osRBAaQqUe48NIUBCmnOYT_VqqRSN2pJrg8Kss","tags":[],"target":"","quantity":"0","data":"Mg","reward":"0","signature":"v4vbK_qEdvd1aHfqjni3qxNsP9JzaDbQSD0wEIJyTSNYSCfkCm6iB1Fi5XKKKmJRk-YWKdwVqzB0H-DL9cCTfWBwQXaH-yH5qW9Xf0w8mlusv0ydEwt-WYF-imfvRWkv2SuW7ecgRH9HO0NHHOG7CThM0q0xjWgxX07m4Z8foPQIHi7DGGe_ynR3RZbED0aF4NFK70ilkZO7D6G4zn88I2kKDY2Z2sw6JyXQ5DS7Khj52ldDKMP6BOmOEH7dlxJL68tfhwIemcHe3zYl4Vu-1GqmH_ENIKeR21iH77VvCm0_voc2lAe9U946Txt5SMBby-Wxk6Q1V1ugY1AnrIMPqx6Nd7TBSa-JpS5IYbLcZUocvATj92nKw13ye3Oon2mTPz1EIYjvP670dYnfFiF6OZe2PhEcBXraXaDjr_-NOVVYdwKWX_h2EBAZcZvEfMTTXYUnKC2Mmqtvs84o-BYJlizjX7iCVgC8L1kLSW5yY_LKQsxAdyzczWgA1xaP-a1oE9dkvE56GuQ8fN8D0bZz218FJxv6fvjXtlgR3VDpENdKaSZw8oap3hBuTlnlvMY9a3XEr_LegybXZgs3aijCtkyW67ft6YeTFtbShYrFY_qAjNjFHlQ0AaZhVdBVsVDxcRKCS2fUUDyK6FY43Dgg9iromLM5X_1slunSLYwGm3s"} ================================================ FILE: genesis_data/genesis_txs/0Mxvgz6_wL0FBOxJmHcRcNwiaV8B90whDxG4Vh_GFic.json ================================================ 
{"id":"0Mxvgz6_wL0FBOxJmHcRcNwiaV8B90whDxG4Vh_GFic","last_tx":"","owner":"ukagDSx1dymaqdiSvCMR4ulMOaJaChntx9xaqL-sgcQgTT5_m4-Z7R1BQoCIWtPK3AyXu3B0Z_B6RuUh_ISwcf69wYa-oXXVV3Y0iW7ogRlNcAo7zdwetvetwOqK-g89SCCeXOvHoDW0Dtc1wHe4tTEgawPJN0lpVqnySmpbEP1vNfeahD9wi6hA7nBnf2lzGNhPS2TIrPb4t2rxHgz9P3sz0mok2RY7qLdqMhDVB9Vx_eV_2LSuQtblFNRkvUO6CVFLgld8tO39f9jz-bymdu3_0J0FXrf97gWceYgPXnXZT-FC6Txe5wULLGmWEbmPtF_YWzTwupeOLm93MeE5mDTP059DoT-PgYWdDgToXN6SVgRd3jR6LpZv3JvRZFQ6E1LXBoS3l1FCmuu4ubSrIHBm4OdQy1EiCRsxIx1JOVMxVcdgMgLpkCeLv_letFuJB_6wB_UVHq4gYh5hoRd72NILNuC4O1tkvDV4cc-j4kFpLGaRYxdwwJgJlGtgBUUunnNtIGxgTB7b_WiJYCOi5o0ntXJrLDOnk8apwvSXYvTkxMLL8LMyyV6s3I3NBEOwHAwt4DRMLszIuS7x9bdId_vs_2YIY_h6BH2nwg_SHD527usKb-EBhIQdN3GQvDuS4BwExooEjuMYN7o53MEhVXuiYrTHapm1zsPNyaTF57M","tags":[],"target":"","quantity":"0","data":"QW5kIG9uIHRoZSBwZWRlc3RhbCB0aGVzZSB3b3JkcyBhcHBlYXI6CgoJCU15IG5hbWUgaXMgT3p5bWFuZGlhcwoKCQlraW5nIG9mIGtpbmdzOiwKCgkJTG9vayBvbiBteSB3b3JrcywKCgkJeWUgTWlnaHR5LAoKCQlhbmQgZGVzcGFpciEn","reward":"0","signature":"du7VNgNQAKNvQjlG8WC2xoFI9wC4W6Sr0dUivAzywiuiTNwrCSgR8RKw8LM9KNLIqxW53H2QkFmqFfaK8SCbPxKKhnvqvrCVLZxDn3JfUadIpHjqNlGjQPQVOQFjIxSkY6d5kKlH581RpWM8GGANiSH_tZi8lGokSJriHUFuTsqHckQW_92jIg9oTWLMfWq0pxz2J9nVrvVUBxjpNFYB4OwcapOy7lH4bxrRuRJ-hQS55Cn5z0nB30fvWM8NGBuakVkXXJcGZVRNAbG82RvivijSa4e9B83BhfGu5QAhqWy6f0G8Mi6mS_hoYsjfoY0IHBQ4HLR6k5pLNl2iSwT7Tqeb-eDHBmBfGH7yG7OFt_IjkYoyBM1mEjwfFTfQNQo5ccXNH79CQwYnugoKMMBZPFG6HaC7hDD1AdVeILSv1YK4x7MH4aSnGcjqwWBqSKeix7efCzFD8eTmCYIJkcYhEEMDUqHrDu_U1liAl-o9sKPqes2sdAfgNRP2Uj4KbilATZS3aDae0jFfO514-t-L7X0YxxOXP9X1k6mU92Pnqm556f1oQu7DGilnqwJRPz7dersRJRjTNoQoAvVVvj5Th5B1M6iAnO9Y1cf1KVW27d75gBQBYvQ2Ktxh0Ch9FIT2tJsNXPNWirF3CFKGzdZiud_k3WxmyQ76ak2yC8EYAu0"} ================================================ FILE: genesis_data/genesis_txs/0O-UnzBvSFYoMQrbcsKHRH_YqNNylC1n9KWXmm-rr90.json ================================================ 
{"id":"0O-UnzBvSFYoMQrbcsKHRH_YqNNylC1n9KWXmm-rr90","last_tx":"","owner":"1pcS98oH7pkbVe8FaVjo16ahf2IH6x7W7aGCWZ6krYM_K9oStXg-KDj5lDsIHP78qawmG3wTutj0NBauqWlTVUIapmbGERnJ_qgFr7BeIVQ9cxSqXRBf-Cvj8GYSvjxm1vEytzfEqzKkifQHgqmcKlgQFVIy6Lu-qgT_jTtxt5FFDWKapF3kevVX5RfCmrT6T3lEjG12KPH8YvAQAEG6uG0OOK-ThcAAsPsZ2EdNu0_gSXpujJS7YuFg7W0FIMcRrAkk6FJrCmzW4xdwzSSpN1eJMaaNho8PL8d6XdeTeCKLMcz9eWqWyS9LYQ9Mx3800D65leWaYA2VRiee6J5uGMedS4AWENkXSkFhMP744Z7QN85v9KVBSJi0mlIK9IETYG1WcLNZXUBevUpk-yq7n11mR9AM3f6KMTTXQwLzrBN_CvghNr40z1ruPtaDPNSGQAajT_YyjxubPjSa4sBZ-MWG403P9wCRn84rv4o1XFmpqXgFeiYJPkZdkm0RAU_MAEPz9Wwj32YmmTeD8svat26Hi2_L7JZEh2E2Hh15ogZPApw28r78DG-MArwGyp268ltzZBIs__S91gOEZnvYWachD_DN8dGiNP7ZYDUKIfijHN_CZWbNoe2LHfNYzKgu9plLyfOn9c739MzPZaFOl4iS8ATeSK4vyerapJG4Vu0","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"RiIKVfETYoVJhD8lnO7dzZ_FU9FqRjwUGJkZl01IFxn0suESgTrAuNPRYFE1TD-RjOVvrQZ83JOPR6_PtECH-E-3YchyTqJ_MjAAGcgJoSWPtOM7NXKIsbQcqh5KETuGD796diFSE3ncUpN6eNy9ZqsMWgyNF5Um62gYpLDtPAsTKWgfLMx5zHIefHy4ncgaBcYEeZMg5aHDQv1b2JpZYwxN657_bf1QKFZAmMLJO3VcuqqgfW5H5YdPLEV8eTazzFuWavGLNWVo1KgoHiwoql4-UUFjJOPbJo8JhVBa-AqpvrwJTV7fXJwroEBTfilZsn6MhlGuFxV4nUs5BdY2IrDe_DiHnK7m8EwUc-4IGGP9AsschL7s-Mk4uBfSwpXoK8j75bqsXwWO1Ist2a42Jg1831gqN1AcwogJza15aKPX6t8l4Yv-kv6uR08oOUtWdD9pCYBmBYKqg97KxdOa2FlZQ4tFgIPJPEj0dE6xqVyCUw2LlOpjqnvwgDS4wT9NlhVF7X9vpivcT-7KwtFjtCMSUtUaWuX5Ot5vCIelfEk1wYEcM3-KJ4Xwmw8w1TBim4HgC7IqT0KK0MmTjH7EaX9YXIFsum70x1GbQXoJxXd2QbCLDGqMiwznVjQSXNn6SveNR-RePOoMK3ntMUAnIbYCEReiOzGiKoP3_5p0KJk"} ================================================ FILE: genesis_data/genesis_txs/0_GKZOdtRH-nc094U5kFBlvQSjPz_oX0tcIroqLFD3U.json ================================================ 
{"id":"0_GKZOdtRH-nc094U5kFBlvQSjPz_oX0tcIroqLFD3U","last_tx":"","owner":"rIpHdfTLSATegAWr0tgtIADHzC8ranmxjJPqqWV4UpTZUFtrI4nzzvbGXuIk6wjNY_pcmLtmmU_5ifYB7k_lKBoLBjkgAf5Ctkl2Pc0wX3hQL_ylJSdQOUi5OK2zT_7nUYf7JDAkLhnwsQlApMxglFb1LGoL-l86HIz3sEtAq_sh7iGESlwahhxlJFeIZc0BunkwqfMSbSfF5VOsODZNEy1kFw8dhv3pVqeYf3pOsC1FJfslfl41sTLGz5RzdWAwD7PjRg_AFlw6MdQm41g3AjJ1wce0UFmLsKgniH1vEO9Zhz4UJ6UAVIsX-V4LroOKFTLOmRxHXOidxWyHlz0IHd8EHwS0XmQcX3qe8uudL9_X2nqUMp9nOCCm2cA2k0-3inGueUUGD-sCtZJmixk3K53wFmLQa_stPviFOA5xKlBwWgU7K5C7z0fahT3P9miF9YT6IgEhI6NpQtZdfd2q37y8AnPPXa_icPL7sCXmePIdAy4qoYJlWl3DVcjBmf13gwfS1E-ilseQqpvK9fW7mRuREt3NHTihVMHr-rFCseEWJEGeHhucXJI8ViZIcu7iOPByEQ0V36j--O8tJCLM6YWYeQ31BYh-olMao_DobU96d2onqdMsuXewnd89NqZTZesfbVHijSUUR9afunZE1b92taR7kzMRpzU8cFPRXqM","tags":[],"target":"","quantity":"0","data":"SSBiZWxpZXZlIGluIHlvdXIgYnVzaW5lc3M","reward":"0","signature":"Rwd9d_WkDUgkC1nXt4QXBD_rC640h5yNDeJ6F-hNmnfSSYyp1JiUXZtntksqAhgBEuxjTHXpiug2eMmYomN8SShOyYPyPBz3UOOT22LXmr2qCbPCicLz_Ki-qcJ8ct0bM5YdRUTmqzLCguudhV5MEHNo59rFUdAy3AUGsrwn6f5A3Jr6fGwI07IIVcC3Z4jRPwYZvaWadCPQG6bu6jiqlEgFcU9mhYcd4xxIjRsjEnOuqKyWmwz0A9zc6bN7JywU-OJKJxLwEhSYqMkW7ZFRBDQUotJfINAM2TqXjB54iKtXAez0fGNpTLNaIuenUb2I756TXJqZPPri3v5xl27X5SEr6RNDgYceXW6UN2f8cgKF_kYonPf716nl00KUvUdDUh0Xmwm4IQtSefASdlr0ZT74iSLdOrJO9OZISvsAK5ylA1XQeAe7PaW8lYozp6XNBgpfbVp78HV-fm0Z8kkMmVW33xpV2JWE735PYSSXqkWNcJbaYTNVUYad8_BmqLZllr4qrQbTOl7LgM-vRl77yKDvndK4VYnrWh7b9SdWjf513ahFouthc7t4aZF2Wh2Swx6Y1GpinuvAANTKbT0epSIfgp6MVKkIrHO00d-s5tgmanl_JcJMrvdWKjjJzehIx9qtO2Yzr9ZBzcY2fLwz5tlC6KYN0WjXuxLr-A91Q1Y"} ================================================ FILE: genesis_data/genesis_txs/0biLy8DoOhucpeYzOj5jnopxxwe0XDRfCOMjyz_a74U.json ================================================ 
{"id":"0biLy8DoOhucpeYzOj5jnopxxwe0XDRfCOMjyz_a74U","last_tx":"","owner":"139xZjilLwGQOYO3LhR4s1HD_gEcFMYqKHsPrFIN2ZlYze2PGau3ea-Y_JWpQrzNqCA_PvmlV-BUb-QZDc6hlQkqw_ubu989OPyNLOtma1aIMyjR1JA-wnsdsMCy502jg203t-JKuOg-D9FrX8ecnxzpb0wtUGFsCwgg3GJvoBq3UhAu8c_elO1ZqdynrQM0HfCHeqAVM2EhWxFKtQYwOWIjhp9xIAdJmJcZ5mmqGxqhxvaZum_9_owzgrlx6HGHuV53QXuXTETznOKohMIEf80iQE1YkEdPcS0L9uk2TYU_Ud5KpLq4V6XZ-DDBX4UqWpS7VJihUgP5xR6gQ4XzzoJVKV-AmRCRy0cZ_Wo-aaw4hCfR3Fu9JKutRqBvjw44LmKIWDyLAfmo_pxl3C5UBXD5a8n_ge9irhbKCFwBjZJGMW1WrQxga1fAaAQ6JPCnXd6UCHPSHCALm6eGhh0Bdpxe568yk2QKvnNKPNG8aIth_z4A0O6ivCxshfv1pftxco1Kkk6KDwHI5WU3TpThFM5p61PcCaSbpcyqtAl1cVCdTShf0YwKI89oRLZ_JJTnD5urVTSXJIWp3BzCS4pc3th6DvlApojeTSsABF_x5vEw6Tq1OtgA8PgysJExrdazqSiZq7dZJGJzhZgjcridpc7nu1V9C0ApsamzTmkNnd0","tags":[],"target":"","quantity":"0","data":"SUVJV0dUTUVQQVJUSFFJVENPQVNDQUE","reward":"0","signature":"yW7kOjc0R2wjx5WRxfvcgwYoPhZ1_FlGdCbou79C8C8rCGrf4V4N-MMurSyKtwoPKpleskFuXRwWh2cXNjizuhPtvNOJg5m3diJ96gvpJhdLdVl0jPyOCQdZJeL7n1dGUQnG1iN06utApm2zum5Bet94YYeZl9tzv4cjRPwZc_zyBKy1wxItFNMoJS6wSDluETo_UdqDU0p5ZwcwDtm2M2-sU254eDGISzvfO9Qn8DKAJQMGJBcaNUz2pXcgkgqhjTWt3F87gKM4bse8s3QjlF8uPJ-EqYKfzM61Ub3XxRbH0E7Mb_glZvLt3XoN72HSyz6I027Wk2-cNIx884_nQH0zxWxod39BmuRntYcEIIKYyMjd-VvG5XTAgFBY8pWAJ7hBUoSLyTopccv8UzUwmQpUsrcqpoxEdVZDI7cWYhNskFPedp5CCCOLxfQsJbhXLvaFoGY07TsEhDIsoRlS6jKy3uVYbQ-HromeZ1cCrBRJ1akHa0dRG50RxKrExCu95qjKkdUZ0m_CutOg4uafdhxcudyZ2xWI8_DvoOus0x3kf6pl9uA9Bf7xrtNW5anAw6X4jd6glxgc4gu1sXH-zx2ATP4gaA2_iKoFM-PBo6e9-ts03F2cyCQcrLmXtLPF7mrVPdxOdAMlwZKOO-9YvKmi33TQdvIURPtGS7jE_C8"} ================================================ FILE: genesis_data/genesis_txs/0mFNtCi-u34uwOj3BimQTPOT9PgLGE8uqCbtXhnwoKI.json ================================================ 
{"id":"0mFNtCi-u34uwOj3BimQTPOT9PgLGE8uqCbtXhnwoKI","last_tx":"","owner":"w-RKGMnJuJm44_PZ4gXHfIkcqTaQY-hdLoILdQIFIFgzPryYw2vqxslLbRUBScfyVCYv55Rm_LASQUaLGOa0-lnUZsaKTXW304H-zGJedAp50rATAbN-A4LcO6NfJgM4TZpP956IMqy4pssr-a2pqmWD6eWX48lWY5DTNIfmTj-eYGtB709zzngwtymcP4E4dj-YYsBfvFXfBkxBohlxScq1EzMqEhFcFK4EJNUKxn_j6Tp6fIMHxlFm5RvoniC06r8lf2QhjfB1_UP0aiZO0wUms4owZYe2Dr3H-Cq2ODGNAEuSmza7q6NwJqZ7NssVeDbpinpTj5sjn9-cE98gSPenQqbqwnRJTfq153fmZNKrx33nDWmn9mGDm1f2K8_yThgEXm-LTG6uYi1--ZZ-VDCEHDFF7wdMla_aTqSEmIVh7OLiHs1hju8BnpbIZ7jwpbsC4e3xJTUpZ9YMcbzv4iaS8it50hbmIxOvao-7orv4GvXozQHYmywVrqkSYnnSQYbZV-x7YQz8ivKDfOL2xnNJX48X_LzO_UVsdoep81O-ohyDVuf79AYwBqQhIcgF2_AC4ZqnxjdJj8YepmG37KEuZRWv4L-UoL0Chm0dE6vZiZh5mtYJLwO_dGNydy7EIOK5vSQzKQyoiTu30AkghAMiWSe_mbygPvhCwAnNWkU","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"Xa5oSMd8Q7oaGutgG5J-HAHgeln4Irt4R3z_jL7wtKC_gmdKzQn5_-sjFh-exO1HVjN7hA4WwvSWZ0Wpg6hMsZ5mC5QbJXB54FB7e8T00p9I-YRhpvi6h3uLoi84AN42ay70F_OsLbyb4u5jtHlCd_adsE9lAcRNi6HlLB5VgYpNeueO3K26QQnnR-esxchYvqtqwvpXQy0Q2lHLnv8XmMcm1cnDugG2TKwSRspORK2-SuoieaSJOGzbyMNwBwZLWrV2z9XSLDwMESgMEi0fZAfmnpe4XUH04DlVVstxjCtlFCfGTqlYh-yUl8QbaCE9RGObEkuCb2d_PB3nc0zNgM_7eHHyDrg1qldMgqJ_6y6QWkbDM7g2FxilxLGbF9qkz_bi88tg89pPg-RvXrQQ9CHwqx85V_vgV9RX8xJNsEwalIGTXepxXuwAbki-P4Anb_UqyCW0gTqSaO7U-F2h_u7bIjyBYqLYeUc41zQwgkWx7osY5v4c-ayuAajnhAAajlnaledaxeKsKN3fjGU0SQGB6yQ6R9G4DYZQw_SK1P_5_hsCFlRa2vZM1UuEXvpEWhhARTsu7gxNyOpMBqUH4GNMW5og1qvmmS6Tp2lET-mIBKJihQ8ib2Ua2BEtzgyr42JOynAC5LQ3T0Mh9tIVABOk8YfHaVdSFPX4ML1cato"} ================================================ FILE: genesis_data/genesis_txs/0ogs8DTdSrNxfE2LzrScPvnyf7CQ7jMdFaS_l0-K-GU.json ================================================ 
{"id":"0ogs8DTdSrNxfE2LzrScPvnyf7CQ7jMdFaS_l0-K-GU","last_tx":"","owner":"xtiH5XagvK_am8DYqq28XlkUEXHrURpZgt0gngqrpokl5PIlFrLX7-RsEyoktSx_TtKhKkyhANxwzI0Ke96DW1momlDkGV_injZEDYVzRw9abTAK-NW8sSriGHwHkh-YKNEmIgzCOjtYttf0Q_DnGdHjo4zHKZ29sN3jFBIKUsboqxHN9li0SkNjGElQzdBup8NXGN1V9VImLLhYhXdpNb6PNvJR1Dj84fi3-fwjZqjK8UjC-VAyNDmnBsBTUPFpN0QOZAGxIb4xuwGE9VGibR9B2IQyg3ltlfhNMx3cZh1RRNbTYo9B6IM61EIC5C-od2EWxA4IIeE6p2BXuWKNdC4_zaF5c23Xz0wbl20-zCGh5yjgk2ovfjbLksfx13yRsmX80bNHFG-nDUK4SCx2igfqMOMWRXamNp8rCyb_-r7-JwTBdD8ZBhy2n2AjmdfNRqjcm-tQGxicjQ4mUvmuVxOuXnA3eQWVMp7z3OSD0OAO1U6S6MhCRAL6VaHzc03JzEWxRi6LW1F7qIboJccaD2VA57ev7UYXl35vplR5f-zcmbieLDBvW7qGRcUjnXqf_qp37tiSVouLyE7R92kuBpZO78oIXC4lEGMCY1ZRPR2SHmeUC6xPedArBSLbcr2G2IQCyvquemUm1xG9q1vZ7bMQG7bp-Ey9QHWPanqtvvc","tags":[],"target":"","quantity":"0","data":"TG9va3MgZXhjaXRpbmc","reward":"0","signature":"QaDAILPRoARO7jiCdK274QXz1ec9azKLARlbHIxD3nmodM0kDpxM6s_i3-AexeCQNw0KD3uk2_RLozO6yv1iAiQgyb981Oxa96L3NfOyirRhOKa-bgqG8ET17XdQQ6DXGfNHKBTKGPdtgGF0DcZY_xoQEm7TLx0sAB7pllf_YM4GK9aJ-m4irzWrtqO6tpWGFluNUQw_Isr_nL1NMfoknXK_xCY5r1baIPK9pr_LGL3mJDd_47TLyNH2YDwaHoKA7vW6_GaDQP88hVw5uFJDy3rrTotQePQHVMY045PWhbuQW7_tejwceJIGRwngRT0JyijNEZEDpP3vyNz5rTzvGn_G0CTtWc1azXNZdI7Kw7dtYjYB0YIVop_9a1fQc7etohoh4SUpq1ULzAIkUZtqW9ptxZOqzrwPyjOgSdnENUVl4PS-DcgJl0xuy0Du24we32FjSeAOZfewI0RcNBm0C0UP0xWQ3FKeMQV38ZhktCiO7vpTdvfltFcIGzd3rCV_BFThC6p7TGzaYtAXTTD6PNTCpFTg04oTxnlWt6kw2mGztY0nYqMCKRtjpd2FcVpxhENmEW2adHiKik-ZIrErBfaGjRZBMqtpWxbUiJsnh8vT4bvl-xeagOrQBSvh-kiDOziNtirADOH40EndIvYfDhqouR4QlXySSbcaYP9Q_Ew"} ================================================ FILE: genesis_data/genesis_txs/0ooE635sVsd6vdhX3Pb8Ufvuqd7XRjfUbG2eXde_CmI.json ================================================ 
{"id":"0ooE635sVsd6vdhX3Pb8Ufvuqd7XRjfUbG2eXde_CmI","last_tx":"","owner":"xtYm0AInoFMTwc_Ag7BKY2WWiL3qJMfLDfi2uNm_5gguS_ImQm5Gghxvnuo_yNa0AzgD7wEu-RIhQT5hYD916QIna_X5MexsSbXs3qCRQBfTzcY7XC3DSqfrONnkE7XQfzsSY5vWlrC8h6MIijeuiewyMOjVIAn3-V1DEVgVV4Fdqu10JpFuBMfJQIkJHsbphGAjBRKHEM0klYnbPotWFT4Tih8kg0B2v56anEmS3Wy9curATBNJlpXt8j9xkr7yeyPsEUGZV73Emlal9ksbokeb4B-pivplM6_cNBHRben2xvlDF0xGXsaU-G5NIr9cFfZLRjerAg7wG-3aKVKPiocidk8wg_4lYemRVyVZb2RJ63pw2V_YoAQnX4CDzU_7lbrp02cQ3F4glqv5pE5n4V71hlJbhxWz_qBSTXXn8VHRbD7TXSlAyyitFyrtKQKSBwx1oNIu1VlzZqfGK7cGvQiUuHWbQGDB5TNbdlTEz5TJ1sr2gK9BViw9_hvtLHSID_iWGLBrQlwSPa0G02HdSV8NeKobqocVlAE6f_pY1W-br1yX66_kNi95wTBHRXqK6EsJ4PISAGDOQLuD5UtTPJYEi4v5M2zm2N1RLGBbMUpRwPEGUsYcWtVU5mAU3MVlHQj8JnILJf7jeP8xvcjf9E5Lt41CUa231sNQzyHWsrk","tags":[],"target":"","quantity":"0","data":"TW9yZSBDb2lucw","reward":"0","signature":"Pvp39V36IcYj1844dxowBxPIwD7k00m7RVNkst5wPBeaqiOCGRnJLfD3OaPqwkcU2xn4nnnJ0fqmNdBMpfj1Qfhy9xoUlvarLMjo9_Y7MpjqrcZ3t7jhtJVD5rKBeQuvV39xwjnngrg9l2NJ5eWsZ9NZatsfEt9-8f-tQjxBJnr2l57hV_iCEnihu-lCASq3L6UAOcbxvxj3sBx9Taib71jVbV0kEf07bD5n6FGln9syyE-LmojXrgrviyqjKAtops5DUtYVhX5IyDC4gjTM44Iiimfme7pXnfKoKVwDIXiHzTcJr_XcCUvwa5kqykCqUDjc_ibZzu9GmHOHwGe-E0hQCMIdJY4rqccRSWEitkIq6ZR3lUO4P6Nx0_T4tc0RXEci9642rm76uFv0TUjqm7vfXLCKuJbTZ-dOyczwXn2LWVy8U_aFuHT59JX0qdu13t2lMAGxuM28RjbDqd6bDz2EiOGPRxumFB4tuRDUcmIR-bQTfVuUDq65vUer9XfMlcBcJAndQZyE6MqjO5NeT0dbtTL7Eb31YiWozX7_fSTesXUTUf76UyMxcjtxW0jHHyKigCN0F-kmHDJYy4HfooMZIyinAFRQSlPqhZmjl2vwXUpdQrNVwdVszCsUkK87dsHVfNMbpbihUFWQ8DIJEbLl8BhoXumNxOOGaYGUyyI"} ================================================ FILE: genesis_data/genesis_txs/0qob-AeHGTS5EDamY6Mtsnxf1MCyUk18l09bqHAYQjU.json ================================================ 
{"id":"0qob-AeHGTS5EDamY6Mtsnxf1MCyUk18l09bqHAYQjU","last_tx":"","owner":"n2pLe8A1iOb9gwlSg-hqlNjxuT1KJ-rAnnrJQdZMkhgs_cMIR9ws9DbZowmqBtBR5MTKVi0u69fnRZmZisrugi0RWXyFJdAa7C7Ze3XXOR5CATCl2ms1nwEXFR4y3OPD88L3X6eszMTZ06lTx16XNd2m0QiC5uxgRlZ4TD0F0ssBln1yZKOKZfmoCtHS_JwaaA-tk4z6alWavJHhVktYMqDSTUYc1J5US3KtlD49BuVaflR2JjT2nl08DAN_b1epMStv70QR9Km3K5nbWUA413Q3pL9cGrxgwG102maiM3uct_R78mMI3ouxNBo4zum1Xs522KeCgXYkIrqeUEa7Kxir0O_KQ2lNrhycP2xHXt4QbPZ9e-y182WUmdgJ4ku_lM5PYDGwEP6xe-uEww6UMox_LsmXuLrGnFEk1hoIHUsFNQ1rX2lO7osbewNEP2xCm7lGQk94_W2QWg7Bhev9X7NVyI_teeyL9hm20YPxCpgka3dY2mT771SbFFBSGPrs3JEndHCX2QKOMWda4dMC_y9pyqus48fL2zSzFWuudNnlEvqSG4pho9seSemVCS-MEJAuZAcA40nGL4I03GuAmaheyR4zM8WUbWVJw2fvY3CDXRY_FFX3aSPDuHztkGs6tygOLytoIF9K32wmhwdalH0arKYBWkMyc4Eaj6A1raM","tags":[],"target":"","quantity":"0","data":"TmV2ZXIgcHJldGVuZCB0byBhIGxvdmUgd2hpY2ggeW91IGRvIG5vdCBhY3R1YWxseSBmZWVsLCBmb3IgbG92ZSBpcyBub3Qgb3VycyB0byBjb21tYW5kLg","reward":"0","signature":"fURk7zoXx_gX4ofsPuH_cOjnpI3wnz9sf-_f6Z2TcxMbn70S79_0WxmsGq1ILXTzudTEd6ReoKOUdF6m06GEYYsxJ6biF-ocpzmvIODm-e9i6fc290dZTgfhoPlyW1vVVRvZtPqMc-EQ_maVdefSv3JXNT6hxzo-tMGhPcST1wRyrt3yvORuhw94pVZ8wOhfYb1knWmyUWzJOxwHX_jP0sVqaRDV1MxoNgaKjg9DidjWK-GM7hh8415Xvw9FG5yknJR-5f6HXIsqmXBkDcEmWc2odCuQAN5nE8YJvX_NlPjQLCDWmvStplAPEQJJECfpksSqp1ivgSNoa2M2GuOrv6KsDy-3ymL3suFrolMn97kRSbyX0HV964Xg0VU6uj_itehjA7HuRFmYZqHq4X91iYD8WL4wCml50SrP9BLcQ73z5oMYAh0oZFWKOcoa5KH8A1hauT53SmLF-Cu3D-bKObO2ux_dwrOOqMnT4bdLgG8isreNbyC3O9ZGEhLe_rPLiqaDyP13Soq47XP_Y2bx3EcQF0Hir1ozLibgEJPit5XZGgCMEebS73tl0UCHbPxk1rRla-q6cxTKUifH2gQyPWmBazRNZwYnqtbJSr2FJRf7IlBsdzkgw4nUDTPvv9I3ljrhxMQAKnU_NsPyNc1DMzPIk7H235LL3R32kZ_oM7E"} ================================================ FILE: genesis_data/genesis_txs/128KaPgVaZyrl8Vuzt795ZlWidERzih15pNDAJgahI0.json ================================================ 
{"id":"128KaPgVaZyrl8Vuzt795ZlWidERzih15pNDAJgahI0","last_tx":"","owner":"xqIemNFLINya17CYAzAfoAaKLbhv3lsAaeYY24JFXEYQsSzHj9LBSOi_rrt1wy7CTvQbqL_jEWiafUZ6nXoTqIo9Ge51ZZwaTkDFc71AO55SyFScpeXNVow2FRXOtM1NNi4Q_jvY_yUyQlXEKWfAIlq8T9a1R853TfM2QLjFmMzws08a2npoRQ1oVeEzijfUaDO1ghfw2Ybca77T0k2SSlj4R3Nyje3vEvhIiYrXv1h8bznJd0MGV7ru73GrpkRChLzrXzPxxpkpSSST7AIq4Pmpale9Q2lPVqC4rAU5yLpabwxQ49yWElSvRjVihJx25RJzE-7e9OF4jMKWYhS0qlaMXuVJydcF2bjH-C0mX0-nL9Pt3o2O792b7-l1jSgTFt4V3Q-Sqy5BCTxq-7AtjAQ8xfXhv8BmMsdxZS45ug2r_isDvcilTxiq1PiLhob-XW_gSYWnTVRYYqau0msJ1BrUmQkOxEqybzqWRt6hfqEzTdBFYskHw6qTFnMoFaS0l4ONoq8w3toGpGN7iw8ihgennaLiffG60KYp9dkfbRI9QUUoUBqyeDOj6I6HFvvtVO37rQld6VhMNyBTVYuzLuPPvifH3cvP5vkwSXtD6TMJnNDIcarOMo6Q2IByKKkgwQLnkt0GdfKWH5zi7MA88UO-AVXScvBsBRTFG6CEtwM","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"rAKVe5-tvBafeBTNLAc7wBj9vYgETSSttCABLP8_1noDEDEV-CTmiT-vti4OMKe3dWhWAIhiPQuLJ0PFhxXlt8YjnnGXdGkd2AocbLJLYzHcjv_GfpPZk1LFQRP9aE-04MJYwUGTncCrw1lTfJTW-BXr8qgv5h6keHbN0W3ryFa1rwtnV3cyHSB_wkGD71FhNLzxUeoF4S01TI3hadBjqEAIzk1kRq1Tvf_AQ-WvfK83Ywb-N0cbE-OKRGZ1Em4sB5CxJ4_qQ9xg1IxqmRclWbiP_cRJgpD6ExSCPpcy7gtsC-jHE4SKYfSQyTmUdGmIU15lLtZ1mzrh49CcVczlxNgeNsUIQs_6Y_GAhPNKVPe6ThszP8wJgGNKhYoBIWXA8zmK9T1ucwRdy2j7tN74YbtHr1yYdDyqbryhVfz7FOxFfSIkOJimL4u0L5xjR4JOO4YkMJwdk0LDaQz_Z3pTC0GXJVFXfbIz_fcCp5gal3-0N6g0bh63G4OxQf_2Mi5Tg-xPdEAZDzj5vLolkCXi3CPJEC59EeWwpUfveDGTiGjrP4lf95cWUeUHmZBCRhNtd0E_LFPHOzOizSgRmBEhiL2gvN5rNhGLrqa1lom8FJf1n4RQ9QO6fZxcbed4w55MrzmsS_aBRS26MPApSe8gqMsO0B9HhIT5OoiXXpL-5hQ"} ================================================ FILE: genesis_data/genesis_txs/1Lwuom2q3FFI2pZz5EYgOzJRymgVWE3F9ZIl4vi3-kU.json ================================================ 
{"id":"1Lwuom2q3FFI2pZz5EYgOzJRymgVWE3F9ZIl4vi3-kU","last_tx":"","owner":"x03xLiKfK-UVdZvoZY2VBQubXjKHU9hpiF-HJaK968-C5frGONFZ_8Jlu4DkVLpX4tsx6biIG3q49ruXU29SxzFwpT_0eRCgcjsvaaJsFENlllCIG_kUzUnJSJLbB7VHhJFnq8tbh9Vitx8SLQV6Fi2oIFw3ipEL3hIoiO_m-66MAGCeVioZd-C4_5QpRrZikcpfBG51xFIbYBo73C-gCuWTA8K5ct3PLHnOY35T53vExzH1WIxSMI27I9kqUAyjx_BUoHuSfN5SmxC1GRiE_3y83-F6TXSk6bBRfYZy-vGAP3VgSc7CTKb9ikzI0B0ILTH1O3EADY7W92_uI6A5B6fAcD-tlQMeNZwnwNzEye5elOnaIGMjAjt0j4w58EIAGsBlJ02rosQXVfmMq98uFkwPhnbiUvPKph6owj0xnZCrT7sPaJhd_MHFS1yH4LvOpaECnvL7qy0YTHL6zBH6two8S8apZJpgyrSkh2OEt-JqnMtMUpcl7dFRPa2BQJwV7ZN3YjYRg8ITuImLBT4rwk2inYKOIuvMsi8ScmjrW0U52RAbYg7oqfWlcosVHOOJe-13d5DhNK6Kvempbvask2kegHOMHHu9aUzL9SB_RuZHSdIXtzxfFHzB7lRTOnrwMMSoENF8-C5Oxhu0hg8hibBLGn0rnVkYtlVP0WU6JtU","tags":[],"target":"","quantity":"0","data":"aGViaWt2YW5kYWFnYWxnZXplZ3Rob2V2ZWVsaWt2YW5qZWhvdS4gUlItTUstSy1FIGVuIFJvZXRpZQ","reward":"0","signature":"IuF7qyrTgzyyzmxC1FI4C2aanrN1crtK2UVOjah3qoqnZXwTwB-wgYKpT2jeV0gygCG7YlQoQk4qImhWN7MvWVklrteNXwcGB_TtIlDoTZ7QSvqm8wiKe9Wy5YfAY8-S2i0R3gdC6zFAIl-rYCoNaBz_XkmFQqg_3eK-OJF0uBexCWWWWzICp_dUrFhCWmZqE0STKQabLmOB6ECUTm4pxh9I6YsU9Kq7kYbDNnUZC3YyTSuhfbYQzX6hPM4LbnmGNygjBaZxtjh5x2laPSlb9Wcujrr6LA_l95DACI3cORGVEG0hVvjdvBKMNZA_dG9FbIFcM0mUov8eGZ27ij_d7yRdxDADwHleB0QLZTHWGPjfnztphI8-ptdtpwasdwC5qFzn5hC_8VQDeyWFhOU9P4AGHRLkKUX3MmAPkE_MCmV4Dxp2HpqF9hJ1jic1AiNBonGgvmieaYLWpS6Z4T8_HY36R7oOkMrLtGt2c1HfXI6LQVM51dxWxG3LI9o_XyyvEOn-Thj5gZRO72cmdsCdKmEKHliR1oiKSYl0SvQmhlMR9SvnjJP_p4lidPFIagmn4ffAHmD6rqYgrOJ-z2keYrx6heasZIWDDnHh6_j-yDfTdEAQQGsX1ZGV00oi5nKjYv1JgO8CwsYXc0UFHCa9RNJOuXZQ_tUxMsqqimtoFIs"} ================================================ FILE: genesis_data/genesis_txs/1Q2plP5JFTLwdTC27VfIgDJ-ri5h3mVsKxZploTrRmQ.json ================================================ 
{"id":"1Q2plP5JFTLwdTC27VfIgDJ-ri5h3mVsKxZploTrRmQ","last_tx":"","owner":"2v01xaqM45uVGvECKmwGyLS-7MBgIDATLtD9R3fbdEGi4MKOAnLznWHfrvQ5Kev3Cf-9s-dAJf7ZTj6hqOqa0So7n-S7ZykkYoo1seaYXAeimvwrKNxRDIPmqggdsjcZ2zBicQPddomRZziATXW1R1_mdHpi1uek4YC_Z-b3SMPI9BP9OB2882pA8RTxUQ55WWsKobhi8dqET2VbgUmap4nX5hXVwl-Fqg14V2ZM7hErcevmXXi7AACRYTlD9hay4d1WdjCPesMIkYrUtiCoxzKtWWpbqrGfsGoDVqnZsJVToUeavD_4xy43xqIxLRiQCx6YObfnJPqwCmIiJtlOYK3QA6Go11DoOqJcgfr2JXNhB68iG3psieeW4xAqqdV15GBK7O4TSJdfJ1zTIXs8kbhYsP-Y6Vo6JnyHyuSrpZi1OPoBPcdPAlFuBG1Qw8xRzTUNy-ud3NCvlD_PKKAHQMT4NsXpf5Ra3HqkiPPDowCv_fnhUZFhJhRTTUoblGiODbDHBa1n5__zQqJZUD74NmnIYsprzOe52LDfYucRt_FQ8rhiT66VIXq9baKiXpProkkd-EdkO046PTYg8VtRfOZrMgHxnJFKNOhmdrQ2td2lrZzzfbxsTtoIhACuSb0a6lwBDs2I9ZFzrd6rOwDk3eTrO4rEUI4dicb6CaWikU0","tags":[],"target":"","quantity":"0","data":"ZW5saWdodGVuZWQgYnkgd2F5IG9mIHRoZSBSK0MuIE1heSB0aGUgaW5maW5pdGUgYmxvY2tjaGFpbiBicmluZyBMTEwgdG8gYWxsISBTbyBtb3JlIGl0IGJlIQ","reward":"0","signature":"f_OoFml9TZEwy-QR1wfNjuFdeu6sRIu6bVJxbRE5qfKbjoORL26pf63EOh093-tO2ISR0fPMCmoRwIhT2ITaJeAqFF7ABR8klZ0IfwVGJqHvfM3-_qiuQv7CNgf77YgsRcUY01cH-Zq85bwL9q15woB7Q0HsaFHaHqc3mK7JyzeABlk0QAD15r7f8AfKtpzacM3dywukhKMzrY3E98v0T775U-EJzqib1tj2ElLP0eUndR37k1hUCxJTH-he95lGIVY_y8DLNb7hKs58PAB5LbvB8OsKYX1kIpBb9jfWLLhulw144xdoiZZHVS0Gt2DZLQbzKfLUkoK0pyUWn4HBJT-niJ4MA4CAG8mrMNPwWlqgwPO2z-d0_MeJvEO34wmNUYSP23R8lo_6ItMdQCgJjQg-35gLTX3_j6Ns4p0FsfSPnLA3sEKjn0-G0pcm_o36vlZELVXfx7zGJO8JgcIgwrXN58I2LDp0A9fJcd2rpwx_1MEtRew-t49jC0PcVJxJexKKAvy2HjkDoyIVkyzCWu1gr_MCxk1dPYRl8niNGG_gHOJ0zX-XzpTBgdC7Jc8ffXyRKDlqUzB9-EL5DHjxAGJ-3WSanWHxvw-hp7ATgNs_UB_tQxJvAiGo9w0vpHw8j7NShUb0y6r8Dv1X2Mrqwe59COBq710fk_XayTvoAbc"} ================================================ FILE: genesis_data/genesis_txs/1QoMjs6Q3XKklJ9LfovRmGbe4bAy9xY247JfDZqN3Eo.json ================================================ 
{"id":"1QoMjs6Q3XKklJ9LfovRmGbe4bAy9xY247JfDZqN3Eo","last_tx":"","owner":"vtWywWNGTQuOv_MggV9RJ9GAwkfk8VSkh-ynlSlsIj5NlohTGfy9hBEdRSbqRMc5tZ3Jrn9jbBQD8HYZWZTYSymUxJW1A41LBmGrJZOTt3pLDycvEK89YrAAfSkcxkEmhGBgNNqPU6F74E7CRXQfW4i5_vaKgwmKCDe7rejLFzM9c4n1uO7D3EapxWMdLKYTRZIu9Ux_6H5_kC_yfwHy5Qrs9KxcoqBCZ2quT7ITGHifuk-190RFcTvmxROJOcWUthzJdxRphVPsY1cDir9zxKZHv8wYI6jvXCPCARiE8Z7URx4jHIcXjpyELv6BY1x77Mk4L5UPohpsGo_nR0vXSvTdi6xEc7I9KXSmVG2k2vEtpJMmTHVw4tySu3jquIPWFb2E6ZWFdTIz7Xf9f2XH8QMc-yuVwjoei5jaK8lNQoHZjhMhtGu7I37xp_cMZZ8leDMlVqIJ5xhlWc_XU8TN0CoDB54SYz5f8ywwoOebcm_-2GxsrrYXGc0ob7u-19RhcKqaRUaUSIjhJBV-_jzmHQwgDxNqo4SIHGPZur5we7K272Z2PgpKNOsMt8-cC8Gh6PcEoqtm0P4-u2apnbpqn4SWmIuhB1exr5kz-_OEHD9l_aGicfHfAEh0tlKikEUdX6q6tT67222HL-r8t_Vqc842sD3xtgQtvXnxHBMWOI8","tags":[],"target":"","quantity":"0","data":"TmFtbyBCdWRkaGF5YQ","reward":"0","signature":"WdDVgogKi-37ITw_xKKuJy8LPZEB2UWQzRcN0gnVwYMWdQBC_14ieholRE6pTHakYlHWk5L8N9vsGRR-Ig4xU0Y9e3ZB-jGCyc2ybz5xo_xJNyaxkpgNkfs25Hqqd5rD1stimV5-v9N6Dgqbsm84dgwSBTheE0ns7_ipNEqrOUllVa6rItEUFaGQKMBjp-fiNvdI67jWdlhS7PjACp2J6ztj239EyEwOf2BTBWUnsAX0LeAwJCcqgoHe6t0RT5hnYkyX4oU0iulYmw-uIDcYTXVXqhxt2l5taTNSK95fpn6emtTJqwl7oCqtqfEIPn_FL7-hrd3MMDMuFATEnU9aj5FzSLfebCzaiCIP_nXKaioqsco_YIKhc_LeX4BSDP-N9Z6TCzU3cHtCPkImwQQcgbBeL_1XiEBYPwFh4eNMfvrHOIK9R7REYeV5PLNu3NyS9aujDrEAO6nYA2Klq6aYJy-GQgtwowDVu7nHRclWTB7pOpUFC9peplgG5nsjVjnDXGLPK_1WXvPPnjur5OeG_zH7Fl-Mjuqo1Gs1ecnAO0HVo5LYfvCXpKe1vixxMakFpMh-qEpbyKIeniaZJiKqVV-s1WBbuX7lCQTCILeS9KojcFZbWqgF0eYudltacqDgWgHMmV5BrYBaPHRc49FES9VUtMYIZ0hUJTfDuGPx2hQ"} ================================================ FILE: genesis_data/genesis_txs/1nu07yo-0eB5GLxIJzzlxZW6nFTFiZ3XCDobJUcNyP4.json ================================================ 
{"id":"1nu07yo-0eB5GLxIJzzlxZW6nFTFiZ3XCDobJUcNyP4","last_tx":"","owner":"5_f-keUEGP9XXTQu9P-E4F9Vqe5awy1xjR3gTQBWdJxdcAHaHl0CyFOpgflqDQtMniVKmZzBx2BPXvGdvp7-vyLEExQdHt3BRnhkdyudfoS1607_-DRnzEHLXneg8CXFFs_Mkx0ehXBpKiS712wtIl7TgmxHbHY_tr5-E-vRsg14twe1DyHRxLualQfQX2pAkViSvQtjvxbRqQIt_v02k48EX7Z-RvMddbeTIfujwl0c9dZUfPGWoCQsGD7jfllqglgOhOgbKAMo-fVPoqs5U0ETyhBmci6oDpanjFM_tfDUIcJUD_lHMQRB_MbmU5zcNmPizEC7Ljbdmk0uLHowscACZU6zlFWb2n08G6x-USnDs8FncyUAC-vXAGqxN4avGiHuAfsbBxKAKMZTZIXqC-chrv66_UCGG56v3KMdYtUkWZ6dkNYPaMwT9C6kH7DRj4iVju9jt57bvojty6X6F8C5S7e7RJECPnpjn4sDWRoux_QhToifFlqduSB0vhQrbkgp_mPDbhsqj0jTsfhNlUGGbdPugSZAcc1aTBjMxM_yVf6CY-mSs0IgLBIOIA4n0ESJhXaMFJc7IwyUxirx1CDsULvpIRlqSThIJTr3Vl2ICTYSN_S43lvJM6xKcLk742n3qIs5QsNH5i5KFVi67FozfbrSXvcCUyR0u_1tEW8","tags":[],"target":"","quantity":"0","data":"b3IgZGVzcG9uZGVudCBiZWNhdXNlIHlvdXIgZGF5cyBhcmVuJ3QgcGFja2VkIHdpdGggd2lzZSBhbmQgbW9yYWwgYWN0aW9ucy4gQnV0IHRvIGdldCBiYWNrIHVwIHdoZW4geW91IGZhaWw","reward":"0","signature":"B1_zxbc8AZv8rLoOzYWfmjn_GXd7zzhV7DoUi9t3Q6W-1_HBH4E4e2i-WwaYNzbszYetkUNo_bAKoss8R9icdBPEt4NdUaIjiq67_V7SnYpfzOIg6tPMw20PYAl69NCksflJnM0ck-TYoQ5-GMzOXNBP59qkvnl77oPOhG_2I4NqlJvPhpvqez1cf9CSbFlwTASR4vNKkTzEIWXJ6AzCjjVkYMXdne_DWGeqWpshMUK1e6po916tjxGSg3IneULZZh5dXj0b2hodaYrieAhBLb7XehOJekSoUvgDIfldb-h_8hJtegFYFVMjzA7h594sU-GNXtD1wKfvPDfkyQB4LNWLam66xi3U-bcXKAJAPwgGWDXCE1puBl33N4oN_-t2e16WR1rBPg7zxnmfXZ7TuVcHpjbkGsUyGarauX4JwH7PVEYvwwcFeQMqX1AC4xiz3LEJJU67g3kEpqe6UlhHWMTtf-cPa5ROAb6zfoI_eCeN7s6gJgSLdA_ZKZ4eVtPUS10dzNZE0VHjs8Y2dZJOOcEFZHRPmcwu5DzECDj8AzPSgYbFtgNE9uBuVoUstcqBDnjxHALQZLJf0c_X5FFT9HiHf-m0atJc0u-1RP8Pns25xqLSXH0npW9l9Z91KxxqKBhPnlkgKl6e1LAd3rFfvg2NTn0dpoeO6fZPOe4j-eQ"} ================================================ FILE: genesis_data/genesis_txs/1qVeYpf2sY8Qkz0iVomVPVb15NA7QUtF3eFDoMwa8PI.json ================================================ 
{"id":"1qVeYpf2sY8Qkz0iVomVPVb15NA7QUtF3eFDoMwa8PI","last_tx":"","owner":"5O7AEF_36y9i4PDbTRyEFT6W00feGSwzDygn_prpIi-fboy5m9s2mIYiXL8XT9HDh4HE-4PfSjv0fFZgEPRYECVkBlw3oD64NFrpKpBg5HkiW8KkPdUrLvennVaK-rN8rgqXk2IcP0Sj_E4dkpxc2SW0azi3wfQqGzKAPXreQ4xIVuKtyKzs0fjoKNH21Id6iTPPEyZnRFAiuKgVaf-HsltAPt1b-zPO2-FqKUKXn1mTZuSYYupi_-pgXTNESaVdRnKILwsN6oYZ7zRq1_GOzEEExgYDMjWS-jcXDCJYghS5EECjMFAoQBOlnux4Y4WHUZ3qcCla3Evm6Ugr1HvlEOsX3CrMgF-rqsP-P8Wgst6H-A5U5F6AAEt938SWm0yfU8wqoShEzYnLmsBuBdH4XQBdqH3SxMVwNxhAdyA5bt_BNPtLWmvrzR-JlLw86IZVqLtLC5M046jnOxMeMvx380lvbrpG96nYcvBzH0SXYZsmVbIf_Vc1aTrFSpJBRQPGHGTl6LcQCwWY0nHmKMKpfuBYYyNWAsSKxUWXLeGwiQKe5AJFqXb55Ul4E8mPh3gV7MNli-k1kElsDYudaRwGzZHXblMCbbnn8AfMtVlNGtAAM8nK26pHq1WIzmB2F4Trx-7Jwu2MZ80_xirwUEzzjnMCUMeIeVbwMJcHXsIVdsE","tags":[],"target":"","quantity":"0","data":"SGVsbG8gd29ybGQgOi0p","reward":"0","signature":"QLSaSaH7FkEftX_sV75eFzdIwnqr6fxhlNXqOhvq1byC6nN4K9dpBSRx8azo-ybr2JxBpXtDkJ4jrcgPCFp5wFVt8TbvL2KBOLib8YCUdZP-zFssgJfAA03Dk_SKpQbwBGKkQhhDRsmhB5prRP2BPThxK_BvplVfVtsOSvfZCZk-nLeFmPFMw4eoqqEczHtmd4hvRSAKzOC5sXnF3HtGGDCX4B0IRwnQV-Y1Cum_97bcVWWnlzxQR0HgKyvWCtmbCIbWDcqYAEC6CiilsZ25gorNaEul4YD7SuBk2SVi3gx-jhPJjeMRnC6GXLOozCS3Jy9RJMgj1Ga7caZNzabprZc5ypNC6lWePqN2cpYZmXxdmWUDaTQAMT4vk9AQqLhi2Lh6_yAh8KLqDQG7RbWKwTAziRtUIjvD0AqzwdqRlY6MGb2CQ971CH3iaz6Vd59a6wn61xSUwic7Ql_bvT_VrfnUNpkXXyd7ywB6aDZbn-FUbvZsadISdqSDj6whxL_Fr2vvyiF4LqMu-PDwn4BtRFOQVLcIeq1H7aXFxejLMSiCduBiUVMllfau6mjKeL4wLncw0EaROx6_whKaeipXgu0Be6TvLy6zy6HeUrGT3gshwoM5K0nICMinI7dH6TpL_o0ixjmcOZ9e7J_CnIV8OcGGtTO0GEV5KVXcaXfgTNY"} ================================================ FILE: genesis_data/genesis_txs/1xh_NCIFYbprcgNM4AVvZ47jRxsQmJYvCG-L-oEK4iE.json ================================================ 
{"id":"1xh_NCIFYbprcgNM4AVvZ47jRxsQmJYvCG-L-oEK4iE","last_tx":"","owner":"uhNEkI2-GNJBgol05-nhONngaNhTdWALp1vqqWxPOfn4yYkX0gsFHyoystrxcRq_jYgAJQ2DQXjsB8LgNfDGjAfLtwam6ojPxMxwJfTPXw1uoiX_27lf78YCa92w657yUQu15QzU_i7yrs8oTgGj0BaQ00vZkCQ0IEX5HUoS_YwNm0qZ_pEVkB_fWfSVX4my8zffFNxaHZFuvWqEwat7Blm3MizXMUhQAwJi6dNgK-f3K7xgyXzPo0XTR-iwWUOU6DVPG9W630WzGXtF8VXIL9jZ3rkwMBPYmdmdwTEIRngGKllnXR-QmiiaQmWlXfwLPWFC_V9pLRZCa8i7mSGS4oA-AcPmO4gkUs5YCW9PkXxbH_l6ApszO7_JtgXPYp5td1-7-SG9Eo0TxV2NestUFZ1t91fUbI6ZYPA370PKuBstXo9-ARX52WqONEVgXWt45b9vUiplPq_EcugErrbMiqKXGtdrFoLzAPrM12IJ-2jEB28ecLV_8k3QgLs07gKP0aWVFCnD_c954y30T4siqbjy9IDdlwaYXXnayn8htqJMKVxmulFtbIJNSXKfafxvjXwUTGDUMAzW2dBqYZLj25bi_D1kZhvHhzHLJEyuO-xKP1YFoxMGfOVYTGaE77bRGer0IX2NCWus55KgmtOr5owvu2orF_YbiZRsCeJZx80","tags":[],"target":"","quantity":"0","data":"WW91IGd1eXMgYXJlIGFuIGluc3BpcmF0aW9uLiBJIGJlbGlldmUgdGhhdCB0aGlzIHByb2plY3Qgd2lsbCBzdWNjZWVkIQ","reward":"0","signature":"IutBgZspQoPIjEwDTn3iffzyUYqPdJPjH_PcF7PPoW5uw-BpSjEGJT1m1c7UJ8NreC2tF_ybEB8pbv7OYalosV2lFOa6ZzSI8wabY8jlBC0CuskiUDK0wl0KAMXlM3tEJZlNGtBqF7S2pc6E4Vr9LuMHVky9SDUHoDYv6cjDFCdultInHdUOAf6Eo5uu_YKJ1lRIGxB7OQnSjBtGIsvx8uX1tuSvWFnQetdA2g7MTI-Gl9DEUD7lE6GKlvkiHC5BH4VbQK_Dt8KYrw5DPKtrTV5m_DzhaIDvGNZ6NVrrBKFvreJ8f0pOYEYFNfbHA433HAPgnAPMwb5v_yGmlim2iaM-95rH265Ox_kfNB2q3d2SAhlYXFQ0WDTWCJbqefWuDTocoVygX8EgUK8GcAVaD3972wHJSqh0OUhSxasi2Lg_P6mtj3byBE38QpXcBYbd4HyQ_e1vzzGHN_J6ixBv3QV3r67_UlglbreKoK4k_mT0UyoSDzoEORqpamN2_5IftfDHMEnoF8_xslWuSUyUpH-yXOmcXUny77hemx3kbHZ-9IVUPWdC9sxNMeKwucG85mb0Wjlh5f21AdciR_LJsoLYs-zWYNeQByCTZB1ao9VfqJvNZ6NcPtkQP4Fa8zJtrXBujpxDYncvV_FywT2IhsUIZrm2a7w615n1zpy0jrI"} ================================================ FILE: genesis_data/genesis_txs/1yvqJKdnb9SRRKoBg1m0kWAsSh9S0R5r9T9TE0YHfRQ.json ================================================ 
{"id":"1yvqJKdnb9SRRKoBg1m0kWAsSh9S0R5r9T9TE0YHfRQ","last_tx":"","owner":"vD50Itn4nhfFhH7NuiaeFwMktAMwFv523KbEJfszd9EuXJJnOBjqjhU-N8R6qKufaMj2hzocXiLyVVRcl1bfFOm3GAunIlDl8jspbFCal9U74Qpzq9JHpRyrtEg5gwV04Pb_ZhWkNHudrQwpEGAMvNNMIGjNrP5MfYEHyS5Q-_8RcYUShr3MG16MPwzfxGi0e2iMnlKmAAEW_6ZpbFs9C1aAhV2SZnS9uQD56LbboU4JgN21mdTvwhZBNI0DPL-dpYNpSb34JpPo5PTfT7kDFs9Y_qg7x787qK1n-n9kO6ciVi8QB2ZeMJALY6UHdMFFrvyc4Nuab4urNQ_MyP9PzPzOfLMklcDbc73XAUiY0-3XxQ9wa8QYn-0-pRcNlcBMStx_AbAb2WUabHDBjzxfdAGzBoouPXStj-h2yGNHWr8pxf_ylz83Np0LBjA8wc-iw3YT-2xoLBkeyFM6zNyVEKQALTtgiKaWBVUD1sVYBNaABvD7r8dRxE9aeGiC0c2aTYdkeKeMEAf4avythTuH_uzch2OO6CeqClGcpshbw7MbzobI3BvmeE04fmUkxEnWA769BLkUyZWa7hwYZ5vOVUBgl9APLq42Y0JbxJjLly86F10jZdeXmRNR-hGpyNINiTJLg3P84irzht73FUGUEaWOkiVgrCAviOOpctb5bIc","tags":[],"target":"","quantity":"0","data":"b3JkZXIgaXMgbm90aGluZyBtb3JlIHRoYW4gYSBjaGFyYWN0ZXJpc3RpYyBvZiB0aGUgaHVtYW4gaW5hYmlsaXR5IHRvIGFjY2VwdCBjaGFvcy4","reward":"0","signature":"nW_wLwnN1u4BMz7-3uQx2kK4D3WRBtYC8jO8j_gT-y_F95HGkl95tCdQO-HSO6q0ij_p5_a73HhEN4m49f01NO6CaSJ6dyTiK9gjqGSZbYq08FrW-Yz22K1_u2_IcYoeE-dn1dn7DFhjeyChC2v2ToVgNHT7mzqVpKGeVYUbk3XDFYQ84zhJZPoR5vbaCoXyN3TnHqu0qiZbkP9CAlS4jy9oy67j02BkzD6VM_3y5wm5Knhuehp-CXeTj8-BQzxk1jUjATCTjxLKYz3vtjCBCQXAWpy8JwQhm12nnW9KT1C_r3pQvr84odIge6v7mJXAvwLZuycxsSqQhWi5kBdnCneC_fJ03wilDKo8qFjTG9bVEearEdqQDvKxikPE8wz4yQyAisk4gMned7wDPTHMep2JlqFmLrLBIB6LnFsURBJXtPPu47N8U61vL60JcAoHvzdmzNisnI1iLthrc9QHKOH3XQj7ShqicY0wvD6P0PpvljHdVMuoiERkFRJGK27RjpqYJEc4rpS5lVsuOZjdOmeUpbLPkTl5MiuKfTKg-lFsh7bJwAZqEcUfkIu8FxlngASubVxIAqhdDRaWA5pwYRQz98g7PJcDkzNYISyofdYtH2ALlnc9FNfJmDrql2puGmlMCxQkCw5h6urFInuDSbi3P1flgs3GWMHPQqBuQpE"} ================================================ FILE: genesis_data/genesis_txs/21Kfm2Apa8QWeqdMqyQAcxg9HbiluZXfQFu4-6xe-AY.json ================================================ 
{"id":"21Kfm2Apa8QWeqdMqyQAcxg9HbiluZXfQFu4-6xe-AY","last_tx":"","owner":"16WVWsdscV-7Fv45vnbRYVLncTg2ClaarC31n5sAS8bWSal8XXJWFvZy0gmj_We7n7jp81pnHzaC9m2eia-0NPFCwv8rMJ7YZbhTKGrFJeb0fy0B3MBWuWCl-oWBgMpH8Dz4OtLGebagaz7GF66-KZnsdaERsRJ76TqCTfzX3l2H52JOXaZm4k1AzMbkzyWEOIECUhKYZK8ZxVDHAuKpItBJKZ8tk5ewDXYMEzsYNAV-9xyuDopY5l7RU2T-xVXg969oy2hRpCxkVQ3EsZhmMEIbHDBf6__jNW08xb3ooSiEIUXCIZ23B02DXT1akIkVEkLcvaphzuTQXCHzumr9UVvMc76CMVJlH0VnswhwTJC7s-YHpetQGxwGwGSEKK3zCrXlVYvE14xLX7k7zmw7vu3BZEtO3kNhW9krgdWHvNta8kJ_MTIaGlSzIrCfJ1xysKxUN05NZilJxIdW8m1PGM356EZc0aCBE56vTJ0hANtQ23Tmz93Gh5QA_Rf9AIQim6n9gTBszeiXjjqELScB4kWRxsneChHOH5ReD0RFWO4xJBKIEqGPYpixh0HcBtz7lIiudoSAnZxtT6NWKAnkHHCpd8CPm7aD311BUZMCmYovQqa3bu7gS2hIhH3dZ8Dpd_5yg9Zlmm8yeTT8crbzFDSE0qgZqT7giIqhiUPk1_8","tags":[],"target":"","quantity":"0","data":"dG8gY2VsZWJyYXRlIGJlaGF2aW5nIGxpa2UgYSBodW1hbi0taG93ZXZlciBpbXBlcmZlY3RseS0tYW5kIGZ1bGx5IGVtYnJhY2UgdGhlIHB1cnN1aXQgdGhhdCB5b3UndmUgZW1iYXJrZWQgb24uIC0gTWFyY3VzIEF1cmVsaXVz","reward":"0","signature":"C_5oVIQ9d8KJq0D_nGY0qBlzzg250E-jIH_C8YPrE0GksnUeL5-AcwVWXaM59cCmmJNIAAzs-iLm6MzsK-1ACkOFrs-hMZrmgRC2yY3ZJrFwUmu9sg78h5XcabH-byUrrg-tvUtUIR8UQ5bkLukY2-lGJh9qhjPbVsjpBobHZx3hekCI3_FNDI-PeUAZ5Uf-WJQ6-uDKxXx0Y07SroFVlqJ2Z95wqPJzJSC1l-rAZKoWh5qKGbPorfjsTVIkpsP9RqNlKI0MNQWu8U9q6JlG5jfIhLkpabJfct3YIYAu7ltSkS8rsgpyAh-WyPQh7RDRn1aDibxtRVyrYGL9v4aI6vaeu_ZCtYO_kyBMPYCWPYDkpVyMVBtoXKI_vOqMwvnq-Cs6h_SIHjtVtyaZdm6TVQWE4g8okVmuzJGc-wxPVEZfRQDwnvXzaxKAOPb1CH-_tLzzTrKzBPZYc4YGHZVNCbCqo34rcyXTt6783uC9r3yOsowg5eRiSh-QBpesCCFf0-F_ytfVdH84Ps8ci6PtI4_mMmQlD9GJ6OG25kfGVSZyaiUEDQ0px8M1VQqPUKIae9_RuqgEamGTUSG69JDgD_V68hLBkKUaTinlrrAM_s6nSGK0oN50fNfTIwN1wf0hU0uGcNsABsdP2Z3CGzcWw42o2z1x23yTHVnwZstnR3U"} ================================================ FILE: genesis_data/genesis_txs/24VRr4yT-_fOndcFYtK2oSO-p9Pm6lNtzQv8E-U43Bc.json ================================================ 
{"id":"24VRr4yT-_fOndcFYtK2oSO-p9Pm6lNtzQv8E-U43Bc","last_tx":"","owner":"z5VWuXUz470LnUXHESayM0oXjMnuiCfY5qfXqnVGK-pc8ckzW-kkhTBTDlr92mGSd6AkbiSQ2zz1Pw5moOVTFH3ev5qDfiFkCE-kWV0Ph2DJmr2XkKxP5OrWYucOwL_Qb14YPuMSOg04Pc1SHl9T0J8ruBFEGApfC21tDfTN5vDq8vq-T-gky8GkAWh9r5w_-RWBw6T6BsORzYSWMSKwd-u9KktLp6Qm-lUU_xdvWqLayZJFmwEkSiPj4Obh5rsaqyBC2DEMt_ha4-LgLzq_DWzlxdEF9QjRzQoWeWzw9HFXW2lJWDYOGnS5hnYj8eCPWQNphJ-I19DIHEF4gfu5sTofiEBpAvomH_BWsrlSihhfHiX0Z9zqm-NeMn3IF-kY5k-RvmN9Rb3adF6hwwqtQrPeI8qC3eyNGIhsgRrvtoi3vp9OwHhcWefo4P3TcWJx7G9mTxcAaQp03I1ZNtGXA3phMauFnLw6o2ll_i5TpllQMEzS-ZoKlGIUxN045TM7YjXIV6l6AoUJnvMpMqmyu7kCUBxWxmXAPdsh_NZLwmEJ2vnh-UVz7FoeY70fqNCTjf2M6fPwIdq9Inlvqfd2y4Myicqx8KqQtIWRlXi11WodmYHsqx6GWuLudJGDcnVYOfFXwsnCAa2Pi0G-G2B4v76j6IHHp14e41p8_2t3Oe0","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNrIQ","reward":"0","signature":"LMJthLPFOTy0-WSjD-w6fXq5c8Eosl75svD4qdWs6QZO0vg76GevKz76R7_o4u_8KykJCHXVQTaSKBohADT7yx4-iPcqqqQfaSbb5hosfh88a9w8AgCI8YgVT74hG2KIvlAKgZX_7eM1mnuxmGHmHuEmD0pwGZnFqLmsXu_-iNXflIelMy8rFwBurlQEAIvwMPQqpwCDZ3syX5hovp0_rOBvGgl8hIZ6Xi5p7XigxNqyXf0t88bpVnRJiOZhFRfNYug5LLfZBX8n6j7kLM11odnEY7Tci9H4FByMNLzUGAfx7SfMdqi8dDlVnK8SsoA5LKpttwyRCObnBwXMXo0_ZIC6sW5JzPgDMSKy35WS822ObSCmz7HCNwlMCPYSXdnw6kFihUcSr2jXxkXYnkjWiivNIyhdhJKubJ1hWyy24QcCRKVe3XSpU3kFCaxDw19SgXvKr83fZNALqCbEIuF4Bhea3QBD2aOW8ji1iqRv9jRcPnoTg8AKvhT3kmLhN15h_8ljpiKtDJcLdiXLodfJqk2qbspeAAQWCKF7ptFHXevn34aeVzP-n0Ltgx9mjjUDs9ZFQdtaiEWBc7l3Neg1ylr9VX4SYzk4cK5TX821STOx2WT2R1CUIpxi1kKI64z3CIZpWaryH75rMGlDd3u6XZc10asawfy0geIQNte75B8"} ================================================ FILE: genesis_data/genesis_txs/2vn7V0FR0JMXrVbj3Ofvc_2nvrFYCCpRoFjc7UYpJcA.json ================================================ 
{"id":"2vn7V0FR0JMXrVbj3Ofvc_2nvrFYCCpRoFjc7UYpJcA","last_tx":"","owner":"ugH5BgNNgeguR0kTD6mtiwo2RdVG93r5TRq_iCZf78cCsfWDApiQldW-891CZ8Zj45zDJoKCd3Dy5Oh_M_UaQShjV2ppXGoqepE96rep96jvMPYgb4Mc0ndSg8J1-5T_xXdKA3sYBaQSreS0upJi3Ixil3DRbCQWXgwI9sMfI9Na3fmWOf8oq8R96EToiwDE24sW3IKfFeNQpfv9bFzBenjbbm77Vg-I5Ymu1RFWLqXAUp-TYnyn5m9xq8juk26gPo1-Bw1qWzj32F7Jjt7DueRGDeOE5tG7g4vH5BebY88RkAuj8n1B0hA0NbaJsnoBWUuKs7eCqWsmgphdvujuechU-BbID6IAQqQz0Ef4_iAcERFEywpIeWhPwxUGd7wrivV74jhr83hf-QuNVKAkeJEiWvvvrl7ruHGRUvb3yeMPePu-VdZ-f7Jm3XjkJ92lhwpVSz2KSpp9PDQQOR0cfqvUch2ml5yjW9Z2SYjncAM1iaLZcprqvcZURY2irxOA1wxhSISCEPfDluxuAJ9x8McCXl_ewdBiKwHJA3DcSjplK7S_fPQbjfbzh6XICK51bHO0i_UyganttNoXOwt-n49NYS1njtIUfSgEGn-mbl61EMMIDg5NN8Zj0ElkhU8v31CV39SG0SAyvBYxKOf6onWKSMnxfyHsxz5I_HeGEps","tags":[],"target":"","quantity":"0","data":"c28gbGV0IG1lIGtub3cgaWYgdSB3YW50IHRvIHRhbGsgd2l0aCBtZSBmb3Igc29tZSBuZXcgdGhpbmdz","reward":"0","signature":"nHX4jZT8QHIjRtFxBnLfbFeh-UkhGBp3rL8HDhUaPZGYGDGZACc2x-Y53QgsWg4ovfxnexEgUBdd2icw6jbui9JkcvCOBMlwKWa80JMbX0ixXkWdJc0aiBhleNS0hcuX6aJvEn2rO0qXGFR34y9UYjUfv-jP_2xk34cq2dRXg5VxSkQutm1iepy03r1ddbxukHV9y01z38icg8JoLFB89JcfLpAqB-hGtZU3JMxgWFd4sC8wWJSm-fMXeUF6BqbUPAsH7Sszfnufk3qbnxFoYo_X05TvCE3dw4prFMt6WPtLP2TLLtDdEBJ74M21y36ySaSef2eNIx2SRK4doAMwnLy125AhY9t2X7AzR8ThU5Qv_uIaJkH7Nv_sCcJ_TWjLZKEZVQUfUa429havsqxA8WDSotk90f6W76vjXd94AmAfDC82A8ZRgsg0WlQVM3wOJDjKqSGCbyekc0deHzfQq_flO7_U5ER0sE4PwEMez-rc9bG4fdr7sk0yfVPFMpmbMdE3-EFNX6dsKW5Wm-oqUEmKoACYo9PYOXMBeqprocuuQO85V8VzHBiiHhySyVMSHMOxL8gV8LCDInhxOaw0UXw_Anh-ISuAGkxJEhCTm_lpvChvDX3ST8t_3AVnNEsQSjRBCR6EU1B7cc-bXybty7O4lCnkjbegvb8qe8fyIeI"} ================================================ FILE: genesis_data/genesis_txs/328-6fOVCfCid4QTxHjkAMkQLMHZgDg-hZo5PnVfp2Q.json ================================================ 
{"id":"328-6fOVCfCid4QTxHjkAMkQLMHZgDg-hZo5PnVfp2Q","last_tx":"","owner":"0tFmYP4c0e5KKqhpXErTOL2hCl3saf4CKWy8_J2rE2qJT5cybq_bxqeadgkmhMf6LZov_DZF4-hAmqi_sCnauuQz9f-oBjvKiZ_X1TMnU_NiluMLPcDj1vFJ3uNGjBlwpJhmNwUhKb9DWrrVkX-v9kDdWyR-opgCJsdQTV_hYXAfNjTX8bo6s0W8w_uhpz4cXk81DtJWg2stx0N752eKZJwqNK78bus65LAzoyaIkHSfQrqd6Mnj2KoEU78kUO87zofg26HLNgY6OIWFsyYCPulC6GG2Hs3f1rIQ9vy3s_2CvZEyeFBtUqhMQ0YVORyN74pok59jM3Qi_CULA9rEXkU-p3DOXESmMBjOLNtgDoMF4IbnStMfgICgX9ZAM8mbVCiFgU4bT0xzXM70Aq452NHpOCpzKhbW-2r2BWnIKlUJaOq_ZOOaCdhFLhsjS1KaQsEyR6TT_DHNuidGah5wVsOKK4gqqhumRaMSj7LYSY_uXJk2vJztoxkEegfemIsLfofTDe2WUg0vQKFoaHPsXlOK28J22Cxj3vTbIuN_hEFLEgmNr0k-6lwEme6Plm-ybsy_3iuq893KnJOtn2aSETdYNTxmhHgK85_aFPdj32yQsuOuqhJzZFKr-7eL17qN6IsyojhIRS8sy1ptMJCvr6Df8JtaoLV47fLw6unUas0","tags":[],"target":"","quantity":"0","data":"R3JvdyBBcmNoYWluIGdyb3c","reward":"0","signature":"udvcpC1kzqCaxT1afIPdWUMshjp4iL084qwdy6M_0PZpxiTUNlNidZYxhF6Gm29aUsRQZZmSaO7RhlCPunA74gPwOHnfq92QDz2KD7Ys4YDJDh6RGvRiUsK9JeNt8NpOevMOIWj_VZoF11cuSr3irdRZDqAG6wUkaAM21jpLs-_Q_9cNAwpEmf__BAKzG8w6jRmXLCeyOVcVBhHU89-_DXQMiF-Cu6d1Wws5MZ7sNAFSRMdFt1xWhhL6-3cpxp-bx6zOU_Qfo94cA6NDx0bL-6CUTYioLnWEsazFwHmJeeC40sZQ__5Hy6wBrVL-Q-n_7FhcKJ4nlM2HQh4pjp3VHMVzGvKKKdq96wzFUms3pI4ZBECbiPWAl8S0bDbySJD93OsLIderIdf27JRKHIM99Y4B-svz4SRB2h1cioyBOmIb-hKi_QwOGgCFnfgWF-iO4HRJJI7OUhNi6N6qoiRh2nLVcVoIw9dLm_IIelZP6Wsn8GtKROz-ZFDLxBXWudKPPGj5qB7xKIye9a6KN_KbFGJGe0oNoaDqZXmX6e6Z24StPiV7pErOfleZTHB-HjCqMySnxXHX0wrENYx6k2yHO9WvlP-cPY7sFI28U_t7okzkCTMWKclA_R3oGiVjqvgvl9CahUVKnqyqb6ty5UUdtocASW1050Tn8VR-NY7kupA"} ================================================ FILE: genesis_data/genesis_txs/3BSgxVi4vtVtgMBtDE8xPMqU0PmkiKtKX6P_Iw0kMsM.json ================================================ 
{"id":"3BSgxVi4vtVtgMBtDE8xPMqU0PmkiKtKX6P_Iw0kMsM","last_tx":"","owner":"43g_C5qexn_3VnWkBzQSgesSGXegEedB50sXfPSaZiD7GWribE641H-DnxTFGAWaKMKnDEYvbrApDYvwXYBMJ1F0iLjlh_iJ_whhWRjOmXPoGFs8LReDFLwCJnumCYNJVkhvIVZOHQ--Q_iek6Ehu-R0Fxpmu6pKv0iy3oI1T0vVkmd4tH7c1Gn7c1bwBZOcJBpBaKTSrICrc_ocdyQrcgHXcs7JSo5VUZlKNEgw0K1OaiXbywOFn1eyueVTTjMnBfd9mNT82t2Reeely6n44gIBLFa2vZ5dbhFyaOE-x99Sqvz854kpkxQHohPtOyDbA1uMs6qIljJAMY39pfUQsh-hLB0AuPFIcILBFKHaQuRFw5CZGs2LDfrij8EI9fbCsb92OLD7I4IuRsAhRvN7i1kvresUo3PsLzeDMNCDnPSwzhHeEVHBRBk91bvq1mxNPKl-fx0yDmAWJdom7dTfLoVh9IKTAc_6g6TGMWc9tl0qRKazvyjEqyGoBcRihDAFyEk1dOoqO5P9OPh0VMv8_nBqChL877aUX71s9GiFFxXNjm0Tfc10u4RltkoBef52Ix_cfTdwrBqZtSM76MbzWMuOJOK5ocEC2zCS9UvsUKFWz8Y1PObS0dd0LXXCDVvP61OrLqJfu-msMWTaj9uA3PydmjQ2vi7_B6AVxUAeE3M","tags":[],"target":"","quantity":"0","data":"SSByZWFsbHkgYmVsaXZlIHRoYXQgSW50ZXJuZXQgaXMgaW4gYSBkaXNydXB0aXZlIG1vbWVudC4gc2VwLzIwMTc","reward":"0","signature":"VpPIjjpQVjQv2RfEv6Kx-B8-aJWibaYOU_JfiHGOG7jqTTIO1NsCISMP9XTf0he2E2fiC1LlXuIPQ7_1Igz2YI7UQ05dDQqMCUvHL-wHcUSTQP6Rfwp-X5Au7I58IDKRxM2W1YGVCaryq_Xb5mIX3vUbpLQ5UEouIQh2LKIrP59bsvKsFRQ-Ltn6Qe3kgzf4nzRz-NFfFXrrHdUgdxuvVhu1YZPIioD1CJfiJbOuP6gF0viVuSeNAQJufWvVsPCIzh7O35-8EL6D2p7laJZs_6ZwT4u-svHzPtWIf9Flx8MgsIyDnpMH65t7rNjP3UzBtsNOYrLkWHT8dpv4SJOBmUVR1uEk4cRLz2TD1rFxNpdfm6Wy2K3f831_9u22CSJLvDNAhEgG88P-Cb8l2aI2VoAahw4m3vQoVaHJ6RN-kwQhT6EphZo920rssao6PwOmyLnwSlOpRKvkqIKRGyJE61fb0aQfRlb3Y3EwtnvVCzpMblknM64UD222voio0BWrprLc8ioVFPAddwjc0lKQbHPm7Onpg9IMs99jPVibMl-Fy-wT9lbBqmja5NG9ufNb1070XMwNDJ--Evr1kNfd_FzWPLwVYjByw0-aHvfDIScPzuqBkp0gGAMGR2RxTu1w3macuE6_RgfMZDfXCeIpuswPV1Joh7U8as5DNZ1hzp4"} ================================================ FILE: genesis_data/genesis_txs/3MMMUrHDmjbCn_-TOZJJHvjLBp8PffZKUNfm_Ziy0Vk.json ================================================ 
{"id":"3MMMUrHDmjbCn_-TOZJJHvjLBp8PffZKUNfm_Ziy0Vk","last_tx":"","owner":"z4GZEU93WhZlKZo1KgtZ4M-BC3Wz5WyYQnai0x7fhDaRmNDPhpK-YlJXwpiDdeXZM1oWOqln2ZFPt-8PYKPR04dyqvYG3eMcY1-DwGiBi3UfKH-Nmpa9qp47mUV8EchJ5lfNkVhk2STgEFf_jiirEv6HCUhIM5f7fZ5okjzyYsPjNSFljkwFnqUNI0DWt7daeXO_7plY7zqQWDm-r4wrpKTSQG4ssK7IF4TE5bCdRq0XaMRq8vNulhW2pBt21tTIecwuWKdc6sWt1mnQEZ0HYlaxitLdCZ91u36dH40xkAInEKQdxU49AEBRlL_1lDCIycDfct-Tr_MBFNvwSbcWnp8rLelDVUNjYGTqV-FQpexLsePKhIdnmO05tOCA--qEHyK9spaHyQ10EgNlYa3NMStyYNmK4r-VyqNYx2MPEQUdwTAcqOZhrZQaPyBEwbB0nTNhj9hh7H3fVZekQoC8iB9pOMSmDSI6yRne9NwL3bjBk032Ki4dAnRSoPGxFw3q4-_WmgAu-RbCzlXwHo7Q3nmCzHinN1IkxL-JuX7q0I31SPCzPIi11T5w0pa5rNtUOP53o0UdyqfahDJJS3jRRYLuq2U_WuOsUijXQZZwPuSLrjFaDQ9Uv_DtHMMs1_-uktg3H6NShHgYbPMmXoxaIYZaktYydopLWE6-xhoZwR0","tags":[],"target":"","quantity":"0","data":"Rmlyc3QgdHJ5","reward":"0","signature":"WTDAXVuRw1blOgFO76ruXiks0Az4Y3jnTyla7QN-DUCVTi7pUt7vxYaD_PXTHIkTM2nsNkh2B8z0oBlMfFSaC5c3OiUpFQvE5OUUDHvuuhs1t8Ujt7V8M_e_uY_n0cNHmaG3Ku6zidBhdFWDyN0jkMrM7l7_WZo1fWtjK39QKAd5GekJOG4682g5D0vqFOqnCIYVV693SDsRj-D-qTsjiCNHQrQIxU0PzTlIKe94eogZUAt3oO7SWQZ1ouOYu1GbVlqgCUqk2hehVU4e7dfKiNBGEqcA0HUCsl6dpZWjKjzsuMY1NOkkqQ0zl6ZKmfKmOZIKEnWQzIk0NmMi2hOgD-SrZ18GWm-M6O7lFiFZpsgzyrjn7Vr369lK97K4k79ELNLZxx24fUO-s1GzD2xqGqJbNnqM3Nb23Jc9z62gCC9MrKV7QmSM0f59QYVRhXXgclFtUWwvbIqvnQ2cb_1WMRyBkBG148EiWWiAvAmQWF7h4NmKPd7E-dO2ELq80wnWpTribHH1B6g11GDxoCMr4NqcpqDZMWgqYtN1mtdmhWt11dcstnZb0w_Z5vQcAn7CRzLVaXO72wiWbMcO2jVr56p8pT429R-XmQNZdICgTSDKU1hgmZS_MPmStGmRzLZ87EINJLz2hcVBwzg5Matmi5zhm_ty9pD2GfT7KLL2LBw"} ================================================ FILE: genesis_data/genesis_txs/3Q5gJrbqc-PeOvD4QQ4WCNp-f5cYzTyHyg6P9b-WvwM.json ================================================ 
{"id":"3Q5gJrbqc-PeOvD4QQ4WCNp-f5cYzTyHyg6P9b-WvwM","last_tx":"","owner":"vYbZq65ub_6bK02xLbB_kEgE6cMGTRmfQzIIz7woBMjLJR8wgxN1MR-njwnIwYv7UND1xI2q508r_7stySTOn5SPN-TJWLpPFJT3ygK8_xJPrgOvpK5vectidpzST_x77wz3sXSxw6G0g4BH90zpQj7TaFYFzv6eoZ7eMCRo2aeznmDBq4DyKH75v2WhD55UakySdqsUaREIxKvrjnHjCc6V3M2uxTx69BjXvwU1bhMCq6UwG8z1xfE2IRxJ4mNr1kDS2fOZ263aoO_8tFb8zbU7KJYy8ihCmB876rSOqxNw-xMyWe-MoiSgh1oYHLvmzfgeeEnAa2obDhbZCPkUaZ0dSoCRlCuVKr1XUuSXBfOGJysXt3z0lHZIXPLTMgJ6V88CEwoGy2BNCo7zkLZOlBRGWiGrpsOhwWSFxNfKLmbBY7KEj3v-CGrCY2IKFJjTVF2MiV7v50g_glugKH2umUgQqVqnnZNMYev2C0H59zkyLWf0pUuoMdrhgtkftZh7rXbOBJRm7ulHvJ0jqoRSwJSAuqTLSF12sdNP8fD50pQFcoXTCt8w0qOCpdRYjlK8nIV6txW7HAfYITJSn9Lcu7PGb21SzLiv_f6F4nE3HRHek4r1MBTA4l6ft3bGbrBPrG3YXdoIqDArSc1dvqrOqQKs2ZxY8UG6gqJ6tXk249k","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNrIGFyY2hhaW4hIEdyZWF0IHByb2plY3QuIEhvcGUgdGhpcyBpcyB0aGUgc3RhcnQgb2YgdGhlIG5ldyBpbnRlcm5ldC4gTG92ZSB0byBteSBjaGlsZHJlbiBPbGl2ZXIgTiBhbmQgRGVpcmRyZSBPIGFuZCBsYWR5IExpc2EgeHg","reward":"0","signature":"OxAFoDteRI3tjVREUanNe-S7cR64GsV5f2-bTz6DWCppPtthZWEMHLad-vQFTRIG1UtSkaP9Fh1jwZ7mM3zhyhwQbHHa1Nxz7boXVVMDQi04ojhsCAg77qgtiV2Uqc-noBkCYtW8_1GCBW53jdmehfXGYSGs5AgtgapILtt6T_EBJrH8kYQugSeca6DUYtv6M5eIIjeiSeZ-5Z96lu3Sm3z3T3Y254ZG9wMwA_hTCB-fpMImFRGjbswiZY0LSkMUOe17Oap9ICA8nVTA2xZ94o0yMSL-d3LbysNdc76suMlKBP0msW8newif_5EiYWIarb1zCKDjWpkC3RHfsA8K3fIyHaafdVMj68mzlwWSGu7CShXIjH2DXcLgwE0yb3Z97OI2NexcP54Gv57lmQ5EmAKnGxkhx1JGVIplQGmST6juz6Ny2YgQAr3sh7wFaDt_hDKU-LipQbCQqBb8AoqhF4Ok1hZ2fPUgq_UglW7ihoUGJXghRzGBA3DA0apbs9Y8unHxzn7IXlcL7nMRZXZfBC2n0Yiy4pm0rqGzM5TrgE38ZYuBES-3KlOWt4zMuybNaT3GO3G-ddvIakvCx-U49Kc4GPx6Po-FEiu9GQkYTNjX7VSnJNyCbB1jdiEsi6JsZVmy89xt66YfsL4Tt8ZTEOdjQTLioQ9o8rsWJYY-NLE"} ================================================ FILE: genesis_data/genesis_txs/3T6mnguMWl8GeiqZWiBZrGXHHtwm12mIWciusoSACkQ.json ================================================ 
{"id":"3T6mnguMWl8GeiqZWiBZrGXHHtwm12mIWciusoSACkQ","last_tx":"","owner":"xeu6ymhEdXvhQqZAyVvMSsAfL-D5QDosFe7hY4Cx2Rn7hFUe2jlCZxtu2EkhgldpAa0EoLEFNGRSDz-bMA6zxtlEIl4LFvqK1_RhBrjl8EVc9ROBtz7G_BPgOIRn5dPBudUZPOr6JvTqenT4XaERGbzeWyKBH8_JkvhNXAkmmMCeDDWk8wEf-WY9JY2xCZdFiG0Vm5WR0jgcpxvgdVrewFgv8yw6Y4FW32KP_xPyv08Dy7PxDh83bul9QuRud92O7WlbFxCvgrZ9Bv-EeE_a2idlTrTlwl05T0Vkt6MO75yO-3WOMmJ5p3SPYrRbdI85x7eBBF0vCz05OFZl6zWr_Q3noKj7aIJGUsnTiEYUE1Kr1r_IWJbJBD4rZbowk87s4iCJv_I7xStwYQy_suvaZ16QR304q2rr49xVUbvTyOPIbwO-MEVdIYW-MQ93PziXEbTx4186m4Cd-rlFOR3cGJcwTLO-4DzkN1NDzFDX9Zqib362bYdiPo8thQLTH9vQCQ66qYXHH1ZQK0rkDDMfSuh3Ywv7YCBmZUC6s__as0dpxKwbyrT_h4o16SOJpolk_TNiHo-uQmmv9k6ul8fJGBvhkJktOEvKMi0lXO_l93y2ybRt9wMc4DsaqXzWjiqdnQ6dC5SY-t9Vlw09INsj0EdEojyXXcsJKlxOqG5KYfc","tags":[],"target":"","quantity":"0","data":"SSByZWFsbHkgd2FudGVkIHRvIG1lYXN1cmUgU29udSB0aGF0IGRheSEgbWlzc2VkIGNoYW5jZSEgOjAtKSA","reward":"0","signature":"sECpfsvR5UL3jwn5mgFHX3TNb8Vx9gCG8Ml2YnJ3Tjo2Vs9mbapsVmCQEHa-Vr04XOBM9FxP86I5gXNY3-e2PPz3Dg7Gpt1ObdNcSCuPwgdaXOrlVgn-Vr2dqUFefWCcXVSTv4kxjmqJeYLW6MLtEj10yBjqJA83gmUOwEY16n2PLzMqPJ9UHpxDVU2RVElsXQ4m88-hxikz2WLChEis734sumkmBx9WBRicWQCvh0a0LeV53Amqt4ZcJdSZbuN09CwlOF3thbzy72abXVTgLf3btAMaaYFaQ-9AB6_RO7hDMHx_5DIAa9f75TkOPQ8eSQZ9jQXvrGFji_O5LQ5QTApMxXYJ5dZEZ7B0DKGy955Vf2QNWGiVX9QeULKoP64sFjBaQEuT7LenNqaZPwJS6D-VyPLXV2_sCYBXTvfKkYkoHduFIXmY16avGOBFrmQYJya99aF-JNASigQDMDokRR8Ycub4qRIdb7iHJH_skI41rWVwLWiK3pCBqCDVcM_88hPuFdOi4zqE4fyw5cQx5OS1p5amQSQloJlzOf1mbztCP-yzsSrbXR49sjK2RNG8uEr3j2K53nnITUqgEz8oWJNaH89bFuxYQGRj9CkW6KJfXSJZAq9Myfit36xZXC2ScEjB-TNhNSkO_hfNPmwoHXfFFno37YJSshVp9n8xsKs"} ================================================ FILE: genesis_data/genesis_txs/3khTH_o8WZHSCzP-AThkmt7zZL-d_lcqUKC8nz7c8lk.json ================================================ 
{"id":"3khTH_o8WZHSCzP-AThkmt7zZL-d_lcqUKC8nz7c8lk","last_tx":"","owner":"vihs8tPlYJaDxA_pGnOtq9AoLQn4Iv4EUcWt6obYANkPEeojHCpRoH6IPButCTaYiIcQDIvz_IIqw8jSZFjpvIZkDtAg5mIMXHV0CvZj7D3fndVxBf6XWE8U47WPGEJYi2NTSVChYSSAkZSzoqm356F9Uwv6z0AGDAk7H9AomUYLU4KX60K7VI5GC8U19PGERlW8VHriOd88Mh5Zrc5P3ukm36AzQf1Csptxad-DKLBU9i9HFBR54WzxzMASxzciXKhAnKMFLCjraOTvByfonwbl9lrB5li-ZbVP9Lz_TtNqrqdT06gste9eM4Qfj2CCZQGHqNEv9fbIRFSX9vMMqqvdVmFrMKo4bskA4P0qG4vztaMC9GOtTPmJ9OkHQBTHdf2b4Flj5em1SWeTECR51jMapRi0xzUDvXQG2gTCe7yRwVoUUmIPh_6Y1YyVX-6KhW9nrRhYdqEKi9kyXU6qRz6iyO6aGpX_7vm79KeSIEofIL83HAlL4fHkDOaTnjVq3QzbThLWlPZEusmn4uoWjCx9kk9Y0svJqqoMUsTH5UPM2WXLB0dDkN-sATN3x5fqfqhDUEOQOZhE-daDO_pEvNG7XCP4ABT1gzxEOoms5YlKx3X8g4lgSZ5CfuWSv8UuCsMxNF-yD7WI4kxUhsO4wVhudC0J_nQxju88e2SOoTk","tags":[],"target":"","quantity":"0","data":"TcOmZiAtIGtlYmFiIHRvIHRoZSBwZW9wbGU","reward":"0","signature":"u7oXf_zaTIivUTfUgwPBxYpWDUoNhptt0uWJLLIdzSFhnyic5U1g4-3rQ8_ZPJmGcuIvQ86bOjCSpIlZa_6yIzA-yOC2yjwxPikd_RK1dTwLjx-v9msGpSXQghGg2K2DJYYb57smP74bY1Q4yBetMt-ZG-IshYIdptGYI6p57Z-MHzdd7Ql6CAptCyTFulM9fIsGZET8G8Ey1K_ojW9Drzxy3XT6BR6arvenPh3u6eGateaBLqut4nikTHyKXYMrZ8giIUHdn_5bEcirPPXff51iItdF3BDY1-LUivQrGrGb2J7TDtBEql6gebn-iRm6LJLN5T7pnj83C4dkSHL_82tmQ_jFJBnY-6jn2bAW4f9_ZJ1SqWRg4xVsMgg05XnLxSmm7zJBNmop3m2zHuXVQSI9LMpYs7Tv9JyRgTmjfjzQNjpQFvT8k0IosPiczO_UF9--10_SCcalY8yix926yj9TeDVavPZfR-CgbhZlTiIvUZvpfMLGRBC-pB7LW-bItkrE5xXJXu8RvidffV1KRvHbyd5Gkb9PhBluRBg6S1HCDntTjG5s00Hwu15NuvfzdxPYVVU8hXMVFTxg2IDn2fxby6wnGWpe05BT-lImfA10NwAueJTVj8sARzwYGjiWK_nl6ycGUEejRce9EcoQl8gBhfbhynFtjt7NlEH5ASc"} ================================================ FILE: genesis_data/genesis_txs/3ku6XelnvBsaRjoNxDWb_kT_PRlQ88U0pbWURziCj7s.json ================================================ 
{"id":"3ku6XelnvBsaRjoNxDWb_kT_PRlQ88U0pbWURziCj7s","last_tx":"","owner":"4jjzirvWyOZEbYvmExBdpl0BMp3nZ8Grh12KODQ9Xq_HHsXbHmRAnsSverwdDVLtQpBOu0XCiPKaFq_VJ_IHskqv6a6rgQqVLpEj_DWND44Xl_4j0eojXJ2IwcJOw25ScumfGKRflec7UFAprX2wOhiXZ6I2uvnT9t3_0GIDtfOghi08vE3_vOW0Y9RUouX4zTnRfbYFm2n_iyFZLqAIAEre5JAx3dTp9zWunlOe2B3RCge5xrE1MFilC9H__qfzEiNH3m-wjymC7IgoVmbqBQRCoXps7YzqAJxdDkyQDc6NVlfmoc2oXwazzkz0h_F4LmXWO1FlNJK80iRs1P-0tj5OPYnHUf-48FXB91ha6BboAxzdZIxyzg4YDjyUVgLhQtYtWfo0_a-shdHXi3pdCTEB8B5vfr5uBOmonJG8ZY9qPw7tSMDiK72QZv7iIV5a-jGiVlcIW7o9ospVKoNFCt2lSfGyF6jmUvkYvYbln4wCiY6iUBZiTLaZIOzl9DIA3SZbH3VZgNA9B8Ig8EIT69rapWg2DGUM1_An5JKM3knBqyJcQb8TNJGvljGIhId8AgRB4UglVZ9YAfC3X3fGUyaZ_T_Cba4EvP_HsjWLsMeUeurKoGkWhIXSQNOZZlU8_V8p2yfJhgbwuCTviS5donmQeYQNcYY-JBXUy7swA6M","tags":[],"target":"","quantity":"0","data":"d2Vhc2Vs","reward":"0","signature":"awYyfg0iMfwwJ372okD_Psl3r8S5RMX0zkwInSW1RxIcwmdgqIKOjnAleRi6BDRitkgyub6r04xAw4SNbtvZRC8p1y01ghwAXVifo9TH9oY5Q0lOSRLiFPfgg5iFCzZakdreqC5SYeaCOIKJ5TgQkXAijFcG_vHJ579alIsKNObu3XsC-vkZFmtQsYmxz3jBlGDdZykzUzRO5BlwbpTfdej3YRpoKnmtoRjNEMKIjWFn6Hml5rws3dvc0NsVnpyptxwuzQDcfvzU_VpkaqYD4EBd0jgLqnycTb_y-Ks2HziiwR2Lb-yXWLoH1S1ssnWdpIz6fdY1L3OJM6FeTVK1pKmUrk39sax_5d8DTzRXeYnaraKgJFpLcys_IPgghk8lK3DvfNcvOBNA8O1gu-2NCa6bq7vVdjdhwuI0AzU8YZUyl0p2z2-Cs0oKoYli36PwOog61SjTm1gsJkklAHyHg50dunLrnEBl5R4_iYI8vhHzFXGR_0D2Glu6MjVCDXJHrwvOUTZyf9pb8Ms2Tk5h7Vcr21tNeYm4QZsMDU3qjUN4hIcvEXtscujzqHYIlKkBYu4WVGm8NPd1ZXdV-6dWJbL5RwjPpwJodgGVra075m7vkHRMg-Q1h-0v2eobEvZBHNpwS-lfKXNdZ8Znbxl6nI-Owp-4WjMVX7oO_A_dnzU"} ================================================ FILE: genesis_data/genesis_txs/4LwZwAVcaBXhXsP5b4mnE11tUXefuRUTtTibtvoozDQ.json ================================================ 
{"id":"4LwZwAVcaBXhXsP5b4mnE11tUXefuRUTtTibtvoozDQ","last_tx":"","owner":"vRfC0ispBTSwB8oajO6eH7uOc__XbMBIH_SlDVS__qN1Zon7osDfUA75xngIyrx6f3fMf_NbF4KqNyNyxZEBqqUklcT1Rh8T5aVI397ZWq0p99ar2872GMlGxWjn6Ep1tcLnE2vbEtN4KHJc3Mw9XAxQPzH1RVotBtw96YklU2c-Z96rGK9rQliYCrcpu2X3GpTDXNYnVMLHHHGZcuEexSnRFaNA4ulQjKMKVRFVGGjuLp0hOIuELDEuIyGL4SxaZ8z59Nt2Txzfh7BmgJKkB1DnDwecKP1Gg_ZUfSBIJI-C4yIS1ph6U-7986KV_pRqFtILPQn1l9Y1WiiJVFiMrl45KN-LJ2Ef_V_JhBHgSctFWFdm9w0nWz_SGNs_oS9BjK4sXj5tSaHrKCBVBwjym9K_hsILT18zI7ESSa7dKMDHBt4AvA9tYLlgFM1yF4CfCKLcHXX916uvzRRXdnRO1zhlBIMcXbXxFvc9xNmAhBtUsPuaJxMsd3DyhYjJsHHVwQCUWMfofyldUTteyQZM85O4WkoT5nJpny01T3o11GK_cFUq3ELPFo2xfYFljVVwfj7oLkhG6Hzqy9MucwQHnO0hgHe-TMCssM-HhLX4s3xTy50nLpTqNbUph6OpDZJM5W_jIJyXb-mi3tsvLDMvjuQmb02sc4zBOZ7Cbb1T3vE","tags":[],"target":"","quantity":"0","data":"R2xhZCB0byBiZSBvbiBib2FyZCE","reward":"0","signature":"fko6xfxpTXtvet6Yp_Oi1JeF3ZFdlwT1ruZr8iUTp4rDnPgki-oqxUyrrIK--0tcQjPQOW48APuWzDPIabAjcpr6nTjPdH7xwqgJ2EEBLrZklh6t2KkEp1HmAPuTMB8pVlw2jr8uP7WY_iaDNv760GNtGui2w2SsShvfx-QJ-udoJMncbvUuUWUfItzZ4MwDbAOmJ86lBGp_M7exebsoB4lCYDKAvhsJYJuuHnOmKJ2vV4YoH-nFRdaoq274YK4GC0BPcSBk41tf2PR9FbbYGnW2r_QWrqC4Yiq0Sg4lHK0gu_x8U-mPeLhIjsJC_LCV4zMXcYQvZ_p7VH2ffhax24-XozKOHuBQkQVM35MIoBnEA04oobq-QYGH0s_lPz99yXuvsik4g1iMXJcw67_GVM_Nh7haZku2yS8idX-a8xfxsdh2IMxuBpSddRzwxbUlXGp2GO6dZWmeXQHOYCoFHq0AbsrUWfX0cGbrxKO-xCP1eQ7Tw-R4Bc2T0uIUHRj_O34CN8r3OL1GTeNeQ64Qas3ziifw-Eab_-Okrb6LqI_umacOd0XMZCf7kne8mzl_4OSS5brDvstGpjBmtyUK_y1_RBsHb3jqo-6w0ox2Xon7FEmCkPLTilQB2YrpQZLVhjhKO2jJES6tpAA-QmnT0dEy1ifH_id5PsthntNgQTo"} ================================================ FILE: genesis_data/genesis_txs/4UEhkNbsGdJUjx1lJQgX9KorwSf_RRZG8VMW6jMmf8Y.json ================================================ 
{"id":"4UEhkNbsGdJUjx1lJQgX9KorwSf_RRZG8VMW6jMmf8Y","last_tx":"","owner":"ydrVhsTG1L8jGKzxREj29zhFVurD4PJHX-m1YzqLM8nzLDDJBAnRE190t0Z5Cn03qNLw-ciQAdw-tmwJf5eVY2Y78iYdhteIRvjpYa7JYxCljeuTAIgz7LDHgn8n5pSnx0IpM362mudtEHX4_4bnNoHzXF8MaU_Fr--ydgnantGPmGagYzaZlOZX9PNU5h_hWpIVgZAF4FkVNyjXViSUThOVVzxIZqJ3FOAA4-4kNjAxvkc2awKBTuS4yWUaoaYxKNfNKw5GzTFGRY4xSzrBDnAPDI7OAGBudsgU8qcaZxhiAnMaOqNstNdw92nG1ExJl9LSEUH6GF3Hknnwqn6JPLw1a_RvTYIHavBEp170y8zbBQAg13FmZ3QUQBBkpoDBO199LxjicRXuUOXEgObDcSiys2v_Ogp7bopOvB2WEqMFKXtJ8paAsaGxsXe5r_HPtMRMxI31AzlXIEv04mmO-UF1uXQSFoKWlky_K3FBZazV3RDEO9pKYzrt4yXggvRIGov05qgEnhd96ezBPFYlPoCxvL80qx-yK9Ld6JAGj4IZKtzcNAaDLE9tVoALWEBLLj0jercjpGdkux5o_kwa3cdI7YysQ1xMCB02V6qnNS2kS9KVfTLnpsLQlcxWEgxXtWfoA7zhHXcDy3LTCIA6US2GFQ-rayg26_0hhSzUJVU","tags":[],"target":"","quantity":"0","data":"U21vb2NoIGJ1ZGRpZXMgS3VtaXJlaSBhbmQgQWJlcnRzc3F1aXJyZWwgd2VyZSBoZXJlLiA","reward":"0","signature":"cCwK5uNiYVBj_MkNO6soloFf6t_EIcba3hqYzkgYzc0lIJafB-U8rYvgJrwgDFySUttAro2-sh9eEAn4NulotFL_0uaRK0v6ShBQcU1xnwROLLq9mJR2hkXCcUtqYedXFAkYXFxuYjdkvHbgKemnDkMlEDT85iqY87mEtCgKglMagzPl41S-8-z2jrw5K6aH4Y8dKXAcEQqXh_DDsF1Czci-RevZ7_ACp0upBkzLqpl0BKcC4xwITYQPDHHxvzCcXhjdY1h8k6nTXCVEQd62ga-oxJRo9Cj7kDSX369hg6RnW87rd9GqPvnA-JTgzBLnlclsoBlQMcJEoQG1PDPACmuHckqkfc7yxa88XS2NZeHitBW0o5iSO5qJa4iZ0JlYnUDbC8COdkTgnuku_Lt2WMlKAbeCEPW3LpG6TmLxAZxmhxtc9y0wILX8LHZVpFx5dXoNXm11o7bcHpK3ponW81jADYPFlNjzvJv7Cpe3J7ikxldhBHj4eD2upj2YZLiDMl7l1lEepA3zrBY2pS8yzgRfVO2CAyn28hJbOI5lBGs9cuiEQtkFAW0TQ1puSQHKeDo8VHDtuA3Jn_4HgJS3uErhLl4ZoVW2DbtEnfZagcfJQrRjaEu2ENcMy9fn9fbrZqRN0bIE36PBA9QGke_ZxPzRc__AHRTAPP37Asfh8j4"} ================================================ FILE: genesis_data/genesis_txs/4bPVo0hCI3E-ry2mBjvOZsBpNwPM108NT0vnJCxCeJw.json ================================================ 
{"id":"4bPVo0hCI3E-ry2mBjvOZsBpNwPM108NT0vnJCxCeJw","last_tx":"","owner":"oTkEosK0oQ-WpwcnZtNHInQXwPoChbpJb3hyncJgW-hOs2TYqeFGxnMm2tScqLsvH5MlvpL7gKcp03vZpn5964wmnHjMLuPvwRw4HEwswgvszl4HfJNWKpuo3kTv_fm6Yse7H5F1uRptvrkhm86g9TaHRfeDRP6f7IaiB7Suc2Ereo6qPaTRhO2efc3ShlM9qrLZQvnULQzwIyklN7HiNmv-GlhlVEpO1fs0cyt9H98XD-3aJ2xqdIdbeHoucUd8vvMMD1NJse0ejpWlRX3bK9sm8g4g5e-KFTK4C_ayTQlMdVjkScH0dt5R38zzVo_VYlD5XWEJFr6XHe9m06w-ZAvDjEhFyTO_EVx8C15rJBc3h1-8qAZOueb3NYRLOEnFG9ovOD7xy5X_2JZ5aYp7vizNV2zOBlybN6upx6Qb2wHxqxRevB2iNQJHNT4hnqRr7u7RJOLKMV4UoGV0MmbmjFJtmlari8qRYh1i6w7ysUtHqT1mAqqrV88VZoyKP50z05OFKXEIfcEcKAhCmoEtfjJqup_nc6yME_O-49IPHVI6FFXf5SFU5aGwx7zy9Np0GuxM8N2gv76svWudkqtEWDZRB3T3sGjTA8X3dMnKIDKMJBJvdP1attDTjIvpf1Ndeg0cvuNlshbXzykIQgS69X0CsAbHbTicHKxJdb07rd0","tags":[],"target":"","quantity":"0","data":"SGVsbG8","reward":"0","signature":"T3oKjykqcilmCMH_a8d1C2yhwpLq1kTYKTLsqmPl69acTGcEQALHtKVbx0drPlOgZ3H9wlyKk2WjJM2_KUWjJckvVC0vgrJyQcqHXeGkdkyTODNBBnIqpAttmDhkDHuLKFiUUWTVdDE5DgNA9c5pCL6mJDgdACePZlaPUV_SyrRoVa52hNhgRk0KApWu0ILHGXp-sDX-HrF6z1PYywBdO5H811ip2PdILjd_aGHeK4xh18HpOp6c9jeRODs8wydq3YaUelnq7jdIHIdY9JtgBks9PXuGJrZ5M-dyilFxfB9SZ10bfDGioRg0YAtiN-c3bKLTRQCjhUV_7w5fEQ1vwY7NUym7SYP7UUBnN0JlD8Qhy1lzKs4cZ-WhTYY62VO8Qg0tRJOkcJW2Hz92HdVAwWEANoQO8ZtAyZR6a6LfFOfi6CZb3kQEXE7gfbU9m490VaTdyhKbj07AvQbQL1O4cabYNvaShWe1jk2UilnyCMeC6ZUm3ND-0_MrFZc1XSgl1uAkBUBMMtCnRUGIO9ovPGP-TByYJalPGxazbAdOuwltWsUwKaMiUi1zxH7pQ35jP4_XkT5ORPNdl6Bx0SmJg3Zwe8k2d_5RkhW_vGsVRTiVoytsIs1sufkEZScgiEIZ-gAAGc20zuNwb7wLDX7c-fJVa5P1umTVje9izXaNXYY"} ================================================ FILE: genesis_data/genesis_txs/4ewYAvsgaT-6Oy23qPqK29O_AgfvNbhLvol13yN1PdQ.json ================================================ 
{"id":"4ewYAvsgaT-6Oy23qPqK29O_AgfvNbhLvol13yN1PdQ","last_tx":"","owner":"znlYjxwFlNjt9NfIDIp78hfvmi5UIX4ERBXPWJspJkNPHTNrLRtwLVzFpblc7QV_taXQoYqnBZZ2bW1uuAbSVIpZH-n3HfKIHnXwLsq-4W-EcM62mSvK6FMs9aTIP4PTL56cwVdPktLPycOTQgKPEc5kBZF7c0AWCMpfapBdHh1mcdQ-ESDanrCWR-WyzFIXkVERm2ObHevFqDNhRsQcHoRsdrepZ1hXuwosjDwKGrC1xzQxq3d-iAC9kttBRY_cUgr7lKT0OfDbalx3q358u7mgrfLu0otY9sdoZYEsP04QatRIcBoJ9n6iGcsDgYGM4r_FuoZscbLiCku2c8BgsB6DAz5YBhJdyEXOWFSYHKmIwd8x2vA_J8mETmsmPBSEPrD3Uwej5K_h-KJDNRWqQdNX17qmDzvJtrHfYqMMMgTU4yrE16iH87oUAlhaJBpRVzeob9vnPctOfpy5rQq79XBfJDEmJ7s6QEvo6wnyr0ySNRjh8_eb2EQGXteDX_0htECjcARqFdn6ZbKZA4Q-jhxTxCwmr_36EGhVm6-T_-7uxdvkFhBfkhDNSuAW3MkukLwGJaQGwNmeUZCVXi95P-83ftWZYsAQnDuWq92MP7u83ByktLHkPjbV23vEDgZ9YqXJ_eoe06eEWfb75L8POumRUhyvGKS_pzWPQsIdMQM","tags":[],"target":"","quantity":"0","data":"SSdtIGluLg","reward":"0","signature":"G9NLN2k_8bX-Iwg7uiWNGp52HwIwnHg945W3ctVSLy4HHdQL7xB3OJVJ-Yb1f07xsRP8CrBpUjrkneG1u4N7Ez9Whj3lPtER4UY3u3kpicl6h7Lf949l7xj-uCPjVVh90lLtZkuv0ciGTxGsi1KKWqYFtr8Di_T8ZDg82BGsVPzX05xMyqrl1ued7jR6veeHaLkHMsZ4piNTkmTthIa4rbFNmFaNVLD0FLptDT-emNTP09DKTbm-TIgjSjfNiK7Zg0tt2qJrmbXUVQTdapMfAzNV4d2E1-c_GqJ8bSYXSTz6J_14tXCAw7doGoPn7RPtsKx6VFU0mSNi7gDvLdf01tTMcM8hEZAAaZRmEIYxrUPgKghLpZh1LRLdlDS67bZw9JVXz5Mj3pmVqBBb-GYalYWMy4lCsnGw2ppFwaB4NYQyxsc4Q_AVGiu4TAG_pYP8yPTFOa-sYxA6x32XTNmprZewb32UnQ7CeaiVXce14JxM4QzV6IaJ3spE5YYaHcIfAd2f6eHRa4tJRCE3MkqBDYdpaNluhqI15HtxzVNKYB3DLv5Z1Jl9xFYTSflYUwnMJPFYGqpkQ61Q5a5oCKpWMLs_KAbSMpA8pyzHDj30vFepQiOusz5S980fE-CysNSGDzYKuB2FeHvLD8Ohsa-kr71DR9fBewXXGpXXnu4sL-A"} ================================================ FILE: genesis_data/genesis_txs/4gLPD5njSRtiaJwjcjmNOyI5Vw8sFBQQWOefmy4SPmQ.json ================================================ 
{"id":"4gLPD5njSRtiaJwjcjmNOyI5Vw8sFBQQWOefmy4SPmQ","last_tx":"","owner":"zyiLqjzV8rfGF2l3MfV9mmqab3MiqsmQTmhz0Jb71bvmIzY0bvsgY-Lii4PRYjiuBHY_IAHzmVnl9f3hK79UCNAV7SNNGawwOVPoeatOg_0OjU-a_RP8esJvcAm99bVgLpYc1g30cR0okBV-7nE284DX6d3MfjQoDVV36nrcXPahyG6tPJoah_qqSAqY48TVJgcymHuI2Re27B6aqaXBHOMIDz3-db2LMYUBZQqckwzIlcAvwPzlv-sqqV-DwFNVtQc_--iY_weHfPn0EJ70ABypbJPwz6pV0Otka_1wtDsGKXey1hTP6d67cxeEjdxv5gSFyvqpdQqEnMB0P8Eae3aW9fyjPvZl2QeYVhrY3HzmX9DEJSaj65MncePxo2yo63Ng0qblJZCZzVbtDRwtBI9DXaLvo1tiinahyVMQ7sRbIUD95J2sGP46wOs5-QdexkpG3pBN6UC378eRVhfLXR6IuKGoP3k93mq65CVcdhjylydHXhkzIP4Kw-4IekE2I_zRpLn3nE_rN3Ia6HFg--W5cJJBB8hNw6odNuH9AAXYxVwiMEAKnki9edeK3nFm_tO0ZvWTHyoQXYpTC9oNxpovloMRXBA8dV7DhLYAnNhwOYhieEfIOGnBvZZkFpj5Mq716QU0tV4Rp_qAK0LElGdiUPOjVOS6hrqjjyIrby0","tags":[],"target":"","quantity":"0","data":"SW5kaWUgYW5kIFNhbSA","reward":"0","signature":"XNCLlUaeApHjQd5v3S5EIlEbRBxuICdjYotnSgJSH4aLReeq4xtdwY6Tmj8nYE28yQ7-WgJS3Nk0wPL6iMzlUjOG5ol1mA8o7Y2ZDTNRpG4pMQRKcOt88PZhFHWmfjzH0StFKpCHSCBYsBw9jaBqserkNXQavMBDW4qH5NwDaz4vTbeeSDiUoxB47av3CPkWz1GOT7_TzmepTh8ViwTX49uWZg4kdikcDOkjM0v0k_PYnIbhhHVLTTBBEebb9g1Svl8KOwcnkjMbaP3hRIjydTg2vhP9DMyRo2ReG82iotyRxDcqyZQzB-HIzwmxVWwfJY4hsHhfqQoQPqSiVVhkLMvE7S6RQmHYRIUJkXYHOmceo6wQzoESIFkI0PegHFzNhWaB_wBxmEDHtEHwgIvekCromttJjuMzAVQKPFP9SarApqEcpO-k8yh4BzXymjcyC6eXRQfiax5u8fC6BnddrQ85wGtepChTDFk0CodBrcwHVrheNpSt65qbZRi-5TulunCqLvGC7EQVi9g-9Z-b8U3wBFhBIunVXI3aWWLxm8nF8SxZoiDQzTeLw9im4vS65owjkI-hUwejdI4e71CGlLIjQsYRYiHeMJ-1PYxB7CWWHCCK1fGqvqyCc1F7iXltduRdumOvj6iqT8j1xk8QEEm4M5lpLjNEtwcnYy9l1cY"} ================================================ FILE: genesis_data/genesis_txs/4pNPqxodBesN6jQl51nH17GA1fWYfHVm8cIEfusnPLY.json ================================================ 
{"id":"4pNPqxodBesN6jQl51nH17GA1fWYfHVm8cIEfusnPLY","last_tx":"","owner":"20gZ7-uGvyEvTMafLm70C2l5Fvh_kEu1PoHD90BDJBDv1SJ7-p09LCsrTwIcvGKWALxJ1DyWtu56WPpm9XuIMIcSZebj5SZXBMnJKdHrHOLQxmWGoU5pSvMRRtR7zO-pI5N3161f5hg177BNdzptnL0PS6V9EwYXk12_MLlA3yOEHeqDnJStBVl5w_DULLbgjDPh94wJizy--vxVPglVn11NPkSt_afP6oIn1YS_lTVEr2bJFQLC74iT1kRqPwCEG6bsRfuM4TQr77nP5GGs5CxqMeO1tf1n1RkPL60cCxLhf3zNU7IUkxGSMMSMEGDOkHlfReh6kMPJuq6m1Z1rnBZ70Vl6SVbtyjiY2IqAxM9lIWqboL7jnDS2iY7xj7QqXvBtWVan2avXjFllnb33Vjkx8blatSeaWQw0G5V9g2D-JruP_7N9E1sGZvRrHawP-6i6KMzQJvRvKQI57BzgzYjPxj1NjXK64JaeWB9L0V-EyW17qWRL_T0gtrW7FS2I1ueTeFyFC0WLAVB7ofrJ8637WgLNcnNfABrnEz-akmGAxBXS31ZZGbtGWU8j9DEI4BTWTBGKt6o6bcIrlULFk64ia8Y0axIQQ6azXQLKLTiNCR-kn7EBZp-NXdo5BZCCmq0xyZg0K2XthTJQO4jst2cqnwkB5oB_V0VC781bzQU","tags":[],"target":"","quantity":"0","data":"bXkgdHdvIGJlYXV0aWZ1bCBib3lzIFphY2g","reward":"0","signature":"XgO_-3x6SRfd5wJGrUmYKNJGUpLPJkpgU5_nOyPohhmWDqVXkKaOJbA8rfR_u08nSrg-wa3mhPxvRF12dWIf81jMiwiASxpkcCmHQRLPMTLijAnnptK_wPda9x5_cU_qkWe9pPmHFvqVYTLBk0tD1FXKW6N5SlXnuM65cD-0DE51gfl60vm4Bb3uvwLO0JRBbsUGPSmsL1OTJ1cuzksuEcx7lRmx_gFDd2NFjWNbaJOsGVoUyv6wS7gstKLncZ3c-HMa7uYTASX2_L9UQd6xk70uy8PBPAUuMs5nx-dJCIsvC8FHmowsxWR-MketCblBKazstMo8DgnQy7sQ4m-9zThpBhjlDR04XIfzpV0HnN1nu0AUNYRlJpFp7haIKLuDNBz6tCb0acw_5AX7jo4RYnc0Z3ia8uZCqg3ixv5G9kWhdegJgm2ZL17RyMm6vKTIgdjHD5JL1fqlN5GnRgqhremWtN3SBi33aZm6dJm4RNwNN2tz5AbFw8CpVU6xYMbvBqVh9vJi8jK7PLkVzFg4-UZQUscGHOz3aWZrT3BpbccgXQdWdoqtMB5VQsVSTOo5BirReGswOJcN9VKBx1o5Y1VXY2-5fQa7GSiYRLznASgFRFL03DE_MNyqbZ4lP3AkpUQF3SpQrJJE5xvoejshTSAGxOOwZYxlWOZ4g9hvPsY"} ================================================ FILE: genesis_data/genesis_txs/576xa7WLVidNoEcYPhAm7OlyYgbrp7Z1RBIfqLbVFzw.json ================================================ 
{"id":"576xa7WLVidNoEcYPhAm7OlyYgbrp7Z1RBIfqLbVFzw","last_tx":"","owner":"1feL3K7vCky4LzgEPuWIxIIDRS-fBVO6mh6FyL2jTHHIz8mDtcJ5mF1R-MYfJ06FQUvQB3PhHYa2yGPVegBispDsr70k9PIYlJRTFZhpCHCLrMEUVX-Cmbd3NRnc91LVQU1wUyGlSDq91IdkDNrk-ykS2myorH4Je42fBdME-QUI08b1QIxinlDbxJpuwZ5MyNjCsrIISVqyk0pwv-6RBEjrcED8kdxUfIbi_nCNJ31aR0mCsZVmyYWnPAbAZNYp-Vb-r4rjwr_TBXLZT_Cb03hl6KmKO5l8DBn2LPdRx8Po99TYdkC73mBw2nh-6DhbQGEhKAZe0SKE2BTP61Ccv5k3Q7KNvzz1niEiKrcjtrAe7PVrwmqnNr2F3lrZST3wC_gj1INT7H6doErBlGoplUaqUq4my0RTnTPknc4i-fjQVGoDS1bl89yMqv2pnu1PFbGU5BkLzGbKJoFZEXxqYRNlnrpOKi2vBmSeAP6CqeOmNDTe8qChdOqcr2B_jIRGahxV678q8viV83eUuSxyqoonRh9deqbWSZVPoQqKkzaE_znadp_zexg6WuWuAUGEsFx4OSb-IMsuYQienXbiA0B_aSGS9aa073TosMiTwsqweMAYaH4wX2n_geTQOHlF3I5xBDy-vwAt1J0tfSMlXO9yd3_ARZkGYu5g4JwLn1s","tags":[],"target":"","quantity":"0","data":"SSBjYW4ndCB3YWl0IHRvIHNlZSB3aGVyZSB0aGlzIGdvZXMhIEFsbCB0aGUgYmVzdA","reward":"0","signature":"h8Rgqc_LcQMWsihv77pOrNA9VUYE5UG9DgISNqvUn2iUsARcnZIgrwtK1OU0FhH_cp1uZA_g4QKTGW2Qf2PycFrWpgWXJGGirmLJOqaH79X3esuSVMsVpDsTYQ5M3qJoVY7oYzbB7CqxtxgCZUF_WTY4mHQ1BqpJmEKrlNBVCdUiGyjbp3qyTcbCDKSQfRheU_QERTgPAiHsEJYryQhPJK__pcADuis0Tq5jruw3BjpLVvHRUxmZnqszlR8x0hWSTff8ZRHhhIf03SFIwuEhqwuPcUc-0Qh_4ql2KnOjk7ihtflHGEYwbt14C6CSK5cr5lhAuTkRxbtUXr8eIB4FoUhAAQxj30UXV8kc9YiGMim1brOpwuNn0w0KQyh2HqaKDkDnR46eC8BsDw0KM0O9UhjS8cHXrPkumzreiRzDNG4XY4MHSd3aGLw3-MOkRRdSKswBgSfZRu4MoHztE_GqEjQQCLLfAD2JgFvw2E4PxI3qDG7Y-O_-RFciRLaHB1TNCuW6FpCXD2ZxYB8XvXkN9FC6XYyxmsDMLQH8NWodHkB5m3laVMCxT5q_lMkntWdzzayusKI7MMO9KVKTrrXpOk3YZxTRXAlYvWsyVBhkyBTutcYWwBW18ccd1M6bHKWoEnKBiLMkOb8np8ac0gNUg8fo9JqwqN7vWsLG_ZVxYk8"} ================================================ FILE: genesis_data/genesis_txs/5FL2C4l-5cTl9wg4CblgIxzko8hGsB5URVA_yTAd4Nk.json ================================================ 
{"id":"5FL2C4l-5cTl9wg4CblgIxzko8hGsB5URVA_yTAd4Nk","last_tx":"","owner":"xGgINtNQ5femfxJeFp2E1hvVIL18PrzBEvPfiXb0xgwuCxrlQ7o4p1CbOr7f7Slaza7vyiXlXuwcx-Fv-HxmoLHckg6VsNWgnaZpz2EP64_aANV8dNkxVpAhBINKWnQ2DntwHFx0TPHH6Nro3s2HTlog43JTEn0UhrBymBsjc_6zI0grn9cxJtbDzhHx5O9L4KhyZfSUAu2x35AdTZMWQ6ZTiea4BPk6lBnl2Wfcdj-NtajRwmzOXAo2S-hZaVQzVc1Q5g2yjE0P7B43tJqHrRCTxNy066-ws5BcJFFl9FjDPPAF1ePYrK0oTR61VqviMmQmjN0-sWy8Ditb7bMaW59N-UO6bmr6bYet5MFC9TYLoDKEiceWx3IaT8oAnBgm-Z46_mPA6ClhrtK8U3yUOjBFa6MNQiXXF8HHNKYhgwXwhts5_Gs92zfifhJOXacxway2aQCBB77kekTziDEcmYbz8WdhdCm4VzomTWY8qeUCxFTIBNH62zn2na7PLlrcYH8uVM_pSmiNbfONVhLJ2azbVEvKUI3d02yCFW58KDgXp_N-kJAcHbVhkOmwSFOIlRAuk-TSKyELtQQ8MsS96fbhLnP-xgv6SzKRlOU_EfJSyY9UZxKd1ERBvzjU4VVBrtbs2nmUyovQ1jyFvoskPWm6_x8FKnUVKPf_TAl9vIs","tags":[],"target":"","quantity":"0","data":"TG92ZSBhbmQgUGVhY2UhIEpaIHdhcyBoZXJlLg","reward":"0","signature":"KgJjBhWvl3TI3HgTLFFyMnxjahAnQ5oDBtcRJT7CiYqP6V1tsrWx_sMXhpMGznGMDWKUQep7gWy4AQZwBRa51O_n4L90IRo-kZXqHMs6C2P2s7nt7cUI_JJN617Q0ZiCI4Dn-WdMAWW9AmwIIVxESLyfj_v0fG2AIUCYJsvQXxZ0_L3UEdEGOirQpzPcrJCzmw6Qpu6VFJyDqRYBLb3162wro9UXat3jrVoGXxKgFDUdVRgOAD6oxhBFoygp2jCde7DJYuY7dkT-A-jHwnouMmVDExzUxIQzjNG8XN_M0QQBtCYnN94cGCevh1cl8YESc0ROKyjHhu0y4cL0DUo2SaX6LL-XrH1gzZj2ajzyWzpAgGfDNMNP_zgLoRIYqP7p6C-LUgB5CHSghotWC15JJjmgWlhUYfEQjVZwLHfVqvAxFud4eSf5O4WTDxDT9h0vTl38BYhf2xiIpreJsNv9l4EPn418iHAhZfsYgTlDzewa5hJd70GTABZ9418RRQEyekBZCB_S3Vcmv6q37_EGW2s1YyxeScTX_AofFv986msKzXrn4-AM2nZ6azFaRb8EHVl5r7er3xNj_Czxd96SjMlSKDbs9m3L1J0aH2HEoqtkuolS9dbg0FxsIzkurddMYrZKgIjHiKAr0Qv9vmoy0z_nHhw2tLjD9YmXoCuzBko"} ================================================ FILE: genesis_data/genesis_txs/5Hatfzkj7ivvIsUIDjdOSp-4CdkClH6B7S_SNX0B2-o.json ================================================ 
{"id":"5Hatfzkj7ivvIsUIDjdOSp-4CdkClH6B7S_SNX0B2-o","last_tx":"","owner":"3i3l8r00_IimugSX-InZbowD-IoAypUHdp5EEjOktiyShbpSszKXpjv5EWtH7G82ODY_mmY4GUXNCGM3hN-21oP90oAX1TK_YnwNl58dJvRPQmP2vmrsJ5h2OJi7TzTMhtsCjdr7kSvHORDqWtyFKTE_tYdYZiGZ6mVphwMHL_RNEVWIhobWp1obGm-FS0x8ToZzGK-yr8DiXRwVFjVk9WiUUjovYFw2Ylb5CrXPnPjuSi8EcCNBWCyPmjyj3KZj1iPCjCR1-jncrNT9fmOxNUwohvbm5ZKA6PAtDbI0PfowNcL7YNmoK9mBtWGO7WpbxObiNcZXt0RtXjEs3kbWJLUqxNI-MZObbkSEIIxb0YbyU7CVeOAZuAVKQUryv7yHUGWhEHDMZJLZJWtCzOdPsxFOkouEWWiA7qjJ0H0nDjrLj1k3vVQVtz2aGUmF6IhBFlywA6omPo6SzD0M6oCr0IsP20GsAzJDM-ed4pf8Dhv3jbeuKZl0UFSD5zjSypQ_kJI4zAWaGb_2WAdZMI1ujFzv2VVPT0AT0HNkhogpjX5uyLfsbCdIT1fYXuvKF5v7ofUZX-mEkKL3f6jl_R22T0m7xBUtOBEF3nqu2v5rlM7pnYXl3YpplL-SL1FEq0kDBcvMQFFwXRhJMEI6zcx1fj4Bi6Iv27uXlFJVqcgvMlU","tags":[],"target":"","quantity":"0","data":"R2ltbWUgdGhlIEFSQ2hhaW4hIEkgdGhpbmsgdGhlIHdvcmxkIG5lZWRzIGl0IGFuZCBJIGhvcGUgaXQgd2lsbCBoYXBwZW4u","reward":"0","signature":"ionJm4R-YV3Z1R1hyBM4wVDcyMssGLazk-azfO4amXgoY-ligzhad_VAcE7vFNC1Q01b2E_LuUuLz7z9iP2iLOy_aBQl26qCoFhVuBwrnyehtXFT4PfiJzIM3ccDWFPdx1R5PmEv3SF2ZZX9cUuY0xr2up4rn-TgSzMgaPHguwc4ofm4WzkAuj2jvKMhNBQFMwY6VJZSTmkOiTz_gGSZai4ocU9iEzwiaqjTOIfP8LRweeO6CMENsMM678wAFlximpg_p1v7LRfZK5zlO0Jerpr-IpXrfDZy0TsSLNPfuZzIJI_RoI39-KDPrA9v4x31lVUPyKO3kGQ0WIaUf5jDsi9UdrccPuFhWXHGhB66FUpFzZB3zOGzy40NHdLFJhtAUzjn3zb3QgYQZGdsC2sTbSoCzqilxDAQcmzMEsCBgNl7gRVBctzXH6WTfbxarSL-NGNtGy2APtuYpzJ2FgqfkRwZUt0ulszm2zPosik-ybCwzCCCu3urHwKLx-cAnacz2kKwWSIvf_WLdVx93lMmduvpfHSkY76lIsND43OK-YHZaMd__ISrM0hjkb14rSwdkHnTVtfELwm8J1Zckp1XtWmB2G1pt24MMjI1BrpFN-BMO3slJPGqQkLFKI5Ab6iq3oSQWMjU2LiDkRA_FRrC-2QJeu8g0dfX42YDcviUxK0"} ================================================ FILE: genesis_data/genesis_txs/5OdjYWAipCjWzpqfNoNhyJ673d4pRMNva8la_SFfu_c.json ================================================ 
{"id":"5OdjYWAipCjWzpqfNoNhyJ673d4pRMNva8la_SFfu_c","last_tx":"","owner":"yoZeHi3l28TSC00PaPnMyk5l8B89L_1rnPDXN-L_-cfowmIYKch_rxuw5Jh1azBbb-EU2syGtg2tU-E7prH3gcamSVoufWgoyQQI-Updr6BiaVk4mP36rdh0E_kfg2ALJowORyTSy9okM5-KCJ_xXjbEn0AvvecV2WRXtMidTwWPR9mC9duCX0_-fEIyZEWMUD4_IbtGYpIeiWdPMARoLVeoprFM0tGoKqdZHVi3nJqXFjdTGhtX-bH9377cgCsbSgNIUZaAM868ETkHpySADo5KTJ5rSTKry-bPP7B66NKvR_iWna5nUDR6Qnw4U90EQunkaRHk1LcbUFu-K_wxSXcV3UvRzuBYmUOiAXpdIgj4wGwjv1sIpei03FPfzRbtj1UFc5OvF8K2GVZ8PXTlgClu05uXV2hMtu3JaBrPAsjkfAMR90Uhk0kJyBBzkVTvNmSvs1vm-Eyuc8AKQQhqV5ciUzBasi4XYWfBxElVEFu-hDOOoDK8LSYt6VWCkHtvj3BxhQdtiuuDX5gYdUvoyvgLey1MjkauUr8VKShhUAf4j4wG6U_jyLD8z3bIxIf_hLMZ8MEiPeN3HNr60wTarwhVrHQpLP2gSTO6_p2iXyO15MG7D6Tq9-ugZXCGjGCiv2ASkWQyAFyaf8SetWLZ0jee_dn23lWlicfiFqdP0uc","tags":[],"target":"","quantity":"0","data":"QmVjYXVzZSBzb21lIHRoaW5ncyBzaG91bGQgbmV2ZXIgYmUgZm9yZ290dGVuIA","reward":"0","signature":"NRNg-fTM3QVeeRqjfudqUEMY6TZeH2iapbdt4qT7THtsRftU0wdb_KwJFPm-couU3lmuGFosc_66Q7zS96pLC-mvgwwfNSgxrbap_96ug1iLPa8OO5h68zwGifVv43KPr6fsOmPrP5aTAAZg4UIOmf9-zmopoUD3QgNtF6nT5qw2dH5rNUWOPZsNtmFX71rVM_-WBuLItjyQvrP8o6udpkSF_B9e45-xoGCaMu021g_RoiT2ZS4Yve5o355v5jFgnlkHkdesHaet54ab4KWEjMgL6A1ltzEjWemfLSyKwwbvrBZ8CroC1jHz_5oDMa4_2eMwvF44RVM0KnSfR-o8-YY1SUqlw0SSIibGO5MOuDX_WufSBWBAS3V87U0zcffF5Ip70cZZtQROw1RdFQBOzqVXX1m6I2YOTzAYPTkkuUO2q9fiJ1EteeH6FPhZreU4U8NpJAeKwKpEoN7RURrWk-iopwT7Zbupo3jaZV3J57gZ-9lUC_ejL0wLAOul9AXlBfhtroRLBfpoFbv1mdVblFeWp9KMwrHtqR2agnl6lzjLNR_bwLG9BvOlkd7U_bVGWSFYILUgNtmgB5CDj_zHYbBy5QYrJCD09peBwH2-QnRt3rLPJWhg0b6YASeSMuVf7fL_IhmAuO0hfqStBzVGfRriwF1Sm5lYRWbZPSgJ7co"} ================================================ FILE: genesis_data/genesis_txs/5WKzIeQrDGC86IQvl2NhRtgPNKHGRA9oyjRByV1F7p4.json ================================================ 
{"id":"5WKzIeQrDGC86IQvl2NhRtgPNKHGRA9oyjRByV1F7p4","last_tx":"","owner":"umt_O82prxNim0k3EwAyX0-b-JYBF-bdo0LgYcz3gwlakfO_Kf8XK8Nj8PlQDDOIIj58rRoWtNem2y3v5wjGwAPx7Ge1jFL68d8SMN_ZfYZ2vlyz-vOObJvF8QVq2ozS4CPXsLLXad0dY5_s6K46vEHd2YkEOMo2yvoH5WQQTG2JUPwIMQFNIxCSD9N0yqPCdXiF3tDgkOfZ8VJ-shZyrvuxD-rRR5d_-GeusTbgI-R_Ip-BDwtjRIi_2lVGZEo4-F1JzDt_1ZizodUldftmrp-5Sr_4680tQ90Pa2CLMeu0DgOMme0OzH9H-dIWHwBoB17dhBINSLnL_-7q8U6PEXyvKRHIMAhuV_u96WxdCkT2bBVIbhJhsn6c7RjfWhBrZhAcl17mrshxJFpQo9LE9qVlzLd34qVgzfFyZ4oPVT8nCFWgYT_CG-2icIvKH-t45astMJ5q4LqJxCNVi6ZlsN6P9H2eO1g6JUrtaXhK4Qp6tS6KkA5hePQalRFJyfAIPBSjXgqHF25V3661YF1ueH2zrJle6SmtOUDaHl2hj8PYlb5ds3QLAQJ6ZQSD7fI3IBSTEv_xzdLE5A_Bz3JQbkqbtjZS7El8mnDwsmLq9rL3h7kzu4uEGqxAs0jlYvpZ9Q902JMc7vdbnRbBlaSS8t5fAFAkW4_s1Cb2cp3_FEc","tags":[],"target":"","quantity":"0","data":"TW9yZSBSaWdz","reward":"0","signature":"Is4rdH7ECVf_9Cbwe912z-UC6xIfb7jLxE-8ec0tzU38LPxzuWhH9gBwvye9PSgmjcmqSxE_c5GXs2thjJeJiSZD6RBIfBDUY0y5Ot3IKrrq9wvIkgShqqjqU8YNW1iMUhIfS01NZYfY2px4oMj1ZH6hSG09VIYyrqgR23BHayeK_MHozY5FHBhoKSYJ5N6x1Hx5ke5rzHM1HjH6p35B5IqAvWwi707lMhpBoxSJYHt2gU_Es4zraGFdAZt2cg1_yCQ_TwgLUfwzfoiPPM8sNr-d8ueFtJae047ZodEK4awZCNA464QfCLRt_AkU1ykug9QSkFDpAJIjlSTWxizpKHGGPz3quVu9v9cdUsHcKDCN6YhCXSxDbemhMtM5cA48OXAqTCDVLQp19YeqfB4hOpmsw1ZLgyYh6dwrfvrN0xqjRnabRSfwbA8xBPNPUsNRJwqwpNt1MP7InuOOWF0M54tv6kQG2SK7YdVMmpghyhTff2zIcs83YSE0kQ0gvZp05k33rbOVsfaPzkR7dzYNEnQtUZlO9qi594MhyLXC6ODuxzOnqfMwptt9u6zbCyDQUm1E6NCCBTBvhUVK43ilz4-vyPuU1naxTDln9RlS-aE8ydzhxKIOh2j6i9I1AQyvE4437EOFKE8LLFfLM3QbDWtezGis2YJxI_QNYOh2lL0"} ================================================ FILE: genesis_data/genesis_txs/5dsjbEwH2r-EWCkfOznV4JkCOLSK9vNY-0iqPr4RZUM.json ================================================ 
{"id":"5dsjbEwH2r-EWCkfOznV4JkCOLSK9vNY-0iqPr4RZUM","last_tx":"","owner":"xAYyyJbHLkGmfza2uGSt58gNFQqANaPpi-WDAviIaY3ExtEQC_RQWBnOWwhG0o4qoUIFNE3sR7x_HX9MOYdtTjN8AAwq0vbIj9Aui8H-ka_EtHZZzYR1nCJylfhPtg5eLbQl1nYenbqDzR9My_hoddC6ZB79Bn-fWutVtalmvGmplpsth_ZXT8mn9E4yTqNrkD4yU3N5QHIv_1nZnktywUuwqpKptP5F1-KE2T2CF-xrEHqKYgA3Km1FO5TN3v5LWiAO5mS0TgH5Z6gZsrwbhL135f6vnZx-3g2xmzfkijI7vka5mVZb9HunKIEE1OVQdkC7jcgEdx5e3UBkf0rUvfbkPxEHptxdgboXR7mNodDS4Vx7zK-oPqDTABHialMoLqezjCcjJu6vKRWPfhiDB0YQ9Lyj5jpxy-o4RkEGF0wy4bcmLpSQw90kbCUTj_xxwrDljMfT-t224aja-baSw2liG5KrfSk0CEv6c8_9EsjY4rD1ONMtF8yN-_HALIAeG04vyQxAa2W-jGGoY4ih_EscAw9NL8Zb0qGNUFblh2ijjkbpMvd0K1V4kWKOxvI1li5m2JPyxNXR4xFfAHmaZJTDazLAf3jpgmv71EetA0XILPt_Z6DUTGRq9sSPgpcu8QmeRqk5P7XJqsnctNd5eGvpQDGm46OpaPjQ-a97Ihs","tags":[],"target":"","quantity":"0","data":"d2lsbCBhbGwgdG9vIHNvb24gaGF2ZSBlbWJlZGVkIGF3YXkgYW5kIG15IGxpZmUgd2lsbCB0aGVuIGhhdmUgcGFzdC4gU28gSSBtdXN0IG1ha2UgdGhlIG1vc3Qgb2YgdGltZSBkcmlmdGluZyBub3Qgd2l0aCB0aGUgdGlkZXM","reward":"0","signature":"qa5Zz1CpWfp2vO4AKehpValDMaGd2Acc6mUZ4Xj_ibK4COUCiUu7bSCv1EW1Bg-hplxuH1zlKvmcn_p0qwG8npcgl-E_cWrkdvrnyuoSSzHicSeLHKn4S-cV3T3ERbEpRr0tSNF1yZrxk2r3DwEgxNu3hDdqtI2ioWWQy6Wwjaw0HKmlzrfz-zw2a63aFCllyVzV_PJVhfwn8_lwfdzHGdtWXb7Fw1-Wgq5OLRpNF59G9c6bmf4jE8EXsG0vPTmsOc23Aw2x2O_gB9ZEL0uhZXJ7eBUM5lJbNmh-s_AUB9PM9zaMF9_cyxJ6xutk4xq-hCE-ejWWh55HP7rzQ07nrl3oZ-LOFonICgn-u2vWCJ2gFnNeJ-U5gqAAykS3gYzBiNGLSH4e1ade52140HGqt2vEljf-FXUrInbJalknRvU8-o9OTphjnG-76r8JZYC9R-CyHTnS3VJ4ickxIXwELAzgnDXq9NlxGyDpETgFyyp6b7nJNTJ6hYsxw_8JOQuFraY5pOuVBHNMneBV2my5sIjY0xtf_a-mb7ZPhzhPXCyjZq7XODIijTvuxA7XUvh3KKugTNA-XvgwKGRu2unIl81MqvbYbQdDt5Zixoh2OcFl9t37RFJBPqI3qEoMZqd7Irs5HlVPMKEa9H9Ar5VHVflN7aSrMpz3OdlMNtYetxw"} ================================================ FILE: genesis_data/genesis_txs/5mt79Uz6p83vdLtYRiByyWLqLI2GZBeSTutDRmzw7tM.json ================================================ 
{"id":"5mt79Uz6p83vdLtYRiByyWLqLI2GZBeSTutDRmzw7tM","last_tx":"","owner":"tgYzNX04Yk7fsisAq5t5Cxuo9Iq2o0uZwrGZopX9T8HL9mcxb5zzFpeWeuxWKyR7tQHGoNVvj3Ak7SBZfpfot5RB4x0T2DOEGGMASrQDIG35CvwQIiSDc6UKwTfQ7b5ubwuLYEddfyr4INTQ6s1MAunxJL0eTzUmJDn3lAR0TO6qYCJJF4Xagato6TU8TD67qx80T4bx91ubYvpNmMoTOqYZpRqyBPvnI42nNxeVmBQCUNvvkzvUcBPNTnWn72O0mM_CgCZ0RoX0Up0GiPiECJjgOQLNFAyz6rN2d85s7iXpC2g8gIME1WnBgLgwoP8OpFCYw-WSYxKWJWudDVhH43wBoGrtZooX4FDXMKH96aYCHDrPsJv7n_jgZWrkf75sL3eajGw3fvBtx_y4FVwdGGd99XTluiJqGvOhentXc3Rv-F_DLErd7ThKe3-Ofrg-CESH82--E0VFLdI1eOY6Ad45QI96tnwjQzEgvlLgxzpaGphhCoMgnDEVR5D1H64Z-7RSsOXZ_wP-JTxKAuAV77Lq5i4BihlTtQaDFXDYcO5KWX8Tzr8eoTf78g0ZaFfoPv7FflGCpioQwiJNrQLcgPSreVqamhl3nrATJZvabY1XtpYTGgSrqIp2sA6l0D18BdX8aoJ62tFLzPPCMxXJbc9hdhIamzSRmLj2bljUXD0","tags":[],"target":"","quantity":"0","data":"SSBidXkgdGhlc2UgY29pbnMgZm9yIEphY2tpZQ","reward":"0","signature":"U1bZxa6z4R6s69-rN-Z5rSQLm8JnY-ld_jn7rDoBb-2OAHrUpVoTVS4h-UO6qHBpY2lqleYn9z821Vzz_kjw1sF9c-eTw1VzIrdJwoVdhXlI5H9oEfmgnGpmcq9xI-J_j0ChPHzz0ecNO2-jhyt5MoMePuMqBp31EM0h7_RsqAPuePLf-RnKPWSsJMXbT8a_KAigsWagXphrH4bIKUuNp2pjyLX9P_awKtI7kaqwhwF9HI7PfFfga6U4Ans3gni62FHhidnVQMcsJ6W0A_prWhHVrtaM8V6Z-qWLGLUMnaRIWJOeE82Q0GSSWEJZiB_1kb88msSKwKUJ1qkx9aZfQS-CW-oF_SstRFZrMnosUQpS9wj7OLHqtqSHTcnEG0S0RU1ZvaKkLZydwbbVu17Bn-F7qqs_SpEbEa1K1DQ4cj56SXGkCmU7Qtjp0n_7l03s3gFKNrjj97_0q4-Z1G04DiTNgkEr3kIoEbuunxR4Lf-2hzA0LfyHHwOF2avdwelProA1xjSZhfsF9Ef4V8xEwh7xJcMHoB5lPYUzyjzxpZLw-_yBGT2sfwfi-mwSdq56y35cKhpv7E0-roMwZqWY8kuUZGZxCpPqTm-Skno4Naaz3iaYB9ki60Nvd-D9rglP-Vqe1KubghKV-QAv-rAu2EnpXRmOZNBrSV8qMkBSwBc"} ================================================ FILE: genesis_data/genesis_txs/5qRekKepIlFbUhGMq_nNy89bzx_K44e4GmUKYAe9MRU.json ================================================ 
{"id":"5qRekKepIlFbUhGMq_nNy89bzx_K44e4GmUKYAe9MRU","last_tx":"","owner":"xepWNZSy05zd8lt53_3633oSfQmqAdmF4Dt-0_754Y0qaqVdEzBV_xpgiJAsBMqJw8Iz1Wfpl8bnBYsFEMcuUmXbKD3yGZYdqZ8DDkuOO0BdLwAyoS67epIoH-i325hL80pRWOYepxIEMiTdY75O-Bfc-dnT3Lq6ABkOQ1e4buSn4SJq5GoaNYgaGmnN-5vAUTTIESaAqTwVKpDaDoZOeCdmSShK-bidkSP1sBukI1yVALp_PJAfncbl28iPb6-ZOkLxdnICuug6QU6oCcqMChYMNbF42vA66DUkPXmFbOL6IAqBWM8HenMEUeQaEkBa7ssP9jAdGMYiOGnypYBMvZyioDf2hVMXfZee7eTYNsNYJMsNJZGduxdJULJaZzTy2l8-r14KzJRGnXgNG3ATK5YPoN_yX3XmtbDlAoJiI2yVm5WD6pmfraF0GjFeWlqydJzQjL9HvRPwf25GxxiVzPin-YY5gIDxFJ-Zz6VGUvev6Ms6zSSh0TxIg_by8iJIDkyTwQG7DN3Z7wjIhjeKsHHhRFBRtzHcMWO7Zsv09B3QPXEuFsCQuP4xKkMAoDGanyPHeIiDadE3WEzo1TGyKRwrlqLDjv0OM3yaTnCxFNTU7baCFK8Jn_LuPIX9GbywJvC6TrcWmbUunOmvAcT-i3fqYPTBPG9fSB-JINcTd9c","tags":[],"target":"","quantity":"0","data":"SSBsb3ZlIHlvdSBTYW5haCBNaWFu","reward":"0","signature":"TrUTQ15dW7zb7AroEYWKR2RZ-C-egZuOgKHPKygEA78xvv-YkedgQGiLIhOx13S0VuFteVoYbMIPba1zeCm44MQhE9F7Rj6kCmqI28D7L6oMMeUK8uJnHwVcWk3kZaxK8KcaviTgr0UOfecRLADelpY9MyU-bcXysNjlgwLwhfM6oU4jUdiJvzpQ2OjnwIo8W6qrx1QqHBkwe3yargj0JepVXZb_jXMkx8HgHlcARr7sYcovkt6KI6vRsLN2yIDL7oLGyEyAoJGD62wmv9cxJSdhqp0HGigqhvgDxepgGXeFMENYhvTFdK_BoTqPUJ8GFqMzoo5-NJz9t-cCSZDr3DZpGpa8AwJ0rO-Hgtz4dxugwCQuBj-2b-OTHVNXVvxIXdIFdwmCqqZdw-2ykldZMewa6o8db610u1gxbR2w8aeJG59jp1NOXPGJIYM_RW4exHVxrfnf04Xpjfv363MsIGfLC8aNp0eWijBYz8daGUdXv7IXyhvRwyJ9VUpRHW8CTPoGYAcP7HkyV3FY505mkLF6Y3eZcGdiVWGQNK3V39f3LVw_62fmHZ-A9GeczJbhv4E2ophk-dP2B1p4rxmZPr_UBllTR7qZmqfoUooIKBceEIAwH-lkkR66Ns83tA0AVTFEkNkeDJbQzNjXMfQTIwmVGf0-6xdrBeLI-A9Zyf8"} ================================================ FILE: genesis_data/genesis_txs/5ynd-L6Z1vrR7Vlyr-rkrga_Jw2ibALkIgldNmsVRcQ.json ================================================ 
{"id":"5ynd-L6Z1vrR7Vlyr-rkrga_Jw2ibALkIgldNmsVRcQ","last_tx":"","owner":"tuTwag8rVClQhfnkNfb1B_HLAYK2p1IbDMvWPnin2_lwRkFz7YsHkMkLaxcGJo9M8N1i1adyadnizF7vW5zq92W7pni552gK4dC3BrpUB6bSzrGuRnSr2y-JDRfBLVh8usZt7kjhfXpeGO39smc82oTAeJfSMm4fVo_7sbSLcEP4oNFKqAEVFXtfCPmYoN6aisN6lcEkcYB2dfZTcTeV6zAv2tdbQax1S9JUD7DXcxZBVwItjd71E-HPpYEtORmuMpTZ87K6DCsPTNdbHCzfR6Qwx3_jfBRTCDs-GAPZlzZtIHHIdRh_cGYpTEjRljjZjFtKz9v12fWBl7T1Z_IbhJNuKWO2aenjWFUIaKjaoKIj38ZGF-YIzDbS6CPr_sZqlgsgQ1-ho6Nv-pKxPRZC7ggViZDLnY1Ch-zxqhHzPLpuGGmbvtcdSdv7yXclCB-_a_VuOlL63p8FwxohUKDPQ3pVymKWYgTwWjICcQBi3EBbXKxud9DL2jM1r9vVyKN-ZhixYNpB2p4mPSXdolMFcmMbRZ86_CtQ9WPEKJWlHOBpIK0qhxrBGF-njraTXTG9v7w228OOiXBIh1Qngbh-dO9asWgV3rjW7efTAA6NV5_Q5O3pdblaLCzrlx2e1QkoqdQRtmvwMxCWj5Y9FjgBHSp6l-h5IYJMJWcDMfBELA8","tags":[],"target":"","quantity":"0","data":"SSB3aWxsIGFsd2F5cyBsb3ZlIHlvdSBTY291dCBhbmQgQXZp","reward":"0","signature":"ei_g2MzLGEsoi8nZ6f75r32PbMteP4i6Ot7MPJ78WwWsDHJUJfAIcOFpvSNnDdqbHIJsXNYL5ftAdWq3lJ5H4Va8MhyWSsFbRMAnmlcoXboCmAikoKVJqv3ymQJ8iHCM8aEAoHv6CG20cV6Bep3TDl98mVTiOESZ5RdsiV2nCbJK2-pNgqKQuiwlmSS7LG3d6-3rJUx2zZ3rqGWOnS4YopV_UmDhFqVeyBOfz_j5dLWcrd5gN6SZdTKr4ZXPheyiqwq_EvsJ7OKcAt-NmxXK8qOZ3qUiIVJhb9gNe-SHg3Pg8poKKJHwLKsRP31RhvKqK2hyVwHqpb8REnNxXAqTAsuqdIhAmUFZsfn8KRJMmwJeRcrYeenFyq-fxjOUTnPgfosY-3ejipGVLljatPa2aBKlXzA0Y4cWqFIIblpt9HK7992ZOUPZ9vg4WTZiz7BMjXk05vY38k72ZPFvvtoxXNytFbniGmlmmZSf80rQqnKy5IS0-rjSfdp-fGkUX9wACVTQZg4nrF-bCrS56e_qXHTP_c3iDmpa1DZmCc0tE0hQAPF9cCsNMnvFifGyQR823k5KWgVZuKGACwIGNNxQCFtBEGM3G6LeqjLZkP7g65eJ9nFLvPvK9JfDJyOdV6ZqrMBUW5V74PwTuMI8ecGhXW22Z_gw2bQfh9hI7xGFQRs"} ================================================ FILE: genesis_data/genesis_txs/6GNIVQ-23jPJTxQkQITbSKE7SYm6J3MF4qbSgH3-AXU.json ================================================ 
{"id":"6GNIVQ-23jPJTxQkQITbSKE7SYm6J3MF4qbSgH3-AXU","last_tx":"","owner":"3TorrdKH4wNbzPscxBKf26k2D9gyZ6NUJG4ltgNFCtxDPG83Lv3YSaT7l4m1uUfndLDOIkuiPpzykT9r8as-7BLJwhXFf0YXKVO18kxJFE5JRWYtFKcE1anzsEPplTMUgg48nyM9LJV0cnj0TmNZIwjCYGKXaw5R42UMxHyaIS_OxUx2tJpppZIwJlF5jFU2bltDlCDRmZOACk9uWz7AYEp-s8LGS_IOGRaknVyGyINlThfWfx1-q1Ij4M9R2LNTwvZ5EUgVvCCyS92juV5yXHLwzq0o-xpwp9KW5_7jhyYoyEF6TBZucmgrTbJTLSpJV7pTzm5UzJ2UhhXEo-U8CWpH33ZvszN_DY5ZYIiSPZNmoWlIkzQnMGCONfxvvNejYdyBOVzR5C7zWiC6SE51zoh4vudM8IP_zo0_DGFuGLFivt_AUuHb95KYphqEyRE7DK6YF5dBL1LQuW6L7oQvOvKhDsinvYypYj9cz70WiL0YNyzAeovzpvnjJ5KZQUHpfdvnn64g_edxJLdIv1RCATjzGnbGAZ4WzSbpw8gzxNItS8l9VjmwffDdLSYz7DBxa4LAvGjx4S0WIK-hHkHjDrBkE4LKAXbKMlAQMoPvDvgzwXHiMuVDMyHATpoTiNLoAoNeIyvsZCkfoYseyxhQV93R_gkeH8Gvn6coXxElBCs","tags":[],"target":"","quantity":"0","data":"WW91IG1ha2UgbXkgZmxvcHB5IGRyaXZlIGhhcmQu","reward":"0","signature":"0T64l1CqrPSoXQp6EBOZktyV9ZchcVOLMzNmM2UALrRNIEGFoduP0aF_hp3Z2byAwmgKtAb6rUgFstcUu3c7ZOAc7inchtRLkvSpDFbud_Dy1KqVbbMAwzCFiMGqLqilWCwqGZv5HLSvxLHYTerOb3InPwrd4nCBpGkmHB0hFKxK4ZwQGxxEfBRZNL1B8e8nEGA8cr2GnHfUawPszLU5usafV6DAaQgXWz8QG5K9WFgN9QupDH9SIG5uF6uFFvzL3qHl9qAnhpf94e9o8OCOtPjqS332UQc1PifNjqMFgSZqQfyyf1VrDl4dk1e-CI06QOcghSGktWZvu2Y-I7_OU9e-ySA58E2W-998rkASE4TgmmKufo6K3_EuaB-N9Of1DH2HzciLaTo7pi6QY_qAGbofbExa3nTN8YrRTBVZhqf-TZwK0qx4N08jsbduz49R2byI4bXFK9zOlxTlnTrwJQLiDtl8W4mf-TO64SuiMUgHeMCYKJKwwuyDVmfggN5DD9tRmb8vHR_5OewXcgU_-pufqwiucbY2shcz9NswqaWPgUzU5Geuz3GU9OGt_o5b8hi5GND8jdKVBp6HbILX4OJiZhnZfB16gDtKRed9pE1I7u5WiiuHDTcbNc59D_kOyo_Olkwt1mj9ecalT8JZFBWJAUkXUdQxcroyCc44eaI"} ================================================ FILE: genesis_data/genesis_txs/6J1sN2nhGpqe9iJwgdfnxxCK4af88__HoEG8MLeqtyM.json ================================================ 
{"id":"6J1sN2nhGpqe9iJwgdfnxxCK4af88__HoEG8MLeqtyM","last_tx":"","owner":"6AfMzYXd5Ul06Gm-SfaOFJlqD7qWBdHVT9nVdh3LxNaCjBhcEcd1MNV-kPMm6g69f0QPQHahNgkNxTFbctY94Rh5mPlS_HH8wUvu7gCXEjN1llZFYyxtqBganVdIxxZfzU1gVX-od8bT7Y2EDtG427c4XLH1ya46Xs3rh1k7gAi9dn7QSgITt9_S1cL2cylm88ejTHibZR_tarn7wMN-3VF4VmD7idn38QPgKBOfY8ZmLMw1EG-AcvLzlQDk7eaBU1W0WqkGr-5v79P_j2tDlWwPY6jg9qDRUfIt1DgA7Wm-8IeIAyLv3ikq5VfOX3_RCpbo9s2E9FNXxIYsmDjncK6jMXUfDzZ97F9bUkV4C0sqKuTZUN_5Tz2pyWHLWmCpqjOsi19p1VtdeGp2L9ovS9Vpkh0Tdk4Vz5JyszfEkJbqli1dd5N4qMs517OmbFLai_BToSXvDEBtqv9iPzXeYriJ2W4Ky5h2lhozH9oWdBRfqwIGDh6n-rODHR1Jwsd50wfge8K8Ygl1sMc-VZ5dbbMFrWvAkEb3QPT4CRF2jJTvuTuUpXgcvzTc8QLAJxGqHctsSF8RSyJK1XKaszw6hB3yAw9pMK6cyGttmmFdYmkZLTJeIY-s-9Jt6n6gKVgVDI3d9XuqRNgrdiQsbfJvquUNnQdobTgPC0puPKxuj-s","tags":[],"target":"","quantity":"0","data":"Q2hlZXJzIQ","reward":"0","signature":"rIi7iSK6A4VY9xhpy1lG92W1gwUJfajWSI4xmmQOjlDkGfUF4noozfGptiU4boYVq2cTj3fpzZGLChG2YoFUG71xVt9R1B8PxKeM_zsHRzkVaXtsmPRX5zDrOvvBdXA6Hjp2YUhzF7Xjqtw45uUbI3Xyc2Upu0irXDH4_CCSorpAPpN_bq8zPD0Jj_cJA-0AvduiFNpWgwVN9dEAu8d-bxIR8ZM7PpmswEsVqbyEIs02M0rcCm5BaVZopIxW0M4MstFltji-abaa-XJv8JiROBJ0w5t8YDMj9OtKexDPjB5hZH6z3qwcJ5Xq-mEQtgyyRMfe0EiBUY6Q9i0f31Dc_pWGcrB5ZB66I7ToWGSI-a6zvyBehMfkMwM2xqFqOZOkq8A4y-IanbKK4p4sltlYDlxcEvtUEuRzw6AXGzROUHgWA5B26NZfC8n1de4coNYnwFMxQ_zXi756aUoSn2YXsdA1IP3eOD-YP5Zc3EZK6IVAHE7ysRylEpPrAE9wZykAYs7xxkFXwSLyEtR08bonWZ10kUACK61MZpbqZwAL1uitjMgE0CpZ7MLhz5DoPDBWVwxUix06Mbt6vR6zBLopGDQaCs7yujykva4aJM1lO2AV5IKlsZ1Gwfv8_bB509qnjwpwE6BUrfvn2n3zf8283AOVX96CXcuDQJp4O9kPq5k"} ================================================ FILE: genesis_data/genesis_txs/6NaT-Mz8QAiQS8atFaOu_ezqZnfu_XaQb-Grng-hvHc.json ================================================ 
{"id":"6NaT-Mz8QAiQS8atFaOu_ezqZnfu_XaQb-Grng-hvHc","last_tx":"","owner":"xYn4Hg3Ghcfv6ztWqfw7uTU9OQrnMUdprDArOYgPuEz3Un0E-Tc1rwkNtT2Sr9sHyhXalO4DyBXlvUwUCcxwnDCg9r0nbPzDeVDE_TRpP0ULq2gh0t1SPPLmrhYgNn0VgtshK8zkzObCqqFjYsS9dS-Fh3gGa4hxkA_xIpIg6Hw5SBgbRHdRZ30MNsiLkghow4ONM6bP7rTBnIM6TxTGVYTF7Dqh5pSjUNmB9BqJFsmVbHtk272KgezvM6XDIV4IGpmnPBIOCQ4htQY5DRMIFkcyMvWrnafUjUBNVVm7cynJPFrnyc2wdVHwyiM2Dpmn8jssNWxnucy3XmOYLPL0FdQYTErYqR8M4r-tGXVlGBAcyXcEopQaw7P3toh5iRgCQrNeAmpqiu_fJlHYtSJID90XCgFs2YG3dWnUddnOqICb5nrnv3EcfA8EmLB-ny8rIGbL_R9L6d3szCUU9jdRucpTsremWFfnpA-jaIsrmvz0o1xgT8s5liBU8dEJPUtXQDYuEVIVVkzfmoePXAEVNg4OMM-_bKKRQ7hW96qW2tTee7y0VHuB-LZ_kCWijJKwepcEnrIkI3fBPCWvJZNvg7SR40-1yXUs_cZSM-Gtft_Pw0dvEbPX28k4uD1gYY7Z6UIXswdNr84eSTKd7bwsFX_7XE6-bw6J8MLWX-_YK_M","tags":[],"target":"","quantity":"0","data":"SGVsbG8gZGVjZW50cmFsaXplZCB3b3JsZCBmcm9tIEV2YW5nZWxvcyBCYXJha29zIQ","reward":"0","signature":"XZgzxYqlw79OcUmbddhSynL_MUb-ved77n0XTWQE7fOIzdJo617auX40ZC9ysV40VGaKuE9yw56zzENgq0T0DtsPLTtxPEDnID7-cl10ZrkbiXCpYIdY6Ji2oXMhElPGUC731JXIkfgojKq-XqZM1NkhB_lsKlxTjRywVHeyWVEqNlACHZ3lSotFVtlb_XKnhcWcW9A0YKcM_7t-RFnjhW8z9nksXevolRbo7e0A2SGIWRWeHFjnySH9KFLCm8JPPQkEqUIpMiHE6uqvysc0WCDv2nkYrq9ft2P0_LCY4FmcqiCb5Ptrd7Sm1MFPM5VHr2gn7gs6aZ-DygMLXzG7ALKZFnvremPCf2YCGTIw1DZ3bTfPM4QwAaKXRoVpiAQ-bJaCPhojWK_zrYm4_rskA726kqcesgWSKDZYLQ6EBTUMNrlZ-AbUem6SHKtXAxErW4gsnwpESqzE6psGbCFt7qWX0IvIkQ4oI087M4RpwYWc7x31KAAodLf6S07Zic_xu-U1ei4x42qzFncT4cDm5y38gxvSylHolCPkHM1S4_B64QLrzJhGe8XUGqIxuSmWjGByff55zS97qg4qa2V_A66qo2rEmPJkJhCzvKuESqalp296m7UIEExfNC5msVZy3IenAuRv9gSarg4SiMGnd1CaQOth_Nmmszsj_Pm0Zbo"} ================================================ FILE: genesis_data/genesis_txs/6YbxtptbO-sidrnYdgn0G_CiNBh-az5ZzWrSCP9DYKA.json ================================================ 
{"id":"6YbxtptbO-sidrnYdgn0G_CiNBh-az5ZzWrSCP9DYKA","last_tx":"","owner":"3D0CoWo2whTSsWBK2jKXIZUxyRcqMMdL4d4WyqHVcsR2s1NDrtGyqK_H5frG6ehGaL5Q48O_Lrqnvb1RFzE-PrEG7Mh7-t12fBGB0KTrLzB-Kam5G7iQ1XOrXzz_hHuXbtpzb_XbM43zkDY7YOGVVcMXyk7RCu4u-oK0GdudkUzrCV_LGArZXEc0iY8HWZ9cEGrrc38PAU8I_meOF6DtIq2dz1JtlNsiO5HB6PVhsPDadSzcWInHfkxhLGPXJ9oz084_4gfrK7X5M3xVr26uw6STGvu9w2XLi4rctYfJbX9fZLzJJ8fGeHKjWwRE0erRTTZxNG9tEPXMXSaSrTApqz94AwPHNx41U9erqSrDnjY7kC2VFiFVJeSqeKvvQui9-2IirHfLCWDURqUy7M_k0m9OF_EOcE3UGQBXQgnnCkzJQ-Ux5eM9-vrno7jopcxh5F460WDvRvINkyltBxA1I40Hy7nyuMPBXlJpyBN0EswIJ0M5NT5cesLZo0caxANSFG-ov5WMS47U8GFRemaM87UHH7lrtwf9llM3qwfIkGdaj1kqiBPq6ebxnGQmvvmVKDaKNe0HkGuq_PYKVGsZMwDNqNEidmzScGWiI903aemDb8twAOpozWnX3tF47mEYm2OUQ-4d5U88I93ccNQFt9rcFrwd2yGOT9PF8clEGv8","tags":[],"target":"","quantity":"0","data":"VGhlIDEgJiBPbmx5IERvZ3N0YXJ0YXlsb3I","reward":"0","signature":"agVcHwLyejUWIveBF8AQe2kaFy-1jMuVn1jnZxoTPmNxqIga9bXxljJMGapf-KIRvAF2nm-tkh7sDtfkC-Cv8Ra03wqnkInzu1H3iCM7DlykrEwH8VpIgG90BV6XZ0rFMeJapLSMslLAuFS2bfppPN_D0XF9hRsbNtAyhpFF5lM3vaJRLscPDMA99bAFOwZkdnxiZIWVDVsm27qWi6DyDSMuwMfRiB2F7u7nIGE55L4egUi6_jCQENhtdTEoXipDEM2_pdnpoiFO8Fg1FyDa61Zsff_FUnHtaYZfwsAFZKNDg3-NPF2qQjrYpTrMIIgmpZjW87CTaegRbe1AUrLKNhi_4zZqBRoDAqm08mBQfdl4h0M6IX-yCFrthwErkV8Go9LmtfioCf6e2jGYZXDPwzl8HYAEECOKTyMR2iPF2I_kil_X5EiZl3DXqlKD-uieurNRHwWtK7yAMzp2beeK8odkVzQ8wm3m7SUN3NyQTjJ3ZbIduI_OyvKtYTwxbftsOrpttTQ6hda4rkvgY3s_snc0GAF2-OCWA2Qj59Zgaqm1RCo1TGrmsFAbaN4mKJdSrZr84C9WPHtx72i76ekRF4TUtm6_tOU74AZtsf2LxQT2OwHMY73O7uvXwS7wkvACy9nZsbXoxd1-53neuYw_v0jCMonjBSkoKE8F53eQJkU"} ================================================ FILE: genesis_data/genesis_txs/71M1E7A4e0PFW_6C0gly77iCg7ykX17647i00eEiA-s.json ================================================ 
{"id":"71M1E7A4e0PFW_6C0gly77iCg7ykX17647i00eEiA-s","last_tx":"","owner":"qGrm2wCIkKdk7x0ncVlwHfQVHqjV1Kb8GF2RyOwJIUvwShOuexk8mB29VDk1xQaCw718nX4iul1o_iN5_lZ9C96NlWIP6ZHveiaq_PB3tjLiV9IkE9nC9qZghw13PxjyPprr2Hd9Q7VDSpbPMCTIjjPVKIfaMSNiRsP4T1Q8uViBXsectothQLBcBuBT-ZNV7Ljw3jT8kkKyTdfqFn7M_KOL-aI_sT8glzX-6AmD5IYEBsTSsY-pkLy_9NxSiJvlC5dMGdXeYn4gLFdPszQ4IDB6Fo_6FOEULZXJWxrPMq38Kt2uL3mj0pjV3tnljDJ74sA2fC30hUX-YmuW43gFi5hWQdnIbqw5zakZD6LDSp2bEjOvyuDe1DCTP4FCEsV6Q7UwDHxqchAVXbdOfAJT02xCkT4e-updk3GF8ObcghQVvA2LH4hMo8x8aByiddBGJUccOZlpAlxfeMHY0wqRzjomAF1shzRyyyr0CQBdsXrf60iGBTdy5jHDXWN_6FA278HwH_QEFXGllgiBpyr1zmlGYpU3PqIwLy4H0W_F895AyRO4RPy_flIO7wljaujA6KgJ7c_6PZHfoN-L8mHq98k9Ppggwqzfal5X_pEGwxMEODgW0nPcssRNvdG6M7TXEFbSp1RWV59YZOcjR1AY8iVASNewJ-vSBXBy5rosfNc","tags":[],"target":"","quantity":"0","data":"SSBob3BlIHNlbnBhaSB3aWxsIG5vdGljZSBtZSBhZnRlciBpIGJ1eSB0aGlzLg","reward":"0","signature":"jrd3Yq-sFOHhbVGDkO4T2QdMmEgr3zb7rC8xavFFn8tWCOr8Ier5F_sE-BBNswfbfBFuLolezV9bf0mU7tbuidenJxNJJQlm9w_R2lUtK5hlOGZYysFKeNAAxUfHZB3YOHxK4G12itZ1QwBf-meCHfHkFLAW9mRcqfslrh4l9puWu65TIVd4vPIPD-vdZGJPqi3AjxZhRr1W-sCbpECJMgaDMqblAsox5pbq7y7J9-x1JeOLqj96i_lxsJDa-WHzNpZXwSkhCKfh4SJKUUGx8bz5tZjLuVjBRneCESLm3mU8UYZzImN5V-CtHUpbpAUq3v6km-T23cx4SK6vUa2Xk37HeFPfVzQuXWo7DY4lPMWVDBemLi-uvL9VUmdLxT8iuqFQImQzSkx51SifBNXd86SZr9suCIhANMFZeLyimhYcLDKGiR6XYtO8PftdOv07HLJj6JqEU6tnIrT_9KCvI8e0RbppDB9_53pC-DJ7l2UqaPi6DTphFTX8SBnTgqkaoq9LVaow9q6foiuPwz5ltV_4U8Yx8IADQQdDHM1qc2se0SwBelafBD3SXCiUk6B7eXH-V9D410s1SNmn3Ku_7BIez5Kl2WVeNhCOdQlsjsaN_oDxVjOleMMM5DkseNa-l7t1-uxovLPOx8fRm_-_M2YEsuuB_bY_ItvoJ5ca3Q8"} ================================================ FILE: genesis_data/genesis_txs/7SfLhJLtevo0zu-1bo8q6zX98WbGgpDNuY6PXbzS_j0.json ================================================ 
{"id":"7SfLhJLtevo0zu-1bo8q6zX98WbGgpDNuY6PXbzS_j0","last_tx":"","owner":"1TrJEnyg2GF0p0sXt8Eu0XSjGTLeaO_4Vft1_2r0MdoxYdy4cBqFhEP1xvaRns7D3NGMR-PVZTMzd9I-Lu0HkPCX6f_g-uJ-Z-2rotNNpDHc1xi0fdufAyBQgYS2ODc7S-MKkno4oF2O9HfIOFINkom1GwS4XkZuhGD4XyXJlBX0bB1EhYuxBuX0uw8ZUnVRbhKtLAqjwFA3tzZk5Xw6WUT7R83UNYGMObetJd8-5R_J-UP_XN194xN_rMzI8m8s7mkxuFCBILuXlqZWTtfsTtR2IsnfxCujXgR_vtPtrg6xS67WeXS-LyME6pGhiS4VjfQDaDgi9EfC6cnyIYk9uVcdRZdhz68_XF5qkRDU3K6U7rzlQJ3aMzdK5zmBS4v8Cl7mCzkYNPps_GEy0e42lI6etejCd0K33S36MwyY4p1EQYE78GdrrBoOS3UEG8GGStufNFDTbe4OBaryz7GjBFPkiatcMVN1k1Gbl28oSnimPXyNSHmr3YvjCMGULlmxYvsZivYpSnaFrUu5Z-Gpk6bkD6NPSBXt-vJUUjC9EAplg1mK_ECgcAqnYxLuPZFsxaqn0ccntX1JYiSpbGX_V1UNiruv2PKVNhmjPaf6p46n3kNSYxBRt5A8QX8KQcReG5iKX8VxTFu_5N_v3QEyiFtfzo97RVPyBOvpzc-hciU","tags":[],"target":"","quantity":"0","data":"Mg","reward":"0","signature":"x5Y_OgfDsk7CfwPtxNWiki4hl2BrjAWeV6QyTPVljUKesM4K808XybnHUymGk8Q-6b_lFcCjI7VcvZ4m7QWg8QhsAH-aeGlqVIYhMefA5iSz03T5mbfYkYvYF1byRcoZIuOhPlgGh1VtyInigf6INS8w5i5ypgquZie01n1cTKBMBHfHjTn3BtBtSv0bdJjsDOMoTjvi5N5TxpW5BqL07hVppyWWyAzddfMq3nDbUeruLN3ExZf1ZDC4pmkojj9SiXE4d1SXRGQI63AD6f0qXObl1S7ISAnLC-KVYJRP82r_zmcrTPgsq-zUNU22AkXI0i5iZ268VhBuEZDk8CXsEyZPf9Eyt0lN2GE31b03eCw-Nmlpd0pSTQqahpXIMi74mdolMlczXUPVR-Q-Ez_cncTqHdllQNlC7UBvMiog2bHhOt-mBUAFYSkJaw81AytdETORf_phX3PMeHbvljiLtTicj6R0W15CK3AilLip8WbUe7InlWLt4gjOsyJEBKRFxaoxKBH_vHrNjVs2K5t-y8_VcSEI6UrlwLiHwBZrGdF6WLdt_yJTTgKem8dOoaRgKTjD3JdSM6JydqG7LNC2y_QTAMp0_Dtoj3Fibl5xrdIPSy2W4ub4p9j-6JKytkWj7z6krnfTV177qJFOuMYTqGK3Ryt_5eqZzZluflfO-CM"} ================================================ FILE: genesis_data/genesis_txs/7kT0is0QnxdjqkPi0BKamhLW6z6_SK55LMAVKQC6F0M.json ================================================ 
{"id":"7kT0is0QnxdjqkPi0BKamhLW6z6_SK55LMAVKQC6F0M","last_tx":"","owner":"04I1VdLjKnBACNeXRdWtMcXlBMjkVbyzhyjvbcnzaeiPFZNCeuGOws96dFfupfv9_vqs-IOuZALCcK6sN8pBSsSin4MvApILCb0edYkb57tae5WseJhQ8dl7JBjSq0ThU06VpC5WBhiMiFGRXuyTzo8tG_j3W_Kgs0jG-XRy3bNQj_vKgGNaj4mESP8HO_Kg_laGYAPmsjZwU5QobCXhZe1entxgbxa1AU9OnEzDG8InLR_aGBiUyRvbdbLrDZuk558xPdroQ-cT2_dZoQfnydf1VWveQ7FxM_Aqgn4z11cyOLtXBixzAKWXIEuEY4JnnIcJvC-WsNRiOjL1atB1q4r5hqXHWBGf32POjDt_sbngqK0q_Ff-QDcu6ZGCZu0Nv8hm_3_t9q28YNz9fRfhtpv45ditlHvnNpfE7ovZF63Tv7fBugpyo49psmqzd-s3n_NYzoAlRKDx4MQXqZZwqYSNxQ0Sxte22fXQ3ftgbedM2znJUij9Lnm5qxxyIo9yxz9P2QPjfp1o4MGFgqGUGG5mVwpnFsO8sVMfSxoJvEMWvKsyPhz91Fj970ciGAevbmBPRHNgUoV1GaiROC8jz_LVKpHfRFONfDk2Nnf4tFL4W7ecI3KmLPcEeTG8KA9uHz9_1gyLXRkLL84b0dLRF60r2nP_5o9tTyvu1P-Pfes","tags":[],"target":"","quantity":"0","data":"V2UgZG9uJ3Qgb3duIGEgYnVzaW5lc3MsIEJ1dCB3ZSBtZWFuIGJ1c2luZXNzIA","reward":"0","signature":"eyk8xLUY6geTpM1n5gacvcGt0lrOtdoBX7yIBwgM0mc5xE_uPO39SpnMU43UelTLdXvfKUFEjwybLhrsHYqx-YMz1D_UrI9srjcV2yrfEGVKHqoXiLPiwi70tTSoiAF11Ea-zIMqSQ0LltlXQwnHugt1AxpHKamUmwfVJdavqifHcPieDI5I4RDgGl1kqYjka60wdWX-ClyXsptQ8NB4NBenp54QmXrcFV7IdTvlVWcLvpaJ_oogd0rRt968K-W9Q9FIGRayr4FQtlrpwGBW6KhrBcKw-9kIMo6yBy_oxanWNgenQduTWYRwckJFpvnP_c8OfbL-MqoSUAxwywQOYB0e0NtCaQXTOr0Kq5_xB-xUpVWvRjOXrXwo6nwy7hCml-ZaZk7eD7pHElnK5J5f_pd4aU9jqlaPVvVJNw37WX47kSYDjuSIqOGCPCWkDfFS5HPbnJWrznseUvB2rxKR-bnmeO2MkfVv8WablZMnkASicRGGKomaBsMrURhbFyPzRIhnb26fbjYeGB2eO_RmwWRZyCLnCdpM8DQAXfj-YeaZ-lVbeuwlg73TLoLNME58Vl1rzjnNkgNUQtm2GlCjTnnK2-dfAiDXDSJ6TswT0RkKSXYaE4o1cawDNsdW1R8VJfFYSz_Y-UxL3csOhiPK9hPaVT9Qzjf1MzrqpOnJZIQ"} ================================================ FILE: genesis_data/genesis_txs/87ieWrloTFUdW7YjJqJcINd1M_PBWCzA1dIRFzF4RKM.json ================================================ 
{"id":"87ieWrloTFUdW7YjJqJcINd1M_PBWCzA1dIRFzF4RKM","last_tx":"","owner":"vxBuootRdMb_4owb0cJ5VhEwGKViOpjvoiVSBzaUSSovZCzxSs8lhKOKRCUqFc7GNbfwgcvLDi_50LqlFarmIIQoaiPNYE0-kC6hCMQNPLx06BfheQpSHdIyRsOp959CxOLzu33BkAG1NfPhsHNE_WgQa0XcJuqSle0B_unjIHsLExr_nll-5hWgn-OUBWEg3SOjnNa4ff_QaFPPaWTX9Kw9E10_hvApiJDukU0RzQp6FpxPA6CDRWv5JVDkI3Vj-EizCKBNgYtCm8JNZ51vUg3hHXiv71pFjh6uD9jnvKkOEnApnSn7o-H4vo2ReFA6hyq6qWz6hXxsnxnZEfNwBGtTIeDWjDvm-Zppp-84ZNbfikKM5s9x0o2h3RAOK0i3ScRI1WIzJqxCDroNzX_ev3CIlnccKSnz3A-5M1lK3LJSV_fs1_vpd9Zi4HPTRbcR6jP9CheHeKaDkwcpuvI8emJLNCKITHh2OqDiz00FndTXE7uFOKk8ZMhu4kcTsINST83dj3PLzMkyUBjflkEatcXygS4VRbEJZC1J3Mpna7l2RvAK_HHZ0-nuYD0sFLrniGqImDFzTVxvzBu-kjnjS52Jpkdlr3Thv3ghObsh-xUE2s_0dGesr7nITUXbiuwN_k4oac-CzKSRjUkciaQQTsA3GchCKSksM0gq-muspUk","tags":[],"target":"","quantity":"0","data":"QWxsIHRoZSBiZXN0IHdpdGggdGhlIHByb2plY3Qh","reward":"0","signature":"CDfWDUvB_Q52XfkmeLZHhs2ZsYhFFZfIlRKSNo3zCj-hoUSgp762HjfatKJZB9N1a3to_vzJ_mXlgfNn0bzHY0rqnoJKBRKYdq-3pBN2924IIZhvwpxf02QzZaTzHBjwD5ramcV6CvNnPRcvDqs7xt81YLopTlaa7SEnHQ4KFcEWHa55qt7DCcvHg1jR_TXp8tXtN2CveicTMDtkb1K9eujrjbDuWZcOzAymMl2kKnIlGPIaHBhZ8Bwt_LZ5aizT6oI-9hNIzt44mDw5CvFGkNpwqaNtnpD4liP0Yq5NANU6KoABtvGxdzgEIbfWStozrxUFcHRs2PkrUKBm3T4UvlfCtfBCo0OIsfCaz0IVHKVXsAczm8PfTXwqAer1pOPsUfgFU8pqUx2fbUG4NyMqSDjKWNbPoFwyl2HJRJPiZW47FxgE8BQ6sv7wa_2ZpGdE6nIqAUCELECqIFTayDTkpBCPJeSzsQDSusJlbA1wskNcSYemf-2SVyY_BarXSCJYC_MUjGdQps2KHSIOFQhM-uOKTyZ0gLnzAqcuE1WH4rqp0o05Jg8_fKu8ILWd7fkiGOrGasKbP_1e2tLP3bTb9fn8Lpwy0BNFct89meH9HHbnuuGBjQfX5f_l9so3H9-HJzMxjvpsh9ENbEMdmShBsRwlc0OEH0qH33AOegmlRnA"} ================================================ FILE: genesis_data/genesis_txs/8b-7D96aRFJgDm8z5Tg47vBbdjseW0rRi17TYDcaQ5Q.json ================================================ 
{"id":"8b-7D96aRFJgDm8z5Tg47vBbdjseW0rRi17TYDcaQ5Q","last_tx":"","owner":"pdkggT10Ufk5dqZ8U-YfaPg1s7MDZ3bt8SbbWhraEqm2jmgw0IRxfnj3h5QwsXjWgS5ZPOXcV9SvLpTRC4tvgayg3sPKnnCc7HtcU07_M2w2cOunQk-ZCAKnzFFKT5KQGcChGub47zLsZUcSm4CE9_BfD-BPUsFXLdo0TkaDpOEcchLcKAV2aKLx5KTboEMp7FZZLj16MZnNbNXfOoCMlO9tyCmRkUPa35WxU3bT_VQ5ZP3udkYcWVvg-ryocaiR0IBfGuglreRCE_b71mZaR8Wc1cWXMJMct-t_XEhEfccJU_GOTkbcHoAX8uSoxuiRdblz-n0uiOtxjpmZDhUropjrsmox379CPtZ0nHH9D6r52ppAC04Yuv8F_ZKgqfkrkIhAQtnGHk_HysI24eqZ1rS0DUX9oGQy6gCluAlBKkLLrflMIMe8IHJpI3BTHyZLcHIflbEexZCOFl2cO3j-X-6bA7AHLf8dEDqeJxLsbdvBFDiQaVy0ODGlHVJHHiXnNDXzXWkd9KKeJmfeTHliW1SxF_inF5KoulRFxHxgp7eDIW-PvwH4_b2wULREyv_ksbnH6VZcGuPpS5KKvFQT-Pr8wAp92w8YX4F6g1MXDwkx2IXxGgspjgb9l8CdyLnDSHP99eEmoMG1Apm_TzsfPtUChho7HrEImokdwUlw6q0","tags":[],"target":"","quantity":"0","data":"U3VjYXRlbWVsYSBGb3J0aXNzaW1v","reward":"0","signature":"cGVdswnHQMJ1ZxrQAp-uSoVmc18onOBH6-l1B3AsfqfcFTkkPi49vviym-OJsi7NIcqA5uU19WXRMBWzEgVYEUTkW_iFWHu1kD5akLOwYiOEXR-pXg7VHn29O9qpkDgc5-jhnU-ePnx4TvW4YQHC4JvIiVMlK2UQmJCoMuDj6S8Kig0S3tyZbeJsC2Bu8vnWbCBELKGqsFb9uOJ8pBAJtHbIJM2SsTE8DCIFv1VmC11SEqZdPeMKMxh0FH-GAcfQ6xYUCTYq8JnB5gs9sP3XpbmZBsXE_SqT68hXxsmbKlDrEgPtYQyzHQPvOX8Wfc2s4CtKRZizi3VQ7kXl0oGh-X9R3MHQB4hxRmq5wJZZfZ7y7cb-Y6l87-VtiM4Cj-6r9Ciwj6yJG80fLw90NL4jJc_uuMP6bEZgDYLHKmk4jztovA248rYjSOhssQRFo5UJiY08l-khkTrtmrCym4S4w1OgzfdqVWey9T3br7OI1uFSz7Yn88mX__iUx8h55Lvx5x85r3jx09KlL69YQXr7Kue2ltslVbbHLtMC3xcOIbBg_jacY7eK6_j5GRZAjIpRCZq5UlV0U0ySGVh8qsiIuTlOMvMvsrWfUEpPnGPF0uj7s2PbVPaE9CcOwOjlRXPyUzMwPEkbsM8Eo2r2TQi3kcgNQDZlsag0wY-i0EbFGQw"} ================================================ FILE: genesis_data/genesis_txs/8gTAwQ3f17PKI9KCX1cjuXCs9F8Hcdz8KyhsecKuCJ0.json ================================================ 
{"id":"8gTAwQ3f17PKI9KCX1cjuXCs9F8Hcdz8KyhsecKuCJ0","last_tx":"","owner":"8Ptdr9yfOD7wZYxgop5Mf1ueNFoF38OMppUpcTfszKgW5q5agtZOEUXpDGQ5d7_2gSNfchv3HPuVfRjU4iy0KpEI1PVrUfwdnSATpmBqvUrYPe_EAqilrSmyXFVOQJ3DvvWBXdQfyL4RcdpJQQhXzlmjv4oicFXPRepf-kzWzQ-KoQY7JRqx72EF-p13IDokYqPSlIvfb22FzVo0n4OPRp9EF2ZK0dPqVMTY6cKLpC6tpOQC-yrXvx_Wakwm89q0tGMhf0PCtsRwA74EWh6hASww3leXjGytlA0bX8qKqbmZbmtNO449hNAmhFO2OmjBPhm74p9cUe2CH11YaqwgO6aGe4mjfLur9aE1WPCeA9RnWtqBUKjn1YDIHSA7ppaXWZGqwy-wXtynDEutHti8YuFHinH9giw0qmsN7If64q-Vq3_SpicmwGt14j178PWzQIbIE9AJTzc4qp5Kuw5cXSP1vG7EhpsKU1wkRKV3BVAUla1xnBgkVvSrGwhwmfQiurFUT5drpK--z1QbfVUphICh13-oERoyTqiWkMXqSlDumxjjRfF-3lTIMJ17CdMYTs0QbNsoFm7gywqtGtJqGtiWEu2uedGFfW9a1rns75PDHOuoxQr6B78v-VE2X42GZqpD7f5J9gWSLPG5FhSL8wf-badk2r6duULd60iZf70","tags":[],"target":"","quantity":"0","data":"Rm9yIGEgZnV0dXJlIHdoZXJlIG91ciBraWRzIHdpbGwgcmVtZW1iZXIgeW91IGZvcmV2ZXIu","reward":"0","signature":"GVjeGjSI2n9LfktcE4P1Moen_zegBFYwiqpvZzPox7bRuKcuUFV_Bb7l4c8Ni3XdoIwmuBIPAY-wkICeiMqFelEGoQEqFPsp0qhiV4FbRvXoXEhyowWwx4x062lFVEZASAk4GGHTgcbyiQk5HVrhkd-FjUruuqfx3lw1Y99l_2HCXsHD-QTZOi6YkB8kY-FxYYk6THWZz5QvU2qp1HfHvHHeONMEJv-kQd6nMIIBEu4gJTDU55819f3PvBiZlb7nWaIX9Ugx4BFc-vrqHRH3P8orM5h0-zXeuiMeKLe2tAr-kaMAYy-lle5PnHl8lzjoyVxfUFTXrPYTVcnk7K_wCTEqsJz9LrZ-Sn8c1xhWqGGH03soAWbDRgXPIMqapSqsNHv_03HWYt19twLzpE9ASlgvZGzgW0V05eh515McDjtubnHVZhhidDgJsvm_QAYlw8b8T04EZHWGyKT17K89XBqZ91pL77Gnqu7lqfnDZNQwjXml1-beCRm96BTxka9JOADp-Z-7KOQ8kEsYb_vRQChCLLIJHwknQG60hjO92C2G2koYrjdTA_IOGwGW8IcEnOiz006qNmU6GsnjCq3kcU2OYvjBokdSy5uOxdzPfctimEk2pVHyeDxnxnyIWspzHtGAPQszhbDdBWZNdYZg5c-sv9dyiK8fHMuVdl0elBg"} ================================================ FILE: genesis_data/genesis_txs/8rKBfpmkPlxnnYr6t0xIpUDubdidK0Fpnois7-xQJtc.json ================================================ 
{"id":"8rKBfpmkPlxnnYr6t0xIpUDubdidK0Fpnois7-xQJtc","last_tx":"","owner":"ykRmM_NRHswZ2rjjjbWgTX4DrlWRpTw_QAgebi__p1tGCfuMrCaN0Jcl1XwM3-eqnazY734oqqlnuZwGjzIfOMHnNaiFPzcPcigz1B_3bGxjTfsAJF2PoAxxk3Y2TS38odQiJQJd_z-EzkNYhMTLSjLYjdBABWgE_Wj55wpGu1AqpZLnHrNebgr8l4ud6kEdFiYx8jcCVrMkmWyBo7A0Y8uk7ePlAbGv4zZH5-jt2_XzQc9p8YkoCyxu7YQsj8meFwpug8aLH9_nl7s30EueXpX9GaqjOB17Mq4mfkrU4Xv2DH3_z7WmV1jpmsmPtP88lnVtklZTXa1Ee0W8eMRW-JZWweN2nmhD0cGz7HqZVYC6pos4nhwswDNks_7ekegjQ5mZFsh4SOAax6dNa_tjaaumJgAbd12MT9OkZnpiObM7AZ5vKJc2okrLPM-cjt5bgMQVA8OiO7MLz85MyF9iAB09XPfGIt2zW40Q9iDFKlqasTabGDYCZgzx65tMI8S0Xg8rqpu1i49-wZBpTINi8viycWLKqFm_TBWnDUvD14MxTpwAfujo6gxYaTDzKlTV-MPPaTSEAFx_WPGfiJklhaG1B1BdCw__GmGMa8GpK-wWhIM-orCOV5Lkw7kOQSwd8rMBA2rdweWIfcjpCcYkJm15pWrNPe6ySU6WOBc7Oek","tags":[],"target":"","quantity":"0","data":"bm8gY2Vuc29yc2hpcA","reward":"0","signature":"f4Fi0ad_Be1nRWtzDQV87sy_4wTVZ7MyN2yaIM5CI7ccErOyQ5rXhlnVttSwh10uCe0yWeax8fVR6GPs-hFrunO3YmtrbxjugUXNTJ09QauMqPNnIw6n4V14riDFXkly2Ev7txjj08qVkPAFSjR8DzJWM4WZjQ3urA42PnFz4iJWTONkmrZFGpZbTwdyJtgWw7bJlHc3yiS1Jv6GjYwfowDDS5Dl5qNz7mHsi4Rg0L2ihef8i4pNSZhArq5vBmWGGHZzWSKyKj01Ji5TSlCSvLgynKfO5WX0KDSiCoxzvMAji2vbahF1zEei33y7SbpoavnW6ezjnmPsa1SDJUVyEObHbxvZvFVAAgIMaMarCV9Y5AYzZ4AkaPtGR0eac3SyNaaHHbkR4uEdvfS5ZgdWmRVaCmk1MgCLrG0kk8-inr7VYwMGtSemf3NqVF17jnzB3OjVwIGRKte6X4Cy1jEaRc1eo41Vvm39oR7AFwzYznqXTClnxMwv6ux7s3mlHWGfSADhNGNcWfG95gUtDEFoHwiCUe-OFoqUy1GJ34lgBnHE-aOwKUk735KTTPNF-7hD59paxmMGJ1HkrmPHb-u8Bt1k2Iu4CbOaWEJCbj7m3gFmh2ML4-bLjZTiDwBSKGXpDxEoE4YX3YZdSIVyD7sRVezNeXT8RtLZx9hHc5MM3mg"} ================================================ FILE: genesis_data/genesis_txs/8y-ghHqMT2lEHQn86jRXkQ8I5cLWWtKW1CQROp8mzIs.json ================================================ 
{"id":"8y-ghHqMT2lEHQn86jRXkQ8I5cLWWtKW1CQROp8mzIs","last_tx":"","owner":"plnRiUaJrGazHA7n6M6fTW2ymgM8Os2PorVGbilrdvKxBbDSFcS7eN0VdoOAbdNLOcCZD2ihtR6dPJQ3E8eL2dejufh46I-TXzF7t0t6gQJDrX3E6MZNmqGOnPoh7zHmUtB0ET0eEBZr-b9hRc_Zhyl6VOhDjLwZ8OfunUu4iCUzVaq2_jTLF1NEqWN8SklEWKZg3a5CJpVaqXfYLnK6OTtNmlr4Qgd77tVn3QFIR9MS2BDGVwlEa_VFKrnT2s6AYGbqZA2gxc_6YOQPRcpNsEbH_u_wiQAnAo4CWv8n_2Xg4YNeLxcYB3YhSWbnws25MNoEBAMqZvHRppLX3gUsCBDJg9WFQwmAxSuC7k741hG8xrA8QOiCl2y-Q7omx6LhYnUA9an8He1uH8PL-d-uiP3wKQzavc2lFeoVqnqKcwI8dnjsHbiUH0G0HVgujfVWO10QlzaZEABGFF07m7d81lj7LrW2OvfV9xfOwpJNPiQz6PW3Mn0WYuSvs58-Y7of7hsyah44j9ViJJHHKJe4UV-qeB0dw-5mYqMWBx-JDj-76L4wr1kHxZ30vPugkaY_p6NWe31jq-n5ChtaGFedzBAxoa1xqFhTgcDjapM6aFh9LQmBtlRtAbjQIWuS4zBMVIowO_JtFSLUrLF_02jFnxq6qTWSmM4g-8J_1fUEGFE","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNrIC0gQXJjaGFpbg","reward":"0","signature":"OE8vwuAT8vy4tO_TzScZNh90es3lbSfFA44nuTb3YaSA8gi4xIG_zHO7qI2UB08h8mU09Ze3FEA3nw_gJVzS3qOXa9AuDu2ND94AW2MgUpxjKNvDbt-QMxH5iZfPFqGjd_5D_5wFiaPHw_CUP4z-yjiYVm3-AKe48IuQe7de-cA6Ql9hZVr1fiJgm3DunWHfbH74XPAvUXSMHObrw_lzvtmiarvANqBfjhw3siVIxEGGAz47nPXHAZFyLwgzt3b741G3dmQ68pDASbiqhOhlQZmJqKzzy_08T8Mry4zDPrByg2Goau95X_anW48RpVJ7Shag9umBqe0BSlPdloW5Zh87oepCNLDbFO1L6EFxA0nRnm5aIurs-U5371dhFjEBcrW3cAxuMhGAezhF_383yo004ONxWj_vJ6-LB_jPKFSl1E-EJJ9YJGohjv209d_aNTQmSbAqgryuq9CfqKKy0eWX_sorXsqcuFQ6c2uBYcXbaurY7PxtuJHhqM-lOECzH5UaXgm49PHVX7Foi032mE4hrT6EJdSat9rsiqtfji7aryd1nUqv_SY8_XpVcqhgp5tYOop2KCDu9a7BD_N9sEApAG7nXa5sAbfKOKjuf43YEaLgXCCCJYTY5fOy7UBlmW5wBYVB4tPyaT-rJ17RRy7uCb9wq6n8gji67on77vA"} ================================================ FILE: genesis_data/genesis_txs/96Ijx5TWSxZmZaDH1pteGHFjIYY0aHmGWNHiMYeSYIM.json ================================================ 
{"id":"96Ijx5TWSxZmZaDH1pteGHFjIYY0aHmGWNHiMYeSYIM","last_tx":"","owner":"utMQVcfoIixH05suJGI-NYPGDm4ZIeWKVdkkG3KuVQd3php3rbF4Bkz-6gmh8cr1Lga-n_z38kHbt66P5R2ywtM5JypP29JdJ6VcHjVAB0L0JRw2Yxt007U0bJy4iRvZ7H4itRH9K9wyFuB-FiMOa-qsAlCkWX-vTSw6CRagw8_8xj9XCjirkL9xd_Xjk2H8UadqY7CIe2HRqMg1jWtpi09rcO4U9Vg30IXg5QRud3B42I_bmBN-mBFuzbjU1oELze-VxhU7GLQhZ2nBM1bQS5gwBtUf3DGEYqn4FOz4VcUoGyfPmkOuJFeVyf4L3EjDkM19zqWBy7cljZn857WmysRsBtY-u51BcLrrz7_7jmLICYp127fJ1qIAwVAh8t1i1OuJIKtryoX2_6hAoc6Q_fij9wWHOHZaMQeVovaA1X8wxsiqtPhU9yzZoUDjWKO1WZsPF2rDPCfgQwnnlF-V63ugN0MFvsO614a6I4VNNnrwu2b7BWIwlyXtZnMZk-Efm_W3LOmLtiy-P9VpnYeF2HkmdLZC5Pkr_noNZrvDzLF58XH4mELK1zf66oPoP5ADX0q9NwtCE-3OdaVOauHh-L7xK0VVqw-TxbLCXzU6IcEJocseSE9I3hi2BU8ri7zG7xdZvTjHGa49ReOPHDHIIXGOT9Pe5HzHJ8ZFkQiVits","tags":[],"target":"","quantity":"0","data":"dHJhbnNwYXJlbmN5IGFuZCBpbnRlZ3JpdHkgYXJlIHRoZSBzYWx2YXRpb24gb2YgaHVtYW5pdHkuICA","reward":"0","signature":"ZXeTax2df9R_ooaeVcWz42O29amTJPJBTTlcGMNTyaGP2RX0NTFrO9hRwaA-218YauIKCcw9G4X-3iKTfAjYCK2KM2sn2PjltU-gdhvBe2kHAdSnlf5EhqeqDJXjfjvreQbNOAFpxfefGYTqVW9qZWvlRpGWpUtlHttgcOrgAnekpMk3GPxmOp-MbnHZ67AXmyUJ_XJu7bZIdnYZXGmziUJNqvUGDH3-YKjonHdKE0wGAXf7DlL3zuHSBjdD_N9aikX6eVInXyeucTR0379AVi341NlFYjkXitoBOsRZtj_6EQoUb_976hgkKhQoJa2LsvlwdyBix-6HsbbXq8XfOBe3ol-CueVVbWm7lZRc331MbWlez-RQqhYNuvMOo5A1LsLRR_MdNre9OmUCmJS4RMyQqC6h4h-2pN6Q7qbI5yDXHN9iCsBCAEHEdxzjHX-QsFR903wWPhWsTg7HlY4RZ02F7mRt-vEoJwnnqRVCdac7HUd8v2IxV_wKJRW5PlDkRorxNYyVuZ6-zYT77Y6mVFBPT4AR5RVcvu5AxeNrvHSttl9gudGJmjVt1jTWdqa97aqFJ67h3bBD2XsCS3cMDGTOwfwvmShR3PnoLr2zbhx3qSWUWqtjVg3mGLhJhW4LicN7sBJz6XeL4qLcrNLM7Gpv2Hf4LsrggOczb04eubQ"} ================================================ FILE: genesis_data/genesis_txs/98kadyXY0OPfEZKeeZcCyQ7z5mRToZklK-D6f1a-Lxw.json ================================================ 
{"id":"98kadyXY0OPfEZKeeZcCyQ7z5mRToZklK-D6f1a-Lxw","last_tx":"","owner":"v_xIO1uRsPnNeu9cBN8icr5RW0G6CjBsgZ-3WVtM4fNzqBbbfZ9ag96wM3I5sfZjNQObqWwZU0ddRazdFOsYZ9L6iPKE5lTole1OWdlg3xsA1mdxEi45Y8jbofGPBzsUKrpb1WT3z7prabi-e4tuezoJS7Aa40oeKKWBLGBdOe2KqPeA0o2yZQy3GgbVNb1QKY54TGVV08hq8VyJTqZ2-O7A8Xgcndaz6S8leQeq65CFaipyWLoAOhG4H2j8czxSBOkEt5b4wSy1bs2tjBEa57fTuapds8MccneMryMFYRZg38BXBw1kdvJi9zcJK8GmXZXErb3xa9P80suidpxMJawqlk4S90Hx3h70VTJlHCLi0Fdz8J8tIQvVqTHyBLe0S5XIJbbC5GjjtnJdx0r3NDN4w3FHcgX7STJNtsY01xdpxIZpOdzDQKKAdLpfG56mNZnnVrvkZNqMJrUDIgPIDUayQyokKzWX6i19pTvpJ7_7bhm3YsclIyVtP3ShXQoLFAzfc3O5s0AXENBfOsQDdIAYYdWO4XMgE2HOddgE97AixDtSTCa2RNjakzE5h8lXHr0eLaRvNRw2wDxc9lbgWMswsFSPo3mdy_P25j73X7T6KRzIVMzGs5Uf1AfVrI4ingpyztjGG7aZCD-2oquoHdakoh6sE37EnfN1lMkAcGM","tags":[],"target":"","quantity":"0","data":"V29ybGQgUGVhY2Uu","reward":"0","signature":"mxpIYDS7aO0Gi4ItVt5tj3DVInBfFikSvSU8boW4adcjZfcvQXiW4RvJuuv2RbFfUSaXe2wKPENwaLMuwMHd0yNzfJSIkLsY3545QxQQ7pEmmVQtKKxt40j8GXKFIsLakPL_Z3iSvfiICOKm1blVo6gnH2VPybhwJ27xjeD1Eq4AIk26_mHnxYf4OfgivUd-ziJ6xcO4aESElC7RCfU-rYUhB750Pod0ozx5hS32-fJvTMuyCIqsyj8GDePoS-xk0ieuLAVDe7KaZmgREIYikwKZsRaXjSdan3iKdLYlnTCz4zkP-3BZhqpoDBE6PVqLH5ZX410RnPJ2EkWlkik4ycUmK_9p895yVZYPaaxkXMRyWG9NAX_hxKakCNR8_44CM4TXlWG1P-gBM2ahjs30x2Bsquq2L05PXFQgpZsw_iVosWWknOJRnR95sXHdyNgHlWgoM6lxJPwGuVgLr-NQJdt_RfCxP0n7Zeo8YnYfxGkIaY0S_O4Vrtx81g3oObFgDv-3WNoks3YITcxY-sCfggRmjjpESIUS2IE1h_D2nRRhvV15CUFJuh_oss0I7xtAbP0vdzzyqNx_hz3G8OlIVRcCR3spJegrltWnv2oSYvQaKVMhbItlPJVLfK4mgFdKLzaHjG3e6o2YG8ooBoBSpBXIbj9Ljw9P9kCMYSQ8jYo"} ================================================ FILE: genesis_data/genesis_txs/9JWfraRekKtgXiIjssn0tVSzhaCaN682jECsrKtR0_E.json ================================================ 
{"id":"9JWfraRekKtgXiIjssn0tVSzhaCaN682jECsrKtR0_E","last_tx":"","owner":"u6Ib62iMbwA_Cn8UG18qyWxqEg3HFs7TaqLaJY9SBEZ-o3SuoV_E0evwcoqRy3nNtKJoSDLZuoO9jiHZomiwm399_rojAW3vYzZq7jMMUbRytKA_FHFR2uPvlk-GdEk25M90W64UpYfhPnioAQ0Ls0QeNMgghWW7sKANlLbC3RYQNTwFOpXQMDcBcNLKxMsiny0lmx8D4bQuKNlrhJV_i7C-_-Lp986-Tw_-HTDB67-7ssY7ePoap23IJAZjzPe2mxeaDpEvsxF-MJ8p344HKBeb9M0e76d-2BRTIUuYmMVPcftZ30PgP7qDGuLKWzP2D-8rkgoCSA_EaMRIxjji97NJwOwrBjypn61v9QQc8pn4ITTd3x7yqMkl3eDMMaIO_scFgiVPAaeGBwlZyfij_ZPexqUqHnYbJpcZyE4ft7O-ciR99B3vjKwCyzZ0ZLtFC-7FliAn0FIbaAnYN8ZG0F3H0E9EzbClWhpqvKcbo_7-6swHG2pcOWmqFnBhnfIZ6ru5EjkhEP1Kz1wvB9de_e6WFh5bXKKJr_Z8pa1Ia5nDkFnHjhk1z7_-7hNn6DONA5f_NGH6hzuYR0s0J0aQKQbQENPTx7S3PG426KRyj0oeUuw_zm2DX8a4cu1g1KWN2RKZz6yYQpzAnMxCwaJtiM0CJWIOOwfbIVzyAeiNR_E","tags":[],"target":"","quantity":"0","data":"Tm90IHRvIGZlZWwgZXhhc3BlcmF0ZWQ","reward":"0","signature":"nt6We2KvBhaRI4TZcdF8yZrepo6M4XhHSrUWWteZkiDkAVcrqK-Vs7xmhkOC0mzAYKsRmUEahhlJLq_ZESSlK8D5a920qcDsM79cJcImDC6tS4q8xc8Ks9Px3LtBT8QOiTwt8GN6SLmcoOOJOIMAWuRmax6akq5hMFneDJ5sM3X-uMiP5dgXbwoo7V0NAtX2DX_I6yVREq8QVcDmXd3fZTxoImOJHTkDs778xGNdHqWcst_BJw2OFP9Xt_2Br6VW_8P_ZM4PC7w92VvW6Cl_JQ7AdyBTIhIHEIciYWunI33MiJokPzc4bTCdCdHauR3etIohk4u9Gr70T85Mzuo6fV8JX6QhLn4X9IhRGXs8npolKP2ijuJqZHWCmx27NYy0GA8RZzwWaVtkutft5crPQzwlLwZLrQ2oj9VNVyP0UeCfwWJtHtoa5rQNqT9z8MOzh36MFzOnqhqZXRcrRfUSM2fiJ8bE15nyVh5kKdFDOxkNvPw77BLaQ_zQXAGQytv_gTI8KRK9NlDny37U2RGriWK-GtJtl6qwVnx1QPwlvzZp3A3eMBU5JBYRiuQ54ChZD7fbLAnAWFuMsQ6d8fjMe9vnCBMY8mryj_TfsyeRUh7_oMwTj2YjWyTRw62oO9QcnlprwwrLF7OJHZVrB70kjhXcW3o7g2y5Y9sG5i2_l3k"} ================================================ FILE: genesis_data/genesis_txs/AN48OPO2-1mh4PKtpyoNm7SWJK2j8dF0-TFLU7Z1C9g.json ================================================ 
{"id":"AN48OPO2-1mh4PKtpyoNm7SWJK2j8dF0-TFLU7Z1C9g","last_tx":"","owner":"uDkcGJb0Rgl6EFeYrkGxHOrNESshwOG89UgaEVM3_Dp7AquUXWYRru7cFTgs4fZV15_fUAZ7JFDqcQPfFLLQMoGY1HvCu5a-Gze0tx9VqdtNTOugujakE5pmison2QJILJnPK0v9tnn76URfW_dUzsjaiAM9_EpGKhlv_yoHlXPguFG0pxbfkagUMfAX6GuwkvwkJeMMI4jwM8o8-LF6mBFhV1-HhgS--0pHFfIe1rv7_ENoGo8gPHq_90jipTEJJTC5Ez851xAuWgQcw6KVKl04ev0snVE_dOsezEMbFEXEqVi-MLeI0A5YV3eY7zsyck-NkykJ9arPdnsnIfKEeAYTUYNwDFqMNnubWMdhHMQJRYewBJGDcI9hQF7WaNhLVsIId4bZe0IxrL1j-ZfHFQfPh_YRHRDuL5Y2mLbAlCRUd-SqAjZort83ZkOtQh1C-dhAO7csdlkUgw18sLQPdywolF9IJ5AGGj-0rvogMnS71hy0KAH2GtJzWSPiG8wVL3WS0V-5fs2sCqRFnOsDfEsWVYFSIRO9BHdn3dUFDsZQXRSZ86XA3LgVL2vqIuUsauIZsAIQS4AjWNHydojuA5L57w6yYH5zDJUB_YAbEx12KL_Y9MOQYae4-G_dRR7vU3o8qbdRpbwNPdxNmTBO8xl4xULtn3iP2wKMYpgBiBs","tags":[],"target":"","quantity":"0","data":"QXBwQ2VkYXIgLSBQYXZpbmcgYSBXYXk","reward":"0","signature":"cTwC7zpEYsVozQSCpKEBBVshLXIwePVDwfKVACEBfoyeMw-l4slkMPCnIJxvhwwi-KhD1YrZg2mKDVz17NFiGRaktRSfUAQGzNyxXRNVR8OMCxRqip4Oy5jZZgOUEwZPvpsSFh3Vm-o1X6oK3eQ19ETt9sbbtpUkXJHCGvil9WleNRjw8O2QQkuwp6O9aTmGlETJ6siOxX4q0h18pSxOCPCs5RF_5RFVIblafh2HSQjLb43ezLivqDtFgt8EcUPj22ZS7YN0wDVoDrxpKBZMQHgeGKNhOKz8MxXR0K9QSvEldyisEtuv6-N9pm_6HBgHCxQibcYu-RS8hjkTy9h6jBIaxvvHWHJwGtivxdwjEF7zMdZ_IjSlRhZHkXv3iKSvKgfNhM-QnXYqIcrOCfQtmVCRqy3aWV2pSFdHjU5GxpjRl4kTghQT36_r0ad0FeHSwdkgRh2QUD-8Ysp95MYt1WNOF0jT-wNxPVScXWFJJY-7yQiUCvILxChGKiQEEdxHPH_r-ck518OTrEbTFbSrfW_7SOWl-MTJlxAMWiYTx4a9jMbveAeG51-z4AAQhn-IyPzD0hC9RSPZYdovl4Bbsjjr4zjU4Le038qiXGvE35R0BrOPxXM9V95z5NyHyelOTXJfFpbNzrG1CSvk05CUkRUuGIfwpAknSGhjvaXB9p0"} ================================================ FILE: genesis_data/genesis_txs/AX6ZZxDpFlNhoN5Am5Hi4DER4zOBGVnQm_bse5PfHNw.json ================================================ 
{"id":"AX6ZZxDpFlNhoN5Am5Hi4DER4zOBGVnQm_bse5PfHNw","last_tx":"","owner":"xGoqfv9y5LHarHCaEGqvcO_70eU_rfnxTF8gkZMEI9af6pXI_4qoJR0VIS93B0GjMFAcEzxe90iyPiw3MTK4S-6VflE9E-iBPpAeQOeF8JXriIQqIGml1K83bdo9LRvfnon9Q7vU8MEn0R7qUcQgMGA4ML2h4aluE3Y1LXMRcSbcEqAYaLyjId0hjjozeuL1ATFxH8JTIb8E_GvYaWlb41mrryG7WAYFEj8vFGLW-WEHLURYyJbKD6ZL-6Hamm1R77Zk1SjLt-ZdeqS0Z3NU8-ngfN1SUr-glJMQ-HQskgkCkxWNKOGeuUnslM7QvX48RcyQr2isCR7pruGbYSAK3AVsWgO4NxfnAaWTr6rbyPwejIJinNzXVGNcR3rDurBWG3Sus3L2ndP3-of5TNR_IcUVxr0YYbIT5vNCBWDAhFo1cKyzLsipXFuLrwAXY65JjHatv947Ax4tg1MeHiTaCAOEyziDrzY_3WqSGNJj5c7iWUUyngOt7VyExgagwtUCDicQkPWXE5IGUNYtXmCATTZSBfw3UdeY1CnFN2OmE4mxjcWVnQeTxXsZq59dITbL-SQBTygUF781snmcqW3UfVYOxZigJVnfA4TUsrfYbd07HZDpJ1SUUhL1g1HDKcAbPFwpY3UgKAsLelNusSZM0162UFxJUZdgPOmFZeBdanE","tags":[],"target":"","quantity":"0","data":"V2hvIGNvbnRyb2xzIHRoZSBwYXN0IGNvbnRyb2xzIHRoZSBmdXR1cmUu","reward":"0","signature":"H8Ld7Zn5nn1rlXfdQW2TdyWuXO7S1WhH09hVpzS8Rkbymjng-5Plu2arekIC_iVFIzHxTsFIPETfBv6am0oZUnRJAKDcNuP3U0yVy3Ei-Y9j7lyn-pf6FWJMGskImtJrAJIduXabq4rVhFwiTjQgiZL55N9sGX_OQxqKf_6HJDp3HLdUaWlmMtpzqHbvooqJ-xPFZw8KPvOkywi9hSqWMvPT6APgkF3qR68laXe_N09FF_ZYRE3rmyap--GC9-zQ0GYfvE1Hmm38sWmZoRZiANks6xLSycuff-3ayu1GEKBYu_mos9HEnmAFKKBLYEgSla5wNXJkcIhG53g8iJiJ679lQQxo4ms8exicvB8vNKfN0ADUK2-f3rDjBSwaEm_5IJ8pf_N1dFqGfwjRCl_mxIfe55zZ2K-z0XzfN8QqRz3HyAh4zR5WKd9mBNEDh8TjQ1ykXVIviZS5AFY4RqlN1d3SNJf18TSXeOOuIS-Cq1Vklo2uQyJVCmAREbBQi4aCjHzPlZ0psVZoJ6tZe6jFQrhGvxKr5LyXTkNfaMQoGZYx1AHeKDR2NqXPKRtMvDW5SL4bdEL_R5OgyZW8o7oChgT5Pnpzn3iBjWV-LdAxScqHP6wSHu3YFx5Q_ddY2DW5-x1jQ88f27-RhU3hf8EwQp_SKjO2UiYnCv1z-VEZ8vk"} ================================================ FILE: genesis_data/genesis_txs/Achd6pqJVZ-1vNMLC977Lu8f20eBmgAv4dIddXql51s.json ================================================ 
{"id":"Achd6pqJVZ-1vNMLC977Lu8f20eBmgAv4dIddXql51s","last_tx":"","owner":"rRB4Z_bKycU-DRh_XR6AM39rTMhjJwucJFFu7PU1BrFak83gqd9C-IAfIwne7cRUYUl2rdWIQvIM__-6OIUglFlbHk1GdAR-gyUPiivK58kkFj_I92lB_yyu1pSv7hVIJKNG8vpYdBAo3QxFjH5EU2BL8zCoc452YrB0NQnZObHRKnWCwaHUHCLh9N60qYz_C5jgXTracJQp8P2mktFfFYHrr_omRoPjUGT2uESMQdareF8fiu6zT7gFdt5B2s1mKilH373Qmujh2kGCG9lRfryXcg3Uq3PmKfV4Cti_ewc_60VoQ58BaSyUDGTu0_S4XvF3RvJev30hBvpD8KmRjb1WnDpzJgxqsz8U4CRJlY1lXNSJXo21EHbegZB9NznvS3YS1uC2ZA8b44LOMPhPO1JfM0d6x7L7ne0Kj8-1SwU9d2KgCCSTeKKsWYnKU3TxN2jD7eVJcq_4Acy9nB_GX2dzWU6d_rBXbx_tcNK4mBTxpk5t1B807yd4DjrcAxIx2IeKrdIP8Yma04E0mRR5l3kBvUNGIeSDFr1dMejFIQpSsH5Di6WhoqwiN2z5LGwr02sOpuovqjvoUDiLXwC_SYZhRCvcdRU64wcMD0EvbYh7UGlw1UPX9DhWIlmbLVcVuW12GB5Kn-NbnJaDV_tVZo2inGpderBHEjTVOA7P_r8","tags":[],"target":"","quantity":"0","data":"VGhpcyBpcyBDeW50aGlhIGZyb20gU1o","reward":"0","signature":"H4zivDFeovD1iEUZ3I2snwyHHYyIXiyh0re6ADZZDgEibMMC5Xoi5WVG2_GXgpHZaD9VGd2pgZPbAwo-ZZ-yychHh9th4HwAYIbAQLN0ghU4_RwtC5w4nrVjxHfgiQr4NbP4V-fAVutjXfJ_UCA-snHJ68Gqn3LYdYKqDE_rNg1TKdS7ZAzntesDkXV8uwrFOfCp_OGPc_rGcdL6auJ5FYzP1ukpgUl5DYgwfvSnBaPWTM-INvQf9_6mCSkubxo20ZMdAEAYOy4V6_byWjIzVBVW_etLhBz-pcSHxoazrWQ--yYbWg_H-YRTfqWDCuXiDqOy6s9s-tBvLX3xMw9H5koaQk4FqwDNivbWCOvuUNHG-3fICvGBRPYUWOParCWZXzui7OhfD7II6QMbDegohrx4As_T5BQcXsYnQE9bKeFtRxP--QQI0zFPIpq-U0jutXdrXkJJ8flrPiIhDEQGUiysY-2LlKUNcGRnUqP6Qt3Q4l5f4N6T_PVF0hWcQsL9hoIZYlFW3uqo3ot8TRtje0yiezXBGqr1cTgu8HDN36RmX4t5IIF2tjPKrFYyVGbjGJ9ZL-jybhK6D_vCs6ZQMfg2nU6YmDxgD9J0vAOjguoJenORYxxfL4kMTfP-HlWJy-BlkeiYbHTYDn82bVdzkcZ-IcV48J-pLJnsXD5tEgM"} ================================================ FILE: genesis_data/genesis_txs/Ah6I8y8q0jb15KXjn0PyNfe7FR3v2xobg09Lfj7n1Mo.json ================================================ 
{"id":"Ah6I8y8q0jb15KXjn0PyNfe7FR3v2xobg09Lfj7n1Mo","last_tx":"","owner":"xthai8g6_mgJdBL6C1JwKZ-pDeQdu8opMYn7mVU9bu6kEi8YdWYzuLSG8jEdTPG6LULqkuN7G9o-78XKy2A5W74x5bYWu1jeuNF1tvNLqP4fg4mSVZajvFrY_c6aTi-AxZwA5QkdqX0KajlWymKtearDFMyR8D-5Bhf8uHhgzijTgltRkMlMgWWGwPn4IhR2lDArFRWxt6B5WUD3JCsLCaCjlxZGyl9xY_fLmwP8X99XVD2p--DMw_ZBTIafoFKyiGYymbQE_FuADj1XJ1ZLWG-eaXq6-XYk4jJGMWM5JEtDO7EHrJUWVV_pH11gN_tEzjcBlEIOeNz4X8I5VKzPQPnaI6TA6bg3FPY11DbJWXovXZ_AAjdE_sAr0wDsgaAZb60WQazrWCQR3Grmhn_FOz33kpI9R923e_ysBh6ePJE7DB1VHCfU9Bt2kgCdau4Kg8apB7ZTIYxhZTLXQmRcbON8DzJhn8X11HlPquycZEqVd--ueQjEXL4MmjZ_s9EoaFTXx96DvStMenZ4iZRX_WZaqhNEzH1qkPatvB1geof-hzbuZ1GmEH_OhVf2yxJ8VFmWqpDcc_xfv_RPChuR3RB3Qz35MTCwyKqlo47N75LElRkVcBDcBiGo8--cwpWJnZuQFM8g4HrAfwJxx6PTKuPCuFaNsSP-OGHzZcy64XU","tags":[],"target":"","quantity":"0","data":"SSBsb3ZlIHlvdSBLZWxzZXkuIEhlcmUncyB0byBhIHdvcmxkIHdoZXJlIG91ciBmdXR1cmUgY2hpbGRyZW4gY2FuIGxpdmUgaW4gcGVhY2UgYW5kIHByb3NwZXJpdHkuIA","reward":"0","signature":"DRzxAHIzj5fYarzn39lzEDrlh55om-NIIZhr9V0AjA_K2RNJFnzxdoaiC9kYchvPB-yvJsW9UaT5grblnYYKDqHX6uAB401C7wG-qV4xW-NkROE1YrVkYJKKCEmILzlMgHsFIeolRg8dgjg9EoGt0LDKaqwrNfWPwXdBT-Q03pEhlIzjzCcdHAzFjlFNtMnk7DCjb-JPc1X4jP0rBvlOKSkHUh9cP6REjeSy42GFDYj2Mip7GORPZKhOMc3AD7n4QuI5tvk91Gd7X7Uido3ScTI8slMbZ-LlKqc2xugq50DI7aFI_Sfcv9-RWaIBGOyILcJz4sahln9I2zNJr6hM_pArOzEG_AMhwrqazfiIGe0j-eZu48tw4sguoMAmNf3qT5jUljGgqV5Hfk-7sORGdRqOoogCAW1JRxhIo1x1V9Nknt0U2EcpTxBF5AUCIgwrCMorCpWudHY4d9liC0J9neDiLHSYDTQQ-74WMR4jM3Cd1HUxSrfm4uTTlnD17a5bZ1BhYhVfL7PE9dOVxFGgIELgdZs-vTrbAalObGM-6rBydtfPZPgjcaG3ZXlbJZXYdoelXgoRye4lrlHcgduVHF2AEPDQWRk91uevovnQBn1k-8ncolX6-SE-BwFteLp0CnX7FlYh7m-uliBY4OF-q5aRs3S2oIwHaIdjKOesd-o"} ================================================ FILE: genesis_data/genesis_txs/AoSTMf_ZxlcY12bK6_sWj02kssD00K4E-vkHx2vRxG4.json ================================================ 
{"id":"AoSTMf_ZxlcY12bK6_sWj02kssD00K4E-vkHx2vRxG4","last_tx":"","owner":"xodepVz3J3yxajub_Nt1vXeBQ-N615hlMwvjmDIdH60di3CZTFDUXqvONpbiJtCQx1TDHV6PM9WSOUcypsg6Q-h_UdwJc5F3VLGZ0stgJMnd9V9GF0-6LHrHf36upGxyojKcskKJoMWIuEN3hgJT8Z5HmtrPlPK80YLuUDppXlLCdV3Lz8arA5wp5PVLeZ3XadzgNWdP90krzVJghFf7KIT101ZkWAjlwccZfej7wrkZnT5zVyDx5dnBKKPgpplYmebgDXr10VUhaOTza2D0Y400lsYNPovSDR2XxWKmi4P0aJOD534QLa9Dh7MRiM5q9T2rtwjPHMHvkUAPqxC_METujyplxRYWGc8rDQ9NMBCkK2-QveW381gDIW0zB3V0qLUAFLXzJCcqV5To_UVyxKhC6fr8B4IGs_CpWtr37JAE5jBQ0I5JVNAtETUbJMx5N5D0cBLrX02BeXarWPuwY3X7-rDdB9ei8UOd5AczX9SbjPkDZSEsN2R1NGFNypC34fnMU8X1NIfUN1cLRkfyC7R-UWJdfDJ2EiEYoBX_iJwh1zkJl78xNU0AI4O8EfFCv3iEqCjNUFPl74CpXD1KhnnE9GgcN7MXABzwwzM_qDuSe3WPkqlTwpBDqZM-CtlAAyqVuxp4XjN81ydriujCe1NHRodbxxBwUL79fhRUvfc","tags":[],"target":"","quantity":"0","data":"ZG9uJ3QgY29uZnVzZSBtb3Rpb24gd2l0aCBwcm9ncmVzcw","reward":"0","signature":"NKu_531a2_vNb9_YpeXbErSfGRlGem9SCKWJY6-CaX7__ezM38jIvT6vZGeh6_I7bghVghBNUw5EFMObfmlBttulSPCXfjiKbM1WqkFCiJVE1h633rvcoiLhOWOoAJ6jdefgCCwcrfzCvgNdpKjoqDmfmw83XIBr4XYRYf-cudJVKBbNmZe6PyJJzPJwYnIWpxKS6XgGN8LU1POzKb_SRM6FODZM5s1psEC9njxd8WcKEDGjyl669O-Fjec01QNV788JA01G7sznEpc_8dQ2K1Yugf6SN_bvnO046_vkGtGklkXsjv47c3CoJBEJHMeEqwLezSBC1A4uMriuCCAKzzVBqU8sOsi4PhCLt2SbbGBvXos8o2HzOr4JJbYBzDvr02TAz1SYYzS5d3hs1dg40DBRvNBEFiGUe_jJYE2lDOs9F78pvf3gvtN9gJ_U0L8-e5R_OxSF8Fehko9B5lY1f8gbGrC5FUNRBjMMIhE8P2ooadlhl8VdL_ditzeqTGxbbh867oTw6bdt2qM1CmOoF_FeU4k8tOrXwvf5kUTO6APHm-HkoqXNskrgH8n1_kP0gayIxv8abba8HUKJEdQnOFUdg1rYH-DQG2P8k6D85AvbpBicaq-wX5v_hh2ihZzwdc3bkIZygjIL1LNayeuMCLw96Yakzrf7SsueBsGCGC8"} ================================================ FILE: genesis_data/genesis_txs/B4e9FBfqZGBszHAhZqTq-TNjb-oG7rYdlMWrQa4CPZU.json ================================================ 
{"id":"B4e9FBfqZGBszHAhZqTq-TNjb-oG7rYdlMWrQa4CPZU","last_tx":"","owner":"onnVDPfdNrAwx_DaZhoiS40b4xESqDKkctYhgLf1DJUQYHICatTbqAtjQqL-l6S6A3wnjw4L39AX3yKh6buTjioSPLBA8gx4Ml0x3guapZn2GGTHTObMjYTZy4b4yR2Xf_s3WbHC36J9xImWT_5-uNRuw1GUb5IVGfGHf0jY72sCZ9hnRW3KMRnJn1s0ux1CfmdV1zazgOq3b1k-UAkMYshENLRI7gN9MpCbWEj1vGbQA-YdPy_c-w-ToFZGeicji4wgnIMQBakFW44WvzeCqZxY4nTMjFO7GsyVz5HvB6mqdGyDr_Zi3IhA73L2cH0OZj1dkIhYhkbpOSVTtez9W8aq1H1baRO0YXdk8fF8SOZP72k3o3bD2Nd8eAyLv598mA1xAn1SW9Z3aA2kiL8DU3J9I4j7yQW4gqW-9JX-l_2vMCB88PtWyZVLTnPEvHXIHJyfjksBLVfAkIv4fmIwSjMzqEy3sbX5uzJbeZ91AJ-Th_8Sqp71JlrIxon_BHUQ2xfvvZbH361tQi3Jjgc4HDF0bL7V8qMvy_YCD2qwRXn_btJ7BoEfQPVcLBw4gLkvTFcoovWkZtXbjK-LOfgmuhnHJtyTq0kTFywtwNbso56AMxj40e5jDIJP27aa4dEvVo5bN6KlJkEutGpfnQZoldex0jBOzAfXZhaYjPJkyHU","tags":[],"target":"","quantity":"0","data":"TGl2ZWxhdWdobG92ZSA","reward":"0","signature":"ZZIDqx9r8Rr9AcEfVa3E9Frw9aiK2omAwoiTJXU8ILPKqOiMNbuAkNpps6DCqAQygzBlb_TiPQHHW2wUQiXGW9XRRhxnSTfHF_S8sURSUnV4nPbhMQHrWoKp2LhOi84bBNKVRkg7yyNvoSPLlHkbW26NUJtZTHY2rLABjoAho0m3SQUjVCgga5ungzbGgGXC7yc-ay_wW4-lnzbJeyxzhrNNPNRipwOsEbPq1RGc2Rgdv5-HGtUSgiFqScjbwNVgS2n0yup3JQmYaj_oD3TWDchzCgNkTYjDKls2LwIbWHSpYcPu5ADQ-HsClVNPeGrIFRyzYCcjJm6sFDKFuzyuDqj3uav1A_ayNcc2IJvN-5dL3A1Hg7BtncCqxtV5z4xUYjg8BUFln6gqjUUCq0dvlxxtrWEaNtrgFbI_noYv9jj7QgTUPoCbm-xfVzMLkESK2w_Z3xd3CYBczjyKWkyshsbfzwSFPHHN8GdOQ4Yz2h2JMNzYXB0IT3Spf7K_eehQfpQoOEqITbdFGB0ixXFh00DMZN6ghseRl2O0Sf-lyq5vtkgQtmsPZPk5y1j7SqDyFsiV78O9qGs6bIqiVXg1LS2YYWV3s7Dd83F6rPeKLYm_WKvmh6faYlHfJ6RcrF6fVxCXbLT26cH_suLre_5YbCRWFogTQA3JeoFjV7DSlCE"} ================================================ FILE: genesis_data/genesis_txs/BQ2RVL6XY99AIkPKDBCfUfRmJGejkZ8YKgKZc2LewhU.json ================================================ 
{"id":"BQ2RVL6XY99AIkPKDBCfUfRmJGejkZ8YKgKZc2LewhU","last_tx":"","owner":"s6jeFFIyL7Ac8quudTrDnWm52uNwmKQtze8vkbwhMiJW7TmOfLbr6WhtRkxalk-C5Ck-qWBHC0EDPqD0osXhhCeQ_zgEWb91S9URINOome5BUZn29re_VCytEuLOU_kWkjYSGUpvh5h91ooymfluQz5hMSr5vOINcLgi3gyEqKzsjSM8HsDBQrk2uCPNJ4dfbp2xWt1iy1IpB7Ko8R8c47Jl8PEAN1Dm2DdqmbvfRh_R1HY7vqqDQChzC5zTRsB067RRFT070t8zge1sUQlQgytnMnup_9DveRSMqFt2sVTD3QlzFocuyvjOI_xvA3M6sT2H3vg5ZxDEexX6b1PqYV1rtfeSh0rn6iYFOZC2m0s4QhNLVuqJvmwUwwvGjRmGLZ1xFYnUGjXlZ2AG5IBXx6i9VR68XrXQ92wyPjdl87GtiMSCNzVHJD0r3WOB_FiItz7BCn2xYngrvqPoPCqK73dyEsTflBwZhs_Xk7sTii_BFiuc46tmYl4sIw6t62KQxIWYoQwxv194gmuRW7o9ETUK6gWmuCoAqKG-DGx0sfuy-DKe5BShLVRGuExh7wfyjCLBMBewO-NnCFei4D6Pku4LKpr2eZZzyJ5BCE6yuakfhwE0l_ELIXGu5vyUJ4CIYQ5cKk3ec7eu6paDw7u8iV6FgetNUx4XfzwA_axyW-k","tags":[],"target":"","quantity":"0","data":"R3JlYXQgY29uY2VwdA","reward":"0","signature":"m0FMPr1SbbrwU4kd1W_AsiDEfF9hEsJatQhi-IdOPEHj9uwAlj8pE23lxalILhMId8-bUl_hzHrIa-Y86-LcZgQY-h4Tpje9DffRSKS3RVq5Eim29lK4CzUy7uOXKORrqg-9tRQcN45Z2qsf1TTbGFcefrQmGsFOZTmMDBztPTHPS_mDiK1eKdZHawGA1GlxYbiQXrB3Mof_xoPkIOXUiCGeKwk8HRCYOPbVCpax4kr3-E3hvv2KHw_Q24JNwuL_yV3cGP3P2Wo1m5KYMLbLrYWp1LIXi-K6gLusiUMrTVP9sOeEonnBWPs3jn9fyq2fFqrA-UepX68T1gwD_sgqyFsQ1AhlzuThmb4wN0ojPhyQerxIDT0vVrlAglbQHoKOV-7JcX5nIuVgo8AJzWHsyUlspyoawxk2g4-c-zZTyYlWKGYnesB-4fr-Yq_MKlHuYjcaBwTgF5OkWn9IlFiakNpYwDAivLXxwQqe8oru93MikQ5rZoppFXN6eSC3frrBoOcUOz1ZOrG8zojHX_7QsJT5vdpRtkmifmFO-xwxDlWrivnJSD42vPZN6moIEMfPw9vVZQB3Xc4LgF-uLOZMteWOHnp-61cpvJpEsdlvNyekihkgywqVGYYarTo6PeeBLVHC_7JlIz-7OkkFqc1k1poH3cOXFQnthY08SQpfxGE"} ================================================ FILE: genesis_data/genesis_txs/BRD5ARo8tiY64RqIoxYZ6jwbE-LQT_7jA513nHwWyRE.json ================================================ 
{"id":"BRD5ARo8tiY64RqIoxYZ6jwbE-LQT_7jA513nHwWyRE","last_tx":"","owner":"pLn0x-1vcGG4pE-EQHF38fH6N2fZcNRCNGN-qQtgi9IStimAmk3OA_UGVfXBApik0KNfbvN78EKUUqky_0cThBbQ86rF6uDl54I-fHDJ_DGOkqGBUu-Mb_pZ4hriFb4ZZ0rDwLoyboqz4Ymd-LHPh48NkKlFlAtJ0SpYEpBfNhO5QAOBU9RCm4eeze_40F6mwgOp7HOgZ2rQ40b6zfbK3ppb9Ciss3PVITk2KRBh1lL3Ux3h6KCi4rHApoFmOdOeSZ99Ei-I7ZA5UyTC1skA4eO4hvQ-_GnPK5G5uJvTWszNs1cBfpGeyUNtjNjB1E431LVoSg9gq5xWOWeEhlfBDfKdjSXsNhcPVCF97hpJkoEiGd08vU5fl2i9oPyiKFHJ03lNNXMwlJ1M2bHh2-1oSLSfUDHTXNIbeYN2bmHFCOPF2Xk88AN4PW9AS8mVLbG_T7FiOQNeJaOF-Ga_OSA-s24Fiexgc4tWQpSygk8wNdFXa-Ps4hulSZm6av82636q6ApTitapsUBCupsvoUDQ2_fgb6IW6Nd3qb1HIJrDnXW4qHZ2ZdEf-e0xN9IGsNrXFvfr4TuzMpt1EbbvN9kpFm2HrGL3lCsp31_-k1qb8r3YGTUCf6U-TCSALWF8BLakQIPbCfH84u3e38ZcLte3yshuKuqHDma-YI_yAqE0TDM","tags":[],"target":"","quantity":"0","data":"YXJjc3VwcG9ydA","reward":"0","signature":"P9llVCwsnTDZ5rFvSa3tkrdR7DKoLvsCqGCE83lSkY4jN4NQSOf0faT2VG_hpMdpiBQsQNTOQQ8mE4HuMmj79l1RgfAWcova1A5zrRzTZhmKn1kp3eOkWHc8k9_zbMCZGnvVNvTnvE-x8HlqKPowAOk1cj2WuYQmFHVa2jy7jNb3-Z3TiG6KbOuakgM9gwHdve33e-vgNVTYk2e4Q2T_xY8UjKh9nR_lnGDDYmgK1l_nylDNJdNCSwCEpiKEX6G44-6vKTlJ3YqvwSVAsG3aE5aw9eqbU1iVOBC3LUWBbqlbuxehcPeAD5PUsrRj2EK2AIHKmdUGX_vTY2adxITelZoNES3JCRB5oZEJLHPFnp9YM1PZPL4cmkuecz4nLqQezcwMRi2A_roL5vxai2E0gTXCtxp8YC02VPfWR3aJlmEDp9JEJmiiaCFkiqSo6fY-2O3u0gFofWxrttW9IUpUL5-b5hwSOlHrxGV6BvlfMuM3s2-SegI4eycxHtS5L-RRDgRzstlTvTXjkgLd7EnuzBSUmqLuJqtwLugrazQzj3D5_VbIxGiUcilXv3wSzkAv17EUlGQ_VRG_6FlkmdM5VCmI9RCrGCiEDKAv14O6yur1RmA2NUmCRffY4mQrCvXdvf5Y-Kt5lQo0OE1QPS2ebYcH_FcIkjLfBNvzpF_fWYY"} ================================================ FILE: genesis_data/genesis_txs/BYJCPwCLpd9a5K1HFy5F6ZvnemPiPFtV4hz5wMHr1NI.json ================================================ 
{"id":"BYJCPwCLpd9a5K1HFy5F6ZvnemPiPFtV4hz5wMHr1NI","last_tx":"","owner":"34gwR6cTYvzPku6eHGkC1PnoIXfPOGddXSmuCSClqrq_MKO_K-IPtzuXzPvkJAGOjEUuTbQ4PeuainpSe13nst_TKR7bfV8Ncl-Z0LEIK-uZsM3orlKbaQa9XJ3hcjTYt4J9qh2M7VfdXRdGwSjHPq5pbKcM3nNiOxFgTK8agoxSFBi1EYUjX4AcGZaCQO7JC37i7YVnzHtOIr-qbeYEk4zVvNJicgmtl78zTPVBAogbX5E7tPPnyZgizo4JlYY-ZL_awyeMax0IdX8gykNHBe_iIauvaCtrzPICMjpHNaW_UdSz__Ue36JjocS_0Yok6nyPyPcqAD9WS8u4xYrsZERgmKS8jp5F29wdq-4TAlUcVJI_DWovYjxouNrU42sA9vy0B7AFhlOvH1J-iLna5oyyYkq2683_MauNVePe5KsFZTBfL796f54GCypQf1U1zH0BcmNKBvut6WFcOAE09aRfF3fd8U-b36SyyyJQ6jchDAGZ5i2u9FqIee_KsmHXUbyoKvmY1Edb2afQb7ory9pBsgyR_JenK7VgpOdbhN5vQr0NtET2zMPmdljOVskN4M1oRL6NMDRUNVdJtzrzhnHErJnnTA9fm0TMSmqUbjyOJmBXwm1rrEwQxRZp9U3u3qkKkY-coF57xbMxxiPymPJoiJjCKEIXUJHcQnyWVXc","tags":[],"target":"","quantity":"0","data":"d2h5IG5vdA","reward":"0","signature":"HkZNBpVtZnBrRizTuIHH9Rtd-Zdul2b1WNWEjX5WB59mmBjfcj9MeljveS0P_InZG9EJZn03Jp0fbOia00yLZINJVvKO7OpOxH82RsChLc-Gegl5PmrqBA1pcND5f7I_dUzndsSmgo1MFsD9TYjmk9mL9_2aCU723GakcU80ICyBgnrMboNHy9971ChAT9kWbLQ-TloIUTp9gpGJuQJpx6NIXopM4ZH_9Dc_sWFQyXyWNPT-SfAM5h-lvjxBO5VMtmH62vo4uADUhY_iJyMQRL2kqww4mTitwh3pp5tHUrft2L0gebFeDJ64lbQ8azapbERSu7mmUUjKFsYf0DEaM0EQqUma_qMrXP3b5QwoBjpeuiARtD1exTYNx5Yx4ZOgZEa_5uMgHoENSwP0DLYXpmYL9_qAROReMfmesLKfQqanfFnLn7ItY2mk3sszifesAMUBdLnJEKrY7kl2cke90-TfDqPuYRfglUkstJqBVsZLoXlUCP4y-R8QMZo4jjWJPbSXkfthbGAt6nNnrkAKELeTuQ7lCVO9aBThXf7za5NKcaM1SPoWHooqrJ6SeI0XwVbyADx_6RvfEB4fv4Vb-i-cz41NkBiRg16HdqdOplWlAza0ZHIvhhA05LPOuCkNwBy65osvJq5DvCdCJSZ07hZxwt2Kpfv_HwHLdsywDIo"} ================================================ FILE: genesis_data/genesis_txs/ByvrfeR4UNmWJwF2fU41mBo6ThFl49u24rEGpbeSI0Q.json ================================================ 
{"id":"ByvrfeR4UNmWJwF2fU41mBo6ThFl49u24rEGpbeSI0Q","last_tx":"","owner":"stZftcxWjaA7Wzp-vbpC0WaJc0foMC5qtbsghLg4Uid4wUZOa6AfC6aSEU517I46IRjAcgyJEPMtuzjd_aTMZ_8sweAa24gVxmtOC-nYkwf5fvrj4PHlCuugDOsJXoy7rnrIHMJuuMk4eHLX74GtuSZJMgltz0CaHszCXtqINu4UodEXsF9Ig5x4_JxzPz10yXOZaYWkg24afyZJOdZYU1iYkkO86sgrqYWDkn3p1jW3neOIv82ZJ--MOWF60bfV9lySG9gCiqiKUXuWjdqq6FHj-y1xTSx8hTPnXbgCvleJVquP9L36a2TvuOOqgdBgIGSMipoFLwMRwm9piqjVHYAMs7lTv_fKvktUNSQY43KBpZWGu7IATr_S3qsO_elp7uis13KdQiqjjs9WDjC9etWlYORFBJwk39jygpd-89yNum6amqMLVIftVDxwRapBooeBGoHuT5V9SqgwyVxY7uBbOkAlfMZjIbfO96NPvZ4DOxIpu629zNC8gGAUSTEhcXy-nwth08BobUJv2JE0l9AUPjVvlZvT4Mlsv9_H-Bd1W07KKfhqBcuzLvfmRVmP0_EAgTwdmUZK8AtD_hFdsANV01IM3tVxvYCG8VlrlHU74J7LOajUjJt3YbwDuzPKHqYNcZI877N1RFpYmFgpBJrsebMcL0IgtERczeV5b3E","tags":[],"target":"","quantity":"0","data":"bGV0cyB0YWtlIG92ZXIgdGhlIHdvcmxkISEh","reward":"0","signature":"odfYYwE-kWOjhqMn-1rgZI0hn_SeB75uytkrQMhvhGifo5ZyI2sGxrcWHNUdTmGPAQ9qcM5x2G2jtEkOdTkfFHenFCzwXx8NcWXtmSNBNmGPQBxpmoRKSBTPRrvwjrjtD082OP4MTB-pRBPxlt5WkthHde1UQca3rwbKTWyIR-Wyy_lXe_wOgSEIcgsY3lLWtDVck61Qv6lcOdQcCr-iAO_Gkp5UrGtnVhOz_HZi4SKqrPqg7Oo6SLJKYtuDvjZFyc1hUxC84gIRTGAMcDxkIvxYxkzb_rX4jD3Pt20PXyZuHcHXSEbUmTgDtkKOVoLB0qparyVY3qep3fxCl0jQYf9egko6rVCxlUy0L3OfrPAAP9OaGOw-7qwJG2U2Q-rAvXYJffCw3sENdQOQgNbF5-ky0eX3zJee1D3uUJ9O77ifhTF9pJb5THjeU-_6z0t9daG2HW1KADjd35Qf7zqHwEgi_QFqL86-ybGgCcGXKLQPOCcYfdqt825IYfqcmm9fuo6tUZ_d0tD2RdSlow_P2-H2Bgk4_qCQ5rjwNhis4XiMpi3iYJx_plavmj9AxMO9_iIAaRR06yTQcJKiAA0Q5eNEIzjPpoD6ss2QqIc_qYQLQ-vzt33WQTdrUbTP2IGZvNeWHtZHyKjj8u86WOBMAnq4l8pA-1VeOx3u4gvkrBs"} ================================================ FILE: genesis_data/genesis_txs/C3auX8HXhc2dChmvSBUfgGyYynuAr6P3g0p7420GG78.json ================================================ 
{"id":"C3auX8HXhc2dChmvSBUfgGyYynuAr6P3g0p7420GG78","last_tx":"","owner":"u3NGzV6-aIFsgG7alVMtfUkVUc452v6Bjmg5XT6yGOq5eCTEXEIlOq5AZ0SPIN0L9g1ED2a4UKs9geA7tEPTkipDKBZsp7_xXFwWfkRMOSL782W_cRm0PGhucN7gv8qjuKB6-qKv_Ps8yTiq5CBdlnqZX0VJQhgGavTgAhXvV-OpnySQHbYKSimAaXuyMuWooFNvI7UnvBlkO2E0aOucvHPIoIwh8zkr45DCk5ZKuZX7VmoNJ5w0eOsc3D2tQryLRHsaaOMa8-Qiqs87Pn3_MNfsq2_p9ymzPf_ZvNIlrAwglOjBCo_Bf-nabNPcZO91Rr9gyXsGiLeOMXE_roGmkYM7rtMHGb4YCcOkLtXnZqCF51MgzSMFYJQ6Z66xEY_x0qgbWlgCLh22gE8uJ8LYmJCAP7yE5vR6X2dFwyP_SQBELwdiBnuCqFYEkimKTZ9-JNmOQ-BcVfT4aPvdWK46hFK1u-j5Do96Lqn9KfOKLzzNe-n7zNTBiWVR2HPnGc_qLbdg85yzvv1jjPD_Hif40UtdD1mhVtCQvRL8LLHM5uPGxfh5N5s2BxH8COtqn2DQcDD0_LEJPYrceEz3ZbM7MBgq6MlTuMzylROZdyNKYjicNnJONj2qr7b1W3oc-Z66RR06pEa8eVpSp6TM4JTZErwYOHtFMI989bplkRnipuE","tags":[],"target":"","quantity":"0","data":"S2FydGhpaydzIEJpcnRoZGF5","reward":"0","signature":"qnBwiVM3E_FFcuhZpdC8BfGhvEhmKFl1gaVvKOdPMEJLvESxy_0jfV3S55aRbCF-AamP0LTqaRVEypqYNOs4Q4S_OLeuqAfA1TT8DhyY5Z23l772drDt8VWWBVvNNMKF-f7emtiOkCKxnAVTBrwT5SNZKlI-Yr3wfUt9_P_a-G-jQWPy8BC9lvAMomPi-_3-NJgus7hc5M6tPTdEBOOQhCPZKrwBxvjR2bQyt5OLAARutA8qONc62Sm6mz6oBo3rziXVPVIJW05sfBkFDkJommroSeBbhxUfBR1LBIUNpc9bnMSAMFm37tRi515qE86zQvBd9U31Z3JiwUDiPCugwoC1y_rgJk_VatMMMxrK7kbQfkzXf5imG80jm3hblBO7-OJzhX3egYrZUIPiX2_80nqmMu_skc-HneZfo4QqCuEpamkCPgycF70Z63MtzpVt0iO_8fk14jUuxQ_g28FHmq41sEXez-RifQqxReXzFPeU610_taGe87c4UbXuMMlv7evoe52aoXkA6OYZgIRmvN0FQU0hV5J0A5q4lRKnlvAnamJcUHTKZZtwDBozr64Z4JRsVWJDt5kyzeSO2i41wB3K6ZWmj7tkn45HL4Ziaeu276pb02n1aPKNrsT7KZlYcfL81ZO1A-_6CfUTlWoi8uT9WB2XyvmEFe35vgAH8ug"} ================================================ FILE: genesis_data/genesis_txs/CEXuGv3KvVtkf5gkV0ip3g1FF-i12WIDo6IOigORIZA.json ================================================ 
{"id":"CEXuGv3KvVtkf5gkV0ip3g1FF-i12WIDo6IOigORIZA","last_tx":"","owner":"sPrx02PJE99dKRBqPotHEsuw54dRiJdeB73_Rf9Yl-tFj90TaNCzS0yq47JVsapT5dfCwefLpW6LGnMh1aTaLeg4Bz5RVCbSxFO0vajDoYB5SaveljJPXF01HcTwx96vdoR6Za-hfQgdd4HJC0d6QP4SC3z8J6ngsQg0DDcIMP2gngs_9FD8dl9E6zn07rHebi-pBFKUpuch4NxYsSrl99M5KdXvGd0hzfH9YwtciRGCDOP0Lf1DdsYXLlBECEPmVlBvoUTOUz3SlUFA4YEDZw9sK4i9v4fOygW59qrx8yTCy5D00RXkIh6NPP_GpmwRuuSnfV6xCa8zWcxXRVAPbZVfzHgZ48rsxZfdHS74OuiJ3vhO-SwpddA18h_fzVhAlHm8WHDbZiE6IvW85QMBk-fxinuTuRx_qz4thMnWL86SM-LVV4QEbNQnmWKapFmOi4gaMZjTDl5QF0ndgKqvitl-rXBPMuEU3EKPaMSh2d64cJm1_urbUevnioG3TXylNae9t4g0m1j8ZQzAWvmRneGSoWZEZIGh39WNjh4B7y1dh3ggSDnPMh-lmxvEIkborA5xNL_nG7zmC8jXLSLsqx8G5yDO1fZt-XQbUgK7fYA2Rkj4EZiwxuLIZo56wxZBdK52GbVIhu4f-winNDtHImELjX6Y4whaVjgPxyJVCck","tags":[],"target":"","quantity":"0","data":"R290dGEgbG92ZSBDaGFpbnMh","reward":"0","signature":"PdvoHRBIDiFUKDtF9jBvoRAuwJA9BkW-WFhzuzWb24a6j77KejriHwQ0BxLAU0AMJHpIkZ1S-dDHYLPQiC-n1kGu4k32u5yXuJ1KknF-amVGajYaz7DKp4eP0GuBeVJGihQ8lSuGsQgVor71Q9SKfYJIjiUiJjQVPkAzvY45btGYM28u3PvjN7ob-MwU4PBkZ5acY0u46GtDmILiazegBO1kzFmvPCzPZYJs0aLYBZ5FF17Hi4qazoEtitJEkOEMHghXxcJMpCQ5eRMl_cihSYXDbqroGZ0x5lFjgT_DGMYktkix8K2I7fg4ts5q3q3CKjvn-Clo5m9RifEoNfg9GSlayMXX5JkyiKYSzJss3QuIwI0ug7A7iUXppeDBb_ufG72dqZDG-eE5Pjc3knQJkT1Cx-1frV__E_dELaempeurdi8Ac4pD9shxnybMTrdzLGQTIiFwrJnDZq1Aa0vq9kyheBss5h2UaMet4ucbUZVQRBS4kPqurmQh5LBQhXIrXRETcZJfx8uH74gPzKp2PNMF0MpPN8DcOnFPGNpKe3SB3hfQn6Ul8LW4xGAeeXMlHmz-GuEM2M3RsFH1f_7ca4mBSiBHKmHx_Tpe0_XhRPrpUNGy461d6nR2OSPX9b1sSiQoJ7pc16vjn37Z3jtooIZEHHXLXJQ1SyiGLsbcwT8"} ================================================ FILE: genesis_data/genesis_txs/CMr-rV5FdlQcRBo4loZzj66EFqwHBmA36tWiRMKGigQ.json ================================================ 
{"id":"CMr-rV5FdlQcRBo4loZzj66EFqwHBmA36tWiRMKGigQ","last_tx":"","owner":"oyHwne8bkEP5Q-5S0r51EMu8_4IBH148l0uGUHoNGPOWVxj94nrBkLaL8Yz3FJCR1o5cMOSYR0cAnI3brpgJTG2TR-jFdrxtt6QxQkj3ZEGIvSzv-ZEvkxI4osXfrvnFQHBm6tzV_5ZUBE_-rUTFRAILfXjoPDYJtkJnC7dsUD08ZgXuMc5e_q-sd5eRMgvo24vWETbZJwBs7bRpL8yAodN0ZOCTniGcVfv9gk30OLfuAovyUYmN-zugYZZLetYe537AoY8LxKyVXfwofuKHkH6SJJM3jHYMqu1YGp5u-bqb4XAURBV77UKh2co2DjLLVdZ9k8Qmmg-lKKfjNT9ZulB4_g7fDQsZVPSqtAP9keqJxo-wunDUBjlbEtgqeFVPdP-JUjMFZW15DvBZaYoJXAkepFwrSC4XNU7Xhd0Q44IN0zBiCTyAVPBMwRQ1E_J0GyfzEtb14sUBy1YotdpCLRNCVjiipuY1S6DwJHSO9Zjr3-cCRxx7toYIqU-mLldJaokouYWRAllVfyC3BxNfsbVNqOj-zU2emGPht7Z4268jeETme_9e12ZEdQElEOc0Sk56CZwcSRdyEbP5g5iDF4Z2VfkHUUzb0FlaqKIFuLV8T9nqFgpfKC645HNrA_RfqwaWGQBgniIQ0btJFJEGP35col_h5MXSZS4AYOceEnU","tags":[],"target":"","quantity":"0","data":"UGFua2EgJiBLZW5kZQ","reward":"0","signature":"AYQOQ6I-elNGGdEQJfbZziloBLM4NCXwSvcAsTjn1IkyjvP8cNetFhBV1O7uW3fQh6qyxCqDDO9xsO7C2ApRMKASZq7xAUxXeu21V_pPAKbJ4KxDAu4F985DIgi7bb_gbvX92yGn5MC_AO7WaplJI5ckPdyRLI8RhmXM3T2kyHK7C_xTRC5UJwIZZDhXcZLadJlYwvT3jqhDdItPzW48LZ9MpTStd3VJ5OipRnpd3ODLA8wfTeLUjMhj_UnnaY5S4oVbk-6I9n_HY4BgvUYzWhzY2TcVVGu06xlmkR-EMNDCsD4WJdS3MEQem8PCoaibQnh_vd2Ord_UC3yoDGFYvGDPfdvnZ8RXu7qa6JC-mln4LKcpmOaOoA7AIy_YNSb33SwylflKYVLJY03GTSp7MTooux3FYmeTj3rEHqTfbAPX0vC__VloOFmXY69pbvQ2TZrB2uCuMdUdUVlV0uNyOqeMnudaaXt0EA409RXnQIa0jNonXRBmMllWNSD-cL_yZUtI8MAleiZ_Ho2p8LynzPSi6L5ixCfjVTnC61oIfJHeK_AJrmc1inOhnUH144BtjMfEM1OzS5gAItqEqGCFqMsl2hsYYxRKN9t8nKll9Ld94zFVYz6GoqaD4JfHwNfnJwcmULFEkiYeg9ZTHWrNW1juYAX02MuuE2IpAhB1hZs"} ================================================ FILE: genesis_data/genesis_txs/COXhhpbcLSEe2iP2kp4SDj5NjjBAC8CucsAgOHRF_lc.json ================================================ 
{"id":"COXhhpbcLSEe2iP2kp4SDj5NjjBAC8CucsAgOHRF_lc","last_tx":"","owner":"1gJFGx8-oO0St_Qv5FJGgTLINva6t26rCM1IX8tu3P40DTS0UhDvTx8gjnIWDGY6tou8ptK0vFGUb9S0_o8TVB4ymosP_ZuOOeZGTuWhZ2eZ0oyCZqAzT8IBPnwEIBQs7gUmXQSXFTceLv6BuoVNWUM7LcmzySGDFLH0XC0ovok8wB-2gtJPnWBCPNh3OM_JHdIJ3Jg7bkNgnnj80f-u_saSYh-0qvJnV4027pG2VJ6_TRpghA5TbgPTeXf8LW_CXFYbhDz2WObKeQeM7Bn-soqxvcmGah5tCgbICkVYGfxeDgvNxGlLlvP5lrErUMzJU1qE3bo6MHfyLK69yDwrUkhZ5BWoNFGW2unVeAgPQstBHfFOFU2F5f4EoYtuQbvWUlAE81oDwkmNu1vQOI9eeEJmthH1we37pmbMGDAbfRZP7iR30j_IKp9arEnWQG5CK8xjDsR4Q6tPR9eZ2zGTIf6du2APePl7Eqd8ONSSWRpPWqzPvKSlkBU0xLtD3vCKRLCL-qy2vD6c-Ki2kH1xWgTIPoWa4GvKdEYQ9OBnHxsCh80bbsKWmjjAxHhrPt4kDyzJjH5FUr-ZQLzZ9k5RJkb3WwiNPS9WKnGxUt6a2334OyFbiaS_b1cjOfpbWuwlYY2hlyg90s7-unzs1VSCt4jyrnYbxAmseXOPSZiFlMs","tags":[],"target":"","quantity":"0","data":"Z3JlYXQgcHJvZHVjdA","reward":"0","signature":"V97CJe1NwJWMUWh5bsmkF7xAAAkhKzcgAYHD1lDw_KcAbgpiijUuKQdjaIUwTkhFA9vkzPCfVqBUgnVfXX-sfBKDq4KBoVJaAXBy6sqqfvQOgb4bGQfTmmzhYbVgDREUk5p0TVL2L_Nczo2XUv6fb1iVOeAe9uVDFXDZMaUX75GrgdHkV_xfSIqDB1nX9rOHy4K7tJElbT58Dc34A5_sVJAE_WU7o-IP7UOZkbZMsQFLha3p3SK1NRNh2lZZc5ZwMilbIItl3o5mdujj-BlYwtRp-3FcKNDpQogTScfA4CJEUZZys7Qzz5drtEDTcS6FPAg9ikk-DN_ywKo8i0MEugBJ87GRYchEOY_nTr3GTu2Qik9dAq530gaMynt90M6A-l1ugdCM2DVrLJiHn61y9Fg3GlL_TZifCFNDPb_8x4QBBLJTgEyZCn8IrOGjN8Bo_EM1rk-aGpTaYphwjGTKDU14W8-yHrdS4cUJMGP3miO_INspfeh0bc6dDl11hM1657Ou5aRxxEQXKRPHUTdQWS_KA7h19YTcBedPvPhEwRPYsF-3qCDJZiRNARme7gHizvNeqbp_FcCleeBw8DA1nomR5BLiARPcue9A4cttnwNCn30KXAB749GexIfSuL05_x4Y-8c9diNB5Et6BHMrcccBxpzbmK8zbWUzvquV6qk"} ================================================ FILE: genesis_data/genesis_txs/CSkFcCmNgvnp7jp7aK0tEGsLWiZVMF-QBkEFaJrAG48.json ================================================ 
{"id":"CSkFcCmNgvnp7jp7aK0tEGsLWiZVMF-QBkEFaJrAG48","last_tx":"","owner":"tu-sNMjZll6XINwUUWPqQQ8pFZF5ufnezJsQMBovxydJnrgqtolhBKGGS5N0tud8Pw4aGFWsycpYUyDkCw68DUeKqTiSQa6b6v9Y3lSuI2iA19th5zRIxuF_TApcwTFzXhNITTLFW5Gt8VeaUhrGD6pcBZLzAKJNk4Jx034YbOPPAgxCy_shgzRZZuvjTkaMFs7-dZ9T7a8kEk3jC568ddsqpk68z60bHdA4w8RgOhJrALaAIk52je8N-80xGJ3bYmUnPDa-oncl3ik-tKIQ01aIEvEdElLktS1WHIHlO73v4aQJQsfy4uZs2yDp2sfZcrVSpBQ_UDiYk_poYGt9OpNezHli-TLcJ6HDXcJUj4_AcvO-cbodIMGrdFfOqp-u066Q3TY1EBRD8j9tZ5NGoasgP1ICNsX11vBIi4IRAd56E8n7AVSvacbaJWgnkKamd3V5j5_KB0vE2YAOT1sZU64_vh9yR2KGxd97JGmF6HzbC67vkZ_ZZb9FLpgxARTeirRJ0CE0F_gL2QJWPbhoyDwskzKiBM7ndV90egfxgh0Zpn-8zYiEBpRoOacYp5CF2S7baYm39C9xr9g-GGvue4bJ0V9p9v6ONgZMchHq2auFBs835hZ4ThdtRwggUJJcRUwhxaNjuYcYmpSdfP-UY_LB52hvo-OKT1tiYklNags","tags":[],"target":"","quantity":"0","data":"Qmx1Yg","reward":"0","signature":"WXIKFq646Yr5MHqtJwNkzNlS4V5e727rWOnuGKZRolBKpR2JM31hNLdkVA2w4Fmg0iLpnhTTLkMtys4zkmJDnFOEdMzgi1z0KxLUz0jXKIfh1PjEb1ipwcaW-XMNDYdPFabOTnN0yghdVx3tvRJx79U7MwoJzDDpMXgZ0xK0vMT68dgkBY30YzdsKomfDO6M-Z49tLIn6IO4u4j_gAtUH25M6f1duqPd3NUc0ahDBSTRIOQqbsS-4WupXBryU7GD3Q1rvKFQZGh-QYfOVNEqCbnVA15R8DUTuRAq5o3L0sEgt8CwFFl2tUN1BbT-YklGauV84o3eHZuEa1V77drrp0XHO5JLu8VQ3RnnHHzopLsCLaUJNvRTn1nihHf00mP09dszACu9sbFp0xyehIvA6zp63f9vkX21tD9Q2tpVdYVyChv8qE_lN9V0E2ls1ftRPQQzBql6FH0FX-LKUR0Cl80FmtHJDur0DcrccnI6p1P0BaK9FL83S5TZUDpz40HsQWzuIFLMVFeOzWBqmZ3guCUEYblMzGpoIzD6npMzSEn4T5-DzJoVe95QU286Pe8uKdnHWq7WjjTDqiQ54dYUKekKl2QukGONG5krIgePJoCrPzKamBjUIe14u1dXdzsNX1sDtCbuc6vy8ni2MSyuTOZwwcq5xC6I42D82rdCGaE"} ================================================ FILE: genesis_data/genesis_txs/CUu1gtu6L5tJxkOAu13tNBGDKECohV8M4qgCOOPNtas.json ================================================ 
{"id":"CUu1gtu6L5tJxkOAu13tNBGDKECohV8M4qgCOOPNtas","last_tx":"","owner":"xCb_nmC5a2uJLUHDyig0VRWnXIIS-QoGETwoN1K_QmLaguOoyUy0QPRQSARafSzytqB8cSPtRB8w82aHOlI8xscE7q2s-ZQFgSzhG4fEBegkEUb2vvNkTchgAbgN_amv2Ad0q2oGHaq5jPYFzv9Nkna_OA9G1kkUFSjNZM0a9U4ybXna1ewn3pklcUJ-OswbHAiuFQ_3xvxG-HsuyuGGOcsVMlZHQmHvHbbX0_o-M6s9ntlORjMTVodAbYKsx5xSj9X3bGOFO5BeWZoGvcXtB_5AVWxdwA8noTRedRh3aIL2qmUINowwsHy8HtHlhNz63xANphu7zb9lYa5q-ciORQ_ZSKbVCSXNnmt_jxofBdN1x-FLdDI8zXKQWBCak3CJWxpw6JCi4dEDoDNgutapdCh8UiSbxoeyBdANWIWlvawhJYmZ0IaZrDfdd226F6-yvspG5KU_8mE3U3lK9ni0TGq3LMkwMMZ9C64PphWBabu_FL5pK1zc6VKWGvLqeK_br3MUJPxPN1FnSRneGD-Mo6-ruEf-YwENUTFTpXaI0QW5i3S1JWr6Nbxo5l0grEeGvXBvZlY1xWPeaSdC5V_2B8JIibyjwCfDhFGNDvfhquRTy8mj-PRpHwqU5WGK9MfaUDrFLNdAq_tgMO0Am1G3Er5y5wJ82bah9NHCDGr20Vs","tags":[],"target":"","quantity":"0","data":"R29kIGlzIGdvb2Q","reward":"0","signature":"Gv_aKzF7L2YuZfufu908Wiizlz5OT6vPWbUv1sEbRLb1ko1e6CB8ftpAaXPyfjk79WwQh4Igaxhd3K9T7zKNFmULCU2n6k2kRj_DQLS-Nj3ViDtAT8AGyKzhZhPJr0EQqGJDrArxRIiYGQj_pYkFaRd1ztcbRHH9r782p9CpAKZN_Xedj24SHfJLyHFb_Ss6um7yRlWNvBC_LwA3uaAtWfFFvECOp8LMa-YUDllEANnx-ShG-IuowD4yurY4ayCcIezUjKA8z49f0i3YM6gfiFZ3KuxdkMpWxjZ4iFpGmLiMZFrGt5kYAGgnJqgQNWfMoSZxjBTPqz-cRdY7xlU_HA2FcSf5uQ9AIoQHvA4qKF47OLsZRMNBAbjRgh3dGFeEdaB6c4loDkyU6C09h_KQLTX5qzv6Nr5jMPqaKYaz9dzPggTuUISAIv7Veh5OG1JGafbvfLDSpliA8JwJO_4DWhg75qmC7RmTuZ9nsmcD-ZpGY5kXr0TUkaK7UUW90TDPP-rrqF0UU1AgSgCt7srUZvQqG2WQaXm3rauggpXr0ffVzpWBFlFW4P3SuntWtrHXp4Fdkrhu7Xd3SV1P0k5qbVKdIlbuArJTqJWagka5utDIVou-ha-VQXwtZBk-R7NccmAH_iv7IWYGV4Lf0YLZeVUlktlmh4T48r9TRZHi6bc"} ================================================ FILE: genesis_data/genesis_txs/CZ181FVir4NaSJ7JsVb50-xCaZtd3dmKbDer7jpTSyI.json ================================================ 
{"id":"CZ181FVir4NaSJ7JsVb50-xCaZtd3dmKbDer7jpTSyI","last_tx":"","owner":"s5e03LkGfu88YaCs8W1bePRGRQnP9wZfkTeSLqtV4NWE1RNqJoA17_NyY2AfplDQQ1kXtlFEoP2oGwCSydkFeqyiYTxLSf9rYgTWrsZbH_7vPv1_JCp-KRpuw_xNFNzuEz7Q9mKqleo1Sh6o_1-IjSzhkbZE57cOT0V5FJ5fW7jnijItmSyHFll4B_1dRW6tOOVsvNUUc9InSKdI2ULu_QAqrwOuaewXnX_EefwR4HxBjH1QvX2zfi96-fkorPFfQlrr9e-ZfWvI-4g7Y2zJH5WlW7NwOq-DYoA1QB0NNc8PioNWiJj-d0F5g-6Fg9kMIo_EGp5VuLzl_uKEd_nAQOQmI8HqTCiDjM-pcIgcsgtL_FJKptpzoWjiYCdJ6LCUg6faWDHQDQvz6-6h_pUVHGtN9tN4BEngzZEvtyFOC7olypTyjq_tmz0BTvauYKKZ4lIH5Ju-5Vhs2V858I5ZbBka464d8wnNSHSrp8-v9veSFXVdnj5Li3aD8XyND7EfWcDT8B8ScEIDa8yMC19f1YuoQe2L4oa37tWF05IsqmMkrOHUpixOPpkfgjZzgsHMfQ86aDXZQTQRfddJizxiK8HIXn-vQVA_91eeSRILvmmLOeZdlBVWPSRouC5yiAywUEU6IBAlGNZEY5e8rI50I9pRA29nFnct8GKo9VHKKd8","tags":[],"target":"","quantity":"0","data":"S2VlcCBvbiBjbGltYmluZyE","reward":"0","signature":"J67_NR7v9LheGe69BUHU8jYzRVxsrdBaNo1Vz4QwI-aamBDf_Ytd72VKQsdu8bF7TC2aTzxtyt1HBUpuNbkxhs6a2gg53TSbbXY1f9kMIaXhk92CBfuaUQTXO5Q_SpzbpycwEgyL3K6cPQy2oNRVcE6eg6krTnRhdZiTu0wR_PhBYH-bm96cRbi5Mi91tQwR64jTW0W4yeohHFtXTcFdRhL_JkD6-FD6P7M57e3-j4HHpNl48zk_vNbgx68aYv9m4dH8lYx6FYizxCCN-zkdyTvDyXT_74nsuvEFFtWKQgK_RU0X23JqSvfPE2x631bdjjgsbMm867btIHRbGwjiUlz-3W3h646uE4vHqOrikzyeDZNkUsajahGIX3Bp9oyLtxGE6eYyNDSJuveagJwmDVFHok10sA25JAng0mN_B2cxuh2xiaEEAZ1-oiEmhFQCdDDhGLC1ViJjh9h-kGMXWbJHycfR_9LTkYpbh5WyzzLZc6om_yC-uX55IXJOKi2wova4-XMMv74MRuN2nlr2Pchm-4IOitx5ITxNBGKWhua2K4oVR4WNxIc-pQMkFJIcfcXSUL0eumd7ft_6WE8mRNFfoZSAhuQ38DMlOM26xiIjkVSig0cVqMKSkISnuTkIg4ruO-p4atCziJaA_mRftB25HQctdUCvoWbYD_u8_ss"} ================================================ FILE: genesis_data/genesis_txs/CbV_CDXgVNjV6fyoBDkYmbAcaC5VsLDYXgEIwj2Ewyo.json ================================================ 
{"id":"CbV_CDXgVNjV6fyoBDkYmbAcaC5VsLDYXgEIwj2Ewyo","last_tx":"","owner":"wyDKbJ5GXN48RGneEzgIEQZfURGmPPCTd2bfJ4O0oM2xGnCMnZ2WBqfKR-2jXApe6VEaEQX--dM-V98BqCQrkKIwm_SOoQQ7sUTOI3kaC35C1dLhobF5bBFF0OuHKk7KeGqPb361QGB9yMDlj7eTyZN_VUDXjS_UdIhzqurHHOrtXsmyqD3zdoUN9wGLkJJIHaOk1TqwLcZyphDnqc_0sb-au-nKbf2JqJN4DD9-yxVw4rv-qBRKil4z3p1CEMpOhgxCXhxlIBTb_6IN29vRoTDWju-OqWuoFDCRoC-8Vd3ad66VPvZBi6NG1k95EyUgqzfB7i9VPoUp2f959MsbsldsFG-Jroh82wArN2ydvni2UcYcOQiFXqf_cH4pKvL2IdnDmcIv5BF0-rMYUfCyuxPOplqZYDH6BC5O0y-fSZip1-T5tWSP2OSRcghTtZmyJd4vvdLJDiD_FL7a6a5zanY3mrVILj5h5sdQh7uudz6OEx5lZT4GFUGXHYoFAa92FPLV0ywYTZsM92za2aRULlBS4WXTsXrylnJ-fr8SJCFehgSkdSuoGslG-XwKZNtEP_lXxfB2MfczrnokYkPSpiWFnDgFWm3eAB1LCdkazUDeljYpiybMbPwoMj5wyeczwD2MiERM6-CBahQO34SFUwT6sc7cbz12GlqI9igFJyM","tags":[],"target":"","quantity":"0","data":"SE9ETEVUSEVSRVVNQkFCWQ","reward":"0","signature":"KpFnbhNaOMtTJ9tBuBZbVkO7VMvwF2CossHxomc5fUkvgIBmSvRl0srvZtr4i-MEfBParuIdXCh4usFRW1L1tx2D1hpuWFzGESCL27KVlzmqaIPK_efroc-FzVHYuhxEIXSaWqA-64TjcUVKW5HR3okWYhCk7vuVvuEMnNyoeVRniSTsnPJBK6prDu1vwQhfXlGb2iBwRvzsVqfniJC-PuLW3Vr8DGuECJq8W_OHrAKhmQ4I_Cidnhh5ivJPPZr5IXimQn3Y0IMljDHr-FcIf0elBaabPmuH5L69uT3TjLCIQeauIERlYeVkic80q_0VM71Mmixc1mDrqfDlwOBBN6Sm0GOawM_Uvel263a_gOCSIz2ouekq4xss_zBKyf8DOxU6d50kQhHQ__ZP6dvyhWK-uRyprJ3dVFPRdkazhR3e_kJ5_jWq9YyF7yxaa1AoGfTmEl55AtTZoiE1jEpQzwD9M2GXaHSiC5jpg8FDXfiby1236FOBMJAYhrhDqoFMQOsoW4Eq96oJyA3BwxgdMSwAhqnyiggCXtYJQp9IDVHfG7R0HtQwQtfHGT41Rf5JSI8mkvQRlzSTG25TrX-yD_EWLEPVbUrWIiWC1P7h2in8TXgijaDzsg7CPE8AGOkoIjpV21tWE9IqKpd5igSSVD0r5dhtCDfnBYpdV4sONjU"} ================================================ FILE: genesis_data/genesis_txs/DC6gmByeCki7uyXHJhX_A9x3pkMgmJ8Tv6wDRnh7vGs.json ================================================ 
{"id":"DC6gmByeCki7uyXHJhX_A9x3pkMgmJ8Tv6wDRnh7vGs","last_tx":"","owner":"pXFY5XdQ0pZRAV-YAydPaxF8z7I1cZMRm6Xjp0fchrVfbTCZ6W9RJy2utdayEdVTqhybuyPqPIdLjhofihuPhCO8XX2baRxPdnkbimm33W1k_F5XfUX6quaJJjcqRM6DDXi4r9Bt3EN466DHALh-lEyFiym2GYI31XxTTMRPNfALs3waX2CNjlcYTuSuTykFmoy068fe7xQyWYMQewLaIjAhSpRIhtiX6CqnB5H4I4qscTOHrn90OrK9zDGV6QaYbRDyUVq-9AgYSg67SPf3tXB7kGAsfwWoPciFhAmP9yk5zARbo7AZMNmea0lTTCpdD3MoPIi3lZyejYoN2cg_myc6aqS1_huXNtX07gDf3jQtlxKOQkZPRR35_qA_u7iquRLP6aZew1GcW2c_p70PD6Q665OHYNB-5hTrRAzJEUzEq6y17BDIHiWNPRfMQ1oJxv-pOLwyRgdT2EG58qG0Tlz2FNFtTJ-8o9vn-omRpse0LksrO1PD0ivK1a5lOvv5GXbgx7cVJBrs6oeD7U27HTazrno3mwpoPrcGh2slNonsBUtUtMZaEIcLpyeOOOz7PyKXml8YXoCLA7Ag8E7u3Mj4aW_NdKFkQYEq3qyKV895vXP8GAm8tyuFdAD8GT5zDvilXlm8komyDHZKXe9WhUnCX-vodZAjGSB9xXtx0A8","tags":[],"target":"","quantity":"0","data":"SSdkIGxpa2UgdG8gY29udHJpYnV0ZSA6KQ","reward":"0","signature":"I3isCYmPyXRf_ZmlbzRjMN6l3qinx1cNH2TaQ6ROGplblxuvw2fDo-mvZ8gJOx-ne5FfteakrriPavJpMhgD1CE4w0HTcdXOu7zGMcF7DxAtquvPfR1egK-l4Np27ORpMQS_DO4tl21XCUMiJucJv8_7zQb0OZJhsjFNn7-E2opYzulfND_eUnkdqUFyLSynSpedDQZvlpkkFI-klLjV3viNcvdsSMvdcXRh2Nhq6Ixluwhvx1Esd2kaxyu7cnPAsJdWMdZxDfseuu0TLB00X_tWaQIRpCcVNB6D8VlbzfNNONeMU-quB5oUNolrJwe-lsQ5fsUHNGHckzEy17dLFuFKraUCKmHVlRFaCAPcd7h39XfrvSm8DPwn3XKN3m0-vSl4U3UHLEyE0cN_3vUe_3asu5828ZS_yxoOmMY7VDFPN_npalbhSNrUG7oO--HEgTuuHPC5H8BdtC5lYR1Fr7Y_AUdP3oPj3KQYDWEo6AeAd3Sxj3s3gOSlC69oyiXUhWvoKx4WXRrt077OOg5rpGmFQHkp8iBwoXO6MHgDo8v9fc_ZqLZ40r9MROl32ZWGK3XWShOK3Ai11yri08ur_Hc-NmygEzFmSkFEDDQzfuiI3iWL92hV_nUPz0M8kZTJwG4pfuBxIJxt6H9WVSpwIKXNsfswYsYqupDJYfMsIoQ"} ================================================ FILE: genesis_data/genesis_txs/DDrS8BD0XTUVJt5E8kwisVTBX4PBWp0lCnSkSD3PJto.json ================================================ 
{"id":"DDrS8BD0XTUVJt5E8kwisVTBX4PBWp0lCnSkSD3PJto","last_tx":"","owner":"16uZgW-85BC1KC_eZnsSIrYt1TPCcRFh7sobb6_3ol_sE8dHpnCGOMumAxZ43kdlReZF1rCJ72rJZYlTY1ENQGUfmux2NItHpao9Wiqvr24JLNDZMQn0HgUcMQNk10zUvMBgQURmahmtjBRDmaK1F9prPuWiKxFCGAbeLkNdQnEu2k57UzI-6fmJokgOJP8oSXou_lYs6JSn4VG2s4aTm5pOEEd7Y15e2DJ5QEJqCHQYFP7iQdG6phTv284JiZtmWcKbYn6WgGQ-GUkCSexJ4QEdzD6dXKgwcdUh2tiyZy-zOBhZfUo8I4bQuiYks6kMLAj2nWdGxM4wxN9VrD1WG7-bnv4t-GNb2fzMRdEY3BwJUAOe4YC-0_hjEHE7PXB08k8X39NrCMarljbWrxJEXTIaH7reE0EQiknVxlzbVWQfbZCBl5JzKdNzyZ9E312f3xmtwVcb0irVNo5sbIBK3mh130vTNs2fkHRgvSmWnKMjd3Q3vumWTg64rDOTKaXpXNL6OTR8b4MqwTG4mDmk_H6wQCQmJ4-KV7YGd8U7lYtqcegivEVhmS2W5rTNRlbc-nIwZ87YBftG3ktWzbe_8rJYEV9gfv5HnYfZUC-K000RyKP3UxDIrWcSZVR5GERKPepHTsKrClOaVSjxS9cPRXLfp-uyCs9ztoN_xCyZEVs","tags":[],"target":"","quantity":"0","data":"UkcwOA","reward":"0","signature":"HTEWQ-G6i-gzGVbrAc2e55k0zuhn86kkdlGmJgu2ZjQ9yCeVENa5g39Z5XA9GsLcCSxxgsTDblB9Fk9xOmtZbNr5vS9APWSZmDx6yADlTXBfjoO-XolasxHOLm290zPfmC-_4QJotLolvImF96V5aMog5Z6sGnwHXW7aeRobfsly_oHUXybvaK7RQmNGaO9BjX_mBxiBlbpTjW6YKI8VWY2AH0zXntXRlR4PPFbeJsEbUI8W5RJ1SyG6Ds78TIO70kYwdf4kLct1AP_Fk9Y6QJ4NZIz7itRCdFAdsIq-shsx3s5mJTaueJ_YKxG5bd7xP7zWUmlNgkW501jA4-BRvZhTaxVZxLhdWGJwjxbNlnUtvmFfomsO786_UvDrQR-143vZQqOoXwZtwJQ9qJcJQ2c-Nan5ea0VAM9c8Pvs2eRMEd4xSRN5etSFK6R4pNTNcUvQGbfM9lpKfFBSvpcARgffSU5zcx9gAcIAx375yJWH1tTU45ESQXDGu7ErpfRADEb5VHr4gAP_1CDfHN1lcOiBfzrFmaQ1853vBTiD2lSWdeq6Luy4wib99r7OZXldJUY4JOBQz9Y_JlgdBcBKDsoVNbjvJHRwXbQS01z7Ipu9LomjJWrXLMpMledp7Pbi6Wyndntzcr_WiMIp6l8xXtRdWr46DYKvYWZll8WxVhE"} ================================================ FILE: genesis_data/genesis_txs/DJf1SRoKaPo1h3F-7oKIMu4A-r9dXXMjE57WQilPdTk.json ================================================ 
{"id":"DJf1SRoKaPo1h3F-7oKIMu4A-r9dXXMjE57WQilPdTk","last_tx":"","owner":"zsyFdyHVbaXMKt2myMvIg3_Rq4dMFK1HuZCI2iA6zAQRmGMfVE4dDwsFUCjy_roE7iDD6PeXY0DAF1Xji555QWJsXeiJhnVVzSTgtJqxW3RIo6JJYo8BmHuvkN7SqiawjtukEPUF9f8BMigiaChV5wte34aY0SLb4y1R5_nSlSXaj3XGuRJ1fWwZoU6bf2nAXhvJG5W7b0NC7WFlgNG5VZR2g3N6l7gzgJR9eAA829UHEt__gECLY1oYLAr4BuB3LZaywyfOa-kIEp_LhZEvfcRF-JGU5Ew3iLw-XZJVUx8o6YYn6F7Hce3O4hFKgsaGsGl0XHrdXYNBUmjtk39q486wL0ZLpjdYEC8nvFbCT7Z48q6becznyaG5sPKxdnkvqn5ph8zE75kWBG29xAT4nxTCERk8KSys9qfrtUZ7mLnTT8p-NYYapDdOpxc3pytFT34Thp1M1d6kjGOX6bemuw1clOZcXAghI7vLkM4wuymboqlB4fW4wG4DpBZ0w3gp7hlT43ae7VLLCxIM8tyJ7WEHKRaNzafoecOmV7tBnaKZgAprDPzTMJdbPRGhFGNRTITJONQ8NSEBSPnlIQrk_OMJNfsxLAhlvhK9FsP0bm647ohpyowyEmihBqpnXw-_A_9dF5ZSoDowfNvvNcr3HWH5yeAGb9zSqrsqyJLX-1U","tags":[],"target":"","quantity":"0","data":"TGV0J3Mgc2Vl","reward":"0","signature":"SJUNJMCsD8fh-BRN3IDVhKXxdpMqutvfKZQ8metGHzCFqMerqMO6YYalZy3uWbuKJZTPIRR1zp_CQOooHNz0GB3vuOGg8D7DkA_JeY_-JGwAuC5WR2OxBQoqtqvSNzHUipocG19rGbjJ6F0FeRBimPXQ7GPhFyS5e-pYDwtjzwEvhGElWcBYSLo1xgsF-uj7smrK9uVaQDYGvkumRq3_uMBzaRvTMRCPJl9PDDgbvBPrVZ4bPJlMvTHaBBzZ_0NpeKjUZSYv5LovF_kHlxpDJoWoi3tdL4CuoSv_Xn6Q47o4iHvbJ7wAjK1_PnlR1M_srjpnFOTR9SGFeEZgzW2Eq5EL_1S8TdKqSreNyUoIBczw1J60HvIHWlGEiFY_5GcqiylZIlw2ll0dd2PNXRCrNA697aKve68Fq5IOENgRG3O6f4hYgTpNEkAE2MravS4_pJwo6KZr5JWjBZbYmvoZEO8nL860j2cY1dt0aIxUYAclRlR2-v9zXOI9J-z_pqH5FhGJhivICMOTcaYdhWfSag7tkn7UYK5yAIRLNoXZ7MOIIoP-W8cz2q6KFRLABJn-lnuENNFW7r_59BQvI5S4EJFEqnzbezofsBjaS01q-LHsrwqRLsT8A6qmmYnISSc3EK7zu_YICmagkSGxeczYyZaFSKQSc3f5OjvW9qPl-oA"} ================================================ FILE: genesis_data/genesis_txs/DMtXbcR_qHwdYXvkuCGOQARs_QtN9iWPw4x6TTaWOcw.json ================================================ 
{"id":"DMtXbcR_qHwdYXvkuCGOQARs_QtN9iWPw4x6TTaWOcw","last_tx":"","owner":"3iUmHTlxrbkj_MqShxKABrCeKBfd4gsFUHYLiDNTkCl_JwUyrguQ8mT_22AgQWHXONaBpLJSnJ0X-0NcFOQ184lwIIqZGOlhMY3Ti1sHq-MnflqVzjSGEeSYW-Teu27v1ILfZvYZPI61YWpBVosxGUTOk7jPsvmrW4WFOtnAS7KRytqRPpy4waplu2yEy0s-ssthupvVU4Xdmi9kuqNpzRR041uSevjdvLUdcf0lBct7FZ4j7x9-fJkjf6GjcEJzFJzX2bC5-C_JOQmKoGOubkPRQHxo47pjyB6mXa7ZenNHRvLNTuD3TlEK6BqJlIobgda-LJl656ahzhOC0Ph2CqKu3drmsMmv7Utr7XuUgJP0bqxtMwCslAU2w7m5FzUMFWq_xxx11bD-9wiqxAwwjoCyxoNBAkaiTldjxxKzGpikg4WZ0lwR5agAOwv_2YgWLY4gWG-Ufk7BYCMLbttc9i7ODj82yIWKGa5i833dorxpAaqUGcmc6uTXCE4U0bFZqsbMOOCufr_MJG8rg2s-KnKRGl3kSIgPn9QRH1E79gVBU6_paIwzZb1x4ZwFinIFvWXk-P4J5zWkZ4_8xJmKMwZIIVj4dMgH3J08_G1O9LTOTL_B5Rj_rmNF2DWiLZ8x_EFPzZLG547Pbte2d_yPp_V_p07KM7RUjT1iIREOmhU","tags":[],"target":"","quantity":"0","data":"d2lsbHd1emhlcmUtMDkvMTc","reward":"0","signature":"xfKfcJrEeyUq0s7ZRHfbUQ8mJcn78FjUqg49rkpXiCRZWGnY_gz-o8yJ4L8lNeFBPOv4xEz6ywPVpZ72ypIngkibT-JFVe7ZE8mV8y2JRACYLtZbDrHxLseCDHmwS7aXr8DQlUxoFVOlz0wCtvxO9CDV-JxXZN-OTutbZ4PNOT3xVc6McH_9MUfJGOcV0Klu_ojFwIKjDqQNPk3-4CiO0yx6zPLPpeuZ8-s43fxnXMsT9SX47RCMDrZvxRYg7Md4GNjkzXMZP2gV9fG7l3cR74FiSZ73g9BmCQVGkJ8ueK0ge3KBIabfU1bUxrNajAJj86VNuHxrWtHcE460Wx5vnBynK7L4GbuucFllFc1P3LnQqnWPoVsqCYM_dJMIjx42XgGAG7zp_TVcyu-qs19XlUYKxRHB7fGz0mdQTpbpmA_OONiQImQepEXZjy2W4hXDz67IsO-yGga-fEtccJ9mCMj55o64vnfGbKVB8oRW7-6DZdLoKZ0ZaEjQ9EY6YqTu29wD5sRZeOdW04UfNUMPjJQCauvpAvYqEExRpcqVIjudh2oAjdZd3MLBB_abApcrK1VN3OeFX_KK8ahDEiNWm_uNLrqg8oYQFO5QVlYg--jD0gTUONaPWMUkCz_dAGjUtNXXfmp1B5QI-8k1GLG1j5WjQTv-ykTkGDDwiWywd-Q"} ================================================ FILE: genesis_data/genesis_txs/DQ6WaBfLEMEFhKoMoutuPyO_zFg1hWTDXT13CD8n1nw.json ================================================ 
{"id":"DQ6WaBfLEMEFhKoMoutuPyO_zFg1hWTDXT13CD8n1nw","last_tx":"","owner":"xOewibN6O2c1JOGfdxWDruM3eHQ9FnoZXP4sP5hWPkjjlvkbMluZGL-hvPEJMaInTXwa6iwmpoIKSqAW4dwqC_im-RgHOAvgINC1IaBREbsAYRHCED27EJSAEfGtq_HZSNw5GuiRMu_1OrOkkBmBtE_NEJAbd7113cBzEA-J3xjZIzsCSgixxn6Uv_WCTKzk1hWP_ulkUB-4hsUg9pBIbbt3i9MKhAv4NRSBu5gh8k7r271oxSTDaoqlsUJyN1yITCQh_EuYBlgyEEgDnARItoQ7jWlvgXQvuhI2dxjCb-IXpwbpBTu4YmUZpzIIj-A7yD-9zJ2k-E_ToPY6xxyX5zZf5e-2ZVHdsIGY4n2E5_RWH4t8B7GGZDg3KvhZZhDRJ-p40oMA_hp6waDF-pkoL8Da6vvS1jaSAKdWBdytlaL5lV6BwLE9SnAdqSfI4nDy74YSMT4CiPHneBwUNiP1kjYc7KsVKOdiMUimFHdL3sN2uFa_qEG-7WWgHgFbrqsBzOGsKpmZK0eL1P6pQQxEKI6b-oeuJZRTQ-dSxJc_BUWFJKx4Sho7wTsSKL0jOdOO3jU-vjwmgbEefyvCLyqKzWQJZKCv5qJci_J9ZjoWzL-nWXGwiN8OIBMb7SHbCf3Xx2krvwqnNETKAGghzlfEGzj5_JT_eg50cdRnRC8EPVk","tags":[],"target":"","quantity":"0","data":"RG9uJ3QgdGVsbCBNYXJsZWU","reward":"0","signature":"Ked9wMPw7cgjlAalh-m0AvyeoflTmAHHwi-meyDb-6sCZnAZv5g_7duG47oSiqqx1cNXLCTVy_TCR565mOvxRUaO3MnEdpqTO2UW34_iNmA5J96ptWeeW9a_RWNezM45UdO2L2qC1fpOxZ4kUNHYsHaq0GED981f-ghKyp7Q7txcGoh3DAEjhJ3S07W4fqhCsdNoPZmU5JNwxaf573d16PXmf10t89MkA7WwUrgLIRLaimk34FcQMeQ3PuLVZGBswpjraDPiIjQz2PSme2ahPmax_xDjy8Sm92U7SkQOlqIpdNLEbY-svom70pS2EsWA4DfYA3qHFg83NJ4uZJQM8VQ0vwc0mxGgrCQtZ7cIm8xp5mVtzZbFB1jD5qNsbRnF6E7rP-mBpos1i76fxHHL_lfdQvz5v6GNUTGtxKAfRFeFIzqYLfLukKZyFezXjrtwXEa_ees8YR-wKqVcZXwXtusExx2OnexhuztPrNRrZR6AfhAI_trO1eFq_PHJYe5aaSCHp28XiLkSHYVQQk1eqOJVZJjgeI0Ok3xpDdS3K5ZsVUdoyv_YmgcDiZyf_pqjO4SCwnolX_hB21eEf9bKBHJFBajLpaa4wSC7Udcy8TE5IDIv9L1rgLqa6TTPShmqfseP3EPxN2odVgGcCpmVrXn340D3-x9ZycskWf2NByM"} ================================================ FILE: genesis_data/genesis_txs/DTGNdsYZDXoU1nE82yEjG5ZEssxwUmkFTkM3_i6oSx8.json ================================================ 
{"id":"DTGNdsYZDXoU1nE82yEjG5ZEssxwUmkFTkM3_i6oSx8","last_tx":"","owner":"uinD1QiU4I0-KITCiKjAPm3RSCFEgbdtURIE7m-E15mRAVupVIHYk0JqUshEZPAXchf6vekaQcodXFeG6r1zlxJZT38WNWK8T7Ksj8hQKRXdsBk8essYD9fVBfbUgwlczIkpkon_uBZEDC2TLzy7p7c_Kyi79KEUNJgkFC7n8sV_btOiwewswF0tlR1iHa9Cs50xQnTMx-jVa_LHr4oUwpgMsJlBMU_FKawdBMLEBbyWs8rmzuBB-6gPLqhKCTE2qVWBRXX4_QTsKJnUA56ApTfel8HuyBU4zCvYIae1q0rYagozeFAhudzY-0ge19qU2DoHVVZG9QReUexz46sNcgOwXEZiBQ2pwNF3nqo77ZAZOKKjsilmAA-yuPaz0KEIpSNthMQRWjcGVnkV0kooxwAN3k42CUq_wYKANuh2Jt-c2mWxqz4JWkHABukeFh7I26msIU4HyZ4uyaLWtE3IdP2hRwAvxsdcX60OK2zqukVR3cp6_C8nE08xypNZfbHIBYvp6MEDdC6yJ1_UQEPA_bEvKlH_x7_obYrM5wW3S5a-1UsvmGKTk7Tjpz7MlQDV8YXY2222ePdqk0zl01jpfhGj8_J9cgFkH4Sx8jAl58ZpavN_tINgI1WB6d1kGMzETDEGlLM1iTq6vbtdfLNgXXFCqn0PME8f-729k7e6uRs","tags":[],"target":"","quantity":"0","data":"YW1lbGlhY29ycmVhY2Fubw","reward":"0","signature":"ee6cfCoU9IzUmX5zuVRSdjNDVae_zzWXG7rP1tfdAOwysp-qi6k8dSomPdAPvk9K91jxfxJArlxfhvbhssJ09vM9BrBB5mUDLaE99Cz1G1pcihh-oEqBFgdLLpIuofVDlcqhgBl_4d3OiSWSH-AbzCRH4b66tuE0OP2Tl5CJprSax8mdErxNGFt7sceAeRvZcOdoCusqxeZ_dT14OEJq2Xf6FWMHDYcROL2Kc6eipe9kw0UcBJrz7UeF9n57GDmrLGu8OUla9bs-w8rdCW1W2oc22_T7EZd0ei2AEvBCkUt2QPTtThqLvgXerXBRcw_jHadhR1AfeTyFI5F-FL9QMMh565kbaB9oXhzkh7dW8toEQA7REBmVOUucB31gZwLA-qWsRyvpdYrpLHF8alvJu8Dwj75s-uniBPLj84Mooa51GCFGPp6RPeqFGyS1IkQzEC2As6KLNa0NTb4KAExC7sEnyDFpUpPArVXUHSlAOrbizKnvwz8oZII2TscAJFEitkv7Uv5FVNXXR2EPsEBqa94sKq9pvcDYJdMLGl_6Sa-dGq39kxa9PVvZz2-HIikU2O3IL9QembNFZsBLYKKfxMlBmSCk1b1EKf8D_UgX77EbQjlpAIIJAg1Qv5Jn13uunq0SYzshhfuH2AIv4LZSBmdGY_Ttwao300Sht5Mu8-Y"} ================================================ FILE: genesis_data/genesis_txs/DkBAprUInkCbFa6A_WJJNL1z_PnhEavvyZtF09lmyvw.json ================================================ 
{"id":"DkBAprUInkCbFa6A_WJJNL1z_PnhEavvyZtF09lmyvw","last_tx":"","owner":"oaNKpspIzu52y0fFWAJGCMQRwiOrV2IGWuN1CR-euNa1wzdya0RaF4WPcDVfY6FQoF0zGllA6p9pPPGozynll05DK-5sPXlom3D830qennb77CIwAWedlpnnXmMvyqDxdUNAK-gjP3Z8tJlqiPScCYdEYThIovCzDudswZsdeGBxX9qHARLM3jhVrgIChdFaF30Sc9b6GjMpKxy_ArUipdT5UtcAv-Upoe88R2F8I0ryXzJmMyLonvDrr8AoA2Hwf3Lsw_hH6MJ9MD0eV7wONP7mgRKFiNK57Jt0PUp1crDavp2Kbe7oA0Uiq-4pYaAqMT_BHJU-q5YGq8oSamaH9OCjzZi6Pxfi8lKA4CSU2PvvtiNBJRMFlGJFqHjeAsjhipXJCLwzYFfZSXWK9uonsoTDOT0vbR2m-yDdxwCsM82B1JxyWbPQLyUeQoLjMkh_l9DLeERj9Cq7PtexAA7ZJNgeBjLKWxjKHZI8AtrBZdTD8Lwi7OAkqMrQlrDyc2gISIOFsyU7hVUdwfLG6H8fQS0ECOoJuVB2xwbX8Myj6xBjUl9uFIeskvX8sabv1LdaadvKrMnTztsc3nPI4nHRXYSyVjDS9oQSytEi8QjZFrc4rQCmn06fIGdw2Wf7ze1_nkAhOq19SxV2P1do8T67QYo8_6D61xxRNrzUOy5j2NM","tags":[],"target":"","quantity":"0","data":"Ng","reward":"0","signature":"LAmYMF9cWPRjj3u_GWvpaonFal_lpIZ3_5VKJ72Jn6xkxEbBh6SMMXtDVmzMIjhHz4hqYbHUdanvK62ZXbWy0qgsTnZspPpRx7Irx-RjJNorLU2VRBoEXBKG9SviwmPjYpwRKp10RIsLmjFuE_B7PQ6yJ4hibuKOkDA9R6hVQT2OxLCakxKi6EcMTYl6gEkT8lvQpZIo4_jIl9ZIvh98BMAt6AMoLg6bwR-4aPs5H6ZTv5VLbOzeHAoZVHokGc8ZfRDsVw3NQBfz9167aYN5p223X_uePouObEzvDlE_30s8SuzAzPL0y-SaRsVW34FnggKB6zGPh31VJD5z87igNPasUtd6Th7H665Pkb0PRz9TjxHPhQV3A71bQH6GzAzdAPaIuU_8SY910k0s6SY3TTI34A8DcmYV--g_G-RYsa_hdwWzcZ0i8Mqk8bpLRYY7IxoDfil8xkiwWAIAINf7qRs03jdalA2fp8WDcrTt85b5YSBE6RWWviVIw_lNJwXb7S8ry5plXSXAlY5SPGSGgdHavgVbIInvzfndjo3JRiXbVnv1kziSjWoQ1hlOtIjMJM2wNOXYGwANPs_22OOLt6jv-PX2YMFg4k2JsQfopfYOrKjjwHKY-3Nu0CQoYGG_l2x1vc0jng7VnBX5waKP0BIwl7HaTeBwyZvOHmRn4eo"} ================================================ FILE: genesis_data/genesis_txs/DpEoi9F4g952ajGuT4g1HWY-xndyE77dn0VfdNXkrC8.json ================================================ 
{"id":"DpEoi9F4g952ajGuT4g1HWY-xndyE77dn0VfdNXkrC8","last_tx":"","owner":"ynKJ4cFZw3ucAQjEotdU_l4wBcT9fI-eDInayfMWJ85WutgGE_F7YpYtOlJu-UGAVDFgbGHjX4iEDqonr3Y6N5SctRYNWFVJhifj09m4fsp0COkA5H5Es6xLPFpBFkKcgVPNd_3722MnmrEN3mkqC-boRsqFOpUbesIEARZjpEmC6HEcm6oR4FQXTpa1RReSttAcJRYXq7v5nTazXHAfjoMe0qGJ3kRWiI9qat8xIxo8JseEU6AB7EYRxAH73DOtEoeubuTDaLs7BitwQA6pHOyNfAoPeSwOiqqT27t_Gim7OBiV42B9Xx35rgm3jU2rLoyY_VIReI8pUAMs4ETdP3O-nxvOl2HZL5SGEvglsO3XFFMiXobhTLZc-c7WBOkaykO6H9FxEYzQAmqg-bRCmw_zz5ENG4aEunHG_VrLBKILk216LB4ps6ruBTaON6IKqoY1NDLjPUrTT2xYQoUVxdNcdeGtaH1LZjz7HxlDKjlc3sqh-AVX9pj4wKTe8BeULalOu7SdZHEQv6iCQiDHWeAzW75V6FYrFJrjKgXoyilS0zH_uZt9ecgExV7iKNSTlkxzkuTaLf-GCjHgxnpkXnyOggcnwJMcvRiDQhOmQiCIeHS2WQhUCtdm393I-gI_waxmLNAoBVhWHTYeaCsL09NSlp_22jUjDA77AVuaopk","tags":[],"target":"","quantity":"0","data":"RGFtaWFuIExld2lzIOKdpA","reward":"0","signature":"mfaAPS0TsEGTir3_E8xtIhcOTMgdSNfjZ3Wf9HtD8fjKT5CopDX1rsKjmX3oRU859T6nwi_FCRN51JrPiImPoLlJYg4nnjq3ESsR66dttfXTVfBJIX5kKOnmVnK7wQRa1uejQ42O7_gc93b-QVKRPx2sLQX0TBZ_kHhRHsMhfupnGXSgQ4B1allqL7Fmdf27CaM_KsEFRe0NBEcubWwCqOfZ6G5PDaeUsPyhoaUEnJgGPu7Pnm_vR-Ugs3QHS3dFJ_YZzO_ft37U6OhfACOMygLl9s56na0J4EmjjoQzIYNCazGxieKXTKK9XMMgHwJuInFS1Ck1gAJd4BP68WFLV-mvYEzQSvR5kfqFPbkVIwVX7DZMHXAlwSgryoQfv9D2sFaQQqFbTUtiyDR9eUUUGW73DQK59_pJRJWG72uX4l88elkXjoMmAVoUTung0ENki_pEkQr7xLM9wbV2rvkijuczs2iuJXx2K20iIk4rx-6yQu6PrTlxsCkgC2pCTIA0ggjP24QxWWsenqgvopfpGqfdLDiFQaRiqBC4FcCkz2jSFaKlsDokzOjjpBa69ryHv2KUtXppuWC0flh3r6KEw2Yxsf86uWSBlh2nr5p-FEAHx2bOLNoYdlb4jOzK6LY4BWAZKg2HVZpyQ9FvyNiWOUugwSM-YCyb1cvnvGGQlJc"} ================================================ FILE: genesis_data/genesis_txs/Dxrsx0xuPVY7oz9yHbL6wOFxo6ws7ycVe778C2bc9J8.json ================================================ 
{"id":"Dxrsx0xuPVY7oz9yHbL6wOFxo6ws7ycVe778C2bc9J8","last_tx":"","owner":"ub7SsqUzHX3RbdxLJS93dsGqiSe3ZrKMgxK4XhFT0wqvcj4C_0lJzi6L0EMIKeYuFPs-LYv3qKJBi95pvkN7EKQxqaP7JguDia-LD6CbqvokOsD08wpXsCUncZy8P28KFrcWmBmDiLInL7l05LW6iyx9a5g7va3hGVAb603UQ8ISDs8ohrXw6XiPk9VLsZBtD6fxGmWd4iidZ-TG_kp8ZZCrFQ5y0aV4FQdkaHcX5auqTdfaz1I4rr6Taq9y7ajMX97YaUiISoSHdsLIxtfkGNoZ6kv3Bgdwr1WIQxQuHpLYikfAZi9hZye_eFcUTuktmHp2FCX6ITRfRT2WSN5akBlRz1RWumTj5aD6pNmUdhtcVk0BJisdVnywH4dF-nC23xyfQL_cpmFaqoY3GNL1lNM9SZRAPeaQI9baMYyTBwkNsoFxsB5-XbqIPqtCzU1-cIroheHeJq9RitdMrw9Lpm3ph5yOFHcsI4LXetTwqmfiv88XlZhI5cmT5dwJ76l8irTuCch5ovSi83ePaig2Jn2YAHUlRXrsa8manntL2Nihp0fsx7_DMFRnxR-dWdjG__xg3Xfoy1Sipm1LTT-vKymshy90RigKO4L_u32SWF80qNWHSKKXUAU9TOBO_yI7KMupxoCfyZwgybVyDWFiGQ5VFsrlRBaXHxTSyBOmSwU","tags":[],"target":"","quantity":"0","data":"VHVsYXNpS3Jpc2huYSBJ","reward":"0","signature":"AM2hmlPaVJheE3vLoEGYwiDFtL8tdYSP9589G5CifRdiMD4kH9evU1KFHvP3Xj2LCuLiHBgWVarUtP9U5YrbLv1MIctp-T8P95pm5u2TAx53nuSbocvSnlEvMFKXr6jh2_8cnQV-ITH40qAVz_sZR1yzObRT5Goph-PSM4WX0ZGeDTjcW9FWFkqPuLrD60SWee4fw4fQCBdxsrH9PS-QO4Womqx1pL7SXz4Ribfpm5B97L8fLmUtKrrCQClvtzJlPkqbTDl39NzTbvVbc4Lid9npWxsJnVHwURwJzdrhuIKBxPJafuO5d3DfBWBtP5gW3zErywk_Wb3O0Vs1QFsqG1TivtcjdxWFfd9y9tPUW_9wJojuheEMb4kqMu8LiKzD0uGfzFiNuCDKDdwr8-HCq2gGbBkpOHYsjDIl-7kaIsaCE50qalxP4b0zymmvPRouxR4MD3OtFNs4Wsu-Y0UjxJNuNIdh4dmSYBZCZDdhOLR1j9TMcoiHA1eF459zFu9L0wBRelkGkYXevZCTe66Bcp-p_PKSLIXeVVOFgZF-j8qPNq-8rlkw7cAW--84_2hmNDzv8olVoLCN6S2WHlRpMmqmCWNLHDWCFLkfcrH4RBH0R6GMQxVgBoyUms_xHvoZz_xmxxmzmWd73-zESoZQG81dIB3E8hA4bKClv68UKa0"} ================================================ FILE: genesis_data/genesis_txs/EPZ0hBh1wp-7T4JED4v6DOItd-9MNWkRfbLyizDLBsE.json ================================================ 
{"id":"EPZ0hBh1wp-7T4JED4v6DOItd-9MNWkRfbLyizDLBsE","last_tx":"","owner":"sQ2WmD_MpIXraVDqnEAtYkr4_8D2mR_UKTvhVAJH_zCnPwRW5g3xibFWP4A77RPPuByIm7g0jo2O0GY3GXXMiDHTL8EMgDa3Q4Scz5FX1EsSWCNsgrx0quCaXaRq_KJiUQAaA1DjySb0VkkJ9VKwSPQqkFfNKNqk_Gfi_QBIbDu90UvGYtYGVTwUEL3j-XTyzqv5vyVtyJuscb6XPDbNLRCj7jeFTI9hrEiTtoJsJ80l3Q9SwufuyJMlH0zwFN0zocNEa1Q8BXzhGK4hqw9FllSyQzsBA_qUS5LNBiqZdm0G7Rn7--rW9VFxrWMaGUeVcUBcJys70M1wA7nOdu5B3rLIg8lAY4zhqodFtRq4XYFaE8im4RPNbjvlmPYjBRWCgGQXD68AP_FzJBG8gx9UpdkJg9w5BV-6c12xqM9i4eD8H-HzUfHPZ-pY4B-cNRHzKzGwiOzdglXM6iljLfxIcgPkjztMoXDyEFzzuzyjdzkCtkoYCTbZdTuu13G3OKktKECFv7Jt84w1c-tZSyeEEU_4fBs8BD9LLt0DRanSLSblnze3pKdkSU9e5mPYEui3nGhtn-aJL3uvMh8CGCsNWFlXy5CRayQxKmR3RxzqTrFXQU2_aTU6lQFFfBXe3ll81d8E9MWRNZLSYHHvpJmxbRJghIR5yFzq-5_szoaMBJk","tags":[],"target":"","quantity":"0","data":"8J-ZiA","reward":"0","signature":"ZTKaByyKdlra20h8TEwDhad_d1NqQTz5Eli0pN5Wery1u5iu3LIb4UgJ03-BZWw8qNW9cLIJNjmK2piEKNV482p88nKEn8PxXnL7ZW01-cJrr-jddHYghs9amqZhGMc-8hxwxXKtfja2lHSk6NDNlJ3z_0rxgt-ePHzgbnoYhqrbRQ3s4dMCfKFP7con2-ECPeRLif7yWAQQhx0gM9_-I_qzmY4Au9nOyEcfvDwXisDsM81moiUm85FdhFulXwHCNEf9lRCt9hvzy3-e9RXAnk0XDro3uvqIKab8L038svpSVJFbyT5Zc9Ba80nGBSFCEiQNzlxxDnEz1iX0B3SMySPoIcTm26_Qb8voRoBU1T-bOUiVg7UiRQXkM3wvbvBNztRUSNgaNFzROrVDExQaXs_wJ9oiQ91UVq_na3Ja5zfGZPqYn6t1MiKiUd3K8D9zGinN8vpCcPtl-bI4ZRzV7mM2vfREIrnOZllD_zSHZSUaaeLj2Sa8xDweloaQRA20V5PvVuMefLkmdgQL5p3MPye1SeE7_JV2TFjIJX3U_tnILyDz0SMqqsAt3cgrwgVdUd3paENb4F82haTncjK_lk8WDwo-4-QtdTxIdGnCBo5e6zx0Wa7NyLg40B-UOdYfLNjXjXk1w3_-yGn7fLDy6FIJ6HH-qipNuOLWQMWR3U8"} ================================================ FILE: genesis_data/genesis_txs/EQh5rYFJ5Z5yESi4DIuvl2n6iVZS899tA6V6rf2Xwhk.json ================================================ 
{"id":"EQh5rYFJ5Z5yESi4DIuvl2n6iVZS899tA6V6rf2Xwhk","last_tx":"","owner":"07mPgdLkcj8M1iv-I7DiZTZNCjrZUUoFTIUtuDCgodvUGlL5TLMdPH6eMwRWIGMMVGpkW3m3AiDL7GjC0loBvPWn_onGK7C9mm9SWWaKr5gB5SDtghlOgCvMaodeJ47tJFQ3KdkiKO8Gy5cqB4bpEltAIYZYJv-OOkaVW1zYhnSSA21DaEGleHHEwY15KRMn9uH3YWUmBwV4As-JfJnymTuqhF-ySH8Wk7GTq3Lm8I3uhm-npkSSwqI2SayDTj1XXs3oF2Tue5IfMJ4LcgzfVRLA-pxapqqKP3lWr_bl9uF5V_3wU1NG2F5zGk2CFFbgADOZ7Giw9s2JcPynQ2o9tvhhN4t1wRD2sc3Z4qhLuraq63--suX77YdGaYDlV8n53FXrPo480FEsvX7GcYcM-OBVe18IWP9REErZ2OM3jS2Bg4F1lXy6IgYhnwFruZwpdVxUCubfW8DYvcc5ArWwZTSqfjts_FoHJ7qEPKg2rmAy0tZtZ8FC8M5EyOsf0l42szaaWh3L2bIvNIoci04YqR1MuWfaQmd0bukdFouTbTOPhNme8mXPQB7aiJcad62u0nosgMTrkOYoKygWBxKXsxIHW1Rvvphr-a3yFbwAcXAwFSCd3aS4yRH6YEjK6_Op8qr5qfCwb0TMkyGDBUvfTelFyQybjjSzdR44T2Hl2Ik","tags":[],"target":"","quantity":"0","data":"QkFSVCAmIElWQUkgMjAxNw","reward":"0","signature":"UM0v8gBlYxpG-4n8uiqGnCCpWco6tC679GomDhMe58mf1422ZpfrwAObh5Rbtz0pTU_XXQ7sxfeut1HfVs01pHwVFSChL_ufsh2G3z83czZz5E9izlE9R65AGIGdvhNx9_qYTB59ay-unao2zUU0jICTfWNWsf292c-DC6_nU0bLCg_4Ca_6Yqx9v5HRs82hI8-Ynn_a4hFABaX872r1rVhKvxo9W2tLsjoZCEXxSf6Lm1_Quyfc0N_89iXhI4YsFbjwxJfczj66tJEc6fZSDvQrW4nvjbmmXwXRLT8g3TQ6XjxbAotoRDhIgCZ06lFnzwTQoVxwqhmGXxmZO5mJIIuNjEpsnvthYpZspN86uDS3uU1fCPVNhvBfi8V3eA9lK7o0pc6Qc_YKpvR-mWvHaURFbzT3qWlBLzU7pPEwTd_d7R8QN-tqxffqn6qz88rxca7k2Grcc8PUFd10URCVDe63IzgPhFy8DVgdXT9FG1aTX08QeWr308eOV7FY9bgr4-6AJ0Vc48OBEhAY3b8B9mB5BvwDXLR7xDsEgu-US_XhGJwinTqmOtum3HtAgnRjenxZqjQ0_PwvSTO5pg-BJ7jovq9BEUQ6gRyl9IWCVS2QExapW0EQ5qgtO3mqV9Zyvujvhpw3UPHMlnlSOREpJiv-1EavI70H_v61X1uVRXw"} ================================================ FILE: genesis_data/genesis_txs/EUMtkWCJU0L23RnhXKfQ1wtD3Jh2O-vpFnLcQXynoAQ.json ================================================ 
{"id":"EUMtkWCJU0L23RnhXKfQ1wtD3Jh2O-vpFnLcQXynoAQ","last_tx":"","owner":"twuBoKcwwuoVaLg06T80bFIYPzhtufP8qZTa-QjzYBcwXuldiF-OnXJD8QILtnGS316kMgYBmDi5801Zqiszff5V4wL1rEdCLdyzCthXKY7_DTIhWNYktUTyKx1m5Xcp_RcOpAOubRewkXIB64VeO5hARUO5PCpzsKoody4CWTH8HMNccVfcsGNxQ865UxJrE4k6on2qclvBKCPaA3z2FrunFS_qPpLTIO5SP_Ve9VdIrz1P1mKwRRHcO6OoApjug9NJzP1p-2tgBS7LNNSmqQINc7S4LIIcqct_herAF4m4y2vNO89MLflDLCDl9G6-X5y5We9WMYo3HoycZoiTMAdnjYxpN56jNC0trCAPvISUyYjyk4yY9Mw808mcNoZjsPUb15mZ8Uygji9rQC8Y7ChNkM3rJVucLDOtom8LASy2mck0HkN7TCzfhJhkIQuS9hIrIdV1Z7gWBNG3oYtu9op_kRRD-1yVFsGiOjUNq4hy60opZFQJFr5ntgmm6Sogl5ee9l7Ug3XKux0Dz5I70NK_3n0bst0w4oQeWcMOMwGd0UI0LYf5k4K0TYYRDBMyIcLmVOjtU4KtfYFaAQZRqE1VVG6rq2A7eOTCC-IqQMy_pr-ZCnak9usDWTTEbtMpY7PqBpnt9q6nq4MzGc9yUEuSP3Er9Ek9SUeM03aYOrk","tags":[],"target":"","quantity":"0","data":"dGhpcyByZWFsbHkgaGFzIHNvbWUgZ3JlYXQgcG90ZW50aWFsLg","reward":"0","signature":"Qo3RtExQ_4Z75G3CVTbqOE7webO2lTT80RqL-vP6SWIlA7btzy9WNqSKjkUIh53IY3grboPCMT7Y6QhRt85LU_MqvxVrWTu_EGx5hpczA5ECRSFadfJyyRoztbItkmCbeKw4aVWmXkStuCvvDTdBr12Mx0-tlqvQ8gCceYWOompMsQufUv3bxT1ELM3Igx9dJ-vcpLgAyEWVvOXXjR994u8Tc9x-CghLEgVVX6vNc2Aa3spWeRqpBXNM8KuFrAFmQqZsj4jT5Epg99DChKoZHSCd5JQatqGrurecgjI8QhKJkL6aEF4sSKbxEDB1znmEqWIwvIE02OZuQaA5HSwEzh4MJt16uuoU3DWDhjF2weWlt_110P7SXBtrHrRu3fr3N148QsPgNplZV8kHeYXXzdEo22iz2h5vtRlDvCReqEXoPtGA7MESTGN2BTc7zpZWr8_Gmoe-Uyg7e7qd_FfVX_eDtA4Cd3yix36uAXOZr5gmDaKuImsDsZyPaOMCjIwR5uh2petXmKfl7x-7wJpml4mevVdotmiQk919qw4GRtxmEAWwfpvsysvPbypho8u79maBKpG75eCe79s0g2oN7B6C4F-mzU8P4MSKg3LKn-Igo4Kp2lqeuDrwt_JkaLF3_OHKMB65dB-CzCDkd8f3nMNzBCAt9J5zjnYyzlBGmH4"} ================================================ FILE: genesis_data/genesis_txs/Eeo6rANLMAXonDFLDG2nu7n99O3Ymfk01wYXJBbEixY.json ================================================ 
{"id":"Eeo6rANLMAXonDFLDG2nu7n99O3Ymfk01wYXJBbEixY","last_tx":"","owner":"0L2MclLMhEAcWd0wdi5L3FihbjsDSIOCVS2WSCga-uxAbWDPDU1ERWl_Bqymr_ClMbpwCpZnR-UNXAQvrecP1LQlOxPF2uxPyWYw3RB1K9p5XA2RPBXvE8uUdqoUWq2nxf-h8RD4orXjyt6kaa5Q5eM7prKVzRVyot4IRtfJgR_lNW-xtw_lrQJEwBjDKvYHHfRe-5UEGUzg60BxQAQyZkZdJq_O3_5LnafGdvnRlYDvlxUnA60HT61R3fLUd4weyIou7PBdXSePoi3ZruKlQSLOogZ5N-EcSQc6cHOQcVX1b9Cu3-p_r00zayyDuQ-VsniGWoFIkOj0tdHWOk0YXuf1zy_dmUvhCK8mbQIsghmr-aBTLSLyXp7fEAExhnMqMBGrkYzYhDeWMmc9-oVw-9_m9leCdgO8F2F6k2Zy7ZLVz93rC8QXC8JZlcb_cd1rfBbxA9lgBtXSpmgm8_rwCJg57TlbKAEItQ80NpzWUDf87yYQQF2BAAy477rvG7eyyP85YdJzOZa-JTgachNXcGC-hGh8qfgREoI9ZvNl2Sc53S9V-y-YBomw_nrd8FdroX-sn6KByoufGbGZnOIbFjoKsxL4ewCBgOBJjmcZFl1HU_YK7BxpYcIEghvSxZuX0Y3gMLBn6Oxl-OAEHCHHI0J7gIC2tGl8vRmtVH04Jtc","tags":[],"target":"","quantity":"0","data":"VG9nZXRoZXIgd2Ugc2hhbGwgd2l0bmVzcyBhbmQgZ3VhcmQgdGhlIGhpc3Rvcnkgb2YgbWFua2luZC4gQUcu","reward":"0","signature":"PVrNUWn-2S_ryxqppXhUaoImDWrh8AlHvsYjHvCcz1ZLhcse2h0ZBjti_34C8KeWQgGZQUoyID7HS8sbrft0UgzheocIDZZbXdkSHBXT6TiQBttLhBQRFtLYBMxzjHpTU-jCbdRzrG_lXR-oXsW3A8LKo4hnnrH1M1AWkiQdQO3e8A56G-HqixijyY2if3oVb2d0AWDswQZM1oTlBAKAPB7WAkQaZhK1Mzn3s8RIFzdK4xkJ8ziNPZUFLCVxeWeE0k0beCtpY9geFVZUlLHT7NuMxgT_-rT5ehSlJHOLnazXfsKKVYBfgPy6h3cscss20w302vMlJ25d53yi-qB5T4sdanb4pSI8FUv8pVyBYNbibGpXs5WNPHRilPiO6O0EFsPBWs8qnlUByWF00KjLC7vFa8NP_SI23giXL_zi8VC5vQRCubFjF3kTrjBTDFEraR7d6gqxtRU1HykcpJG6HIYwoc5z2Lb4in0zdonoYWZkCyPkaXeWSJ-tfYL_AH-SiPFLeXFGp5tpfaZbxL_G5r8sFTjYgdK6adj4cOupDtBNOBHN0_rwgZIpyyoBEYbTZQ22UN-jfWuDiIbKDiKj-Lp99Jh3WVfl9RyafYXL6euEtBW-hUaa1p0NWmkNcNz39Ub5G6elAlC3bVhtmrZMefCKTdVYlQ3cLAcYFKH1gqA"} ================================================ FILE: genesis_data/genesis_txs/EnPMt9yzTsxLPR5mD9zUvndxicdYBUNzOlcCPvQlOK8.json ================================================ 
{"id":"EnPMt9yzTsxLPR5mD9zUvndxicdYBUNzOlcCPvQlOK8","last_tx":"","owner":"ox0n5YPKBnv8bdv0beSKdiZovUAGS90YEZivwZqNll_yH7_GSalTOWzBO-ayrXr6Fy32Zriij-M5qqN5YiF24xnejzCQDZCiyJFkuwOeDSkHVnyY7mmGeKJE1kUsYcwTIWp1W3sgUVe4fWzu_KsZYtAt478KQ97lxIg6hIVYunsLg_0RxUTa31ZVti8JbL2KAGVQwsx6pJGQEoZVSW6ol-jn0cFKsVXmBIUyNZM1SdxtWZbMl6TRnkhE2O6dW4rbimdxIokb5ODxz2N0RNbku9tYmFLab9MSHXSShMMAtBAZINypyIhovf_aMOeYeDIQlBanXOikFrfh2TbiOWQv2aBSEryCRwKAUaM_JjHcTrICNh4PXzv32XQRhEGt96tCSEnXNbrrIS6TqP6_zqrKR4vBTp_VvLBN9PB3FhFOVdSBYkULBEsAup98V81QbKylcPy31fd11pnaXzg6qwELQANLKci8dHTUKeUdoQedK4z79F15LKN4MQ1FmymIaoIWdGIcVBMZjXY8tucZR4VDbvxH2hM6Aoku6UiznOQVlCQ79E7M5yOFJcKbK0hW8SXsU-rh03HW4BRatz2uaAEy_cckLQJBXVcrkZK9ga2dgJ_8NV3GV4-knTETIGszeMpnbSALwhRE1iL4ILWElOrHN3bI8GmTg8bBn35yJCNyxzc","tags":[],"target":"","quantity":"0","data":"Q29uZ3JhdHMgb24gdXIgbGF1bmNo","reward":"0","signature":"Vs0a3cEgzwhGjOBw8K9iemutJ9hMsEe02NZu2TILoZik3Vtx62uJrDY_kd_xouEY_s6sTLLLVM2cTPx8wVhah4K5X72GLp1cAB5HaNRDzE_6QFIwlCu7BzKBlu4zkOjEn589jEwmxa7-JY22R1_DBKb1jeNFqGYd9MO7R9-VPNAq_PkwsK1vm2ReeDFxoBMJzqlTUvI_hwbNB5SPtzbUQCFz3MvvwEGsMDRikkEAs12se30bp6UJ1Bs6peQK19BzKF8ptZ6-lqUDgheYnwfEsCqlSyb_iL5mRu8TMUuPRcYnx2iG9WWoFh86-CD0DnInyJDkjSBYwifsfRbYCnzd_eXMJpgiDduq07BYGvRNeylVHqvsGc5-jt1nhUJmrMDCRpKQgxOtX0NH6d5-ErAsZ2ydhI9pRBFwt-7ZYkRslNsLnK7pf_feFeukXHvhRGToMyUtDjBMoHzcZLWxWDz0eWfuA3gAQIYcje0dTnuhBlbjQ5sioeH0xJ11_Xxl_XPQvca6W_YaqvGfuuByOuPkOfCl28VVxulwahCBkGoQPRfBE1MrMWZHty7WWRq9ZTNAtZ3_AO1i5xh8wMb4vkEkgMrcrgI4Emmm8dJP6J4tPxOT70IDRGPbdmNdGeW29AKsXtkruqg1kapNtgZfaBFnJklyLFgTpSzwOwighcySGvY"} ================================================ FILE: genesis_data/genesis_txs/EvKHSfokNyuiTarFKOuQ_-SaBwtllGpQGc7IFkRfBfc.json ================================================ 
{"id":"EvKHSfokNyuiTarFKOuQ_-SaBwtllGpQGc7IFkRfBfc","last_tx":"","owner":"x3LXJAhrvSwBEvvWxaUVJjh-iY8k1khS3-1vvqW9JK3lyy_eNZAlcxAXy2HOq4KrrNBlNPhLDwM7BS-TzBFg0SpKRRSlFKBN2ioCyyoHW0G851b9dOdZc7vLZmLQsj33hkia_QgkK3r68LM15cUpN1HSNcN_yMT1ZGmmDJy19AbbgPijRSnPMBWYWbFnUTFZXRxO7CUmnsKvvUbEyADLjGAxuKE_nKQVe93aiCk-Q8lFj1gtM18MAro1DmGzYEwMSvZwAqL2rkKNr7N6xfCM_7ZS0WqvV7EgKexJyKVObaYj6DQG4siOY8LJd7du70GYaDQMBeZOccqkCQj5KUgW-vya5dRFrmxf2NDkmvPd1aS4j712zqT88L-cSa9iMMaoTJ7kK62KaxBMsK2JB9b1nlfZKQSmWx4iYIAm5h5nRUi6NGk1MikFWXFEFrbtXxjTZRwWRO1E0GOMnPjgbqjNFk0z0X6ZlFGdEPj50Y-zwrYal4cPDcaKJGV4A5oftUqMf1z07UepLSGzGtlQ5Lt2f0lvNa5yFmr57zo50LK1oZU7TZl7cIR7GZ226SbM9riNAqQGt9rjAJKe8ADmZnMs3SGVI4WITYT607ddOfisgisK4mwHwjJPafxIsyirfOT5KX7vHUpwz0_KvxNskN0ITH-51l5FHsl5PZSqNqyWQ0s","tags":[],"target":"","quantity":"0","data":"TmljZSB0byBtZWV0IHUgaGVyZQ","reward":"0","signature":"kYWrCi9E2txq5jQ6n3knMD-uOiFcJsImPCYLozXfMJuDqsNNLgRcdWfIyM2Gbq2OGnarLOmylume7EB8We_saZ8UybgONlka-KM_9k7NUbQSBx1vikNsC19DaGiA6wNfYkXoFXLVgfhc5FwHqh3io6zZr3yVa_AMHj4P70tx-oX97VQUo410YqwyfBle4_7g3KfS_aP-YpzIBVHRGnCzwuq7ye7yW4ETA36gqYAT3qa0AymtMBl-KcWuKWgdKd_BXAEhYjqm3kOcA9X8r2GEGxvjpF6RANS8X0dmysVZFoO7Z5q5tb4sDHVpSbSqG8BCOtfT7VhQG_qM_W6NlkZvP4jWzX7xCovwEuN0aKsBwO-jMCt4xEgHJ5xFOLKt0RENRIJdw1Y-LPrTvb0y1I3xzxzPlxB-bZCsZijIeWj6d-riJzcACArZvT8EahVQTQiw9cnJvMs9_njqjOSRXCg61hAmnDJg0g4qSzLeY_6dkKvu22NhEOBjDfoNjW48jvCIFT3YojNoE7T0C2MOXXOnqE1-kIiZBxSj1dYXiCyYvlfWQjyZThj6gbduPW_w7i5k6DawosXtFHhAdRk1LHNJDLVjWQ2h0BnkEP8gseVWsqtynV-DqDelIoAzx4hNv0dB94nPXfMuT4AXQ50SJ1aKuWVFhUktF9SeL3sr2kwqwWE"} ================================================ FILE: genesis_data/genesis_txs/F5R2EA-gM8AtQ9_NymKwtr_Im3_ljMR38ndzCs5c77Y.json ================================================ 
{"id":"F5R2EA-gM8AtQ9_NymKwtr_Im3_ljMR38ndzCs5c77Y","last_tx":"","owner":"wdEn35bobyVCkQVPXXhsAAC-v2zsqaBDKiJ_VFzPwhDCuNYFAKqjPe80F-ersyRvuQoPYIC4SJ4_jOxzd5mqp8X4qnLYUDsk8MmCeW8N_NvLyU3Pr_V134urArTLqzWAOrLIz5o9XFtn84wS2bQ-zUvS4TVgh_VQUNJPRe4k1bYg-iVb4hJKUYpEu1eRh7IEtGDmW0OVT-XK6XJcmWv7Y1s3S616P7AyziM2Hj-mIu9FTzxfxGmYltiEWCKeL9XNbm5fZ_H__rkQ2ljDzxWYYrYBL4teHq5ce6CZxisaShgn2-QUUHQ5fumC-VXPBB3LS_-sQVmmiCXKi7J6MBW_Y8cU5x0OuzYkMGtkUcI2fpDif6dWTGgfctSIsIm_AN8Baxy75MvazALwur1n_bF4nFnVo46WH3birXKjx_pFiEgPj9OTWx8oXdG4I0NHxPle-PrK61HhgG5jTFfx0v3tDl7LLKsn5c6PevnSGxkKlBZSvMonNnumMT9zc__4jYtc3Pi91JvLX7AREx15ZSgfz9OCnMM0hTjsUO7_Kgd4Qi7DmRkkQ-2K2uB7ZxRLwcsk3zPsCcOIozkPVQyX6rjnXeE92Zt6Mr-FbTzoWZADvjdx8rAgrQCDI49KdWxIVd6UyklxkoN_Dl8iZfTEKm2Ibg7Vwy0bm4KfaNV0EExbpM8","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"qyw4d86pN_5Qh3Jt_K35OEzHDKDqRsRlRg7JBdD9m2d7eZYzLnYCpN1MVFin3gSyTo3H1OYokyAogGXsh1kDYZPcpSqiB-N7gfWGjdOHuUewQ2VpNHuR9endurfThmVvrIjmFH-w6glHD6O5JxsvrLnAWpcauVumH5r4BDZNcvwb5Ytu151wGtCYB5DbAcJyWXqluSkAdgKkeXw8e9lpy-D1BtbsfLnYJODLoLQQVk3RBvl1M1q6KaOWu50tHf85s7bGj3s0p5G7MQiZJN1E8zBGVWSJn-t5dnIKgwGiI5mdGc7l3cv0a5_DnVzPNuAogogklUiMxDEQxRqI61xP578dac6fRENvQnb4_ZHYpLh7Z9tZfieiETY0NSBjGQQek-ILmpTZKSEE7N0UwjommBbKlUnmQKQZEKSWWIm2-s9xAOU-muIaWf9tClkuOemWY0ae_iuDHkCRO_c5Hdauxh8UzKKVQdgEn3fV2cW8Qu3G_-1Q-MkOrJrBxvNXRKuma7UxyJOpoBDHtwYX1NKslHM_oUv6fRzWi9GdFLB_pPVEmsUvWCSfK939NWDrYqFwPL-6EHe-Sy7xMCNgrf4GyFDp1pxGD1k8aRxc4gN_5sPNqLZf80qGQsouaTb9GofLKMb2KsTeavE0Q6uc4Vq6W3K-gv_YzYeI7AhxLpm_YcQ"} ================================================ FILE: genesis_data/genesis_txs/FTYnf3Z3QqEpNzTigfAlGTkgpgCWtFA7R8i-I1ik_Vo.json ================================================ 
{"id":"FTYnf3Z3QqEpNzTigfAlGTkgpgCWtFA7R8i-I1ik_Vo","last_tx":"","owner":"19e6ae8WGh-bYt-FOlmBKTDU9vWYoXQntCt7FqXyPSdrTBUvhqQ2pkSXChUg65rTB3jdvvbpdP1ecQRNgA7eEVo4O-ULKLoDV8XgeC_ybp9JKta1rqVm2d9AlApsiqjd8EXs_1wQAoRq7kLhm3Re6PcMGoeT8PEFGYNj9B0hCYroo_UL7TZeTYwpNseF4IzaARJ3WquN8C_04e_rjrthopXM5iqbkFAjE8kt9acYBqr0raYnteSsSKDMmtAkLHutQvDqAWk2MjyGRlZO4RYx9S1DA2sIq-PPeo8MVRjBS0d3fjOiMlCfDHbK21gXfO8Nyy8JYdLXwjtTxyobq5oW9LdWky2Cf0gNies-kUf3JjqfvFvqAUzZ5DOn2KWTTlD_YMvBZID6cyZib2bmGCA2s7X2ySpyhIRy5q5-5LKDoWy7oheg0y87fsLnfpzgi9o2bgYav189kL-BZxGYJP9pexaf9v28EjhFCO-3Zptvl3D-XQbd2SfpeR-RECSQzkj2exf5KO8JfSSgrn20KICuU7C864D0meogajfqnGzbsUFJijFOO5-T17nRVSWlcS2V41REy50ux-bkFGlBYEbUC8O8Obbl3uSEut_aMrEAkm-H5tPkLycktTJhg0iANMDGbjqmV_nHXBgJhqG_lU4AAl8YxoRZC1qchj37Jgp3rHU","tags":[],"target":"","quantity":"0","data":"TW9yZSBMaWZl","reward":"0","signature":"bFx8SsfOGU2U_hCB3FUekZtsUUpqo6pdru-0mG3Vx0nFiw8-B0hmXcag5MxjBZwkJNWg0G0Vk_Ty3BAOLf0gSuEPl1gXj0oTxF7qW41FEUZUiCxOGwveK9XlkCoVT2BPEfs7Y6vO-HVETDAaXp2aPvJzzRLMnUEC98g5F0XE2MJMzVHmMNcd0M_WCokht5Sl8GE-RSeDJbDJ1_u5ajHti_WeQ4q4ZqM1pHkppuLRelUP0F1vDiO5P32SKDPJoP-vCTSseN9b9ZvbWbY3taWmIjM3bdsH8HBbD3oLWDomJ-LnThrj5YiwhPzXnuwNDK6dsBoGcFcvz7G_X-QZwXgLIiNW-7SXzPAvix0tGV9e7-GhPaUuX-Q6EWDshB-5zdk2-21OpymCfsX2ACL3_F7kcI6d7JowuFmYXxl9PbJU9T5VbGcp09t2rxa3KPImqSuXP-JWfFIh1iOQNoH11XDrPJr6dCsmQ944XT8J8fItsV2R91uqIIeZhRTpL3apfYND3vbYv35kuZyXWB1_Sj0FPQAmxE3hD_Va-MzAnAbLHLmxpEu7-e0g9i9vS2xDK1NknCaR0F2DXs3UTJHy3dhacYPh8r0pzVn6RfCXn9WNMb-iLqdNUfmzVZD6Ibj-1QDq8ghoIrRIYBxlj8N9PIsw7pnwkYODfb8TOl_p_Yzd9-E"} ================================================ FILE: genesis_data/genesis_txs/FkZzg_-5eSdFlbq9XnHe3wRhYidHJPXwUQ6YLuJijS0.json ================================================ 
{"id":"FkZzg_-5eSdFlbq9XnHe3wRhYidHJPXwUQ6YLuJijS0","last_tx":"","owner":"ypJfRMRl4tEvRs5vhRu-cMXZ-lsXDjZqh5DL5-2WJrVhSlTiAgNXB53dooaAlRKMI6rqnmut9uPRIvBpmYTNEc77ZEwPSKXrHFfwGBWzoqaz1cHan4p0jVOmi3MJDQeCqQgGvNT6K6U7-BOVCI3aHawEH8RMXTC6iWRTo0jG9YrQbRFcL9QMHZxtbA-Fb2J2ydkvVmxHPSULliSSQGrU6uDDYHFaf5p6UvzGILVibl-62XJSJcsnVn2aQ2zyov3j-WkqXK_NsFgquzF7NLU__51V5w7Q5pnDf4JZfCnckOGCjII1TCOxNNZPn8LEhYoBTyuWqX4DhHsr9gQRUbPJPh7kndLUASt_PpewrmmuU-55FBTX1YU7EGLoF7z0BKZUMnKKEICIs5FlQn9_LKRTWJal1e_AUwPYWH1DTpeNOqiiaLaZQyF0gh3qr_ZFFZ6gqIwUwHsrWuWcx3llrKkz3MT0N2-2GZXaBIS2rVruEPcNNQ8UqfE52fDcwUVXbxzGQyQeYf6ubCYzgx8Wx7OqBGyuk-EbD9zA3YJW5xpQR9I-hBEQGRc5YwhdlRSqJxy9aEzawSv6yzt9ParZyW_JLg7hdngsMgLHdaXbpMABUuydmwA--Kdlzszz3LhAnFGIB6ooiHnVWcJG_bLE9GafbxVRdpIVtHIkZfwgQ80kp0U","tags":[],"target":"","quantity":"0","data":"SW52ZXN0aW5nIGludG8gdGhvc2UgSSBiZWxpZXZlIGFuZCB0cnVzdCBpbi4uLi4uLi5TdGVwIHg","reward":"0","signature":"EjMGkubnRV3uO8Tf7bTZObbymNJQ6RPchXbbNmmvSjjfB2I97ERv9phOP_l9ITxHNPkSTT_W_0sBhq4QzhctT9AH9ZPHp50rWqv2NB9ori22ZLi-gRWM0aFZS-siwks9_yd1Rs31Bt4JWjCGABYL1HQ7_4rnJl86zihdeFNKGczE_vBnCjQScChKnFzQLhNVMseSHB0AnnR0kmZlJt1iFYMjvfZvo2GwymTgXRvMTNEq-pz9aNlwjalueogE8Fr8SJETLooBDuxueRTye-IVkGgqmkdfXplsU0xgMzmfzEmMu5QdYGJ1NC1lI83miW0aMjhhRj6jHsqBpoNWhYqC4ukMzNUJdqPH5aMJm68FWDs79s6TbXmqUDFOVuQIpqJSUyDQrJEckDpQrkzQFk0gExvp544AvyABUPn4ROsPT5p9G8srErTuU1lFp_i78FEy_5YPMgHJZ8Drb3GqL8Jk2m3aJrm1thBnyn6AkNBwyk9sd3kXTefSnm77qizw59U2vNVLp4XctEuqB9tjP4rbob4deWiuNoreSxhlsVrQHwXrXHVMW7zGBmfg8k-Fam7-TMhptPtrJyP-GXOfeH3_vvK7Hs5bpI-Dp8PSz01dvkqsa3NOkjCzWazMDvBNz1hRjLN4hRCg9BL-sWc-cmdvHJbsGy72kmedYQTARLgZ4_4"} ================================================ FILE: genesis_data/genesis_txs/FmfkuPmh0vkdv_qbjXBUX1sQ-DmwBFbjuC4punobGy0.json ================================================ 
{"id":"FmfkuPmh0vkdv_qbjXBUX1sQ-DmwBFbjuC4punobGy0","last_tx":"","owner":"wj-p2_SmBU30bmKTmAJliAVRbHf7StI8ZywIwaZXWwcbtcbpJs11VUaAiFkbAJqaztqZ08dqahTpl5lVfhtnZkzB1SztIAjke7RP9RJGYAqAUiQHQDEY849nmJtPAyUN8TpzZ29aO44WT6Tk1wrH83_pKBBZA9w82nZHzcxs8GwxHW1rmWwTsFszvaiWm2rSfx4s8xYxcbXsOqhy1pitq-_FNBoyqReHEgNWwUD7hixjr4__mQMo6M0hzsBFpxoFszHNxwMXaFxT0Kcs_oHhYkNYDcCxksc_evtgFJ8O3zd0iCj5OwKkwnVV87piOVu-fPp5uRN2wYsfEnfxHroNnzwjolfL1cRPKyTeicL5OCfqlbt5_zDIFTt5kaz7voXhzf4mr10x9uLvEYhYvYZCI32TloNA9sM7Hv-ngaobizBg36agX4g1UX8m2bTP9WXSq8OhKX55QbIV8s4f1J1WAaJzpbhF0k8hD-Mi6EnIuT9pwr5qadPK2yRKgXqfzKoydiM_A5NB1NYeENCtUvF6TT8p3o0ljF6Ecova9iAJpSgIu_iUplfvQhDXwU3piRPIZuB6BvucFxSMhwLZcWKlLZ1IzRoC8R_mC4zVsP9I3CJ-Qc2zMZe_51KtJg2TzrVgSDe8MRLHouNi8XPA_Hr6u7dyI035OVfXaKOmLyH1brc","tags":[],"target":"","quantity":"0","data":"JiM2NTUzMzvvv73sl7AmIzY1NTMzOyYjNjU1MzM7ICYjNjU1MzM777-9656RJiM2NTUzMzsmIzY1NTMzOy4","reward":"0","signature":"ooqbMCM_uvPOr4MfVADy6d2uv4HFcHY2mdncj4JhfLK2i8we6VD4oyMA6rv3b1c_GOcC40wPf5rIMIw_ptKDeCvB6GOCWp8T20i2tP1JUKoW8G7twIMNl0RJ2IbABmDHh3wlM0jwhRwfRTmgd9gDtMPiVRzZ9o3LZSvmvsDAOIZ8Au9_yVY8qE9Pa2kwoLMSIeGe4t_HnXs1_Ae7uOyq29hD06R5L060LOJIVRRyJ3vYh-G2uG_bXcKaZ4WDquRz6EPHxfllV-2IPr3WhVQY3ymllA_yQiYjZDUjsW0MVEDxZj8DkiFJD2zHnYJ4Fz6qj6aB64pApYdLCR1lh3TURDudgvU7M3rkc91hcYmTKF4bNiWXIP8AVSCnKOE_HCtp5e4HVvRd4dp90iN-g4-e4GWW0cfKejlZizkMz11L-y5VUIhLDdfZFwh0XN2GEd3hJ4dz6Ip9_C7gIXqLzYLad5Rk6SyIe9BHKjxfQsKtU_HTFsdjQycmbsYAQa6GoMRybza_wEBJtrqMNTPLq0jHkd4htjYiG0_4VAmBAIUxg5qbXNKzZNH8cfI4mt09oTFeAxdl0mJsf_5rN7ld03EEd63MIbVNs7bjHesM7TjlglgaCvjaH2pieG0md2ZhSDmh4NpYr5lnRnV7yKukVBjIUk9C6C1Wo6jkEOt3qcKtRxY"} ================================================ FILE: genesis_data/genesis_txs/G1GqspPmLkJTiT35QUTWBT4def7j5ORSfHCtrYzrrng.json ================================================ 
{"id":"G1GqspPmLkJTiT35QUTWBT4def7j5ORSfHCtrYzrrng","last_tx":"","owner":"5tzVDE1rYes0z90I6VEn3jCsFra3-UvlKS0D0Iw7Qci9tGGM8kU2jVuzRhzzFkzHHJQ0G5xHGJ6IO95r90gjzZjipbjIs1B7oWd8gW9FrgmcodnqSSgosqCxmY0pTV4BNhCqzJ-f5LCTUvQOACynlt3bQNxyyDipL5sae0ezk-_2uCyWVCM2_k5UfWc3QJrgxMv2HiGKpN0STSP4wTsSeGhU93LRb15hZhhh7sfwhvdWGj3_aVodpFxO3pC8oWdn49ncIpHhdWLwMuWUfMQnOkhYk5UDr8TSoTXJa7x0fMEoOkgE3bqGr-OA-1od5WN_hHhDGpBqPn_l5kZrD6xCHk0J7lrrMXQwwwFWGth6356rxBVoj7rWCzeUG7qRnYt0CbEUJ7d79Gk_DF7uYrVTmy5tErCXLGQ03m5dhsA_JXeIlWGBSaqtZjKORn3gp0I7ZgHvmZFc22vqyWiI4DhYqGIz9DxhtmJfH6uJaHpCOOLvlU7FgahqpCgytWH8mOS240rOiP6uUnSJizpc1iPZ9SblJWuAmWhaMUIYMMLArXKzfMVliJEil8k9z3wZBbqn0c-TDlzj6FgDEhbHQFee-565Z0pQVsZyKFUm5pUnfjezKIGIJZnRXA7eJAP7Xs3Xs0LETcAdGdhYKDYDCk7M1_c64m5tl_cRe_h3Zt2qY_c","tags":[],"target":"","quantity":"0","data":"QW1vIEdpb3Zhbm5hIFppcHBvbmU","reward":"0","signature":"cKEnS-uBL6fFLl1GLXONSfKIZHo3MQW5j6Xumlqn3I2SmmDycwGkSk_v_3c838c6CMs-ik6LYtzFPxOg6N4blEa0sd5Y51_6pCEp5R2vrRwpW6001X8sVF0YMBKRmP7W9Fvt4WSdsQ0qGczvQlhwb48u_AeVBxur2FljIDlD6oFTbNHzE93FQj5IUUcGPdn06c41T35IXkA5x0rB8PMwyFjt3k8_QCIdr-DhhayYtMt7plAtPXu4uVo8_O4sWbG7Hq_YU2G90DU-Jx2wLTwnrp-VvQ2MUpryIA4etFqb7_F7shmjmOBJA_YeXERYX3ohrTqCHt6Pf3koOeCVQigZQxyyiRQrPVH9TJLlmUEZ9LtmvHxrGGCry63AMrHUEcKm0UG1bE16JaKmWIXpy5zCGzJhoBQHB2lWOQq6KESKrSzl2fj_85nI2_wd3vQdGNGufjcNyOQkf5LnTnmfUF55zMhh8Acfxndy7H_7pvqlW-2PsA4C5sKjggqf6GHV3A0FlUWz2rq8XGVQ7dXKgKKX_57AGlfo2YjCuauqQwDGtQAhM5Lyw1QvNTmxEGFc5Lx11aQcgrc91sufcGB_C9ZT-lhAj932JC5ntKBplRdmgQVFNGsWovfoTL656lCE5iUHcD-M5kqoawcOLNL-hyKG_g4cTdsb9tnNCAV0SYgW-b8"} ================================================ FILE: genesis_data/genesis_txs/G5FyMvm8E0_07vFgz-XISJN3VEviSrbtih9_Wptef9w.json ================================================ 
{"id":"G5FyMvm8E0_07vFgz-XISJN3VEviSrbtih9_Wptef9w","last_tx":"","owner":"yXRMdYhH8avNYQqzk7UFDQO3TY3C7E8JKhuId3gDd8aFQOjB72lOLF5w7aB3MWZbahJYQCwrym6U0SiB9xf-blF39oDzsCFG4RdZEV8w-WMdvkDl2YsGh8cMBvOL_e6tugnAZp4jCpUcl411O5dNvdS-b2sfxlQ8if9olXLCO81pDvP6rlC5yZbBTnUQxQ3fXc6zh1K-R0x43c5d0biUe0awJUZEWm-jKRcYf9PfcmtqesTrG1O85go-3NlwnndcRr__vQx-3seFsWQ6lAKRUbfTv7zTkSKmmLnVFb1AdzFZuyPDxBRRF3N82izo4pbA9FopTED9oJJgm2zxAijhK34omKk6olTPOBFYb-rod5bH4_FoT9j8noGWkXfsOwLoCdoAT4ljgse0aJAzOwdSyAjx_fay0x60p-LyxT_WclUEBufG4lrZ5mPxix0ukpQ1HtC4wOe1ZHYPOWlUetHSGbOi2EfG93f96yMwM2MtqwP7nBlMe6d1bFjmysfdZ1NMJZ2ULoJMFX2c0nfoXjuXGYXOC3o4KRSP4jeEfkY2F31YYYyMqk0zPIcjGrezowenP882Hc_akvbNcmM3jYMDmNi2FMIjRmjxbfaczflW9L-syv1eCRxiW-a0VA48ZNL-cbo-A6szqANTd92tyy17NRxp5shgqzBai-9Eoa_EMX8","tags":[],"target":"","quantity":"0","data":"VGhlIGxvbmUgYW5kIGxldmVsIHNhbmRzIHN0cmV0Y2ggZmFyIGF3YXku","reward":"0","signature":"Dz1QhMBgZWHZ9K29dzvykbVof4D4lZ5EbZLplRyjH92OS08_yIfjR06_waWQ0oxkODcLCTXazCzxfUXDx18JmF0W2e2l5xV2u63TWcgh9i4UTIs4T55FwnJFokv3SgPyHGB19x2fSw2pKsIVQdYyegVnA17IZ83RKS976HUmLhCoNBCVBCY6lnuUkoUf-XvqGwg0BR0v1zWvKi9zHe5OYHad6aQp95GQcoWdS7Da9Z6git3VocnWwg8VlzUvAsVX_3fikbsi2n8p02lsHYLOhE3GxKlLgdCo5aNp2A1mNkCK8H8YmhWE0iMtP99BRoLTzJ2C1j9yYS9GRUAEYRRsJGnuMSDT4uP4bxElAitk5h1CTDIC2ddYxidWDNQLP20EGPyPW0BmAaw-Nch4FSkWbWWzzLim6QY-TXN4PEZ8whFVzDqfIftt-NrleavBIK1XBlQcSz8xOMmEn3aTvavYhVJwBPmQ_WwUqTfka38XwZ4hc-b3xboI-GJbWDGtorMM4NyXVUBY6DOAQMI4Rzv7WkuzBWHaUzMLEzgOHy9Ph2ys7xeOftqGbFLoU2Kqa9xtnXObNXIq2O1JZVGQ42rk7pUmEhUw6XjTbSttXuxYx7HrGEsKnZP9uVEj-eMUUtM_wmI3x3ajiGfe0vdK8S2zrqDW0PhiaFOhYWBvN_ZzN-0"} ================================================ FILE: genesis_data/genesis_txs/GlWMQUuiL80knS07G7NpoYat3w18VMuyLEuC_Pmijng.json ================================================ 
{"id":"GlWMQUuiL80knS07G7NpoYat3w18VMuyLEuC_Pmijng","last_tx":"","owner":"0MEzGJhdr8eMKwiFhBd8xFN-kEVL-pA75blyMx1IBsDOZpAr6q8qIWxxpIQKnUJEhBckh7XPWxLEDGcRiT4G01o4OsSfWFRJC4-GqvOWLv7bV1BYwaJwPIJoxgncWDh9OUhs_Xk5iBEoVsdrkvWP_W1i8hGJjarcsgNYH84RwnAcfoELsh8lSMDzceCuUOdtmDY08eqJt1W8xJM7nV0jR89Pa_oWpbZm77LDJnLc4nXTcIq6nwKdXgH_PeCuiV_SBQMJLhdrjniXLbuRwzUCSwM7ho2ZnNkI6OEBrxXxEXI2PK1sNlpfZ2V4eIsrgVZ9lIgXRTauTzpGJPvFlIBVGdGMdaM2JwZI2rk-Ymyt1aNfGv0lwX4OEsEfeoXSEyhdyDF3_rcGCAjUyJz3Q0LqnZqO9PNhKHjh__wL_SmB5Wt6L_5Yx0GEiovUgpeedBXDm3MoVVkGSwCLZra6V4nz8uaLq749A_RthyjTw6dgW1M7XonWtfB107gVqry6pnkLS5mVLVIM1QatgfEamv_LOEs9_xJOE42MwmTcol9QBFcWJSeNa5gjgtTooyc7cL6PP5ooAb6bZvCDzZ4J5hgDqrb1kBcG0atzTOBYsx6lgm-I1zDgwgLP47-nDZ2bfmMifIDbMv99H5YH-smXHkpxI5pIQPWXM1OxEOR0H8AAZaU","tags":[],"target":"","quantity":"0","data":"VGltZSB0byByZS1yZWFkIHRoZSBWb3lhZ2VyIDEgZ29sZGVuIHJlY29yZC4","reward":"0","signature":"UZz2PI4K4PQhbdO44ilXzNctHNhQ7hRFlyuTIERc5tCTnAzj2IzPdqEXGCGZ4qtQP6QpWNzRypgbJ19uxbqr4HL-VnNNZSGD1RjhQ86ZIqvKNuve3hbpICWPTTQ2msI9HvqSCLARYnlyDfZHRwQuxsPOlX8nK6l9Vp-MpKa7KRr5jMbS5Yo9UFqs2AB7bnFtKpxUFQEnAXQxqKvy6Y7FLnpCq6CflxLDiPJmzMYNOA2ve-cKKE34dSfCqnf9qSjhdK7MJL-EvicaM9rOOfNjBPnxnRVtm1ym178q3OLeVOgU2zL_N_ZNKed-dUDAGWFqFE0RVLXaBCMtLnsRZ632bBfw9CoSR9FKxijesOFeEqOayVJqXS1-vou2ueTYYkhJutE_ufKKMb6ndDmJ_94xj9Eq861dfseXnnudfV2l_275ESxic1uAcUKXRmHqI7yU-l1RyslSKB3m5Pwvatx_-WOT2nVMWSe0bLqVDA5LqQvIGXMn8voUYJVMKlRm-CC-ft5ttXSQUnKdZmu3Ft7iVFkiE0_dUaUH4xlZq2VQpMc6TwhJD2X2Pb9vno4HEmfWbouyd5TcNSR_hTNx_LUYcUWmruPnz-k0JQ0NfMrLzn2p-EYabFOmdsE0sBuX_IjxneFXlbKKczooblPmqafk3S-onOqvhCGjQQT75r88tio"} ================================================ FILE: genesis_data/genesis_txs/GypgExivgblZSA-1n7KjdI0SJOyXwFJkuzzPWS4NID8.json ================================================ 
{"id":"GypgExivgblZSA-1n7KjdI0SJOyXwFJkuzzPWS4NID8","last_tx":"","owner":"wiMuS8n7al-1axBqpaRMFgP9stScu8QBfMgJhIcyWAI1XHvjOFN2PsXYWaA8Q1XOE5uB19FvRBCq0AZtyUTVuzQfDNTV7KO8tCHPHVnJxmd1LrGyDSeC_oR3CP6ZV5o-BXN8hjTXrstuc15X78w7iW1miWv8AKSVo3Sja0u1ZV3r83hwfX8__KYqPECw0JReB3R2w45gVmCTTT5TARRp0S_Z4AMXgKcVgK3aQ-AxhCN5eokSdQ2X3VI12-JseKB7PBCkNb7Vgl4zlmb2Wx2NfCQXhMfCRBANpqJ6CMADGZbx01Rtw9C-Pwrde7u9E3b7MnZFoph77bKrx9a9za4XklkopJGm0SMfc0YQ0_RV5i07utVhuIFNcC6_DRaMLcUFv5_7q7Hsco_jfkI5bTuU9h14hadwCqE4ZXO8946t6bQUSgu_HxkecSiOUM6HTbxH_QOfayWZ7Fnqn4Q2PmAe36JLaUCMW_4CQXl5rAdVmu77dJ6Ontyoov6XVGEv67nojmI0ZtccB4lhDFXnjohtEfMGucyJH-a8klNJgsm4Clu2sBB3AGL7n2TNJH9O5zH7PO6bj2HAswZKMRus17PpBXDeTK_8rzrhqGwRL3jssgfklZf7EoYZFSSczDyM1-s44tYPKnAqO9yO4WxMZXhmaVjYUx_KuOZncj61Y8aKi8U","tags":[],"target":"","quantity":"0","data":"SSBmZWVsIHByaXZpbGVnZWQgdG8gaGF2ZSBiZWVuIGdyYW50ZWQgdGhlIG9wcG9ydHVuaXR5IHRvIGhhdmUgZXhwbG9yZWQgdGhlIGludGVybmV0IGR1cmluZyBpdHMgcmlzZS4","reward":"0","signature":"OOoTeJg_fno8rNw_qufcQ0DHpwpphC06YiVzw6gRPJqBHLF1grehhHyd1t8Yb8yM1LDimUmA7uO37u3solavaLKMMEIHAcwTpV30J63m3GLhXrzicz7-JCvL0hS94uXxCq3devMksNLVssKw_Vs-BZOVZ9PoUDDQrua4Kj1VLEIOD_NYVtH_Ot_LqzfgLYY3LazU_KvEyIbNX1XFUWn-YnB01AYJOgiGjQ2eDG75XrkwFDaX6Fck6juHJru6fC64gR7Zp3_FVMcYTfHpXDNLtlBrIl3oJGKA3bZibZGn9fN3Im6tUk3YcCVGENGkVxdKIkr2h9LCf9cewfN4rGFQuF6w6qNnk5wCYaEYp7eeHStZqhcPRJRCJYX0jxM0LxyUbX0SYQ2NPmdabPwRoKMc_ngOF-gMBZY7bGnFzQRXRZSYElymDi--3qzbhVxvZRe4RiEDT6q4AUr5nJoHp22I06FMZIpqlEsHPvsV4tZZJSLiaYr3g45vo8vFl25WZEtAw6_qQujBQip8UlNZl1A-rhX5_oGdx6gDIgquIy9BGNpjdQSer19mXGJcXaNTycOnPQfcFD8pwi0hqUuGHAE4Z1nGkTQDWgZ3wQE1JH31JYeQ08S5Xz-bZIJH4eSr0GC53iz6Oiz0lGizezNC4dqRTSnY-Nz6Y1JQ98u6No-JTGk"} ================================================ FILE: genesis_data/genesis_txs/HFUR5ZwLihdaonJWHRHBuLay6cw8ZMV0bM870xhE6Qk.json ================================================ 
{"id":"HFUR5ZwLihdaonJWHRHBuLay6cw8ZMV0bM870xhE6Qk","last_tx":"","owner":"u1OqnAOYZohbpLmXPOnI4R8RpxsCd3ByRrIi-QoImZ-QwuBwTNc29BBx4w2m1hHsnjSYC8tH9SIbPMQAGE5psdes4U7uU1DYJjJbcIlxIlchAIBq-doE9GFLoZpRRu8Ky8P-Ut9Jp57rXzEudTj6Np50i7aEtaUeaGnqGur0u98WlZyL2mk4XNM25Zb2ofrwUh_cRGoJK728cEcl1VzvipTrAYxwet3mQ-L1-St0PDYLHp_ZE0cIXDSNPuDALg3vWfH04VCCtLPvnV5X7t8kIJ-KCll4ZoWM3a1TBkVHEV33B-k_EKoYcw_Nckqy1wTTEqO5zlJ0rAyR8LbKi5fk8h64clQUcQNLoxC2zsu3QKJT7nsAoIi3Ufsf--XV6sADIFAlOLrIAZYNUwaoiMbcu4aFyKJFPCwVM2D1OcafcXRE6P2wZVkGh-JBbCkpM6TWxjc1emZr6OYr7c2r5e8j_zAmv0NtWCjt53V9xolz6Qaj4fVR3dEhtkciaGtYwGUTbqkfGO8Pw-tXgtGHQRWMmvCkZ3-RGiYjEUQbrvhj1IvkbElyVVqPejEVV9WS25cyNEe2yx-kkkGEtQb9Pe4YhZS0mgny-LG-BNZk-_vNSsb0m9ZXNVyjFMnuay9VaUm6OlkMpkILKBnKhFe_HFXsGvgWjghTsKR3rYs9l49fe4c","tags":[],"target":"","quantity":"0","data":"R3JlZXRpbmdzLg","reward":"0","signature":"ppQYJ3-otBviVMcjHRd3XzYoNq_T-7_jgxymJfqaQXvLiGuDPiMflsDQX1LqqZyHSibSpjMtymQXwyzvLn6gpID7KO5yPCIvUb982BZG5xDpW9Hl2yEAbJ2y5aTxnGMB7iMMnxgpu6BSovQvWeavhDNuOy93u6z9jCJ9Ba_qDELfv7mPjwYR_qT7EmXvxPWfI8uIB-LwtrmkDve54f-cF8ITg4voLMEE6kbVNjMpKbe4O-pbmPejKrzqhy8hWS1fc51sLt_C2iu3r4r4OFzdPTfOB41zbPP1fXA7TXoiaCyVxP4e3pUsRAnpQIZG3OHjZR1tioDA2gIa8V0Nb6BxJOhJsjipwjATImGJtY-qSLsTSLvTIsw_WiF4G_FfKdKCGwycjSNTAQ_sl1VOwrds5Zp_IMywRRvpAb1Dz-0ruSIusfj0pOGEbOOv-BLPRYPip-KJ26S1Qr-pjcCgbrIp-qPJA0oaL3akdawIL8wkfELLGLQqFOd4LaqNz3HDUCmTpACdCOEWyDSRBVpfNy3GbWVtjyYGODEPouVb-_y86xNtMB6MvVwTsNoBOo0P8rKFE84mg3y0E5XfcClqnHoqEq8OSDX3akjC05SU286LBYvezrrAsLQ-V_lpS53qm8OjPAFHERabagKzJW16fl4h9ZMZlDJDH57sEcebTM1ICxI"} ================================================ FILE: genesis_data/genesis_txs/HSlgnBu2Yxros7zyehPgiu2u7h80dJfCCqrA88UnkB4.json ================================================ 
{"id":"HSlgnBu2Yxros7zyehPgiu2u7h80dJfCCqrA88UnkB4","last_tx":"","owner":"tAf2q5REOelZsFgW7Yu9X5ohits68judV9lkRnR7aSXuxYTsZsyNlQGR2m3S9EFQyis9bGxKjLWdFSpJ6BMpTorZ4sy_4FkVTN4EOLbAjTlKBEwJ651b5NTiKtdi8jowCoWHFfuojHJU62B7_SypcW2iQUzUnp_CTmJDjOjs58xSor4nU3BMhfpwawixq6lwCBZCVq0RWbElHEnGmEE3UdFZgPBpJusNJ3EXFWC-abDnAwEnMoyWsyAlLb4grW2Im4nvYZluhecpdHJEkHcrOhj3uCKzEl8HTttye22bYVSBi7cvLQ0XB3EcrLY6hDwqo4Qc88jihVaf6Zkj4PfftJaKKveg5V93XCzb28vWsNMezn-FrBSDrzqe4DdB6HiisQCcUb1FIWYhW6XsZIUbZWJIqaXLeOPykCDUcjF7CbL3QnKMlFfD5Sd5T9qbSOFvQW5AAfNzGCS3lWKg9bO8vEMM4u1J4JA5a3S5bc2cVQZvADysuNaD6iVnBx-4ou1rr6AyJbix4C9-POWzml3H9jJEBU-_Ij70XQD5c1LFMYZFUa70UNmgTmln-vTeSaA64742JWtUNE24iWWKbZO5ZaXnohDGM28AlpfNOhrwH3E_8Wixjvr0NLMILm_MpMPZc8VZrpTyTkN4EbGFbxFQKKNMraAruKujF6rxfikpgd8","tags":[],"target":"","quantity":"0","data":"Q2hvb3NlIHlvdXIgam95OyBNYXN0ZXIgdGh5c2VsZi4gLSBKLiBGcmllZG1hbg","reward":"0","signature":"YvuySiCV4neZZDl0ywZVCg4sj8nmzsSCrJ8P0z1huv6PALL2vrzfnCpvtxzSHnBVzIr3Wd7PSc_ZysEm_V0N6x7lpeiQFIJEpi7NwwYeVi4IAUvNV_Tu44e5KkiaU0eN3UxTiqoUakjAxCljMGC4m8rNVbp6RMbTDjlHjThzi8HtTqsNLT9ty2tDdLzNBLULShlI5F6BuZKGqBBq6YEJ5biOa6Lt7NHk96QFwZbc_-mvJbwbrbg4Jx0f8zIBpZUtO3_uEqvcMbcwJ0f7-TFo-XyDR0PpsQmKaU7_AMptiYZfE50o1zhbpzf21asbqCCVAf_UPJW7es0fQKcAvJeEe7D3OZYppfPu7mScA_cEfI92NofUwjs6Lem3bEVfYlmkviFaCIQBUc9MAYSHR4o50Z93-I7rWG-eOriDCeIisUlRAQVF63s3dID6_bdBgtt_TuwLPdwjb2dM-TSVjOa0swuHhgppL-LRyr4gi94rjIf2AoPKGRJdSU5wxjvcqxUOvntIZ0myEZsLLz9zCXIFh-uaYVFg8SPcP8A_VmUTwstWSGD5xyf_GjuopcFLKGvk8BSuf9QY1drbq9hSozAONESDC8iazcYQUKWKAseQE2sGjtO0s8kYrIYxmiExIXPbKAVC_8vTc3jqb7nrMvDhwbgwwCjen_Bn3354dgFZv6c"} ================================================ FILE: genesis_data/genesis_txs/HTt6lPYQfcIgUxKPjUt3aQrpwE5e3UA4UT2EI9RxSbw.json ================================================ 
{"id":"HTt6lPYQfcIgUxKPjUt3aQrpwE5e3UA4UT2EI9RxSbw","last_tx":"","owner":"7TC0Qn3_k2DNCwz47TX-CpOwJ7g_EoGOtT5FftWFAwRrrqirSzO4SkC9GWgQMOqaiye2ww9b6h8ihN0Q6lQNTUTHCijDe7GjjRB3P5ORO31Y9N3vGn7tyJ5yEncSFwSS8y5vSZKTwL5lVFbM94BaMRETKfcofG2dVpXmLOn7a3nQ10TlTy4_xME81fF8Xjz-1SBL9GWEkdOs0EmfBPwudBv5F5bounNMWe9LmXGjFX82KdUjR1y6R6p7x6iRu0iiTBZsSRjHloSHxjXpgBHIvPzcOaRP7oWv0mpaTqpDj1AGnlf3gA2X65dCNHWI6pIzKIwsWzr5O3oHYUmnN3csyTliVBcxXCjS-fU-lLZoIHwyh_6jTFyNN0agzje19oe5XP5uVY8S1Lw9ptH1Tpe0GJSck5MZMXakAULh28f-ODtC5qY0E_Ts1Ut-BHUiSTbp8MUKO07Mq91EtsjdtZl7g13p0-wawh5Lfr3yGX7isFN39uivwCckQmQLX1_d5TqHJkZkQvK7b1TtpWjBs43jzkOnllGO4zytvPNpLKJWwUqStjS5UFfgMW1EMaiK0fcqmZrzDEjliO-jeA9eEepazNE3hN60qe7IiBOJkyxeoPXqLUVajJPTCeOaj4AhmeFiSQJZsmuFJObh_Ugjhz7e69_V4EsDWduK_kgr2Ohi8B8","tags":[],"target":"","quantity":"0","data":"QXVtIEdhbSBHYW5hcGF0YXllIE5hbWFoYSBBdW0gSW0gSHJlZW0gU3JlZW0gU3JlZU1hYXRocmUgTmFtYWhhIEF1bSBTcmlTYWlSYW0gR3VydURldmEgRGF0dGEgLSBWaWpheWFEdXJnYSBJIA","reward":"0","signature":"UPrpyq6UgXHnsScGsyUpJd_4nmxcWoqHU4GlCc_PgKZ2n3vzQoThNZd4MwRRY-x-e7zN22SnG-1gSYty-MVqJZEJTqzvdX7q22jDpFYoN9-m_POYVfKPzwVC1EOfiY_A3goaTUvrfqc1yCa2OiBjXqlDYB3ZrwgpO7Y0QwyA4mVylNcg7pnsfD0jouymt7PQiMazQlo50MVv8UEDROyjS75F428w_1Ub_VMquygkATryLRPOpp4dC79N4iPS-lOjNjdLB2dh72agywSCccUHx2zj-5x0Xg0ZykVxWsm965Gkul--ElwB1nu304P8yBt6m65rYwDJr7PicTXPz5Stjzqo8rbJB9Gd_pnIYZUZM4v0NrrXPhEp6bbM0sBumQ8o0xgAIqOsUywIzdtJoetGSdBi0fepvL69WrO53HuDfZB61d21N97nmEdX_UAULVqoHD5V8WU4oOnbqiBlcoTP8nRG9gOtpvKe6ZtglWLyCvTC4VbjehBW3E0Gc96dIDLXAMXaBSDRaeAGRKylAL03So4cYqOMI_uktQSgV0BDjnvkCoVvl_9SClYpjZM6S8lhBcY3_rEFT0DVhLKPN2qjGIA4Rut442FNBYQyOsztl4LBHBUd-5HkZiE3mBzPjhGW4U7vZFGnyD3GzCQTSl42Y5nPMSIb5TNZQZ5CUAOAM_c"} ================================================ FILE: genesis_data/genesis_txs/H_0S6x36tsFH-x1h77jV_zzGGp97V8UjmgC0RZYwbtM.json ================================================ 
{"id":"H_0S6x36tsFH-x1h77jV_zzGGp97V8UjmgC0RZYwbtM","last_tx":"","owner":"0rvKZSDkCavOrZXIKB140BgjUKhw3wfdWNz1qbgXZKs0lyBwQjVOi2PkuR0K0wnrLbYpm3znRXxmzvE0QRYmWElIgKef7HFQuDJOpDYGeahlUnryCkaXhp2N68JjyZOwQejGGdCeh1XoNKb7dxnQuX7RNmRgIyVUUHC4Xqs2W-FqHHphPufQHH6k00TaEF3eiz2YID7Iqc2QaJh4TPJdhU9iqXxq2DhONSh4Lw68O3mHdBEhemxVEkZ3l3xMsVAV4IsKcfvFkGdLSKr_XX74TF2AXd-3Hs-raBUTrW_aCyYEAeFafWqrL2__WFqvSBzxV6vnFgZGJmyStVPg63FeTdP9-FDvr6twCEgKvYlbIdv0s_KWS3eDKjIF_cqg0ZC6AxGdsY5J3sHV9VHENk5IyUOpv1yd_1L6AIwkqttYVZmjjDyCQwdrVSxIERx2QJhqFVeyupu75pImMIZ5-FGOeU_zEYL8NuFiot3KKH0WGSuE7mPJi8DCcvFueY5G-9eKBlC0T7POvGivUYD-ccVQ9AiGAqtw5e2-s9SNpB9VUghUy5Tjwzw_1ArCzLUip93KpNWtJmySfrkS_WVzU-coe7GMvSpz6Xh5jYJJUQaRCTp5T-xq6ve-BXxNSqfDMO47Ehb327qmxJOym5IotGZckKjbmt0cB-d7zZVjTaQF7Zk","tags":[],"target":"","quantity":"0","data":"TG92ZSB5b3U","reward":"0","signature":"sYvutzkNKPYl5qDQ-LXWI_pwcUD-Q7gey9pW1kBYZlCnQsHb4lPWR1AjlSvOsO2CbDpbkA-mJVtsS8TNewIl9oMc9Q2dY310n5ke_JF6UEbU-t8muByoi5SqhyBx6if_5bgIlREXmo5lyNcTFnPJw75PjCm_A8FapARU8RvZbJdG9SC7Bix3rAJOO2GNCTGhPVqkq3wEJDnEIgIPad0vxVnJzIbgN8VMNFH3K8Mg3BvmsEztCFbXd95julPuzlNmpZPVIg5oe1JzYd9dGxxeY2xlswERYXQYrMaWQv-mF3tffJwoRrzz9nDLJ5ARI8e7udEKCpFafmIFIbW4uxGHroeEvUra9qI8vNzb1KeNlpnYV7lm0eR8rY1r7Xa6bqijvvQePLK4xhrohVNPEQK0itqJbhDp23iTy9fz31sOuNHSktN36w9SeQpGeGMP4xccTBYWL-tZf88i5mp7RLKDyjG3ZC6tErQBaClKJmhTMvon9KvxLsLaOPqkqWvFmCD7UNwPvJVKgq44OhW7JdZTJPnxUoEYAcmkTUFSAlYxwk-PFYdcCMIFDzQ_Bhlvke1fShOpTDxU1oXsyFGDp5NCF1izoaoonftCXXwxivICWn51rM2OfOPFNF_APbNtowP5qIZGvmXh9HASSF5FIPgrEfo-WYnx088DsoawCq8eZ94"} ================================================ FILE: genesis_data/genesis_txs/I6s8Z6gEPLQABFstkCoLVv_gdQNGb-uuMMut-R7q2hA.json ================================================ 
{"id":"I6s8Z6gEPLQABFstkCoLVv_gdQNGb-uuMMut-R7q2hA","last_tx":"","owner":"xyHlyZvzMw77-kOrSREIOZx5hFH8OgE2WrbyY9YazqfiIWk0kJmvru-Ec1g5_vJG6ZRWFr8rquqJrZZ9AahWHLJj3d8PzYmLUhoUeXqEZDObovWFz5V8wI_6Jpr6o7dLck7J1rN4A-L3lmCm-qU0dldjt2jczeipoA0rsWY8DE9J_XFynd-d1O3PvXB9QifPb_7lHpWSwQ3q5YVijcUrMDkst55Z3pgA5BTUkJqgQffpUbP-hDZd2iuWKqacKs1L_c9KM-kKj5LJZOozCTsHPDPnYBIJCdcaTvms3R-0yoPLiQjcIPyd7CQwD48g52q4TUzr9Q1pbJJcktoE8GqXuE4tUO8qaeUZWSXZADLkf4zuPSbV_yhJsCzrf8IB6ti_vq8Qwah5r8KQptWd-BtyPwjl-c-7KRFhMz_Jw7oUZqxCTjeNMLPpuxyMhUwbvzMo9pctClaf_id3V_zF2CTYCOdoNj0uYV94MGXPI1YspbCgfSfxUtZYDUPkxp3xvL2Be3SQiihVRKKUXz8MpjpEsviGnQQlu54sVYOMgAk1jkDNiiX6et9gKGs4b_Wozqy5p_aTcaRaVDZPnq61M9CMoLZuQJXaBxpw72cswgjSCP45eMaHci7Iem8jBWsccpnkVE5GiRqCMm0tBgYm8qdTVzpHrwt87a_5RKFk4jrWbsc","tags":[],"target":"","quantity":"0","data":"JiM2NTUzMzvvv73snKA","reward":"0","signature":"ToUUl0dsulTbpZUvDu4AfN6Klfc_DhuFx8gtPn5piibnf2t1HdXSSsv3ctrpNlDFRDAlvh7IvCKlkwgZBPn4E8CLinKjzXaO-HWWMqNm1PAuUgqutEJckDawlm5B_pX2tQACYvQixAiGqsZt4lJcmzlzc9zH_xtCwnYl2n598e6wLQyd5R8oeGcTusWYXDozi7WfjRBgir2cO4V8Xuil8d2D2Cq7CvRNdR0N_vMHtw8Ap0pYttLulpWQneXJWHZMUcIGj0D9nsGaK0f1PGX7ez3Ihcmk1vJ79m-H1V-WdbfJvFmi6wr-c8_eAkAgjhXbn6w6jdFhOAcyoOZNLJfN27-fKarb_2qupyHlfkBdegQIW7QdxTe4UslCoPl7BnHmTLHXQzhFhYOabs7MCan2LmVm-ZPHg4cbuR1Mcc1mOgiGxVwgfywfJj3iZLNNVFMARi6weSwP5o5R8_e1j6tYoqfiV5MHs5WLW0LIlc12mEdwPSqIIAnHGtS35EM1MxVkU4kL74Sk7GHSsiWD2z7WUyAA1lYQ4hLaPHSjQ9zXEqZEjs_9ZngQxre-MfrIL0LTZCd4Q7nZDE2sk5yoHZyMt4M1bCAmewXXaOAzNw2zTsFumFhpPUf47OG7jAg3bXVzCV3cKoUd-CtxXx8zV83JmpThkQ2JRZU5O9zZ9YvpyLw"} ================================================ FILE: genesis_data/genesis_txs/IACLRsWq-T6aesGEAjfFTZJd2sy7sFvWL7O6FI9A39U.json ================================================ 
{"id":"IACLRsWq-T6aesGEAjfFTZJd2sy7sFvWL7O6FI9A39U","last_tx":"","owner":"ulUtfRLuZO0kprZEDOgrmG1Ma9StJc0lGl1yiZWV9cKhEC--VMRgxe55ep0MXucPtgAPnc_tSyZwy6If3f9wBYJRXopuH7LGYWjStdwSgcXKNj67s4eh7boyGP72s1uztm8h1hgP3ShFv7aZ9O_wUZ08uNH6s2q5EDEr56q_G9vgRAYWsdFJe5Qc9PKQRESKxyYJbwNQtVLDj7UvL07XHiojIM8vg6-96HbsgB6ldf7_Zq7e428HYHE86N1kfhwn8i-T1kolaqP9Pr-U05gwGFm0mKtBgLqrajKz6Tro2K_YGI-DnlUGFQjc5tI2NlIZsgph9D5aeAKNaT4Wr7IQUZH8qvtOhOC3Z2E3fYW2ky3E-QE4qBNcw9QFYa0nhukV-1xz8QLyBLAhzguCWKi862r9Lx7RMgG-EoEZz4Jex2BSXAoVI2my3l68XDIiem_pj760JvNTzTlEOUubGkiWsZoxWxst_tZl1Weae2cniSKIt-pIDniJdBjvJDApO5cfY9p_vrbkQVQ6fB7u-F1749jSuKcGZQxSbWegPWDD_6JiKb0g34dUkbmJNjWvKMQdkXVirw2AWy0QXlztdW-XiIkAPuwTUwnCX-_5omnOGcU9L7rzJ-JHVSWTCtWTD95LhHhjO82J9zxrB2AYsC4nsY-1lDy4rHSgxRVl4ZQ5whE","tags":[],"target":"","quantity":"0","data":"R29kc3BlZWQgVW5aYW5l","reward":"0","signature":"lOuxY6HnEGgoZZYiAnmPEttOU2TAA4bDXEAKHWUztH_GdAMllzX9WLLt1V7bLZS3JS64FoFBxTFXJRxa0XwNdV8YbDWaTKO2QS1R5guI_gv4aAV9VPPD33AxSAMy6jWSF0efiZuJLJjp6kUDaMa7CY8OVPVm9PQh5ILMFwQsoSl8mfdQulewh7KvuoswIYY_hgOV2a1UgitxBvl_-7JJRV1VER_lpwdmLV6cysyjdgola5GvpMyqaBzyRdqT72xFw2elUsYT4TxOk7-SrMhpkL5SMujis2YmYakTXwQSm6pcVN5JC_6XZmklcYr8VK68IvcXbujUddBtQiwBLJTIxP0nSrAOvSK-dVxPrrjVuPn1WPic9F0ZKHgsA4SA0bnOSrW8oN-oAbMCdI5zphou44tEIn5df5qbVd-vocQZQko26_A4582-vfvACIx0nUpL-7z2X0KiTVSq4xsmaJQqyU5qtyBhwOBbyjVJi-mK29I3D8F5Bq13npqIS4JawQBEhcUjavytdL-C052k1FDlHJbOGZ2XZ5wgFSbwohI0LNL7E_CFvt9859ByQZn0rZjQKjjGnXZWcI038ucT2Per_OPonG-l5llm1babKaSJzIhkfdkAuRx4JWgn0bVpgy_qM_nbimvdw8Px0Cv3pnKEDxs66fqc5q_qERdivP9hcDY"} ================================================ FILE: genesis_data/genesis_txs/IJsiiIbd-Qs39TAJ67hiRJFsBye_rgQdU9GBid_PnZw.json ================================================ 
{"id":"IJsiiIbd-Qs39TAJ67hiRJFsBye_rgQdU9GBid_PnZw","last_tx":"","owner":"sTU0yn58bvNatvRvPb21qfqV4xKt3o9TvryQKH_NMMNtMY6GgPp93SnKZ6U_jt3uw4yob-KTFRIMI8NFQVtoEGxS973XRKcFQ5Bcx1XARAEf4KfQmIFJ52wZdsxrOiHbWTzhU3NsLgECdbF9m7arILRW9kv28Irth5kxeOxMWzfh8mUsNGVOeEKzaT3_cGVOeTiYz7umtL1P19PFYW_mBdLsgFnp7wdubQ7WfRzhMq-JHquoZD1FeRiip9Yw5dBHqOyVqTrhHN510iT4ESdowEb0C4fkrmSuBmbF4Ar2eD2A_A0evR2ONT6-ElunrZDFRuG4saBNB4uLXlkb2uzIkc7kV_rYq5tNn9PPgzjgAsg2V28cGyZPs4cHV9N0q5XXkHXT1nZ0fmRf7QwNZBTHB_OJcb9XibkFjwH8u5uKHBzt4jg5gqGkEEd1HqHN-FqPSNosc_P9Ybl9zvBPN4Bkdwqk_xwKgUhiLkM3y9-oGgOyvbd-Z66jD8OH459VKG03mav74W9Bsl3BV94riDayBYu0qWzrtwd8RzQPyh01U-8ksweHgXkV81QPK96SXpMM04jGeTkkt4e6yo6Lkgl5bbdfYAYmrHwv5S4M_D5KfoXlqdlgUQxo5xDXK18xAd74yI_vlk3tA3cz-5z1Qbm1NcfGMVMlooz4hTRquahZQL8","tags":[],"target":"","quantity":"0","data":"SW52ZXN0IGluIGh1bWFuaXR5IGFuZCBoYXBwaW5lc3M","reward":"0","signature":"JeL_4R2xH2ZGdUdGnYCfyFOmIrMvgfvE6uuE6hxOF0qEvuUahEp2Lao6qPGVOx_OJg88vf4jYp3m4KoflgcErunimWWs-GTCeQKSPaTSV9NzUpvh8gX3BkoAZkWZAX-dhjvcXBiJlKsAMHSllQoe1q0oKz3WxtMfe1zWjYercxj1MehYqO-sUKYNvHGreH3r10BhPT5QaIEHEMDeyRLfYOnL3FwyolGwfVt-eIbPeUeST2oWn36uiPXBzlmHeezX09SfPpD4GuQiK3VJLnWROIF2oBQPGfTZ-7WfgE0nmcrtpeQMLqro2Jqhzqbnl1rqh_H4NvIms3accoa69uJ4sTTRSAubuib_nZkxFTuAIGbpMm4Gmdvsjb-gH_yS-NV6aCL2oJVVJ4vL0c30U5LqBcxnCJxZzNKWVypF10gOaL3f6xYfAI0EzkVbWksNlRllhUDsZi1RIKtkhVbUPTjq6SffbHP_NE_4ZJ9ymM2dx2WSeEl73Ab3eIn5MMjS2q5wzIVMG67rrgV9mHw-Mp2Z5acgBZP8vaG07YIg8mFsUqX63Y6IPVTRQGN-pUN_dPqGHFF5kSSXPcKApzHItE3bAsamsAEwKJviCNlb7HMvchLJLm6EkO6C0qjZw0fZT4aaYBFnKGCocE4ZyronPuBa2pJcnbbGhgsKkmkPYFXa0uA"} ================================================ FILE: genesis_data/genesis_txs/IQgiEwMLp1bb6muuB_G7Q3sRaaZ3OZHUSjgshUq5YMU.json ================================================ 
{"id":"IQgiEwMLp1bb6muuB_G7Q3sRaaZ3OZHUSjgshUq5YMU","last_tx":"","owner":"uXiwGMYtnV912Ds2mY84MJFwKJ-6D5DgT7Pk2GdNgtZ5UNIuDyrwoEAsmdl_9rprSNxQEyPxM55FQLwob2_oOrm5PMY3ltrwjfcgu9zOL4ljMSWLw9SNwgGh6tfKMUqJUTsNU8OwAo7VKpIXxs-J174dk98DtDqqLAjpb07kSUSux4BhSpmD2Ux7J7MVl7cycNu3xoQQZeeErxm5k6WuMhptL2h7f5I24tmR1FYsGUhKNym6FKOOuBf5nx6Xy_O6vUzxLmiy5L0nMEcM0aukkaLlXDE_yyliskC6EBr1eWu-y40h9S7b4-9u51PZHprixSy4xEDcCQ6Gjxd_UYlEcNXVuCxXFCiEGQE3L-Oti80AqcB5MpPuRW3E1s8DX4K2YaS0eF5vUsZlVHuNXF8GCbODHZjhwnSgIRjj-c5Rl2bGYXlQ7bTaj-MW8d0ZAZTU-kWS6W_GcGg_a5dbB6BeA-vrwP_rYk_OiYds6WUZvyQcwzMnZxWQK12C7l7sWgjBdiM8d-SyTScQu8gdotnCuCqTG9-xr6xVcJ_F2TI4BmYn3xOXZf3WBKxtSpehWaN6VH3iGUlN4nNjxPMXWi9kBRlfg3JOsxJvClsSocfibT_Imrn2gSwi3JvD7E_YbdGIvb5gdEzrNWzludd_z0javaN2yt8XD21ESF0fC2PbrHc","tags":[],"target":"","quantity":"0","data":"VGhpcyBpcyBteSBzZWNvbmQgYXR0ZW1wdCB0byBidXkgYXJjJ3Mg","reward":"0","signature":"WmykHf9WyLjg6ka47j1BoiwdCqcUtDjhJAndKvt4NTd7UC9uN-j_GQHxHmhycilObw2hJrdQXKPdu8Z7m74u9-3YqBOPEEgY3pBqVFXumlJOKKTzEb1Pbc1Xo6Y2CpTz8i6bwuZQzXvpuL3LaXpgbSuYCF7AnexeNZWT_VZQOx_hOTuvKttXp2P358DhjOM5BaeiBLxWpjviTYUkYXuxi0I3jEcZ7i61ReZ6jCQuF777zU9vkQ_n3ocpaBlr1h_-MD6IUICf58rwwp8DAOnLQ_LeKnlKZ8M62AOonNU-jQ3f4XVAiW63xomL_wu9TwS0VBhENv-MFHtspoqm3UIdNidMyoWrhJCCB9U2SnU6X5mrq7uB-xqGcyZjB4MnhWgyB0LkMwiX8gT96ivcDMacacGICZcDVGY76JAetiBvMNAESu9vCFh5iUOgDhuVccW8vfHOwaijmFFTJcST4-2i6dHDZ4YpR3t_lh37-Q4HZDiAsuFqKbMCFBH9wMyfHmODlti-9QSCfCsmQ9-Sck1EXzeQUImBsbJQDudt8sQBY16T3AX7kt59P5seG39N9PL6K4doisj3wxFnJuM17y51c_Gnwrxu56QV2FQ1_pf7u50SQAQUv80UlmCFT_fiP4mrQ5qp1u7A8PePkHBFSFKh7mAPWuqZ55SybU0OYwJplVQ"} ================================================ FILE: genesis_data/genesis_txs/ISiC3yaTW9KnZmgs39osghIg0HP8ISh77bzH7u2m55Q.json ================================================ 
{"id":"ISiC3yaTW9KnZmgs39osghIg0HP8ISh77bzH7u2m55Q","last_tx":"","owner":"nxSCfZYcODUXNVTlp-s84u9HkqX3FJoEWhIXMJ-xu6L8pOreofZOUY44JCQKW1b3alPd1S0q1UyvnDeEyu0yiQvkETKEnwswYq3iewqtajhdQOlfJuCxFFglmNt-c4YTqSOeJqtBZt1V7hgSQNxLWjvgYuPM-1A8bYPcMOkGFNI_SVlDEMXDfZFa-tsfRWViYWoDoqTiocV2UDQxhg0CcbfjXdNgUe3_Pr5MEDDCm6CAocL2yVTMU5qsqxB0YyrRYF_UO-K7SavyTUeK4vGxPQPLPBK5EnihisPIs9cBQkU1swJPKsE__QV8U4DYyJWjShOHZQx6rugx0scn1qXII8f5e40X0obdlzU1vhU3fox518I_3-MEmsEhdPllDB8Jn4KSZik3gJJu2pw2J2CJYSy9R7kbIXqUhoZ80NRu3IV-h1j973_cig4bW_JyKyjH9aq6pn8FhJuZRgwZMbxgHndC61bMkep6iSLCYPZtnBPxEDbl-5kvtUN9hffijOYHZ8Eb8OVIEcpeJavHy7f58mveJaSoHE0oM1F-K1LYl3WhnJT4ZUj-A28W7GnqXrzKxd_iXOH-Ks0L0gXBrS0kFrg4qQS_EzuDp_qcsS0VR87VyIAzt4oSuBFDOh-xNxbCWEFlcQJ_mZcwtb7Mq7W2YP3s_10nmiLGRItBru_TEw8","tags":[],"target":"","quantity":"0","data":"U28gY29vbCB0byBidXkgYXJjaGFpbg","reward":"0","signature":"BajSxma71R6EJ6dyR7Qwm4PwUs0VARah8yFLJM0oXQtKpqwXJUrBwH78T3Js3HZF4mSM5JJ5Uh6g_v76M4fCUVbBjfuIhyls3KNcQfAEEFw0QxNvlvwNqW6I5wfnZN4ckSyRqQP6BxyXfH5-QVsFbRkT7rw5rGAXPT2PRrTgkwrjI-mrTrB4kt4rf2eibOR1-M1TqT0z89tnm7a5GC4Z7_O99WV-BZgysUu9FF2wQyWQOXRUZpnSOdF0l7Al3TKoj9xjqFjUexkIk9FttNNZBREIP61Q1hSCtdFLWqvNGayoRzSkHTmITRXYqgT6tXbTQEinqoDlWvgHhG_xyMHCill5IE2i9hxzqdFH52kdwT23lOmORCnnR99pLJ3yC2rS7s-3P68FyAuc1TyuwuV8-6RYCOslGrBHZ2QHc-S_dCQW4pTZp-0-AzYJ6-ih3_fN4kacLXxHPOZGcDw9gyW831w8_gzKqFWHvHVtMO79mCSTusZs9mTG9EQ8Y6XVAKiGzSGcAL2mRLB6s2WAlYuB9Q9hFWg70BPddRDY9oLEAPgA6sHCMPZSPPgodzw5j6ieWZJRm3mhTLczHjLfuuPLBe0lZ7UdPc_QjVIzgPOsns4mHJkvhX_ynTBNnbnwsFU4KQzFMLIaUmuBHrXtuzTZOENt8j-8686cx7ei_nzyqFw"} ================================================ FILE: genesis_data/genesis_txs/IpwG_74praZjsu9L91_KWYHrVTpEDwyHZrsHgum4Z8o.json ================================================ 
{"id":"IpwG_74praZjsu9L91_KWYHrVTpEDwyHZrsHgum4Z8o","last_tx":"","owner":"sOuY6QBmgLH6lrl33fjWznhAsFZWVI7ZoTL34S15bXSGdE_80oQSXn3u6a-84fD_phRaeTgYo6cKGPDpi4Rj63NUkEGyV-Hk-quuqOMb7Ky_98Rb2h9opikqdb9puVjnoFT7MD6_YuxQNf0I3InnHTLKnBe0wQz8R3o4uMGa_DzaxE7unrbEOo7HjUvWsIAOW7eu9dUIdclcBKZSruLWYEw2d4i5xSRitQLPCaYlcCS64pxsFb9TKoIRCgas8aj7qdthoUIzZQmGtAtmCF6iAeFt9_vcqsLjD-xDm-YNyShQFWxXWwof_k9iU6ihdSM0ibIqa3cJ9Oia0aIRmZ3Gvgeg7-0zX_u87OSjGhoZJJmmlbpkurlhwV84b3DuuqeInRLv-8MHmj6-KP6SwkxZxzu7mmuIMxXZe4-zSE88yHxmgof-sfYOwla72VplVDN-BBjKcAWdAZ8S0x4noHohh-h_EMEBd1geDlGCrxFS1RrA7mA3mr7Xt-VMWwq-2ALzVnAhbixo0gtzRmYrQtJBHv8pUpyTwZwpexSCsT-4EVqakgVEHVxDVzvwE-Qqc0uLes7bvD487xTAqfH4HYKAWhoZBCVl0rWcwgEfRD2jS6dDGzmbfjHAup9hoalKqKkDzh1_U3etJznGs636zF-mMKib9gZ3atJySyZFtYMcLJ0","tags":[],"target":"","quantity":"0","data":"U28gZXhjaXRlZCBmb3IgeW91IC4uLiBzbyBwcm91ZCE","reward":"0","signature":"qegvL5duUuuW0I6U2CyL_c7IDIj2rX8t8soN5Lw29Cd8RhAJNuvADnXq-ERa5Hwoit99UvDUJmTA0SUuWRTB6WGwWllANxFkPNt_0fnAwKfk7hoETKN3D-MgUWR38C9Ij126H8mCFNRU52FN5p_Q8O8FzemnKofzHEBl6HDNQOXr0xGstkLAXEL1VfwOCI9_9F44Q0D6o3Sl2SW8x92Ss08DJYv3gXM1P96zCAqS34MR0FnTkUcEjc1S0jBEduPO0ly-gIECqQRQmgyQQzRWKrq28PsbbNga9aeG4UdV2jqUQYMOalabPuCVWtGNCpj_lFIRRGgJo5LFY7dB5Fo9fovIouZmr8qdjTFOJnRF7HS1c9ICgBrLMLCj8VbKK2pYXfgaxiUMEIcrWrtO7sEnhzvC5PaFNLsMtOVkaoXLSVU9mmMBko8LyqeBJxmU8_VjgwlF8Aclp2yCZ7PiEjuDRaOoP1b0zvzPgEG3TGlLMrCCPAblniI46p46SEMBgrqe-xHRvGUTp46D4Sn-5_5Lh646ZaoXbfZcIEicSxhC0wBtj3dwrbs1kIiE6eOw5dDyNJMQvy6pNAqqxc4eosmB4cpzSw6uhh01hPBMzxLUILHNmhHWTpoy536Bunhj9AYLyyYIs9L4vsUQAR4YcpOVPZgzZl82teKEy8Tq_VkmiF8"} ================================================ FILE: genesis_data/genesis_txs/IvyUOghXQ31LnYE3bYEkS82gTAvpIa1rGGQKmiJuuMk.json ================================================ 
{"id":"IvyUOghXQ31LnYE3bYEkS82gTAvpIa1rGGQKmiJuuMk","last_tx":"","owner":"vjFb1LphT1bpBRSQOyakROqwcdeEy3pNZJaumPZSJoF5FpN-kxulnJg5quRl5-_1K4q4qHCD4Nmb62xGcr22pzgWMQQnvyNdpYK37i7syWEmxcmmZgN9fIjW_zSsGAxQcdb0hDiwdz4fsCZY7vhGxpP2JKu2L4WH_lYgmb-EP6u99U74OGB2UL2wjQcESP8iksV5VqQ_k9onvvfpKH4filvyft4jHb80wKBuihAY75IOg6YauVVFgnSHQk9znmqqe4e2VGbr1m0Tm_u457wJApfpsp9vW047n6AddAVKxltfcdo90mCBjH0p1C9nexK-5uQVnjJfSAg7slxuUK7ciaC41G7ANuH4_01HCCl8MEQISRMEeQMcXe_w2OcXerVbc1BHapHfa0y1laq7BzUv03p5tgRWBVRNv3IZpwmQtAukZSOrg0Dhm8A6_AFk0jITZ1ygjMwEC069uEdLrrd4iFnUO7Z1y9kIvpfAKA2sCJYCY5XxeGTX398wfrPDeA33fvt-BZRfvgNtlcoLrjkMmSHoM0WhDc34I80gY1AuBCI2J1afH1lVw6jxlVHl7BGegoREnlMzIyHP3IvDpS84HXVMPsQTlCnBSFWP60NkG98GK_eYVUrIIKdpDUZ6xhqARTDSUR08VGtdfLijNo6kXO3d6DbGq5yoUK4-r3e7ncc","tags":[],"target":"","quantity":"0","data":"Z3V5cyE","reward":"0","signature":"JigWGANrJZ-7Po5U_t4BdV150A6uCbXi6n-3fUgYNFW3Y3p1PIQ03ijvFZiFRqknUjNnHWS8Q-i_XU-L1ZLUJEY1Zj6PxvG2qC_toipYqVn9VgIE7jyAz-GsgyPWTO8zQTUTk2GTWzAFv54c1f8RMFy67BaZJItqp_mMjjGQCrYZ-ALQ7CEDRwxF8Knyur-6jEU5HMdWlCTPAsP_zXhjIhhlReJ9GF9xtFZVauXeeUYcTDO9EBsHfPhdJBhfyRrYy2TDm-Pni9wxhERqcro94pQPQzWw2rDY5HZdtyvgjTCSQ7PLwFzZ3SAGCLQ_H-zP5dNvkYq_Cwei6t1kREMA5xT6tOxfc_sT4hahk7jQGNukSuYAu6Pj_nkQ3mw4_zVQmBYrNsy1c1n2HU7qTnMbTsFh48f3IS-TikoY9f0e12V88EmQ6Jlam9M0XobgunFX1HDJwKDq_sHdJm9F_MJQMRPyQIjL9cX5yNNQS77EpvJphOY0UI2F2Kqdl8imzLXBol0E6MS7JWALn7DEOYjrkk1Ps6EYEi_pWMgFepGEOvMXR_35w-xQv18J-d4ZN8ULJ02exbMHlg3TxNczvMivd2IuWKj1skJJgQTWEWROEfKi5h8kwJ3ZD330-g9h8Mc3bXqdSP7xhE9YfukET6U89AEkqKfBJVIDA8TfRxFVYW0"} ================================================ FILE: genesis_data/genesis_txs/IwSIt1P5I_mM-gAeAvXiyxRVb73hqkQAMfxLIHbbZYk.json ================================================ 
{"id":"IwSIt1P5I_mM-gAeAvXiyxRVb73hqkQAMfxLIHbbZYk","last_tx":"","owner":"zhyxp7jWVQIbTj_ei6naaXs-jufb5m2L61Xkor3g9RlTBWih0hTtTDHAfvjyQKviKh4SPWKpZ6jI7L2igW7GCShEu50vi8uTbI962tdJNBr5mjrhtkl--wcpGADvd7USIOfb1j6LOrxswv4LD85bEzezkJZ1asvy2n5QBypDUaLOspc8nmXqG9MniXSqIJ8HI0kFmlj1bVlB2X1f_A3TJIhqUJbQjICM2S9fxviIsleZuFMmQ0ZjFUH_EPtd5qFx012zP1xjkpc-8SZRPQOctFecD02Wgn_33omIzajdUa0vTYxYUSvjUrWWUM_kvwPph5KIbj9iZgHdrvH0qDAfAhoxRd1EBc2I5zJNdyJlb_sVV84PKq9LPBb9nGnlx4YTgqMZvU7EMDeSPSOcvuCDEj8QATnW8VQf65vhngZtdfRIZHiXk9e5cu3wcxNdbgm15Mij4pMdD9Sbv1g3BK5iIxFLRanzrzdTrtlkOLX2kTihuqrykFK3NtLPSsfmrIYE-WrTD7aFj4NIAVaUIPYzxIW1b6DMlfbbX12jLRoqMJ7syukRaUeT0n8OMrtlqeGX1C1rGjO5GDQ9sIFTsON4AA9J09-sovUvKhUsxQjzJF2yAlpgVxwSeJvrcYxrDqK-zE9d9y-Grptqc7YBq2MVtwQwRF3Gx0wRDQ3SwmIIEcs","tags":[],"target":"","quantity":"0","data":"SSBhbSBmcm9tIHRoZSBwYXN0IQ","reward":"0","signature":"w2NqG58648mw3AJsarA7-rxw9KUsbZX7ttzHIf_cYFOURvuft1I3z4Mfq4rRvsO9iiNbPnMsRW64MH5-a8snIKD-aamIK4M8gPtAzGEDPPjuBl9xF5LkMK4q7dfBKAy69nInqVSAEO4gTYdNoflU8mmtCfz5fDlPy5b-VNuHdWTUFgf0HrWrOquREM7BHrCmwL5LYI5zSo3gXbwK7DU-KxHltnsPKtXcHgfgRJ2399yGgyBrKG5McmeeUAY_Xl5ATHi1ahtbEDigat0a-rpy-EPVzCg_lFUHO_akm7LaXIUb1Hld8nHOHFuulHIKwJ3zT93vfpJt39eNZrKBnSnYuJjRulTAb4Rl0xk72OG7LRvaMCKF9umMRrAKcpthzRP63xfuK-7SfVX7FueoaL55ODvlQvPsiMrt-T1lIDXYrHqvnApKl4gGW3UswfIp0_GcocSk-hgC8aONAOU0g0IP9MjWfHoPHPdBq5ytxL4ihMRjrWZlqCMikziH-0VtRE1f0kNsUDZBOOqSHZ7Peadk1A6PnoBr6HbxlyB2hX7diL5gEsl_64Jdj6JjA0Y8va953IyFeiNbsoKrK2o-TZXNKcvzFD9832mVNm_L9iRaD7cKvfZD6Iz10OIrlLXIZl-OKx_EvAi7x6sKFctCv-34icwiVk-KozH71gNa0Dhzm2c"} ================================================ FILE: genesis_data/genesis_txs/Juzb8MlmGd2qomIUwgfGzIFO7c7ZcY87kJPmqpSkt18.json ================================================ 
{"id":"Juzb8MlmGd2qomIUwgfGzIFO7c7ZcY87kJPmqpSkt18","last_tx":"","owner":"xZqJniAJUaMyq8cCQUG8PnwCjHlNRhiti9Br35s69YGpwkBIGHhLiPhJERLm4fVL9jkzFVTyWOd2hkHMTJQRC8kKlWLOFVogPfa26jc_Ys5OycycSNMP53EI9JmoiPaUGCBbH0VOrznhuDAR_HlLvoy6wUMqEW4EuDEZpW4Vm7I_Fr4Ck6dmeafScgVi9-uRK-aYpdF_lz4F9VDKTQczGTDmzddxRIZ4F2wfCcGSQefvahQAmtbyzMLqB4vr4XtNPM_BXDd4YW0pcsoDE_v-KfRuwrnhTQ30EI4BV3pPvSQnKNx7of9h9YWpAF-umAExqeWr7pmBrx4OsBgVawwBJUqtvR8MihSFx6flTsvk739wDI1ufVGA_ltK0NBtwyv0VHQiNeDx3YgTHlQZP20j8WdO3hF06NIRr-q7msmKTzeKotg6YCOd-AwGM-yMWIQ0AY4mUbuxyTaUpLxxRQo-mfWqqgieXxQvIv00mY_Lw4pRAiLD_lawdoxg2comMpp4zFKCLY7dEPN2b7grmUpBT51cHOS9lT4hxGh4dAbkm1VRORNA4KSm2LSLJFMP92j6tFZ5LE7mFFfQAqId2dWW-EAWJw8uAZR3Y49hOcU8EcoNoV_CcPYtgCfQ7Tqvrcrnv9HK-W7hRgZmiprfP5qiWQ7CXK6Lc5WnNXO0n2WqdOU","tags":[],"target":"","quantity":"0","data":"bG9mZXJiYXJ0Mg","reward":"0","signature":"n1pYo3UsA9zf3FSZfuIt2BWG4dn_UseN0O4gYVezs2Z8iFj5hWI160sBS4feAknqrlpeW_E_-3d2_JfTN4o4hE8J4juTzsZ-CUhVC2L5mQtfG69gfoiax2LFcNcb2OBBfSNFM3Az4z2EnrjwKhM3L8DR_eou-Rvs0DXiDLDIF1gOnURfj4Nrb9sKgNmFprlSgsuafJIsfgQDyqIgl3rHlKnFBtbh2vUeU3hjUvkCE3iDPwgEr7-ebY8vHzQDjbuA7xI34L-WNS29L0kg8Zav7oPvhM9thcPPRF3ail-WSl5W5CaZNS1f3RgIG9Yv70jYFPFsvIQNjRcrTmhbOGdD2cQaT-tSBahpVxapp1W78fYiQxDLiau_oqIPPDHrRLQ6qC_OcdDR2ZCjKUzVbDEigxDf3ePhKNfp81DTqDKgRKzDGFhQkX-JFoJPbcOcDB45MgMQgyU7zxuP2TXeRpnCrOusuJLZqKKyOrjSPi0Ej38trZtbuIg_IMxrC9r0qJ6t0Azd-dLHN302Te4ab21IYtN3MC_9dFCxWpFYVpzInFVP8HLQ6Km527TzM9eNc3GZLFPI31usTF_oLLBzQpTMeCTrt6K-PZwBnKcT8Fblcjsu5gfr7Vk38F-Pp6lbx_4ef0IfAYPNcCp4TxFfmRgFrN7eKTG0pC3tIbvKMYrzjVs"} ================================================ FILE: genesis_data/genesis_txs/K47jh6Jr6TmZeZ_TadmyLLy1V6ZvLNpvV5FWcICohnk.json ================================================ 
{"id":"K47jh6Jr6TmZeZ_TadmyLLy1V6ZvLNpvV5FWcICohnk","last_tx":"","owner":"ykieywknqm3oSzlRMiQemPGhkfZNpILOcmWVVgVcDotvUHg0aSHGoH9uwG5ia4ChyilJqA9uHR5h4LWTy6lcFU5KMkXT1hC-QpVlVt-0vOY0n6A6FstIl7YiC8C6K-uZdI2IGWqOBy2WfojdjoEDrO6ps--LRrj1N2rKDr0uDBRIIRtxGwnE7bO2zVfUGXBpgn_3vUh-SnOCTzItNcFl6t48xw2h6jpO73JuW5Ct6ku1U40Pd1Oq8Tf8-mE8MYcXeQT1s3kUwDQw_e0TlslWbeMRwy0iDWytX2XLnQKOfp9jBl-XXBuQoR6rSp-cCYL8PJnb9O6nbtrATmPE9lumbwnAdlsyEr6GZbn4n5ZSvNq12-M5nhl8Ri7eMAyiI9yl155_9O5XI7b5r01T8ud3hBkHIoE1EzIrSNX242wGvobyuVT8A8DnvvtgRlakH6mK3dIIHZq66Y_JdzXvu-AByHh8mu_DuqJ9GktY-150O-LQq7yi4I0JWY_XFUCVU0M1xsBlyAA0C-9pSWuVTT7qg2j6JXMZZjIm35tdHkKG-55k5Xl_K9KW8V7FgrQs8DLT3z8kp1QFfFkns3RdeW0Ax7x5J5u1CqC1hWoVKoMNjbQtO2mOk31XF8T_b9G3F_Gj-ZrzNJsPjGInd1JhyRy47jvf2mX_ztnBxUEvZt1812s","tags":[],"target":"","quantity":"0","data":"SGkgZ3V5cw","reward":"0","signature":"GcaPVr9D3YvPe1jdMSZGPrAx_VUPEOc0fAbbrmLvizLJxygkHZU8wLgJkO86GfEUHXLtZOJKwNWeqOgNe50pT-zYgoKd-F4BPdS0i14Qk4SEeE4dy7lnVIzwoFbkYR2ct0RVRVYdXVGIwB-9yV-hrI24WMOiezNffqfJzu8BcoZgsggI0xGJehpI_IzoDaer6AL0ybmNI77FFWEYxovD31Lv8hutoBBfNlsiq_k2xFBHPm0bNI5yh4ltrDW69MrecV7hL_eODMg1UQ5BAiSxjGEBY_qmi3yV5Jh9UGMeLHrTt5kspF8kllzf9zZQ_fUMAp7Qwd7GRT-Gy9IfLxE2gP1HF4bScRuyCddjYC006woNjoMGBhYe53oXACOnAU84vqF-cwOnnMzN1B2gkdXgwvF3R5uExIWj-si7a5W7nehSEnMDBSaXx0wvPvF5caCesR2Y8ts9913MjIr-znSeADKkxTVytF5bacalEwX7BwNLSLX40PK15MSISdmOCIKIpK59gf4qxJR1tG9YAaW07KWkeI6k4NjmsP_usmhE3IyvrboLmLg5iRX52UXrpxaIyRDR0O92TSGsbTLhfAdt_FC0lat_q7t41Lxs8IqmiA04W6jNpIIb9GemYBX-nXLdYNxKGP1T3V5I_8MCRXxmtMdv6xmM7J9euKX6XPP8Mag"} ================================================ FILE: genesis_data/genesis_txs/KOm2FJzmNXa_yjYC-58DkysCdk7FRFMcRmBx3DF6S9A.json ================================================ 
{"id":"KOm2FJzmNXa_yjYC-58DkysCdk7FRFMcRmBx3DF6S9A","last_tx":"","owner":"sBnBs07G3NcSjtn8Pya3KoyjxuhE_4hgOiiARRN7Ez9JK4lmeeebQ_gsbhAtOCIn9IBhSEjEGQ7Bi0ebYfX7qYOx_RZlNi9bpBdtDe58mCZ4LGYF4nVVGff0DvRo94gJorFgKrmXkJZRWTB2ZcmFkkUqq6wmJNEHdDb3b60JSIEOqlVHR8bgHKAIey9QI4YDFR5ZdwbCDlJZCggNN2bg-0NhEIQT3_GS5z4Q2J99eKT8ajjNXvg4Z-Rm9U8w5vsJrJ9aPYJQLXPbGO00YYa00u09aZZpaTMeuA-c5nv5z8dZMABkcusl9xks13v5YGPGUEyXmnDvdEMkcJyjLueqhfO2wtF1hgEHdJoOqp5ilbtu45nazYyWOyE1FSlmZi0MeyLgQ9Vj_-E9aKeYy5FNG3RqaJRvLoDWLb8ajar21ayQGtuF7dVy66cVXfgRzO5J4tt7DHbHfiz_FNNPb9SGAuIDuhZlQiHxN6tJuJ_uNCprEIP_OekBEZFfBIpnhyOuKxWdalhsnKsHXC3kv42acxPWJ971yyZ7Fb2SDTZkEUnOkkPPKihaqV7PngMqWyuUgPJ9TdOQKFwteEVO4HYycc67ljHjcVD0ATwACZAuZVpzt0fVnf6U_o3d1xbKPom1346i_L0eMLQ98xdJ5TXHZyXP7ncAUyag8oizkr_ISi0","tags":[],"target":"","quantity":"0","data":"MQ","reward":"0","signature":"hndrC6YZcjS0eGvKEtrCrggnIZRbA-fbNon1wQXpYAoNFbWz700AT0LU8MUimZEXa5u0Hs01QRkfWhknhFvbFMhtsOVLbEzrZ3URynS7XXLHbNRMlZ7O_Q5lxauoa4W35MB9hin4QyPKqxpxgOAIPVCekm4fNyJqS_AiVy3n4qzPLTasPdjAkF0S7Jw2NLxe9XVKEZxZF_65QqVfl6SVQQOT2DqLEX6l-lh1GMmR3K2_z9PtlnAFXIgLunv1UPX8xs4y9kPLVi59jQSMJklgO4hnFKOBKBPKuq7_X69cJJiDyFkw27ZmWG3VX5-I9CvL0j4NWNnxIM8ospPx5A5S_m2PXSbfVogBlkqnBTMUfbSo769iKkcVRb6gSnGRPF207QCNEvVZZHJTsSSc3aZZQWdMw_PyQwEd3Lu7MuKX2hWPLbo20F0tMXGGkJhlPmVbkFxaprgiUC97aisLscbW5zNSr1cbjR4RzzOWnIfek1mIp7YoaNHMusnaUnGt4M0iRQnFDYeEy_syCl_Fl_eaIqDSWpdZsL9aM6MA682P-lLDnDZTY4D849k6C21On6Jj9hMwqZz9QPxM-LgUVZmgC2yDD41tevua80l2CGqodT-J-Q-rzc87c5dsfT1jqsRxss13nSSTF3ZBcn3R-IkLiEM5Zp5AqM0msCWTSMg4SRE"} ================================================ FILE: genesis_data/genesis_txs/KPNGfBMOznCXZwOVvCXHRR6sVJx1akVkmXTV98lCMKY.json ================================================ 
{"id":"KPNGfBMOznCXZwOVvCXHRR6sVJx1akVkmXTV98lCMKY","last_tx":"","owner":"5o-sItqgu3e_MsX69oGY0E5x7FDum0-T_p3QKPj16alXiLzLfR5j2BASHKvFyxqmLQXXnGyzmaUau6OB2RRi7vw13LbLNwafvyPyHh8toQDnIb1GS7xtEq4CD9pcPm_WrmIapit7sQEhthtX_-PdMKr_FnhX7L8-p46VibGZI_VBYcgffLFreCvkAQsN4qbPbFayla_VkiCnUdqNTqGwXIP8RqGkWFcdB4g7HtWRI7Ni3Zipy3UG6liEEDO8m1FW7z5R4pLRgdzl_dG-KkSotp8aX4porlEMAA8sFQXVL0Y0SMVL5yfm_K7wQDvqsVxeoyIYhupeRIFRF6oFcYAxjhhFlAoo9E-XqLL5uLkuwn1_SwDGPupxCzAEzi-5iEOArRBiPFJRM6e1vcCHvcoHa-TTqDGosWihjBU-LX6HlMgS1wVhJhbv_bq6fsl7PIjrUGh3DWLdk5jzZS50e9vhjZlD77mregUZw1iN2xyAcA2G_UqXzyuO5VXw95k7ot0DiRkDtVz22nKIfUId3ODyS3b0WI0P9j03uPy_MOZbpePMLIRHcyZQSrgD2O86X5sUtbEwhe3Qw0ByL4bb1Ro_Bph6fM5IBJx7HXgVJGFaGAL3VSQpoDDCF_cUVeThDEu-5ON9JeWqzqIzIdLU1_R-1f2KAgp3Dr6MBG3zi3Fus_s","tags":[],"target":"","quantity":"0","data":"QW4gdW5jZW5zb3JlZCB3ZWIgaXMgaW50ZWdyYWwgdG8gb3VyIHBvbGl0aWNhbCBzdXJ2aXZhbC4","reward":"0","signature":"rH81UBykJGwi5Py3K8mAl8y_IahO7YA8JxY5H5Cmf1y0QhUnVO1JEn5Ct3Sb0GoDANj6KMJNRMFOsEx8IkxDXOnIuIaijIj3g_giuIvi2nvtUnf7EOoXQk6m8dSZHs_l_gGEABcP_tcevd3lAzxSqEFUDms2qA_7-SkG6h7JkAcztgHAI5lR_Gb_FQdRvJcyzKelD_Rou3LwUzqO7Iei2rDlDw6qZL-_h3qf4vWn0AVj2QzY8SILfMisahNsPyUScilG7e5c6pPZ-F8rYN5trUJqIBCCb7r4FC0cEoI0Q2okJQoDovsRMWwmn4krbqzrHwmCEzrYZZ4kDT3XXbQmUMcKKhzUQR1AneeIQV6AWHY53H1B6pmj92ncQeukcx_xFid6mHkwtdsmLlDi41lf8PqukhaiPAQwq5-elvhDHIKrewZEDpd_LwqicHBVnPvqXyJUWVuaKaSmJRddF6zT_VhQ0nO_H7RRK1w8xpDGZU6gGnEeZvTMrRrHavFxCSle4L_ag52uMNzE_TQdJOZQ9S6yDUTePu2xZOa07tIRhVRZz4vVrduNyENvZoK0ye3-cIoVhG0saHtarIQMkKHUz9qHU95SazA0cqwg-p-_82CLi3ypXieitdVt__22qpbDv2uKs8q43EUIxW7NZFKGn2zQX0XLyJqH618LE3jxAlw"} ================================================ FILE: genesis_data/genesis_txs/K_ae8Bfvql0dGhIfRH-R7W-zWoeB95kYGJNi3HjFyrs.json ================================================ 
{"id":"K_ae8Bfvql0dGhIfRH-R7W-zWoeB95kYGJNi3HjFyrs","last_tx":"","owner":"0EFYUx-H44dlQ6-7nvldAjluvJ5Lw2nM3RcAUwHsky9Vv_TL_h5LUDX8-GvovCxM38XV1cbW1JgQlrvnixJXe79vVkpCbaQnLhryWZPFVhEsWGtVHE5sMVqBEEUDLpk80iGgS4Fyp8ZDkf4GPfDPthr-sGjeCgKgHEZzsE0SLhyAP6Fie276_vQE26OU7pImotrAFnzDCu8LOq-kgpCc0GOpIYvz_-Vl3Ssl3s4Hw7QvEhE-2B5Tn-Og1gVCEM0ooQscFEtwAN4pYoRtCeAMVHVGQhuR-Cr9X0y1MTE0wZL5OM9aiHdcLl300Tkn8kVZk_io3SvIXDTYcf4p1aeiSaxVcAGUw22ZmPkyGbzx681r6jceELre64PSdZryArIaSYs9VuVhc-niKuerAriCey5BPLkEvrjonUd9Ii9SvzJx-0m7zwNRaN4YVhyLvtlAvLUaqI5-dKEEewcZLd6gmKqJNhXbc5922mng4QtNA3abhbHCevTRn8qb4bT72stMQ7pik77EIXhCYXWtQN8UszsvfzDkzrrMjoIYsJyZEUE4vdRqwBlVKly2TUVdN-ALQpBXAKy1cS2bq7Y--e7sfpuLt-63SXb3tgYK3QD6-2LdqAtF-uAklLu1U5UgPJxS1pw51rwXesDGQcpyC7FsfLfnGXgw7QEte5zFklhFsvs","tags":[],"target":"","quantity":"0","data":"I2lncm9raW52ZXN0b3I","reward":"0","signature":"M7M33UfKKb4d9hUwpN6-tDFJ8JAf9OeIvavZ6KqoPX1e1QClCnM8d8hq3-JHhcMiYy-M-gOH_mFAfPJw7g5_nqBN5DhuDVbPUUXtAJFQyhuG062pvyRpQQZUA8F_IjSWhoJmS2GSaBzxhIcqBHmMavnohagXvFu_1ppLrATV0wSLxlyC2TdWvXLcAAQANOLVMqyIo51Tvf2fFUAiqgJ1xToTTirLL41x1F2fJo2kPfqsT2V4Jdej02FGDYTx-xTAODyYCNxZk090kc9kgWOoLrTF19XDnYj5yN7r5HAU3KanqX4j8A4g24sA2LtGYoUquA54Ob96DE0Gm_rxzNsxNQsmThzekhuhIA3q5lMYBW3B-gwxKMkt8xJEiVw2tGdl02buMHhnRUoG4yQfaBaD35o8LwzO-XQtQmVr-gjKYx81UBDHMiQ8ewhyCsy2GVZ2AUxK4KyZCYyAhPnuDceNhWCucE1e2JoEdvsfClXzsWv6KfTdkPwWoPJXIC0tXqPbudtGJrY-07X3BHriOwLjVl7ZHT3TmbSPSeZl0PB2vsWIj1g7nH8EQ7YAqJiopEiMK5PAYpbeHZrCfjQM1BPLyWm0vScmp_Y-hYPLm-Zt1-7cl-7RT71wXYYDcPfrbwuZBYq4U7hmWTkYzkie49IvX1hCGBJ3aRYKJVh5rxS_ACk"} ================================================ FILE: genesis_data/genesis_txs/Kgr-XWwHYos5Y95ZJ9mAUwjYjj_rP0I-GnWctQDNlp8.json ================================================ 
{"id":"Kgr-XWwHYos5Y95ZJ9mAUwjYjj_rP0I-GnWctQDNlp8","last_tx":"","owner":"8PhJjM5MU6_jQSRoVbyXjLN-7Zjsk7vMcvpyP9VvpAPqfceMC67iJmC98syXB1qD3OXco-yzhPMXvkVHWchxIrYSrdEzBj8kX1SXIzI_BKdSzzkeh9RNwm7DFnVYJbLB8xPPXVd4NGDTlUxKf6KAADoADE3nzVOGMDDTVvf8D1C20BEGDDznWvMGvOhZbk_fdwamvurviKHw2ywuxVjnBszSfQAJgkqbNFasxjpfHuvQpUKz9ctZhDJbuGI4Jmkjhb0OuWRLYrLJK12_SyN6OysHCeSH3SZ_dIN-UJvZfTYt8WaEOApSBNclZaNLNhISng3ZADMJGuWS08OXJAkrbo9G5EFuIEqRWwltPEJ2UU6N20AlckQfyqxehhuN-F8tRCvkXIyyv1mrvF_VvI9xGWFdyCBtxQMeUZjcJu8t6BT4X3z6R-dqc8R4-4fWlb-NWy5n6YNWF4c0rBMo_ol22FacvrCiI8pzNvU4ONwGGxhUjzv7Ra2NPHI_YcikDapp7bYCdpRDoBSgHaXZ9_WOat3S0u1Iy5KoMMKGZThmZpM-p2nk5P45MZzvS5eif7CKlKUIoQWa_BAMJV7NuRGupBHFEVaea4ajIzJgKsUp0tw8n7bHC8frvYj5viEvCf6yzQopyKV4GtDLiiJZfGrwRNwbhZWUYETY6kq8vsQf2aU","tags":[],"target":"","quantity":"0","data":"TXVtIC8gUGhpbGlwcGEgeHh4","reward":"0","signature":"Pt3KTXR-Ook3hZ00ZRy_-oONek7A1QnhOzWUDd4LdaHW5Yv7XZfZEbcBMdRDjQSk28qomIiDYbc1gN4ber_ToUGagR2aRagdAxXV7cGDJMt9PhZTNaKBHXTeksZgqnOdViB6caJwi0-6faiKsPzR0C_umspt3nKj8O3lbQoYrZ2fNFIN2KosJaySVourjKhvVjG3F-A6NDPjqGJcMT15qhTa7iJNFKj08MSbs22rqFwemp8gPsyYXdEB05aZcSfr5smLvgVehCILW9-kklgcB2tqPKvkyvlZ7tl1bi10W_z6c32e0S49WaXck4-C1OMIakdCegZjub5hHPenlJ8dF5dX1KCfxnhsEeqTbaYpbGST8sDS6CdPibroT2rGdjE4CXNU0eKZCeiSF493N9Pxp23_8UVBwdKFC62Yj6ZiaWTrY392L-49BkqTTdlDMqUxCTomYBLVviYtd1BlEHyY0j9gg9gjbwusFXTi806RSv4w0Op4G1Fz9kGoalY5AFrpM-_hfPtKGHTcBGlKaCtmyNUF-K6aUjcp4QTSnU_ixfdywlQS_Hkfdk7nc54_cq1Un_elsrFliuN8sDy5baM2BWR3E6CoJq_NQw6xSvIphehCrNQ3PxYDHRKNF-Ny_O8_VAQG-fNWaeJG5kNN2ELcAJF0mtBUu6AqASZihnleKvc"} ================================================ FILE: genesis_data/genesis_txs/KhQeu3CG_X1zoHbyy99GUlC9gVFFexf6vVPOlLgCj9I.json ================================================ 
{"id":"KhQeu3CG_X1zoHbyy99GUlC9gVFFexf6vVPOlLgCj9I","last_tx":"","owner":"v2zCS7BgfXnwLayQt11YEEmT74laCB0pGQmn1ltHvR8jQ0O7kKq3tTrEJ2HCgGMY8LdJyz1u3RJT9BULcJlEoSqxeJV7M8xWdM0mZG3O3s2OVEQgCI-BD-7ypwea2Z2p187RkKk6MKyVAdC-HM_8ybapQgjNz98lnfggduYryaQoRWRPmL306dGmZnJ7fuNwJup9yjiPzUyMbsz6d0A_PTqoaC8WnCIHcv6GTRNbzmmFogpRsysa-DaMmUdOGJs90-m2be1Uwe1L4qL5Vj4ePbiFifpPE34E3Xs9NaRRP0vTKx6B6zxUCfoClu-bEI3We8-gLJ0sykvm3TwrXy_vkc5zRMA04k6FBMa45xd3gosnYwA0VErHbaDP_vuQth7wflqyLrFuAx2iu-cYGKx5JtxbZeTBgRvaEon0SbCf3XOzaPEY2Gh3XgM_C9NCqZuJxVIJLl9BiQyCkj_JF4TlC7WVTk9R8cJMlqE63tKoY8uBKy8JRaKWORMwOf3EVxAE_xmkJowdGmkIiWoFZlk66DLYtBxp7YUHfWH9DTA7DkCMiAGV1Fr3W-t7ErIhXLYKH9XDczyrZz-uBc_hty3lr-erX-BbpdSfp5n_DJc9tCDUGhsFmRjWJaMeVi24AGaWA2vssx162vxeqzF-mWIAlCGwVmzkQu7sTgRk1mY6ZgM","tags":[],"target":"","quantity":"0","data":"TmloaWwgc2luZSBEZW8h","reward":"0","signature":"KFvUQCUGJZ3lqHfH2hQDivkBicc3mthAHXH2-BR8xmhQcl-743WHhIT_H1Cl9yoBeF92yTkc0zNABOPtDUI4h556vm8ZibnVbVSm-YMYW3hm7iWXC7gNqFBQjrFdsYu6AK3wGNCBiKcnmMYIB7HLS35lJbVTyBcoaWwSEtlrxcsP3df0kdPkoW8wAkoPMquJ9DEKPaWrbCsLiVb-Ea58bY45RSsBwN74zD-iyQblsciBaZVyMmR3hDCkVqxi0d7UdboIdDftVKYnTMY4OvxRmXboQyDs16kEPIlVzQ-bA3tD_IiMTt75jUM1fA4LZ3oHnrbSj9CO2aC84ocxNlGBKkQladKfCMWboZljZi38oDxYB3NnVuS0neYSW71qYBwgNrFBd3mKO6KeCkyyt3UKvpQIUE4FSSXlhLNDRZ6FlcRwIsSfFN_UZSUzy3tC7B5wUyrSgmXGBOCORaxE-yZJlVd3DCwzE81jn6hOwwEtnZyMm2W1GVn431rhVtGwo1d6g0Pa-38dzbQ2-GpD1qFaHNpKiAdXUjmmoDgKUShQ4z4SDCx3WNxAjpuok61Fq6rp2ywb6IFZo9aDOt-M8WfRQDWSaDDnR5i8tH3m6q5sEHqJE2TFPHQ8Cwu-PNmJj-OjJuZvKNgWBEOMMxZ83vVGxpN7XKaP2XolqRKETWeD8Vw"} ================================================ FILE: genesis_data/genesis_txs/Kl1zrMIDIC9yW8yLMnSKQYDoV0PY41ymzJQw91qaZvY.json ================================================ 
{"id":"Kl1zrMIDIC9yW8yLMnSKQYDoV0PY41ymzJQw91qaZvY","last_tx":"","owner":"q8Tcnjk08iG9HNQg4oeOdrgpINCoiSAXnIa42j7W0q_p0IDG72RGL5mqkRqzvVf3HZ1y3NhdSDOx18LeCpzZ6fleBgNq_D7L3_WIRddkM--2jE5qwm5JW9zZjaJiAy4Kf_hjylW8VZ5FM48xShduCW4Iqi-VP-eAw3VfTsRBPE7HA2eY_fBbejZNsYh5OqqQxYDmsVNJoUJNs0xAaCSubzJgXksSZwS2OH3ju59DFpK5IUZzpvraXcL32TIxFxad0Ovx8xVcGxuli8P7JQ4Q6EFPhjVWqaxkknPqjxuzmItwj1BTuaXCHhsfR1bDzHqAA8ZpwlEIyFq7aPi58Nn78GwnwHsYJmbZvqbC2q12SA0IXNgO7wFgZ-2tFsJvWRANeQQ5DSfP02IEy0nez47e7YdqLJtFPd7y_oz7Vnhnt6y9-KscGnzbLpTRqvdNtgvHZDiMnaLpEJix99NQKjE_6xrqtRDXDPEz_mBeF00YK90CZmGEEuwlT_vfA9hGpRyYauhaHqgViJU6nvc-2GLB37fF53JB84c4qg-BCA-d7oru3r2S8MiwREll6fOD9LDD38Su9X-gGr9wTg5suAoycuzR2iW2bnNr5QLQzv1rCNRIwP5hc_T5g5Jwwa5TWPV0OKKcdOx43V5GuqleLAZni-il_p-Y6TuW2Oe02KlxeAU","tags":[],"target":"","quantity":"0","data":"dHJlbmNoYW50ZW50","reward":"0","signature":"iuDOFUv3VQtiQjTymOWvXRcPGmU_FNUi1K15kioen902yKbaJmKgwX5X3eybIUGAyFwdsXmN7CJE5jh0zmxEvguhJYWIah147amdyA6ta9x2Welyf9kCtewYD7RzisBZNo2WXi2xuc9f0XsUXKXbqmTw6DzziBCXzs9ZWj3NeidSBv88Tqd3xLz7rLJZv4sklSUJAzcBcomPakxJc9HDTve34YThI3j0bO_-JAWqkitLRw8nPZXdiMzlQmN4JvrYrQo7dcp7X5qTvuLWvqRBZfwT-zpYXQaFQubr5zQ1OLi1hPv5Hga0H7RQFQ--aH-ikL0sXKxNBJND2xHiZft5tSrSxoMBshH-C2V5NJxYxGi1Zna94ryXznhmfOxjZ7CEzc5FBpkHXwLFo89zM-YDBtJdMwCPyXo3qoqNrmfeVIClbu8KiODI6wWXJCUdwre25ZNHbkiZaB1rK_6UyqrJp7rsAuE3BNrBfLtr9bYZLK5UmmpLGw-B0H5CR2qKoC186SBQV4JjoebMOLm3W6xweUZf6B1jRfqPu9nuyw1YxUD8jw6AEwlnowiObpWHaayxvOzm8q4Fgldg81R-5zlfYnrwzWp4OutOQqTdVIfJQ1LK7d00o4ptQRPkcB0IO97XdFtiH9Mw9oUKvo7dTDEaQO6j4wDsr5P9VoVY5RPJHO8"} ================================================ FILE: genesis_data/genesis_txs/L8tkBBP7fyYfK4txqP-fGk_ODOU4UfIgFV79O-qd5vY.json ================================================ 
{"id":"L8tkBBP7fyYfK4txqP-fGk_ODOU4UfIgFV79O-qd5vY","last_tx":"","owner":"smJNtkDevoa5KLLtf22cCzuICFDaP9K6TgnUieK-OlYRuY_1o15VRg330XfHkZ98L04sJgaUIbOQaDSuDjFhaYtfss-pdxK8Y8kmdce0qLmZOU7oOZVnUJs6ArAL88MIxclzo-A9DNY5zxunzXhOiAdJ-2tjFel1AtHGHl8_CzNuHl_ooH1A1Qovx0-8o6FMh5fwh3UAuVNu63odQEPQHfLNqX8DCGCjlKfW5f-4pCdHFeRsqpZodW6EpknOCvfrBqpZiKVQ_pZduSFaqaropA18-bJRA9Fx1zQppJwCWzW6h5zyoXgTIKbhzPlXDdBHQH-JD5kRzjN_I0XA3CRjZR656L065bGkfX6C3GLAX3pUd7HH2jLhH8R4m-rrZg8WGPjv23CvGAq0VMZbckKib4TPnR5v1PPA5Qij9P0dqLxbDoqVdAl3Pn747RP2JyQERW-f6WItVvgzRTTJ03SDMFj1miNg2_ppC9j-CfxiQwhvycvovMFFNlYE39JDUv1x5SXm5STCghDNO0SqfoVYE-hSKB71rvJ1WErg9xQfiTAtYrepf4BnpKRlxh6uajpI26a4K7UykYdAL72M_HWFK2cxbLWzfYr3Vz72FI52kyB0rt-U80NYALnM2HW8F_tKuGrxtN3sXIeY_FKU3wOuoRJ-PxDKyJqqiuRxGvzVj2E","tags":[],"target":"","quantity":"0","data":"amF0aGluIG1lbm9uIGMvbyB1cm9vYiB3YXMgaGVyZQ","reward":"0","signature":"na5plKk6OfotVEFh4Rpx9QTDthRKgYhnXkJAP0EstsLYiCv0DEXshpY0G8nHiuUokqAOoJYC29aIU_3umskV83BJP2wkQ8c4nHMCqnxdtXZ5o0_mgPKZhdlBeMQMgWypqShamsUASscwAKBXbhIJ6LVJg_krnVKtv_17lGOg1yjveYQdaAyJ9Hd_Fd9g5f7sjK3f9ThdrSfA4pI8cNd47twMS5njcQA1PaMaMxTfZmmqlHm9a-HU3tt40ibq8uA5hwJitQxWoFOAEh1RQtJbS0ob_vcuYZF_O5jbEfmIlTe6werVwhgqbbD6jaBclVvEjOEzTbESp4Tc-GbvOGmIrpuX19-MjpVyAQ3nFHbx6N1MMOAGjCbL-oGhLNl-GAJf52bFQfTSW7SkTC1vPZbbnIbbnm2T8vq5cRILTHBJs5p_F2gmBbftDtCrEOsruKgEBPuJaS7cCyzc_Zxydwt1RfHMxHXHiJk3bjDl6H6yu9F3k9lghZOJJDl3Era34m8fDHrWjUm9xXZnu01KByEHdwuqAmoQcNgQf4yNHhuzzb0jHGb4vUEnM8zXhizOuSJifgszxyqGN1pmT-hhXgPwcR7WfF0zuTwjYYkiC2B3AzyTwuNiMXwoPHEJhZHOKe5TMbuFPfG8TClWIoj6-5Xql3Vn9HCt6ZNB39dNsy_P-gE"} ================================================ FILE: genesis_data/genesis_txs/L9J9SkTWI_Fx5KhujeWGokIchHTSFlSIC0blr0JIz80.json ================================================ 
{"id":"L9J9SkTWI_Fx5KhujeWGokIchHTSFlSIC0blr0JIz80","last_tx":"","owner":"srbfy86PAVew0vVzfarcd1JUupj5a6UKVkhF8hQGr0i0x5uOGYjUeW26rTrkVAhq8VbcSt75A0XXNjaBqGx_T959vcw41qEl9YP-pwMA0lO3yrO7AbFrINBhNWsIg0l1AWOGGbNLEUlB8rLqpNO8fqdJ90u5dnZzaS8ZPaE1Y51VCMAV2hOXTsyw_UCoCIuSn8Qzb54E14hgWkgsLmaYoVJEV7yOy7CSDyjavsKJ9Q5veZyiHEKZ2lxmuO2Af7k9n1sqou0FZLFhaBZr2VhjDYxm6qh-bNj-OCtTLA5MQMyuFRHC5AzRmIveR9NVBxH1tXaLC83mwM7Y7V9J8GZ0H2CQAcAwZq21dWn9HHal9dSQUxZ0G49Gfv0STBjdu1sO53_iZDHwdm0N2rHFqKY6UDgXjcAyRqsI8utC9KVBPTmF_GMdAQ9xjy45v4JM4-E8lxkIXk83SK6VIA4254vd_L46dtx_8KC7IwGySnw3mrD72bHhLsOw7TdJNnFhzcOpFJOhDtKAYSOPY4D4droMTMpDchWJvKH29XxKgaF3TkUJpiZYcSJstL0w2AUQzyp_y5RTk_bQjMlJIi4JOPvopkcIQutciNGX5teLbpFWoE9WZZWGOmMGK7F2PAwH_ES-H5_pWZCSRQ5xfk6h20S0fqQETaMbk-fyQFgS04Eci9k","tags":[],"target":"","quantity":"0","data":"U3ByZWFkIGxvdmUu","reward":"0","signature":"Aw24T_Q61S5BMc3kABh6XDtZ5LKEhTXk6ADUxo4TfTrWXQKbKYG-GtdfxDevYp-0N9GXahVheMDHV9mlLTQAjm3lglJ_KazTzk3I3HpCl-hpyGWBipKYEX2Rlj5xJ1Z6vWdXTW_eYxtVBkdTYc9UUgRhIiqtJpek_As6WNZLvP6VqoD1Ee4cx3UmyxxULj-NjWu22x14wlnCn24GZHKt9JWesNaijby2lxRJfjInJoh2wYPHkW77zcyYM_L3Le3bQYYgvQhyGM8rYpfhvbdRqmY9VW_Wufx8aKQYZpSNyrNmni8jIXu6Scb68O5fClEebAhDBKXKQv0cXefvIxTmQ6H0R0X7_LaBj0-d_qdD7D73QbnLr5KCjMB2nrGiW16DOuj1-QQQqkL4gqD1eCUiOUGb25PmDS8SsJu1ccY0JgswrgULOLidVdtUWd0wc2mKDUQfd_7Ab1VF_69l9_Cq5R3kvWBmBb07QZTExiX0p_Yhw5p5g4LxS_6ZHbG6TYrw_-ALk5JHCc7BRb6I3oaVyzLfTjLcYPMmsT0Dk4mDODlze3pL9TC-_3-23gf02sSewy82LHrf2WvKgpk9nhEDrQrFZgx1B0--Kh0xLJSIaG7IP29UwzK3K28hU381EbwHworcAdyvj1BZxHg-6-ULezDtjMwrXYGCF8VKw5BJM9Q"} ================================================ FILE: genesis_data/genesis_txs/LBTipZADoYfO-9UecE07Z83ijiLl0f2wAGXyRFQqKCY.json ================================================ 
{"id":"LBTipZADoYfO-9UecE07Z83ijiLl0f2wAGXyRFQqKCY","last_tx":"","owner":"v2yWmW0mWwTHLn1TjvAjPcMUodRm6c_xeAWp0GRS6ryHUr6RX3gfgFc8I40OyuuqttC1wVfSpabCtDNOXcOWN9SoDCrBO5_H8sCwFL3x6NsYGE17whjVJ1DosEIloElIkru3nkJy6_6HJPlVXW-KwGjJumG20DrMlPbk-5GHLxi7vX36cn7Bm_o1ctnD-s2d2smaPRxRXbfWRWNNOYy1Ln3CPoXh5GotRpuAdey1_u7yKNDZJUDfkBCTHlIv97iKRksgIIXySeq455pWsq96OzaOUJY7cGUWr6YsnvoWqEQadsRVCImih-RXiWV9MdcT3ImiQ30pE8hT5AhJ-h0a2YEwGOF9olNx-O-Zyp_4grS97Oc_PoGw2P8xbQhYMS3Q4568GhAl13_bXBrfl8ehYmF7RJ0awjka4x6t11JWUVey6Bj6d4MniPZO6sSiee1hSWCbFYCVwHlElN21TWlHf4HyHe7hHaEOETx63d_O_eFClBCH7ZtnZWS9ofZg72PXdxqsQLQDA1zsNXm9mYo2TaiJmuR-vHoWv_nQZD0KCxAzuiY2knAwcG8R-GYRoCCPsX24cktGHNYfD6pnDnMr8Zvc2lui-8OBKd0x1v6dvDc1BQxnJJwa7IBhPKogaLzCIL1CNCjyGjLwJfhlZW-Sa646NdJQxvCp4CuRKq6G2ec","tags":[],"target":"","quantity":"0","data":"I01BR0EhIA","reward":"0","signature":"qoJHahT7t3KtmnBsI2M4fr2OognM4q6aJt05aalGonVGKjhHRSqox5sSLM2YwBJPiWt7dKCtuL4z3fwHIppUWETpvaZv7L1XZS_fBuDgzSygmYIv9ZVUA2wgWN3OXSpsUvp-7W5HRyfnx-icF0tJaz6NAB1M5imTfeIjhrXywjl1z0CVp-_AS0p-qAgEldSb_nhyt1dCTc2hyTyLvihYIHUE1gLr8WoKFknf8DLQ2rIGqpm14HQz14hvltPaNHADs6eDtLHggVfm2QDcSkSGmCXaHXeSaNSUioG015iaNvOxh8eLYQOt7DgphBLe9K75e_6IB1hQJ-AIewXCPwtmOTCUewYpa2w4Cpt21XNHOsiaXwW-0QtC_6NLGf1G-lTOwrX0AFbfkJrZvNlt_qSlRtkYsQMA7w60UByT-i-bVXDVNJX-twgxrEK_z_LFAkXT9v7dYyVgDZ5VS-Uah8as6sIXkOXMIFBNpbb4270flxbF1YTHWzxSqtmDY9YBWB52f6PVffD3ZknpyllVIrY1hpApkgjKlCxPbPkY3kBW33WckgGk2F6HgAR4EI-GNO20wduWFSOgjSs00PqhTKVIzqh48VpKdt53mviZOj7JSQ_UF-8sCnnyVabeHdNG8DJwY7HFog2rkM8gbJ-mpvfGNXSHzXmK3H_gaKVCWhr9SA4"} ================================================ FILE: genesis_data/genesis_txs/LC-_5GDhs09OvN7r8GPmjMa6A9xSeVtsAmDgYCgspvc.json ================================================ 
{"id":"LC-_5GDhs09OvN7r8GPmjMa6A9xSeVtsAmDgYCgspvc","last_tx":"","owner":"05FIAagGAWAtVnQSs61qNVpTn_ytsElXfwKjt62PODTD-A8S7Jt47o2gys30iee48L8iA_wdwsHPFVup87R9pLXdw6xLoC2oqRcEKG-CHOFXxzFDthuZMz-0qZ11je2z-xiD5tn1-a4EJWSEe9eY3v5ShrDg9ikMLVCHKQHFuhJiv-HhexUAK2Oeb0ROd1TRmXCOb6NLWqKSXAuOJVCtw2l5Rf69P0D3YWbL2KCF572ldF160i2i-6kwYjz0uSrB4FzXGmiExhn7eCNGmn5wdxCYhwgaPZ3JIhsn1ysc5wwAU_ZrxpNKZrRESZfa1lJa3krgZ6ZFXc6DPi7NneBRZGL6hNwX4JDXWOwUBfHDnINRBTplO4AkNKFPhypPd4J3mZiImG455QA_Yi1jrRjmkjPoN_WRJLMgWkLX18_V_dZWKaQFU-gheW4bM4mMcuHqN6QxN-jp2bmpku88oJBuFpaEoXFJfRZEdG5NEVm9aS4wWMMI8fDvGS4l-qXtgWRjGovRX3vJvVnfFu2WgjQ08A5oYW3mJiX_CHqJHCr2P5JjPpuPVCVVAvRNNKmRCYVKRp0Xn7k0DlnjZJE22cUSmDHDB8fNxzWwnIZtNOI_g6vXUZ9Efo97lm9vM2bFZpUDQEgNUJFvQ8BpDDzxFy6K2ldFCckTGwD5tkBeNh7iaGE","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"NjjFE-sJfpQxTDx5nS9YGBYlkCybe7Uap7eJipB_tPAPpWUwXoICEDLWCLuGUYXb_L00qGmE_hur9wHo6xLh0u-7gXLDWznjXihZQFFxN8njIWpzcZPNt27oIqUsA7miLf00wMA57-iL8HOi5d4Pn2W10hyTcvInxx45_RLfTTNOiEWcgqFsIzgrDYpmsRVxehi1s-f5i-1UTE-ksvY7kvlioVIJviuJDCnIIqwCzQijVyoKLOstjUE-L2pKDuNAhjLChra4kZqD87-60-OCumgDsZnXOWhZtGnsgZbgJfPUEeMGU7jvaQ2TYPZSaoP9tw1arF4dUZRkWjlwqe5By1AaZlaP9Og4QBGA9cTbvs8dYCkMLjFLSC6zz90kWtN7D_KfT1qCQCpdt0uc-WAwbP8X1cp6oOTB8ThOCslMP-ohdF84kVYtURxDVbY67sHrmlfAD2evR11NdlbsHtBRMx4nGPum8Q3f5utcY5x6INsbCw2YrUR1c-tVBKN9BAskk-lcHZdUD2mHW3Tbylhb2LwEVz8ZiHXEJCd7eamo_doSBGM9yMvfFnYh9W88NSYZMqxyV_YoaDKa6Yi7Med-MVedcKLhO08uAT8pghCYTRJPYiwlRPCcGcMp2gkLGTr_30WpQkACA-Mc9TSZH6frjdlvlabAlTGQBwDmjy-3830"} ================================================ FILE: genesis_data/genesis_txs/LFQ5iV6E5wyBbJmJoFJdH39ZxfW-y7mZFKou2H-ONvg.json ================================================ 
{"id":"LFQ5iV6E5wyBbJmJoFJdH39ZxfW-y7mZFKou2H-ONvg","last_tx":"","owner":"rWtgN1mZLPU8w3d_UpUwEHjeoCoobohVO3MldwVxRAUV1ZAX2_q7zpewclqskwsKR7puJS7-8gy4vBpXuxGGgzYOUEU1bmbL-yAkDascjeMWKoKUO3sg_yddaFvHQVFpAqgMZdbJ3e0hSYf85kbnv2sryxGIZ_5cgkg9KyJKzWG_ottBexAbeh2zNos8xOAxxkAzTytEnVCkbSLN1HR-5jbi9pISxYUi9EJ0Aq-eoIAGQZ6jZ-otZMnskkdhQ3c5aTW61b5oCDGugVuEX7Yx82cyrBdwwRrfqKT_7vFsRhqsivtSGO1F8o0b7McDgU_8lMjkERI_ZhDT8ev1ZSg4uKvSqMaKqWlK-wnss8TaSeVwWxH1N4pd6bGfv3KSCv4egY1EPOJ14CTJwIqK69gMUZQlH65CZUZsKp0NjVDV9QN9K15kYYg4pWM54sDyl1LZusYX7T24FjGG0EgS2jfhWv5WM3Rlz0SoDwUW6EF3QpFPFtQ-a4ftoFf3BjE2snJsi7-gASOCf6jd3FBC5x4gWbM2WLHX8a-h47WNlxStkwiyA4cLci3SFgMdKj5LjSJMsvxg1EsVAByAzldvfqbsTb7zWdW8ezlBA_VulbKjNlmjoM85wd5odb-tpbtufMnSmzIepodYrSyHu2lc2angXAG0WklQC106UlIQTpE3tLk","tags":[],"target":"","quantity":"0","data":"Q2hpbmEu","reward":"0","signature":"ldgimAec1ZS6wzwT3kay5DtEqTTizk5LNGD7h8vJYIdVIae-Fw2l7ARWOs79RDYXFa6xamf0OVSs1ESKQNidOCVjVB09bANWwWSUHMfBCpVWnBg0MgY07HDWlD6khRhVIU1eXzpIPDExfq7tFe-Vr0kBJKgV2X6jlCwCVYNUwsQmAFZ34jBceENPvehzwAtCG-Xx71uSGZZoGLF38byX5lJYvGkXMP5xG00XVoAIHqj9RLs5mjWQWyNmf56rkNAxibv2Y-h0QbMNvtFBP83gzMehHLv1qpiCrQNi6K37NtAHOTxsalW1pKc5lkg1_vMjeWRRnKRH8HbnBo8pLhdutz1g1qByoKKzx3HaasZN1k8r7vx2IgtqyMEkRXOoWclutgF9KnvSXAhsgNTtefJVAOuklncBeYxfJ6XOgnNy_YdH1DfoJeurY9afDNFKtuDKRomI5vplhsbJzknQ-qcJ4m6lgjdfBVMZGnnOkbI6YxQq9Y07qjEG7uT5Ph6yEpR4zD-LdZ37L6Fdj_zK0lLuTszZn5vqCjRohWrxXN7ezILgRFoTAvxmgO8RcMQq8TFLeCHBvRRKs4atXTm4iL0fWk9aJzdOkax12RHyqj2qDR2v5yulimJ17ME6JMXzOqWFK2SHdJDgxqYHk2L7KbAYguCISkUdsyrPxBHou06aXJc"} ================================================ FILE: genesis_data/genesis_txs/LJ2QSdjHftgyCOSgy9Ub0OkTTN25rxCY7D7mt6u8Uy8.json ================================================ 
{"id":"LJ2QSdjHftgyCOSgy9Ub0OkTTN25rxCY7D7mt6u8Uy8","last_tx":"","owner":"uCCZUBqTT0cGMkh-JfLJ776kXh2XkPIiS6uYNw7drKEKL93Bmohlo74W3559fJ0MJqKtsbHkLsLpizTy7am5cGPlz4tApfSegHzRCSmUPKtcK-eyNXPRHOx87S2A8cBiRpfsZHS9dwrzQ-Ql6ZFIPaowonxd3QU4_n2xvgewfgEEUSk7wnt3TatY-5kTWwF69Xu5QKp-iC_aqBX-grGkEQS27nlmGLBzMwkoyn-keWyCMYsPRtW4lxP1JiyUZco05gISFluVADSYnPVpKDX0i88bSw5lZr2pNXXYxtuVAsR9K0E4dtyE6Oy54c_skVq5szjMY1J6nJYj1xv2uw0e5Owy6v5VDIN3rzRHB-nhdV3C9Tv1EYSvjS3NSVg_5qoMxrUVAhLIDQeo6zGfCAkdFMreJ96tFiHiiaeNSokKjmZscTkz3jTuIvDVzaLIvpjgBu7lDQXTJ6l8RXreoGhT9F3ZU9goe3Zke3OLXkHF1bTVFdVJnIvGaGx-moLqNyVowypQRcZ8K1LFB6wgApSbasPNPMPPr_0xqnn7kh9dJ6WCspEVrxO0fuu8mY3sZNSpIlVlB30mbZIQk0crarNG1KwoC3VCEIjmha5FPLy7B_sukUVBl4iSIgGmp-67vMRmqVOMr--xH21ybL_Zjuzrkcx1rSJSaFqLuhfHqAd2bT0","tags":[],"target":"","quantity":"0","data":"dGVzdGluZw","reward":"0","signature":"idVMekMXC4_NTWSLc9DOzjIf0EN7cBBsCW5NTNvPBNMFKMsthO7lyiu83DjbXgEpCA6cFjsGZt2m7IS-VEptCGMChgCehCFaHzNvuc4_JFBwBiXiXL9G7sFJiJHHiyGFaB1SKkLnAhtkjIg9BHNwDLnMlH1u7GYSQcs-rVCPcQhiQtDGWVmbrmSxbMWUVWGwuHhre2IzqUzXZyKXPvPdEqfShWY16qJ1lIPCaVkbDNBoRo6WJ9u5OMCY3MlgWjE-nWTJwbtz7pM_wgts2oILd0rQ6Cf8KszlfmtVPj47BzZS4PL4w_c2xAxPwrNe-_1ASJztjc2Wp9fffGfRJqCC7X1bEHz0tiI6NvEW_Sc9xCrl1CJ4CuCh0ZWlre4YgHRcmGApjaQnxODJE9ptct3uagEqCblYlHfop2XDSb3gX95wtdZJMtz3zfgbzRV9MdSjTsvBdkmnk-fYtfzwxZuFv-Rq36cANxac_r4cMAt-oqBmi0jAjMHsolPe7IaHoy_ZCcUSdxReMoidf8PpwPo1ovu4A1JuPbu7MC1AOcQh2Hg-498qXy-Z9FAWAWB0NKKbb4qw9FbsAVXVbqRsBjqnBXABk2IfGgUJsJ3PESraWSXsChDYzLD7tWkC1FlHnEh3zpjf0iEnWa34Rulwh2A6-Es8qCxX976DYkrh_5KMczQ"} ================================================ FILE: genesis_data/genesis_txs/LUdFh6g9auj1LRtk8IUwLoY3e91jIkcSyPKuQQekPY4.json ================================================ 
{"id":"LUdFh6g9auj1LRtk8IUwLoY3e91jIkcSyPKuQQekPY4","last_tx":"","owner":"9KeRUUXA9j8QU9jwX0yM6LE884cuWElUEOskB0x0Ky9_ZY0TnyNgqNQ_63dkArqdWpfYEWyEGVp7qbLs8nnpgcvv2xAI7Tld9cmvVHgXPagYIGZwaFzzTd8k_jQWWPekjI2oD4kGLPZfDQsF3LsZVufHsOTbKJoGwXYPTP2tQ9npoW99FWLQ2yBo_taNa9QeCcZQEn6b6C_qYIC0lCYHuQQo67vZ5GJxuga1a230pNEXYbVEB7QuXzhzUL3MsR88d_IuK4TW8UmE-a-p2ZEGYBmBK9GTUI0y6nRfLgOLr-VXEGLkOjoATI4Vosmma7KreOzDJh8QTHeCsKO5vRmfE1HT6g48BBxiB_X8PsK94P4-fh6uenZPgVOrRKDNuRZQ5wZSft22cWIBSasXo32SpxDkKf3JKMrs_WRhc1pyTqY9E7Pa0Ffe8wppA4bOVNhL31nZmvag55S33oZvzNqeLkwgP_dKrtQMt2UcoQQPeGxXqN5I6yarZb2lIboJrHNUquVaYAC--29FJ8KXZQOSHLT7_uraBIqH2aQefV_j_IIDDR-Ue3kHGp8W_i_VkHqcvKXInnYuMsu5QkRFWzr57vuNVAfWGhDHt9CxrA3ozOfM6k_RIOjBgauzVwD_rvYIATvGbvW8EM9_srH0kvGAGjva04AEcVCu2rrTtPkmrWU","tags":[],"target":"","quantity":"0","data":"S2FtaWxsYSBlciBzw7hkIQ","reward":"0","signature":"fH_ynse2th8oa1cSIEokYrJrzJFsl6Kt4Slx8auFON1V9MU0OVePp4gXY_MJnAG6CsIl2Y_4_97tmINz5Y-i2SrAdHRuwocNJw0tHz4bzHPN1nyjjlFRKPCbiqaBKESq-VXTcXwkNaLyVFYboe_x49Qq60DkjiiKsME6HAnfB5_6-uaqdkW6_BH0d1AoLCq5fRIIIKEyIRtMIehjFSdccxAjgLVT6sfxv46cZ7fYpUyJ7700ZgOKP4ow4rek_jirMk_5fTgTe4qTqTPtsb5Yq1tXSM5owvxBZ7N2LVyq1iyFdXCvL9mibee4rFMxShqjmamfk0zxyVoh3AzjVpaB6q2khCZAbTQ4Q7ljbxSek5hOkIyZMVrUyi06Uki89SXayAfUcEmZMAp9aammGt2iiok1fqYaLCGmp7qK_KCg_c-Zl4yUoWqzsAPJSVAX6hWVwBm1Iiy7xvyjvEK6hMKgOdyQrQtZLbsEz_WFimCP_CgNeOHVEBkvnMbOYDMeGb43Bjf4bCwB47lSemvXfDmflS6Ze2uLhHi61w0kKvocgv_J42c0Govc3PTN3LFoIvnOV0PZfWuqsFmbK6wnFuZ0w40ND6xyx-1gaWni8jGbeq7l6WDVbbj7mk0n-Pc0KbvJo6V4kKYSPKOom92Ih-eL5tUE0uH67bpOaj9JURximhU"} ================================================ FILE: genesis_data/genesis_txs/LiitFWnODMUA7esa_f49IiMEdN7cTKoKw1cgG2J_eNE.json ================================================ 
{"id":"LiitFWnODMUA7esa_f49IiMEdN7cTKoKw1cgG2J_eNE","last_tx":"","owner":"qPIhDK5q0LK3HqLOd_alCstBWOXZZwfwKZdP8EgtViQuIO4C3qVwgtLfIvkHNFxXpCRsXu90Wc_OYt7iawc9DnGCsBtEaBGD-EJcFG0LfZwZt3HTLXaGiOzPkr7LFgqrES3ByCyKQ3eNoB7uXyuJSN9pQ_bMLQwjy9W1NhCaMHLdwlr49fMmpPTYdXOS45jdMUHUCfx_Vwxwzz2DYExy3rBENSEISaGz35syrIml1jBjWP9QMCuZpgEUuzzsYS6OgB236_JJLu-gq0m8vn7gL6BH7nXl52Ei_L08m6ApPXsyHyEvOUFO2nH72rwa72ySYBsJpeCsCwiAzfKJ4zGs7NKbpfyRVbGFvh4VqonHFCOWhbqWpNVJSYuU-GlKD7SVjy-GN06m90IbdyhUluO7VXAoIVeZXWekQXiCmaL--0afAZFjsxcDUQZ81xxkeINAROmMUOXS9TYEA1pYISSWWQNTLZWA4E_y6NQy4fF6aruFm9Ukz4qzZ7_Y11jndRH_H0TDxwqRmAfls1Y8lunF8P2dcNWfbSOAHOXCUO3fbR1L0iNR4G0AmhOZAgrnUN9lctGeQK0DDz3oDdrc85qAtJf_9kIVWun8hyL-nb9rcO8XY3a5dJJcLK6oeg3FBHi0YtTmdV2CUByaPO7kQTI6jikwbh3CY_3jksEwg4bn6pU","tags":[],"target":"","quantity":"0","data":"SGVjdG9yIFJlY2lvIE1vbGluYQ","reward":"0","signature":"DR6gAnWAyHBgMofY9VOhH5iT5HvacinZ8sq_yWfLlbZp1iQ7-K_i2SWmHkdGeHGvI21Wn3_NegMNNgG_AA3IlZQ4zap3pHnMtwbfhGDVDnELQCYA_WfQenByQWdUficlR_1Wh0pz0x9lCfrk0pz4ZyL8pLvYzaw4CAlQ2d8r9xDaNo9BAfslnNJlYWqXgHFwSYv-jm3Ez_a4S_A2tzW46mG9h2nlnfE9it66klT7FU-EX1Vzfv5uup4slbxRLJAqqPr_9jrneylrvuvimhIjFanvhlhrVGAGAiVpWlA_iNnbx1WiK_YkcXQTin6Qs6MPRxKqLCj3TuNwd2k920jTW7fb-bJJ97oflYmpvjGNq2mkqcBtSx6ZdhNz_JfHgsbeVhSyH-FUQwg6eaCviudWP-6qUDOIkDCxni13APWkaVxUcZllQivKKVZPWkufDYSMadv_oqx22tCMRBKnCt8iSluPA6vEDvH8PSKly6CTNlB7KhwACFB_XhkLp56zQGFlB0vGddSNXcD0jalWvPgrftv0uZE_W5EVifI53DkA_QGDyVZBEGNZtkS4XnS4NeqrnFUAVPnAX5iPpP9uL_3xkyNbsxRh6H0eorrrflcXSalTIItNgX9yy2e4mqfYld3WkF6qZ1ydN6BL3VMe-nnDtPn2dAQHgMe4wbYUfdNrDXk"} ================================================ FILE: genesis_data/genesis_txs/LixFbPqM1ZZ-5JWo339FMfPCpD_6M85rVK8IVmmt8m8.json ================================================ 
{"id":"LixFbPqM1ZZ-5JWo339FMfPCpD_6M85rVK8IVmmt8m8","last_tx":"","owner":"u8mNxMCQaNEf5z_0scFG1KQuuZ0Q_iY9GkiajCRmLmtE0MmlQcJLb4zN_p_JPWWM7J7eXR7vNPiR74NuwGoJv_4nx4DH6_oaUfiBJvIORaCPnwmx_Lu8yWx_z0OALMSIPaKBKrLgNUFhgrceoGjVKw8twxzIiYUKxFZ_ftsqUHQsFYO0D66_tInIZmrZO5kQFWKjzDqefiC1ftiDsLYWAUYA4n8UJRxsHQ5kYDd4M-WN2UtjU2hy7n3CoCbzS3m27F5FO5AE-Js8TtHEKrgkgB9uSjJsQBcQsh0LHxMWsyFWBhI7A1iEZg8c1lw09cX0GEBbuCI7PmXPeR_f5jJVBI_v98qyKtQxBcJ-8g2r1XBWlMhLqCUk2WsY2POUAY9YoFIb12_i-pcqQG1KAWOQhsps7FHZW_9DwJCcu4m-Wb5BmMpLOR0688j1jCaAEQVMEtIdy2GDua9rYd4Lnmc1BRxpaIqQeJibg7ZkFcuaBTvRRGNjudVklivzoh7fVg_jeUIWnTx3lR3EkiwdavgcrVHWQg9AnuYqjC2nn1eQPzVjPmdfxB0Owib5Xj9IqLunG1JUOnDFnFZjgSh4Jbd0ezMLkBnhUO7UXMyGJjUr8F1KsA-oI3p8C4f0sDUWBS2KJJZZwXVHSbppx5NJOtYO7UbL_KEkGQJy1pYiIkd2Rw0","tags":[],"target":"","quantity":"0","data":"U3VjaGV0IFNEIC0gcmVtZW1iZXIgbXkgbmFtZQ","reward":"0","signature":"rgc7cIstGHuWnDIl0PE57Hp-ljFz2vDaJewfNQNsOCxfTkq8lkRHLEy_mEYhmU0_Cwd-PRtvwQjPJ11Oa8HWFlD1dxC1UjHFNvCOZWeqOI84HeY7U15VIgIiNiilTAJAkdQvSTmv0e27hoawyfWSelOowfN9Zz9JYxzM_sgWQUUqf1emh8jU8mBs_jg6I0b0bzpOPLZwWNYkuJS4QVj5qWYe1A9YMjsoSSlzW0iMJXysdebZjA2tPj00Tag1zhRuJfb9TizpYu26enTlQCxb4McfItRm1hNvh2hvCsgpiLEPaq_ixwmDlk8xmSCdLRpw2IEUBFF64BjT1rHBNgpH2KZzQSCCZuQgbH_yzFgfYRnzp3JwOws3Ii0RhYgKe6AE_FiLtbZb_ZQEd_kqSccLUl4m9z4YXdAaBu6f-OcrWzGNMmNRqV3RG4KczwJqM8lDT4xB9lhuudi4yWsnQVvYwKpeQaGUuw3Z-l4kn0uzRTqtVCiW9KtyL8vlwPuxTYTZj91sno7qkFP_PVaHZpXhuCiPwYuRQG71vjKs7fO8vNY21ptnPxOjzdm3hrQU5CebBcGZuWLD4O3vOPMsAjzcUW3_-MuIFs6N0msR6punoyahM9qUXXAAzp5pyv8iiIbiT3YXgJVinTaPHeMj3FRQIn2aD6cUzRtJuN0xU1G0D8w"} ================================================ FILE: genesis_data/genesis_txs/M7oOLbk7TPBanLCS0pzkJSbV1CYoJabbsSDe_pCjhEo.json ================================================ 
{"id":"M7oOLbk7TPBanLCS0pzkJSbV1CYoJabbsSDe_pCjhEo","last_tx":"","owner":"zOKwtqZQmENDpTQEqNtqV6mu0qDsgI6PzqBM7dQ4GKJxVgn23SiTdmnHP_1HgwXp47tv063PjFpU-OCtAXwSLpjhRABycr45xYhkCiZZmnUXyfBrPspWWCZ4VwdP_SeDffQgc95mjcQZUr624BTPcAdol6QpUAz7oUju3HuOv4QZXn9gDU_1RcgZWRV1YL7joqOLZSeaxj9o0zohm4jXsAzkYbmFE1wmIuqbB7gFu9FQr-tA3mWa7x_S6r2Lk07QVTpieVY2TriyO_K-3OEgolDljkyGFZUptxoan3jvFm6idn0TCtYjEbpQ9bZhZlfg1QBpE5FrX3MbeFUnn-Gbp1mdqKzf5DQXkr66daJGyqC8WrGi3bIXx4dCqZPn3WrjoVMkhR1e4BYVHwUMAVGd4Q0gVSGwz91eG2uTjk_drEIhRg7VioFoKXgujbGDuv6VJV_Xsa24Jnc4u6BVtcIoopKPBOLFe0wJQYfqSr46Ds9F01HTiQKfh9RzpU1GkqxFWuui96Dsd-1CP6nWlbms3wS4Iex84YsjcRjHQ8X27JGYy8mjKWQC-Kjv1sFGhuobG6uyq_CEIqFa6XhcydXrsuOoK4uy0bpJsP6ko9rzgNTd7u3u2VCpb-oZBt5jPqY2Q5oJSIwo_ZNZgOkszMUiWW5tx7BZtv4AqOPojs5GlO0","tags":[],"target":"","quantity":"0","data":"S2VlbiB0byBzdXBwb3J0IHRoaXMgdmVudHVyZQ","reward":"0","signature":"VnW2KPXvK_yryUZKqRDzDG1D_mmlljPbvszxV7gyh2ARRNSkUE8mBvShlGggKW9Ha0USUrYtoIdNY3C34-00DT09_y93Ew4t4N42bzJLDjIA7nKhJr4Tj-h8NzglR_SrgDwBICmjDuUduckvOzGiSGr6GYI0n8CScBdw0QLKojHuyb3-Bh2krA1SPXlRM7Gevw5wd7EzRFSOW2O2FkIv5vKqxBgzCFkgN3drpVzKZnUeLfKUjt-bH5uLKe9ems_wi_EqpoXa0B2-VI0SbJL8vPI_f1AM3tVVS49SkLu-6NUgssE4BmtgobifD4NMnJKXKBsEHyRrizRCsnIbAndC6fK0ANYnaM_wyJ6d3Xe2gTmmtnm5fWbmkDEU0bhe5UswltKNbMVIKs7xtdJXb3plmpL-UL79hv6f7UCR0BhU1DpHqt6QiH8AIefG0Byns-BfVvcnNmQhG3jvL-dOscQRcLREmd5iwofNCrIJQ2Z6hHbCsQ9Q9asK7jyLuSG33TRlkGaxF2LoBS_0j3D-KfeBYmi8Rev7nmO1kfdf39fm2Xrui2xh1YjyxRugBBX-zDTKqoUfk9mCNMFRb8Z4req28okPUWNMRFzq2JAMyZ6v2eRgmeegg8yZRgRip50tYuyqO1G5NKo-X1UU0DpSEdnoqX8lFL0ZV_mWsbwUMx-BT_g"} ================================================ FILE: genesis_data/genesis_txs/MOoLwb8S881q3-gM4GK7DuCEoh5CZnF1tMIZG300X58.json ================================================ 
{"id":"MOoLwb8S881q3-gM4GK7DuCEoh5CZnF1tMIZG300X58","last_tx":"","owner":"6db4iwf8JDRUVNCaFAbzLIVD87pj7sAfSk0cXvkrM6xgAg1P1zhL5cx5L2rd47GayxP6tj35q8_NAbddLIC9vsmZvOBeLWTEcIL15pPfoYwrmJz3DpKuZ2vqQoxtaN2H-sh62HJCb9TwedBhU9CT1QfWT5jc6WZTmJLXJlw13ZZ-04P3e8fg3LK6i0WwTnO1tkZqwLXKHk_yLodA99oB2RrDe4mTkJj2oDO7Ky6wiJUSnfNoymE4H-n6vsa-82Sr37lX_HVFU3ZFOW2fbTJnXX8kLmevW3u3h7RZtaISuLGjnEQYAHmWCW4hzuK6QqmMYZrVkf4W60W5Xv0WwNipXyjBucngeovGpcmkhCIXIPWurT-CS70Jqv_MVXS3Oxdv5gFLUi1Ko5h444CfZ1gSoCiF4wgEl2cUo5oobQYTE72Z7H2rYyrUvNcfVuIbam0JRz7dONf3gX-szfTOTXwgKGcmCcQ36_HSPs9s30aMbFkUOkjG6KggibSBXVLhkBZRi8FVsdhW7lzSZL2HeFdQMS7K86nAA_UDpkpnCeDTnRS8nwCdeQj8wJI32ext1wkF5blV-pffHUHXwvjd3cee0A0I-b9HDPfdUAZtHYiwFVUPpBTHl9xM6zJka7xSf4lyIRfMLasEqcRm7plUAr0qZxnYE_9WIkfExFXJchaXpoM","tags":[],"target":"","quantity":"0","data":"QW5kcmV3IFJ1ZG1hbg","reward":"0","signature":"qyavphbX_tb7_lv29bZeH_Eus6fJMzDBwnq7FrnYZavA41sTCJfyyUMzHhj8PbjM0T1QkoTUVCm4hhI8D9lnlKLx2e17SYPeiFvpKHVpS1Wp1ZjLXnjwEWJ4TaMS9bcfFR4akTrqlktrstmjWcOCnG-NXm_B5PRQbuhFthkKkfqSKZFwJjjzQCG3xQfBUqQ-RlawdopmLUMUGx5i4NSwKW5d1IGQICYpPcnI8v4QUNLWQVi7AdaW_l-1ONsRLOVOh_ATVlHaiWtvhuznzSdMlz3yKZh4ESz6orpYvsbVsot_7PDK6pvELMWIkBsH8l5mIq_kbjeklU-N1tbkLZy8uzTKp6k2tUm4EKuWU6M-IT_JhaOPI1ue7NZIkYRTQ5Y1t05ZExVvnz3qO-xqV67GfaowMVW3OuhS2HJ83BjN_GeMF78myoyN3mCL8W0MMLobzXP_e1chqYMSOT0QzcsEGRDKDdF3OIKCva3pMMveTPfruqdc5q_1dwWnS8fterQeAgpcvCT9HbrkpOjW0GUb4MsHNVnvwTEwY_mPlig3MnFXgmE21H3taDVvb2UVVl2-yWWrdW2AD0wsbKF86kfETXTDCG_u1yX8s19mJVFVQ1241BI8HDRP4xUalf2xaDrVtbnlElqz2M1WLaolcpaUCz4LdXcMKHu-6Z6CWQNUejU"} ================================================ FILE: genesis_data/genesis_txs/MPP4fxmSkvM2BVq8rumeT5yvDNu3QAT_kqpOlAq5s2E.json ================================================ 
{"id":"MPP4fxmSkvM2BVq8rumeT5yvDNu3QAT_kqpOlAq5s2E","last_tx":"","owner":"xL1_0ZoXer4vIgkcg682epYrfGw5ZCRCOteHmKJwjBi9Ziw98WmIYX4gwL9lafv17vLQHIAdE042VSnGpTgwwNcYiurXdJTcXXXDRGo9pQcHMzJQmnMIyg_mI2d78f5qOl3wbesW9_UVt5j2opANb-VPwZoGKiRA2OJQfguhqSGuz6iY7-1gga73pJgrf1hHWmlimBg5yAIMExf-UZarb3fyjaQaMssJbozYs603xdRRhIys0umebLDVTwTcvYbRl6Siovnm0-JYFF9izV1AiTQG1rXsl3ROXfaYSegVe7hO8zrwAo2o6busxHEdNO-jKBV-Y9Fb0w_EtU3-r88DrZlB9bTLHHN8IlGQG2CgfW_tP7Y-hYaYtHWu14itu1uqMeHYJAms7RZ-U4PH5TCvZ8w2AmrL40dppgkJe_hkc1bs2BGa2-NB2CzYHxJJ20NnL_R2jJniQgH-42qeVG0y0UaGrqlzLaX0h4hHS0MRnd0IycoJ9iKtTNof8_bh88CWHdUSI_8sSNDgYGeJhsvU35HXi9trKFWqDsL3eDQqiyg7neYF0DFvhCoYuSwvhVFrtrIdKuDL89XAE1dSXpo8wZeMUNr4DFNhUSLDnPXNZtd04206dBgMzQJPw5QcOritfyL-bz67AjKrvcbahqRFUCtfJ6ZoUtAGSPpCcBNF1U8","tags":[],"target":"","quantity":"0","data":"WWVhaCEgRG9rdW1lbnRzIQ","reward":"0","signature":"nt5Bv-y9vEIGc0Bw1h3Bw9p_izOXlnUunK87dK6EQkO_90xt0FDjW3twJ6HiJ8sdcs0Yr9mzFeLnBYvhk0UuBjKDCWaRF7Z8ksnJIrPZqzrbqsn_yEHMxMf6j_skBZhOXvECk_syfKwzGr5da30wbowyhqTSHPCEn-oI0I4Vku1qxEdDRWytgQV13Z1mFYcJ-f7iy7m0eUIJMeT_H5ZlNpkIPbshQ81uC88PFJGenrlBEBAvkq1ISzsKR8E77R1B4_6ApU2W0JfBKVG8KVCfywQcgKFmx91BAn51LdWNyMXaTAboE-bXW7aptjNGiAmxoKWCWQGwxX8BUWG9IWSQgosDBbq78IxCCQ6aT2rEvfX-DHQjFtUognplfoPjDEBbM_iGZk7axo979QkJ9tYNHUukbs_gkMpESWLzPnRCCnTcYeFLbbg0is9_xeQACagwVREPSyy9O8HTC1BOen7ZKF9b1GMKJQfG2eQ-nRIbKHfAfczHOg5mVICUPSmCLhwpqfJKG1drE2luj3DV9NK6ACh1A_zNeufzbhxvuQJhdCz7rKK65VKbkVYgI0eERWKaoamm5uSmMBUYVkTxVv1w6-k-FO0nsqaM9c-jgCWJmle91XnJZf08Bv67xN9U5ctv8vNmdpjlFH9oZS9o8LtxiW4NDCEhtvIDeRTOIbUtxvg"} ================================================ FILE: genesis_data/genesis_txs/M_wQsQbFGtGiEaH0uW2swBubAnFab3ZcCN8IYWZvVzo.json ================================================ 
{"id":"M_wQsQbFGtGiEaH0uW2swBubAnFab3ZcCN8IYWZvVzo","last_tx":"","owner":"3spOip8iKzzEqGW_jRcNeIByIB-rYkE0azw_irvGkPPZ53Isoz1kbSt3nBdyMoyF0CaAVyt2WVddoHlHcDq1Fz1dZ74O5740DMTxXn7RbbEwd5rsrbrGAPM57wZZ6PZx9KLgKY9jg9PQ4gT9lKJuSgfBMjTpnB_7b-UPoO9w7Jlf3WLvKOqbg8YjVpcghl66oV3mNELZ2EINrvB-1beEIhQyIy8CuNp4kz2q0o4D1zXYBGyuq22WBAmQXLrCIcc9ADmlaceQfXmCYVdH-Mq7npagWchhgfiW6oLg2GT5TuebfqY4uO1h5JQUriWs--VU9n0wor9oZjxOKQzvaWCtjMvtjZDl3jWnz0h2igavxVDarOt9nqEs-JuVAvW4MmvfNCPf2C3u25Eq2_haH_1FI5coSH2HaIGyQADcCmpMbJuS7LMoHVDMUbPlfmGNrcbizCm60vLBxkSD84Olfn9UX51Lyo1qnZPW-RdlWwFfy4h3L1lqbTgADT8nvtkJLxmSA3OpdbAOopBECSjf3SFwytxJ5JCWSPiShE7EKV2sSU7d5tKX5NcTjWpuel7nO2CDIUj7_9rpjkGksVT-M19Rnvx7vC9UowzBJ190htp8km3KdSPpoLqYFc1xtBLtIqkMxq8_doJ_P-yEgQr_Tyj2pClRtIIe4JiMotsiCigneYs","tags":[],"target":"","quantity":"0","data":"Mw","reward":"0","signature":"2NEWvCZa840SNpHygBGEnhpR9qsc0O6fmFXqlrj7rF5mmPXmTaW4kB-oWK95JRIGrQNvxyYBzD7nzpou9vOF99u7lM6PweUkkVhP-wqz1W3D9C4Z0pi9XSWbkLEvjxRIF9VY1rmRYyN3s07HTu1Q_cajb5VOymOuT1-B1R4Iy7LLKeQSM3oU6tXHGLGDNYRXYSK5X1llqxbTN74yc7L3ooMPPoRQQhQ0_IqPQZUFeWZUr1RcvuLG5sI7D1y4TNkYDjGmdyCXfcabQ4bq8Qc-opeBH1fTvyoheat12qWvknO5Pbeot8iZjRhsKKeUD1peIQIBbg0lJmPwzHCKtyhZmAefcY4dFpejd0G-vrnoF6AQPXOFopkcfiAZ9jzJisF35dSYjZlnyQGFe7ZZG-LO5DQ2iu1BrHwdbmdl5IO-ImSS9e3TUVkmKkAiSEKZ8b-FELtzjf1twLlPUc0ymF9ErRqSyDkoQvj0wr1fUIMzGQEMbMsi6O2A9Xc3D-5NTu-r1pd0eSAxa_dzPLqPos9XTcYbnlLJupPeRSqRNNGlH8sv8ghQFoUUWX9ElJ8vNNyMOi7x5FT4rTEKv0UJM6w6nGZ087jYP88B6LmD7nTvQsTpNB_L6UGIQ3Xn1v3jOnrq-bh4-15HgJJ1chZ3EonT8Glvskd5ERr_wOq42MXxpWw"} ================================================ FILE: genesis_data/genesis_txs/Mk8XJgQPSOIsx_QX_XDPxdEG5NcKgO92q9i37uLZsrs.json ================================================ 
{"id":"Mk8XJgQPSOIsx_QX_XDPxdEG5NcKgO92q9i37uLZsrs","last_tx":"","owner":"3STz8xp4flG_ygxTDb_GSdz3qGjB9IhSuTDjSnJtNm1om-HDHGvgiX1bjeTFSNG3jDzuszd6uPiIl7qJkY7ZRyF8QRNPEXsMNxjvQj5ZhSlAQHCqpURYgUZC1oMt-CdxkzgqA1kVdZsjhsLeTfsZ7RyrL9pR7tRQsaQnrx-4tMbEVILF5sLqPzk34I6abRh7Tz1FAsES2d4hrunXp4gcnyK7nTS36IjhffUt9AK1_kB_mhGR78ngM3bV7AjxBKVAoV4sPZp0AxNVtDcPNd00F9wX2jPxInbQMkfBu41aY94njInTvg5IChKBvugzLeqO7HZfckSKKK-MYz8b6h6U6Vqv_me73rjRXgGEYBq_kmXZKQPIhk72YGhgAVdus3OcwiEzz0r_DRkTECVNHLUMskF8eP1_upyMtNlLwoicKpDNxlPqDSkEFWpQf0u-YLRF0O8q2L_OLidBU0M9gGaPwIJUAG0hUDdLSNgD8qRbdRwgatMuS3-2Tq3FzjpcBng6IMKG_F5MfaUC0VAf6xg5RESoTk2kMgLTL4OazTxj1YX-0c4REBsspZDTiS_dVqNS4T8hooWCCVi1JXrvcjIab0By0B4HXTuhGUTw1w3yuReNd72TSpvja-qrW0hLtFA--UHivjRt9aY5Q9LKBOoE1IlB5p41PhYdkhDilU3VA98","tags":[],"target":"","quantity":"0","data":"RG9uJ3Qgd2FzdGUgeW91ciB0aW1lIG9uIGplYWxvdXN5LiBTb21ldGltZXMgeW91J3JlIGFoZWFkIHNvbWV0aW1lcyB5b3UncmUgYmVoaW5kOyB0aGUgcmFjZSBpcyBsb25nIGJ1dCBpbiB0aGUgZW5kIGl0J3Mgb25seSB3aXRoIHlvdXJzZWxmLg","reward":"0","signature":"Nbhs69xEkl0n5AqAWmPmxgiPcZe025YHje314GRrilwbsXtMGFqp-AXL1LVAX3_D63Jg_iSM5EM8poSVVjzJAyobg-nJ1ghCF7TOOZ5WU-TzvqgruzPNgG46RtzJMfEiAocWhCwH28G1zLgl1byF9o6yxP6iT1WcpZn_-hGYBcjh3S-sUXo2OE1vVIbCCQm58h3_LAf6naik_YBsNDN58XEG55fke38UBFRBrwNoC8_EQeCDL9majT_PcA-hl6FDy-abrPdxTSgpdFn23Tdgnw7dfSXyAywKeid-uj3tX1njYtnN7sGcNGBhd4Vd6sBIGpQpmhAcMoinzuKAdT19injpQUoUmjHbV9i8dRDUiz_hU2OPaFUOCYEGYpz3nRvyJj5Xe3nCCvFm3mA8-jQm0wPgALYlBE9Wfkx9Paibfegenf3ClmwDkF1ole2aW6GtFMXP083yWRMxQ7-9A3tPrgMY28qhyyVKansazlbt_s8puCq8DHtRkMmjUJ9lJ7jQ1lmbxGhdhRNoRr1U69BNU_l2b-IfpLk4SwGI9wMI1dJ-U8VK7pCGCLVB9brsB4e7hbs6DWupUq1THsKmEVWVMGQRlkOyPiMskluiTVcC9bI4Rwm5yMSl9nyVVb4cz5G1Aa0izdQGelnMZa_XXWimLEV2Hhnywy9rgcSp0_LOoRA"} ================================================ FILE: genesis_data/genesis_txs/Ms9gCRdVwT9u8-ewYd6c-T0bet-n24n_q_Hn0-BlMow.json ================================================ 
{"id":"Ms9gCRdVwT9u8-ewYd6c-T0bet-n24n_q_Hn0-BlMow","last_tx":"","owner":"50TgxcnKunD3vAcwH5XsgHG9XsYkuiqfhEkqLjidBX8HMxaVft3yZSpTB31Mj1gWigHO1W5UsTZUmUFmpvW6zQzJCDb8W-VyBl6_FmLGDUgqhlJ2z3tIURQmqrSfUL9cYVvWHIfipPS7S0buv6yImMqnc07sl2ugAIUHCVtGhVp9max1_cNDKdHZssIhESp6hbrU6cDYKAIRF4o90g4dO61erTwd8Cpns84itn298o9IN5128-HEtCmJ5-xvpx9WCppFkCGq4fPIlx0rJ8WWUYlioHDdrgkUqQh-bxiiBZkLJYGZYBKTwvSPCiot7nL7pbZDwNVkzwH63tMNN95tzRuEmbAQZyBTQ3VgE_Fp4cNQi7H7-8NuyiMFK8NI0ZnPKh-OUz0YfrHuSlozT8RyIlX3AX8N1WmYB7KD3bQFRLxgiesY1aor1UDnxxHI9fywelLVNztMan72bfW_fexd-54p5g7IeZv80L8n8Y7knPz0dRSZFrrzjF_hDj91ZlxMIntT137jFa1_IhRGzeNjW_P478E8_t3s3_aZguS58durEk_4rFeTEhObn2jl-b-0wG5sdcqj2tR5iOA7ntODhRZ8hImKPBfypaJ8M51-daYXIP-LOfl59LHwOrYuUm8vXidIvBoK1rrFZ6JJsUYK0Jxk1vNV9K6VDhnuXVZ1IVs","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"bfbLamBGbe1obP1VwIHoy854mtwtF1TDUFGg3Ofj3oV4WJlh8tLUy4mUzVtsOIkBSoU4d3ywF1nVmPnwpexm9DyUt_sKl2sNIluTNHX4Qc_4_b39N_Ff1IBCkX5Fuc9LhYu_tQaJw91B2GTf9_94J3PbKGqiJ44BcwxbD5Vah9lQU90n2DvQBj1Y3Kgf03tB8UI_eTPQSqJx6BiVUKD7Z0YPS3iOE6tqxZ6dgOr7qSgQzYO1jiQB5ItJsipIuLlafVPxvsrTxrEl36r4F0Fw8kBPX_dyVb5UpcEoJAn4ZfUZdiaJxovWrb0BYXZ31HgxFhn_Jn7gComc_dnRZGaQi3m6Gr8AmjZdmfqi_fCOb4TzSeCSlKWeM83QIv81KTgkbrh_QiNjZC1R4IhJL3EUUj2vLL7nAU2g0HFcgFGf-SWQCTAAuOYjyq51Jept4I6Q2CpUNxSvDrxRoYKZfqyc6tqB1dQR2PrmxHS2-QJvE1-40IKWCZcTzGbqIX9XB8PoVKOrnVHHMBD215aDN0uV6I9mQ-2bQsCfq6I9tPjtOqFRxEDB_WmJukYTiwHwYucKZPbQulERdaMmieELhfNMR5RywYYnG9uy2qLRQAJuqsicdIuOTPMXVHAkbharKt5u5U_VabZ2L7Mt3yIjP0Df5RWj-T9nwYcrBu4i-Wgl5Rc"} ================================================ FILE: genesis_data/genesis_txs/Mv-TFhA3639O4JbKzoO3wo8LNPcFwA_vaaOLHfWRfSo.json ================================================ 
{"id":"Mv-TFhA3639O4JbKzoO3wo8LNPcFwA_vaaOLHfWRfSo","last_tx":"","owner":"pTz0vEGrpQr2ByW_Jvwf7w_nWJDm3iEbmKQ1zRWou0-KhxxWic7GAgpzcBWqcEmM7VIp8KL_ggCpxx6z8wjdP6OAqILrsL5Gp1NOTgNITe-3MrAa7VRYqBmIcuRZm9HgbqsH6YL_EoSlzwNZFi0tB_PKfEqusaq5bAXegALw6VznNUPdbJPDJO3sHd6HZRC66_PvVrevfbn5LVNxUNFMuLMrBbgiuzPw5_CFxyCr6rvvQyQha-13c_rR3lL-_lUjgVT1Of55-dQ0_6UQuOlc25zCtYBxxk-i46uHrXla0W4iTZnk7pMIFzQNCz8rJM-zgWrNiKzaKqFKXfUBldw1I_6kw21boPfocNcBnS8Y-E-GG_21AM5P1JVlQk8L4-EmacdYMU6uJibIM9SoOTsuRA_xABD9Rv3x-IQsoqAE_b17FnCxmdP6SgYYuuBs8NSFysQJmw9c6v1xhRuEx3yZy9yrZpfdaPFn-p48fc0d2QTOQNwSFjJD1seCMs3YdR5kyL8DUyeoc9zGXcrwTi3QWEf-a9dF5U7_8D0u6BPXdmmQRJb7kd98xpeHoErCA89D43vL64CJzX8D4Cr0l0eijC5gsK7Z8hrHzTe4EWAJ0c8Te24Z8v07f4kkk8qGch_MxDYhUCorrDKsuIiKosuiBgJUAtKpL9-AVXUBcNqfNYc","tags":[],"target":"","quantity":"0","data":"TEVFUk9ZIEpFTktJTlM","reward":"0","signature":"Q2zemcLfdjARzaKYfwyU16DX4p71NxyP7JQTaXceD-LjB1b-8rok3l2A9ubogZ6R4tjTAnR0uNNonAmN3hLc5TO8hs_rIEVZFTN51kavZ9iXBBx2gNHJcunNqPc9uJTb9niyfzfZ3J2t5k85VuIsSKsvZEiA9yBcRROIyEQ-DCW4VqB8rhpV03WsAMQ2aWJHCk8pAMY9EPG6G8S6lGun1Jqe8OxheDBDWbLwq44b2DWhncWFcEQeDQFyESyrYjP0Tl1zde7yDPuZ8ZuyhswL1bGEfJVTwM1QZm2u0TztVv6XCxj1gKct8kAMJkUD2bG5vOmrsn-JtqhFkflu7gHDWM_B8ZEQp-mrGx2VSeomuUNWxYIqMOZL0BXM6fZHSu603XJtH9HNGefIp2K1lNsGGp5OgNKLknI5bqE886G9W9KU_bNbP8-z-6D0QFWixwiZMboxdb4l3k4JUIn-_qLm0Qvmspta5Tr-Nd8M7WefsJvPgS2JpNZ0AcLZoMdaMmgzfr93CVFJmAoGUWnKgicKUyOHIGQCodBjZzQrNlx8FL01jjpJxHbVMId9gCQPfFWRnzlD-BnGkTmnhuc2qImZgJaun-O6xXjE2I8wgsaq5OCutJ6oXjMAt1nK_nJAv1zGcwy-yiXOYPS2akZMdURvUhIYDtzBJ0OaiiWHlTyrBDI"} ================================================ FILE: genesis_data/genesis_txs/N3lqe8CUwPfChinYVV4OZZQNjtXc26JkOJyqgoKhq7E.json ================================================ 
{"id":"N3lqe8CUwPfChinYVV4OZZQNjtXc26JkOJyqgoKhq7E","last_tx":"","owner":"qfF1ttQacUz--8t6awxW1qaaZgEwsc9jUk3y7z_MoqMTPGzdUOLzW3vfPNR-2FLwnbPyF4azGU-X-2A9dc09q6e2bHTfXtKWwmxFrXhWgq-o74IOY4LXAyYom_2aH5giezkilPXLB-AY_FqjmDb3euzNgqbLx8SOmqpRLMzSQQHwqlelRh2F1Dl6OYO-r4P0zmPCG85wQ6ufSFG4Bh9Sw6fZWCySirtRo-Hbi6Oa6Y0OnZXWjQlhsF6vs62YLnEApKVcfIMCYdJSyOZJAm6jvQJXqazQzSJsgbcy3gtLK8MojmODo50smdy6-JBseuMEjjbyqSQe9oUivwP3udnMaHP6yYb7AtklRhwOIR4phcmOk9nY2oblZqhmxcIGztcE6Gg8gsRnWNZ5RN4yunZGRjUphJybuEToJJRVB8JA_3K7JLk70taSj3D-REyC35v4nBIfb_-vuZNE5n70Cqz86yvnGpim0hhDLB0pCSRp17zcL-6E9pPQOKR68WctTbYIV2Ry0GPU49sRNVvsjDXHbCEBNq1Gtnjyi3aeN_xie_2dqQShAETjjFvJ6st_gu608-C0J-VIdzNaeNU7OXUor4Wmpnjm162LvBQwBngqj8ttEYriiYLBnRIXyfS7LIbnoSEK9bgXoCbvYXDX5bj1j1YO4k22Iq-iwK1Bg2eY08U","tags":[],"target":"","quantity":"0","data":"QmVzdCBvZiBsdWNr","reward":"0","signature":"NGR3xT8VWhPmjk39AMnl7dnD_2H_Z1Ak3Y7EJNzZxFrVtGEzvXYN80reGD1byzy5VJ5wB20T1g7VRZF0_q41KxlhSOJ9VAWTHFN_-lpmBbYS_ZyKuWlN5UgFBbnuSFtWHMGnfb4t-2pRo7Sg5XMxTvrPXf-p-e1xW2toa2BdzX8BpXNZgIT_Ah0VD7_0TRxCPOMvto8YZbXfeA3KsQzOnzfJmCLIcXbeinqz9JJHZ2QSUlZOdn9Pa650TVvCVmlA5LwJ98NkyZrFoXT7imK9IrftH5RRkKXEvt9BYPvepMNK9xBn0T9WNZlV2YqsAwYaVqDYGYBjqCX5F3LJrek1Mf3XS5bxq5MaChoPg2xjcKP7tVrS-kjlbVHTH0NUmwchmIWeM2QYeYNjpO56fFYxj5Z2LNV88-QiBGqLYjhRkAr10JiJqLFN09y1tshRm0KviIo4HMtMag6VM_u0nrUeXvjeM5KUeBS4cH4tL8wHKQvAzpCIKyk_KaBEGPeMTTKcWOsabVAmK1XWdojQPnTKVBQlyRDKxogtD_0MZklh8etNp88pBN5RscT7kepHstE3xMHBt9nuI_dXfPKajefs8Lft6Vl2-L0okGNIJjpDjb-IcSuDsm3X9Gr3Mb9UkwOcRoc6c3skXJ5tiqV-3iEluJmVXindiAWoM27cX26n4YE"} ================================================ FILE: genesis_data/genesis_txs/N6-1fOVDkoeDwKyoNdLxCVoyy-c0EF178A_oQeEchs8.json ================================================ 
{"id":"N6-1fOVDkoeDwKyoNdLxCVoyy-c0EF178A_oQeEchs8","last_tx":"","owner":"syKRV0_12JvUKoYcLpvLnW2IOajyCxZ_FpvG0I7sDv1eiOq_6SjIiI6OKXGGhABV-1aBZtqWTfb5P_vl57NY-4E7uvGF3fXoEoSOkldHXAKHhbVypV0q-RfD0r6pmDwhzKaSXxBtqXVIcGm3FFAjeXE96i5Nmko7Hhq47TzjCgEA6WhpkYi1h9pD7pdwmVBQcGrp6TA5W2CDbLGP1RlOvd-ju65QAuMTH2rNsLPD8bqOn2b2DvM7qJ-_YeDS50zq3kZ1iAnX53v6fmlEjYRirGr1ieeylK6MqZp3G4wysGp6VHDP4OpVhbAlPVKSe7a5O4X9OM7UUnMGGlG4DDD0RIBJW5dtIFyaBaXbOBz_kb0Tzb7xJCa-Ki4q6uMa9GLwpvF86H_3gYb6lU8DHOy6P8Bhv6r2qvulXapPJStePWHX5fE4oEho0Kmwvo95Qh-Zi3PRdydTOLrDgTvb1Bdv_5Eo50eD-qyp-UmgG_fvvxNGg8mMJH-ZUHqP_f7sBnawgPo02aSZiFqQSs2UEiPnTzOrDB1QzmbHCSoWjNv2XN5KHmrsqsascvnz5xBCxCZiBvs6vKQ_OD06K544_-1egThNMZfwZFZJFlpWiA7snPquT6QkDyVhpwG3u30VTHzAqvkoaJtho_d3uJZTMKgAbFGHQVDXsAvbpOUIpg5G4yk","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"gF1kC7DY1LkGBy_RLjcfcbshkNeOPG04VCqR3Q472NcMqyC3z6mbVuT_XxgeEd3zjOvUc-72K4iKlOf2UhEECTnX9cj_FnKV963zNmY3BpIV8MGIQuTp9xKCvmL30-np6bd2j-WthBZQHv1LwTrzCnDGh-K1TIlAVCXv5ILVfiEsJeudIkz4a9IIMx9eS9pWO6BBbkwkmV52nJu4_HDZlGijf5onNkXUgqy2fWVTpy6f7IkSmgC6rIWc6J6PfgIZPuNs75cQjaVjHSQ9LK9dfplT-jZwmuVe5bZ_WbzCDwzwmtq2GzDnAVngbjWohajndSaZJ6KzhYlt3v9otVtoXlJfszUpSocrY1s_ltZC_OBTctMHxQpJ-gi9B8tvaj9ysu2qakfKkGGPoD8zEvy8WBgnm4r7tFv0eAmINasVdIiNxga2yFXY_l1zq4aQWU7ki_pyiioRjurzoPZMCrAhi-4UyX9-AT4RfHjlzShHhTSTTPezXjBeAoX9k9gR4-0V6dElK9elYTOB7AKVa82YHOo_1kX4FWEWEHB5KW7cC6xbiNmgOi9p9gfkxhrXucdugIW1LkCAuTE_HV50bIckKW_4Vx2d2nq7fCIH83i2yQhQhprHgj1fmp1dSHXVIKBzzGlMWJkoGhHaTOoHYHlLe7zLXm7Blekt7BLBFvKRAIo"} ================================================ FILE: genesis_data/genesis_txs/NBxewjnZAfekK0hKmwL_OpF1521JTeIpLk2a2TLDnTk.json ================================================ 
{"id":"NBxewjnZAfekK0hKmwL_OpF1521JTeIpLk2a2TLDnTk","last_tx":"","owner":"6NnMACJ4r2JODThVLRHBPVEVgu7oxeE_zWZZdUvP9ozWGMY0QG-BxF3rWhB9TvZAFrhtvdqsM9xXsu6i-k39rS7ypAqtQ4d6BMs2AnBeksxikN8Q0N3M30JxKzijNkp4dGx1HCeubIY64wIRamCgUGDVOIUtkCvhP9oZX7Xhpr1LwpjrD_JSaane01EIyQt5apcHvvMPhkMPivSrt3T9e46KJn54kI_EF6n2lnRy2OoKCtSksMutD4OPgxFz5ofuXzOfvIno6mE2RMNXSghmmHpvkDVC2Yx9sV8wvknsKTmJZEoDiyhwqpcKCTm_LdzkwXo6m-cg9qLAd-RRZnZ1JqbWctHTwL1_rUXRtYpS3qlmormiZibh0CmgAT_59uKkPGcEGzmThVayOTg-orrWQf_MmTVesMbFOJorb-A14Xs_rGVXUkWHXlHeV5MmTTolRfUpMZtnQEN7dvSMsI4TMLJp8hDA62cAWnfW1bWnJYctxGOCXLtIPHEcSbqMLxSnHjD0TLCCbBVdlJvXu7EtYXZShnG_5OWj2QT3uY5W6EpcDORmZetS5iurfwh5Wl_oOL3VdXyEFOUuLvjILgpdSN8o77e1ZRfDhGq31N5wZq_LIHswR1Q28mI-5x775_O6HjPrQjezIzDt38otRbfOf1qCs6NAdXgCjeeOcs8UB5s","tags":[],"target":"","quantity":"0","data":"QXJlIHdlIHRoZXJlIHlldD8","reward":"0","signature":"AqSfaTqAkvRAv-5PCbOVvDdBuZvpsLYZTGdLOfSLHsGbsbiyOYBT4T6gce5HA_SKOtpZMsyQ8ydp9fT1fA33QR4JtLY9wiPsmMQb909BmtIa_uQU3yHLOf4rCHfCv4WsQq52hLzOODLtjDosZsJ1QtnkR_HJTDXAEt39ASMQl-CqNZG0KIhvwaqhZ2wgXPgwXLNKxkZc2IMFQkClc7an71xqkSDaDPX1YpzjT6cjmEHmj02OR-Li7xKBrfCdKj1_rfwnt_CorUfZg4teFW2b2fRLXchoegslNeu-L8OUv47prBipa6xgBpFE_fXPSY9q8hJ-NCY7z98MkrV8-O9UEoIRapXO3Io4vVYvi71EoOZGwvpvMJKhCWrKzVV3fOMpeGLJWkrnetBnoSSHzZoWtQnnd2Pj7r0fqZSrF8dHSg5b1TFZ_r_W18KFMxRiaV2lfW-ZMujWlGAfnFcb-EjtLYgs7Y7lAn9dgi1OwDlRgs-1Rjga6nfI2bHLbyHXeLsVdwrXpYJ5Xz4b2_7xp9c-is8BvpZvOwt7XTxcytqiJ6X7TctLW5_0OpE_J7JQ7K8G6NwJIlsSmxBmyA2z2y59h9_eGsoUq2ac6Hg17NOVhjjaiTXOYBgDd5FrDtavmG7x4I_VvlU_Dk52KMY_96_ktW6kQnQMScebhZSkuwH3Dxk"} ================================================ FILE: genesis_data/genesis_txs/NE7AIvW60iQL_6aagNTSiaMpmLfAfRwbxau5FZLA10g.json ================================================ 
{"id":"NE7AIvW60iQL_6aagNTSiaMpmLfAfRwbxau5FZLA10g","last_tx":"","owner":"4cAfWNyrBJix7ys3_Jb1qYRYpeHq05r1PnuKQL3L3PX7j_di_ZJyLSFH72VFqLov_nW1Z0gsALTYJDDzR0bCyNYe6WmC8VefMHpasXK6CQfD8LUfxNq0Jau3Lggh4RCIGh9EP6getoSyrrgpP4Cm7xNKB6zvrpAHR79jSICdjgU-qq4UT36mLOacDScg9QP0sNh-SmQxVWc5FLZDwt7wNM40FPh3tdDKCypLzruQekk2AquBXOhkvp-Q0wJMDiywzVnd2YL8PYYILx2JzjmwWKHhEFtRFmzsqa1RIKDDWr9TGw5EY5aCbkhFIhQuySR61HNSGbXbkpEAqGXBWe5jOT0r6OjsuvWAv1VMGJSj2z-HTJLUZZm6tx-fy3bVdbvwaC58e7Ar5LuLis63ON_EMG1vgqn8-zmvauatJrg4El5QRarZxSCAM0vnJqtqASxy7_wjVfagtq5dWVH3K5fwrlyjscJ_vaGWJad6Ydtk52XZ1B8a4lH2bjZTH4K4MRv6ZpLTnCOVsjJJj7n8DOg_mZRozIEwkorlQ8ZbIAotrhID9kqP1xT3r2vWukN0p6NNflTY0fhRwA9EHVPcp_FbU7zCX2094VfRQ7hajnR7G-NdW3kTL0KrGoC0tR8bZLYhPh0Qnp3_c5ks3uOnE0zqQoX-44xhcLMXyWdRmWEgmE8","tags":[],"target":"","quantity":"0","data":"a2FzaGV5QGFyY2hpdmUubGli","reward":"0","signature":"S5I33unL0jrZYDx348oZ9KFBK5rSy-D9YM15e23CsCt5UVGHjIcDKVLzndnflXnKiFJew83hS39JrYP7LXXX3DNBbEnLF-0K9ZZDHWY0dACcbNAYvI0Rz8z14hSMP4TnzDmIEQmYu134LqhJArkS6y_oHo4TSa4K5bIenoIGZl6CU5Qn9E4cX7ZFtFPIgNTrWooYduNV4E6jkQkp0eu5t2LT8h8JsmlbkPUatXLCZLg7odDSKRrzxnuWE6sNEj6tCqcqmi4UAUQjKrXD-6fmaEdJ5QhdU_rw-WmYCAkaiEQIa58L1sK4-ZY4kt6CZ3_tVUUukHdSqu5H_TW-nFNShU6CguGtmBzJsAJUs_1Zut-B4ybOcTUy1G1EChJ5p9T6BhxxRozvhksKnL_YD1feoQBUtO6TCeOhVie0JB9Q8AmIauZO8Ri0hGQ6bcpjnaVCgyBlbuXNlmZT009cwpPNxhP8MFYEfGzuhE6qr-bjscB8FiL-zObBhxbRCjGeutgymIrsIUvM1Je8TwqXH5QGQtFd-ttlq8XOpzYQI2Lyu9dm2ddAEoiXQ53LJu08ZXj5OU0whk_L0CLCYBEBSrj2FOFhCEp70I4dnlp6KwsMXXqH62l74FhcVvW75lxuJrwua9x5dZEXwRsxX4exw2uDALMOXTd-wiSNfcW9CMEZTl8"} ================================================ FILE: genesis_data/genesis_txs/NEXnMz8Yuw-xfIPprKT2iwx5A1UjWwRHCH7XCpeXIPg.json ================================================ 
{"id":"NEXnMz8Yuw-xfIPprKT2iwx5A1UjWwRHCH7XCpeXIPg","last_tx":"","owner":"4Pr5VItE-OLK8ppbGwoC5lMclzV4AYIyAkRLU2Xlhig-VhwSTrAgPbsla9iH-wn7U0n2hHlEDJ_XjnIWi7lQSuxnPycjYoACbk1LpK3bG4KdMCxrfQq0-2gPd_7kBev9nBCLf2qq4SDiwXbJ3WA5fQCwlJeszXpYQghCbXmpLcidlW3tbeuWrlUM3TscxsF0TGeJDbgEpDR5FcESTKh7gNjFfWhVCLgsZjGQuFwdPI6EOExYl-6EuoAq3suYCnRdhdIU5NDO99qE8tzuj4WUCDjRSxZzPCJNdbg_CovnsHBz7z-DX1of0arjFMyPglof6st2LpOM8gPYJAjrIHcRfJnRyarJg6X6JhIg_hvh3_GBpe-Hz03y-6Dvi-7KaumwTQZJOq6oNErVYqXxV4B-uJdpKFqsGLl6LnIIgNkFZFn0qO-Og2eqtRhzJW85UsiocOMl8FrYAE5McRDH7_784RAwkX0ch1gWO84zw7cTOFwDdoWUZ0m-DofkhL55q5xBD42DjvjawWQFvL8lzXmuGI4hbSkn2o6tkfOQ0fyQneSPF7M4-TfRQPHr0ykGL-8pjEcTY9OMenw8kQVAMPxrYFcOxfegs25VqjLnZe0-lQelF2a4IcXhJC4DrWZ_JuUGDXhHEo0_2vEcoZhETgtF577oX6-gLkteIxu6Hxk81Ks","tags":[],"target":"","quantity":"0","data":"bGV0cyBzZWUgdGhpcyB3b3Jr","reward":"0","signature":"T4FN6FEA1ZbHMj953sfP8KGzioj-AZgrZcZxUsM1rnVkFTCbz-wk4d-gq0noInTsgdToiI5pVJlcY_lYoUKV3UxStLENo42nzNQxIqsOdXulEvsaC1c40ilDjep647OCZrJSO9VaL6xu6xhMhE7L2Mdit8UKuOFDCu73j8pwW8COWGBzLDisqApwSgUfgoJerFRzzgdJVH7Cqyt8yeYYD-erpdl6Q1FhE0UgHFZ99y7MCpaOHlHKS2vkNf8iuII2azMNLueY8M2_0EanzkuLcp0gd6dLTM9fXImpu3JHmiFRk4UQfZicpnywFHnghBq40k2gaMvkdM1h5rAIINo_kJjQ45tW5IJiBUBJBYlHERJXaI003W6wY6KLDUiOJtt3MRVJXjHV2krtBR5xj2jkWnuHu9uqM3L3zxfCDyVEqEBizRfphmRXgCPOMJ-dSdqB535hpyerpNnTlFvAKbNd5w_a0IBV83fPwn5H47gOriygwkFUKc14zfluagYJIS5kbMaXD3yoBdonuRiL6lAyr7JfONnV7jj9zHOuiAA3xAXGzjYZJ9beg80zRM2NBLPlZ5fDqUHg23IRDvbbkWKPzyRkc0Wwc6YtQZPkYLNLuP1Nn428A2tVbSQ-6ZcnVMcm3uYt9FZddmk0Ag20AhsRhNJsy0DW9TwwQzc28NsC6Cg"} ================================================ FILE: genesis_data/genesis_txs/NPLj86idALmTczSq2vrZdTs0bjI-e-KI0j3EOWWpu54.json ================================================ 
{"id":"NPLj86idALmTczSq2vrZdTs0bjI-e-KI0j3EOWWpu54","last_tx":"","owner":"t2J3Ly7R0-NmXcOnS0wb2UvwYZS8LN_U51VK6ifADLDHlhyPW19LBncbv4DLhQ0ybhU6xaY_wlXm2KWcjYMUtHPPn3nHsgQ0T8Lm0PdKvBPVmy_HL7g4YYsh7T2FP0rGdcwtg4583g0fCHxg4OgmzpEwEBngcKJT-LTDDPyOvkL-f-1htgqgGD6x4Y-bgDTsR4e3Kf4MpJvNEiqNUZG8pJVULdEjDf6nFeevbUXwXX5Av-ybEnwbIALrSXZvc4HmVNNXyTenMgB6t9WmdDVzIsaVm30rnuPSP59C4YOGkYtJP_nquoesailMy2qRuJw5HGIXo912KL74Ka8_uKoQIfP9M8nfu_HvtttgQJxqkXhCMBoKdQjiimSfmpjjKzxIWvrjUCrbNA8BvNFn6CmggmDfjSBWcAqfI-lpWqsI6J2Nz9UN9iOuK1m5Zj10MUam6POmzo5LvIgSpzQKBE84-mBYw-F3J21RJN7ywRFt_ky-3J_GvPM9CLHOx-5P_VvGoh1XFjRqw94J0X3JlmxqVXLMdL_Hvce3axI8NNM1ptufduWULd9oVKGx7frn-1fjFnrnliELQONf65wfjMrH6zRpj4m-ACODjA2FXP4TFo8R1lDgs1Nqa1Beh0NOHIikqOWXTId0TqdH-s_JMaZ04SXqAf9vr7e77YMX_QhFrgE","tags":[],"target":"","quantity":"0","data":"R29vZCBMdWNr","reward":"0","signature":"MH_8fPnTSXZbyve2iqhSsv65n6JGTIAyhWt-cs6rb8zbpDLHbNlvoUYWvkmEtQ2sc-_zuGF_v6Sba6wTpVLgL5QAiNfRmj4wRtaJhk2ro1cNfqTH4YNeCVOnHyhNbQhMKV12SoOeDaPWspALWE7_D906QM7opinnWqe6J_p3xv2dDHi8SJaDo6wjKwZWOR5H-me84cqV05xS621PJfQ-8yBvJzrhcuWlndhuvjpn1KzOrm895nf62xJbgOsFaPu3eg8aV-dLFFwocDtJFW2q5jZUEfK1pso8i3QmYsfN5vgYDeYtQ8wtwU1b6I-AuWn5fZnyUs-T9ns4xjoJ6CDdrQBZCFcKhYgy2RGM1EkNh8xlbGDSYrz4bC3QUQbPZSx8CyHFH11sKd_eQogoLtBXdozPLO98XnXCg5u2LDLJAIV3YDisZeSPYgVEAAVrCd_YLt_mO3vXkamGyNmd7pCKTsSqQxMbTpXPK9NLBYlQ09Nr14VHkMVGowPgem7KQDnq4LsTXD6snb_KqLgxYNv9kKgKAXwrM7z67si34o43IE033cfPPe8e2c-iC4lTJBSHCenhTDoJku3l6VccL6WyH9yXcXAiVdtmK_7UKKSUxkef0MgXuShncjaGOeFMHEbv9Ot1UhWGNdVjG8iRWpfdD6w5Wb-Izm_M_9_fDQF4qyM"} ================================================ FILE: genesis_data/genesis_txs/NptjIrqZrQMSdLbXAGyQCr8audCzArV3EofsjRCqrQw.json ================================================ 
{"id":"NptjIrqZrQMSdLbXAGyQCr8audCzArV3EofsjRCqrQw","last_tx":"","owner":"4ckASt73h9vv6AtmEoOeUK37uO9lTeTwRLxmCXAsseyFxVo8wHBQzuibdR-7aETHy0i9tcSBqaEgI6b0esqO1j_OkKPw2jTLS6Jv23RPxXwe06HWPUjI255k4vS5X1LzzmXUae2F0Vr-C5Ku2eW8CJGkbmucwnzwNSWbMdp9tpJ1wHXeauHgjNmuleiJbyrw5rUlC7YJuh_Giu7of9lDs03VPX4-JUa-Uh3nb06LfkAGbw9sfYcmOz0CAiTaTVebYiLEcbar9fADGFwwon1oNXMWN1iaJYABLK685MZIv_-5IpfFRUVdwaAsr2xk-hVdPzXJVV6t6VB5foN4iWlQHp-wy0YT-8owYFOGYEH6-TaTCGHtJPqpl81jhIZ2e1Re0v_UliinwEJ9coIRqF_MNpblC2SG6RjCmnTNJD39ltX-gCYHS6UgKPeZbHyl6ao9qtfCkHXOwqkl2k63OIzAkDSNwV95Il7WoGuV-_HZt7LgqqIlVtogVexSYggrSV0w9pJAdjyFTXom7jxyMx5ro8If3hahAxnGPhOiNSCB9iO3gk1MeeFEZdSbwKQLuuzTXgCM4DizO3dPmw6pN5s8krwkzwoJRkaEnWv2Q3jHJOuxkBE4Psu6eukbWu0ITv7tA6nkAz60TpdOnYoE315xq8zdBYNt4ntLvmBCF6WguLE","tags":[],"target":"","quantity":"0","data":"ZGlnYW1tYTg4OSB3YXMgaGVyZS4uLg","reward":"0","signature":"VITdE_nP8aLCjNb37B94_9pQbU7EFs9U4jrdGoBOHju66DUvZEqj308sM6Mqo_YI8KU8XXo8Znqiw5blzlkCax-qusafA_rUG-YBwuVf043oPezNLIaVWEFc7pZ6bgZlf2deIJ8ywd-HY3kulZM2JzQeCUu2a2ppd2Ra8BCGbn8ALf1LD7XuambpntFSnnrbjGm_bYQy3wBCfEfsMw3ROqA3-xKgYheIBklue2NW5uAOjIQZbbDZSFGLDFU-4VqYPZUnp0N_dBXkzkIurHD1uNJ_zm0IVG5UOGJNYxOdPqy1TOqxXr2uJ-w8M_fEko8j_19uCo-VmWh9gaPA0O9gmLiirQPYEycm_PI5n2k4ssO5WQdyrxuA0J6R8gQtpIVdMkXY6m5A-7H6P9Q4lQSSzUB5_WaVUNtE_IjMG0iVSJETPFKXF0Gs9HRsDUHPYHlpVDCEyCiQ16XL_uzRjCduKjOVq_HdymvT12IsmZklkalt3Z_ewNCWNMmf-7rjzifmp2vw8S2CqyhTzzTW1YOUwIxQ6CG3w-hlidMjZYZHwATGqwZomBHoOl42_02_-cvBudAcZxBN-yEhxC9Ktct6bbmf7cs9B6ZovI7T-SwTRWJWo-iKNBSZhJS4QBxu07_FrPPRlyhvzoPAKBv62Sq1H3VdQVSQ5_kJPVlnfl806gc"} ================================================ FILE: genesis_data/genesis_txs/NvGRQrdis2HV22enpSpPqsb0M8s-pN_nl7eJtalZyC4.json ================================================ 
{"id":"NvGRQrdis2HV22enpSpPqsb0M8s-pN_nl7eJtalZyC4","last_tx":"","owner":"yUp3Y634J0FKmhhBD8tMJ8pYCuJpere07JaHXF0200pfsSgpkgMVlmHAskCO4Niiztu6J0AANXsaAciHQgqTihnhU6QBwNfKWf5Nos3WWGmH33Bt6vVVODihWSS9kwg-Bdcepu1HaqjptPVvZn8DnqxLZc_vZOW9lrlRriKKUoJiXpZ3_mvjqZmnuRejQfVxnxNUSzfx55DNGoCtbqkOdoVzAGyK0ATVnmH_goDnW7t4oePw6XgDtK63YJS9co_lrSeursJRo1Q3JSWdCuFxdDVxtbOSz4-7myFVG8WqcQYDaQaF-utG5rbBHUvrlxxrEy3FdtotiZQPXhwIee1mxGrYNThAsTpoS_FeW-AFo8edVGxncada68jWwHIiHi9iD5tHX461pYOawz3F-bScK6gzYXi1Rov9JYDhLMHZ94vDhbh5twqv-FF7SrGTu2Hbe8qFSSPZXh19MSJSndDf9Yu6O-mPIK7x2qeZYhop9YSGrjxZou3P_XDC3veP-URZD-543DaD_uHbbTl1g2mW8_WwmLteRsqNhEBUx331a7HqnBs0DVqK2tcRH-gRAVpuLgYWD-MLwKo_vJMDPFxekkPXDxfmP0S55t9_VCQQywiU2rzcHbznhQACm9qrd_GI2c-evL_QpxDLPzg-XxgLcK-E9WCRLG1jHAq9hz3eCbk","tags":[],"target":"","quantity":"0","data":"ZW1pbmVtIGluZGlhbiBmYW4h","reward":"0","signature":"rYKBBWZ02OG3NGd_a0R8yO0WuC-5IoThV_Mf1ovP_Ig8g00A4LcTrcynwwIHklsIxMQRN0jX3XBKptK4J5Xz0Xe9PxSmbz5nDqm6wGh3KzopCKgAoozefJtOKE_1RNN-7Hayvph-nLlWurPihdwTbhnED3WuQaOMmfRzACJsFv9JPF3FIuaTmNOrwswQmWNoYVgyQ79hTIfXSK0FVHkpRw8juuIRwMcJSmXHsW4OatQa5aQVafCFF2-Cu7p_xKsxqDSYWzQVKo4rGWE02NTKILya5MyznUog_AEAyzRgCSPiC73g70OZYtQdSeDSYyD96exhP0ftf6dsa2PK_VsIwxRCkTHKiUlc0OYXyCCGlYi_MSRVp0xYpboNliYjJ7YGix1k41g3wO2Sn4eNUKdG22gIpA4Ycl1eXFRib-nsmcSE2Jltm3ytRWNokwMzrJPyceQ-pX6e2OkuO948mMmM5G6rgXtacs2B9gX7SHdJEK_BcbwRzYTWfI-GEr2rOGdXs1pR6VkNO888kbntpg0oqJ4ep6HXAHmr9iSVgdpiNKICpRX0d0wVi61w4Z2wD2Z2xZbdck94UYc4R-XTI1m9II2OofKYheT0PJ3wIb-pvpqkwHdgCV0BaKk9fZ8gmTxFF9EPY9jwGcyTkywsj8NZDfrWQTEpBTnWskpEKPv-C7M"} ================================================ FILE: genesis_data/genesis_txs/O6qlkPRgr7H3WLHjVov-CTm-q66Q4TuvhP6GC-c5ZjY.json ================================================ 
{"id":"O6qlkPRgr7H3WLHjVov-CTm-q66Q4TuvhP6GC-c5ZjY","last_tx":"","owner":"1DpCMLIndupNtMa2SR1jm_QivewdCWZywsOEmn_Z7mj1gIzygChDXUpcwJF0oEHZJkEvzoy2Q3wg6nmz_ogbsYnL4zFdO9_32x6K2pqECVwlrmP9v0XDgHrcLhCNlo0ddlmxMcibdYW-DTa2HPHtUT0JwvBTEoWtHY7zECdxgSf-Tk6PAMrKW_Rb8FcVlO5--yqzNaN7FNLT5HD3lnqa2Y1ULFb4uv-WG6jqRAK0b-3Ou6GUBajWOHPpyBiSwP6EaHA7v4wg-jhFNPRamgCJB97lMgGFuEKdZtKXhMIPTAQfE2qPu9a2AM0yw_CcwHziEkF-asuc2L7T6CkDizI4rdHynJAsl4hBpiudq3tVG-G_fWANd5tjrP4r8O9e7G4UqwF77ZLEz-hsBTSIszBtr2ysEHDha0Am33eLV7afHKlCljG57X1iQm43WjOLCQeXSjK7Y3dA_QV0ke7HHJEyxRqqcv5gvvMF91gJ2_Q-s3vO4IFak-ZSxW98pAWcivL9qcZbgdnPpUsUYkFW9SvHNT6GPddF5sZO5_2WDPZ31NmJXNUStGsnUGFfQ5_4l8rXAeXrrV1Y3v_WJD0-vMByv1_7P4vYsrU7jd4vdS-WeTjY-SLei-M4b7CSfp1zTXHgk18yCqC4dSs1lkVDBxqhkpCpFAOGppK2PV_OTlWM2y8","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"dtI1y7oNyS9nF9w4TDeN7FTppnNyHQ134Jdp0YG0LTbAkuY2-TOdvNKNFU3YmUzWPwha1te7QtoJHv36toipEzoUP9Cc997oULvPyOF-MClpygdPHhipxAoJFh8q5LSVT3hwDUXVQshKw685BM9n9ED_8gWuJ4fgxY-6hiG14Pyi_qXz3DcRxGoUzp668C5OhYYBUi9iasakknkOWhfoolMURi0FwwbRnlywno0y6-XHVLh89FToQ0DH4H2_osRoLJxH7-5B4A2EnR3utWJfeaRpqUSZJNd-oB012t-bQ3LCVe0Mg_kHEyPIoEmPpUpM36jjd2bvfEMpsv6n5P5Xy41hcFdpV_KWJgKsGHXsbkMnwH9mreznVEECuzibWOAnkfAASPAmFNLMCGNcpDoVyswaUsk2BTYVtldL7gfdHOs4xpgGZiQLhpuicTo9e5tTLV3v95YF0SGNw_befcwlm1rNHuzIYHhsb1z_q9ly-nQzoE3D_dHABqqOMHND16Y265-MKdesTqkD1tdOk2tFibVnpbY3hD_TFDOtkc-ZDEhy5uLHKC2dYzRGrRyLORs887jIuEqJIZJkPD7Z9EAwPVspg75qNHuAJLbsIM1G8SJEHGjGQov2x8zJ741RTIoQIJ-g7WzaDw-c9Iw4dWjhj7icR8uyAcyEIg6nIYEdwgg"} ================================================ FILE: genesis_data/genesis_txs/OILhne7UcvACtB4peA4osAjRMthaZZSW9OWhe3NpLBw.json ================================================ 
{"id":"OILhne7UcvACtB4peA4osAjRMthaZZSW9OWhe3NpLBw","last_tx":"","owner":"7wiW-nxbsbOZ0z8WAT4JgJa1qVPH7vBPCbbrRzjoQf2JXWHvHpTgPbMhuBxRJgQLUmrtKDZiAuO1RcT2fTP_0sP_PiYXhTx30xUlBXdbPOkNqp8PUsl1xa0aM8pYlVRQSQmoTxc3STogpvpeq_Do01v_OOFqiSjb6gq_hJJCNFzAr3eqbucbiShLAVS7Bye5S09pXYvkLpd0lb5fb2tufj4pbhtA4vTWGJOrH74Kl_76y0etguVX14oD6KooBtHDFGPeVb_QHEU6YfdJZXCTyOTMPN-0JzW2WtHy04OzDmLIyzbNYEjCxr5521Aqzm-vI-937WmrfHYm5Le0upxNTCrnSnLMm3Zo7U3Hyj4RgCuWJ5oSouwucvn9vfiOku3KkC8DKTrk3U80vE_YIlE8PnXrAAO0_-MnFQdcEyUegJr0OskD_xpug-SPOiOkvY4sNDW8qEc3B7--DV7gWNfqCu_Yb5sCwTFQzQur_uAy6uhd3zJHpbw2PIUSASJUvjTjj70TMmQQwZGcXEK6RyF92aj20YR5SAex70uqks_SQ9gwC4aqGdfkvQVDVMCrtmNIFMxtKbthuC_wa758Q-jftLwe231RoiDhkH9nsTPMQwwL3Ly03R1YMsgqdKj_BhGmBq7haeaVfEe8vcscygboiREY9A80VtRhG59y4jd62BE","tags":[],"target":"","quantity":"0","data":"b3IgZGVmZWF0ZWQ","reward":"0","signature":"3hZGiaw1HD5ly-u8bPDuKopl_B-YOUeu_7_jJjKeQEK_Opjj1ACcZOFQt6VcCT6x3-WKU6Kegbw1esxCQ2OPVL5swNgqruQfV9dGMk1V8wvl6mXcpBtbPlD2BoKB2TI9wd4ESQueEOJP-gHrfMoJXoWIGd7W4IubU8v_3Ox2npe8wrtiP8mvA0-moRo6W0ulMPgdaGJbKu6erLG9vFjKdpBOrHUZp-1FgxF3hLltlWzywhvad9Tmhbgk59WvivOS3piWT-8isWmOYjArXgenrMqWZiqIKxsv4FBekfPZY4z3Twb07r2QKqCIXEzg0P9iM0VzsLqrvMMps5ECR5dT426sFKIKWF-87CD-EW-n4JBFd21kb8BYpMoMCo8pZJMsE2G0lE1hSRrKpI7MHiBV4Sdb_6YLAOmFhfI_J1UX0GAIBrztH7YiRQwQQTsGaU67o3IL6RseKepdu53k8UBzvzEHbBgCjmgSeZbhGildhVkqjHw5QcnTwGxVJ0-BG6du4F_z54zSwE1bLwCQL3JQNPE2eHDvW5YIIX0coIC0--LeOFgO_cs9QYXgd_cCmWKMWr7VoiVWk_e37KcZgWVpG7wf6bFVwMh29Befs1n99pCAdJXPyE72-i-Kf7d7J7Y7KQYSWLXA4FAJgoQ8RlTBTwCFAXPFM6bu3uteVxsQKjU"} ================================================ FILE: genesis_data/genesis_txs/OIOqGvvuafD_5J9QzfxyPiNlnqzIcL96i6u4PTUeDmA.json ================================================ 
{"id":"OIOqGvvuafD_5J9QzfxyPiNlnqzIcL96i6u4PTUeDmA","last_tx":"","owner":"xkLDDrJrNjPpf1YJv5TEWC7PQNvdLvMOFMTLEtoFSRWgnfwhOA1HB1OKmHPTFYyNmRMnZBYfqJWFuMqpMJzQ6HdV2DIzOMBFQMKJTBKU8LALFDCkPACEtkD88mn1P9voVwzhYq-Wkzlk3KlKdv7n29CcPfIsWh11Q5g44CHeJt10z7u2xGgG81HPbK4inlXh5fkpqbass_SL94ALMHiU2AehP5RaC48UX3k1F8DUl-KNmvBOYXFvShHrUrtjriv_PU-ejglKvkNhMe2p9kwWCT7yMTwK7ggeiHs18F9dSXDoPkd_I1gyxfX2dZ_XLGJkjia1pHTV3ZlVzHsAX_RFX-oUf5i0jJhMy-Y65hkzvjc94cJCs3AGhrZuvC0m1JnfRgNjom2ers_-nYrjxbTcl2Fipf5cjcZHQYSgqE6k1UMM7DjJEbZ7WeyEH_fbhFCgx4mxhjUcNXqn3JKo_oik-uD8i4cdLFfrF9iZQZ8GZ3hqf2ZegXojN9XKRWo1qHewJ8o0mWH3uEfY_jBNCAR8Kpq9lMgDBcOVWGGRinysWmw9bVodrrHtuG53qSTs1dWmdHicH-8IVO9TK0sXrGiyW8w3hM4A7fjE-M9PdwHpY4SUmkRfP-v9vDpbUECn2Ii1Nl0FSoR1_WH2AowxyxQ6tDn_USCV17o7AcbuZbo3Pm0","tags":[],"target":"","quantity":"0","data":"SSBsaWtlIC4gQSBsb3QuIENoZWVycw","reward":"0","signature":"AANo0kCrxMyXCb5jqCh-i0Ju6DzsxJDLt6vap_w-JZZOlCfRB46HeUTchGJoJ48IiZlAWxouabLBoH0FGouhpAt2SYac7eLw82JyIEThdlPqtWe3hymbH5-EAUiTEBWrRQd4_8Z7QN5gV7aM6PYjD1h4R6oOXv0A6vRXg4xGQWWwUND-omdYDn53PnOgqkuGDZbfItquhNicG9CqCP8-hTS47FpxbgIgtkuoj64bxnviSq7n5IaeJ8frjFg05okdiGKrWqj6CqPSI8Jd07ScrbUIwzYT_3b3RXG9nSCCE70H8T7dQBnhDUC5E6cfDZu5Hjt4wmNqQsuz5TaHrE9L_jDOYscAOoG76V5ImluPMGy2GXJhsGI06NqitXbj-30QbvIx-56iosG5tayRtmJMRSmbctMfLUtizMOOcM5TXlOSOV69lGzkkFordYpOs7KU2OWsxjnc8pqhoQzlEpUa8OoBju4-jfH4Rvk1dgiP1dbc14ypX_dfx7wluB6rd1TrhQUSuzjnpvGfoNxlL8pyQT0OBzIFKVIz4GcbuzxjqXpldpyJol_o9txgLHFNeCgFlSdqZmqtvYe6haMK6MUBHinwj5ImnAFnNw1KzcB0jn22ySRfpYuhQki_Ax4ZygHEVSAvoce60wESJwGiZ2D4vSwkeWMQpidL6b69WL_jQKs"} ================================================ FILE: genesis_data/genesis_txs/OaumRLT8oE6J8gqrQ9DrY_grMuSfWtai95VnqrX24hs.json ================================================ 
{"id":"OaumRLT8oE6J8gqrQ9DrY_grMuSfWtai95VnqrX24hs","last_tx":"","owner":"uQ0O38J8Xz5fwjrWJw3wZe8sfitb3LON2fnToY0Fj2b4LSoN5cokVyUOoYJ68cy0qVfwUX_qEO_xMZ0KEjHn45Y_8VlnA_hp-9H4F5efHiRgIXlpO8XUEUMQEFOqthxgSQXHpFTkspGSxsXWldlxQsndx23Sbn_bialwLsigXLe7If5RIaGohghCu2Ej_YAC-JRqv20TwMPZbG5_4yfS3Q1DlRJvLRHICTWYD-a4qSqHFRhujG2Sd7CZ4q2YXlUYHk3gyvAyhulF_powKUiGNYK2lcK78RXnvcw2Iqj-YUp5A1XOzo4eltzp6fHohZxLpuLgv4gYVPuu9uQa8itI7WNwB20xXMRdP6NQsGxfMZVXi52q6pmKSfG5wtEDuoKUSpidOI6kVtpg-i7mVx8IrzHuKLEV9TtydoLAKXfY4FwvXeXroSd1ImHU-g4CNuh1acniPc33eqMGtIEji0-7BnWK12-iTLPFIBBu9eUIEn9blxZ-3SwnwJNY6ARD4-YA_U79249UHyCvw4xbXsnxyuy9FkXJO6ImumAmVib-8LW3b-jQ-kgU70ioZpBL2D_nB_MqKN9GmaIjOuTThixDXJzJNBXuPs-CK7mrCtc6f5z0qzCq9mOoHUdxtE1hvSf4gxOdsKxGltWav1K2M3KwLJXD3WwEBJS6alqi8c1VVq8","tags":[],"target":"","quantity":"0","data":"QmVuZWRpa3RlIEJTIC0gamVnIGVsc2tlciBkZWc8Mw","reward":"0","signature":"QU-0tcYJETy1hq1MV_J3IZoSKV_gfCIytclzcbT_6YLbjj25KKc6I69rvx1yAgXu5ulXqgNrBBZVc8Yc2D7Qci-pcETjOTTfu3IC9sKq06x8_rdzyVlvqLYdbdEBi4uW1PmrXNjOZRnRAxPqjs1dUCI5fYSdISg5EiyMeSmdnb77Xc1MAiXjfeHX25r5pHm6AIs9T31LNG-99KTSjYn3iIJWDkmyo-ntt-oT2K43m4wWk2bpjWOkRVr3NsBHIK2ydkMXz0MM83Vy8cZl71QqBa2IwAMukiuqtd8dJyUQb3SA8t6OSPGIr9GiCCmGRqZ9qjBQiWeq2AlrQc6v7hlVwv-TDbZeThMHjNQzWiQsl6Q_SYepEBvupHhS2gNv87dIRD2PZ3tZEfvYsNJkJvZqgig4nf9xN1luXx-W56nMUKq7aaxzw4YQLeZoecXCvwMvX17MEbORAELW0Xh47pq8f0DMSMxjNMo_df7wHAa4vf6OQT7FauVHPQ3Wo2y5j1akHy0RpV41tKmm8jBb3j_J1e9j0Tgt5Es2lmxXnzuOMfzXTiThPIBZQmtZiBg3DjT23GJKJEhZI97QGZmpNLYJ2UTSbZIxF2ZFRu3MXvhYA3NKzfa0fL8AGqZ3qokY_XXCW_SeyJe6G6FVU8lKiSb0RCSnZ1Xd_AGFvgBejDo5Wwc"} ================================================ FILE: genesis_data/genesis_txs/Osgzf9EDK9j7TMlqSJ_5Y1rzZgOA6qfR7ktiakLPk4A.json ================================================ 
{"id":"Osgzf9EDK9j7TMlqSJ_5Y1rzZgOA6qfR7ktiakLPk4A","last_tx":"","owner":"tGc2zDnhsaSVnE01LgR9CY0uT_iXmQt7ojbwE22x49bFV_af49gW_RDwVJ6YTrh_4ythN4R3w5Q7_F3-RarxGsbe1Lof_qdVsujUDqgwE_WaH-91xfJvvIlPCwTzTuZbKvV9ijfWMJA1DT2-giJcgX6pwLkQkeTJL0MelC5nDemsOGdzph7ot2fnEosQG3RF0JmKQVrIca603AP-H9jeSufbbtD3g5VLmrLCEE9zpwhePc2Tr8KkHzKOWBsE0hqpxDfu1O5vec1rQGPiUFUMHWsIwxMB1U93LxDTJS4bZlk22Cmb9QoFiesk94o2EyXF63qG101feUvpEeXOAByih_Y8Sufdo1BrZL_xaEUCRBiBNKBs1CFFV0shkTtjspOWvzK169xU9l10egKfF5gM8LQRDmWDkWej1Y5M6W0s23ynF_BAORwiH_rNAzIs5Zzh3sBraIyMjjMO_l52CHQ2YFkRwJRNMe_vGQfenKddszL-oNcTskM1nkqD76oBNj8dwx5dnnD0oDUn6Zms7fEG2Y4o5Y-3CMCcN2FeO8LDziAntiSCY9XR82Auz0OqMR7pz7vVSxaAEa1KoEfnzqfG83FpNJrrsHMEuo3PN_z4cJSip5Ih7H4A-NsQ1Y_XsNnJX8_y5bNlmJEom8vXC5FtefVwUxK1fAXttaJ0ffEcbSU","tags":[],"target":"","quantity":"0","data":"Qmxlc3NlZCBhcmUgdGhlIG1lZWs","reward":"0","signature":"WJ-T8oeJ4Trmu85d1sCxP0hY2mvGj7o5lzRvewrDI7qMOoQS7r4Z-L9NmheIcIq4732cVg_0XWI1UOaVbXunnSWcAiwQW-Y5rHzkMuAgmUXqWaEUkBaPiGaGWMnHB3Yz4UGY6NdIBsItBbXIjkS4r8kUmAq8Wok1FR7L73bu3YEf87RRTPLqVXCg-oYxpopESE4oK53gUL0ywApEqnyAT-aeqpL0OO48qqO8d0kh8Oi8Ll_Jl6cYga_wrLSLWSZgGDjcZncVy0Cf9-Dskw8AZKodGuTveErLGt0ezruMhY1K0WWvApphP_pj67cQOGLsaqSgiVGqgVXsE5_T55Mt5wlDauJZgMFdEkctkQiNSFFgSqF6QrwX6kNmgjHNcostLve-U6G2HV94nZFFTAiBwk9kGojvBJ3qZLoRJzsBvDQEhI1ixgRhiD1Kxe5Hrjkk1TJ_j7Bs8gcgOyyVhr3TEaeQXrwL6CFOFMVQGwyVO4WR-p50daFWi3A5z2_mWuklRAHN3ZKEhRPMpo8p47RMETtYIa4RDj0e6J6Pgv-Fvvx3-prYdYqvyKV0sL2UrIIN5x1VSJH8cDqwfsS8B00IX-9TjndrJUDqnozFkCBBRzSJxoZpgDF6ayCvcDzPRl0X9GV-6UAx13eN9Afx601qLdwTzDIuOIBA0HDx9JVeucQ"} ================================================ FILE: genesis_data/genesis_txs/P_pvvzlCIX7Yaiuv6zt1voLcn69gb9jAHPRhHaHjLng.json ================================================ 
{"id":"P_pvvzlCIX7Yaiuv6zt1voLcn69gb9jAHPRhHaHjLng","last_tx":"","owner":"qx21z7mgGCuKfCtUmErijrPCG3TTBu2uVUiW7r948DgPpCvek_PzJHP0bYYoz4CWooDMru6ScrTspFWk3kXDjCUUd2m-dT4hR0iqyigfWaJm8GlpyrUTKTH0p66BnQgul1DWhJePB7i-DyBHTJOd4dxdDRBD35SZRyWNJJwjhecE9DRt_zfxenMv8eXG-CkyQeMBnk_5xq2zRHCS4KPJNaTZzirohvFnxdv93WXj5X0p2FoaUOK6kPKQHXpArfgymJenHcYQHO2-OTDPO2ioKzT84SSfAbUVtB5IlghmFu3YkwUNLtgEq6WbuV_B9VoEyTMEV46HFa7TrOUsb6kv11Dn2qZRRUuZzKPeFOTCjZcHH8cUo-ZfJwmzHW16JnxB3gdgHQEl5E26Hw3D97GC6fHS6naKC6-b_Yjn-aqTocWT6D18PQ-bXpiuyrdt2yW7ONfrso60UG3npWJtTcIvWBTH_fBSqClhm_bh4B_pb3EkwlFFQFLGeXsS_srr_s4X629rtNOXTGxR0bS1-PUq6FZdEnHXgVVx58lGZynLzkweloczs4Tsv6PN-naK4mCrCQBvU4RrXFdeWjTFqxrx4JMPD40wFvw-Vqwdu0gJtLBlXvAlgKp16BnVQAH5Lvfe7Dh8kxeRREAFHhu9nVurnx4sl4_9HC-y6ZIxQwh9joc","tags":[],"target":"","quantity":"0","data":"V2UnbGwgc2VlIHdoYXQgaGFwcGVucw","reward":"0","signature":"UuqufsybZgIpvF_IPRZ7Yz4meyjyq86pD_nB6NZkzHQmGeJFYbScQb_hQt8VAGsIR-A3n6HeBHCjEq4EoW_vZTpdjuNH2yaZ-8i-hWijTnY3oiu_mYnDcvfmYFJhz3-yZrJomagemy99SZ4kUVx5B0N-rqkiGcdSOR-j05kYYCS3gChxRyDiZCR9Y-lSRaoA2ePEY5XjUtyw93OVe2yyy1-N1IP-dzHFlo_uVZ5Ef246J24mnvYayw69gP8xJ3LY1TeSxZCRcRvYn9IhC_vkzVWl1vg1leRjl7JcXOx-lotNvH8J4iGp5lGEorIgebRRBn5C9SlqvEXpBOIrmXMyGu9m2kEUmN_puGud4VrTteuRqusHaLp3OOoI24qOaBlBt8N8NJqg4eDbwL_u_XRTUeBPTIfFgdmJ3gpLudPOjvp3pSXmW2zjQBevAJfEMcNGSW9ldCCrGEoHr2DWRA0es57ALKBhLE_-u8ogRrmKXAUjCbsMSnrYIcnm_mht8BGP3A94XSQfO1ibF594hEtfC_-daXndHRSP0BgZmJtV7u4ehXlfiBpgwygml9MDfs7lu7TIfALuzlmcOD1B9_4jryyfqugzoP3JCgaYnAAkr2nRPoTfdqOeNKTy7p2FbvMwEFurdEy_X-HB5rs7Mzz4XqfpmYKQQR6QXjXb24_4Y5M"} ================================================ FILE: genesis_data/genesis_txs/PjeEg7GpKT8twlBkp8GHAsEqfMvmNd3RaAx-l0R_i2w.json ================================================ 
{"id":"PjeEg7GpKT8twlBkp8GHAsEqfMvmNd3RaAx-l0R_i2w","last_tx":"","owner":"rNRIDj-hqfbif3qSouqBtmm-e_aqaZy8Ecarg9nLSI9mLGBN7M_j2o8xHXvpRtJqojMxJz2CZ31oyPWJWzdZNqDXewhpgThefeb8Qpy3K0MaEGwJ9hTLv53LXDLYfAULEnSxZikgFn71gkwyL48-XoTlMhqaeIRhJk46c_w8t6gve17hQH3foODtFnofiwH-08Den_AzveYHsHAVDAaEMsyoNph_Ydfo_aohvUPo-gnuC2jWjxaSnRYjoVNOJjyQ9t7Uc58ys5dcSa347CHDUDjxlSm41c2LfCN44jlhHhpwYj0lF_rv27xerB-EjaSBUxcmnV4xGC5F0FhAEPhwitMARr7oSdrfp9iqlRY-Dtk3SJbYvl76ZnU5Cb4PDQ0b7o1JMUeqVbuGOxn1jPjH1wXIhk7Dda8eAS69N-taujdYrIzUMSrm2XNRRWSnfSPNuKWTDk2iLuKwva8oZDzE1RhHUuQ2d2RdL1nrs1cMy46Un-12UebJjUSGt8zqiM-74E6D7V69OhX9SapNb2Yk0fcTbWfX7YsQeJzN8FbXktMwGyGdbD5CU30mRC774EbLV7zSIIUdUcyqRdHe1P6Ierg3SPQahNQoGiWe3YYY_e8HUETx-ERPrhticBrDj6__699O-pIQ_4o6qrGpyKANt6CeqZmJ_was1Mupck029h0","tags":[],"target":"","quantity":"0","data":"aGlnaCByZXdhcmQu","reward":"0","signature":"QkpVTwXpJmZnEkWJF0rQjISC0RdKq8tt71ZVJOh2jDoHSFkAwCLlkFwrBMArcqhrsRXqICxGFgw48LNqgsecYUi8Aw0z8sNC3R1xI5QG3nMz_ad1ioOClfxBpKyaWHsAfpOEcLJGsL0bW-aqknRS68Q3SPeNTKojXQK_pFw3mQEeigwJJkIT5S5SdNEV906GvTEnL5OcvIpybgcSF_u-Fh2DCMIx8JUt8pki6E5ndr_sD54QYmr2Lf_0VCsbQbxLF73XgXUSs9oFW4bj1_lz_UUBIBKVWuALWfljpIhvoF1zOPSNeVpK-2dlkluabolH7G3PlC8xyLKoRMLFryByr52MhyjDjgOlZxXY5Wxo6SrjrlCo5_Y3ihYPcY8MGrwpJnb3rURyhTmdmhJzw4S2IBC-2Nr0XSx6SKas8ptzKaTJv8eAT2rMOhvRH4lsrwJx2TwpNxDypNFtiDoieZz2hkXzwTvYI3AvraNdMkWKe9XZQqzDdpRfgWCnvYBwECEqeZ3Z6BIdDrTASj4PePeBq9d5WU7gM8By2YE-JqPX1-MtfCB7UJ4oKbBZ1g5Sz2rLJubDkLKNeys87kLcyjj2AFxYHsieY_o_73xk6Uf1F9k7BqepzmrrAgWSQ8uOPxixgSSYChOTc1e-g61-ok9FsZZsUzl5I8V2fXlOguz87Mg"} ================================================ FILE: genesis_data/genesis_txs/PySb_0NIjROmsIgwz4kMwC9MVmeY1MwuKdil0WeUzxw.json ================================================ 
{"id":"PySb_0NIjROmsIgwz4kMwC9MVmeY1MwuKdil0WeUzxw","last_tx":"","owner":"4sPyS3wSmm3amE9f7zfmSP652detpvtXD87-jeTzz2ib5I8aJFBURGhdHE8QvXhawdY6VF36nm52dXgjBgJAGH9pg7h6MSUX-hLknQImvRdmeimg1Vhc6Ol2-o1QExYENwcRyIYEzVEemyHm4uPOWWrNuY9T6PSoX2sXjaW18ySCURQpXNJtuczYxdkGok4mOKbmuegg9wE2079UEjPEeeKwOafsNaNt7tTbaVNtBuC09EdSZ_UmqalQ1TF22-VN1jyIdJ5ncpjrj1weay-UlKEVabsFcvyJaMMfSnme7Qz1939ODpKL_gp4Y6jlAJNKVDT4GFYMTFttyHk4r0SQRtjTqO3Si5BYQo-9qe-rYTlD6lbOyEyBDi-fobf1mqeIc7oPUmJ1iM30mM5bYbu5lWrkv0QMb311hXz1l1FgDnpvBsFVChiksubRlNlWYT-a2lk8Gmt55AOPrdetSBzt5vYbo8m0ME1i6XSD5WuVpPq9jOA7P2gCwSR2i9Y5Ab7YPJ-Q_gjClMc5pvnwlRFVbCsZs_zgbvbPlgzFldz2aGuGFzmGHXW5GKUSR813BwdoG2WDecHmY89Yu9T5viHt2F7hkrd3XUNlZubFLSkpNCpfK6kGegAmxK5KxrCVZtzfsGdnrzp98LKg4q68Gv5TRE1LFsB6JDbj1744r_Ov-W8","tags":[],"target":"","quantity":"0","data":"R3JlYXQgSWRlYQ","reward":"0","signature":"CbBLAopxhPg28OVOkMKblyxhaxPwngtnbsjroaIKA6sxZqcDsjXwVXYWEmErtOVsHPd0z_BOLqXMOaAEBiMsGyadyZMIqqWYidOa1tJqchCUg02VRqJkBW5Fjab4iTdUtgFw_lFzH7RiLbzPuN4M96W5gdvRxGPMnBhsvPj6UW2JDUEr6vfB0fcYOC0FAwSeptcNPL8R862zVMhzXNyNwbOzrLoTrwcvYTuhjA6yFZ-lRfQFv3VlpIQXZydwln8ewQDrEFfaiSPYVj5bIgW7KdfvJZMO3JIRePJ2_ZqcMRDVa1dQjf20COQaJGUnfg7f54SQpU_-tc6L-4StrWINPP9PnJ8TZ7xPfxqn6BukkdP3dVCZTwo6qsjNGnBaecRvtvY0Zxtq4qVzeyFXe4IP8ngiPQEQue9BsZSX2D2kJveNGfq5lQg7z_jr7Nn5GndT8i5zXSRwuJHc7mouY9jZFdF3uZjwNnNdVroadbjV2g1dr6ec93gvhmDa7SfShwNIsOuWB8l6SZjhI31JuchIuf7mlKxcfJFUMBCq-k49yGAVWDlcO7UVK-oa50KBsHw38p_EQatlh-gBdL9EK5XQ5DETOWUlihytmVjDf6bl8f_7jGXsWIUMAySXzYWvmIE8-VMyoCgduVv6NTqLv4j7aXxDBaqFlF8pagC7C7hFk84"} ================================================ FILE: genesis_data/genesis_txs/QDBM2PowqCX0eUCKzgV-DgdzeDz5TXLKYS3HVXLyqoo.json ================================================ 
{"id":"QDBM2PowqCX0eUCKzgV-DgdzeDz5TXLKYS3HVXLyqoo","last_tx":"","owner":"sKFr_9mPdKkp4gZJldT_zPdpvoa-jhZlz9aBMnycurlpo2Ger7pi4YEnYZw65QPT235_Tkh2nRNfJRk22tOkLN8wTd50X8BWFT_z8XlFrf10BWe8qOnrqEeu1jjV2cY2wZzw_V71TbuE8HmI9l7EesHx0lXpSlRDkBTkGz1zzkV1jiKemOdKq4V6xbH14Xsz1wgPFLvHQAUfoA8o3T3rWk7Bl1U91F-R2NRl1w3bamOdrWAc3nuTz3bGDuH1_lbPOePUipQaHeZ88TdOnXdEkH4LbQT8l8I46TyTGL0PoGraDUpEkjkAORhJh4gqIlKuC0_KrOn8P7kojvJLoIzcgQv-KMNcnwYJ6u0cIyar31BWHfGCpspEa3coCMUzgH6hSpr2D9QEGHy27LtD_ZTzL0GvURfwppl7yyZYDP0vZT0-EjdUvSObq93aXRTBjZzK5hHhW4LLRqgg30DvC6LStl2i5t0S8VsVNGDeGEVZozc9_gS-8I49S-ECukKB8_foluyx8AeFpLhW0mdnUlkcQU532a0iNaQ2mNfLvRGS2WBv-oOm-kyuIScyYvAEU8O236yC-iQG6A7tuXguMHTOWeeDr2ragUPcsGH-TdAG6VMqPKHzUlkmb9r2Hde41TYhBH7WpVa75dg7XH7z3OIuQavj4i-no7KrCVY0gBPXO18","tags":[],"target":"","quantity":"0","data":"SWYgR29kIGlzIGZvciB1cw","reward":"0","signature":"NPjQIksTI-HA8loU7x_evW3-IQo3e6y1HvSEMFqB8gsHTI7o6t_Pdsi3t3xkPJJjMupGRu1MDlPzbkm7QeQmjWB7NKTTI8cy5vWSh3ZGQuluUEzhf3fFIqp4NeVcJSGXqa0SkY1w7dJMCjtWn-8KUaWlHmCdThASaUMvU2aXnFcNt-siPsPMBjJwhe5DcP7tc_eCpdAkyyqYBvfSxkt118ozzrMhNK6k4ZDVJ7UHux9_Q0c2fyIfSrTdazVgG7Pfpy2ujnMRal2LuoMRFjK0LS2SV8wKcPkA_KJf7FnSHLF-nhuLQJgBQA6ug4DGgDrkq6-lhZ0Wm4-bxNIevhyYbwgSUUwPUru0LoMNfi7RvuNDSTJApHMhtKmJ8CocpJ4VR-czo6T_cp5-Np3j-0osgF-BELbrgrxPX0-KrFx9aLhsdz6naLl9rvI9PZQ2qbvasmO-S3mKst7kVWk_HJeRu07x8uUwQbebSy8bvkILGQN8GxT1ewglvuPBmsHmkEAxxgU7pH-xCP5Tso4Ba-cDn5RdbID1fCRvkJMKRp8xDvsW-eWX6yBH3a4y50meN4Xhg841atwZ948POaLEYogB0TVyKRBNlx8_zTN7rv3Hh64BGsqmwIGaKLiVYGbd55qgl2up1fuYYH5915g2F1CVt1xXizagjnnWIAAlEyGynMo"} ================================================ FILE: genesis_data/genesis_txs/QDbVk-efwdVbHDGL1vZO3mQ3g65ol5RR-1wOvPLUkkE.json ================================================ 
{"id":"QDbVk-efwdVbHDGL1vZO3mQ3g65ol5RR-1wOvPLUkkE","last_tx":"","owner":"rfYGHsPMN14zQv7XOmivaVfCCYDbgRGKUZcjUNnQXo-QS69Sk4V5rQSHR8q4cTb-Seh7uPIa3yvqcxnTiROvMQLw1Q5cxiiJxnANyC6CGzLlKptbS-XkI2-WbGPUfQOtyNbsL-uRDpqhgSFAnrc6V09K9JM61Sw-Vl6uZ5iCezt6YkJnXkdMv2KP38gJWmUKFJmxqaHCyLSbW_ITrFvjtbSKAFA_uGImTMAQ8s_ggMxbNUnNgcDwmadcw4Zzxd93pNxwUvwfhKp6KK-jXM8o4hhuECs9AJB5aSBCjwgf0nBefcXEm5lWjHt0fqQ7OBHn6cKs-rOOIsQCK7LlMOTT0e4WJP13U36BiQN3ryHIgoam1bzUHK9h1-UsV7NkMOMTFvFnxmma7DtWhNaZGCE092Z38xK9KfyVPag9EbU5wignK-InhDdQvOx8HJUZmr8FyUVIY3xewxJ9cAwdXZO5udpEvf3B6xM9Bhg7mRUbJP9ZiEWiKUEDDlNRRCgjg1WgLEAVzwnornuAyyWm9voQqXnGX4RRG--Ae-ooKXMvEEWVC5PrSoPArI3EJ5LW__4xOGsMdlntOqBXHK5brzUoZjNDEpsmYGPex42jecLkvxPC_TgkMWxq1Ln4sEVtX49HYtwIAXgWaiJp9j8l0Eg05B2zAo1MzbQps37kexfCZjk","tags":[],"target":"","quantity":"0","data":"VGhpcyBpcyB0aGUgZnVja2luZyBmdXR1cmUu","reward":"0","signature":"SQI9Ae3MeN426Y22taZPwsTLkFr3weknZQ_hTZ0jrVdCI8XQ7l2fnyKLZKyybD19Qmf5yKUlsfS7dxDg5vIiMxBtw1Fif8tBjwniOlsWXS1j_x4ia2GyiYV39zUdDmTKgj8oqWqItCk1MW-Qn1owckWn0MqSG42I142DyxkimzAIZ289pai6B-GQqVJ_7M9SFhq34GUabrPKYwobQOJQHgvhznCuqv1F_F-Zxq3A2g76wr0jn8wM0jc2iwrK4ziEbvdNDtmfHCflqfDalYVIzvgjq4RDg41Br1E5CbJC8It9MvLojFTpOTVXoxnMLM5nQRXNbGdTEO4ClzgEJzt4IJir2DP61iWTVtGw2Qcr8ex2cxumq_MQJqowdN1rM7BpxhPwUYL_EZzbtsvQwSKnkIAJECO0jYRQHOQ60b3haGQk765Wm0W0VB4GJVbVWI1AWJOxxbF7uF9fczexyqnsbIe4HbGGQCRPMGPj61qDarVi1X6KYiHNU89WAZxoZ8k07tocpyMUlKenHa3h8Qe6n3HFFE81OlosDbybeJaTr8uso15ltFqklocRboEbh_qSigFurA3jl684dWBK0_UF9NJOgc_OKC9CCUn-zh8vr3jsLohRf5gasET5LcIlzV-QDQe-_Zxv4ZBKWt_XZHq1SRblw5I4fzzelkT3RzXPXkc"} ================================================ FILE: genesis_data/genesis_txs/QJlE99-614f6XzZ-7VctQjX9DYe5wnO21aHSgg1RhnA.json ================================================ 
{"id":"QJlE99-614f6XzZ-7VctQjX9DYe5wnO21aHSgg1RhnA","last_tx":"","owner":"54Cn7vK6n5KJN_jLKRzCjW5ifs0ArkCwITN9ucVT3j-uy_kx0Iqiso4EFiLOaJyGUCnPirPiAg2GiADISEaT_oYvosN5DBu1P0eN3kqf4j8NtQp4RxRfoxHdmHK1ksRkIA_Kj5pDLFsMwtRFeji15x7NrdEt-47jKKeK7fya2eUr4SU4y0FKC7vwWVzbrc8XhL8m_eJjBgPtDp933FqpxC2Zx4EPIVriqQoFt0iekdOuYE0J15NKN-X_eStWjPvSgxA9-K1zWT2daU5iHne2McBgLO3TbN5QHmev-FcLCDKC2SdCUNXZhi-lWGOhYFIvf3BEsNUagLKfBGQhnVuAZ2KCfc-ncpMBTVcHLzdqYT-V--hm3pXvQht5FnfqGpJ2fXh-fs3bZa28HUw7CgatV_NJp1zqH1aYQCwD71Ek9c_vgBmRGjwRdZKq3SHwZbP1uhp567sMwFASCVKpY2l7-2hsGESzfBXON9gjXNa81kLrJvlnpBX3vE6fRvJSyfr_mf8Y3rzP3FBS7LCOQh7kbGdLMJFn5SXStqx6_VDrMB_WSxo96UpARO_x7lAPtFtqF3k1liwPBvNe_ctsOAHv4ZauhwGThaSOtPCexcsw2juO4_Kigx_A-Pu8BFKnYT_deyAD3oG3OiAdHrM8U5TzV2-gxQaj1Cg_4qdM9xGavi0","tags":[],"target":"","quantity":"0","data":"TWF4aW1lIENvdXJnaWJldA","reward":"0","signature":"ncTQcboQLlW0u3O7qDWnKnQpCB0kelqCQuo5b9vozGt0FKF8ryssWIulc2TbBni1OL27Ny2jtgO7OiBtDKAkvNuZ94rtUaNBxIXLLv0PLYYFZQbPyarbID50ktTEPtsQrUoTORN-IH47bu3dubdQBuYyLIIl1hedup98a7f7fw0KJZcncm58CAeYWQMVI0VxWzWSaQePlqXPuZdCXPhH48fbCOw-DXb4zoJx69FBHVLLBCoK9pNmtPAqzGgyNrEfU8YJenLZt95PIq39j4vLZHzMyE_vGepMaPB4gBBVpOVgmlqcCB0REQjF9UusrXOeIsiICQRK7mEy8k_hwe0VzsIAYA5htWVpu9ESYnBsPXgXXEIeg5gIA2SG1uIKSVt2aIsUH6NlixKxw43enulf7ExEptouCK6cTkFW2ouvN5Sh3dWaQjnywEEp6syjC9uTIP0uUEb9a6MI9L0L1je2iZ0r9eIqRA1pUmWaIwi4ljXon6UISdK6KeRipPeBxkuUorjjHB8YwYS2MoHugnC0scfIWpWVjBZKOjrwDnvo5Xi-pGLIEp95fIYkh3ConjMEQWt1Lt7DjPwGcGP15OsKEMROO-u6VMEQiiqv6dDraFXGC99CL2-SYUXdqqqYMq308E-m9ufPurOiTMYooRjWXovwmp1JcBjnSZf6kD75cnY"} ================================================ FILE: genesis_data/genesis_txs/QR75we1zHW-qO7dsI932kXX0YrAIyuC2XIDRhfmK-fE.json ================================================ 
{"id":"QR75we1zHW-qO7dsI932kXX0YrAIyuC2XIDRhfmK-fE","last_tx":"","owner":"nAWqUDiELL9x9z5OipjQLsXx797gmEhhG18KdXoaTNp85sti3yThyWtsb-s6oSE4wgPVhO38XinXVOpIlTZWsA6Q7i7Zah931L_NTeIPBnYgz6OFLtBABJXIpR6kFr7lmF47PHxDd6HfCr46fIY5PvQWSObg5bEK3rJO80oKgUZxTiFfNSHRfQZaXT_uApt8fi4Hb29wmCXYOnZG5p6pDiYKnG3oKKIzZGJY9oE-pRg-lynnQF_2vOv_YOEQ2yxNg_977Yopua-pG_pR3AyXkP6-qOqZblg0pUAFrWjh4i_2vZuZ87ZtAs78bqwctKy1SW0fp47nlJq_MHYJXXRi2bmGrJRcv3-tfHDTAiR4x1U0HQ2W6bopUyB7kQtg3jGq3hzEFVfniTNCz5qpXdOPcOVI76BDWyKgN6ukTPDUdrUZ-sIVijD2L3XkT1ltHp6rxlYudx0YCsA3oE-yuvds987XnsIFBdWgoCg28ErPLrXKt7R_s9xbdOJIPqfJfi7cMEzie9_noKvaRTunWzOMpBKSI08HrJNA67V0fiwBrSuQgA4gxZ5sKESb8rRb9Xt5LKuTDvn7bXASvrpEJ5QRHczGAlVgyu7jNTaN-5zA7zEeUCftdAghK0b1kDBGID5aOvNkeVzvFbusoSyrtYZvNYTXhhpHVb5YSnQo11ZPEj0","tags":[],"target":"","quantity":"0","data":"SGkgSSdtIEphc29uIEJlaW5n","reward":"0","signature":"LUpyZGEFjl5Wl1aGc4joGFTWIhkoo6LCf1cMavVaikP8u73Zhq4WAg2Lf-2kI2O4BnuNeMzQK-gRZiGeCQBB5ofzV6KM3zQ7jOuY-NfSUrf_CRLb4GQTO7L2QMsJaUixDUXcCbes41u-F1GiJt3nOUkKOBOX4xt4O9W1nYCRyAidBXOImrugP350XdW_8uIXVX5Rnjdus329vVyC00Dy8CWxPI4ilOaoScpP4g_SDixw3w_kcLqNhtFMpKI_wYK6n2r5Fkwzdb1cGAXllomBHuLY18BqpX9Mg3gLrkoTbLEazCt3JgsgGDAEJpSfT4SjnK68uBe93dKfoDbKH6o4iIgIqDfIot1-ZmDs6XwqcUSX8LX4GdTVJ1v-YLwqgu_jtf4a9OLUVyLy332RbRAtTpDja7c1zzjb_I0PBVuwPTrNLOx1lpm-v5M9CRNRvOICnIKYDqG5l2uj-7aq6FzNtNR7s55_QxGfOdP--gw3YFwEdBkblffXnuJpBdDYIIyWS8jhG5iE6MdbKCrZ2cLG4lUozKgoXsCjZdXLi3f25CbtXe8D5zp_Zkr6kRX-OW8y66dscTJ0gO5hwdGxmzRq5sSoFsmoDohmTVUuSBQ4mdIMrJTHXyI3I1hpjCQLGYYnStUqQAZA6VE5hF5aGkOsB5WBYTwguNEbxMOBzq6W75c"} ================================================ FILE: genesis_data/genesis_txs/R0Mhun4e-WmLLGxnJq4SDTRqyNvTDTKC-uXuol1s63A.json ================================================ 
{"id":"R0Mhun4e-WmLLGxnJq4SDTRqyNvTDTKC-uXuol1s63A","last_tx":"","owner":"8QWLMurCUQXQHGRBvmvoYqY1cAxUl91i74uvQhnSdJ4vj0Kt0P3NhhNbLMtwW5dYjfHDRAzc3IhJh0vZDi6X8iiFky5E3FerJwDckUoeVqk630MyW161D1gMh1ZGywM1xxHytShCY9FZYyuUso-nftwka0rEGDCckAquXSTsdJBavzQC7ejRw0uxZmNRCbYD5ItvVr51xFcBq-Px4pXDWfcozI_Ej_j9y2F7dyksSqdu5Jd_CihjUdN5HxCWf4dgVXKliqoQ4eJ81Nqe8XMeJ4rO-sXk4hh8__nyAWDyk6NV8uB7gtfT3YQUgVNZTmPQ7sCF46uSmdW-Y-j8TmITvYBAyxHmjfIYs35rw9Aiyf8rrmDlUnJSfqjJK8Vol24d_T0dje79XaziSHzvyUccGJIISn8IfNzlIjsG4jvxayDqpGAWvDiJAlHumUdXcYvm_khpZ33SBcS9b2lVjTsX8Ke4X2EeRE82Q8T4iJwrP63a2b_meyTQpqFVowDwdH58x5bp6NZySGxT5nzSsQD5SOd_VykCapE9R1nR0fmYYkFUNYaTVLhz0J_c8-9GPyj_B3_L6jZCGBpyAuAsNjhq-Md638fMuEE1g7s14ruo5pSz6yoCX1aPeVKmI4wBSzQb1_OCWXv1Q4QYVPRd_zsCpnK6Or4-TUqV7E2PQbwGVnk","tags":[],"target":"","quantity":"0","data":"S2FybWFwYSBjaGVubm8","reward":"0","signature":"uD_EURuDBYTwyFbv6iRW90HSQITkfjTBoK6_U70awvfVTrbOTj4i6hiZZ2zce91YQkY4x4nOskMJugmY-dkr599odayCQJ6hu0mZlyQ2tPSfx1DrmyQURvsJOwS_3IdrZ9eq5Ho5Ly9V6nFyJrkMlEPcBoYChhv79ZubyQIhiAjAhMArPSq_wRavTpQ0hpBxnQh0KZwpIgZtxJEVDiG3NUNnCAI64pgOav0uou6l8fqKvm0V1SnTB4yu7klWRix9Lnx7eB_u2sly9m1uzvp4sqaWo39-RV18cuAzJ-W7BaXlidq29gHiK1N68GwNLQKnANoq4I2HpfETvSujXP75GRylAsxcNN3-bGg9hptjBfAUXuYtk-WPEACi3p9ziCqWVWUX1XTYbKFfRbkxwRmivrwO_ZxRv7ZSIIYWxdMCar57DBsa2Q5TjmH4GlKOmamSWi8Awydlnf5l5aiFa1nkN1avKxyaX-X3K3ZrRVrRpon1kKQhLMJZfkDe1qkwokJnTCLJAT4pGdAtTtC-X4Ok4r1dm6f2pbqS_jh1bsYVDpLhg3Y2cnvDHwBDbZdU82T_XZCCJv9pBAkWac2mUrh5vvzAuNCooWq_DrH-qIwfFcn-lS4jIKEl8UUoxDY58jIfQ7p8OaJW_gE8lzcpxvblAa5nHJ-r9xrK-Jn1Yq8HWZk"} ================================================ FILE: genesis_data/genesis_txs/R2h2i6y-KFxuHukxmHIjSncPZSiS4tpuzH0tD1NAooI.json ================================================ 
{"id":"R2h2i6y-KFxuHukxmHIjSncPZSiS4tpuzH0tD1NAooI","last_tx":"","owner":"o7Xp764JXAUybNjZg0Zo2_FVgo4etg72aCcUnSR7ti06P_WqCDKB0eV8aObfixRXq54cjjotZ_s0GZ8oarwtodeGBX1CaDpTacoy6Cdj7vOBaDKpCH2IUMaSZYJGPHxv6iMQ18Frdk3_dNuJCJOWebT8CBWUK0DE5trqJGS22MlW7HeHMWa0CWNnsiqqdZ8EMrplaMsw_d_TAS2LwUbU-ilbKx34ilgkWD52k13C63bHOax3ZyGKykMNo6WmcAYsuIKfzYOLiuPKQGm7EbA2MEc9fzUb_6idcn1EeQlBcqPFpKAkFzfNt3Du9ID6FuRqYSRdyGqEEi1W_MAHgpUtUVECq8r0uJunVzN7fLmvFjDpQdz3uwss1ApwTRdh5RJT6JIwmWmQUGRNpfNRzzMoRFCc9Q2m53SdKcxchH7FVuW1dPnJYG0_hpcwvhqwp5Ek4RghNPn-19Oyfuu5t0mM7rEIEKEr64hrpk2PRDLR7DoHNohwJb-Zb20BkdCY0Whm6yRonCVgL_bKtGCzhBpazBIf54VUwqzBqesl8xPK7fDChvT50UGQm5aKolIywW6lGmVtDeJOhbAIqFIL04h768xkeX8w5c6USYbXNN9yMbZHyXpuerCnudjSipUm084r36Ao3JI5YCPx9zkOrEzh4edvsuipThebDsSD_LNBbl8","tags":[],"target":"","quantity":"0","data":"V2hpbGUgSSBtYXkgbm90IGhhdmUgYmVlbiBib3JuIGVhcmx5IGVub3VnaCB0byBleHBsb3JlIHVuY2hhcnRlcmVkIGxhbmRzIG9yIGJvcm4gbGF0ZSBlbm91Z2ggdG8gZXhwbG9yZSBkZWVwIHNwYWNl","reward":"0","signature":"OfxPk2AtB09-FSyG7eSD3LUYHglTpf1y47nKgydLcM1jVW7zAWAX-do2EXlexP_FJU6dvhwTX0As7v15AKosAWwz8TVp2SxPf0xK60vVF-rCpUYAe0bMuARk_56PB7KvJ937vKrzLv4I_2kPF0ZBv4R16cTb7oObwjV-AYKkIerJl3Oy5U4Q4Pgs7w4BiWRHSf_VNdrtuNDtsKVx5pEQEZF9B5Nqqe7Yw1fZvzU2YJ_-fc1F7OLTFdUMrFsHHP9YfY4no80QbzI9-3GR2yONCJMyU033_rNEtWvp9ZGaijoTru5FEUCzFAc_qlJLRqBelw7qe75Md9jyTFykNgbzeWuwidFqLQ5wp_zRaeo92LyhdPL9L3yl4aRtDsPOr7QMC9fDxXT-psuFdznVYxhAnUU6EsAIrkDTlPMqCwU23tZKX8K_dSNGUQ1E7tviyaXBUrhcsHhzu4yRO8VAWtJI4CpsWFcBp1pgMsVU-Hx1SDb9ciaANNz7h_GyJIW68U_jW8uiIcIhHJxEm3XNs088B-4F3STl_y3ZqUdOPybGupOjNn9CqrLXNPXXHt4QN_MzlGP8e1PmLyrmjnTleHGDlF5a7X8CAmeLDz4DYC4tlp8IWyvTvtDBGeXUnUFnxZSRm3caZ6nqYFJv8CbmwHJ23D7MWNQnyYCmh_p-BSWV0p0"} ================================================ FILE: genesis_data/genesis_txs/RU5mkM_3UrjRMffwgj7ovDMYxxjhfXvliozhpIqw0sA.json ================================================ 
{"id":"RU5mkM_3UrjRMffwgj7ovDMYxxjhfXvliozhpIqw0sA","last_tx":"","owner":"uBWykl4OeuYQnxiik46oOX8cM0WHP3_tYUUVF8BQ2RNQNyqethufKnEb_TJcNteAPjmm1r3XpzRFSSk3eyIDOVgKy5kKnVSazptjmA0IyL13q8XeMimE3By9FoDsr6HKkRNLvQbfzfmaDgdG0-jbP8ICjOBWEhFuDeImc1RmqiGJx8lYgcAJdG01hXboN-dNnK7ThRnnnMEveIdVzuKYMpF2sQK_H-osBSDr1BRR-104dMwobhaJE5IZJaxZgIOzGdAGsojNOnGBwOGeOL6rGLZ5bRvQMaSMzuUXG29SIE7-JhwxxRatXdJ2aln7MYANWGs-Xim8HxLiljW2xzRDm0G3fbDRsxPOjMA_PcVzfsvVLRSyCGLADIJ1qrQA3NtYIHn7_Z5ie94UxvShk9T4HaWQxUsmZ7sGe1y6GvFrTF2rFuHdSbO1yLGrEIROgu8dfPvbeBUTOIh1nRIToJIVr0C8PwmFyEv9gpPbu1WjlDRm39--kXMR9oetMyteb5Wb2bAwITbv17P0jnzy64Ahr2-PuoxJkUnDGLpkJLue81J9tBvZweUy9IEvmeEztpRulzjbnYysO9q7mcvW3iXo7PeYqJ6Rh2T5QB3dw8NrAIiK3Avsj1a0_pAtd6k_48Tgwzqc5Ds80qrW8fzR4aRSHCFse3LD6Np5qUnFc_geOjM","tags":[],"target":"","quantity":"0","data":"Zm9yIGtpbGxpbmcgdGltZSBpcyBub3QgbXVyZGVyIGl0J3MgbW9yZSBsaWtlIHN1aWNpZGUh","reward":"0","signature":"CldG9jOeOQGdGLslRe6LSNNY69ssFcd43DvS8Qk2LDovx69hjSXiKXV_GaNUbjlHzHaCoVcKTLNzuDMsTc2HS1FEJIb7SNDQ5YRToUQTsUQTo5iOAY_-oDOwJIdr7hRzXTLM4l_JMoCWIR_AIFNE-8MhgN2uFyDwAVAqAKA7saTfWIAEpahA748Il2HNvoWiz9Azn2YOroUhAJORBbsKoKaSUcMfLVweWnG2Wb5faluo9R3jQLO1VDgTQXrN_2pUwTJxRn6NL_UaANOXJfyNiyNzHvdEkV7my7vn7MXa7-wOywPUYD6vlsUvfoM7MkMqxf_nbQYykRvdAHp39pUTRhn2icbDGtekIPrtx4wqitrzYqNeiMSFlW0rOzdbJiK0iYoVQDJmHdfphbl_CxgXR0_j0W65ini73gxBXfzILQEb0-wy2PiTX2W0jIsNhnsNkCj1wtaygPDTF_uaZI6EnkuKLrd-J2m3gLaqhmZ1NEmeuHZMNFzvxGd44h90tw04c-HXIheTnK6R698tSy_USxxiD3K6O0KwD5TVFYxd1IK0cjEp6u355_4JPKSXUjxU4V62XTIc9EYIUN53p661KAzwTiBESuj5fy0WCbws_8AUfMEyqKh-EHDmNnjXhb7RfI787EauqA54qnJA81qfvtJ1mpDQ8vTxl-bF0U3T6qM"} ================================================ FILE: genesis_data/genesis_txs/S5Uv2W6erubrzYjzm9QHKij51XE-j-GFdYwcV2uPIAA.json ================================================ 
{"id":"S5Uv2W6erubrzYjzm9QHKij51XE-j-GFdYwcV2uPIAA","last_tx":"","owner":"su3ovqflJmqgBh_Uraw-mSEQHwpELCOcTKOvMSB4X_ITszVhCUYw0aS5LcJYcjYhkLWw9wP78OOiZAOepQWbMrBuugLn2__HhcMvmcFFaP0lObwGJnzWt7oI_DqckSvyBvmRjUQTxdcOQIeBqLyOXC8Qjo_LqAzupf7oj7NaiU-qbgwb92Fp12Aa_7_zLAjmctxWIagvMhoAX6muMUoUtnEZa_vbke25pB63FG9XAS9uswkJoUEGZ2Zsn5o13ndU_wyn1KuH_5vLUv_xDfXkkjo4dQLSWtJGl4-yES7sLKFVAU45ck1JZ8zftFEiHJea-yxe23Iq8Rj_bjmi45UlxUa7REkZHDFdIxuqgj5dqBi4wsAdEVQYxz7BmJsD-RXVUdNWb74zQNqzBGt5nAPYm3PHnCrd4LoCsXJMtYjRt1d68ioDGmJNKTbK4mUDahcza4Dnc-P_IeFyJedwqX-L6ar2k5YOQJT_Adc0DakIfdI_m1pu8VxtEXuj11dXwhYGeFZWpRj70TFAHCSx5vydGZ_8DVv4FOT2t31mdS1jz9Y_U9DXpRDR-y5FcY--4GbB_m7jC3N-rhgQ1vRJ6xtjx6RQmzQ9ygrczem9QXe8HRiGbST3I64W0zL4GXizr63HXbkh6YwX_TaiKniozTTZoy7jkOjL7qHlG77_Z0VyXtc","tags":[],"target":"","quantity":"0","data":"U0kyMDIwSVdBV0ZBRlRPQlRFTzIwMzBJV0hJTVBUTUQ","reward":"0","signature":"aNePB8Pio0VwZjJjR13gyDQ9T1dk0kd9h-baQe2DRRR7r_kBZvyLiw2vfD2RraEwBpD7rHVINRJEJFnqeU9ZyGglL990-pCp-pcg5PrXJrSNT_FWdwrc5ZUyPtuEZ5kMe9weRxUd2I0JISiDOvG0XHYBYiSXQ6tIV7ohYUV6JXGwCV81h9oYI4JwrUfGxrTgttt-6Zc9bN-YAjaC5X2VaoprBcxku2S6sOYeMN2Z3pQhoLeis8M6Kbf5Sy8MloZAs-atzeRaeXryllI9SR-EBDmHQ0ubP6YKM2_0fuNJPrr2k5uP8frihL3Epbd0f5Zh2-gyS7YpQ1wX68-pzVC8R9iTskDr8ACfYCRDDalD72TWK3zfIKPDyvqtw3nB9smzjcFs-042yZx5_AWbyHXWddtnY_Lra8_GL7ZWbOgJ1ViUCfQSJN7Jrnja5jg_m10qrtaMSmaZmrd-Kih9m7ydr5kPIVJ4JmpTWUStHeWIjoXkFa_vb1a6mmte9W19cFCgeR_BK8d12dpt1g9Ucw--GPaQf6X-jBVOGhZfZgLiaBAHU-Zug1rsHwLos9Ozou2hgnSW2adJDxIpnhkudXAL4OdJkC2mRBYyLIMmTpETsoIMUAtUaqN2ssbJ60XpYeQdCftP1LCYZFQzTQ5MvCiydOFOiP5kTmpL702bHNQjXJ4"} ================================================ FILE: genesis_data/genesis_txs/SBhaeMSTQm3rS6puYacdT-4wzlnkBlZ1agn6IW6Oyg8.json ================================================ 
{"id":"SBhaeMSTQm3rS6puYacdT-4wzlnkBlZ1agn6IW6Oyg8","last_tx":"","owner":"o3SQGukA79nTOdmFoLNt6I1OSvFIC_cIlcJjGjKwfcFMZaP6DDL5Q1QJc15GEi2gNiAs7HsNtVzDBqHeTEaaMdfF5MyjF-nVa739AkvAFj-hszgvu8SfRkd2YHzjiufPQiL8nZh2UMHwK6NtNVdT5GdbNmAHMtCppc32fZJDi9OKgQTg98hnQKLIttNtTOTBKaEmXltuFprMoAQgJit98JlAKN7gmqVJlPi0LE4Q8Sisx_kcy2Gq4wDEntZUiUeqwLw-P3xzNR99K3TSp3J2YQbnp2FVuZhR29pUmiktQmFW1lJaQMyrRk8GjrrBfWUxmj7pQQ32gXw4l14Sw9NpmaSodh1TDNnx79VfnTlQITEp_e1nsQbE16zN5Em0-lOSgAgaGq-tKqE5xqFZOov003vjnlESnsn_8ruTy40bmy7B8v8mPuuqziv_g8qwY4RfX3oH7L7RRfsM6i5WmqR458Hq65xkxadMbU0vnpkYtX0-Y-EHaLT6NkTZVb7QGdXluNv3CsaN9qDxhgYGgSiaFSZERb1s1i66A_7B_iiD6ixWXFiqM6mBGaAose4nMED9JDe8c-9UrJQ7rxi07P-L6DDHzx1e3aUt4WIt8KMG0yMd79vcvlh56En0vOE5AMZG3Hb2NT8wEI3de9RmYFcGamijMf53qSHQLr-JFPMvrps","tags":[],"target":"","quantity":"0","data":"SGVsbG8gb3VyIGZ1dHVyZSBzZWx2ZXMuIFdlIHRyaWVk4oCm","reward":"0","signature":"LjSevjaKn1AqFy14nx79Fg-jkuLiENQVZsWwLiS5VTOTQ7T_7hVfQec08y9Sdoqv4Jv8sVYa9jL5RZfNUGCDgf1fpQ6_4ThbSz53N7aWb0sbZpIBUimd4P_-FDug1bWeA_UDoCmz53SGJjOZaY-uhOrVosHZx1e4FTeSqZVT4n5s_QH6c4nrq7PHKkwsHbo3r3HYnddgo7BY9h-GIAKPtDxq1qGA0RAHkfkUvgneqfRjZxgHoIFpYbvyI_lD4-inSW_8qIQRorISDrcYCmik68Sj17wzaRKnaljwSViB0f2wsfkbJV36hJxEfqSEkaeXVo4z72-WPzuWHXizfkp92sUejyjBnvslaVEFJ1_h90gosc6ZmM6nK2aIprkJa8f2lmdwRRRoIintDuT--2N_Aur8z-DMGB3-9Ni9gNv3Tq41ScHD12Mq8e2_BXgEnVLIIWLvHKCoGMGQlhzIKsYLy4_SDt8ujWuNdOoD2evwfsDe3BYJmFQP3KDl91M9yu_Dpr0f0UR8SxbMSqUxV5nKU_dtOy6CVVcTBvnM7Vshoh5xDu1xLPHKYos3N-oqZMMBol5sBEkSprHTg_UghPTeGdErFpJnyuysIkAC3Dq98o2Om2y-fqDtrn27W9TMoMMiH8F7v3yATek2E5Wkq_lgSyfgYHt4ccrl8cDGWxch43w"} ================================================ FILE: genesis_data/genesis_txs/SCN8yn0cQASui1DeV4mMYeQrRn8eXKr7Cp9ll7L3UfI.json ================================================ 
{"id":"SCN8yn0cQASui1DeV4mMYeQrRn8eXKr7Cp9ll7L3UfI","last_tx":"","owner":"pqZ3jbKaPmncUkZ93YuOo50A6owD_Y41qhJBcgudK0PuJn9JpRCFgiIqdbvUhUwRE65dkb92Xyl2MZvVTEgtnqgzdV5bDwh6-Fi6uHcneJg-uksrsXzCd0SReg0C2X9ObqHkti07vxtFK8DRSmDkLE6I5LwY74-6OE17snHviJAJkzFB0iMPhuD2p3O18fl_yWWOMMOVs080ckrB2XQeUPSts5CBquZJgeVThZiICJbHTBGOzGtRrvXk4iUHquV3OnhN2rnDriPftc2fqYX3lbF_q5PGNuUBj2f20m6_hS48pv03s3Xz8gIOhR7UsvmHSk13UX_7sByGhE56nr_RUXoqurS9npWo6Xwyt58YpcgvmoNjVUL_4r6bR-47HoLdQXnLNLsYJojSAS1y3PSimP7sJUYbiPg2lpzdliL4kJG1sywvb_R4D7ck6uE9E8-VH7RnC0KKiu7DQIAB1PoyLYuA60tyYEYwjzPvzUMfY2RiotjYPwayQHos1K5960skm-sd7bAfSgaZgOnBJTMv0LYimiw942A5Fu3PfB3Ctqx95XJyBfC7IObxBO9tihePRujbocWYnuO0GeYbUU8Upl0072OfIu-jRam3COqvjqvXOo7Mr5OIReMZWvjX4F_X-6_TZ5Ta288h6QVrVCGmiM2BB1hVmk4S06QZbFxZSYk","tags":[],"target":"","quantity":"0","data":"TGV0IGZyZWVkb20gcHJldmFpbCE","reward":"0","signature":"DV6_ABJDG5KZ6FalZd6K9vFbMfB4uUPBMvIOdB4h_wod2Uw0294GnJHHMLh2Oqr2m3qH0A4KiXmmUOiUTk5pfSJwwSmycx7iZEYu3k48LQ-VyDZXqt6JaExrGzH0axdmJTUf0sUS2nRghLtM5bC23aSrAU7qYNN1QJyBzW6TWw7qWEHHJfz8XUaEUdfhBcJO3XWR9qzN-Tic9EIFhK1QmKPFxpXCXopLR28QHPGuusEI3R8afLdT82WA8sk83CCzzqMSNqJdZT6q135zzyrrjeTR_NILUqZSRsJ1-oIqSEy8lA9A78YxYCnCUt1xFyOow4J3q8HtGO2Kwd2BPMYE-gAMC9Q84BUV-ccj7gxD3S3ZGkBd2BX_L3sCUx20lr1s-S6oo3wp3OwCV48dsVs729B1IXI6uIfTtebZqDkVkSQMNeYj-TSIpduAT4AcAh3iU_AmDQPeuXTOyhXjo7Z5g2c3iD-GII49tHaqUpXXRZTLkHhd_KFWUayX_1fXsa4xNTQCZBcBM4VqfirFcXJNK10zB_Zoo9nTIPVJencD4f0dES_ZdBakyJ_ReKH7nkOD6K_F2TGN8BvbCX6Vewu_dY8CHsducBbtq_cW7aDjR1GU2uwBLo2EHxC28vPRJmRUjjjol1KOx1Qqfh5QiaONYv7g84qxN4zNwmUS7ISnjZk"} ================================================ FILE: genesis_data/genesis_txs/SHxtj5_gLdJMI-6CcspsDbFBuU_74df3I4-sAJkAr6w.json ================================================ 
{"id":"SHxtj5_gLdJMI-6CcspsDbFBuU_74df3I4-sAJkAr6w","last_tx":"","owner":"vxta9Tv-wEfhHJ2eHdCbBob95AevSn0LEsM9x1_x7D-psaLR-x7AWQPuBC7XZ_dLoFUXIVbLnwma5JcHQvEs70S6EuUUPSiq0-HNzL5dDj9pfE_Krc1AtDuSfFd8WLQU3CtbZuJLyHXLSLAhMkZcZrKh2SxD7F5oos9I_xBWxwMVRetCgfAKw8DAVVtGVP26ggIJUwER0j0V4lBa46fELfpq2wUqQ02R6Gn6uro2juoa9IiAr_P5t_CE9ZnM--yskq2PB0BHIJcrMdPYYlW0qlCervDlmAaVIrK3DQBwVxATS8mPQ4bXcZdnptvw39Mjeo4Fx3TLMuoGnRjLI5hwJokJJGNL5g5x2QKZ27KhnTQLBXB8FIkn-2hmBZZKlfU1aI9R8KDTjCToLwCck8Jnk62DcR5nN0b9r5wxsNImQQejaBc072umCu0iwFpRyTw6oxvC5RnGUEzQmV86fguu8q1F0jAxZeYWtdxAn7aFCw7O8tMaMyRp3p8I_RmE2yujk1IL7jP7MT94SzmAWTIaYNWGClniYtO4J3t7qRaU6IWRJH7nzmPWOIijh2x59u3mBDUU213QyAghVH_C4riqBrUkhfxu4KV4fEVQpjX9B_SVznyWSS7cNCjR6ySljZi9CF_4uKn_oyXMX7dd9Nmg34urA0igsj-t3hKqqjaRd30","tags":[],"target":"","quantity":"0","data":"YXNk","reward":"0","signature":"fJ_-UTZysSwVs-Y75ROaJuoPi-1GLEY2o38tbJGGDfq_hyYuO5LvOjCQ4ihps-ZN33NH3gCokttfocabagjyk0IyZho6pWaA-Ty4KXvCnHehluFYPYqosv4PiAeEt2eCYSoT3ReJtmpYbWHHOxEbqyCosn84_obvrkCr4Wrp7LJCCTgAHhAMw3SHUQuGHz6-bWT9z0L1EyYROx3bjEFPq8Vo1I7lH9Xge0zQsnk-JoyyHmF27SnM3q0780LUJtgFK4ONg5hViUHiJjcVKAS5TC7i5ESW40k-eTwu9H1RZksyzltCXDoOQy8uNXSFbh6eNZ4Le-Ct60CTY-ElIsudx45CfSZ33XxyG7jrDg2rXLuthn17eZpSpAsuohKSLzOdnuD0jAL_M4nNEocWKF7WCOqusmdsuoeX87zw86EjKyF5Yp4-LYSR4MRj9_kEqvgV0ECKDANp2riRCbekI7fP5w6XYUSwlGgowz3KcagVVUBnr1M8YaXVzb__j_5vH5TAVTf0wQLL01z163ARuiSnWJK8Kf417iCV7Oy7b1jBFfe79jkZl8I0PkU7n7RxMLmfEY2x6KbYwiU0qX-gP2yyE1iydP7TWRR5ZlCo3MSbZT37xSm5gZcPVq8eMFszTBrs0-VRXRAHoHwve23R1J9J2KmlF7BdIhk0kb37INGPRdI"} ================================================ FILE: genesis_data/genesis_txs/SJXMM0tlXown7l3ffjhsiKf311FDTRa7QkKX8tgyEZ8.json ================================================ 
{"id":"SJXMM0tlXown7l3ffjhsiKf311FDTRa7QkKX8tgyEZ8","last_tx":"","owner":"3pt7riOg0KCqlhgEEHylXYH2a95lqbTMr5WR_f1kD3fx6yJHcTh8Pj2WU8lEVc8GCfzX1hzBCgkBFq3Z1BsKjAbIWjrzjPrigDkXic6T4V-ypzKw8gCIYVVAMnEla58RJgcjmSn7fexKCAoQXBrMv7sZb0-uUUR3LY4GTfRjPNZfJv7CujLPWJJRcQ36tZ1vHbFWN00_KMWe5_2m8Ww8PiXA5pi-MEymiqjBFoWPywJUybB81UZ8uWyKVQnu0qpLSXaX5BQZOdqbi91AUjvvemLbbWuoUksXPESpbKx518IfbdoiupdkF-yYwM_Lnj3AarPjc3gxgSKFLqGcdTbEyOJ1azO1zVlLhxsKBYLMfoyFmlA66dEseynb3GfHA506GlCVKFfFH10AfTYcc1WLUUzyBZlE-B3bOknRQJ4I5uqooffHmfX2fFRmTF3DDvy-QBiFfL-bJoI9NHBR01dKlmHZTBsG6KO_bRSvfJhciHoPYkog1utM4XEyfJ3i0xw0J5urrhkajQbx2BoVb0uiojvaOvcfQsRh7S7YsKocnBDIl_4qemSxjwD3t8RfNoFfzdjblv1ltgGMK-SnSfMGJKPo8S-DVIZqBvdos66_d0e7WHMi1K13PmnfEcBJeSbdLG5Zsi4oEyaFHbuT9Bx-wB8j6Q09XUEdS_b94AxOG_k","tags":[],"target":"","quantity":"0","data":"0KPQtNCw0YfQvdC-0Lkg0YDQtdCw0LvQuNC30LDRhtC40Lgg0LrQvtC90YbQtdC_0YbQuNC4IQ","reward":"0","signature":"MUt9CH05n3eNjI4FounCZ8oORsQB8QkT2UHJS8Oekfx7KNlMer23tlxCFrrfTxQwVY7uTGutVyWuZp4xPD4FFsw1ytIAaC7056jKmhYXESmyYvHF0BV-EYtP3DP49lACUTgyBm_ekNij5NTJAfqjFv_DYdXt7kGha9tYSjT8NDKQHIW3qeD9vuBs9vQ1el66i0rQ8Gr1LpMMxV8x6J2o5d1CN2PE533zKbxjU8OqcN6FKKmSjmEBj84ihsa5tpGdA5PMBAcKMMdI2qyek5NrJJPDA_y0o6SlUyk_8q5iBLYWw0Bb8nmi56w-JHbd778IaKHIaaMEuXgoLnRjluikzNxzPk6Am9pwAWnoSCrI9cjb1D4pXZlIjOszPwixfkR5C_hpo1w7yNqcw7iNFpBPWS9CUiknEqbD6zw-PdwdaX7cYhzrFNoK4oE1BUlkd_dYZ6aDLDvQGDb0hI97C83ziak7cGk9H0l8Fd25u7pUKFejEpEIohog5dwfRLaq8wYkAOMuCYaV38svImIcfklzPwxB2a6_oSLnD_w2Pi0uro2PuhUkrVtnYcm4OkxwCDsOO-RKLn0sA6XrN8t5I9EcMTQQ7N6FNcHjhHDAwe6hZ98Vab_MioGI5wz8Ph1WLY-2YhZaLwWzX7IblzkMdmMDmt2810PdyAZ4YQbobA5xlD0"} ================================================ FILE: genesis_data/genesis_txs/SWNkfm9ZZPCiYKFg6oIW_IgqJp5Ypbp-Fs9S7YgPm0c.json ================================================ 
{"id":"SWNkfm9ZZPCiYKFg6oIW_IgqJp5Ypbp-Fs9S7YgPm0c","last_tx":"","owner":"xSjidkjEW1TTEO6lsZhF98M8gPeAZBwZkif5JMDcdGoXI2nYneqIlPvSeWkEVKOllqhgPdyWaSHYihFCIHOwsFmy51xh-HDUCQTv15FQd9MZbW0bvkEjgIfwHQq-JWPuObaIRBMBvE97IAq_4LhmhUWcvjXt8vej1CjIucn99_LJGP4u841cw6whJu4-kosoQIF2Zg_dfnFw6WtFJq6m66uOUjKKZab3YURhTjDBgvVcboqG-itaFtevYsb4VFl0jvwLXPaFhTtBR1eD5uNUQLxVP2OcwZjTmr-eCelAxDS7YGirXwki9CSr-MXgNhpBLvXT6l-TInPLYbj7NJrzkffpnc9QAHJK7B7IlPmukdYuYNF-nip1y8hhEowTU4yNt0Sjhfnsiycr3rHrv9gnPSXZ5Epv9seHEZwauPI5S8Q2Qiax9F61HiDpnW5FdoNicaJAWYT9sEpk45pgV5tMT3IQhGRBPt1oPq8YWYwK744xSiJUzR_JkPAHoHY0jlIIl5lkxjWiKNRs82jnFhK_HgC3sZObhKkKNWnmEEfKeJucX6HuAs5IfEUv7Jbvgdw2qTrT-Hh9Auiv5YHbOPTMxNudtmTfNAEFde5_9mgwJdzD4rRLE3ZtYeKg_-FvfFLtAoa2PLRguHuCL5KQdvlrNSGSuGNcyh_Fge08OHX6bbE","tags":[],"target":"","quantity":"0","data":"SGV5IEFyY2hhaW4hISBTYW50b3NoIGhlcmU","reward":"0","signature":"cRuFXYLofyu3oac5lZrOTtGeyids7QQPOSnWjeO-8KoaUuc5SWLvdlzXp2QZtJ02jQKGp9yVIHr_peyu9bDF0IV1XOvJ-mdQe-LlcREI9s_GnfLTzfUbZx62scffRt-bhPGXG6O-KEo_oTSrK7EJcEGEIIhqasUDnX-vlUr6OvctUbTSbRd_du7jhOOJA-6uKK3edF0pHErpGBzqiaHzt1ts2iVfcK_CO9TazEuIUoaCtCegLPDDL56ygPxqxE1Rwhh87_5VS-KQJgev4YnnjBq8xBv_egxU3aHZ2Crc5INT9o9FmmsYOlkxkBRvNdD0zgNHjxaVaBDzfbv9cFP0Mn8NEm_9hFYhOeFuePZvFbzLtNwuzcm7T9CFQHfSyRIlv3bhEcoE9sEyfRvAfpP5gDV78q36b-o1h0Hp_AqeZBswle0jvJtdbmXA3RJbQxRXzfidHPiMZvFPjwJlReXXmDIO8LT0jCYzgpuHHYuH9TGMrML8hOsO5miR6_b_IgAvETQNRCffWujGOwtdmoEovFQ426TXWEwaDvAeL1ksTjE2mMx1GRawTuvU2pr28U_FmZzI119MemPMsw0gDQXJh5fJy8TnQb1GnMLdAkbvKyIJS-Cev1AH29pr_AO54NVjKXdXXx81FEEqJvMx_taJiul85YTOBtxUya1vdONKsm8"} ================================================ FILE: genesis_data/genesis_txs/TFX7m_Kf56rV6LNuyQ31NeVoDHJ3x0YqhIv4-IBQ-3s.json ================================================ 
{"id":"TFX7m_Kf56rV6LNuyQ31NeVoDHJ3x0YqhIv4-IBQ-3s","last_tx":"","owner":"rXPk340SHXLKky6ZFKD8zaZwHaxWxMQLSELy6XX3I6ZBpUVtJdhQZYa59Rswc-9IqZRrZQXNN1TsSKk_h56bKs0cOqae1fQhNfV5lymVjm3-5RjtNQ7gjVjyDh2shugh8Y8H2sD8E2x3S4au0ZbvkXCNIMegTAMm-uBi_U0fy5zZS1FXlUW8BI6NfEmjqBsZRa-UD_yRpGR9g1AGOhWVTrTpLRtnBefaOo-g7TnRzSC8SByKowO0tKz_ph_iUi1VZN8OroNHmniY-Slb5OSUd1eY0hqWldud3C2dVznMd0kZCgqcpO4SmriLApSJdkOPXFMQWpeb8zO6S2md4xAJy4ou4PTVjGzdLxN-WndC7ujL9gPd7v--LPFxiH0bKpl3TOuI3NAntYePxE4V0RWOL4D1kl6f1KhIAeBYeCqx-N5zMId0KZ6I5y3lDcNh0FvMDNPab3uNGe8yK4bOpi8ZDF7FvbWXbrY69oEpguwfTO014kiBgF505IQzC870ttab0sZ43Z3KTq8QWZvOqII7I_ArA3ihHVGURwkDQ1L7HYANHSY-d2s4PqunWZre0HGezDHtcmTy7S5QvHSCnRhAjw08P5pTtllVfHHU9baosNHiLIcMuFiI23FwC_B-pf4H1VGu41HOoWb-m9Duy7MSlhDaAIgCYS5SAdsWFawdXoM","tags":[],"target":"","quantity":"0","data":"SSdtIHJlYWxseSBleGNpdGVkIGFib3V0IHRoaXM","reward":"0","signature":"FEs0JsWvJ5tacf03JZdkqpafBsfYtBiPGnZEe9-Y5kMK4SkVltH3Bknr0JxvHUB1k0AOyYG50-zT23GTab8Y1g1zGcgWwVIOK9uaVq8GhyLJ6y81PvzFyi97iuUO8Si69dbLFWNkBE3eFNUyVYa3J_NpP2izKbtFwxXw0YTob4ojeQC-zN8mW14m_tUvLt9V7QVLC3JhRX-dlUFPjBco93mF4F_ycvqYvlJQk6bof0cxH4BaO1kWslIdoB-Gi9LJ-KJq7RcsZNDbFo_VxZaT1fgIVgBeX5jcggXW4iuqPcqpzMp8ojMHpvWJFRx60JlEBnYYTbkUI7pX8kDepbhUAD0Cm5nLZqJUY8yWOSoZA6Rxwk1zUYpFSzqqZdCweSRcLtesWTRX-dAEkGgwgVoCQuVpobS52Wzqksrd5demb0WrAO_3px46_jyBMYFa98_jRmyp80lXReZNWNznQq1BaosiG4G3Rn7m5Kq9nWIf8U_W7MS11qXcUQFS9S17w23p0_qK3oxAMQtRDcax-RjlxGn9fQeHojRFzzCHgGHCQNugiS3IMmMn8TqrpWouOKwHYLH-Ygiek1Xm2oBdEVT8NY3LPkK-iH5qMuHzwhJsYkcqQESRMOjpXFtobpPz6EariUnMqi07NJgafSHXnsXggknM2-7KB6FYR--9sam3Mb0"} ================================================ FILE: genesis_data/genesis_txs/TGdhJ01pPw49A0ZIaCCcYBnL-RPK_3KZH3cA6E9dVqc.json ================================================ 
{"id":"TGdhJ01pPw49A0ZIaCCcYBnL-RPK_3KZH3cA6E9dVqc","last_tx":"","owner":"6W4NK5__ZpYAEIPeepUtNOJWVM_CJiq3MTFOVFW9bSzxOA0uaQKUneY5lAMPzJz3RaTAEsSylc26cZShqoLrXA1Ko19u6nXxrAfSK48Kj3-ZRHFFEsRIpx7w-y89oT_huI6CSDEq4_xP4hlCT71y9_boZYp6VQjuArGA1JUXeaDMfwxifW5_eYqbMd6gslu2dpXUJcwiJYHSNzeqsoPcl3cNZA7lcz26nJC3123IsKC8bx5g7TnUEc6ETf2M8mm4cW5pddmRWAl45Ac16Z3ktAEFcQijLirgTVgXxaXgUQ0I-rhtjkbIgXOy-h8pe42qGit_RWoGRgFt7nBjCZNRIaiFJzWumNbrrEJd1i-t_ChhV9Vyygrc7Ykgtt6Eze3Vu_lyRzzobjs8jFeZ-VQg0dAXFi9SrSWybSOSHBL3sXMnAXZJDMDMBe6cWbkSXQJhIFG1YRXA-ZsF8Oc0kGUa9tESbGxBFuXpF_La1arLz25YQprPnV2eqxC8oyHNoNb-ngTeZT6EuviXUcPx1OiTOJlF2pCqpRuoIDlfIS31xieTQt__XSnqbbFtduZTKQX3EI4K_ItZEQo_HRUKzBlIm0GejBzg-VXRuTYqN52ouIkShBaloXx7P4x86fMoHLWGpFxcXWbAzf3xYiFFsIAfomvQUEMGRPGw1KITxt0CorM","tags":[],"target":"","quantity":"0","data":"SGVsbG8","reward":"0","signature":"FyHDGKpbyePfXLREXJTLLEuDq44XZuKyGDmgn35TmLOVxgIiWYGzgKifmRa7mvTxRNItfk005nr01lIbj0Nw9s_1jHZJJagFPXTBGxj5MFQmOO5YZGEFnat4Y3I1TeN-f4dSOxUXKGgMvR2npbn_9DiCUZ7oFlmYgf3hHWEgRO55TGs90Y0ky8bVT_9xwJ0FdTqRny4um2o4QE5-kTUS0qAW9Fji3PVBlStjCVUCrUgQ7uc2DI4vWK4Dooqz23Wp1vGr6HlqX__q8lnT-VdHdJMECL8NWWMiMry3AIC8uVPbw0AD4p_yrg3xdiyAtpFLgWG2MPTkWxGY0Vk74Jc_qZeg593aY6jt6FPm_BdLUFlPMdqu7Lsvyp1hHnvXCiQw6tYbZhw0BAQhit8DkzEmTQFu11hw6isNijXrheiipvbcw3pSpbTdwHZ1CinBeT7sPi6FKoln34UyUKFK_cxaaHj-26J2URfNTUGAmPZb8rLkmMsRm7XcookmR8k71uvEGq3YYTRSqis_geOuksqjACIKM60mVjK6hZtE9jjplXS70_M5zP_3ucQa2uTk5BsITNNU7yXgUN3QrouOrsYJN67wLmcEoZStVp3hCu3bfWV4T5gVgqUQnkE-2xMoqm8V6_9sdzBeDeNnCNQjZ04y4Gd_GrM3bQKcjP9dtP3Cz_I"} ================================================ FILE: genesis_data/genesis_txs/TGp-18LYjSWQQ36gs5prU-vDgteOL79aywxXoDS-w0c.json ================================================ 
{"id":"TGp-18LYjSWQQ36gs5prU-vDgteOL79aywxXoDS-w0c","last_tx":"","owner":"u_mRLmTOFEs0l-PA29CY-YhkC-s8XxAgz3Q_sXG7WFDPzZMC_yRAC2PJTOlMuZHVO6eEPVQrp7CoSyOKb9CuCExUsMQMg-g6qQgDd32_07ikWhFKZMFT-Iyobr-HMszL23wGRsUeuepNXLBdxkKKBIks6Ec9u_dw9V6dTaQG2ju-vuj-KYXOIjGkG9_bO1ts6pQaJ2552XC9NtuarurVO6F15qAqUgBwrP3MhC-wHBeGD8HENhsCEB6BsKZ7sP3cFGCvtZRNSL5TnMVz_WQhrFRINTfJhy4qEdgUIHaQx-18fecztgyq7EOBeo1-n6MG8bIJ7LbW6cbGTWwoL5LQrlBiMEsWf32LUpCSwa02ppn9mLKJNS0POKFrHOn_wJrMezEwFSVALFBHsJfSTXLk8bQJWnx_WkDZxDWcmutA60mBhTIZHBXYqGVlVvPfAnTidXX15axpp_phvvCnVBbzr_f7T7x7wiUpqPblE0VLQnwWaBO_soSWn7gThJyWO-5XaKKUrtNjj8U-fesRNYoYU-d8uiz871eot9UGph9DCQkC0PcRxZRCaLcLrFDa-ITvNK61MBfdDXd6ZCdFtL2E2fqL9ney54tJQ84TcuURN7kxfe9CkUFComTjnsBn633X6iyGqHBNVWx4TNRe8Z6onqidfOBS_ycH-R5LO2NdlKs","tags":[],"target":"","quantity":"0","data":"YW5kIHNvb24gdG8gYmUgTm9haCBhbmQgcG9zc2libHkgYW55IG90aGVyIGtpZChzKS9ncmFuZC9ncmVhdC1ncmFuZCBldGMuIHRoYXQgd2UgbWlnaHQgaGF2ZS4gSSBsb3ZlIHlvdSBhbGwgd2l0aCBhbGwgbXkgaGVhcnQuIE5vdGhpbmcgY291bGQgZXZlciBjaGFuZ2UgdGhhdC4gSSBob3BlIG9uZSBkYXkgZXZlcnkgY2hpbGQgb24gZWFydGggY2FuIGZlZWwgdGhpcyBsb3ZlLg","reward":"0","signature":"Vg7kS0BjLFiqQvYXmQuseJyjmfdStUxBL9GEI2HANfrgZVri4Nr0vPNMgmw9FJkW5AVX2CBW5roOP6cRiuN4-oT7dPyECgc4_sXoae2b8S-qoPtFiq-eut2VaTBKCBdf5JQhLJC7tTMc9drqndLmEn2ROEX-KTi5ED2RhdtbrIUjRNFpjw8tkPnatnD-RGDZjB-D6SCAuYCYvh-6azMI7L-Vd77y-m-x9xyyuju3cMl5XrFbF028Who4UUY9Z8dQaRN9VRb4-63Q1bohHYXTbi6rTJdV-nc1M2NmoYIw0D3S22Pxwq2L2N1dBYxX-DLZZSxFpzBCDlYFMFk9_YmnSSW6iYgqPn7VJAhh18QPNH-aAKUC-q1OaYnEtxgCBG7pHHNreX2aTOBLRYhU4E4vL2XvYbelBNMZ9CKiVnmwJHneQpwuLG5HSxLV__g3H45Its7_w6j3xYrXp2gKPpbBUqUMYwMfgDFN27cY4caQiA_S3u_jCCeAiIBAAzubCDBwMlIF9qhjLFL4fI1s3Qktzy2U7IAHEZuXKyGj7BXQZNMV8Z6M65GyJl_vhhFXcWTMXBPWc8bcg6uXsGznpZYPCxM0CQ1Vq8C-UIzplM3uBtIUM2-6BAltvGmA2cqeZCOIEqmX_NsilX_42p62AWJwxRfxBGtWr-y26zZU-4PRnRk"} ================================================ FILE: genesis_data/genesis_txs/TUIdVI5yQH50laHvkxgAnTV6uuE2LXXH3pxIe6Q2S7I.json ================================================ 
{"id":"TUIdVI5yQH50laHvkxgAnTV6uuE2LXXH3pxIe6Q2S7I","last_tx":"","owner":"ua0v7mt3Dhj_yFfuVQGi0BHLXN-s3HVVA8PIhxrVeqtzFKIjcGBf_i9LONMxWKvBByoRPkiA02JgmL4iBQj0cCNY4vyseQEDfxrZE2G-MYC8EIAsxXgS3w5j1zgKrl9WDd-nA_tHX4Igez_LZhZ1UbyvfvEPbxHdTVHa6iAv_4hgZxb4Yu_6HLzs_g5e5Vx1-xf-xfFOaXZFyYGtGjjbYALcjax1HGdqjSJtxcdiiAgI56_Fdju2l8f1wwMZDV_o6RyVlvTpjSof4iihY-HyuIrHJnc6u8zmujCdMPUrc_bieTb14cgbsmi0vcMuIKXML36BQAIX4jSG5azIuAtSG-ri5HMJudc77Ro7f4YH7Mfjwu5rIDmO4b83BbwrMzcEpC9W09rfeWiTGOp6YrPs90rV_H9_CkQH_xeXVMi35G9GD5bns7A6M3hVIgzJdgLOgoQ1OKfak4MGr_BTcga2ztPS29Zh63SZhmn0QGqJG6BLFI744uWZEBDY4MiDR3Sr2zupl2yQFys1UirO0D5VociUbzQ6nvmIYVOZfO4ImHl-sYtm-pOU6bAdmEho1lEwN-o9m14YPZORtwdG4AnEEyxepHX45dO-NDM1zT2zrcXX0scG3jAaRJxYcHjkW4HgNGuoeLd7Zc-XvBsgJb-WRTsMSeRbiofja94T3BgITSs","tags":[],"target":"","quantity":"0","data":"Uk9DSyBTT0xJRA","reward":"0","signature":"Q8ADpFlNUVyTf1dE6w1LivfCvInw2vGIfrhaPQGk7VNcU5-Ln2fPAKrQBQJ6p-6C8HOTR6zcWJvNIyFzb9EacLeB--2zshTl28JT8RJkRT4KZYCyBvb39dFN_D3cO4p0WiN7dCI3YzF1VgrjRFlIKK48xfjMlR9LWtL9id95-mDgakC40skNrOQXWZcW9UhUXQ9khvKOAg9LkZ300khdWCm8y1Q67ZsNHxsvGyd4HZc9dhWeGZxc1XRG1O1VbZKVNiKmlnXS-6p1AswcbW2cWm7wEpy7fmD_VmR1wCV49BGCRtYMCybJLF9cJ2qri_t6wmeUFOzBJVpgOpAs_jkc_Hai9yHS9681VWigslJPDD4cz28ucZentgq_N9k3SfsRjw7pCqBBKI88E5nEWSxl5NfMuTtomWcWhPBvmbASDYWzsZyMi59L4QsK_XK0Kel6jKx3HQcvoPQTOb8_egRhNyXxhV5y8BodGPwzpvs_nh5F9DAnT2_6AANE6C3gtfV8cfqgmhMWAxbfKsY5p2smkm9tw8f9FdweAD2-ty4H48ny3eaolM58F5CWE95DrflJosRHEC1RpcqPr9IFZUCa4NSkU6UZyH5wg-ZzpGQsel_oGVyUG4XxOeHpxitJMhvpSUF6f2XlmuJ0gxFgkk1VTJDwZ9g30EM8_Si_Uhtq_0w"} ================================================ FILE: genesis_data/genesis_txs/TkN4QLdC4tu-_Po50RYwF33shyHcanHSe_BKpryK0JA.json ================================================ 
{"id":"TkN4QLdC4tu-_Po50RYwF33shyHcanHSe_BKpryK0JA","last_tx":"","owner":"sVbQsAh8Cep7uJm3ferXj-U54bkY1s3yKuJMxZ93KYePva3DT00YNJKd4JoJXFyuZWqbS_UdmI2DRANCjmKkEOiG4kwAKtt6IsgIA21PH1jOnTR8isIAleQV8FDUFSC5IsVArXJz5IsYxYb-IBOfiHKOPzdwgBw9TaELvfx66shqu3hEOksgE1g2JJwnIvGemAuYjrn1K4eRXMylmFUJqkWSA2qulFzw4L1cHeEUjWsxDjYJlsZS1ZeTRkZPkw30XK1iNJQsF7P0ixVrwKK1OzZsOoNiQ8xZb6VHoe7PikLWI8KOWmBUc4f75dzebGJmGCcYiHAuNfbVmgLEI2ykGZ_HRpViZ2UaKBmfTS0FoJR8_5fV0i3CV4iSZW5uzotLsCRN6RKzGwzDZW-ufdlJ7E3Ihqw401Wqqytzq2oLqQBjhs6-oOlfw_snZg8d1EFhYKwrjcNkVrbDyfYqbenmYJHo2F1PeiG89V--vsQ28xf7YXQ9IQZ2kPK6P5yf72W7FOH92s_KI4O9svKlcSN1SLyoIUSi5wcWwhMroGRPfXLzH8qrnefEjN6tMhQdAQc3zo67SkUJWjgXV8YiGF6nhnnkKTqenX4P4cxmYeGxUzjHFNNsoCHOSuXsV7UM-mfeLY_O8Sb2EZ7dHJbtMDEs-df3IPmfPB4lUCjifKnvLS0","tags":[],"target":"","quantity":"0","data":"SGlnaCByaXNr","reward":"0","signature":"GXO3FB1neK2JrH1QXONZiLFx6URMusNAyqXIkBbwcjtQXh8sM2gIbRUPUlT0kuDpDqEYY9RIrKJm2F7f5t4XR2lEHzbfZSICnQD1xkhx5DlohMvO99-WgWDGbPaxI4MkM2GkskFTU0EZMXOS_F0Vnni6Xrhum2Edc7v7giPNdKH-LVX7s0sb-UkIbDieyriY8IZv7Rwclxe_vBWE2Fr2PfVvQj7GcV8w-Qj6d1Uf24ks16tLbJ2vDM8z0AtwW0-ukKN8Ntrfbnx9mgBUVyiiEs1hJLRL3Os2YBEbgzjJzZ7V7UG2oxkfBQLsnJAK4mf15Qp2Zmj9Q1tm6urR9ndHyskjsFLFhbY4ZQYIgWflUQfLx2cCgO3rA1SJ4mzPxVxCeOh2F8-ZaawcZ05cjoRzoQglnpT8SZ5dNLsliPpAGwC77Z-GrRRuBA-DPamssL2MQCphHorUGaHN3HbgmyBP15Srvs4AnUMg3lSFYNhlqoFibHV9h7uQFBWTMaoFaSoJ525tutt4l3fJKzwNxBIFI4pNY6B8KdAPYjPw7bUsbtq7JWP8-NSKXVbmgmxh4cYFgypZSJqI7qfOrr25SvB9kbEQKpxjgoTUOc_Mao6F4iPcQHyDEAa0szznYWZ_z-6LhwAC1AahEtTcaUkhp46wDATbTxuGHQbXx4JpQ5vP8PM"} ================================================ FILE: genesis_data/genesis_txs/Tnf6b1F67AEV2r9Flj8ktSSHYoV8SeL9dFvHRkavlZo.json ================================================ 
{"id":"Tnf6b1F67AEV2r9Flj8ktSSHYoV8SeL9dFvHRkavlZo","last_tx":"","owner":"0jVL_1GX3dmYhatPZkATAH34cffj9DJ46zkzl9YyDRfYK0YVOPvkMAn4alVvSqZiEaMU0ItyjyUSg7X3fX_Q2coZ4ac5PNTALeuhR9RrFGnwQw45hlyT3MsnZ4Ytx6Ox9MZpyVVX7WA23uZ82MBmphq-2sOATePrdlmxNx9sFOe3eLQwvAc8PznP0lps9wFCDj3Lt9W-fwe_lapRuxm9p3XE2MKE1KF15_TaAzf6iuIxwhtgVO-qPwhLEaKVPhFR06QD8TDZgKNcfGZJeqvsvJrjrE1pzpr815VLD0sdS-64FXL6aGaFZ4cDPuXyx4cIDP5453X4H0CFUhI2phL3IscTRV3h7cc2fALXlXl5aUdNWIKzfOWMPa11TodXTRIyJ6Yt6LGZJmRSfjWKxP02EDb0iUvtflqP4FlD6gySimnKwVCBfPx1jGuusLU-RnmTwFAGj8gEp2UVfdjoQNyf8RRYCquWid0ny80GmFlTOi_5rpBPZbeULzfxJyKWCucpF7uCkhpTL2e4V1FEy-ivsgZ05F7eWXVYlig5TsQQwHixN1OPhRqAmUurAS-qTASpoDG2oQqbsIJ0W82gUt5q1dWCxWWlvYozLbiiPxGHkpUJifi8L6J5XpTIZy6aljxcw0MNejwS-J-RwqZk_6s4o3_LAbYqKfTt24k4hzdsTXU","tags":[],"target":"","quantity":"0","data":"SG9wZSBBcmNjaGFpbiBkb2VzIGdyZWF0IQ","reward":"0","signature":"feNNQiH_tgJT4y71d0cxfY6ubntB2kUW-89cQ77gIDsBAxQRTWXOfNK_bGc5ZHuMhEdyj97yqhMKQXf6rTqfw9EyhEngsEsWBRBQtAQer09jynKHh4yV42QidRDJ-dhynf1tDjyGqBWI_GBVlDd8x_g2BCzfoE7WZQkyz2M5PtASbTbhJG4ynWWwEWTWQA-vBsRwr4T1_TapDTEwYM1bU-W-gAsaWqJAAR3x31jMLfIR23uCC5sYor9uej2kPNF56uWFW0Sj-r-C1LFVmzosODk1BMRu_LdqYusOEoSnn2ryKFeYY0fW_RywAJUJpv4eayj7yd53mD7s_wPyeTnNpfngXbsw2IhdP2xVrSzRel4Rmi_vgdvN8Q19uUXrnOYflikOJwPnoB5BacIL3MQ7--X_hSNGnGdcHJFOtDCdxSdad7hsoa7m_FioqvSzf6nl1aVo1UhYg-nabKTUdHxJ7H1s7nsHl1eRghMEjPY3cWkmPWg-IINdqlsG-VK3HBcAho6DLm_Jy_NZo2ravpBtCTnvCnIP66b7_RD2wzYkDdqd5j7p37Em8I8-gKuGBWm18gf6edSiYrUoTP5keRBGYHPdSe6SGGsxTLUPwAxmxYbKqqC7u94z-1LP5WhcxG1DroRD3pACsMoS789uBtt6X7CMabK2pyE1f18LU_5sMZs"} ================================================ FILE: genesis_data/genesis_txs/UMk64563QZfxgZr_vKOTDrcp5XJNENF82Pji4a078YY.json ================================================ 
{"id":"UMk64563QZfxgZr_vKOTDrcp5XJNENF82Pji4a078YY","last_tx":"","owner":"2vYN73CnqAI6pnUkhQdnBaOSuvDZCsBTrj-5C3Y_c6OFxukJXjNDeXYD01BiaeTC4LjMZttmhfedA3E9sR9qx7s9PZ5F_3-2VoXjZUqYahYoFGmWq5sCLEMT4uWCNZwQOYIUGM-sUbzVoVjOmzIpej-RsH0WQ8ijsgUaIwam6BoXFsvhLRsM199KlJbY-E__AeX7U8cXxljn_1OUM8_u1FKv0YoaMiwVJWJSxR6ZXRQDV3ZEl6bofPbDqxh2JdeWtuHaeYps-a69-dWkudtvB1aNgaEfDfPM8TqwCgYujDkIEyn67rFM5vz-G2stkKa8da-kFVONTYzwjt6q-JVgbudcQa8Bj1fbYEpqejAleYu_teyd7pI0AZ9rjH1U7kHJ2tbOC08T383LDyTmkBleInjumEh6ZM9aM0RqEk2U9iRwLgEsEEPR4X3z6QCKIswEARp-uYbQVNmVvVQCWWKhTFbFN3b57sDxBXZYHT6_jHFOTBvsNhlWb2sOEL9pWhppfC4YD1Nu3O2MqfJjctHSR4FOpK-v9HxglUZLdviTRauYWGL4ZryhkOz6cW-yq-girolhVFFncpjNnYzU2EtjJwaOjEzryx8HY5ggdQgAH7B784VSslgOj4RG-Vw3y8-Fd3TlTM9fpgfZqtbNvgwf8ys05Pzx8n1TUFtYkgslaxU","tags":[],"target":"","quantity":"0","data":"SSBob3BlIHRoaXMgZG9lc24ndCBjYXVzZSBtZSBhbnkgdHJvdWJsZS4","reward":"0","signature":"E8ENbpxcvSPR7KCVSOcjmWxG6y1Yp58uUyrCV7960u1pkaFE2sFMH_b1wnUFNvojYRDP_Cus_bzWcsjPBibJ6OeHr85IenbjtSQ-Tgj0AoGpnsUzdH3J3giGaCsDSgYJF8v-tovxuJdPVPSwwYX70oGjD1MKb4DWBhkI5Xzw7JodougIH5XFwkTSPkeC-8F8Mjj3-14dtdc8dSiGc_QTacbL3pC9jY-0SWIvfiTzrrciBp67HscfbZpO-EcVwecm3hmXnZCmI306OCLcyuzTkaVQPciIRrR9d-GXr5ah-ry7UpctJaX9uerXxdhaNV5gFHPZEBq1BJJavhDnlWndtMGINq3dAepKP9dgXkAShnosXzzKgcvaK-XyrCvR74D5jEuBNyuv5HHGQcJjZ9-W1GDmHsP-64UWTwQC5mO5C7rP_TIJU93G_cSULBL9DJlrhwAyrk0dhq2gYSh-xY1RxoE1JzTTVR0sXvbiFMazxvca9i0SbKo4HDYgf8z5qJ9qdywMKFWr1Ep0fszdnvcG_9TvhYVi9J_bWOXkVbyyqxoCtqD6dJEatMQ5ES0CsdQ8T6FfZqABpOu68Aj2RaYMOF5WH6lo7yTowMrPNUmGCyTrQx32mp2Cnqa4-JSu4DYJCRXLhsrphAMfNP4TFHPLGbo97MbKPZPZI4P6yoPs1sY"} ================================================ FILE: genesis_data/genesis_txs/UYoJMT0QxMtB6ctUB-9iQlcx6fF8R3s8ahM4_iF4wiQ.json ================================================ 
{"id":"UYoJMT0QxMtB6ctUB-9iQlcx6fF8R3s8ahM4_iF4wiQ","last_tx":"","owner":"_qLEukMBrmsL16aFmXldqUoYG7Twf0E4DuCa00sYRiRwxl8UxG5Cw_7es0G-IehQp0QTA5jYvELFjXhZQElKjhmxQIs1QtQOi2Gu4zpQXaS4_dKGSnzzhWcUKJV_X3RguqEeOBcXO_jT_mQw170IsfjkK8kMDejSDqLVm2jnGiIubqA_NxhdfHnLsXFGc1SKxHh8scFfKlHekL9blvinOfajs02fYMYVGoZjvUyjpsjeTjIls67qnuVmF15fcV0gVfh03jSPb7et239rz6SZmpIDefk0GXkquHPiYSm6HXDLqVF3Cuoz-vd2p_CtMkw1NZaareK2ZnogRgAeI8ea_VKRbHMjV661YEj72h47GPelE_BcYmFAxGUhUMLsNAnzjh9hTknB2Ii1-UCZdi66w2OCFWB2ARQvYK2gu7C0kMMX7AXKDyTo6NA0a6pXYio2M-LQt9up8gjPmRXxS2CFuh5N8bp6QL4tJthI9z_gdYeDGWEHmJc0moAm3x1yH0laRNDoOt0VNWMN_oqZR7_vligOKA5h6ePFrMZSKyLzO_mATv5mWhqySrqQUxvuBq2IFrlhj0k1DltpJvFpgkshNAUIDJI_d14QVE6-dsdYh2ezv7Uvio72gpZoG8iqplw4IhJuwsH87cmGKWQC0n_Vmwtd63yoXnwKqBlntHD99CU","tags":[],"target":"","quantity":"0","data":"VGhpbmsgTm90IHdoYXQgdGhlIEludGVybmV0IGNhbiBkbyBmb3IgeW91LCBUaGluayB3aGF0IFlvdSBjYW4gZG8gZm9yIHRoZSBJbnRlbmV0Lg","reward":"0","signature":"zAgbl7Dr0g-ui5Hw3ulSwrRBtwsm1w3rnBy30eUh88uKjpZeWr8-uQZ-YCEb4bSR4vy0Bpj9_EYAZ9LVfo-nys15PwJu249wudN550vhn4N47J4sDEtxzAgniKXbreDaUTmG2QqFNv1P9MeaAhf2mEjAUnBbaUDQxX-YeEJanyiD8ZiYz7xZIvTC4s6v2CLm-WGSesQwfE24LittR-FMEzjBx5QwDGElkn3UjStN2yc37eUtsBIwI0UnNjJXzFDhI-ULoCM_8blDzUpgzF-1LLVKZzP-wMIU6xGnfpYGKGaVds6YRF-MYmZY8jxv-14QDrTmqgU5PDW8VU2E-JXgUkicp6Vzk5JdAjst-MJGeel4HNZ1ARtpQy75Y5ejfSp0O-pLqYDXgj0yzqG8O-jb9n6JM8rGhvD1jf0fJbuxJBCqYmTQKP6xLbXRLqFG3CV34JV5gbq-VS2WVo4Vdxf6ZYGb3T_J4EjccP5iO5A6gqhDeRWq1AM47gKdM147va58UNWn3AVXO_Jb1OBrAwWR7IGG0ozmnCS3NFhZEmXcXf3VfUSQSZ8k90rxJgYOgyeKlRkxUArufzy6XdhRXStN6IyazdZA34Eq8a8PBlh9UJCWpuy2W88RihPmqY9CAyC2FJ8ZNUxL4J7jh5aLFK4d_YVhVXdNgeEB1ncQPrLUNAY"} ================================================ FILE: genesis_data/genesis_txs/UdCfZG1jBYUKgeLc13zjRxmQHO4_13B-NigE57jmJ5A.json ================================================ 
{"id":"UdCfZG1jBYUKgeLc13zjRxmQHO4_13B-NigE57jmJ5A","last_tx":"","owner":"rYg-Q1yPVdsi54v39M1s2N4UVbQd3UIc5yPIBU5UwxaJh2-jqSlwOfrIh-v6_7D3jzATExVKlMLesMKkllXxlZ6tPsF85SBn-fqp-MnzlSfnbMLUn5pb3lK6cmOVeDKp-dPWpBHKLcMZl3Go3_WyCrYeKoanBF2Hcn8O9cYlYYjrxt3rscevHUonGIuwjjETP0t1NDMkbvqKF8XKIlZqFjjZrKQiZC7V1kHYgkITKJLV3UB0XmGFt2bYyF3Todq1CZl4gv8yfOuaAZq-ZhisUdRJ6Kqr3sLtyByiUG35QsfXB4k9ltODxTTSyEuja27kIyfE01EQInRGYoIadk8Djt7pwZl74TSDGExzghIYaXHv0iP4hWPWnQWvLIHPHM2hN4pzVu7UVNlPTmbJQYCch-YIsosSk4dgOZi3eYXqIiOnhh8kV3cj0_UsJdFRJ7pLWlLy9U5N_YYpvKHCqsDFS0d_w5jYH-_b8oCULNwejNoSaZeq9SzksXFPPB0rjvSvddwwGZpvrc6MZmQ8Vm_pU6ODYAzkIaOggtWNasqSsGTGGUugUKFc7yG0GxgT3qeOykG628DgpkFeMDMILL5ou22G7Q8lhsKQi26pCU1vOeU6m0nd9ch2kZ8mhKGdPCwYKM9Mg0PP1P66IYWzfvkzShTWVZ7rfu5j09YYlWZdafs","tags":[],"target":"","quantity":"0","data":"c2FtZG93bmV5LmNvbQ","reward":"0","signature":"fRl_q2ZCG4vKdnXAsUS4wpStQwdBQizlyU1bl5eSLt0OIbJ6ffLxeOi68_WuyLnd6iqcz922GSDOdSi_VQiyIo-feBqCPe9BZkcGUzjXT9vAmWfothEx4luQkvnQFsq1GpS9Ay2Flb0UirOlH-J80JDzTxNgBj1_oe13EvQbHkzsBQoG_Cw_Zky8GJx7iDtUsNhazLhcY9raMNQ9JIvWDWdAHwJwDQsXUzhcSaf26sUNWjZakE19eaU_rloF01EBVa5ozBJlJ4ZLJawY1TDLLweE2VA2MMNaSxSSHkPwQyLe-qGiQRMWESZ4YpFR_8dP8Y0XnI6ezqeDrkH2rutVwgI3x8hqJpLT5aYsAAgpcP27aFDR4BoUhtzxeQLIjzG7VRKhbFwWbxGm3mrxIt6P77aZ-0lAuOef6h9JIyWSKPs7FjFAhhc52uaBVpwwVnV-Q7g3jfRmo0buu5LAa5aurC0-cpY_DAlXBYhHpL3W3OarpKnxGzDWIateRQTC7cdgYTqu6DKBpKWRf4Gedrb5q44GH6fFq9wSK4PpBz4jBYMPLWLUNl-1rPkP2P_hlJoZD4sexicjLvODkpj-hf2KVXi-JSE6P_oAVt8tpnlD26SNc-yKN0uWEGtMXQaQ7ZDdaPxpxfobWVkNBPF7xyQ0mwp12JNplcK7nZvfLYphYFQ"} ================================================ FILE: genesis_data/genesis_txs/VUfaTp1eAzjnbxLR6xx_qQGVn_WOTna3rTolM8wY5BA.json ================================================ 
{"id":"VUfaTp1eAzjnbxLR6xx_qQGVn_WOTna3rTolM8wY5BA","last_tx":"","owner":"v4Kkd3gTp_gWLwoH0r4Uodwvnsl0aGebrQlzMI7V64G6AqtINKeunmbVV0may6PV85VSWdGmES9uH1U5tOggsK-whEGk6wcU01hhiEmLb5HQDmYZqsVcgTDzqTNXBLuvUUWuClQVXk1MBytsxE-cmXvTlNqbqBjFPu9AaY5CkKCK1xpvR1NcCuzXCUIl280LgNdKgdluEAh4iER3rmIfsxsh3vFDAq7DPt1X5nikZW1GqL0xSgq1pByy-nXInLKByWh35aB-S3-O6YcXT-Zj6sbzq672fRoR1iXA2ZXYoGZyvGGkz83h2Gdos7gf4ZP6DwfpOD8u0Mdtg_4oCVDYUeoW4WwBuNcaQQySGOBDwDJZRWyNrRDo-G6FOb0yicKIfeaeAISVzd9x902udmeqQkxQ31CA6Te93OVTamrKpnvwtO6d2O1uTtaBqLPdkS3P0U_ne6tep3T5jtz-VE3cJsaIk2DRObRS7vQ3K4cyk4968eowaAo-H9yJ2uGFfA8twnW2MpB8VEk9zSzUOCjaCgsccg7jtj3cDdy_NLyxB-OA4qql6LG4PirskA00MWlDjlAQ0-EcNoiP9r9igjZdwsxAZ8OAd1FHG80xmbxTD3gobV0LX9Ae2hcmDIuE_54x_-FyiF3-XM_APvdMtipiJE6rUincuMo7P6qb6wt7Pqc","tags":[],"target":"","quantity":"0","data":"TG9yZAoKCW1ha2UgbWUgYW4gaW5zdHJ1bWVudCBvZiBUaHkgcGVhY2U7CgoJd2hlcmUgdGhlcmUgaXMgaGF0cmVkCgoJbGV0IG1lIHNob3cgbG92ZTsKCgl3aGVyZSB0aGVyZSBpcyBpbmp1cnkKCglwYXJkb247CgoJd2hlcmUgdGhlcmUgaXMgZG91YnQKCglmYWl0aDsKCgl3aGVyZSB0aGVyZSBpcyBkZXNwYWlyCgoJaG9wZTsKCgl3aGVyZSB0aGVyZSBpcyBkYXJrbmVzcwoKCWxpZ2h0OwoKCWFuZCB3aGVyZSB0aGVyZSBpcyBzYWRuZXNzCgoJam95LgoKCU8gRGl2aW5lIE1hc3RlciwKCglncmFudCB0aGF0IEkgbWF5IG5vdCBzbyBtdWNoIHNlZWsgdG8gYmUgY29uc29sZWQgYXMgdG8gY29uc29sZTsKCgl0byBiZSB1bmRlcnN0b29kCgoJYXMgdG8gdW5kZXJzdGFuZDsKCgl0byBiZSBsb3ZlZAoKCWFzIHRvIGxvdmU7CgoJZm9yIGl0IGlzIGluIGdpdmluZyB0aGF0IHdlIHJlY2VpdmUsCgoJaXQgaXMgaW4gcGFyZG9uaW5nIHRoYXQgd2UgYXJlIHBhcmRvbmVkLAoKCWFuZCBpdCBpcyBpbiBkeWluZyB0aGF0IHdlIGFyZSBib3JuIHRvIEV0ZXJuYWwgTGlmZS4","reward":"0","signature":"RP27ZxEBPN5fvRqP9LrkbndkM3Ci0UprSr3wC2tjS_FFDq22GSpWF3_ZMn2RyGUy3ztK2RIJee-WmNCjnHNz_dYFuT-guwD5dT00fa_szKWeAIgBkZR0G5RDasGq610J4PLYlyIaEZu_-PxivW73HNIgQcXNvyShtCkGKvnora8XoEdf_IgPW70NaqYSw6yFU8FqR7LCha7jU9fIW0FhQKGWKLzXnWDRwLAP7R65MITGkqt-SCwdErTzOAPBpEPLzldZPFiZwT4TNV6dl3CeimBjtOtD_EmnBwhgdDzTs5ZY9nVdTsEjmngugqBFZaTaxRURyT5tJi468I87uyjlHCTes5fTQCwkbwYepbHQzDu_uuam7Bet11qICEEL__vXqrb7m0cK0hGl459pUdBNZFkAHaEoZ4f1DxWft17gqprDIri942gqfOYS8
KPhiSGIHO5_zfeLar1HivNTRUXWxiIRN99L6O6Cegb0ZGjoC7RB01Hgqs0o1X6g2x1GPrQmuBUIoIFYWkR5SI_pyKyy8S5UAeQ1gXtPtEZzJNjxOxCVqZZQYoQOt-s4GL6GlOn66lFF-D_oJzjoLTjyJZ9QuaMJinQ5l-RX-k0aM-JqsP_uufkZq7Fu_Z7bk61_dkDxOI9kFJCSzxV4qSd8tAOk59yT8O9IQrAkL-LSB2-hCYM"} ================================================ FILE: genesis_data/genesis_txs/VuXQZjhUaZ2Hyi6Pl8_VTOu2mUWjoEemYb5TKXPFOS0.json ================================================ {"id":"VuXQZjhUaZ2Hyi6Pl8_VTOu2mUWjoEemYb5TKXPFOS0","last_tx":"","owner":"tRdM_yeBdtDsw60HvwMhEYEcC58ojL4ekA_n6JI9UtdzZ6Y_S7-Vgmslc1IuI-oEcttgHuvOi0ixuHW9qFQZ9CC7W6H2jvOS_yPdRc5TkGBV3385zCbB9YXjaB7JfOpWDzM1QZyb-3LH9HleFw5sP6c6CKMyWxk4K58TSc-8zjqRfK6hmsorKh9_YaSepFLgoJpW91jGrT0cmDF3FXcRtD6mX2WxK6tu1p2Qfmyc5h5RKmWUG8e3t795yVpY_TePckPmpRldjZL70NyBUFwarxFdzBrH8iKN_YYkRK9kfs97yAM2iovfJRshzcS1QQcwbGUCNc69IGD1DSPg-PzqFQ0gTjaRa5NP2JQzzwPtzKZj_9fhfLkGHJP698e3XTYJsntGjWFciuUQfEC_3JEfDW3uZy4bTAqDlv_NsdBvB8f_wYgSuKiDKXpAPZKTaYNDyfRdOwZw1KL3wP4tjfIpsG1iY89omzbjZRk34Jhb1MHpRSV88WTKldl8lJlyCPmCCpDqJINyMgmZEgmNXrVdmqkd58muQUfSObTtsiTisYTKr2QdRd2-jXUQXGyVl28ageHdCQ_Xb9jEfM1ew_vCt0sAX1ZNAN0nFZfNvot1FfiloKwnkWwGJz_WEsKWks2xb2BkE2olTkenkQ2my3X9DgSmvJ5US_fmhn1BY7Hrd98","tags":[],"target":"","quantity":"0","data":"aW80","reward":"0","signature":"QJT6xkO3TBNaPXwyoX34HLamhkGXd-9-H9AuV6SbUXcIuUJf4ArCZIkkdug2jNxt3E1UNkOo_2itTO1JvcJXD2axfeQpQwUprtbNLh3vTHPDcqe9A5HB22RxE9wqAgPfUatq45ObkPxU6UgjRxlbYuDGMvIOtWKwFpf-aHKBEGJUFnHo1RE_kr_Q5Iw_a8_ILN9fCzdNepgNPVmosJ7Zki-B4HhEkZDPgVRpsDLijRgTg757pHr6If94NwaQaf7mZqyQ3Dw0yEmuEDscOoLGY5Rsuu0bdkQNoEpf76kHtmORjF-U9-J2DpBTiHfuEp9eyiUeVJ_qqPfk2-8PdFaFqd4YYfRmzZisD5YnHKhNn1ejltDjrrkTAf1vwvDAEQTSZshER4Wf0R3YBIVhTMII9eRLcK6nfwBJDR8Ep7GehTChXRiD2mhZTsVYur00DzY5vEFfxiO1VD0rb2hQfFnEcmDuY07savgBZRroS0RJG_9Ga9IS_gttLFmpmLeNXO8wIahy8k2W-3ySUBcbHlxvg-Lk9aH4pLqVAv5K0med7kT3pVYPE9hc8yb41_XtbEds_u_CuubTedsiMiCb0f6IdaqzMRtICwuLhcMqp19qT1KZsJ7UXsyHYczNpmgmdGTea9627j6qkPykU1-Vg8AIl10koXcfQsitb7slGBLYf3M"} ================================================ FILE: 
genesis_data/genesis_txs/WE5eBi6hEq90HQvDjtJr-EmZATWJthgxh3HPPuQ7410.json ================================================ {"id":"WE5eBi6hEq90HQvDjtJr-EmZATWJthgxh3HPPuQ7410","last_tx":"","owner":"w-sdvsyskkxL3NWBXBTftNhwOHYitd0hXbSV0aMT0_65b3Iuom6YgLPwTF5x_Lj1aSNPFGELz0a7pnG3jqbICsFi7J6B64dvvjF8RNIqfn_nVTSmcoxlgTxpkqnl2Tf1XmtmKVGYReW-egBng4pVR0fFy6n_4XlF3a-TRiW6X9BXm9oFo6LWXowUtY5gAMaN_ssjwy5esqNzi5LGks45IiMyjQdXEuyvRO6JbBi1unIZtrFrGzeqfc0tlvBsAaLrYP7TTVoohBgizqhCRCmQTuoQMrydZX9GeX568x-2MUCETymf_RYkRTGjiENin3wgYVRoq4HFei3hasY_vB_i5SL_ZjhnaGUAtGNXZWa5AK1dYzWo_cEiyM95yxHDDqCE5-mP3fJq0gsPCyREOVNYHG3sEP7Q45xNh8DV87Xcjxr7FYmooeMNEJw6zG4qMxGsuKjuoYDa6SoBwtZRpMmnyaqcyx_6WpViYRVroZZchlZV193mpWDMTYWFgx64bwgJ2E67-cx4Z0OvH2aS1_iTsqMnrn_fAW4D2c2YHLqQXcllZvLodcCE80twd-6frUZ8si8OXzFwZ1gJpFad-9Z0EnNXbnbygIg8-KpUVjwyW7_eU1L7mlL2w1Mp6MOaIYforga-MLlI1xI_A7a_hOxB64SP5XpbUfTwIH-J_YXT3V8","tags":[],"target":"","quantity":"0","data":"SGkgOikgVGhpcyB3aWxsIGJlIGFyb3VuZCBpbiAxMDAgeWVhcnMgc3RpbGwuIFByZXR0eSBDb29sLiBTUEguIENZLiA","reward":"0","signature":"P3XktQwng5HXLVwcsx6AAVjZGojheor3sjb7p6WzmnO6SFiwRSnfIyE_VKukvZ7aag5CX2tlAV85bqupbywnLcBXtlRzG6952L1d6A2akAMxmtvAVFjNqmvAFoNsr-VCmczEuWIh7CdxRJozxMfwm6bX7mYBDEYNurwRy76W9vmiFb2pCHTDMeCW6FkCy3dVlPKg4aXM01je4qgRBZ5rZhNZpcY-GsWsYxyUXpmM54fXwob9PW0NEfWGyBbpBm_Y-qrcBBNRQliaSWTkkrbOZ_Q6v89sDSIqPPVT3ecTllAciC9fydj1OXxEkp-FSDDOom9OCHZtQ3ZPsyGbRr9oVVO3rVW5H-zX1HQBM7c3gNjaFmCHGpsbaXg3vhU0PzJnnpTG-ZzCYwsObN26Zq_ErGizcJdgw0y3t8y5HrH2hQ4lSuGgp1BFsaIEOvhy-TzSQtRXs22opepcf9-QI7eF3YIYjGZmQZ7IpnQrQ9uwcgjwL9ljyDOUGpZXZ7DfMy6yjWSvApZVXNIZOJlR1hN9OcWhHUvpRGNp99FOcXZY5DkdESyDfbZCx87WZwlWejWYUMVIYSc8z_9D43CYN3-2YwYKoRxNRExcJISR4hFnQ5XxuROPnPj9A_npJFp5ZB-g-3v1o4NL6P4laZpazwELkJRGlwwP0nllXAiX79RfRcA"} ================================================ FILE: genesis_data/genesis_txs/WsYJKhqhppBF6_eGbd0OACdu3LU6-CUuMcLeG3ST2qc.json ================================================ 
{"id":"WsYJKhqhppBF6_eGbd0OACdu3LU6-CUuMcLeG3ST2qc","last_tx":"","owner":"6pH_4c1ES-ZZC1RHihaUdmP-XXCqTmNMQbBtrNgvTPH2MLCXM57xEL8hpH-fZXjpRuDkZeFybrQw1XAr-qnlbKmYlDwUs26sTY6zPakflg75EEkbWpT_m0G-nN0RU0RR-zwh2r5BbLUYlnoNOULUgAW6wr5w7R_D3yshOzlqGc-jRwFI8v-MmMA2AiezvgnEkZh6uOObK7EvTCvTaHr3z3xFAGeCok_Mm_HhNKt5JuT3hCr0tNCNqwKfJNE49SK1sx24y7GcTKG0dgj1R97QJt5sK1igsOILOczmmJw_RvhDTIL83aTFg7VOD-lEB_bGcZVOQUGrO5BNWR1sKVyidA4_czSGa7bCXss4_d5BU-TJWPADNKYltSckr_ztpIroQFonXHReFkJkmzaExevzigY3OyTRtF62auYAv-hk0nXoWi5Bab8T1YJpwBvRKiJ9gltb3058XagVEE-rE9xteF2U_POIQ6n1cvs4iUFbthQyxlkVe4z259rxveEqPDYmP_ideVM0MoDGEDoRXuwafkAp4L8mqAwFWbOHnexT2DVINrWS3582M7Zk8HN-to27SgDOw59AgP-Qkbm6UmpVVHBTVhaJbtjLyox8nwuAMbGHp03_lOEmABcBrrdh10Hv9qIJ1bbB6JqmaEHZax_LEQZRyxFAoa8VMLbkFwCB93E","tags":[],"target":"","quantity":"0","data":"R29vZCBMdWNr","reward":"0","signature":"ano_3glaFYm5jQBt4bAIOkHkJtnKeIm7z-wHsxhRDatcyZd6Mcc5BDyUBszI2N14p6Yj7z2gwFnsHBeyKl6VvQUw0lhHXalTCUszPebeDr5Fnjy7nhJxRdEzHjpusCsOV3OQkfrSCRCGOQk8oZj_9Oy6IPmO8eYgDtmQ1rPYpOZ_kswGm1RNRFZwBNPNs_TznwPQNCLdkwISv1RX1hTEHzaAeXir7jwzyDTREegzlWFnocL32JcJiccb0wV5dHryIj5h_p-ULudrY3GHOCS1Ffolk-rKtGc3Ua9U3Ng8wLkZRZp2ucoyZ2-L-DlM-6FQlwIPorALe9D2BAvShQ78hJ3yIdqaQMaHB5B6jM2M8hGIKWPUl8Pf-7gOg5GNfeI5lxVE-OaHrGk31cKcVNu945dWK-PHYSXalERQ0_JsA-PGFWCd2n83N2vlzh1hPAz5sM5UHZhxLUYU4fYTXENdTMTvIJu_rb1TXsQ70JkG4e6ZNdP3e66HgBz3r_KkJReJhCcVCUoR4iK3Ds5M7sVnuA1eeB3jXZrQGPTK-blfEgdZWLIp9-pQkkZXLwaaYrbuf8ow2j5TpSk5OuG_NQAlaIz8s8K5MypqOwjWYzVhY_hVUItVHsf2_964gEN9RVtEunjWXUdwmajGzOOl1bybPBOV6wqDPtE9QndYjbttHSc"} ================================================ FILE: genesis_data/genesis_txs/X9biR_ZA-rnpzk4gfLi0-pBSsjjT2l9Rk0VfYwf1WMo.json ================================================ 
{"id":"X9biR_ZA-rnpzk4gfLi0-pBSsjjT2l9Rk0VfYwf1WMo","last_tx":"","owner":"2DYb2HXHOXQTUhYDTPVU7JQ6R47bvTO3E2EIqhHiXygo7MRjpgAogXRqOiohorEDT0TgPHsQmLOIXlNgshxhFimlIw1e0FAEm2DSEDwA7iZcTtuwLUwV5fn3DlX_xgESO6izEm8AB823q2mitd3npLjXsmFjZUTyneGXYh4RZqOfSFieEoeg4SlQedy8IN-npqiHG3Rnd6s9gk2VpFkhLHXQGUZ0QVIoqDcUpBIZ-H9zNcZzY9tDwwzM9iwle8N8NtfibZSabo9EZlwpJ1LsR2Y-lj5WWB5p8a0yzaDHHsvszSRuZciWceiJwR8xAX_rkEBFcgajXRKoRkAdoAG5RjgNB60ifpwY7_pY_N06DqxIWLCJq5M26mQmTweGwQzauDKSdMqOuDYKrtoD-pMeW7QjlXVKlYt8VWQuWzMGJmtH1PmAGw2OrklpTJ1LJmLf6FQ_qJa_Wkok4GVr8WR0WbONMEMj3UWgpS1B7xvRCWkYi3mRHJWwKUrPVCTU7Jhqf0WFmddwsbsigMUiB2IbPaymYCd7tHbLHuEWlx3QGM09TVz0EmMR2v-m_i3KzApyyfEhi7AKkEEpRjGUuo2D2H_iEF80Xa0i7OSPPdad6dLGRGZ_X9wuQHmxBnMD6VCrvhZE_DNaX0k0Lfh5-wOBRnMdRWmEIi2x93eqIofaDYs","tags":[],"target":"","quantity":"0","data":"VG8gYmUgQU5EIG5vdCB0byBiZSA9IExvdmU","reward":"0","signature":"lSxl2IH8fCWLQN0PcoKjvZjzdNa75WDgRHWtrjoN1P5V537JT5dSqwV0xSJwsUVeWEMSEVPvUBRNKf8mVGLAm3TgY-iB8zMIf8nxzvBcKaYWMeaAHzDV3lW0Z6TIrY7gc_GlhI_Q18vb0DADF11kS_nYzTfTcXd365ZUJjXrtgzzHj0lszheWATg8IwJvChInSkUNgHOp35yU7ErMlgSMwVWS9_SaIgfS5ytc0mjEwkFLma66_NRQFyHPisFGUfEjUlH5mRSSC9_X8Drx_dm3nwFdrGbpEq4NkAFZiN4DZKLzO-Iiq1EuwNjJ5_WeS_JjAcVdZGk2LRvygP4IzLyg4sughBbohYppDkD-5z9gx5aQMTRvH4gQRDcfU4IsFUESg5IyUcWWY2XBSSCGLeqGE8gQe64Mek6wH_3dCdbxe-9XaOs-AyVHKEex5m2PG8ChDe9BaRgEdJh3AU2QH100e_jKtI7lDK_Hnp971T-RbCxAg3EucwPnM412u5eraUa_foL7FCTT_J8NyBcKUPCw9mIDxqEsUIArVrrnFR7D6BL3dYZzdmLsMQqudEnLmHZoFMqk-ZJA2HkjjKnRFO48UyCLCKWjAo0xJX6QERH-A7JW9ZhtjOPRKLjbW8tekTFiz_OppnLUJMG3MmP7W3uW0ixE8N13H2eCHRbltX_lXs"} ================================================ FILE: genesis_data/genesis_txs/Xjz72yVLd_Qzl8_GfSPqZA1MAkxxhjr2Lsf2tGCj_ZQ.json ================================================ 
{"id":"Xjz72yVLd_Qzl8_GfSPqZA1MAkxxhjr2Lsf2tGCj_ZQ","last_tx":"","owner":"rUos6-Jqcv-u7NB_IRN6G-VE2zVd6WWGMH2rOkeALfJKkV1qfLIQIKzLuVDaWgSWRVGBijDaBft9-bqd1x-KjGU6JL0nLYVWY52vyUTbVhsWnTQaUSs-p3ZhxMwfHXbMq5vstaxvARQNXS9H-_7QWWgzHWUtl8VVTX7tVq79YUcAsNigqLWmCyrH2_WkPjX5Cx1Dwqud6nymYLlDlASgL2thS65XmxjKcODipapLuYk9d3aYnC1pnMsR6JY85_QFwrbF7dnOArH8_C60zxhRDLk2RYz4p-CkpLqyaHegZCHHUMzRBTgaESyobVIZbOlpc43BYYJea3PcITMxiVvIJ-nwPGDcrgRym2Wo7O7AHNJQuHQQ6s0Xjk06iEQgGZKQVq6MPaG15KZ6Hi_1kCCBdZ3Hp-ldlfNIMkf_uSn76u7og3Xlnh1fU2rvLTccB3I9XpG05dKQvYem3mgx5pAWAQ7UgKBmi5KyEHembw1A9g1NuX6MO695qK6JHjxTh9jMlJ2xhP-6ICCuCgDnNV_0OQTeSoaRlZcEDu75_44293eiUdk3WBaF_HnF7jfhI9uSr_oKEd3iwA6erdxOs_TB8F4FNU8I_cngPuuE5gTwASmfV_U9XxqtGtxcEsBZ1lkfIFJV3OV04j9feoImunQSsLeQwAp5uGDNFwh6rhTVZy0","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"l0avRzJQHXJgne5y6tH2GUpmyxJl9qMdSpbNeZV8pIIayH__HvO6B3A8EHOSDzwXG0sOannH6vt-AkS3ytwfvhn2MMt82901N0TmZwNyGo0LUAkYVswYLCKJygmD0W8TB0RfPZ_W-zKc5Q6ycBItZI58KzMR0hqLAV5x0sKTAPyI3nih5iCkX8HDIy_NFd4sshlTNFNx9dzAXv5P39z0gxLmLV7DekGt3GMnRFMnmoX6LE0f1F7vYGfFcMcGcaTGsXLdNKdyj5n93gqbbBqsQCPVI2LKEnL8WTy_vT-EOPlBiF4v_pDtNdficATwbi2FFmLX_0DPOKpVLiP-1u4ClV8FJ3vpW-hTPMamdUwIaKZ2S1TbRcAs-Qj0gZrc4pP5HAd3kZnwhVINlOrB2ryZJQvLKuUEKVH5X_eSh3CbOPHXEfRU85W_iKafilWt2n_6MzSw5j-Fpl3sfpoVGKpcrldNhF6_CZyCo2bw84ps_4WlU3BWlNJy1r_X3WwViSYMtdeRS_7Khrsyj4R6N0YSdlFBmPhAitC9JHBXBueyvI86arh3VdnJ8f0IgcMLIplQ14RXBmU5wO1A0wCsZ1Q3rOW-cCj8oGJVvpntQE7Th_TsCOoesZ6RA-A5v7OjXJdeaa8wUFLeyqpCJ-k0yGchb4hemMZOaNCBXh3Lzhdv1rw"} ================================================ FILE: genesis_data/genesis_txs/XtDRu-1SyoRL21gpKcxWtxyksVwTF9kvW26hvQ_bPzE.json ================================================ 
{"id":"XtDRu-1SyoRL21gpKcxWtxyksVwTF9kvW26hvQ_bPzE","last_tx":"","owner":"q1RSDA4wzpbqWggL3jfP3INUPJIKtIKdtCvw99wGmehu-Yct1Q-zYV-D4h1zyM7ysXD3gcMIDPZ3-TBWdJ_EOOTokNbj6F5IhTQ31ygB2niBNaD50oTb4pOLT-pGf3mh6OggBan80JVXX_MrtLprh4tH3u6T9DAgJR_BLaIzRiKiLXqxxIQgaqsFlbuIoSCChg48GZ4INR6iPzz4s8QO2tr6XNA8XDoZKgU8L1XZN-ZPDGkWoYEAE2TW4n9WT8rlb5Xc4SPw7o3JqmkzaYXgMy4Mg5Y5xO_-NKqBVo5BnwLqlfyKdqHa6qd9nDULv1-bTTsDLpEDVjshuRAnKA5IgFez8uY02tnsjZq3QNDie8b8ATkxD-KtrFToGXNTj7gXpfJXJYf99aQNxlZHBrCDuOYH5OxhzpZ6S4qYJNhdSNhFIjZJfSylhYxTyp582xHlruWZGTEvB8DdR6ifSSRAFhh-CRJC9PVGyvzKxppVGQAwplMFgo_gGzoJ1Lgfq0rWuAavvJ_ceB9X3nEvzmfx-eXgddgV7xX7XRCzKHUN3KXGrkZejAOKKfyk7tLTYgFvuYT8TZn_LjfzUZcc9a35OTnAMQGRVYrt4odX29zMQ7eBsSCBoNfr-17KxiMouykembNleitAD2quVMcBfmyXHLgGwCEekzpVxuxEGPpGmlE","tags":[],"target":"","quantity":"0","data":"TU9ORVknUyBXT1JUSAosCgoJV2hlbiB5b3VyIEdvZCBjb21lcyBhIGNhbGxpbmcKCglhbmQgeW91IGZhY2UgdXAgdG8gaGltLgoKCVdpbGwgeW91IHRoZW4gY29uZmVzcwoKCXRvIG1hbidzIGdyZWF0ZXN0IHNpbj8KCglUaGUgY29uZmVzc2lvbiBvZiBtYW5raW5kCgoJdG8gR29kIHlvdSBub3cgbXVzdCB0ZWxsLAoKCUxldHMgaG9wZSB0aGF0IGhlJ3MgZm9yZ2l2aW5nCgoJZm9yIHdlIG1heSBhbGwgYnVybiBpbiBoZWxsLgoKCQoKCUJsZXNzIG1lIGxvcmQgZm9yIEkgaGF2ZSBzaW5uZWQKCglJIGxvc3QgdGhlIG1lYW5pbmcgb2YgbGlmZSwKCglhbmQgZXZlcnl3aGVyZSBJIHRyYXZlbGxlZAoKCUkgaGVsZCBhIGJsb29keSBrbmlmZS4KCglXaG8ncyBpcyB0aGUga25pZmU_IFdobydzIGlzIHRoZSBibG9vZD8gRG9uJ3Qga25vdyBidXQgdGhleSdyZSBub3QgbWluZSwKCgl5b3UgaGF2ZSBub3QgZ290IHRoZSBib3R0bGUKCglidXQgeW91IHBhaWQgYW5kIHRoYXQncyB5b3VyIGNyaW1lLgoKCQoKCVRoZSBiZWF1dHkgb2YgdGhlIHNpbHZlcmJhY2sKCgl3aXRoIGl0cyBncmVhdCBiaWcgYXNodHJheSBoYW5kcy4gCgoJQ3J1c2ggYSByaGlubydzIGhvcm4KCglmb3IgbWVkaWNpbmUgaW4gZmFyIG9mZiBsYW5kcy4KCglUaGUgYmlnZ2VzdCBvZiBhbGwgdGhlIGNhdHMKCgl5b3UndmUgYSB0aWdlciBmb3IgYSBydWcsCgoJYW5kIGZvciBpdCdzIGl2b3J5IHR1c2tzCgoJa2luZyBlbGVwaGFudCB5b3Ugd2lsbCBtdWcuIAoKCQoKCVRoZXkncmUgb25seSBibG9vZHkgYW5pbWFscyEgV2hvIGNhcmVzIGFib3V0IHRoZSBiZWFzdHM_CgoJV2hlbiB0aGV5J3JlIGFsbCBkZWFkIGFuZCBnb25lCgoJd2UnbGwgbW92ZSBvbiB0byBiaWdnZXIgZmVhc3RzLgoKCVRha2UgYWxsIHRoZSBnb29k
bmVzcyBmcm9tIGhpcyBsYW5kCgoJYW5kIHllcyB5b3Ugd2lsbCBlbmRlYXZvdXIuCgoJVG8ga2VlcCBhIGZlbGxvdyBtYW4gZW5zbGF2ZWQKCglhbmQgZGlhbW9uZHMgYXJlIGZvcmV2ZXIuCgoJCgoJTWFuJ3MgbWVhbiBtYWNoaW5lIGtlZXBzIHJvbGxpbmcKCglub3cgd2hpY2ggbGFuZCB3aWxsIGl0IHNwb2lsPwoKCUhlJ3MgZ290IHRvIGZlZWQgaGlzIGluZHVzdHJ5CgoJaGlzIGxhbmQgaXQgaGFzIG5vIG9pbC4KCglTbyBoZSBzZXRzIGEgYnJvdGhlciBhZ2FpbnN0IGhpcyBicm90aGVyCgoJdGhleSBzYXkgdGhhdCdzIHdoYXQgdGhleSBuZWVkLgoKCVRoZSBzdGFydmluZyB3aW5uZXJzIHByb21pc2VkIGZvb2QKCgl3aGlsc3Qgb2lsIGZlZWRzIGNvcnBvcmF0ZSBncmVlZC4KCgkKCglXaGVuIGEgaHVtYW4gYmVpbmcgaXMgYm9ybgoKCWl0J3MgdnVsbmVyYWJsZSBhbmQgbnVkZS4KCglKdXN0IGxpa2UgYW55IG90aGVyIGFuaW1hbAoKCUl0IG9ubHkgdGhpbmtzIG9mIGZvb2QsCgoJYnV0IGFzIGEgaHVtYW4gZ3Jvd3MKCglhbmQgdGhpcyBJIGRvbid0IGZpbmQgZnVubnksCgoJaXQncyB0YXVnaHQgdG8gd29yc2hpcAoKCWhvbm91cgoKCWxvdmUgYW5kIGtpbGwgZm9yIG1vbmV5LgoKCUxpdmUgcHJvZ3Jlc3NpdmUKCglzdGF5IG1pbmltYWwuIEFmbQoKCUx1Y2sgYW5kIGxvdmUgdG8gYWxsISAKCgliZSBnb29kCgoJZG8gZ29vZA","reward":"0","signature":"bGkv43Fu9p7NxsF5N_QbcH3iHD80ZEmFxDSEA3haseymiKbpKU-tHdK6IrlKFtMf770ZN6ao6Ks1d7EXUbX39xLLVWmqtwpzTwlQP6j_RNJ84Ke9u3-s9x1oL7yIPI6XZrvinwW07sfFSaOXT0A4ZD0HERUh7DUIcecE4V_E9uTRRgejF-Bv3-MPRKREbNDtQI5Q9hRjl1lL8T_ByHGxk9LaJ3NGofHDHCKUTBXuCqdsAcwpQBt7DIE92dwTM6GFwOGSFzxQAtG8uKBjMTfSmjij5U5WxV0nz_5w0RWz25WogdABDT9D7piYzC7QeNDWDTbScYRzPFDggmvsoKO2X0vv9UPn2pp12AYMXdgfZwHyrbvjD2Pw_GU3tbDFIKucyeH01SpvPVS5vgQQANSlYF37yeToJvT1lV2RmIraOVSV5JcPXtKYQbRdKD6-IeVYURWoposvSovBF08GBNg36leiUJLZIE2PTnVdL63ZN5NxyKX1JazKDwwhranvxVUaMhJy9n7o1NqXSQvTDauPvZecGBNWP1xn8X64ez1-RmB4UeDS_mVNhPY2bzP5_aAsoT8_DXMVsPebWyKTHl1RpBCJ9rsvE2Ee_xrkh16o23hJ06Kd3fI4mTr3OhL5KajuKa60CEExoQhqeJhlNEvMBFznwQP_wM8DnbUnE3e-pWA"} ================================================ FILE: genesis_data/genesis_txs/XxgirNr3QGaJTKxPWqK9byYLj7SdbfZudKd9rbynWyM.json ================================================ 
{"id":"XxgirNr3QGaJTKxPWqK9byYLj7SdbfZudKd9rbynWyM","last_tx":"","owner":"yp9ZAC90UlO6qJ4NJy-_ohIXI9Kp9vfswKwyN3NlywEHiO6dM1hcFHTvs1pipy6Qompt-kllQSKPhqYx6E5TlsYbmFPqeQXpP5o4pJesVZAoknV-ST9g35CohhaU95iS7OxfDQK3ZzHcdsgn_SH5jSwGSgz-jN_uLwvSiolYL2sUPYZ24HTG9XjcULMg9YZU0iAwkZgbRul6d1OC_vJ9sJXXccSKmCLZgk9VXU8pUT4GH2qxOKFuKcklX51nhtFbMKy0F2IXMHvSfG2CQ7R8VbGdxZRmourHBq8Ino0DFm2mWpsv0xKuOPQd0sjZG_CmNuhrEREhZAKSdKLASwz3dxjzAcnYMyL2PM8yJpRJsUFRqf31Rjl7jSZqNbh-dQthY8fYKpFdmGJKH7pM487WppOXD3-hSV30LzpE0IOySqOCX1mwkb8wxPE0BXQTVrRSIrYIUuo7Mln3EngAp_jbS_X45BkOvDqqYkzNtCrivsRxoLDIcbMG7OacDPj43Y5mZqJfxiFOampJiZvAdynUSVfi1LKMAsZqQFY_xPEGlObOIgKlKZ_qqdD7dtDFxkZ-gtbGUSnb450bwJaJXONJ_8QARuEY9Io9i0LjlCY_TAYpcU8lD23vNyvDtJrZg1KL0T8Iuo2gtmQm2lfCOGKJDGmJTne74XzwZDNLfrV4I-c","tags":[],"target":"","quantity":"0","data":"TXkgZmlyc3QgSUNPIQ","reward":"0","signature":"M7iD3wy2R4KAj45NmlAFPce5Jl6PSzgt43gqxDYeb_M5Ak2GY2_f0aMDWfYS2htbAhnbN1AQzfp1jc16nWcpj3DsAU4PhFg5lWjj5CUCBPMran60ch2fYio0fgs4m-MkkJtma71huuk-2AHINY0eMJkNNT9qhi0iJGKtHbAUEOKhPQtVTPVt6VjpD00sJ1I4u6-VxkuxgDnh_dXma1nUNihb4NKKA6DgS30CU3_Im6oi0til3fyjNDGKSBoW2FdV_UFphXhg9KGY9vISEuZ9gTpIe8PxChnyL6LY1mfHWMXuUjRhuLHFWUDOaw1sMHGMjaFQN9wO9kKrRCWKqk5Tu0J0Yr_itx2dADL7eruiS7loUcJbLA_IRQimTNO0CUew3dKAUDlObBVZEJh89MObqKCDvLsFveWnqIHnwyWop_oXEWdP6QUcRPGvXLI_9R7x0E1GHb4iZTZVWng2L6Doj56LhBKuz6kJtzgf5wjzdTa7hBNTHA2dw5ogv1IO1Ktz_Jacv4ezG54m8v-mZZICJ7IXEOppNQDuBnRgBvkTl_ON8JDxwPmHp0GYUAGx7DqP6Y-YkGnydJFxsU_RApi9jeGAuCx-mCuti_DGfPGfIPgr152Gw75WgqRfMg9rzT6bOJi171g_f1uM40wPhSf670tKx-nLuhcAoUyEKVuasSs"} ================================================ FILE: genesis_data/genesis_txs/Y0PLaTBQ73JXn_jHvldOKC3jdbqDbqTMkcW0x65_Jek.json ================================================ 
{"id":"Y0PLaTBQ73JXn_jHvldOKC3jdbqDbqTMkcW0x65_Jek","last_tx":"","owner":"1WQM7cWKavaMhqtRWt-1Z5Pvz03UtZMrbKjnv2Bla22etp-8Eq5dfIQvcuvFivqQV5CP91eefMhoyuUUD-UTaw2OQV6I6WcguEdHiPsrHE55kylT0vBE8C_0gvR0SOwTUEywqCqfOiwkuGoPHgfJuXt5T_aB33HqZ-Qorlgq4hf-Bz2tIYFR2EqWQ43mjYWmOldY-5g0cTKMZ4BIgipg3Bv383qI2fEQM6aeGtztUCUkTTdf2xH2XfG7P7qSSqi-3IYdkHoRI6OT6F7j0chhJZqnAUEopqg8cdN4A7QmX1nLzGhAHrZcrcd_DohjrNsgD635cdSjnCixcKEvsERKPrQmG3klaCDFVKkCi0peGsHudLYY4bRIJ84uinnmK1cXTUqcnDca8EWiQJHtzrylSBdT0FrMfQZwqAj92GL6VONeC2tp-Mn7dFlH6PWCna3saojhGk2q7-0Gj-onuYi1efkL1M-6jJs7liMqxdrSfm0I-FbB4uGFRPKrgr-rsCCjrS_hUHEA5CKsssZl1XbAhv3YS4plrkIIzyRKm8z0WcG72R8hAoDz39_i7rPSe0CcWYoqsbOkOX5Tsts6B6N1EEl56A9-M9r6A8fTPRHa433LW8YwCwi_Wn4d8hk73cYqpGimxBm1ymZcje3MKx_aS4eBeQ57VvTJY3nYm5MG9HU","tags":[],"target":"","quantity":"0","data":"Z29vZCBsdWNrIQ","reward":"0","signature":"vP-jZoTX7PM_Vl-aX5r66iWmP5HlejNyvT_zlF6GfVK8fPonyWXgh6NOMiZ41yNr_MFCugPjHGnBNDH-A2A6tsI6BcBK2Wuelxf41dsJ6CI9P-wthHJWctC--SogzargRFl6hPhcXlZc0VZLe4HaSouo3fzReoxBTfR55DUQ-Go8efcxb_873rvembbf6jJToluaxgVeu10Tyjaw3bKZ3reG7vcbD5S9VxoPdd_hXky-IHy7KZ7MFNxVhHA9ZR3cGB7wv2V6XoI6Wrpka13p7GU-9fUBu0G505JLm_JJECMFB9kkQtitwRqZcef6nAuy_E33Fb_025GSRPEL_MHSW2dDb2utvWc4fDOd5X8LbQl2O8Y-iEjWJIShELHionxg1c-v8Z9MIUgERvd_B6eb6s9n_XS1vL6bqbtSJHQPH7Apz5o4R-J3zTe2egE07pT8lBu6Qug5iqiA0UNlbyqlFp7ViA86PWajhBpJ2nHhmk-wCiT3OSRNvVcvhim39OGpRGvKz0WZ2vDzsLbmwmXROSUOtiWrpTE83YWlFenwY4OI1-QUMc0GvtC_nPNUY1vyBJETKyRRT_BvAjTlwDpIyQcuzCD56Ze_0gaTYb4tDA664RYBxIZeC1s_tTSQmQaUUbvuhi_r5v97Nhs9TRpI3vZ390V0GuWsI4LpxaQFjWM"} ================================================ FILE: genesis_data/genesis_txs/YIEEyYfNIRSjzm_gzv6l5CelyL4AOzKX9M4XPXRk2Yo.json ================================================ 
{"id":"YIEEyYfNIRSjzm_gzv6l5CelyL4AOzKX9M4XPXRk2Yo","last_tx":"","owner":"1PzC8obsqe2uCEVXHHbA3FFblOnYIKs_x7_N6YG_ld_TqLIU6qeoI747oepqiYq-_EhAyC_CA9oqvqpRGAPzf3olKza7KypN34eVxl_tUpChSegVgxB-M9GItqp_4xa5BLMn8ZVOw2NtdanHMGnbtTy0z5Ds6Fb7cvk0T24_nNEw3YR-1RWXhnnmGN6GfyTiCd3HB3P53mAmyEhyRVlk61Gq_XhNUpi_LdJj1ch6dELhWmlPr1S32sneTrwzNUp02PSyaAWyTHNeW7XeTr3lzkWntv4VrNCdmoErLTRJeZ0MBFbcIi5XtlEIm2KbwBhdb26uncUIznAdBlJEjPGztqgC_FBVL_Qt69bnoCMPA9lhcyT4_5PzZczkA2Y9Hw5QNWayBexncL3Y94RUSEuIXtEcqI2FsA90toM6tgfy3RTvc1E9PEy2rloWIzSqPFFK3Ho-xUCxrP3Qw-yCBUWjJflJZSh32hPgpnpHC1QrQuk6lT0ZEPS56MfwqlHLosYwWUCgcGFgnjlmGLJJ5AFU0LRCGyoxrC_IH3_yir89Oz3w79eYAWoXBiJXHl0fg5wzv-8L_1h7PgyBFyrYKqlPqEle_sn4i5WI6r2fK8BwZmeavRmtDgLUImKsbfZS-kwOuXH5dCSDJ8Ir5agSCPQXUftyoxHXMBb_CyHVWY6CUs8","tags":[],"target":"","quantity":"0","data":"VG8gbXkgc29ucyBmdXR1cmU","reward":"0","signature":"E1rWYyHSZnJMSWBiqtS-4cxFPh5yipa2bg1oEfaCQrmJ-Dtsk7rkb_XllVd5S17mO5egIA3tkN5P8rXOyXVyLm1Q6gyumSERsbjPuYdLhlt7KCVqcYchtDEDmpH-rnnLoF5AHjXvEXSGl9it3Lo39NUSxvkF9LgcpygSqdayzpn2Rl0m-KY4NNvbdrmHlLMSrxSHFssKcxVtoRHB5pBz1Rw24VT5EgIKKYxX1XkqCC61S-6IVImuSbWaBWiSnleVFsKYJpgliz9R1P61gqYD9472dPMaT1HjSdcxq6X8WlTwgLtbAz7l5V_Xs0a1gAD0CNOfMYHd2-p-OZt1PeemNV_BXyE55r4AtEOAbjkRTdnAzBpF184r54PT4bgn2Y4zvzIX5njDHO_mCwTMvTPaW9F-u2lVOCu_b0M9oEsdwT7kKnBpgtRiMsaB0xGLuy7CKwfwsSUqlhhdKnjuJSzyOYbA-LO9bjL2yb1FBhh1Hig9hotym-Jd7H7LCieXY6jR85YzTp6qNwWbPwhLzks5vYciK63k1kb-cbTRpEN-A-OE0fdi1GnAmmNjJAW6U7RtBRINO7-APxAUUXJtxGwQ2ILEeEN_LXWqoa_Jhhkzspvta7DSGwK3dWQ74Q0jyiqnAUvD0-UsFouLuPCLHWkmouB4d9TInL_EJq8w89Ymot4"} ================================================ FILE: genesis_data/genesis_txs/YfHEyNUGsOUiuqCgHV127cg2Z5Yap9tcQB1LH7tq9ZA.json ================================================ 
{"id":"YfHEyNUGsOUiuqCgHV127cg2Z5Yap9tcQB1LH7tq9ZA","last_tx":"","owner":"4NbPfDsG4IXeMRP00BN3T8C-l2zJ_YEZJJa1wr0dV2O82PdjJ3Jj4dc-tprhOQT_Fw6YK3IPADQ59NRAgJnLCxDf0yg-_Zvo9xeF651Yv12dphDUXUAuZXRt_VwnBB_uX6LOXxwBu6TbiU0mFGsHm4okSqvkB9BvTH1pfAui0HYggoUwy2_TSF95xfgBUgPAuSvxZwuanNqVCgVjULHqw9KkCsFEmsvpvGT3qBbn-coI6c8QZ0NhWt5f7imRFoF75FBRa8uglhwkG7E7fSJ4g1uj4XHr68XzyRExKcMcHjRTcHFOeb6Wrybvd7aA0PQNUKIG1gSUWBAReHge43JcTAsd2cbOngrFv27LwBGAsG184vMv_y9G0_twBNeGiL6ALaJIIB8OWAuMiSSk-DmL56Mh-BQOkYMpiq6dBz_OuOMKKCYQgx2p8sGtd-AZ-JQwJUn_lE38g9t46SG8jWYZ9uW8Gy7rMMj62GtPIFiXvskHaTYtP1XnPZdrLyjcDTSaEaKaFgEj6i127iVOQxO88kSM2AWInqtrcZRClbTbUsXMhi_JiakREvgSB5oqQ62PzynU6t3ADWRp5mu3fIYV919wjShMkzv1CW4cRARGQinoKvlZWfTiJ0kdQEjxKd-iHSvGBuTZpP5BtF6sqoKWcx928vZSXjgSP2rvrZBv2N8","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"z_TSXuZa0x3ewXDy3CttmmTeqp3HnMFXVA1AG3zRyx_84A49KvaApkkz_g130sAYwyGNFUIuyHtaLL4bu4LVF8-L0LCl6DP5UjSLphp5cZCistKYouO0COep_27d1Brx5H-cnnjwcHW7MxYvto774i1YX_E4MYnMw3nTukcW9teez8yRsw6sWpgz9HcbXQPoDNzxUizlLQP7g_HESSB-kKltFd0yWbOkqbP97elSDwYuxnPvkGyZDDMnivsqB8kvcWIAcM4UbTdlwIjsztWxqqPWHAhUCiQLu9NiOBDyWRjadEvgaUg1IYK2Qe49nna1Cr3NwogmH-tV-wBrEqtW0WffC2Sn3EehD2Buw5a3hh_qsyi3JryDkFPy2cdNoGwT2BYd8ErmtOnRNoU_RLMdD4kFNauZOCj_DJ5NmfxtkgGlB3lzmCv0aelmpUzwxGp-z6IPJ0kcindEoYX8OWw4eYX1O3DQI8fElz1aMho49ZMxdlh37XrJ6ol_OVjOp4wfvMNy7bZT8lvCHk3TOZCfOWZh3T3E2GDVw50UpvpRJ-Rgh11Y7E0n-3IerlqbC3uunetKqgL5N6q0kmWSJ6OYXzSjATO5t5YwV2GnDzqJ_TaHcWxc0EtFAehSpAC0a91eY_AMCWwuDmideWIW2o58KXtM5qWH9XSaO_rxorU9zJI"} ================================================ FILE: genesis_data/genesis_txs/Ykh5TAI6koBN4UTQZ3GNIDr_uHNjlpHH9HsvtEkoWLA.json ================================================ 
{"id":"Ykh5TAI6koBN4UTQZ3GNIDr_uHNjlpHH9HsvtEkoWLA","last_tx":"","owner":"rduXOXjlJ3kP1SF2AuhzLzrIuIVGgqcm2yzWo5JY1f_OD2PhJtff2tbYwCmZ1KWbLiHP4GnqX9zdHJ1erYZy-8d0ZsM9pCqSwqNFy3HeiPG-eNGfdLbHy2DIQCJ47ZYBe8BW7lGNvNBcIIEihs2VPM8MlZKmJxhyI2bDgZO6ONxokw62wQbX-X4JLoG9_-zKIuYt4ZoW6xIkBpHifM9B4bsZKNE3Mx-ictHx5SzJJpoYlbxgxFXfv--zhMU6lnm6b1RKmIewFCa24aE1p5Sy821eczA0b7n3tWGTnPeOUV8mSD5CSPRsXPB1SeyamKvBjYh09OLQ_el83Z9zXDJen-FXBVPUnTMGCAmOa6fKjGZENXR2vc7479jJA3dGiI1KNSR1tYpzHvbK1GXWpYVJYBTC_8BYZEjv5P7zUGgOp14JLHJ-jUS-RUBa7Khf6bB0iiC5DpcqATok7Yxv1KV0HBZBv78HXEyyS5lQAKFx5UHnqHDrSVLK7Bhzk29ffqeAyLHAJuKazM2-TdRDCapsNg7-qAWYVwmoYDrrWQ79_Qf1vaKClBOSquI4Pgf-y5xoOvnl8jraoxg5bqtkStsq67qRuEjERpZgnf7A-ncpVm1VxVmYKkXb_Xs31WIwxLEVYuftrDbl45SEHve-I9h4FKFCj55TNoClUaj907REE7E","tags":[],"target":"","quantity":"0","data":"SSBhbSBNYXR0aGV3","reward":"0","signature":"FmKNlfGWRM4FFG4lUdSWRpO_2Tef0IP9kKnethsdcQt9NpXMFfbmYrXUZ4l2ZNTYXhzrBs1_SeedzIAK7qjbXGdmtJ6T-3LkcTptLWQh1rSmrWciNXJd-30weMRn2kuIB3MplWWmD6rLuirK6MCF1U5wdS6bMxXoqcjWeGNJZrnc0WdlcCOBp8C3Ddub126PdlivdqvCcEi7ymmxwRiB5RHbWMH__EohP2PNhUA2-jWPPPjmZ4whH4Rdn4OIDeybeMecsFaBne4NzVO-IgSFlD1M6sfIAJ94ISmw2dzraEeqMq6y1TNZ9AaZZuYNrvcgjci5Z9StfM3sV1KOn8w0DV7FPlrcdIspix9JUe_0e-8qPp0r5XjZ-m1TO0elxkH-CUU9tJh15rdDz4jGSZUGcpPDdZGaSxBpnb_N6FPJo2ndeRe4Iklsmp6juXnjhG5Gyj7yleoAjBz4LMlB3L3D3MH5f3isKSi7jNKslDUNQvop9DkAhF2RcwEn2mQTWWcYI0fNxKK4RelyUhvFbpzRhuPynoebL7Iq12cjj8hdlUshANhd3PQNl90HiFgWkEoqCWNfhpUdV31xk1b9ApSyMH8mTb4w0HDLiCFJYVzL-B_Zx7ixcBBGkefluOccmUZDcwuBsw-QIs9vl8QgCdqruLu53YqWSghedY0l0uYI108"} ================================================ FILE: genesis_data/genesis_txs/YlalzFjBD8CgZxDlI6eNWE3PIIflHGzXyY9VzPPeCFo.json ================================================ 
{"id":"YlalzFjBD8CgZxDlI6eNWE3PIIflHGzXyY9VzPPeCFo","last_tx":"","owner":"u1m20sw8ENhw3axQIudfK725us0Frlf8ZiRYLF7EJVjQCr03xQ_5aPnfCyYqo5ah2uH9lqwmurUWGn9XHqkKSKWgnhedl8uCT2yLj3Gt4MQLmVOV9N4bvlbmiFuXkB0ywO_WQOsJvx-Q7A3Bu9aWkJrdZDG0txLFe1SUwzAkLKO9oh-lG-yMkFRIGXY1HcCz67tqQqEBpt96QJCtY8zsBUXYLKuDmUY6-d7TrIzvGhMtHFvR5enLoWpbC8CcjyAm54TULyPKwjYUtZ7-vJWgctMxyKDw4GfR9TQlqHh-OfX05gomEDcOsblkGHswvZRGwEY3ibMAIriHnokxu0hnstX7DuQSGWAXir-c-8O7QVmkLrdxReq_7X8SCmht5F_SdaxwcTtlKCkZ2tDjsROPHpK5adiVr42JufnRkBlH9-CjlckSb3EN0iATeAVhWM2cYMFpHnzriuMd0_HDDqOs1EcLcp2eDMzOkZrIFrBeYfpsVal9Ulh8DKPPHZdeL4zQkKonh-Gs4pK2s9sUK2iKIyWLTLFqXKQGvVE1dntKg2AGSq6fZWKCNejC8m7YdMIOl3VICfbCd8F8YeTSa5fKD8baZc6rvt0Nn0wSHqytkc6ZM2xaiTMCfV5y8EUeZC5uh0pX-EfTdYEr4OvpBgP80FCEinx2NSxAXlMYLwBiZfU","tags":[],"target":"","quantity":"0","data":"YmVzdCBvZiBsdWNr","reward":"0","signature":"LRB6f_sQoEEE3nA_tREDgr5wGBDpgGRB5JWPOSD-2f_veeKjWbHCUM_4JfkUzVuEYMWW4OeQcyjZSeaFck2j2z3vgTatv0PyxZcBnZFTtRXByH_ODuEUzQEhnDToG64tE8Ew1pOH5tmIDrkdPt2-_8mG4Bx1JFjIMUcrLWBegmzqzR4u9A-6Zv8GFB8P7x2f0zgIsDhX94tdkxRSxrUm4CA5y_ap6XbJFbR2hHuzCAwQkq51y09HxsPIM7BneaT6_lzWcZJkCx7lTJx8-2i92mveGHUirJhq3j05xLlHFmAxf-cn5Mc3aruMoyUftxKN9qC1aREAGZiWwZpecGY9Je6s6L5mwkrygGTKMdOLMwbUAkUmoC0HLtPr-WHH0gaReqEUqeDkD0nBjVd_F6I2GQVo1JIYX3DdbE6CmGduG-da9tzCkmMv24FPKQtexzGawZ3zNQ2d6YVvuRlmTIz48m4rMP9ZMbiSIedC0P4mgafvuGHjSixCm5zTa_UwfkNK1NC_7vR0yAscdTqK354Tt0kQyz_gqwUywvFaVfuLsld4XpZ2HvMUqYuU9dv5rx5ScmsEwDXDcSuufz-LSu9LwfrdwxzHO4jt2eJf2ax55vAZiiy9DUBFXbHzA428duTV1NtrQuYH3F_huRLiPhD3Dg2rSIbVT3KyQbvEPafh-s8"} ================================================ FILE: genesis_data/genesis_txs/YukfPvGxtYmXFF6wJjDiZcvqmH5YItxwsoLbMxWCVFg.json ================================================ 
{"id":"YukfPvGxtYmXFF6wJjDiZcvqmH5YItxwsoLbMxWCVFg","last_tx":"","owner":"qxI9YZZB6P-vn1l4mGdTz71gLp_JPD00tv_LTNMoyJqaZWQ_P-SLPdWhS2Ebg5ui5Zdu-3kqIeQma-dnF5FQoMDn-yMEoT0oPAFC0nhXnOXPHLZ8FjZgWR8PvcIs5QVJ9AlmEYClsHft7097rbvRevBcO0lMT8yoTFtqherST2RZctaeGbDg3wWoPF-wTOnPdjyS9Ja2O6vap9MLyQmjcSHZsDcJm2V4tcuprncdlmebxxckSCwMPToyY2aIrAMaNiGUkbfn00GXUrgHL2-cMtsGawFEnJSdrGmUXKyi27hZMY53jaIb6K1Op5i2AsQAgAvZasBo9d9Qe0h8Ei_b60v1-HH10KCYgaefrRzSqWupmJ3dzy6kUpdbY6fRCdgCGkqmmVzw3U0KHqpU79HV2wQw-v1Hx-1Li98sY-dr3U5oxUSNoaVG7KecE3j-l0MS9mth6ctic780i2so7HP-iB7v06Ms1vkVRQBgH1zuxjXZqeCbZCT6W4UDSA74PR9dH_qLA1zjgDwCoOgHQgp1r9NxVi5YtYd1FmsjFPgCeMCo6FmwN9kZzKSRkSh9q3NbHlonDXq6EJNXhxkIa_eJTW0H57w3AQaRdZQON2sZLtsGQy1uQAY_xSPtl0VhkcP1-rt2h2uI4ifojvHVjqxrccriFWRJH0ulAKzH_8Jo07k","tags":[],"target":"","quantity":"0","data":"SSBhbSBlOTl5Lg","reward":"0","signature":"IpJq_fKGJsVbd_s17lRyqbvRqOr2PKE1sNFey-krWIVhmGOrzaRmL0UgZsPz9Vz01Exrrb8FAfZ37K4bN47q7JWbzL5uY3Eyagr9FnM7Z9QkcrdhkDRxwiApgog9GQhdKukgb9NvOlxSNJitSHJ5e2umh-HCvpekZd50lyUST6uRiX_PGsprZiFBw218ZldkH8ciGwufA1EO0MFRPc8yGHZf_n_oCMVRxYyvJgKEa3NiwXcGWlRTtJJ6NKq56djET0s6h6ZGUmjvpQ9PVs9wByahDNcjmMROolHITvNsbDE70TIDWUM0Ma-2y96rQr49L508YWlkYG9gBBsbKJ3vAYK8Lj4lB0xGuXXDNcRSPk91NmCXP77bRML4SS3QLtqjiQk6hOkceZz3Ux6tS0aLyZgzPG2ScOaU-ry6TdEVr_bErYMNAyL7VKZnk3aubTc3sw2Qe1ajRHn29Arz2wl1I4dfun0Loq1mvE4m0gdy8ziX1VqendMfE3XxF1UlCPMks2t8NmV9i6e2b-DoES62HjR1tFGdIo0fq9B1ZRxMsZK2UP6W46P3axJqTVp-6A7Uwpjy00P8yyBTIPB9bKYCV2JhpzFCWasccL9cmrqZUsmMt1hYaDDR4CAI1NPv_tRR9DUuEGqTWgUZYj1qoMgoVRMXQvwmJzMPZjo3LhIWwAg"} ================================================ FILE: genesis_data/genesis_txs/Yzj2WZ-3q5vKkBJtrmGlVjZND7iqtzvMRafS0TnQiLE.json ================================================ 
{"id":"Yzj2WZ-3q5vKkBJtrmGlVjZND7iqtzvMRafS0TnQiLE","last_tx":"","owner":"0ydRVVAQPDZx5qkgthV3mDUfz8et7Tw4fsU4cYIxf0XlHPHRu7COGX90mpXawdFFFZ349ZhKjBFMZJWwTCUp6tFOKHJyVPEMMQ-XxLmz-Ovus-5oR_AjjIQAT2TCZvxY6-5WcZZ9FeWRZX9OpyYPs05nsvToFbC6mHWw3jCL_inT8HPQhjwFeiNTy4gfz6lViD7B5GR8_BnzNwzt5grgLNaPqXbqAdByOh0fQ1agQUegg4io1tEZJAaSubS3gexBZcpt0-Ui6prSh8X0gq9ERuMIRM92_6R2gzX8QxosA3fcSYo-8PlwqtnNOVmjzm-NDKmrnUCs3vt9470qAKdZoiuVGOWLFwiNwMOWEyjk_ml1vzPCaRaA7r5Xbb13lfy2pbrzxefx0JWcIGDcTAHLplRAsjza7ezZrS-2OkPPQI1T-LxXU5Ra6OSMPZr8Nq1khziohsaAh2t1WVbSUBMOHC1zTvKjiKhayQ3skhTr8JcLZHbQpSpvC1uriwmeYyMovKOsW6bQoskCWkDHJSHj2wtBU_lNbGjl2axlzhVMSh__3-w6SrToo55APJViHGuj3GZJdEva98o4pGmRAaVoxAvVIobHrmVb2s8sVSL-LwhzpIFwfjBKnsbRFA_YQ3jvqJJYvBGcgFAFLo0iJIcTZd_ElpB6tl9s_s2j30LS9OM","tags":[],"target":"","quantity":"0","data":"TW9ua2V5IENhcGl0YWwgd2l0aCBEYW5pZWwgSGFycmlzb24gaXMgYSBjcnlwdG8gc2NhbSBncm91cC4g","reward":"0","signature":"fdNgnqEqYZCSfWROIMxGivdIsFA7YNuodeyzMUIYaREL_TJNOTgKrSAXl8v_n08y0Y3ypB86TQqozr66mlKBa6U0fAOlOhzesDf5lSYfAkRyG8PhUABKEbVTsA4gnOZrQEsj2yXwZXiYjBz183dSsBKYoh3YMNHArl8biisbIdTDNChzWmdvFYQoFqHP1GTL2Sq_yd0CFZF7JZtpoF8d8nHoCITAtdMYHPOM59I3lFrgmjRyE79-RDwmjKbYJNfNtYlHGOdaR0UezV-uj_Pc2yWa-BsYG8thsIoLMMjxYQZKVFQtWZ2nmElu60-sri5nrNo51UYIBhYOdThfvEhoBgOxn5lirLZ-OJstfZ88jiWKpsdXgoC8O8FmqzgVYY-dzOFPet_yoZNCEBYD3Yy9HTI66n0RoVoOo-A954J2w562paszOwu35fa5eLVeYlXNTDLsc5QGM_csrwd49PUBm5kLJrWDuHh39hzMTPsVtR2gekX3iJWufDFFJtlJ7AUExSfhaCNptBtUcb9Elhwdg-ayZF-XLWHYez28eP3DXLh2BZnznJ-kG1Kv1GmJvbzvC6EgHcs2fOsDV6MZpInv6rkCbPcjswjiIuywdBqpjPeWzqs1tPzxLb3J6SvazCM9gAwGJc8cZtFfzFEdX-TNLAHg5pijIzJ1nn85pTRrtb8"} ================================================ FILE: genesis_data/genesis_txs/Z5e9G5QMZ_scJQ62qoqUs2XSuhknTuuAIhhGmfg3Ye8.json ================================================ 
{"id":"Z5e9G5QMZ_scJQ62qoqUs2XSuhknTuuAIhhGmfg3Ye8","last_tx":"","owner":"srhJUJV2ukt0E3h2Wd6O0Zgh99CPDZlqr0K6iLCU36uWMDa766i0IWP9OKUB-HJk8w-B75RH_fgoJiLMwcWEbh_Cxibi2Sd2iT-rRTOs5QVHD3Ek4GzUiebQGv_zlWF4aHwCnQcT_hqd6e-_YYLK3QhDZ9NOSzPd8godYUq69PU6BUYGG-TKYOg52Q37hH6d9wffjeTTLwLMDb99OShu5v2WWnkplpq6aFp2oLTowsribUdRBQomYtWATET0RIPesAm8clhJFnhRh5YgFS3XVAxCFCQb2uAYRv1yUFzbdIGgcuUwLWzuagFKuTUvccOcU0Vofy5AIZ7lnbvQSKbLyb5gh5sH3JRPktlW1l9TAN2MHi-1raxSJdDSp2wmJv5NZbA_COgcKmZNzM4Nu1l8E51AAScfbqh_yVR559T11fRXVBlZYcLViSwVvorohJ6ZNIqyjjcCh3sOZ2_FQbn7IwjKOJtneda3Ng1pT1CpdLXxhUrH--318cbNwGzXbwiwj5pUGwVLEinSNnYmDEYA0R5dFw9DjKKNuiS7ap7rJATNqwL9CprY9fJr-vwPLxwtNgtlsTMajXsBIP97dk4LHjLb47IkqYegvBs9KQIDZa44J79sBmZrL379Se6wXdldb7xvKf8KAVgFX1358gGEN3HYCFVmvfEAmsAl0AZRQHU","tags":[],"target":"","quantity":"0","data":"aGk","reward":"0","signature":"hmdah5FA3j_JwwcarTjhFtR9HzHYK7buqMyrtBEmrTM0v2r8zy927ImBvr7uPBhysY_0Kdle2E2S8T3_9WvfyMvSn8nqWLV2Z7YgT20EA6ZyUFEH2jl6r6hGUaWhtlx37b1OC9MoDwWTpjj6Scz0SzzGPZ4UX7eQWrUJQ-udSDVgxwSoRMSF9wSMNeYNsORGSD7HVHOBjQqPAoW2GLFcuNCrWzQ8EtZDP4EVq_fXxc8sNcqi18AMjRqb7pHFQMLaTD4UWaeEaSXObSSSGlczZ4CrgKoMfaeF1fjDc7l59Cqoh7i9hzavypvJ6bVM9Ci6H-8_N99UvHvS_v_BmLaOSaG-nKwBWBoSRkQo8HmHR-kUNO_u8awCu-OMaugUe0ptGlmzUP9mFNheIw6O42t92b1iCar9vUixO9CkhjYneAXuKrbOq39VbhEIHuZGpQKHtaASuuPXrlgv2qtIgyQ-icSVI4FPtQ_1Jcs-tkA0c5gJnyNhIQTBlB2msqlnggVlQUUIgV4rS5WmLHiWtymz6c7e8DlH78vJJXJL0gQGE0QD6ATWsfSgPIgHq1sB8953ycKf1qDa2WaAc0q86geSYoP5PdE08cSHehAkz3cUn8aDMeisbLJeUq06-38reDnG_-M0gsiV4I0MP4jXfyvLaYWUC6bkQO8U9V_L5BMRq_8"} ================================================ FILE: genesis_data/genesis_txs/Z6IgRWClifhTSnomxJet2WLw8UUaslmqAi2nynj3Ke4.json ================================================ 
{"id":"Z6IgRWClifhTSnomxJet2WLw8UUaslmqAi2nynj3Ke4","last_tx":"","owner":"o7K6nzp9yXeYCITkjRfe70OLgCayO6oRTPtgnGdXobRGdi_dST0sW7EYcJbMgeW2ck0k4hTq_edjznz8WZC27A63IUGzPN2sDVHbipUUJ0oPjyL3e8GzmOY5tTmuGpMnjY4XjNatPGakj_SPUqSF6tJfWcefVPvT9sgZdvyi1SAkLLqZJKiGbxgOZFzCjjnpmPIqY0vb-BKRIRgvugtmzIfVD1jNiPD2vSEZStCWMf41AKZuf15vrhOIbyXE_iFPzEb_gTkMnD5tw9Jz_3ru8AIKckfZaccCq_6rpHQVbG6U3BSnqhawXVT6Gjc3tmt4zOIqqPf28nl5I9QY3lInphAI3QLEDotDvd7N6zCOygP-rsMtIDC2_agIkOnhxMWgD150Wy93xH-6reEnJwuZpF65Phs8rUQ2J4U9nLyd61zpDAN2y6fCxa_tA54cHYrt6jzog3cUAlXxdTr7vc-m_FbAwTyKiYOZ3Vz3jZowUJqSjfinL5CI4R38x8ksBD_x-01RGNH0MkZMofaoqq2gIgKBESjD8jmwiJZg7YOCHeQ71KnJl8b8sGuWYcIarj66wLkh--l1TQ8rTkibDYaBFln-91IwsxdDpOP-feOjcWnkgTzYYuuCi1NW3SbUmJ-LQGYdlk4Ym5p5gT1st6dLu2x29EnewevQ0QROl3NNJt8","tags":[],"target":"","quantity":"0","data":"SGVyZSB3ZSBnbyA6KQ","reward":"0","signature":"JQX5Jjqvfa1o3z06xGNxpd_7JD_XxYxiTYPzBr0X9CuQH8u1GoYx9YFeeBxD4GjMxwMb6o8n0z7NUN04B4YbNavSlqHxBxVqVmp2mUBZdNX28NfWVQCTHcrWQ2xjtPsxgCR5Up8Ujql-FMr7KgjqSTGv0zCVsON48Q2aVd5FqndGGx7ZqdH_pP-Ju-nqqCwCpy0mH7y4JNGyC469JeOBq6EEduIJIqNPcssWEk-S0nA9QGFZm4vL4gmcH68OfwCP1WjDXnhKPK9gwnGvQUU8_lSanwkFZFj8BA5z19AwjnHy_qdnUYmclZJAXZJXSl5NrTaEb4DNnPKe4yeEc4t4kQ0GtDNHKaHgQHwdTDMYRuQuak9zEcgSgVx6wmZtQ0NivyPpIFgAZniP6KEFAOCqnNpu_yhcOO_SKu35AgJdmLt9wjXyglZmn7BFNqRms9i37Kpaxph0b22VO_mR1Fr1l6J1BLiKSf2UTy605C3TRjyAuaQpgb07LnWxMI9Lkv8xNCT5XKLk-cSml_LSup-8MIsFpxMT1pzIf9BrFamNYKyQpF99Leh7UCylttIzJ3Ty9-HR4-nPXoOt2AXQuSr0D_9jswrXYAQ77LFKEeYWIZMllKAabxKoALbob21xOqwQo8zMWYT-F0xQDTDlP5cxpETGRc8KMrNpmCICRgZQJMY"} ================================================ FILE: genesis_data/genesis_txs/Z7gfizrPOypT4Pagg3oli5g8wA8pbKB0ZJnrw-FVyys.json ================================================ 
{"id":"Z7gfizrPOypT4Pagg3oli5g8wA8pbKB0ZJnrw-FVyys","last_tx":"","owner":"26wRF9oNU0akucC-BqaFA0-8_6tt0wcUoQAtLgu2DyaUZuS7_qsvYqILCqr6Kjb6a03Lx42JHVtrt4XrcD2SKSxHhL0Oi6ugqJhJOZRiro8GGKjlH_K4-IhPWMcAr01vXUh2Pq3qLtY1GsU2zsSVzY72jLNyHIpusytNapGXF9GH3xT6RMMXsARRbEuTmoNNYw4YpJxiGIogTiQLBJ6B3XDRQ3B9j2xYZRF3PcTpKayUozD62ekTt5XEFT7YbTPElQDeHwuoUTxvf2WhA_b3sb_RJUDqSLUpptNAybaO9rhW2TwymQBkJfKIOUIfcREtnh-dqbpvvLdg4flyAxGhdW8I7LkD5AkqTzyHkv64FcGAbzB2nxxyYE9ZnfJ6BzvyenBCOnVVUs8c5t7MrXqCp-MDDCCijXAio0ksGkB7aIwwoTfLSy3XAmzIME8A2vTcWMJsB4U4MFXtd47Z2jBXOxdHS43CjdmI_pXYWvmgk5za830XjYV6idLPqZjPsusRiww2ZPD0ITPRs8yMGnT47Rd9HZzVzrCobRVrQiSGJO1EBOk8N3iz2czxyCNR8MOsKNI1SSzN1Y-EZTze3oz_AdyIO1sRFVqfxUYSMxxHtmtaOaz9YN3G3W-4MnzadEC34ntIrJE9bUkFAgiUHPmKi-sepfNPAC7W_xwQ30TgNws","tags":[],"target":"","quantity":"0","data":"S0FPUyBSRUlHTlM","reward":"0","signature":"qhxqHhj1ZR1pA-HQfHhjLUuo90AvFHBxoQnvpz6sm0G-ROtKIWaq082T2gryX0wbYlKXU05QVmS3L_q6mJoKYujWfU8zgsCpN3cUFa30ChbQMw_5_3z0Pwr2d7Lvzt9XlIADcpbtRG3IUEdJMgBKjbSrWN7qph4uflVFQ5veHwOpxOMM2rqR1S-4LF0S6gE7xuFeu8o_ZuSi8Y3RVBpaxl-fU2N-SQ4rysNQ4MLPKNw_2YBWNwWxvkpDtl4_UJjaR73jnEfPsDFIW3hFNqLhuRIMukQk9EmGltuxh3YhZFmN0MUfAM-KyG5QC6UA1S4tH4ZNjPmpI3mzr_MCEAAridIDwIn4LDgIr4wAoSzHQx69tQjf_xAltXem-7-HOCEYNWn02Qj5Lw5dlD3Toxx4qio0rVq1OZ-xJ4JUPs6_Mv8Gcx1q6-Dp8S466bHvtFi_KH94s74i3XI6VcGDGKVDUs7Td3Ma9daia6K3miUv5zBMeTKVXFMN8cdh6eeFuDPBmAz2swBGPAv-htbksAx-vjXRl93e4nikbk2OpOA93OXGdsf2-r1xRazFgptGzHsWi7kyLUasay4oLLFKZCrPOxeT2SuiA7sxaSdLTPO_eKWp5oujjGYsOlKBNdGdCz23iumWc03yRqFDuojEDf-rvlbh7mmVA8Mofl6LLFSlw8A"} ================================================ FILE: genesis_data/genesis_txs/ZAk05et7CFN69E9NwET2mSRI0ISRigjMEjcy8kbO-Y8.json ================================================ 
{"id":"ZAk05et7CFN69E9NwET2mSRI0ISRigjMEjcy8kbO-Y8","last_tx":"","owner":"v6AdxIYE7i0WryfruQmBSXNH5au9_Y4F7hP53qu4TSxKlypAVImLgGaG6CecuB9abqkDZPRATTQgw0lTUm02VUGzPipC7mfce98odcZVXlS5_WP2L6pvQD33oibPc9GHp7_lrEofhK7qZNYnHZB0FTmtL8_b4zOy7LODZA45HNwl3rxVkStd_UHxEZ9bv_1_j8lx2Wolk4P5rnAz-Guqc4BP50VhiX4iJi29dBpizxwJx2TtlI5MFBC_o6yK_-TaAN6rHnPlZUI-Epn1DelmKgOJddf8K-CnXJX4SiniKxynxPvdwIhhJTGCxdHY6Tkwy-ceO2_ACQiZk_vgex8VZPmJRgZAFu8dTsqhEL7hPbZDhjQjPvnkbyUURioPp9qrePzZ7l2pcVXr1ZluFtlaz-WglEfNsYH3zXLucqiLa4mpgKBgiC5uxvs89NxddgbapT2w2svA1Z4iHdN6E_EJpDyz-9Le2z7KNEp3e71U-cQqfdQI0JiiKDVl5dztfZLOevMXKDJj23lS-qmeiYHM6SgKkTLTRUUE1LDXGOH80SMiZHQPcqmdXRZlkfHZzNueDK0PJYVlvQ0cayEceuzwjP57JLxuO3C_xhnDIVdQdEkeozBSDMbmJvK_Zg8E9P6EUw2huHcJzgeq7FpOyz6PJot1qntZ3EBmoVBlXm_Q8lU","tags":[],"target":"","quantity":"0","data":"SUFSV0JUSFBJQVNJVFVT","reward":"0","signature":"fmSdIb8KpIiyrdEYIaMpIEgTdq2kOr7Vwst-n5RSOcUscbj3C1hK0Nv9ZUox6q0v4hcFbgu6KZQl1beHFsZQjLQ-GyHlUYw4aZ15MoEvWLU1KDbmBlFf8PZszJhN4y1h6CTCe0cHjQ25-_KlBkKpxiXYDOfID250F0fkxE-t3rkUNUw7PBDHKfwwk3QBLsnofXb4Q2XYJptdJdW1BFWhfEsO3Tcbmo-irqZT00pyKCdqLT-QisypQtYHd6b3jgAg6T7YLbT16MEp64Mk1msBeQvEsmlAbxhNLeIZAhYP1Hd0dYGuUen03LkgBArL7HaTrsCQxOAiN-U-ZJwwooQclWSJYI4Yi7i5-wvvcuPMhNfQLafXQITjYrq-PJX1CEc7HgbcvesLtyQfSAwgFNyKans2NDA2Bzlmov7JFKvx3AKjRT-AXZ9pRnVKWm8O21wppn4d4YS_t2tn2xZUUNQfzHDQvFOkmDQq5l0D1zozmONwG2fq2s3e1_qHekG8xmDOOeDJoq-b7SH2RwFJKDLhcacqchXFeAKB4scgOu0HMyu5lk8IuK-vFOVTdSuVVkwnVry_vt-p4OR4w0zez46mX7P1nfLkudrBb0sLCiMbB1G8bDbzuN4jFhRIKGsBdn55vpl-KTXvgse7iKSS9T08fi1o76f6r5o0ZR1isHDC56k"} ================================================ FILE: genesis_data/genesis_txs/ZC44Bxrx6AtNJYLwhvpALuINZRBXklme3tpeJbJ2rdw.json ================================================ 
{"id":"ZC44Bxrx6AtNJYLwhvpALuINZRBXklme3tpeJbJ2rdw","last_tx":"","owner":"qlTm6USprMfuH1KKWHl_lk3hvN1XjF_Q9nTd9nYjXGrnqSO3gbEBeqmNSr0GNHlTCMr50oGuosi-q_p1wyt90ZRttHYodZkZs-ecaoUiVJgcv8ccvqmAqPRb3xYF-qCx3e8d8Gekt05Ai5LAAyg5t5BZ0bMt6XekrmKzM2j8QsI6A8YJLpxS8JmPiP-JMKOO6K5-IxSyI9MMW92yYrUgvvXhtNjbhdCSh7OJKaJcAmRESena1bg09xbiemjlXwO7cK3ttUafV7NHxz6sllgz9liYYSaL_xmeAARO-saZ_0PxdsdazpPm8S97v5zXrNkpsFKk-giDHyrI2Ve94DlOQ4oWyVRYSoQ9xPbeBXwNN-IHuDkJDhyvnXWMC51PG8SYn-0WlMGEaYAzJZKmwFNdc7E7JvSF8PyX4M3wdtPUPXJY0j1Eg-p1zFmsUOC7688A12qj0Kd4EJJSg9GLiwnlV-TwCyEEUg3O8MW8nYEs8Y24RhrmjbFH1e4ce5Owt7YB4L8qCZnABboyUHvrItcWizqErpx1sVaz4UyX3n7HYlQboZ8-4CH9GRBYmNEoZjHJf7_Mp0MSIWne-tng13nViWaF4Tm-1ebOroWsLcWsf6vvHsCrb3kANhQQ6cUSXXzTLSjoLyO-1FwbiVRJcMRYnsb0pT93OtrHyho4R1EZm8E","tags":[],"target":"","quantity":"0","data":"U3VwcG9ydGluZyBBcmNoYWluIGFuZCB0aGUgZnV0dXJlISA","reward":"0","signature":"hKBvsuuiaWnhBi60ixzBX9aS3yMSb45ZWvr9Re2rLo8FjodHA5cDyAB52W_bEMbM51nozpfBMqmO8FwDy-Lgqq3pSCxZPxxNw24f6HCw_Qhc00Xq6weVSpXATXS-OMRKa_6Bi3I4qMKcsTohCbT05XrQ2WOpNUc1rTzYSIXJ6KTaIg3bM3hEV8nqKvtyQxIl74OHJJWX_5yxAUFOdbyJrDDnNksIEamZ2MRlSVdhjUFzhHcQeOJAwUCYVfCupy9YB7bEi3KalG2S-k4c_bTx-PKVK5qtsiCx_2ZwShQvcimKAs--wEEdOgxJn1v2GiP1YZ0ZcXLaZ4d6bMrLP_Y0PCbd1Ch6uA128knGQLa663OzFs1HEdZ1_6bl8084ASwd0y1tYoseEdQxRdz_1rIt86SsZA415hrK2FqbCZhWMZZWZUN_AZVlkRCB8jsSPGvKYFNQg-x3ybsz3ep0fr0pktM3gMBLTaJ6I5hwURIwjRndckkhUH3ojYU0VyzN4rLAA2io3Duhdj2l7cvbQCh2iRH3UlCG_xSmrjNVUdYz4-boWb1u_eQvRJ06i761mrtpSFE-gBDU5-rXesGrxd6hkp3BaKC5GIxAcIgQiMW0_Hh6BEWchw2CULSh7ry_dRSZwI_rdaMLYeCQDGyzBY1OtaeNL_Jl_nSztirvjg1WqVM"} ================================================ FILE: genesis_data/genesis_txs/ZEB62vqKvkPK2s_RmxgQ2IhafMxJ_TXCGswrrKLhYiQ.json ================================================ 
{"id":"ZEB62vqKvkPK2s_RmxgQ2IhafMxJ_TXCGswrrKLhYiQ","last_tx":"","owner":"pqqdMlh8U90sJcCJQ1rT2sStZ9MuqJiMqj25vkhyf_Un58csrD4L942JYnTQ3VCTiLmiZua6c9ZcodD3k_mqWxgLpOw1ZyxYuo_W87GZ4azGgfg4jbZ7joRgtUDRAx8uq7rG5PY5UtRicMlLv9UR2X9ActQD47WJCmObaEOkhOhy7maaRwbvBZa8-A19zP4rwLbWg3XY_twQRll8D1zEWTeDI8tjiJdkP_0M5Ee9EzWpeOwum7B1JyDg8hFdFks4NUts_Yz-JrmkneUu9fICUwAHZinyg0ttmDjmhALw7o-7OLMl3n7fSYpKZD-uTpX-frJU777tpINmriGkESgb6aWSTtP3xs6qwFmTL4eZdY4lSldpY-_sRC_zvXFYbEpOoKIwSNruOUd5GiTK1jDGr_kWqpa2ZBmblC2Sxk6pZQXgGYSHiaXaWUTW2VxMJ57u1g2DGtocLQqTVMJQD0WDsz6kUW8sLdjF4un8ksMwT293zPx_vKwbDTqZwlUJr_47Wz4Kpx68Jw38UjxOVx4v7q7p4ZmCC3Cdj2frKyiH32c_xduzQF9PAqf0VSeG3-E_X2J4vRT9RGnSbIqVF-mb9at8ZYUVhMFXAEG5vpWHuFd_c9VwX9cCei7IHeQkfA3d4TrrqUIIuEYJx3WCMrL5ETteGdPXCHovkqvOVk97pc0","tags":[],"target":"","quantity":"0","data":"Rm91bmRhdGlvbiBqdXN0IGJlZ3VuIQ","reward":"0","signature":"FAtyZPfd5BPBh446d9UKGfMaePu0uSSdfpVMmDrfJvOqaWHIAKGYi-DV3MckN9FLMRAlunbuvQ9NAXppe4PmRAcbhdru-hRL_x6-ASz3Mq74Z_XeSB4wx21qtgPD-Fb4Lfd70IKvVHXaJN7Oi-F4LhdTZ_WMP8cPFd5jjGkETePjIOdHaQDjJdSWzuF7N65nAoip0yrpUxMTm9LvVO_K_Cx4lYyuPLFgnD1pH_z-X4nfAtbF2YXJPbrmR4nQM_THqXI498wxFk3p3s4g1iog5EY3buEQ3QXwbhSNKA-kTPBCFi7FoZ4NU7dWBiXTZha3KSaVGIEsX0Spi5mAEUO7yKe_68VsKw1e17LjiX6xXzoB5kCaomHm67mbboJ4s5wz06v3odjy5ULSqyb51aLogwhDg5ScBdN85Rm61luoSaszQbO4tEsEBR3w26SBnO7eAFGzD4QBR_GlZTV0ibDqDTqbNuXHq39jgh2X8hZwoIHc5jBQilXt2TZqp0XTTh7FRfayhxrdSabOEmp2mRSgUXMYftBSIHJlZ9OJNX7WacD6u_oWRtTJcj_icC0ssBb69Eyn0gXPKurcBYOLyVdsDwKvGqC9XQR64MKS7oOpmER6_BBNJoomj0aXCynecMgd3C7jnBqHFokQJWnWike9Zkl6jbARIaaPyrl_GdwzHmk"} ================================================ FILE: genesis_data/genesis_txs/Znw-6H_ayGJBReeQm9z9WKulBH1ZzrOovdMsNPcIe_Y.json ================================================ 
{"id":"Znw-6H_ayGJBReeQm9z9WKulBH1ZzrOovdMsNPcIe_Y","last_tx":"","owner":"1EIvvKlnMTRhtAu-twHTN27g6TQtTptx1humep4vjTLiVLlhfUbXZyWVhtiiVJfwh8uo0hSr1rlM0wx3ZWcksZG_bEHlw-m3K8yx_VL1wSbgk8hNDDM9KS3QDv75ejvr6m__7IvcdbDL78kD51aHX_xI3icrKWD0rcUC2gKuZXoVf9JHrivzsJ0EDUxrxARVtIZ-pCHDpxf-8DEUwcdbP0zvXpyw7q-8dCRhTGN-DdXUT8Ave81mnjjLaLjeFrG5Sluv-RyBR7fv4wVHSPFpU0vkzKAxhf7gNviiNhnK7Vg2lg3o9FbDW_4Dj4iEbehyGJqWYB4xvYQMDw7YkMyHOH2rOLr-WGPL1gNaTacE3vrhlwUXJ3pmqFBX9UIFnQKC9M16dL1vRQzsPF7Zwfz0r0nWtO5_MLSpt03gA43DHtwlICqj9VMmcIKPVC2Rywcf9X9j1Vx5_HkzesjVYPS48zWuySQ2_9nL-hyGCxcsMS0x0VjTV3oV89m0Bjp7RybXHWz826LmIJf1rulre-yPwU3WcrOy0QYH-YjDJrASy9Dj8FySe1-8C3vtMOQZqO5ck64IYFQIRnyEy8JgILLG79LnZ-cRWqCs2cuJcqz1n2q14FPJMbGmkw40RoOqrooLHBxGnqxWS9He9eGOHtOkxc9VyUHfdsZJy6aOdo2KJTs","tags":[],"target":"","quantity":"0","data":"SGkgZmlyc3QgdGltZSBjb250cmlidXRvciAuIEkgaGF2ZSBhIGhhcmQgSWtlIHVuZGVyc3RhbmRpbmcgYW55IG9mIGl0IGJ1dCB3aGF0IEkgZG8gZ3Jhc3Ag","reward":"0","signature":"x6mBqZwIm6gnap5Z5Ddzb7PpPopFPEuLiP58cEzeNSWcc7TOMUIFfDg2DqbowbQOos4b9914DHI3IYTsHllCjR4Z9-0MCMSrKGXTPUy7BXn11Th4OX3zBmHpGiuTobozX59BxoG65gNNUEF9nc-fq1GCJJAwaa2T30qu4jg1-y81DF5VAsaSyE_nsvUMpVLrR7Wc_aTOHHQLzQ3iOTXA9Xaxn6D8_CyoOI4CCQxUBFwDjOWuRjVzgH8iNxuYeJ4CbadWVrUa1aMDW0FknixtdcBq1aM8lUfWaw_vwqyKgfFyMxIK7xM82z957USHNFCPXXaUNEx4klfzfSwKRA57_dbXexd8wHQ3yk44Xr5cpVdtw53SgguhJRfAjKLPGM-RBcBOKS8dOrJ4LelQuaj-mkU6To8N03ZxBb6LoZ0cQ9fmXH5UN-o7CghuUituP2wPSQmufDPubG6V8GXA3EN2L4Q8yhU2u8S4ANMVS1ANELUP3qSIXvf36uaaD7uY8OJvn-2DjC6lGaAKzfpMsrj-bskH8JXcHAwVY6jpA6WMz6tYrwZA7vkN_zz0vtDb6eduWI6LePOc-LepMOfoexabvT5XKMPsS_spv6F_TWIF9SKmi0Z4Zqmhvuf1i7CzNu9Uao5XbII5vaCyijQI_0SD--YjHVaB6jy_IqJuRNqS7yk"} ================================================ FILE: genesis_data/genesis_txs/_01J_SIBJ164H0EedSfQ8h0dMfqet66WKHwcOFQEsMc.json ================================================ 
{"id":"_01J_SIBJ164H0EedSfQ8h0dMfqet66WKHwcOFQEsMc","last_tx":"","owner":"x1tMT_9wL_r7a-wkJc21udxo12FpXdvyHytN0oIg-1Z3OAbMmP9kdVc5PasjiSH5veE_K0SNssre8HTfKaeDaNq_0VKPZQGlGUX-b7SJ8b_rd1P_C_BYix3MT1_JU-QYHzw5RIHSdW4XaqDmdaqtikx-Wxx8rH0oQ-e0FCjQkEWZkMeqSooapFuVIOdTCnS0qUNrkAFL2ix7Pf8ZEMMO87tBA9PEBq1hQ6IhZSX_kjBX2p_6UDK7nSzhL4-9DBzMqDl_AG5r5TKMFYUt50hc2PkcYHD6MFnz4CFWkzIKqS8vM17DsETAFqHHVnJExa7xOufIGVxJvG1kvIYOisMFISOtZI1pzDBNNOGrIKQkjkzE9gia56rT1EVUBXxCKL98k3Dy0arGDhL6kq2Wi04-ks9urLoZY5WK_lShaVDCTsr01W8RHFgvjptP7LcyESnK6enz1IDFjbLSLsSh6IHuR9KmlcZyK-RPhV_rTYz77LYOd5hMAsosGjfbtPVurjwoe_ffx0T5CVMYDHy9ieLVP1AP_Fm-K3NoXXv44sQMPi7MsWZw2IwYSNbikXgH8d5uIXpRrWuPYwlsxL1l9IzkDwQJl-ftxTOkZJ-11n5e5biMznXG7An-_KwZpRgtLR9Gow86EDZSEh8rTAWuPtUev2XaaEo7G7RUmGpCVAzVPh0","tags":[],"target":"","quantity":"0","data":"0KHQs9C-0LLQvtGA0L3QsCDQtNGA0YPQttC40L3QsCDQv9C70LDQvdC40L3QsCDQv9C-0LLQtNC40LPQsCE","reward":"0","signature":"OwRY3S7hCbRJmyi2pQztvaE33O-qDDE3aKXek3-W_dPXdV3G2swQ87JLb9z4r8pe_uJSY8gCoWW5akeUtZ_x9234vGO9NioRAjK7wkE1YvA9iksQr0FjyNVpT3XbOssYyzNvKwyhCcvIG141_h0K8_SPdySsX0l2pcmAEiBSav7c3l9ePlyyU4MwPIfrOKygtY056vRo76uDpPJHKmM67Q-jtgAQKrjREKJEUlt4ZSSntkVkkMmf8uow_VZIB1zlY60GR46sa0O7QCtWmuOYjOoul8-63yDmw-ydEDsczgquxBxcu25qSmHSrgFIKK1J1hCGUVOG05EZvkaCVsP1tM0YdYTxYYqe3BwDRsNHKUX-OQUyD9pMsDOQVOAZYa3Fmli0xbxRVwov6sFJPBwllGdOVq73B6faO1tNtpPHa2c21VV2NWz4_qXuoFu-fKtP0L4KwTxMGl0StEYbGn4YvZdTob4P7xgibOp2_e_ql6dn0Rj_SA7AvaT6sQYtko2s4q31nM19njzVK1Z3P3esh97qi3muXjQmxQfDok0uGSyhwrG2ik_kcrgajfUpTV0ve6WYRXfbVPqPUVV9mrRBbCjA9_T5EA8ytHBpG2XRLqTejQuhBCV35LIi4XtSf04e0fW9KjbUWIdI1KnxdJihICN20ppKCTuR5WjJ13hoFGM"} ================================================ FILE: genesis_data/genesis_txs/_Hf1lw_E6Lyd-0PGkCRQaN10cdEx4M-hl9y-zWiDo8k.json ================================================ 
{"id":"_Hf1lw_E6Lyd-0PGkCRQaN10cdEx4M-hl9y-zWiDo8k","last_tx":"","owner":"qw1sQn1s1D43YeVbCv6amq_TOyhPEp-L3h6VSBX03q-qnXqJa6CK6Q8ocsc1a_v_NG45sOYy1YixtmBsqvMNsNFm60YVBYg5Qde9JK4Nd1-n_qTNPev45Yak4Jw_UScQA_s74CgvtMmDQAZZVsToyJDOi0HK8BdWn5RHHCMYCX6m1kOtEOr46Zjetu42oQTLNxy2zDCZTE-2TCWu7vUbSt4fjgESJX-shwL3ApWrEDBIDmfncyWW2ajY6Ctk-C9nD79hbWpxghps2GuB489vplidTA5Urp28RfMn98OhydnJ-AENTPJvQ_IWyHPl_3tJNlMvsw_xFT30QWI1EJ7ZJ4vWWwmhHPyA_N3w5GtAadaVac2SVh2i_kRad86tye0r1muB88AqoNc9EDSW9vSBNVGWsaZbdEtAOy2ddv1Tozgr7jsK3kWjjJRSiSQ3Tk1nI1kHYyRz6o-FZOExw6n5W39iI7IowHsNzq9aAhoA9NnilcN1GeFmBcGXkLUTV2NcSIK08Q0A3qg_4-GKTZ5RLgkc8DM_JgzBHEPLDRBiWoc9JbcxjkMe-LONbXBKiWpDIawclBQQzfFiglXnvfR9Jj7ebtGjZjoezaFENik3XXym17OiOUq-QZgDUhlOExgY8IAWNu-QH68Q3D6rjpb5pS4aZEU57B9LtLO4mxtIg9U","tags":[],"target":"","quantity":"0","data":"UGhpbGlwIER1bmF5","reward":"0","signature":"Eqd6HECjwRwijPj_NFnJ6JcLfIwksZ3tFtmczb7mZs04i4s5RS-u4Zg5Syr7mVYy8zaHrsPUqJeJtMarXP7EmBrp7NNJrYSF1KxqdgG3duecBmCibHmmRvcS1rQq5DCafJQe7jIqJDDRY2i4mvJHRuljyTHcNAR1qWnkDvQPLfuCUNB6wO697hbGFw6b48qNh5HwYZjwm5sVl3MjCikQycKDQIp99rdGThLdVJgAz-Yglq0nXemYpSPW4tVMM6rUCuWjyh5x1YEghYzL0uGtUrVh933HdRDCrlWnTfMKp_YTvgb-GMJbvLqObksWDqBQNaREOdTB7DoRXBGq6pTQLJqCFRZn6W9nK1L0xx9uq3hXqLUsjh8-dsGrpJ6pGrC7XXBnDDlPGg4dduyjL7Dq6dqTYqe_S4KdtWWsnxcZA5g2kyElMner2rbkcWcn7uduJkpKrwGk2Z1-ZGT9SkRoh6-6tAXwfByQ5ROp0tvZWULHPgIfruQzp8DIJTDvF8pyGM4pDjknUUH-6e0KsQSP6dOisUwy-RyIIQSItYBZlM_HD0PPBShhmyJ--DB8F46gTXDQIw7SC8wd9uOCJVPJsSV9BgvMmnIElQDXznUL5KHW2ynC0XW8-S47BenuctiusPw7IOCKdhmbZGlZSPZm-14BsjKHrPgCATHONKBj0yc"} ================================================ FILE: genesis_data/genesis_txs/_QEE09XylMYgab9MYPvrrMy7v1jKWh0bGwqFvsBsO8s.json ================================================ 
{"id":"_QEE09XylMYgab9MYPvrrMy7v1jKWh0bGwqFvsBsO8s","last_tx":"","owner":"q8S0gFgYLRPs4oyDYJeUxTm1AyaQPxKwtsWYH3dnNhox9J4vgzaoWSD6p8IBGHCmJqaA1XyAMOyM70M3qOJ8fnlSd5aVqcPjNOHXwl2tbpqODw1kqH2mjjheSfx3ixXUckUyP_A4Xxbaf8c9Gr7A31i034TsHrJHn3GPnexvY85VHL1xs7eAi5WDPqY8PjRfPPKdCN_9_qlgAPNmM7gghPlboGYJpdRkMkWOBrd9iaIuPgCzWadEqAyPWDQwk9C7y6VdSNLC07GrvDYgt-4pWG5STV9oFYEdNPePWEYRorgahmxrglBH7Jurld75p0ZVQJN3x2S_bikhi_Q40KOk2hzEY4V4iGcDTX1mXZdqV2ZKmHV-gnwxyh73H6faM5K8EbTKs93qnjjsolZNCtadpPy7JS5nqNfINITCgNaIYMr1U8R-DMIlWnfGYB3xqfh-tQOJr6bWkPo5dL9E_vO_Iiz-_Fg_fv0cWqdb4dfQzP7Gnv1o4kIqNOw6_fS_3cl73NdXD5yZpZ1LJ484WgH0FGHXihmWFOgp9ppB8ccZe_3Tr5Hqh_ali_dWDnzn_aHvtA-eBPF3lcDwU-uk6SEnEcTEUwUhMriTsjulbat3GBzkBCOvxRrhXPgo5_24pmxoTsfKREnavO3vTd5njfGrtEZh0lUAZYawcPuRv-gf_Cs","tags":[],"target":"","quantity":"0","data":"V2VsY29tZSB0byB0aGUgZnV0dXJlIQ","reward":"0","signature":"BnpmILLQ08mY-jC_fSP2EfCEm_ThclPazcSc-d7JygvLB5Q-PIDvfOc6--jOV82VCSjrIfJZxTK4jtvizQ7FbTg6ialQCBgYXlMBms8JjBazkwo1gveOZpT6lY8AlcVKCeJt0D8E0zZhlzpiAlyi1gy-b-oJjzadBnOT8YFC36rMJWnAMq7vWRpSwyhVIu687eq3CCKr_LByIsUSjEoCSQVAO8aMqe3CWLrGKYV6hCYDyZC-daJbhNhTm7SM7drMN7wjGxQ3u00jsgHz00DaVIqPMLTRt0oDdSV3qPqrNBBSVl91siUwRyyfKLKKLYWe7I4g9d7oMVUwbQlywU1cJdxPokc3sJjPFcbNRlFNCLmuLufIrB1kV8MOVLsopweZ_fAPYLJj_CdEwmeda4pWlNcgbPjNCuBDFMMOyc12G5VFdhxXopMW33FJAaXV3YUnwD_iXRjOxWfuFmDV76KJ9ViWCIe0oQKs5myvUk2QIzOAQRUToxWSt6bfRpqd8iMvietVnOUGcASqEmc4WK2k-Uq31PJRKrV5SvJS6KplZk9NAoOK35UGAS3k_adXkAfIdIgZjr2acJWZ8Cd-5VzaXL1shrx0fgioVlHT_ECaeV2Iu735T3pJJf103YK4cVqfvBHtNCNBoBszIkXc6uA-qefC5q9X96jFMkwonoyGx74"} ================================================ FILE: genesis_data/genesis_txs/_fLFu_BOzTEPdX35rqUruuyNxi7f_La8T1_JG7pIPd0.json ================================================ 
{"id":"_fLFu_BOzTEPdX35rqUruuyNxi7f_La8T1_JG7pIPd0","last_tx":"","owner":"2DJr6cBwwPZ__KdB74emS9Px0SxOQsY4nXo8d1yqaEGmijv_Mno4DPAo0gDJeXCoZS7x9C6wYh-C_7daTt5kFpXihpsyzPmOK9qOJbv2uzk2aTl52OLzjYCI90zUskBQsBDAE6tBIAq7sLS2ZOGeDC-fdAQfFSf-7LIv47Me5larKPrqlkuCM3kNevUQkKHRgjFe5Xwf3znS79YK99YPbtULxUQh2G_Mls7pfzkCu-QOrxsk4LGxx_u_j1aS29Al_cYrebNEzwMpxGZ6JT6eeJYfpwQKZu6coZjNbyeq1bvSfp1UBoJAAxMYL7tedxGjKGi8U7R-ZNMYC0Flu3WnDudI99NmsQYixO5E0wuQVPG83JAzxlL-QF7pSQ8tRhul4YyjX6V5dBz3GlqBFurl9tkuabPn1PNcnMOyG54wVkOfZB03NyN3GIN9y0njPYMxN1pFruS8nV71t9zCbtoaCDkAlTQf4SOgwUmxaTKZscZYIPXj8wgSBcpgy7syRrFD_fWgCu9NgJxEhYkFCEtJLbVU-5RKxO4tLHMsoqC1Wh80UTlwzzu2BnCyDhXX4O6gvnnj9mtNBooUkF3R-W_RdqDj2Q6RFzSRz7IJYq8v2RraUBmOIL54_k1aL4N6H5NbWGqbbOmiI6orWOyi8gIcEZnihT3B_LBryokVGsWhcak","tags":[],"target":"","quantity":"0","data":"Z2FtZS4","reward":"0","signature":"hDMdgw-hBPdr6i5mh8IHGfCWeVEWcy72m68sJWTbkdNGmDla1iJy2gfKDwINBbx8U5xc69_toQjPagudMB5AELP-eInXqQT15efj0COMXmyhKP_uK-RUdae5xasWmGHJTzwpgYveC7khxUDVw3yPfbsnzCZ9wWD7vKlo06P0hGEXVwCz7ZArTU7z8GnBtIuK954rhl0dJlZVYCnMb0Tkocn7Jh36pcmCz7t4294wneuOWJ53SvzNtS8o3ACkHcAUdbX9U5QOLZQwY3b89LGhZoQsQ8SkG7_SHOG1pzbR7_M-9lsF3PfS3W0Rip4NpigBM3TRnhXpIUMXv7PigonZJX1bgX5MdBG6Y-IWIwLbqhw3FFIR-W6wdEANbzXtKnubatjvLIQcWQ6lVESETuTeKAZCwWl76GQpZ9VgKG3m64nwkBWgzbCIjgoABKqkp7pY_lCY1Gr1iq16TE5SMkWc-Mp6n4vWvsKcUbpHcXiFuPjWiqj0UTitqbbarIWdYcL4GXHc4_Bb9O1hqxveE44dHbpca6EDE9GRPOyt3wnbiC5yq56fZ5vMPKwcL9XHtXEZfTZhnlNLHciUqb42s1blw862nM3e94Jw2sA-pvazSAakGCfG3b0KoMK7HDww2me-m4IN_tt-6ZWX7oHM4mPgzT-EL8zRTZ1NCwL6uTRZ1gY"} ================================================ FILE: genesis_data/genesis_txs/_u44CiJCcYiOrGffgZoQSmUrJe8CfYD7Nw0MdPX0tUw.json ================================================ 
{"id":"_u44CiJCcYiOrGffgZoQSmUrJe8CfYD7Nw0MdPX0tUw","last_tx":"","owner":"4Qblnx7gJe4GTyC9wGCIkqS0hqjjLKfVprQhDK0Rf0IXJJjG-_dGn3gI5XOBmUEOYog4vv2_kUh-5pdSgnLtD0B3iKSu0H3YHu4GiGZTcye7OTJo7RYvjcwP31JwFeRJfplnJnRTahpBBqYh5D2UqM-k4okV-1xZdzd61RPlXhSd4zEONITTsiA8IYMfcEpXBy1mDutXuIBhFFOzomStSkxf8hzMorV21LqpQKrb8VJ47n3-btbCRjY0FdJj1FR-8qXdst9EvCcxOec0kG7Kc_rlP0WI0ZCU7kLM8BiRvO4SFEJFjlrmKyeTEuPfxtnxdB3t78jRPsct7Cr4odC280b26AVBywkNhdHof4RWoZFLPjsySXePiZt3OTxz4iaHkUoaO2qSPfu5QdjE8G_Lfze_B-ZYZvTKxMxV-HWSbypCi3Hn0Fy40IY4youtd_SaoEAw7CWdukyDZwwBZhcYFJ8czvzNChp3y1RUoRkI40UbjFSqjY1YdS0DZdmYvOP4ZIMYG8DeO-Iy7wCE2dmOhvPKeh8GtLecIUuazEUovVgJPnzL8zkHf7YWxXFhppEAkSSWS_y5BL32pmh1kpPqlHo0TAn0JoLuw7wwqxK9pw6L3rfdJl8XnJBvQKhgAGEWzqbltiUl-PXvN8Bdl6NpBI-os00-CCh18mYaYYUbWJU","tags":[],"target":"","quantity":"0","data":"SG93IGRvIHlvdSBzdG9wIHJldmlzaW9uaXN0IGhpc3Rvcnk_IENyZWF0ZSBhbiBpbW11dGFibGUsIGZyZWVseSBkaXN0cmlidXRlZCBhbmQgZGVjZW50cmFsaXplZCBhcmNoaXZlLiAtIFRoZSBGYWN1bGFrJ3M","reward":"0","signature":"p69XT8d9WYZNcAO5NKGbFEW6jY35zl6PbWOKja7jyTOgmqJSrJnvnsZ_mpOOkhrVA_RkbXqQtzm22i2Y1n5vE5aPFiN7Q8cpW7fUj4JbEJPmAq-OlZFGAOMAwFsIgCPKrTnu69EzWU3slPCFVdkQ_YfS1e6vnKbNYEWAX3eoUPcgvpV8FisGWGWzAWBjCbX0m6TbCg8eXGWM3rBBiLVqnLigcztvYRdDbs-UMBWGYdLR4f6yh_UvkTXLYeO_06um4yWr_80ET1_E3m6EtXgWtSj8eILwJtlKx6k8ekZfkKbzgEWA-2e8iisSmIPaXSZZwa9gjZTmRUD_ZPwS5XQmsXnGiRBjy21_9DE_Zbv3BkVKYxTLvvhi7mm2btR_Fi86TyUBpW60EetLNr1hTxXw6adZFyloDAdcMs4Te7iGoLeg8e-sfCGSrMxdf4j5pS-D_pEWof5AU3OFN17Hb5OnMS-zzt4ssBOs4IeMnEF5YRFJAyWqb81gTKMWlNOvxA-dWak1gOiDA4vOmVSfYE7laJdApiA-A8S7TTLtaICHELZyaq4OERjeXpN6n2HmkOfZsofZt6Y8t1MjJRsfkcnuJTsBgA3H_3hTCyrfe6ZJUm7X1R7iwz9OL_6ChGA7xrTxArjP1X06SsjEP1yMaXkTTUogJEffZEd4aM9HRpjsOmE"} ================================================ FILE: genesis_data/genesis_txs/aGqWG70qjD5P8spXLMtyXnYxS9k7Net-u932EyIFl28.json ================================================ 
{"id":"aGqWG70qjD5P8spXLMtyXnYxS9k7Net-u932EyIFl28","last_tx":"","owner":"uAMDlK8hR02Xl7yH_DH6sZX5uDiCNkWSvAms5mIs-QvMx5kUnzp4jykTMBnLaoo7ms77kh0DoUbdpNwfba67FQ8WU93E3UrIPNQ3CRwPp-Dq4uUQ1KuaZ9kDvX-zq-PtQY40K2SzHVhkePbHdwsguwVi4DuxSNZBPbBevyTJXd7yI7UVzD6H9DGu0YkPtioP82r7X67PJRQOscvdUpdTpE__Wg22AwBzAz4mhFFf6_IPmaXwYJEAw155ft0BuxvjYQqaTSmgwzygUQmMAC9S_Fye3FKU02nHpelDI6v_9gK9q_eQze1eFC6CDFofK4d1Y8G1o2MAjTDERjy5ojay_wrLr2ou65BrXIygcquyYMQbhQAUnJvmaQliE6gWL2ZAtgl61y0EX8Z7IH2sOmU2ZhJR_oPxkbmj1GDH3OKgiTMGQ-nhzaU8gcjXqob_vVYac6DM6Wgd6xAjuu448IxMqodSh6CQNbiZYeWBbk5d8UG37F7hH9j5WM8A4NteMwuFI0btgZbXRy-uhZLH-Ym_anGfSf85-FOSGzTy9AyqDG-3x8jT2NAragXeGJpngnlslVwK_ON93gy2Um5F47fa3hnTU85j8_zwzzM6puCAgyxgB4Q2Zu_UqizdLnVf7Xzcz_hb2EhVK4mvqsvnErn9a2y72tkUPIw3WKp1Ot2HNCk","tags":[],"target":"","quantity":"0","data":"Rm9yIG15IHdpZmUgUHJpY2lsbGEgYW5kIG15IHVuYm9ybiBkYXVnaHRlciwgQW1iZXIuIEkgbG92ZSB5b3Uh","reward":"0","signature":"gADNtzwAZHazfeGQTYrmaoy1hCjDcZha3fdMGvORS0OnHF-JxekI0YeN5cB4OVT-jJjQz4hwgwTuvPHnGc3Eeoa3OW0Vr5TXxKSHcopav1_tZrJsmdeV2pE5KQ3QPa4EpuLyRfBi318t1VLr3ewMuc7sM0C0DQOD4d8uNtmdmtq_dCx15-Ugw6MEbI2j0Ku0RXjwXYzBLP00M1mVdz1jnL-734xKmd0qFzge2ulQ8iRDZWYduUJ5wmvbFaRU2t5cC2njDrZd50Mxr4uQJRkAtvTUUu3TJL8ORM54jeI9aawvObKACGA8ZiJ_Xg-9WJm29kcCTgZz6loTZGFSk9GM5qBxzzxc8XqoGcLSXQZ6SGMPJCT1U_wCba7qK0NHB6o-nmVXEnFZXapBQ2HWb49VkB7ldZxxOG5IJCj_KJVqd22IWZUzFHJ6xJsENLKw6kVlwc86k21KC0f4h95fW7EmP1vU0DHgwQMB3Npz17mwVpxCFLhYaFy6iJLu-y5bqjlkCEBblhIYjhjAYs5iEKw4bfp9MgScLjpdeugebVC-Sp8z9IyR2J9etboXp1FhS9WpJ4ztPh9-g7Pcm5QR0Ui51gZo42i3FkHseoXwKpbrqnH7tpFXo92ixbMxXLlHxYxj2HYor61O9Xiw2aDtpKuZAmzM221BSMQaCOQmS0jpaDk"} ================================================ FILE: genesis_data/genesis_txs/aPxbCROotxwkdovWbQEhw18UNAzVy-AmjYwjo9lb5u4.json ================================================ 
{"id":"aPxbCROotxwkdovWbQEhw18UNAzVy-AmjYwjo9lb5u4","last_tx":"","owner":"1W1o9lI-dW-oYiStVdshwd7qGeEhZsp1jqnHdm2aE20DBC0oUAjyJGTIUOnX0OasDH3C5Luk5huFSoJOChIzreUwpIxGI1Bc3PKbM7Ql4sD7NQ0JuCn_njup857Jw1zWllPgQ4M4_M2jMmbxA8P6BH5XNL5yBlGok_mFniH1U1mVhC1qN1BTEdqoT4dhISlBVqXop6y2XWbbnDRu3uu0UNcFFDgW38LxR8GhUC_vaLj4atAQwSfhk9M4K6UjQxaG4PSLPWl2MY_bVM-PdpLXzkqhNMyxUuPgMnB9_3ZiUM0E3U1E_SzQ2JJArgqzKefkteTiMeDISFfKXPWme9XC1vzjPqkgbuD373RVmIIVrksmDH2eHiuyV1YJcZhe8JEsCQh8fbk_Bm3zHpbSM9Ckv4cyt5aQRzS6dqMPhHDOUi2CiuyYAT_tminu4oH_wm76DYY3uZR8YRvKe3mNH0DHtiyFh2d-XJthWwqyWXNgQcpw2rp2eWsQOLEUZC8Oq5t4505wLNYQGndpCLi8C4v56L1hDymsqVdkm2eINiq_6VLRZiVa6tlLZQhfVxLhKxV3aw9FGg-dZKLwGkrGvyu5TziIyf0YqtL8W_uc5smB656M8gm34G8VpCU96x04_6vHdfgGp6o_9r8W7MULJDGaieFVSp9WtEJkMgmUCyfYgmM","tags":[],"target":"","quantity":"0","data":"VGhpcyBpcyBhIHRlc3Q","reward":"0","signature":"eogtvzZy9asMWYD6aJenPEbJ50oJVdgw3LwYate3mrfJX1R8ijgJ_agaBV11_G4V4gJn18IVAi6jix8Lq1ZJtnXWv7NKpeBGD0ILucV39UeOP_vk_uOSfqb7Vnlfgp_mS1-EIYiND98hRCpzEYhkLN8KP93SZhPo5olssuN_g-5FgcbtODBgc6qKucxugHS0vf4oO3jI_V1HDLD_IsxlPvWd1FhxXgesQd_Vpw9imYc7KRDbWgKmHqFUXDKxs6JePvfFqGB-ILJp-iMQdCAbacKxWw-1pbk3RbEt9hj0aLPBWAIcPMfA1uatDOrVQ_ZTYJ_DCDRVevEXGV9rmaHy0g16ykr8sQ0TkKuhdzG9NBNMs62JKdwOkAFrV1FnXUlTQ-m5LkyENmtVNOnO1ak7ofalDmYtjmIq6k9Ya2cYxXRJP7rpM6lSa7QlrPRjfAfG8qtKazCB2FAmTvSFTufrNsq2HiQ3UOG_ZvJwQalXgoDjcC9zlPCl_HP_pSN8RIEYlynZ2iz62YGExDI5Mw5O3e_ozQweFZXgI3vCiLxUL7wgp_AYN1hS5ye5jLFyXTJyAcObIN0Zm9FfTJE4XKUmoOnYd8YOCiP67ty8xRtJfUionqzHZopL4ox7PzgHT7wMlS1AEsyZomNjmRqI9DZFbeKEr0D4TmhrM1AfJPlhuIU"} ================================================ FILE: genesis_data/genesis_txs/b96k6w6qUyLSSWZlmupyBmav6XYMsdt0xTc2yIUZtOA.json ================================================ 
{"id":"b96k6w6qUyLSSWZlmupyBmav6XYMsdt0xTc2yIUZtOA","last_tx":"","owner":"qsd1tL0DoVzru24KiR3qqOoqc4Gq8emUfo298MCWmBhpi5aqmQxX32xP0H_joCmE-gfzFvE1UqYmYNn-H-372ibrxzgCmPDr9g6CL2kbdc_6QzWDBATLDOc4T5c2txX9NKMWodEON4bOhRa4nYqfVqHLQ6TzxFpw0c0KcBgzBCaILnliRrU5ijoovTp5XJxyt6RoDH7VjGvQGlwzwYS4bF4fUIBCsESNdVemVA3AnfBADO5YT2y8X6cvhDIzFGDwVGFxp-yy5cRef2tv7UhLvXzhOA4-1wFEjUlTZ2Ycy-5vRTyfkl4st5zVOSLb2MyfCxC2Gz5vXklCmB8w1fKOhRyjBLNxmyK9C32r5O3z3MtrwZeSM63Rp5n5wt9ZoGZzbCgsedW3kuLUTxfp-Vl_vYE5XcAEhIJ_H-NFVaax_ifhn-2Q9eSjkAybBUrPCNT1kHnEj-Rd6D5TinISBl3beifyEY6-QmpBP1kHk5h5Jd5I1TTmDVVO4MgoRI18Z9aRtbiYNG7I_s_v_WU43FkSzJmsg0qlcThAzT3hpNRu5hqSxQzWXhIIHsVP1PFjApw71L7YYMf_lrERYS3vITxaAu0Mmft4RFvfec0tm1ZVxnUkdfY8fxu7fdaaU2IfI2z5SLOPe9KSzchkYbbcLKmR4s7BkB6yLD769Ko7QjknWjU","tags":[],"target":"","quantity":"0","data":"c29uIG9mIEVkIGFuZCBmYXRoZXIgb2YgRGlyay4gQXVndXN0IDI2LCAyMDE3Lg","reward":"0","signature":"LHf3AoFIY6i5Tq6iiq_iQ2hNQeC2Ok_11Qd9jxMspataIn3LiAwUYfZUsu0jNeKcLyUq-iu6lnqDrUIim2KeA_maoA6RVSf7dUuHRubmtI-ucBASd2Ne-FhLYP0HtfAhbTYLXkqmZJahF4p4f01oS9FpahyfVACy4oXZ43hM8lzdDFdoDw-HLc1ouNbCzdqd7aK3f0ZF-jporyNEamek79x4ARjSHxjqVBYpexAtIIXlW2xPKzHNlzSdeTXIzboVYE9Ka49gpSjejLLMR3XzXqUP2pO1RcNo6yXYkmci7trPyKGk-Fp-Bu8-ZFEPkfaqhy4qOIXl0Lzjg5oaEldc-_3OFaiI7mD-lGdmURDnNGc4UsYV6Bq5W7TsoH4wAFmKjdo499W4cCan6He1ydzOuUIITn7Xyg6siRgrpNzYWVhl1adghc0BCcGnvRPv4XkVpHxDb-Q6XJ4JZWypOLs_MqeGJYb-_5zLghdxSbXiiGi5B29wuWLaPEo7-SmpJtNXg0yEqeLZUgFdo7cWhHu20QdhC5H57q10IS_egxdaad8d0Xosq9chxtA_s0UvDDJokj7Cftg8hZaA4iLDbSgd5ZwBnKb5sUmvQowLSwSQO-s_3vS7yQjrnQTjxE3w_BNB0dPJ6ch6LH8OCr9DgwluH4SACWIl-jen7MVgt6ec9Iw"} ================================================ FILE: genesis_data/genesis_txs/bnT7410oaZtnCdurp5jNgLKju9d_RRxhgggnxa5frMQ.json ================================================ 
{"id":"bnT7410oaZtnCdurp5jNgLKju9d_RRxhgggnxa5frMQ","last_tx":"","owner":"wWTW5uL_Ta8m_U30GfmefoV3Xn6CwGBo4fYSV_8jCo2DxIKAiIbIiWI6zlngnpuZxGUirJ5iIsU91usYteZcOH5dQRtkXQmbsSkoNYSisVotLFMG5bggV5AN_vRD1Eo3F5sA-01AG2YG79nm4TFWT4frA1lga8iaxL6RNYddnP-ttGhDw7mbhMQxWwJehwTdW4V_fElVNvmxYnQZuHqpWkSd38fqXW0YDQSsp36y__lZeePoWW6JxnmdIsdrDvgvpYrle-TOKGfZst6MejKGQkHPY1hH1BR_x5Ci698xCdwmf6B8Dh5iANxmTYgR6tF0gjSHoLT2cEnPKKq9FPy6_MkGmXV9_hGk3PQ0LjNzvLZSIiOo4YbrVhYxopf5va_SxsxEpMlPW-HHnrGK2J3xrNRVUbHB9Q7_b8C6n0RxOniUjobUNxsAJ-ceKDVnNcZf3BzW-k9ohi0hpOP0uDMoY1Nf5qprRZ3wRw8kIeDlH_D2lxJqcSsOLsaN0a3-ROR-wCZm_SN9NQ-mqxwmpMpK_eA7e58ya4ZMv-ludLRHqbeSoqSwiGH5_kB1gVvOr_NZ1iRwJwrd2ZHWOTQZb__Hon-Ji0X5QoA2IjtTSV4nIbFog61ftlr7fyup-GKeseZI2UG4B_1hwNUypg9GNKtK5WXQifELSFbA_OVoc2Uzmnk","tags":[],"target":"","quantity":"0","data":"SSdsbCBnbyB0byBzcGFjZSB0cmF2ZWwh","reward":"0","signature":"Gf8rK4PskI3JL6tWlRJTe5GNsNPavjt3ds1hjdhbk_HVwTwS6Fx0qWc6ikWBQKz9umDQxNM8POsIiyN58fWFWDAmsZeGaaOGYRCRVwnOz8loAonJTdwE_IxOUsZ5kj1rJ_I9TaWD9irKVt5WOZ6er9xW4g4BTWKt_tNt3DbD2QL9dTKijT_b1EjOhVf86t4TAedUMU5OEiZh7gEmgNTfc2DxeBRGE2AFbBlJBbVCCXZfOBVlsKkHVPkSr7AZHABMkrKKHTP8k9Ww4hpgNHwx56IZFDEvReM79kd_Q9xvLU5YRTP0foXgtGljLWKnSi5lEBUZnhFyjgloD6V1RL7qMiYz3S-0Lyl2fkw8PH_VphSUmafBcgWuRTBehXWkMv2l0Bs0ppDaBc08bxDDQk7JIc9NY_FrbZukWshRk-v4n_rWdVSWjQQ8-Vss2rGxscBK_n9VS0sM1_L-zxHO44Y_fLSDcxwwcRG7Z0VitqQv2RpQS4j-UHuRC0SC-RT51XlDhKpkDwuy7Abo2_JMwOwwfEZAYUkCFvZD79kN_AS1vYIlrmEexcAH6l8fFXEpEaFUVGYpL4KlfUxtrE6XeCqqGgY7c4NCSNQtkRF9C8BxNnfdKoVt5bZkRc2_xdMjuAoZF6k0QZpy7CUTrOzfLC5oIa2f45sWeStOxLZXilKxOzI"} ================================================ FILE: genesis_data/genesis_txs/bqhG__MMablNhNpiSp8nopeKDCzXy97jLuSBlsKk_u8.json ================================================ 
{"id":"bqhG__MMablNhNpiSp8nopeKDCzXy97jLuSBlsKk_u8","last_tx":"","owner":"mBPshDA6lOuGAqGXm88aq9ebJWc2f3mHXyq1muThO2n2W47bP_gLtRit5UymqRMr7kr1dcMB_vL78LYGn2vq1GZouWss6LCrGcXYZdq5c8nqmEPzYV1TOnloFXatH1lHhl8H1ZQW4ZjlLm0OX-2yT1Y7ToPTADpen9yK-HGKWTNueavz0OXlRQyOjw0cL8yn277VJges5AlUVE5aQ4eMSRey3vyWOn3y91g46zzlmPYTMXgvzSBsQNKGkbnD9lS3Lcg2IMMRK-gNj4rfwb9rJO3QtfuBWgchTPfL6yQNH3IeF2LdlYHy7NoqaA2VsMkSp-mCLj4bW5kEGVX-XZ71izNLoYf3uohmoNxFYDPmA8CAwTABnpK5nupKSV6Gm1lei4mfGZ55njiF7JtYlijV9om5iZpDOcGoVM102v_xJUkDGYaFQENUtQA4NNEpibFjznvJAR6vQtxY_321_YbpxyBwhwZ7IKe8LY15ux9chYOclVDQAyR2U1uwB6e1-l6enT_AU7Wsf1OFGvQeLPc3JwFFPj7tt4dJxNTNe_wWng-aKET5zn96zeMvs0hVu_QyLRG2H7TsCAS59xQiK8GEeo28n5i9JOaVmiVoXohJigHDEBptYbnbBF7KMonwLyVtZJywmRYDakEC-tKc02sw4JLWux27J7CdNzvCpnKfOW8","tags":[],"target":"","quantity":"0","data":"RGFuIEFuZHJld3MgYW5kIERvbWluaWsgU2NoaWVuZXIgKG9mIElPVEEpIG11c3QgYmUgYnJvdGhlcnMuIFRoZXkgbG9vayBpZGVudGljYWwuIFNlYXJjaCBpdC4","reward":"0","signature":"Wy7ErBM2oth9Dm98paOFogWqZ_eXAwNSTY-si39NY47FLya4kxJUFXoZ20iohbu2oCpXmXH39-SRs4c01ayXHDxiWQmjxGOittNNTTJFwkVuF-yi2X_jJixPCNbiTX-2UB-pabyn_YbVcSph5iFV_QR6vm-83l2APuuDwBldmH2cTsHk_95g4T_ph1SsNaBF-zU_Xng7FyVPUfPTupXYCRe397wbl49mRpBKQ0cniptOy-vZXNV6WjM85m_6pYoPpxUXioeXgL-TJNpe-3n7higeJMpwr-TDeNKNxb7j2d5CgKgUSxCJ1Zy7J1k_9xaiNddJCmQ7OnLw06HeM0k6be8sVLkN_zWyHH_6GlLAq1nr3km32TNJA8y1o1zJybT3lg7BuPMcCmAeoavZM_jJOzaieKykLPQoY_bDXtqCDeEx2u-c1-w8qmNY_ouachftPWJoVMGbkts3WL7_ddeaTbsS6KGrXELvOUZBTpefoApZoUgJisuQOjwbsw0bcUr89sBPMxLI6fmQANsPWdIZbP4Ah9YIUzQo_88UvlJsTXUTXn76vS7sc0yr5kO9OUr4LXdX84UKANLoqtD0Az-L2ORL_SbDUuElC9QMzUQSl_w35EFyNcYOKPHBTcV-W6ndjoN3_ImwLeC8Z5R3DfZSGi3z6y_20boSc7hZwEBlu3o"} ================================================ FILE: genesis_data/genesis_txs/cIXdvNTNHJSmA6Rt5UgSNfMcGfvxDnYTa3a1ulS1SiY.json ================================================ 
{"id":"cIXdvNTNHJSmA6Rt5UgSNfMcGfvxDnYTa3a1ulS1SiY","last_tx":"","owner":"wggat8doPQ6VMEdddzkm6DfitfzxuKijXCD9okgLK-DuZc_YP1j7uxXQFWeJlzqrVbhbuTuhez0JRI2tSJ4rPkW62TuCB9klASSzvZYxnktfUx_gmPrvrK6gNeWTTmop_Eugle7t3HhkUvd0IvQOqLcZWVyyEfE_mI-1LZByPOS1c6H9UsDnlSbfSlcbYNDB00VLPjjmD-H6ZZkyhDaJZ67kmsARjOgT0htiK0eubaYeNv337-M1GRozfyUck1gq1S1GFHzawwGsuH_htW8uYJWItoMJQrjLLZRKJ2Q0x45FKMLwEdcVgWPIJ0nF83YuOct1SkXPaEogV2Q2jN3EHcUC2dw568M7iuy4ASMraU_YlWkxaZZF0Cp58bY-Je2ZWLFDNVmin4B9ymuFcZ-VpfLXOMLBJb91xf1MkhuC6XZxOJoG-PQ2sb2X991aDb4pUkRgkAKyVEA4lqISuiKB6RPE3dNXcVMGbfaulH57eN2yack2vurQo57MXseRTouurNrCM9-cB2TkfjTCF2j3ex9UEm4XZmZQ5WJNn3gXDVIqXcYGj_l6CLsc9XhuJzOF7gXH70LbnM81aYRWeq8KUkygYGEqGDcz2ZDWFNlASp_4zQd-jA3Ee-ujIngP0OhfEN1Al1LMIPavmIUg6EGqyie59NUE2ftpnJYv0Fxbjmk","tags":[],"target":"","quantity":"0","data":"RnJvbSBLcmlzIEQ","reward":"0","signature":"sd2dIcYiFSjbZMjXw7_IkDkZw0J5oVqoOzgNgBpi9KwPxEVFz1mH_XiDmgCCgNGdL1_n6yFxaigkKLlsspUPOV-60R2YNouA3BG8DJO8dWJRi853kizs9AIq4xyaQ-3VRWspB7L7l6cZP1JOqgjBrJMi5G2ja1LnZasYgr1MEu4oAEnrmd2s-8xocVbLa_66w3kDzMlq4AH-1hzIAwZ74h7QpzoT4KQpOoqjLEIekwtS97uXZqQXrWVqMO_ruq8UWOXYIVJ0WlShCrnV0yfYzcDK8CrNQBbq1or7WV3s7QdrhSj98hk4MlAKqdc205vnYB1TteAhLX7BSn69vLlhyaIk0MlOcrd_RVhagYG0YeAxIvGqUHurzA3_sLye_5smRd1xDYUI0CtSFlDz0Ta8qjv9KJBt5ODy0DQm-zOwgUID1gCV4YH1IxzymeV2aIWkcbmNOeu0xwjR8P-08ijFNTzRtHGGxn432yzJ4iCN2SPTqq0km1U6mQGEmk_s3xjmvl8Xe0AsjYsrahK_BrHPi58dNxp_erBBKv2R3aEtM2P8CylWq5eogLuuESdR-8sejDYcUkn1qMfvU9mUEE8pMmqoPk5yE48y-BOFf8u5gA4a4luA1Zm9hJfo7Is5pUq42SLHpBL9XX7gzrON6JUD4YtTtTvQ2zfqXYaPV5SERzc"} ================================================ FILE: genesis_data/genesis_txs/cgU_TlXi5gJ7hShSBYsS4UVi-sLTtfFv1y1sy2nNhos.json ================================================ 
{"id":"cgU_TlXi5gJ7hShSBYsS4UVi-sLTtfFv1y1sy2nNhos","last_tx":"","owner":"5yoosPXV3A3cFxzLBfOiEqz-kAqDbNChg1PvBx_ahfzW35GEz_4ZolmBK4uPwGRedKNKiF4YP6Hyzm0hd9MR-B76Ei_pg9D3MItx9GrBFaKFbHu-y0_9ndhGc1d2msuAH-fWRWNsiY3m95AyIX2MNdB64k12oZ5GrrgHrKe9mQTFQDVEh0k5uEEtR2qBGfKva5s6qiyOfZGQgEcXgjMPLCFMSuogEHEeZBTCj3jiwOUn5hiN99q-BrmzH-M9J8dfd-Wv_KIj2Retq1QFXXvs_tHzzLJPfcqMfcRcoBvEQ74GsKni6e5-qSJOu-Z81ETTSvYMx7sNOQWC8gGz8FwT6W-m1CbonXnZ2qr3RKn3APmR-hErAK5xP9vQ6omdzHmnRCDU8b7a4OR_mySsrlZEvNJ8JpYCdmBsS5MCsB8S0u6O701T3HqpZaGHM8_sELS9O-wnMGrAL9Ii55I-E9yJGm4oLpMBVxeEFXIL_DxtoAV4Na85A8V7Q9EZw30cMLdiJUOPccaf6mbEbQ5vSaCGcKR5zOLsc9uazSVWKF7XNOamevRbOoKkG3cDqZj5AUqWUKmBA04BnazdffgFSTtfgKSW32IJDyOC5-QRLIZaQaDR_VTgGtBdlz_C_aC8DArbkhq_hvgUcgBHHRYLQjGPd3PtJQiC9QUb4ESyaGj4Jzk","tags":[],"target":"","quantity":"0","data":"R29vZCBMdWNrIEFyY2hhaW4h","reward":"0","signature":"mhE4678maVK7LAQ-gq5bIUbmVJUGYY62rgsA62TWzudRrWitrDimG6WyQ1qL8a1xJBCTBNrDdJwF3AsNg_PW0gbqG6zkq1EAdN7No6pkUazw9QcXuY8pgDeLyBIAfQPrWsIJTsl8_OM1yMmmTelRkqFRcg7hoL775yqg6D5wmoXP9U3_x98mvL2q3Il5687G1__gdV-ajqM_V3UuEnQ84ZsUVmydqfvPTaGVq9bzP7k2p3C5AH3ZwCziGw76cg-uXf2WiPbMMOv3Dkk8BbbrTx6nI2OCk14zrN7AqGFb9jliPCPEIcU7YJY4jE5IZgcqsYyjloe78a5QIGE2BW2eJCm43aWs0DZWc4rpeFV6OlrUZOwQuzAanEPX7AWZEqhl_zC-2Pe-U2Ol-ih1HvmSDchcFYY2GabbS8NTrXNaBPRgydjMX8tZFjSlll9Pzoa70OlziCEjmSFIyD7qBGtSMOwiYau-G324Shpb4GX69gu7QL-qOImJIF1DtLb0nPepu-zJk2ImuyXgtaTeEzUdaxoZWXI-VJCjYUewFmezaUNgyDRageB4f7XBdBoms3FTBFM3kKRHR-4RqsbyndsKMGOv7Xv0CmAcGVddljkQ5sG42XMZIoYlrMknylW0DoNfngJ3WYbW1j3FNHjG9-ihXFOrGK3NY0eahLG47zKL01w"} ================================================ FILE: genesis_data/genesis_txs/chdl-kIl4zG7VcJbKk0Q_5TeGwuH8Xp2YFPLRJJKTWw.json ================================================ 
{"id":"chdl-kIl4zG7VcJbKk0Q_5TeGwuH8Xp2YFPLRJJKTWw","last_tx":"","owner":"2vMfzCTx1701yxRSc-4LZAxwu92hJ1pDhi-zFMzS5q06LbhWY8LmePSC-12FQRvSwv3HGVEJukpt6FNLT5Fxp9drJbrlmibEcd7_f27r4ZdubN-BYoTKhrVdYhuEWYiimbiwpBro56oijtdQCAsLIa6TGUFNpDfs7XqoE6KxkJ6Dcm8UDOCwO0XQBWkgn5nLPf-i3oYBotxFO7-wV6yqRa9EspRKs4rWGA4vpjFQXxUcsVVpUxsV_nn5jOX8ceSYd71-tIU-p677y2IxS12txCbGpKH83MPdJlCrhd5702---wiT2TYoA8jcYScD-k61gECEkmmS90yjse1jck8TYbiwOcEhXLcqKTXzRCVKCDyg3SwtbFz_ULAjoMjceLNfRovxYSPZsBOfe3WWk19DBNEHF4M7NP0akPqPM4SeGZ3GaTkzxZkpTgf7WEpq6UBpFN1Lp4DQazV0roCqw2nu4-IAadn9VDRh4U-ZijIbXl-ZA0TqbAidMDyg28w1srj55MVZrGIiHNSOy0w1GbBKy6KFvRVineGC-AijhNf4dR4KeI00q3HaX6Ce3xETaz1csbtRPn6x5FyS9OzqrAJ6Qk7m4sTAvLOfcspmn6XgLz5SP68B9OG543HrWYYt1W18dqyFkJWZg1vfQSOcwlPONhk0BM7P3wf3eo3FR4BdiR8","tags":[],"target":"","quantity":"0","data":"bXkgYmVzdCBmcmllbmQgYW5kIHNvb24gdG8gYmUgbXkgd2lmZS4gTXkgb3VyIGZ1dHVyZSBiZSBicmlnaHQgYW5kIHNoaW55IQ","reward":"0","signature":"OCQ-eUe4oDuEghnK6m3ODehzzEkBBNPa_GXRWB8iiJXS-L8V-17etdXUVeY9rq9XpIdx-hBfJYX9_gGWFkT_OD_Bm4LfvapNgOrOBVj4HFKeKgfpdjXrb2f-L26g2CHRXgiDUP8oZRAjGQxpN6o2m4vyk5ORz7vmqU8EWRDdRiJSsN-ou5OKId4HwqrNb3X4sx153QdHDxPN72FKrDj-vVUdrm5g3pB2tLpv1AUiYKZy5NZdQjFVQPCQQF5hZqboUY8AADvdeb8lISj6xi0HigZlm75BYlAacf1DMLMC8smtXwfqbGkoCwPLjvG5JcqtWPadxFQl0Cz-doxR5gjvUFwmvTGvtpdhBZlBGmbL4dQi9G6e45BF5K-v_ShGT-ZjXhXeoxiscFncxxy31HJOrGzTTI3dKoAXp-ZRFzlYUNMXLyJFxO9fdjzDoSx8wQxk-w8-8nwk5sIVrZ6lwryzqdG7j7qlrt1ANn48xTl7_pfEoy-pdSY89UNcVxlzAADhdcDUQ0XpWOz7ag9xOJY-BUFveuYrAA5OF65tNcFzXpS3w6RE0BOoO8W5Z5_wsije63iASQevoGRyz-iNnB0YHQcwdxgK3XUifP21Lv4rPoiiqzSCoAWcTcfxgBXRa60xSnOwfAnVeFZyr5L6_AV_1yHBn29--VDK2Y6RNbv7GZQ"} ================================================ FILE: genesis_data/genesis_txs/dYBZuFcCEgGVcfXgS9tmeJsue_qwaCRO3Mg2OHCZh_A.json ================================================ 
{"id":"dYBZuFcCEgGVcfXgS9tmeJsue_qwaCRO3Mg2OHCZh_A","last_tx":"","owner":"zN9M9UvdICaVVs9MyXYRD3F8gv6ZZO2fUD6Wf56vnc-zHCMhUGPAMFaVnLsRcIuyIZpM2F1Q92015uOCs2IsOsyGkl7LMpUJAbh2UQ4xy0NHTK1HAAwxixfq-h2vGQcJ921dUFgmG7OWslOehr_3UUleSeuCuWxy7WoGdlPfOqbo0gbsZzS-winF7MxqEu-SJqtv6dEED5pcQv_FKnm-M1hP8wzvAY_SsgeiZ7rTRi5RYUx5FMyMjqiw9LUZZXxZYbfOQX6t7xoDIvIVN_oLNJRNQvmZg8_W6Dg0DK5kqz0ccTgmpt6Enu5SFZRSkuZWwVrIQi4FtClNiH6nF8zkE1Xdw7HUM8sBv4RuGxgHiHp6tbejnGYIgn5IeBlRQsbV-HqNTFMv97zlhlCZIdFZZyYQ2KSw1fqi7fgUQgVl8hXUPwbJEpC5w7qokaEZOjew2XtMdhwMxpoz4qLo2r5D_mhuLfaVJy7TbMUrGkGHQO6aNByVTBcQHFMOXqB8ZcmwhQVrqTfheWCXMuAfmee9acCSCeC0dZIDoh5xPMUk0y-byr6oc8EMip3fFpMgs6o-fubO4eNw1sMG1CxlbYA0CusMqn_NleBusp_U3oDsICNSf8mPir7k3BDeUxb9A_V9nHGCwLiWrAxsSJjVblxLu4dEFPa8Z_-RvQVknStQfJU","tags":[],"target":"","quantity":"0","data":"Q2FuJ3Qgd2FpdCBmb3IgYXJjaGFpbiB0byBiZWNvbWUgdW5jaGFpbmVkLi4u8J-Sqg","reward":"0","signature":"bwsk3tK3kSvB12m_kUiNQX6WPQCaBTuJdtbvCE0_rtL8-o42oUDwpZk48uHHYhZ-VXn4IVqGZPJO_l0z8e349jpTQ-oHRu_8xGxu7mhBB4q3WcbH0PMiIIM5WC2IR-Ps0C1lWMwbTJFiez-Yt87LZAykG_WZk4sbhOjykDT2R-yfmX-5rZk3izV0khr6-1PvTTsLmzJa4VtcOvEnLyIgg0G-xkDl4vKPRr6zPHZAxd-8VOSlO25cV-ZYGTAycQVF6XeUO4qgcoxkvx4E2i6ZA5q6dp1Ec2LR5o4wC-laoKEGRI-CrJej5dmrF7vEPutyPkh1818Y9Wh_TMkqC38DkQW05Sn8Du4-Bg9DF8msed6TItYEUJVfwSDuRGa36k58WYmW8YRiJ0Sa89zEYT41UUcL7A5Tol576CJLN3fO4DX0UlIcM1Il_YhgINYWXmHDgN1rmuRSHGOmh9r6GP9xUbJw9Y5-AZx_PL_W0GnnovkQAAcxNFBiLWFRhxyjwDD7wyQ7lnmQ-so-P2GgdHduL7BkQn0L2SQS3wKT3bMjLmwS46GCzVPyXVoxzwqvzG90f5GdPCx4feGYT-GYkGsRIX_DchwR6yyvmnIBj2erkI-udCsseyEXFiNInQ1mv8unngsL_MTC8pjtbE5SMngf3Pc9Zn15--TpKxs_gI9K-YQ"} ================================================ FILE: genesis_data/genesis_txs/daTnztzTMlA8Ras9XgQ05Fr9ZYwOg4-UDfjW875yQeQ.json ================================================ 
{"id":"daTnztzTMlA8Ras9XgQ05Fr9ZYwOg4-UDfjW875yQeQ","last_tx":"","owner":"t2CCQRHKh5EyrrpzVCct_dZIdRCEobP8hKb42FOCY_3zwl1BU5RjB1nyscpkaJQtQePz7hV79SjRNM48ldVQXXlcBgMLqLFYGFwA40dtVrfKNVmbRLLeZBFptlhccYaEasqSOBCnhiTKRkuwOXtzjmhn7h_lWTGnW5uBjGZP2vbJkZlcaZ-3vxjGt9Bta-WNWmhUWSaL-50fejzQGOI7UCUc40elXWnRdblp6vgSTbVnxuQM-Z1jBXs7_XqfUzBaiJboZUeWl9_WQlwrhkTncqJ1l8YLXptOd-T3Xvp1DtpFuZfJRXV-Hu-Lfb9KZfSxKLSMlSzk9wTUyvBio4TtBtTUAtXE5x9U-dJJKG3S8gAQ-FlWzCezVjHkKC_8PbWouF8npyDUR6s4CE_uUXmZzdeicorMN9Z3wdzBWH-NAVExBOr1lRAdnyn2MYbP-XfM5x_bq93QchNS8xiHAqaSzHlb0AP6-62WqJxgAN08DxKKX0Uh4-LAdTygkEJw-iUmLw22l8aggyr4jSVWkGmAFnE6uy2rjUi5PJZwRnMYZf6PmeM40aJEokN2JzSMr8sWwEKGBwXwc5Xr9ffS7IX7K63ytjnBE0Ye_GN3eXFChjxzOE28I2MZR6hEcTjXRwhX7b0IAqNb3zUBwPojNWBh9MXJdechPKwDq7Gg_UFrx70","tags":[],"target":"","quantity":"0","data":"VGhvc2Ugd2hvIGRvIG5vdCByZW1lbWJlciB0aGUgcGFzdCBhcmUgY29uZGVtbmVkIHRvIHJlcGVhdCBpdC4","reward":"0","signature":"Wxx5NLc-xX7BreSg40DS9O32RyMh9J2lRNQli7RvTNc01Qy4wT7tdnfiRfxAjcSnmU7OLbFaJO732ZxYzmpt1LYCzF9H8LLmaBXQ4_gHgoZauNmOcv1RYBJ2HxrxWwPwSMKZQGrlO_PLupkJg7G2i5Y_gmHNukASbWiP7T2HLZqTtAzwuZwDU6W89IuIDKPmUI1OJyVdWS_XvoMLZZh6MzcwW165TgT54CLN_qpJ0vxRtsGQG6jzVsAUl98F2IokTu4GlCG80A958FsF8dASFFvCj3mM1mvZnK1jg9KrlxF3mRiHbzAMG2PnwjUGH6S-_yrdRDqmEGGg2B-p_r2peegnzBc1Dz2nx1Fnx388JjNHPPP5iNWjF0rpzAiAPJWOBt3YeI7ypzWcankfo2jYBiFTj7sTSM2GO-FabWh_oMy-aI69dM0DBbZEN6BsDICxCpip_s5i2yQs7SB0hZj281zORr0av6pJtle8H12DKa9gVgxZA3_BMCFQKmy1LUWq3MOt8aoxrkR_aTFnA3CpJC5kzCJAEs7MxUYovaFc0p4A3JzjTD_98VzGhMqDlOnAQzU0cgv1hmP8ZbRGpZtJG-0hBvnmb9XPGa89migA1dqk9dPsUiQ56js1qRe75LZ7r-hOgfGva12jK9X6Jp_VrdKQ9eq7LdQ5deC-YbZpOyQ"} ================================================ FILE: genesis_data/genesis_txs/dn3p_BqD1gIcZQqdA8r6TucwycKGave22IqNjzKSHqI.json ================================================ 
{"id":"dn3p_BqD1gIcZQqdA8r6TucwycKGave22IqNjzKSHqI","last_tx":"","owner":"sam1ocQLFyDkoh7WNPLZjL37ovCi0-BEehRnPLTTxvbNjzZCv2djMjiqoSag7TpGfvc2QvCZl1HXGsX6vY0R7jDs2uzaXPUeYvbL8pzIR9-JF46RwnnlitdkDp51TbOeEdxH6_hmp0qoD8V-Izz4rdB-ryG_cC1hednSTTvN8_Xu720d2-iM9XA5nGEbcGM3sn89WRgdaD25dQJRjq1hl9cMiq6iaA1kgTgdTX1DCrHtzkK-3rHsfNxQKc4wLEVTmx18JCKdaSXjQMs535Cga79ygf5gYql1B4jn-SjLgM62Y1GT8YyIKlAfr68JjJ8Foe-Xw6IsruQ1o2oj96Y1KtmP3g-EvpuBK9Z9XEuJXq7Azsu8xifEdGny3BgYpuWuw1cHqsh998CEdxuUNS_5I93760UasQinblTSqEu0Q70RrxPku1JwyfTG1iszEi5JP-rvpDiuRJjOsJRH2egKynnmrWbsCeqT7Dy7xAH8gEGUpAl2eFQIBiHwXQyp94mJoYnHiZeiNmdocbR7xihQGizM_-aoi1rPzpH9K5p_8E0kfXE0659j_gesBhYBHz5M0TvgNR6C5qbBScFxAI5EFaAHNrOs4QitlMUhMHkOSMeX8n8EY0HEf6eZ1dLn6zxmT7LcuJ4Z5GK85lugJ7x4yfAssX1R79TvccIj5WtSwy0","tags":[],"target":"","quantity":"0","data":"VGhlIG1pc2VyeSB0aGF0IGlzIG5vdyB1cG9uIHVzIGlzIGJ1dCB0aGUgcGFzc2luZyBvZiBncmVlZAoKCVRoZSBiaXR0ZXJuZXNzIG9mIG1lbiB3aG8gZmVhciB0aGUgd2F5IG9mIGh1bWFuIHByb2dyZXNzCgoJRnJvbSBwZXJmZWN0IGdyaWVmIHRoZXJlIG5lZWQgbm90IGJlCgoJV2lzZG9tIG9yIGV2ZW4gbWVtb3J5OgoKCU9uZSB0aGluZyB0aGVuIGxlYXJudCByZW1haW5zIHRvIG1lLC0KCglUaGUgd29vZHNwdXJnZSBoYXMgYSBjdXAgb2YgdGhyZWUuCgoJQWQgbWFpb3JhLiBBZCBpbmZpbml0dW0u","reward":"0","signature":"mWtuAyADsvC21TYG_mKPm7P0ut3hGEPzepHCbmNnVn9mx-Kc78Ce4ucwehEe4OhDgRMeJIEz2-w6df1dfMSSGnbYEjM3scQcfUfivb0AokfRoeapQTgC3y0aj4liNn07h5yWXeNCw4i7ViV8FF5jP6zl0SViJ5dT3VLIGfoewwEghDSBqDRgCae-m3SS5EBCTDY6VnJtP8XAGFtHasz9Rj1rT8Mbu59939_rqLHmEXqNOZVXeMqiNzCpxMArTQIUDPGo2POxUJZxyBFoPO3C-0wRXCF7KzzLq0GZJX3o8eKggk5VCoDfewJH0FrJftrZTeIq3NAoRIeHUza_MGSsJXlNcT5JFay4icgLbxWaOeMY4Z9GlTzikk2s6z3kJmfWLmVC_ltD8nt-Jqx-p8pHcXNTW-ZZeJUKhGTQa-XVmDi1JtYU2EFZVkjOsfvLKMDpDaGbn5ihA9JQMyrFJLbcLLZohvYv7lSHcSm9q0hJFh6Adc-YNby-pY1LNFWopZsVHlSP91Dpa79-5Zbdm1ClGvg0Ikr6k3Gx_edLRDphjQcqSUVDOWNgUyuqaAH2iTDfUF5w7Ze3RVgF6gy7QK1AMirDyNeLE885KT5JclircBVN_zVL4s02_eCiCufVHc9O2TWXyAl_lA87tpRheYFWKOmGnvlN_UNKK2H1a1E-D5k"} ================================================ FILE: 
genesis_data/genesis_txs/drYsyF85HcvC7LM1hkzPPgTj3_zp3amcNVNobBmOxvc.json ================================================ {"id":"drYsyF85HcvC7LM1hkzPPgTj3_zp3amcNVNobBmOxvc","last_tx":"","owner":"ugbb-AkdOkfxlKU5Tqx23wy91EtMJs_8MOqJDM0UehzpD0qW9OcgP1Lb36jLBgomlOOvgLyIxS9cmPRp6jghyuA21xIiaC8mcbyILo1riMpm9ka45P3Ywdv0mThW9IQhFjOOcc7zT_9_UgbHhr_z-1Mfv79K_lnW3WM0QzvzD-DMYiyEGf94UaiPGvWWuj1fMTT1HJ2a81NJAnS7jjxO8PPb9UdAFs1cVwfqgjMxfA5ul8GV4Dimck_nGWJa50zpLHkWZvXApJZOJvB7jwlLG634cU_y6_qfjcXkqK2sXwMYz8ZkxIKt9ur191qvzgRbIp1Ge-_2LP047pkVTX4gxEYOLtLVcREoKjterd9SsCXPE3xRR_psSZ9y0D2I5rF4pBpI0o57nreMiGHYcDjPnx3Bz63HHK49b8JAxYxIdc3rMQx_dA5kEU_k0lw-WQjKEjw3YyCQk9vPCKWxYpCBoPWtcliwO2o7EsXdHJ9WDz1Ki9yi4OAAeyBoDbfZstCaT5u4hsfQLJwhqXIIxqGKTBwpgtkHZK5GbsZr_WlFPxUABHf9ffe_fsqhvTZ74CBN9TPtrllKJLu3KZ4qxnrZMFL4dq04pkv08y13VoLMv8VZjd9Y3eO77HUD1g5WE3ltH45GtOax2GFfxj0zrv3oRyHqA_dKVQIUfHGq5_kYEAM","tags":[],"target":"","quantity":"0","data":"aW50ZXJuZXQgZm9yZXZlcg","reward":"0","signature":"GyJe-uzMReDw6NYz_KKC3hfCVR40nT7CwhoAEW6Ba4ctzdBhl5pjb38FkvFFY2Nab_mBH5cwAW4lwD5bdzmM3Mfgbp_bNajSIoo4JAbYABsJgUo8NuJQFvj7zVfl5RlEpwhP-R84Z45GzEMoJhDYsJ6JMu-4Ae2q4ciL-Bx1iPDWnxYbZw1ac-R62jo04U6VBKepE0cc017_bToEuL_0gL7o-S-tUoCa4sZZbJvpe3KwuQN0iFyzubMx_UZ4CMgSXiHK3QS7init484-_OHiKyOzu06vIpPx-9ONNfUKinbGUC91A5v73ICfmd4bi9eOpqLBSxw49JKIRq03AE2cka5EJ3gNdAHBkiuq9_xalqFs-XDLRF3qmYGLrjH60MhbO5DT2w_tofHgLFcf78mznCxxoy0YzZfjeCf6QayO8rjVauEbWdx4cqYFOM64MOOn5YhOJ1J7HYv1W9b0uVlpMPMSx0F1a1KVfoNioXsIptg5QuBraCiwDw_MiFd9hFA_dOzBo8mxKOVYFcm2V-LJ6LKoyXOvB0INFSwOy7SvVAwO2mmh9zNEewMAWUFBwigwvgG5t1VTKGTBuhvWwqbK9kBTZkJodLoZkfbwIcqh5be_IIfN74YvwsF3ewPVu3fb9HxdBZzfT7Jtr-kHDSjI8a3QadUNdb0HhQQkxwbZNO4"} ================================================ FILE: genesis_data/genesis_txs/duSw-WaGKAabAztyg2zkj6hjgaVaRGBrJuvZ5Gd2Pzk.json ================================================ 
{"id":"duSw-WaGKAabAztyg2zkj6hjgaVaRGBrJuvZ5Gd2Pzk","last_tx":"","owner":"muWzriTg_Eymwd6zqnywNvhKLXoUHi1L1KMUT7NtlGKd6OExknojZnIlN75L56kytlUORY5S67EsUgn1JAdmaymDucDrBu7bTMoo9rD_nxHcMbUZM8kjG3Huy4So3GPCI_NH9LuwoTR-SJmA79I9zLG9UmtUvKOdDPwSBTC4SnaVc6bcnx4ApdH4hoxS0IZiDwD_k4SpYl9wIeJ9cpaP-jIt7u6J5lYSLTPWr2McM7JVNtwvJW0gr84n2Hz8fQ5KtEJsoTsYBPe_CK4IGfNVNdWePMTlM25NqXzoS1Pst0wcaN1SQxEzth5I9VgJ04KEBx8jcvWp2YNd_QaW86nlhVSuJaLrVKncGselftcVtYm2Amw0SIvWdhn519fOCkwGudYFardqW4hYeuEsQ9RQWh4WEDDW7hD3nD4xiaYrDvk5kPMn6g-NmKhXu9C-4ga0UZcZd4FRK1hawtYHJmRpcM8_B3F40hkuAAa52D-1y87o15r4BfkGEMkKhT5QpMFyzrlqT57aOTwyls7Gsy7Sm9vn6GAI47NOLxdW67xfPEERua_ZuauXIJoItqiBZAppxePtXinj8TW2IvN73umWhZCo2Nz-PitWZ1s1nNnhQnRwWxt-BKeecNfoCEZg70z_Q5YMAbyypbKT34Exv2oX2D1WkmvL3n0dBULhjU8GgB8","tags":[],"target":"","quantity":"0","data":"TGEgY2Vuc3VyYSBub24gY2kgZmVybWVyYSch","reward":"0","signature":"cu10YWLpyQaUtDue6h8F3GZwR6Ine-6_SYqVplQ4rigN0PbW-uL1VqvI_Re6JZmAJ29eeJzPWQhVB_ei87jqWrauGzbQzhn6xh6JzORdiF5izujquosdfAl8vlTcCEE3ELYD60FFs2P2A9uSlZ0NDxAoQ3QZjPsHwAnl48ziPte8TJ3OrGAO7iIeoejtr5OFJMS_uQxKiwb7iEqdE-RpGN5vvMhxcd3sJsXN4KEzytLyRCPUhTBsK8YqdD5givRUGs_HP46OudrxqqrbZep_J-LcU9EWgDV69nC2T1CbURB2mLWdAr8I3rVvZExO-DQsF0nvgE-4Xe7rgtfXuVydfMDWUny_kOt7pWrnkWH5MtJ4QhIpIcsR6Z8jdFDNNGH-JTA1KpNoIykM1nSClfkgYkjxBEP_-Q7DyoNn7YJFVQqpxUxSU_HDTXycNLl_zAS6eEdrux_GtPmaJqeytcxSDJrMRowjbq_yPel_o1nuwhJ6OSuNI4Hct7813GMtmjJZUX6R5Y1Me5n5TXmjFXxUJwmC9mdb5ucdo4mbFl9zbjtVRJgplpCIz7xiAPjoSVamuIC9aqtlYOfJ_7YBMT_o5euSEjOBjH-PcAPhzJZ4NNGdNUvBJMYuKgKb6s5Zgqy1px7SMwLr4TIBU2M_VAa3XudN1tCGD-cCAGAGZaKa9MU"} ================================================ FILE: genesis_data/genesis_txs/dv28G4IsYul7liWrycsx4UKSYHA4sWUY6xFQzRPi4p4.json ================================================ 
{"id":"dv28G4IsYul7liWrycsx4UKSYHA4sWUY6xFQzRPi4p4","last_tx":"","owner":"ts-hYwVlfOU0H3oH5b7886u7V4coQTCL5gNvuGPREW7rYcc-C0lyn1Ma1k-x-RZ15BALdvzBIc0XEWS_ytpLRbfdj_TAtIdEOrVAX8Tq7KkF5-dH-6fBwCt0zkRahKZA4RUkStNu_NYSIpeH2bk2jtsL_hRqfYmhcv5LTTfYv2Te4VUTL-YrCvrw0c241DdgHyeAQlDAMYgkBlZLLd5b585ia7NQ7ksAvdUy4fETR_agX_mJY7squbbMqdl2YeKjvQxM-wGO_K-p5YVdU3kFW_6uSYuIFaMlnpkgawYb9zHALBLGYVL99ZjUOQ75I2pustofx_OKAzBIa4_YunJcpAWFCa63Nlsh6F0Cm8Lxy31m0JsD10zQshVA5f_RA0s6YmA2bo9KpL4T-q57PsDuzgAOAMNiIhePW_71oeD2uUNYfB4wDkKnknsdRLMadYzmm8l8QCPK_-FfY6vkaYy1s7A-11cxOClerCfua2D52gW_zDJ8DJB7HiYkt6wGIYW2zaYiG3P2JKdM_dcWLLdnhmwVFQqWTZKIs4bF399h3RhrRSpX15ZegQwFOHz11pKuVqQlUsm5B-b0vh3CPYqV-0TyrfenWnivyCR88oI-0BuogpGHeZlAH8ErN_iI7tq79jLIr1lVNmzqLMQAPS1aBpGYkl5fxYvAJ_j2XAij6yc","tags":[],"target":"","quantity":"0","data":"VGhlIHNhbmRzIG9mIHRpbWUgdGhhdCBzbG93bHkgZmxvdyB0aHJvdWdob3V0IG15IGhvdXJnbGFzcw","reward":"0","signature":"cOGtWrOChFpyjqGKka3yLIGud8KgwO-wcIZ8bLokr-tGWb8X-YShuLo4HbgeNIGg_zDs7jHufvCwjAFmyLUZJIHvgJGpRF0S-xmy_H9Y1ND-dZ2hjy_-NcxUJlyhCb1K6_3KpQXaPSJ2CSmhIF8XqtDxZ3zuB7mSkmlu4Bd_EkAPyag22VfJAKGSN0z1ncKXrgFnZig78ZPicBlLeN3TI1B-xQ0p6ftEcgNVeL4ZZ33unMR1k51-G-wz77LbOEfwyRopN9q6kPPhw92ien_d_SHU0mm6OrPPt4W61BVYSbC-khPG-JC_g2MgejcMkKrS9k93AkPeNs8wQAWutMwBJSnEZmwCGqPWhMNABfmRoFoS4gsMhpmKafT6Sp8Sl6nGshvH9xNGG2OLkb9aWr8eAdIrq61yn8VeUTskhx-o-3AtJNY1anH93VTw_q7PE64GicligDliRrJECL6drX-BB5mwEejNgt-EJVQqwjs-dIBPlPMGG89gHJ2V_ZB_rPnUMjggap4EZCzkR3xNRMf01L_XdA43Hi5dA5S9OalC5lhkTC_TU9NPVBz71jWf72HD7MQUr8J2tJ4KOXv6m-VNu6EdBlOLkdUT9NkPqAYbkqCG7Y8TEZEBRc0OrZ2mOnxVgFpQPmtv6TRJKz6-x8Dw1k9rTk8gFH9OySC_gNeiCFc"} ================================================ FILE: genesis_data/genesis_txs/eGhF0za2qN5WuadlVZ1iak1S5LxXswHRzIa3j_P-sUM.json ================================================ 
{"id":"eGhF0za2qN5WuadlVZ1iak1S5LxXswHRzIa3j_P-sUM","last_tx":"","owner":"uAXuVFIvakVIKv5tU97_R5zd6lS7BApi7WEdlULOOkpkzS8sV-x9siDTqf2355fm6u4CKiIHtpQoj7icbTouNaxs4wyW_T6iyZL7L4hubWvMzlZJf6_eYtSIrXyhdnzXn-iMpax7TSn66WPwikBQbUr6zGI2xjhxKamb64eCy5exNsSwbxbdazvNGLSGuB0xQw5hI5-gU3-qdJ52U7vFTJeRTcZ1RRIH4-CQvFw0XIf9WFQklLquiT-6RyT9AYlQfb2gSHqpTE1St95so_-l3dF_mKQXBp-Anq1oxBLqwhe_4InnPpZR1_0mCG2RDGPjINrIiZNxGmP91W7ALf_dXgKqPZru6TX-IigYY8OkGBIyoSV8U6rGbPoRkmJTvNg2q_ClF_yXgxRYMaVDYUc_BWFBG5NygIWowVC70j_4KqFaRXOHcGHM0a8QCnja3IMxrnT8dfrKQAs7kCVK9ha6AAgDIHOnVZ5T_beUr9aFgJWu7eHmdYG6xtULzqV1pRi-3jnBWDvPjZ5kmLeSY_EE2aGCIU7VcoGeerQyXaGNvIfkGOIl0-VZ5h_YETybI-VmIUib_LOw7q3Ozqerk_TqyACc1Lk03VMbf7nBy30DXtg-M9JnAufwT-0nRDZEcnw5mbnW0lrJx-Z1-YWevy3Wjmyjf3Q1MsmL7MmJmJP8deU","tags":[],"target":"","quantity":"0","data":"QWxlZSBpbnZlc3QgQVJD","reward":"0","signature":"N0ey0FWgBesQAxptPEoaKPNkoFc6RpiRhqskE86r4fklZt21msGV90eanSUIISOrT4AJx4DxxnERI6xVbJiFhp176dQMhfyEHN1iR6nYdMi1J-AQFhcCOqH7065iBszafbVQ9RRUH5wMx4-8Lm87eB2UWIdFF7MJzjZ23H4JX3Sg4tvXMboVBDlN99YfPB4UdgnMWNyoEJ4sdfBpfuuXG_cIGa1HOKkendengh4GyFEbGJp9d7L8srgKFnIU3_xz2uebNHqfMW_-pPpKdm9dfuYXLjGhtv53fwqY9K2gBSoXUOc-5PQpqVgn2QeNqgQwJWJNWElbbZvxAslVZtnvs_xagln53y_d6t0IWzKqwkudkOLerVKDkU02Xd_OMnG613dAej-5a1ZgmrCQpJSIW47HuntDT7g5pM66p_-Zg1reHs4PCUYMMWhsXn0s5sBS5QW3Eo9EJGEQOUPxiAzExjzLMJ95XvKBFD7_G0o4KMt4R0A8sFNZyGovFhDpjTtkU7gsCsgTbX7wfUFfvG9k4GAYi4LJ4P_FCRDYXu-SFOrsViWxw8jhjIArNWub_qOksJIGyUJ5hRptR6nYXjo8gvpVe664SMGc5zbIYxpYBTGH79Nth9QGa47MEZw9Lo_5I_bvA8vDXS1EvQVS086cGfQoZ2ssayo86GTYZnipyU0"} ================================================ FILE: genesis_data/genesis_txs/eJ2aSQ4nm-i8XAZW2pcRq6GoEjW9K8EBM6w7rLiuSHw.json ================================================ 
{"id":"eJ2aSQ4nm-i8XAZW2pcRq6GoEjW9K8EBM6w7rLiuSHw","last_tx":"","owner":"v3NRJ8Qnq6iGQqfFL0cedsjLN-sFJjgMK8V8IAX1jk8bl_W64TGWnBCwvGM9vz_mohEOEGv8pB4fn57yXanXvLZkIbROF01CC_OmmXnqkyaaqRoQ2OLKeKuDUd1s4XiW0Dbt0QhaCS_CkSiQFWKhPVLDPLsrhna0c-lvCqJDRnc7qTOLBYQ3JZt5kZXNXQ-YhK2_rDONyAn7Yex-ExLIFAvsU9M0SLkXaZGPfwX7M1y3Ee2PSeA1ErRbPPVSe1UqE4FEIeT19vw5repfflU6QJtA9fDvNRSr0bw7mpmSLmVYos8-UoTDjl2JANBOHpvFklW7RWC0tAR22pSLSxxgK9R71ZagN3-S2hT1gqkJNykc7rLV6DFr0Zr4oGAr9c8N1uW5N1_OqXx0s4edzefwjKZqKlw7P2raDFzhJ1-P7Cww0KYYOX5xHaBpOwVVaO9JlAcBgjxpP86anKhtGHqaZR8uFzgXScBnmcmTSSZHFHqm9PaByZs0B5wgLAjHL1BdGTDckCDn9r7LCaNCPwTYuQfc1-pGu2eNAkdCssLVsJwK3ic1F93xRwGC3HoknlVumrivtCkTu9ywStW7ICZQAgSWJAPeLHK_Y2Fbq3jbmRHfm88QWljjMwDcWtrEUNk18romTaJhLl54zeMmOHuGg9UrYJBn_7pSDnl4hxDRHEU","tags":[],"target":"","quantity":"0","data":"Z3JlYWQ","reward":"0","signature":"mRSL8_vH1Llku7HhEPkiI9JLe3r394nYVDlMpH4om1Wli8vn5M_MdIoUIRxFIk7y6DOT4XUg3_JVIX5wb9qgax48U6bGEhW9o74ZOUDh98uy1uOIYUcgEQAWRn-CeCD671pMXuWzRPeqAG1WBk-1FcJ9ddVMfNPVYyddN5tp_WxzYff2QDlmnczIm9ApQRRAmITZXPNLTeij5PKGYIH9G0FSFkSzyD4DBgekBVZ4w_6xVaRLJIZqqL3rOodtZ7wc3yEBHY9fs-Pd4Vylo9SSRA9l5gvio4R4hw4DR4VZXVUihb6yQ5qMyErjNAr9MRuUyzDy-rCXVsCYqGVEqon6Ef1Dn_uuwDq0BQ9AShwbcuM_X69drDvWsXQP9mUVwiY5g-4mn3EdYvW3x-OLrrjqWWjrNLX-qaEml80WDYC2JCnqRv9Qgle7r_Z7sgMmCRfBLBD90nRA7NE51N1qRrBH0uqxjcpOny_7RYVzhpaR-HnrbWJLJJje9tC1_z3vgU1QIPy1fCL9vokPVgqCZIga8JyA16Y7Ff0EqImNrpZm2Ko8oRZSzA5Ory-xQ6dlxjCwzDRJvzgHBf6tG4PD2_J26HWelBQSjHo8BWc9PXKwUtPIrrYZISeTYLOuVa1k4aKErFoQI20bXCF9o4AYNJp7qX2uCuhW0i1nVazYxqAfXo0"} ================================================ FILE: genesis_data/genesis_txs/efqI0eDfp0OcYB-Ms5ELukIUr8-qtlX7Ica-ikhVZLU.json ================================================ 
{"id":"efqI0eDfp0OcYB-Ms5ELukIUr8-qtlX7Ica-ikhVZLU","last_tx":"","owner":"7d4t1cSZkIvKy-77I6X6MqDe-S8qat8BA3osD-2K-pAoQimJyDMMKrq6PAUR0R0ssOwLdR7nwwK2ZNITS584wtI4j353ehfnWwbIgTB8LPitw6KvsQs0ErLqpC87Gslc8nw3hEg8nqg7b1HpRiRFcA3gl4hiG1B_6bYYGE9tfJKZPKh9GzflnwFY0uNf-3qZGAMi2xSHHX20IqDdhuYlHu9KD3ENVAM-1lhZuVIuUUZ-eXEAqdqq9vkwwoyItDhZz5UdPL-pvCwv2GYzTCCBJm8h3Moz8pz23RNKgq_BxyQeeh2huPZMDInAqoqcIYfzuPv6KdxulFsqClmjsz01opbIwIaehkGTJCYuNLQpqdox8cjFcX4vTWxeeVqQjiDMl9rSuYLftVZZqDPHbMXa3M8kC4JEwyItnaRtAt-XDyHZFtQwMoEyuSYaE4t8gk6OG64F6z0Rsx59vCOxDlCmBS4P9jcZJ8O6iaGV1E9fvMCbyOZs21W74vBomb_lMS3NjJzVl6rZLfD49FY8OJ3C8itCLlvTCjzzmJl7xsS8tenxIGQWIRzOK1NCozxRXtTbt9UGi__15JH2cqLWDGb6YH72bG7IKDpabZsw3FcRd0L1ozHyUNxn4vVq-tH-TDS67xsZ0ljVbN4MGQuyDj2N17ksy9B-xTpNJ5s94tGvklM","tags":[],"target":"","quantity":"0","data":"VGhlbiBhbmQgdGhlcmUgaGVyZSBhbmQgbm93LiBEaXNzb2x2ZSBpbnRvIGNvc21pYyBvbmVuZXNzLg","reward":"0","signature":"7ERKBQtL6zCAQl_xPlfJCFPHuuA1TT4vroH2-eAxnbdAD59X50OIOmiCMSoQxYKc5HJgHWZEy3t04GRLUBHMAN6bkk-4G9S3iqiWWWvXRtvdzX9SjGH4g0rDt1P9reUf3CkfoZfI2Az8UOzWk-oSkjv7uP1NIqhr3ke-W0JAl94INFpCIsHv07EHyVhC_1lRChxPT_YKWHH_h1kUrBmqaV_av9rNPiJBLJ-FCurtRt26oketujemgf6WR0d--w1CfPW2Nq2YYvYvjCFmcKamJLA_fVNcRoN4FgANc8iY52BzVMF6FMnKo2QovBOSx-mgP9yJ0FaCmk3EUV7Uu-OGl3x5GAilkJWyGAHmtm62caCoAeIT14y_fUc8qz-XkfBaNJqZxdqo0uGsTjF-7ZyEyG0RWrFQeA0wuhmN-wCyAj7ZtJLH-q700iMunEc6-z5qKCBC0ewux9j6DvtIfzRsrtkY4KMQU79TADrdeju-Z3SanFyONHYRGH0A6UY61qnxDWv5620bmi8LlWsbEXZfoSrM0NdcayU3eS7fBIlwTwM5PMldwWWGnl_VQLndj6BdHdTMjLwSVAILBna-1rA0RREud_d0KfTYGUqnTVXt_hazV2ak4Gy6IoKHFACQ_Z-4qYuWyId_2l1kupQTm_XMWDcFZ8dCNoOXhSkBoKV91oc"} ================================================ FILE: genesis_data/genesis_txs/ez-ItWkyBvBZ6J7_Mobrpqc9RTp6I2JBmkPDV_xCQVY.json ================================================ 
{"id":"ez-ItWkyBvBZ6J7_Mobrpqc9RTp6I2JBmkPDV_xCQVY","last_tx":"","owner":"1YBf0ANqOBDFEKhWZw1k2u7OUElohaCWl3EpvlpKH6_3IL05H576YbSXNsPBJe35PsWZ3rGK9ffGtuA2XP1g2nFDI376Ob6ihYRVKUduvujNGi3vPOte_cJzWQXT-6o5QcOOq89S96wyFJOu-MEHcvuzm_8aXm0LMgXkSY51RXTCZgStsNBBZKyZcKwovHoE06xE1nO6teR7VinbI2JVTqNh7MfJJmCXiWR7eTPLGVwoon5iv_1TTgRM8d-pa-NRPpQvkxL6JJJw__j6_qL5G2RlAB87bdkHKxot-3K7QFPYgVDY5a5TjZ0FLaiYbSiouaGUPBWQTpIspjqjBiMap_UAb7QGuZuWbhNZrLyCvC6iiJe2kSF5xqC7R9OGcC130MnI-MrnoioWY7UofoL-42LAOtRtYFUsDkQ3HRlfcrXCuiX4lC07R_1JB8efsuEyssgfkAUdPG4o9Lrpaul4c_zGJsd0Dcdt13eR066Nuv9MJBb9Sx7m4GvvI1KiGzW-MMPsQ4nGbU89ps-gJtjaHnlddRyqxyhjq-JtOrPrZjuCg8DYW9jB4j8rasp_mhxUgGQzA5EydGx0D55TIJOi8DDpCaNGKrcmraTkRNd02sbD_A_IMCrtTBkyFXlZNXaMmEnWpH95SPMq2wZv81k44SuAk6QJqemGUTd7Y62y0pk","tags":[],"target":"","quantity":"0","data":"RmluaXMgY29yb25hdCBvcHVzIQ","reward":"0","signature":"INhORVY5sn09ba05kggTiVN8qqo1uD9wpJ6yt8Euc9jcHq5aIhN8KpWqTZj-WdXC53k0G00iGRQBnu_2XhaZM5wmxIhDV7-ugdpEv6aq1Jev7p8Bh0-5Eh7ivRCnt-_uYJ-TbaUFJLafksm8wP0qLscK8JwpAbhP4Mm9XRIj_xc1qHaaV64i0Rpl9XmkCZOzsY9lrtIJFqi-7zP43XkDzDjQJ3C6px0J0_hS0LScbT8NCBPDeDl2GViUu5WlP7bJknXE9wjNH_4dGrumy5HT-x3drcSGkM4K9kM8-lIYv4tNjM3F9ynSXBJy-UCB7kSbT6O3ym983N5wHgHioUrBbixnLzI3Ki9ducr_Jj1uR27g6KyMazmQZUsgP6M2Jrpzie7lMURD9faCaH1GsL39mns1eNCwD0jzx65HcaVDH_iERTYkxMMkK0f3jejpIVMve4y6M1Fjcb3FppVy1y6d1AITWY_77CqFUsjX-CYRT0YlTnj8hQuOkhJds5o60syA0cYDfVQXfGNQVNA--CAhajdNXCgEZ7iAYCBWgr4oxCdlGNNHFlYMwtW7r2XBw-3F-rzuH6clwJY3sDAHbMxNehYhCw5f4MIy9wytAEpVM73wqIPru9QAWwdiy6old-sF_-5g7wQQZP4chFpTgty0rU7OUvCjIMCF-JPUeRDmNhs"} ================================================ FILE: genesis_data/genesis_txs/f3jE7NK419FZzwkx9VjTkrcX5FEgl2Ky3KSK0vH-wj0.json ================================================ 
{"id":"f3jE7NK419FZzwkx9VjTkrcX5FEgl2Ky3KSK0vH-wj0","last_tx":"","owner":"tqYaS_oanm5iwKkIxZ-ryNiZ6kZu8Q20b-6rua24BXn1U1T0ygDbGZ-rnAGR-SZDFbR8dxx7tWU_nZKYhhWjIebhPFMTA7YARnW7gC2FQa4SCF4iDWur1mFl1ASEScWwP4YbFtzU-lcxQTMr0_F8P8j0bRoRbh2lIdcGN6qj-kpzXcNvRxqziE3Q3wKFevk2ScppzSwFtVKXUbPNxvJ-pmuHBrjgZtY7IpXvJNlXPNy2EKeKUKz5so-B_Bw4b37U79bIF9nlJfeVGThwk-sELWoqSd7rumTrL7zkHIQyLtvLQHpzbNRo18ad410dCEQzxInMPG3Bu2fypqYzuxs6YqIXYU-fa8RZMsr3vxixjC42vGpL_wmObkqHGvLdMsMuzTlfWumSJrVvYhuN05Y-f96PP8hH2H2WgqnXNFURQ1KzKm8WoDJ5z88Ul6acgivqqEO8cLH8FZpUohtku5yieofKOpQTD4k1eQt6A12R5Tpa_FNnCNT_A2uHq1T1eGKpYKUWY2YsSwwig3QGU2Bk-TiMPRISNxPb7cjYWqNki2F5RmFMmafJ8n4krCX1g_XO_59hIfFzKfmay_Mt20g_7kNQ4VQxufMRDMCohCUL8vncUX0n-4nPe8GgVJVKNQzjCZGOV8hM2Omj389M8yFpkmhjlz7wtqtND52gIYVcfuc","tags":[],"target":"","quantity":"0","data":"SnVsaW8gQ2VzYXIgU2FhdmVkcmE","reward":"0","signature":"X5NZXWk7PpwhpunlhmrNO0-aAzQW3fii9TN5yfz2I41G2ZKTRXPISDmD5sSvfGp3cL8Hb4Gmehkc7JP0-om_UWA35xxm3cWt6M-XA0kekAjUPHIC7bgjimYjqDGG4QSAziDQqbNwhXV7Ux-2bv0Zva-1fWw47gTMoaNrWrIx5srhP_hD-1JlAPL519oinRnr-H1XvpC7vc6_18CP4AgXDqpFCTXqe8MgDo6FafKLabqB9lNCM53aUUipBGlLYwvQc3VeaqWUzUPA5gU1yPSNaEpqkycVWQwmtGyr-zSnr2LcQxExnEXnDa4vByLT4C9oug7sL7xIbOtMpmXZAhk2Q96J6WUF-xmvZhuke-gHo02OkGSZ3lTlhwXAdSwqSU-Z4jwWbBhs9yCGSJvxavMgRzr2uRkLji4G7HDCXbapOwhw0LC3gmzTegAMiGMrJx4UjNjb8aX1-37-sn-NwMWtUAI5QzSSub_3Pkn_xWIGf2vOo_b1du9bQdxwb4SkhhbXqkopIalbEGjVSkVPL8nJyPvTafVWAxwKq9wlPUKaWz2zE0w3gb3AGYW458pekskp2JqjuiY1wYawA9515SoaUZjEwlN1HDGeHNuTB-z9b9uoXKVO2cK1v4SJl2e1rkux_BD-vE_Of6EDmzbyMk7-lZRptCLB84ZLlQvhqlFtkXY"} ================================================ FILE: genesis_data/genesis_txs/f6MY8LMCwGbKZqXd4dkCROQK0qFMjS5OJAbZq-UhMGA.json ================================================ 
{"id":"f6MY8LMCwGbKZqXd4dkCROQK0qFMjS5OJAbZq-UhMGA","last_tx":"","owner":"w_COBAsbcqsrULrKdXRu-Pe6jeIH0bJz49CBbHpZ2M7HccLB8bneFg66RsNt69LC55NGS7ByrtTYQacnrGDicvFia6N9d-mGYb9LQ1OCj6rIlV4Hq5HrVw8XfNApCuYVVssyONJehFVa5bmN7yBtz1w9nrWyAO3gUMpX6sTwMBdY8eYHqqQvKWFa669VnbWCwNOOY5sN_P7YYVFGftzGWblAAJrc5dxv_tHbRtA3BEQ6CuTH8sC2vccYedjXxioz1RDV6KR6R6779k2SUdsiJbsLmhgPN07geAmauvNdocbXamUs7on8Y0nTLRrIzo9pnHNZScuTtgqAu6PIDwXPKSSy2XXqzmeLRDNbgfpZnSrjlBf2QzXD7ic9xTCX6kzBAwPX4s25qBZeavNxBLY8mr-WH7cXnAEn7yuLUunI4LcZV_8rrZ--wnH1ehUjC6WI88FNNlAqJHcxHaScq3e20fMc9l45rhvLuXysspS6e0ANmKU2KDSh8B7YzhGzXBe5gVwkJQAkKilYVdUN-NeZlVC5xWtfT3KdC_GJg30NmoYci26aPDLL3L0nJtHz-iOZ59lf_8vsewRbeIx5ZnkYDt1HM1ZGDk0GM1oRz-4gfrG5L-J82dVeagbUcAIcpgZPlC5uFFmUBN3Y5ij716xuyS9pGTg8_unaCM8ZcUw_aa0","tags":[],"target":"","quantity":"0","data":"TVIgSiB3YXMgaGVyZQ","reward":"0","signature":"RkkJk0PokocPcPRhPpuASN-ncxW4ZoKvjZqtygK3ObCR2r0I4j29-BGPXQ-GiTeANWTSrx0qx3Njer38TwDNH4p9soN9EV0iyOHSwFw65A4rB9duZJ9254PIHuCvbEdi28YIarOlehHSRY2lJyWQuaJr-ks_wO6-5f5NFdr39XjuA0X3KCqPNSoIOs1g9UWNfDQas7cpbl7yOIllr_vpwONyWYu9b19ikaaXkZuzamQk1Kz7MCjRqmYA5jbq3HK11L7NhESC87aR-1EWgRjibee6MJS6ekIadMpXQ3go7wUSfRScvlUjXzyR1VJkNvZLHnAF98mBzgR8FUdydnkmhZsVhGCxyReEPK_QM2MyNm-_a9qOFPaOceCEwCya8QtIqZjVHIxtEkw7hfeqLTF3P8aqhm1kv4zyCkMMdf6Vvlxbl-hSnjWWLXu8SVX9HwdC_jjuaAB-vXl3gh7oX_-pchYGf-bCNZ0vrzH08JGThVQAL_LkaoHtN1yikp23ckworBJzuu5U7Ba_kJIxlvUIUKHD-n4X0C_Yba5vfGAJnhTMZOfRkmcV9duu_QqKYtb8PYRDDO8bj7QxI1lAU8085eJWF2KonzTz6mtsMclZdYpqdPUjVk_M16Yn_5rmBWiA_wKNHTYm19Ji5LMfaGWTYTOYXAOCFYWN01Bu_6Cnp7A"} ================================================ FILE: genesis_data/genesis_txs/fBVa04p7MEL8BsPpyD_Pwv3uqBnBMVzG9YpXsCwZLtc.json ================================================ 
{"id":"fBVa04p7MEL8BsPpyD_Pwv3uqBnBMVzG9YpXsCwZLtc","last_tx":"","owner":"trg3fJI7106PWDK9tvPSxzE4YuNwNzPUukzur5MIR90nuhobkf9vYX5yPdO5FgitHgE3SJ00tCBMVsBUj-y30wwfB82iYKNKAmaEHY0RlczAO7fwM2N-z9be0-KPop-TWImGMXllooIX4jABHgD-S33RVxtqd-rkP3gHLVaP4H6saBdT4AvvXlgxRWwpN1jg37in2DqavvkfjaBZ284X4Rtxt5lrHFiwLN-KHYtWaDJymQC7MMbdi6eqilm1qnDwuU0V7n_J6ph9pW0hw2uOHAJt436Z0_In84tp3TbbpPr6tV55n6__UypVnMTMLCaJKGdwHjreHuDqCZedaEHwMuDPNVB4A7o6yHzu-mpvXyWd8htCaWaRCMkLDQql-04zIJyEt2J3qPf3h2TOIatJOQp4tnlJ2NfCwiXP3pUGDyIpbrUWsmdryxkf7xMhBWRyo8N41wN51S74KxQ9tzRh9jvX-X9lpOsSI-cOgPFD5f4PzpbGldBoSrgoCZBpV_V6EFOxYLiuLtGeJ996behTCgg5v0aR2PBvoET7z-tyHVQ_zc26vflFAuBsVOct9b-UPrY4_Y6ocU45p7v0QWdke-fJ10uuAnOnukT0_baudJXmvmK8ufj5rkvQPAkdK6QgPTpzaCkNhrtncdMku_uYFTQBdUoPEMdlHacKxYkArgs","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"WPo4XixrCebw0ug7xcC9ZYwndHDF7JIWpqs8_Nioyf4ERbvzgAIIlAMHvi5x2TBl-Hm5nxRzlTzoJ7jiSlukRwF-XbgLrqiT8ws9k2wMsqYrT6aPj945XPp9somwqn9HVbrdFuvRJdKd8akBIORExoLsTDq0sDcHuFFJcUs8njpPh1LPluoaSSBsd0oElFSly-PVKoO1BvUvNNonXU1YHD9AHpjnAf1qEfQhVY5ZsDTDAe5gMeVtS9z7O-3ckYdP0tnUUy_CoaFZm5gnRgSduVDpwwZZ2F9BRsOEqQIi5zjVC8iIdYnaU4MsqNS0tgpTYKtNwtUQkf29OM7Awqemld73K5nhdTW-koY5J0cQM5wJ6-qDxMp7JbV9_J6w5-nqhp7q4C6sq3yKchE8WX86RTT5KRGOfDv-kDmRX2ksPOeXRN1xOrsukZbQK4qpA5utF8lfH2vfzEOs-zUYie_TdtW7q7tqoPPA3V8tabENqnOCPuXUJNahs0jjVa7yebgqlmPav-l-0Tg6NfwewK3gpzYTeqq3oUBav8kI8JdkV02UYJbweTORWH8TIIgJfua7bgs5MCs5gNe4B-JQ286s--eKeTfJv7IO90ixQeRSGXRQSMBwclWB8CuZwCDLmcOYdxLt7-SK8mFmQ7G7BSCWG1xMNqqYVRqKLbyngEFlhjU"} ================================================ FILE: genesis_data/genesis_txs/fkbFeVpiaAOtvt_-M9_U4HzbA8Elh5sa8xJXObrItYM.json ================================================ 
{"id":"fkbFeVpiaAOtvt_-M9_U4HzbA8Elh5sa8xJXObrItYM","last_tx":"","owner":"rxF0vQiMomNw4kEQRkoHnOIE9xftkrSI4WTfuKbZ_UVOCIi9tiEoz_YFEw3GLGPQuRbvnLHTEDPqoDBEsJS3EjG-dNi_-15eryxaRf2g6psMFN2w4fXVQ7zQnbFuw2rxTXMxVR8tWVgogoX_TRkBBGLsSrCJsU_mjxreuxodIRyS9hhFhm660APKCdaHmm6IPIz2wwfVpWW9qEibcp2LnyclzBOfwKuYi-gNhCb8YhsjGpYFRonbqwtjSZ8nX0h4WBXxvq98CaBXio0afvtaZ5m4A1-WLVBN1VG3_yq8exn3nsE5wVbeLLrgv9qfrstyWqFvtOvJyrN1vEOxhF9oRejYF1D23QAhYIg9TYb4Wj1sBqs97tyXuu5VZN76lBtH9zwqd658PAw-Owkmjotbk82QUPdWD7ProtJONfmj-kg_24InxYbnvmWEnsmAG06s6vZyvCcA1EQjO3H6UOvo98NrWmPL1k-uMLRW9tQAs8siGch96MehqHxIPt0-oxQjnPz_c8X-3nOVqsJMrrCSUj7AQSVQ7QLtLIVIoTnJQa7xppF0Mr-7S9TDquutSqW1fnpsGvvdahp6nPXsiPYSB5fzE2SHhSY-9bqdRn4C567Oci4jzrADgEkHT6-morch4m8D1mHb1yOw2nG0H9Nqs2WdQFAP5Lcwz-abjCz0L5U","tags":[],"target":"","quantity":"0","data":"SW52ZXN0aW5nIGluIEZSRUVET00","reward":"0","signature":"Wd_t4vRp3QYWKRsqVGORt47bsA113e0ZrJhuqLqTT-pbmMmGr9K1SFtSQjPZAbfqcSTURSrINRW20BLdofxinWy-9T5ISo6WEMCTaQYhAu2pTUZPhcjrVlyBiiqMpC7FNaK-fJdAJzt_tRy6h7M-RosUDS8YpvNs1kRKSTBTtl-g-PxLU2LaZoKxi3Pe4iVb6VGX_WYaZqjL3kswtzGLfwmjLGPr06tqbL47QMBa5Y3l3ojwmH49vQJ5ONdryGdsg84q6jM3X_73ByILiI6lUBK4-wg7a4N-R-PMZUYe714mDq4QABI1BfzeQRwMwpym4ZFIGeDm-zLHriltPenvcg9xxnPW_i8g559__XR66NwdM_7IrdU8mGccxbEQJVvLW27ZI3mNsycc3FwXQeNXcOWGuCA67H_T3L3FF4kG9m1nDEtD0w9JJtv9DBb5y3xubgKk7xbJSeh-a6876Y9LJGRxjfnbGWDBpSIgLfZ8wj3c6R9kDwWXb3rdNzSvY4CBHYBVJUO4g7Io_AIguLm2FResDvx2MMuiFkW69-E_9pNO3qAuHK4FlrG_bVx7XcL9F9fF9Lz6wylh-0xf33G4TdgMJUkKMNiYNGZT1LwxgvM-ZwhhrkcbrVFDxfl0DCZhSXrEcm4SJyA6DvXjOI2du-u9a9xeAbEqRaW6STGGQ6c"} ================================================ FILE: genesis_data/genesis_txs/fx1EmDF4yioha3ms_VbddDQjl4bt6pBLpFCESuEIT6E.json ================================================ 
{"id":"fx1EmDF4yioha3ms_VbddDQjl4bt6pBLpFCESuEIT6E","last_tx":"","owner":"vDxPjreu5LQbJ0YUBX7_FTFgnXCR0fWFRyoDaUeFwTG54zvqv9UAYbQ7vxW_-xXvb4X5z6kItUv5rNipJ-Q4A5IxnxtyrF6Jq0pZ-ygXTreAy3k4hyqHKSyMP09_DIPMFDNfIXqo_di2vVtp7JSSmMwcYix-066HPTJssBvfqKnAQqP6qibSryBw1-e00bON1Ac0jKiubYnjpAH5_hRZO4JssRfyx4MbF0AZRckE30UVPpaxh4NzJaYdwVry39t_0KJOaJHxa8ezvmt5Wws1MRXxVUVToHM3-DhMnb4_Syvv_IK8Nc9WCJGVJ-h5p5us05o1jdtElu-G86RhCEihaQPX6ke9QOjO8w6iRbengEMsTSXAvK1lpIR0mH4IQYKhk34bRh1Sk7jhoXnCM7P98_RbvugVN26HNvwoCxhC9B2rl8PovMDIEdMSUDs4JL6R3BNl0Kfbq3DuKw-7eVMsD1REqoxh3v2RJBDr5yZnUqdi84cyf3iig3SJYuiYClyXHIFcVGoWhBigGxTS9dp6tHehO7Dp_7ahLA8C69fpVIiEt5ZKiMcl6bPPNfBR_g7gzzhk44QMhrotfvbupkTVe_LShU1ATyvdSqUgiX13KDoYVtw4Xi3UPwxt18ycnK2wKJhX05AxZ6TJ5bpnHKH1KDwn3YY30Dj_GQiR7rHlkC0","tags":[],"target":"","quantity":"0","data":"ZXhjaXRpbmcgYW5kIG5ldyB0ZWNobm9sb2d5","reward":"0","signature":"KAoryyJv6Oiv_glyrvr5HJY1cNA7tUnDjkrrMZhqXcu7M8Zxlk2X_IgYsFhrwxyxRQxetiLEEmKXDUPsQBFcMOefr0ZmmyHq-DTQJoJ3Yc9LYbZjXndb769D5PASCvn1ABxj0Gm1YsLgmKqhBSlQidEK39xYuB6TF7l3IYFN9i1cp5OS68gp_LiABpp1WOsgv5NlSpF3WHBwCOWPU_xAZgmjrA14jaWARZjszMb2pUTA7Oi9Fz12RakDCWPmaSpYr0oLZxSjyy83scGut9spBZdY7atU2SvCKAwS7W0sRWz3qE7dbfUcNOn152CSr2soPewK2GPriB3toZKa5XztuMHlDcJL_ZGCnLsnpmNCXdxevwkLDtVHlcC6HMMrIqm19TKeTxMnYKWtgOsmv0HXdblhMHAJRxfFhkfyaQ78YW0OrGMbgMj6QH271B6N4197Z-PHZJt0ZLzyiuAzVAB0B46aD7oaLzjAMqYXLH_qYRB4t0QOCjfCcH_Y1EQEX7yh72D3dtuirO0myz_oGo_FTIYE9ZTwMZAG17G8S7NBvu2yRrXi5eegm2hwvCUswOa0OpGp2hJaF88X_RR7X8yYImIdQscRcJRkot4zD2l263MufDurNupR8RSGj7k2ydzeFiO-8702MQWRkD3RsffXVPBrYHdZ2PXCo_McbFHDH38"} ================================================ FILE: genesis_data/genesis_txs/g19-Tkf4xuM9golcjx0mA1RkJUYocQJ3uYnH8MU1ePs.json ================================================ 
{"id":"g19-Tkf4xuM9golcjx0mA1RkJUYocQJ3uYnH8MU1ePs","last_tx":"","owner":"1QzUjNEgwnoNA4csdhZ4Pl1ZGTFORWnUFgyliNk1uI3aKUH7Lgx_YAPla810B2JPq3UaVgEKKySL22cmv6elr4gK7z3lII0jITIeoSzyer3YaJhkPX5Z2VMOXqbgEBQevT-LrvA9jl17F-4D15tUPM8qdqDEq1jNikeyJiv-eKc62eEj3gpcQeajfttYGqfgDVloai2GQ5LaCaCyYjzoaADJ_VtgxjONtC7VckASu67JRwyOW39KUfH55r6cxssYUqRBrszG9Nx6CY3ja85u5vIZCVqB53IxVSd43oS_ZmzBDZeIq_0vuX_aeSK40ou7q2pBlfR5zo4CKxSb0IWslwhQM7DbmYq_aB-Tj3P8umBwr2ELxrKL24EIJLlAD-0zCrVEwLnQPnggMqB353O9dpDb6rDhvf60liKJzgvrC-k7KAmSnxvIkcQB1eWIH5-sYl7Gfrl1kD_VdmwED-gPM_Gsla2RoLfzjLMPHnpdaFdCphGYwwW7htMyhICw8bMSlMmUxseOrA4oH-R076SzYpLERO3W3NoHc2mfzgEzea9A7e--LS9R-y45onjzB8F5Xv2hgdOZB01K6qw1C-QZcNj-4DoINJQ-cWKMSk4mIxuhA5A6rEjF2gljX5uneL3Uf_ijtpEPQyR1EwptqkwTuBoFa-Glm4yzS65oZJV-KZU","tags":[],"target":"","quantity":"0","data":"SGV5LiBTZWUgeW91IHdoZW4gdGhlIHByaWNlIGhpdHMgMTAwMCQgcGVyIHRva2VuIDop","reward":"0","signature":"NQskYCaILRi0lPSD2IsDmNTV5IEkDl99VhLv4SxwP_UFzVI7kdYn2rdeYO2toTYuh-JUfk5y10r6OjTKuGU-puSp7XBfYrYEggvZr6cDNDl2E9UUJyB0xY50SVOU87Lw8idQdJkmGb-mf0ftLy8BvUEmO7nvPZk9OzoDp2wiIU-e8Y5PM-YuemAFebpBI7jyeGsma-FMPKICIFAcumVOXrKikisFRTJkbu52LpRoZDZK9tiDcADA3LJ8A8DZGVYLICyvWIdtlUJPNIv0vTkgSFMF8orpiCyeAqPATlXykFTmTnc-y4WhhffRxx1BQ1CxHPoPKjwFvZrKmZTF9o2Jd043MjLISFrboh_0l4GLIB4NrPKkNST9yQk6gSmPtH_np8QW5gLpA_5CcQw_R_EE2iJkZ4Pp1-pwbq8Sz4sPd80JvB_MbcHqOyBsIKK9cmonTClklYnV2OOnVxVsNhSSt07uiI0lZG9aosER2Q1xf5P_JYy8YvuDScifekW_Si_EIzINb_vXFECqt0U0oRy-wNUiLRCNiSkLwypxbQZyZp3ie7k5yvcZ1hEkol_ULns9kbqrxSLekWlRFykW09zakL7-N_w9LRbfPnBjoftG-btP5PHGWIKON0S4S2kx5SX6wSJ8rgBO81jDLnzGiVFICpgkZnkmAz5_cKxfTsR3Gxc"} ================================================ FILE: genesis_data/genesis_txs/g6TUtTIi_rwlAHNuO6ACsQqIChWACugTPmZxaaJltDM.json ================================================ 
{"id":"g6TUtTIi_rwlAHNuO6ACsQqIChWACugTPmZxaaJltDM","last_tx":"","owner":"x-62w7g2yKACOgP_d04bhG8IX-AWgPrxHl2JgZBDdNLfAsidiiAaoIZPeM8K5gGvl7-8QVk79YV4OC878Ey0gXi7Atj5BouRyXnFMjJcPVXVyBoYCBuG7rJDDmh4_Ilon6vVOuHVIZ47Vb0tcgsxgxdvVFC2mn9N_SBl23pbeICNJZYOH57kf36gicuV_IwYSdqlQ0HQ_psjmg8EFqO7xzvAMP5HKW3rqTrYZxbCew2FkM734ysWckT39TpDBPx3HrFOl6obUdQWkHNOeKyzcsKFDywNgVWZOb89CYU7JFYlwX20io39ZZv0UJUOEFNjtVHkT_s0_A2O9PltsrZLLlQXZUuYASdbAPD2g_qXfhmPBZ0SXPWCDY-UVwVN1ncwYmk1F_i35IA8kAKsajaltD2wWDQn9g5mgJAWWn2xhLqkbwGbdwQMRD0-0eeuy1uzCooJQCC_bPJksoqkYwB9SGOjkayf4r4oZ2QDY4FicCsswz4Od_gud30ZWyHjWgqGzSFYFzawDBS1Gr_nu_q5otFrv20ZGTxYqGsLHWq4VHs6KjsQvzgBjfyb0etqHQEPJJmbQmY3LSogR4bxdReUHhj2EK9xIB-RKzDvDdL7fT5K0V9MjbnC2uktA0VjLlvwJ64_RhbQhxdp_zR39r-zyCXT-brPEYW1-V7Ey9K3XUE","tags":[],"target":"","quantity":"0","data":"QGJlbmpicmFuZGFsbA","reward":"0","signature":"d-pie5-K8WoB56De7jU0nKID67zs9NrSFh68Q8qZMWewNFo2mUBeGa24TTQappxeik_8ivfqVu4IPRVVc3wvPkYNaS26RQU91EVxtx_Qucx4YFdSOMPktIhDTxlRwFrPWcZu80vBZE9nqVik3552_UoK78SC-7fdvQ0qAN44qDVG9dqU0aukXo__gxEzL56zVOiZf_6ENhErRw3Y7rwv4wQcBS607hNsI9Zg0HsHH9Khz8Dh3eiUI0hKtruzrIyQYqnIJ_rq6jd6uXGYpTyAqNY9FV-9PqiWpGZvBbDadEXBq5HXPx5rPF4hFgw30xsbh6Pf7g044a4E-JKf5MhFd-mb05jdL_LhbeujBx_K33frbfFW4W0H_mTj2ONc_djIzvXagGr2t0J_ZE8fK4gfH7RXZqkgz61E4zlABofPYCqah2JHHfo4cwt4fCpjrqneMyEYHiU2U7S2_Kq-Rjms5HI6xpv7ryP2D67VCRcIFM4Ep2k4lVI2XR6kJU8WvUBdTqMyraTyN0tSFBBnyiTSWTfmuzm2rPvuLkzc4wc9HLS91eC2eZ5f5VoW3Z4gcQihfHAM01x-8CrjWnke3HiTkx1YDBc-nd0SeiyC8n6oyuLepP0ZCtewp1Ag6_Zku8GxZsRERS2zOcvvYXcs6_j6tkw4hs4_GFb-IZ7J4ur8H_M"} ================================================ FILE: genesis_data/genesis_txs/g8ZQaQTNUbg-jGeE61og18FrGqpFeZxjFDypGuhT7zI.json ================================================ 
{"id":"g8ZQaQTNUbg-jGeE61og18FrGqpFeZxjFDypGuhT7zI","last_tx":"","owner":"mAI2Z_Z5PyLh6QUJalPyu2jXMVVfJS6vOYp3hpGE9xnC73Mp-O2byDEcHJ5QUztESpvwYX6RMkW1d8RoWPZGEmVUYMF_g5Gj_5RZqSIKBC3ga4GFRjaA-Nz7LbCwnVr4nuKjysDEZj3MfHkWhjYILj69pQli5Ka5KI1LRCfpYCdsinL6SMZ12aNPzFybW27b3bR7aqP6SDV_PKMD0WIbMHlBXQ-eY5RdkMX8xCNVI8o3uHoie1kglJbRV_bo9NELODY4LXFZmcsoRTLEMB-x2jkiBBQFc_PCnFnlGIR-RLuLtSCHhTrOUugYWC0GhQAEhgoIbH-AhKR5ZzafMSCo8wiAwjkKCn1nMRNK-CM9tUAgiRXkMgZ_NRs0fLmg9BO8b8HHmqb-RalrgtvhCkuWRwe3Ga1T2amwUe6m2Cu1wWDYr2hJmrPAt_AoMmz4j6DrqNFKkLQ-irByOtbcggnQE4pjF5rfyvbW2ODdlDV-oJme15yLIgtEgu8CjGSUnSY5dEm_huz5fuxOaoN1k-xkvhoE-kxVj9v4TAgBYLdP1NvtVYhiyn5Sus7u76R7uXAO_rhUdlqrw3UjBo6LeXsZQRxJ_jEsKUNvUdVmRVLXpCQ8_S6ycg35x2W3pnPiXeduQT2FtbsY28FEcKJ3CAUC1dT9EpohxJ3NE_S55traEhU","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNr","reward":"0","signature":"NJqgd-w_3A9gfaBpHI9r_ultiYgJr5F-J0naITINUR0OnUIiblZmMmVnFg4xhiU3_KbEygv7UtOr3l411Le1INJEnlpO4eEJFJX4ue6RWf8XL5ks3SCFalZDfICD7amG9Ms4gMaVli_qFIjAODMS1hLE6jkPp2AeQB8KGS9rcBPkIWPiUJg7GV1qTDPQGEhVGk9Dt5l4J0R9j_OJ5WebWYcdSWwOCVJj8lnD068LA0SUkOBID-lsBmX29rvHS1rZCUocrQX7hpJH179GIyPiPCwm3vmCGnAOv4PTQze_Uxf2RSSAj6DBeLcyJHR-kQ5kT5UGmQNeWZP6XC4LyfB6SYM1AKNVNNKSCAhLQ6iI6QVub_XTweBiDtDV4y1YYd0wXIMZJD39CnF0np5fs12B0IVV5xdVlYDyxEibDexESB_QlrM6XCiZ-xosxqnOb8jgkQfdPivIqJ5ZM2vqcCf0mE74j7TIbLXtjopDH5dlhOAX2rjBeB3H4fBP9aVHycN5ikE-aD-bk3t_tsmfunkbQa4NOqKYyt24AkiR1dG1TmlnXf80nUX3YQ9W_-Uw-F54cF4fb9jCt8oACwiHkAXmosdeAiqhw_TL8LttlDSjv7gbCcfI6tUxUGvpjva8RR4QfjnasQGsSFfYyhcIdSE5v7pSMMXK9808s57c3z7XMzM"} ================================================ FILE: genesis_data/genesis_txs/gE-2fjp2ncJ0ZRg12UBfqnCBb75OtAOksEX3wGZguqw.json ================================================ 
{"id":"gE-2fjp2ncJ0ZRg12UBfqnCBb75OtAOksEX3wGZguqw","last_tx":"","owner":"1yQvG7vhlS7v6slHSQBy_2XjQn6OajpuPHltYsfKm2FO4R_RJ5ugy4_ioHOLhHzjPH64WYHlJuH63Gyy7kyodxRmtyIy8sl6VCcrEUIbjMBiGt33UFlcdj9ZlWJnFth3qUV618iSI17az9QDObK7H_NqCmH_Yq9yR8jarKWtlQcDae2GFRlucNZckwJMZuTWy2bQiihY5MpLduFTqBV2ovd-S3uxCbigo_v136tTfBazHkrFzBMBLdpJfgNG0hm9TRehExxrmGw_bacUF1SMgvEOSRwBxyWBo7kDid_sqCj2N6zX2-GOHsUx1tXIAPoJWg0BFj0ZdJjJ8UZ-4g6KsUWwan9BcAKN3XoIycRl5-3XEjNusmEy_vMhiVv_21t9pVDcSKUcA355c9wwET8JgR16eV4bXHv2ZUNaj-WkA2_Kc229wUjIBSSX8pDVtlruL845BkGlaX-6HpRsdlO1KXCWEq4YzU3NuRIapj-0B4L7nvJyId3FhDVR5m4ewsaNDiVI79McXVIfPl9KSy6R7Tv3TZ7OzCJIfL0q9qK243F7GW4mrHKaqMQcDt2_QY_rD8MaxR-GKL23V00Zj2Pdb8T6uKWsCJsBPYw-9sv2ET8Jt0jKUyc48PdjOswsMJfWExMriFuNH9sZtu5EOKY4qtmSWmvJsQBAulIR1aQx6SU","tags":[],"target":"","quantity":"0","data":"R29vZGx1Y2sh","reward":"0","signature":"uYpb_3zQUnIFeoR-1u7PUK4GgMn7CM-RoPkxQwSu5SwVmCTGIVPXqFpxlIXv1zMKxMiopw5IspH6gmF-LlDEOtF9y63qOhH5_XypSrLbMJuSDEDG_K3a6k_8NIGag3CfNrr-Vlqq7F2mm1eLDVKGnp00rpjAshZKFn5_uLBaSjJgxm-MjPbS9d-o9ae71D8xekby9tdBxlgIYvebO9EaBNY3hXzAj97Opo6S2XUKa8036iXa4vLCKa1Sv-ulDjH3UkCvTK_ide7tFKI15DFXRSXuFzJ-rbrp60dujjED0na4z0MXd1w78jEVL60EW8date-bLFCWumHjVMGJXO7NJGnftV7sjOrlxsgp6TNwRZBT_8mOJe1x2uir10mXZaWaFqO_pgLvjIgvew5jTAQAGZ7aelJXgR4mkFoiHUtiCtx7Tj5TpwoBV1yoYUX5oLfFyAVk7XFi4vDf6Sd9n-oCyu9x-LgY5MMBsiuaVwBYWAu52ejEmnU1gsxjoBBXweR2bjlawNNPfibmSBiDzefPrZGU-WQ0KTQ_WTfer26BAf1VEcHdTl30LcerK6pu8aRr4Bls_E7oy_AN1D8LQvBg-Ah-c1ZFCccpUqaQKbJy-VV0oQ4GtIFOrHULhKQD6jUbvJz-AxcwoBAeMUkkhH3EE-Jai_xENKlOsvRcVsSK_sM"} ================================================ FILE: genesis_data/genesis_txs/gXd75eQL5Yzcn1ba51nORAvb6f_surSnz3xcNlLAxEQ.json ================================================ 
{"id":"gXd75eQL5Yzcn1ba51nORAvb6f_surSnz3xcNlLAxEQ","last_tx":"","owner":"tRKdRsXJcyeUX9ouUEZAOLgNXP4Mp5GCLwgnNyueiWKVdNKe-Jo5EgPBFcdfqQBQhVwuPNyUWTHORx_iBDyQt1rqWxPRO6Rj52V1EJ1CA-G85_oVwNWMRgFgKr_F1FiXTye0j-GirOVNNB17MKZVFOYr0QYkwoQd9msJBGD--c_zkWyTryd7BZkNj0SwkWHDxZSsGLx6S9uvjspuSYnkpsdes6VR14YW2bt1mq7ULzVVbuMR2RCat6vcURL7jBfBCElLIDrR1VCeC1TE59jC92rTNVGmzU-17ZS8xJsJnkgygK6bY4fZuuTNAfSMSLNQ3VXvx1YYPS77jaZBBCbVYMOt9U0B8umszaK-7hqltQAvaOzisvRAYNkAAhqkG2gO0sJ6e97PHV18rfd_h178MpuGrWOhgUpb4MSdgJd2OnGqYN6eggSM0OYwQ0ivKge8ti5TndYjJrw3v1mycuQ9CF6KwVYzwHAzgzXukYUYAxO35wkrQBJR86Yaj3HyujsGZyDijCsjjOBHAGyZu5pUTN1iPEmaWGMrW7jB_W42lcX2k0AocEKBKkujEw29cKmXwZaM3kWgzfqUZcVN8ylV1FcyhKcBR_k5dq2zLpz_b6e9s2awAID0kyfN_1X9nGG-xdwLymEn1tj992aKvYQ_PWykzHuKXL-a1vELEDeU2vk","tags":[],"target":"","quantity":"0","data":"d2hlbiB5b3UncmUgcmVhZGluZyB0aGlzIGluIDIwMzAgaSdsbCBiZSByaWNoZXIgdGhhbiBhIHJvdGhzY2hpbGQ","reward":"0","signature":"e4r5BhWeEOgRfOgiBbTuSNutleF66EUZHNLIuidA_2GqePhde1PzB2bq4xvoOASQ1FmR7UT6B0B1URmoJ_dqz2wfA44-_OlOYh2AUkcs7wBQKVXFqDZThvOU3hszfmTnPsbopb7_R_MDfMvk5yVUVolClhjSpFH-1rxFWFzHP68EM9l1Zpt6CDvhADshSgWANuJWcPMeTbWD-CflIKZ5BdB_gUpWsLV5GJQnqMUvcM0ARSJGOpBQlSVl0jrE3Geuz8g1tQSaXf2UzY1kGkI3tkoTYeia7ESLzbHvdAIaBt_g7WIPJfQhzivrJ0ODVfGct3qg_DEU3DzRm5ujQp2mMe-_lRQ3vXrP3twKcq_cOY4WwIki1BJcEp1joWoq2wJ96_YKhvA82CRPV3Cd-fvnYORLVGuejtlR35ReDF4pmjhHHlpSgV5gV73P-7voLL-90VSMnO_uzb3Ym3BYIFpyjrodKE-_Cr_RFh7YSrscB81GM9tsodHu4ayMckBdrYpNQyqpDlhqjpUghs0bcEWhcUhjZGzcvUGpnUKxRBN6pO2v6k28LmDbBoBmvdkzq1NcMnSnzcyiKESzxLuPNSo89Vm-YrMVVMOPglcsK-iC3E31OaFu8XT6crT5EVUfTm758zrCKcTAdtKUeoic4PqoheYJTlXAHAYveUMHck0DOzM"} ================================================ FILE: genesis_data/genesis_txs/gbYMogbLVx3rOmm7K-o3nfGPKauLMLkGMSXcKkXW13Q.json ================================================ 
{"id":"gbYMogbLVx3rOmm7K-o3nfGPKauLMLkGMSXcKkXW13Q","last_tx":"","owner":"xgZWLy2a9i6zcZIO9u8jojQ_NDfqZlEaT3syvYm2S0mpsZlaHZ9vV_akOv3QkddwdvD5GG64GufyBo7r_a9uYoYwkTZvDeZ494D5Qv2L05lPoEc3hBKH45KtQwzhWT4ly5GTStaKCAZEHLL5340l4P4SlAwtVod_H5EZGFrHZhIe7TgmD33TLMadDXvpLmD0CfHwZ7JtZyQR3a-Keg2oM0Qb3T_I4lnlSe45Tt-7OuL-cUHPnnVruRsS-JoMBM-Vwh2JU5qvkl56F2BesJlT997KvPWsCMUgOmhqr-1v3jXY2vopO1OttAkuq7riogcTWwSHvv4DayIfKzgv8NY3x3o6SowQISFw987HKDSQVIOiwfOZwaPUTSCI3AqQmWkO6hqZF_U6xeGvSH-eiN6X5T07dHNQHpLqlwlzLlrJgdbCJWx_-7JfTeCFis5hS7pe2t-ylejlecXbnOarEtEk6z0R_q_2FHierVjKpyeda_gvMAzObIeZoszVnG_IzbDvepqqhdpzmUfXChSeFMjqZ2Cb6DCSeLJTg-K-xMKEuDSIOblAShHuiPbkPujmdC33TRS4DVZWfCmL3gT11rdktiJcIebnnBdjUJSGFYV9i0skpWq4xqc4AeQZIO9zwETozPZO_EdJkfsLc0Yv74lZw7CCTyhRYMNHqhm46xFPLuU","tags":[],"target":"","quantity":"0","data":"U2FtbWVuIGZvciBEcmFtbWVu","reward":"0","signature":"CQvCUM-80QN6X0Nmc7SL6hzlX-qQmaVLakkiD8Zzi7ERwDXBynM2lP13CiJnAXrHKuRIldDkqAxqsPp5MVAX0_MO1wI-X8QY4yJKCDYd4YkE8kKcjGIoVbQTw0VfAemqd11p6JKhIkird3qGhr_q4kQ8Q6WtYYnc8uuOmFVgv48SsdTyMsAqZ5wGF6VWqq3O-lLmlHnehtwBJ7z9OzSlzKT3UqhB632sPA6eiK3IAwTTAdk4AX7J55oubA7M4E83s7xh1wqFqS2S2oRVeLD6uaXYGQXs8LP32NU1F2oVcB0L4sJ81w4MZa6aYu3PXlq2iFC0AfgiYPnHzeg0tgTuWx5KS0F6twCPDMy39ltHfSKOzC-LwwHnsi-br2KxuFB8zZBt7D5EDUVUF6jNn8S2O4_M2FDvsPXOOYN0zdZLFSWVIRPZ5gj31bg0HBnkMWRgjOqVb3W_iKzM-Y25Drf1K7EORHF2uhjEE43D0h7Fb63Qz3CO2pVVvLuMn3xx0Y6hIjAGtfM2FphYy1XhGuY8pph0twpJ8Fe6oQ8IAG7FGCwjtft0UTwkn1bL99pKmBSa0pj9SY-OkPyUZUNY2FiMwlQbxjHx1ZCS_jMqICkBhTRIx7K6OFeOkt-aW3Oz-9BhpyOJAYdJh4N5T_U04H-LzT-9i6Y8DbWiSQvgX4LquVs"} ================================================ FILE: genesis_data/genesis_txs/gyG1bGFt7qkMyUCrKiEfMzMzc3_3PooewqNeJpy-3Xk.json ================================================ 
{"id":"gyG1bGFt7qkMyUCrKiEfMzMzc3_3PooewqNeJpy-3Xk","last_tx":"","owner":"0c0-1JicFK28uZk_J21MRmRCdrA_763bwvvxN8Gmque5-OR-KC4OvNQzxJK3yHR0Av9p8_ajbGzLrcRw7ZWAyFvN0pLNLf-LPPD6MZ__HFZM7ICQ1AMwr182L1ecBv10cjAh8VJSH5VHthzCd4miBvoOqDyIYVeHqO9YfYi9OlvLYNzITVWkVznU9nHUQhmafHW3vudjmKtM-oKPbA_DOX6vl7bPq-Evd8M0f4ZSCEE0jkp8TvbZDtrszS8_Gdac2852mt2gsjDsjW-Iu6ikoquMKHg0hELpk3YcCH2EoZBfuAgZ7bKTkIYRCGCZ_Kphq5mB2IzrwiRwkSfoO-ZgBhP59Vm7W35QAeRLjBM6LBi4ZXKLij3A48t7jl3ucvfz6E6ywk0Jq6pgXI1tQ1vZVjyUTNyzX7kaetmx0O8vcmkc73SHB7OsUOAetvWOzIMhjQeWIX1wqorntk5OcNp1cuS5Ym_V_wTPx0aqQUadp6bUm4SyFg_U1zW-Esybe9FEAvwTtI4McT3V8zHDdQBaBowkC7jyq7KjAx9K7CgHswdlGnFIDiHfX5JymvvzWlGRldkhQF3y1_TfN9Y48FN4Ohwmgf0J3SW_O9GwWHJEV4oRzR3nPmtma0br786t_GLhKT4PzAQ6FOcBXuQ9rlxTmNDgin5tzI2jtAO5_K0VXPE","tags":[],"target":"","quantity":"0","data":"Q29uZmlkZW50IGFuZCBHb29kICBMdWNrICE","reward":"0","signature":"CIyArit_k0qRSlbulntjvCOkvawb4YFRoeKFgHBUezVTELMQvItfKI11qjgA9KQw3Sj8I8sDY4AvWHwrZkqs3VKzm1_JxX2aQKIMihUdtHXpGCAr72NFFuyQa7nbOgKohzw5kDxFU6_RTdQgqcA-jEjUsmYYay-RAPYd0i5wVtqiOGAyU922ieIk6LL1dIVj2GI7SM_LZolOCMLe4S6U6VsDOu_iq0agyLfaVyi2QEvLqlTs445RK3oix4INmGlxg3a9MuVialWHjUfj_UYxSBmT-uXy2FshiysgHl38B07s4JitaGCfovZPi1F399So2y-HWrGpoEzqBQmnMNYTOGNKBlAuQGD9MDrOXMTD3xUTm6mEYdiDxzugGvHyrzxLFEnOp-01-OyzCysi-AiriROIV5g0MdJrZcCzU28JD1XEHAUaw0Eer9RIp-IYOPehHm3YegD1jTdCBmfdn00Ypy7h8PT-krHJw0vFM7W1qXm8RpMWTm1DfbDA62vY4njParPsoG5W0LymHRkz5iefNCbHsClzWG5v4Jpg7n_30EE-4Dgw-YGTFAAO_msPHDQUSSWttv9pWZKgw4TUtLZxwltpWqsSSsOxu1ibAEiBewnQiPvXYZssKJ4Cp1xlBPuzsx_nsmufYRGvj-GCI7c9WKNq18S6eA59r2qzrYgrxac"} ================================================ FILE: genesis_data/genesis_txs/h0MlFXsvtNQlFwgTh6y7-gjXEj0CbGECgz77EwQsca0.json ================================================ 
{"id":"h0MlFXsvtNQlFwgTh6y7-gjXEj0CbGECgz77EwQsca0","last_tx":"","owner":"wUpjGqLO31Mw0RTHKERCOVqBYi5Cx6sTe-BY0rXJ5izNdPSs4vgKv9e4gufZP4VTD6LEONGdjBYlWYH1l5S-H5VHxIqmNwG-c7tG7R2wm9Th37DYK46S2qa-q6QIolMXv-HQz37lW89zOIts5LyDUSa0oyFrXBmMimyArNYMiN-I4_JopbyjzSUaUY8FUeJ24xz8Judj7xUp8_pub66s6qA5ZBAlUUP0LhvSJX3dhyw_LWAKaGi6l9TkUGI9MSt5hdnwRri0TvxDZEyvCcDojDnvMH1mShcBZevYU2HDosSalLG2KOUDxURk9Mr1L2TjIo4sIvLZwgf3n-eFtENv6BRUCqDXMBSGF2P9nSJvVhUAS6gfuyJ0JlsyjP9eJISE1_HtFXvF8Z3xS--DH3g3OXBNX82tuLivggguoBGV2Nngqa9ZSIHwH-v2ih4sn7QmopmHBW-KAvg5tkG1alBEjrSRnL9ole_qgRrVbuIPqo861KtfIYFVHssLd-dHWog9U5-9jEnsMV9soMyOf3hMhw-BfbhpTIp0QSr0WWfJz4TQrjG6Z9oWhsLvKb2rASHSSo8pWO-pZBQM3moG8bnUwhQoWiF0HlYe8oqHM9dkPVnk1px7tG2jCpA-y1bCdOXaO-uSRW_uCQNrECcBiPY9tleKOQ5h8_ZnaH-XzZy9kpM","tags":[],"target":"","quantity":"0","data":"SSB0aHJvdyBteSBjb2luIGludG8gdGhpcyB3aXNoaW5nIHdlbGwuIGdvb2QgbHVjayB0byB0aGlzIG5ldyBjdXJyZW5jeS4","reward":"0","signature":"gp0nJPq6kerVs05RUwUC-j_1j_miV-OSkzFi-wTg47Nxx5YOOv6LA9Ila68FoMBQduSMBw8FuaodQRgQrt56IWqZQIGEOYR34z5QkW_JUR2S36orQEt6LX2JG43qzVAKx0wdr-zSahj1nx-MIasVQmnMMYRJqc7oFmAIdOuMdORR7DhYcO4BYfZVJRschy8jX_15BvcD8oBmGw5Q5s_KrGgG25HGwHavq-lXKJ0EWQN2F3vJP_s9x_37v8y8OoGNBSu7oykytbw-YoJlLdpo1LyxRhajWyj24u2hqfiGI4v5bV1gxuWemp59eINwQAH_wwSdGdHKqvoAv4ot6kwS2Du_TzwpgmXni3DhFeUEjadGAzshG9qYjT4CtiCs0H9vJXzWJNo_J3Y2wIH_5-GJMubXh73-DN7cQ1YOuVVJlz-AL68cu2nTIBqGuuVxo9-jJCLZjhzD_-YyET92Zg6NqldbTKQ3Kok86301V9roIIheOgrUzicAWG_pFOQUHY63mAk_M8LtykCydOR02yARXfdT2duNMPnSBA9rY4PTFwoNpftea4VOaOiXN6ftegoaaMiuytnCr0f--Y915VbUqOIDmlUgO1cdtEWWedhRmtcXdNjoK3QqREUVGwXIT6yGiY54svO8mFOD_TYowMdLaPeGqUfLDtLXw9gau8sVJ_I"} ================================================ FILE: genesis_data/genesis_txs/h0sgGEeQQcmSxg8uyiCOigWtI_r2ex-58nk1xso004c.json ================================================ 
{"id":"h0sgGEeQQcmSxg8uyiCOigWtI_r2ex-58nk1xso004c","last_tx":"","owner":"rLUiJAnEBhIu0o6Xu5NbwsrtGcl5biezgHoXWYc4-4z5b-6CZA13FqkGHLfzdBXCG22e60HWq8LKgfYE0fh9uKmJm9kO8pk6Su6SyKssfP7ZV8s54mrc5W_knsQAYO3ETp2iPSc8TdJctc8dARsYqniUDXuI2dJqHr5KS7m7Jfe9EWRZFK9ByBbGsKEykd9hbu7WJ2y-x4CFdEC6Gc0yMpKjBUm_QmAOTvKP87DnQnebjy2ZqxK9oQ5Iv8s9ovdz5_q_g5oGluH9pS640ZAJ0UCFywNudLVlVV_UaokOXP_nrizlJpDfEL-qUD88RGt15mYl1ZftOE93rCRkK5T-FZn4-apghDu_HfcXb3i2rCfezlcer3zf0V2DPswsNK9EYWsviQvXvWulgLiW66cHxdb181yqw0ccjqzxXhNFFsSUYoon1GLP5YKGC1ApChIwpjYgRFOKx585DVAuQmeOcGb9eTgdbtaczNofpCYqGhOzaMreXyz5ROmGZQKzr-iqVoqrceOz-Xzs4kB18oLPCMczcn-vZur1aM1KyxRIVe3VRafFvppMZJBzRPbe2Hwinrox55zXEEdoUZUTJOkgrdKdkfQB3lT3eQhelR8nsvekohLW2uJklvfXfSWprQjZzbLXmkCAlYXbaHB64oEX9lcPTOkI9AwfMMbJ694h2SE","tags":[],"target":"","quantity":"0","data":"TG9yZCBQYXRyaWNrIG9mIFRoZSBDYXN0bGUgSGFtYnVyZw","reward":"0","signature":"d-N7jHy-AZ3X2JwKyNkx5Yh73NzStUzbd9O8b4lqFBDc3Mx5OYTOG69sjKMG7kMXAT5aIjvZRO6D1qt7SNVxq9sX_j84e8crZfwkj9qqs-QJ0R0ZWoJ39Ej6j86ieKNJZ8AhOyGQMjKPXJkvI_rDGmw5bP9VGVfDn7aN7V1PitthFg8iZVA7CODQLblUU07lmzuVH3Xk-xo4wGHj0LymftlJUhdGHMH3GJ4BuYJ1tPsK6MIe5KHT8Fush5mxLFUkQhYaMVsIWaN4vYwBVo77lssLMWpATmEtmK7T9iuYaoGZ-n7rq3mhCS04QIshYWoqzfNOYeJOSHyvepp0j39unpA0LKstK_VWS6e_heSpH8XqHF1ne1OUWhvT4WT0wLRtPQ3QpJqYUTBwMhIXmyJixnScvgqqbJDquNKKpjx8YR_zqTdHaR3J3IfrCqQzx_YodNhVN3nKjgNxEPErELfF76M7c8nQnMZKlgIDdNQkIjNFdRGaC5m1s4n_P2kqIQZKCGMTBueuYIaK2CT0Dbl9SClnzxlbwpu4zUQCUK-s5ooPLPH54fmAQRIDiFSq1AHm784jy3LhChIJ5z-pfewEg4VjueXKcycvAXPH9rcKCkoWz4urpzsXSoP_qs-_6ImIlPrplX12F02F8j2seajQNUXKG0oMPWML9mf6FZa2HUQ"} ================================================ FILE: genesis_data/genesis_txs/h37LQjpChpTPMquvaxpfFeKt_7oAB5ElDzsdbCQ61n0.json ================================================ 
{"id":"h37LQjpChpTPMquvaxpfFeKt_7oAB5ElDzsdbCQ61n0","last_tx":"","owner":"ti9KzFJZX8RvQAEu5U8nmMGEM5eGJ6yVhayWpV3G5uRPXlCurH7w3Xi35pagW2yiv6uZMA-DQam-4KQzDYxoCobZWta3UeZDz8L4EWuBuQIe0jw8AxmF_5NR7bzBSGngpUJnD4dRaVxxDnIkLqqP5Y0aQTcPUpirhauG72FfUf0YbjL7cos5Y19xTFZZ8G0qqTjH3nwVvm2yqe2MPzGrw_cHtsXoNekgPW4mIQR4PGF7J4yA86rgWUysOAeDFQ0rcxB8wSeZ9Og5VPLaRhHo_5OvVoeG5PYfmrvrnA3l4p_T2Y1zP9O7dZEk4v3Q1ciBBhtqLBJz_ijPrbAOMtZds2TeOJyxR6WvTI9sUcDq_Q8Z-5pPF84VfzQEhds-WO_e5JSXA5EjyGSb5v5kA_FGbhefVkq0rzDVknf4s8TEA0PxUIqi874jid3QtUDW_QcuY9nK1sIuDxIThh-JLndaCu1P8gmk7w2N07elqguKeLS99ek4OyUL9rlg9UBMJzapKCUgqSLMh48FYNpN7VEizpSRfhqdpFMCXzCZRuwIISN_33i5E2WHYK5GAD7t6O1AwvdAcAFodAzPHfUp_G4CN5GDG67pO-588cT_NgWpiHubL26zrzgxWhPf1Clo7Io5XQt01fQfvd-u6QTBvbCSVMGewKc3QMMfVVNdjDJDo9E","tags":[],"target":"","quantity":"0","data":"Q2xhcmE","reward":"0","signature":"LzaTBiwi4IwBZNvM_PjKVeaJ2f2sWyUCcd08wYF5T33Sttfe2M1W11jwL7RPFZsi7GvHiQBB9ukD2qgZJ4hiA36QIKmOEHwGK3vpZDJdg9CnkGJ4A0vQU-Uik7e60LdYp4ZZWHRTOKyqiPeGqDHj8VqSWt8KPBeTawVdmsExotIvhfye0su5RTd1yFNZBYvWP6KdOgcFkZOxtZ_kD8UVtC3Jk3YPlA-l3lqphJN7-jaFPKbFbYdSsTbsBeW3iHasHs96reHU-ItI1oV4ShV0c6nyRJP3nvHlxkaayRskEC5wG0XTMMF_d5lKdtvyiNcAihD9u_blT3u0K0yBR1OEVi9mokKMTXMbSx4AkgyggKnUmKeDcisXnjq4MqiSMEPkvleBX8a1CymruFvRRjxGwB8JzzyLbzaP8wZfAr3nrtrFCEFBIbATISXhhvyGq7-Ng7BjdqF8XbcbDPZGg23fKgfgXcPpYi1fGDJGWKotrpOvVg7XkcgT0OUIXjk7JaUyV-pweDFRNaVO9XSpktqO65S35dEXRrUr7rH9p5vKG7SKKq6jtkwA1chvk9106MjSAE3sfizfzIuaFKF720v0_DLEpR0iCY1aZMpW7JMItM-H4F4DyGJtpiLAWLGHeC5RZ6i-3NlaPJ1_j8WRIgw3-P4OY46TyB7gt5OEW1qHzeM"} ================================================ FILE: genesis_data/genesis_txs/h7qIFbn0LoexuVwBcjKW7v5A65iQDQFYZUQjuowfIbk.json ================================================ 
{"id":"h7qIFbn0LoexuVwBcjKW7v5A65iQDQFYZUQjuowfIbk","last_tx":"","owner":"v1SlP_q8MT9dYcqMYTCD0lksvwGGWn-rjY8XMjgh-WYW7EHbpEvBheTdY4AFeta2Fa7E5rlXPV21VTci_ndvtzKJrBM49JABJGKM3hw64WwrFmmTJ5WaLOWGPBJ7aOYeXlSMkpOEYHz60nvsI8qcYANqQ68mPGRB9h7EonNYmRqBwAvr6CDeMvELzzpoD2aO5q14kUBsrRMTQBkyztdiehy00h-sG1gv6vD7bymC7NuNjHwOj-_AP68uyqZyMLym3axIpOQ5wZvFFAiu64CscyVXAu9uQa5gcptdba9k6mHBpQarHiNgdZFCuJAw3OyR45Esn2wa4H7n59BD1HfFeWIs7EuIDOd9BiOj2SNZAxYTz3sMv5XEwMGbqUS89YAOK2LORbaHmAsQepclcriRm0Akg9bPwFaDuboZNyoshFY56hMqwyrOEgEud3SovVHnHmXMSRGpddWHTjEaid4PO6CFdhJ5Y-iFkhp234e8dbsmBdRBqhGB-MDBByZ64UiOjyARTSuL-SHMmrFzhcbucqvvLs0ONlSs959dwXuiwzf4LjJSP2yu5RsOIhBqSIi9oEBHjElz3rwrzl4XQc38jMgt_7rTvNDXMqgF-DhlIMJ5v7OF6RSDc6ZJOdYsRYdf987IngwKA69yVc4TDpWqyy4F6sVo4HWA8O8wNRHvZWU","tags":[],"target":"","quantity":"0","data":"QEFQdXJwbGVLb2FsYQ","reward":"0","signature":"acr276fMzmZ_RUglI-vsQINt_KPC4fCzwGDyQuR77nwBQu1bwSMlflmy6v56vthCYsCDEWQDUXSCGBDGYOA33rrAYc2PuzUAbokFtOL7JxnwTXuHrAXaHXINc7dDoV0CCkA2zFVLeFlAiSq4j-PO__Bhh0mJcQWhLYCuVWHF9_plRvjhmL_6aVUNIR1IV_ttXfbMYFcdOkhcgZIkbLHCmzhP0-Kvjq20o5DBjjANXE1QDkFSjee8ruqFV3LrCn6X0mRl3MFPW_R9ZDE4oOmbTenA99xpBD2qASnwh0V33w9Wu9us51Ql5UoYwA-GDoR-ncSkW3HsG1q6ux3DZzdFThYKwBO_cOFJHejOLz8IShR1257ShlQfwvmRQPqQIF3WcmQexibQYcajDhJ-b6PiTO30amq9t0D3hamzR4r-5a4F9pVk-HtiP89ziRgHuFyPq-Aa2Lzygo-1oEE_5RAkEP1yzkRUlwpCp7IKD7nr3pdmrc-qILyH8Sl2sypVNKsGFZ1w4iAHb8E-L3M-w2DbcXr1IijXjsfZ6pT-xUyHFKoZbapqMj7CjXGPGFWvqvW-2ZB-GcxX-aaoWzDx8e1ozAOcAhqaADQgkZC0A-M6pZZjN0Yyz8NGHcaXNiwU3ElL8lmIkuX2tYcOdPld97aGvRxdWNxprBGv4bcgwXE6GIo"} ================================================ FILE: genesis_data/genesis_txs/hRTkBAH0k74HlmlWXTWmetXcIFXvM_Zrz3i1JXULZSM.json ================================================ 
{"id":"hRTkBAH0k74HlmlWXTWmetXcIFXvM_Zrz3i1JXULZSM","last_tx":"","owner":"4WB1edAYnquekrH22_Y-jDcXZgKlM3KPl9OKwISqSIcqbLuUvkso8A4b2yLGK1K6yCTF36btyw2diB83s4eoHSPJj8zIQlm9fH4f11wCdMufdYTSZqGoskNvenigQIPMtYy7sOxpx5b2iZQXtNeC4_dwFAQTnDd0DvIxmvH2yv47Ah1cLCSMVeHBEgxp3dgrgcisGQXqP511I14gJS8MINdD0BnOv1L3r8TITVHxZ7X3Zn_zUZAnFaySGmCT0X3yxygHRSB78XfrL-ayO6wy5TZ1ySlx1tCibVjbnMGex1ffR6exYad91nqaEvRJuud6Dqn8iWQnDAdI8eiXRTez0FjWlofxH6cQ4D3U06nxb6_y5PAJL3LRHp_LZhObIQhK89QZMagwV3k84D5EDScLiIsLyVrviDcI_fJR2r9wNWnr_zp3rVRG9IDacVu4KS94Vx5iFY_Ww0cntCmIjwo_dQTtx0crkL2H984sVnGvDFARNaPbZwpnDrxtVZE-YI58Ono7hZiKQlpiqW-7ts1BsyHlMCsNuKhc_qtgHJJqjUfPSC2Vm95Qu5UvfMylld4i6rpqNq6_xKYdhbD8yFHu0jmOb_xUr4Wvn_vxGItNbFa2VjsWcUHEXk3ak0lofq0f3v08EZGF6Y3vM0tSbTsGlRVsvt2bGepEm1psNCA4CDc","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"VrzyrGN6clHXnkt1SLJghRxqw2Ij5xUEdImexPm6OtaaNQmJBFN8_jw_3-xuP7TUp8PAUUHmdXQoibfx2JnXI_C83Qpg_bVeo0_dWbIESv3c6zKFCBeiXTXEzvyoXItrxVwvPNflNX7_yNY3lnzkZr_ujNd8TZlqPKGd8YvO2r6QfmjMM80Mx9RLOlcL6_mlOygo7CRKetKAkDBoi0Y_rW6MIDIzVhgh1O4twAwvoE7kBop5w8d614dzm2KLrLBXmUruxU516CMa9KO_pg89K56YIa_gXmmbBFczEGYGh86Hf0QpLp5MnKwtvZ3c5FvC_-Ed95Lv8C5mYVoS0JbQeT6dlDXIzgklmWdK_niMlPCLEMbIQif_8Tg8mxwDmg3QAZuU934ywB0aB15G_bnxaXgJE3ZQ_S1O-WaN3AzGZNEs4a8vTP1Zr04QMOw4PIVaF_KDXCKw55HprXCwzHx8i1SrqoA9yPCAVaBXoSOvifYzXTDNLqPZE2s0qfkc2zmYrIkegiv-7AC2AqSTfYSNMXQ3XfLqxpXZDS_-O7ifGIOF1HCSvcjvYjQoaVwXNiFNyJKOaG2oIYddfwZnpXHSg_yRAVpCYsTYqcaL6qqQk2rHt6Kdjar0HN1jJqo2-cCVUZv1Q2pXia37VOJjGJ72NQuQHcdcSEBe_U8L6bMxfBA"} ================================================ FILE: genesis_data/genesis_txs/hX6nohfkKZ_9ajziHJ6g5V5cIe1EX9H9rg7eScK988s.json ================================================ 
{"id":"hX6nohfkKZ_9ajziHJ6g5V5cIe1EX9H9rg7eScK988s","last_tx":"","owner":"2VcvMmcZD1ZenGi5Ii9JE49VLQPuVbk2HR3OGX5S996zb4Rrlsm9zM45g-eZSgZ4n2kzOZFRhILM45ue8LC_TT8jiMznz27G6WEHEvob0US1xgaLDakdr9aS5Ad9QurC1rWT8mJq2794-FEGPV-TExMHkcHhsIPx8-mo3acXzMV-aqR1sezjoxh3dIodxjyt01VLygy1cPmL7kz5uYvWmjxkShX0Vr47zZmJIylI6qxso--9jOyqeTLDHmvGGurEDM05L-YV8mA7rjBjUgYXxeQatjn9TbNOFcLM1kal3MGV9ZKoyTLlVpZTC20uC3HiCalIt2eEqS97_TPsZRolceN5PMF7IwX1zQQ_aAtmMeJx6rEWVu9oRHq5vAdd21gMXw1SPltbuotoprK6H9xUlRm8TP_OpnyrsfUsvm_bpz0yxWVP1ZZ9iZh1SAD3JPG4YA4Dz4k9wm7VWrq5U5cfsujYIgYBdeU4PSRZwxk40NMpTZV8dJYXCUA0Ok4qxhsV02e8ZEEzgyPQbDbAF83dAuj3p9BHs-OBHa0KEhDGXAmQk7gnSnhr_zNOWGzF-_WsYlrmzlTLaUTJ8miMI6kihNev_RaINOeEgN18b5-7yThdH32vNQzMOszDkN_xvhI54KuAdzbU9PEZPNgf1XSHkQ2x7BSQ8zZ3ACjabpeyd3s","tags":[],"target":"","quantity":"0","data":"SGVyZSdzIHRvIHRoZSBmdXR1cmUh","reward":"0","signature":"dP53_-TNo3vL0cJDcLAy71rNZxmOZl7KKdw7QdhDTkO-05ElXGDyNG1BngzxUR8zG96TanRPC4czeyhDjiHFWreBUqw1wmy1dVLqxQg5zndDa_zAHL9lLqqkM4PpDVzQLoNit-HsxDpGV9ciRt3NBFsfrktg4OxAIpOpm0quaUGcgxUwmcW14kp1h8nQOZTY10z7Cr4AR6MP5DqYMruQWQdhRymvRgAS2IRvRjsJcOwdktcj-xXjOOOjcMYOc25vaJDuRBJxoL1gnOzIY0lLKmgIIjR1i-qoof8qlh5hdYoeUkDxYx3MFIGtqOKtu5cjz6IQfZ0R5CiQAGQrN3aWd8E9l1-9oFim-ey0_9f-hpP8HfggmKX9UtpZShrK-p40DhT-AJTpmctpqJMQJ-OYNbY-SqnYmTSCjIz1Yiaes2H9SVXe6iTe_FIXzlsBLdOMfhE0E0Lrl93_8JAp07838pFyO9EGQK41LqJm75ebtkpLzVDavVkVdWGDPiyvZw78yKjlXLMXbIaMVOhJfbbOU4FQeE-2LlrNU4BN9n0d7PZ1oEi4L93oDK6cgPcH4BtHhxiVp9cMLIcmNGGFo2U6if08LawZFxNGn3MlMEp9VSfVG-M6sbFjFT79NHEijg-RbjUNtULOtgXCzPhlTJjiFRMT8uoKIUO3QHuh57pl-2o"} ================================================ FILE: genesis_data/genesis_txs/i9xaFWy0avtyCCxQdmWfGNDgh-PaJgIHkNK1pcJzmV8.json ================================================ 
{"id":"i9xaFWy0avtyCCxQdmWfGNDgh-PaJgIHkNK1pcJzmV8","last_tx":"","owner":"8Lj8DI_mf5fhIt5lJ-Z_-WjaxU7wRc-C9NfTGb5WmZrMFndcncMleRhhyvwa-OPe2_rj7bqnWUTScP3Nmp91Vm3aB_RgqcT9IPnEE5_D1HNgZQZiL6JQdNniE0Ikz9bNaTIFT6ekQoXPGIlSyYXSfAN8CW83_9lEw9Axb5jtePwfzJnX_fP93A3CUGKVHaOuyRNudYRJigHBXG5Ba-EunQMIuUVuUpPCf4yh95Dha06a-_4gQjX0pZJ5NlHy7B9SicNQEGjSfZhA1AP7NutUaZCIm2kkYoV_FSa6NoLxIIUqyIBFJ0qhK4wej0Nv3w2FxIOzcE40gkR0C9PJfRsdjL7FO8RI5ZODCSmyTPiIj_WvttaXkBo_NMr6pGhkMve9ndEAzLzdC-vd3cqw8WKEPcXmtQjPdBltYn-Mm1KORRvkB7BchzJBX04YnIFhsn3YgKu70prHKonXbp_1EhqptooNtqRNrBf2MDE19h4iBVxzd5cHoRk6X7inRFoOAPOaiyoly8NlN3gDMBfpbxtqbmXT-r6AQHZ17_GO5DNF__MUwsaE_7hQItViB8WRjZ24wu5_vsG9Zb9Vfsk0e2ya_lJFoA343C9BqLmq0K2yIkeRsAqw0kAY6i1ei9WaH8kwI60ilpnjRx-wFentpbUfFPKPCJbUCQqiI-_GpWs6XP0","tags":[],"target":"","quantity":"0","data":"dGVzdA","reward":"0","signature":"nlKcMUX51mfbgsvdeHqTEF3lZxkeFI6R72mg1zC_7rvkVTvzmHCgLkMwTOZxSfOLJZL1CWV-zloJW7_5_o0HNEsC4AR-GSXOeWZOgxLNwup2kvOGF4OXEZm_ycyV8d7km7DbgfPJNwTFxan5GAPjFjbYjX-Yd4gxMVetjXYHNSno492YLrMTCGGTLf19QI26oeoRIlqQYCbzvVNMMrj3xv8goYtCzJ-HaGNaPFbSn7UB2Y8etMwCCTP5AuqRbuTmKScT0lCqIAmz1QQWGYghGjrpXdkcVV2IcD-i9Or12EUaL0gWJTwJ0uYmblzea6nqlwilqwRobSRrBwLvzJXLDzL9ZGtP-Ir-P-TBhAgD2SOWRjBQlUWMK_FM1pUR2-tGCWp3plM2ebjClyMmciKSYuM0ZK05eQVXrc8bZmZs2LtjJproPgirvcNqkmvCJ_8S3L0vP-N6tWw2ADzSq7qLQz_BYBRzjLkljuxTTbJ-iIJMpPIttl7c6I1hu5iH_li3QmCcW1Q3pv9kCeaDKU8YqASpUU6VUIhmDgtll4XKOK58izmb7e9ESqkd9qYwHn14OJCe1uW5XigdwMHV9q0fH91kIX9w7lbvIOTGtUCkvGcMhtVqZ1kL5guX-wZGTQFjaRmfpjeGQmXKKMdWqoEDbmjNRCgwKqKPs_Oyzx13da0"} ================================================ FILE: genesis_data/genesis_txs/iPb5JLzNajAzUNByVeIGSEPR0rzGOV5iIYjWpi99APQ.json ================================================ 
{"id":"iPb5JLzNajAzUNByVeIGSEPR0rzGOV5iIYjWpi99APQ","last_tx":"","owner":"qhVpiajHn9zVnNuNCt6uPsrp1PuAfQ4DwAFsVexQv2VkCSlhNcOlQmVvBk8sHf1rdw248to6cHUdxbebCoFG0gr4jbj2FXy__jtQP7KMead7tcxvZONtNogoiW-OorPWZAGt8lbAYGzER1Ne7HzeJeNcMR385xd7McpLNKpOhkdYlLPwp1XI7edQC4txFvYrHWtJWiG98hgZRKUfT5vNIbTYRnPCu_dtt_ljhuHDoakjUxIJ-2ui_fXIdT0EROZDAXNsGWv_xCBbSJK2rKL4PezXHyvxsNmUljTYjev-tU_hUjyR3E4jGD4p8ahWp9NZ41yDKZ-Sd-uP5ogMGiQXVmuu-2VwLyTspgpv8c2dXezPwHTQQ4flYaKlWZcV1jh3RUTxqjDYLzp7fFTXnjdlyrY87Xi0c4BWt_VA5mmEVs79RWClDANqsag702xFPsqNLMmu3dXV1s1K2wackHnrK-nRd1mDcvfdQG_9_DCR0GgW4zeEXQLsaaL1Sv5U9cdJQbuUIyLnPXU-5Oe0wFKUhy2AyZ0zX65Q0Bicc7dSFfcyuwZti93k_mDh8to83Nf3auqGjuvoF5H1GlG3sCK00P-yRcS1sUxmWi-jEAO2vV4j3WhUxpQu058txO-jcppA2_5WihgD6ygXNFkLbbnnRayeER8HBzUu228PA-r2xGs","tags":[],"target":"","quantity":"0","data":"Uk9DSyBTT0xJRCBQUk9KRUNU","reward":"0","signature":"NEOXT3fnk4EjGboj0CoSu88bjFQRYnQ0Xk9EptCVuYz_X3MuZeRDepsDwHSRwsJFFwU7_ydAdCrRBw4LechA-hBWkWRSbGz7qVD1Sm7e81gGOw7JX8stP5zfFI5HsjVlFRZ4nianghfgeNHpT4p3vB7lWJ7X5tEmvUMN3AShM3GvdBVpkACK6bGYRTI3c_mmri1Bs7zmRPnx27gQTnrUA-xL-J30IbUZKsknypM9YJZJLgsi5JqdpzK3nNIiebjBjYqbBLOXjL8SYGo16d0yVWXGDeNp-F5v5OWnv50gNlEumcXWhf_0faTCpKkWDNye8FzCnvAlm80ydwb4yAOLYxAWvzbMvtYz0-A-SGJdBjZB1ezVVD3wzrTeKYUaO_Hj3LYC1ynvFi4KsafzVlZp2dy8Ts0X6cmO7LUbGWyA8E46AwvojnD74TQYFSdg0e9WAt5QtLvaZeN3ymRFeSrDSa0kOZVa27bU5fx7EcLCk87u0At3q38H9q8oienwbQrTukyXNEIRLEbuEu6yPtdU0wlPhx9LhHgwHQmRrUDRaQNpNsQbdF8g9ZNzaD5VkGbschdgPo2OOghe9JGQuUXdAZ-H0GNAKAbN7x32ib9GUJYbHNKkH04qhytr9flSgbMZ_NgqCsHSFftsvxxjtkdU5smnLG7bwJnzX1_YVwC4ZzY"} ================================================ FILE: genesis_data/genesis_txs/iRF6OnneKHJLhLMdCXpo6LsxVyWIGyklFEpu1bN3cyE.json ================================================ 
{"id":"iRF6OnneKHJLhLMdCXpo6LsxVyWIGyklFEpu1bN3cyE","last_tx":"","owner":"uFJEuFuDDsVB4zF30pZUf22RaIbb4q2tUTwhCOGUxkaP7CSqD2HmVaNufPupFt_yr-0W_lhqfcSWl0IdYvFxTGn19j0a0Dvrv8MHyy3-AWfMcEGxckusZjfPycUXWrJ7Qzc9PsiV496P-fJ1aUwv9czwFDFirMcnD7lNoYP5bd48FqjLzpCPB-xpQTiu0CGYQhMqgj2cfMYmelPqpPWanKQY5W34eY32cMc5VvA-9pkj353mBpxPi4_bLkKSOxj9zcJ_eGcN0CuiUmYskA9mXENNZZnpuAbPO04OXUgjafQfTAHT3Su3Km3UCHuZE3ThiVnV8F5zS5VmtAVUHvVGzJiOydTBdif5HpuvkwvT9Bwq6NGM9tOQ30UQ13P5kIeotdQm28H4UF4w-R-22nePYExjt7dq2nfSPNVv4NGGbLqOqUBoI7M-KXI-3GnXCDViflfZI4cDZSnViHlpL0f5tWk1pS3T_tx37Rzpg5FOGf2oX2mAS1vnWBNL-zvwyjj8AVt6_pogPA9Wq5kjgL8N8MEhrIABlb4HzrWpdiZQIobg3WvdOLlMAU8DOqAhRLSKKxy4IZ1f3AkZN1VkyOAhBRLdDNuM85bOSR9A1eaTS2sfN_YIMOHqo9EEnrVBSq_GJuh8qwB1gz6RA-EuY2tUww9ah4uIuwTuRbTCmjXs2mM","tags":[],"target":"","quantity":"0","data":"c29uYXRpeC5jb20udWEgbG92ZSBldmVyeW9uZSA6RA","reward":"0","signature":"oS3y1A7Ii46ppdKDEoNKXxxOa4vsICMOR8sA1HA5gJDnf04CILRsYAWbFfvMXjCA3d-6nCbLr0A2HXU_ZmQ_TBK2tfy6JzN7VjfXy8gb-yQN0jIqrehwo1UJC15rJW9SUF2ASK95wAmbBm1uuW7aX1Bu02IsR-XYZN5cUETg0f8qoDNMnOGVTKXZ-IQfjisT_cx4PnyrH6sMEfFr-P_eu48xYw66flD7t7JQoygY_JcKlIf1CuxXX4JsDkgWZbS862VrdWE9N_EriQiAyb51WxEby2lVosKTBx73ulPAmbe0qIDc9lVcD-JUNIoCaPWATf9MZDgFMkDrBQfaDM8kkyZuEyuBF9_tIgc-f5oOGunUhydD1Yqc9Vt3eMu4Y3SIlQ07hh1FHyYCvx-nHPJl5gvyDtgaZ6b_YOQGsguZ5emOq4YAsTUOjNmF1pqI-dx73K5sPAkKNo6vBmytqltPX9bFw_UGVlwAlWq-qe68LyChCc366HUb66ezUz6QTH3f_HK_WGMP_f9xz3UfmBKDq0pJcdGExZxdy57odyYZWWAqludxilz0uB58sPkxOGjMOOu9r3S6DcXg82RWgLt-e_ZlhQX9UqspxrWdxDjkXu7AASEmE2kbIAwMyY7r03wXDCOxwHF2wb7CRVhB_kdv8Md04RhDqXWeXsk0sulCBW4"} ================================================ FILE: genesis_data/genesis_txs/ijroBK9n_uKCS97V7iege_5Av2E-tm6ujquAazT_sBI.json ================================================ 
{"id":"ijroBK9n_uKCS97V7iege_5Av2E-tm6ujquAazT_sBI","last_tx":"","owner":"oScfzCL0J1VHsF15k6zWjFu9D4VPRY9P0dKiLsKvqp3x0nT2siHB_KvomP0c3QgAUc2e4IpKr_TH4TnFPcXQDOMHi4Jugj_MHCIBzq17dzEsc-8ebFkdT2yS3DMGag9YMpJQVj8BNfPBr_HaCrtso0O4mEK1kb3ae7DXqp02wiIvZ8s3eh2ygxKNcccRRADqv5k1ilW20ReifHdHYO-7yWfsHJxdHV2P7JvLb-2bDcvhgeqHj9UacusVcPheBQLeUZ-TNVBMkGy2B6YUpo2aYY9DoQZ0-bU00dbZJbd3AZvEZS3fmkEqkEFDQES_lROLvVeROAy5jRbG1oyrkR-cpr6GgpWlQD441HDkJmEjoUbcQCx-AoJvun5zsT1pTqZaNramrD510oROd6nB40XTZHIwgCxdyrD8V3ZUonOGNrEqFNGX57ZHIVt9Sx4UNd6qW_hfQHMHy_uqOSLAn4lcp6LF7XKw4TcFEf8thm7XkCiYLrpxkWbFsBibdqxJ-l9hafhRyTFf7vD4rpkNt0GavIA1tq8JyY4TymRHpH1-KsOmw7BxyCiRd4Z8YPZ9A7LWGHm-0uobCLUDUA-S1fmc_4qkg3FLNdG9octPHMSKvlUUMyHi9XYxhEY4mIkZ1qawMw8pYH0eIEV6K5x178-8akRlso-Mwym5OenW0Ev7gC8","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNrIQ","reward":"0","signature":"nFDNyc-x4pFssEjkx3McyIgCoCWYMhSAB_irX6b-uzykth5kzGv0U2BhtqeIIxBk2Qt2zKrgt_535VXI2mTuk06gqK1N1oQrtYPTJN0IuGXx6PvZ3dJMAF_t4t2dciqsgCAAPYatjtFhXncgqvcGYzxXY1v1XjKbOmnyphIDpM5C2xmajaatM6vcQWJBirwUhS_dxFqHeY236puVa2SejzBXKPf_4DrsakgwtkvLWLDnkiUxt7Kr_oG5ydLUXT-SpDS7PktDESAxiJrMGAGlg7Rnjoub4-WRTc5noSiosSP9VwGtBqUJV6E2R9_Zg8l05fdeGeTl69WMqYeX4t--HC8oJlj1NQkic9yqKAiIOiahaNNkqYpr1997xqcrnxQIZOw3mzAfhvvC8qaCsmPdKvmrmtgt8pCDOZIbAph3s6ncJTIHtrf0hFaC4n4KhWWBJyIrHQkiziwdytijl6HbcbHJimuaAX28pKVt67IE-LRVirQldIQlo-2DePKcXwYZ6Twcc81xSD4qBwAA6srWJ9ZTfIHDhJ4eWwud-MFN1AcoMaJV9Ww4fbVRr-BYIHv9bRa7b21AFl8vd9sOvBgrKVkw7KjldvpJV-jC7_GmRhqvU6ygF6WbApA1S-uisk3Da9SViYHpYGz2_DBTcHkAu-uJ5MKvn9qHNG5J3R3ygl4"} ================================================ FILE: genesis_data/genesis_txs/iuTLZ3xxGpaBCggV5xfUkJ6hMdUQKHw6f_vEn6sbmPo.json ================================================ 
{"id":"iuTLZ3xxGpaBCggV5xfUkJ6hMdUQKHw6f_vEn6sbmPo","last_tx":"","owner":"8pbyqAJdTwbk8xaHywIsTw3rOGqMIgcftC79zNUyemlJ-xIsGE5QR6OtRQ9FzjchexbHzRE1uQxf3OYRzO-k5HwPR0FtxOKbmfuJ1382zUW8OD5_qTxinpYFP61k9mA6iamM0xCAq5TF9RLBHruoCHaw14wK2WWfgcy41xca_TQ953h_oe0IQU1YRIc6oRYChYUCKjLehRZhciyT0PSkcOoFrPqNlozH3F0WWgPyASV8qc5PlXe-MuXS9Y_MhfZ2_fsS781SW-SqzGiDqrZxau3q3OtFZhe7dBvz0YKd4Xf3Lcoo8chkz2QYQZZx8ThOi08QHns6DJ2DLwbWUx6FB3qFVroc4DYUEEfCGUbkKYxQtWFmrK2qQs1bFCOadJ6VZHHQ90H6B2cRv2q_ZRKTdSpStf4jnzcJOf6oeQjqwDATp-byhbBaJV1A2OIrptSh53oTFjtfLJWeq8dEpx-20fRhYgYDmSazwDLWzEURS3THSWvOTMhd-Hb2VzbOtrGJ89cvKkKZsBaZ8inubcAjcu-YAlkJaV6QLOEyHjGi6DHC6_iWAoHmggHyevleiKxSBSwnmw-SCUEueZD8UygVKrOxc3YhcjsFONRMLDKl12LmN_dQcyUHnCixsv50llDsSKUGOsq1-3rpr7tIHCKQaaewa7yMqFNKtN7HTEp0Y20","tags":[],"target":"","quantity":"0","data":"TWFydGEgeSBQYXBpIGZvcmV2ZXIhIQ","reward":"0","signature":"usJvxZ_4l7To7435PnYo5kWhYjhfZ5Gg8JQLCsWFoUrtnsYZGrnr6cQPq-iQwC_pISZ1rvrpReD-lC-xiyKgspe5LakZGXJe5i9mSl9VCh7bxbIu8E9jLscn_Eq970tqTYyNtN1ubwzlsALcbbC-6T2loMhtZug909O3BV8TBxAgrp0YCFSFyt0xB0IvhhLgRF2I3IF9a6YFB2wvylXauEDCs-CRfswB20hDC1s7jeuzegyLz_mkH9xsViB38Vw89TVyOHJ35PbCMw-XjO63dP9Aab4zw5CYPofpqbBDkAoDMu1Xxj3KKpYoAuWYwMiQodNp0FjYccT_TjStt6XMug5KIT5MPNkbYmaqD6r4XisUcmntrxCdRC5E1MeJO0glf_2yAlMXGe4vR0_MRaPjhGmdt7pySVxc0HQuX1b7SLWAsCmQE9FVDcFvC6BsOytmErYNkdeMcyI_VttLkCZ30MjH1leVCOlJYKBlLVnkPPG5dqgHS9F7rjYoeiCW3kWnBD1xQ9oywEr-KQfSwQxn_tMv5Ls0orzPuUWSMXrdiM35Cvha6LivsTpniTnvI7ZEkzBKzpvzZA0DTIdw4Pi15o1fDiR8l9U7m1XRPDRZG8duXhrfB7eQ3hvYH-7YiDRI5KdXmVA534eTV78YNR4uOYfvVKW1FjQxpaWcRpeUqmg"} ================================================ FILE: genesis_data/genesis_txs/j2IiBCd5Vf2Q8ciTVxeHbN6JgrXUFiv0xtoMTA_VtqQ.json ================================================ 
{"id":"j2IiBCd5Vf2Q8ciTVxeHbN6JgrXUFiv0xtoMTA_VtqQ","last_tx":"","owner":"4I4-_4lpNZBzhaOSAURvMwTQMj7PNAzNYTCyOm2cqKYihern9uZUsESM_2KovM5PXek9n0PBlv_5oFrJ2SGNLWjU0hxWTg9X5WfAd2zxModOu2mnnHLAYXXtVVNJdxVCuZ-IP4gkjCSCOXZ04fWfONORSQyGBxPnHIAjZSmjRWl2FL5nbUqeGkO5FXdbuCeGdDuUwQRcXkrOQBguSYbvUnbaYTgOiueuB9DxOZqJE-zcqO9V-CDXgVzw6XZhUi4S1yPer7akJOSK9jDfe4y52crnGQRP2R3txY_j50JoxoPbqnWjTnw7Lc_0GOd3PqdZfoPV1aWEeTvlQV9SuQSGlJoIDA7j-6kdNLwD_p9EWUiE71NsMo8h-JeeuajWlxTX80KQH8ENQgPNGolxmF5K9UbU-E0aLKTeDv81dRU3rWGaatgIjodI7R71lKWrgsWXAp-IiI4jmvkePAyFYqB3kOczbgylKw6F-jtxR9g61UTO4GOhX9ed9RxR5I0yg_avET7WOGNeDiLWYGV7mwMnk8cxLsiCRvLFTR-AC1Z_oODwBH6c9nXlhX2JC_ApU04q0sdHFUAD2ys3zC2r7eq1xQY0K_BKYPnpd0KcONqFSC8jEWgyczsllumej3tggikcIGkb8Fzvu7LpbqkK0aENLrmN1-z9LO_nalCrnHmGb68","tags":[],"target":"","quantity":"0","data":"SW4gb24gaGlzdG9yeS4","reward":"0","signature":"h74TByCz-HIYiaOltJZR5k3iUvDhLedGd8LNsPCUFyLn9i7vaVY4JgaKQFaWgVPNFW_OK6tQS7AL1XB9lEWrbKleZQbN8KObME60phIfj7gO-Z1IZ804iGBYavTa88IKrp0pviSp6Gnm3zdaM26z6xvc1D2kvNM6NFJ8h2zK_EpdP8VcjU24IGVagVi4uE7gEbR4DAfNJTVPTdmPiuCs_WUDjjwUdZAU-tfkiB_DsohXrpocCVbkmIQ5rc8Aarw0lcusu0BQew_qlyg6SGwoGkdnIqom-njzlBRaGmmFMs-8uAEjroQF3xNLW4BDXuyOiIxaxIyHqbFvmegZbo9wGC0gVvyTkkUm8LeHAp-sjviqRhdwQDRRq_UQ_wM7W39ur-0zALu0ei0V585d5dawB2UjxDXeqqf7kqS4BjdulVRQAWBef_RbZabrRjJZ17EOBOa9FNCO2COivrRtfGQddHc8LR9Rtjd8kRPldOwC6KE3MOeEBCjYAMKi0a_LfRKLCcQiP42JrP09eBRKkLnnewF-VVYwIX0D-0U6Jno0VNkrl32q2XGRbD8z_2GfC5KEiO17BKX5eTx130YwBv1XAtXs2KeKdn6dXcCrjjPpVhauVxo0Y8ipiH-eXJ9cz8chvCo_UNYzp_QS9GkJ5nOSasI3o0FCsGnXsbZ7TdhdwGY"} ================================================ FILE: genesis_data/genesis_txs/j3l4tvphmVOyVyFkNdS7ulmexBqPqEvsSJrBsjAFJXc.json ================================================ 
{"id":"j3l4tvphmVOyVyFkNdS7ulmexBqPqEvsSJrBsjAFJXc","last_tx":"","owner":"3M5uzZscVe2KIhy3oDqPOoMUtctwn31VQc83_KRYrumsmNpsCT1gapoKyvp9QPlN1PjB988ERuN96yJhU8uHT1HDHIxRuKgIu-d-jswDlrr-ej4SE794dN-I6vDaeAP3KaNoF6DY39HI0MwyBd-dxl9Yt3gnfk6DaxRVDSjAmyn-5sYgHVeenJuc7tssrMg-x95Pg87DVLwxehILuKzT41EGvqUZ8KlU2lU1AqYCzlbWv4wiXYpMo4WOOdMRTfaQojk8fb8qwy3nAE5DVgFBAb3v2ov-NPUlvaaBqSxmHSLW1FjeHEpBvMxZGpA52ePYhZQerlr9XJK2sGYDIv3uv7RiGIZbNhVSsBfwXo52znZ2WHEpNg4Rtl7M026xdJL6wLYZxNVi5Zd_KgsqL4lFNUwYr750uSGs7ngq2oDuLm_lfFR8cn2_Wi-5mg4tgTvwpzXXqWedN3dYi6rL8cyRUeRGicFFV8kO3w7BWPMfTNJ-IZ5yRPzdHEZCX4zY9P6gb9UI1gv3p6mq18Gdw4B-6S0gwHFfwxYVkVESgKdwykiVHaWnvEjYyNB6-a1ymD6zjCeVQKvRFTghB3RcTnK6kpPqCPCeY6gmkgfo21Hb4Ht-CmdcjDhyzhjQYG4l01I0poEK_9lDbh7RdcpDeBtEMZO7thmncfcBCGFTc6hY74M","tags":[],"target":"","quantity":"0","data":"SSBsb3ZlIEJldGggU3RlaW4","reward":"0","signature":"iWEy7bPlMgdgBQoXR6HBTVQbTmujvnCYax9dPuMFoS6Ehx4QJdyZmNFZhMYBVNapTInvEhgwpPGWrWmPBau7mKRifTPDGQ8wxmt3No00hyHOUIwmY9j-KfinLWAxOAznPTcWAgTy7DCS6OYDfildaxRwyRDLSA4zs_DGeP4H_a9qV9xb91OUQNlMXWnhSlQk9RSYT8hpFhSjUokq0SXbX-lzAS4K05bOs---gooNFLve5Y6vVaUOP1oC1ROtP3VKVc753ND-J69py6Ae1lqxFYPn3ZpdFk6dpK1vX0L3SusFBWifq8S8kXIkZsMRM_dQuBzONW_5JCuaFc_ccYfzOIVyu9_NZLTcyTfeNaqQiZ0LDUR5X0_wtGN4t5HGPj13VmxwTH-6DJYdaS4IbPJNo2nnz-3-GN414s1gSR8-KmuowVN4sF9VzTEnGgz-eO_HP0IrZFDP2x73628n_yyndTX5nBbu2SnyXSb-XMungkXKoZMdpzlOIfCMd3O0cFSOSXqDvH9Bff701HdUPtM33vnZwiCLMh5RXUvKba_ovGMEMw05SUla3Jk60eua1pFQyHnMlSNJcUx7b-Cz9GTcmEy7Js_D4vENtoiUUvy9dZJkgU0Gqaz5shruqnlEkfJDuAmods1tV5rEi_3An8qNH-br1Rz0lR9G0xrB8hS4TVo"} ================================================ FILE: genesis_data/genesis_txs/k6UueT0FWSSUbAAH4Uc1Oz6BivunVR0nSMTEILnB_dQ.json ================================================ 
{"id":"k6UueT0FWSSUbAAH4Uc1Oz6BivunVR0nSMTEILnB_dQ","last_tx":"","owner":"zd0TGMB0T__R6fCkE7bXRIK1ySNXyJZq-6dMBLrkKwb-CF3R7V4GMumbKMfXeYMoFNx1dsPy33Lfd6X6QYTSNq1Y-k68iUB3gMimkmfIYwnPr30JODUelF8Mxy2WC48LWkm83tLYrQ2plXruK0fbGfQhBn3yGzCIVxAtJ7rZ1CmgnavawOmMf1g7ia2HVcDPleIx-tTkwi0zHbFsG4UHYsthn2GQNxsKfTVAAfAkuP3cbfLUE6sHQjCDR3JVasrjjw2NOoPJMtIJSqJh1MoBdmsqQ47hZPMe3uXzSGJZWKwYx6_f7150yVkZd-9WHofO0k8zONLW3os2cqsT3TE57R3HR-RGB9-Xln5ajI2CRti17FEZJTCID5R230QF9CfWysGJL9JmisA3I8i89Wl3bGRdUnsOd_liMOkDE6bf59L2pIikRU5QQv_HCf7RjeTivO1MhDnkmcj2gwDVFsXxp-iCD-ynL0FxoONARc5Jb_PZswgfIEnwRv3eletA3Y5EQqdnu81AQULwc-QElGIucnbTCUHxUJCYfHWZu5EqocRvtIGPW3u8qxVI3m_cHlyfzSp5GhseSsQWpp8D54I-LOvTeQJsvHfbNV1By-M-zLoqeD_qyI_MrLAziOykB0w30i_qFh4rMUQfWLqnA7EMRk8Hpq0q0IjA34RFRpHxiBc","tags":[],"target":"","quantity":"0","data":"Sm9obm55IFRlc3Qh","reward":"0","signature":"LCFY_HKqyNmK55ezsh0LazGoEGsse29vW1uumZn5xvXcTrkkbeCrf9DE8SiAlgcShwJeZsNtmOZOe5DvrvHabMG6qsgnhEZMWQ0s7SV5zXfKhhgdDepQ7ijhq5TTqswQfyOz9K7JPsU9tyaiEqGNKtaZ_OdI99JPqtCcmfYL-OW6Uol37R0HP7ulvaQMCcG6axk1NpFhvPHFHKKu6H2AYQ4kYIu2eTJ2EchkuqFCyRzkjFfFlDpTOREJzvQkecP3cjd_a5EuTQvipxIl4zE_VEU6hfHjqbKZGvxNCjHAk-UCYyxKsp5NMQJiG2XgFaKCk3buEyH68EmWCUOYTkUIbf3LcW9-VHhSc-kQXwDhPhO2tmT6FLkY3cNC9rVfasMVgUePDllb5XchPy1Wp_Gafz3JFtKxHJtchMWRRKwpgFjKBdvY-bJXsmvD6SqT21XfXUOxxkVHtzRpLwmrJTbx2NpreWk21tQyDQpjAYtncb8yTSohm5OZArSaYjeMxDEYYEUq5YT8k4pn2z8qWIday08l0i2pP_6i9kpQywONDfIwB13ep3qAw-BPsjTsmSuLpBHnHnNwYZjUgkxsII1COcgal2wI0tQGLu1lL0xgmbg1hzsP40mGc69RcPDuTwEkbMMETM0wYYpAkiZRXGm2rO97U7gf1XNZmNgqJFnIhSE"} ================================================ FILE: genesis_data/genesis_txs/kXu3jTQwgYsphIUFbaVGg9rNiil96fNjw0RBa6oPRtU.json ================================================ 
{"id":"kXu3jTQwgYsphIUFbaVGg9rNiil96fNjw0RBa6oPRtU","last_tx":"","owner":"ugJkJnr7FAgOewmZTwr_nvBzyw9FgCSmMCzaV--gljl57DdtiqnuOIsVPFTIDg8YMDrK5xErdG9FkRpTGwDhLRGnjw33V7Hge6SzK_MToK4GMdC0DELNc2Er4xSIlNGao7W0yYc5QALSQBFps5coEkMV9o2fqU8WNoNOuhGLqi8-9yTaBlVQChkamiR8J1iWp1wgI6-Nmkx3tQLjIGR_V5FE3eTI0ClZdJSL-cntzuCBl7Juq8cq3DeXDM39mBshpMUQowCVsYtNoNmS58_UknCkgFKNeCV0o6Jk5BvQDMVj2ORi75XoWIPgNHTEB5mCzHZfMKFK6q3yLmUv7Uwz-57BuBIX1P-fC5MxIwLIT6D0XQ8RDqxchlP58vDAeD7O9ev1CN-qxkO9lWDpot-fdxjqQS7_0KLnHVImI0XRfdODQcs4uzo3Mze99tcLWo9vLJFOo2Ha3vuWu4T9j2pZnTvrPdjk6szZN0ZdMmA5cv6YyFdWe6n2T7pIufrTtfzzJAWhGNbbO5eYpnpAuMtAmNLtYi9Z8IwYx5twtbKCKGKCcR7IBXzkIywLIlzksCpHcXVUgyg58JuC3bK3iR9b2ppavlnRPzxH46jT8tki4HwCea2qWNOyu4hLiB0KZuN-C069a0CQjjVutcLnuwec2UQuplEu0n4R94uUeuHAYW0","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNrIFNhbSBhbmQgdGVhbS4","reward":"0","signature":"DjKsMbka34C5a-T6vQ9f5Hwofh5E95cxX3_zQg9_3tRE78z1Uml1enucgSHr3LReGnW9gFK3oBgtdyKsY_GhO9ZBvDG-pGGPnnDSb2t04ctPk20tK9lyZy2KGb0YyekRYWC0zzmJTB1kB9HoDlK7BcLLV0SDwb2LIcn9S__LgXw0yjQYfPCFYmgxCeiHP2HGhvCwWkmntdnZmIRdOA5KLptAzKn6AhqY14LAyTlzezSuJV-C2Vr92WwsO6cEIzjkqNyO0ylu-b598gAd7cC0dh1faDGt3QpppmaJgm8G05aUMAQFkyXPFRVIdlyA-juvyrI59Q08AoOI1dwp_hkD1iT130k-phY2qWgjikEB9iJXR-N3paXlWn-U3t-U-mMc4aVTtO6_MPJZeSCp1pJlpSGA-4IIrHjJHjQ4ZrLr-oDpuwvw-8pzyFljOfEHcnAKLhEO2g-IIDZZFuI_lmAJ25NTv8s1yYcc_3O0F2zHQoV71ECwDrzj9NsFtwjTjFwsJReLQ3szL2bj-KUKzTdoh_ZD73XNDDL02-ePq3URa61jItDOIIcExAWoRpCEqccEnt_Kcz0vE_5heNkmYugTQ3d5nrFpv5SN2MRyB--rJs0-ZEji1sBve3rXvemam5p8mBN730dms5czBbU_fp2sM53cqKD3G5Wmhy5lsyF5x7c"} ================================================ FILE: genesis_data/genesis_txs/kcb41aN752OE__qEKDQAsbpzCUXMdlzI3clCBuxdVts.json ================================================ 
{"id":"kcb41aN752OE__qEKDQAsbpzCUXMdlzI3clCBuxdVts","last_tx":"","owner":"5o5JcSZETIFfIwBS6imGKODHxRtCEbvtqgaQIpIv5al1ALI9mQQpRLaCevTJF1DOfHl-pTqU5wjn2mi21U5-je_zyiSh0U-fQF0DG_xJT8DwZqLMQ09mKoa1erBLqAJNAijc8GlUb0WKn7sTzncRMsDzz8hsFpkqfdcoEQ0PQQ5g1KP01Wj7BVA78Y1TeRth4-GUCmVnqhLTcOZJn7L0EiJgFXQWP92LKeYio3HNi4FsBCoGRnOZqof8EZNgcEEQCVDeg_0Fzf3zeO8z_NSMgigTIQ7VVhsIBzq8F-vZeIgir3E3xnNUoS0SGH_afJbte5fccPAFW_NOs9W9nqfXKvo46kA7793xCmu3SABYtHR0MgvGDc2AED83oVAVZgAtZzUBVBSAMm_WlQ6RyDnaQuXN_ri2R4xdMr3XHuoASoXKFN4Ln6Ga9rNWR8aDSNe0bf-sigVO0DGJI3ww6SRJobGi017VTZMM4NXWGYEjpNuGNpYxd0Kqsc1nuYjPMawKK90Sc3Xwq3AjX1rtgwIK8MhKrKY_Om_S_y4Dabcx2Xndb_hjznZium3HarK2p3k9EOmmYBmVgYFkXEh6cUaHmTVxJl6tkKw7vYEOtK0YP_USbF9lUc6qxqowgq_I7s8YmO--Wn3A8OmrljGyFD_JeiPr36h1FEQagRg3BgfPxpc","tags":[],"target":"","quantity":"0","data":"UmVndWxhcmd1eSB3YXMgaGVyZQ","reward":"0","signature":"bUBBkDW5oLtK2eOhxunCIdPArO7m-Iuxm94X_c6s5zljrYhljrLCorFiB-PdFfb5bk5Ay2xTL9nhen1XM1oAHsJ3FQ67RNeV1j-Iav8FGIsSWxdyg80wZO1LZ-B7oe42RRbhS1o3lEli9idhp5nybjVAJRjehxE2oiXMQsaDxsMc1AkE6OxKAPjUAT_fOSdVxRM8qF7u6LY2jggGk8kFx_gPwH9cDnSRB03Vu4A16-UsF4VQDVVlMF-RPBZy4Zczr4NyiJyZJmzz6Zrdg2AjCOEQb2mto_aILn773wNk5dNDWMeNsPdrkD4uUJxiJbbfpv5dzmKqzEgBjo-nHOO_h9wNfcVHimId7LZW0FQI59l2-T3FX62LDLxUsbEmaCKP0kKv1OOlE7ECwOQtx5u-FWKNh3DvCZ4EKsw53aGYpKSdARp1WwX9rJh1IKHj77PayDvqqR9bwyzsts6Ai1FpBu8qgjfIXfCpO7HyGgjX-HmEHjTADoU1Xwn6fRCz-NNodfGQ4_NV57hzaNiQ-C0RVv8OXbBuR-_W7Dz9sgk_zM9z21kqYL8-jyeQo1VoBXpWF2b8HTMkjsRz220qg-YgzIRUI0ns_5Te7B20puiDMjuxIBF6IRn5PeOZWOP1lSfG3bkJo_UQVyqe97WRkfWMZBIa_bt1_pIR1VlMhrhX-RY"} ================================================ FILE: genesis_data/genesis_txs/lFqBd1sEhgw1e_adedkee2hXP9beiNYbF625KV0vObU.json ================================================ 
{"id":"lFqBd1sEhgw1e_adedkee2hXP9beiNYbF625KV0vObU","last_tx":"","owner":"99FpflBJvGYzSpjZw9yNZjwQuBgurNKGBIhugnBwGKVvX6744JVfLCgYQPWVVkTNsKBLEdEqvsNvKK3bp49c67x83gWe7D-bnaNhkQPpAwMq1RzepdboeN9J_36L_kDKmpAfrg16jX-LWt4VD0pW-y_IgEX1tNsLXKjPxrWvakaybT8t5U1Ob9LoZAYAYbdLWefJYCWoxDsUZqgkVJ2wZGj60nQ9LHqfY1jJky0iQg_7m1B8OlKQgj2zXHLHShpKjGZwyuEd8bd1KS_LPlebo0W3rtC9_Gg_rKT7tQidwv4JnxD6xEsKTQ-Ttei5gtgzDSGn8OW57ufsL-Jn80O-k6xATEPAfYEdjlp4N_cT-GvfDsuNAs6yY0V0y1xirrDsb3VuUhLay0GqhzZdHxNuSwSy1P04PITB0ndwFvAJQ2ZuUMh0-qHoLCoyFO9ZWpAzW3xnzE8ag5bkKBBxNY7sIi-RW4P8X8WOboV-DAW5vdN4a1tPdA6wFhKRAzuQTV0twXlZdAryiZ4uOBKEV0d4LBGuM739OfxH7-u5tU3Mn7R119ZlKoEeDvBPY0PChmZ8w4iMAEuMYp9RULbinp2lOvttWPKZ18E6P_vibenEwscBZw_Z0aaoQMZjhwVrN3tX26ZkITOeYH_pfoIs5Y5q4SICiQthL1Dy3AFRgY5F6qk","tags":[],"target":"","quantity":"0","data":"Q3JhaWcgR2FsbG93YXkgZnJvbSBTb3V0aCBBZnJpY2EgYmFja3MgdGhpcyA","reward":"0","signature":"HnrHedAY3EfYufuM4msiwDSL4i3-vCMIezQx0hgEKfnhKjBio665Sh6BTKF9f7LvItAf4LRjJEn7pimmmrT8K6A8c3k9B8cXGTqc3ptwyH-CcXk-8qaeo9IhksCmYxx1UlwGDezhmKESq0PgSk6i7a-gjaZFHT8Obis40eJc9ZTLceJJRNffRfwUZzJDXNIsG6yoqHbyyRxoDHnIsmdQJZafhan5hpg0X3r3JMWj6WpWY-2U-00u688GoWTyhOXhE98HtQMtBGpkgnwTuWQEm6psAlZU6tbq48Y5zawziPyhk9WGcR2pV6eHo4yTL7QpvdVVYvlbPkIYD-14aUqJwu7G16Y3-_XLgKFpWc2S9I8o-hTcZRpYynJ-hIX-6nkFediWP8TqhSVjwCFYhK8EtKkNFkiQuXFBegHg5LMsVQBxVc2-MlTC0Pe9dDKSXm50byh1__20EDw38f8gQ0vDI7PZfq3RmmeDdPoP_VYLmhuhGwHP57nouPA3IrxePzpaT6BrtGHtc4A04y50zVjLjwPnVr7Eu-F3Ueq8Ncjgo71ljWGEm2CTx6IkIt5SwLqsHnGbddwhwD-nE9XtntAU_CgzKq5xCRkbEB4HBhcIXx4FUrWbmLQWbm-9DP6-qHMwi9UVJjCsHmsSn_j_ye-FABjldXhAnmhLh6P6yn0g3CU"} ================================================ FILE: genesis_data/genesis_txs/lq4SrnweWCHnEhw_AV69gMLyBrPxYOmOdVdRIXkHwOg.json ================================================ 
{"id":"lq4SrnweWCHnEhw_AV69gMLyBrPxYOmOdVdRIXkHwOg","last_tx":"","owner":"yyJyhjObKzAoYJ9byf4U15_Yau06vCzvFT1ayGhEStb9Te6FcC1S3adgqyr5vk9olu-lyir3AtXRuU5mj_t4-38HOQujq6kW0oRDMGooEFIoSnclQh8FG6IFAENlLWCt9bUOXME-VJioAlQDeNcAPEHNTCB4tkRoNC0dBAQ3sVowFbMSxQ_SXJpu7oJGCxmQhtPKXJd7u04x3uGaOg8Wpw5amvpC3IHQmIHSQ3aUfStOTWb4NWX7Qf5ry04PKUJ01DSKJRgxWKu7NGGPsLAH0JdTAYob0ZsxoQl90h-dKYrOUDi4kdFZsV7TOy6udk2oUreZIe7ACqsYcBjtt-7d0KGrfDvmw3gEDHySDvdjAJF9Dlj9Ki8hkum2HTEJ6CjHmeutgFipLSF07Y2OTuPCpjz_jBDglHguk4BrhIeWtHRp_AiMwzSlImUz2G-f9acpvQtZbcuOEJkVKxh8Pt4Hd2DaFg1dwX5d9-LSYj9NAeBzlV2pxzV2qEFmLy_XGDAvEGc5K6sH7K5bTu0P0_UxWydDqulkTdBKeAt3p297puxuKvMHbiucD2nG0Benrr4tBBlFCrXE3c2l4pQTvp2h2yEgJfjNriSJP7l3paygFFeZVZUCPxJczIu2pfg2raY2OGFh3tDULsAl_MdS-IbZXe3ecFj-46HWiW3FaiKFvwE","tags":[],"target":"","quantity":"0","data":"T2YgdGhhdCBjb2xvc3NhbCB3cmVjaw","reward":"0","signature":"h4Q9y_SqZl7iD5UgW--CtK3kamrvPzaIJr88qu4u9mEMQL5hU4rVAm_8PZ4kooyXoSWtbUOTl9GBbik8giJU_GifiZCGQS5TcL0zLdq_ndmmgul2Aq8uz_D-08YxSlwvk39U7CdLf3rPO41Ny8H4WavDqAVT81e0964SiDQdCVRRROydCMq6LNFgQ_2xD5gjLhW9_fUnIpaS0hUH_pErQByY1oMFdP03Qy9cZnCohaUqn31SLPAaQZhFKQeXCx0-BLwiT9JcAxQciGV3jNQ_2_1INOylADAiZ1IJ_vDNBb7vUEuEd0UWCW0p-WzaPqzHRAlyjuP13FtX2kkeGJjT-RA6Vnp-xZiH2gCH2IPKB-PMnwC5TxmqxIpOX3D1snXuUXe776FAGGMkaxMkKnIX5y915I2kOp6DDK2ixzVp-dFniyXcCBDTGM1j_yXy-BQorPfm_HbFqU2idZQCpE7Y25BIVoC0N_p8eWDmoEbOSDPLIAVD-_netxA-PEqgdogQsGr5eVRbhxD3iHTNxHsYR6LeNHDDQv-AugsJb3AaypTyKK3Zpukhl-twZj9g7x6Q0DCfG4XrPNHUXs4P9b03yZvOBA7YxCf4M54gWO1Tjv2_S755_Im_CA5snzwA4wNse5uRHxXvr3qJ2hK0HUrhwgqn5CDcKM4mSrlXM5dufWo"} ================================================ FILE: genesis_data/genesis_txs/lsuH-ITPI--6KSzhIFclsEAWOSoRQu-8tlnOSxj_Er0.json ================================================ 
{"id":"lsuH-ITPI--6KSzhIFclsEAWOSoRQu-8tlnOSxj_Er0","last_tx":"","owner":"pXZj4aL8SYN1ZXQJhYd8YmaebIpsaidvecp4dnEaAZTghwTbQE8qms9preUjQga2q-2Df0AW6BRM6FZGJ26eLD8FTv0HPwc858MMMTVgnngx2lbI2cXOSrw7vLc8XsZ75EYkh3GxWTNr-VMxhNoIhfXeTv7lUo4DwtngbqFiiY-GKR3sw_fzXnypiV58iyse5XE-16ov-vSzDabXO5j2NqnQIMuAEktbBTDdJGcINOGjY7e5uLq1DPMlDJqD8h8Kfp9r9IzK30IAWxxBlOIs-LlrUHirYyTJimrbNf9xvrFKzMW4UUDxMNQLadmVUpjZsH5U2BqFVqIJySZ1XFRU-qzRIka4MbSOAhvbgYx5NaDdHyjPTHZ_UypKksC7gOJ4eyLYA-6sa6C4o-ric6ai_-05wo7YID4qWeNC8JtNjC8YVXZopOpQRd_fHprKOfsFjQfNk1GzqJYDJmeWWu4UDY2ksd-K8U8xMgiMS4Ezr2DA4F7iKmKdmN15RsVz0sFHXPn0ba_xfgBta1g2GmLWomKK38y3Hye0kkN7cQKuBPw4Z3oi1NT73qtq4m8WeXkmcASLzcfNMGNW7Mb0GAVW-IREa92AHa5ADcyyfcKCiMI9vIOQCM2I5vhGn_j92IY-lW3mKu3f-4eBB8pEWwsaEVkHrXbWzixN2oN8FplV2U0","tags":[],"target":"","quantity":"0","data":"U3VsbG9mIHdhcyBoZXJlIDotKQ","reward":"0","signature":"FAFlc8pGwb1qv6jiX7-1NoFG0dwgdhpNAX01vj7oECcoGZXF5EgejlExQaelQAwlqtKVce9HDNrc2hhxTK92jvBf2yLalzjClA9pAMQBJmsQIxVuT-cVrmPysRBPpSSdcwrnEOi_W86vSMpUuHoV9x4PZ01CMuc-9pe7ALmgkd7wE1vhy-tqfVJZnGCn41v_YQuSJR0ht9y8UP0D8o3a2qolkDsxxj5XswPwusOYj0nJDlR1QY6t3XO3xYRdw_uB4f_tS1-ijqUovIWNrW-vL7wgHsyLRfwDLKaoBJHcW_iCbbHCY3XFn5WAWLaVF0Tn-vMfDQCTnJBKb5I4DPfdq2iZ82Z6BpP76Jg2xHMBWdlBfM_VmbMuCA4O5BdPTnC0TYDKtJLcdnmsiPz93TB-9xAzImWgJ0ukR6asM3w6qzlHZ0ek1pgjz4jxcrGvbKaHma2q-0Ak_59r2HV6pUdjXRpMMp4IwWr11Vq6qRgRKfPAXzJB4aXuR8kOBefBJH5XUJ2y_UkB4xtHi9tqdPavsApbwUZOBL8z9sXAeT0-KwZ74IBILdko-i7jOf3MeB0iT5ynIRitONuzKkk97nA_dlXTJhLFDErYr20k5mFdVMwKVgY3QhCPI81ZpkIys_oOeKO4DQeR22t6zcff8pyE0S4DEmk2GANoPra5793tWhM"} ================================================ FILE: genesis_data/genesis_txs/luQlV_58e9qjm7EZpoO6f5Y1j349Q34UwTW1Lx9J_vE.json ================================================ 
{"id":"luQlV_58e9qjm7EZpoO6f5Y1j349Q34UwTW1Lx9J_vE","last_tx":"","owner":"zcuj3nZt_YL4hipF-S1IclpbGeQZA8y4RsjGt5bz4qfHTbB2p7SgIcFDnR5noajbQ6nbIkZWLpKrPHEd_0XGZWdqsGiOdzNW3cjvpYJKMmpKgR3T6cvL_2e85x6VRbimaeuwIYQr-MYpsy7CzZHqF5CQZJ5feZ-6NbZEHd2oJkrTGFCVs4dqcUdVdTyLpwVuUYhfW4U8GtIB9Xg-jG_aYZYd2i-yr3j0WGPlerswOHmoT47P7s7eH8lP8yMe6selc2Y9zrlqkiz5iMAE_1dnf-pZNcys-8Gcu2dkE_tCuLbqwG_r2WjrnJTeWQN12NL-HPxtsbfbJEJauo6kwzFbZOLD6J83ImhHdZqY3G5eTKLkaU5CtBs45llgIcm85mZJ8eLfbyO_rvtXm_n34sc7jta-4_DlRV2la9SA5nOFB_iy57PecwtmgWzUa6qv73XvKDj0noJafYRssOcXiPBJPFvBhAAEyA-cIq-c81QeGXMPB7tZ5KRf2hZowvqKLgk4bI9pg9dlBjSDzSJwnw2N8VYX1qIf2pzcBbkPkvy2lt0pbUrTdDOkJce5VTseC5CX1dfHL9g-A83lu5FabEX1qzQt_-mTDQLByeuWwneNEx7bpC5KnSpSw2KHAEbLx35M_IKdY0HfD2OZuCVduEtj7q4PueXjmRbO8L34kvajYBM","tags":[],"target":"","quantity":"0","data":"TU1YVklJ","reward":"0","signature":"QQVB8HVlGM0si5it7GAngyFzRjUL-b5iBwc6fPSXazOxD0jTgHLL6Xrz3GuTaSr5fjcBiRcp2735jDpVZ3G5-9vNhtwARiJyXiMiCdh9pW7L6BGD3t6SEvrKD2Y9-LDkbcOj7-7f7bEeY_TtJTKyetwag2fMIuew2janvwXohQWt1V18yDmCKclkouAT4OpN1pR6iSxEVJkbOSdU69-zD6-22qIeElpiG9qvSVfFoJYplaNWf_v3EajGOITD3ULbY-5CvuPUjvnksILU70JhIqtI0taU57L2bPmxXpinTnk9K4PG7evixU-eN55l2lvJ7DYVrLtLxKFBhtyKrdu55vznJx5qKlgCDNBTYOxxIhfm6MKEw18YzcPosUbgfHho_mEEQpPjqYAmwOW-Mbx3izYR7W3KRt-gGooDWi5tpxbr23ABCXGnLLxqByS9qIGi6D-FY4K6HKIcBSMMTMQumc0VlZcTevdhqUBkOTMfr1LL16txmEJi0QrIImg-_lZ56vCrb97kOp_YKLtHBSsGy6IqVwZJnuN-SoxcJ4Gh3Z6ZlBLKIWsaq6HRgeayJYO8xRnPK6O57wLYMJl7TO_k5qFW5CX4CrfZrXWJS1AIB3Has2PM-oR6YdY32KkPi4sVyor2h9y74gJ7FnQW8I8OYOdkrhXzX2D_NKoJ87BIWA0"} ================================================ FILE: genesis_data/genesis_txs/luyHFFFOvjKPqi6nVrxngcHaQ3RwbMDMqVTLqPagHy0.json ================================================ 
{"id":"luyHFFFOvjKPqi6nVrxngcHaQ3RwbMDMqVTLqPagHy0","last_tx":"","owner":"sgsE9-C0c1KaOeGISQMRDrd5PFLVDBJ4xPh0NWKo8GtIrHa0ZjFO0-5DsaaCKaLsMhsNb_9AG-DG1Hwr1S3C_QoJlON4a7Dv-xOpTczjyvHaFZYsfyVttekSfV8vVhcP3i4VbKBO7Jln4OPt0qgJQB8vTVnr3F92levzt11RGu_1k3R3Y3NHse_KMA_dWiNSbUFYUXCO3Ykxol92VDWIBNojAkARjkASb9tgzBBtIU9w6x2Uw43NDavtUuxJZMKBatzAwPwfKAl2g2RC4aFHqBSvwiA-7jsvUB9Mj2506G0eIkla33Qyf5b7hT2JIq-mXeKk5qHetatHviPvOTCibQktee3jPSF78K97phAlVmr_dGBD7wfrsP-c3JKmNEX4aOBI4sjmLXgGqjzCMG7DqfU2bwQNFfaR_-M77dPjgaY58UlMw5j359G80qpB1abno_yB8Pdw6mLKIOmcDI4hBw_-n7_WW2L3EQf6m9B8UJZL8sBZIlItZ4gIrMtZ6hBFf36j7gdGq-BjxKMJhApe6ah2sKUbEeCRl_6g4Yk3Ai2RemifBYZB6AjJkAD4aJsVQYY2WOD5__xR5acLpul-tuJHYutxDbBvE4yANuQYk1WYiA8t72tcc1JWTJHGdJ6-2Ewp0YAIiOJVOepK9LqLfXjLPrkcSHNgP3mo84jQA_M","tags":[],"target":"","quantity":"0","data":"SlVTVElDRSE","reward":"0","signature":"fSiWt8YR0NE_0tk26rzu5E-xuORGDD_x3M4eIO5rFWpczIZPzt3CJlKWtbAR0gGBrDMCRdeymLCJXSdDbAQ7zBa66PGpSKEDFGwmg0u2R42PItzpZNoeBTOtNqVJAOdZHQth8aSKwN1D-qCIRVfcUeWbZrmzWcRtmbWMyNAHiMzicWWAu-YSC1KOToLqdJeaY08hvUV6qLBbYMxB0_x4SOc_wakx2gz6oy5LSyhBRxK5nadbq49BXasczWEu3-2wIVsuDl3XXzZNQCodP0oe8-n9qtNqQp1-CzeHLanbmGGZeIKhlJ0sfv91sD93167mK7TN8rqjNH5W32VEUlyvOLhNGJMPTglzuxZmbmnUJzFZrDqYE_xla1YYZiwjxma2QqbhNM1j7J7PAQYpY-PmzBYRG-Z5wSG2FZfp8YN1Bdv2cFTgw3_CyZhMuMKYV0z6B9OTdNfprFwBh9XYLIroAvhud58cLMIBkapaAR9a8jvzANWzTqPwsVoVmOxPNkC-FGwV3a-2Xezg_Ex73yr09jp8e8iUgF7NJfeYstOlVC0V4fL7Rq-6hS-7HvuUAAin-2PQLgvYHb7a-o8hVQ3OVLmmLKVVhM31DNrBJADo089ZAFQqvwPRZTwGfUIHMWP0pTu5T9qXxHCp5VTgxx0zmt7qRSKpueZj34p1mZVDoy0"} ================================================ FILE: genesis_data/genesis_txs/m1Vv28IVJIuYiToBhxFVp3dA47je3L8WkzSjggAWXAo.json ================================================ 
{"id":"m1Vv28IVJIuYiToBhxFVp3dA47je3L8WkzSjggAWXAo","last_tx":"","owner":"4IQRR1tmNYlgd0xfhX4S8AmOipWRbuyUljy9ZRX00ssB98PNWqKFzWm9O9xn_xgZQSykh9QOReEsfI6nmu3EyDLaVYN0UfkR1HcMQb3javOAlxw5oJF4uDwHhR0V_J2Cxs2lAfO47tUASL3PCy5lIbtrWGDDN7OHLCakWL1lHBTlii8-CCSxff12LynpkvyNXc9VMCIP_wbEGpzQlq5htU7WF08F5UgO-kWVeZF1unpLQajtIU4BZKHzJMLvInsvmJ-RGdWdIPZDIY348HWyShtHLBaLXVwrLxlLfHeacwuOVNaiv4C5Eb72JddbLBkVQF2xMQFYxJfRTIFEnJFeEbpzPOkIjAAR9HsrlhhU1aD2Wg2dnBnJ2KosVx5ae96yXR6ZdtYeVm3R2OLEHthD-Z927fYOGF7Xz1oN297awk6qVH83pl0270dWWQapT5hkYBGckk--XmuE3qFyuG8GEfmFeNY5fA_-3aF1rmoMeuUPuTnQvQ6ZG8QL71l1G1l40dYFsGTrXRQLJCx0OWe2KArsHcM1EzaBUJjJsR-WShTnZ8us1IN_qqOMGCnhlSL_cEcrSCePskcQFdia8ovbRrjXEpu0hFR7f8CLBB-fobqNMT42yVcFh_e0rxA0CQGkVNsHR-SySM_lAArctWwbVu4sEd-aKhDvt-tUQCyJNAc","tags":[],"target":"","quantity":"0","data":"U3RpbGxBcnRz","reward":"0","signature":"g8EXPjq0CcIN4epLePTSSadwYA0FjFwwFDp6rc278cmXsVv9apSPXztd-zZUVi97fC1PWS1J5mhNFXveSOF2r-Rni-Uo7sWfyb8LrbtDsJpmKc3dr34-QuDkZRR5hJFtKNQNAo21fAxkt75CXWPkYROA3gFnHOSqut9TZ_7oZzn3hT77MoCfMhB-ZW8JWHM45PD9jNjjqfUiCzsBUdF6lK7Hts3uMopBkefDLA_bTSNO98UL5Z_Ser26Io1lS1XeftQlMXA4das1aYMF-ySySJkHRZ70v-6ExdqQ1gpfNDBZzjGH8JHSTZ59_ecIjGGUO2NWTfXwHA7Y82YyMWzKk4zqd_BWpFF1u8iUjw-Yjpch6PDjSbkt5mk1i0JtBCxp8XPojQimil1OggBBKZlhmfUEn1YdgFUMiL6xj4qc_SJ3BBQTLYqjuQa69WOk7FCSS1wQYv9ZF10klbG6sSZ5ozWHdhx_rTyedOTfw8-foVd62VG_P6NQrvlO3U8x5OFrEBGm4PrXZffR5lScr4fBnG7fVAlYVnjiTvC0xMRYnBg5UAaiCGLPs7YAlfT7Bfh9S1nztdtLdd-_UcRHrS9imojUXf8P-b4h9UMvxRV27a9Muc9dnXvxplPudXbuAXt6WJaYXkoZ3rlK2oZsr0pqx1-YM_4LgoyS3ZMMBh_GWoc"} ================================================ FILE: genesis_data/genesis_txs/m5zFPHB-2VjCgTLStD9TLZwD1CHfLELPKkVXFJGIptM.json ================================================ 
{"id":"m5zFPHB-2VjCgTLStD9TLZwD1CHfLELPKkVXFJGIptM","last_tx":"","owner":"0P4DlnmysVSxS20bBeS2E7pyRlRR8iSi92O7R5yxbRDZNWSCOTOhGMef_ObwUo-UpyuaquKflFZ85YrzObA1Ttnpl_sIbpEL2LiKCkfD1e3-PZhTqo3DxENtmvnqHrhujwTVwQlgpKXhIzAPO7J5yJogjha-IUs9F5FYFRrMHiEckxhvwv5WQHGBGoxft9ykWYBdnRni-kQAQXRf99BoX62SM-lrmysGAjbRWBflKujkVz0XyfLzCV_ipJdslu2nWqmam4oU9FePhqaoA1wk-J85PeV0kmxNh8eHtqfRFTk3-NAQzCSvv4fRnMvrkXZaIw1vxK3aX4MdkZLoVFPKG4vXHRdGciNiU5Bjbn99inEZZnrfM8TPoEI8Dm0P4NO6HLuqOzK46iDvKS4dL3xpB4CBZEuhKqrInZu958rvcD-gMXA_BqJBGa2G7D-uO3I1zvPe7YsbJbUGvev_MWoJChIXM9-IpbyujZX3jbjEnmMzszrGqBWCuCHB7uRPObbfVJ2liHl78UiqJw8CO67g6Sc3DxFL-2zV8Zm_WuibKinjUCeIpoRvPRFRhxq1C8qbeWv0HS5gldTFXstToVe5ZkJiXueuntTv3e1Tdm3_PmIhdcKMR7B9MNrtqTaq2mJGIuX6r3Mbm_V1-xWWp7q68SayBTo7ZJvkjxiAL7J9p4M","tags":[],"target":"","quantity":"0","data":"V2VsY29tZSB0byB0aGUgbmV3IGJlZ2lubmluZyE","reward":"0","signature":"fo0h354sVKWoXysN1fRh3N02vQhmmWtu4wAmh3RAPKkPqYYpWgZjhzVxJRQ9USiLJTA3D10dIRBaseNAgPViv8ziWrEhiq78JYCfMuvL4M15R0Rq6PhJSBGV7CQqBHHkG_UYkum36brskZzrbzcj4fZwGej06femHmFUkCLKuCdn3Co342UeLfq52x_tUAWN-7gGk0ivY_OrWOKv1CsATMuBSzFFd5we9ILWUc9aCsqXThD_cehav1bNJG4jPdfgBE6PJ7eA4bdHRvyTlkbJqXn5IJedItV552naSnRd7IrO_9zmy1z9V8D2vV1UchwE1EdXuUVaV_jmb2EXOniyZUT03VFDiGw7P1qWT6ymXJ9k3RyZDcYg-qF7mpFAqFM5B7Dp6mzg-1nUAu1CbR0WpV_Vs3y4qRQWjImw2995Z4CNRJDOvx4BfHmHx8GhQm9b0Jhv7jhVn_3xo-zDz0fF-re1spItL7pO8cNr9-moGL2qGghWYCUPuQqeeSP4lnwRTUDs5FQKF-xM5JDU7q8hwhpG7PlIRBpTOTnlKebjQ5xylXDYd1T9W-HnA-tFOh_nZGSGtP0LKp5bVqbAaErzp3bd5poRIdcAVl_0V8Z6gmYk9SqDH6fWOVpEwr66dTMgqHXXWeDJeeIT556dwZ-mYGFltDkxgMZgGoXyP0GJ90E"} ================================================ FILE: genesis_data/genesis_txs/mGAMsTqBzau-MjTkMS5Z3g2_nUD-qQWeLtq6qlzkVl0.json ================================================ 
{"id":"mGAMsTqBzau-MjTkMS5Z3g2_nUD-qQWeLtq6qlzkVl0","last_tx":"","owner":"yk6SsEVtLmU8ROYXEGIiOIGfpxlsH6TA-uhADUQhd1FGYYriyK0gx_Ly68dkJYkSZTnaYE8sht9j3PxRwJSPpywN7ph0SyZSSsdTsxiYs0GFCzY04PIL6iev2PlbayMDwOWWQHBPgoE_yJgW2i8JoA07xOHcCo103OPGyiFySiIoaz1xvKcP9futUTuzlkEoQYJt3mDYknpBvKrcvabKXNUIY2h4ZqRDR0i26TCRxj2HP5YpviOHMPPnzDlIJxfp-5DPCawwnShfw0R-G00jANKT0vDtPz_iKImCtGfUYxsG8T7tM1jFLSZboUscirELdKqjE_yNMS36bEG3BW9mL-1QGK8_MXRapcJKp0HFaOIU3HVVdWO7XHuOpr52lj5FhhuT88cJ01sq3OJGBDVAUnL-EDYqj0QdqiiXwg3MeqdDL2HGYYba9Eiv1bO-C-zaxpazZR560AprqNgNDPEvwW_-PT_83Kk6YKINoUJNY4M26RjGDef2E08ZpqLArAVgTfTQjKcT2fIROu1on8tSfq9MYEHxqeT_TA_ftGmqGnGAlSjZKwvu0EMJLvl2uWs6eZuV1tfo30LIFdFuk42ReJud_Mwb1feEWK-4a0hO-aQEp2Z8zIS-LIsONZkTqM0SztM-G0sqcalENAM66nnRKH7lE4Ua0kEHqha8u8o9SPU","tags":[],"target":"","quantity":"0","data":"U3RyYW5nZXIgaW4gYSBzdHJhbmdlIGxhbmQuIExhbmQgb2YgaWNlIGFuZCBzbm93LiBUcmFwcGVkIGluc2lkZSB0aGlzIHByaXNvbi4gTG9zdCBhbmQgZmFyIGZyb20gaG9tZS4","reward":"0","signature":"DLcLRE4l-2rBY3hd6kHDIKQxh45DkiRiPnmj3VkKzz278XriVsi1TWrvXCDsG1If2r_v5p3L-s8CqGCnHHiIJewVv76remhYZv8xE9OeelykQMsmYr73K8O7ZqD3yFriDGjG2_zNEIx2HAdHedyKc0kS9Yi_Y9vzmGItbbGxTTIo0LQ2-B6130hYfqMZ4uMMRrlxq4AQMPCYmLpsqVkb8P6RnJ2qN1N8_KA3m4PPf--omO7iNk1ETS0ng5ZwoAUknRoSIFT90H13ZtEYtHks6ar55t6THnHFSKtzF5cCuPxjze0RpTs93CrQ6CAz4GoylcPZtNfHt2T9IL-KNmYU48d3_zXC34Gk5QY2RglBR-ijMegcmjeaa6b_5K1rrZioXBRD8kP9U3_6QfhOpnna8zLbDIgQYNqlvpmwcSLNWgsHvPhVuV0tdVbhKyclnkRoNnkctLDMJ9gQtSqEf236zjnkmbcqi-2rNSwxWesUG0hkl5Oi6LpyMM-J0Ok3ueeASyArKQuKsfJ9eDjawswbX3wMnWBNPv5e7Njwm1GTsau9Q81B6L0S1AnX8zcvzMPjKKElDhFp4X40g5MU2KSehGTSaKs45jGCt9bmP67fVp2ciPRQNzGi5z6CwHZH4KvCEcgBLbtkK-_PniAIxN9sUYbdvFRnrxAr6eyc1zRLvkg"} ================================================ FILE: genesis_data/genesis_txs/mJUxc7XyUp1HV_VRoi_54geidr26I9PUaiNL4msSNxk.json ================================================ 
{"id":"mJUxc7XyUp1HV_VRoi_54geidr26I9PUaiNL4msSNxk","last_tx":"","owner":"uGga713Es8Iwb-rqgBXrzXEXPlNCxY0MZLW6KFCHmyDVS78PmoG6JV1eu4yxMAlnfF_sbr14yqJgcJcfIz4jFQjX8YTZiGRL-i0HcgD7ls7OU1fYp5NkzDxeuWdp5DAjm1s1h7uaI8frQyYQiStE9g3p2Hf2n6UNOPYP3rHtfrWe933he8m4hBO2r5UkucZtwS-nDirrBa59ybIzlOdF8DMDvyQbSx6-mTQSWtkrwXtyggGO36gVyHaviaDcYNJKy8NsXFmsKOExzvH2zqtJbidRwwRHtHZPvVy9xdetDx3Ira9iOAuYNWQLmM6OMG88SHaBYD9684Y4-BAgyf9b5Uzn3oj3oGKdvymKBAk1Kt58s2BQZjg52Pfd6JHDBKc4LX0cVUAU0eZlMTDus3BfUnGayT-suKi_1y2u9hNMEI2TWieIOmUUW7YScTCBi3m7V91VbZV1gJh3YozXEoeHM6_OImeOEDsNXV4eHprvPCWUc65erRqkkHAspW2AFuqIS3AZ3lXDSmCcWWKtHpBhcvjai31plK_Xt4rEjW75DeuvmyRbu9EUYEc7Ut60d1vpvn5tv_rfEOekxHdpV-j0Ai5FuQMdK_DD0xkO58GrSayluMsr6K3XdFVi_1zwhsL4LegNnafaEOK6Kx62_Np2u2aU3BZTPv45enXu_SZyjqc","tags":[],"target":"","quantity":"0","data":"Rm9yIFZhbGVyaWlhLCBIYXJyeSBhbmQgQ2hhcmxpZSBTY2hvbGV5LiBZb3UgYXJlIG15IGV2ZXJ5dGhpbmcu","reward":"0","signature":"j1Qmj3BRQoRk13l1GdfALqX5mSrOplCvxZLzXystMpEJzEXO8YukG4BwIDIz5XrZB5ceYpLxYr6AGVDeSmxbtLIMBi93J2YkG_wFYRHAj7g0UWu_DA4bBH2X1gp0mIvRYtWsaFGc0iFV6ASITkdQ8zzhSmBz5k8Npa_dzs2OGMZs8yMtnDsqOF2HAOUA3wwTFrUgn9UFZX4tZAz-_I1ubPNarq-wK8SNt8SIReiXVP79phdTtJ1ARSPT0rq0OnafIOiV-L_Y9OnOEUH226JCY59_8tuCjNX1cuCL3M2i0d_LZfzN1AOiEALLhq54CXxWozIiPQQYXF10ry8CQNJ_IROU3hr20erTWAtRTTTWxaNozwnF4GNRyQrtrECXpPKzdY9RZADMAqu2r8kJSH5JR5KfbN5ui7tQx9KgAIAJYk_E9DiY-Rc2bRyXp17raCwTN_EeeOknZ1T91mzFv9Sst2F10f9YWS_ubd4o-Dr1fd16V4FlqDXaLzoYsW1Blsy4GeG6iocDB9BzoZVCk36eveGdhWU2Mc5QDxwZ39gSiQrh5up9QpBBrHlDq8LW1bPngqQyZyTbxL_3b2B6s5KB4Mj1W3HNlUZK7rWlW1Tfo6RwzKzE-cE94WgW1OmClPExQHVAP5u_C_DiMfXHtHywjye8255j4bKvTyVY9JT93kI"} ================================================ FILE: genesis_data/genesis_txs/mcFln0_6FIuLwE9GtMRzmdQts4QALV3dxQkXdgSdO2s.json ================================================ 
{"id":"mcFln0_6FIuLwE9GtMRzmdQts4QALV3dxQkXdgSdO2s","last_tx":"","owner":"tQVoe4e5Y-3CDV5V84p1KxipTg-SSxHfwkDRjdSsuldtbYTrTN0OCkaz8k9bx4JbVWcpoGrscMAO23VfvQwj6wWVEOsredFn9w1yEF-XAZ8SSrCM_Hga5ZbvY_HnYdjl1JejL6EpnStLdalHctDjg2z8JZ78vmCcC4BWgVUo0Z964E8rDSHUmafoyN0S-gj8AW2jXKgrWrrB6B6oLdIrxuobJRoK4JWmg09Bq17eIjRmnomTg4OSlNO5KJoNTVHHEwiqSZywJgyUQcHFPprXCckIGmojduiIt8tYgIg25HynxhBOBA9OUbdx24GbUNDU2pC0q0nBN39b6mSOKinRLtuJzaAe0N3nUqlEjInRm_N62OrPebsaQAmm67Z4e0T0GRlcdp8mtMxoJwgZvWPL8CcXQX8r4fDYj6eugpRRE7RUbuAl0_kYhYTt2voS5hHqhoW4HGjGm4GgbPa73q3tZaQ35MFVWBeHosHpKa5w_UOmZlJkZ-QNp-F0fo49ciWe74alxzZsuCLUtawd7k3zLz6hmYhRDu1vqT_6zPKvIHwRmbZ_wuZC4o_3u4RRlpwZ5xM-flA4oIm8qATEtX62T4HYkFYbvctki_9OiD2hfVatoSaMr_McCLb01WTS0AdKvhEIRJA2zq6eXisiPHoiQkyMLVK5P1b1tAwTZnVvQyM","tags":[],"target":"","quantity":"0","data":"UmVtZW1iZXIgQml0Y29pbnM_IElmIHNvLCBzZW5kIHlvdXIgc3BhcmVzIHRvIHRoaXMgYWRkcmVzcywgc2luY2UgdGhleSdyZSBub3cgb2Jzb2xldGUhIDFlWnIyRWZWaGRCZnBGMkpIclZUckFIc2dIZ3l3V280RA","reward":"0","signature":"lJRC2RIDY6t_e0Br5nSwTRx7PnSfrMBm5Oz5P-k1uBqBLCxMUU-pinNyH0tKEsWZDIGUFS0hSl9h7B2YkbksOaGfLbNxEFsNN15sDWjYNCCntEMXXkwcNS4Pc4Kn-sXYAGqkFY_VPiHLjv-pz4HYZIYyoo8re-0ACFaWavZTQSIYpxZntVbJXb7mp6wG-mqCTExWGXI4sKUyehIYoRrKVMa0CIIWBAuPDvWnoOJq1ocuFi97zN4rFiSTyBwaNuWkC4Mlnytd6lcmbyysM1pZsm40v95ONlVu_U_IfnM9B3Fp-kSlEvSf3CSI4snqf4Z3JX0ItfaBdjCaZ6AqOfI4hyEs61KDulld3ox3bso1fPvMfkZCgPNQ2UbYyQhe2JuTLV90qmCMJDhekZAakzy7r-u-qZ6bCo00sW8J-Rkjdwp0UPdv6pfx45mQX7UINm2syxV_cgJTO80Rvh0x_bwJK3KE7qy6yNvWLNi2d9z5DjEHdeRihwAjvlVQM80q0_A3sDOiGPbYExhCQG7NYoZ5YdWHBzDACbIEs4rYLl1oWMPzJBzwpLioW-c_zfYRLmU1eQIlQjhXky4dfuIuTLANXcccvPNw0_8Y9Fk5ctgEiRDLUW_r_F0u03CghtUyywQzyXrxs_dCMC2yWvRRk8idVW4h8Fr_5DMOZ2JAsNt6HNQ"} ================================================ FILE: genesis_data/genesis_txs/mvGgGlFTDJ0ukM6Bssd8G8B5PrEppr4Sg1_NTvzzV1U.json ================================================ 
{"id":"mvGgGlFTDJ0ukM6Bssd8G8B5PrEppr4Sg1_NTvzzV1U","last_tx":"","owner":"pGSppYyxPUmpTgIYrZOBx5NuX0GNpEjT9wtQ4TAuje9UlkzVGNkjJy1YUiK41sDYDcLy_ayhHS2xee7Yyz_tNSsvfO0HZ70ZdJxpE7746fOlLJtMCJhog_IvCu1lttPQBd2vHKCqh2Kdn0KL_QcIQI_6OfSJS6p3RXFEWjkkjzgGZ8Y4cTBwOUGObofbzb4bjccgpUwf8M1SMmw2oR4ifJSlrLc19g2q2WwmlRBSd0tTLoDwnF9FfQh_4lXKhTSj7ibJMnp_szLy5jI-5VgEG568WKYDg4cSFKpVqvELnQtktn6spyXIUVVm_X5y0BgyqeR-jz_LWRJL-Iy-ct_aEkFaAUS-RoyQwzjIO7bQByVGNQsOZpOtzQaQ3DFyBr0aE4_JkDDOK4cqLYY8n3d2ERRELcST6pT3xtsV2UWI66K1anwifO29h1nXHq-3vuV1Jpd6cFvY1OrJQcvLkUw9MbcRm5PiABppNjlYT3hPsJJGQyC0O3Ebid1STJuH5WmsxdX_OnALQFcCgkO8ZHg1A4Du_ZR-QCX4cyLJidA3_NxoqN5zixhtBfEnM2doOC0w3fFp6QEHSVMTmQQv4hEqc1edEdYjMB0iEhus5MRorNu25iX4WVHO8XlavMspeNkc4emzenHBLcA9qQasVaMytiH1KmzCcHkYgnfo05WJ3Ts","tags":[],"target":"","quantity":"0","data":"TWFrZSBzb21lIGhpc3Rvcnkh","reward":"0","signature":"aC3hfBdG31cZXz0BlFaBeTuQeZUsbHR71cOEmbKmvyKNKJwBh1SBs7AUZiY5ropVHwk_zYwH4P0VECe8w50fLLJrv-RKvXuZRUMJoO6LokwSKctpb_fC3taoNEHFGj3k_L42Q6urSM6hN9fvHajSk7-gqObBlQkQ3hEG0fmi5sq1yV_p2wPIsCF52pCu-IN9F8iqfR5znuqGV6fpF3v4hG2hssy1ukfiiIgLXDrs7tbDYbId3u-x14s9yYSQxZWt6cMnFn576HJVIfFbtapBJiJY5cFdyLqa-Gz5hLghOF3ilTH71txvIfBXhrsTyeKbx_7H3GpEPdzOOggDUqF4ZDcx_jmvh0cGdwRTVW4kTJ19eK641nEalxYbcZnZ8ORX4SVfNGtg0aG_fPkGb-L6Hk-itGnP85WW3mMM_35P4b0ZVjDxj4xJAv8ZNmlPWgyr4pXYt-6cLzBf7nz4k7L-DF3h8CCtwDb2KDH-t6AQ6ftSthIid270E6jhj5FvrgneUfZlXBBSVVE3Y3uKKIdta6tUjJdSurUpNwv6rWX1urvzDlwi4SCaH3_FJ3bdTAYWWlCerd0dAYNjh5v6eGCaFx6Izwus8HKRDW5Kcql2dUZzBMxXMd5K9UX9ycTVPCsCPTnHSoeOo3RAfw3JaqnNqGBv4K7aiK3pm27rs669MJg"} ================================================ FILE: genesis_data/genesis_txs/n6TKbsqmGl2m3yH15RAe405vYZQ7DStlvYsHCHp1D0U.json ================================================ 
{"id":"n6TKbsqmGl2m3yH15RAe405vYZQ7DStlvYsHCHp1D0U","last_tx":"","owner":"vaYWXaJiRedVrwBFqn9YCh-s21DXUuR5TxMNxUM13sPET19RwH9_wFqjKzoHsAVDjt7OBtm8FkOiD8H6ZO449gWiGw1fVGiNEFQJMMQ9EanQ7NJ1vPD9u8bPAB6FCmPBPg-vpNCCArOPN2xY87YA_DdTGuKL_bVTxFrq4jYP5PMfPie5JhOSmD2KpmCrpLvW8GT1VFRVUAB9nbta7ec5U9KN7rd5aQwrnL2PkDPXR0cGigZLDB0UFUvlNXYR2SyBwAeGcBYfPLmKAmRTxtv8FhceH3UzlpYIWdiVDyk0RPuxtlYUzpvTi3yLEX_SyhltxUTd-qlwumi28pqUQRjPm7uqm2iC8diIZ8eBHnCRZHkgnXotBsMWhNAhmzQbG1GVS8EbpC50NmvZDGNwpnfnJuXHws1-RT_8Qw6MJHwhJclc1er9rTs5gqc56oBOBhDoZS_iV_XR4p3BFw24IqyPuoGQ4PIVcqbWhdns_uJ6aefUWrRp-WCsprHVWJZy-QtyuARld6HCpFmVYxkYHTyeEaVF7ywNX9sYvQzTM2y9bvmeLRYYaPeMvFs3XsS5is-yVSl7ICG0xs_TUvcGfOdByf3bumMqY08if4wdch-ye7tmVBtxGMfjekIeXpxMqAwNFgKjErLdAtr43Xfq_wcOGQBtVHiWEhQddakZueuLtok","tags":[],"target":"","quantity":"0","data":"VGhpcyBJcyBGb3IgTXkgU29ucyBGdXR1cmUhIFRoZSBLLkkuTi5HIEdpbHJveSB3YXMgSGVyZSE","reward":"0","signature":"rJxe03qPInWG-BddW1G9St2HSjnkUmGLg0BVPDgLtx6z2A91tP06XpuGG9PVBJ3W3MEp-biqFbYIeXaK7LktOc11lN0S8sj2XpZbxokgXA-C68UZ7lS8YJoQ53rPvGYwMfGSfl-deOCqGsKCICFt9_AY05rjj8KD3c1zJ3zRNlx5xl631z0GOGyrUBWflZTUQW_Kxp6azjylNez1JdF2EW37ggFOif1aYajks6LiFeYsdjRSeKJ8rE6jyLNxYZXzzyVUnEgbfb1tuffVET0npbN9xdx3-Rjh-pEU7wivgPZWJLnE0q4Q8Uo8iGfdlD23GuP0Zr3JwKfMdEJ2Xfas__M7AoD7wXyAsrsiONqxhMQoibNAOsFjl-T6z9Vl4Fam6rLZ8GIWG6F8rQhg11o4j7aMISoIfTJVOqcyWBfxao5S53CDsZEkPUahJaewRYs1lgK1XXJjf4dvnecmYY9BVf10DwEDNxlc6WrCX4GJYYxcrMblAONNe8YJQzZ3O_LI8ObQ6-7Zr1gLC1kD8pe8o1z0izDP52JJw5qAI3iBqf5D41l3rGwHUpSolw6GmImjQNNoJ7NP-YDsjWhSzvQsD1y6QsULcRhqR-ucEHo-PdYfwmcBn9WkVlE5bMXffQSvr7fx3Iosrx38TpHQ5QoO96hwmp6ZGttiCJd4XspBtrI"} ================================================ FILE: genesis_data/genesis_txs/nXGMduBKL3mpsnFNPctfjEa9Z9zlMpdxcRrdkK95D80.json ================================================ 
{"id":"nXGMduBKL3mpsnFNPctfjEa9Z9zlMpdxcRrdkK95D80","last_tx":"","owner":"xC9EjByPG3krj1s0MGPbJzxehrr98lKqshtXr6ZYBIhKyt67Piq0-ZjXPs7URa840-Cous8wchZGjdY9Vl6aTztBwMHfeHvyVmBj7rdnGUVBUefOZuNyhrvYJnnJVDcU7dva-gMQpCk1971AyXmyA8kSS5OAPL4-GBhWEbF55eFemxjIFHBVdgGEh2_mawhjvVGozncaKcvqLcQ8T7DCYddgUGdKaOpqXdsjYOeEevh9Kh_PJ5WBsamYqAS_y1O7EMWFaMtzcAV87lvHjmamauqXBnvMnrJG7QNvgcal71-Lgx3wa49fsFftxsfg_au9qw66IriVeRYBlFIK6bWrRAP2wyk9-UmJpaLk0NpKAudpMY2qWRhngu_tUNTjrVOhmDPLbyocJnbSDnAaBP6hUgc45x_A-XhrBzXB4ePsQObXrBg7tSOfbIyVqaIIymHD5Vtf7JRCPoeQPrS_jQ5FIqleg-fGEsqga-0EAWR8wRWg5kMCG5ssuHFuj5Qw9wUlIbLbN6AosbLwSh9YCEKshOEL1ivjOaWkR8lu2QvaJ4nRl0sK4OZsmzeiXfxcUGtHp0q8PZPrutqp4SVhvZ28w5co1WQgmY2GVDr4WO53YRoHPlhS6uRLk3Hx-Zhk4RcWdJxh_UNjR1Hl86LPlIx1nB2Js5ck2rAhWOgea73z2sU","tags":[],"target":"","quantity":"0","data":"Xy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl9fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uX18uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl9fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uX18uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl9fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uX18uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl9fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uX18uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8uLS5fLi0uXy4tLl8gWE9YTyBDYWxsZXJibG9k","reward":"0","signature":"faBgU1z4hdhijeDlDUFO237huItulSISMDCfLrHISKLYN8EMJMeOt0zZFBAyyLExMtW68LQKKLRiudTjWoxoXU73jBbR8KluvDUB74xobMoAp8dAaX7XYoRq5D5HwTHYP2RAojMLgS-n6yqpfDoF4as6MfrNQXmAzlgsjD8hE8VXXWX22cTwPH7jvtxZaQrqyLB-uOarm_5FZShv6ubL7bzsYpHYD8-2tf9e
18FdHY7cO1soFA8Ar278-epxYtc_BKRx2R7brF22LHrWgCEb2v1ZanVl4VF_xRrR0thUZQJ2MgY-nmvPFma-lVzn7Yp5R9TgHRjVZDyS1Lcp2mgtRREl8jU-LCzcOJcQ0c5t_hnIIAPp988bZziRuRj3mnqPvlja-kBgmpVXC3ZG7TGYZpDN8S3XczsRafa6IJdbmXvtDjyuM77oHwZlbnxDdHVvoSYgLlz6CNZHFc80hwTc4ajE2VTCoaNu6yMYLfQogA7zUQrnBwpztHbeiIRlDqyx7VzB_Di52njsBTM1zAM8wTeuAkx5uFU2cPGRdFnQMZcFMao7v17R8biR685Ou-gfFdEWSF70PRwvMjmQowDxvqXRrzow91j6vsS7IY5cZOjoETg_GrGfjlUEfhxZGmzweJAglH5-qcEIEEJdxKUSd9e2mqOGE3B3FDR0pwJrhDg"} ================================================ FILE: genesis_data/genesis_txs/nh2sbgjxu6MmU8yGV00w7X4q4XCJETeYE3zVtcj2ldk.json ================================================ {"id":"nh2sbgjxu6MmU8yGV00w7X4q4XCJETeYE3zVtcj2ldk","last_tx":"","owner":"60xroMkdahbCtcpxFC0U0NKakMMAFuarkVM7GH8bvubCxL0cb0Ci2xwyQquCJ2v8SKcv-HDR1bnjGvb6x_63sQahzqc5KJF67xTNi0L64u4UZpdGWj0etqm8rzE59rFZylhMbzcH4FiEJrIcX-6r3ZRjMEQPVCedji5gaVVokDWUjukPRyE_0Nk7alMXNrPaeEI_6bcU9V93VYx7rwoIfSBf9B6rBaFQqz0FCr4S5pt1rm0ung3DeHyls7Tq0M3h762G7LrJgwDcDx9yXuW2mHw3WoABj63uru02h_f_mEfk1ZLA9nDGSJTrd8MtPB7HhibfwIhMm2w9y3WMakzelCXZve_JsOsc2CYz9Um1TWOIm54y1JCJK7zYIRgteR4BdVii3G7XXE1nARSN_Lpor2Y2WlOnVEEWpg6cuDg2rXzGjT-YX3eTLYIdYuOfur3NKzTVViONLlnQZSaarCJGXKgrqFaqpM0w_InKseIAOvMFMfDbcmLvAzJI84mE6hmUeWzfKTaBAok4mq7EKePtEhCQvNJ_Qp_gIQH9bcmjTB8-Xtu0OX3NJbfEH14yCabuPgm0lK67Qb9JHeGUgsRMAL0SFF_P5nToVU9UwhQlLPBS3Gkvoum5HqJNBR-dJPrlVnc3uf0_PEWFJ_WzAse26FZ5PualZu0xUFsEgkPK-S8","tags":[],"target":"","quantity":"0","data":"SGF2ZSBhIGxvb2sgYXQgc29tZSB3b3JkcyBvZiB0aGUgZWFybHkgYWRvcHRlcnMh","reward":"0","signature":"1NzFNwdn7mTUEeG2e00V8g3EoWJt91WEvUzhKYjSq3z0y5w-uqxijeNKdAUyNveehpYI-gawS2jCbDnhWjTke5n7A5gBo_zV8TTjoK3j28rl9SMpbgU0ofd_Y1qkku-w8BOI6lvwiWT3lBaapOvK4TYTAFDDnS5kjBq1RL7EzW3mJZTlaRkM_lteKcaw82KqofL_OoN1maKIMcLx_3fWdl2MyKzqemtRwazSW-5aMYljt3hn3481FupxVjHB8J4aFDQDWUbfXNw4_iDjwUshLQ5zky4ZULshMLtDv0CaQ8yshtOiO-pnhpieg9qhp07-ohfX4rcwx0Swo2MXFepGSYWnSryxXplc72rxDdknr-bW5gxA7x7Tw28tdS_I_mxM47LKKdlYp2g0jGQ2HuO76aCeLyf1b1gWmMjJYwM2XXKe6euEbkupMMp6augvIBCnS7aVGiiiCPovcaRJW8BT
ZFBmUhC5izUgA-8nIburyAEWynWFYOraIjasQHJCpVVc0TfQrDux28Kdbd74Y0TnpoN-QNGUOln-6pLC1cig0FikJuQ--V2_qgAf7ofSlEZ-wYcRg3Q4iHkcZGbkN44fsMd-lFHhQGFNFgXJUMwsWM8PbDIgVyxi-aqv5tY9ch-gwOqwUX1u36pNrV71BFXFWRPnzDM0Zm9ojBBxn1lsKf4"} ================================================ FILE: genesis_data/genesis_txs/o0nw6fU4gPL7Ae45x1BEQr5GkXSzZUrWnZrdIWqgx6w.json ================================================ {"id":"o0nw6fU4gPL7Ae45x1BEQr5GkXSzZUrWnZrdIWqgx6w","last_tx":"","owner":"xIWQ43vEyFeOiUCOXgwcr05qeYO514wqrLD2hW3k8EJnvQKibwSOMAaVIPeiklHALEuvADoYNf1VlG7ezPwbbAHNCNvKa0v0-Q2v-Me0fFmK0BQyUU1QNKX6lPm5XfoqZ3nW-Z14uv4mBrIT1mx_nSIiEY2O_TfauMzMxkwZvFn55BtcNrdMfzDz6opIDPxfx_p04my4J-vtGXZDaI2psnJpAPHN0DPVT4Hi3PaNMllKcywmgzSm-XblgOSz_jJU75Ag6Vc3bjymiwO_ik-Co66hvEsjnvmdVXh5vXl3RVmi7iOQympWEVc6Cr1ak_EgAQNLGf6fXf9EoWaCBW3TWzv1oGT2upz3kKjxy96eAqe2Vi-reqEaaupt0hnMBtb8PHoIMSjyluxYg2QBfmjEnp4UkCvtj_exo9XwazVYnjUC_iAbiEXVTh7lZ5LIexj1Tm9qqFkfhwmUNchMdkpH8YAy6toBuu-DvZtDLUR77qKSxnVDSovPbJDyjFNv-90My6JN7l37X7leUbysxXIl60k6dqU1ylnrSUIgGnZtsU5h0ulWyux7kgzqJAu9HtxaVc2fP7_bxq2_fWOB6fwhD_vAhOO-4YmWYQenRJUsV1ys_3lADkk9WEuRQTPNtki4Z9QWB9wV-g9VQ9Mi1ngyd-QUQQ44lli8bFFlxLv-vYk","tags":[],"target":"","quantity":"0","data":"UExTIFNVQ0NFRUQ","reward":"0","signature":"e5nsoNFdAjguWEBIh8FtEioInh412ts2vHuzsZI2lQIAbmIU8TlCf_goAVADdch01vVwU33CEw1oU_nnHZ9fEL8qrldHo7t7tlEjni9c_gEL_TjWL-r6k1XY2dXG7isKLBOvBXzJEvadamY73XQ63-GUUhXkpDmgDcSVLl4ngm33Syr4fZhN-To6glAkSqsa0aB8spMFYZy1y3MJIVw8U-xhxASe_g6ApEsKYonCcxkdkiiUmhg73vg8OcQgW0HOh18KaVPOgWHbya0HYK6yCSWa72YndGfTmObEiHYu0eZIYe_3EdjMiDQDMuLBJd4HM4rQos4yiPDdhD44l9ahJpqR5rrO-pOu3pDHI_rrt5yyem_uR99RixA3PzRfuLQhCJJNdxiJl25S3hCwxgA9uqcP1t9XcuDc2aUT-_4Jmn6Y8MrC4beRonGuqcZcJ2PgaFJ6qJAoGcyNmhvOVOqvXpsVuetFrn0oVWCcZS0gsXn4P-Z3CnKg6BdSIQemCr3E9t2v1bq8pcwTS6i_dXFhrYRwZzhDUWFU8jOA9n-10CugZtvYj0JMsB2Aye0BbQ_vgRhpHdTschxAc9jc55M9lm6oJPgdSaZxfgwMPRqKb4Q7sH8ElAGwhHI7mFxMrCQWJXrZTR2lTHJWsQtILJwPvRdgnSimmXFmOUA7Sh_TWZ4"} ================================================ FILE: 
genesis_data/genesis_txs/oMP40Kgd9MxLfksmW_HAlGe8Rn1Px8tpF-NOHBfe9oo.json ================================================ {"id":"oMP40Kgd9MxLfksmW_HAlGe8Rn1Px8tpF-NOHBfe9oo","last_tx":"","owner":"wQlmBNAKnFasm2EcS1nOXBy4PzxbWTjbPx-v-Q6G4-RHdAb8meBTs8Vf1AMS3hNzvjDfn1-wUhsFYzd8hl9qMcjU6LqPoM_1sHl0jHveDgLVkEHrPlKI_JVb6uyIiJpHZZgPrz008blg0ov_0CmbbATl8DiDGWhguOIov3rNEQY36YULRoqb47ODkt1e_ZkVue9Bw3BE2931vpl73bn5qI5ReylM_zY70TPtKx4ntyhjYxdsqVxyJGbJCscXM8GQFV9VdIbozy3a58X3JLdaRaMsgEws5dGv4ILVwRt_SvKUVKMzlMMds3mZSWbxisLl4n_XinZMbdJySaPyNKwMjBpfT-wUtEfIJS_3RZYADoYhTTO_Q-OQA2elCDKVvmGhdJK7J8_SwBCdUQ7OEMA0QIye6yyIRAJFBvFZkeJ_PhCd0X5qRsGm8LuP6IC9k_UFOZppJmt4Um4XVsUiUrj9ngsJX3lhJmY17IZVSFiWivDk91Oh_EihbT3eBmF5iXUMvWQM5dfoNJARXRzALni_twjMGP3kW3AhtefeS5dZ5orgDLi4gJomu0tpY8_4VzoWz2EA1LgP-I6qpos06xQ_AWdc5UcsID-iIgSKuSuntEcyV7ZbpZ5anxrhta3Rw3lsEj_3WXR-WmWMHntuRNIHZxMi8aR5wVarFlXnDxFzUec","tags":[],"target":"","quantity":"0","data":"UmFr","reward":"0","signature":"v5HOJUO6EQrmGqhIrgjRbb5FDujsdSXOt6oPImSWrq7-o25L9pWZorhr7eBoixVeyMfp3_otzjIgFgO_O8aSQ6syGsFm7d6_k8pxgpipOMtwQKZugBVObvp9KUOXq7xntTzWYsgi5vTQzSyKkxuPWNgIxg487NFDbi0LUSyQgGNNH_NwY5ZDtq2Yu1IF4Uf7G1BXxAa0nLKInsUdg21EGDy_fKkF6869gmqA72cgRNxYe34x2dOhyznQhJNQf7BKX5Alr11r40_iAzQs38tZS7Z5FBqmZYIACZ06b3JsNEXxSkNNxR9-mPRF0levREi4JHmHCfo1gynZoMqxSAMCHyAtToHYvNn4xaqMBRZ8uCwUiEpAZwSEVznAGEbqOkErg6SknpFhy-qcJyLlCbbDAWu1RKb_UYHd_7FXFuB6ztrzGKD178swwn0mStGW0dcdchu7L52qcaqoBg-4lZYj5DXIDgJsZp7BVJtNj8vWRgsJABKyjeDwa1xuSPY4ybjDwcnNk8x_oVHcjtwMRLTipCm6rMVWQwQKu_GP3K_c7YMEsRzNmmuSzDV0O2kifbIyrrUtOSO8IjH8eoCCoKtSufZKwsGt-DfTcTcFSKGEYl6lQc7sUqbg9WjoSXVZ1iXJg1tkk8_PieceHRf0UOR3pvHG3WBSaNmmMVwyDTXf3rw"} ================================================ FILE: genesis_data/genesis_txs/oRvFwVpHVeo0iysSg2jFOAZKE-hKwbm6mGeZ6VUZmxk.json ================================================ 
{"id":"oRvFwVpHVeo0iysSg2jFOAZKE-hKwbm6mGeZ6VUZmxk","last_tx":"","owner":"1SJ5MkTz386sln5Fpaq68cW1A38Bys_WivwjsRApRPxAaTn-1igZbakDBIOTtLzgLgRzrJ4qy0xpIOQDNBklZvfNO8pRNGySCyTBUmSlPdR46BxqBG6XekEajaAwZJTAQQV4T1VpDTHagA3CdDq6ho820NuzOxuJlUK-qTkHs8wtqJIyKjIZzGwF-_OChT3VNHUwZ2x9oTy2T6gAGAZgrwv40T4PB9eCJFUqvlwzSnbCRt4MfYsYxkJGcQwpIf-EoMsZpDyvWgjfoVzkkrfJPUvEEZGJ-b8nWPS9w28Ucf-iIl_O4z45CgzMgIKs1ZkWA7DcforBK5eGT1yMr5pgTATw-Semuq4eTIscfx_DKB6zU1JFL0pi6OipT6GWPgJfG60zowGYaqb-eRT1o1Jpd-yEE2J3qJ-x34Si60pFM1YnvzcqGj7Yscs6nuETmIO5ZDxAqCQMxPnIjWIjFlt3SnVwveXs2HKfeLMsA20NyybCZpNciSPTiLNyTXURbwyImlunG_JtOOgrOErKS0UYgbGxhDzVFubqLgmwri4gyChpQKLoqlcXZGwTsSDZnt2nBUnLPieK9V83qkG7OV6U_9PvZ9fEaN7mLlI1Whx9fxDlKDuZGCMYCc5b7rZVJW0iEuscs90LvB8LkghfQQYrPjC_JS4ZK-gBctgMi_enS08","tags":[],"target":"","quantity":"0","data":"SGVsbG8gV29ybGQ","reward":"0","signature":"BSDRzmUlPnIw4auBQ0hcXSCw3ZeskKmsoD7UoQMmGR5NIDrsnr7BfHMqduf8XYVlg3ZnYgZrbFELwsk_nuh7MqqqpVWEGvwWy6IqbzsvBBXXJmoYN2b0FQcpnOZVelOC8y8fV6hLwO5w9-Xzu9fviYkcfbkHhYRPiDqlKtyL4avGSRvAWfLvowzPr3TBTcqNV3USRpzr9cjgfdN7tTn-hCVIVM0zgykcZRBlTor1KHs__N5oFCNRNCmqX7R6fOFvuIUSx9MjZrBKicNy8Bbp8t0HMSx-1AnsiQOJI1Q_ZzJ1Pae7mZJNz4Wmzf0hk0hUR2lzcqpgAYw4kHqxtsqrEBMM2iA6oCNHJTEe5vyx7420fxKULMAh3fYDp0e6EV_zFawXUro6l2xpr8PEFzBSQBe33kSUmxkvH7gs2UUx7ywIWpvtDGmqQ3-WHWTHlq3UQw_BxVRvrDmYtBJuEQx9pwAKZoByF5FI4wD-lHhy0eQFz0Sxp5qWFRCLOzdn5nLrqwdXY2bjySZYGB-p8a1Kyr661Z5mYkZtsc-G9ynwhq4gFb6FJdpm-axPO09BoPNzRqHY73W0vH67G0ea5yvuIMXXvdmFW8E6uUdo7vemHpBPsmqXCcGYXYQ7UE0uIjm3PbnTqUw50iA548NV9DqvV_IRKEf2NscDAYHLrEOUdZ4"} ================================================ FILE: genesis_data/genesis_txs/oWWJcAiBCxhtWkIqwir4-vTvD3JFpHgZRNIpS-Xjzp4.json ================================================ 
{"id":"oWWJcAiBCxhtWkIqwir4-vTvD3JFpHgZRNIpS-Xjzp4","last_tx":"","owner":"yXmINDaw-LbqQYSG-5e6mEJYb1VmGP2XRYrzY08JZfXbRYvUJLLHM1FT65pIbp75FLtvEP12sEpnuXJhipQI0YOk1eAiYThvbFB_sM6nH-Dr6Ex6PfhGJnMpHEbT-MDSwbW3ECIWR2QLd8WdNZE3cZmYUisd0liQs-tMl0GF2hW9yDmb5gAsgJp-z3d0xa2D5sRAz6gZYYpvKc50fczm-3VhJ_LlhzB-MHAFJ74hI7clKI_CAKQuf9SlqkYxuPMsD3utmks_xpQJGjHwHgDfpeAz2jYHPANCMBE12gUYyNebrHPwhiP4IJLfOPBGwRlRZ9amoVzytXVeki1wWOTzk1dk6bj4VMwa5ZJn6eIhbsHHuhL6EYv3MLRZiXfiUMKLakU-e5s3KUyx7C7GVRjyqQ4ffgxuzbP0f3mnhai3mXoWE9qfAoZi9azbrgRGyQyON3hxyDsk5D6-R5AXEXwZXsFFBdc8oaR31yt1AIoO7mjqNd5ouHNRRGSfga7QYO260rYS8vs4C848aQsP8q-Npa7hLBVLTIIROKGPuq9jI3QWpikQtj3DyThS_qvKlZk4umI2_MFPCBrwcWRd399OIU7NItCf5QWiFgZouvqQjNBH9QGZGtGlkZ676YQcHGJteDXzXgmllX30HASN4r4M1x1djMbTpPe_VpRa12qbukk","tags":[],"target":"","quantity":"0","data":"d2hvIGNhbiBiZSBhZ2FpbnN0IHVzPw","reward":"0","signature":"fogKMEx9wQCrns5vXPOVTnmVPGzkHLl3ECWiBxBzjWvDw-YaPyb3YwOM_7Bxdxx9y92Fz5otg4rOF4rSFPwwAxXP4kkbGOIWe7-NDC5WD2L9Qk3e-ahDIl2q_oEPoL7RBq-mDcwwfoZyyiDVKLQQrbEi_e4SBJbXV05oWysiZW6r1rMr-kdHQwEkig3l1T1eW6w4e7IPc2SlL1aaR_FfDV_YNWTGvbcZjAo2IAFMbz4VK7IuvTJl4T3NHnDN_hX55JCqOSdgREnNFSOjKKsUZRRZDBk2eEAWlswGIQCB3sMX4nwJ1zPi-UnOdCrBS6JkITOPXjA1MFXBtM8S9FDc-LNwIml3fXKx9Y-hD_9hYpsGL5QA0KjR-hQRZ4eWd7gJxiTONUG44RjTg1l4_mCxKV1sg24VLNvY7PA9pNwJKlya3VheZ3An6F3wScu3yozx-NgEDUiJTYcI7D2m8WDqSTUyidcPo9dPa9Zhj93dDSWz9aFRoAhRcq_Qd6DOzgGiqC3GxGWQOVkg9kQoqg9VaO6rzMgxrZETe-4G9IBLRnECnI3WkgxZJwl1HI16tQYu8TksK3wIC2lcoVAe13M_dQnaEpu5EwNmL4_PK_VEb8n_zg1nDkTNN7SqeqiPx6h2C4jAaifHtePCNEeiSv0Ozo43cDzhL5rZEefMPFgxV98"} ================================================ FILE: genesis_data/genesis_txs/ocUISm-0ItAS-N3Ydwe1swo4JmoVpRzWzngFt-pDwfo.json ================================================ 
{"id":"ocUISm-0ItAS-N3Ydwe1swo4JmoVpRzWzngFt-pDwfo","last_tx":"","owner":"zBKa2Dqcs23BLxS92xHrUzd-kQLL_JEWc5wRuN7-XqDF0Qj8uCnkLmgLMKWAHHVNSiw9wP02iF9CHp82tifqeternf1ZgaTKTrHEEPVfzfD70kB-C99XDArlzdlDRHlFcq61CBRaLKjncQ4mTNtxTKETJp1QDwHgdiZikOkpZu8RiFL3nBmX5hTN6k8h_b_OnNq_-JRs8tWXv651MTR9YNZqruVaeuXocGDj0_F5dDJRO4Dr6r9M3Iyd8O1n8PthICLmSAa_jEaGShq2gwuuMh1_JpQIq98ik6D2rBQMMiiwwLVtpO5jUzo-zjUluTNExQpJbjUr9mVherPAQi_ENW7PslQw-FCEOdZItcAKWeqDO5q2hodnhTjB_IiDXr5eD8bmvEVzSLy1nucU1UDdZBA6OGZ39L0eTXZs-a6p-w8ED20L68ED4YyLpO6by6zC5Uv-2hQPohyYllGev64fGta70KZ_HktFNxl2ieso6pt7yaORy5SBO1uD89fx7H7CbglaFzvj15ShQGxl2QRGuM1LmW1Bl--6WgNXdl0J8WkD-dUYsCnzV7vU2EVMNwAlQW6W_gFe1p1ghTPiUDeiidEzPuL_2fFFUnHQdJ7Ih3OO7EcpSf46EiSw5dHWBn3Vmt3i-EgySzzPkV1s6GhIcfgO7BHIqpEjal5t-ZvU1-c","tags":[],"target":"","quantity":"0","data":"Rm91dGlnaHQgbG92ZXMgTWluaS1Ccmln","reward":"0","signature":"jYFVv5XU3zNqpkAcxbs2xW5EWNHo4qBgPsTT-4GjSOMkII6aKYCaQlqhAiaS-cUCmXL4oZ9rIG4MkFm-gTo8T702_N3ahbFjrnS4awdf_xgm4W1pIm7dieMV1rL6DG1nbTlEBKGVKGWRefT6S7n_qNAUT2Qhj3XZodJXbMLP16rOdMUmr8J6JGqj__FD_nDr2WupXrN8pQneeu4lXfejsySbMMg5jkpQz14J5iqdMnekEsLeuZc1CFJDABCxng4vj_P5XKMxt9-i98JICHoZBSHNdHjbwMDuCP33Lm0y84odxODo9mA0Q2i4xOWLIfglai9g8rr7_2TC1hHfP1ukm0R2V73tuCi8SDDc4kQBjPQrQgiSePPIwcX3nclTY-7RDv7dHrymyhlZ32UecPysGR9BnAKp2wvlMs2AXh2DdfZVY5vBmwEvoRKP6Yn7dNJVnFydt0Xw0Vv2Yow0Sd9Mdo23MCLTgieuQy2DgYKz2gd7KwUmdXtfDis4v20cdqYQZ3SK6XfUmN4nR3CcsLjuAro5j6DL9GRkO7ObJ-L509RjNs9syhQB9EMm_7n6z0xVakti6Ss7FqeDN100zopMQVhMu0WhO5-ak0ux8zhjP_7PzFEjFqNFjSHBJcM1l1MqAse_KLJ7MLE2Tvzpy2sAjXgFghuaQiEZtHIt1Sj0Y90"} ================================================ FILE: genesis_data/genesis_txs/opfZTSNdqaxXZUmaKROD2sd4QkyNDnZE3u1A95eSw4E.json ================================================ 
{"id":"opfZTSNdqaxXZUmaKROD2sd4QkyNDnZE3u1A95eSw4E","last_tx":"","owner":"2FoOHlNiLqKgBz324I9pWVsRtWUN09fNIkQ3DzytimN3mInF88YxCVUNNA2sMnCm9gyEuh_5gYXD76JV3DDSBIthn-MTkjVD3V-Ut3A4yUN70oQ7UmmRqCeprbKMcPOd7RmWO4qgtpAnkBO7mW54meg61hTil9GYwcSmtGxsbzNAg3Vk-i0hwr6nrSMgZmaEINFQptjdheoai0zjMClZ62I_lcHsV1rhcrhcX2wiAoneDPnE4CFlTqUvfDoGC5OXHlvLXpUKW9baWKXq2jmIGP3c6-Spqkxiur-uNpifcuLWRXP7viJ6rE2nheIKxOwYsunuNzntsPVOZw0-cB9YlRLzQ49tGxl8ATNldqoNmRb7QiBohrsJHJdHzJxP00-4cp-y2qOIOKpfjGEY4SkBL8A2yskvSfj1HMoXauOM2NwOWj2bzdPgTE8EgQMxPg1Om1fseR9JYjp34WHa7PkF7AC8V_Ni5X9w0UrjVR2OBzMviywzHDoCKa_ALQ01MwuLUP2CH8bxu2_DdPiqNVZ_leRH8pHho0kWfaRS2voNZx3akiXkAKVnOauxtKL9RbVb4-ibIGlmhEOq2U6Q45ev60689MBjababt5I3DqShDfruy3yB_kSDpcBHq5EpQCgfsEBXoxlVbaLZ8QzyRPZ75rdlOiWvuE0zvZB8UMPsey8","tags":[],"target":"","quantity":"0","data":"VGhpcyBpcyB2ZXJ5IGV4Y2l0aW5nIGFuZCBJIHdob2xlaGVhcnRlZGx5IHN1cHBvcnQgdGhpcyBwcm9qZWN0IDop","reward":"0","signature":"bPrKowjT8NJ6gj6AbmRM6q06XA-WxylBt41HDudsMc6w160h3fULdId7EU1tGLQG0aIb_kj-K5-ruY8RHTK6jm0El8085o94aPlGUaALRBust99iQRcH9sTYrtZIUSsRa2kO-RPvIxBB6-pShmf2uoqc_Ou8QgrnsNS2R_xgWTjNlLAWXg8JMSvo693lw_Ev2zp9qFLIz4xo4Qmkr0POtNamJIINfDoSEwln1hRGibFZOJ4um4rmFpKdS9wPE9AtDSTJI4Ft_eJYP80-SuTaLtjN8eED0WDk51pU-QMvkJJeka0wLWf4BFwXPHwVDyEyvsNCPZrzpfyj8C4wns0mHfA5uWu0xApG5UuZrvB4C1xrLNZPIGILicYLtiZr2S_wjiyMq1dviSxNoagGRViMpzv6a420gQSNjCz6i7d-ruxgF0lKh5Z9frK-XNiMoDT8gPT07KJQzhX0HuwWUBTGjMImp8V0fqjvqF_9hACESBHzdZs7tWrFTAfDZyaECoa_S1y8namI-wueCsWc9uqFKN_lqNUWNnOOM131tAKzqhUw3d1PT0fwBV43QWFdpQS4O4eVpYW57jGsCay3LauQUiWI3csEpXYd_ZJ0kDl2hyPRnqw0D0GUFGq01gi09fnECyACzusv0Uqd150hh4-JVvywktnLgx81DTiBfJ8R7kE"} ================================================ FILE: genesis_data/genesis_txs/p9PJG5GkKZAxLyPJyDYw4_1CmhodHGGGqB785duwVwM.json ================================================ 
{"id":"p9PJG5GkKZAxLyPJyDYw4_1CmhodHGGGqB785duwVwM","last_tx":"","owner":"ujJ6LJOILRRwODjtK5Hi82vKhZk5wFNMqA0HV2nqdAtFPrq-1tw33QmPUhHEAm6CxPGScDYABPsk19ouzfWb7Hy8qFjKIZ1uxvodGtOR9TeJ2TNPgfEkFD9bl1q7cXEScoXgX6UdZCTJW7qSjIxJVMxxtoi2bfh_UWvrlDtsBK4Ru6s9KVC3nVTt38Ff0-zB5K3CjWppvAMQ4IjoBqjVfrlLbS-KbeGpV085lm3H8eC5WT2VBfIYCbJUwxzZS_6VgrJ6EI__cc78p9BXlf3E_abpV99feSO3AaheoMpHKmGYZORuhQXIuJucMpT0etofi5gCAvmCemYTJi_-40RFDNMjCkRCwEqCOjFL2APGBxPyIzAm4--rI7ggOyrghU6fqMmMs0FY9yQMPOUCiwEeWZP8km2PpfHge1d_iYE6tAvtMnWWvuhxgCk2KoRzkkRKcEruG39UF_ZS7lAh1Z1Q2HjSYyOcGtDET20W_YMf4te8rj84Jvr9ZPCc2RTbVLNrrz2i-SoDYXlkBiPSBwrpfG5xt9BFHQ4MxFPKSTBKkXmOyUgKqULukNbZfo2iJ9Skq6LVfZmiPhzQszoT2ACZeunphA9RJi5138g5-ZTwWi3LGBIan19xME1nBWLMDENygbDZnIWfIhUA7rUDzLYG03H_x9YCWnWvajyYrzIif6U","tags":[],"target":"","quantity":"0","data":"SVdMVFdJUEFBSUhBSA","reward":"0","signature":"iA2Hs961tb8LDKKyUfgwFzJ6wS9FmduKrYF7oLWbwHVKh6NEZt62uDunKQoljslZtalaSlotOaXKghDxHLMoF8oAJUrShRNZpXoUeYC-8Wqqw5lm840Gv26_2h2TIvm4cBO0RlXhd29w14tPemj9tanL9osdfByX7sM26ywzEF5KCJpl4Zq6aRRmnO631o7A0KiWEleWWx3KDc5maccIu6fMnF7-3NbJgWVEky50FsdXSrv7BukkklxYL0CjbsDYa6mFcl5BHcnueuWXnqOL-FqmCpqVllW37nu_0cW3Sb54SeVicFL5S3FaM_kDMpzOdrQrkA2lBneNs0i3nSMdakE3wD1T_wmlAUDS9co9fCv8MBFa6ZIDZEEjT86c-EpGreb0VNbabeKmOfxsN6ZZrslTi9qiGcJyR4rN0v83Fv_IPRrr9Bwbyf4PVaXO4grCLX2VJJW2hsVlLXHWswYsha_rbmwoQKW70gMO8WAwYP2PNAnN6DqiLIV7Ua2hwcA1lWq-eU1TwvT1sZQlCJcUzBOHy1IbAI-ekUC7PrR-tN847mipjLse5zGuFaQ20fZg6_QCi3O6GQmoIAhatyZ2P7CQd2S77LdntNxOb5V6UDxg6q3MDEtqcaikdfpGCgoXxlPJ7XO7Aj3XQfeRgQ2I-HRmggBp5Jtz56NHOLUDaoE"} ================================================ FILE: genesis_data/genesis_txs/pVZkxPK8F9VFM5lDp0oTBThaw1RvmwG64wIHFChYJKA.json ================================================ 
{"id":"pVZkxPK8F9VFM5lDp0oTBThaw1RvmwG64wIHFChYJKA","last_tx":"","owner":"wRuiCcHABPnZI4-8pFpMhWlIqTrNhpCbYTEH-LqZtIyUTkbwp1oX1CQKl_t_-rk-Kzz-Qb5J0ovI3Z1BY49sx67U60n3atSc35AOIzMc_994aslftWyiEj30Njtrt8fExhOj1Zb1_fdHrvQptEwUZWArLlzVbpTxzvLnr94uNVPcDunUQByb7TNVBiEH1NPTnS17AwM8RlTB2q--Iynd30JcU6NZk9p4a80C-SLWm0IMvUGIvVDtSkieLLlEqvq64JgAFMG8bDsHixnc4N3j1fkw1uEGzjBn62fvM4NOFT2CPLLISvFZAR0jUbeMo-a9NxTAZjFaiG5X0RWevDQhvels0hn47n0dwYC2OeP-mq0GR5quDczyouYWbZvKEgdS8d0FoaNWN2xwXvRoKGE7Ng7t3BrP3XwPTN8LdR6QHXMrh03UIYarTL-zfbOUDEZvNMf5qOIoulkrQYWUHRJJ8rSQ-0csCHcxDIhDkUKUe6aAp585i2Z06baoRrWApTqwTozszpVYGrCvIdtCbPzVrq_qZFRjTsZBHa4hKniVMTQmLktX_bujShhB5Dfi44PvPpLqbuLWR4lZNQzPF9VgCvNHMmey5FAdqzA-D1O9skMHyKILmF3D3TCqwqaD67oat0IhEhClCusaYhGP6rNbox0s329u3ub-LRUeoCzVuPM","tags":[],"target":"","quantity":"0","data":"R29vZCBsdWNrIGZvciB5b3VyIG5pY2UgcHJvamVjdA","reward":"0","signature":"o79EZls4QVSHHzLYmAsT9vHoeA7daZsgdqA6z_MhTgcAyjW64vPN2f23Z_hySUg0kFK_U8U-BJlg-QDmkbLdkiiLoSvjSQ30dg20L5NKpg6mYBLt-30qabBzMtZPO6udPBazv-pbnxt3Fdyg-0-tOx5NPV_72iDY4Cf3bP6jaLU3piqXlNJEFflA0VgJ9XYKqH3gMl1wfMqaRPCGZsuXm0yDFKQv2cS6xhU5fkxHtEs-4WNjqsDh-2YukpWHK3zarHDLBFaaKHLplPQzafW7BRWTRrrAujHr1YpRR1-TnAAk7DzhQAri9u-5ygpHH6TlEfB5_mD011unfRKJcCu-a4PX0G93Cr-1pnKYkwjvoEl_tSJdAhP7nHEGWIoVxeWK7OyzDav9K1YRQLHikF5vEkRF2JfXY4xzOQVfcFCHumqck7T4qB02IkXoMurKALdJN8CTopznDycJCZzkDn9ITi4Woj69v8E8OAWNPEJsJjSCvDussObB_RmQbFdAvSI8WAWg5CbUTXOFcu7fz0EUGneujwlL1zOmAsu7nPrbxKLNk4yfYAKQJkYGHqw3c8pdJGjtBrzP92Iq7qlbrrbmaORLuADYQm_0FokdG9u6Np0BZdEJQj1y47JRClNUB492UnKiyCn9t0mBsY6Tps8uJLkBtlFSFXiMfAAL7v59Cng"} ================================================ FILE: genesis_data/genesis_txs/piTZgtn2oBsWKt09CV8LqH3I3JaVdRjFwjOAJmC-Xp4.json ================================================ 
{"id":"piTZgtn2oBsWKt09CV8LqH3I3JaVdRjFwjOAJmC-Xp4","last_tx":"","owner":"nAqzD2kqyr7dLj9ClnZP0l_rvSUt69Mgcvjj7H1KhijzFQHkQm8EKGCKxTmZ9dwQ1sS5gp6k45UPfQcisDqF-yYav36AYwxtuZ5FPt3MJRSnhVIgUT9M2ZZIQ-9GxPCd1-ZV3J07uKitiMUxWzLZxRg_pSwQtpIOTKI3ILXv4mpcWZ4FHtbrYOUt69RshgW7LWeh1bb8eNanRaOJsFMfd9NikHd-cUXGXZ4RyaVqulEs-D81Q2yZq8it3XhJdTjD57TmpZtTEUa3eQIjDEW0Xk1dZHw8QxYvC3bc9GGMyhPgLWpweLWNFkj1aPS00WBmFid0bp7DP2WwBaS7q05ZGYO1TDNVB2_f9PtI--GahqyU-VO9ahEXzUuFuY56qKsLx0TFKnom6VLL694LJVoCrEyJYf1zEfSnCESNkWzDDzSweyX9fgeRTSGuWfrLF5OKCA6YNITtUoW_Sp10brbOXxbJ1ILxEnVm_IXdrX-e98nVdMo9TJuE-NKhxFdBn-YQVbIJ0QjVuxJzLcda-e8AK6bW4SMlKSvjiCfUXVDO86IogMw1LC3Wq5W0llDIWG6oaXHyQbFMl1Aa2pNzGlPHbWQdWwqJ6vVWqiFd68WB7PQCjxhOZo0mxvpDeZZnq_axqcHFYqhvKj72UUnliounc-ulFroWESxTFZBB9klEu0E","tags":[],"target":"","quantity":"0","data":"SXQncyBpbnRlcmVzdGluZyB0byBtZWV0IHNvbWVib2R5IGF0IHRoaXMgcGxhcmZvcm0","reward":"0","signature":"JiLzkfQVNvjbymuHU8vw-s_CaBAtke5h1HEdoSU7Obn1sLj9IJNM5x_i8WnCBBscvOBSPARcFlgeXGW8Tz81VF0s0DITnJTK-3tPIm8Cmm80lHOoCSWGfHJJrAGoy1d4qOp5PAHbVcIQnT7vsVCP-TJiuifr3PXQtqjpTxplzTbGMvHi_hn_fVi4Z8qRoJWQTnlFlHFf5HxjHOdl4fU62jgXTRB7XBCZ9qcA1pb3rFdKxseQ3vg0mxH1ARJw6cMPZ32M7TNzMVpYjCb4Sg97iqfEZ8tbK2B7LJbYnWH1uqABZJgL3cruVGMbdjVY7BkKBlWxUhvTJ7sIwF9O-_y4XSNSNthjpAe5ADR-7HDKhkjkj-YhztnAxeHC8qAL8pbd-tmB_1_Ytx6tqZ417wUU19b3sDd5zaZTBOLZpbTaA_6me-clYtVGAst9E0L1GdZ_vj5kGZgmAenpDCyfo1CmpZDX8EX2ehUFNFD1LgFlnM3O5Zi6aGxS8sB_X25OG8k34hvZ9R37tkzuCWvuGYZUqJKz-YplWqozgUwdRSRsQYaz3pjkbICGwuLYpTqntnUIRkCvdy7dAU4yHuG2s_OF0VNk_GkVIRG8Yd3ZmGoUohLM8k-dehO-9ejFDTS0HCr0Q2UjPm8RkdpS2AjKUsfOchyIOHARiHwzPVfekKCBUGw"} ================================================ FILE: genesis_data/genesis_txs/puLpw8OIIYCOatImKjpV5s0JWyKFq6bXFMz_qSf6mUA.json ================================================ 
{"id":"puLpw8OIIYCOatImKjpV5s0JWyKFq6bXFMz_qSf6mUA","last_tx":"","owner":"uY6wABun1y-QxAEi7aOCn8ZWZ5-lSmBXdw1Ad-rjPPxOiIzS3IiMKgPrVkGE9ckrjdoU9a-dxtPG9RygK-LbTFrYfFwNHDT5hcZ8eUd8wo3WYg_yUMwGdzq8fPlz3hbXzpBlDvKsuZ9bCJhtSbyxg57jb3KpaYf3Ke4bCy1ATFVhvUcPb5PEDXnp6dC1K4lK4A3zdtbDwWlc7n3jSKjQFIDgsBqslOebaoBvXCfsSdnJ5hdIQqqRCoF6qAgfTIwJ9D_DDaHMSppwXKw4Qbo7vHDh7aZCJMnrZRDAc3WtARtRXGWZ430N7R-ItAG7Es1S1SfVKs4gUhatYNC6f_tKl30jM6lt4z8QQbk1wJKI-sNh88Koutm03clcXqg7EBtoxOC8zqBWJUmvou1VHaCy2Hct85jWycdCMn7e4yIAJKjZzm1VF9suJaHdwnHWHSglR-FOs9dI7GpfeuCGrf6wRRWAKgsfb3Vmml_a8wuRQA_7m2zfz-77mKr1YgWTAfY3jzwQkWkivYfuyj0XA5rQ1wBq60AW5B0dz4mHHBAOQUHG3qreKMBisJRFGWwrANwCzVlvMg53_TZHkzKafh8g1R9FdjACda_pmw1o58rODMFUUxpG_BclWQjjJyT3an5k12Gdw9YHlhc6odcj2VnKZVRMl0qG5rkAZzOtxpbTEgc","tags":[],"target":"","quantity":"0","data":"T09IIFdFRQ","reward":"0","signature":"nIexemy4BbajYa5cbEVENtFW1jJBppP3yTKNoVfuNdAN9uvweY8CdJWUUM9wHA1CMMV26cBisuDca9qzBnidWG98qbFrmPwTnzkzC7t5DtzBCWeovJ53SEI49B9kLPhsPsUflCgtZpHvpZycFFwBbvcgtNJPngSZ90-btRy7Gp2DGmb58HPZ7NgXiNU7Oz7EZM36kTjSGN0IyYL6soUNSwbxYHHLQFfRUXrHo3gRf5OD730aLfj1XqI-OA643zgj736WcaB-pj_xUxpRyBWLDe3hag1Dk2yfe1wVly8eI_k7W6UdL1z65ggflxei-iz5lTt73AKlo4WUSVmI1TuLHOMbiPprdAvPKhiheyYvixKGIQ0kUxXYXRMjD_F-1CAj90oKj8gM04qG2VPC5OAV6sG6pk-eQzXbqmzrAXlMbgappyD7f-qRmqzLkcDDc10wl-M2H8-GkqAhoumhkoMJycNcSAWs-Q6YZQE7VStIuHPpl5ZJ_YoOIJS6OYP8bPSdvG7XvAluMZjOJa5tezOLw04IAp05mRzylHyBzZU_PcoLuqCS-62BHouxTS1BW2hln6XLVYVxaFW4rSALOmj0MrSuSwERCLRWkqRJ0732dMaTWMX5vHwmMYhw3KQER6tHmA_qXsDXq_KKwNwdS4ZsjEvGrbdiMP7Tv8ckvJuKVe8"} ================================================ FILE: genesis_data/genesis_txs/qU2Gu35-s9wMH1N4g_zMYKCqIStYzBZmRx0XlcIpjyk.json ================================================ 
{"id":"qU2Gu35-s9wMH1N4g_zMYKCqIStYzBZmRx0XlcIpjyk","last_tx":"","owner":"veEVwP9QqBfXV5b7hI4gtMHWMX63VkVilpoiIm5JJWYwiGphuxrzTGCeceTLHt8pTj2LEDW65nkDD-OcGvPd4jX5pb9ivkotxtKynbYfFC_ZgxMclBD-J6kl8fj1PATaGwLcdl7c2dL0lpHVXvud81pODL6u1AMC9CiGwpIDTwxgfP_wQIBbniwgM4RpRsu9-RThtwNfBlFN7wQkJagrRFQRRMMF1zKSy3gYglvT0tF_S_15ekx0R1CoIrpyCSSkDilGmAiE7SVZhbP29FUN4abIs6BeNsNXsiTDTge-6BF-L3FwHtDbsY8caBC95ctcBCd1t1sC_oUBccwBVFnOn4j3upgExNG-qeU5Ofw412V1e-x5VGIpF3Psu2ShQFmx6DEsmpSKE65BQX_Fg2jRniMeMemjcjvUT3LV3MOJUHIQN4rE8Eq8zvNlC4z155y6Fk_Klczt5tUFS7toCVS_fJBIfwsW9PNBreDWPVcoIQfPgtk0GDcZg7NyrHzJhUwBELDiWcIGOhA6ioGv3qStQ5CBblPkYEJQjXCcMBkc1T_4To2fghyrgJn7XIopVdEdPSGw_8rDin7MZVxPtoQhkLkuaUQKqbJzCga6UuGzpZJa2D27kZnDdy_uNIGhTq-29INsmW7yyC8kpICyVhxKhX7TJxbnh2ij5ATzSFGfnwk","tags":[],"target":"","quantity":"0","data":"SSBsb3ZlIHlvdQ","reward":"0","signature":"WLwbL-X1zguUYnBC_o0WN0vMZnw3fWBQkHBq6PQwiLTe_VjciW_9igGeqAyUZi0aLFkL5ScR0Ur1_ZblxiLKxj-awONTJn8Ed6iig363PTALNjfF5Szp3O1StVk0JLqFDs1rE3HXfeuLqd06X1u7SQpKWqogsILp0cSyCysnMZzlIaJedrU5nl58rxFz-224YL3BIaE0jLRsISGp0N_5NrQtGesC5XVPR3tEFoRDICYDgqdgUiuf6eE0_nug7v-U8Iye-FxzC1jGpt_LabHv64wnmupjZUT_5PL_7dpNfHaje9FGpnPewjcRqw8YjNQS-2iJqbLAN-AgFzr8SIuuy6lUvPFfsmDQIPvEQitPDGKvt9EEbYE0Kc0eXPTGUBS_yTFYgrHjWeeDoNjk-86G5tsliVpZqlUXb5eTqsiF6w0PlSZIYRFj3WhNOaV8bPUfSaiWvg7ZG01E09P39gS3C4nCnXpBfgAeQq6sYXQPHLMkAeL5ILB8BNyXeFkOizsEBCdfQC-L5_-XYTOfll_f0CEPiY8dUTFdJVRkq4ttHHtyvwHom61wo-Bg5ZV07CK4bT2mNzoX4ps9lxNYBRUN2JOAocTUURPguPL5V9LJ_-EfI3e-yad8tikJ74_-pay8-UwhN3Zo3saZFq_T9dYfRsZWyqzf7TC0IMjYCbEMaus"} ================================================ FILE: genesis_data/genesis_txs/qX9u_AprdhyXAPGfh3C94x9AbxwWx9nJSs7g8FSwITM.json ================================================ 
{"id":"qX9u_AprdhyXAPGfh3C94x9AbxwWx9nJSs7g8FSwITM","last_tx":"","owner":"lc4FcGs7g7-VwXe4i7ZyonjFN9k_JvM35oJvb0K5bsANeJwrZiS5FZFwzWcKeweVOD5AK3mjB2mwH6a20BELZc7eP8ItdjWA3T9t2hoDcZhaTDAEzGHqemHxbDFlOIbiVUasdfgZktuzWjyxaVNbpFDauRwgk3kuRdkvGVhU5c4JvNNcyOOsnbzSA9cTMeOcQShrWNBR26OfwWjIikINhK4cIxHOKryf1b9e_MoVB25a7mA9kv18GJV6xG0iKfUmY_GW1dT4aGtqRN6ygzK6AWKkzsNXfFEsJ3MKCbRWhrqOiyVmfx_3qu35sEdUGFOSKO-An8U5D2cQrY9lQEgKne00fwtx6Mbgpy1puFkHsOxUVo3AOodjmKp-45-dNnE82XFnVnrcSBlGo7NY-w3PSSYfYhGwCeV0AVX4KeGTMBsqWB3AnN5DiYPMLtfokuULVPtoq-R4dd-9yKKL-IXstDPR0gqyZCUSCUh0yYgRESVXt5X_uWEg4F1u8bPWlYltbngB1ewxs2d6Rfm4jdz2SjOPmFgH99Igh11LXaTBRnP7WrIZsd6-b5V-tsWRa3omIUqXlP91hD2aLsl2NbI3fITv9jPZk1J8gOv5aJRo_KODfpZx9K64hxdvvvWhgJ_mrmr5J-gFTHYdwbynBLWYi5jMMGGrRCK3OGArSlnKpqU","tags":[],"target":"","quantity":"0","data":"","reward":"0","signature":"crrljaRVMZPYnznrEhfM1_P-_FFhbi0YeRVYsSJu1Cb7WKnlG8LPlld5GtJmjy6JIy4FiJjriJfwUliHYDMvQRGgJR5ocAMEtyfrtYkTzfzqVIV7HXezVMpFmrZTDbmg1fp0S5E6ZT0Js_pTi2Kp1sZAfLTW6s-B-BvvJD0ByGTHHVQAGd0-H-EqjIeNpebIZxXWdBv4xDTxwhjJDF-cOrzuAu8am8_v_oET2Hg4XT3XYMKq_qUO_ml25kZFLXjogJufeC_TYUVwHCJPhh2ypxdzcuSUB09C29Pf6J4-YC8hQJ4x5E0CyAGKOMvwYSGE6TK4ZdWbBINZ6QYh6nYNjhO5sAZPTEdVu4PAJL5GeyeqmDwv2yOAwiGhO9jIED4Z5UcKBVOE8e9qFz_L5mTi4bO3Dnae7KS7HpKC2qirlY6y_Te9HkjdBDutv3KJZxla617QoxWOinUN6oYc4wYKLrdaa-CSKx2l-igs7M3jV35QXiltNpc_UtmnaV98FEgrUEL1wjM1seU1AGJRoyv_M_kzO_gCR-lCAn40BLmL0DKJ9gAL0N5gEwcSNWI_g9V0wPACxPOEovZ-ynNjHnB-fhqbbE_9MDRd4YmPv_8WHBLAEGWmOVknYIFtV1ROe8ip-s_tGY-lZT2zeV_2tD5eWG2FZXqeryTLSqHjGUIZV9g"} ================================================ FILE: genesis_data/genesis_txs/qyMWe-VUOzHXkQviMhNS0wJI_27nvCgDY9iiKANk-lI.json ================================================ 
{"id":"qyMWe-VUOzHXkQviMhNS0wJI_27nvCgDY9iiKANk-lI","last_tx":"","owner":"4Ohsnv784deAFv0h4Xmm-LBQYfWwmS5w2uBINF6heOPxbpYEZeItF-fJvQKtRuW2_cbUSII6QYJ5tdhEgujRuMm0ZjEHb_8agGcpNO9rIC0HwD6pU-ELl90S2m8oyeA39SqxaHMYIfK8UxNagERDEqAnPg69Dyk0PcBSijsOP6UY_hS3720stZCQEi3V-zdYTtt8D1ZfpSzj3dkfELukBnpfgbCubX3bqoWdnTqS0XVhpKBi_OgtCGSz9h-ulZBlMcOxgLCKFdYlJ86IMCBrmF0v6pYrRG5s5xXkqVltTxYib4scIPoWaOOfZanz5BjpuYnFlccTO7uuU3mSpW7R1PuNbe_K41CTGSaMWMjkU2hcenyf9Pe2Tsfpctw7lf80VdP3VwV-GmJAQDVl57Q99sL0XKBiNxG6wfY54K2y8wP7qXHA0orDMGZ_9tiPiL45atBZdSKX1FB1LlWc64yZx84SwemOzmTwK9JmrR1VmofT8qmJxku_L2zfDBeJlW4LHXvSayQY7lBYbYhR3hdWKRk2d6G2_eYdOcbM-ALQtCbp5YQZIUEhJQIslNSwxJ77bWn7a-lwi3RkSKDXFDJe6MXQgAET-2WJBT7F_VfNhODlxa42qMwlRyjHhrfVZkDk_XVD3wNM0OIzCPtjAKtfvtre484V7MEz4Jp382Ln9zE","tags":[],"target":"","quantity":"0","data":"SnVsaW8gU2FhdmVkcmE","reward":"0","signature":"bTI1-oRqbbAMhpoN6-5dn5DGiJbNUgiQmXeZZUCFSXbQtlSGRVRip5MvprVamYEYbLFGbZzBY_B54v4D460Jg2NlCt1GGxXsmb6lE271DtlZsiS27pmX6UGN5qDLg-iS3v8RxPWc_ycxtbl2C24lkrv1nVSGio8X9iY9FkqLDLcDXUNXBch2iR4Qp6p9vUph2XcQLR1VBN__Qivy8dQ6V2uXn6WMXtIQyIeLI7XUnhIOKO9HCA9T6kTy7gzjKgXPmwMR_gBbk6Ohc61KIHAoJI_EMAPYI4QtBpaARAIYNzrp65woma15JzXgzLwo9KRBreevwLn5nO8USD_B8ZKoxzGqB13H1Hqw5McWJpAAUxrTBtJTqeyYWfstVTxV0ogTdQ2BNwfeG-vcwMELGkqIbRlJr8h__CNkJ6eQhlWrAsNkuI48QAQBeZgM_5_4nRphi6XlUOY4dmU64nKLD_IOjLpSHPogSHX-BA82y_uus3uRiUogOU0sPTLzLZZpRVtFjO7rOdhtJc-WfQutfE_DvQAdRaJ1ZJZkiNqipU0L3dHJvZbwDk9LvmZ1153SU_feosbKrQVLE0eR8jNs_Dg-hS4chpBMkj1jziWVsPm1JY9wscOPhJHfIHBXQIeaU7aQaC2v-rwXJC5oX7O2t5elElK_Bf7M8SwF49c-MgpC5lQ"} ================================================ FILE: genesis_data/genesis_txs/r8Yq7Lvx0FjFYyXBLn29UM5Evv4AtGLZ00LCtE_hC60.json ================================================ 
{"id":"r8Yq7Lvx0FjFYyXBLn29UM5Evv4AtGLZ00LCtE_hC60","last_tx":"","owner":"x45U9G-JJYbFNiy51QMlXs0fYg70Oc1dUk7hIJBqecDpxierZjRZUUXk_Q0EmUZXBHbsWsg_PPN8SzYvXpvcR5hbXzGhBblMTzYijChhIk1QVP6YccVvWlt0nyC-Wl2HZGzsgk2jmHqJj7Ptm2qFuGkRufq_lyyHizq8KwythpJ8dHIVXXd4I2EguJsP39-b_WLTJFrMIOuCD7RLS5nVfAM_vjOd3__i9299SFkrpTHDB_8G_dgveHOqVg53Yt_5zRqTlMDjbeQkGEAlbKF2M69wRSEa4umrVDtm8J34_wLfCkrusB09TApMD5qQ7j-v8GVA1dN5DhVAIk72pg9a-o411ZWdY2A9FUMii3WTuzjQIt78p54gzllPfk7jpwsrarC5xuaxjHYKAsOIRCn2Wl_2rO4yzibWWQjksnL4AgNj6d3x4v7ovhpj4mL5eiJL-WKc6DT75uEf8Qkz5_83ODIGWDUtXWuQ8L1RKgOdB7Hes2aeXEC0aPTk_m0GF6T48lzFZXfvhCFUXkeDq301ZQhLqSrZZ0KNlc7GCY5NUO4MF0mfP6SsLzujRRHLkW_Qpzjn53nYsKYw9b-FXnMh8ohrg4h96n4VYby1vTzXNjN5sGc5Ww0jOBL8EmU7w-fFM3Tgbc-SXAmN4SAxH4N_swfm648uVHDoou1tlJrx6as","tags":[],"target":"","quantity":"0","data":"Rm9yIGV2ZXJ5b25lJ3MgZnV0dXJlLiAgTWF5IHdlIGFsbCBsaXZlIGEgcGVhY2VmdWwgYW5kIHByb3NwZXJvdXMgbGlmZSEs","reward":"0","signature":"WVScGGywvRCrQJSsnD0knVF2lPXaotQg2NNS3s58zht5s5C3Arn3Y9mI2HMWF5jiA7NJytMRSMaEKGkFko3a5ZWDyHMnwcDOy4oX53EfpDwd_MXcvUWPr9CTdmWxMjxmAQ8whtEuoqnpZ640rI3_AonNltXF-xtmd_Yl4dCGRJfjRX6cFMt5Buj8jdEWRcicEmKVGD4LTm_RpS-VNvcBmvZ49GbhES73u9NqQxqshlfuUX4Xqjfg_VLw7PbyD6r3DiISknZghyDn7h2E9akdsApB1rMcWIVFtxtNa9yyhb1LpgfOcOM2jL1P9qhx0HSJJh4Y8bpPUVJ3ernY1nVeW1P1Q2ZkXdh-cF1BzuFzlqQbKj70nM3yLBim7hUbZqSCPmYyuQwtvGHdizIHNbm4GB7PgURq3HH_s6n1unKcheP82dXnUOAdL9PEMBG-e07A8oSgp2M28TxaQBYiuCCewrnZzGLh9LpkLbap5P99dvy_4jeTC10KsEU_GF9EMy3bM1WWzP6-hDYzKBxOIj-fD1Jm-lVOA5lz6kqgFxm38rXuT9NshRc9JWn3QoH4wx8QvUknOGwVtbHTx9bl768eSQdaLogyxpoMHymIJHxOSZaCiZQk1_x08WvgC38zzycDKk9WBT0v0YWnFfkaiGu_liaUpNJ5foKDDs15kW81hUk"} ================================================ FILE: genesis_data/genesis_txs/rC7TOXwflo7w9Ky0ljTYlzdbR0A3g2GVRbRJbIIuBfY.json ================================================ 
{"id":"rC7TOXwflo7w9Ky0ljTYlzdbR0A3g2GVRbRJbIIuBfY","last_tx":"","owner":"1RwUoV2FwWW5AYFARemfgnWWJBMwSZiy_w0lCdI2Tsoo1hdcUBtY3g2wjGht6rpzI2WsD4_GkOXqXh-N4nquIF2x2BQxBLP9mjvfLwoVCX5ibbv_Bw1SkelsFEksqt986DB7UTF1LuVk6KLfYUIVyKwTM0l7tnximWZ58-qTmpHQtLxF-Kjo5Ui9GRfpbDZU6y3fPcNzmL3KjL8RoTn2GKHol_7ULci88F68BDgWzxCSAyJSIuSOXrCCAWuxF6qKTPOj2jmL4W4_aZUirpKiKQPF_IfSP6NTeQwNoN1-7DwFw-4NOfgurEnZVRs1xHVdRvRgyLBhOZGR3gFeAYuQhqe2XvTtW3k7xpUxhuSWVR8YBiVfhQzSQExCLSg4nG0LncOKps6H4p2xXT3AcfKYGslNZbz5aCOEe1tnPu8qVBEx5HFKsYPMidLjnHi9X1WpJr8v_fLM2Moduz9Tvm6JyeTcuXH6CfmzaeO2f0SbXSUyHqPTerE0LJMj1nIRXqKeZZnOwhsOkpbBSGEFZRILPRB-C_XwtAVa6LxNw1WwzDuZBeRtrdWdB23yMyv0nyGZ9i8z0urt1s88pNcskJr-s3vU1U-nNnQrYtWRHDwUkws5oY2VfE9WIma_0y-v0ebqR7JuyPr-7NVwIiyNzyKJ4oITAH2kMoiJR_1Yt-ic7K0","tags":[],"target":"","quantity":"0","data":"JiM2NTUzMzsmIzY1NTMzO--_ve-_vSAmIzY1NTMzO--_veuztSYjNjU1MzM777-97J6QISA","reward":"0","signature":"nk5-udQjt5ZjBo0MWgAugijICJXH3NS1b6FXR7869H97hJORCslP-t_QKhXovriDdgFNjBZcjp7WX6opZHbMJMpY28R45bkUIVTcH-wPWEZd0uSQu88aunm5gr9DKDtnmIMeHUyzxpXLqRGt57nhOeOnvPI_YxRRUJMLAkYOKv9A8Oy9gG5jVStxJZJcY3cgtgEo0bvqZJIGuOdpw-1WY7dt5CAYkimWwjXg3lo-he-hwAC4NqazW8YT2Jh_Huo_bdS8MPEYS-7vXQdNPZvlRwEhmOVhmr0UsYrBjYle94nbR2RW4VXe4f63V0sHCQwvalx2IfhWKHNd5GVPinMerE8KHx0LWAhmG9B4H5c3NkfGJojEDq-GKKXHPaXwTbc4dFf_SYSyyquKO-pLLTFJ2baj_0RUNsDyk9WujYa52YTrMbTYUwOV5MZBVMFur0UMzqPF39i1V2Rlecj1ikgsbtjn8ewcVhRvJRCjCuxkWkjXinHGOU5lxn05v_QTdPVNQU7Y1QHUeKP0jHu0TlMCqW9-uGVE_AuzsFGb4_Q1FyrPiscpvWyxaA98EZMBXGK4CaZ6cxkLCB8RfDJXmvQuoohl4q98wOWecBHZvfpeKc7oKW6RibC45ACHEM8uvDKulhc6BLR20z6jFZ0CxmW-eUkTwc8_MAw1dZ01IYfzgSg"} ================================================ FILE: genesis_data/genesis_txs/rRoy9jsUZ-Y10NIBksSD3P4HcVDfZheloItTTnc8_ZQ.json ================================================ 
{"id":"rRoy9jsUZ-Y10NIBksSD3P4HcVDfZheloItTTnc8_ZQ","last_tx":"","owner":"r37VKqMJgEJoiHsv8hD0wClL2AVWZGontCtXk0qucRNjB5WboQJFOQOGHB47RlCwxrWeWoljH3RuU7WS4Qm5ZI-BL4ZpvCARNMag2M03HrFUHiLHx6CjrT0XjtNP8EMudZ_pBMFtNNJMn3jFo-E9Wil7QyhC7mc4PKORdhxUpyF4KyJ2rzzERjvmqYDZnM5GpeDSW4Hqr5UbMMf8Q_4Pe68JrEILAVhsfGRR_uqh6xoMtUCgc1k57x-IcXFabwq3XsDP4tjb6Mfvkf6xZQsFDZCNXX8YCrfJIKm2mDYBUbTx9_cQtWgiuKRvk7nYe6RDhEwhPkpj6rHDlTonOMFJFjRL1locFeQyiFodwk9LFJ8peGOhZJ_g5VGEnAgRhyaOMc9LMR2fRM-FKTGSUFcHhuBYySn0VM3a9LuDBeUHDGVkHgUayOUhKZcDzfQ8hlK9CHadwO6AGM6eaRvAr2yPCjyozZuZUDrdvuC_SJDfAEkNQf2n01PJUr5HQZn2JOE2XKhTCX5zioHXn3Xp00_rM15cp6W-ol-opvccj76WXdJzuJfk4RakzI6eUGPZDhqOnpOe3u-N93Y04MnEqvsawNp_79wvsjLZaQ0OyTNYKBOx9mO5WzHF4R9kQ_iTAJEfKlw_1W4GuBduL4eRjaS-e5VUCOrccdr0QnlIHk6id20","tags":[],"target":"","quantity":"0","data":"V2lsbGluZyB0byBiZXQgb24gdGhpcyBiZWNvbWluZyBodWdlISE","reward":"0","signature":"MaickTyFgdf1aAB4Ko1kICwU4_fWT0CY-j736IcTLPkq63o3_15mYuWKZQI2ViB3FkSn824eLu8uU0xBeYcco5u7_f-4XhLaBHECW1SXXpnj_2wi2Xu6vudSubpfT2AEsrFur8khv4HPdnMta6bmgYRXg-6YkeR5GkzzsJcwCGHjpYH3rx0UHBSAbI1fnFeAVrvyCnfI3jlcaOfFBmFfIIZaOaV10R6PjihKP-6rThGINvIjrpp5nPBidqQFTj-QWL9_5HSpPYp3V-AfNM6mdqxzrWu58h25yXzcOPYqEvX-0iEjPZtlGhOaILDbb6sjXAWTCCovhrMkc5kV11xR0fbt68QU9NJ2Tf8tRGxJH82xhEvC3H_Ys7N-H3g7wkgOFUUAlXdAjMdatRGqpO5CV3fmNrNPEnjPdUwYQXmnAZ9dOEL0UOO2bD4GCNaZ0QsqvvaB5CC0eC_GZbN8BYXu8oLxK7G6GJ5eCS78dbCks5Tw2SDtzK44UYaHw4KBx5116hJKzb_gdcxZEC_hykgZ4TIek1RRpIMUshzu-P345wDSI-FyrTtcIDdpH7LVPhSI9Nv2jrR4JgcYEUGzdRjpAUf-LSQVnb3yNjcW-QnJejJPmOWX1gF6X-SNGHFwJuTwtIA9Pqbb9Vd-n6wPk7w6UBbf4fPeIvrUsPOJ5iVnECg"} ================================================ FILE: genesis_data/genesis_txs/rTY6dpq4KEhZtB-5moP1mWN1CtrTKurv7QSY8wAN758.json ================================================ 
{"id":"rTY6dpq4KEhZtB-5moP1mWN1CtrTKurv7QSY8wAN758","last_tx":"","owner":"nQAioD7X1ciSt8o9_WPYzsmYrAN6R4u87Du3-WLXaqUStdqRlnc7mv2y_7jI9lmdMb31Afd4xgRbl9Qcu5EJTHGTAoWZiAllkLkRouCd9WQdTDgG7oFI30IWEZYr0KHlGBq98nh-vRrp_CUrW5GtQ9Bt8zJHzadZR6GvTpfLExf2IrQ5XtWQvfWyg8IUoMPIe-g4QNt3mFsXZ-IMrQRMTaa-JmwEnYHEW-HBDWD9xB_8_88ZYcyQ46GOgfdIxUI9gpRcmDfdwqoC44mgNLa8NqAZFKEXSemkQW-Fr3w4_GO3dPwY1ola9zN_6mGXTX5ibEp6fT2-hT8iXujfyUvmHfj9XuNz95wFhgix2-MYhtvPwC819IuML73F-oEsgxRo2bprREFI6jPZEXxQNEHeI4GKBuaGDdL92SmxbuJKeYg1GCiLlf3IGfsd5ZRbZTBY7SncJVGtusPPL1sRvsY3Qgi2Rzck5koQ6qgYKzgZZf_FB9iVlZmXuFoQ9xLafa4swZUVeOVVyD2nn4-8PIZO8O_SamhfZw7GTfF98xaX8ABfCYedD4Y1ABfNr835FQgJp6PhtwiPFT0colSQqWILBJusiR0sZaG4uS20SJjwqPoOV9T1jCY1idD8RGbjP0H6DbNMQ1lMTOX2DZEtoj2EoYId2b1mFAiA7WS5H7Qxkic","tags":[],"target":"","quantity":"0","data":"VGFraW5vIFl1bWlrbyw","reward":"0","signature":"Kwm71mm0-gFtdniqrXRo1ecyJQuew0hXIojh-cukc-9e9uTUDcRwiO8LAppXnzfWkg23CqyfUslF7nx9z571rjJW2iSAOorNosn1CAy3r5o-wYafEkWvkEZ_Nwu6yTGijHG-VFoDA3if14kq43zmvYFEFA0QzPc8cCiWs1Q4xGGbZvX1kLOCi4vJtbB31dWZlzTBLT58WP8fuZIFZnEDr8Yyhhg56TCXqEIfHTnf72kMbSgLlY6UCAFh1EjW2giDrgPzScOzxEkJlO2E4j3jNFob4xOLGMaLA9KFZWO_-oa8XyDGKDtzonTBNYZnWdG31LdwCM2hZR7fJYEAjOjs133YuH86cdHNllXfUrnYcW7Ro8zb3lsWZjoH1gQdzzlOPnWmbDwvOzqsWJHVOqxKnDx8hkGr8XTKefwSqfP3-a8j5Zjgoyz9plB_13vl8Ymp5tNEhDnDFfd6qhhyu2uOlbrxRfefIUbZfqqu0LA209pLhMd2LZnfcvaqbkLORjztPdkPx8w4Fy70WtGbWeFNkEIW2Ucz0zeg9XTnqMfeEC28mfXtwLZ9aMZXw8nUYnIm6hYMbIb35-f7DB_tCagL-AVkpAKJ9ooLvGGitxe-OYaHXxt2m0IQ3HVHRjUKhG8uYeg2HRx7iJpkHk6si-dLctPqe2o-NzJSw2IpRLIkgKY"} ================================================ FILE: genesis_data/genesis_txs/rvbM0iB1HJ1YadedIDWjJ95J2XBHWwPAJD4VfpdQpxQ.json ================================================ 
{"id":"rvbM0iB1HJ1YadedIDWjJ95J2XBHWwPAJD4VfpdQpxQ","last_tx":"","owner":"vvZf-lOXuNWvqcvKzmtpuU2M7gJ_bsblXw32P_k80z6a9CKf0jAp4gb1ww-CVHOcI1GeeEe3iKSEvMGLAr_Y7bwzvHUen97Y5yLm04-clTeAD5vTkr3Kh3eGdS0rXeZhL3l_9W0TPdhKF9bnUehTLNCmEhh7ki7LI4Od4fBYr5v9AT4CMBCCXIqFcvjmg-0VyNpyefxYky6q-g09a5mBlsAPp2QjLg6ehimMSEJ_6QW3PrD8BaTtAo_E90W5w9inW6UKIREN9wtEUYRo9-sCkz8YESwSd3b_rYKmLNyK-VBU-fvyTkeQkt4errklNxGmSxQ17fkdyu487nr4Tb2fGvGKoBwhCKkC3cRdQ2_kPRaMN72QzgF656ihMbaBSEGbCJQ_as5PpiB0u8jlKKhgt1LFE-PsMgt_SNKNjFL5wTTS5vLcKbyva_PGKk8EgdH9YsNVwQ4cfUNoBXIbT7Z6gkXbpLjh1jnc1JT2v4kr6yLpQw2SzIllcUYPG64V3lhGnNdNNOnluNrFqZi2vurpSQys9yUIh8F6Wy7zk9KwEiJF8nxRV_EEiljk-a31W_54sswylkzySpiiIK45K-Pa7h9eqO6B-EdRbNQkDsjCdok-CbhqZqnxLY03f4zHOnEpleouOYtKTfCfajwID0IO8oIq9xgYw4d6fyswsCtWGQs","tags":[],"target":"","quantity":"0","data":"Sm9uYWg","reward":"0","signature":"W8N5avfsMy5n97KGdSieqM61SaGvD_SnaStVQ49O8USbAfSv9VbjV0EKjZmPq5KjbNSDw1p_OKz_tZlaDM58tjIBkejuGucbEwLlaJJvwQT8YgKFC4uaxDJ-ZXgcS3NIRiCGGz4jR2IkaUR7KnSCqt4s_6HwtM8j-O8kzqX7J3fqnsYTssWBAkZedwW--RS9is4Q2kSwJQyl7kn8Z8w3QN5x-c7A0L8mJUASJJFXxButgQmGTdjyuZfTucbm_7-_PvXRnC5erqDaQUUC7lbkbJh_8EBJDMsrGANVmelqmLcrKMptww9I1VQC-h4mrtRdvVf4rlw_lnMsBC1wNPOy1Eg_ZIO1KZeiGBKbtxR18b_OLiZkglHqmFRTvJVfEpKmAzGri53j3ogKd8z3esXpzNqYgoyUgq2tFZLio4JYzWhbOya3Kpy3RjcqYUAR2JMtlLCuGkNKZvpVrcHBwYLoczt0iy_G8pa8sYuVzHbcx0TlsMePN4dU83r7soxCXWLLFhkB0znj6RcV7KRCphvwo0XtTl519cB6lPj6TY0xNaiWUshISkeDQ2phD5JkfdA5dlHrpnG1MktkA8E95simfPJRuXfxnv4SI5DadiLHfFvcxj4kcsszuB1evwjOnp_S_tbbWR6A8B-ngAttR7RX4Vo8qTsYJtFvXXw6MpalVbY"} ================================================ FILE: genesis_data/genesis_txs/sB51Zz1HRjpwrWFhW6ZE2E-n5hl3joqxPQgnMCLX4ZM.json ================================================ 
{"id":"sB51Zz1HRjpwrWFhW6ZE2E-n5hl3joqxPQgnMCLX4ZM","last_tx":"","owner":"nsLQqPD-0m1Z2eEeIEEertS1Nu-ZB4IEZ5xkVasXFQsOK9knMDAeecmX_9gekZ135KFExd3akQr3gezmBno4aN_rFsJL6_Hiz9qi0oiIt9OnHvuClmTsLl9il-Sy_2ZOE01Pb1BGsbeZNGKbSuTrw8EYsGqipAb44RH1HWlomxJi3EMvwNxl8fOB5ryWxxlXUPt0mg5St7xqVOkNv5F-5A9NlNZkCrHpcnn0Idc9vdhb1Nx9yLQHOVVStR9dYhEaiU1OQvwqkPEcAoyBBAX2PTgAqdprvYcW1YAXgEaySZHSgqUL3mzmGkqFqWb0A1GQ3klt-v3W55rARAcWHsWG2DAQrQizlEo3bu1rg3IqzNEK0DrSVtTKtfrz9kJQwzSPTWzCHypG2mw21pQWzczEcO-tqwH2UPq2aCjUWwyYEEX7l1kFnD9bTTPyo04-DhrmY89cVl25PmsFHGnQTq6pBtvlkoz7SBcgpcJ9-VY5BCp6W5pP2PsWIG1CrVGmfXeqXNe707g7op_q0VvuYGZ7EPx48FvSEUGDZv0CNXsPp_kKc8_LFtx9E-GHZ-CQygiK7lLFOEIYKWbZEON0KZexIVldj0ANSlkUN4QF4Jaz4PUKoXWgrW57moTm8GvN2zuFm9qdBDlNCreCfhwbQzCw8OpRY14UTlWY1fs968K6qQk","tags":[],"target":"","quantity":"0","data":"SGVsbG8gZnV0dXJlIQ","reward":"0","signature":"Y3lDUvDYgMvcFtXkBT0ZNtH8t9pL52ZXdUKXbSzP3jvwaKxCnAL_Pdar_t4iIzedE0h-wk-oAkQ6_w8worn4J2qlMhe-BtcmMiTNw1Z2ET8lDks9q48OUVHAOzmdJlnSCdbIfuMJHfmN_PHSfrgVzJnTlWSx5h-c4fwPEOqnUFgj3xXRec0Ofw3k3Sq_La9ak8vD-5YidxrWEngH2j5nR_OVVWH9ft6YGhdJ4V8jTfV5aNxTZj3C0kq8ftlrAiAP44hN6MC9Ltw7sooP9rOMPV3uLMB7ORtur5z3PG7YUpGiyA3M-7D4HuJp9UXB0E0OaZ-ayK2lUHW4vBFEOSgwuX0t7265uZiU21k_UchrHZ_llkOqLGej_9tVw5fmaAoZk7_TW23krNBK1cOwyQ3z1brvic2tQOzPWhXy2IXFrcCw_jyHHvLS_Juw8Wla1BuPp0FWX4pBRqFccs30WG3SbHWa6sj6EH1F6qknQ0ylLtBtI29gGu9ZKZXYDNV-BjATNa9xWh1jXPTJNIJBkQsq7vojK_P2bVyQcdNfMMA8Uwze9GFRBv6zkEFndzPHOxOsjf99ySxZIc1q6nEW_ULzavekamKmS04jZ_hUAn2hd8wgml0mJJnlspxfvdjI8yXto1EwdmrTvENiJ5_XXNG0IvuN31AEvX6Fq7_ckIjmu2E"} ================================================ FILE: genesis_data/genesis_txs/sfAY_3fQ41LahxW45rXfndEzeHD1eeWJgI9ZaM3slFU.json ================================================ 
{"id":"sfAY_3fQ41LahxW45rXfndEzeHD1eeWJgI9ZaM3slFU","last_tx":"","owner":"sFo7nuc1VXOqOm9eaWYbQTPMG4pSwvVFUPnrz98LpKMeAXCF4lBzEywqTtEjH0UoRFXZqSRAyjtzNA9feouol5Z_4nj3hGmOW4hN1mgPAI3sqoe8NTrkL309Fk92K_s-vv2pzO39umytyDmUQtU-aA0JjPjqVlJGCulmeljaJhgWGj3OCnxu25JXckENQ3dAsHWMR0lAEFKAubCDeX4a1qnOPsvaVUOi9KJykiRRhrfw6nBNwgmdSdQT3wSmD-nl8GZHW_NIs2vYmPi83x_478jzjVEf9MYJQvHeA2_qnGMDJ0IIUZ3DMpmNtvYfc3ltCe6loJtvGnwJyy5OoDKGpAEy0rPoD0QiOnBVI2oh5MM4iAfbFk9OvG9wbzqppO1bV3e3MGsL43Ma9QJxUgKtMcJ-ZIk-2VyJe2duhYp0suIPhX-uO6lg95kWSOzH43qcQQEEnBNoIUHAKIYUCtVPYfBJlCbxXwCvl45NnPIkICQZjawdrqx7FzdzXMGNgfbqhzQV_KoOUoRtu97k9MlvfmxKgIIUjIguJtIIHmeSuWjoOezZMFo99nV9TfGnKT2Q2YwSsbGuvyxgAhkL4AZ8hywnN7j3HHMb-Ytem_406-7dTSX6Z_GXOb5o-Zd_NjcjSADs6UiRBlOUqc7z4dDKb_qqpR7imT9xxGHrNEWzL88","tags":[],"target":"","quantity":"0","data":"VG8gbXkgbG92aW5nIHdpZmUgQ2FyYQ","reward":"0","signature":"djkZuvcpCHherUC492MnjwmKhPZeQRvlDr0zTT1w4yc0UqJohNKHL9RH14iXeDoEC8lqw46LT1TYeLdljn8Gt57nVKFk-QXZ2CKQ_sWW8rFtil2xlg49XVhVvMh8JsgswLUrywrCHNuXweN2e2dHzUxLoI1ctRum7EW7hJN9JH-fgUXeBForiUlTggxcyhAxo3qAyuXVVtxNuFWR0toT1UgIO1nOIAOCoB4GihPDHhz8hCpjnrU6H_IgcsmQ8Pzb02HtiaxvrX3rDldyd4hTpKDmCa-jdU0-bKwGbMPYrUt46lKfBW8lldhpiaVKLgi5rmdRd3hz3M0n8xfQzetaCVAi2fWaOPkY6A5GZXDwG7BT-1OuP-v97StL09mqfItH_GpU3eP2RU5NRepl6PI8s51QkbtGO7Jz4sMxa9pvU8ZG7rFU55I35va77IHeGTvYE-lzPqHw8NRquuuLyyWhW7EgtnL6XcOiGpCm62WOUS3E_WkmP9CV3XquU0ox41hIc9jKPl1NApdqwGZHPR5eVzLFltXYlncOe-042bL347e14FxBh380CBJgCIZmRnFvNQl448Bbowadq1PSoGBGc7t5cODHyVMcE4hH_rOYlY6RlWCwK-W3CNvYJUZQnOo5eP7BwC9SLxDAPxf2KkV6bDMvZl5x-POI_BHbHwAnhfQ"} ================================================ FILE: genesis_data/genesis_txs/snWRgSI3vlTOy3RRkuNckM-ws-5lpFiPMpYlLx_zPyk.json ================================================ 
{"id":"snWRgSI3vlTOy3RRkuNckM-ws-5lpFiPMpYlLx_zPyk","last_tx":"","owner":"xCZick79YbWE_L7r5Jl2DaSGjgLpgUFtnJJVIPZS-pAVuahvMT3ZefiTgQTgPhOglf2x4Wg-ELcsbBzUf-l1J2aFVSjkodfXVLppoHMgJxKt90ozPdmYdb6QJdIXmJcSSJAltQW7Uuua_BrZQ-YWDMkei_12MqD15iZwT5YfMqKy2o62PIKaqew45oHLbSUOTfT3_CGNc8SWrS7rXN8FXEZgJtqDhI_jKwL2htZxQ69AfZyunWrfONdeTdPIjwY6VTojCUpMoCw7z1vdCfJKyMWveaUXzV4dhq2o3ZjC4QA3SjHuaH1AFb2ZM_018jKApXKg2yn0_37fav73rt2hDxxFL8h-j-YGsJp5EISVZU58VgxyVmkvJU_o22yST7O7Qe3p7Xxwh-dIaFUOI5HRmdoyMDAirnxGm3KcwlD25Sb6Ze-gLCuCJLdTMiaEhd0ZUjdWPzJ9KIn2wviIXdGLTs6coHZxLsoMOJQ58UcYJHmfeXVWNDBf62ETMqelSmgK-kldISYoY-i7aoZC9xpULiGiiIt7ofqNlKFaV2LMvfIzJXsutgqXlJ3EvME-Bz0h12c7cACjWSI3chF1sT2i2kEnqmioxE9S3E-JnhCEWynUedQPHkJLXDPThSju09CHTqlZuTK4Alrk0ec_neAlUDsOCiw24lZHMtflvrnTz-E","tags":[],"target":"","quantity":"0","data":"","reward":"0","signature":"oQ26WcCTzsQ0M9Mxrl-RtOSsYtbL2HmT_WGUvMWQxMrefZlRX9BfQMYO5sT7CQS-odShHPvQrSkgnc0QrRz3nrbG-9usmvB7oAGk-LL8LydUnfBSFH9uoiu0JiOnmwN8lOF2BEcW2GvwE396rDyaBMZESNsQiGQwwsXTRcHjRy5kYkswqrNa782q-rK7j9sFBHwzuQzw-4G6j6l34nVmPjgcQ5c16GFGgPhRn-ua_MAC1ymGdGeyDS_mtU87eK6yLT4ygvPmj-yrAyLBy9FDEZqmZNaUeeMxkHL5vqvEqF3EptA23XS9jA-LKTWkXNCUd2belzTsk46j4fxto-SJ47J1vL46o5eIjQz9mIpPjsItVq_RVIsSLiQBWSOi6_OtY3Z9PVhpNhguwjGpVMcQKF76SASOza-50eNwwqvo6sbSPqX6BE3L5bFwmG6zOaD5yXGuXsB1fz9LKhdlYLOTRXlmdu-anmEnqtcWp07MH3f1yJsY3Zdd8wm-wImUv1OxyNh4wG9ehStXNqq-dyhrV9L2fenNbKx2XTLxqYKa4aKR45hukjOg0KnA5dkw3uo4IkOFIDXMS1DFE4lwut181yhbftarurBLnwSqQ90OcvKw-fnPl5ZACPfYYXw2s7RNX6hAT3QJVZyCR32Vvz66WoWqrWn04SZyLRSqZ0iD3F0"} ================================================ FILE: genesis_data/genesis_txs/tOIFTqEef5fQYPzhlkC2Um7rddT6MyrHPzUWXDv_mJc.json ================================================ 
{"id":"tOIFTqEef5fQYPzhlkC2Um7rddT6MyrHPzUWXDv_mJc","last_tx":"","owner":"wITOQhpCsLUtJrV5TjdWhknpA1-DefF7TGI3EQxs0aclIW-lNRS-LJ8xOi0POgeqg8utaOIdkTIAsRcOKQNW1L4mV8IhAmoHdVN54yMJNOpHWcpQCWdgezTvOOeL7yABGZHLYmspS_xN1rDNSRck5M9xuctcsAlHGMC9JoaQja7MMhT4GoPMZR5NZlQMnC5QcRoM30N9wgMw79fGZmUB6uZyz6qsor74SKQNI3pVfp_4I8YIXjdMv7P-V6_5WO_FvfIIpVBxXoxYOIIQJ-GhlCauOGEZPSq0BTvW4WmgYNg4HAhTUrkoM-kDz8UzZ3okamZdqW6w0m5xwJX_W3PaORfhiEXK8-MVd4SKg9Ajpfzdt9j7qMAJNKIrU_ToemXDeVjaClugemMEiTPVyhEO6g53Dw96asUk0fi4n1JuUjyaXN_7tSV7NkUw5zsxbjs3M22K8Om-iBtGtlpnvWM1sqTMzzPHSwqy09bn9Q5WYO-Sd0PTtoRvJRwBujrOWLIfWAHY-mNhdc1QMtvTXCwk-QDeLZ6OC7CgxXDis4EFKF0qPXjouEZsnrmlya1FphmBy5-BSbkZDpe3C9wSQXiaj5_ciEkkbVsp4Ml32Db87scXpV8STy2Ze8SFMawMKB1P0h16oxxA_npLIRNbI39kMd-ibMM1SI5NCDLUrL8EIM0","tags":[],"target":"","quantity":"0","data":"VGhlIHN0YXJ0IG9mIGEgbmV3IGZ1dHVyZSBmb3IgRHIgYW5kIERyIExha2hvbywgYXMgb3VyIG5ldyBjaGlsZCBpcyBkdWUgaW4gMTEgZGF5cyAtIEkgbWFyayB0aGlzIGluIHRoZSBJbnRlcm5ldCBBcmNoaXZlIG9mIHRoZSBmdXR1cmUgOik","reward":"0","signature":"apYFT_uBANJQkj7XtXV7ao1vxF6w1nw4rNfysgZ0q2eZdR9rQojOduIT4zOy48gpXy0J5g8SMWCVjo4FAaAWRochDRaRKLFncQSSyNVSLJkygG2hCd-GjzGlc88PHci0aNv6iFy24mCNI1qRX_c-p0Vh41qNKzZSDnA7lF01Aej2PzYZKQXJg5voJK98Vl1QbAtG0tSPhx3D17oGdhuVrcks6l_AwJWWms2uQeS91jKVdWYHv2e5GPHsjk_LCCd3Mg2LsLYy9xtZFYXp0WTECYvbSraJcvjYsGcO4g6ZqV_aQZzs7XXPdeQTGk8KAgXDtZlZqS7Ra2tFYu1gK4S_iAErAjAiftpIfNj3MEgnQ_LrjP3F1F6xqsPIYTGo8QBIIdFWcWBhphIYIEDWD_JzpmAr6Gnnj_U_6qE6cgKfwGMJBLX7y6dhakbQJsH1URW6BsQX-Tuy_vo7XkrShQX63zhQTXcX92KcyPcat2pM5RQuh6p0A8D7AQfHN_1bkw8HEy8GWI2aysnrMs6n-A9zMdFxpWgZkoUsPVNTo0Hz5YIqZJYsJZZyyyMJhm4QDQAHfay7JHschSVIIiWTeP-WOj6l9pwg-CVrT8Ia3YIvdKHRXNYh99jqETlCbrJiz_OmyJN8EzoCyrqmoTM2BCKnXQLTyQlCt4PKMNux2SXMa9M"} ================================================ FILE: genesis_data/genesis_txs/tVLYd_62zbU-VPzQPOMHUo9TJR1dvSZ_pAHrC5Ubs8Q.json ================================================ 
{"id":"tVLYd_62zbU-VPzQPOMHUo9TJR1dvSZ_pAHrC5Ubs8Q","last_tx":"","owner":"nKK5DF-Tg_LMl_zRO9BEy0I_UQQs6Hu50ryjWR4zCthePATsNSdy-c4YOm08iyHNGS9lBVnLD7qICGa1mo2wfWzricfvx0EM549ME1EtKpSuzZ5gttmdIhy6GWWMlrrmtxOoBdJek_JMkorDT_2pvp858vRhp0sUxuJPES5TLRGH16uHNwd2HVnzVQPu0pKKdRjOJF9cM8IfiNfQeQLSwI_fkm4uHmYf2axKLvA01Dw-Ia6hP9_Cd2oD4OGfzpyPBtYkfqyNuO3KBpXID7WIHpw1tr__dUqj2PBCgq3IbAbINVFJfRIIo41-WYZFfIrSAr4R7iqzEfVerA0LNvM44aG9d4sAPKv3htxJo14x26dJqISln0KbtIsBi5Cfhf5XrroZfEhj9sgcyNqpEcsFAuYt68HE-0YXs-F28QJ-9C11CEJFw7GBtbMd-xBIH9sM4VlUjRSh1njCpeqkP4L8Cr58TFismqj8GMDkNzZNQe7DW-NglYzU6WGusQPSC6ySNkJvvAjUG4VsAciFSMQ2lPHqiZixFUFDo3RxBEnGn9g2CqHLVZMUyinEVf7GzhYgowo4n4mqjNqXtgVhx6C8fL3iGP3qztejwS44h9hLRm0KIejH5KpicyJZdyc0HUw43u30xt2b_jGMQqT9OXgPzvhfZ04eojSt-C3x9F0LnrM","tags":[],"target":"","quantity":"0","data":"dGVzdDI","reward":"0","signature":"Yz6ZHz8Dyi7cKtFB6BRkUfvtSCjlxrLSM8HQKuScxf1zzXdcTfNAywakz70stoT5FKDQ5UevfKS6utADj_jefsj6kMT5XfGATSApiw7X4aWpuaqcyFvCHizGukIKxuy3UnTOeMX4tBbDmkx7AbK7ChJcQrd7hYBcoQZpkuoSif34sWXogGL50C6O-mGU186ymFvOxWDeipBFfQjHaZmLUP4C25vgmeFbCQEebhrOfQk3VGGpbji3iyqDoxXR0zAX89kMrqpUqPRJhlAJuu7tdvrclESwiquNrM_svzX3VtqvAQocxPfDVjZY5n4EfSUt-iGwrsRJHNdOBLJJGfsOakG5U-vmI7TP-XC9wEzmlsM-iKB3Gx8JXPcm5eyB8-xIpdKieRrjijkozARjodQ4yrAE81AyaAQ-CvcF1AR31JRbOglMiwA6UVgwtaHUSAmOyelEvEM9DYWDEF7zI8CIh-vwL-JGxWA_DTZAHirkCD2APAfds434bPblMBGhY6UA3hcpYXw4aRAvvGnXPR7Gko5CCHm61ayVVx47d5OMypS8LCyWluP-SLPSGL6WZdph_jbHDoZHafPJUkKkNk4xReS9U5_E5LxvYpX99pCLgiwJAznYiO_yoWgSuWRFyYrF0RHbRdM_4qu5NAlRECvFu_F6PYesgbgAS2HfJAox3DM"} ================================================ FILE: genesis_data/genesis_txs/ud3zGJZA5tPRoitGG1c6HWm9W7iRS4ZF3u6PbZ-blns.json ================================================ 
{"id":"ud3zGJZA5tPRoitGG1c6HWm9W7iRS4ZF3u6PbZ-blns","last_tx":"","owner":"0u-MkiRuTdWAVEUZAFcE6D3Zba1BrJj42UF87fM53s4hezhvGn8kM7pBfu1-XRaotp6ntFX6aRe2hK6-EAaklG4XbiRWaG9u2uPMOKHcTNQ6D-BgKaGiE7yXs4l8Bx7HnblHCQu3n4pYj8UkNIWirANdN8xnX8eknBINLgWFfXYaJ1A7P5Sr2FQKww15dWPC2SAYgSowX1no0QJiIqxy_0gtFNi3gueSFSSPYU9WTUUrJWFkwW9wobxSmVDsW-idHtgAfciO07YmZYppRP-wMpmlGykrIWYgvRryiQXSCC8M7lnSEEtktIVRUyg4UR4v1z8UwCvDrzpS_4fCOit0P6ScuZL4gGk5D5cz56oe6Vb3161nQboemfy-GeDMtmkFXGmLB3RxVXxilIVxMj7SQmaddDEHpzu5wwjEH2xsyDqMeWPW6poyJug0zGOuEr4M26DJS0mgSRwdrOlja1wG7Y12ZsvOCQ6fN06IygxodA2YcDDmn7MhcIkvkcvnvFhzqhw2bTpXNt6iXyEqB6ejNkG838tGJmQElWPvg1PdWoQ6ySO6_0QUcsRqx_6glarGp9r7GYYQwIHidj_R9OwGjbGV0LD_V1iJo5bobx7FJ4IMtCrMETekgV-9ZdldyNsGprFtB70lkChxuc-6-bNJ1SndiMrt-CwZ7P5UWPORRRc","tags":[],"target":"","quantity":"0","data":"SSdtIGluISEgdGhpcyBpcyBnYW1lIGNoYW5naW5nLg","reward":"0","signature":"aonsO3wOUGuMirUalbQyq8T_kx237uVqltILJuJq8FE9NNpgBDuzdAkAgGPcXWVC7ZF3AOumS9EZyMzAF0R3joCttFuitXUu55HISclssVzP1pX1ajZJpcpARxDAzbWKP2UPoplrxdI7C18gSS6nmD-7ZqrNzJfJhshRwZqJiccm54qkTPjRoWJ5ANtB6TutDZAl6V0mIzKIu12WH0xe0dhmcKKY-wyKj0oIRa-Zpc7ANq2T5ng-MFlyD9yygOQHYnTOGhsyV6YZKzUfRZ6FV59eNr0INHe1d0dl7bWHNyXuE4ixBzK-XJC5i_bSZ1DfrUqhXuJfukV2P6uZd6btWISPgeqAGP5Li_BhYIaqsJWAXBWb7oWFCUQRT_sXnQXj3HM2jVGpkep2o4fbV-BqPsigFEwcZwp35f1gkwfSw2Kgk0J8UPYg83EMih1sDWMs3UozYrvjS_IX2TdyhAiCR-jOi6GoMt26BxU15pPYTwsjj0XRhq8cqKrrWdNbe8Tt_EILfbgkU5bBWdJtGs4h9YyHYk_XfkXmNurV2hZVVs3Lz9Xy6LbormtgeUPFUrWe_gfiy70kuEWteIHQd6ia9jkoAJdhBkr9rDaYXO6V-T53pTy33XPDWeJet-_VkMBJdpNEzpldVZrvFb8WjpziVQDaVglkMMkM6t2P_bfbnu8"} ================================================ FILE: genesis_data/genesis_txs/un3O49lggBX9raJKb6yuql_QTgZYWakWw5ydwUgUuXY.json ================================================ 
{"id":"un3O49lggBX9raJKb6yuql_QTgZYWakWw5ydwUgUuXY","last_tx":"","owner":"3JNVK0XgM0W4MXA4nqU-wjallXtv0SLEdRR9ATfdCTrpAidnSHBDya6U1M9KwHQ73S2t7HtB2yQbnYShhoNA1NUJC2Djh_GaqrnFdx2iGVhlIuB3pYNRg1GqLnVWl97fUwFSOxrKtNzdDDIjYufNOWvJaAB5eIOyxnalB5POS5VLX50X0UztTBpaKBvtN9riiJwIC_VFjstjMkn9IagbV8sfIR6ouBN9KiEV8xwSWc7vieFFHaSwUOHxGSeAzhHOJVk4WQpr1VygzmoiIgUW9w-wyPw5omtw8CehSLJqHumicRHRaxZDOP70M9YnCASPArPvLZl5Ulv33j9DU-4fLODf2SHIrupVfTjxI5OsMw3O0UUjdAqBpBDP-7zRFf_NU5DHZrWJDsXU_qQinAV9j8vgc-qtQREZXoEFkBb1wm4mg5Zi1PeSOm_W60Vg7355MxX11Ked0Xt1-KAL2p0QkQ72KJMSgzWFhxZd8YtSeynSzPVKst-XyhD9-89ndlSEC3V98-gDnNRrkaAZBBY1sImkFXx-wu5tyGKGEROtEtxi-2trQMKhZuDWyl_kzkrV_E-dZl3PgtpFyuwD75zjImtX3RSdJMnyj2sNyqbCnmtCH-7CU-vG9KB5TWSNbmpT1wHCkvcmhk5ScFENRpgab6t1Ecc-Ov1YtGRYjOMLEq0","tags":[],"target":"","quantity":"0","data":"QSBUcnlpbmcgTWFu","reward":"0","signature":"L-K9MHynT5tEJ892p3ehj7M7CtkHA6JVirZs-GWjzePFYYx1AFX2s8uUTFW5v04vue5a1VB2Y5elEzKh5abLnyPU6PQfDHIK6gL0em-NeoTyuRw62TvBncQVlJNEPC6QK5O2PbW2rapmwcOhvE97ckZlqfAaKz3lGNu3wK-Jo596KUSXDOqomvBQDzuscHpFfgl9E5eEolkuaCMgAcuNtxOXwVmUIfCzQnQfh02aVmLaoR-P6r8oqDESh4fHRNJwVHV3YCTqKnfh3LfHKG0LNSf-x14mFhxY534lDeXTaV2s2XUx0Ngo_CON21q9RvVHvTuhmPa5XDeUTT12epFnc4XYAiFIgNiVJHgnxm2o-00whfikjSJldLu31D5u3DQ8iiSelqp6V1oo-y0gYgQp1R7G5dunqk_amsQLc_NYK0KZZB1FdqnzUFQCmkc8GOdKbvYvGwPjlMoITkFxSzewNwoSAvPC0TO5_6tkYPd6FxdIjBxOCgpLHeXJeKpw30ixXXDve6jA8U6wovPhPViZ8VBIU2p9mFUlQnPGSDZRZQ4AnykrINFPm5B_nNq17y3kGaLvYduXyKRo4WCN2fpWUqFemzs7TB4wpIMC4MkKBQwGKcLTj9Ly68ChSkXBk_mWo1B_wphq4Hvy7KaQ2wo9hdnK6kPgKBnZPY5ZV4fR5MM"} ================================================ FILE: genesis_data/genesis_txs/utAoO_xht393CbJ_7P_ektVYeEpkySWLM-066yJ5HyI.json ================================================ 
{"id":"utAoO_xht393CbJ_7P_ektVYeEpkySWLM-066yJ5HyI","last_tx":"","owner":"44fkqwyKGn9tpZIllzfGN641scM0im0amYHIQFEj8J-SdvPTQ3CIWClgUaIKDbYR7ooKr-6AHr4G8yIIypfzf9HVfYJJOHo_Vz_BV4Sc9xV-nSxbYtzlstCjgyTdMRMZ8REWz99mTEIBrgmE00RDahM0xJ_G_8J4K1m9yVUg-t39X92i1wGt6HyHMHg1TiC8bjnVRCTziUSx46sGheWcXodjUZn_cVOgOq9g0fNUA5v6ZlFbqAp83VZJQz8D3lUcqQRKZVk9X940BCEO6skCKU1-0OwQiDj7IAOXwyCJg4KM6B8RexXHe4E8YEg8uAdWnmWEeQa37EPGjWZlnN-hpwcxi-nnfFjoQ3w9kdxQWoyLNLZKC9KSWaibFfX5zlHeaeh_kkhC89Wo2saefZ5LZNwlL2OVFfdZoxP93Po_LQ-_rqzNo4nfvABiAU_JSjUX7p9q6Iurwy6vXHvE-orZ7nbTKluzHkswpFeRVmyPnkrJs7Kr8vketKVVlJfJ5qcUM2XiAKejNEsWSFeaNZS1RHfnRRB6JOdXmKRSNZuu7VPtT2NovCpouY_fKJHW4T2392gQSaXQrSz6t7xrBwp7MEKc0f3jybjsLiB-sqKv7R9EEdorTuvgw9HtBMAaCSbRw9q4suZq7cOE8MJ-6zJIrngYxQFQY3vIQgtBQayPZ8k","tags":[],"target":"","quantity":"0","data":"SSBsb3ZlIGNpbmNpbGzDoA","reward":"0","signature":"wCGTrdB9sZHIzLzrMW-wpIPzsR5-0mAR7BzIwjv9FULpqqxHddtZoLcSVMu75o-v32dPJeDoMoTMNQr5b_Zg4wzTWEvW7KPW9D9AliIZGZuvz3CkMENojmQ1-cxmIVqWMJ4X7OXkHNdzuQBLtXJMCata7750e0ze3_ifT9MPkyldzmTIA3qhS_UdeaO3AfhvixPVpw5jw13dsD_HlG8M5LGw1iKxlG23K8kRDt1vm-Bzqxpwi6ymKZ4tBqofQiJaj-jOvEZfp1PwbYs2aWKO8w6KcEG3edXuPU2DkfnWkL0XaQVw3ZMYKfE7f_znsx10d4YAZaYhlP-ByW9afbRkEyOH_0HN5h6OKG94J212x_DFhx6A-8e7W8hd1-OIkIqlc162ci85XDIARqF-ph0xOTgT2RvXNIP0woSCryWls0VIFbq3Z8QS4Tdhash8-ER_Glse2H6UyKJ_QULZmh-iMbh7uBU7tsKR5_E0exHCYaP-VCKDnFIsdp8S6OFfUCMp1O-9sJQxqjB65oxM-DKNuuhfm7I-e7p3woHaid_BShHE9AIxBBBV1css9fjAX7MVizFcsxRmXzEteOJI8_OtUz9m4oCQgV89X1dBGUuphZU67BmAmIL3FlUXdjcDGWWRe_USc_2YLI-WF6z4MatrRMUQCVFzTk3zoz2By09EYSA"} ================================================ FILE: genesis_data/genesis_txs/v2UplxDprWwaIwbB6z3KNEj3GjloqM8SinvVahZ1Wpk.json ================================================ 
{"id":"v2UplxDprWwaIwbB6z3KNEj3GjloqM8SinvVahZ1Wpk","last_tx":"","owner":"rzVp0mivRGlTKQkQsHlt5ysXnXSsJ_6ep01_Y1pfejhLGbOMSSmQ93gZUlzxsIY4uGWuz-kl9hviWv_oLNTRIdIJWZrYet_-H0vePvFvSGOoTU05Rj0zU3yLxmdW9cm6dmMWaMdoPfN1VCTu6EA3ZO46pPZVAU_oKAogkQPJdWJc9kn4Pmb3MNrTD-Ab3-_paE4EVP_mQ4DH1xLoMU4sjLPsmbgrdU2S5WyjhtOLzxnPcl90Mk0UPY5wJw44AjQOdmI7aC6yghnwrxJ7cjJDyhoc8TbyKIeI3szRGr4aojLiLqS9Qe8i8bNw6hrE7oaSwcYUg2N9gqfzO2KYonnyoVvzoTf46yAA8-syhL0PcV-Ruv0d81gSqk7uAcQnNQreAKpnr4qrE19g5tJ1Dx5CJIgzXjrtwNsgRWu91V1pymdAW-cMHNahaEPyOaswzi97xjvrv9xqr6ESbZcDxf3ySZtNZXk7_NhTsN7h0HiASIl5wxTuktar6v8kR9WRs5Na-nE5yFm-oSTs7tPbMoU_iH2lKf6BXkMkxi_MmJyyMKF0xJj2RUjCj2O_51Gk3eyOa08GpKvHfIhXYNDVQIGHRb7tTvCutkMu5TgdQXN4i_UUFm8Vis7Pc_N5GkZUBmz6wJ3xa3KMKQxOg5EpjJZDanaLr5OpmcKQ-7m5OKSETNc","tags":[],"target":"","quantity":"0","data":"dGhpcyBzb3VuZHMgbGlrZSBhIHJlYWxseSBwcm9taXNpbmcgcHJvamVjdCE","reward":"0","signature":"Pd5VOkBU36aZyKpU8lpJjA5eAQW-Lu9NAYgaz4KiBRT6YuI2K1WWn9JigzdUyH95TpsFm7kmYAd3IhxlBTIJk_ARuWo0AorwHv63de-NVYDE5gGypyJQH88U3uc0f1v4sklDsM-DWoRf35Jo2-6uBzCqM05CKnbsIxmESslpKeIoXLu92f8k8zEpGKcDwsuGMJhc2wTuPtg48Vbol42m83n6qBenwfLPFDvDW-754q6mirUnsiSiod_i0zXHzvsY0I1uDWLAtaPrt1RwjyaNEhKDhoVa5nqqTNLo-hGbxOAoiSP5xhiSYrjFKFXylMPk6gfDll2-eUNaJcWi-eZx_pRDFxuzsw4f_FgKHPB2u3xOJTtkmCWy2NRxOuMmqb9WHXvHycfLH5s5wSLTjXoSQX2-ha6PYCbi164aSXjOPlbsI2Fz3g2UlEUK127lxsMcNMaDO0x9ZwD1LOXE94psQyLvEF4rUq_VJvwbzU8Mf-6GNHPPTyyJZGWzCaNMc-5n91TKGVzj36G4oKSbtO1Xsa0gtDOipgoM5pR6t9eXtyaSwxTrIJe_kgRtO-coek-x9hmX4-m3oDRSfkbY6Nx4WC8EOn52Qdfbfa3XQwFZX1RXD2Wu_EnM7wfdgu_sUiKq_KJzWnnRNb1QYVdRZqF87ZID8TVhbl4W7maHCcT6YsE"} ================================================ FILE: genesis_data/genesis_txs/vQ4zTq--De8FHdVnE7sYCemwiaqoZDS4emR_y6o6ZFA.json ================================================ 
{"id":"vQ4zTq--De8FHdVnE7sYCemwiaqoZDS4emR_y6o6ZFA","last_tx":"","owner":"8cK_ACUmfZLwqCxEKIv0ug5Ky064Z2jP_JiUEP4XjvtIo1HXGyZgKzNq_p35yOaLl9_5ezhBofXhDKuLb1_AoRAhNIEbtt_xGnV7e3s6vtTQtddMTuXO4cGicWNO41IA_AtJrnaKUmjFghaAZcGGKogAf7hv_x-pcpp451D37Y3D_ckK_I4An0i-cAN7p-3grKcxYlqA2ShtzUiH0R74muI2RroGg-IV4Z64ojol9rg_mikIrYVrpG-s5MSZD9zQuIQn1qp14OqNHpAIQrQQMx0iSdTX1YaS6oqGogE2DlxM9qSTYOrQzbvzd62LMvjU2-vldIw89vwlbG8l_2HHlN0z3Isst1ElS3s2wgjuEMPwPJDZH8G9Ye7el5BqEq_nhhdfF4s2XA5NbemVBzhYEPWu58AdoF5q5mdYrOEStdJeZ551Xn6mnkniBeADqDbNOv2sZPJF78nNmHP7QDPU7jqDzzKxPSP5N-X02L8oZmZX5O0NNubkl-61Sdm5IPqoS9UJ702C_kgPf2lZmapfI6TMVg0SeU2JOcY1Plfm9lo6BKtOFNY1njVihdqQeGhHeUto5AaEMCs54512R57c0Kj5TlcOyKtJujLpsggG5hk_qmMenk5s8cogZgHn6qaa-YtcVGduR5OSVE9z3NSK2zzGp5ejulOWwf0mn-wS9wE","tags":[],"target":"","quantity":"0","data":"Ym91bmRsZXNzIGFuZCBiYXJl","reward":"0","signature":"U3Pds3yfvNSoZOWHbVVmpu2dzTXSW271a-QMdSy5AQgGb1r_t22OU7eGI4e1oLxHejcuwu9CxSAk9tWNXPYJQ1X8zbd_tviTLIZgeLOEVlWyzl5GjJlpqO4GQv60ruY9KnaqitvPeus0TjHXcwYiIKmDlePPXhYfmc7bFL9XK9A4Rt7pmI2DZ1AXkSDZW848CDZRQ4uD820WUSvwKTs-F50SAzdCNdymAgX6c-p6iOFbNQwNJqmBBG5fX7HZPQX1ZHsKoAmowwOLY3IJXcPTCfepn0_-8eF4WiJmrcYft1JLmLG8xlN46GwFjsxR7tWCRI_1wY5w1pSDI9nqz6Jn-fm_9cvQrU_h9ohEcUHZCqRJfCAryM5CJM6aYILMAlbFxiEybJM-ea1jcD3zkGVH5zNDKUOuUn_pRYQMPKHpcPXhWy1OdRqbmVOJKO4AV_7MjUekmclhzY2rUA94acEB1WV89A1n-9dcXl5fZgYqbFfoNHk7F2-gdTLFloy8SYuIyRyUZZvCq9oYaX0QY5NP10iwfTSmA7cqdZ8k0gm1hFM3o2fqA6m5jT6UxppthnorEAW_hZlwFr2_hFSTI50hRv_TopBb__Nsl4Pq0kY6pIwVa4Yqa2F_Y5RKZGmzJfnIQjLb5qBrQXpbiVPuhvFiiJGr0Guh2zy0pYbEPdwmyBA"} ================================================ FILE: genesis_data/genesis_txs/vWeY4yJSJF9LXogRZb3Qr6QyLtEIL_8IY4bzJ2e7O5I.json ================================================ 
{"id":"vWeY4yJSJF9LXogRZb3Qr6QyLtEIL_8IY4bzJ2e7O5I","last_tx":"","owner":"2AV7XaIi2w6q8fMi7zuTVn-ZjxHjy9Cy0Uq-uPTtGT87G0Mwhb2pOBBEaaYSPWhABq5HbfVBNyKueZfyqsH3tUpoGqc6qjLEkfqGSNxhMIHZnigj7icoTeYMeTxF8prG1QkMFMSCOG13Fvued_xHkM-8FxXJec_KQ3vkiV2dpKP0pPrvyWynsIpIdVn9ji_Od2548-VBWIKD8odyOv05hf1zCv0B0u72P2UPNMDpMPk2I6FCqd3wFr_j_vKtPuOG3BMQkbpjzfgoOCdvb3ETeQjNFCOt6Sr8YIkJVBUL3MePRJarmz1WdpZ9jA5jbFTode9pjk1RgUywFz6PKmEu3HPFA-J95-N5T6zgrl1MI_eUxnNcC6F5ab6-38696qhQ-041Rgq3ptK1XBpLdHbsyCMdfyr5TiMS9jZlQBcWHyFjIuLqEgK-rQrJAANDx1bFGkOLtJC7ottkaPAhxGIBXsYgXJ8Qs1B2WwvrdojQiAvdY_QJctHryjgBC7-WN08_M0fKjEZU-tKD5Ay0Bhv6fzQHUPPBQOGjnGK9i5qDIK6fFXuHQnfWwG8qzkWMK3S4k8sQ0_c_Xo_sRVrBpVyQskokdeuU7Hl4Q2cXEDpM-wJdkbJe-8viLav1O33rR3l_k_dP0-KBwTlh3FY83LQ6K7j7Yv4LCIXXeyYQkHZtXa8","tags":[],"target":"","quantity":"0","data":"RGVkaWNhdGVkIHRvIG15IHNvbnM","reward":"0","signature":"VjD2hrAIkkFFGg5vanY5czWYnUnN9sI7fvcdM5ixyfIZJq71YMm3T55AiX3NbywRO6N1XhxWYh5QtQpC1FAXeQmuwUBhJxnkV4BLAnlcUAYBxsCJS1G1ALB9VieYqiarXDmRMEV3XQeOsYfJVZL1loJTf47fw-MueYDynOBShA9NmsjFjQoJ6YAz1BWBvbUbRwDC98_j0wNumN8ieC1oUf-jtqX-n9_re0NoyAtxOtJEuS8S3BdMH8WJ7krjQRQdZHCoToUsCOcY4Icvaai49-9trHfQBsHPw9g3g4SiyC4x7K7CyAblp4I419g8GDBOylHYbgufMUyGQCTAB9RsZCL5EyvEEYz9ftmgDfpFqCTSoqKvzj2YG9YjsOV2E-Y_psyRIRhvM-YUuBdOYrZQVom8wOTKBN3P40H8ithTEta9RH_PpXAZ-__X8c6hQSI9h1Zt7ZknwxujyypNBPuY1D_PVHG12pFK6lHHhDWObzOBlYab-uV_3512ZOrE5dk_YHyao01X9HAUqUKRZ5Af7B0BEPP4gHu8yfzMuo_baq4GNz9fEw-bgk7fopSM9AkwxNQgyGU-hp3VLP-Sx-Sn4zrc76uvsn3bgrj63fsCXz3TZ6Eyih8s2XiuHzAxwCNjGBPH-xSNTxSsQQt1UKG7YgOJ1tS7gHo79PtF8loWRyQ"} ================================================ FILE: genesis_data/genesis_txs/vaJOh_TzVSoEgbgDyKz6ABzd_wt2-ouBTe0gA1F3oMY.json ================================================ 
{"id":"vaJOh_TzVSoEgbgDyKz6ABzd_wt2-ouBTe0gA1F3oMY","last_tx":"","owner":"pOroELdvJ7uzhtuqm6ACzrzJCx2FQcBL8UKiJIoYT_3kibFK-tarNAobO7GudmYBDBBwQMerJhv84iE0aNyAb-pisdkZ5iAOe6IxePGavAlXVVOLwOoG3IbnrSworaCokNmlmlI3_RzZRJtUOIC41i5KnllaHMpPBReEXrT3pjvBzJZktkjyYORu_PU3FvjYmEsQ8JZVyeSzMilr14_4QWJp3_PkLFqflZ_TAbYALB9326YKRzCzRXYrezrG8aw6LVmN4CIKyIjpVsMfcB7axjbRi23g0r66GNURZmQDvKeFafHzU8sNUIMVkQTwe8ga8jKEzpg85QxvkbZe6_BG5fmHbPx4WFdJUiWY1kJGVrVWtJMtbloCuIASszKyebyHaYhNoVM4jBVObyrlBOajKRm3XBI_jjUVCDuAxgR-nIqmkJxjVmae9d0xVgOCGDZUmw94LzGnwrHv2pFUV1Zj_jcxVw4wUTit4Ur08Df_CCob5YXbbR60Bya_dkC_WEnR4vLFYXYZ0Oc9NLm3Inz31F_o04OFtephbUpCZLO2ctk3nHOXJBsin-U1oxsvEfZb9W8NQjL9WD3c9xL5z7iFVdixvBxj2xPbGl1qvt_Rbr9xdlwwobsTZfn8SSjhvDUf4Qw0hssvYHfeVFgGbpkUPhz1ptr8XrDOm_phWd0at88","tags":[],"target":"","quantity":"0","data":"R2l0IEdvb2Q","reward":"0","signature":"jE0ubYO4Wom3rFK6WFkBArQJvYEOCSTVBQTSpmi70LyvhbMLIgG1rRskP4YSDBNNJ2eNfgpJLLDq84wPLhGer9nGsAfz9J_DqkOerR7U6N0RJSek1GvxR4Xu0DnPoNsLHW3rfyJdeo5uw7wp29fFpoxbqBzOERPnWdvQ8fIvdun9YtGE4Beun6i-XQYPI6w9r7pIMJUmt1Vlu67pl6WHXTocslRAxfDsJZLOPUqxQhGLAO0oppn4GL_-hmN2ciJb5UR5c__jYhS4_E2Cf6gyMhrfvKKAFXC43lH0jGSTy9OGVjsSmbIxdVbZX6Z3wPQi-A6Hoing9De8utvImMjeqczls6dHcqehV40VBRBVgyu3csctQtSDzQq2Fo1x-BPcDAP0bVSzbS4bpNVTk5C78YxjUbuNLk6dpv43WddlxGYC3Hk1XE3psreUZmDoin_juYg5tYO25Jk3PQZfajX_KbupkHfk22LYNqas9Rp_4m3isHHhuwd4JsMqESvtTrW8Dmcy0wFhyILCfcXbTbIY_Tyy-KkelWhxmoGd9VOjNbe1WhmUKe9qrA85JJ_OLW0jOmj_h3gnZklBWs4WrPDFfsHQbZsS6mrfYqKHcFvnnj0eC20wPev8_XEtMYN_OTQDVwTGWaUXELDT0yK02dTj7K5QX_3BcQnsxIYNj0lVSM0"} ================================================ FILE: genesis_data/genesis_txs/wFjsB5Y9GV61NqjCeyPCdkfXKUJOYccq8Bl9aljvwGc.json ================================================ 
{"id":"wFjsB5Y9GV61NqjCeyPCdkfXKUJOYccq8Bl9aljvwGc","last_tx":"","owner":"r49svNp3E2C9gWAjqt7TWwMgeLbhLlwYA3MSdTP_trnPvkxh6K12B4bgREdWqe-NydZLZxwzEzHlUtigTavyRoMn5l7SQ9l9tGRXTef9yoTBdM9QLc8iJYtWFQ_M9l6waD2-YKsuuVLejtJMRoX59YVW_U_7kflYSXHNg8A1x-wDFGYqiO-mqewwQI6Gz2r00Z8VobltMoqr6-oZ2ZO1K3oN4y2cRX2E-5k82VI3PHq9OxR6CxFGGl1H-EPWuAX2Kqk4WvrNMQ2pwFA9cgD9sfBTK6Kaxif3o56uw5kbU8rFHIanvml75YXcHVCpw1iUSpdqi2_mqlsx-hlHaOymjtEDdlC-34w7zs-6QRvEs4O8gVtisoDAXO_HkPG9E0NHRfPd6_JAkMFBB2Kz2ZrXUaUyzt1elTHqkosFLGS-maphsIcIPB7nuNUdWveugSAPjgx_9Ftcv_y1irRb8lnebFS0_BWZzMUZFa_NBeDhHChp5DH8x-q1olhFPBy-NwmLfY9LPosbwDFI1FAR1kvJ5WHtsvONNWRJiHA492sL1UJtnllMj8yG7t1rpSnZXOyRZOnMiF72f_IQ_ng4Iq0p6lTBkNhehuQgQUbz01PsrLGv1Li_JlihPm0fnnISyFBC9avPOhMxzm_qYVzf21WvXgNPmJBxqeT9DoepQL0OCSk","tags":[],"target":"","quantity":"0","data":"VGhlIGVnZyBpcyBoYXRjaGVkLiBUaGUgc2VlZCBoYXMgZ3Jvd24u","reward":"0","signature":"ee3QGuXGjifu448031I3Wlg6eRq4ta_73zh38RjS8P1W7XiCvVFbAKxnX0oUrPIsBhY4Zm23t7t8b7DCNpMZzUZMD10xGkFniWZ6sCXWTWC3uL5Unfhk835RepYIYmAeMJi8KUZshACnJ5Jp8NJOt82PF_L524P1DiXrTURjCtNnFgJD9ydi1aT04jJH_G6EimqupbqccfQsxNRCMwoNfUeaJTr16YTU7CnlCknOa_Ac1UXzqWD8Llv7AB1J55KmCMMaKNd5UZ7sQHHXfpYsngWGgzAbI25Bt8a0_adshG3HoPlFakagyVRSrcWwb0oXvVRej6cQ4e5_If2AVTCEXZfRshmUtB0h65Q8xjMrpAI4bHAgWVpmBn19ofx7FlpL1xXYkqc_TLENfxCepfN8xVPjNOoVIQvRgFgHT7xGEG_FWRdGHyeooZgKae5Pru28MTumbklEroh7k9M4A3-lwJNqs8Jj7PKT-kvGBCQr0Nyzkxe5HWHrO_y7HR-Ax9SFTs5X0eJX0Wk2gP3qnPPjvHC_K-m1IEbeMLxyPAAaUu4azQaHkyLKptR6a5px4GZzIE9Zq3fcxf0D-xo9wHfUefEUJOBAGITOd4rjowpnZTkooqlMLi6OGGohIuq-z_k46UhOoaOoVRXYPobwPV7UnmRH_u4l_I4Ggisx7Bsn9-o"} ================================================ FILE: genesis_data/genesis_txs/wUhEm861foyWdxy0SI7CvXRcWuohItlX6Ydqo2NvtY8.json ================================================ 
{"id":"wUhEm861foyWdxy0SI7CvXRcWuohItlX6Ydqo2NvtY8","last_tx":"","owner":"mQyj4SGFKBKQuEt_8fpqcD2IPfd6MypdDtsJ7DMmvGaB1xKVuz1DtL67nVKZk3WWUDRARouDMxFbH8T78b3C_-IkZhp8tAImfhSFYg_fNTuI5qLYjKsZgPZpGZP2Gmi7e_2JoCAAXxka1ZI4eKy-SydA69iLQcl_NYyUoGW3dzyWSTwunZyfrNq9vdNifqViBR_In_dRFdGQtjhyvkLj6LHuvrv6ftGcEAZoqi9FBJpaBkC1BV3zv7rTeX83LDLjmGSXer6cWnrCoRzhR7aDhldsY98gtBpsTSgZ0lfUsn-Uurjx0VHvzKapVHWB-XTvRdJJaF687S6vozPSxYM-d6wsjNkpOGVxESo4W44q5Vh-LVcPWqVULJuKQhXVTbmJp6OW9INcTrJNyEpB4c04W-IKzJHphZVVdF17tENosaIIR2Z9ERC5VJZjSWrC7LKdl43NsJm6xfJiG9XEi7KdSvPATa2bo_MIIL-GIt5EkVjulod4xXrDpjAZQ6vp_6cxgnBLofThxTkbU6eU9oSJTvrXJpDPJ-EufZ4wabMfttKzZDEkcKD8qyffMcAVIOQIgWtjFK0t0PKcKNmqYuSyfhKT9jOeDg1H_5C2Ui1EcM_eziJSnppOeX8lFuRdvHyNWsq5u_N235WzKiuDINPHV-NxiPlo892dV0UTaQ-xJKU","tags":[],"target":"","quantity":"0","data":"c29uYXRpeA","reward":"0","signature":"d6h0ojQ09kWGzsaIWN2Q61Si_tgflU1VBs9BjaA7I2PphoDoJmOMEYR27f1Q5sugeatNHDgVDJ9IztsdAINYnDg3Y7XJ9HF6iyXZCj4zIXQeuuY4Syx2bOTUosp-FLo6Rtf6TZzmQ72HtI-uUxpKm3G0xQfbhA3YAwC4wgB2837usZRdPvIMjJPoc2HlkktpJ7RfdRkTPQLeWbCmSjp7nVSIyllIgrA4x-_VA-6TQNFcMU12h3fVuTCYNT3d6HdbCm60d-aopB_jo4RqDL5vbEQcd3hy-Y88g8rl-HhEKwZ44C8MvVpCzIskQXtYzKpSz77DlqSIZ7g1f2QDWg777PGrCKXLG0IvCECuYhxUk7FKOfSKAmunq1vQikgs7OMQghK-6ogXHuICVYSpnz6f97zgw15UmXLsa1VI02ROkawbA1wkZmiaNnl5aLUwkKTP-UpH6znl-tXLBuc8c2cAHX2X69-W6hZUv6seLwq382eqqC3nFL_JmEBDUlfQFCQ5VA4KDFQClFp3uU03hy6X23MRMTb2NAxpwC4T4WPU2oxpLib1YXGUBmlU5vvdpA2FoaSok-1BrAF2QlLNXJjS--PqAJ4yvZHp6FVMHcqE4q3sc1XlblyI9VF-roNm1E2zApPAzM37H6vhr87UKqYWSEmIHRp5QAtEpDRmDPaomvs"} ================================================ FILE: genesis_data/genesis_txs/weff0Y0_3-H7Vy1HrbpIzUmbTM1rZ8Lw0wgDGYmlsrM.json ================================================ 
{"id":"weff0Y0_3-H7Vy1HrbpIzUmbTM1rZ8Lw0wgDGYmlsrM","last_tx":"","owner":"w9FugqJ1NTs2Qb6f0W1RbwruuvFa_dGfA0Nf5B_yEIa0MN65X4ad85KubXVkpraXdBPSIimD2GbXHT9Fsuqkxn04O2V9MzoZqdU0ZqvqrXY0h-eF_CqFc9_uM8PGwI7Go4_Mn1tfTcqq6lxag7Aj9kmuKi9pGs-8EH3qc7mDwSTuQBSl9VleDYys-M4HRCfEjWeXBsaDEhNNtZj3rjO4WpWWTa0gnaOPIPqPoGmxXhoYUN6Edj3QpQOVA0O_TmRHpBgP799lF6V8N74wWge59t9bGWx4LuZ8EV06ilMW1VWyEVe7XPn49B2b-iQIXzuGagVxEG5mu82Asosf8df2Psib8StNKojzTJg05A1qKzw6rTcacWZrvRDedAfKJAohTMDLnPAuHgX9odFvQp_cc_t6ZocYVHZcONiMEdUmQma4RUKAcBmjWK0H-9dGE8b7WVi4rRptgqKo7NW65YyV-QA2o1pYF_6Tq4V4Q_s2mYNSZlsguwM6pE4wjrms1wFTtcYg_9BNsvWDeO_qk_kOQ0NHAzzx892nJFJ8BR8mUOefbGEXu8cpqOfCziSk0N3JFnmBiZ0NVAvQ6T347fQ7CYzUPwJf800Y3JPxoVWVTO0wC30lHvUewnmbfFc-b0cvSw-9uIByJ1XYd1nAUIZ7BsCvt0sa4qqRUwAEJ82f-qs","tags":[],"target":"","quantity":"0","data":"SSBhbSBzb3JyeSBKb3NjaC4gSSdtIGFmcmFpZCBJIGNhbid0IGRvIHRoYXQu","reward":"0","signature":"oevWSZ4CvB41EPVCYQHZdE25xoFUMwOkY-f-du00UjZh4GIIlTZoap1Ras7uYravlSrh9C6FRSJJo0imnmcl8XUSm1bs7hk17C2T1wA-QRKS-PeiKAsR8oJI2PcSOqTiSTCuWQ0Md15w0V_B2FK4zB6LNHChwvvg57x8K5sZZYC8Y3FxoRtyYzWoYwfZTHm6_sfemQsMe3Lx4QaYkEKmk1mRsu0tGiIMs1wbw_ovSXClyqXVgR9H7Ldm0oFjCB8ir9WUgT9F0oZUrnMoX4yJV1Mo7cRwk1sn2yqwagI9Cl5Oer3E_qR58_1BPtOPNMvH9tPUaeyMUQTQvRYTY9jqgVtIufOTGt6NWxF0MKFLM3OGy98m6gfdHxSqyRfuUUvGiuf2NGF5ka5_CD9IIiAaSULgx1mtPdw4GQN-rxG17UgYRwQ9KlBUGbhN-zr5E8v_x3QNDSoIlkzNhp3UGIPm9YDPMOUatbWw0rE1QlQmzBemKtQbb4VVwF8CtGCJDf9FVIbMHGnuyBWVhl8EL5YcCa-ZUbv0Dtiq5kIxljPdXovIB15cWyIgj6mkVjqEsgLuX6ebvyLmtLx1RLN9DlEGz9k5qflzuqrQLEamTKlBmZF5drup7EMpbOnRuFTehs47uWgXgwGzcRWWjp2rJZwbBIwWzoHVxe6JsW0qrcGHQJA"} ================================================ FILE: genesis_data/genesis_txs/wmZTwziFc_VlvYJz_4nyxYd3WxznBmsn5QQyRKDcWXU.json ================================================ 
{"id":"wmZTwziFc_VlvYJz_4nyxYd3WxznBmsn5QQyRKDcWXU","last_tx":"","owner":"3xxBY98XEjijL3Bf-6Crm245kkG96ForebXNAFT8UIxAKoLtyg8crIWAM8HQzSSy5uBcXP-4vQ5NF4IXqkCREjbPWJ98gIqcqsI8LgOubHlIo904bfKNu8tGHIEyvBkOPHA-YxVsoH5qD5NK0xpbnY8F4vl1UTvHEFN4XgrA-uPecpcOT0wMzbLNfQfdwwCwIoU-R53jziNBipWncj5spoC7X--73utKYK1FEVhK5ix4pf6yWajQrwjOLY0kNdQP_vtPV__XNkhZBZJ1Khhx0FjTU3LnnuADLkCEiH2O5x4r44n3UKGvfsQD-NNFB3C-IwYEYJYOvWjpjb7AgHfIClX6rJydsHYAKycFD0Fx3SrE4nQr-5OwIzDkFpCUZyUC9vApbLdZJIr-h9PQYUVzO61UHXLZYlBiHVu88CzoWK9A7bHjohnhYyvojS9kIO24KOkm6mv2hrQ31RB21QkRuWJwIEUSpqNrBBdikLxRSr75_OuxsS9BE0HGk9Hj9MmKDLleViXB84I8grj86LwsjqZlMv5TKRYgt5b5rZBM37tL2c2F6LEXxFHhYQkFWoFhc48jSbVkeAA8m33YktMVKifmto-Z71jR99iRfcXHA9bmyhje-6FH8VXYeSbgQzfEfFRnw3GZxdFoi_kKyPdt4FzGO2lzr83tY6bBUxm3t3c","tags":[],"target":"","quantity":"0","data":"V2VsY29tZSB0byB0aGUgcGFydHk","reward":"0","signature":"r2TagUwxUz8oU6MlFb4dIqPfhuCjDGShFa3pMvEi2SIWVL0EaEhhbfzDrl0rS3_oTzUWsyZnX0ZdZybJhFZi3jB79OMPab_E5p0zalR_ZjmPHTaWmNMsjU6t1TO5bKcSLRhUdozDVS3ldHFxrQstXYN7bJUQWIRUMIl_9TlsxVgeYfliVpuCNzsug07UhmMTb87JE7_yVt5vxk6s59pe0qHChON_7GSbIIP0AEaBEHAqpszBqbHUc0x-3__9FNkWh9bsclCxL8nkzCZfL8v-iKTOgQz8eYIoe81g6ZvFg_BscY-JNjxcSiNkhB4i7pDlRW79xtwcGENlVdoUbiDotD5zJMJW6QTrWWNDZHFgRZ_6xAseMZ9oi-S6iY3bDmo1-Pg0NxNox5ZZkEst5Y6hObW3qI52fFc4cyvWaY5iKHehFUUqaa89VArLbHG4QwPOQnH4pMFah8srSZbzW96WQbrJ855jUtuxccATLEIgYjf9HEE0sOEPL2UTB-SZGXsb6zU1-rsICIJNZfWr1hiUWLfaMFoW2f-rSEO7dD2rC_NOgV7IdK2kKcKHwtH4lpGghxMQJMRFjPD60Kz0tmy8Gqvlj_cY3oVKJdyNFLN2rmpoz2jBSWxqBil7kRXRSBVl8Eg2iGH3yGzGvIAlc-MGDYNKpj7214Y6MWFvIQFEO_8"} ================================================ FILE: genesis_data/genesis_txs/wnOghJX4aZlbm7SDDb4UUX8_6GZYpYYx3GireamHwAc.json ================================================ 
{"id":"wnOghJX4aZlbm7SDDb4UUX8_6GZYpYYx3GireamHwAc","last_tx":"","owner":"m3gdez1BdV97-__3MyYNe_ddMaixdc7wjsnBMZTNy9MCsSaH1ziuCTk1i67BbvA44pcZKH2TpOXV9b8rBBK9tJmQXCZxM24-8SqKzkCoeAE04f2MY67QO8-NK1Tkx5mqRsA1jGQVfP3XhqO85R3Gapr3UqFC3Nv7B667BBMDN1TxrA5CCqx7nWyZQ77vKJcmazO1BLBSpJCdthHuZ18wcTOPWvnRWUxjF1f9ef7EWMLuDVLtukJRor3sq4YJvObxHBtGmTS0tYhy7ted1v1O7eL1qUxcui-FIeB4NCjsBdHikQWvHxFR28FhQTclhe69lNzF8bucnBIIjyIk7IAqScdDZjNzkRqgjSIqIQ8uHcKLmSLBXUns9jGggo-qkuaQ4fMXyYKj9RtPV6JF4X1D44Uxf1bNQjtI5KIlLziiF41Fx4VAVGtMAzno5wjpOdDn6fKDeLBkLvztAoXRsJ9XWIsC8JipnPOGEtAHLVWCi3I1GXokeVu9YivzyZzWFbZ3FOqp1pfqdyJzueIuAgwaUCUAWcA14Xr3e19u7TgnBJu6M03JxLv0nTTSi0ScmtjThyQFAp-QGuFkMxxxQ8GejN6CkZrgl7uO6ZscIuNPyB849YoMhSETdbtxqO5VdCtjSajm1fqB1VfN_nAoJoDZebx9QoNM_Kc3RtZrrIeOjBk","tags":[],"target":"","quantity":"0","data":"TXkgdHJ1c3QgaW4gdGhpcyBwcm9qZWN0IGlzIHJlZmxlY3RlZCBieSBteSBjb250cmlidXRpb25zLiA","reward":"0","signature":"gwIuY_0w3e5WBY6TrFfQNyxb38N8r_k0xag-rxkXW0lQ3HDtaGbeZc6NIpdZP0gnKp2WNAQn8PHFXUPL6Znh6qJvNfv_ozbuTq7VSfEi4nqnF2CujZaVorFUofiNvUUKczHHk8GRyD3xN6DPvwXQ0YHY_iVwj8Y7_gn8lwWcc1XbOODWR0z2s42_rU9kwgb44GxeQSvqDvhio2Wj-EpK_TlnnQPaxu6v92M-489NFRKVlU9z2sXpGNH7zGPxp56spMIyQu90El8khEdp0XJmlJvvdoWBHXXyEJ6Wz168kiMRiVp4ZSSh0D8TCSd5TaO2muX7nD7i7BbZwCsu3mu7z73PKLKuSYq8A4oVRHhj1hXQram4cVjkltc_om5DG5uT9FuZLW88CvxiNcGG99cIvSlJZGqFSBjI1AQVigRBWrHFFusYkcHShAnoE9mRKcu4N8G4x6A-qjM14oVAMNCw92cq61_9j7u4uuGNC0-FnuB_17lkyPqFwrMdrndGhN-rufUVGl2cDE28tEzDy-Y8QaRxif9K1yzWvPunObScMzxlKLUHW6drjt8qBxvDItlTQ1FulbRRpdy5HmMv_lYOH9P28cLhegKEOPB-GR0s9ZXwv7Bo4ThVYJtcbB2yZnOHRrbKodLWJQ3vOotbyANXUqHwPalPeOzsRwTONdrOBIM"} ================================================ FILE: genesis_data/genesis_txs/x8KM69OVm6lzslK6ccAE-3EX5sW6CUHBZB-1hbc-J0A.json ================================================ 
{"id":"x8KM69OVm6lzslK6ccAE-3EX5sW6CUHBZB-1hbc-J0A","last_tx":"","owner":"vX_rs7GY4ZWlXTkJc5l6Aa0KljBrZR7e0JaeLVW91_fSF9UT1bGGVoFYTq9kFxvQBt5hTKMK_JbFCWU_F_saYrc9m3_jB6ng3coVALDomiHP5Apa--rr3n3lDHzq5XxYsJxIi6o3HlrFGHY_yvv5qZWgEvzx62uQ334NicNUf1sdq2kKQcGTA3wtpNqdpwCvRx0MUX8-ru5ywxEeM8bo5E2kLuZcU0UOcG_F_zJVFyQhsyDyASp17q9wCTv4M64rXtlOlzDkzF-AIwfIh2wJB0Ly33ZRLcyV2XW8MiEMMu9o7cQh58z4D7H-dyDbvMXo5ervVGvf1GU938lheLQ1Wcyq6-lIQSNVcjw--AlHeR0HPpwo1tIauDVEqbjZIydOPkdX4mLncK6Am1sV41T440n9mH-9lkHJ8NFANQjH_WmZNk5CFeb6u8v0RYB0iRZuZlGKngREBKyoJd9oi2FYnkmAqRtDJMuA-vCr3dHiBTmcis1QON21vIx5z2Fk3VY9J-g8z2d71BRaIoPoX0sBEPYvTIMUwVZkO1Spj3DfZKUCxG3gA4CLEwO0aHHb1rifRyEUbtH2Cn0nYB1xe3AzAC2IaGt3IGoOKg2YnUxP8mnN0D8AM1oBEgTVN9EI91nx1y_twHfE4in2B48SBrU9dvALQoGeczpY6jqwGfmUEwk","tags":[],"target":"","quantity":"0","data":"Tm90aGluZyBiZXNpZGUgcmVtYWlucy4gUm91bmQgdGhlIGRlY2F5","reward":"0","signature":"kC74HE-IVTgNo2dyPFZXpJlrWiLEFtVSzzvWJ-yT2XUbdZ-yWOScsunVUJTIyWpfwrueXs2OUFCIlhNKFSO30YNvjcuu0-zAXflI1FKYPopUKBriBIciiHOkAajhn2qWQouzF_FTGxdynRTCHn62TPEpD6gLNLFrp3ZI50qowgU5IgPecy-R7_rL04d4jTw883h4n3ZWtDcp067MZP6PhQaKEKwTBBiUcAbXIsHy_r177JlaukdwZW0PDo-67u25-AWUwzMruxsIA5Kzmi6ypMelpOOLu31Dmj8qQ6nphYdMjR4VS-p719GPFd1UGg1yvYEvh0_DI-KwcNsivy0UmuMIhb-GSi-DNLQcNbZ0p5Xbseai-1JK7Oa8QNNxwxD9koYbQAdvRQauymRbp698HPAX3augMq8FtVWC_Qie6aaAvWMydRHU8U3X-iF79G3jHCJrS5QTBixJJuAaIX5V3PfDuLHipTwOROfcAsiIr8u2jYsZ2Rdcwce40XdyISk2sUhdrD32l6E-2hftDO6wyb8pn494l0KTYoiCFDthV7a8n0Xha4csLwkDf_70gVlpMAiiLRRPT3eIFFRsMmYVj0r-o2Qr06nRkMIeVXTdBLkda48TH-S4miyZQDuNsz5zj1KH-jxKEvOIqVb0AVefOPzS1ZzypAQ9j4TuqY5XX-8"} ================================================ FILE: genesis_data/genesis_txs/xC7ski_qpcrRwRkxxHwPZd2lOX6Q---2qdQ4Rr-wxAM.json ================================================ 
{"id":"xC7ski_qpcrRwRkxxHwPZd2lOX6Q---2qdQ4Rr-wxAM","last_tx":"","owner":"vnSo2oc4e7DalUTlFYC4X6UPiYPcRirnWVgOVhDF6hTXPw98mTH1cDjEGTQN8SfvaGRTQs9wWskEiPPE9gt41ouAXNOUxxUixMRGT7xstOA5Ysg6q_O6mYOvJDP0N7egz7IyBSlBNTJrhoTE3mJiYOeHptQX7-ABmwYK4EV3NCTOd2a6utI5ZHIawnj3NFf8P6J73Uh-eQkwUW6fpHfCJ9vz7ghPzMuam0I3HKG8A36BRsmolGNFlZ1xHQE4tUlVoP_vCpmqDVXjwARD_H7x1JvJ49W9nhPq4TnTFs3jCxapHEt8MFKDgoXIwVOi4vk4bXO8iNl12o7uT1ZGYWmJ3xCjEEjPlh0T20pOYBMIbGnVr3U5Cvnmd_o6NIK3H6kuEjooQ8V2vCV-pk2AwekDn1ezfESScFqBuFxfNzotlCgjq5ktv_ijzR3zwIYDozUfDrIiUSazcseRepHYKR0C54Ru3zQEnGPLasYAy_xtpdGWT3Y78ZlOBL1vkyh_8X-y_OSUxnI55WhKazMa23Sy6Cih4S5YQadt4nK3gwOxIEXS-S6RTxkG1-vyPQ0Pt2yR0mjCObQ8i2aopuK9T7UckkSkEOc4sWnAxlavT23UyH2mvtvYwEqtxHPg1z4Cw3MEaRGVkasJy2TDS43yDJQI-g4YkLgJ0I9u1aoQLvFG9NM","tags":[],"target":"","quantity":"0","data":"SSBleHBlY3QgdGhpcyBwcm9qZWN0IShAbmlsczAwMDAwKQ","reward":"0","signature":"C8a3AjmT-OgW_TTH_xIQJcHAl45ZC6UCWTHW8NqUouRC1Bco5rhhdIn_ITAr0avZvUZzr3Sxim6ps5c-XgHJVwijv5qS2vWoKEaTB0r7clw2qWLe8-fEN3YCdAQFSh183daTiEk7Gp5KExzNA87OsfYrWqAHJ-KZlpZx7lS7jehfpzALK7uH4btH_acgmItq985sSMb8oeqFgy8yfJgaO5uAYyK7jYMDYrGjMW3rRwGbQjPimBnyVfPQSykOvt7nRyrjabQjw7yCsQ3KCSEdyRlWF0kpLICe7A7_PG7cnKhv5AN9Ctocn29EZJCBeAGSexoTA0TkC_87lPtjcCfmPsNC1goniDdytbQB9p9iWvvN6CvOVSpQUES3ta96Mg6SA7h2vPshAC0jEcOu-kb1bXwXgut1ZbjpKT6FjPIjI-F12je-kzOVSE3aoc3hblGI6johDQ6jIwQk5Rfwl5l7VRywMbzVB7Pf2YNBzgNizJU_aU4Un10rl-WlQTtyhyse4J9akPPWs8i5jFynhdXoZMWAOtxwwyXaN92HRFb5j9G9NR39LJNTnOGgf78NiONZLEfBa11AX8Qbs-1Odhjvkw7MMz6IA7tybEg1EWW_5OXG-Pnww3T1KiWbiYv6uv7QeYuWwI4bHKwlPoTOWvcc7UJa7EuOO5L1EwES270mqN8"} ================================================ FILE: genesis_data/genesis_txs/xSkMzFablxREj8H_RwoMseAFk-TCwaLVIZMHqXh5DHY.json ================================================ 
{"id":"xSkMzFablxREj8H_RwoMseAFk-TCwaLVIZMHqXh5DHY","last_tx":"","owner":"xftrZtKEf_UB3kg1DRwhyCIWzk3Xlqu1rqG1RzLIPJaqJjMXpJ4dCFsQ1CK8G0lQ41DLcVlofDHUtUtnje4rmE7cGF4tjuGfV8gxMq73D7jtAkr_j4mWl9H-mm7-JyiMUFXxOs8SFpxL2zS6cyi7TK_0CcAPEEBmzcStUxlJXwpLu5fZNrUiZ57j1bGi46uAmM5ThGEGK67LVQ-0BpYEp7blVa2Vl86NYjPd1nXEdBkjPZsrxUmNcvPt2nUp9N7OSAWWaui0bH1nP5e3SmARO7kmCrBj5SBLydtoYVLDo-jvjWRvDSZAIP_DYffcV5NVFP9FWO6HFPdt8OwKfjEYtAfRW_E-oxfhvNajDdT9BhtkUm410w4ryTbv1UVgPzxUkG3rgs9XBBfviq4TWXXA-xkYzpdmcQUzzilyj0ULFLiEfTFjfeG0sy2PKr5mzk7GgU01S54o76xWsYA_BleOssYbsFFsjOhgXapZMzyh3-STNau0BqwVT5rGHjDCDyho9GU3sMMiitIFdYFkPHTewF6GOtX-lbak2vKrlY7j8XngvLJ5arM4Xd6WkDKNjS06nhrlKuTx9YIuT9PE_7GVPE2UkwcavdR0FG1vj_q9o34PqT1-1KxZtk4f2nT3nsjST2vrv10ggJ5P482MSXuKJnj966aZ7eml8tlebnudaAs","tags":[],"target":"","quantity":"0","data":"aGFja2cgd2FzIGhlcmU","reward":"0","signature":"W_B7wG-fPJWvl7SfmAgpV6h1gdKcdgbyD7DtnX51WI-vm7CjS8qnNDBiUXugFUJiart1i57uqQbGXtXoZS3MBQN9JGvwtktrfldLXVo561TR44VvxtkV7je_78TYHHIFBuko0mX4_MrBdAr-KNGRqWy83GCx1t-XOEOyDDihUNCTyz-g0P467yHOG4KRKrus6mJmulkXdAuQ1F71Yfad1wO4_NYKIKzOUE1Mrc-pQYrrjRsNbA9lDiD52eCnVuiVfDlK62cQyrxvGcDBdC-oEa2P8zjns2aSPQ6wUMStn2suxrfbqB4Pw4jMgO2s-cNUm0OtHuXzkW8AXIFrQxvqtsvgI28qWo5MJbyJPzn08_-hhj9Ctf8-1wxqxEb-RQ9pDtGUS8PZ-BpackYjqGB39DM76kxOP2tA9J7ogFCnHvd-7TGvPDOb5KrMR953OtKDxhwsh1o8hwpKKxLvW51fw4G_VI0sSrFInNfq4LHlkKmvET2bDGeIQ92gR4xgXmuEqwFScMGfo-SLzaAodfDQPAWuyvyxSlWf3HRWB4H9UFP_qk_73XcYYaZ5mYRi05Y4iXiMyxCJN26uQE1rrO818GnHKaKa6KXLDeoM5flvAzDbs3iK-vxFG9Puv4RP0hEyGM7FNPOdOseTJBgzZ8aRw5AQrOoUa2X_3q6tMVji20Q"} ================================================ FILE: genesis_data/genesis_txs/xYpSRRpO8ejUGeohlRutNt9qUMgvuZJGkPGCyu1kSas.json ================================================ 
{"id":"xYpSRRpO8ejUGeohlRutNt9qUMgvuZJGkPGCyu1kSas","last_tx":"","owner":"tDoZ681tEfQ1v7gcLlcY17LJ5ul0ONYkP5vDzPyBZehkWMFwj-dA2hOlo7O66VTGsOOxNgl-B_eFG-bNEEbcOtVGwouxUxlJRnkCz_e1AZ_AYKcjz_omVZ62tqLAZEp-9JowEQO_FCsMl7_h4ipwptvggFbKxhm092TU59ijgB_YkLRP-g5BUdEK3WbsqbH3pJwzn69lsxaooSuYEQW3EWrFLMTodNWskANTelFXu1nbcJyNG7oLJp8rfkXUwqIqJ4vXhVVonVXo7iBIWwDRjlkybzKpF40jRBEFhjLGudvUkLBS8SJCnhdm8fgAuiv82xNLv_fANRLluGIo4Zj_MkzXmyxb9NznRN8Yilv1bivEX9vjQzaq7TBS45zXs5wY1UWHKAMQ1yVzPrgmz1F9Nrh1GRKx0VhDSKuB_Cj0jI9d7CUXHiiYDHseh3lOdMZ2It5sWnQZvEaogWMMTO02cJVFYoX7MUnV3E7pbWzlLI0-ZO3V4bruXhJ_uItwypVMtAN8Xp6AKRgDZxVcY1WZrfkApoD7oVBJKVHlrtFgFNVGZERCtlVwyDU9MblZxq_Y0RfGDD2WHBXxmY_ZbNiz1MCf1KBJgZKr6G7gUoRPP_XE7cF2nVoTPRu8XlMMU_7diy4LreRyaBUJFbOeLiJYb6LFZXzryTYCg00gToFIT3U","tags":[],"target":"","quantity":"0","data":"bG9va2luZyBmb3J3YXJkIHRvIHRoaXMh","reward":"0","signature":"PKKH9IoQo4ZzoKvwtGFSa76ZFcbKFqKQlIUfKEl6SGEYbqS4zc4w1Za7T3lAhEHRmeCjtw4XAArtiZDu_SV0MiiqZmvhxUc0OMb9Ao8gLeG6t7brixkiDyfSCBMELk28mgVDOcFmAZIR2Wgvf0TTl7uiggu3IsDIdzjzVs_5QacICK6uZQTObZHFXiMhGaZ4QqIVXWigPsNJbGwVZxeA__2tJp_G6PDsXbVuFTKgN41rN1n_0p7AXifIkYSqfyybjA_BY4K0Ha1K1KGM9xzSeDnAWLGKRxnVBXKgRE3cwpgLujTDGHgMyfqNVmVYM8yNu-CMXgKWQLTf9OZg3CMJHmfOTD3OshPXLze-8RYdihDe3hslJ7yt-ZIy3doDqD6Mia3XVvcoFlQYzcOU8Na0cxBjnVDY8d5DuNJwk8VU1yc0he12jpJxb36QLXhynj-yt8YLldujhnjtufbmkNzj54WIaevTaaGmj8OBbfooyIL2p4dyNMfzNuozj0JuGXe09sWWWG-hYkSZboxcNhUmliyepLQH3WEfEbGPtCE_cyYD0mrVby5eLUHstV4rzX6w1LlyZhOJ7e2noBgjeYpD_cNlG-_iakU6e44p-EiDMCL485Y9WR0Csxn9NzxF0939tiW9s0WOVUPFr_39ypEVuB3_EAm6NputwdhmmNj1nE4"} ================================================ FILE: genesis_data/genesis_txs/xavUY4L0L0nLNVvHiYfBqGL5iqUvdwQ-iY_nLLMB6J4.json ================================================ 
{"id":"xavUY4L0L0nLNVvHiYfBqGL5iqUvdwQ-iY_nLLMB6J4","last_tx":"","owner":"rO-PhO_pirBgP0uc68hGt3Lr5L45-SSfvTKicicm4wJrrPiofuF5y9Ckw2jCc56RtRYEPImmwzZFlYt8RAsA0ywZSxtxiRqJD394lPNfHjMRZGDOJKUV429rvfQR4_ZmxlnEYXG0fgI3U3Jf7CKqt7Wz9ZqrZot1jwf5eHLgGrHuSHbhT5JIhKFt54tu2wiswuk5doVdicJQuKBInt3A4y7yr3EUdoBIiEKBJHFaiqalnso7JGAKeT5D-8kUUpwqCob8VuBY0puJpcTOBvPfZxQhIhMhOSHj-AmFuWdl967BoaWa5F5epLBEtjlUsveYzZixYcGclz33ouKGOM7iY5p3RQWfJpIyx_AIs-rcSF4loBX6wLV09RSSL67fJCAF012EWjPCUpiZTV4uwlMvFU0qnxDFNs4rQUTW70EvLqRGv2n7qlgfPRt_QeW49UTwucO98XBD6iKIGQaT3J-stYmbIbF-RBOQtYA1KebII-NukUEvn_jzwbRU0JaCopSPo4JKC9fkv6CGUR5nq3WUTpLrcBWy6XSuGobQPN3u1qICFRwBMG8BKTs6CMw8wjEYPIYXNFV6QhRZLnpeUVff7mKZvG2egKtQw8SWGNBT-beZrQeUyNc1y7CQwN44KNIykXwY8MsAOfrT_UAnu59ZqK3jsKZ7oDhXsJtpfsiUlK0","tags":[],"target":"","quantity":"0","data":"TXkgYm95cw","reward":"0","signature":"o_QCkOU4pv-RRcEAiOfua6hVDTxvdKNUJu4VoCEI8yaUvNrIRDh1jMCr-amIJE6HI9-RfDb1x_NMaONK86vV0FB4_9UDFaviOfvPinMw8T7vJr1ysjMXftU79HyyWHxHb0tMYz-lVc4fxq3sGcu3bfGTDIA_mhX_R-WrHqbNE3SyBW8D5BxXVMFggAUTlvKFi38G2YwurP761CfIm4vBWs_gBSiaF9NWF1V0voKdfGCkjO4SBb_RhfC8Pn3ykV8OEWO5t0MMKGIRpY2ZXaJJ2HQj58F2APzym3no9qUeNZf9FVXSfxNNHEtV89iHbQKo-AgDvzS-IU4es0SHNE9bPck35QoM-YZL9yg-ZEmZjoqhofXmdxUgw6l2A6yNp4S4raRngKmUc5KYCrU6GKbnoypqbX7iRB-q7-oZDSrMRjZWKgdUOzmkdiyTLu6Y_0wfpJjUzSOJo61DlwA-F0z2vAyXmK-jSXA_N3D8loziPx7p-NsgwE7jlMRqua-6eO3K6BnZhiV3pvij95ohuo-UX0U4WcjnVPlQ9i7WOeFoCX4zwXowoNAS4Zfh9Ilii8Vye_PmdRFCMnF5iiu7Y1WhIvisIV3MEEWyC6q2yBDt2KwgAZJEVKJXbTiqJyxo4yEshFDGDoy8PN3EbGyI71GLR2iziZ7dms_zKEjM42eNxuA"} ================================================ FILE: genesis_data/genesis_txs/xiQYsaUMtlIq9DvTyucB4gu0BFC-qnFRIDclLv8wUT8.json ================================================ 
{"id":"xiQYsaUMtlIq9DvTyucB4gu0BFC-qnFRIDclLv8wUT8","last_tx":"","owner":"sc5BnGCv62aO1aWZGbVmfr4muDfbCpfHKGa-ekzdw3Nx4KQQUcE1fiKfOpcgyrCpkNq7E5EMnGJhF1ADgXa_yvrqRjhfRDMrs9uRY5XbLbDF0tzgHV3_IvPCF87piABVWROQSA85oUgxtop6FZemZ8ppQ35p8Cmo13_n764Hwi6leHu3hMD6W6IQ1OsAT6JMQkQd9B_GfWKu7TqnfYoR_Bk97qcnVBpdBS6W6TquoBTJFpBpMnzIgfRpduesip1J9FZzYSkzY2SiJ4KxWVAAgSM5Y4cSPvBxIFmDJv-y_ue7xm2Bh7wAJkJs-QcqrLuUjgOdkElGJkfs5bY8enxSjPpJypbSDqlqQBUTQemoDrEe9cAfUwG6r9juNl9ErxFZ7fvfqqxDOU3fBJV7L02kqOsMqmYgOvEn3lPFof51tb8syUdZdAqSU2P4TObEyrgZhsotWfp1dHBFDwRQrtL5Op65j5Jl-5ImiWDHKZ6xQxOeuKLJatvRM3tTgq9do5UpGTyXtamVEHNkTWDF4Cw4Nb94XT-NlhHWh3WUilhfMmbgoj4kljR-7cJGxDUvL4fTer908emhYrTrcqppkqBvYILUiXQMTE1O1FJZkE3FawDua38hqVbcDCmirde-U2tQwmLUcEbNX8sHckrWDABtCAcFfuH41RyW1LWPx-Q9Ugs","tags":[],"target":"","quantity":"0","data":"Q3J5cHRvc1JVcy5jb20gc3VwcG9ydHMgQXJjaGFpbg","reward":"0","signature":"MnK8cVufE-YpkUqpB1UCEQ_gNxoCpR3ONXnaqfONXGer2i78aeSas2blqZ93Js4VBLRyBls5II2jgjp-GWq-It-f1P4yuwHIxW2RfYLciy_JCxjyafAgdU5lWBI2hLWiFMpmtTi5UZWAw47fECDktuA5uNAs2SAJ77bLMq5JH8RE-hbi8PcSeTP2s7-BXtZ4y0jdPToKGkDu95PQarOWGhGFFgt_Y0J83WivXBn5nEeNVpN7qzBT40Ycauyw0wEyVCL-aUvrqjXzjj2qyzA9E0DsTX_fhJGDnASLNV2cPE2Wt_9iZHyqJwcH98QljZHUt_6lCEL41tCoKqs6CyM8Vzu95mp7S5IWc8IljEkrnkJ3M8hX368Rvp3zzPno8xv4EujVhqpdR0TormM_Q0rfjWMqg4A0TAwbMjI4iIEVA4i1fA4VOOiOdxn8CJ3h5we2G1Y8m7CYx6L6V70OzyrxNstGsprKWxRLuLedTI8hXJR98jGDCDVZAHCqy-zX3e5spq3EPArhORPSUC0DQQFWbeSJef-pLOMFNgUrthwMDkIOWNlxk8c3YqK4jFDlT0ODJDSFfEBuyc6_z_KmY5-XKjqlWi3y9tj8qvV4QGj8N6fSV2M05JgQIyYzIenPV-J7JLx_Ct_tYoKfUYrLLSJVINFN4oxvw-IOkLyXCfXTjAQ"} ================================================ FILE: genesis_data/genesis_txs/y-k4KjdSmwYmIugoObrtx5JWYczlEZBzwBHGMLqNP-0.json ================================================ 
{"id":"y-k4KjdSmwYmIugoObrtx5JWYczlEZBzwBHGMLqNP-0","last_tx":"","owner":"7tg_P7JrI18LLl8UHGXQP5_FgcmgNyOK9Y_A9tX9hLcSq2_Yn_DfrS8ZjGz-GeC4jXbFrK-g_DPSaxcAx5HqM0NXlijlC6ahbXOS9Q16E_vg0GyJVc84OQeRclEAkq33OYy_3935OGT7I1z1gxqQptkTgeOJ1scEFggr-eTC3msgq_ldGITufDwqTsc3PkzR5ehbmFCQ_y2XtWbxFjY4gPX7w3PRRaK5pQvgR3yfcG6pM6xgQgiswtc74A9KP3RKbuyP9duuoWkXdAo8C_605uPBCSnTd7d52NHFTQa8mKU6Vhmq4Np1V9NfogoRzAxUkP_3YpXZiNdWmd7cRlLO2lXgnz5dgRsRx5KRDBEXvRIS-IVehwkotZqdWlTcjSza-2xj22xfwSdvKbzoo-jzoMUkJxzfxwrcH2KKhUI4meuTNo9pUWg-nlo-mRLuD6YajDr4tJzndEUBksIMuHi7vWAfnWT_D9hvr91PfxT9hF0vE745G7eg86pIo4t_O6LA6j5XiOKO99wtiZt5RHCp2PpCMg5SpNnqq2UHbL1bBEwe3Kxmaq2Heyr6Dwk6WlN4mY_cnmvLaU4NZ9jHSFyjHgVQlRqe_UwnQ0PTIoNNgicOIgRHbtVcouDdu7E2umV2yTctLDf_YtaG50plWMQdOlQlfvE1mRw7j0Ib2KxvZyU","tags":[],"target":"","quantity":"0","data":"ZnJlZSB3aWxs","reward":"0","signature":"1q-UVH4HOi8A9TidtKWrUbP7mKw9Dt1BKlZZM0BsemTh8S_MPyTPmjDWT8_mBFJE-RBl1967LM5RGGmUDhOrOT5ZEeyXRz3BX1o3gScIc5OO0oviHexhe2E-BKdPbohwGP5HySrTcpJu8sKxurWfQ9DeUs_LgdntPa8lh3Yz8Jf6oa4nEu8FSPYpnuruh25EpqK2ptaSGq_UNnE7N33RHR45r0eRKKKhho4zgKDD3NAObISeGtPUELS41QVf5SUrrkAjie-bS23LHMCV1awoXdlQYEG4twhjfVLUeauCjVDh0zUDpAuQr59nzX6byUIHzWxCYV3V1KGisGuKCcsCKySgq_Z_q9v0vARURhhoTkjIvoy-Omkg73BXm5mjOr7kNxGbnGzA2Q6uOiQ54aplwvgmbmPh_f5bMBKIhD1rN69pXotqGSCDUA8iZSFS3yGpKGb76dMc7XCZ1IxvDySUs5rWj41ctU7QPrXN2UvO6zbsufE9Bqyz6NgrDj9YOvDx6dMQ2f0BKIOYR-0-brL2UhXXJiX_MY0Icd3UqNahbomn0z68BwV1G0aULTr-55eBEz_q0njvJ81tuzml-oL4nNNj-NDLa3UZuRsbIdWeDwbNlsXXAu1CvfVVMNV0wKiYthevdHRzqr1e92oNWfGNeuPUPv6gf68n0zm9NaC9FGc"} ================================================ FILE: genesis_data/genesis_txs/y0PrXtX7PonEbIG3uEdu-k-McGeLLAjzUriUTCMTGcw.json ================================================ 
{"id":"y0PrXtX7PonEbIG3uEdu-k-McGeLLAjzUriUTCMTGcw","last_tx":"","owner":"w_-P8SIVHxQllPLiCH3mH1iWUmtR-sTNwFF52NB-DHmvReMFZQKRjls0DA2_DplV4mR7tXCvlYkizsKSLayVXEYGknttz_F5BDI700NrlkbYZz31OVAWpJkploHljD_h-pOU7QbnKvuC6fUQ0DzKDEQK2aSrO-GGw0qUZHw818_xaP2kRJZtTrSUpgKv7yDPqbsHRqAJCEhs6ymNSbk6HhnRXvBn6Dx4nBEQ0QtRqa_s2XYs-ifk8GY3bR60Kebpb7f5GB3A3j5pVDhRFLeh01gslsuyzUbw9P6M2voEGpZFmTw2QPOdpKfaUrYMdj5Dc3pxDMPonCo_ZDqYLQYscvQ81b5biZfNEqpJctqT2LiV0tWxV6N6siCLQAc8PGnSaOvoUSahoFhBBdYAcH3uYvGwToajHWHHqsZ4K9wPTHgLavAC88IfL7SjWilKNz9S2hDLkatduALVga4EE1ALawlvIqM1ThUmZ8ZNO8tnj3nu2HLDVh2AYC-F_bU2OBja--7TDp1mWlV-G8YGHposedBfbg6IuVRIl-h0PRSO-1fagXkrxZSJtXlvEh_ib1pd1YHl9JsZnfz6HIdHIRqka4Wu3YqMOUyvDvs1idQqYO9av-qjygrDEIhh6YCPXXwIYX749iIpKHXvCn1gTHzxx1n6KvZYpBg8AbLZzt8wjGE","tags":[],"target":"","quantity":"0","data":"QWRhbSBTaGlyIHN1cHBvcnRzIGJ1aWxkaW5nIHRoZSBmdXR1cmUgZGVjZW50cmFsaXplZCBzb2NpZXR5IGZvciBhbGwgcGVvcGxlLCBuYXRpb25zIGFuZCBnZW5lcmF0aW9ucy4g","reward":"0","signature":"gwkSqfePg6gjTfMxjn0WVmazdIBxHjtl97B_OXtwps3kcFHJc6rQY7IZ_Q7hCgg_QoWFALkbfJwkESYo9E6gHW3Y5dFhR7VTP8tAXmR3tSHf88LJx3kJ8ZsbKGUZ1bK6hf7k_3R6EpKHKF_VEzYRkB1Ql_w-xZ2FnTTTg0Nf0qafoYePEfklRPPmIMRiyHTmd4NhlhNi-HUncyXnkUW5npyQG_IDg9jxMjileR5-U_xpYrQBNsCwz8iIstpAxNfIK8-ThKV3BGpo1025-HRwudU_8YnRpUNl6cJCsD7xqV6lH1hRgQS3FSXNmgd4DHNQMyJNzbaUvuk3oi_oyo8UYD-yNMono7zvphssV_piLuV4xab8ocKG7WIQOosTpf67BG1R57xsJSSIxIYcdXSrIYNFsJLDIa6HRSCROgtDqXdQBTPBM_GBW7KImiptZ8wrv54_wYHIP8hd2JvHV5sfbIxa3zla5J8k3oyziESXE7dQiWr-SQBm_oq_2IbezF1Q7J1snbOBaT74pSNDOUqo4v0v3wtlZGz5MipPE_YwSzh8M9_JjryXXqDbdxR7wFLWr6aRi2n0U5inXzOFftX79J-itC2gRXrRycY85XQxo7JAYe-idgpznGeKB0LZTjoydfE_MRxIZnKE8Lo5bFCGlvWJ43JUCvnfac3v2Nx1JUM"} ================================================ FILE: genesis_data/genesis_txs/y6WPKL6MHzZp2ktvb1cETmNMBJyCEPlxdisKlroEBtc.json ================================================ 
{"id":"y6WPKL6MHzZp2ktvb1cETmNMBJyCEPlxdisKlroEBtc","last_tx":"","owner":"4jEID33qOqyRWUr22JGyknuCFSuMkEedbLM0vSjfuAKDLO5vMuwhuY_JPQDE2itFqVWFXZRGsLPUKQCPNGT4zNlXTOsVp-audtAZlu3dchSDBpa-Jt6G8dnhZ72T5wFQNxPtfED-a0Cqse16WTORzZAYbuuuoKyJLu4UbHz7-C6vNc27LKZwP1sTfaVZRMhlbqDArOQ54tLgM8hBR3ludqSQMMkVIDAB77cg5qRiUJ03sprWe-5SNV3eKYpCkFR-KD8-xF3K7Ni_rndJv5m895BFletsw8S9QkBrqArUDKDWVEACxxAQilKmkyPpYXgSh4xsyOiRWs6P1ljzonqDIO4QwWfGmTz_jwhKdQnyLLRsd16fvmN1CHcvneLErJAfsd6u77knIbZZyd-zXpBCn-TZLG-qhpQIT1j6IuIDPusG4dTi4oHZn-TO6EiHq72Sa9iQOuO18-6NApq0BX8Qg8HG0Wr99xaJS-ZmC-ya1XALY4Jeq9KtcWfh95MfAsE8jIKNd3ZPiNyvR1AWKPyrUPbpUJytk4zXLVoF5usGyRHQm39ewZD7bjY6M973Tfczvt9IVO2fjNsVLi5lUApZE7JqERnjq1G3OCXfhDI6frEabMw7eLUDz-oZdcc7Dz_r51FHKB5fuZA3sjf22bE22Ilmx_mbK_8RiCqYxhhpt-8","tags":[],"target":"","quantity":"0","data":"TGV0IHRoZSBoaXN0b3J5IHNheSB0aGF0IGkgYmVsaWV2ZWQgaW4gdGhpcyBwcm9qZWN0IQ","reward":"0","signature":"bv6MNOKCahx8yI6lQPbNOe_JccghRFJnStzBAWXBQB7av2F1YEN-rNawO2YKYLcwOP1TSKMbY0wJnoVjLdTeMS0t-sjOS3u7KmO7cflQOXax5sK6E1gbUS_nGr7SZevh9Hbt6K-woV_fmvgdl14WMfUSaTpubMdMeuKPIR23tJgc6ZYo5R46VREIza3u07z3rHzhjx7klMgHP8F8kvY_q_o4FoQrs5PGvNCNaHdpovIKHqD9EUUaPVEjWmfa_HnsPE1vVXO6c_2ypytLdZ8bFzTN7XKgbcgcZAuwEaoo-j5_9Zdstsf8kj0gMy5z_FuNY4Y1ywqvV-NQZhtO8D6AAO2IVA6nL-KzOcQH20YvHo5P2tJLVTm9mgSn5rF5Twc75UHxI6YGOfRvvoRsfKXWcQraIRZ7l0bm87WVjS_VwCh4jZl04b47D18BG0lWBZ4lNLUSZMnh2dAgRXz_o8tvasYAXQTGWwWR6U_ML9eFRTphxwHqcCN2g7BHgnbrFgUYJhxD1nMswnJ38OfCpt2Gq87Melj_Te-1DCUetMkiXGy9qYJIGJi0TT7jtFuLIHX-ktZieqXjAcEz0e-P19eNCFV-iIVGHc0t5XFUvnLkvbctn8ZZpnV-OSFRq2pfDVEiOk3ULb-UEGvE6Vb68o7DFjcy_0zIc2le1Ft_LPrO6h8"} ================================================ FILE: genesis_data/genesis_txs/ydvI6weQPIRj2hcNg4RPqzDpFOhqiTc9iDqQ-fUUl4I.json ================================================ 
{"id":"ydvI6weQPIRj2hcNg4RPqzDpFOhqiTc9iDqQ-fUUl4I","last_tx":"","owner":"luF96I-rFf5pXPQQN6uhdK6mCL7soAuT69l8kP5SUHMp-JPBupSBz-8g0_dU47ljSMLA7rVJmmwCBnVcgdplZ0unyo6f8B7HDBqyO_3L1qnzKFoET-hI8zv2i7qvnTJXK1js1jBCzuOLtBQ3nvGkgeMFw6b1GuzGRx55sI_rIAzfycE7rqocLINmE6ymG54rJ40m6_3GNrpDbHp106N16kJTwkkrktprrRFeELvP-eCMJUOfUUeXXNCPhd_0Mf3pAaYe3F_LXixzYnUWDKa5n0Aa1F-NUufsJSHtJDhkIdCYG7ueFsFqakM_C5SSTuIxySPU9u3IwFLZfYdKW_e-euZ2bws40uJHI5EiqdmW1mKXKjZB7SpJbg-o9yWujDy1oolaAhek3Zhj729vVu3rOLLlzigpuAlkV_6V1k6Xs1mbOzhTfWe9dBUV0XQ3nJ2HDgCF28qSczpnF8k9HFzKA3atL9IAOsjkUbz-dC5YYi_HF8da9FRWhVOVg3DRYPeizXJJ6UFpHTtPYOQ6xDzZkekec7o4lIy0oqSiaunMtL6ybLorggdeZbs_qwyBf0_KnCg2dGn4nfStZKQGkZhpvm4zmsRZ6PNaoljtKke-Kpg-YnZKil8h8v1TC8_V4QUW4uCmMldFcTh6VEx9YNYO29ASZwRfkKjXLSlsquqL4n8","tags":[],"target":"","quantity":"0","data":"RmFudGFzdGljIGlkZWEgc29uISBMb3ZpbmcgdGhlIGRpZ2l0YWwgYWR2ZW50dXJlcyB5b3UgYXJlIHRha2luZyBtZSBvbiEgTG92ZSBhbHdheXMgeW91ciBwcm91ZCBtdW0gWC4uLg","reward":"0","signature":"heB0JqM846yphzy-9H5Iu9NPwSIA_wxGFaWRUChZn2xkWmrCWwVMSEE6lIXcjnNUFD_ArzbWDZsCZMYHV2Zra-xA_eWQjJS70Aj_IDLz6b11avQgnXMyRD3sIrzL3vF4Z-QoK-wkeqdrbzO84kqjLXK0KViJ8Jkpwrgv-zsKetQlRikc3Rm9nsVY4gnPPrFvT-Wldogu4DL1r75XKwSEEjOU7Sq_uOXk9anXGUSme0V9cBQaPf0UWIuH2DLMDIP1kyfNd0I0Oj9FOUyj7LR52yiry-vGEi7tvovN0pTtKo4P4tKQKTLzpzZLiaf4pvh1re13XlCox5EH5_HZNTfJczZeQz4k4eaPBp10NnFJjz5Jc3RCeK0RZiulwtK3qj5StMgZ5glo7iKA32BCz5CSMimbt8U5BBk0R84XkU23OfyNdMDXlcoLPEU7CKEaQglKF1475873N2IWiJbJVGH6qzzXxQhxVUrb3xN720mIex6l0U_-OJpUIdwbJsKT9qfhKqkRffmvzPFz2b7o05gKmlWM2Ju2WzFt4DvN2R7yKFcZtqbvVoNMnw1dubPInixotHAGsFKoDsxdoFQOS26H4Btu9kWSwCREmdbK0L7TuR9eiZCfTtH6fM0N6zSvkj-zwLxCm_SZOcEqIq5nPWYcZqHkCq6T43LxSsW8HF4DVA8"} ================================================ FILE: genesis_data/genesis_txs/z7Xvravldr4BhTI4KPOEWtG325_1ORaLQ4aUPOAe_us.json ================================================ 
{"id":"z7Xvravldr4BhTI4KPOEWtG325_1ORaLQ4aUPOAe_us","last_tx":"","owner":"pobim0WtaOOBZtttT8RSMo5C673oWSs-yVaEwSROPfYVeMsO1kr7-ohUE4I7Oj7c6nbLtKRHESBCjx2llN8DdDjUcWT5o43rMMVfv4JxiudEQ28k4DNq7SIGpOQfDJNZ6_6RTKtGA5DzZis_qj2USYKpt0C1TKygHur1OpXPxOszr-YI02Ot2PD8nP25t6bgQqbuYFWM8tNhR4weP3XTIXVjPnKQM_CEBEf6WirMUeG0N4Ml_RSxjaGLBeF4F6u_2un6PZHqPaxu24wN_l2sJmP00o_wZF4Ill2BP4ZWLsUEt1xnbAiSUNXZu5rN7-razZMus-8Sl01c4qwetMdlKwvHNibdrIjJgl-QAK5KjrOcue9_GbRsIndiLY4NUBMirKaYTCp5Th3Ck9QOY8JlPu512UG1wcDEmWuklc59SSSGOE1naLtlrsAC-fW60FXQwmnYtYn-YbhHOWXFcguvZpcht_ULP60UdK-WxXxkuQcK3AcY3ne5QdeBhlTjdLThPGCWHfN_LdvihrGqgjwi3tD5sl80wuuw-SyvSEB1dD4V4PATKQlPvZ-ipVTmzFLnyrqJC_gwz7C8Tu5qRtaxkqqj79XMfbGn_Xby77kCsfdQpBkd585cEcwLkt6RDaoyVTyqRWrXePhacoz-zFp0w7-ek1D4SG3WU4Wh4belykU","tags":[],"target":"","quantity":"0","data":"UmlzayB2cy4gUmV3YXJk","reward":"0","signature":"e6vGNraL7T4AnwPHDn7pJwA_Mf-EXrvBXcNJOkZjFMSTXX-FIMJYiXHnF0N8Og0kmVywJEFTX07sIZmFj4RJXuJUJqchkBiegLI6R4A-TFtWw4xzK2aAbJre4otFcFBy1ogbz8sviUQidksazJhSa4yPupujySYIQ70v5t06jIY5Onf6FPTk3dCUoPVEcE8M3F-D2DpCbq0w7TPKcLhE2jVY-XmPYMsKCBLA0Khlou7tNEVJ5WnoYkcoutXOMLAEP_co70m-wmT1Yitfs_Bt_Pj4ErR9u5vKmhLHIVqLoLsGApB2tJQnOqIhGRNBmIO_2kfAwvlmUPXwkEVBqcwUqVe2NjzQxb-_fJHNfRL7D9aszha2uf9lhdeX2oRmZ-h8Kdqd_cM5K9F9w0ygYC2CkC3Hn3Uthb9Wa8REZjtrD7uMmhn08W-EMiBlrKNYl5rEmU7bIlHC_cRr2d6NYB6QwoBTebYlQIrinNbGqOtB_TFE-1uaDe3bzmQRx_FuQh6N_DegvIr02LmE_xxfPdnAGpo3Iug92OoO9m1hup7wj_1UgkTo3giDmpTjEtKWe1drgWSeVxgi3Ao8Fx45JKTh4k1pF8Q--HV7keDdE6ABY2JeRLuczyrX4HX_ZREiBXA25hgVEOmoE2zhZaDyBc7V8pYlxd1S5Fcz3H5nUsv3ZzM"} ================================================ FILE: genesis_data/genesis_txs/zCOtSnXKGGhXgrWld31Ak9qQA_SjpOqB6n-9sF74rhk.json ================================================ 
{"id":"zCOtSnXKGGhXgrWld31Ak9qQA_SjpOqB6n-9sF74rhk","last_tx":"","owner":"uc2Cy5PjGjjQuKUye3qnDdr9RYfVwPAvk5839BlSSM-IVsGAddjgY2k-HO6HrOIXBZwmAwLdtxBGVo8mwshL6oeFlAzhjyamOAiyt3hm0sExXKWrpA7oT_X8F4DFNy5jbeMkEOZUb16NROynKLwLdx_b8VHRC58KUPrsHmPNlsu8FtrRTmSZcfAmyeNXWgXEVFAdDmO15ku-WyYPCMJSz8i-6mcNoy-T5yxTZkG2JHnK9v59pa8YBOb3hFi0vIKVr5Iw6wVHR2-5ujkz1jNXgVAD0FxZpyU4c-Cm2zfZzWP1aLhp4B0YQPJbnMx6VwRtKBV-oaYUlsE0Xs9DfsO5GcGuCF9cB2AdMWdSjBjIG2L7PPoJCKcWGhBqQgfrjzhZC9To7SF14djEF5zA4u0TXR6xCDf67L1emaWijyzbji4GUVuBFrOBWU9-0lCEXUkh83csll-PLhA5xVdk_-ApNcQ4P2jYs7WO99KtLs1kDi6GjAVONaHv_e9OCxYXTRmsaWLTG1gSebusEPK4YH6VNwCkfkSnOJVD0PjZNRQSSGOdBjsYsdFWfHnheeZMmFziHX7fiV2_syTA56y3F--hN2nprMAXO4obKblV-lCoi1ApQksrhMN3YU7I535etyF2hF-DzFiHoi0MgtjCwpUoU3tePE_5cz5oj2LgzvwH4VE","tags":[],"target":"","quantity":"0","data":"VGhpcyBpcyBhIGdyZWF0IGJ1c2luZXNzIG1vZGVsIGFuZCBJJ20gc28gZXhjaXRlZCB0byBiZSBhIHBhcnQuIA","reward":"0","signature":"bD6UYyKuZgQQERt0WbopAnAk5K30krwwMZTAEuX0SGCrttfGoJ8vCAqQ2atTzCMjpUh69MoR8sGGcR6dRAfaPr0Su2mc6AP12Rd7Ksiz6TEjvhQCe08uFhn1Ay8TvFaw4RJCzkyAgRINgbXeCuO2iTkOro-9377G4WNMTpz7TygrC0we9ZpzixQVIuylVRsExuQ5VF3fPI5Xv--32A40vsv5K3Ia3ZHYWjCClnjSPEHe9roDeAnO3ye76LOaunaELf9CIM48MazCJa4CBzNfDBS6m1rSraySlMH9kbSh_wyKm2eFQ9LDrvpgPIvJUuY2bJTGiSS4UEh_nOWUIGyDPgBDvbetXReE8jPkJxYB8Xn4a2Lulr0qbWYV2JgVWKphKDUJnjWRzvDZ7B0uSW2knyUvKsOwXfk3XgBqlLzioPgx2uciOQVhDzeKcoiqHy9GMTcjQflF59_KHEuN937svRHEGHEZhGfpiSjifd-YKttCY4dwXX-00V5AXad83pTZ4RwhDKyfPL5mfWyBhD-ZGa5-f9vnss_C1xQWg8VgnuG0iXaG16Kz-e45or2QAWGEYFED1VFmsWqzQ6Igpp3HvuaUgC6W6xMCUuo5ilod9GPGDyzWy04MoE26pAJwzCPXNDkzUmzFrizsdP5eDjcBJ2aKqZPcgMMXarNtls1-9ig"} ================================================ FILE: genesis_data/genesis_txs/zUFRBcWpPAUyMlojffeTnPgsLo6YgU6JaJgOR0mpBuM.json ================================================ 
{"id":"zUFRBcWpPAUyMlojffeTnPgsLo6YgU6JaJgOR0mpBuM","last_tx":"","owner":"1kZfS-jYsIUeE-2kD4T6Uu0oGya6rABfznEkLA2GUl9UPR-yYbz4GEHIU9ev98D3AD3cxoQRAtmaaGnU1SANrvNnL4_QvdUtSFiSW-3gOcEIlHqJbht_PMlmy5yFtVikYlU-KL3uyUebhgw8zhdW6GyDI6n3KWxdW86zLs-01a84tZJKAqFBSxgZVr0QlTZ0FJiIH-Veo8kV-nku59Si6ZDrDXZytWGoN1dIs4Q5tojjAmTyesc564NSMuZRurW-SS5nKyxb-zanL8vd91RRxqVpArW_W25qB9VN0sRd8r9bjbhun-P713hO8tM3yQKPge-2PyEBD82PPw6ItUZoCrlSY-hFrcBaiv2ZHsX6eoyEOk1-MIlgqdF8psCqLTXsScnaKmIqRaKfS9-lo36FvpCEq3RZI8XpCg6_Xjo2oQj7W2OuovlCnlSdNWwg8odzputXZCxj3RpGmuAKWSCJFWecWDk8d19jqI9pSNqWyUZLLtyP3flLtHjWkzhWgQ_n5oJkaJD_QEHuVOfdjTHtVxtNuFZnQuY1K9TDQXJs1JSigh4XgvVfjsyabLt1zivL5-QFTIAcSjmbU0390oajw6NFYWpgwF8tfsuYZwCQu71UrLvWZ-HkT0xiyS8tWuXQ1ci8BjkLuRbro5fMrEnxKC1KZJyWxkCQiS7gKDEXhA8","tags":[],"target":"","quantity":"0","data":"anVzdCBtYWRlIG15IDFzdCBpY28gcHVyY2hhc2Uu","reward":"0","signature":"mTJRElP3Udwg-O4MbpcHEyFSGHAU4XJ6LdBLArIPOS9455OOToem_WWb5FvdZXZN9yGZlbuMlbK1nYiSGXsZ-Mh6Jv8iBj82lGdTD73LXGRKOhRy8srztMcfUYWGWCaiTAl6QEXOpglCMN0HdKpQpnWlu-SGHsdB4w7CnQU8XP-RahCyjWqezZerTFm7kxIGpIQ6U4SpIbIfnQ9TB53zCxaAo_uJbwRnEOmH9-b9zOK4BGsbk10Dzs8sPOJSHOAZfO0T3v0Whf8RpEZ7tPp-YBJ1Y0l50uRrKWmg8ttKY1EBpc2qQbSgPe3MycjfE8kKnzjl5Pp23iNIiajNuFhLFs7nv0mBiAdVUXNmDY807UQIcoCywXEUOGjwBdQ0abZ3e7RZP_KDrVbxAsF0WgNiWlPB7u2zmV5tMi0jir8P1HFCS4OVOMcnM0Zi5v2uZKLOieYIBvin9l8mriqAr5AY-3UwaKXaGqsK0AvGsqQFylNUtXSKxy3tkC22kgfiEicw6OHCq3Lj-VfpgdGB2Lg6GhFPMn4VPGEZJTQrXGCbuRhHtFKljNvaQVvKtFeMODIe3seQIq0sx_7grpP_B1o3HCOf1Hji8-kdoeX3I_WtiqXRgmT0un79cJzCxqxFspy5O36evuSpqnQfRbCRXGajGalF2REe2lSHZC2rX6vOgmQ"} ================================================ FILE: genesis_data/genesis_txs/zwl046ia6I5VWLRYPJzBI70ypBQN2VlvLH9a_ndNKxA.json ================================================ 
{"id":"zwl046ia6I5VWLRYPJzBI70ypBQN2VlvLH9a_ndNKxA","last_tx":"","owner":"9ojklidHkD-812ugepCwDRiuKmkgCgjmWcLUOXn0gPUbD4pZCwHsQQisYG0BNFriIhutPWZlku-lENcYmlgr6rdh0UcWiJd0g3PY8PEXx-YjleRB-A0oBlWwcDgKtBrDMcRMSFNoQLylM6Y3wvt5mGZA9mUct1FUYUM70uXz3gZS7NP0mBzWpDa9ApgpS6LUcN_v2m35ZyZbi-IWE8Jx275Xv-EObPKTZGuyIM-tRpdyZ6MfC4Chhr7ybkHYp0rdKblvRWB0h_dl1JC74rm8UOSfrV0z-LZH9PJNtquhAeYf4GBhwCq8EMFOi-H3AjtOwfWTacMaQdAapw63JRKikgUhczPRAlxiNxbIV90g2NtT9b1Xxgksxa6fCXXmEz-i1yZbwmFgbOtxbjO7UlGwoT_UpZk2CjA5tNAaujExRFN7yA66AQm_UX-XM_7OqQZhR1ysYnF0SpHkdAePDiDANPNi9-8yooTbbm-RULBLacYd18XIv3WD1_VP0OOxnR8IyQ7d1O_R0M7Jy8015pIuAYguzCUCe7LbVkpxjsEXdR0GrwlVkg9hQInNIfZw9Cw__k9BZDadfis5MtQBTgAqOo6FgRnA7cAFokfcnhsft3g38Hwq52UvegvzXuHwIuLo28a2CVBjmX_89luneJOiO3kseE16D2nfwZWx5UTWkWs","tags":[],"target":"","quantity":"0","data":"VGhpcyBzb3VuZHMgbGlrZSBpdCBjb3VsZCBtYWtlIHNvbWVvbmUgdmVyeSBwb3dlcmZ1bCB2ZXJ5IGFuZ3J5","reward":"0","signature":"BAhYUoRJCzt9nxxFDiDWFeJGf-Kt3Dxttc5MngrE2YMZuLN1AtNlz_EZT96OoaGmcc2gIkUHl_CZ0-cidat1lkyGOslUd-XfvCk9oYU0wT0dj7mVKT8UQbUt-iJ3-b8w_r1F5JKYOyKyG-1vYbN3iaFEkWVNEZWBV4cEn4Ge3PoL1JIcmpDIOHHypfSuqutP2XwrbTPysRNOAHm-Yq5uU9RbFWrQeo3Qg9AGGkBIqFGj5QPfG3_uvIfXqF7f2vDl1ckf5uJWOgfiYAmAKK_ToW6qVkcXkrtuhvJ_B5wwsafd8sUMWZyAqfDjErLHNQAUNadcoiNrycqwEVMwn2xkiTx6edPEQU8qP_HamyIeljf07U4DfNb5cMfcZgocEMYrg9HCvOgeyinDmEmdaV5UkycNV7QEqysC2J3ng34suEQt-wg3R0G4mWqzaQfXW-YfLm436W234S7rpjk0bkkfj9nMcNH7lkzW2ceZLlEWgTtL5gaNymn3WWkjB5JiDRGpPdC99yXY5sYp7UN9H0mn72W5wlCHz7m0R3wzGE9EVlVjAxiO7cD1zG2lsFt1nx33-K2BqNxsevvnOvwRbzS7plp-ZiVmJghbyi-aFo3ydGCs3L2wL2QeQrPefteEGzEtyZup3SKy4JOF16h4MzUA26O6VOnVelU9k7fLMtHPhp8"} ================================================ FILE: genesis_data/genesis_wallets.csv ================================================ YaWA7ZhpNNiE_h8wYWiR9CHZ0fNprrdYlvqMg2kr_oY,100000 eZUX3x-PVNQth-U8APNH28SEkJ3B5d_0NQQ0sUmwH0k,100000 s1BSJI_58-151xoV5Ed6p3589bf09JCPw1kJ421t4os,100000 FAO2kzRviKXc3CIjGrPvZBY7WeC54bDnG3UQpZz56Ug,100000 AL5bM0nM3cb-yWxhlvqr3cipTerpl-8qjlpMZboWuuU,100000 
WVZS7gulelBS_0XVWJxke5V5nAvaCf0w09mu4M_AbFo,100000 Rr4zI9DehKsG3g7ASYKOp9b3REwK6ch-K9bhtk5Zyog,100000 hHRBQFl-LB9NysdcKp6x2NcL8xzPfdsoC6dyllOC5Uc,100000 xuV1p7S8FFn4tj-fjgydZD1UdoTstu19cDdOpJ__Qyo,100000 JqwIBdgzUL7EoxIFkLMJh-haJ0YhmPno4SM1zT1P6gI,100000 JgMvMTXEw7g6q2dOzO-60M86HblGXYXmK6NkMFBMYbg,100000 PdExigxolhAMzR-ayBI0JJcQjM_J7cBCUv91_PNxBig,100000 vcDdvYf-Xm9MBDO-7f674vE4hjOZwhXneFZfuLGjTAs,100000 HXS05jfe8SUgtqnaJ7cgBe3LBK_kiVIsFrj2Oe_0EuY,100000 Er1sUW1YgIm-ALbZ_qLH_3Azd41fer-_9nH5g42Wv5Y,100000 NozAy8OM1qjWIMyYcHjLDYtgHkBqgqKudlICyEP62RU,100000 HKRsez1U8OvZAzYSzGwatQ0bLmnRHdvGm1osJYtU278,100000 dRx5dziGUNNtTdSemIxZwQJOnkVVYyfEQNw5mLWHjsM,100000 ghqR-A0076PqnYO84fWj2KCXn3O22JvOgTU9HxWCFcU,100000 p0xxTtRdgNPI9zyAPspZDXI_3fKGVYyn_l-eRtWYDG8,100000 gEPyxwWr_ehQjvaD9VgYshoZcZpG697Z8-Hvisoj8V0,100000 xljHr1vaop-iX9B65k336T2SyWieigtjUSmREHujEmA,100000 qi5jFyVhJrcS45HkG-wdvukxOlVISL6JPd5KB3iIvJM,100000 YYO8Cxqz81wEyNCbavHhFK2LxZvOvDXJWqrLAbjxF1A,100000 Y4PkdzXV50-2cARGf9NhVd4kCVlhEdJvFwsGFW4tyfc,100000 5rW5FLp8kY4vSBcZ1dX7Xv0S-cC6Xok3G7g2DQMBkao,100000 Q2bfyGaYuh5kgRh_55wETu7VNn23WE3-CqbVX5ezh3I,100000 N1EB72-FEJq-IMWrhXTbVIQwKB6UfxN6ePdk06jQ97E,100000 WgV_x78rOvipprA-oO3brGFmhzQ5soEHsz2ZFEnYWd0,100000 tLkPYMdUevh0dOcdp1MBxwv1ugIxmIEsN-Mg_oW0Xhw,100000 NNanwgsOFAIoLVScLp4hI7s3AH22X2F9y4UciRBV7rA,100000 V6G6fZT4OCE3wKsAuy0D1Ubvx4XNSqFoVvFC2tkxsbw,100000 WPB-7OngQt0vj3SwBDOvpnXy9HcVtTAQaskwD-5I-kc,100000 pAg2nXhfX9Z_MI3zvBxrUE0lYE-6c92-Ukub3z0VNoc,100000 n9BJeVmMQvH3cv3lx-z1_2ZEKwpLTtZL3mX1X-XEkVE,100000 eaEIrUM8VUo76mTY7dyOQQTYKHZu-seCfnaNp-SpWZY,100000 V--KnZ0Iq5atP0zHitsGJyeGP-3itE9iKhfgQ4OCr7k,100000 5odgBLQk9varjhIYF-0ZD7gf3lzPr1woFlQtIanN800,100000 YAz_B6VeWTjL4R3AXqK3arEtA6PzUt7TTtamSp0KS5k,100000 4Rtjb83GDQfCo7138x64CosWcFizwyDmM-CKeDt93pA,100000 lH42QnW4xW44fa9eE9LL8NqBgMUeCcT5Nz0LoRnbxAU,100000 vLSvn5M21JlPcKAzvcnZbKCamYiPp3hHRndVP2KCT8A,100000 3ujAWw6fCa3D9rJTKC4eBmQwNmGVeb-wmGUpejBM7NM,100000 kYLOUWwRu4WhjPku-P__TOQqi767rSF_v_bwaH6Beo0,100000 
uHmZez_09pg_uiv3ee1KjklrwnR01mtI4RL0q43iS14,100000 tN08TH6UR--rdoS1V2RPKRrrN3Wp6tQMeUp9NqYbis4,100000 sU4AF7zUMFe1tVHprB-CnQ4zFZKU3qmSS9FyFVBQ9-Y,100000 FjzcJjiXhRLxuJquIA4LdQG1ZKeEHidvHFVTxP9HZ4A,100000 KNYLhHPA4Lwbzu8yBygk_ulntcrpNi5F7HXASc0ljdg,100000 C_bkulS-DSpBLlViItMhaCVnb2S_eNiCISuAV1_X1e0,100000 ayO8POFu6MN490Ynq21J180nIVUEhK058FK3tKskGXU,100000 DI-mcMIyReCqbxiUNqjVkOEUbg5jZxtrC6dY4pp4Udg,100000 uCxZw-TqQtmdoBrJpSzfZZTLVHvGYWJx_57cMTSfK_A,100000 -6_H0nHLaSJcmWQhoMoEzBfDUp0FoX3o2KC0H-llh8g,100000 DQe4EyeAqgzEx0TsaHmhFvozP8FO9dYi0j6eOvGXJ3M,100000 SRxlg3twQUckGBXZNCWk2S35gSYHJ5EPVJ3LJ8g2-qc,100000 5ng3VFd5JGFX40enqdseMoWj7N8ZV_8Kl2bb4SZT7ys,100000 bA-Gx3RURNpnCdGQBky8oJH8mCAN7SHRx4kQaQUM3as,100000 UKutS2o6owRXXvWyWKDZWtZDMBKr-S93vbV_0GNIisg,100000 aCda9LEgQ0-uMs-3-4OIvAB-fWWD-MPRFA3m1em2ZCM,100000 _qStzwSDEQ4GGVqP7UpHxyo0BdXjwDky2Nj56eYLr9w,100000 rcEpiafzs-nE6h17e3GqSlkyjw9G98q3jiBd2kBiYbs,100000 SS33ruZnZgacoU_QUPMDl8Kz4_Ibz-swF4BkWKSICls,100000 wWf2f4UwbjZKIPU7Jqx2e6dYdjbLnV9yzZCfV9Tm8fA,100000 D1-8HwIuVw29vBLIxMGZvbZvCJ5C5JFi4ctTSPVq-8I,100000 6TIT-X_xnza-rhnbq8PDbO1ZdAIp9Po_ebrDow0b3U4,100000 UXx2wg0kHIP55CL0fqETgofvqnJLk8eHLNHjbrDZR8o,100000 SPhwwr639nfNL1S_uL2M01Xe6RQGIBCXX6dbMifFYhY,100000 D8zD1IIrgfJxQ0OH13XCmICqHcesXvgRuyXtyyCYYTA,100000 vmGFbgOVsAaLrDpFUesq-ub7i38j6Ft8potzNuPql6g,100000 sHnhiEQwCcBzriBBMHbLQiEmjHgHXWJbz3yX1yPqDiI,100000 DlQGRbXi9YSMUeLZ2KyeT49BM-QEK_oI1OtR7UXF6qU,100000 HK8aEpEGE8e3kamFK863MG5JGZWywU_vBWAI5jj1tdo,100000 rSRIjSXYBPADyDy04OgogP2gMy59O7MPMkm5b8FXB0M,100000 _FqDDNlu9sukCX_5MFDxXv3rTeI-A1bA6k9dT3OTPYc,100000 NbVcIWQU3YERyBz2Z50YmxcGBQwuZQ6gpgHzAulnQGo,100000 KKyG2bSV7kWiaa7Uw-njmlomvrNvFv2hltXRA4zdXjk,100000 C3frptJWsCsy6fWEYOjhylRjnfarxgGGcUhnD8MOg2A,100000 mE3v3UYQcdnmvCvuQ_6VSMOBkr-renRiu4HABtqYcIk,100000 MmSSuz9jk3Kxp_RKH_Z3k-w5HRjg1KIR2Xar-OQSB7c,100000 DlAREOK-dCpPBA67SiCwlW_2aQNp6BMoHtjgNGAECwU,100000 KjRhLchXKUpmDvmr_ct82tMwMxK9qFyNoHC2IX6ZBBc,100000 0OZC9fBlcXSF32SEX_fFdN9oRq5H5dLBfTF_pkRJnG4,100000 
_YPfOvaYHo2oK9HVA3NRVyzBOjrTSBuJQXsKnwoIb6g,100000 xAcisk4-K2Eyx5O5gO6rUxCuOj_K2CXCj1P9qBSCX_g,100000 A5VoQqNZ8AhdChe7MjwieS1rPcT3VZFTm6X0hNf0orA,100000 oG_bkTYxN4IzbvbMg3XJJxheug8uQCDtYf1BC-lNZQ0,100000 8ckE0F95yAKpa36e5GB2k861Z0Wapp6QFaXwEQJz6bI,100000 XWjT6pZUailFL-cuxKFs8pe41X58aN5MhAHCjl1AcIQ,100000 Y69MKJ-J1X59gt6nQQ44b3bJfxhEQWVD4IT6NjZOBH4,100000 CYi1a__7XF0DI6WZOuYrjm_VMWlRl7wxkpY_DbyeRf8,100000 8egCrE8CsSD8VLE3nIuP9FQZdX0k4L2kdZ4b97DwsgA,100000 HA89R1nmsqq67DWFwEF0VdckuoNY9M4RvHu8bda42qg,100000 --BwkLm3Ch8ZsVvevUPixP5z4KMIchK5f0a_zz3NHew,100000 Kq8w3RbTDWSWqhV-8YLhxHtP6S_ySZvc8cI8QwUd1-4,100000 q27Kbj5OK_T6IBqgE9U406knDZrjr_niE1udAcn5h4g,100000 OST-Z2mESfX4wiiTZJ5eVBYC0zG6hSQqhqxT6ZgYkr0,100000 5ybm3BFl-lIsmBFiw6Y0To7y9710jAeqGelfuSFTem4,100000 PVSrkpym_PbiK60hZHguexQEeGU3s8-_7Y8M9-zW-nQ,100000 sEZPm875WJmZHAoNJR_V7ixr0McrGkym5HJNMhrEwZI,100000 0TvTwYu30wsSqYaNT54rqugDrCC5Aodd86WXykXT0mc,100000 gWOhYKmZ5jRLIEIERDp3qAuRxyy7khzzWzQoWv1SNuI,100000 prxJBhGqxPPzzvNAuCb0WVGckWsNQIHYt0Z0XnaDdYA,100000 fg8caO4Igv9OShNunTZ2jKEquS-3hBhcxnAzHA2Hxxc,100000 xRbjPgA4u_fHHyZDfQPPWEzO_a2g5JspMQ4ptrsEsNI,100000 RnYstFsZP1PV3QKa-34zqn9-k3SItXAOUKw102zE-iQ,100000 85LRqrf4UpQxnkdnT2EM2jJFLJg2AR3P0r_3ng8O-Nk,100000 2zgo3-DzCThwYmbDS9PgVqVkgNe5zxD9Obsl3Ah2ET8,100000 gDsBkz5H-yEoJZqz4bXBLfHLjfIgJ4hMKErNpXMimp0,100000 BvBx5VmMLHXoH-5hkuBQVVKWDNED8PMJXHBULFTaKZg,100000 Yucza2I4r2aKoN0No_dZN_KPUkizhKqyHH9GYEFrIB8,100000 YK3ara_DRb0FqG2Fl5r9mJE_f7nMYjdr_GetolosEEc,100000 qi7CAP92aKEefficbYKPdtMjrRegQ2GzHKeb79GBIUU,100000 nFNlPKkhoVX5l4i-1N2aeM1Sdnp3vr6dVZUumjys5gc,100000 OJLHCm7jw4-11LFL9wEb57PpgE7x-R1-3VjTlc27AwI,100000 kKBf-E2Clh2TtHmLgfnCd9ZPZOfrqFp1zQHeINyPFUo,100000 vBnjCjV7lt0tFemF-uFO6mvBePiRUfY7ugyMvgzB8Xc,100000 g4h6phLV4jAQGBKO6Pt4bcmm__VV3i5AqJkUquKLXQI,100000 5goh1U4AkkERdUKGUsZDk6-XQKko0bvBWMIk16Y2CaE,100000 cNUKGTndf23r98qq7gMG2yU_85XKyb7blLPqO_ze5wI,100000 1zIrx11nWzERkyZMRU3B-Uf7pNcZchTqvnpxdAKylQk,100000 FYQBmRL4mQB_kDnkVVzXBcdpTW8viydZ3WvxpbUk9oc,100000 
_qa4arkdjK2X9SjechexnWzTtbOKcPkBPhrDDej6lI8,100000 bp3Fpodi2l3MRXAlPb1JFYfYy5Cgq65aFPMmwdtL87o,100000 wOrPJnrdOe-K475bxZm4ZsD-PR9ym_h3D4f6sTxgDxc,100000 ccsasQeTTwr5vm04xu0bxJdZkFE79NIxpM-QjB7ZYjI,100000 MzOlJmKOUi_1oqqkxjeQe4ehy_4jWjLp54kiYY2CGl4,100000 ka2KKBo3xDhD4rLZtfMF1hzC1HVDtKp_LSe5WQQ6oiU,100000 X_FKZeJVi-x-MumQVrEjgnDYy7J4ZWNXmIjzBOWGG4o,100000 ijaHRivbWlsa5QqPoVMPU8uvWDxHRaQFQjSJ3nvjhDE,100000 B2Jp3rHgc0MJpb4Zmu6QATAzk_HxZnA4PMybkY8hse0,100000 AzSQ71EuBPIq9kZ5MRKbghSS8yr0qya6bFPaimRwpaQ,100000 ktKRCltBqfSxsmVlsFE0XejV-XX_GeBUZYIvQMqUIVc,100000 WwUDKA1q2k15jksOjNURQ6J96snTT9bR_kuUTLCywP8,100000 7nIVIV2nUAMCOBWan6ncqstjWcMHqo2lrxWbAW5ncm0,100000 I-0efTaGI8bH2KcSWEns41Pjt_muv5Ly-r4dAul4UOA,100000 Y4yACPcMLXybmpUgH1kqHP6reQT-ojMVVSjCMJwgXEQ,100000 qTwdoLNxG_OjeLdcNTh9AsKP5MDjtPO5eGtzIOk_8f8,100000 HSQjmlP96FiXAHDDeaU6ZBPDLMz2PF4FlZnt6BWXsto,100000 zzTdoixXhqAl-hIReKlFmVWpEswRxVL1gqF3gEXV9mU,100000 L3kKRgPRZsK9VOVtiS-_Fze3jm9xjHG8Et8gKj86Xu4,100000 oaqgnZD4I5kknRE4cgyL4DJgmsyKDG-vvDS-A3ztpwM,100000 6LyoBAN_csKOz7H84yRpKN_N75FjQ3lzwHEme4kS6zY,100000 GrjpTeiOX8ZYZ2_gcJ-9dW40xpxAQSK6NNmmFtpypuY,100000 SAvejE4krzFUBqhh7Kz-ATIaHi5_v6IhFriHcJFZ8-s,100000 LQ_aPHM0YYRYjViADRYTGfu9QiIxlL7V8eb7qrAr_Ds,100000 THV6Yo_w6llWQkovq1H-3ex8O_SKDc-oNpopIYMPkH4,100000 eLR-J9hhs03sAeEQl-13T5RgQ8R1v4yt-iJyUp-qPts,100000 1CMo73uPr4AqnUi0dnNFUtejVvDv_9NsRC7k3RQ4Pto,100000 PWjmKHIO7WQoekaC_5r57zn4F03N8TyyBm3er2IpLhM,100000 k3rS1xQTWqdcMqX9PDvN9M1EUUHG2xP1eP5-V8A73mE,100000 zn4A4Fhue-GYOK2EIk2tl4tLGIrn1tQ2igK96_-eBJ4,100000 W6JCs231rNWHMl0tMGHE93R8ZXl7Cj_5TGSWx1DTF7o,100000 YW4YRRHChAsy-xe3hAM-EbwP5Z16RDodjynexjwkuoA,100000 DsKUU_PmDJJCmN-bZRtJmzdIhalnhTo75947aUsLX8s,100000 msNLAFvxOul8Tfox6on7L_8D8ABYWIqQplcfjInzNvs,100000 rY17qDmA9EIPLc-DldJDGEkgjmUjMRfWKR8IDrxrMoo,100000 1-Wv0mWANZBve1XcccIbjIuj1dZAKmbo_cvdFqKhaoM,100000 ot7yut_QoYxO90n8_9BuGauj_Q9MPn5mOjVEO60kEHM,100000 GJg0h_BNf013RRjqWUiEUGJmF3PnblIJxK0WUBapbl0,100000 D6bMR6cpMZYKKCNjkdiJASL_Qp4oIkQN4xu4dx70rbs,100000 
b1TYqJWCs2ivAM2PgXmhmXsnjelrIt0o5OmHpzaFyzs,100000 0IL-feSHbtIZ_VIzPCFQoIM0D1F60TFRb0j59Sh9SFw,100000 96Yk1fDrdL9q_cBPAMAvwoeHnS-11kQ92oAHWEzWVuI,100000 rf9Pud10bBc2VDz25jtMujcP9xu7w96v8qJDJp_mdos,100000 LvfCowuw-7H6Wp7w_nt53XSpt_gH5LfRbp7Z-deq1mw,100000 qAOLHhObB6-lF-FiHrXx2aHJtE5h1walpRmnvn_TTbg,100000 uESNnUefGjM3cmKMu02MtFU_wiicuAOIxGA2HOTlZGE,100000 sigyluoKLnk_IVD-Iy3vb9EcWUq2QGHYVBTz9_Bu_jw,100000 ZtrRqK91qmRJzm_vPEcxhZJHH6E4D8_l5CndA4PH2TQ,100000 t36rLTMEbQKC0do3CkEC4tsVgIKRVA7HSsznh_tbD5U,100000 3h_JnzAjalVIiWDCjfzwXUPBwL3rWenob2O_e-ESsh0,100000 WATo0mu4SKTTpBn2iiqEl38qS7lM7Q_1zLeust-q4B8,100000 YO5R5hvBNCbM6v-HukkUFFKP4Mvks-cf6ncZlgidAFk,100000 s2kZ1XuRaTh06hVcSCHe6XdBuui2c6cGibqZ-KGVu0o,100000 Fg6W0XqVAgl_IQ9s5zzv4pZA8coQ1og23MKjv5Vt850,100000 K_ur1650mVIZhkTIJRb39WgeNPatmg6d3_eWFm1t_DI,100000 sVIOKpp4WiowCmS-JzUIGiT-Heot16RIciIJM8oJDY0,100000 uKTPMIrsCl-rUVtwVYB4MndvmmQQX9FXdfOTBufGPJ0,100000 y7qGneUmbjF1QAUHAwle8-_oTf166a12yY42YtVen1c,100000 5RBU2MFQtqOl9alOYDBAPSuhwd_0ZMGsdjaFVxloTJg,100000 pPnrLCQSrHiYk96UP2Y4Kco6-gn7rUnk5zhi0X3MiRw,100000 B35J_ZwuM3CZd-DxLj4XCJ9qTzjwxTHuHOmvMvgErzw,100000 8Be_7cgyFMYF3fk8QjOPbfG0uy9IBuT2nWXs6ZSwzeg,100000 QZ0Gjj0CfsGlWvXjMMGx5el5qYo_ytbHJQSxtsBqjls,100000 GfuVD8mc8ZDecmioNCg7rrz_7f_OyAUqkLwsHMS15oo,100000 rgcoLJgLtFtQfuIHQrmhoZ4n5_6-qZEQHHqx3N3IrZs,100000 ty8eKQcis9g08r9tPuLxLQ8X-bLGWfiDaHcqTMy2-8Y,100000 LEJGkcPxJTCyo-bat63iWgTLDoh3k22CItmChoH-dTM,100000 8gIaI1Ylxuhf9ahssNFQSUfh2Tiu9uTKWqCQZef7Zi8,100000 ES6Q6YtugV5zcqI6XxlqhAL0R0hEYmZZGfQ8z935BKA,100000 eSaAYmJ0xQV373m523pkTlCjmisJow8bs1tuNq0XhfM,100000 pdEaOWS3T62n2eUy5d_6GLdp3vIdw7JwZ7ALFPi9_Cs,100000 cKp4e7X1b7TMeiy3Nb4E2hScNiGLxY3_1N2tLJS3ZUU,100000 qgTs_vpYAeCoaFJxg8RZVUTYAQXG1KQXaikkU2FlKVw,100000 ne4jtyau_7GN7iTcK7lC6-l3o7hkb_TK09MuHcpIDig,100000 ED7hMCYbdR8Pm0xxPeitzkgMJbkDQn6hcZbLjuqSrFM,100000 HIFk454nZSGLxzWTpd1zAg33RydqmZKDIiNintx7V1k,100000 QdGFQcB1Fu6UtWa_b8x-t3c4tEZkFFbGLPlq_vFn7Q4,100000 DY0tsIKuabnQ_8uBBCLmwNUjy30P3sD22yoEV4eGhbM,100000 
YoCGmMAZgU3STIIEEsyesnpR5P7toUKoFpNtnuN05uA,100000 1roVFPIdxXtDjCgzUNvJs36ShP_aKMP9PelpOZqxDy4,100000 EgIuGXNI7RzrPTRBvlepMkffOX3QO21enIqR2ewepd8,100000 72Xcd59k1uTg6B5Tk-6upA-F_RccCkmuwrupa3hIE2U,100000 duHWSYLVz6b_tTf2UR5KZkZJ9PgvIHTDTRGisArGknY,100000 2QRgPfEhSQY43AqotKkDRlWmHkPbPdi9IWR8bTX4wCE,100000 2NqPaii5HltyCiPIuCHVpz82_CHZLiQOWD1UkToKpKc,100000 vJP4MtM0q8NVAZC4ej4MTH1vskxd-MEGV5JBE8o2vUs,100000 LmtaJ7MiMJn00sAsBlhgqZ3xBCXnZrLEiwwnNFkt90Y,100000 OWu_TeloAgLLzvPHAXTP1rKeza6eEfjmlDWd3-v952E,100000 Scgq0UTsKElqWWby2z4VJvAJ5oFWe64wBiY-qr6OS0U,100000 7_Ofo_YBuxXLVcK31t35MOWHcM3yrgoM9vJDAztAujg,100000 YIWZ6GPfICBp4gSY3_CRo0G5T6_nKJRvP_4Ski7LUGE,100000 4GibH5U4mY0ErMzTQJsBrvJOEl6KmcLkj2NAdhOaWPU,100000 uWc_WKdMWhSV-JY6tGA7K0HCh_6MhairQqiL7K7093Y,100000 q1h4yY50Fs0m0ua7Wxcw3jAKLX0JHOF9zgQfHZn_nN0,100000 IMk0PfgF86fnO39Kr8x1UgC0mTYUw1_gcv0jjijM-CM,100000 nOEtSWZ1wX0paNxmw-GD99NgnLNPtToccRxg6-YTWDA,100000 pwbbAvcF7W8UtHsx-q4Nbilauj-qQMdDVBXBQQAhCdk,100000 2FjjL0B9aT3LIfx7omr_owaWLBKyL2L9rWBiFQUorHQ,100000 0dyEqjGLAMDzaRnCzH0camhX0XirrCEqVs_vc1btmG8,100000 V501Rjl_YPqtJ7OZSXU7VAFlP0oZOSzEscf10cjPzmw,100000 j-3b7b9bIh46BmuYd12DC48CSrj3mmkyFPzv97EAk2g,100000 TsSp1_eQRCMIkhqUaI8HdR8MPOiU7uspM_pAcf5AGvM,100000 kOHPE75QFQ3i8Mnt6tstgZ9iVe-YdBw-pf5XaScukl0,100000 kwLRY5fWtjjOqxzp6tujCP5PcW5G9w5fiGrIuAfPcls,100000 l79Dgbh3OBfflHNtMCnVpYUyb1N43Bh1Mk2P3Gt55Fw,100000 p2t0yC8h0nAvKlzX9U79mJ459m5vNbKrNckX4XlXCsA,100000 PI5oL2_bonvAuqb2AKYEXgUSYkA-KXs1Wx1uhJ8abNU,100000 PQimTX4gv68yXYSLjz5IOXRQGwBXBpHqukGnqhipzks,100000 5sXKWDhvOrSw_cy6mX7-pj7-8c8ARvicLuzPahaNMpI,100000 o1-7qYYfBTlXTcI8sCl2LQR7R2eTPngGiGi7KaAHDfY,100000 pwXjbD7G0x4nQaifUdnEgpgiywdjm20sHf0e72RvkLA,100000 xt4gKDTdQp_FM1CUOJJ_JO9w4vSEy8tPmErjg2xikiw,100000 alga-vnK3HwW3mUuRZQ56zWaUKXkC2rOAI9eZ5NGqzk,100000 yaMqBdwOQ0B2xAvZ82ncR0bOILu1OUCadp9vCZkWnKg,100000 LXIjPNRDoBS8oV5XU9NBYVD6jDXQeOTL2uwUBUiWQ1I,100000 Os3Vymfuii_7NJznPJMEUoYNZ4j8BlbHCj5ZRFrlGB0,100000 LSg7NFC5KdYvUZcmi08-hs9BoY7QdJMKZtqubZdqBV8,100000 
H951WTmb2pBAhPjWf1WmmXbnugnLSBKUY7NtAsMw1_o,100000 PaFmcufEYOpQS59C7E-NqqaQSbYQJJgPyQFjgdC5Ngg,100000 P6HuOmT5gZQa93F5QwjE6hA39BuH8Jw_apbpnIVi2zc,100000 zLsqSjfyjFofuNL3Z-a11jwUSJ7V_aeGyKrhyJ3fX6o,100000 JHqRF3fovy0J8nlw_mo0r4Y9ZclO75PGBu0GD-PDOkM,100000 VkUrj89DtJ8eZPllycwMrck8PiWUKcFwgos3_f_mH8c,100000 2vPhgBAf8XaF2gJ6Awt1A8Oy6Ch55nJovy2JjLISdEM,100000 wrvZSeM9MWr3pwjubsAeJdxCr1dVrwgD5m9OPtFduko,100000 fdcmVLeAYRDzsu81BkKwyhQnztpxLdfDuBOj9ckBOO4,100000 MteJRiQojXkdzBu7T6RtCOFEEKrI9U9IIz_GJtkRG54,100000 _wtKD-bvlOP-qzmjtQWAn-zWB8ufJmGko9lBu6XllAg,100000 dllCeWC-Y2d6bULNBNLPQON_YnLKHHrQDxXilfpWfVk,100000 AC_xfhPRl-jsJEwo0x_q7Lj0LDvMA8QP0YH4PKKUyv0,100000 Vnck5x3aBBhb0xuzNfBgmzQPj_3G_fx2nhbMmp-idE0,100000 H1Shevwg2PDfHcbarmNc4ExGffBtanzurQILPwyia9I,100000 wJJpoWsJQdqUl6VHfv7uwtZ20ldgw1Lg82R_g7CsDOk,100000 Ts_Ad3YcFNNIXsyz6rCxIfKvI0p4pQp_KXERG4hTa_Q,100000 Vy8q29Pymza_LA3fNHAJL2RWzLgKBh9cPPwa_hnyGjk,100000 6OwXkXZE4NtDrAY__xZbxK3fGRbsdxoe5_GIcdAeoFE,100000 SWNoqHaifMtG3XcmUv8iFEIttqPAFDwZKwaOeQubzDI,100000 wjRlg5gK0H0Gh0s2qvUI2kQVugT8bpheqVjNjgEgcw8,100000 3rceYiNNCBQfQQdNujwSXNkQy3DZ-jKDpKaZlokvR6M,100000 FQvY1Me_yfzA_5McnkspMW_nFJ3fsnsJu0JamGTyc7U,100000 o0lQQMxOZfzv475XIMu8P6POBqAvx-RQyqa4aXYuke8,100000 N5vW5WndSKXofLOguIvPHfxMYb3DdNcBjBROUMqBMZM,100000 t6EtWfOId5RN1icotTlFt0t8Fxraqc9QEINuEEdywp8,100000 K07vS5kq_9PSI3d2SuzXuLJ1NmRSEgoqduaMa3vY9P8,100000 Wh7-stpgNWsW_l_er6uFeTUUJvm55rvlwc-f6_Xd_58,100000 Nw4kgALHC34qreeyjlwSTi8UNZ0MPiwUYQEgc6vsgo4,100000 AbvZMwU2Wdkk9vHrqTAIyirUoGR16VSGvAcOOFrFUPs,100000 yrvqJaJyKgSySxIE8HF_Co7SkgtFfga1h7ZeJ0lkDqo,100000 Myp8h_krYnzoXT3frgPY9DcXD-QgicN6yQhFaz5k6lk,100000 kdBz3Y5-e74ToYcSacJ4ED1mOlsHPuhi38gBNXph9z4,100000 3ap19_7t1AH7PU3HZdWIUarjwMt3kaAlIQ-DSLaXC6w,100000 Qtgn1nDRx2CzWJGQcOkbfgnM6H5O8Rh1gYF5jJTN3s0,100000 wtKzpomzx9s2V9YmxDtOc-7xw-3bleuTcqU2VCiuN8s,100000 fKU7moqKN9R3BuAw2RcpE0oyANlVwCCC1J7TK_V0m8Y,100000 JLNp6nNz7QLx_V7yyhzHojF0oJIk-JLOi9c3FmoZH3s,100000 ckQi3i9iSO0BbQl_wkq0I-WLdznYL4gTtN2DsewQGpg,100000 
0lNbaMLcmMJKI5U8ve8OCHi7jyjytTkBlD21bcRlDEY,100000 eX64gYzRzR5onoucH2sFmxznY-IczrGDQqFQuOSxun4,100000 zBtF7atvry7rt-P0uCWSi-rIlkfBeDHOGC8n3qU2MxE,100000 izQDLBGPPmhLL22A3yf8esrCgpdOzUNxWOLIFwtyGXQ,100000 OlbY3rr-oro04Tf0hzy0ceXlD81b7Jwez6EaDRNO0bo,100000 xS8cXSwVnmUaR6G2odaLr5fe9MRyIl0QDeS_VGPM1Cg,100000 f0WZmIx4rSw98M96VRVTTBgiWFrw4Kz-TzeLSWs5NzI,100000 Dju86aXgYZJra0hzXydy7-NiYiCQ-bPwid9__MtVTJQ,100000 AdedGIx6DI6DVeX0wfPTtUT_j9TOYGpalYE4HHrVRcs,100000 zXEzZSZGGbfVHDkGL6PEDlX0UWAkWmjHRjTJaN5b2OM,100000 01Uxoj7yjU2zIguQWipFkJqq_AOkN-GGOfUQ5Lo4pUY,100000 ruXmZDgZLzJQrnvdA3dK5ajVgTdy2PzqZdusiDjsp0Y,100000 ng91fPVHjwDvH5xQZAFrtzpLrcf5KQNznz98QSclEJ4,100000 dc6pcipMvMxdOvXTU98zDKHp9L6uHWui7OgjVJKqrmo,100000 VQdib_wCmpUmPEZURr9s-b-PYZjZ0WRyAL0UjncGccY,100000 4QaLsvVQPN_IHErAH4JcNC-bBSMwYwMPkrDB4goRHj0,100000 IXRXjO6hTHY1f4yBYf5GKeitbCeMoJm-UFuKj2PlQfw,100000 _NCf0i2ke9kF8IrnVQX5PycddgCuuNPhmwdeaQTJ6B4,100000 MREBlAb7tTN-1z1P70HkhSyBAj5KwVc6f3HZ_suIFEQ,100000 P1jjEqRgxqVzdoDas5xSjxBzsLxc33FfKh5vy366_S4,100000 gj0uVfYLX2qqkCvAUSM6Ps2_kIRgLlPpx2ovBr2j38w,100000 xNJXaxQFvjjC-b_gN2AeDo7IwXwOXc-Oe9T_8SA0-8U,100000 5HNg-BjwbzMn5bzUNpZhwh3qGnBFxEYO6fAu-3hCoSs,100000 SGj4G47-jCH4bQJYNTMZKf364WZgJmi9WYmhngO0WU8,100000 zkMhdI43u9cc5lKbz7uv47Vf1Olz9dI3fgJhszkoO1c,100000 6nGqQ9dwTpK1lfD_eLv4eEKEfnvpWTbRYUSt27Me39Y,100000 wBAh3OfCGOXslcxVTA1cFVCzhWYsKMj8_cVAXvVqE28,100000 RXVGYTxomNgmYQqn6EzunelYqKfLHqfX-iWMD2hPPUI,100000 WNs9cZgL33BlsJFyy4_Mz6CRigqde96s12JojYF-0Nc,100000 HdW3WSkFcjHOWnVSYfIgyFwQyz89lgNF0NXuPjTrQkE,100000 5110bgT0UBt0_y54Cjcv-GSsImMvgyvQSNyOS1QqwYc,100000 MBXUPwU6VLQjtmTUJ9nQArac-pV62jSu7zG2lSjGUco,100000 97b0_etS4DR5R_MT6aYTrknmxLAP6NDIy48aUYOJWR4,100000 epbg-jmh7P3IqScEIfEDwiEKlz9VKSiHeN4NvhOPwZY,100000 wfi-Dl4Ko6jh-0wzTo_KwmCWStBTb0QXTy0DN2SWYvQ,100000 hCRwbhbZiqGZnx7visaSlXuHOEzMZrRet0qanIpwHW0,100000 l2XiYWbAHo1nzq4Zq-nAYGYiDcKltroCfs1QVrrZaJ4,100000 8CfM6tXFXT3d6yUAnU9ZW_8BcSnSdzLkbV_RMhL9G-U,100000 yk7wGfir_Vm9zZbgydLAi171Uet-OedLlDLc_D16wiM,100000 
Cw8V_sh-5V09dIKcMHawG6YkQNhnkkUuzVz86HnQeek,100000 22ecdIsmZUgDW2NXFcnlyH7Sh457xTtGjq361V6aRVU,100000 dC-zw0_PXRaIMJUj2LLz5DGv0BB3Cr-JYfgr8pU1U5g,100000 z1MhfZjvU15JRBNEAjIr22nQgdqsnObvC_O7VKV5_4g,100000 Zqrt69Q_KBDBKCOLxRUh9ejsIYLuClVQrtRWienr0Ns,100000 KnNyZSjKzVn0fsZXXWSJsOUjASHH33GFwcurQmG1NOw,100000 sJz4cx9yH7BRZ6_rSYSlOwt61PEBa2earbG0_oE7vyE,100000 REbC3bHwvn9VSU6pq4HkWeT8cTxvJXb6jfg8gjXhFtk,100000 RMI44MyRQKQgClDIvkpw82x-mTS9h6JtNH6iBshV55k,100000 OIngBDqOr0vWSH3gjuJnLqTNhTF0rPReTrNVYW-pQuI,100000 e72Na_tuN6FMplJ0HdOL_OJGuKeZ7VIUjk4EEJf8BXY,100000 9oeXwmOGvpnSXTmcQ_hiCASsr0dyaontOLjAwfVIbuY,100000 6Use-agJfTVBXXntJoVVBWgjzRwpjMjyM3FvAAJqc9A,100000 Ds-X9cj4LnDhcply-su13NtMvQsltnJu4aU5Alz-qqA,100000 ejib69qAuALTl1kFg5u2mdi5ipa14tjr0NpwPkXTX-E,100000 NuYOtLfqBK8hxf67umC4StvBpX-k6f0JrqxAfFlMuAs,100000 gGDlNNGlkIg_efzS4lMwJdDoWxRjLCuz-zMMaP-gVBg,100000 FS75Hz7tKCWxPt2AKcC0B4AtZ3gCjPDrX8a0CDEeuVs,100000 lQIPWJEQksUndcEcezS-0XCkHmgTAZaiAJJbFLMq9kQ,100000 M7jOGOrR0abysoVH0pKUrZe_5yxR4cKr_sBL3KRumeo,100000 77Vsh4LkyL-K4mzI-eWm75X1fSgUXnuKoB6qbRVgF1w,100000 MBJ-MNHAyOfrfJzVMkdgMl_DPD9pBu2EjzDoPhU0RjA,100000 dFyiJqXrpcMUwWGp79ikMkq1S5J92zN3VqW7FagwPaQ,100000 cKelbJBpSWfeSZvaqJCSx_tSuYP9CvJlKWDM8QDT5f8,100000 k8TAlm_rnlIDaB55H6HOaE2HF4p-F4sduYoMYfxag8E,100000 thVPhcneZo6bu2tHEONdDfmm92EuBtm7ivp5zN1gAQU,100000 bkv1PSM2lOkoRnsS5BGsBgsTFha6eI76qguTH7Ovxvg,100000 1Nf1j5LRHLsymK8gYiZzG-LAderhpX9pm8kpaiavvjY,100000 shikV1ViLIW_YizxGzMktaTwTCKNHgYzyhFRp3hv7J8,100000 1M-RT-GLlui9NKIwbpECE4toAFhaPZxk50SsV4-IqnU,100000 nxgegMTWC32yBBZQnn7HEoM--QrADFnlk_U3yf5Bkuw,100000 RNv73h6Iax404afhHC5VXsWODK_PQixEBqDpGPZDZnI,100000 xwMz_OILB20jBsXRyy29Wdp-ysZdxtbpOScjO0oCrGk,100000 D8xJu8shDX6zTo2uz8vCv0QFJCaYdarK-HTVsSKH_7Y,100000 KRlZwpMHRf97SmsQGN6oydRIJcjWHEkvx6AxtYXHuYo,100000 Qv1i_AewPEnZpEwKZ37-apjOzASKmJKc0dYuG4wv6iU,100000 LlSUxfTQAlce2ntAz8KHZc17UKfkq63SFLYtEsfeIcg,100000 sXdYMSm5-1m-1J13_Ldp164jibG07q64689wKpCEvnc,100000 wFoh_n1vrDFRNlJb_smUZtNtIvxp5LCHT8v30u94RRo,100000 
GTuT4rtvvEpEyP1O3sg92KnliQUq49jJJQwTk24PWWQ,100000 18CFP6iXdF_idY562dYFLnHQzbm6HiDk1igfZ2XeppM,100000 9wvXjWPg2NhThNXnkTFsWOLTV_5vkT93_stL88Gr7rc,100000 tXqvhW0-hmbfJ-oDloPdgkMHgB3rU8nGG9VNpFQvF8U,100000 fkoCDwklnuwWydLo90AUxsc65AwkfgOo10fbirqEbgY,100000 XE0NUfYzln90a6JbvnAphKfdXxV-67I4ShD--nqqyrg,100000 Sus23I8bcFlxD0x_cLTcSh2u-VpsxEZG2PLwqiZfknQ,100000 o0lF6gAMwXYFXN1W1fXQOOJgEp56vFuW2fq0rcQj6UI,100000 IWTtz_qUZrrkAiC8b_JngEjpfrkd7u-f1_GGaTc_brY,100000 AtTCKy7nKQ7WGhyX_AH1v3bgWYTTy3d-9azSoJOxVck,100000 eH9WH178a72QVO2pKdkHvKHshggotiCIoS8_jpXCeOI,100000 NbJfLHQ-84hYVRajwBteZo3htR7HP31dVdwbkmANwZw,100000 KZAL-LfoGaPCG8z5R5_9Rbb0ajU4l1V3hVMDADgEQ8g,100000 VRoozxUkICQfaYGQMgD0ISZ68lNU18VT1B3Kc6Xr2T4,100000 D7AFGznB703S4r4z0_4Ld3lJxaIUVP86J6O8xlMHy9Y,100000 uCqr3-Wpmg4Vv1qHPLSzxI-q1_PSkijAWUvJml8zmEo,100000 Qa3SaFt9KBIqQTweiHlr7X-mKmiRBeS_m7JhB1Kl7DQ,100000 d_t2FebkpIYKcHPpPYMtr7tii0JJhUH3cWcyHGad29Y,100000 MDlNV-mEAiUPoGhV_mpEVikAMBx4ed-2jNNtzYR3GFI,100000 4e6GZrMbHorUhDbBn3rWtLaArR7PT1AfVmF3HQJZRmw,100000 FRMdoqDMhnOj8ty-YG4pLNJfXkEk8vpRS3JjK9tghiM,100000 HmxMPXS2JI7T3f-8oHeO9m6D4Dz6thU7Ti6D4X5VOv8,100000 xHo51AHw_0KodpfGzESbxebbMv46DxOH4tfhGDj7IkA,100000 B5ISYFdt99-4Tn_DrCDB9jjKTwnevLDrTtPsiblZgxQ,100000 vTP2pznFk1HF4lc2VFbXGcITvcl2TPycNCkMc_S3Nvk,100000 nTHdzcJy9RGNefLrbxHb5UzzRrJSPaW4v80ApzUCdtk,100000 BJvIrWXL4VJh4UanL3K7frafkwaI9DDhFj6WC1mKL_g,100000 8_OXB1ymhgFxvXpvAMKj1a7zLhhFPmLH9fRXqkDBVGg,100000 CHsLZAzMgTYanJPwAO5e5HxNg4EnC-R97k_FOJFb07Q,100000 7uXn3Vf6RM8-nslYmBre6Ou2d7iVzV4o1gcyP6bZTz0,100000 hPQcIcg6kr-bug_zMxHIDNwxy2n862Qw0Yc3f6YSyeQ,100000 VHimOmH-qmo9ceWXRHqTDxiUqEN84itY-HkfqzWuqQo,100000 pfTkWRqMEM6YAHNgLJcWqloFEjchT58qHWf4oCau_Ik,100000 ZxqbDPGECMWd-KwnfgteL0XbfY1UDLN5-Jf86tx8GVg,100000 rEQosEY-vENaKdoeAWAC3UZQ5sUVESk_aQ9BnmWIf_o,100000 SvzrX-2YYFZGR0AUy1v8qwl0JZlDsgspCZsPkDXWcQc,100000 kDhfH3FXFxn9VQnkWvsRQRv8GMo7OZ1kN0I93ojyjS0,100000 PYajxGD-kAg2UA6patL9nhdt6gMutQpONsPtidgJeRo,100000 KqOEJ8jxFzL_LXR7VbPKPpwSRmU2q9cq_1wEWJmRs9A,100000 
9B4yWJ6ym1laiB2VGHWorDOS6QEZ3LsqDJ-p8LDFy3o,100000 oyL6eSytDUG9ql0ZYcVazR9Hg6PU-U66aXlNFPIubLI,100000 b55mBuv2zfo6BeGm_03GIfVR7eiUD9HvNLKQcErwfMM,100000 sTGgxxTCiZvzPIFDhKDEIF6h4Tgx9gAtu8dh6P7FruQ,100000 5BUcvGZ22WSvxYn3oLvPuwAf0ka5fWy7dySvA1e1Q7I,100000 r97SMyj6zFJyJyJYYNlTEZixBKd5uphY7uOZpQE1lyY,100000 55ZKCjJKEFXzAWyual9CKO75CWMS5-jgBrLWVAsE8Tg,100000 p0MCgwBLvO8r4DpW31dDJ0E9pVRBND79eO-0nZeCyGU,100000 0xueTpDRFYCarfItXzG-RBGdif8hzR2UgY16WpJjLlk,100000 tXZjyKczUe80wNEmUnwKScMghCJ7X-0rjkZ8f6zkgVo,100000 ABPXQ3UzGLenVZ1Vq77G27ZETEgxIBDjvwtxt0J7Hy4,100000 umyMMLIsHYPefks5fGhal2XtTIzJmYI-b3VTbtiHZO0,100000 fCuC7AmG_kSADTZG13OSL5tKoYhkoXAjCOD0MHXQ-QM,100000 RwanwykA8PUjubdNvyk8QGf6yNiTD77k2O43x4wDQ4g,100000 l0YKhSrbG7aGx63d0qdxTPHEwG9dtZlJQpMLSHLu6xM,100000 y0EaYTjGpFNkBXmY__yk2XYfJVSL3syiRHUQygY9Nq0,100000 0kJ4SQWyxeOL_JAf0IByaCCVCyWryLhNibRm4ncCHnI,100000 WNPgL6bj3_6YZ1dFViOj8YhaVRqphEyav7KkIbzDq2I,100000 ZzgI5A0fr9B-ryOAxNK8QW42Nm0H4DvnMsDrutdvq3Q,100000 zMqR7fF9OtIxQo-qTYC6bIp3_NSttvaTM6w0AaFazDk,100000 poWvKCvB1Utyaud1vDQ3271fdt0rtlkTrpRBwU02R_E,100000 IgYM9m6MmNUarG4vYjqvSsr9Dw-xpepTElp8_aDSLJA,100000 XeJ9tec0I9YVO0qtl8nXaW3_d4TTuxMes8gH1Je6MvY,100000 iAxWFAKjvtQceYHP4C9eF8itlCK7D_fmHnMWG45egQA,100000 d_TJQHw-Rtiuy9SQ9JxzLtzpEQglCBLS2zXk069qnMU,100000 07yiQCnZddNZsU8DMuiuQw3-Gnm2ixR1JLTDXmWl_So,100000 6h25oYs4eqFi4wZF62N0serXExrIDEVcK-T6GBKhgZM,100000 m5GU-OYyz13CdVqtpUMeIrXtVKdgQ6kfCmwolud8yiw,100000 TA7J9eH2Dga5qtTKPnyEa8JWZ6OVosn_aufHlMeh8WM,100000 secbAR3WR-CMdpYc-Yr6_-qZyChYUGuza99f8It4RdA,100000 p95cgaFUfkHP8Z3CLh4OhVqRl1Ei_J8XBTQjNNHGiP8,100000 bc0k3hjkC7fxbYJu2D_EIoKHBObNKjXQ8GLaIxu4lh4,100000 BIcC1VnKu-1xfb7E-AAMMnx4DJeYeW5dnh1YrQRaYBw,100000 Xwfl80ZJy-ID3yjU72_QM-jr05MoGSn40RT1Mm_1srU,100000 gGCzfdkfR1z-Dc0VHdW4l8-PDG49mQIeGe-XGtrRW1o,100000 fXBOy33AydzrX5HnJ2Q_6AbYGpNn1lw99S8cxFsFY2s,100000 8yLsfCpDCPo68HBkcwBwlHcPyMeH8o7wdQOJLv08xPY,100000 rhs0ycajW96qljThp86I-OrrzpR05wlqRMyjDYbOsNw,100000 Q47PAIDBZBLWv37Zyww9brhn90IWAWBdeygDtiV1Mcw,100000 
53DjCWpsoxDGbGhv7jKzJE8ITGd8e-z1riTkAP4gi8U,100000 Vnbq4gW5HRASqmfrAmqLN6vLd_L0-tqdocnKVdZLNmI,100000 MGNe8vUZmjIgkD0kSTx_gzQMkc7fYzh2QJZX8ioltao,100000 t5XS8PLSeKzL6yz5PiNhL1ck955WXXQjaW0AuW6sAzk,100000 0PhQGxgxwozdTh1aY8_kbVquqr6JF09nZuaY4gdP76M,100000 24YxjBulMcIy0rmC_DxejUeDlbHWXNopjMLu1ZBhnt4,100000 L206EedYLJi9aw1XjdMzKcTHGmsa4qM-SWS2wKOOhJM,100000 7C3V6EOSfHYXPjn0U0Fkp_fhEhQa6g74akCC97dLQ7w,100000 ZDojENNTPqKxgdURnrOPz8gULktlQlG3SEvjQdTWkP0,100000 L4Hn741pXfn11vMiAQ2ustw0gAY2W6CdrNqepYbnVDA,100000 OLXFYAUv1NSt8D9XwfAIECm646XBPeExEQwxIK7VqVU,100000 htteP26LEgvzoTV-WcR20NRzHQjpoB9njCmiZwDcL9w,100000 Xd1aO0BT9fRmvipFP9NgB89Wai2_27fZ7ZAz_NDeBeo,100000 VPPnauxrBGzyVlb4aGu6nGrDmtXH6NbiI1tT6vRfGmg,100000 TdA1iGWvDD6_JMMhK7wNtkSP_0AThQ3FVkjVkRzJZgw,100000 ZJdcjiOPXXS0T1_zgqAPWNlLpc36SJ5P3CvGHzQCoC4,100000 -1U7D-l8DRgqi5nSrgNHH9wkMe6YpsREUv7MJNUZdws,100000 0M3oMvC6T2Lo1nK-KAq60GPVLU_5tB30130nJ4IjLqU,100000 NXULugRS3ty4rKIjZ8hBkViSECwHwBd4cObddrl_PvU,100000 Z2b6fyu1DRlzGzuksiU12RqshrPYiIHpxcHM_y_VYhA,100000 KMHXU7VIUTj37ffZ7iSNjNNku5u2F3yaQUDIYpA3AX4,100000 RQDZjak3dEffaqsNFtWY5g5Y9dvL0UK38zu0um7ETek,100000 JXEAbD62XHZdBJFFD31mkFChxyfhjYx13SSNw5gFWGI,100000 TQK8fBibYFZAxbtk6f8gjIB_et2pAaWo280Sbyxu0N4,100000 k5tQ8mtbCR3aCUUIa2aFFdQhTl6q6wgcdgxXpZ0WPbo,100000 BIPk50bJpRUwq6Wvp3O0wl2BRYCkEt1mczwzOHWxsPI,100000 JnZ2o-2TUgGWn2CyQEEAf8mziUJA0BQEcTH-fMfPARw,100000 o2xeJEezizR68ru1s4Jc82_UtVOuTJrbg8Zm1X8Yps8,100000 wSzMKnXHy-BMGpUsq5mDjHatqRbFdLEmSr6RvWe9Pe0,100000 0fvcYz7tLqs6p1P_yg60MStdvBHJnnbhmSPZqIolDFE,100000 61w7MUruclzCpiaULu4EIryU990E9neE2sloyOLvbVg,100000 q5-ZM9KXk8uYvZMIqIX-M8v0wEmoZygs5jGK49EOdXY,100000 r7jogId7A_OlMLAdBmqp2xhzYzHCwbf35r63OEm_adU,100000 1GHgJuZcl0ydhYz7wZBfnNj0aPOa5n3DudrtdxzW7KM,100000 rXbHmWO3uPLqm4I8x26gOAiW25gOImxer9ywQME1S64,100000 qUOhM-xRMksrk3jt5i8Wrlm068WU1vWwTbclFK_pI18,100000 V20XLd_zPsxjf5WqAddH5cwAVeoi54mbNnP21JGxzbo,100000 hVbSqm-1bnI-G9T3gGtiMmfK7Pf8i0zO23WUxhdMcAc,100000 0IU1RkK7hiM7m9rmnOXwGu_02QhVpyAHwdKXVtswMzw,100000 
icEctMjF5gnqC_NU2HoRqNUUSEvpMIpNOqAjbv9S63g,100000 1njYhiAgEWrihVvRkUH7manm_EpHydYzcQeOC9H3gGM,100000 6JjvYVrgeiigZ-zQ_6dL5UgWxPvBIXIXwMa3blt4VmI,100000 QQ68TqO4M6XN_8IjCUBXcF1XAMxQOOcPcuySz-RkOGY,100000 MqMJDeF9T1_BBtiPjac4SvJMHXchzKLjrbMK8Q5NPIc,100000 ecJJ7u5-wU-iPYTiSbYtHp4x2V2Lbtbb3TtXIyOH1c4,100000 Wj1jovtgWKVVTJ6JGgLjwbb7PxLt3pSRUIXhmNftcAE,100000 euz1BHurar-6EfFzuXHZytg2ftBVO2EMFD28ZQNS_1A,100000 w_Ml47RQ3SmsxcDOUFhLXjSVLwWx83NoyKuJvBACga8,100000 DfSYzyj0Q5zZYSo_g7x7jFrsE2U0J9zakWgKt5JZ2BI,100000 C2q2BdDclzm8jBFLGSBJnufNByFhVGaVXbOGaqfcmm0,100000 1sEWR5zgtUdmWJm_LfwcdeiOaqdokSJhSCKCWLQL5Ag,100000 -bcEnnGAlRCy6dtOYxYFY9lICwLINWRRXXdANkjymic,100000 E0HW-yksKqzEqF2HJ0Fx3EdhaWW0Knpi_LumHsB1hk8,100000 A3mdjlqzXvVbS6kHtUbqe1Byoi-CnFrJG5ZKuef3Ogs,100000 xbIBFBHahAxD0VyC2l0gH7yU4busVGFW6u8qJgX73gc,100000 55xthd91GM9qp2wK9ep9H_ARy2GATioZpwSp3l-qLVY,100000 Fe8SXHp_WeCxJOoZWDRVbOcn3h_XZFAtv3QcsWqu2pY,100000 56Y-NCaqmrn4hADi0KTJTLkJusJJxE3XVcgiotAnniI,100000 lwjZT6foBqwqwCpsc7zdCGhPektWLF9yynvQ4VH6eeo,100000 7c8vGdyTXmeOOVTZwo2J_L2ZDC62DW5Y2K8vTvtwktE,100000 JFrHdlHFcHhdtd8BdXYGAQH5_lx4p6Z9QNxYYMWyt0w,100000 jHxuyJNsrzbPbVxNTazfqWscJPpU1AZvmbl1Oa6_HsI,100000 8SOIdvoHMUx5PkjGLziEAdcFvTF_4s_DHFD3dKoLLhI,100000 WmlCYiDhYiP7bqyndLaV19pJmpuB__0W5SfqMuWuU38,100000 _Tjtr4UmyW9-2gClt5JzqYp0Od2Y7-LTAWfkzbP3ojQ,100000 05lNyHqJVMfOYuFKuB6acm17roptrwJrwxWJUJ33DK8,100000 gE-0R5ix6WQ1-765zivzI_R0YCZTisBsPgG0wdHS9ys,1000000 9_666Wkk2GzL0LGd3xhb0jY7HqNy71BaV4sULQlJsBQ,1000000 NEcMFcdT-aliSrCto8u5uZVfeaIfLyK0HPvOB3pUVDQ,1000000 WxLW1MWiSWcuwxmvzokahENCbWurzvwcsukFTGrqwdw,1000000 kzJ8p6DyaqWFdb62S1JTdUKqxo7cs3B_J5SPK4X2c0w,1000000 ================================================ FILE: genesis_data/hash_list_1_0 ================================================ [File too large to display: 27.0 MB] ================================================ FILE: genesis_data/not_found.html ================================================ Page not found.

This page cannot be found, yet.

You might have to wait for it to be mined into a block.

================================================ FILE: http_iface_docs.md ================================================ # Arweave HTTP Interface Documentation You can run this HTTP interface using Postman [![Postman](https://run.pstmn.io/button.svg)](https://app.getpostman.com/run-collection/8af0090f2db84e979b69) or you can find the documentation [here](https://documenter.getpostman.com/view/5500657/RWgm2g1r). ## GET network information Retrieve the current network information from a specific node. - **URL** `/info` - **Method** GET #### Example Response A JSON object containing the network information for the current node. ```javascript { "network": "arweave.N.1", "version": "3", "height": "2956", "blocks": "3495", "peers": "12" } ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/info'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET full transaction via ID Retrieve a JSON transaction record via the specified ID. - **URL** `/tx/[transaction_id]` - **Method** GET - **URL Parameters** [transaction_id] : Base64 encoded ID associated with the transaction #### Example Response A JSON transaction record. 
```javascript { "id": "VvNF3aLS28MXD_o4Lv0lF9_WcxMibFOp166qDqC1Hlw", "last_tx": "bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is", "owner": "1Q7RfP...J2x0xc", "tags": [], "target": "", "quantity": "0", "data": "3DduMPkwLkE0LjIxM9o", "reward": "1966476441", "signature": "RwBICn...Rxqi54" } ``` ## GET additional info about the transaction via ID - **URL** `/tx/[transaction_id]/status` - **Method** GET - **URL Parameters** [transaction_id] : base64url encoded ID associated with the transaction #### Example Response ```javascript {"block_indep_hash": "KCdtB29b5V0rz2hX_sSGfEd5Fw7iTEiuXp5M34dWPEIdhxPqf3rsNyRFUznAhDzb","block_height":10,"number_of_confirmations":3} ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/tx/VvNF3aLS28MXD_o4Lv0lF9_WcxMibFOp166qDqC1Hlw'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET specific transaction fields via ID Retrieve a string of the requested field for a given transaction. - **URL** `/tx/[transaction_id]/[field]` - **Method** GET - **URL Parameters** [transaction_id] : Base64url encoded ID associated with the transaction [field] : A string containing the name of the data field being requested - **Fields** id | last_tx | owner | target | quantity | data | reward | signature #### Example Response A string containing the requested field. ```javascript "bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is" ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/tx/VvNF3aLS28MXD_o4Lv0lF9_WcxMibFOp166qDqC1Hlw/last_tx'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. 
} }; xhr.send(); ``` ## GET transaction body as HTML via ID Retrieve the data segment of the transaction body decoded from base64url encoding. If the transaction was an archived website then the result will be browser renderable HTML. - **URL** `/tx/[transaction_id]/data.html` - **Method** GET - **URL Parameters** [transaction_id] : Base64url encoded ID associated with the transaction #### Example Response A string containing the requested field. ```javascript "Hello World" ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/tx/B7j_bkDICQyl_y_hBM68zS6-p8-XiFCUmEBaXRroFTM/data.html'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET estimated transaction price Returns an estimated cost for a transaction of the given size. The returned amount is in winston (the smallest division of AR, 1 AR = 1000000000000 winston). The endpoint is pessimistic, it reports the price as if the network difficulty was smaller by one, to account for the possible difficulty change. - **URL** `/price/[byte_size]` - **Method** GET - **URL Parameters** [byte_size] : The size of the transaction's data field in bytes. For financial transactions without associated data, this should be zero. #### Example Response A string containing the estimated cost of the transaction in Winston. ```javascript "1896296296" ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/price/2048'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET block via ID Retrieve a JSON object representing the contents of the block specified via the ID. 
- **URL** `/block/hash/[block_id]` - **Method** GET - **URL Parameters** [block_id] : Base64url encoded ID associated with the block #### Example Response A JSON object detailing the block. ```javascript { "nonce": "c7V-8dLmmqo", "previous_block": "yeCiFpWcguWtWRJnJ_XOKhQXw6xtiOHh-rAw-RjX0YE", "timestamp": 1517563547, "last_retarget": 1517563547, "diff": 8, "height": 30, "hash": "-3-oyxTcYAgbbNoFyDz8hqs7KCJHI4qb4VdER9Jotbs", "indep_hash": "oyxTcYAgbbNoFyDz8hqs7KCJHI4qb4VdER9Jotbs", "txs": [...], "hash_list": [...], "wallet_list": [...], "reward_addr": "unclaimed" } ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/block/hash/oyxTcYAgbbNoFyDz8hqs7KCJHI4qb4VdER9Jotbs';//Use "indep_hash" above, not hash var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET block via height Retrieve a JSON object representing the contents of the block specified via the block height. - **URL** `/block/height/[block_height]` - **Method** GET - **URL Parameters** [block_height] : The height at which the block is being requested #### Example Response A JSON object detailing the block. ```javascript { "nonce": "c7V-8dLmmqo", "previous_block": "yeCiFpWcguWtWRJnJ_XOKhQXw6xtiOHh-rAw-RjX0YE", "timestamp": 1517563547, "last_retarget": 1517563547, "diff": 8, "height": 30, "hash": "-3-oyxTcYAgbbNoFyDz8hqs7KCJHI4qb4VdER9Jotbs", "indep_hash": "oyxTcYAgbbNoFyDz8hqs7KCJHI4qb4VdER9Jotbs", "txs": [...], "hash_list": [...], "wallet_list": [...], "reward_addr": "unclaimed" } ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/block/height/1101'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. 
} }; xhr.send(); ``` ## GET current block Retrieve a JSON object representing the contents of the current block, the network head. - **URL** `/current_block` - **Method** GET #### Example Response A JSON object detailing the block. ```javascript { "nonce": "rihlezm7XAc", "previous_block": "pc-0MvV6lQOWt0O2L3VcSheOfIdymntOBVcloERVbQQ", "timestamp": 1517564276, "last_retarget": 1517564044, "diff": 24, "height": 166, "hash": "mGe34a3DcT8HLE0BfaME38XUelENSjPQA-vcYJG6PGs", "indep_hash": "ntoWN8DMFSuxPsdF8CelZqP03Gr4GahMBXX8ZkyPA3U", "txs": [...], "hash_list": [...], "wallet_list": [...], "reward_addr": "unclaimed" } ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/current_block'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET wallet balance via address Retrieve the balance of the wallet specified via the address. The returned amount is in winston (the smallest division of AR, 1 AR = 1000000000000 winston). - **URL** `/wallet/[wallet_address]/balance` - **Method** GET - **URL Parameters** [wallet_address] : A base64url encoded SHA256 hash of the raw RSA modulus. #### Example Response A string containing the balance of the wallet. ```javascript "1249611338095239" ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/wallet/VukPk7P3qXAS2Q76ejTwC6Y_U_bMl_z6mgLvgSUJIzE/balance'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET last transaction via address Retrieve the ID of the last transaction made by the given address. - **URL** `/wallet/[wallet_address]/last_tx` - **Method** GET - **URL Parameters** [wallet_address] : A Base64 encoded SHA256 hash of the public key. 
#### Example Response A string containing the ID of the last transaction made by the given address. ```javascript "bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is" ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/wallet/VukPk7P3qXAS2Q76ejTwC6Y_U_bMl_z6mgLvgSUJIzE/last_tx'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET transactions made by the wallet Retrieve identifiers of transactions made by the given wallet. - **URL** `/wallet/[wallet_address]/txs/[earliest_tx]` - **Method** GET - **URL Parameters** - [wallet_address] : A Base64 encoded SHA256 hash of the public key. - [earliest_tx] (optional) : A Base64 encoded ID of the earliest transaction to fetch. If not specified, all transactions made by the given wallet are returned. #### Example Response A JSON list of base64url encoded transaction identifiers. ```javascript ["bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is","b23...xg"] ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/wallet/VukPk7P3qXAS2Q76ejTwC6Y_U_bMl_z6mgLvgSUJIzE/txs/bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## GET transactions sent to the given address Retrieve identifiers of transfer transactions depositing to the given wallet. The index is partial - only transactions known by the given node are returned. - **URL** `/wallet/[wallet_address]/deposits/[earliest_deposit]` - **Method** GET - **URL Parameters** - [wallet_address] : A Base64 encoded SHA256 hash of the public key. - [earliest_deposit] (optional) : A Base64 encoded ID of the earliest transaction to fetch. 
If not specified, all deposits known by the node are fetched. #### Example Response A JSON list of base64url encoded transaction identifiers. ```javascript ["bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is","b23...xg"] ``` ## GET node's peer list Retrieve the list of peers held by the contacted node. - **URL** `/peers` - **Method** GET #### Example Response A list containing the IP addresses of all of the node's peers. ```javascript [ "127.0.0.1:1985", "127.0.0.1:1986" ] ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/peers'; var url = node + path; var xhr = new XMLHttpRequest(); xhr.open('GET', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(); ``` ## POST transaction to network Post a transaction to the network. - **URL** `/tx` - **Method** POST #### Data Parameter (Post body) ```javascript { "last_tx": "", // Base64 encoded ID of the last transaction made by this wallet. Empty if this is the first transaction. "owner": "", // The public key making this transaction. "target": "", // Base64 encoded SHA256 hash of recipient's public key. Empty for data transactions. "quantity": "", // Decimal string representation of the amount of sent AR in winston. Empty for data transactions. "data": "", // The Base64 encoded data being stored in the transaction. Empty for transfer transactions. "reward": "", // Decimal string representation of the mining reward AR amount in winston. 
"signature": "" // Base64 encoded signature of the transaction } ``` #### JavaScript Example Request ```javascript var node = 'http://127.0.0.1:1984'; var path = '/tx'; var url = node + path; var xhr = new XMLHttpRequest(); var post = { "id": "VvNF3aLS28MXD_o4Lv0lF9_WcxMibFOp166qDqC1Hlw", "last_tx": "bUfaJN-KKS1LRh_DlJv4ff1gmdbHP4io-J9x7cLY5is", "owner": "1Q7RfP...J2x0xc", "tags": [], "target": "", "quantity": "0", "data": "3DduMPkwLkE0LjIxM9o", "reward": "1966476441", "signature": "RwBICn...Rxqi54" }; xhr.open('POST', url); xhr.onreadystatechange = function() { if(xhr.readyState == 4 && xhr.status == 200) { // Do something. } }; xhr.send(JSON.stringify(post)); ``` > Please note that in the JSON transaction records all winston value fields (quantity and reward) are strings. This is to allow for interoperability between environments that do not accommodate arbitrary-precision arithmetic. JavaScript for instance stores all numbers as double precision floating point values and as such cannot natively express the integer number of winston. Providing these values as strings allows them to be directly loaded into most 'bignum' libraries. # Contact If you have questions or comments on the Arweave HTTP interface you can get in touch by finding us on [Twitter](https://twitter.com/ArweaveTeam/), [Reddit](https://www.reddit.com/r/arweave), [Discord](https://discord.gg/2ZpV8nM) or by emailing us at team@arweave.org. # License The Arweave project is released under GNU General Public License v2.0. See [LICENSE](LICENSE.md) for full license conditions. ================================================ FILE: http_post_unsigned_tx_docs.md ================================================ # Internal HTTP API for generating wallets and posting unsigned transactions ## **Warning** only use it if you really really know what you are doing. These HTTP endpoints are only available if the `internal_api_secret` startup option is set when `arweave-server` is started. 
## Generate a wallet and receive its access code - **URL** `/wallet` - **Method** POST - **Request Headers** * `X-Internal-Api-Secret` : must match `internal_api_secret` #### Example Response An access code which can be used to sign transactions via `POST /unsigned_tx`. ```javascript {"wallet_access_code":"UEhkVh0LBqfIj60-EB-yaDSrMpR2_EytWrY0bGJc_AZaiITJ4PrzRZ_xaEH5KBD4"} ``` ## POST unsigned transaction to the network Post a transaction to be signed and sent to the network. - **URL** `/unsigned_tx` - **Method** POST - **Request Headers** * `X-Internal-Api-Secret` : must match `internal_api_secret` #### Data Parameter (Post body) ```javascript { "last_tx": "", // Base64 encoded ID of the last transaction made by this wallet. "target": "", // Base64 encoded SHA256 hash of recipient's public key. Empty for data transactions. "quantity": "", // Decimal string representation of the amount of sent AR in winston. Empty for data transactions. "data": "", // The Base64 encoded data being stored in the transaction. Empty for transfer transactions. "reward": "", // Decimal string representation of the mining reward AR amount in winston. "wallet_access_code": "" // The wallet access code as returned by the POST /wallet endpoint. } ``` #### Example Response A transaction ID (Base64 encoded hash of the signature). 
```javascript {"id": "F8ITA-zojpRtUNnULnKasJCHL46rcqQBpSyqBekWnF30S7GCd58LcIcOXhYnYL6U"} ``` ================================================ FILE: localnet_snapshot/ar_tx_blacklist/ar_tx_blacklist ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c2a7cfde6e6f8a241eb3437fbaf693fbd70972a87d6568a738779d25d9ded802 size 5464 ================================================ FILE: localnet_snapshot/ar_tx_blacklist/ar_tx_blacklist_offsets ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c2a7cfde6e6f8a241eb3437fbaf693fbd70972a87d6568a738779d25d9ded802 size 5464 ================================================ FILE: localnet_snapshot/ar_tx_blacklist/ar_tx_blacklist_pending_data ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c2a7cfde6e6f8a241eb3437fbaf693fbd70972a87d6568a738779d25d9ded802 size 5464 ================================================ FILE: localnet_snapshot/ar_tx_blacklist/ar_tx_blacklist_pending_headers ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c2a7cfde6e6f8a241eb3437fbaf693fbd70972a87d6568a738779d25d9ded802 size 5464 ================================================ FILE: localnet_snapshot/ar_tx_blacklist/ar_tx_blacklist_pending_restore_headers ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c2a7cfde6e6f8a241eb3437fbaf693fbd70972a87d6568a738779d25d9ded802 size 5464 ================================================ FILE: localnet_snapshot/data_sync_state ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:8d97533661cc29384cc0be8981f8ce15a4f2c54dfb8d3cef3b65bc548063e8b0 size 700090 ================================================ FILE: localnet_snapshot/header_sync_state ================================================ version 
https://git-lfs.github.com/spec/v1 oid sha256:479db92a860a592e92fd62762a0f6de494595fb84f660bcbeea343bfd29bcad0 size 700042 ================================================ FILE: localnet_snapshot/mempool ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b29d129a466afbe559b487da949f24085df7d01826113ddb8888ac8c8b4ec2a5 size 14 ================================================ FILE: localnet_snapshot/peers ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:1dbc103f784fa48be48b9d983a0f8372f901890fd5598f64c8b9781d9b0ee678 size 33460 ================================================ FILE: localnet_snapshot/rocksdb/account_tree_db/000009.sst ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ebe6813b0fbf07c577beb5dd15927c9f3bb2e620c2e1676333c84b612e952494 size 61230933 ================================================ FILE: localnet_snapshot/rocksdb/account_tree_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/account_tree_db/IDENTITY ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:88a6151884275a8da7ba70344f03992cec60c470f6d7624694554faba5a42773 size 36 ================================================ FILE: localnet_snapshot/rocksdb/account_tree_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/account_tree_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fadf666120762b32b66cc7c431dc2764664687a1339987cbaf70a16b275d7fc7 size 231 ================================================ FILE: 
localnet_snapshot/rocksdb/account_tree_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:6219c1ad01d0668b3b27ce37250aa4189f76ca48d1a7ce6b33a45034c65cdad7 size 6270 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_block_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_block_db/IDENTITY ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:d57680e02d7a8475df317b1c8d0def94d5d1b1208494c89afebf6ab18e4a6e91 size 36 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_block_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_block_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e523d8c767e2a5228ebfd0799b0091933e16f003db341e26db9a9c02302f7a7f size 57 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_block_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:eea8aec5435395643c5e1b608482d1d2f06dee1ea91c1f474347c2864faac3a4 size 6274 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_confirmation_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_confirmation_db/IDENTITY 
================================================ version https://git-lfs.github.com/spec/v1 oid sha256:2befe560d21c205b2676ebf59ad5ec735d4d825f3fda70b0fe7fef5146b47864 size 36 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_confirmation_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_confirmation_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e523d8c767e2a5228ebfd0799b0091933e16f003db341e26db9a9c02302f7a7f size 57 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_confirmation_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:848a834e65c92b857647b77f48cc72d749c5ba9f0d848ebd29cee6c7faa0c6af size 6284 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_db/IDENTITY ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:9f6e524b72f97aa94cc2b59980587eed60e330f8c6b66531b52c93f16ba18b82 size 36 ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e523d8c767e2a5228ebfd0799b0091933e16f003db341e26db9a9c02302f7a7f size 57 
================================================ FILE: localnet_snapshot/rocksdb/ar_storage_tx_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:95a37a08373ab6e525c8a19339478495631b6702245a84b29d2e43b6ca8889c6 size 6271 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/000009.sst ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:7432d330dffc80de80aeccacfd4463b33e71d1036b530a019fa6fd8252d1acc1 size 51436345 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/000011.sst ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e283f0cb4d4beeb1e48b39385c2cf5b67ce9bc6fd80b27e99b7046af0f92cafe size 54196169 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/000013.sst ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:4753761abf73e35d6128b6fce94bbb92b52c27ae23098aac9852e7b2939f476f size 54467463 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/000015.sst ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:f96e0c901344179e4ba749c5367e2a2ef4245dfb48629aeeaa671253df432110 size 54400486 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/000017.sst ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:d31f685265ced9aeb4ed7a8370638b3a616d4e668abe3d6a72a5c91886eb8538 size 54383883 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/000019.sst ================================================ version https://git-lfs.github.com/spec/v1 oid 
sha256:b7e85ee62ffddaa2ee725be73b0bfb45ef566cd5b94022ab272a704edb63b2f7 size 53604952 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/IDENTITY ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ac2d876bd5c5af44e498329d3ecd149908db442214b66a2d6673f8d474d5bdc0 size 36 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:feb03baab5b5ea6d5978741dd40403cb012a2b218d4058b058992c8bef1890fa size 1300 ================================================ FILE: localnet_snapshot/rocksdb/block_index_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:618e1352efb16fc5fea6f963556b5c2cb9bc8077296349910526b2bdfc8d5ae5 size 6269 ================================================ FILE: localnet_snapshot/rocksdb/block_time_history_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/block_time_history_db/IDENTITY ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:6210eeb4c995dbafecba96b593bd358bea1d8e634fcb421a8a768dc41b60f5d3 size 36 ================================================ 
FILE: localnet_snapshot/rocksdb/block_time_history_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/block_time_history_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e523d8c767e2a5228ebfd0799b0091933e16f003db341e26db9a9c02302f7a7f size 57 ================================================ FILE: localnet_snapshot/rocksdb/block_time_history_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:8fae387eaa1b0bb58df6b48f4b881f83a4b809caa7e00b7f076af07d13e42803 size 6276 ================================================ FILE: localnet_snapshot/rocksdb/reward_history_db/CURRENT ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0861415cada612ea5834d56e2cf1055d3e63979b69eb71d32ae9ae394d8306cd size 16 ================================================ FILE: localnet_snapshot/rocksdb/reward_history_db/IDENTITY ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:551f7815cfcd974f1a8e3d2122ca307a3975bd34665ea3cc4c46253f8c8c3288 size 36 ================================================ FILE: localnet_snapshot/rocksdb/reward_history_db/LOCK ================================================ ================================================ FILE: localnet_snapshot/rocksdb/reward_history_db/MANIFEST-000004 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e523d8c767e2a5228ebfd0799b0091933e16f003db341e26db9a9c02302f7a7f size 57 ================================================ FILE: localnet_snapshot/rocksdb/reward_history_db/OPTIONS-000007 ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b865b50830e8e1c80dc982ed9a8abc74b3d38636d1540959f211ebfde1d720f8 size 
6272 ================================================ FILE: localnet_snapshot/seed_txs/-B7wF8TF5AodemKM2UjeFySwA_-Q12Ai8z9FSqgIEyA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:94ffc9b1ab8a0cf2617b0b500f12eeb5384d40309e0c9f48ebce6fe0e625daec size 1707 ================================================ FILE: localnet_snapshot/seed_txs/0KMeq830vwvxUUM7RLCwE0ve4i0h_XHugbUTCkPNH-M.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:d74073902bb101a48df99e91b3e51e3aa52e40c3c4418bbc636bcfd05eb34041 size 8120 ================================================ FILE: localnet_snapshot/seed_txs/1QGjyW1AEFlrFAs6VtUcmwOVOEZJjxaBR_z61W9mftI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:7c4046462367bf38fec408330919bcc2bac870c89b1f2a80c064988d14581a08 size 41806 ================================================ FILE: localnet_snapshot/seed_txs/1VknqhhAXRQ6hzeZL-IMVBznTFCdiWcwlXhzpLKS8Zk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:78540758f8d59397f42832666809e9adc102f23a7aacc1a3c5277ea69347676f size 51670 ================================================ FILE: localnet_snapshot/seed_txs/1fzKf0Ygc-z3ejpZ1ZLOiNBYDRzViGRdPLtUqRS1nKY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b207347d0bdfeb570c6a90cf1478efc0ee27191e50e384899d8ae6b589014759 size 801045 ================================================ FILE: localnet_snapshot/seed_txs/2dxNaIAvkAuL_N2qpTGSl7d7rU3Hu7d4l4IkYb9jgDU.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:1444aeea018aa54ae25a5ab789c7261831ab600258a45d6b597b2a9aca4997a0 size 296983 ================================================ FILE: localnet_snapshot/seed_txs/35wYULjhQBiTFh9u-PJz6ki0v7Zi1whk_AhowUt99Ac.json 
================================================ version https://git-lfs.github.com/spec/v1 oid sha256:735798411ac703ff09ef7c5aec0b18366aa8a7cf2d325e7879731ad8e6d391a4 size 1159364 ================================================ FILE: localnet_snapshot/seed_txs/3DSCNJ5H9Hpyy7auT9qG5vom9jHBrCgjs48w_R6iSJg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c9bbf02788b5cd8508d0f685844b7ca6566f6695838c32808d03981b02b77480 size 132658 ================================================ FILE: localnet_snapshot/seed_txs/4QcodvSlgZnuz5uWGmBARsGUJ5XaYORIO5jYM1dTucI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:3a546d575d77afa5a6d9209a69db0293ff943ab4e6d726b151707a749c82be77 size 664907 ================================================ FILE: localnet_snapshot/seed_txs/4tlIV1x4YRWtNMut11ox9SS-lWt3xIzcXnrBBbNxGYs.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:f747c57ca6b2c028f39d54ba6233c5d3d665b3389df53ff810b187978f882be1 size 27894 ================================================ FILE: localnet_snapshot/seed_txs/5iK4mPnFqGdUxpiZmGtTbj7xoSC2una7sjsbUyZkOmM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:5c09b493861a658759bea5a6df826736a3f028d177d033e5a4db1eb725183230 size 1663 ================================================ FILE: localnet_snapshot/seed_txs/7M4KyVB4Wr-Le3Knb7JExgnsXTtG7718JIlhVBNstlE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fc68af7f1348231e72f9e5068c6f44e7cb0199ecbe500b61273e7e856bf0671f size 2956 ================================================ FILE: localnet_snapshot/seed_txs/7fat_nqzDJCTfJMqyEpOcavt1cZNM-tfSzASJd0wrHo.json ================================================ version https://git-lfs.github.com/spec/v1 oid 
sha256:f61fe12e2e9fdb2e30db3b08b782760b855ac7e0b515dcbe4442ab591c6c369e size 1708 ================================================ FILE: localnet_snapshot/seed_txs/8CPVZq-zPdMQ2to1P91vl6XBXyL7sLH8-vNclnOCug8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0cd0c3c13f98f0825f57e63771d2fafb4ded62c242975de6fece8d62111ed4ee size 1703 ================================================ FILE: localnet_snapshot/seed_txs/8TiSScQCv06oS9b8Tt5WBnf7sUVgzPAFGJ3Lq2bt8rY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:f71cbe941da884b7d27aa9433582cd57461bfd3e41083f3ad10d13554cfa1ec4 size 591724 ================================================ FILE: localnet_snapshot/seed_txs/8qtH9T9jgYLHH-xi39w1OCNJykqew1O5qzrDkhAxN0U.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:44490825c7035ddc86b6587b2b07c2bd73a3035211c0bebbe1bc5978b66469d1 size 446843 ================================================ FILE: localnet_snapshot/seed_txs/9hX3cS3Vjr6vAqJW3WtPN665NpLJegcxyaDZO2esElM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ab05537199fe081db35ec7f47e710046a8ce074f3cf102aa2cc60dbd773d0e6e size 205671 ================================================ FILE: localnet_snapshot/seed_txs/A5oMEDa7ZEm1kjPlXpwjuZd40rqP6eo3GobNGQY4HlY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:804e9daef0af59e77e867a43be039c70356a96c9d11d232acf784635b0ac11ae size 94081 ================================================ FILE: localnet_snapshot/seed_txs/Ace-njSprwHMwZaW5nuD0y1lKFoaafU3T8d7PLBeEIA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:77009765d454c8468bb7a27976e3ac100fd69861b9522b2c7db8263ad59ed699 size 155501 ================================================ FILE: 
localnet_snapshot/seed_txs/AoCuo7S7ewDIqhYheBX6AjShrbyTgIv6Fp1AwQgmGqg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fbe83db0824779a860376a54129f584682801fdc86c598d2012fafb24abfe0a8 size 563392 ================================================ FILE: localnet_snapshot/seed_txs/BFfNP1eCeYIkLiWWAVvHNLzk1N2pxkOChFzQbdv1IiA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:924683314a8475ad709de86eef1386be8093b794ff79a6ca0e0290103b37f698 size 110148 ================================================ FILE: localnet_snapshot/seed_txs/B_F4zIV1I5DXM-lR-Ko1tVUTTSmLCOYR7PoY8V8wFas.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:192d0fe4fbec6c56f4a7917fec95c0b3ffac633eeb94e0daf9df4acb0086f891 size 4362 ================================================ FILE: localnet_snapshot/seed_txs/CCH2h2MzMP7WMh0Xf3GYL7zZDbU7E4CZPJWngp1qmDc.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:99e3d5245cf82a4182c33040b90884bcaa8ccb997e91cf70ef324e7f63a4adc4 size 100111 ================================================ FILE: localnet_snapshot/seed_txs/CQv9OVOCzntq2DRqNJ9j_WnWPcsniyGRXpt4i_a8Iy0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a3b5980caf07a41d0c0a52d3142372664338c84b281c01af1ff7c56852dcbd88 size 235225 ================================================ FILE: localnet_snapshot/seed_txs/Cdcx7-UZJN324I9L47rrph9dIVy8RwfJa9mY7cJp9gk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:1b827558452d68e24a47d8181727c79f65dcdbe948c9c76c3152a9cdd35d296c size 1709 ================================================ FILE: localnet_snapshot/seed_txs/D29DVKVYAe74sAj9NBQ351rI6SseWZ5MMsSedGtydS8.json ================================================ version 
https://git-lfs.github.com/spec/v1 oid sha256:6e1fb83d306ad585b99f9f2b5a32cae6775513454bac984e60879f056871a920 size 1709 ================================================ FILE: localnet_snapshot/seed_txs/D_3jwPKLfcTpWcrDV1Q7k3D4sMtyfw7vd45D2C9pUNA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:359ceb534412b628eaf09d962686a828ed9f1e85b6dc20cac89a39e1aca05248 size 15114 ================================================ FILE: localnet_snapshot/seed_txs/DlRct3GdPx7oYi3MSdmv16CgGWqhLJjbrKcIfU0E48I.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a00f713cb17426e67937716085e6bb25fa8fafd37213636dcef66ab0d8758d06 size 569037 ================================================ FILE: localnet_snapshot/seed_txs/EDt8sO0AWKJyNeUxd-U6ihy0rgRKUPjpfRGarEHlOCs.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b88f228cbead2fe25c81b997beef32cc0fcda3062d54938602c8adf352f33836 size 52845 ================================================ FILE: localnet_snapshot/seed_txs/EayO1EsmOinnbi-NVa2V7cVraoI0TZ6xE5-sNU7fc94.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ffe317204936347b6c9adadaa145f50dc4327238b506e2be1c243acfc4c261ad size 1664 ================================================ FILE: localnet_snapshot/seed_txs/F3c9tsVvmCiFNxK7hVEzROraVm477QdyQ8t6afBs5E4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:45a73469f91e4ccf1bede858c5bbba16253e3730e843ca9a16dcddace1ad3b33 size 193135 ================================================ FILE: localnet_snapshot/seed_txs/FIrCkHY8jVkXcIkWYbMpuQSRYxavkOQ3wtUZPwMS1hM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:209109372cf5bb05b373f9d25e60ba01461b810c4011ee1927d91de22633296c size 19518 
================================================ FILE: localnet_snapshot/seed_txs/FbeSRhJR00VPygimhm47VwirSeBATnlf240hv4a2G4E.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b9c90985221f79000d52ff1d8d92fdb30efeb93e42a6d54993778ae61137749f size 403896 ================================================ FILE: localnet_snapshot/seed_txs/G6JD1n-FXMSyTSryo0HoX7L3i7e4KEFK_ekDMEn9Bcg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:199930f740bce0cf3071afa3bfc6005ed99a1cfb122eb8226deaa88501afbe81 size 48987 ================================================ FILE: localnet_snapshot/seed_txs/HOMVwtocaJIRPdCeKgzorJZJq1jw_lVGz0pQ3POj7No.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:5fd3c801bc07c554ae52a1ad5de0f467688c299467f9b8986eb9d78a0b6ad448 size 1630 ================================================ FILE: localnet_snapshot/seed_txs/Hiu5cti9FefwcvT6xRCIoADUMkuDEm_6pZo92CK3fiw.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:8b4605177358b28dcb2dbc677322fc3b91f671ff909aec48be0ac8ceeb96ae41 size 2140 ================================================ FILE: localnet_snapshot/seed_txs/HoEZ6sK46bzTg4Jzrfy1kHFzkFQgI2UMm9pm0qJS3as.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a11dc8f25249f8528f099a6ac30fc98e6af83c1e34c4fb63e0394a348ebe7b41 size 312840 ================================================ FILE: localnet_snapshot/seed_txs/Hs-Yj4ZE9ACfQIjzS8E-qvxSkQALsCIDHwcLEMnlz90.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:1c56de0144ed3087bf505619042b8e7534455f5192cc27b34617fe6e14bcae3c size 2183 ================================================ FILE: localnet_snapshot/seed_txs/Hv0Q5APV6ARfDXDpxI-07R1YFSJAQpxTFh1Z8_nCk3U.json 
================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c6677efcc2da8446061135ed99a646ad9928b4abdc5437850adba8cfb009e760 size 1664 ================================================ FILE: localnet_snapshot/seed_txs/I4ifBnOF6OQFautfisGFTVIn2NsrvqrdnQ-O7JOMouE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:99b44653108cadedfbe9e4a8fdede4d7108b2e4b76a8685e6f96a462e8219df8 size 490304 ================================================ FILE: localnet_snapshot/seed_txs/Ie-fxxzdBweiA0N1ZbzUqXhNI310uDUmaBc3ajlV6YY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:449483524ed5970adddf15c623af03c26a18c98a8cad7936584b045ebe7d4512 size 82093 ================================================ FILE: localnet_snapshot/seed_txs/IeEkQUBq3aE2CSbCF2Bk126lLaLZEYjUPJ_IO601tZg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a70a4aac76f318c991575385a1c92c29fbe41b66d8d752bc0fac836a33960894 size 8671 ================================================ FILE: localnet_snapshot/seed_txs/IqJf6iISeiEj3oof9491-jQX4drDZ92VoFuZqNmoixk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:d80dc01c2f0b5ba26d78ab3b38bf5418333a0fe1b4516c7f9242ec3f6555d843 size 597781 ================================================ FILE: localnet_snapshot/seed_txs/JDG-HBsrHGDodot2clC3nNkRKV5cvuhRWZjCwVFHG_Q.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fbe8502514db37c0cb74fa125a97fe4855bf8bda6d5dce455a7b8f24958d4a80 size 107653 ================================================ FILE: localnet_snapshot/seed_txs/JDS1sGkpC0ua7UGfpLEJSF-jXUnjAs2fa5V7y6rccdY.json ================================================ version https://git-lfs.github.com/spec/v1 oid 
sha256:9bd8fd71df829c78f4c073b5c8a3c16ecd15524324151764e2b5082e5cb3e5a7 size 47947 ================================================ FILE: localnet_snapshot/seed_txs/JNCYRy7XYR_20vvXEAwpT43ovKB23np9yE9cqQfsIJk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:29840c1de7e7faf20e2aeb2280e666bbea5b59ee2eee8797db4cb23987932218 size 237904 ================================================ FILE: localnet_snapshot/seed_txs/JUf6alhhrfuL22XuQ0yrZ6_xBFBIQqi85wRxv2nUCMs.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:5a54d775979915767604824bf0831b48dc68a7b86d544b5fd904a851d9c6f9f7 size 745383 ================================================ FILE: localnet_snapshot/seed_txs/JeP9HaxmjN-TcbCkhKDIQejkGdKTlOgp68O5cy_2GRc.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:11298c590ffa24f41bad18af4b215f3c95eea6f0b6030113757b1fcd3f2390e1 size 3726 ================================================ FILE: localnet_snapshot/seed_txs/JfTiLBj5Gxr1v7JwoNf9-7sRAiLOrg1AZ6kqwSkEpTc.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:4f4016312c3535d0d069c9c64a0416084acb2591818edde842b3fc1768b24eb4 size 1707 ================================================ FILE: localnet_snapshot/seed_txs/Jo3rf0JPJR2kCHBqZG71xouWzuOSY-MXJufpfzFl7sE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fa11392dd22d4342c19dd9442fe5f33c55fdc0bcbf7400b0bf42c61fab7a3961 size 1705 ================================================ FILE: localnet_snapshot/seed_txs/K0w8hOO1oCu4sQipWDQGyEFvn6kAXO-M93neMZmRoUc.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:086357c70964972cdccddeba2831fef1dee26b0b9102260a53ab0d99dc167e24 size 1709 ================================================ FILE: 
localnet_snapshot/seed_txs/Lt7WJclVu4iYHqGHIYIBia3ABMnvmd5cW4ELIzUTfPE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:4c1e510f03bcbe79977929f6edbde08f0433306e199b5d847cc852724a52af9d size 1707 ================================================ FILE: localnet_snapshot/seed_txs/MCCCpl9AGNAzy3WvM5lniJ88iC3-8NPiiWIsxcLZZxQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e6da268a212d3b1169014be995377fa8609710d83c9d70f066c74298cc7ccf2f size 58629 ================================================ FILE: localnet_snapshot/seed_txs/MGDpPk3LsexVpFBF43-FIIvc0vyeEDroYcIONJ6abd0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:8cd364264d81edc69c2b6e6db764bbcac1721b9b7e11c760182be3f84d4a2f1c size 129770 ================================================ FILE: localnet_snapshot/seed_txs/MQD8-8yIZwNC4A006TC1FVZSyCDHeIAN6YpDbTiX2RU.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:2137cd96017ff6ced88f0710cc15c8f4f51357cc2a57bfed546a3c3205fa69a3 size 422204 ================================================ FILE: localnet_snapshot/seed_txs/MklsZ_cDz470C40UGZUJoVfMeVA89-r7SHxuomBeCPM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:637436f7c9bbaa07790f0df80c8691d5ecffee30b338da202bed7655a63689ae size 2140 ================================================ FILE: localnet_snapshot/seed_txs/NBjbIMFIdd6jFhSZ20izEke9Ju8jMuvYl8O4bqe4wC4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:7a87521e4beeff05f23cb323a80c016d4ba5bcb571570551b40191bc54803d80 size 47262 ================================================ FILE: localnet_snapshot/seed_txs/NixeAD5Y_8sQfcrMBWkODQuoXgJouUBmQmQzwTzlaKU.json ================================================ version 
https://git-lfs.github.com/spec/v1 oid sha256:52e7939d520c10400da857b670adc52427cd49d3cd6198501ed70d3822840517 size 310189 ================================================ FILE: localnet_snapshot/seed_txs/OGA55Jyg2c-Jhkx5zDNyiDvbFZiRXF0S_JESMhWAWcs.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:73d30f3dc41adabed66f17b211f927965bf2cb553f7012cefc4a0f030e1ea67f size 782013 ================================================ FILE: localnet_snapshot/seed_txs/P5KQo3QSWLzTLWkq3wgJlii11CEUSKMG_O2NMN6y_8c.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ddcd87e969821488786e4b5f6bd6afae79081b40bf2b2cde5e87b4ae86328bc1 size 244515 ================================================ FILE: localnet_snapshot/seed_txs/PgxqlgdluUGnmGCal3dgB6PYCd5S7FtBpI0zKDc8-AY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:deee61d29fc9f7d0102bab68fd320c5339e0b75b1f6152cf063678ec6fe8d1dc size 1673 ================================================ FILE: localnet_snapshot/seed_txs/QAQ-134At0mSPVrwBzTTUalyL_zqE_dMR_WggkZvF5E.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:165afbfc7b9eb6daf0f6b98b6c0ca462e9150eaeb1b9de33dfb8b4e31a6614bd size 588285 ================================================ FILE: localnet_snapshot/seed_txs/QyQL1TYdwmguUIBjTV-shWqrwS6AwxhZ6lf7Rx-vxH0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:bcd7f96b5fa558417b42671518985abb33920d344a4fa1cf1dc5b1d9944b1832 size 462904 ================================================ FILE: localnet_snapshot/seed_txs/R5utplMYRQsJwA9Y63cL3Na4mXtYzE4gWG6g6zwgEQE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:9ca7e388eababce3a5f8806458451d0bc5add84fe47d5b4810e7c06aad8cc126 size 437080 
================================================ FILE: localnet_snapshot/seed_txs/RJzScDd1IYIVaVOMo8zV2sXaGE4ZtKxwO2ONPFK-ou4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:125b994c2c67dba8dcced8b889e0da7a8cfbe21ab12eed6c86ac0bb2094f89ff size 73529 ================================================ FILE: localnet_snapshot/seed_txs/Re-7lkSGlYP4SFddz0rrXIF0r4MVYZuagjkVpEm79bY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:aee377a00717235783f3840413f7dd74283b91d540dfa8f988053f20900cd4a3 size 115739 ================================================ FILE: localnet_snapshot/seed_txs/TMjINkrJIS3kbGu8bmcVt_34TaFN8lINFQPR_YGzHss.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:d6217030428dbced29da075d2edaaaf1fff0bb39733d35bf9db7c31a8a8b4402 size 8431 ================================================ FILE: localnet_snapshot/seed_txs/TNj-jk-KpKzz84xb1SRiKqyp8LNBnONxA9SIXs3XU7k.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e61661211eeec609e6c7f10d3745d553c4e9ab3a81580cad24ab109d7103e0a4 size 1664 ================================================ FILE: localnet_snapshot/seed_txs/Tg9QZvUPJoAZKRkPhPgQrgnlTY6s9UxRSQaMw6shhOU.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fa57c66befe411dfb5d7f81cc6de8c2aea67daef946118ec7e53956e398afd59 size 47411 ================================================ FILE: localnet_snapshot/seed_txs/U2DZlRhnzhZrC7GsVNX0TxnXbHh03P3g-cU4fkHpiXA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:9aab9de1d36cda0bc42e8a9e754d7eee836770971f523dd7939f96a280578e48 size 24403 ================================================ FILE: localnet_snapshot/seed_txs/U4o0STLxwOEf42F4DF22ooOoA5Ykdp5j_D1io-4w1lc.json 
================================================ version https://git-lfs.github.com/spec/v1 oid sha256:9b7391cc7d5f0d193b89a63890e46b2971192fdd3c3f0c3c47445272215c4353 size 1705 ================================================ FILE: localnet_snapshot/seed_txs/UH3C65dDo62rp5ciK3XzyhufE71xorL7r7MWVwdhavk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:064ecc9e064964d8b4510dd34aef7175b6e6e1d0b36ffa06a3af1d589e55685c size 599103 ================================================ FILE: localnet_snapshot/seed_txs/U_1PPd40n2grpuhkMJcMXPVuJhtaQoUWei63iN2rS7o.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:6c8b14abc39aa771884fdd3c5025ac638908f7c283558189594bf83762c4b03c size 582091 ================================================ FILE: localnet_snapshot/seed_txs/U_UF7e-hOd5uLIj10fYZVxQ5mXyZUxvMxhWWgAMaj0s.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ba26cb1cc80d8db7b9db63894d96cab7d767cf9a87967a67e7beb4b48842c987 size 97395 ================================================ FILE: localnet_snapshot/seed_txs/UbW68tRQtThl9ah8tJb-X_af5M8FHYARiGZFiPGk_90.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:bb1311369a6127ca33e728f643c34aa472cf039c980edd6c94566aeca155fcc1 size 229910 ================================================ FILE: localnet_snapshot/seed_txs/VL10zUkfmLz5eRxQsZi0G5wsfo8mvyN3p82updP17D4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:bb9fd29e85c3433cf4b2624dadf1eb6e6716cb71eebf9158606a68b24e4bdf95 size 116961 ================================================ FILE: localnet_snapshot/seed_txs/ViCjDXb4IEZcXBtlYvTm3HCB6cf4gDbrXCCdvVVgB1g.json ================================================ version https://git-lfs.github.com/spec/v1 oid 
sha256:39be5b6fb52e117de611490872381575997ac467febfef08feedd070c6a4442d size 62537 ================================================ FILE: localnet_snapshot/seed_txs/WJTACYoRG89VIpjzsIZLIy93U7HoC4OJyLy6WAlqv-0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:2a867500ec760f986f4dfb45bcbf9d171b26315448d9405969bf649071552211 size 62430 ================================================ FILE: localnet_snapshot/seed_txs/WwgngUwH7mXX15tdbfcjG_9gX2t8N8wbbfW2N34b3dA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a38dc1d10f8cc0492006c8cc0d6c77134ae85ee7061a2ad66aa06817db6c2a0f size 1740 ================================================ FILE: localnet_snapshot/seed_txs/XrtNbxWFUGlP-SYqQm8aYawQJU6H7CSyHpRZM1iLdKg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:db3b3609ff90166af3b5cdee93b04d916aa0f1709c33f12e84b70eb137814343 size 1673 ================================================ FILE: localnet_snapshot/seed_txs/YMnQwrWWVRmkMs0B41lz-VdixskatlPcY7j4r0iSLbQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e82a596cd35270ffeec53c88b26fe12ab2035e002a25821716a9764804640b23 size 1707 ================================================ FILE: localnet_snapshot/seed_txs/YcTBCg3mLRFByb1cnjrq9DzEBnnOT9jQtfYEE34QZ1M.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:02b1869c5f7c8eb2dfa5688810c619437f95c5bd1e4db38baf3de485543759a0 size 18008 ================================================ FILE: localnet_snapshot/seed_txs/Yk_dta-f75GShvyUvXq132pohaNpiQgerfIKJA0vdCw.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:697a104a1a814b1a76e9467d3f1847d8ad7fcb5f67c9f4f2144f7d8539230e4f size 880158 ================================================ FILE: 
localnet_snapshot/seed_txs/Zu9CSLWidXEnbSAQVuXGk62eMrVAGQb4qHmrtQrOQIQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a3d1710f09dc716c1e40055a163cd9f94f6bace69b2fde63730ce3c1298919e1 size 1567719 ================================================ FILE: localnet_snapshot/seed_txs/_AiF52l4uqTkKOVpQw9hr6l6FCIdWs8PCFtFxEBOopU.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:8145ec8e54bab340bc41a7f2098a0bcb8843d536581a4c6dae49a40dd50ddc17 size 92318 ================================================ FILE: localnet_snapshot/seed_txs/_BN_07s59sawk5e9YcjHTX2qtYX9q7nCBYrlSWXoEsc.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c34f7f121221fe03da2748a459854d51cd188e5758d24f27628a649dc64a1082 size 37967 ================================================ FILE: localnet_snapshot/seed_txs/_KI9ocPARF5JjaDPIbtpqw2hj_qRonw-AERjWOs5ZYM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:931592ec10513c8dcfc1049c060e6e652180c6f80f4851ad144b1744d68af43e size 63409 ================================================ FILE: localnet_snapshot/seed_txs/_gduN41u7Xxac_Gm3pBQI3icoKhOfiRV2TKhDnlyakU.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a08775295f1dbc6aa37aec9afec725e0f488a79380421e1dc1baffb4d7a2e449 size 1708 ================================================ FILE: localnet_snapshot/seed_txs/bcbIZq2gy8ivQiUlEch7tjNoCcUMTTLhInMlj9P2P88.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c0e721cf00c857aa37b6431f4b5ac51fdd6d05c61dcb8b4200a21c36d44efa50 size 1707 ================================================ FILE: localnet_snapshot/seed_txs/bhEMgsj4Yf5tdCDlwK9KpHmsgVLAsBDPOLtYeUDLw0M.json ================================================ version 
https://git-lfs.github.com/spec/v1 oid sha256:3d325fba35ec20330a3ae4aa5a96223b29cf9b72d620fd725bc39e40974d9895 size 119653 ================================================ FILE: localnet_snapshot/seed_txs/cTmKy32Fbmlybl-WtbyuVFNhO11Efr4e_rGbzwAkPbs.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a5db61766b85754dc53d1f99216ffd1bf0849fa003f07a2f64b48b07c7b8bb7f size 1662 ================================================ FILE: localnet_snapshot/seed_txs/clMyhm_qgwUJq68xb8Yf9EEaN3F7jgdqgKnKgjVRom8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:c4da2d2d3434290ee9be01ce0a1711ecb3d255b70b06d05f8b9ec85c8b4799ef size 1802 ================================================ FILE: localnet_snapshot/seed_txs/d8CQoDBSrekoGZXqTatc7Y5JkHtNviX1D3JD-fxFDmU.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:6052eb02f3c62a7ffde324432f6a8fb4c3346e7e13da3b92f98f3001de4b35a1 size 237029 ================================================ FILE: localnet_snapshot/seed_txs/dMTZgKHD-NkP3iM5RjFNhppiwfTlYd-Imi9aA6IK0So.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a7e36e7e3fd45809a677fd240589fbb3bba622c595db57e64c0dac2aea03cc30 size 52845 ================================================ FILE: localnet_snapshot/seed_txs/eGYHUFl46laNa8v_WjdadvCkIErWqmx0hoia7PCSmSw.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b6488a80f2590d3b8edf4cc9fdc11d9050d31a7cc42c09817b4824502da08604 size 63083 ================================================ FILE: localnet_snapshot/seed_txs/ehTWq16I6ixhFOVkpTKi7s4jgYjNzGJ5CoJW3xjHDTE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:084f6c71c7ea0b1c18ce57e172cb4f1093cd40c15e9bb56e0a62db118bca5d18 size 46230 
================================================ FILE: localnet_snapshot/seed_txs/fAnOUj-jmlzPMtIN90ZvowG9VUmBtD36MZ8-tRP1Ut4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:dd8118930526dc47a9bd20094e0f3d98140eccf747b2b015b71013c3c803882f size 227537 ================================================ FILE: localnet_snapshot/seed_txs/fr3nkF8AHXTcq9bT_b7x2X7Mun2A--Ssb7eyoKgQEwI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:29c80411c4080a2ace61e78768dc5184d77bf7b564ebd96fe0b7c1416018375b size 341621 ================================================ FILE: localnet_snapshot/seed_txs/goAmthhGPdbYUqbAymyG_MjBUWVdS9OBm78mOoiITHo.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ea3331aeda40742e1ac214fc9552665d4c02d04ad505d7981b7d156970219209 size 222620 ================================================ FILE: localnet_snapshot/seed_txs/hB1Hj0mfuh_x3ijhqkw1s3wdCh8qdPz_IMs0MPraVCk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:f3abc45056b72be91ad35daa7d5d401c80d97f0af97901c00766ddb4574afeec size 23595 ================================================ FILE: localnet_snapshot/seed_txs/hMfNPSlINViUDVnor18GgPs0Ut0i9XY7dwM9MVOL-2I.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:6f91f182c3f05fec999522be99feeb9b0cbd2e5b5c11bef14cca886654ab0002 size 210027 ================================================ FILE: localnet_snapshot/seed_txs/hPnpcoVcfRdkyUyhYSFNhsEcz7nQU0UU-fPSiRalDvw.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0bc6c7c8e7d94d811754552609b95123547a721173100ff6e464f537967cd7dc size 2358 ================================================ FILE: localnet_snapshot/seed_txs/hQvPcHPcBhyxv7GPx-E3bZWiNBhnCpFIDwWa3XBcYEU.json 
================================================ version https://git-lfs.github.com/spec/v1 oid sha256:bfa76b27dedac10615de43b2d39e344e99117737febb86584903e33a3705e830 size 2140 ================================================ FILE: localnet_snapshot/seed_txs/hXNDNwQ6zA7aHAqvfBj_az9CovV37bJywdgPdb_ooIA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:617057ed3f076f08ee022d8890d416431f8614e6023920aad1391e9505f41337 size 1664 ================================================ FILE: localnet_snapshot/seed_txs/hxyn3yZ7-LCgKqfkCljyM7Hq7HJnmPnEKaXoybXJjHo.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:44b131f73559061b36a7971d474930051f953ee27e364dc7d75de69780e46ab5 size 112922 ================================================ FILE: localnet_snapshot/seed_txs/iWUFDucATDE8gjbsL-9KpOIW9l8Ipsh1wliv4e05xhg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e83e9824c1888e9c9d212eb50493a106169e14bdca32d145dbe3d4de34fd1d20 size 1791 ================================================ FILE: localnet_snapshot/seed_txs/ivWTdg5M9XqjP-Iu4C97r3qZQhotJgfF17g__7EH7VM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:247c6463baa3b0207d09fa7dd89c911af38792e66a8fd44026ce1fa32cfbefd4 size 154760 ================================================ FILE: localnet_snapshot/seed_txs/jOFeroI0Oz4TWcOx8mgv4iOZLv6ncbRXFRtJfqS4Pq0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:73867d7f15dfa0f49cb05735924b239915dc4dda7d3dc6d2a1a382679fff96c3 size 106958 ================================================ FILE: localnet_snapshot/seed_txs/jStDc8gP5lyHVSFIJiT_2RrXhT26GpAhNItDEje07_Y.json ================================================ version https://git-lfs.github.com/spec/v1 oid 
sha256:378a38cc1c634cd6b36fc63b109a64ff288cbc47110f6acaa70eb7aebeba07f4 size 1276655 ================================================ FILE: localnet_snapshot/seed_txs/kLP-8ILxdLSAQsrC6IwvfqQL6Loq2Q6lqOzwrnb6QoE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fe6b658bedba36ff18c087aaa1c5e13002239c69cceadd7ce37f0c5bc59abe01 size 456765 ================================================ FILE: localnet_snapshot/seed_txs/kVNsLH0kpIkFnBBGWxoIajVLSpvzmsKHpsATPAcR86Y.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:dee5c0396408c4e116ad11e3e48f91d7aa5d81e7ce39e71d7e73e9975f216ef3 size 44287 ================================================ FILE: localnet_snapshot/seed_txs/ks0ODNqrNY4CCDxJcrgRY324WykCeTiSH4Tmdi30I2E.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:761cfb280c62c24dd5822373c5ad9a8c41464a963d2745a160d47a916ee32f61 size 1712 ================================================ FILE: localnet_snapshot/seed_txs/lF7NSIz6CNf8WsMNQl8It8HbJem3MAllokozblLdU5A.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fc65713ec75c86594e33fee24f2952751515fa1d82bf0f23a247aae67510f3b3 size 138406 ================================================ FILE: localnet_snapshot/seed_txs/ldoaD2NbG9VRhLOXddM1ypoAU3W5gR_zabUWZa4r6lM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:32994592f5338b3e5a63feb37c7bc5e3d43199b92e456532bf3ae627cd5c11ff size 38813 ================================================ FILE: localnet_snapshot/seed_txs/lxtOUAEj-E1jb6J8uGCRlRgJDHJyFOu0O73jQHnAhpg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a3e112c6544c6b7c407863cef2fee56762e9d4661f70f1ed40b5d9909e586651 size 1707 ================================================ FILE: 
localnet_snapshot/seed_txs/m1DnUoXf7wMtIGkkDZAALobw0GbGehfEMX_jNLvs3i8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:237957c4fb4c278f68dde4fba9707f515adcf4624d80633ece545a2bdb05f9bf size 601967 ================================================ FILE: localnet_snapshot/seed_txs/n1GVITzrvCF95Vz7l6hH7fdYzebDDAJav5z4-9C7lB4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:d633d744a4ef30be5da0811766354dc1a838dc50db0c249eb341ea3bcdee56ac size 234105 ================================================ FILE: localnet_snapshot/seed_txs/ntnx85KcYZ_ZhR6dL2A_p8foCmStgD-69ODoOUdipiA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b73dea3f4d2bf9f3d76b24e5a29bd99d342d9c6085257d001f0e747308585ac6 size 1710 ================================================ FILE: localnet_snapshot/seed_txs/o9ArU5IxydvpJo2iiPI-p4EGBwlpBlyFIfbnz8Qrg6c.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:15971a2d81d62a89b695325157e87213899566eae8d8b91b607d2968357c046a size 1710 ================================================ FILE: localnet_snapshot/seed_txs/oNZMr_dB-L40nSUj6Fc19-FGteHQu7ZaRZu9_mgM1BI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:a9883e0b47d2d05c7cd05d87b554649f8a0f2e1e4fca9390f684fbb4ac93cf29 size 12984 ================================================ FILE: localnet_snapshot/seed_txs/oO7raEVlJC6KhfK-UbNuppzbYPGdKWbh1e6rOymd_-o.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:5e37e35a8b4a22984555475e1837546d533b3e208942c6755f7689ab3b417155 size 52834 ================================================ FILE: localnet_snapshot/seed_txs/oiYeEvWqOkaHzCSunznZ09U_tuHqP1UyZkRrKYHgNBw.json ================================================ version 
https://git-lfs.github.com/spec/v1 oid sha256:697f697aee17f1812c5b618c55feb0ebd5f30c33c273cf7279e6c36f7b675420 size 210533 ================================================ FILE: localnet_snapshot/seed_txs/ojgJyXT8qwRXj1hOVx2gbeJDT0xEOIye0o9EbfU2LRM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:f33643e62bf7ac2fbe1907abb6810597fde7e8dcfdc6bd85c8795024df646089 size 289068 ================================================ FILE: localnet_snapshot/seed_txs/p0MVPvnv_lkWwfhSuSCgQ3NUj83shBffAx1NKPn4oy8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:362939649ac22def7c3861aef513319c0ff8e5767aed61fe706bcb84756b771f size 233689 ================================================ FILE: localnet_snapshot/seed_txs/p4oyXU5C3T0ZycNhEwBZ0MbpV0j3voWV4mr__3fhOek.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:87aa264f34df934e8aa831cdae2605857cd2c722dc44fecde8bd74b047dc39ab size 41774 ================================================ FILE: localnet_snapshot/seed_txs/q8aw85uHTIPxuXcv2Awts4JVVHEMCl7J-61WfnvbYuQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:7803cbb60354e1b0eae8a3de9a95fadfbd0cecebec83dafcce9b5830e820b6dd size 9453 ================================================ FILE: localnet_snapshot/seed_txs/qDEFXj8hSgOuuqWM52y6pbUX1cyp7bS4qItfctgtVx8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:29b39d612e3225b2636d477d9f0c988758a170004ce85d09ee6ef29eaec2119c size 116431 ================================================ FILE: localnet_snapshot/seed_txs/qHvSpQXYh9RZmXIoIOexmDs0iQgjCubl6KSsgg7cDz8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:0b58cc8cb236bdb830ecc7ef7ddd9a6ea4f178d1e2bcfe4a67a13229cac09e9c size 89795 
================================================ FILE: localnet_snapshot/seed_txs/rAARxLc7tOdjUXEdNmSpOtsJIAw0XS229YHO1KOeUqI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e1d00403e69bf3a3d874124b55432c728f28afda9abc02b060dd92bd03a2541e size 18011 ================================================ FILE: localnet_snapshot/seed_txs/rTaanqa6Z5KxtBV4Kj2Fu2KKqAWlstE0JeUbZ3AuN3o.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fcb315ae0caec453a9104c49e1ee1a0575c257b01682d695bc38b07e92f679f7 size 295972 ================================================ FILE: localnet_snapshot/seed_txs/rY4cJeAtYkg3bnTdqk4Vb0ojEcfS76L4B-iqyvQZ2VA.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:ef9b3b9da518ae39edfe3ed37def0834ea5c27ec6dacd8c53926ffc0caf4bdc6 size 67673 ================================================ FILE: localnet_snapshot/seed_txs/rcc-B4OWqf0dbVY7Eq6q3pRDHLUjJ8tix8UeLQ4D68w.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:2fd712b03ef582b919af568fc08ed73b4e6001a855432b164e7e2cd6e22a54f9 size 1673 ================================================ FILE: localnet_snapshot/seed_txs/sEw-yqeADuF0n_M6jTPLrOgH3coalIQHYPLrwM87nmo.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fcf12acbfc1e5f379a8fe46eb78519b9e423d0fd581959708a7cec6c5898ce0d size 266311 ================================================ FILE: localnet_snapshot/seed_txs/sMF6pWIkJFygBbR2IS10liEsjsLAMDja_E9_yUvUgeI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:30f6ad77dac924e92ce3166e8a17dc9162565a8d865bcaf9077175447ba75b2f size 56136 ================================================ FILE: localnet_snapshot/seed_txs/t81tluHdoePSxjq7qG-6TMqBKmQLYr5gupmfvW25Y_o.json 
================================================ version https://git-lfs.github.com/spec/v1 oid sha256:86cc0825c860ca118a5d6c447d86c1c9df5b91084ae2857a2cebcc32657980ee size 800358 ================================================ FILE: localnet_snapshot/seed_txs/tn3FQGSVFt_TE5nyQNpuf_gnHdaWF85hZg1iE5hPQSE.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:af5d6a6b09a7ec5683bb62158ac5c03b27e6e31a2fe64e55baabc864eeae4dfc size 9315 ================================================ FILE: localnet_snapshot/seed_txs/uNiZ8TfAQ8GWjtbqhVi90qO3U5dl9afmKE1-KbHQYM4.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e8e22b7c0b8de9bf80c7bc0b7b38c3b71f0d72abaf1bc606c6265ca6262b7fe6 size 1088960 ================================================ FILE: localnet_snapshot/seed_txs/uOqsnEjVGQCbtrKI7QbHYxbbLUdCKC-792SgZr5KUKM.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:9d716b9fea4d2b7cc34fcd4970b304b794655270def6aec3536aab2f170cad35 size 314852 ================================================ FILE: localnet_snapshot/seed_txs/ujON59jsellR3M8hq9unBPISOwRgEVUogdi3FG_pVMk.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:2f38947eb1a80fb23cd00a67d6b524efd0c406d1227d9e9de9d6d935d1921847 size 220997 ================================================ FILE: localnet_snapshot/seed_txs/uoTzfoaN81h2_JyFkrvXTLFMnoSlWiuc9Yu1CmsFkH0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:e8c830125cce357e14ad500f900e2258e058f4ffd2a5247b8ae9723b430ba398 size 198199 ================================================ FILE: localnet_snapshot/seed_txs/vDtQzZ9jl6r7yzczhoKhvzekCQYx-qskwYdzQO92eWo.json ================================================ version https://git-lfs.github.com/spec/v1 oid 
sha256:85a346aefa8db778071c8fe7594b6f71ac7f1583e032020c019adbf9a7c5241d size 48731 ================================================ FILE: localnet_snapshot/seed_txs/vFP1U-4lk3GypDZFceLvRXjoadcB2FRKrcNQf9WjzpQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:9a1738f9dcb080deb7219d2cf8f8a8e5f8b072dd13c393cfbae4e5784d298886 size 604349 ================================================ FILE: localnet_snapshot/seed_txs/vYnzbcbBQbPQB7GKrXzPlz1MuT9cfnNI_NBVajaTnPg.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:060a759611f6fbacfb6bcaba31232c77049185916fb5fb56048defcdb05fd6c0 size 49122 ================================================ FILE: localnet_snapshot/seed_txs/vvPtX1U0EZS9PMsQBVk3mjD9yS6EHIt0FXdKf2dOELw.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:792c95d15dcfc257d95f16381838f1503979df6f287e9b39547276965a6a2996 size 1664 ================================================ FILE: localnet_snapshot/seed_txs/wntmnG9yRP9aoioRDILKkmSZqdemR-XDCIKJS-wpRYw.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:902879cde871b3796a621efac4526cfa4ff13ec4ba02a27d8e56e84448f64342 size 1664 ================================================ FILE: localnet_snapshot/seed_txs/xCUsF5aatMdiiUAkGjg29_TiQGKqXpbzoMsB0yI-Dd8.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:f2fff45e228ff16c8b4bf623063cecfc832ef840484b25e089288d4de69e27d7 size 1664 ================================================ FILE: localnet_snapshot/seed_txs/xK4fFG-PbnQx6EGmmj1A0JVWQ9Bg7q-FncaU7hHk9ds.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:072a735d4765ea75eb92acffaac2fd0a432dcc0377e327d67a41d086e7dcaef0 size 7682 ================================================ FILE: 
localnet_snapshot/seed_txs/xaB3eS6qbtKSrfFACMcYpgxWRtaJfT1kmOVpyaE45tI.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:efea0a5a20a068648ef387dd36ae4c08f8e0e896b320c6e5c8d8022f801b6e03 size 475974 ================================================ FILE: localnet_snapshot/seed_txs/y9wJkLq6Q0hKSDD67ilFqtMMatw9qpsKM9W2uy2Rfjc.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:43171dc4cbaa683c7ad96827338d10344a4cfacb0e6c11b05635b1b3234efd80 size 141311 ================================================ FILE: localnet_snapshot/seed_txs/ycjvsn3A9cUMjnbDaSUpf1HRQd4duP9AL1YVwSjwuAQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:b80a234c19fde1b4256a6757247d975ae08f7177f386deb8247993d523a21380 size 1661 ================================================ FILE: localnet_snapshot/seed_txs/yo8VtPVXWBpTqLbLL-ZeOmZTW2HTqTzsf9RPzgHM-bQ.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:2e13b6db4a5ddef8e861dbe870c2392bc50504f80b40a6abd58b6562cad0560a size 1708 ================================================ FILE: localnet_snapshot/seed_txs/zNae10gPNkFt5aRVaSL2eSgxZiRDG79B9oDIeYqyzDY.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:6e97d895fc433aa2d811579020d4d1a6b39ed1fc2c7911a4207689f06b05d1a0 size 514966 ================================================ FILE: localnet_snapshot/seed_txs/zavm_CqSq0KuWfc-E0JccEyrrQzjigxt7yuW1ceYjE0.json ================================================ version https://git-lfs.github.com/spec/v1 oid sha256:fcd3606560602d23554cd0b7afc44a8820531ca3ec1c859bb9eec1c2c5001a54 size 67399 ================================================ FILE: nix/README.md ================================================ ## Building Arweave in Nix Easiest way to import arweave as systemd 
service is via flakes:

```nix
{
  inputs.arweave.url = "github:ArweaveTeam/arweave";
  outputs = { self, nixpkgs, arweave }: {
    nixosSystem = nixpkgs.lib.nixosSystem {
      modules = [ arweave.nixosModules."x86_64-linux".arweave ];
    };
  };
}
```

On a non-NixOS system, the package derivation can be accessed and used standalone.

```nix
{
  inputs.arweave.url = "github:ArweaveTeam/arweave";
  outputs = { self, nixpkgs, arweave }:
    let
      system = "x86_64-linux";
      pkgs = import nixpkgs { inherit system; overlays = [ arweave.overlay ]; };
    in {
      # your flake here...
      # pkgs.arweave should exist
    };
}
```

Module extraArgs are also a good way to access pkgs.arweave for overrides if needed:

```nix
{
  inputs.arweave.url = "github:ArweaveTeam/arweave";
  outputs = { self, nixpkgs, arweave }:
    let
      system = "x86_64-linux";
      pkgs = import nixpkgs { inherit system; };
      extraArgs = { inherit pkgs; };
    in {
      nixosSystem = nixpkgs.lib.nixosSystem {
        inherit extraArgs system;
        modules = [ arweave.nixosModules."${system}".arweave ];
      };
    };
}
```

## Using services.arweave

In your configuration.nix you can enable the arweave node as a service. Note that this is limited to NixOS the operating system (as opposed to just Nix the package manager).
```nix
{
  config = {
    services.arweave = {
      enable = true;
      peer = [ "188.166.200.45" "188.166.192.169" "163.47.11.64" ];
      # see more options below
    };
  };
}
```

_A schema of the available options as json_

```json
{
  "dataDir": {
    "defaultValue": "/arweave-data",
    "description": "Data directory path for arweave node.\n",
    "option": "dataDir"
  },
  "enable": {
    "defaultValue": false,
    "description": "Whether to enable arweave node as systemd service.\n",
    "option": "enable"
  },
  "featuresDisable": {
    "defaultValue": [],
    "description": "List of features to disable.\n",
    "option": "featuresDisable"
  },
  "group": {
    "defaultValue": "users",
    "description": "Run Arweave Node under this group.",
    "option": "group"
  },
  "headerSyncJobs": {
    "defaultValue": 10,
    "description": "The pace for which to sync up with historical data.",
    "option": "headerSyncJobs"
  },
  "maxDiskPoolDataRootBufferMb": {
    "defaultValue": 500,
    "description": "Max disk-pool buffer size in mb.",
    "option": "maxDiskPoolDataRootBufferMb"
  },
  "maxMiners": {
    "defaultValue": 0,
    "description": "Max amount of miners to spawn, 0 means no mining will be performed.",
    "option": "maxMiners"
  },
  "maxParallelBlockIndexRequests": {
    "defaultValue": 2,
    "description": "As semaphore, the max amount of parallel block index requests to perform.",
    "option": "maxParallelBlockIndexRequests"
  },
  "maxParallelGetAndPackChunkRequests": {
    "defaultValue": 10,
    "description": "As semaphore, the max amount of parallel get chunk and pack requests to perform.",
    "option": "maxParallelGetAndPackChunkRequests"
  },
  "maxParallelGetChunkRequests": {
    "defaultValue": 100,
    "description": "As semaphore, the max amount of parallel get chunk requests to perform.",
    "option": "maxParallelGetChunkRequests"
  },
  "maxParallelGetSyncRecord": {
    "defaultValue": 2,
    "description": "As semaphore, the max amount of parallel get sync record requests to perform.",
    "option": "maxParallelGetSyncRecord"
  },
  "maxParallelGetTxDataRequests": {
    "defaultValue": 10,
    "description": "As semaphore, the max
amount of parallel get transaction data requests to perform.", "option": "maxParallelGetTxDataRequests" }, "maxParallelPostChunkRequests": { "defaultValue": 100, "description": "As semaphore, the max amount of parallel post chunk requests to perform.", "option": "maxParallelPostChunkRequests" }, "maxParallelWalletListRequests": { "defaultValue": 2, "description": "As semaphore, the max amount of parallel block index requests to perform.", "option": "maxParallelWalletListRequests" }, "metricsDir": { "defaultValue": "/var/lib/arweave/metrics", "description": "Directory path for node metric outputs\n", "option": "metricsDir" }, "package": { "defaultValue": "pkgs.arweave", "description": "The Arweave expression to use\n", "option": "package" }, "peer": { "defaultValue": [], "description": "List of primary node peers\n", "option": "peer" }, "transactionBlacklists": { "defaultValue": [], "description": "List of paths to textfiles containing blacklisted txids\n", "option": "transactionBlacklists" }, "transactionWhitelists": { "defaultValue": [], "description": "List of paths to textfiles containing whitelisted txids\n", "option": "transactionWhitelists" }, "user": { "defaultValue": "arweave", "description": "Run Arweave Node under this user.", "option": "user" } } ``` ================================================ FILE: nix/arweave.nix ================================================ { pkgs, crashDumpsDir ? null, erlangCookie ? null, vcPatches ? 
[ ] }: let gitignoreSrc = fetchFromGitHub { owner = "hercules-ci"; repo = "gitignore.nix"; rev = "a20de23b925fd8264fd7fad6454652e142fd7f73"; sha256 = "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8="; }; inherit (import gitignoreSrc { inherit (pkgs) lib; }) gitignoreFilterWith; inherit (pkgs) stdenv lib beamPackages fetchFromGitHub fetchHex fetchurl; randomx = fetchFromGitHub { owner = "ArweaveTeam"; repo = "RandomX"; rev = "913873c13a2dffb7c4188c39b4eb188f912f523e"; sha256 = "sha256-obxX/b5o/RY46kCtHOhWMFX29jT5y8oigzVLwZRFHgQ="; fetchSubmodules = true; }; buildRebar = beamPackages.buildRebar3.override { openssl = pkgs.openssl_1_1; }; b64fast = buildRebar rec { name = "b64fast"; version = "0.2.2"; beamDeps = [ beamPackages.pc ]; compilePort = true; src = fetchFromGitHub { owner = "arweaveteam"; repo = name; rev = "a0ef55ec66ecf705848716c195bf45665f78818a"; sha256 = "sha256-CSBsTRqkrQWwX7oxPZWERss5Pk0mE1ETe7s4fhZEUaA="; fetchSubmodules = true; }; postBuild = '' env rebar3 pc compile ''; }; erlang-rocksdb = buildRebar rec { name = "erlang-rocksdb"; version = "f580865c0bc18b0302a6190d7fa85e68ec0762e0"; beamDeps = [ beamPackages.pc ]; nativeBuildInputs = [ pkgs.cmake ]; buildInputs = [ pkgs.getconf ]; configurePhase = "true"; src = fetchFromGitHub { owner = "ArweaveTeam"; repo = name; rev = version; sha256 = "sha256-hSqQsLEg1d/WdvwZxYlFbTS8wXdAfkDVddVJT+69nz8="; }; postInstall = '' mv $out/lib/erlang/lib/erlang-rocksdb-${version} $out/lib/erlang/lib/rocksdb-1.6.0 ''; }; meck = buildRebar rec { name = "meck"; version = "0.8.13"; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-008BPBVttRrVfMVWiRuXIOahwd9f4uFa+ZnITWzr6xo="; }; }; rebar3_hex = buildRebar { name = "rebar3_hex"; version = "none"; src = fetchFromGitHub { owner = "erlef"; repo = "rebar3_hex"; rev = "203466094b98fcbed9251efa1deeb69fefd8eb0a"; sha256 = "gVmoRzinc4MgcdKtqgUBV5/TGeWulP5Cm1pTsSWa07c="; fetchSubmodules = true; }; }; geas_rebar3 = buildRebar { name = "geas_rebar3"; version = 
"none"; src = fetchFromGitHub { owner = "crownedgrouse"; repo = "geas_rebar3"; rev = "e3170a36af491b8c427652c0c57290011190b1fb"; sha256 = "ooMalh8zZ94WlCBcvok5xb7a+7fui4/b+gnEEYpn7fE="; }; }; accept = buildRebar rec { name = "accept"; version = "0.3.5"; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-EbGMIgvMLqtjtUcMA47xDrZ4O8sfzbEapBN976WsG7g="; }; }; double-conversion = fetchFromGitHub { owner = "google"; repo = "double-conversion"; rev = "32bc443c60c860eb6b4843533a614766d611172e"; sha256 = "sha256-ysWwhvcVSWnF5HoJW0WB3MYpJ+dvqz3068G/uX9aBlU="; }; jiffy = buildRebar rec { name = "jiffy"; version = "1.0.8"; nativeBuildInputs = with pkgs; [ gnumake pkg-config ]; buildInputs = [ pkgs.gnumake ]; configureFlags = [ "-fno-lto" ]; hardeningDisable = [ "all" ]; src = fetchFromGitHub { owner = "ArweaveTeam"; repo = name; rev = "82792758e61be7d303a11290f859a7b3b20eaf95"; sha256 = "R7kbdMh5wOIN/aA7KFrICjlFAym3OJs9sYWrfdU06GM="; }; patchPhase = '' sed -i -e 's|-compile.*||g' rebar.config rm -rf c_src/double-conversion cp -rf ${double-conversion}/double-conversion c_src/double-conversion chmod -R +rw c_src/double-conversion ''; }; quantile_estimator = buildRebar rec { name = "quantile_estimator"; version = "0.2.1"; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-KCqKMjyiqEXJ5veH0WY0j3dsHUpB7eYwRtctQi49qUY="; }; }; prometheus = buildRebar rec { name = "prometheus"; version = "4.11.0"; buildInputs = [ quantile_estimator ]; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-cZhiNRqr9N9webBdwIXSu8vjrArDAJ6VZnGx1auIJH0="; }; }; prometheus_httpd = buildRebar rec { name = "prometheus_httpd"; version = "2.1.11"; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-C76DFFLP35WIU46y9XCybzDDSK2uXpWn2H81pZELz5I="; }; }; prometheus_cowboy = buildRebar rec { name = "prometheus_cowboy"; version = "0.1.8"; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-uihr7KkwJhhBiJLTe81dxmmmzAAfTrbWr4X/gfP080w="; }; }; 
prometheus_process_collector = buildRebar rec { name = "prometheus_process_collector"; version = "1.6.0"; buildInputs = [ rebar3_archive_plugin rebar3_hex ]; patchPhase = '' rm -rf .git ''; src = fetchFromGitHub { owner = "deadtrickster"; repo = name; rev = "78697537f01a858959a26a9c74db5aad2971b244"; sha256 = "sha256-3Bb4d63JMdexzAI68Q+ASsj4FfNxQ9OUlG41fhFkMds="; }; postInstall = '' mv $out/lib/erlang/lib/prometheus_process_collector-${version}/priv/source.so \ $out/lib/erlang/lib/prometheus_process_collector-${version}/priv/prometheus_process_collector.so ''; }; rebar3_archive_plugin = buildRebar rec { name = "rebar3_archive_plugin"; version = "0.0.2"; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-hMa0F1EdeazKg3WrLHXSD+zG0OK0C/puDz1i3OsyBYQ="; }; }; rebar3_elvis_plugin = buildRebar rec { name = "rebar3_elvis_plugin"; version = "0b7dd1a3808dbe2e2e916ecf3afd1ff24e723021"; src = fetchFromGitHub { owner = "deadtrickster"; repo = name; rev = version; sha256 = "zM3WPLlbi05aYqMR5AhlNejBaPa6/nSIlq6CG7uNBoo="; }; }; cowlib = buildRebar rec { name = "cowlib"; version = "e9448e5628c8c1d9083223ff973af8de31a566d1"; src = fetchFromGitHub { owner = "ninenines"; repo = "cowlib"; rev = version; sha256 = "1j7b602hq9ndh0w3s7jcs923jclmiwfdmbfxaljcra5sl23ydwgf"; }; }; cowboy = buildRebar rec { name = "cowboy"; version = "2.10.0"; buildInputs = [ cowlib rebar3_archive_plugin ranch ]; beamDeps = [ cowlib rebar3_archive_plugin ranch ]; plugins = [ beamPackages.pc ]; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-Ov3Mtxg8xvFDyxTTz1H6AOU9ueyAzc1SVIL16ZvEHWs="; }; }; gun = buildRebar rec { name = "gun"; version = "1.3.3"; beamDeps = [ beamPackages.pc geas_rebar3 rebar3_hex cowlib ]; src = fetchHex { inherit version; pkg = name; sha256 = "sha256-MQbOFn+clyP4SeT7VOpKTYFOOZauJDocgoslbnSQQeA="; }; }; ranch = buildRebar rec { name = "ranch"; version = "1.8.0"; src = fetchFromGitHub { owner = "ninenines"; repo = name; rev = version; sha256 = 
"sha256-9tFgIQU5rhYE0/EY4NKRNrKoCG2xlZCoSvtihDNXyg4="; }; }; stopScript = pkgs.writeTextFile { name = "stop-nix"; text = '' #! ${pkgs.stdenv.shell} -e PATH= ROOT_DIR= PROFILE_DIR= cd $ROOT_DIR export ERL_EPMD_ADDRESS=127.0.0.1 erl -pa $(echo $PROFILE_DIR/lib/*/ebin) \ -noshell \ -config config/sys.config \ -name stopper@127.0.0.1 \ -setcookie arweave \ -s ar shutdown arweave@127.0.0.1 -s init stop ''; }; startScript = pkgs.writeTextFile { name = "start-nix"; text = '' #! ${pkgs.stdenv.shell} -e PATH= ROOT_DIR= PROFILE_DIR= ${if crashDumpsDir == null then "" else "mkdir -p ${crashDumpsDir}"} export ERL_CRASH_DUMP=${if crashDumpsDir == null then "$(pwd)/erl_crash.dump" else "${crashDumpsDir}/erl_crash_$(date \"+%Y-%m-%d_%H-%M-%S\").dump"} ${if erlangCookie == null then "" else "export ERLANG_COOKIE=${erlangCookie}"} cd $ROOT_DIR $ROOT_DIR/bin/check-nofile if [ $# -gt 0 ] && [ `uname -s` == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi : "''${ERL_EPMD_ADDRESS:=127.0.0.1}" export ERL_EPMD_ADDRESS erl +MBas aobf +MBlmbcs 512 +A100 +SDio100 +A100 +SDio100 +Bi \ -pa $(echo $PROFILE_DIR/lib/*/ebin) \ -config $ROOT_DIR/config/sys.config \ -args_file $ROOT_DIR/config/vm.args.dev \ -run ar main $RANDOMX_JIT "$@" ''; }; startScriptForeground = pkgs.writeTextFile { name = "start-nix-foreground"; text = '' #! 
${pkgs.stdenv.shell} -e PATH= ROOT_DIR= PROFILE_DIR= ${if crashDumpsDir == null then "" else "mkdir -p ${crashDumpsDir}"} export ERL_CRASH_DUMP=${if crashDumpsDir == null then "$(pwd)/erl_crash.dump" else "${crashDumpsDir}/erl_crash_$(date \"+%Y-%m-%d_%H-%M-%S\").dump"} ${if erlangCookie == null then "" else "export ERLANG_COOKIE=${erlangCookie}"} cd $PROFILE_DIR $ROOT_DIR/bin/check-nofile if [ $# -gt 0 ] && [ `uname -s` == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi : "''${ERL_EPMD_ADDRESS:=127.0.0.1}" : "''${ERL_EPMD_PATH:=${pkgs.erlang}/bin}" export ERL_EPMD_ADDRESS export ERL_EPMD_PATH export BINDIR=$ROOT_DIR/erts/bin export EMU="beam" export TERM="dumb" BOOTFILE=$(echo $PROFILE_DIR/releases/*/start.boot | sed -e "s/\.boot$//") erlexec -noinput +Bd -boot "$BOOTFILE" \ -config $ROOT_DIR/config/sys.config \ -mode embedded \ +MBas aobf +MBlmbcs 512 +A100 +SDio100 +A100 +SDio100 +Bi -pa $(echo $PROFILE_DIR/lib/*/ebin) \ -args_file $ROOT_DIR/config/vm.args.dev \ -run ar main $RANDOMX_JIT "$@" ''; }; arweaveSources = ../.; sourcesFilter = src: let srcIgnored = gitignoreFilterWith { basePath = src; extraRules = '' .github/* doc ''; }; in path: type: srcIgnored path type; arweaveVersion = "2.7.4"; mkArweaveApp = { installPhase, profile, releaseType }: beamPackages.rebar3Relx { inherit profile releaseType; pname = "arweave-${profile}"; version = arweaveVersion; src = lib.cleanSourceWith { filter = sourcesFilter arweaveSources; src = arweaveSources; name = "arweave-source"; }; patches = vcPatches; plugins = [ pkgs.beamPackages.pc rebar3_archive_plugin rebar3_elvis_plugin ]; doStrip = false; nativeBuildInputs = with pkgs; [ clang-tools cmake pkg-config ]; beamDeps = [ beamPackages.pc geas_rebar3 rebar3_hex b64fast erlang-rocksdb jiffy accept gun ranch cowlib meck cowboy quantile_estimator prometheus prometheus_process_collector prometheus_cowboy prometheus_httpd ]; buildInputs = with pkgs; [ darwin.sigtool erlang git gmp beamPackages.pc ncurses 
which ]; postConfigure = '' rm -rf apps/arweave/lib/RandomX mkdir -p apps/arweave/lib/RandomX cp -rf ${randomx}/* apps/arweave/lib/RandomX cp -rf ${jiffy}/lib/erlang/lib/* apps/jiffy ''; postPatch = '' sed -i -e 's|-arch x86_64|-arch ${pkgs.stdenv.targetPlatform.linuxArch}|g' \ apps/arweave/c_src/Makefile sed -i -e 's|{b64fast,.*|{b64fast, "0.2.2"},|g' rebar.config sed -i -e 's|{meck, "0.8.13"}||g' rebar.config ''; installPhase = '' mkdir -p $out/bin cp -rf ./bin/* $out/bin ${installPhase} # broken symlinks fixup rm -f $out/${profile}/rel/arweave/releases/*/{sys.config,vm.args.src} ln -s $out/config/{sys.config,vm.args.src} $out/${profile}/rel/arweave/releases/*/ rm -f $out/${profile}/lib/arweave/{include,priv,src} ln -s $out/${profile}/rel/arweave/lib/arweave-*/{include,priv,src} $out/${profile}/lib/arweave rm -f $out/${profile}/lib/jiffy/{include,priv,src} ln -s $out/${profile}/rel/arweave/lib/jiffy-*/{include,priv,src} $out/${profile}/lib/jiffy rm -rf $out/${profile}/rel/arweave/lib/jiffy-*/priv cp -rf ${jiffy}/lib/erlang/lib/jiffy-*/priv $out/${profile}/rel/arweave/lib/jiffy-* rm -rf $out/${profile}/rel/arweave/lib/arweave-*/priv cp -rf ./apps/arweave/priv $out/${profile}/rel/arweave/lib/arweave-* ''; }; arweaveTestProfile = mkArweaveApp { profile = "test"; releaseType = "release"; installPhase = '' mkdir -p $out; cp -rf ./_build/test $out cp -r ./config $out ln -s ${meck}/lib/erlang/lib/meck-${meck.version} $out/test/rel/arweave/lib/ ARWEAVE_LIB_PATH=$(basename $(echo $out/test/rel/arweave/lib/arweave-*)) JIFFY_LIB_PATH=$(basename $(echo $out/test/rel/arweave/lib/jiffy-*)) rm -f $out/test/rel/arweave/lib/arweave-* rm -f $out/test/rel/arweave/lib/jiffy-* ln -s $out/test/lib/arweave $out/test/rel/arweave/lib/$ARWEAVE_LIB_PATH ln -s $out/test/lib/jiffy $out/test/rel/arweave/lib/$JIFFY_LIB_PATH ''; }; arweaveProdProfile = mkArweaveApp { profile = "prod"; releaseType = "release"; installPhase = '' mkdir -p $out/bin; cp -rf ./_build/prod $out cp 
${startScript.outPath} $out/bin/start-nix cp ${startScriptForeground.outPath} $out/bin/start-nix-foreground cp ${stopScript.outPath} $out/bin/stop-nix chmod +xw $out/bin/start-nix chmod +xw $out/bin/start-nix-foreground chmod +xw $out/bin/stop-nix sed -i -e "s|ROOT_DIR=|ROOT_DIR=$out|g" $out/bin/start-nix sed -i -e "s|PROFILE_DIR=|PROFILE_DIR=$out/prod/rel/arweave|g" $out/bin/start-nix sed -i -e "s|PATH=|PATH=$PATH:$out/erts/bin|g" $out/bin/start-nix sed -i -e "s|ROOT_DIR=|ROOT_DIR=$out|g" $out/bin/start-nix-foreground sed -i -e "s|PROFILE_DIR=|PROFILE_DIR=$out/prod/rel/arweave|g" $out/bin/start-nix-foreground sed -i -e "s|PATH=|PATH=$PATH:$out/erts/bin|g" $out/bin/start-nix-foreground sed -i -e "s|ROOT_DIR=|ROOT_DIR=$out|g" $out/bin/stop-nix sed -i -e "s|PROFILE_DIR=|PROFILE_DIR=$out/prod/rel/arweave|g" $out/bin/stop-nix sed -i -e "s|PATH=|PATH=$PATH:$out/erts/bin|g" $out/bin/stop-nix cp -r ./config $out ln -s $out/prod/rel/arweave/erts* $out/erts ''; }; in { inherit arweaveTestProfile arweaveProdProfile; arweave = arweaveProdProfile; } ================================================ FILE: nix/generate-config.nix ================================================ { arweaveConfig, pkgs, ... 
}:
# Renders the Arweave node's JSON configuration file from the module options.
# (Body of nix/generate-config.nix; the argument set { arweaveConfig, pkgs, ... }
# is on the preceding line of this dump.)
let
  inherit (pkgs) lib;
  # Drop attributes whose value is null so unset options are omitted from the
  # generated JSON instead of being serialized as "null".
  filterTopLevelNulls = set: let isNotNull = value: value != null; in lib.filterAttrs (name: value: isNotNull value) set;
in
# Write the filtered option set as JSON to a store path named "config.json".
pkgs.writeText "config.json" (builtins.toJSON (filterTopLevelNulls {
  data_dir = arweaveConfig.dataDir;
  log_dir = arweaveConfig.logDir;
  storage_modules = arweaveConfig.storageModules;
  start_from_block_index = arweaveConfig.startFromBlockIndex;
  transaction_blacklists = arweaveConfig.transactionBlacklists;
  transaction_whitelists = arweaveConfig.transactionWhitelists;
  transaction_blacklist_urls = arweaveConfig.transactionBlacklistURLs;
  max_disk_pool_buffer_mb = arweaveConfig.maxDiskPoolBufferMb;
  max_disk_pool_data_root_buffer_mb = arweaveConfig.maxDiskPoolDataRootBufferMb;
  max_nonce_limiter_validation_thread_count = arweaveConfig.maxVDFValidationThreadCount;
  block_pollers = arweaveConfig.blockPollers;
  polling = arweaveConfig.polling;
  tx_validators = arweaveConfig.txValidators;
  disable = arweaveConfig.featuresDisable;
  enable = arweaveConfig.featuresEnable;
  header_sync_jobs = arweaveConfig.headerSyncJobs;
  sync_jobs = arweaveConfig.syncJobs;
  disk_pool_jobs = arweaveConfig.diskPoolJobs;
  debug = arweaveConfig.debug;
  packing_rate = arweaveConfig.packingRate;
  block_throttle_by_ip_interval = arweaveConfig.blockThrottleByIPInterval;
  block_throttle_by_solution_interval = arweaveConfig.blockThrottleBySolutionInterval;
  # HTTP handler concurrency limits, keyed by endpoint group.
  semaphores = {
    get_chunk = arweaveConfig.maxParallelGetChunkRequests;
    get_and_pack_chunk = arweaveConfig.maxParallelGetAndPackChunkRequests;
    get_tx_data = arweaveConfig.maxParallelGetTxDataRequests;
    post_chunk = arweaveConfig.maxParallelPostChunkRequests;
    get_block_index = arweaveConfig.maxParallelBlockIndexRequests;
    get_wallet_list = arweaveConfig.maxParallelWalletListRequests;
    get_sync_record = arweaveConfig.maxParallelGetSyncRecord;
    # Fixed limits with no corresponding module option.
    arql = 10;
    gateway_arql = 10;
  };
  requests_per_minute_limit = arweaveConfig.requestsPerMinuteLimit;
  max_connections = arweaveConfig.maxConnections;
  requests_per_minute_limit_by_ip =
lib.lists.foldr (ipObj: acc: acc // { "${ipObj.ip}" = { chunk = ipObj.chunkLimit; data_sync_record = ipObj.dataSyncRecordLimit; default = ipObj.defaultLimit; }; }) { } arweaveConfig.requestsPerMinuteLimitByIp; }))


================================================
FILE: nix/module.nix
================================================
# NixOS module: runs an Arweave node as a systemd service.
{ config, lib, pkgs, ... }:
let
  cfg = config.services.arweave;
  # Build the Arweave package with the service's crash-dump dir, cookie and patches.
  arweavePkg = (pkgs.callPackage ./arweave.nix {
    inherit pkgs;
    crashDumpsDir = cfg.crashDumpsDir;
    erlangCookie = cfg.erlangCookie;
    vcPatches = cfg.patches;
  }).arweave;
  # JSON config rendered from the module options (see ./generate-config.nix).
  generatedConfigFile = "${import ./generate-config.nix { arweaveConfig = cfg; inherit pkgs; }}";
  # Wrapper script used as ExecStart: launches the node in the foreground,
  # waits for the BEAM VM to come up, and cleans up epmd on termination.
  arweave-service-start =
    let
      command = "${cfg.package}/bin/start-nix-foreground config_file ${cfg.configFile}";
      peers = "${builtins.concatStringsSep " " (builtins.concatMap (p: [ "peer" p ]) cfg.peer)}";
      vdf-peers = "${builtins.concatStringsSep " " (builtins.concatMap (p: [ "vdf_client_peer" p ]) cfg.vdfClientPeer)}";
      vdf-server-peers = "${builtins.concatStringsSep " " (builtins.concatMap (p: [ "vdf_server_trusted_peer" p ]) cfg.vdfServerTrustedPeer)}";
    in pkgs.writeScriptBin "arweave-start" ''
      #!${pkgs.bash}/bin/bash

      # Function to handle termination and cleanup
      cleanup() {
        echo "Terminating erl and killing epmd..."
        kill $(${pkgs.procps}/bin/pgrep epmd) || true
        exit 0
      }

      # Set up a trap to call the cleanup function when the script is terminated
      trap cleanup INT TERM

      ${command} ${peers} ${vdf-peers} ${vdf-server-peers} &
      ARWEAVE_ERL_PID=$! # capture PID of the background process

      # Wait up to ~200s for the BEAM VM to appear. Fixed: the previous check
      # compared "$(ps ... &> /dev/null)" -eq 0, but the substitution captures
      # output that was redirected away, so it always compared an empty string
      # and the loop exited immediately. Use the exit status of ps instead.
      i=0
      until ${pkgs.procps}/bin/ps -C beam &> /dev/null || [[ "$i" -ge "200" ]]
      do
        sleep 1
        i=$((i+1))
      done
      if [[ "$i" -ge "200" ]]; then
        echo "beam process failed to start"
        # NOTE(review): exits 0 even on startup failure, so systemd does not
        # mark the unit failed -- confirm this is intentional.
        exit 0
      fi
      echo "beam process started..."

      wait $ARWEAVE_ERL_PID || true

      # After the launcher exits, give the BEAM VM up to 30s to disappear
      # before running cleanup. Fixed: same command-substitution bug as above,
      # and use the pinned procps ps for consistency with the first loop.
      counter=0
      until ! ${pkgs.procps}/bin/ps -C beam &> /dev/null || [[ $counter -ge 30 ]]
      do
        sleep 1
        let counter++
      done
      cleanup
    '';
in
{
  options.services.arweave = import ./options.nix {
    inherit lib;
    defaultArweaveConfigFile = generatedConfigFile;
    defaultArweavePackage = arweavePkg;
  };
  config = lib.mkIf cfg.enable {
    # Explicitly disable any legacy screen-based unit.
    systemd.services.arweave-screen = { enable = false; };
    systemd.services.arweave = {
      after = [ "network.target" ];
      serviceConfig.Type = "simple";
      serviceConfig.ExecStart = "${arweave-service-start}/bin/arweave-start";
      serviceConfig.TimeoutStartSec = "60";
      serviceConfig.ExecStop = "${pkgs.bash}/bin/bash -c '${cfg.package}/bin/stop-nix || true; ${pkgs.procps}/bin/pkill beam || true; sleep 15'";
      serviceConfig.TimeoutStopSec = "120";
      serviceConfig.RestartKillSignal = "SIGINT";
    };
  };
}


================================================
FILE: nix/options.nix
================================================
{ lib, defaultArweaveConfigFile ? null, defaultArweavePackage ?
null }:
# Option declarations for services.arweave (imported by ./module.nix, which
# supplies the generated config file and the built package as defaults).
let
  # NOTE(review): mkOptionals and mkForceOption are inherited but never used
  # below and do not look like standard nixpkgs lib functions -- confirm.
  inherit (lib) mkEnableOption literalExpression mkOption mkOptionals mkForce mkForceOption types;
in
{
  enable = mkEnableOption '' Enable arweave node as systemd service '';
  # Primary peers; typed nonEmptyListOf even though the default is empty.
  peer = mkOption { type = types.nonEmptyListOf types.str; default = [ ]; example = [ "http://domain-or-ip.com:1984" ]; description = '' List of primary node peers ''; };
  vdfServerTrustedPeer = mkOption { type = types.listOf types.str; default = [ ]; example = [ "http://domain-or-ip.com:1984" ]; description = '' List of trusted peers to fetch VDF outputs from ''; };
  vdfClientPeer = mkOption { type = types.listOf types.str; default = [ ]; example = [ "http://domain-or-ip.com:1984" ]; description = '' List of peers to serve VDF updates to ''; };
  package = mkOption { type = types.package; default = defaultArweavePackage; defaultText = literalExpression "pkgs.arweave"; example = literalExpression "pkgs.arweave"; description = '' The Arweave expression to use ''; };
  dataDir = mkOption { type = types.path; default = "/arweave-data"; description = '' Data directory path for arweave node. ''; };
  logDir = mkOption { type = types.path; default = "/var/lib/arweave/logs"; description = '' Logging directory path. ''; };
  crashDumpsDir = mkOption { type = types.path; default = "/var/lib/arweave/dumps"; description = '' Crash dumps directory path. ''; };
  erlangCookie = mkOption { type = with types; nullOr str; default = null; description = '' Erlang cookie for distributed erlang. ''; };
  # Each entry is "start,size,packing" as accepted by the node's CLI/config.
  storageModules = mkOption { type = types.listOf types.str; default = [ ]; example = [ "0,1000000000000,unpacked" "1,1000000000000,unpacked" ]; description = '' List of configured storage modules.
''; };
  startFromBlockIndex = mkOption { type = types.bool; default = false; description = "If set, starts from the locally stored state."; };
  debug = mkOption { type = types.bool; default = false; description = "Enable debug logging."; };
  user = mkOption { type = types.str; default = "arweave"; description = "Run Arweave Node under this user."; };
  group = mkOption { type = types.str; default = "users"; description = "Run Arweave Node under this group."; };
  transactionBlacklists = mkOption { type = types.listOf types.str; default = [ ]; example = [ "/user/arweave/blacklist.txt" ]; description = '' List of paths to textfiles containing blacklisted txids and/or byte ranges ''; };
  transactionBlacklistURLs = mkOption { type = types.listOf types.str; default = [ ]; example = [ "http://example.org/blacklist.txt" ]; description = '' List of URLs of the endpoints serving blacklisted txids and/or byte ranges ''; };
  transactionWhitelists = mkOption { type = types.listOf types.str; default = [ ]; example = [ "/user/arweave/whitelist.txt" ]; description = '' List of paths to textfiles containing whitelisted txids ''; };
  maxDiskPoolBufferMb = mkOption { type = types.int; default = 2000; description = "Max disk-pool buffer size in mb."; };
  maxDiskPoolDataRootBufferMb = mkOption { type = types.int; default = 500; description = "Max disk-pool data-root buffer size in mb."; };
  blockPollers = mkOption { type = types.int; default = 10; description = "The number of block polling jobs."; };
  polling = mkOption { type = types.int; default = 2; description = "The frequency of block polling, in seconds."; };
  # Fixed: these two options previously had empty description strings; the text
  # below is derived from the option names and the config keys they map to
  # (block_throttle_by_ip_interval / block_throttle_by_solution_interval).
  blockThrottleByIPInterval = mkOption { type = types.int; default = 1000; description = "Throttling interval, in milliseconds, applied to blocks received from the same IP address."; };
  blockThrottleBySolutionInterval = mkOption { type = types.int; default = 2000; description = "Throttling interval, in milliseconds, applied to blocks received with the same solution hash."; };
  txValidators = mkOption { type = types.int; default = 10; description = "The number of transaction validation jobs."; };
  packingRate = mkOption { type = types.int; default = 30; description =
"The maximum number of chunks the node will pack per second."; };
  featuresDisable = mkOption { type = types.listOf types.str; default = [ ]; example = [ "packing" ]; description = '' List of features to disable. ''; };
  featuresEnable = mkOption { type = types.listOf types.str; default = [ ]; example = [ "repair_rocksdb" ]; description = '' List of features to enable. ''; };
  headerSyncJobs = mkOption { type = types.int; default = 10; description = "The pace for which to sync up with historical headers."; };
  syncJobs = mkOption { type = types.int; default = 10; description = "The pace for which to sync up with historical data."; };
  diskPoolJobs = mkOption { type = types.int; default = 50; description = "The number of disk pool jobs to run."; };
  # The following options feed the "semaphores" section of the generated config.
  maxParallelGetChunkRequests = mkOption { type = types.int; default = 100; description = "As semaphore, the max amount of parallel get chunk requests to perform."; };
  maxParallelGetAndPackChunkRequests = mkOption { type = types.int; default = 10; description = "As semaphore, the max amount of parallel get chunk and pack requests to perform."; };
  maxParallelGetTxDataRequests = mkOption { type = types.int; default = 10; description = "As semaphore, the max amount of parallel get transaction data requests to perform."; };
  maxParallelPostChunkRequests = mkOption { type = types.int; default = 100; description = "As semaphore, the max amount of parallel post chunk requests to perform."; };
  maxParallelBlockIndexRequests = mkOption { type = types.int; default = 2; description = "As semaphore, the max amount of parallel block index requests to perform."; };
  # Fixed: description previously said "block index requests" -- a copy-paste
  # of the option above; this option limits wallet list requests.
  maxParallelWalletListRequests = mkOption { type = types.int; default = 2; description = "As semaphore, the max amount of parallel wallet list requests to perform."; };
  maxParallelGetSyncRecord = mkOption { type = types.int; default = 2; description = "As semaphore, the max amount of parallel get sync record requests to perform."; };
  maxVDFValidationThreadCount = mkOption { type =
with types; nullOr int; default = null; description = '' The number of threads to use for VDF validation. Note that the default value (null) defaults in runtime to `max(1, (erlang:system_info(schedulers_online) div 2))).` ''; };
  requestsPerMinuteLimit = mkOption { type = types.int; default = 2500; description = "A rate limiter to prevent the node from receiving too many http requests over 1 minute period."; };
  # Per-IP overrides; generate-config.nix folds these into a map keyed by ip,
  # with chunk / data_sync_record / default limits per entry.
  requestsPerMinuteLimitByIp = mkOption { type = types.listOf (types.submodule { options = { ip = mkOption { type = types.str; description = '' ip address of client to rate limit ''; }; chunkLimit = mkOption { type = types.int; description = '' rate of chunk data requests over 1 minute period to limit ''; }; dataSyncRecordLimit = mkOption { type = types.int; description = '' rate of sync_data_record requests over 1 minute period to limit ''; }; defaultLimit = mkOption { type = types.int; description = '' the default rate of requests over 1 minute period to limit ''; }; }; }); default = [ ]; description = "A rate limiter to prevent the node from receiving too many http requests over 1 minute period."; };
  maxConnections = mkOption { type = types.int; default = 1024; description = "Maximum allowed TCP connections."; };
  configFile = mkOption { type = types.path; default = defaultArweaveConfigFile; description = "The generated Arweave config file"; };
  # Applied to the source checkout before building (see vcPatches in module.nix).
  patches = mkOption { type = types.listOf (types.either types.path types.str); default = [ ]; example = [ "https://example.com/patch1" ]; description = '' List of paths to apply to version control system before building from sources ''; };
}


================================================
FILE: notebooks/README.md
================================================
# Jupyter Erlang Notebooks

This directory contains Jupyter notebooks using the Erlang kernel for interactive Arweave tests.
## Prerequisites - Python 3 with venv + pip - Erlang kernel for Jupyter Quick setup: ``` scripts/setup_notebook_env.sh ``` The setup script writes a local kernelspec under `.tmp/jupyter` and configures it to call `ierl_kernel.sh` from the `scripts/` directory (added to PATH by the runner scripts). On kernel startup, the wrapper compiles the localnet profile (if needed) and adds `_build/localnet/lib` via `ERL_LIBS`, so local modules are available in the notebooks. ## Run headless ``` scripts/run_notebook_headless.sh pricing_transition_localnet ``` ## Run interactive ``` scripts/run_notebook.sh ``` You can open specific notebooks after jupyter launches. ## Notebook outputs When using the repo scripts, notebooks are configured to strip cell outputs on save. To keep outputs in the file for a session, set `NOTEBOOK_SAVE_OUTPUTS=1` before launching the notebook (interactive or headless). ## Environment variables - `ERLANG_JUPYTER_KERNEL`: kernelspec name (default `erlang`) - `IERL_URL`: download URL for the ierl escript - `IERL_PATH`: local path for the ierl escript (default `.tmp/ierl`) - `JUPYTER_DATA_DIR`: where kernelspecs live (default `.tmp/jupyter`) - `JUPYTER_CONFIG_DIR`: where Jupyter config lives (default `.jupyter`) - `JUPYTER_PORT`: interactive server port (default `8888`) - `JUPYTER_OPEN_BROWSER`: set to `true` to open a browser (default `true`) - `EXEC_TIMEOUT_SEC`: nbconvert execution timeout in seconds (default `1200`) - `LOCALNET_NODE_NAME`: shortname only (default `main-localnet`). If you pass `name@host`, the host is used for RPC node selection, but shortnames are still used for local nodes. - `NOTEBOOK_SKIP_COMPILE`: set to `1` to skip compile on kernel startup. 
================================================ FILE: notebooks/autoredenomination_localnet.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "3c79455f", "metadata": {}, "source": [ "# Autoredenomination in localnet\n", "\n", "## Overview\n", "\n", "There is a mechanism in place where Arweave may redenominate its token: every balance, price, and reward\n", "is multiplied by 1000 once the circulating supply crosses a protocol-defined threshold.\n", "\n", "This notebook drives a full redenomination cycle on a running localnet node and validates\n", "that every observable quantity transitions correctly:\n", "\n", "1. **Setup** – connect to the node, compile record accessors, define HTTP and utility helpers.\n", "2. **Pre-redenomination** – mine past the pricing transition, load the mining wallet,\n", " and override redenomination parameters so the cycle triggers quickly.\n", "3. **Trigger** – submit transactions to push the reward pool past the\n", " threshold, mine the trigger block, and assert block reward and endowment pool updates.\n", "4. **Redenomination** – mine through the scheduled redenomination height, verify the\n", " denomination increments, all HTTP pricing/wallet endpoints scale by 1000×, and\n", " block rewards and endowment pool updates follow the expected formula across the boundary.\n", "5. **Post-redenomination** – validate every HTTP endpoint independently at the new\n", " denomination.\n", "6. **Cleanup** – restore overridden parameters." ] }, { "cell_type": "markdown", "id": "a8b745a8", "metadata": {}, "source": [ "## Setup" ] }, { "cell_type": "markdown", "id": "cf75077e", "metadata": {}, "source": [ "### Connect to the localnet node\n", "\n", "Starts a distributed Erlang node with long names, sets the cookie to `localnet`, and pings `main-localnet@127.0.0.1` to confirm connectivity." 
] }, { "cell_type": "code", "execution_count": 1, "id": "46391a9c", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "true\n" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Cookie = 'localnet',\n", "Node = 'main-localnet@127.0.0.1',\n", "\n", "HostHasDot =\n", "\tcase string:split(atom_to_list(node()), \"@\") of\n", "\t\t[_Name, Host] ->\n", "\t\t\tcase string:find(Host, \".\") of\n", "\t\t\t\tnomatch ->\n", "\t\t\t\t\tfalse;\n", "\t\t\t\t_ ->\n", "\t\t\t\t\ttrue\n", "\t\t\tend;\n", "\t\t_ ->\n", "\t\t\tfalse\n", "\tend,\n", "\n", "_ =\n", "\tcase {node(), HostHasDot} of\n", "\t\t{nonode@nohost, _} ->\n", "\t\t\tnet_kernel:start([list_to_atom(\"redenom_notebook@127.0.0.1\"), longnames]);\n", "\t\t{_, true} ->\n", "\t\t\tok;\n", "\t\t{_, false} ->\n", "\t\t\tnet_kernel:stop(),\n", "\t\t\tnet_kernel:start([list_to_atom(\"redenom_notebook@127.0.0.1\"), longnames])\n", "\tend,\n", "\n", "erlang:set_cookie(node(), Cookie)." ] }, { "cell_type": "code", "execution_count": 2, "id": "269375fb", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "{'main-localnet@127.0.0.1',pong}\n" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "{Node, net_adm:ping(Node)}." 
] }, { "cell_type": "markdown", "id": "484f8d02", "metadata": {}, "source": [ "### RPC and mining helpers" ] }, { "cell_type": "markdown", "id": "766529a1", "metadata": {}, "source": [ "- `RPCCall(M, F, A)` -- calls `M:F(A)` on the remote node.\n", "- `RPCHeight()` -- returns the current block height.\n", "- `MineUntilHeight(H)` -- asks localnet to mine up to height H and polls until the node reaches it.\n", "- `RPCBlockHashByHeight(H)`, `RPCBlockByHeight(H)` -- fetch block hash/record from the remote node's storage." ] }, { "cell_type": "code", "execution_count": 3, "id": "4135962d", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#Fun\n" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RPCCall = fun(M, F, A) -> rpc:call(Node, M, F, A) end,\n", "RPCHeight = fun() -> RPCCall(ar_node, get_height, []) end,\n", "\n", "WaitForHeight =\n", "\tfun\n", "\t\t(_, _TargetHeight, 0) ->\n", "\t\t\t{error, mine_until_height_timeout};\n", "\t\t(Self, TargetHeight, AttemptsLeft) ->\n", "\t\t\tcase RPCHeight() >= TargetHeight of\n", "\t\t\t\ttrue ->\n", "\t\t\t\t\tok;\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\ttimer:sleep(100),\n", "\t\t\t\t\tSelf(Self, TargetHeight, AttemptsLeft - 1)\n", "\t\t\tend\n", "\tend,\n", "\n", "MineUntilHeight =\n", "\tfun(TargetHeight) ->\n", "\t\tMineResult = RPCCall(ar_localnet, mine_until_height, [TargetHeight]),\n", "\t\tok =\n", "\t\t\tcase MineResult of\n", "\t\t\t\tok ->\n", "\t\t\t\t\tok;\n", "\t\t\t\t[] ->\n", "\t\t\t\t\tok;\n", "\t\t\t\tOther ->\n", "\t\t\t\t\t{error, {unexpected_mine_until_height_result, Other}}\n", "\t\t\tend,\n", "\t\tWaitForHeight(WaitForHeight, TargetHeight, 1200)\n", "\tend." 
] }, { "cell_type": "code", "execution_count": 4, "id": "a2069d05", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#Fun\n" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RPCBlockHashByHeight =\n", "\tfun(Height) ->\n", "\t\tRPCCall(ar_block_index, get_element_by_height, [Height])\n", "\tend,\n", "\n", "RPCBlockByHeight =\n", "\tfun(Height) ->\n", "\t\tHash = RPCBlockHashByHeight(Height),\n", "\t\tRPCCall(ar_storage, read_block, [Hash])\n", "\tend." ] }, { "cell_type": "markdown", "id": "e95d49fe", "metadata": {}, "source": [ "### Record accessor modules" ] }, { "cell_type": "markdown", "id": "de137927", "metadata": {}, "source": [ "Compiles accessor modules at runtime so the notebook can read Erlang record fields:\n", "- `nb_block` -- accessors for `#block{}` fields (height, denomination, reward_pool, reward, etc.).\n", "- `nb_config` -- accessor for `#config.mining_addr`.\n", "- `nb_tx` -- accessors for `#tx{}` fields (reward, denomination, id).\n", "- `nb_pricing` -- exposes `?TOTAL_SUPPLY`, `?GiB`, and computes miner/endowment fee shares using `?MINER_FEE_SHARE`." 
] }, { "cell_type": "code", "execution_count": 5, "id": "a6ed630a", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "TmpDir = \".tmp/notebooks/\",\n", "ok = filelib:ensure_dir(filename:join([TmpDir, \"keep\"])),\n", "\n", "CompileModule =\n", "\tfun(Name, Source) ->\n", "\t\tPath = filename:join([TmpDir, Name ++ \".erl\"]),\n", "\t\tok = file:write_file(Path, Source),\n", "\t\t{ok, Module, Bin} = compile:file(Path, [binary]),\n", "\t\t{module, Module} = code:load_binary(Module, Path, Bin)\n", "\tend,\n", "\n", "BlockAccessors =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_block).\\n\",\n", "\t\t\"-export([height/1, denomination/1, redenomination_height/1, reward_pool/1,\\n\",\n", "\t\t\" reward/1, wallet_list/1, txs/1, weave_size/1, reward_addr/1,\\n\",\n", "\t\t\" debt_supply/1, price_per_gib_minute/1, reward_history/1,\\n\",\n", "\t\t\" kryder_plus_rate_multiplier/1, kryder_plus_rate_multiplier_latch/1]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar.hrl\\\").\\n\",\n", "\t\t\"height(B) -> B#block.height.\\n\",\n", "\t\t\"denomination(B) -> B#block.denomination.\\n\",\n", "\t\t\"redenomination_height(B) -> B#block.redenomination_height.\\n\",\n", "\t\t\"reward_pool(B) -> B#block.reward_pool.\\n\",\n", "\t\t\"reward(B) -> B#block.reward.\\n\",\n", "\t\t\"wallet_list(B) -> B#block.wallet_list.\\n\",\n", "\t\t\"txs(B) -> B#block.txs.\\n\",\n", "\t\t\"weave_size(B) -> B#block.weave_size.\\n\",\n", "\t\t\"reward_addr(B) -> B#block.reward_addr.\\n\",\n", "\t\t\"debt_supply(B) -> B#block.debt_supply.\\n\",\n", "\t\t\"price_per_gib_minute(B) -> B#block.price_per_gib_minute.\\n\",\n", "\t\t\"reward_history(B) -> 
B#block.reward_history.\\n\",\n", "\t\t\"kryder_plus_rate_multiplier(B) -> B#block.kryder_plus_rate_multiplier.\\n\",\n", "\t\t\"kryder_plus_rate_multiplier_latch(B) -> B#block.kryder_plus_rate_multiplier_latch.\\n\"\n", "\t]),\n", "\n", "ConfigAccessors =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_config).\\n\",\n", "\t\t\"-export([mining_addr/1]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave_config/include/arweave_config.hrl\\\").\\n\",\n", "\t\t\"mining_addr(C) -> C#config.mining_addr.\\n\"\n", "\t]),\n", "\n", "TXAccessors =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_tx).\\n\",\n", "\t\t\"-export([reward/1, denomination/1, id/1]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar.hrl\\\").\\n\",\n", "\t\t\"reward(TX) -> TX#tx.reward.\\n\",\n", "\t\t\"denomination(TX) -> TX#tx.denomination.\\n\",\n", "\t\t\"id(TX) -> TX#tx.id.\\n\"\n", "\t]),\n", "\n", "PricingAccessors =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_pricing).\\n\",\n", "\t\t\"-export([total_supply/0, miner_fee_share/1, endowment_fee_share/1, gib/0]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar.hrl\\\").\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar_pricing.hrl\\\").\\n\",\n", "\t\t\"total_supply() -> ?TOTAL_SUPPLY.\\n\",\n", "\t\t\"gib() -> ?GiB.\\n\",\n", "\t\t\"miner_fee_share(TXFee) ->\\n\",\n", "\t\t\"\\t{Dividend, Divisor} = ?MINER_FEE_SHARE,\\n\",\n", "\t\t\"\\tTXFee * Dividend div Divisor.\\n\",\n", "\t\t\"endowment_fee_share(TXFee) ->\\n\",\n", "\t\t\"\\tTXFee - miner_fee_share(TXFee).\\n\"\n", "\t]),\n", "\n", "CompileModule(\"nb_block\", BlockAccessors),\n", "CompileModule(\"nb_config\", ConfigAccessors),\n", "CompileModule(\"nb_tx\", TXAccessors),\n", "CompileModule(\"nb_pricing\", PricingAccessors),\n", "ok." 
] }, { "cell_type": "markdown", "id": "b70f2845", "metadata": {}, "source": [ "### HTTP helpers" ] }, { "cell_type": "markdown", "id": "7386e71d", "metadata": {}, "source": [ "Defines HTTP helpers for the localnet node:\n", "- `HTTPGet(URL)` – GET request returning `{ok, Body}` or `{error, Reason}`.\n", "- `HTTPPostJSON(URL, Body)` – POST with JSON content type.\n", "- `HTTPGetInteger(URL)` – GET, parse body as a non-negative integer.\n", "- `HTTPGetJSONFee(URL)` – GET, parse the `\"fee\"` field from a JSON response.\n", "- `HTTPGetJSONMap(URL)` – GET, parse body as a JSON map.\n", "- `HTTPGetJSONList(URL)` – GET, parse body as a JSON list.\n", "- `HTTPAssertNonEmpty(URL)` – GET, assert body is non-empty.\n", "- `HTTPAssertBase64Bytes(URL, ExpectedSize)` – GET, decode base64 body and assert byte size.\n", "- `HTTPGetBlockHash(Height)` – GET block JSON by height, extract `indep_hash`." ] }, { "cell_type": "code", "execution_count": 6, "id": "d4a35d59", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "{ok, _} = application:ensure_all_started(inets),\n", "\n", "LocalnetHTTPHost =\n", "\tcase os:getenv(\"LOCALNET_HTTP_HOST\") of\n", "\t\tfalse ->\n", "\t\t\t\"127.0.0.1\";\n", "\t\tValue ->\n", "\t\t\tValue\n", "\tend,\n", "\n", "LocalnetHTTPPort =\n", "\tcase os:getenv(\"LOCALNET_HTTP_PORT\") of\n", "\t\tfalse ->\n", "\t\t\t\"1984\";\n", "\t\tValue ->\n", "\t\t\tValue\n", "\tend,\n", "\n", "LocalnetNetworkName =\n", "\tcase os:getenv(\"LOCALNET_HTTP_NETWORK\") of\n", "\t\tfalse ->\n", "\t\t\tcase os:getenv(\"LOCALNET_NETWORK_NAME\") of\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\t\"arweave.localnet\";\n", "\t\t\t\tValue2 ->\n", 
"\t\t\t\t\tValue2\n", "\t\t\tend;\n", "\t\tValue ->\n", "\t\t\tValue\n", "\tend,\n", "\n", "BaseUrl = \"http://\" ++ LocalnetHTTPHost ++ \":\" ++ LocalnetHTTPPort,\n", "\n", "HTTPGet =\n", "\tfun(Url) ->\n", "\t\tHeaders = [{\"x-network\", LocalnetNetworkName}],\n", "\t\tcase httpc:request(get, {Url, Headers}, [], []) of\n", "\t\t\t{ok, {{_, 200, _}, _RespHeaders, Body}} ->\n", "\t\t\t\t{ok, Body};\n", "\t\t\t{ok, {{_, Status, _}, _RespHeaders, Body}} ->\n", "\t\t\t\t{error, {http_status, Status, Body}};\n", "\t\t\tError ->\n", "\t\t\t\t{error, Error}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPPostJSON =\n", "\tfun(Url, Body) ->\n", "\t\tHeaders = [{\"content-type\", \"application/json\"}, {\"x-network\", LocalnetNetworkName}],\n", "\t\tcase httpc:request(post, {Url, Headers, \"application/json\", Body}, [], []) of\n", "\t\t\t{ok, {{_, 200, _}, _RespHeaders, RespBody}} ->\n", "\t\t\t\t{ok, RespBody};\n", "\t\t\t{ok, {{_, Status, _}, _RespHeaders, RespBody}} ->\n", "\t\t\t\t{error, {http_status, Status, RespBody}};\n", "\t\t\tError ->\n", "\t\t\t\t{error, Error}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPGetInteger =\n", "\tfun(Url) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tBin = iolist_to_binary(Body),\n", "\t\t\t\tcase catch binary_to_integer(Bin) of\n", "\t\t\t\t\t{'EXIT', _} ->\n", "\t\t\t\t\t\t{error, {not_integer, Bin}};\n", "\t\t\t\t\tValue ->\n", "\t\t\t\t\t\tcase Value >= 0 of\n", "\t\t\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t\t\tValue;\n", "\t\t\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t\t\t{error, {negative_integer, Value}}\n", "\t\t\t\t\t\tend\n", "\t\t\t\tend;\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPGetJSONFee =\n", "\tfun(Url) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tMap = jiffy:decode(Body, [return_maps]),\n", "\t\t\t\tcase maps:get(<<\"fee\">>, Map, undefined) of\n", "\t\t\t\t\tundefined ->\n", "\t\t\t\t\t\t{error, {missing_fee, Map}};\n", 
"\t\t\t\t\tFeeBin ->\n", "\t\t\t\t\t\tcase catch binary_to_integer(FeeBin) of\n", "\t\t\t\t\t\t\t{'EXIT', _} ->\n", "\t\t\t\t\t\t\t\t{error, {not_integer, FeeBin}};\n", "\t\t\t\t\t\t\tValue ->\n", "\t\t\t\t\t\t\t\tcase Value >= 0 of\n", "\t\t\t\t\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t\t\t\t\tValue;\n", "\t\t\t\t\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t\t\t\t\t{error, {negative_integer, Value}}\n", "\t\t\t\t\t\t\t\tend\n", "\t\t\t\t\t\tend\n", "\t\t\t\tend;\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPGetJSONMap =\n", "\tfun(Url) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tcase jiffy:decode(Body, [return_maps]) of\n", "\t\t\t\t\tMap when is_map(Map) ->\n", "\t\t\t\t\t\tMap;\n", "\t\t\t\t\tOther ->\n", "\t\t\t\t\t\t{error, {not_map, Other}}\n", "\t\t\t\tend;\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPGetJSONList =\n", "\tfun(Url) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tcase jiffy:decode(Body) of\n", "\t\t\t\t\tList when is_list(List) ->\n", "\t\t\t\t\t\tList;\n", "\t\t\t\t\tOther ->\n", "\t\t\t\t\t\t{error, {not_list, Other}}\n", "\t\t\t\tend;\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPAssertNonEmpty =\n", "\tfun(Url) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tcase byte_size(iolist_to_binary(Body)) > 0 of\n", "\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\tok;\n", "\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t{error, {empty_body, Url}}\n", "\t\t\t\tend;\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPAssertBase64Bytes =\n", "\tfun(Url, ExpectedSize) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tBin = iolist_to_binary(Body),\n", "\t\t\t\tcase catch ar_util:decode(Bin) of\n", "\t\t\t\t\t{'EXIT', _} ->\n", "\t\t\t\t\t\t{error, {invalid_base64, Bin}};\n", "\t\t\t\t\tDecoded 
->\n", "\t\t\t\t\t\tcase byte_size(Decoded) == ExpectedSize of\n", "\t\t\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t\t\tok;\n", "\t\t\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t\t\t{error, {unexpected_size, byte_size(Decoded), ExpectedSize}}\n", "\t\t\t\t\t\tend\n", "\t\t\t\tend;\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "HTTPGetBlockHash =\n", "\tfun(Height) ->\n", "\t\tUrl = BaseUrl ++ \"/block/height/\" ++ integer_to_list(Height),\n", "\t\tMap = HTTPGetJSONMap(Url),\n", "\t\tcase maps:get(<<\"indep_hash\">>, Map, undefined) of\n", "\t\t\tundefined ->\n", "\t\t\t\t{error, {missing_indep_hash, Height, Map}};\n", "\t\t\tHash when is_binary(Hash) ->\n", "\t\t\t\tHash;\n", "\t\t\tHash ->\n", "\t\t\t\tiolist_to_binary(Hash)\n", "\t\tend\n", "\tend,\n", "\n", "ok." ] }, { "cell_type": "markdown", "id": "utility-helpers-header", "metadata": {}, "source": [ "### Utility helpers" ] }, { "cell_type": "markdown", "id": "utility-helpers-doc", "metadata": {}, "source": [ "- `DecodeBase64(Value)` – decodes a base64url-encoded value, returning `{ok, Decoded}` or `{error, Reason}`." ] }, { "cell_type": "code", "execution_count": 7, "id": "utility-helpers-code", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:52.000000Z", "shell.execute_reply": "2026-02-19T20:45:52.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#Fun\n" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "DecodeBase64 =\n", "\tfun(Value) ->\n", "\t\tBin = iolist_to_binary(Value),\n", "\t\tcase catch ar_util:decode(Bin) of\n", "\t\t\t{'EXIT', _} ->\n", "\t\t\t\t{error, {invalid_base64, Bin}};\n", "\t\t\tDecoded ->\n", "\t\t\t\t{ok, Decoded}\n", "\t\tend\n", "\tend." 
] }, { "cell_type": "markdown", "id": "8f29972b", "metadata": {}, "source": [ "## Pre-redenomination" ] }, { "cell_type": "markdown", "id": "a64732c9", "metadata": {}, "source": [ "### Read chain state\n", "\n", "Reads the current block height and displays the starting denomination, redenomination_height, reward_pool, and debt_supply." ] }, { "cell_type": "code", "execution_count": 8, "id": "ff2f165f", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:52.000000Z", "iopub.status.busy": "2026-02-19T20:45:52.000000Z", "iopub.status.idle": "2026-02-19T20:45:53.000000Z", "shell.execute_reply": "2026-02-19T20:45:53.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{height => 2069888,denomination => 4,redenomination_height => 2069886,\n", " reward_pool => 292873436146306634442575000,debt_supply => 0}\n" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Height0 = RPCHeight(),\n", "Block0 = RPCBlockByHeight(Height0),\n", "#{ height => nb_block:height(Block0),\n", " denomination => nb_block:denomination(Block0),\n", " redenomination_height => nb_block:redenomination_height(Block0),\n", " reward_pool => nb_block:reward_pool(Block0),\n", " debt_supply => nb_block:debt_supply(Block0) }." ] }, { "cell_type": "markdown", "id": "b0217a01", "metadata": {}, "source": [ "### Load mining address and wallet key" ] }, { "cell_type": "markdown", "id": "089e1b85", "metadata": {}, "source": [ "Reads the mining address from the node's config and loads the corresponding wallet key pair via RPC. The wallet is used to sign transactions later." 
] }, { "cell_type": "code", "execution_count": 9, "id": "e3422be8", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:53.000000Z", "iopub.status.busy": "2026-02-19T20:45:53.000000Z", "iopub.status.idle": "2026-02-19T20:45:53.000000Z", "shell.execute_reply": "2026-02-19T20:45:53.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{mining_addr => <<\"yjrOLPSHP1cvZ_y1bSLkLd8lIWhu9dsbSufXm9-QjsY\">>}\n" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "{ok, Config} = RPCCall(arweave_config, get_env, []),\n", "MiningAddr = nb_config:mining_addr(Config),\n", "\n", "#{mining_addr => ar_util:encode(MiningAddr)}." ] }, { "cell_type": "code", "execution_count": 10, "id": "e9f8e98b", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:53.000000Z", "iopub.status.busy": "2026-02-19T20:45:53.000000Z", "iopub.status.idle": "2026-02-19T20:45:53.000000Z", "shell.execute_reply": "2026-02-19T20:45:53.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "{{{ecdsa,secp256k1},\n", " <<157,27,57,209,187,167,148,55,118,249,37,206,94,220,143,242,208,115,56,154,\n", " 175,32,127,73,136,71,4,177,87,89,241,31>>,\n", " <<2,215,4,40,91,61,226,129,118,138,18,7,154,221,140,229,244,16,43,144,42,9,\n", " 26,114,36,102,67,23,84,139,196,122,22>>},\n", " {{ecdsa,secp256k1},\n", " <<2,215,4,40,91,61,226,129,118,138,18,7,154,221,140,229,244,16,43,144,42,9,\n", " 26,114,36,102,67,23,84,139,196,122,22>>}}\n" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "MiningWallet = RPCCall(ar_wallet, load_key, [MiningAddr])." 
] }, { "cell_type": "markdown", "id": "4331a098", "metadata": {}, "source": [ "### Override redenomination parameters" ] }, { "cell_type": "markdown", "id": "7f02ad39", "metadata": {}, "source": [ "Saves the current values of `redenomination_threshold`, `redenomination_delay_blocks`, and `locked_rewards_blocks`, then overrides them on the remote node:\n", "- `redenomination_delay_blocks = 2` (redenomination fires 2 blocks after scheduling).\n", "- `locked_rewards_blocks = 1` (rewards unlock after 1 block).\n", "- The redenomination threshold is set later, after mining reveals the available supply.\n", "\n", "Also defines local helpers `Pow1000Local` (computes 1000^N) and `RedenominateLocal` (scales amounts between denominations)." ] }, { "cell_type": "code", "execution_count": 11, "id": "e3c51b45", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:53.000000Z", "iopub.status.busy": "2026-02-19T20:45:53.000000Z", "iopub.status.idle": "2026-02-19T20:45:53.000000Z", "shell.execute_reply": "2026-02-19T20:45:53.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{pool_growth_target => 1000000000,\n", " prev_threshold => {ok,65707142423137694720779001},\n", " prev_delay => {ok,2},\n", " prev_locked_rewards => {ok,1}}\n" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "PrevRedenomThreshold = RPCCall(application, get_env, [arweave, redenomination_threshold]),\n", "PrevRedenomDelay = RPCCall(application, get_env, [arweave, redenomination_delay_blocks]),\n", "PrevLockedRewards = RPCCall(application, get_env, [arweave, locked_rewards_blocks]),\n", "\n", "Pow1000Local =\n", "\tfun\n", "\t\tPow(0) ->\n", "\t\t\t1;\n", "\t\tPow(N) when N > 0 ->\n", "\t\t\t1000 * Pow(N - 1)\n", "\tend,\n", "\n", "RedenominateLocal =\n", "\tfun(Amount, FromDenom, ToDenom) ->\n", "\t\tcase ToDenom >= FromDenom of\n", "\t\t\ttrue ->\n", "\t\t\t\tAmount * Pow1000Local(ToDenom - FromDenom);\n", "\t\t\tfalse ->\n", "\t\t\t\tAmount div 
Pow1000Local(FromDenom - ToDenom)\n", "\t\tend\n", "\tend,\n", "\n", "PoolGrowthTarget = 1000000000,\n", "\n", "ok = RPCCall(application, set_env, [arweave, redenomination_delay_blocks, 2]),\n", "ok = RPCCall(application, set_env, [arweave, locked_rewards_blocks, 1]),\n", "\n", "#{ pool_growth_target => PoolGrowthTarget,\n", " prev_threshold => PrevRedenomThreshold,\n", " prev_delay => PrevRedenomDelay,\n", " prev_locked_rewards => PrevLockedRewards }." ] }, { "cell_type": "markdown", "id": "06abbfae", "metadata": {}, "source": [ "### Wallet balance helpers" ] }, { "cell_type": "markdown", "id": "9f4e7767", "metadata": {}, "source": [ "Defines `WalletBalanceFromBlock(Block, Addr)`, which reads the balance from the block's wallet tree via RPC and redenominates it to the block's denomination. Also defines `MinerBalanceAt(Height)` as a shorthand for the mining address." ] }, { "cell_type": "code", "execution_count": 12, "id": "5241f7f7", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:53.000000Z", "iopub.status.busy": "2026-02-19T20:45:53.000000Z", "iopub.status.idle": "2026-02-19T20:45:53.000000Z", "shell.execute_reply": "2026-02-19T20:45:53.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#Fun\n" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Pow1000 = Pow1000Local,\n", "Redenominate = RedenominateLocal,\n", "\n", "WalletBalanceFromBlock =\n", "\tfun(Block, Addr) ->\n", "\t\tRoot = nb_block:wallet_list(Block),\n", "\t\tBlockDenom = nb_block:denomination(Block),\n", "\t\tAccountsMap = RPCCall(ar_wallets, get, [Root, Addr]),\n", "\t\tcase maps:get(Addr, AccountsMap, not_found) of\n", "\t\t\tnot_found ->\n", "\t\t\t\t0;\n", "\t\t\t{Balance, _LastTX} ->\n", "\t\t\t\tRedenominate(Balance, 1, BlockDenom);\n", "\t\t\t{Balance, _LastTX, AccountDenom, _Perm} ->\n", "\t\t\t\tRedenominate(Balance, AccountDenom, BlockDenom)\n", "\t\tend\n", "\tend,\n", "\n", "MinerBalanceAt =\n", "\tfun(Height) ->\n", 
"\t\tBlock = RPCBlockByHeight(Height),\n", "\t\tWalletBalanceFromBlock(Block, MiningAddr)\n", "\tend." ] }, { "cell_type": "markdown", "id": "de72ffaf", "metadata": {}, "source": [ "### Mine past the pricing transition and unlock the first reward" ] }, { "cell_type": "markdown", "id": "30fc3239", "metadata": {}, "source": [ "Mines `max(3, TransitionEnd - StartHeight)` blocks to ensure the 2.7.2 pricing transition is complete and at least one reward has been unlocked (with `locked_rewards_blocks = 1`, a reward earned at height H is applied at H+1).\n", "\n", "**Assertions:**\n", "- Miner balance is positive after mining.\n", "- Sets the redenomination threshold to `AvailableSupply + 1` (where `AvailableSupply = TotalSupply * 1000^(denomination - 1) + DebtSupply - RewardPool`, i.e. the total supply redenominated from the base denomination 1 into the current denomination), so any further endowment fee will push the circulating supply past the threshold and trigger redenomination." ] }, { "cell_type": "code", "execution_count": 13, "id": "d5ad9c9a", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:45:53.000000Z", "iopub.status.busy": "2026-02-19T20:45:53.000000Z", "iopub.status.idle": "2026-02-19T20:46:49.000000Z", "shell.execute_reply": "2026-02-19T20:46:49.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "{2069888,2069891,2069870}\n" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "StartHeight = RPCHeight(),\n", "TransitionEnd =\n", "\tRPCCall(ar_pricing_transition, transition_start_2_7_2, []) +\n", "\t\tRPCCall(ar_pricing_transition, transition_length_2_7_2, []),\n", "TargetHeight = erlang:max(StartHeight + 3, TransitionEnd),\n", "ok = MineUntilHeight(TargetHeight),\n", "{StartHeight, TargetHeight, TransitionEnd}."
] }, { "cell_type": "code", "execution_count": 14, "id": "cf2fa971", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:49.000000Z", "iopub.status.busy": "2026-02-19T20:46:49.000000Z", "iopub.status.idle": "2026-02-19T20:46:49.000000Z", "shell.execute_reply": "2026-02-19T20:46:49.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "BalanceAfterUnlock = MinerBalanceAt(TargetHeight),\n", "ok = case BalanceAfterUnlock > 0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {mining_balance_not_unlocked, BalanceAfterUnlock}}\n", "end." ] }, { "cell_type": "code", "execution_count": 15, "id": "175c7fc1", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:49.000000Z", "iopub.status.busy": "2026-02-19T20:46:49.000000Z", "iopub.status.idle": "2026-02-19T20:46:49.000000Z", "shell.execute_reply": "2026-02-19T20:46:49.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{threshold => 65707142423133030322557425001,pool_growth_target => 1000000000,\n", " available_supply => 65707142423133030322557425000}\n" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "BlockAfterTransition = RPCBlockByHeight(RPCHeight()),\n", "DenomAfterTransition = nb_block:denomination(BlockAfterTransition),\n", "TotalSupplyAfterTransition = RedenominateLocal(nb_pricing:total_supply(), 1, DenomAfterTransition),\n", "RewardPoolAfterTransition = nb_block:reward_pool(BlockAfterTransition),\n", "DebtSupplyAfterTransition = nb_block:debt_supply(BlockAfterTransition),\n", "AvailableSupplyAfterTransition =\n", "\tTotalSupplyAfterTransition + DebtSupplyAfterTransition - RewardPoolAfterTransition,\n", "ThresholdAfterTransition = AvailableSupplyAfterTransition + 1,\n", "ok = RPCCall(application, set_env, [arweave, redenomination_threshold, ThresholdAfterTransition]),\n", "\n", "#{ available_supply => 
AvailableSupplyAfterTransition,\n", " threshold => ThresholdAfterTransition,\n", " pool_growth_target => PoolGrowthTarget }." ] }, { "cell_type": "markdown", "id": "8eb2197a", "metadata": {}, "source": [ "## Trigger redenomination" ] }, { "cell_type": "markdown", "id": "74fb7004", "metadata": {}, "source": [ "Builds and submits transactions to push the reward pool past the redenomination threshold. No blocks are mined yet; transactions go to the mempool and are mined in the next step." ] }, { "cell_type": "markdown", "id": "b498f821", "metadata": {}, "source": [ "### TX builder module" ] }, { "cell_type": "markdown", "id": "3e83e9e9", "metadata": {}, "source": [ "Defines `nb_tx_builder:build_tx/5` (compiled dynamically). Also defines helpers: `GetTXAnchor` (fetches anchor via HTTP), `BuildTX(Reward, Data)` (builds a format-2 TX with a given reward and data payload), and `PostTX(TX)` (serializes and POSTs a transaction)." ] }, { "cell_type": "code", "execution_count": 16, "id": "4079c8ff", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:49.000000Z", "iopub.status.busy": "2026-02-19T20:46:49.000000Z", "iopub.status.idle": "2026-02-19T20:46:49.000000Z", "shell.execute_reply": "2026-02-19T20:46:49.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "TXBuilder =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_tx_builder).\\n\",\n", "\t\t\"-export([build_tx/5]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar.hrl\\\").\\n\",\n", "\t\t\"build_tx(Reward, Data, Anchor, Denomination, Wallet) ->\\n\",\n", "\t\t\"\\tDataSize = byte_size(Data),\\n\",\n", "\t\t\"\\tDataRoot =\\n\",\n", "\t\t\"\\t\\tcase DataSize > 0 of\\n\",\n", "\t\t\"\\t\\t\\ttrue ->\\n\",\n", "\t\t\"\\t\\t\\t\\tTreeTX = ar_tx:generate_chunk_tree(#tx{ data = Data }),\\n\",\n", "\t\t\"\\t\\t\\t\\tTreeTX#tx.data_root;\\n\",\n", "\t\t\"\\t\\t\\tfalse ->\\n\",\n", 
"\t\t\"\\t\\t\\t\\t<<>>\\n\",\n", "\t\t\"\\t\\tend,\\n\",\n", "\t\t\"\\tBaseTX = #tx{\\n\",\n", "\t\t\"\\t\\tformat = 2,\\n\",\n", "\t\t\"\\t\\tdata = Data,\\n\",\n", "\t\t\"\\t\\tdata_size = DataSize,\\n\",\n", "\t\t\"\\t\\tdata_root = DataRoot,\\n\",\n", "\t\t\"\\t\\treward = Reward,\\n\",\n", "\t\t\"\\t\\tlast_tx = Anchor,\\n\",\n", "\t\t\"\\t\\ttarget = <<>>,\\n\",\n", "\t\t\"\\t\\tquantity = 0,\\n\",\n", "\t\t\"\\t\\tdenomination = Denomination\\n\",\n", "\t\t\"\\t},\\n\",\n", "\t\t\"\\tar_tx:sign(BaseTX, Wallet).\\n\"\n", "\t]),\n", "\n", "CompileModule(\"nb_tx_builder\", TXBuilder),\n", "ok." ] }, { "cell_type": "code", "execution_count": 17, "id": "18474861", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:49.000000Z", "iopub.status.busy": "2026-02-19T20:46:49.000000Z", "iopub.status.idle": "2026-02-19T20:46:49.000000Z", "shell.execute_reply": "2026-02-19T20:46:49.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#Fun\n" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "GetTXAnchor =\n", "\tfun() ->\n", "\t\tcase HTTPGet(BaseUrl ++ \"/tx_anchor\") of\n", "\t\t\t{ok, AnchorB64} ->\n", "\t\t\t\tar_util:decode(iolist_to_binary(AnchorB64));\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\terlang:error({tx_anchor_failed, Reason})\n", "\t\tend\n", "\tend,\n", "\n", "BuildTX =\n", "\tfun(Reward, Data) ->\n", "\t\tAnchor = GetTXAnchor(),\n", "\t\tBlock = RPCBlockByHeight(RPCHeight()),\n", "\t\tDenom = nb_block:denomination(Block),\n", "\t\tnb_tx_builder:build_tx(Reward, Data, Anchor, Denom, MiningWallet)\n", "\tend,\n", "\n", "PostTX =\n", "\tfun(TX) ->\n", "\t\tBody = ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX)),\n", "\t\tHTTPPostJSON(BaseUrl ++ \"/tx\", Body)\n", "\tend." ] }, { "cell_type": "markdown", "id": "0c11180c", "metadata": {}, "source": [ "### Compute transaction count" ] }, { "cell_type": "markdown", "id": "d57cd75d", "metadata": {}, "source": [ "No blocks mined. 
Queries the minimum fee for 4 bytes via `/price/4/{addr}`. Sets `RewardPerTX = max(100000000, MinFee)`. Splits each fee as `MinerShare = Fee div 21`, `EndowmentShare = Fee - MinerShare`. Computes `RequiredTXs = ceil(PoolGrowthTarget / EndowmentShare)` and caps at `MaxTXs = BalanceAfterUnlock div RewardPerTX`.\n", "\n", "**Assertion:** `TXCount >= RequiredTXs` (the miner has enough balance to cover all needed transactions)." ] }, { "cell_type": "code", "execution_count": 18, "id": "ad9d8894", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:49.000000Z", "iopub.status.busy": "2026-02-19T20:46:49.000000Z", "iopub.status.idle": "2026-02-19T20:46:50.000000Z", "shell.execute_reply": "2026-02-19T20:46:50.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{reward_per_tx => 4897618132653043,miner_share_per_tx => 233219911078716,\n", " endowment_share => 4664398221574327,required_txs => 1,tx_count => 1}\n" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "AddrB64 = binary_to_list(ar_util:encode(MiningAddr)),\n", "MinFeeFor4Bytes = HTTPGetInteger(BaseUrl ++ \"/price/4/\" ++ AddrB64),\n", "RewardPerTX = max(100000000, MinFeeFor4Bytes),\n", "MinerSharePerTX = RewardPerTX div 21,\n", "EndowmentShare = RewardPerTX - MinerSharePerTX,\n", "RequiredTXs = (PoolGrowthTarget + EndowmentShare - 1) div EndowmentShare,\n", "MaxTXs = BalanceAfterUnlock div RewardPerTX,\n", "TXCount = erlang:min(RequiredTXs, MaxTXs),\n", "\n", "ok = case TXCount >= RequiredTXs of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {insufficient_balance, BalanceAfterUnlock, RequiredTXs, RewardPerTX}}\n", "end,\n", "\n", "#{ reward_per_tx => RewardPerTX,\n", " miner_share_per_tx => MinerSharePerTX,\n", " endowment_share => EndowmentShare,\n", " required_txs => RequiredTXs,\n", " tx_count => TXCount }." 
] }, { "cell_type": "markdown", "id": "daab126e", "metadata": {}, "source": [ "### Submit transactions" ] }, { "cell_type": "markdown", "id": "f089a33a", "metadata": {}, "source": [ "Builds all transactions with unique data payloads and posts them via the HTTP `/tx` endpoint. Asserts all submissions succeed." ] }, { "cell_type": "code", "execution_count": 19, "id": "cef6699c", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:50.000000Z", "iopub.status.busy": "2026-02-19T20:46:50.000000Z", "iopub.status.idle": "2026-02-19T20:46:50.000000Z", "shell.execute_reply": "2026-02-19T20:46:50.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "{tx,2,\n", " <<139,172,22,196,100,45,173,245,156,11,227,184,28,54,206,233,222,210,38,\n", " 113,148,195,169,70,236,87,68,5,15,80,113,11>>,\n", " <<253,63,204,233,203,204,115,239,32,134,4,214,184,100,165,193,180,84,171,\n", " 198,148,28,152,234,44,193,176,146,214,230,209,245,44,32,9,254,99,89,31,\n", " 142,22,88,155,13,102,255,102,228>>,\n", " <<2,215,4,40,91,61,226,129,118,138,18,7,154,221,140,229,244,16,43,144,42,9,\n", " 26,114,36,102,67,23,84,139,196,122,22>>,\n", " <<202,58,206,44,244,135,63,87,47,103,252,181,109,34,228,45,223,37,33,104,\n", " 110,245,219,27,74,231,215,155,223,144,142,198>>,\n", " [],<<>>,0,<<\"tx_1\">>,4,[],\n", " <<80,150,207,185,223,249,233,156,209,143,234,81,171,110,46,139,135,104,51,\n", " 164,66,97,241,245,151,19,123,99,218,110,6,65>>,\n", " <<229,118,71,208,177,55,47,223,163,169,232,69,227,132,137,19,98,177,244,99,\n", " 221,219,19,234,232,69,153,146,134,186,52,8,93,109,153,251,116,125,75,153,\n", " 170,0,66,188,202,114,255,42,43,180,4,28,225,108,251,151,22,187,148,7,145,\n", " 196,48,79,0>>,\n", " 4897618132653043,4,\n", " {ecdsa,secp256k1}}\n" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "TX1 = BuildTX(RewardPerTX, <<\"tx_1\">>)." 
] }, { "cell_type": "code", "execution_count": 20, "id": "38c443d8-03bd-49aa-a62d-afa43dd484bd", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:50.000000Z", "iopub.status.busy": "2026-02-19T20:46:50.000000Z", "iopub.status.idle": "2026-02-19T20:46:50.000000Z", "shell.execute_reply": "2026-02-19T20:46:50.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "<<139,172,22,196,100,45,173,245,156,11,227,184,28,54,206,233,222,210,38,113,\n", " 148,195,169,70,236,87,68,5,15,80,113,11>>\n" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "TXID = nb_tx:id(TX1)." ] }, { "cell_type": "code", "execution_count": 21, "id": "b0a69c70-1b91-4c61-9da7-fd61774c2f77", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:50.000000Z", "iopub.status.busy": "2026-02-19T20:46:50.000000Z", "iopub.status.idle": "2026-02-19T20:46:50.000000Z", "shell.execute_reply": "2026-02-19T20:46:50.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "not_found\n" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RPCCall(ar_tx_db, get_error_codes, [TXID])." 
] }, { "cell_type": "code", "execution_count": 22, "id": "8d9f53eb", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:50.000000Z", "iopub.status.busy": "2026-02-19T20:46:50.000000Z", "iopub.status.idle": "2026-02-19T20:46:50.000000Z", "shell.execute_reply": "2026-02-19T20:46:50.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RemainingTXs =\n", "\t[BuildTX(RewardPerTX, <<\"tx_\", (integer_to_binary(N))/binary>>)\n", "\t\t|| N <- lists:seq(2, TXCount)],\n", "TXs = [TX1 | RemainingTXs],\n", "Results = [PostTX(TX) || TX <- TXs],\n", "ok = case lists:all(fun({ok, _}) -> true; (_) -> false end, Results) of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {tx_submission_failures, [R || R <- Results, element(1, R) =/= ok]}}\n", "end." ] }, { "cell_type": "markdown", "id": "84a6a171", "metadata": {}, "source": [ "### Mine the trigger block" ] }, { "cell_type": "markdown", "id": "1deb3bdf", "metadata": {}, "source": [ "Mines 1 block. The transactions submitted above are included in this block. Displays the block's reward_pool and reward." 
] }, { "cell_type": "code", "execution_count": 23, "id": "3a4e6da8", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:46:50.000000Z", "iopub.status.busy": "2026-02-19T20:46:50.000000Z", "iopub.status.idle": "2026-02-19T20:47:17.000000Z", "shell.execute_reply": "2026-02-19T20:47:17.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{reward_pool => 292873436150971032664149327,reward => 123478299179911078716,\n", " redenom_trigger_block_height => 2069892}\n" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RedenomTriggerBlockHeight = RPCHeight() + 1,\n", "ok = MineUntilHeight(RedenomTriggerBlockHeight),\n", "RedenomTriggerBlock = RPCBlockByHeight(RedenomTriggerBlockHeight),\n", "#{ redenom_trigger_block_height => RedenomTriggerBlockHeight,\n", " reward_pool => nb_block:reward_pool(RedenomTriggerBlock),\n", " reward => nb_block:reward(RedenomTriggerBlock) }." ] }, { "cell_type": "markdown", "id": "3038cb88", "metadata": {}, "source": [ "### Assert reward pool and miner reward" ] }, { "cell_type": "markdown", "id": "da295ed0", "metadata": {}, "source": [ "Reads the trigger block and its transactions. Queries the inflation reward at this height\n", "and the average block interval from the node. 
Computes expected reward and endowment pool\n", "from the protocol's reward formula:\n", "\n", "**Expected block reward** = `max(Inflation + MinerFeeShare, StorageCost)`, where:\n", "- `Inflation = redenominate(ar_inflation:calculate(Height), 1, PrevDenomination)`\n", "- `MinerFeeShare = sum(TXFee div 21)` over block transactions\n", "- `StorageCost = N_REPLICATIONS * WeaveSize * PricePerGiBMinute * BlockInterval / (60 * GiB)`\n", "\n", "**Expected endowment pool** = `PrevPool + EndowmentFeeShare - max(0, StorageCost - BaseReward)`\n", "\n", "Also defines `ComputeExpectedRewardAndPool` helper reused later for redenomination assertions.\n", "\n", "**Assertions:**\n", "- `block.reward` matches the expected reward within 0.1%.\n", "- `block.reward_pool` matches the expected pool within 0.1%.\n", "\n", "The 0.1% tolerance accounts for `ar_inflation:calculate/1` being queried once\n", "at the trigger block height and reused for nearby blocks. The inflation reward\n", "decays with height but changes by less than 0.001% per block at mainnet heights,\n", "so 0.1% is conservative." 
] }, { "cell_type": "code", "execution_count": 24, "id": "ed28dffe", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:47:17.000000Z", "iopub.status.busy": "2026-02-19T20:47:17.000000Z", "iopub.status.idle": "2026-02-19T20:47:17.000000Z", "shell.execute_reply": "2026-02-19T20:47:17.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{miner_fee_share => 233219911078716,endowment_fee_share => 4664398221574327,\n", " expected_reward => 123478299179911078716,\n", " expected_pool => 292873436150971032664149327,\n", " inflation => 123478065960000000000,storage_cost => 0,\n", " actual_reward => 123478299179911078716,\n", " actual_pool => 292873436150971032664149327}\n" ] }, "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ "EnsureTXs =\n", "\tfun(TXs) ->\n", "\t\tcase TXs of\n", "\t\t\t[] -> [];\n", "\t\t\t_ ->\n", "\t\t\t\tcase lists:any(fun(TX) -> is_binary(TX) end, TXs) of\n", "\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t[RPCCall(ar_storage, read_tx, [TXID]) || TXID <- TXs];\n", "\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\tTXs\n", "\t\t\t\tend\n", "\t\tend\n", "\tend,\n", "\n", "ComputeFeeShares =\n", "\tfun(BlockTXs, Denom) ->\n", "\t\tcase BlockTXs of\n", "\t\t\t[] ->\n", "\t\t\t\t{0, 0};\n", "\t\t\t_ ->\n", "\t\t\t\tlists:foldl(\n", "\t\t\t\t\tfun(TX, {MinerAcc, EndowmentAcc}) ->\n", "\t\t\t\t\t\tTXFeeBase = nb_tx:reward(TX),\n", "\t\t\t\t\t\tTXDenom = nb_tx:denomination(TX),\n", "\t\t\t\t\t\tTXFee = RedenominateLocal(TXFeeBase, TXDenom, Denom),\n", "\t\t\t\t\t\tMinerShare = TXFee div 21,\n", "\t\t\t\t\t\t{MinerAcc + MinerShare, EndowmentAcc + (TXFee - MinerShare)}\n", "\t\t\t\t\tend,\n", "\t\t\t\t\t{0, 0},\n", "\t\t\t\t\tBlockTXs\n", "\t\t\t\t)\n", "\t\tend\n", "\tend,\n", "\n", "PrevRedenomTriggerBlock = RPCBlockByHeight(RedenomTriggerBlockHeight - 1),\n", "RefInflationBase = RPCCall(ar_inflation, calculate, [RedenomTriggerBlockHeight]),\n", "RefBlockInterval = RPCCall(ar_block_time_history, compute_block_interval,\n", 
"\t[PrevRedenomTriggerBlock]),\n", "NReplications = 20,\n", "GiB = nb_pricing:gib(),\n", "\n", "ComputeExpectedRewardAndPool =\n", "\tfun(Height, Block, PrevBlock, BlockTXs) ->\n", "\t\tPrevDenomLocal = nb_block:denomination(PrevBlock),\n", "\t\tBlockDenomLocal = nb_block:denomination(Block),\n", "\t\tInflationLocal = RedenominateLocal(RefInflationBase, 1, PrevDenomLocal),\n", "\t\t{MinerFeeShareLocal, EndowmentFeeShareLocal} =\n", "\t\t\tComputeFeeShares(BlockTXs, PrevDenomLocal),\n", "\t\tWeaveSizeLocal = nb_block:weave_size(Block),\n", "\t\tPriceLocal = nb_block:price_per_gib_minute(PrevBlock),\n", "\t\tStorageCostLocal = NReplications * WeaveSizeLocal * PriceLocal\n", "\t\t\t* RefBlockInterval div (60 * GiB),\n", "\t\tBaseRewardLocal = InflationLocal + MinerFeeShareLocal,\n", "\t\tPrevPoolLocal = nb_block:reward_pool(PrevBlock),\n", "\t\tPool2 = PrevPoolLocal + EndowmentFeeShareLocal,\n", "\t\t{ExpReward0, ExpPool0} =\n", "\t\t\tcase BaseRewardLocal >= StorageCostLocal of\n", "\t\t\t\ttrue ->\n", "\t\t\t\t\t{BaseRewardLocal, Pool2};\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\tTakeLocal = StorageCostLocal - BaseRewardLocal,\n", "\t\t\t\t\tcase TakeLocal > Pool2 of\n", "\t\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t\t{StorageCostLocal, 0};\n", "\t\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t\t{StorageCostLocal, Pool2 - TakeLocal}\n", "\t\t\t\t\tend\n", "\t\t\tend,\n", "\t\tExpReward = RedenominateLocal(ExpReward0, PrevDenomLocal, BlockDenomLocal),\n", "\t\tExpPool = RedenominateLocal(ExpPool0, PrevDenomLocal, BlockDenomLocal),\n", "\t\t#{\n", "\t\t\texpected_reward => ExpReward,\n", "\t\t\texpected_pool => ExpPool,\n", "\t\t\tinflation => InflationLocal,\n", "\t\t\tstorage_cost => StorageCostLocal,\n", "\t\t\tminer_fee_share => MinerFeeShareLocal,\n", "\t\t\tendowment_fee_share => EndowmentFeeShareLocal\n", "\t\t}\n", "\tend,\n", "\n", "TriggerBlockTXs = EnsureTXs(nb_block:txs(RedenomTriggerBlock)),\n", "TriggerExpected = ComputeExpectedRewardAndPool(\n", "\tRedenomTriggerBlockHeight, 
RedenomTriggerBlock,\n", "\tPrevRedenomTriggerBlock, TriggerBlockTXs),\n", "\n", "ActualReward = nb_block:reward(RedenomTriggerBlock),\n", "ActualPool = nb_block:reward_pool(RedenomTriggerBlock),\n", "ExpectedReward = maps:get(expected_reward, TriggerExpected),\n", "ExpectedPool = maps:get(expected_pool, TriggerExpected),\n", "\n", "RewardTolerance = max(1, ExpectedReward div 1000),\n", "ok = case abs(ActualReward - ExpectedReward) =< RewardTolerance of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {reward_mismatch, ActualReward, ExpectedReward, RewardTolerance}}\n", "end,\n", "\n", "PoolTolerance = max(1, ExpectedPool div 1000),\n", "ok = case abs(ActualPool - ExpectedPool) =< PoolTolerance of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {pool_mismatch, ActualPool, ExpectedPool, PoolTolerance}}\n", "end,\n", "\n", "TriggerExpected#{\n", "\tactual_reward => ActualReward,\n", "\tactual_pool => ActualPool\n", "}." ] }, { "cell_type": "markdown", "id": "8a714115", "metadata": {}, "source": [ "## Redenomination" ] }, { "cell_type": "markdown", "id": "bb426304", "metadata": {}, "source": [ "### Schedule redenomination\n", "\n", "Mines 1 block. Because the redenomination threshold was set just above the circulating\n", "supply, the node schedules redenomination at the next opportunity.\n", "\n", "**Assertion:** The new block's `redenomination_height` is greater than the current height\n", "(redenomination has been scheduled for a future block, with a 2-block delay as configured)." 
] }, { "cell_type": "code", "execution_count": 25, "id": "42c64336", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:47:17.000000Z", "iopub.status.busy": "2026-02-19T20:47:17.000000Z", "iopub.status.idle": "2026-02-19T20:47:44.000000Z", "shell.execute_reply": "2026-02-19T20:47:44.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{scheduled_height => 2069893}\n" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ScheduleStartHeight = RPCHeight(),\n", "ScheduleStartBlock = RPCBlockByHeight(ScheduleStartHeight),\n", "PrevRedenomHeight = nb_block:redenomination_height(ScheduleStartBlock),\n", "\n", "ok = MineUntilHeight(ScheduleStartHeight + 1),\n", "ScheduleBlock = RPCBlockByHeight(ScheduleStartHeight + 1),\n", "NewRedenomHeight = nb_block:redenomination_height(ScheduleBlock),\n", "\n", "ok = case NewRedenomHeight > ScheduleStartHeight of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {redenomination_not_scheduled, PrevRedenomHeight, NewRedenomHeight}}\n", "end,\n", "\n", "#{ scheduled_height => NewRedenomHeight }." ] }, { "cell_type": "markdown", "id": "e598ee92", "metadata": {}, "source": [ "### Snapshot HTTP values before redenomination" ] }, { "cell_type": "markdown", "id": "91e4bf2a", "metadata": {}, "source": [ "Mines to `NewRedenomHeight` (the block just before redenomination fires). Captures all HTTP pricing and wallet values into a `PreHTTP` map for comparison after redenomination." 
] }, { "cell_type": "code", "execution_count": 26, "id": "578a9950", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:47:44.000000Z", "iopub.status.busy": "2026-02-19T20:47:44.000000Z", "iopub.status.idle": "2026-02-19T20:47:44.000000Z", "shell.execute_reply": "2026-02-19T20:47:44.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{price0 => 59246720252252,price1g => 18458446185029267217,\n", " price0_addr => 59246720252252,price1g_addr => 18458446185029267217,\n", " price2_0 => 59246720252252,price2_1g => 18458446185029267217,\n", " price2_0_addr => 59246720252252,price2_1g_addr => 18458446185029267217,\n", " opt0 => 59246720252252,opt1g => 18458446185029267217,\n", " opt0_addr => 59246720252252,opt1g_addr => 18458446185029267217,\n", " v2_0 => 59246720252252,v2_1g => 18458446185029267217,\n", " v2_0_addr => 59246720252252,v2_1g_addr => 18458446185029267217,\n", " wallet_balance => 3332960484717335850673,\n", " reserved_rewards => 123477740282000000000}\n" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "PreRedenomHeight = NewRedenomHeight,\n", "ok = MineUntilHeight(PreRedenomHeight),\n", "\n", "PreInfoMap = HTTPGetJSONMap(BaseUrl ++ \"/info\"),\n", "PreInfoHeight =\n", "\tcase maps:get(<<\"height\">>, PreInfoMap, undefined) of\n", "\t\tHeight when is_integer(Height) ->\n", "\t\t\tHeight;\n", "\t\tHeightBin when is_binary(HeightBin) ->\n", "\t\t\tbinary_to_integer(HeightBin);\n", "\t\tOther ->\n", "\t\t\terlang:error({unexpected_info_height, Other})\n", "\tend,\n", "ok = case PreInfoHeight == PreRedenomHeight of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {pre_height_mismatch, PreInfoHeight, PreRedenomHeight}}\n", "end,\n", "\n", "AddrB64 = binary_to_list(ar_util:encode(MiningAddr)),\n", "\n", "PreHTTP = #\n", "{\n", "\tprice0 => HTTPGetInteger(BaseUrl ++ \"/price/0\"),\n", "\tprice1g => HTTPGetInteger(BaseUrl ++ \"/price/1000000000\"),\n", "\tprice0_addr => 
HTTPGetInteger(BaseUrl ++ \"/price/0/\" ++ AddrB64),\n", "\tprice1g_addr => HTTPGetInteger(BaseUrl ++ \"/price/1000000000/\" ++ AddrB64),\n", "\tprice2_0 => HTTPGetJSONFee(BaseUrl ++ \"/price2/0\"),\n", "\tprice2_1g => HTTPGetJSONFee(BaseUrl ++ \"/price2/1000000000\"),\n", "\tprice2_0_addr => HTTPGetJSONFee(BaseUrl ++ \"/price2/0/\" ++ AddrB64),\n", "\tprice2_1g_addr => HTTPGetJSONFee(BaseUrl ++ \"/price2/1000000000/\" ++ AddrB64),\n", "\topt0 => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/0\"),\n", "\topt1g => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/1000000000\"),\n", "\topt0_addr => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/0/\" ++ AddrB64),\n", "\topt1g_addr => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/1000000000/\" ++ AddrB64),\n", "\tv2_0 => HTTPGetInteger(BaseUrl ++ \"/v2price/0\"),\n", "\tv2_1g => HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000\"),\n", "\tv2_0_addr => HTTPGetInteger(BaseUrl ++ \"/v2price/0/\" ++ AddrB64),\n", "\tv2_1g_addr => HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000/\" ++ AddrB64),\n", "\twallet_balance => HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/balance\"),\n", "\treserved_rewards => HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/reserved_rewards_total\")\n", "},\n", "PreHTTP." ] }, { "cell_type": "markdown", "id": "71caee34", "metadata": {}, "source": [ "### Assert redenomination at scheduled height" ] }, { "cell_type": "markdown", "id": "5d8dd92d", "metadata": {}, "source": [ "Mines 1 block to `NewRedenomHeight + 1` (the redenomination block). Captures all HTTP\n", "pricing and wallet values in `PostHTTP`.\n", "\n", "**Assertions:**\n", "- `block.denomination` incremented exactly once.\n", "- `/info` height matches; network name unchanged.\n", "- **Wallet balance (exact):** `Post == (Pre + PreBlockReward) * 1000`.\n", "- **Pricing endpoints (approximate):** expected to scale by 1000× but one block of\n", " economic activity occurs between snapshots. 
Tolerance: `max(1000, Expected div 10000)`\n", " (0.01% or 1000 Winston, whichever is larger)." ] }, { "cell_type": "code", "execution_count": 27, "id": "2fff60f2", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:47:44.000000Z", "iopub.status.busy": "2026-02-19T20:47:44.000000Z", "iopub.status.idle": "2026-02-19T20:47:58.000000Z", "shell.execute_reply": "2026-02-19T20:47:58.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{price0 => 59246720252252719,price1g => 18458446185029267217100,\n", " price0_addr => 59246720252252719,price1g_addr => 18458446185029267217100,\n", " price2_0 => 59246720252252719,price2_1g => 18458446185029267217100,\n", " price2_0_addr => 59246720252252719,\n", " price2_1g_addr => 18458446185029267217100,opt0 => 59246720252252719,\n", " opt1g => 18458446185029267217100,opt0_addr => 59246720252252719,\n", " opt1g_addr => 18458446185029267217100,v2_0 => 59246720252252719,\n", " v2_1g => 18458446185029267217100,v2_0_addr => 59246720252252719,\n", " v2_1g_addr => 18458446185029267217100,\n", " wallet_balance => 3456438224999335850673000,\n", " reserved_rewards => 123477414604000000000000}\n" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RedenomBlockHeight = NewRedenomHeight + 1,\n", "ok = MineUntilHeight(RedenomBlockHeight),\n", "PreRedenomBlock = RPCBlockByHeight(RedenomBlockHeight - 1),\n", "RedenomBlock = RPCBlockByHeight(RedenomBlockHeight),\n", "DenomBefore = nb_block:denomination(PreRedenomBlock),\n", "DenomAt = nb_block:denomination(RedenomBlock),\n", "\n", "ok = case DenomAt == DenomBefore + 1 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {denomination_not_incremented, DenomBefore, DenomAt}}\n", "end,\n", "\n", "PostInfoMap = HTTPGetJSONMap(BaseUrl ++ \"/info\"),\n", "PostInfoHeight =\n", "\tcase maps:get(<<\"height\">>, PostInfoMap, undefined) of\n", "\t\tPostH when is_integer(PostH) ->\n", "\t\t\tPostH;\n", "\t\tPostHBin when is_binary(PostHBin) 
->\n", "\t\t\tbinary_to_integer(PostHBin);\n", "\t\tOther2 ->\n", "\t\t\terlang:error({unexpected_info_height, Other2})\n", "\tend,\n", "ok = case PostInfoHeight == RedenomBlockHeight of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {post_height_mismatch, PostInfoHeight, RedenomBlockHeight}}\n", "end,\n", "\n", "PreNetwork = maps:get(<<\"network\">>, PreInfoMap, undefined),\n", "PostNetwork = maps:get(<<\"network\">>, PostInfoMap, undefined),\n", "ok = case PreNetwork == PostNetwork of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {network_changed, PreNetwork, PostNetwork}}\n", "end,\n", "\n", "PostHTTP = #\n", "{\n", "\tprice0 => HTTPGetInteger(BaseUrl ++ \"/price/0\"),\n", "\tprice1g => HTTPGetInteger(BaseUrl ++ \"/price/1000000000\"),\n", "\tprice0_addr => HTTPGetInteger(BaseUrl ++ \"/price/0/\" ++ AddrB64),\n", "\tprice1g_addr => HTTPGetInteger(BaseUrl ++ \"/price/1000000000/\" ++ AddrB64),\n", "\tprice2_0 => HTTPGetJSONFee(BaseUrl ++ \"/price2/0\"),\n", "\tprice2_1g => HTTPGetJSONFee(BaseUrl ++ \"/price2/1000000000\"),\n", "\tprice2_0_addr => HTTPGetJSONFee(BaseUrl ++ \"/price2/0/\" ++ AddrB64),\n", "\tprice2_1g_addr => HTTPGetJSONFee(BaseUrl ++ \"/price2/1000000000/\" ++ AddrB64),\n", "\topt0 => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/0\"),\n", "\topt1g => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/1000000000\"),\n", "\topt0_addr => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/0/\" ++ AddrB64),\n", "\topt1g_addr => HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/1000000000/\" ++ AddrB64),\n", "\tv2_0 => HTTPGetInteger(BaseUrl ++ \"/v2price/0\"),\n", "\tv2_1g => HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000\"),\n", "\tv2_0_addr => HTTPGetInteger(BaseUrl ++ \"/v2price/0/\" ++ AddrB64),\n", "\tv2_1g_addr => HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000/\" ++ AddrB64),\n", "\twallet_balance => HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/balance\"),\n", "\treserved_rewards => HTTPGetInteger(BaseUrl ++ 
\"/wallet/\" ++ AddrB64 ++ \"/reserved_rewards_total\")\n", "},\n", "\n", "ParseIntValue =\n", "\tfun(Value) ->\n", "\t\tcase Value of\n", "\t\t\tInt when is_integer(Int) ->\n", "\t\t\t\tInt;\n", "\t\t\tBin when is_binary(Bin) ->\n", "\t\t\t\tbinary_to_integer(Bin);\n", "\t\t\tOther ->\n", "\t\t\t\terlang:error({unexpected_integer_value, Other})\n", "\t\tend\n", "\tend,\n", "\n", "PreRewardBlock = HTTPGetJSONMap(BaseUrl ++ \"/block/height/\" ++ integer_to_list(RedenomBlockHeight - 1)),\n", "PreRewardValue = maps:get(<<\"reward\">>, PreRewardBlock, undefined),\n", "PreReward =\n", "\tcase PreRewardValue of\n", "\t\tundefined ->\n", "\t\t\terlang:error({missing_reward, PreRewardBlock});\n", "\t\t_ ->\n", "\t\t\tParseIntValue(PreRewardValue)\n", "\tend,\n", "PreWalletBalance = maps:get(wallet_balance, PreHTTP),\n", "PostWalletBalance = maps:get(wallet_balance, PostHTTP),\n", "ExpectedPostWallet = (PreWalletBalance + PreReward) * 1000,\n", "ok = case PostWalletBalance == ExpectedPostWallet of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {wallet_balance_scale_mismatch, PreWalletBalance, PreReward, PostWalletBalance}}\n", "end,\n", "\n", "Scale = 1000,\n", "\n", "CheckApproxScale =\n", "\tfun(Key) ->\n", "\t\tPreVal = maps:get(Key, PreHTTP),\n", "\t\tPostVal = maps:get(Key, PostHTTP),\n", "\t\tExpected = PreVal * Scale,\n", "\t\tDiff = abs(PostVal - Expected),\n", "\t\tMaxDiff = max(1000, Expected div 10000),\n", "\t\tcase Diff =< MaxDiff of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\terlang:error({redenom_approx_scale_mismatch, Key, PreVal, PostVal, Diff, MaxDiff})\n", "\t\tend\n", "\tend,\n", "\n", "ApproxScaleKeys = [\n", "\tprice0, price1g, price0_addr, price1g_addr,\n", "\tprice2_0, price2_1g, price2_0_addr, price2_1g_addr,\n", "\topt0, opt1g, opt0_addr, opt1g_addr,\n", "\tv2_0, v2_1g, v2_0_addr, v2_1g_addr,\n", "\treserved_rewards\n", "],\n", "lists:foreach(CheckApproxScale, ApproxScaleKeys),\n", "\n", "PostHTTP." 
] }, { "cell_type": "markdown", "id": "5515c8c7", "metadata": {}, "source": [ "### Assert block reward and endowment pool around redenomination" ] }, { "cell_type": "markdown", "id": "ef36e6e3", "metadata": {}, "source": [ "Mines 1 block to `RedenomBlockHeight + 1`. Uses `ComputeExpectedRewardAndPool` (defined earlier)\n", "to verify the block reward and endowment pool at three heights around the redenomination\n", "boundary: `RedenomBlockHeight - 1` (pre), `RedenomBlockHeight` (redenomination block),\n", "and `RedenomBlockHeight + 1` (post).\n", "\n", "**Assertions:**\n", "- At each height, `block.reward` matches the expected reward within 0.1%.\n", "- At each height, `block.reward_pool` matches the expected pool within 0.1%.\n", "\n", "See the trigger block cell above for the tolerance justification." ] }, { "cell_type": "code", "execution_count": 28, "id": "617e583f", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:47:58.000000Z", "iopub.status.busy": "2026-02-19T20:47:58.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{at =>\n", " #{height => 2069894,miner_fee_share => 0,endowment_fee_share => 0,\n", " expected_reward => 123478065960000000000000,\n", " expected_pool => 292873436150971032664149327000,\n", " inflation => 123478065960000000000,storage_cost => 0,\n", " actual_reward => 123477414604000000000000,\n", " actual_pool => 292873436150971032664149327000},\n", " pre =>\n", " #{height => 2069893,miner_fee_share => 0,endowment_fee_share => 0,\n", " expected_reward => 123478065960000000000,\n", " expected_pool => 292873436150971032664149327,\n", " inflation => 123478065960000000000,storage_cost => 0,\n", " actual_reward => 123477740282000000000,\n", " actual_pool => 292873436150971032664149327},\n", " post =>\n", " #{height => 2069895,miner_fee_share => 0,endowment_fee_share => 0,\n", " expected_reward => 
123478065960000000000000,\n", " expected_pool => 292873436150971032664149327000,\n", " inflation => 123478065960000000000000,storage_cost => 0,\n", " actual_reward => 123477088927000000000000,\n", " actual_pool => 292873436150971032664149327000}}\n" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ok = MineUntilHeight(RedenomBlockHeight + 1),\n", "\n", "AssertRewardAndPool =\n", "\tfun(CheckHeight) ->\n", "\t\tBlock = RPCBlockByHeight(CheckHeight),\n", "\t\tPrevBlock = RPCBlockByHeight(CheckHeight - 1),\n", "\t\tBlockTXs = EnsureTXs(nb_block:txs(Block)),\n", "\t\tExpected = ComputeExpectedRewardAndPool(CheckHeight, Block, PrevBlock, BlockTXs),\n", "\t\tActReward = nb_block:reward(Block),\n", "\t\tActPool = nb_block:reward_pool(Block),\n", "\t\tExpReward = maps:get(expected_reward, Expected),\n", "\t\tExpPool = maps:get(expected_pool, Expected),\n", "\t\tRTol = max(1, ExpReward div 1000),\n", "\t\tok =\n", "\t\t\tcase abs(ActReward - ExpReward) =< RTol of\n", "\t\t\t\ttrue ->\n", "\t\t\t\t\tok;\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\t{error, {reward_mismatch, CheckHeight, ActReward, ExpReward, RTol}}\n", "\t\t\tend,\n", "\t\tPTol = max(1, ExpPool div 1000),\n", "\t\tok =\n", "\t\t\tcase abs(ActPool - ExpPool) =< PTol of\n", "\t\t\t\ttrue ->\n", "\t\t\t\t\tok;\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\t{error, {pool_mismatch, CheckHeight, ActPool, ExpPool, PTol}}\n", "\t\t\tend,\n", "\t\tExpected#{\n", "\t\t\theight => CheckHeight,\n", "\t\t\tactual_reward => ActReward,\n", "\t\t\tactual_pool => ActPool\n", "\t\t}\n", "\tend,\n", "\n", "PreResult = AssertRewardAndPool(RedenomBlockHeight - 1),\n", "AtResult = AssertRewardAndPool(RedenomBlockHeight),\n", "PostResult = AssertRewardAndPool(RedenomBlockHeight + 1),\n", "\n", "#{ pre => PreResult, at => AtResult, post => PostResult }.\n" ] }, { "cell_type": "markdown", "id": "53ef2398", "metadata": {}, "source": [ "### Assert miner balance deltas around redenomination" ] }, { 
"cell_type": "markdown", "id": "cfeb5e87", "metadata": {}, "source": [ "No blocks mined. Checks the miner balance delta at `RedenomBlockHeight - 1` (pre-redenomination) and `RedenomBlockHeight` (redenomination block).\n", "\n", "For each height H, the expected balance delta at H+1 is `Redenominate(block(H).reward, block(H).denomination, block(H+1).denomination)`. The actual delta is `MinerBalanceAt(H+1) - Redenominate(MinerBalanceAt(H), block(H).denomination, block(H+1).denomination)`. Asserts exact equality." ] }, { "cell_type": "code", "execution_count": 29, "id": "6d74e620", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "CheckMinerDelta =\n", "\tfun(RewardHeight) ->\n", "\t\tRewardBlock = RPCBlockByHeight(RewardHeight),\n", "\t\tApplyBlock = RPCBlockByHeight(RewardHeight + 1),\n", "\t\tReward = nb_block:reward(RewardBlock),\n", "\t\tRewardDenom = nb_block:denomination(RewardBlock),\n", "\t\tApplyDenom = nb_block:denomination(ApplyBlock),\n", "\t\tExpectedApplied = Redenominate(Reward, RewardDenom, ApplyDenom),\n", "\t\tBalanceBefore = MinerBalanceAt(RewardHeight),\n", "\t\tBalanceAfter = MinerBalanceAt(RewardHeight + 1),\n", "\t\tBalanceBeforeNormalized = Redenominate(BalanceBefore, RewardDenom, ApplyDenom),\n", "\t\tDelta = BalanceAfter - BalanceBeforeNormalized,\n", "\t\tcase Delta == ExpectedApplied of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\t{error, {miner_balance_delta_mismatch, RewardHeight, Delta, ExpectedApplied}}\n", "\t\tend\n", "\tend,\n", "\n", "ok = CheckMinerDelta(RedenomBlockHeight - 1),\n", "ok = CheckMinerDelta(RedenomBlockHeight),\n", "\n", "ok." 
] }, { "cell_type": "markdown", "id": "c165093d", "metadata": {}, "source": [ "### Per-height summary table" ] }, { "cell_type": "markdown", "id": "f30f0dd6", "metadata": {}, "source": [ "No blocks mined. Displays a summary map for every height from `RedenomTriggerBlockHeight - 2` through `RedenomBlockHeight + 1`, showing denomination, redenomination_height, reward_pool, and miner_balance. This is an informational cell with no assertions." ] }, { "cell_type": "code", "execution_count": 30, "id": "19514b7a", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "[#{height => 2069890,denomination => 4,redenomination_height => 2069886,\n", " reward_pool => 292873436146306634442575000,\n", " miner_balance => 2962529974195557425000},\n", " #{height => 2069891,denomination => 4,redenomination_height => 2069886,\n", " reward_pool => 292873436146306634442575000,\n", " miner_balance => 3086008691515557425000},\n", " #{height => 2069892,denomination => 4,redenomination_height => 2069893,\n", " reward_pool => 292873436150971032664149327,\n", " miner_balance => 3209482185537424771957},\n", " #{height => 2069893,denomination => 4,redenomination_height => 2069893,\n", " reward_pool => 292873436150971032664149327,\n", " miner_balance => 3332960484717335850673},\n", " #{height => 2069894,denomination => 5,redenomination_height => 2069893,\n", " reward_pool => 292873436150971032664149327000,\n", " miner_balance => 3456438224999335850673000},\n", " #{height => 2069895,denomination => 5,redenomination_height => 2069893,\n", " reward_pool => 292873436150971032664149327000,\n", " miner_balance => 3579915639603335850673000}]\n" ] }, "execution_count": 30, "metadata": {}, "output_type": "execute_result" } ], "source": [ "SummaryStart0 = 
RedenomTriggerBlockHeight - 2,\n", "SummaryStart =\n", "\tcase SummaryStart0 < 0 of\n", "\t\ttrue ->\n", "\t\t\t0;\n", "\t\tfalse ->\n", "\t\t\tSummaryStart0\n", "\tend,\n", "SummaryEnd = RedenomBlockHeight + 1,\n", "Heights = lists:seq(SummaryStart, SummaryEnd),\n", "Summary =\n", "\t[begin\n", "\t\tBlock = RPCBlockByHeight(Height),\n", "\t\t#{ height => Height,\n", "\t\t denomination => nb_block:denomination(Block),\n", "\t\t redenomination_height => nb_block:redenomination_height(Block),\n", "\t\t reward_pool => nb_block:reward_pool(Block),\n", "\t\t miner_balance => WalletBalanceFromBlock(Block, MiningAddr) }\n", "\t end || Height <- Heights],\n", "Summary." ] }, { "cell_type": "markdown", "id": "37e8f267", "metadata": {}, "source": [ "## Post-redenomination HTTP endpoint checks" ] }, { "cell_type": "markdown", "id": "0229eac0", "metadata": {}, "source": [ "Validates every HTTP pricing, wallet, and block endpoint at the current height\n", "(post-redenomination). Each cell fetches a value from the HTTP API and asserts a property:\n", "\n", "- **`/price/{size}`**: price for 0 bytes and 1 GB. `price(1GB) > price(0)`.\n", "- **`/price/{size}/{addr}`**: with the miner address, equals the no-address variant.\n", "- **`/price2/` and `/optimistic_price/`**: JSON fee endpoints. 
`price2 == price`,\n", " `optimistic_price <= price`.\n", "- **`/v2price/`**: positive, monotonic in data size, address variant equals no-address.\n", "- **`/wallet/{addr}/balance`**: equals the per-block balance endpoint.\n", "- **`/wallet/{addr}/last_tx`**: decodes to 32 bytes; the TX JSON has matching `id`\n", " and non-empty `signature`.\n", "- **`/wallet/{addr}/reserved_rewards_total`**: equals the current block's reward\n", " (`locked_rewards_blocks = 1`, sole miner).\n", "- **`/tx_anchor`**: decodes to a block hash from the last 10 blocks.\n", "- **`/block/height/{h}`**: returned height matches the request.\n", "- **`/info`**: network name matches localnet.\n", "- **`/tx/pending`**: every entry is a valid 32-byte base64-encoded TX id.\n", "- **Redenomination scaling:** all pricing and wallet endpoints are approximately\n", " 1000× their pre-redenomination values (tolerance: 0.01% or 1000 base units)." ] }, { "cell_type": "code", "execution_count": 31, "id": "7e4e23c8", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "#{addr => \"yjrOLPSHP1cvZ_y1bSLkLd8lIWhu9dsbSufXm9-QjsY\",height => 2069895}\n" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "AddrB64 = binary_to_list(ar_util:encode(MiningAddr)),\n", "InfoMap = HTTPGetJSONMap(BaseUrl ++ \"/info\"),\n", "CurrentHeight =\n", "\tcase maps:get(<<\"height\">>, InfoMap, undefined) of\n", "\t\tCurH when is_integer(CurH) ->\n", "\t\t\tCurH;\n", "\t\tCurHBin when is_binary(CurHBin) ->\n", "\t\t\tbinary_to_integer(CurHBin);\n", "\t\tOther ->\n", "\t\t\terlang:error({unexpected_info_height, Other})\n", "\tend,\n", "#{addr => AddrB64, height => CurrentHeight}. 
" ] }, { "cell_type": "markdown", "id": "post-http-redenomination-scaling-doc", "metadata": {}, "source": [ "### Assert post-redenomination values are ~1000× pre-redenomination\n", "\n", "Compares every pricing and wallet endpoint at the current post-redenomination height\n", "against the `PreHTTP` snapshot captured before redenomination. Between the two snapshots,\n", "2 blocks were mined (redenomination block + 1 post block), so values may drift slightly\n", "due to normal economic activity. Tolerance: `max(1000, Expected div 10000)` (0.01% or\n", "1000 base units, whichever is larger)." ] }, { "cell_type": "code", "execution_count": 32, "id": "post-http-redenomination-scaling", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "PostHTTPCheck = #\n", "{\n", "\tprice0 => HTTPGetInteger(BaseUrl ++ \"/price/0\"),\n", "\tprice1g => HTTPGetInteger(BaseUrl ++ \"/price/1000000000\"),\n", "\tprice0_addr => HTTPGetInteger(BaseUrl ++ \"/price/0/\" ++ AddrB64),\n", "\tprice1g_addr => HTTPGetInteger(BaseUrl ++ \"/price/1000000000/\" ++ AddrB64),\n", "\tv2_0 => HTTPGetInteger(BaseUrl ++ \"/v2price/0\"),\n", "\tv2_1g => HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000\"),\n", "\tv2_0_addr => HTTPGetInteger(BaseUrl ++ \"/v2price/0/\" ++ AddrB64),\n", "\tv2_1g_addr => HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000/\" ++ AddrB64),\n", "\twallet_balance => HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/balance\"),\n", "\treserved_rewards => HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/reserved_rewards_total\")\n", "},\n", "\n", "PostHTTPScaleKeys = [\n", "\tprice0, price1g, price0_addr, price1g_addr,\n", "\tv2_0, v2_1g, v2_0_addr, 
v2_1g_addr,\n", "\treserved_rewards\n", "],\n", "\n", "lists:foreach(\n", "\tfun(Key) ->\n", "\t\tPreVal = maps:get(Key, PreHTTP),\n", "\t\tPostVal = maps:get(Key, PostHTTPCheck),\n", "\t\tExpected = PreVal * 1000,\n", "\t\tDiff = abs(PostVal - Expected),\n", "\t\tMaxDiff = max(1000, Expected div 10000),\n", "\t\tcase Diff =< MaxDiff of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\terlang:error({post_redenomination_scale_mismatch,\n", "\t\t\t\t\tKey, PreVal, PostVal, Expected, Diff, MaxDiff})\n", "\t\tend\n", "\tend,\n", "\tPostHTTPScaleKeys\n", "),\n", "\n", "ok." ] }, { "cell_type": "code", "execution_count": 33, "id": "3fedfb74", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "59246720252252719\n" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price0 = HTTPGetInteger(BaseUrl ++ \"/price/0\"),\n", "Price0." ] }, { "cell_type": "code", "execution_count": 34, "id": "43fb689b", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price1G = HTTPGetInteger(BaseUrl ++ \"/price/1000000000\"),\n", "ok = case Price1G > Price0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price_not_monotonic, Price0, Price1G}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 35, "id": "b084310a", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price0Addr = HTTPGetInteger(BaseUrl ++ \"/price/0/\" ++ AddrB64),\n", "ok = case Price0Addr == Price0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price_addr_mismatch, Price0Addr, Price0}}\n", "end." ] }, { "cell_type": "code", "execution_count": 36, "id": "0244bc6b", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 36, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price1GAddr = HTTPGetInteger(BaseUrl ++ \"/price/1000000000/\" ++ AddrB64),\n", "ok = case Price1GAddr == Price1G of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price_addr_mismatch, Price1GAddr, Price1G}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 37, "id": "5e175c9e", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price2_0 = HTTPGetJSONFee(BaseUrl ++ \"/price2/0\"),\n", "ok = case Price2_0 == Price0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price2_mismatch, Price2_0, Price0}}\n", "end." ] }, { "cell_type": "code", "execution_count": 38, "id": "7e58671c", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 38, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price2_1G = HTTPGetJSONFee(BaseUrl ++ \"/price2/1000000000\"),\n", "ok = case Price2_1G == Price1G of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price2_mismatch, Price2_1G, Price1G}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 39, "id": "0c077c44", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 39, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price2_0Addr = HTTPGetJSONFee(BaseUrl ++ \"/price2/0/\" ++ AddrB64),\n", "ok = case Price2_0Addr == Price0Addr of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price2_mismatch, Price2_0Addr, Price0Addr}}\n", "end." ] }, { "cell_type": "code", "execution_count": 40, "id": "fcb181b3", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 40, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Price2_1GAddr = HTTPGetJSONFee(BaseUrl ++ \"/price2/1000000000/\" ++ AddrB64),\n", "ok = case Price2_1GAddr == Price1GAddr of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {price2_mismatch, Price2_1GAddr, Price1GAddr}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 41, "id": "800c0aac", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 41, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Opt0 = HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/0\"),\n", "ok = case Opt0 =< Price0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {optimistic_too_high, Opt0, Price0}}\n", "end." ] }, { "cell_type": "code", "execution_count": 42, "id": "4a69ee36", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 42, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Opt1G = HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/1000000000\"),\n", "ok = case Opt1G =< Price1G of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {optimistic_too_high, Opt1G, Price1G}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 43, "id": "24c08591", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 43, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Opt0Addr = HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/0/\" ++ AddrB64),\n", "ok = case Opt0Addr =< Price0Addr of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {optimistic_too_high, Opt0Addr, Price0Addr}}\n", "end." ] }, { "cell_type": "code", "execution_count": 44, "id": "53865da9", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 44, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Opt1GAddr = HTTPGetJSONFee(BaseUrl ++ \"/optimistic_price/1000000000/\" ++ AddrB64),\n", "ok = case Opt1GAddr =< Price1GAddr of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {optimistic_too_high, Opt1GAddr, Price1GAddr}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 45, "id": "7615efe7", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 45, "metadata": {}, "output_type": "execute_result" } ], "source": [ "V2_0 = HTTPGetInteger(BaseUrl ++ \"/v2price/0\"),\n", "ok = case V2_0 > 0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {v2price_not_positive, V2_0}}\n", "end." ] }, { "cell_type": "code", "execution_count": 46, "id": "fce9ac9f", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 46, "metadata": {}, "output_type": "execute_result" } ], "source": [ "V2_1G = HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000\"),\n", "ok = case V2_1G > V2_0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {v2price_not_monotonic, V2_0, V2_1G}}\n", "end." ] }, { "cell_type": "code", "execution_count": 47, "id": "96a051c4", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 47, "metadata": {}, "output_type": "execute_result" } ], "source": [ "V2_0Addr = HTTPGetInteger(BaseUrl ++ \"/v2price/0/\" ++ AddrB64),\n", "ok = case V2_0Addr == V2_0 of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {v2price_addr_mismatch, V2_0Addr, V2_0}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 48, "id": "1c20fe9d", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 48, "metadata": {}, "output_type": "execute_result" } ], "source": [ "V2_1GAddr = HTTPGetInteger(BaseUrl ++ \"/v2price/1000000000/\" ++ AddrB64),\n", "ok = case V2_1GAddr == V2_1G of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {v2price_addr_mismatch, V2_1GAddr, V2_1G}}\n", "end." ] }, { "cell_type": "code", "execution_count": 49, "id": "f86858ec", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 49, "metadata": {}, "output_type": "execute_result" } ], "source": [ "WalletBalanceHTTP = HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/balance\"),\n", "WalletBalanceAtHeight =\n", "\tHTTPGetInteger(\n", "\t\tBaseUrl ++ \"/block/height/\" ++ integer_to_list(CurrentHeight) ++ \"/wallet/\" ++ AddrB64 ++ \"/balance\"\n", "\t),\n", "ok = case WalletBalanceHTTP == WalletBalanceAtHeight of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {wallet_balance_mismatch, WalletBalanceHTTP, WalletBalanceAtHeight}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 50, "id": "a4e360b2", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 50, "metadata": {}, "output_type": "execute_result" } ], "source": [ "LastTXB64 =\n", "\tcase HTTPGet(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/last_tx\") of\n", "\t\t{ok, LastTXBody} ->\n", "\t\t\tiolist_to_binary(LastTXBody);\n", "\t\t{error, LastTXErr} ->\n", "\t\t\terlang:error({last_tx_failed, LastTXErr})\n", "\tend,\n", "ok = case DecodeBase64(LastTXB64) of\n", "\t{ok, LastTXDecoded} ->\n", "\t\tcase byte_size(LastTXDecoded) == 32 of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\t{error, {last_tx_size_mismatch, byte_size(LastTXDecoded)}}\n", "\t\tend;\n", "\t{error, LastTXDecErr} ->\n", "\t\t{error, LastTXDecErr}\n", "end,\n", "LastTXMap = HTTPGetJSONMap(BaseUrl ++ \"/tx/\" ++ binary_to_list(LastTXB64)),\n", "ok = case maps:get(<<\"id\">>, LastTXMap, undefined) of\n", "\tundefined ->\n", "\t\t{error, {missing_tx_id, LastTXMap}};\n", "\tLastTXB64 ->\n", "\t\tok;\n", "\tOtherId ->\n", "\t\t{error, {last_tx_id_mismatch, OtherId, LastTXB64}}\n", "end,\n", "ok = case maps:get(<<\"signature\">>, LastTXMap, undefined) of\n", "\tSig when is_binary(Sig), byte_size(Sig) > 0 ->\n", "\t\tok;\n", "\tOtherSig ->\n", "\t\t{error, {invalid_tx_signature, OtherSig}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 51, "id": "20c58916", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:09.000000Z", "shell.execute_reply": "2026-02-19T20:48:09.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 51, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ReservedHTTP = HTTPGetInteger(BaseUrl ++ \"/wallet/\" ++ AddrB64 ++ \"/reserved_rewards_total\"),\n", "\n", "BlockForReserved = RPCBlockByHeight(CurrentHeight),\n", "ExpectedReserved = nb_block:reward(BlockForReserved),\n", "\n", "ok = case ReservedHTTP == ExpectedReserved of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {reserved_rewards_mismatch, ReservedHTTP, ExpectedReserved}}\n", "end." ] }, { "cell_type": "code", "execution_count": 52, "id": "bfbb8047", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:09.000000Z", "iopub.status.busy": "2026-02-19T20:48:09.000000Z", "iopub.status.idle": "2026-02-19T20:48:10.000000Z", "shell.execute_reply": "2026-02-19T20:48:10.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 52, "metadata": {}, "output_type": "execute_result" } ], "source": [ "AnchorB64 =\n", "\tcase HTTPGet(BaseUrl ++ \"/tx_anchor\") of\n", "\t\t{ok, AnchorBody} ->\n", "\t\t\tiolist_to_binary(AnchorBody);\n", "\t\t{error, AnchorErr} ->\n", "\t\t\terlang:error({tx_anchor_failed, AnchorErr})\n", "\tend,\n", "AnchorBin =\n", "\tcase DecodeBase64(AnchorB64) of\n", "\t\t{ok, AnchorDecoded} ->\n", "\t\t\tAnchorDecoded;\n", "\t\t{error, AnchorDecErr} ->\n", "\t\t\terlang:error({tx_anchor_invalid, AnchorDecErr})\n", "\tend,\n", "RecentStart0 = CurrentHeight - 10,\n", "RecentStart =\n", "\tcase RecentStart0 < 0 of\n", "\t\ttrue ->\n", "\t\t\t0;\n", "\t\tfalse ->\n", "\t\t\tRecentStart0\n", "\tend,\n", "RecentHeights = lists:seq(RecentStart, 
CurrentHeight),\n", "RecentHashes =\n", "\t[begin\n", "\t\tHashB64 = HTTPGetBlockHash(Height),\n", "\t\tcase DecodeBase64(HashB64) of\n", "\t\t\t{ok, HashBin} ->\n", "\t\t\t\tHashBin;\n", "\t\t\t{error, HashErr} ->\n", "\t\t\t\terlang:error({invalid_block_hash, Height, HashErr})\n", "\t\tend\n", "\t end || Height <- RecentHeights],\n", "ok = case lists:member(AnchorBin, RecentHashes) of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {tx_anchor_not_recent, AnchorB64, RecentHeights}}\n", "end." ] }, { "cell_type": "code", "execution_count": 53, "id": "b759b251", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:10.000000Z", "iopub.status.busy": "2026-02-19T20:48:10.000000Z", "iopub.status.idle": "2026-02-19T20:48:10.000000Z", "shell.execute_reply": "2026-02-19T20:48:10.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 53, "metadata": {}, "output_type": "execute_result" } ], "source": [ "BlockMap = HTTPGetJSONMap(BaseUrl ++ \"/block/height/\" ++ integer_to_list(CurrentHeight)),\n", "ok = case maps:get(<<\"height\">>, BlockMap, undefined) of\n", "\tCurrentHeight ->\n", "\t\tok;\n", "\tHeightVal ->\n", "\t\t{error, {block_height_mismatch, HeightVal, CurrentHeight}}\n", "end." 
] }, { "cell_type": "code", "execution_count": 54, "id": "ce6f6345", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:10.000000Z", "iopub.status.busy": "2026-02-19T20:48:10.000000Z", "iopub.status.idle": "2026-02-19T20:48:10.000000Z", "shell.execute_reply": "2026-02-19T20:48:10.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 54, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ok = case maps:get(<<\"network\">>, InfoMap, undefined) of\n", "\tundefined ->\n", "\t\t{error, {missing_network, InfoMap}};\n", "\tNetwork ->\n", "\t\tcase Network == list_to_binary(LocalnetNetworkName) of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\t{error, {network_mismatch, Network, LocalnetNetworkName}}\n", "\t\tend\n", "end." ] }, { "cell_type": "code", "execution_count": 55, "id": "c7c6835b", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:10.000000Z", "iopub.status.busy": "2026-02-19T20:48:10.000000Z", "iopub.status.idle": "2026-02-19T20:48:10.000000Z", "shell.execute_reply": "2026-02-19T20:48:10.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 55, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Pending = HTTPGetJSONList(BaseUrl ++ \"/tx/pending\"),\n", "ok = case lists:all(\n", "\tfun(PendingItem) ->\n", "\t\tcase DecodeBase64(PendingItem) of\n", "\t\t\t{ok, PendingDec} ->\n", "\t\t\t\tbyte_size(PendingDec) == 32;\n", "\t\t\t{error, _} ->\n", "\t\t\t\tfalse\n", "\t\tend\n", "\tend,\n", "\tPending\n", ") of\n", "\ttrue ->\n", "\t\tok;\n", "\tfalse ->\n", "\t\t{error, {pending_invalid_entries, Pending}}\n", "end." 
] }, { "cell_type": "markdown", "id": "f1001a0d", "metadata": {}, "source": [ "## Cleanup" ] }, { "cell_type": "markdown", "id": "5261b331", "metadata": {}, "source": [ "### Restore overridden parameters\n", "\n", "Restores `redenomination_threshold`, `redenomination_delay_blocks`, and `locked_rewards_blocks` to the values saved before overriding." ] }, { "cell_type": "code", "execution_count": 56, "id": "53246420", "metadata": { "execution": { "iopub.execute_input": "2026-02-19T20:48:10.000000Z", "iopub.status.busy": "2026-02-19T20:48:10.000000Z", "iopub.status.idle": "2026-02-19T20:48:10.000000Z", "shell.execute_reply": "2026-02-19T20:48:10.000000Z" } }, "outputs": [ { "data": { "text/plain": [ "ok\n" ] }, "execution_count": 56, "metadata": {}, "output_type": "execute_result" } ], "source": [ "RestoreEnv =\n", "\tfun(Key, Prev) ->\n", "\t\tcase Prev of\n", "\t\t\t{ok, Value} ->\n", "\t\t\t\tRPCCall(application, set_env, [arweave, Key, Value]);\n", "\t\t\tundefined ->\n", "\t\t\t\tRPCCall(application, unset_env, [arweave, Key]);\n", "\t\t\t_ ->\n", "\t\t\t\tRPCCall(application, unset_env, [arweave, Key])\n", "\t\tend\n", "\tend,\n", "\n", "RestoreEnv(redenomination_threshold, PrevRedenomThreshold),\n", "RestoreEnv(redenomination_delay_blocks, PrevRedenomDelay),\n", "RestoreEnv(locked_rewards_blocks, PrevLockedRewards),\n", "\n", "ok." 
] }, { "cell_type": "code", "execution_count": null, "id": "57f813d8-1961-42b1-a72d-5fba85538df6", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Erlang", "language": "erlang", "name": "erlang" }, "language_info": { "file_extension": ".erl", "name": "erlang", "version": "26.2.1" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: notebooks/pricing_transition_localnet.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "089ad431", "metadata": {}, "source": [ "# Pricing Transition on Localnet\n", "\n", "## Overview\n", "\n", "The Arweave protocol transitions to the new dynamic (v2) pricing over\n", "a multi-phased transition period. The final phase — the **2.7.2 transition** — linearly\n", "interpolates between a cap price (340 Winston/GiB-minute) and the dynamically computed\n", "v2 price over 518,400 blocks (~24 months).\n", "\n", "This notebook validates pricing behavior around the **transition end** — the first block\n", "where pure v2 pricing takes effect with no interpolation or bounds. The localnet snapshot\n", "starts 5 blocks before the transition boundary.\n", "\n", "**Sections:**\n", "1. **Setup** – connect to the node, define RPC helpers and dollar-price conversions.\n", "2. **Transition window** – query the transition parameters and confirm heights.\n", "3. **Mine past transition** – mine ~40 blocks to cross the boundary and then trigger the first\n", " post-transition price adjustment.\n", "4. **Fetch & display** – collect block pricing data and print a table with\n", " $\\$/GiB$ upload costs (assuming $10/AR).\n", "5. **Validate `is_v2_pricing_height`** – the flag transitions at the right height.\n", "6. **Validate interpolation** – pre-transition target prices follow the interpolation formula.\n", "7. **Validate V2 pricing** – post-transition target prices equal the raw v2 price.\n", "8. 
**Validate continuity** – no sudden price jump at the boundary.\n", "9. **Validate block field evolution** – `price_per_gib_minute` and\n", " `scheduled_price_per_gib_minute` evolve per the EMA recalculation rule." ] }, { "cell_type": "markdown", "id": "1f4cab16", "metadata": {}, "source": [ "## Setup" ] }, { "cell_type": "markdown", "id": "356bee12", "metadata": {}, "source": [ "### Connect to the localnet node\n", "\n", "Starts a distributed Erlang node with long names, sets the cookie to `localnet`,\n", "and pings `main-localnet@127.0.0.1` to confirm connectivity." ] }, { "cell_type": "code", "execution_count": null, "id": "47493a69", "metadata": {}, "outputs": [], "source": [ "Cookie = 'localnet',\n", "Node = 'main-localnet@127.0.0.1',\n", "\n", "HostHasDot =\n", "\tcase string:split(atom_to_list(node()), \"@\") of\n", "\t\t[_Name, Host] ->\n", "\t\t\tcase string:find(Host, \".\") of\n", "\t\t\t\tnomatch ->\n", "\t\t\t\t\tfalse;\n", "\t\t\t\t_ ->\n", "\t\t\t\t\ttrue\n", "\t\t\tend;\n", "\t\t_ ->\n", "\t\t\tfalse\n", "\tend,\n", "\n", "_ =\n", "\tcase {node(), HostHasDot} of\n", "\t\t{nonode@nohost, _} ->\n", "\t\t\tnet_kernel:start([list_to_atom(\"pricing_notebook@127.0.0.1\"), longnames]);\n", "\t\t{_, true} ->\n", "\t\t\tok;\n", "\t\t{_, false} ->\n", "\t\t\tnet_kernel:stop(),\n", "\t\t\tnet_kernel:start([list_to_atom(\"pricing_notebook@127.0.0.1\"), longnames])\n", "\tend,\n", "\n", "true = erlang:set_cookie(node(), Cookie),\n", "ok." ] }, { "cell_type": "code", "execution_count": null, "id": "4c084bc1", "metadata": {}, "outputs": [], "source": [ "{Node, pong} = {Node, net_adm:ping(Node)},\n", "ok." 
] }, { "cell_type": "markdown", "id": "b4553ce6", "metadata": {}, "source": [ "### RPC and mining helpers\n", "\n", "- `RPCCall(M, F, A)` – calls `M:F(A)` on the remote node (30 s timeout).\n", "- `RPCHeight()` – current block height.\n", "- `MineUntilHeight(H)` – asks localnet to mine to height H and polls until reached.\n", "- `RPCBlockByHeight(H)` – reads the block record at height H.\n", "- `RPCGetPricePerGiBMinute(H, Block)` – `ar_pricing:get_price_per_gib_minute/2`.\n", "- `RPCGetV2PricePerGiBMinute(H, Block)` – `ar_pricing:get_v2_price_per_gib_minute/2`.\n", "- `RPCIsV2PricingHeight(H)` – `ar_pricing_transition:is_v2_pricing_height/1`.\n", "- `RPCGetTxFee(Size, Price, Kryder, H)` – `ar_pricing:get_tx_fee/1`." ] }, { "cell_type": "code", "execution_count": null, "id": "0d539cd3", "metadata": {}, "outputs": [], "source": [ "RPCCall = fun(M, F, A) -> rpc:call(Node, M, F, A, 30000) end,\n", "RPCHeight = fun() -> RPCCall(ar_node, get_height, []) end,\n", "\n", "WaitForHeight =\n", "\tfun\n", "\t\t(_, _TargetHeight, 0) ->\n", "\t\t\terror(mine_until_height_timeout);\n", "\t\t(Self, TargetHeight, AttemptsLeft) ->\n", "\t\t\tcase RPCHeight() >= TargetHeight of\n", "\t\t\t\ttrue ->\n", "\t\t\t\t\tok;\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\ttimer:sleep(100),\n", "\t\t\t\t\tSelf(Self, TargetHeight, AttemptsLeft - 1)\n", "\t\t\tend\n", "\tend,\n", "\n", "MineUntilHeight =\n", "\tfun(TargetHeight) ->\n", "\t\tMineResult = RPCCall(ar_localnet, mine_until_height, [TargetHeight]),\n", "\t\tok =\n", "\t\t\tcase MineResult of\n", "\t\t\t\tok ->\n", "\t\t\t\t\tok;\n", "\t\t\t\t[] ->\n", "\t\t\t\t\tok;\n", "\t\t\t\tOther ->\n", "\t\t\t\t\terror({unexpected_mine_until_height_result, Other})\n", "\t\t\tend,\n", "\t\tWaitForHeight(WaitForHeight, TargetHeight, 6000)\n", "\tend,\n", "\n", "RPCBlockHashByHeight =\n", "\tfun(H) ->\n", "\t\tRPCCall(ar_block_index, get_element_by_height, [H])\n", "\tend,\n", "\n", "RPCBlockByHeight =\n", "\tfun(H) ->\n", "\t\tHash = 
RPCBlockHashByHeight(H),\n", "\t\tRPCCall(ar_storage, read_block, [Hash])\n", "\tend,\n", "\n", "RPCIsV2PricingHeight =\n", "\tfun(H) ->\n", "\t\tRPCCall(ar_pricing_transition, is_v2_pricing_height, [H])\n", "\tend,\n", "\n", "RPCGetTxFee =\n", "\tfun(DataSize, Price, Kryder, H) ->\n", "\t\tRPCCall(ar_pricing, get_tx_fee, [{DataSize, Price, Kryder, H}])\n", "\tend,\n", "\n", "ok." ] }, { "cell_type": "markdown", "id": "e87bc3d3", "metadata": {}, "source": [ "### Record accessors and dollar-price helpers\n", "\n", "Compiles `nb_block` at runtime for record field access. Defines HTTP helpers\n", "and dollar conversion assuming **$10/AR**:\n", "\n", "- `WinstonToUSD(W, D)` – converts Winston to USD at denomination `D`.\n", " Formula: $$W × $10 / (10^{12} × 1000^{D−1})$$\n", "- `UploadCostUSD(Price, Kryder, H, D)` – the cost to upload 1 GiB in USD.\n", " Uses `ar_pricing:get_tx_fee/1` which accounts for perpetual storage\n", " (200+ years with 0.5 %/year decay), 20 replicas, Kryder+ rate, and the 5 % miner share." 
] }, { "cell_type": "code", "execution_count": null, "id": "8c785044", "metadata": {}, "outputs": [], "source": [ "TmpDir = \".tmp/notebooks/\",\n", "ok = filelib:ensure_dir(filename:join([TmpDir, \"keep\"])),\n", "\n", "CompileModule =\n", "\tfun(Name, Source) ->\n", "\t\tPath = filename:join([TmpDir, Name ++ \".erl\"]),\n", "\t\tok = file:write_file(Path, Source),\n", "\t\t{ok, Module, Bin} = compile:file(Path, [binary]),\n", "\t\t{module, Module} = code:load_binary(Module, Path, Bin)\n", "\tend,\n", "\n", "BlockAccessors =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_block).\\n\",\n", "\t\t\"-export([height/1, price_per_gib_minute/1, scheduled_price_per_gib_minute/1,\\n\",\n", "\t\t\" denomination/1, kryder_plus_rate_multiplier/1]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar.hrl\\\").\\n\",\n", "\t\t\"height(B) -> B#block.height.\\n\",\n", "\t\t\"price_per_gib_minute(B) -> B#block.price_per_gib_minute.\\n\",\n", "\t\t\"scheduled_price_per_gib_minute(B) -> B#block.scheduled_price_per_gib_minute.\\n\",\n", "\t\t\"denomination(B) -> B#block.denomination.\\n\",\n", "\t\t\"kryder_plus_rate_multiplier(B) -> B#block.kryder_plus_rate_multiplier.\\n\"\n", "\t]),\n", "\n", "CompileModule(\"nb_block\", BlockAccessors),\n", "\n", "%% Remote helper: runs on the node to avoid sending block records over RPC.\n", "%% get_target_and_v2/1 reads the previous block from local storage\n", "%% and computes both the target price and the v2 price on-node.\n", "RemotePricingHelper =\n", "\tlists:flatten([\n", "\t\t\"-module(nb_remote_pricing).\\n\",\n", "\t\t\"-export([get_target_and_v2/1]).\\n\",\n", "\t\t\"-include_lib(\\\"arweave/include/ar.hrl\\\").\\n\",\n", "\t\t\"get_target_and_v2(Height) ->\\n\",\n", "\t\t\" PrevHash = element(1, ar_block_index:get_element_by_height(Height - 1)),\\n\",\n", "\t\t\" PrevBlock = ar_block_cache:get(block_cache, PrevHash),\\n\",\n", "\t\t\" case PrevBlock of\\n\",\n", "\t\t\" not_found ->\\n\",\n", "\t\t\" {error, error};\\n\",\n", "\t\t\" 
_ ->\\n\",\n", "\t\t\" V2 = try ar_pricing:get_v2_price_per_gib_minute(Height, PrevBlock)\\n\",\n", "\t\t\" catch _:_ -> error end,\\n\",\n", "\t\t\" Target = try ar_pricing:get_price_per_gib_minute(Height, PrevBlock)\\n\",\n", "\t\t\" catch _:_ -> error end,\\n\",\n", "\t\t\" {Target, V2}\\n\",\n", "\t\t\" end.\\n\"\n", "\t]),\n", "RemotePricingPath = filename:join([TmpDir, \"nb_remote_pricing.erl\"]),\n", "ok = file:write_file(RemotePricingPath, RemotePricingHelper),\n", "{ok, nb_remote_pricing, RemotePricingBin} = compile:file(RemotePricingPath, [binary]),\n", "{module, nb_remote_pricing} = code:load_binary(nb_remote_pricing, RemotePricingPath, RemotePricingBin),\n", "%% Load on the remote node\n", "{module, nb_remote_pricing} =\n", "\tRPCCall(code, load_binary, [nb_remote_pricing, RemotePricingPath, RemotePricingBin]),\n", "\n", "RPCGetTargetAndV2 =\n", "\tfun(H) ->\n", "\t\tRPCCall(nb_remote_pricing, get_target_and_v2, [H])\n", "\tend,\n", "\n", "{ok, _} = application:ensure_all_started(inets),\n", "\n", "LocalnetHTTPHost =\n", "\tcase os:getenv(\"LOCALNET_HTTP_HOST\") of\n", "\t\tfalse ->\n", "\t\t\t\"127.0.0.1\";\n", "\t\tV ->\n", "\t\t\tV\n", "\tend,\n", "\n", "LocalnetHTTPPort =\n", "\tcase os:getenv(\"LOCALNET_HTTP_PORT\") of\n", "\t\tfalse ->\n", "\t\t\t\"1984\";\n", "\t\tV ->\n", "\t\t\tV\n", "\tend,\n", "\n", "LocalnetNetworkName =\n", "\tcase os:getenv(\"LOCALNET_NETWORK_NAME\") of\n", "\t\tfalse ->\n", "\t\t\t\"arweave.localnet\";\n", "\t\tV ->\n", "\t\t\tV\n", "\tend,\n", "\n", "BaseUrl = \"http://\" ++ LocalnetHTTPHost ++ \":\" ++ LocalnetHTTPPort,\n", "\n", "HTTPGet =\n", "\tfun(Url) ->\n", "\t\tHeaders = [{\"x-network\", LocalnetNetworkName}],\n", "\t\tcase httpc:request(get, {Url, Headers}, [], []) of\n", "\t\t\t{ok, {{_, 200, _}, _, Body}} ->\n", "\t\t\t\t{ok, Body};\n", "\t\t\t{ok, {{_, Status, _}, _, Body}} ->\n", "\t\t\t\t{error, {http_status, Status, Body}};\n", "\t\t\tError ->\n", "\t\t\t\t{error, Error}\n", "\t\tend\n", "\tend,\n", "\n", 
"HTTPGetInteger =\n", "\tfun(Url) ->\n", "\t\tcase HTTPGet(Url) of\n", "\t\t\t{ok, Body} ->\n", "\t\t\t\tbinary_to_integer(iolist_to_binary(Body));\n", "\t\t\t{error, Reason} ->\n", "\t\t\t\t{error, Reason}\n", "\t\tend\n", "\tend,\n", "\n", "GiB = 1073741824,\n", "\n", "WinstonToUSD =\n", "\tfun(Winston, Denom) ->\n", "\t\tPow = lists:foldl(fun(_, Acc) -> Acc * 1000 end, 1, lists:seq(1, Denom - 1)),\n", "\t\tWinston * 10.0 / (1000000000000 * Pow)\n", "\tend,\n", "\n", "UploadCostUSD =\n", "\tfun(Price, Kryder, H, Denom) ->\n", "\t\tTxFee = RPCGetTxFee(GiB, Price, Kryder, H),\n", "\t\tWinstonToUSD(TxFee, Denom)\n", "\tend,\n", "\n", "ok." ] }, { "cell_type": "markdown", "id": "00169a37", "metadata": {}, "source": [ "## Transition Window\n", "\n", "Queries the 2.7.2 pricing transition parameters via RPC.\n", "\n", "The 2.7.2 transition linearly interpolates between a **start price** (the 2.7.2 cap =\n", "340 Winston/GiB-minute) and the dynamic v2 price. The start price is obtained by\n", "evaluating `get_transition_price(TransitionStart, 0)`: at the transition start all weight\n", "is on the start price, so passing a v2 price of 0 yields the start price exactly.\n", "\n", "**Queried values:**\n", "- `ar_pricing_transition:transition_start_2_7_2()` – first height of the 2.7.2 transition.\n", "- `ar_pricing_transition:transition_length_2_7_2()` – number of transition blocks.\n", "- Transition end = start + length (first block with pure v2 pricing).\n", "- `PRICE_ADJUSTMENT_FREQUENCY` = 50 blocks (production/localnet value)." 
] }, { "cell_type": "code", "execution_count": null, "id": "4a09108e", "metadata": {}, "outputs": [], "source": [ "TransitionStart = RPCCall(ar_pricing_transition, transition_start_2_7_2, []),\n", "TransitionLength = RPCCall(ar_pricing_transition, transition_length_2_7_2, []),\n", "TransitionEnd = TransitionStart + TransitionLength,\n", "Height0 = RPCHeight(),\n", "\n", "TransitionStartPrice =\n", "\tRPCCall(ar_pricing_transition, get_transition_price, [TransitionStart, 0]),\n", "\n", "PriceAdjustFreq = 50,\n", "FirstPostAdjust =\n", "\tcase TransitionEnd rem PriceAdjustFreq of\n", "\t\t0 ->\n", "\t\t\tTransitionEnd;\n", "\t\t_ ->\n", "\t\t\t((TransitionEnd div PriceAdjustFreq) + 1) * PriceAdjustFreq\n", "\tend,\n", "\n", "true = is_integer(TransitionStart),\n", "true = is_integer(TransitionLength),\n", "true = is_integer(TransitionEnd),\n", "true = (TransitionLength > 0),\n", "true = (TransitionEnd == TransitionStart + TransitionLength),\n", "340 = TransitionStartPrice,\n", "SnapshotBlocksBeforeEnd = TransitionEnd - Height0,\n", "5 = SnapshotBlocksBeforeEnd,\n", "\n", "io:format(\" Transition start (2.7.2): ~p~n\", [TransitionStart]),\n", "io:format(\" Transition length: ~p blocks~n\", [TransitionLength]),\n", "io:format(\" Transition end: ~p~n\", [TransitionEnd]),\n", "io:format(\" Transition start price: ~p Winston/GiB-min~n\", [TransitionStartPrice]),\n", "io:format(\" Current height: ~p~n\", [Height0]),\n", "io:format(\" Blocks until transition end: ~p~n\", [SnapshotBlocksBeforeEnd]),\n", "io:format(\" Price adjustment frequency: ~p blocks~n\", [PriceAdjustFreq]),\n", "io:format(\" First post-transition adjust: ~p~n\", [FirstPostAdjust]),\n", "ok." ] }, { "cell_type": "markdown", "id": "faf2a84d", "metadata": {}, "source": [ "## Mine Past Transition End\n", "\n", "The snapshot starts 5 blocks before the transition end. 
We mine past the boundary and\n", "past the first post-transition price-adjustment height so we can validate the EMA\n", "recalculation under pure v2 pricing.\n", "\n", "**Mined blocks:** from the current height to `FirstPostAdjust + 5` (~40 blocks).\n", "**Submitted txs:** none.\n", "**Expected:** mining succeeds without errors through the transition boundary." ] }, { "cell_type": "code", "execution_count": null, "id": "f1ce445b", "metadata": {}, "outputs": [], "source": [ "MineTarget = FirstPostAdjust + 5,\n", "io:format(\" Mining from ~p to ~p (~p blocks)...~n\",\n", "\t[Height0, MineTarget, MineTarget - Height0]),\n", "ok = MineUntilHeight(MineTarget),\n", "HeightAfterMine = RPCHeight(),\n", "true = (HeightAfterMine >= MineTarget),\n", "io:format(\" Done. Current height: ~p~n\", [HeightAfterMine]),\n", "ok." ] }, { "cell_type": "markdown", "id": "59f5936c", "metadata": {}, "source": [ "## Fetch and Display Pricing Data\n", "\n", "Fetches block records from `TransitionEnd - 5` to `MineTarget` and builds a table\n", "with the following columns:\n", "\n", "| Column | Source |\n", "|--------|--------|\n", "| **Height** | block height |\n", "| **Price** | `block.price_per_gib_minute` (stored, EMA-smoothed; updates every 50 blocks) |\n", "| **Scheduled Price** | `block.scheduled_price_per_gib_minute` (next value for Price at the next adjustment) |\n", "| **Target** | `ar_pricing:get_price_per_gib_minute(H, PrevBlock)` — the price at the given height adjusted for the transition |\n", "| **V2 Price** | `ar_pricing:get_v2_price_per_gib_minute(H, PrevBlock)` — raw dynamic price (no transition) |\n", "| **V2?** | `ar_pricing_transition:is_v2_pricing_height(H)` |\n", "| $\\mathbf{\\$/GiB}$| Upload cost for 1 GiB in USD at $\\$10/AR$ (via `get_tx_fee`, includes decay, 20 replicas, miner share) |\n", "\n", "Why `price_per_gib_minute` can look small: it is a **Winston per GiB-minute unit rate**, not a direct upload fee. 
The upload fee path annualizes this rate and applies replication, Kryder+, and miner-share factors in `get_tx_fee/1`, so a single-digit GiB-minute rate can still produce a meaningful `$ / GiB` upload cost." ] }, { "cell_type": "code", "execution_count": null, "id": "7cc608c1", "metadata": {}, "outputs": [], "source": [ "RangeStart = TransitionEnd - 5,\n", "RangeEnd = MineTarget,\n", "true = (RangeStart =< RangeEnd),\n", "\n", "Blocks = maps:from_list(\n", "\t[{H, RPCBlockByHeight(H)} || H <- lists:seq(RangeStart, RangeEnd)]\n", "),\n", "\n", "Heights = lists:seq(RangeStart, RangeEnd),\n", "true = (maps:size(Blocks) == length(Heights)),\n", "\n", "Rows = lists:map(\n", "\tfun(H) ->\n", "\t\tBlock = maps:get(H, Blocks),\n", "\t\tPrice = nb_block:price_per_gib_minute(Block),\n", "\t\tScheduled = nb_block:scheduled_price_per_gib_minute(Block),\n", "\t\tDenom = nb_block:denomination(Block),\n", "\t\tKryder = nb_block:kryder_plus_rate_multiplier(Block),\n", "\t\tIsV2 = RPCIsV2PricingHeight(H),\n", "\t\t{TargetPrice, V2Price} = RPCGetTargetAndV2(H),\n", "\t\tUSD = UploadCostUSD(Price, Kryder, H, Denom),\n", "\t\t#{ height => H, price => Price, scheduled => Scheduled,\n", "\t\t denomination => Denom, kryder => Kryder,\n", "\t\t is_v2 => IsV2, v2_price => V2Price, target => TargetPrice,\n", "\t\t upload_usd => USD }\n", "\tend,\n", "\tHeights),\n", "\n", "TargetErrors = [maps:get(height, R) || R <- Rows, maps:get(target, R) == error],\n", "V2Errors = [maps:get(height, R) || R <- Rows, maps:get(v2_price, R) == error],\n", "[] = TargetErrors,\n", "[] = V2Errors,\n", "\n", "io:format(\"~n~10s | ~6s | ~6s | ~7s | ~7s | ~3s | ~s~n\",\n", "\t[\"Height\", \"Price\", \"Sched\", \"Target\", \"V2\", \"V2?\", \"$/GiB upload\"]),\n", "io:format(\"~s~n\", [lists:duplicate(72, $-)]),\n", "\n", "lists:foreach(\n", "\tfun(Row) ->\n", "\t\t#{height := H, price := P, scheduled := S, target := T,\n", "\t\t v2_price := V2, is_v2 := IV2, upload_usd := U} = Row,\n", "\t\tV2Flag =\n", "\t\t\tcase 
IV2 of\n", "\t\t\t\ttrue ->\n", "\t\t\t\t\t\"yes\";\n", "\t\t\t\tfalse ->\n", "\t\t\t\t\t\"no \"\n", "\t\t\tend,\n", "\t\tMark =\n", "\t\t\tcase H of\n", "\t\t\t\t_ when H == TransitionEnd ->\n", "\t\t\t\t\t\" <-- transition end\";\n", "\t\t\t\t_ when H == FirstPostAdjust ->\n", "\t\t\t\t\t\" <-- 1st post-adjust\";\n", "\t\t\t\t_ ->\n", "\t\t\t\t\t\"\"\n", "\t\t\tend,\n", "\t\tFmtInt =\n", "\t\t\tfun(error) -> \"err\";\n", "\t\t\t (N) -> integer_to_list(N)\n", "\t\t\tend,\n", "\t\tUStr = lists:flatten(io_lib:format(\"$~.4f\", [U])),\n", "\t\tio:format(\"~10s | ~6s | ~6s | ~7s | ~7s | ~3s | ~s~s~n\",\n", "\t\t\t[integer_to_list(H), integer_to_list(P), integer_to_list(S),\n", "\t\t\t FmtInt(T), FmtInt(V2), V2Flag, UStr, Mark])\n", "\tend,\n", "\tRows),\n", "\n", "FirstRow = hd(Rows),\n", "Price0 = maps:get(price, FirstRow),\n", "Kryder0 = maps:get(kryder, FirstRow),\n", "HeightPrice0 = maps:get(height, FirstRow),\n", "Denom0 = maps:get(denomination, FirstRow),\n", "UploadWinston0 = RPCGetTxFee(GiB, Price0, Kryder0, HeightPrice0),\n", "UploadUSD0 = UploadCostUSD(Price0, Kryder0, HeightPrice0, Denom0),\n", "io:format(\"~n Sample at height ~p: price_per_gib_minute=~p, fee_1GiB=~p Winston (~.4f USD)~n\",\n", "\t[HeightPrice0, Price0, UploadWinston0, UploadUSD0]),\n", "true = (UploadWinston0 > 0),\n", "\n", "ok." ] }, { "cell_type": "markdown", "id": "fdcb9357", "metadata": {}, "source": [ "## Validate `is_v2_pricing_height`\n", "\n", "`is_v2_pricing_height(H)` must be `false` for all `H < TransitionEnd`\n", "and `true` for all `H >= TransitionEnd`.\n", "\n", "**Expected:** the flag transitions exactly at `TransitionEnd`.\n", "**Queried:** `ar_pricing_transition:is_v2_pricing_height/1` via RPC (already fetched in `Rows`)." 
] }, { "cell_type": "code", "execution_count": null, "id": "5409c33d", "metadata": {}, "outputs": [], "source": [ "PreV2Rows = [R || R <- Rows, maps:get(height, R) < TransitionEnd],\n", "PostV2Rows = [R || R <- Rows, maps:get(height, R) >= TransitionEnd],\n", "true = (length(PreV2Rows) > 0),\n", "true = (length(PostV2Rows) > 0),\n", "\n", "true = lists:all(\n", "\tfun(R) -> maps:get(is_v2, R) == false end,\n", "\tPreV2Rows\n", "),\n", "\n", "true = lists:all(\n", "\tfun(R) -> maps:get(is_v2, R) == true end,\n", "\tPostV2Rows\n", "),\n", "\n", "io:format(\" All pre-transition heights: is_v2 = false [OK]~n\"),\n", "io:format(\" All post-transition heights: is_v2 = true [OK]~n\"),\n", "ok." ] }, { "cell_type": "markdown", "id": "0a1b454e", "metadata": {}, "source": [ "## Validate Pre-Transition Interpolation\n", "\n", "For every height `H < TransitionEnd`, the target price returned by\n", "`ar_pricing:get_price_per_gib_minute(H, PrevBlock)` must match the interpolation\n", "formula used inside `get_transition_price`:\n", "\n", "```\n", "Interval1 = H - TransitionStart\n", "Interval2 = TransitionEnd - H\n", "Expected = (StartPrice * Interval2 + V2Price * Interval1) div (Interval1 + Interval2)\n", "```\n", "\n", "where `StartPrice` = 340 (the 2.7.2 cap) and `V2Price = get_v2_price_per_gib_minute(H, PrevBlock)`.\n", "\n", "During the 2.7.2 transition, bounds are `[0, infinity)`, so the `between` clamp is a no-op.\n", "\n", "Expectation note: this catches interpolation/transition arithmetic issues in\n", "`get_price_per_gib_minute/2`, but still depends on `get_v2_price_per_gib_minute/2`\n", "for the V2 component." 
] }, { "cell_type": "code", "execution_count": null, "id": "929a34c3", "metadata": {}, "outputs": [], "source": [ "PreRows = [R || R <- Rows, maps:get(height, R) < TransitionEnd],\n", "PreRowsValid = [R || R <- PreRows,\n", "\tmaps:get(target, R) /= error, maps:get(v2_price, R) /= error],\n", "true = (length(PreRows) > 0),\n", "true = (length(PreRowsValid) > 0),\n", "\n", "lists:foreach(\n", "\tfun(Row) ->\n", "\t\tH = maps:get(height, Row),\n", "\t\tV2 = maps:get(v2_price, Row),\n", "\t\tTarget = maps:get(target, Row),\n", "\t\tInterval1 = H - TransitionStart,\n", "\t\tInterval2 = TransitionEnd - H,\n", "\t\tExpected = (TransitionStartPrice * Interval2 + V2 * Interval1)\n", "\t\t\tdiv (Interval1 + Interval2),\n", "\t\tcase Target == Expected of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\terror({interpolation_mismatch, H, Target, Expected})\n", "\t\tend\n", "\tend,\n", "\tPreRowsValid),\n", "\n", "io:format(\" ~p/~p pre-transition heights match interpolation formula [OK]~n\",\n", "\t[length(PreRowsValid), length(PreRows)]),\n", "ok." ] }, { "cell_type": "markdown", "id": "d8fee1e4", "metadata": {}, "source": [ "## Validate Post-Transition V2 Pricing\n", "\n", "For every height `H >= TransitionEnd`, the transition is complete and\n", "`get_price_per_gib_minute(H, PrevBlock)` must equal\n", "`get_v2_price_per_gib_minute(H, PrevBlock)` — no interpolation, no bounds.\n", "\n", "Expectation note: this directly validates the transition handoff, but if both\n", "functions shared the same V2 defect it would not catch that defect by itself." 
] }, { "cell_type": "code", "execution_count": null, "id": "c7f0ba6b", "metadata": {}, "outputs": [], "source": [ "PostRows = [R || R <- Rows, maps:get(height, R) >= TransitionEnd],\n", "PostRowsValid = [R || R <- PostRows,\n", "\tmaps:get(target, R) /= error, maps:get(v2_price, R) /= error],\n", "true = (length(PostRows) > 0),\n", "true = (length(PostRowsValid) > 0),\n", "\n", "lists:foreach(\n", "\tfun(Row) ->\n", "\t\tH = maps:get(height, Row),\n", "\t\tV2 = maps:get(v2_price, Row),\n", "\t\tTarget = maps:get(target, Row),\n", "\t\tcase Target == V2 of\n", "\t\t\ttrue ->\n", "\t\t\t\tok;\n", "\t\t\tfalse ->\n", "\t\t\t\terror({v2_price_mismatch, H, Target, V2})\n", "\t\tend\n", "\tend,\n", "\tPostRowsValid),\n", "\n", "io:format(\" ~p/~p post-transition heights: target == V2 price [OK]~n\",\n", "\t[length(PostRowsValid), length(PostRows)]),\n", "ok." ] }, { "cell_type": "markdown", "id": "4bb658c6", "metadata": {}, "source": [ "## Validate Price Continuity at Transition Boundary\n", "\n", "At `TransitionEnd - 1` (last interpolated block), almost all weight is on V2Price:\n", "\n", "```\n", "weight_on_V2 = (TransitionLength - 1) / TransitionLength ~ 0.999998\n", "```\n", "\n", "The gap between the last interpolated price and the first pure-V2 price is at most\n", "`|StartPrice - V2Price| / TransitionLength`, which for `TransitionLength = 518,400`\n", "is negligible.\n", "\n", "**Expected:** relative price change < 0.1 % (1e-3).\n", "**Queried:** target prices from `Rows`." 
] }, { "cell_type": "code", "execution_count": null, "id": "7f205bf4", "metadata": {}, "outputs": [], "source": [ "LastPreValid = [R || R <- PreRows, maps:get(target, R) /= error],\n", "FirstPostValid = [R || R <- PostRows, maps:get(target, R) /= error],\n", "true = (length(LastPreValid) > 0),\n", "true = (length(FirstPostValid) > 0),\n", "\n", "LastPreTarget = maps:get(target, lists:last(LastPreValid)),\n", "FirstPostTarget = maps:get(target, hd(FirstPostValid)),\n", "Gap = abs(FirstPostTarget - LastPreTarget),\n", "RelGap = Gap / max(1, LastPreTarget),\n", "io:format(\" Last pre-transition target: ~p~n\", [LastPreTarget]),\n", "io:format(\" First post-transition target: ~p~n\", [FirstPostTarget]),\n", "io:format(\" Absolute gap: ~p~n\", [Gap]),\n", "io:format(\" Relative gap: ~.8f~n\", [RelGap]),\n", "true = RelGap < 0.001,\n", "io:format(\" Relative gap < 0.1 %% [OK]~n\"),\n", "ok." ] }, { "cell_type": "markdown", "id": "ee9e099f", "metadata": {}, "source": [ "## Validate Block Price Field Evolution\n", "\n", "The block's `price_per_gib_minute` field (stored on-chain) updates only at\n", "**price adjustment heights** (`Height rem 50 == 0`). The recalculation rule\n", "(post fork 2.7.1) is:\n", "\n", "```\n", "NewPrice = PrevBlock.scheduled_price_per_gib_minute\n", "TargetPrice = get_price_per_gib_minute(Height, PrevBlock)\n", "EMAPrice = (9 * PrevScheduled + TargetPrice) div 10\n", "NewScheduled = max(PrevScheduled div 2, min(PrevScheduled * 2, EMAPrice))\n", "```\n", "\n", "Between adjustments both fields are unchanged.\n", "\n", "The nearest adjustment height **after** the transition end is **`FirstPostAdjust`**\n", "(= `TransitionEnd` rounded up to the next multiple of 50). 
At that height the\n", "target price is a pure v2 price for the first time — this verifies the\n", "recalculation correctly handles the transition boundary.\n", "\n", "Expectation note: this cell derives `ExpectedTargetPrice` from transition math +\n", "`v2_price`, then checks EMA against block fields, instead of reusing the already\n", "queried `target` value for the EMA expectation." ] }, { "cell_type": "code", "execution_count": null, "id": "55a1d0f5", "metadata": {}, "outputs": [], "source": [ "Pairs = lists:zip(lists:droplast(Rows), tl(Rows)),\n", "true = (length(Pairs) > 0),\n", "\n", "lists:foreach(\n", "\tfun({PrevRow, CurrRow}) ->\n", "\t\tH = maps:get(height, CurrRow),\n", "\t\tPrice = maps:get(price, CurrRow),\n", "\t\tSched = maps:get(scheduled, CurrRow),\n", "\t\tPrevPrice = maps:get(price, PrevRow),\n", "\t\tPrevSched = maps:get(scheduled, PrevRow),\n", "\t\tIsAdjust = (H rem PriceAdjustFreq == 0),\n", "\t\tcase IsAdjust of\n", "\t\t\tfalse ->\n", "\t\t\t\tcase {Price == PrevPrice, Sched == PrevSched} of\n", "\t\t\t\t\t{true, true} ->\n", "\t\t\t\t\t\tok;\n", "\t\t\t\t\t_ ->\n", "\t\t\t\t\t\terror({unexpected_price_change, H,\n", "\t\t\t\t\t\t\t{price, Price, PrevPrice},\n", "\t\t\t\t\t\t\t{sched, Sched, PrevSched}})\n", "\t\t\t\tend;\n", "\t\t\ttrue ->\n", "\t\t\t\tcase Price == PrevSched of\n", "\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\tok;\n", "\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\terror({price_not_prev_scheduled, H, Price, PrevSched})\n", "\t\t\t\tend,\n", "\t\t\t\tV2Price = maps:get(v2_price, CurrRow),\n", "\t\t\t\tcase V2Price of\n", "\t\t\t\t\terror ->\n", "\t\t\t\t\t\terror({v2_price_missing_for_ema_expectation, H});\n", "\t\t\t\t\t_ ->\n", "\t\t\t\t\t\tExpectedTargetPrice =\n", "\t\t\t\t\t\t\tcase H < TransitionEnd of\n", "\t\t\t\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t\t\t\tInterval1 = H - TransitionStart,\n", "\t\t\t\t\t\t\t\t\tInterval2 = TransitionEnd - H,\n", "\t\t\t\t\t\t\t\t\t(TransitionStartPrice * Interval2 + V2Price * Interval1)\n", 
"\t\t\t\t\t\t\t\t\t\tdiv (Interval1 + Interval2);\n", "\t\t\t\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t\t\t\tV2Price\n", "\t\t\t\t\t\t\tend,\n", "\t\t\t\t\t\tEMAPrice = (9 * PrevSched + ExpectedTargetPrice) div 10,\n", "\t\t\t\t\t\tExpectedSched = max(PrevSched div 2,\n", "\t\t\t\t\t\t\tmin(PrevSched * 2, EMAPrice)),\n", "\t\t\t\t\t\tcase Sched == ExpectedSched of\n", "\t\t\t\t\t\t\ttrue ->\n", "\t\t\t\t\t\t\t\tok;\n", "\t\t\t\t\t\t\tfalse ->\n", "\t\t\t\t\t\t\t\terror({scheduled_price_mismatch, H,\n", "\t\t\t\t\t\t\t\t\tSched, ExpectedSched,\n", "\t\t\t\t\t\t\t\t\t{expected_target, ExpectedTargetPrice},\n", "\t\t\t\t\t\t\t\t\t{ema, EMAPrice},\n", "\t\t\t\t\t\t\t\t\t{v2, V2Price},\n", "\t\t\t\t\t\t\t\t\t{prev_sched, PrevSched}})\n", "\t\t\t\t\t\tend\n", "\t\t\t\tend\n", "\t\tend\n", "\tend,\n", "\tPairs),\n", "\n", "AdjustHeights = [H || H <- Heights, H rem PriceAdjustFreq == 0],\n", "true = (length(AdjustHeights) > 0),\n", "io:format(\" Block price fields consistent across ~p consecutive blocks [OK]~n\",\n", "\t[length(Rows)]),\n", "io:format(\" Recalculation verified at adjustment heights: ~p [OK]~n\",\n", "\t[AdjustHeights]),\n", "ok." ] }, { "cell_type": "code", "execution_count": null, "id": "b978df1d-d7ab-4061-af8a-1042b790eb0e", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Erlang", "language": "erlang", "name": "erlang" }, "language_info": { "file_extension": ".erl", "name": "erlang", "version": "26.2.1" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: notebooks/test.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "2 = 2." 
] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: priv/templates/README.md ================================================ # relx templates These templates are available on [relx github repository](https://github.com/erlware/relx/blob/main/priv/templates). Only `extended_bin` is required for now. ================================================ FILE: priv/templates/extended_bin ================================================ #!/usr/bin/env bash ###################################################################### # EXTRA_DIST_ARGS environment variable can be set to set extra VM # arguments. ###################################################################### set -e ###################################################################### # Switch to user or dev mode. bin/arweave should be the script used # only by users, and bin/arweave-dev should be the one used only # for developers. ###################################################################### case ${0##*/} in arweave-dev) export ARWEAVE_DEV=1 ;; esac ###################################################################### # EPMD Configuration. force epmd to listen on loopback interface. ###################################################################### export ERL_EPMD_ADDRESS="${ERL_EPMD_ADDRESS:=127.0.0.1,::1}" export ERL_EPMD_PORT="${ERL_EPMD_PORT:=4369}" # http://erlang.org/doc/man/run_erl.html # If defined, disables input and output flow control for the pty # opend by run_erl. Useful if you want to remove any risk of accidentally # blocking the flow control by using Ctrl-S (instead of Ctrl-D to detach), # which can result in blocking of the entire Beam process, and in the case # of running heart as supervisor even the heart process becomes blocked # when writing log message to terminal, leaving the heart process unable # to do its work. 
RUN_ERL_DISABLE_FLOWCNTRL=${RUN_ERL_DISABLE_FLOWCNTRL:-true} export RUN_ERL_DISABLE_FLOWCNTRL RUN_ERL_LOG_GENERATIONS=${RUN_ERL_LOG_GENERATIONS:-1} export RUN_ERL_LOG_GENERATIONS RUN_ERL_LOG_MAXSIZE=${RUN_ERL_LOG_MAXSIZE:-$((100*1024*1024))} export RUN_ERL_LOG_MAXSIZE RUN_ERL_LOG_ALIVE_MINUTES=${RUN_ERL_LOG_ALIVE_MINUTES:-15} export RUN_ERL_LOG_ALIVE_MINUTES if [ "$TERM" = "dumb" ] || [ -z "$TERM" ]; then export TERM=screen fi # OSX does not support readlink '-f' flag, work # around that # shellcheck disable=SC2039,SC3000-SC4000 case $OSTYPE in darwin*) SCRIPT=$(readlink "$0" || true) ;; *) SCRIPT=$(readlink -f "$0" || true) ;; esac [ -z "$SCRIPT" ] && SCRIPT=$0 export SCRIPT_DIR="$(cd "$(dirname "$SCRIPT")" && pwd -P)" export PARENT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd -P)" export SYSTEM_NAME="$(uname -s)" export RELEASE_ROOT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd -P)" export REBAR_CONFIG="${RELEASE_ROOT_DIR}/rebar.config" export BUILD_DIR="${RELEASE_ROOT_DIR}/_build" # let extract release relx information from rebar.config. # the following erlang code will read/parse the file # and extract the information required. In case of issue # it print an error message and return 1, else 0. extract_release_from_rebar_config() { erl -noshell -eval ' try % extract file from REBAR_CONFIG variable C = case os:getenv("REBAR_CONFIG") of false -> throw("REBAR_CONFIG not set"); VRC -> VRC end, % read/parse rebar.config F = case file:consult(C) of {ok, FC} -> FC; {error, EC} -> throw(EC) end, % extract relx section R = case proplists:get_value(relx, F) of undefined -> throw("relx section not found"); RX -> RX end, % extract release section V = case lists:keyfind(release, 1, R) of M = {release, {_, VX}, _} -> VX; _ -> throw("release not found") end, io:format("~s~n", [V]), erlang:halt(0) catch _:E -> io:format(standard_error, "error: ~p~n", [E]), erlang:halt(255) end. ' return $? 
}

# Make the value available to variable substitution calls below
# The following variables are usually hardcoded by rebar3
export REL_NAME="{{ release_name }}"
export REL_VSN="{{ release_version }}"
export RELEASE_NAME="{{ release_name }}"
export RELEASE_VSN="{{ release_version }}"
export RELEASE_GIT_REV="{{ git_rev }}"
export RELEASE_DATETIME="{{ datetime }}"
export RELEASE_ERTS="{{ release_erts_version }}"
export RELEASE_CC="{{ cc_version }}"
export RELEASE_CMAKE="{{ cmake_version }}"
export RELEASE_GMAKE="{{ gmake_version }}"
export ERTS_VSN="{{ erts_vsn }}"
export RELEASE_PROG="${SCRIPT}"

# ensure REL_NAME and RELEASE_NAME variables are set
# by default, if the script is running from sources,
# the release name must be arweave.
test -z "${REL_NAME}" && export REL_NAME="arweave"
test -z "${RELEASE_NAME}" && export RELEASE_NAME="arweave"

# check REL_VSN variable content. This one is quite important
# to be able to start arweave. When the template was not rendered
# by rebar3 (running from sources), fall back to parsing rebar.config.
if test -z "${REL_VSN}"
then
	REL_VSN=$(extract_release_from_rebar_config)
	if test $? -ne 0
	then
		echo "error: failed to read rebar file" 1>&2
		exit 1
	fi
	if test -z "${REL_VSN}"
	then
		echo "error: no release found" 1>&2
		exit 1
	fi
	export REL_VSN
	export REL_PATH="${BUILD_DIR}/default/rel/${REL_NAME}/${REL_VSN}"
	export REL_PATH_ALT="${BUILD_DIR}/default/rel/${REL_NAME}/releases/${REL_VSN}"
	if ! test -e ${REL_PATH}
	then
		echo "error: ${REL_PATH} does not exist" 1>&2
		# in developer mode the release is rebuilt later, so a missing
		# release directory is not fatal
		if ! test "${ARWEAVE_DEV}"
		then
			exit 1
		fi
	fi
fi

export REL_DIR="${RELEASE_ROOT_DIR}/releases/${REL_VSN}"
export RUNNER_LOG_DIR="${RUNNER_LOG_DIR:-$RELEASE_ROOT_DIR/logs}"
export ESCRIPT_NAME="${ESCRIPT_NAME-$SCRIPT}"

# if RELX_RPC_TIMEOUT is set then use that
# otherwise check for NODETOOL_TIMEOUT and convert to seconds
if [ -z "$RELX_RPC_TIMEOUT" ]; then
	# if NODETOOL_TIMEOUT exists then turn the old nodetool timeout into the rpc timeout
	if [ -n "$NODETOOL_TIMEOUT" ]; then
		# will exit the script if NODETOOL_TIMEOUT isn't a number
		RELX_RPC_TIMEOUT=$((NODETOOL_TIMEOUT / 1000))
	else
		RELX_RPC_TIMEOUT=60
	fi
fi
export RELX_RPC_TIMEOUT

# start/stop/install/upgrade pre/post hooks
PRE_START_HOOKS="{{{ pre_start_hooks }}}"
POST_START_HOOKS="{{{ post_start_hooks }}}"
PRE_STOP_HOOKS="{{{ pre_stop_hooks }}}"
POST_STOP_HOOKS="{{{ post_stop_hooks }}}"
PRE_INSTALL_UPGRADE_HOOKS="{{{ pre_install_upgrade_hooks }}}"
POST_INSTALL_UPGRADE_HOOKS="{{{ post_install_upgrade_hooks }}}"
STATUS_HOOK="{{{ status_hook }}}"
EXTENSIONS="{{{ extensions }}}"

# print a warning message on stderr
_warning() {
	printf -- "warning: %s\n" "${*}" 1>&2
}

# print an error message on stderr
_error () {
	printf -- "error: %s\n" "${*}" 1>&2
}

######################################################################
# Arweave Section
######################################################################

# Not all systems support randomx jit
if test ${SYSTEM_NAME} = "Darwin"
then
	export RANDOMX_JIT="disable randomx_jit"
else
	export RANDOMX_JIT=""
fi

# Main VM entry-point options used to start arweave.
# BUG FIX: this previously expanded the undefined variable RANDOM_JIT
# (missing X), so the "disable randomx_jit" flag set above was never
# passed on Darwin.
ARWEAVE_OPTS="-run ar main ${RANDOMX_JIT}"

######################################################################
# Arweave System Check Section
######################################################################

# dispatch for the `check` subcommand
arweave_check() {
	case "${1}" in
		help)
			arweave_check_help
			;;
		*)
			arweave_check_nofile
			arweave_check_hugepages
			;;
	esac
}

arweave_check_help() {
	echo "Usage: ${REL_NAME} check"
	echo "Check system configuration.
Examples:"
	echo " ${REL_NAME} check"
	exit 1
}

# warn when the open-file-descriptor limit is below the recommendation
arweave_check_nofile() {
	recommendation="1000000"
	limit="$(ulimit -n)"
	if [ "$limit" -lt "$recommendation" ]
	then
		_warning "************************************************************************"
		_warning "Your maximum number of open file descriptors is currently set to $limit."
		_warning "We recommend setting that limit to $recommendation or higher."
		_warning "Otherwise, consider setting your max_connections setting to something"
		_warning "lower than your file descriptor limit. This value can be checked with:"
		_warning " sysctl fs.file-max"
		_warning "or"
		_warning " ulimit -n"
		_warning "see more at https://docs.arweave.org/"
		_warning "************************************************************************"
	fi
}

# warn when Linux huge pages are not configured as recommended
arweave_check_hugepages() {
	# execute this check only on linux
	test ${SYSTEM_NAME} != "Linux" && return 0
	recommendation="3500"
	# best-effort probe: treat a missing/unreadable sysctl key as 0 so
	# the numeric comparison below cannot fail on exotic kernels
	value=$(sysctl -n vm.nr_hugepages 2> /dev/null)
	test -z "${value}" && value=0
	if test ${value} -lt ${recommendation}
	then
		_warning "************************************************************************"
		_warning "huge pages is not configured on this system."
		_warning "It should be set to ${recommendation}. This value can be checked with:"
		_warning " sysctl vm.nr_hugepages"
		_warning "see more at https://docs.arweave.org/"
		_warning "************************************************************************"
	fi
}

######################################################################
# Arweave Benchmark Section
######################################################################

# dispatch for the `benchmark` subcommand; each branch only rewrites
# ARWEAVE_OPTS, the node is started later by the common startup path
arweave_benchmark() {
	case "${1}" in
		2.9)
			shift
			arweave_benchmark_2_9 ${*}
			;;
		hash)
			shift
			arweave_benchmark_hash ${*}
			;;
		packing)
			shift
			arweave_benchmark_packing ${*}
			;;
		vdf)
			shift
			arweave_benchmark_vdf ${*}
			;;
		*)
			arweave_benchmark_help
			;;
	esac
}

arweave_benchmark_help() {
	echo "Usage: ${REL_NAME} benchmark [2.9|hash|packing|vdf]"
	echo "Execute Arweave benchmarks.
Examples:" echo " ${REL_NAME} benchmark 2.9" echo " ${REL_NAME} benchmark hash" echo " ${REL_NAME} benchmark packing" echo " ${REL_NAME} benchmark vdf" exit 1 } arweave_benchmark_2_9() { ARWEAVE_OPTS="-run ar benchmark_2_9" echo ${*} } arweave_benchmark_hash() { ARWEAVE_OPTS="-run ar benchmark_hash" echo ${*} } arweave_benchmark_packing() { ARWEAVE_OPTS="-run ar benchmark_packing" echo ${*} } arweave_benchmark_vdf() { ARWEAVE_OPTS="-run ar benchmark_vdf" echo ${*} } ###################################################################### # Arweave Wallet Management Section ###################################################################### arweave_wallet() { case "${1}" in create) shift arweave_wallet_create ${*} ;; *) arweave_wallet_help ;; esac } arweave_wallet_help() { echo "Usage: ${REL_NAME} wallet [create]" echo "Manage Arweave wallets. Examples:" echo " ${REL_NAME} wallet create rsa" echo " ${REL_NAME} wallet create ecdsa" exit 1 } arweave_wallet_create() { case "${1}" in rsa) shift arweave_wallet_create_rsa ${*} ;; ecdsa) shift arweave_wallet_create_ecdsa ${*} ;; *) arweave_wallet_create_help ;; esac } arweave_wallet_create_help() { echo "Usage: ${REL_NAME} wallet create [rsa|ecdsa]" echo "Create Arweave wallet. 
examples:" echo " ${REL_NAME} wallet create rsa" echo " ${REL_NAME} wallet create ecdsa" exit 1 } arweave_wallet_create_rsa() { ARWEAVE_OPTS="-run ar create_wallet" echo ${*} } arweave_wallet_create_ecdsa() { ARWEAVE_OPTS="-run ar create_ecdsa_wallet" echo ${*} } ###################################################################### # Arweave Data Doctor Section ###################################################################### arweave_doctor() { ARWEAVE_OPTS="-run ar_data_doctor main" echo ${*} } arweave_doctor_help() { echo "Usage: ${REL_NAME} doctor" echo "Execute data doctor analyzer" exit 1 } ###################################################################### # Arweave Developer mode Section ###################################################################### # when ARWEAVE_DEV environment variable is set, the release is rebuild arweave_developer_mode() { ( cd ${PARENT_DIR} \ && ./ar-rebar3 ${ARWEAVE_BUILD_TARGET:-default} release sleep 1 ) } # check if a command (subcommand) is a developer command. 
is_arweave_developer_command() { local commands="test test_e2e" local value="${1}" if test "${ARWEAVE_DEV}" then return 1 fi for command in ${commands} do if test "${command}" = "${value}" then return 0 fi done return 1 } ###################################################################### # Arweave Version Section ###################################################################### arweave_version() { case "${1}" in *) arweave_version_light ;; esac } arweave_version_light() { echo "${RELEASE_NAME} ${RELEASE_VSN} (${RELEASE_GIT_REV}) ${RELEASE_DATETIME}" echo " erts ${RELEASE_ERTS}" echo " ${RELEASE_CC}" echo " ${RELEASE_GMAKE}" echo " ${RELEASE_CMAKE}" exit 0 } arweave_version_help() { echo "Usage: ${REL_NAME} version" echo "Return Arweave release" exit 1 } ###################################################################### # test section ###################################################################### arweave_test() { TEST_CONFIG="./config/sys.config" TEST_PROFILE="test" TEST_NODE_NAME="${NODE_NAME:-main-localtest}" TEST_NODE_HOST="${NODE_HOST:-127.0.0.1}" TEST_COOKIE="${COOKIE:-test}" TEST_MODULE="tests" TEST_LOG="main-localtest.out" arweave_test_run ${*} } arweave_test_help() { echo "Usage: ${REL_NAME} test MODULE" echo "Run Arweave Test Suite for module MODULE" } arweave_e2e() { TEST_CONFIG="./config/sys.config" TEST_PROFILE="e2e" TEST_NODE_NAME="${NODE_NAME:-main-e2e}" TEST_NODE_HOST="${NODE_HOST:-127.0.0.1}" TEST_COOKIE="${COOKIE:-e2e}" TEST_MODULE="e2e" TEST_LOG="main-e2e.out" arweave_test_run ${*} } arweave_e2e_help() { echo "Usage: ${REL_NAME} test_e2e MODULE" echo "Run Arweave e2e Test Suite for module MODULE" } # test and e2e features are sharing the same procedures. 
# Shared runner for `test` and `test_e2e`: recompiles the requested
# rebar3 profile, then boots an Erlang node that runs the configured
# entry module (globals TEST_* are set by arweave_test / arweave_e2e).
# Runs in a subshell so cd/set options do not leak into the caller.
arweave_test_run() {
	(
		echo -e "\033[0;32m===> Enter into ${PARENT_DIR}\033[0m"
		cd ${PARENT_DIR}
		echo -e "\033[0;32m===> Compile ${TEST_PROFILE} profile\033[0m"
		./ar-rebar3 "${TEST_PROFILE}" compile
		# if a specific test is specified
		if test "${1}"
		then
			TEST_NODE="${TEST_NODE_NAME}-${1}@${TEST_NODE_HOST}"
		else
			TEST_NODE="${TEST_NODE_NAME}@${TEST_NODE_HOST}"
		fi
		TEST_PATH="$(./rebar3 as ${TEST_PROFILE} path)"
		TEST_PATH_BASE="$(./rebar3 as ${TEST_PROFILE} path --base)/lib/arweave/test"
		PARAMS="-pa ${TEST_PATH} ${TEST_PATH_BASE} -config ${TEST_CONFIG} -noshell"
		ENTRY_POINT="-run ar ${TEST_MODULE} ${*} -s init stop"
		command="erl ${PARAMS} -name ${TEST_NODE} -setcookie ${TEST_COOKIE} ${ENTRY_POINT}"
		echo -e "\033[0;32m===> Execute command ${command}\033[0m"
		# pipefail so a failing erl run is not masked by tee's exit code
		set -xe -o pipefail
		${command} | tee "${TEST_LOG}"
		exit $?
	)
}

######################################################################
# Relx section
######################################################################

# Print per-command usage/help text.  Known subcommands either delegate
# to their dedicated *_help function or print inline text; anything else
# is first checked against relx extensions, then falls back to the full
# command listing.
relx_usage() {
	command="$1"

	case "$command" in
		benchmark)
			arweave_benchmark_help
			;;
		check)
			arweave_check_help
			;;
		doctor)
			arweave_doctor_help
			;;
		version)
			arweave_version_help
			;;
		packing)
			# NOTE(review): arweave_packing_help is not defined in this
			# portion of the script — verify it exists elsewhere.
			arweave_packing_help
			;;
		wallet)
			arweave_wallet_help
			;;
		daemon)
			echo "Usage: ${REL_NAME} daemon"
			echo "Start Arweave as daemon (in background)"
			;;
		daemon_attach)
			echo "Usage: ${REL_NAME} daemon_attach"
			echo "Attach to a running Arweave daemonized process"
			;;
		rpc)
			# BUG FIX: usage string previously had an unbalanced bracket
			# ("[Mod [Fun [Args]]]]")
			echo "Usage: $REL_NAME rpc [Mod [Fun [Args]]]"
			echo "Applies the specified function and returns the result."
			echo "Mod must be specified. However, start and [] are assumed"
			echo "for unspecified Fun and Args, respectively. Args is to "
			echo "be in the same format as for erlang:apply/3 in ERTS."
			;;
		escript)
			echo "Usage: ${REL_NAME} escript [ESCRIPT]"
			echo "Execute an Erlang script in the Arweave release environment."
			echo "Note: it will not start Arweave."
			;;
		"eval")
			echo "Usage: $REL_NAME eval [Exprs]"
			echo "Executes a sequence of Erlang expressions, separated by"
			echo "comma (,) and ended with a full stop (.)"
			;;
		foreground)
			echo "Usage: $REL_NAME foreground"
			echo "Starts the Arweave release in the foreground, meaning all output"
			echo "going to stdout but without an interactive shell."
			echo "The entry point is set to -run ar main"
			;;
		foreground_clean)
			# BUG FIX: previously printed "Usage: ... foreground"
			echo "Usage: $REL_NAME foreground_clean"
			echo "Starts the Arweave release in the foreground, meaning all output"
			echo "going to stdout but without an interactive shell."
			echo "No entry point is configured"
			;;
		console)
			echo "Usage: $REL_NAME console"
			echo "Starts Arweave with an interactive shell."
			;;
		console_clean)
			# BUG FIX: previously `echo:` (a nonexistent command, fatal
			# under `set -e`) and the typo "interactived"
			echo "Usage: ${REL_NAME} console_clean"
			echo "Starts an interactive Erlang shell without Arweave started."
			;;
		remote_console|remote|remsh)
			echo "Usage: $REL_NAME remote"
			echo "Attach a remote shell to an already running Erlang node for this release."
			;;
		reboot)
			echo "Usage: ${REL_NAME} reboot"
			echo "Reboot the entire Arweave VM."
			;;
		restart)
			echo "Usage: ${REL_NAME} restart"
			echo "Restart the running applications but not the Arweave VM."
			;;
		pid)
			echo "Usage: ${REL_NAME} pid"
			echo "Returns the system PID of Arweave release (if running)."
			;;
		ping)
			echo "Usage: ${REL_NAME} ping"
			echo "Checks if the Arweave node is running."
			;;
		status)
			echo "Usage: $REL_NAME status"
			echo "Obtains node status information through optionally defined hooks."
			;;
		stop)
			echo "Usage: ${REL_NAME} stop"
			echo "Stop the Arweave node."
			;;
		test)
			arweave_test_help
			;;
		test_e2e)
			arweave_e2e_help
			;;
		*)
			# check for extension
			IS_EXTENSION=$(relx_is_extension "$command")
			if [ "$IS_EXTENSION" = "1" ]; then
				EXTENSION_SCRIPT=$(relx_get_extension_script "$command")
				relx_run_extension "$EXTENSION_SCRIPT" help
			else
				EXTENSIONS=$(echo $EXTENSIONS | sed -e 's/|undefined//g')
				echo "Usage: ${REL_NAME} [COMMAND] [ARGS]"
				echo ""
				echo "Arweave Commands:"
				echo ""
				echo " benchmark Run Arweave Benchmarks"
				echo " check Check system parameters for Arweave"
				echo " console Start Arweave with an interactive Erlang shell"
				echo " console_clean Start an interactive Erlang shell without the Arweave release's applications"
				echo " daemon Start Arweave in the background with run_erl (named pipes)"
				echo " daemon_attach Connect to Arweave node started as daemon with to_erl (named pipes)"
				echo " doctor Start Arweave Data Analyzer tool"
				echo " escript Run an escript in the same environment as the Arweave release"
				echo " eval [Exprs] Run Erlang expressions on Arweave node"
				echo " foreground Start Arweave with output to stdout"
				echo " foreground_clean Start Arweave VM without any entry-point as arguments"
				echo " pid Print the PID of the Arweave OS process"
				echo " ping Print pong if the Arweave node is alive"
				echo " reboot Reboot the entire Arweave VM"
				echo " reload Restart only Arweave application in the VM"
				echo " remote_console Connect remote shell to the Arweave node"
				echo " restart Restart the running applications but not the Arweave VM"
				echo " rpc [Mod [Fun [Args]]] Run apply(Mod, Fun, Args) on the Arweave node"
				echo " status Verify if the Arweave node is running and then run status hook scripts"
				echo " stop Stop the Arweave node"
				echo " version Print the Arweave version"
				echo " wallet Manage Arweave wallets"
				if test "$EXTENSIONS"
				then
					echo "$EXTENSIONS"
				fi
				if test "${ARWEAVE_DEV}"
				then
					echo ""
					echo "Arweave Commands (developer mode):"
					echo " test Run Arweave test Suite"
					echo " test_e2e Run Arweave e2e Test Suite"
				fi
			fi
			;;
	esac
}
find_erts_dir() { __erts_dir="$RELEASE_ROOT_DIR/erts-$ERTS_VSN" if [ -d "$__erts_dir" ]; then ERTS_DIR="$__erts_dir"; else __erl="$(command -v erl)" code="io:format(\"~s\", [code:root_dir()]), halt()." __erl_root="$("$__erl" -boot no_dot_erlang -sasl errlog_type error -noshell -eval "$code")" ERTS_DIR="$__erl_root/erts-$ERTS_VSN" if [ ! -d "$ERTS_DIR" ]; then erts_version_code="io:format(\"~s\", [erlang:system_info(version)]), halt()." __erts_version="$("$__erl" -boot no_dot_erlang -sasl errlog_type error -noshell -eval "$erts_version_code")" ERTS_DIR="${__erl_root}/erts-${__erts_version}" if [ -d "$ERTS_DIR" ]; then echo "Exact ERTS version (${ERTS_VSN}) match not found, instead using ${__erts_version}. The release may fail to run." 1>&2 ERTS_VSN=${__erts_version} else echo "Can not run the release. There is no ERTS bundled with the release or found on the system." exit 1 fi fi fi } find_erl_call() { # users who depend on stdout when running rpc calls must still use nodetool # so we have an overload option to force use of nodetool instead of erl_call if [ "$USE_NODETOOL" ]; then ERL_RPC=relx_nodetool else # only OTP-23 and above have erl_call in the erts bin directory # and only those versions have the features and bug fixes needed # to work properly with this script __erl_call="$ERTS_DIR/bin/erl_call" if [ -f "$__erl_call" ]; then ERL_RPC="$__erl_call"; else ERL_RPC=relx_nodetool fi fi } # Get node pid relx_get_pid() { if output="$(erl_rpc os getpid 2>/dev/null)" then echo "$output" | sed -e 's/"//g' return 0 else echo "$output" return 1 fi } ping_or_exit() { if ! erl_rpc erlang is_alive > /dev/null 2>&1; then echo "Node is not running!" 
exit 1 fi } relx_get_nodename() { id="longname$(relx_gen_id)-${NAME}" if [ -z "$COOKIE" ]; then # shellcheck disable=SC2086 "$BINDIR/erlexec" -boot "$REL_DIR"/start_clean \ -mode interactive \ -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \ -eval '[_,H]=re:split(atom_to_list(node()),"@",[unicode,{return,list}]), io:format("~s~n",[H]), halt()' \ -dist_listen false \ ${START_EPMD} \ -noshell "${NAME_TYPE}" "$id" else # running with setcookie prevents a ~/.erlang.cookie from being created # shellcheck disable=SC2086 "$BINDIR/erlexec" -boot "$REL_DIR"/start_clean \ -mode interactive \ -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \ -eval '[_,H]=re:split(atom_to_list(node()),"@",[unicode,{return,list}]), io:format("~s~n",[H]), halt()' \ -setcookie "${COOKIE}" \ -dist_listen false \ ${START_EPMD} \ -noshell "${NAME_TYPE}" "$id" fi } # Connect to a remote node relx_rem_sh() { # Remove remote_nodename when OTP-23 is the oldest version supported by rebar3/relx. # sort the used erts version against 11.0 to see if it is less than 11.0 (OTP-23) # if it is then we must generate a node name to use for the remote node. # But this feature is only for short names in 23.0 (erts 11.0). It can be used # for long names with 23.1 (erts 11.1) and above. if [ "${NAME_TYPE}" = "-sname" ] && [ "11.0" = "$(printf "%s\n11.0" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then remote_nodename="${NAME_TYPE} undefined@${RELX_HOSTNAME}" # if the name type is longnames then make sure this is erts 11.1+ elif [ "${NAME_TYPE}" = "-name" ] && [ "11.1" = "$(printf "%s\n11.1" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then remote_nodename="${NAME_TYPE} undefined@${RELX_HOSTNAME}" else # Generate a unique id used to allow multiple remsh to the same node transparently remote_nodename="${NAME_TYPE} remsh$(relx_gen_id)-${NAME}" fi # Get the node's ticktime so that we use the same one TICKTIME="$(erl_rpc net_kernel get_net_ticktime)" # Setup remote shell command to control node # -dist_listen is new in OTP-23. 
It keeps the remote node from binding to a listen port # and implies the option -hidden # shellcheck disable=SC2086 exec "$BINDIR/erlexec" ${remote_nodename} -remsh "$NAME" -boot "$REL_DIR"/start_clean -mode interactive \ -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \ -setcookie "$COOKIE" -hidden -kernel net_ticktime "$TICKTIME" \ -dist_listen false \ $DIST_ARGS \ $EXTRA_DIST_ARGS } erl_rpc() { case "$ERL_RPC" in "relx_nodetool") relx_nodetool rpc "$@" ;; *) command=$* # erl_call -R is recommended for generating dynamic node name but is only available in 23.0+ if [ "11.0" = "$(printf "%s\n11.0" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then DYNAMIC_NAME="-R" else DYNAMIC_NAME="-r" fi if [ "$ADDRESS" ]; then result=$("$ERL_RPC" "${DYNAMIC_NAME}" -c "${COOKIE}" -address "${ADDRESS}" -timeout "${RELX_RPC_TIMEOUT}" -a "${command}") else result=$("$ERL_RPC" "$NAME_TYPE" "$NAME" "${DYNAMIC_NAME}" -c "${COOKIE}" -timeout "${RELX_RPC_TIMEOUT}" -a "${command}") fi code=$? if [ $code -eq 0 ]; then echo "$result" else return $code fi ;; esac } erl_eval() { case "$ERL_RPC" in "relx_nodetool") relx_nodetool eval "$@" ;; *) local command="${*}" if [ "$ERL_DIST_PORT" ]; then result=$(echo "${command}" | eval "$ERL_RPC" "${DYNAMIC_NAME}" -c "${COOKIE}" -address "${ADDRESS}" -timeout "${RELX_RPC_TIMEOUT}" -e) else result=$(echo "${command}" | eval "$ERL_RPC" "$NAME_TYPE" "$NAME" "${DYNAMIC_NAME}" -c "${COOKIE}" -timeout "${RELX_RPC_TIMEOUT}" -e) fi code=$? if [ $code -eq 0 ]; then echo "$result" | sed 's/^{ok, \(.*\)}$/\1/' else return $code fi ;; esac } # Generate a random id relx_gen_id() { # To prevent exhaustion of atoms on target node, optionally avoid # generation of random node prefixes, if it is guaranteed calls # are entirely sequential. 
if [ -z "${NODETOOL_NODE_PREFIX}" ]; then dd count=1 bs=4 if=/dev/urandom 2> /dev/null | od -x | head -n1 | awk '{print $2$3}' else echo "${NODETOOL_NODE_PREFIX}" fi } # Control a node with nodetool if erl_call isn't from OTP-23+ relx_nodetool() { command="$1"; shift # Generate a unique id used to allow multiple nodetool calls to the # same node transparently nodetool_id="maint$(relx_gen_id)-${NAME}" if [ -z "${START_EPMD}" ]; then ERL_FLAGS="${ERL_FLAGS} ${DIST_ARGS} ${EXTRA_DIST_ARGS} ${NAME_TYPE} $nodetool_id -setcookie ${COOKIE} -dist_listen false" \ "$ERTS_DIR/bin/escript" \ "$ROOTDIR/bin/nodetool" \ "$NAME_TYPE" "$NAME" \ "$command" "$@" else # shellcheck disable=SC2086 ERL_FLAGS="${ERL_FLAGS} ${DIST_ARGS} ${EXTRA_DIST_ARGS} ${NAME_TYPE} $nodetool_id -setcookie ${COOKIE} -dist_listen false" \ "$ERTS_DIR/bin/escript" \ "$ROOTDIR/bin/nodetool" \ $START_EPMD "$NAME_TYPE" "$NAME" "$command" "$@" fi } # Run an escript in the node's environment relx_escript() { scriptpath="$1"; shift export RELEASE_ROOT_DIR "$ERTS_DIR/bin/escript" "$ROOTDIR/$scriptpath" "$@" } # Convert {127,0,0,1} to 127.0.0.1 (inet:ntoa/1) addr_tuple_to_str() { addr="$1" saved_IFS="$IFS" IFS="{,}'\" " # shellcheck disable=SC2086 eval set -- $addr IFS="$saved_IFS" case $# in 4) printf '%u.%u.%u.%u' "$@";; 8) printf '%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x:%.4x' "$@";; *) echo "Cannot parse IP address tuple: '$addr'" 1>&2;; esac } make_out_file_path() { # Use output directory provided in the RELX_OUT_FILE_PATH environment variable # (default to the current location of vm.args and sys.config) DIR=$(dirname "$1") [ -d "${RELX_OUT_FILE_PATH}" ] && DIR="${RELX_OUT_FILE_PATH}" FILE=$(basename "$1") IN="${DIR}/${FILE}" PFX=$(echo "$IN" | awk '{sub(/\.[^.]+$/, "", $0)}1') SFX=$(echo "$FILE" | awk -F . 
'{if (NF>1) print $NF}') if [ "$RELX_MULTI_NODE" ]; then echo "${PFX}.${NAME}.${SFX}" else echo "${PFX}.${SFX}" fi } # Replace environment variables replace_os_vars() { awk '{ while(match($0,"[$]{[^}]*}")) { var=substr($0,RSTART+2,RLENGTH -3) slen=split(var,arr,":-") v=arr[1] e=ENVIRON[v] gsub("&","\\\\\\&",e) if(slen > 1 && e=="") { i=index(var, ":-"arr[2]) def=substr(var,i+2) gsub("[$]{"var"}",def) } else { gsub("[$]{"var"}",e) } } }1' < "$1" > "$2" } add_path() { # Use $CWD/$1 if exists, otherwise releases/VSN/$1 local FILE=${1}; shift local IN_FILE_PATH=${1}; shift local EXTRA_PATHS=${*} if [ "${IN_FILE_PATH}" ] then echo "${IN_FILE_PATH}" return 0 fi for e in "${RELEASE_ROOT_DIR}" "${REL_DIR}" ${EXTRA_PATHS} do if [ -f "${e}/${FILE}" ] then echo "${e}/${FILE}" return 0 fi done return 1 } multi_check_replace_os_vars() { local file="${1}"; shift while test "${*}" do local path=${1}; shift local ret=$(check_replace_os_vars ${file} ${path}) if test "${ret}" then echo ${ret} return 0 fi done return 1 } check_replace_os_vars() { IN_FILE_PATH=$(add_path "$1" "$2") OUT_FILE_PATH="$IN_FILE_PATH" SRC_FILE_PATH="$IN_FILE_PATH.src" ORIG_FILE_PATH="$IN_FILE_PATH.orig" if [ -f "$SRC_FILE_PATH" ]; then OUT_FILE_PATH=$(make_out_file_path "$IN_FILE_PATH") replace_os_vars "$SRC_FILE_PATH" "$OUT_FILE_PATH" elif [ "$RELX_REPLACE_OS_VARS" ]; then OUT_FILE_PATH=$(make_out_file_path "$IN_FILE_PATH") # If vm.args.orig or sys.config.orig is present then use that if [ -f "$ORIG_FILE_PATH" ]; then IN_FILE_PATH="$ORIG_FILE_PATH" fi # apply the environment variable substitution to $IN_FILE_PATH # the result is saved to $OUT_FILE_PATH # if they are both the same, then ensure that we don't clobber # the file by saving a backup with the .orig extension if [ "$IN_FILE_PATH" = "$OUT_FILE_PATH" ]; then cp "$IN_FILE_PATH" "$ORIG_FILE_PATH" replace_os_vars "$ORIG_FILE_PATH" "$OUT_FILE_PATH" else replace_os_vars "$IN_FILE_PATH" "$OUT_FILE_PATH" fi else # If vm.arg.orig or sys.config.orig is 
present then use that if [ -f "$ORIG_FILE_PATH" ]; then OUT_FILE_PATH=$(make_out_file_path "$IN_FILE_PATH") cp "$ORIG_FILE_PATH" "$OUT_FILE_PATH" fi fi echo "$OUT_FILE_PATH" } relx_run_hooks() { HOOKS=$1 for hook in $HOOKS do # the scripts arguments at this point are separated # from each other by | , we now replace these # by empty spaces and give them to the `set` # command in order to be able to extract them # separately # shellcheck disable=SC2046 set $(echo "$hook" | sed -e 's/|/ /g') HOOK_SCRIPT=$1; shift # all hook locations are expected to be # relative to the start script location # shellcheck disable=SC1090,SC2240 [ -f "$SCRIPT_DIR/$HOOK_SCRIPT" ] && . "$SCRIPT_DIR/$HOOK_SCRIPT" "$@" done } relx_disable_hooks() { PRE_START_HOOKS="" POST_START_HOOKS="" PRE_STOP_HOOKS="" POST_STOP_HOOKS="" PRE_INSTALL_UPGRADE_HOOKS="" POST_INSTALL_UPGRADE_HOOKS="" STATUS_HOOK="" } relx_is_extension() { EXTENSION=$1 case "$EXTENSION" in # {{ extensions }}) # echo "1" # ;; *) echo "0" ;; esac } relx_get_extension_script() { EXTENSION=$1 # below are the extensions declarations # of the form: # foo_extension="path/to/foo_script";bar_extension="path/to/bar_script" {{{extension_declarations}}} # get the command extension (eg. foo) and # obtain the actual script filename that it # refers to (eg. "path/to/foo_script" eval echo "$""${EXTENSION}_extension" } relx_run_extension() { # drop the first argument which is the name of the # extension script EXTENSION_SCRIPT=$1 shift # all extension script locations are expected to be # relative to the start script location # shellcheck disable=SC1090,SC2240 [ -f "$SCRIPT_DIR/$EXTENSION_SCRIPT" ] && . 
"$SCRIPT_DIR/$EXTENSION_SCRIPT" "$@" } # given a list of arguments, identify the internal ones # --relx-disable-hooks # and process them accordingly process_internal_args() { for arg in "$@" do shift case "$arg" in --relx-disable-hooks) relx_disable_hooks ;; *) ;; esac done } # This function takes a list of terms (usually arguments) # and split them in two categories, the one before -- # and the one after. The one before is used as Erlang # VM parameters and should overwrite default configuration, # The last part (LOCAL_PARAMS) contains arweave parameters. # This function export LOCAL_PARAMS and VM_PARAMS variables. parse_args() { local separator="--" local vm_params="" local params="" while test "${*}" do local arg="${1}" if test "${arg}" = ${separator} then test "${vm_params}" \ && vm_params="${vm_params} ${params}" \ || vm_params="${params}" params="" else test "${params}" \ && params="${params} ${arg}" \ || params="${arg}" fi # don't forget to shift to remove the previous # argument from the list shift done export VM_PARAMS="${vm_params}" export LOCAL_PARAMS="${params}" } # if ARWEAVE_DEV environment is defined, then # we start by rebuild a release. if test "${ARWEAVE_DEV}" then arweave_developer_mode fi # process internal arguments process_internal_args "$@" find_erts_dir find_erl_call export ROOTDIR="$RELEASE_ROOT_DIR" export BINDIR="$ERTS_DIR/bin" export EMU="beam" export PROGNAME="erl" export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH" SYSTEM_LIB_DIR="$(dirname "$ERTS_DIR")/lib" # vm_args configuration, we can use priv/files/vm_args or # the path from the release. 
VMARGS_PATH=$(add_path \
    vm.args \
    "${VMARGS_PATH}" \
    "${REL_DIR}" \
    "${REL_PATH}" \
    "${REL_PATH_ALT}" \
    "${RELEASE_ROOT_DIR}/config" \
    "${RELEASE_ROOT_DIR}/priv/templates")
VMARGS_PATH=$(multi_check_replace_os_vars \
    vm.args \
    "${VMARGS_PATH}" \
    "${REL_DIR}" \
    "${REL_PATH}" \
    "${REL_PATH_ALT}" \
    "${RELEASE_ROOT_DIR}/config")
RELX_CONFIG_PATH=$(multi_check_replace_os_vars \
    sys.config \
    "${RELX_CONFIG_PATH}" \
    "${REL_DIR}" \
    "${REL_PATH}" \
    "${REL_PATH_ALT}" \
    "${RELEASE_ROOT_DIR}/config")

# Check vm.args and other files referenced via -args_file parameters for:
# - nonexisting -args_files
# - circular dependencies of -args_files
# - relative paths in -args_file parameters
# - multiple/mixed occurrences of -name and -sname parameters
# - missing -name or -sname parameters
# If all checks pass, extract the target node name
set +e
TMP_NAME_ARG=$(awk 'function shell_quote(str) {
    gsub(/'\''/,"'\'\\\\\'\''", str);
    return "'\''" str "'\''"
}
function check_name(file) {
    # if file exists, then it should be readable
    if (system("test -f " shell_quote(file)) == 0 && system("test -r " shell_quote(file)) != 0) {
        print file" not readable"
        exit 3
    }
    while ((getline line<file) > 0) {
        if (line~/^-args_file +/) {
            gsub(/^-args_file +| *$/, "", line)
            if (line in files) {
                print "circular reference to "line" encountered in "file
                exit 5
            }
            files[line]=line
            check_name(line)
        } else if (line~/^-s?name +/) {
            if (name!="") {
                print "\""line"\" parameter found in "file" but already specified as \""name"\""
                exit 2
            }
            name=line
        }
    }
}
BEGIN {
    split("", files)
    name=""
}
{
    files[FILENAME]=FILENAME
    check_name(FILENAME)
    if (name=="") {
        print "need to have exactly one of either -name or -sname parameters but none found"
        exit 1
    }
    print name
    exit 0
}' "$VMARGS_PATH")
TMP_NAME_ARG_RC=$?
case $TMP_NAME_ARG_RC in 0) NAME_ARG="$TMP_NAME_ARG";; *) echo "$TMP_NAME_ARG" exit $TMP_NAME_ARG_RC;; esac unset TMP_NAME_ARG unset TMP_NAME_ARG_RC set -e # Perform replacement of variables in ${NAME_ARG} NAME_ARG=$(eval echo "${NAME_ARG}") # Extract the name type and name from the NAME_ARG for REMSH NAME_TYPE="$(echo "$NAME_ARG" | awk '{print $1}')" NAME="$(echo "$NAME_ARG" | awk '{print $2}')" # Extract dist arguments DIST_ARGS="" PROTO_DIST="$(grep '^-proto_dist' "$VMARGS_PATH" || true)" if [ "$PROTO_DIST" ]; then DIST_ARGS="${PROTO_DIST}" fi START_EPMD="$(grep '^-start_epmd' "$VMARGS_PATH" || true)" if [ "$START_EPMD" ]; then DIST_ARGS="${DIST_ARGS} ${START_EPMD}" fi EPMD_MODULE="$(grep '^-epmd_module' "$VMARGS_PATH" || true)" if [ "$EPMD_MODULE" ]; then DIST_ARGS="${DIST_ARGS} ${EPMD_MODULE}" fi INET_DIST_USE_INTERFACE="$(grep '^-kernel *inet_dist_use_interface' "$VMARGS_PATH" || true)" if [ "$INET_DIST_USE_INTERFACE" ]; then DIST_ARGS="${DIST_ARGS} ${INET_DIST_USE_INTERFACE}" fi if [ "$ERL_DIST_PORT" ]; then if [ "$INET_DIST_USE_INTERFACE" ]; then ADDRESS="$(addr_tuple_to_str "${INET_DIST_USE_INTERFACE#*inet_dist_use_interface }"):$ERL_DIST_PORT" else ADDRESS="$ERL_DIST_PORT" fi if [ "11.1" = "$(printf "%s\n11.1" "${ERTS_VSN}" | sort -V | head -n1)" ] ; then # unless set by the user, set start_epmd to false when ERL_DIST_PORT is used if [ ! "$START_EPMD" ]; then EXTRA_DIST_ARGS="-erl_epmd_port ${ERL_DIST_PORT} -start_epmd false" else EXTRA_DIST_ARGS="-erl_epmd_port ${ERL_DIST_PORT}" fi else ERL_DIST_PORT_WARNING="ERL_DIST_PORT is set and used to set the port, but doing so on ERTS version ${ERTS_VSN} means remsh/rpc will not work for this release" if ! 
command -v logger > /dev/null 2>&1 then echo "WARNING: ${ERL_DIST_PORT_WARNING}" else logger -p warning -t "${REL_NAME}[$$]" "${ERL_DIST_PORT_WARNING}" fi EXTRA_DIST_ARGS="-kernel inet_dist_listen_min ${ERL_DIST_PORT} -kernel inet_dist_listen_max ${ERL_DIST_PORT}" fi fi # Force use of nodetool if proto_dist set as erl_call doesn't support proto_dist if [ "$PROTO_DIST" ]; then ERL_RPC=relx_nodetool fi # Extract the target cookie # Do this before relx_get_nodename so we can use it and not create a ~/.erlang.cookie if [ -n "$RELX_COOKIE" ]; then COOKIE="$RELX_COOKIE" else COOKIE_ARG="$(grep '^-setcookie' "$VMARGS_PATH" || true)" DEFAULT_COOKIE_FILE="$HOME/.erlang.cookie" if [ -z "$COOKIE_ARG" ]; then if [ -f "$DEFAULT_COOKIE_FILE" ]; then COOKIE="$(cat "$DEFAULT_COOKIE_FILE")" else echo "No cookie is set or found. This limits the scripts functionality, installing, upgrading, rpc and getting a list of versions will not work." fi else # Extract cookie name from COOKIE_ARG COOKIE="$(echo "$COOKIE_ARG" | awk '{print $2}')" fi fi # User can specify an sname without @hostname # This will fail when creating remote shell # So here we check for @ and add @hostname if missing case "${NAME}" in *@*) ;; # Nothing to do *) NAME=${NAME}@$(relx_get_nodename);; # Add @hostname esac # Export the variable so that it's available in the 'eval' calls export NAME # create a variable of just the hostname part of the nodename RELX_HOSTNAME=$(echo "${NAME}" | cut -d'@' -f2) test -z "$PIPE_DIR" && PIPE_BASE_DIR='/tmp/erl_pipes/' PIPE_DIR="${PIPE_DIR:-/tmp/erl_pipes/$NAME/}" cd "$ROOTDIR" if is_arweave_developer_command "${1}" then relx_usage exit 1 fi # Check the first argument for instructions case "$1" in check) shift arweave_check ${*} ;; version) shift arweave_version ${*} ;; daemon|daemon_boot) arweave_check case "$1" in daemon) shift START_OPTION="console" HEART_OPTION="daemon" ;; daemon_boot) shift START_OPTION="console_boot" HEART_OPTION="daemon_boot" ;; esac ARGS="$(printf "'%s' " 
"$@")" # shellcheck disable=SC2174 test -z "$PIPE_BASE_DIR" || mkdir -m 1777 -p "$PIPE_BASE_DIR" mkdir -p "$PIPE_DIR" if [ ! -w "$PIPE_DIR" ] then echo "failed to start, user '$USER' does not have write privileges on '$PIPE_DIR', either delete it or run node as a different user" exit 1 fi # Make sure log directory exists mkdir -p "$RUNNER_LOG_DIR" relx_run_hooks "$PRE_START_HOOKS" # check system configuration arweave_check "$BINDIR/run_erl" \ -daemon "$PIPE_DIR" \ "$RUNNER_LOG_DIR" \ "exec \"$RELEASE_ROOT_DIR/bin/$REL_NAME\" \"$START_OPTION\" ${ARGS}" # wait for node to be up before running hooks while ! erl_rpc erlang is_alive > /dev/null 2>&1 do sleep 1 done relx_run_hooks "$POST_START_HOOKS" ;; stop) relx_run_hooks "$PRE_STOP_HOOKS" # Wait for the node to completely stop... PID="$(relx_get_pid)" if ! erl_rpc init stop > /dev/null 2>&1; then exit 1 fi while kill -s 0 "$PID" 2>/dev/null; do sleep 1 done # wait for node to be down before running hooks while erl_rpc erlang is_alive > /dev/null 2>&1 do sleep 1 done relx_run_hooks "$POST_STOP_HOOKS" ;; restart) ## Restart the VM without exiting the process if ! erl_rpc init restart > /dev/null; then exit 1 fi ;; reboot) ## Restart the VM completely (uses heart to restart it) if ! erl_rpc init reboot > /dev/null; then exit 1 fi ;; reload) ## Reload only arweave application in the vm RELX_RPC_TIMEOUT=3600 # first arweave and prometheus application must be stopped if erl_eval '[application:stop(A) || A <- [arweave, prometheus]].' then # then arweave application can be restarted erl_eval 'application:ensure_all_started(arweave).' test $? -ne 0 && exit 1 exit $? else exit 1 fi ;; pid) ## Get the VM's pid if ! relx_get_pid; then exit 1 fi ;; ping) ## See if the VM is alive ping_or_exit echo "pong" ;; escript) ## Run an escript under the node's environment shift if ! 
relx_escript "$@"; then exit 1 fi ;; daemon_attach|attach) case "$1" in attach) # TODO, add here the right annoying message asking users to consider # instead using systemd or some such other init system echo "'attach' has been deprecated, replaced by 'daemon_attach' and will be removed in the short-term, please consult rebar3.org on why you should be"\ "using 'foreground' and an init tool such as 'systemd'" ;; esac # Make sure a node IS running ping_or_exit if [ ! -w "$PIPE_DIR" ] then echo "failed to attach, user '$USER' does not have sufficient privileges on '$PIPE_DIR', please run node as a different user" exit 1 fi shift exec "$BINDIR/to_erl" "$PIPE_DIR" ;; remote_console|remote|remsh) # Make sure a node IS running ping_or_exit shift relx_rem_sh ;; console|console_clean|console_boot|foreground|foreground_clean|benchmark|wallet|doctor) FOREGROUNDOPTIONS="" # .boot file typically just $REL_NAME (ie, the app name) # however, for debugging, sometimes start_clean.boot is useful. # For e.g. 'setup', one may even want to name another boot script. subcommand="${1}" case "$1" in console) shift if [ -f "$REL_DIR/$REL_NAME.boot" ]; then BOOTFILE="$REL_DIR/$REL_NAME" else BOOTFILE="$REL_DIR/start" fi ARGS=${*} ;; foreground|foreground_clean|benchmark|wallet|doctor) shift # start up the release in the foreground for use by runit # or other supervision services if [ -f "$REL_DIR/$REL_NAME.boot" ]; then BOOTFILE="$REL_DIR/$REL_NAME" else BOOTFILE="$REL_DIR/start" fi FOREGROUNDOPTIONS="-noinput +Bd" # all these arweave commands are being executed in # foreground mode, ARGS will be modified. 
case ${subcommand} in
                    benchmark)
                        # first call validates/prints, second call captures the
                        # translated arguments (set -e aborts on failure)
                        arweave_benchmark ${*}
                        ARGS=$(arweave_benchmark ${*})
                        ;;
                    wallet)
                        arweave_wallet ${*}
                        ARGS=$(arweave_wallet ${*})
                        ;;
                    doctor)
                        arweave_doctor ${*}
                        ARGS=$(arweave_doctor ${*})
                        ;;
                    foreground_clean)
                        ARWEAVE_OPTS=""
                        ARGS=${*}
                        ;;
                    *)
                        ARGS=${*}
                        ;;
                esac
                ;;
            console_clean)
                shift
                # if not set by user use interactive mode for console_clean
                CODE_LOADING_MODE="${CODE_LOADING_MODE:-interactive}"
                BOOTFILE="$REL_DIR/start_clean"
                ARGS=${*}
                ;;
            console_boot)
                shift
                BOOTFILE="$1"
                shift
                ARGS=${*}
                ;;
        esac

        # split the argument in two parts based on the previously
        # passed args, LOCAL_PARAMS is for arweave, VM_PARAMS is for
        # the vm.
        parse_args ${ARGS}
        ARGS=${LOCAL_PARAMS}

        # if not set by user or console_clean use embedded
        CODE_LOADING_MODE="${CODE_LOADING_MODE:-embedded}"

        # Setup beam-required vars
        EMU="beam"
        PROGNAME="${0#*/}"
        export EMU
        export PROGNAME

        # Dump environment info for logging purposes
        # shellcheck disable=SC2086
        echo "Exec: $BINDIR/erlexec" \
            ${VM_PARAMS} \
            ${EXTRA_DIST_ARGS} \
            ${FOREGROUNDOPTIONS} \
            -boot "$BOOTFILE" \
            -mode "$CODE_LOADING_MODE" \
            -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \
            -config "$RELX_CONFIG_PATH" \
            -args_file "$VMARGS_PATH" \
            -- ${ARWEAVE_OPTS} ${ARGS}
        echo "Root: $ROOTDIR"

        # Log the startup
        echo "$RELEASE_ROOT_DIR"
        if ! command -v logger > /dev/null 2>&1
        then
            echo "${REL_NAME}[$$] Starting up"
        else
            logger -t "${REL_NAME}[$$]" "Starting up"
        fi

        relx_run_hooks "$PRE_START_HOOKS"

        # check system configuration
        arweave_check

        # Start the VM
        # The variable FOREGROUNDOPTIONS must NOT be quoted.
        # shellcheck disable=SC2086
        exec "$BINDIR/erlexec" \
            ${VM_PARAMS} \
            ${EXTRA_DIST_ARGS} \
            ${FOREGROUNDOPTIONS} \
            -boot "$BOOTFILE" \
            -mode "$CODE_LOADING_MODE" \
            -boot_var SYSTEM_LIB_DIR "$SYSTEM_LIB_DIR" \
            -config "$RELX_CONFIG_PATH" \
            -args_file "$VMARGS_PATH" \
            -- ${ARWEAVE_OPTS} ${ARGS}
        # exec will replace the current image and nothing else gets
        # executed from this point on, this explains the absence
        # of the post start hook
        ;;

    rpc)
        # Make sure a node IS running
        ping_or_exit
        shift
        erl_rpc "$@"
        ;;

    eval)
        # Make sure a node IS running
        ping_or_exit
        shift
        erl_eval "$@"
        ;;

    status)
        # Make sure a node IS running
        ping_or_exit
        # NOTE(review): when STATUS_HOOK is empty this AND-list returns
        # non-zero and, with `set -e` active, makes the script exit with
        # an error instead of 0 — confirm whether that is intended.
        # shellcheck disable=SC1090,SC2240
        [ -n "${STATUS_HOOK}" ] && [ -f "$SCRIPT_DIR/$STATUS_HOOK" ] && . "$SCRIPT_DIR/$STATUS_HOOK" "$@"
        ;;

    tunnel)
        # prepare a tunnel to the remote node
        shift
        target="${1}"
        # if epmd is running locally, try to kill it; `|| true` keeps the
        # failing pgrep from aborting the script under `set -e` when no
        # local epmd is running
        pgrep epmd > /dev/null && pkill epmd || true
        # fetch the port of the remote arweave node. The remote command is
        # single-quoted so $2/$NF are expanded by the remote awk rather
        # than by the local shell, and the node name is compared exactly
        # ("name arweave at port N" => $2 is the bare name, $NF the port).
        REMOTE_EPMD_PORT=$(ssh "${target}" 'epmd -names | sed 1d | awk '\''$2=="arweave" {print $NF}'\''')
        # create a local forward tunnel
        ssh -L "${ERL_EPMD_PORT}:localhost:${ERL_EPMD_PORT}" \
            -L "${REMOTE_EPMD_PORT}:localhost:${REMOTE_EPMD_PORT}" \
            "${target}"
        echo "epmd tunnel is ready on localhost:${REMOTE_EPMD_PORT}"
        ;;

    remote_observer)
        # start observer locally, assuming a tunnel has been previously
        # created
        # NOTE(review): `date "+%N"` is not supported by BSD/macOS date —
        # confirm this command is only used on Linux hosts.
        OBSERVER_ID=$(($(date "+%N")%6421))
        erl -name observer-${OBSERVER_ID}@127.0.0.1 \
            -setcookie ${COOKIE} \
            -hidden -run observer
        ;;

    test)
        shift
        arweave_test ${*}
        ;;

    test_e2e)
        shift
        arweave_e2e ${*}
        ;;

    help)
        if [ -z "$2" ]; then
            relx_usage
            exit 1
        fi
        TOPIC="$2"; shift
        relx_usage "$TOPIC"
        ;;

    *)
        # check for extension
        IS_EXTENSION=$(relx_is_extension "$1")
        if [ "$IS_EXTENSION" = "1" ]; then
            EXTENSION_SCRIPT=$(relx_get_extension_script "$1")
            shift
            relx_run_extension "$EXTENSION_SCRIPT" "$@"
            # all extension scripts are expected to exit
        else
            relx_usage "$1"
        fi
        exit 1
        ;;
esac

exit 0

================================================ FILE: 
priv/templates/vm_args
================================================
######################################################################
## Default vm arguments templates used by Arweave.
##
## Some useful links to configure emulator flags:
## https://www.erlang.org/doc/apps/erts/erl_cmd.html#emulator-flags
##
## Some useful links on Erlang's memory management:
## https://www.erlang-factory.com/static/upload/media/139454517145429lukaslarsson.pdf
## https://www.youtube.com/watch?v=nuCYL0X-8f4
##
## Note for testing it's sometimes useful to limit the number of
## schedulers that will be used, to do that: +S 16:16
######################################################################

## Name of the node
-name ${ARNAME:-{{ release_name }}@127.0.0.1}

## Cookie for distributed erlang
-setcookie ${ARCOOKIE:-{{ release_name }}}

## This is now the default as of OTP-26
## Multi-time warp mode in combination with time correction is the
## preferred configuration.
## It is only not the default in Erlang itself because it could break
## older systems.
# +C multi_time_warp

## Uncomment the following line if running in a container.
## +sbwt none

## Increase number of concurrent ports/sockets
##-env ERL_MAX_PORTS 4096

## Tweak GC to run more often
##-env ERL_FULLSWEEP_AFTER 10

## +B [c | d | i]
## Option c makes Ctrl-C interrupt the current shell instead of
## invoking the emulator break handler. Option d (same as specifying
## +B without an extra option) disables the break handler. Option i
## makes the emulator ignore any break signal.
## If option c is used with oldshell on Unix, Ctrl-C will restart the
## shell process rather than interrupt it.
## Disable the emulator break handler:
## it is easy to accidentally type ctrl-c when trying
## to reach for ctrl-d. ctrl-c on a live node can
## have very undesirable results
+Bi

## Enables the kernel poll functionality.
## NOTE(review): documented form is "+K true" with a space — confirm the
## concatenated spelling is accepted by the target ERTS.
+Ktrue

## +A1024: emulator number of threads in the Async long thread pool for linked
## in drivers, mostly unused
+A1024

## +SDio1024: emulator Scheduler thread count for Dirty I/O, 200
## threads for file access
+SDio1024

## +MBsbct 103424: binary_alloc singleblock carrier threshold (in KiB)
## (101MiB, default 512KiB). Blocks larger than the threshold are
## placed in singleblock carriers. However multi-block carriers are
## more efficient. Since we have so many 100MiB binary blocks due to
## the recall range, set the threshold so that they are all placed in
## multi-block carriers and not single-block carriers.
+MBsbct 103424

## +MBsmbcs 10240: binary_alloc smallest multi-block carrier size (in
## KiB) (10MiB, default 256KiB).
+MBsmbcs 10240

## +MBlmbcs 410629: binary_alloc largest multi-block carrier size (in
## KiB) (~401MiB, default 5MiB). Set so that a single multi-block
## carrier can hold roughly 4 full recall ranges.
+MBlmbcs 410629

## Intent per the original comment: binary_alloc maximum mseg_alloc
## singleblock carriers (1024 carriers, default 256). Once exhausted,
## the emulator will start using sys_alloc rather than mseg_alloc for
## singleblock carriers. This can be slower.
## NOTE(review): that description matches +MBmmsbc, but the flag below
## is +MBmmmbc (max mseg MULTIBLOCK carriers) — confirm which one is
## intended; the flag is left unchanged here.
+MBmmmbc 1024

## +MBas aobf: emulator Memory Binary Allocation Strategy set to Address
## Order Best Fit.
## see: https://www.erlang.org/doc/man/erts_alloc.html#strategy
+MBas aobf

## Sets scheduler busy wait threshold. Defaults to medium. The
## threshold determines how long schedulers are to busy wait when
## running out of work before going to sleep.
+sbwt very_long

## Sets dirty scheduler busy wait threshold.
+sbwtdcpu very_long

## Sets dirty IO scheduler busy wait threshold
+sbwtdio very_long

## Sets scheduler wakeup threshold.
+swt very_low

## Sets dirty scheduler wakeup threshold.
+swtdcpu very_low

## Sets dirty IO scheduler wakeup threshold.
+swtdio very_low ================================================ FILE: rebar.config ================================================ {minimum_otp_vsn, "26.0"}. {deps, [ {b64fast, {git, "https://github.com/ArweaveTeam/b64fast.git", {ref, "58f0502e49bf73b29d95c6d02460d1fb8d2a5273"}}}, {jiffy, {git, "https://github.com/ArweaveTeam/jiffy.git", {ref, "073da726e07bafb5d140020a9e8765c703da3ef7"}}}, {ranch, "1.8.1"}, {gun, "2.2.0"}, {cowboy, "2.12.0"}, {cowlib, "2.15.0"}, {prometheus, "4.11.0"}, {prometheus_process_collector, {git, "https://github.com/ArweaveTeam/prometheus_process_collector.git", {ref, "1362b608ffa4748cdf5dba92b85c981218fd4fa2"}}}, {prometheus_cowboy, "0.1.8"}, {rocksdb, {git, "https://github.com/ArweaveTeam/erlang-rocksdb.git", {ref, "0e0b2f051e8f5720ceaea19dc51a7561f2472279"}}}, {recon, {git, "https://github.com/ferd/recon.git", {tag, "2.5.6"}}}, {yamerl, {git, "https://github.com/ArweaveTeam/yamerl", {ref, "bdb3b032f972a397c527667254393cd3c8942df3"}}}, {tomerl, {git, "https://github.com/ArweaveTeam/tomerl", {ref, "be6d7ccf9fe357c5ec3b6411d2245a21b97e48d7"}}} ]}. {overrides, [ {override, b64fast, [ {plugins, [{pc, {git, "https://github.com/blt/port_compiler.git", {tag, "v1.12.0"}}}]}, {artifacts, ["priv/b64fast.so"]}, {provider_hooks, [ {post, [ {compile, {pc, compile}}, {clean, {pc, clean}} ]} ]} ]}, % this is a quick and dirty patch due to rebar3 % versioning issue. % see: https://github.com/erlang/rebar3/issues/2364 {override, gun, [ {deps, [ {cowlib,".*",{git,"https://github.com/ninenines/cowlib",{tag,"2.15.0"}}} ]} ]} ]}. {relx, [ {release, {arweave, "2.9.6-alpha1"}, [ yamerl, tomerl, arweave_config, {arweave_limiter, load}, {arweave_diagnostic, load}, {arweave, load}, {recon, load}, b64fast, jiffy, rocksdb, prometheus_process_collector ]}, {sys_config, "./config/sys.config"}, {vm_args_src, "./config/vm.args.src"}, % dynamically generated overlay variable, required for % extra variables during script generation. 
{overlay_vars, "_vars.config"}, {overlay, [ {template, "priv/templates/extended_bin", "bin/arweave"}, {template, "priv/templates/extended_bin", "bin/arweave-{{release_version}}"}, {template, "priv/templates/extended_bin", "{{output_dir}}/{{release_version}}/bin/arweave"}, {template, "priv/templates/vm_args", "{{output_dir}}/{{release_version}}/vm.args"}, {template, "priv/templates/vm_args", "releases/{{ release_version }}/vm.args"}, {copy, "bin/start", "bin/start"}, {copy, "bin/stop", "bin/stop"}, {copy, "bin/console", "bin/console"}, {copy, "bin/create-wallet", "bin/create-wallet"}, {copy, "bin/benchmark-hash", "bin/benchmark-hash"}, {copy, "bin/benchmark-packing", "bin/benchmark-packing"}, {copy, "bin/benchmark-vdf", "bin/benchmark-vdf"}, {copy, "bin/data-doctor", "bin/data-doctor"}, {copy, "bin/logs", "bin/logs"}, {copy, "bin/debug-logs", "bin/debug-logs"}, {copy, "genesis_data/not_found.html", "genesis_data/not_found.html"}, {copy, "genesis_data/hash_list_1_0", "genesis_data/hash_list_1_0"}, {copy, "genesis_data/genesis_wallets.csv", "genesis_data/genesis_wallets.csv"}, {copy, "genesis_data/genesis_txs/ZC44Bxrx6AtNJYLwhvpALuINZRBXklme3tpeJbJ2rdw.json", "genesis_data/genesis_txs/ZC44Bxrx6AtNJYLwhvpALuINZRBXklme3tpeJbJ2rdw.json"}, {copy, "genesis_data/genesis_txs/6NaT-Mz8QAiQS8atFaOu_ezqZnfu_XaQb-Grng-hvHc.json", "genesis_data/genesis_txs/6NaT-Mz8QAiQS8atFaOu_ezqZnfu_XaQb-Grng-hvHc.json"}, {copy, "genesis_data/genesis_txs/1qVeYpf2sY8Qkz0iVomVPVb15NA7QUtF3eFDoMwa8PI.json", "genesis_data/genesis_txs/1qVeYpf2sY8Qkz0iVomVPVb15NA7QUtF3eFDoMwa8PI.json"}, {copy, "genesis_data/genesis_txs/6GNIVQ-23jPJTxQkQITbSKE7SYm6J3MF4qbSgH3-AXU.json", "genesis_data/genesis_txs/6GNIVQ-23jPJTxQkQITbSKE7SYm6J3MF4qbSgH3-AXU.json"}, {copy, "genesis_data/genesis_txs/3T6mnguMWl8GeiqZWiBZrGXHHtwm12mIWciusoSACkQ.json", "genesis_data/genesis_txs/3T6mnguMWl8GeiqZWiBZrGXHHtwm12mIWciusoSACkQ.json"}, {copy, "genesis_data/genesis_txs/EQh5rYFJ5Z5yESi4DIuvl2n6iVZS899tA6V6rf2Xwhk.json", 
"genesis_data/genesis_txs/EQh5rYFJ5Z5yESi4DIuvl2n6iVZS899tA6V6rf2Xwhk.json"}, {copy, "genesis_data/genesis_txs/128KaPgVaZyrl8Vuzt795ZlWidERzih15pNDAJgahI0.json", "genesis_data/genesis_txs/128KaPgVaZyrl8Vuzt795ZlWidERzih15pNDAJgahI0.json"}, {copy, "genesis_data/genesis_txs/xiQYsaUMtlIq9DvTyucB4gu0BFC-qnFRIDclLv8wUT8.json", "genesis_data/genesis_txs/xiQYsaUMtlIq9DvTyucB4gu0BFC-qnFRIDclLv8wUT8.json"}, {copy, "genesis_data/genesis_txs/I6s8Z6gEPLQABFstkCoLVv_gdQNGb-uuMMut-R7q2hA.json", "genesis_data/genesis_txs/I6s8Z6gEPLQABFstkCoLVv_gdQNGb-uuMMut-R7q2hA.json"}, {copy, "genesis_data/genesis_txs/kXu3jTQwgYsphIUFbaVGg9rNiil96fNjw0RBa6oPRtU.json", "genesis_data/genesis_txs/kXu3jTQwgYsphIUFbaVGg9rNiil96fNjw0RBa6oPRtU.json"}, {copy, "genesis_data/genesis_txs/CUu1gtu6L5tJxkOAu13tNBGDKECohV8M4qgCOOPNtas.json", "genesis_data/genesis_txs/CUu1gtu6L5tJxkOAu13tNBGDKECohV8M4qgCOOPNtas.json"}, {copy, "genesis_data/genesis_txs/g6TUtTIi_rwlAHNuO6ACsQqIChWACugTPmZxaaJltDM.json", "genesis_data/genesis_txs/g6TUtTIi_rwlAHNuO6ACsQqIChWACugTPmZxaaJltDM.json"}, {copy, "genesis_data/genesis_txs/L9J9SkTWI_Fx5KhujeWGokIchHTSFlSIC0blr0JIz80.json", "genesis_data/genesis_txs/L9J9SkTWI_Fx5KhujeWGokIchHTSFlSIC0blr0JIz80.json"}, {copy, "genesis_data/genesis_txs/AX6ZZxDpFlNhoN5Am5Hi4DER4zOBGVnQm_bse5PfHNw.json", "genesis_data/genesis_txs/AX6ZZxDpFlNhoN5Am5Hi4DER4zOBGVnQm_bse5PfHNw.json"}, {copy, "genesis_data/genesis_txs/3MMMUrHDmjbCn_-TOZJJHvjLBp8PffZKUNfm_Ziy0Vk.json", "genesis_data/genesis_txs/3MMMUrHDmjbCn_-TOZJJHvjLBp8PffZKUNfm_Ziy0Vk.json"}, {copy, "genesis_data/genesis_txs/2vn7V0FR0JMXrVbj3Ofvc_2nvrFYCCpRoFjc7UYpJcA.json", "genesis_data/genesis_txs/2vn7V0FR0JMXrVbj3Ofvc_2nvrFYCCpRoFjc7UYpJcA.json"}, {copy, "genesis_data/genesis_txs/daTnztzTMlA8Ras9XgQ05Fr9ZYwOg4-UDfjW875yQeQ.json", "genesis_data/genesis_txs/daTnztzTMlA8Ras9XgQ05Fr9ZYwOg4-UDfjW875yQeQ.json"}, {copy, "genesis_data/genesis_txs/_QEE09XylMYgab9MYPvrrMy7v1jKWh0bGwqFvsBsO8s.json", 
"genesis_data/genesis_txs/_QEE09XylMYgab9MYPvrrMy7v1jKWh0bGwqFvsBsO8s.json"}, {copy, "genesis_data/genesis_txs/5WKzIeQrDGC86IQvl2NhRtgPNKHGRA9oyjRByV1F7p4.json", "genesis_data/genesis_txs/5WKzIeQrDGC86IQvl2NhRtgPNKHGRA9oyjRByV1F7p4.json"}, {copy, "genesis_data/genesis_txs/Tnf6b1F67AEV2r9Flj8ktSSHYoV8SeL9dFvHRkavlZo.json", "genesis_data/genesis_txs/Tnf6b1F67AEV2r9Flj8ktSSHYoV8SeL9dFvHRkavlZo.json"}, {copy, "genesis_data/genesis_txs/m1Vv28IVJIuYiToBhxFVp3dA47je3L8WkzSjggAWXAo.json", "genesis_data/genesis_txs/m1Vv28IVJIuYiToBhxFVp3dA47je3L8WkzSjggAWXAo.json"}, {copy, "genesis_data/genesis_txs/iPb5JLzNajAzUNByVeIGSEPR0rzGOV5iIYjWpi99APQ.json", "genesis_data/genesis_txs/iPb5JLzNajAzUNByVeIGSEPR0rzGOV5iIYjWpi99APQ.json"}, {copy, "genesis_data/genesis_txs/KOm2FJzmNXa_yjYC-58DkysCdk7FRFMcRmBx3DF6S9A.json", "genesis_data/genesis_txs/KOm2FJzmNXa_yjYC-58DkysCdk7FRFMcRmBx3DF6S9A.json"}, {copy, "genesis_data/genesis_txs/R0Mhun4e-WmLLGxnJq4SDTRqyNvTDTKC-uXuol1s63A.json", "genesis_data/genesis_txs/R0Mhun4e-WmLLGxnJq4SDTRqyNvTDTKC-uXuol1s63A.json"}, {copy, "genesis_data/genesis_txs/g8ZQaQTNUbg-jGeE61og18FrGqpFeZxjFDypGuhT7zI.json", "genesis_data/genesis_txs/g8ZQaQTNUbg-jGeE61og18FrGqpFeZxjFDypGuhT7zI.json"}, {copy, "genesis_data/genesis_txs/DC6gmByeCki7uyXHJhX_A9x3pkMgmJ8Tv6wDRnh7vGs.json", "genesis_data/genesis_txs/DC6gmByeCki7uyXHJhX_A9x3pkMgmJ8Tv6wDRnh7vGs.json"}, {copy, "genesis_data/genesis_txs/y-k4KjdSmwYmIugoObrtx5JWYczlEZBzwBHGMLqNP-0.json", "genesis_data/genesis_txs/y-k4KjdSmwYmIugoObrtx5JWYczlEZBzwBHGMLqNP-0.json"}, {copy, "genesis_data/genesis_txs/OIOqGvvuafD_5J9QzfxyPiNlnqzIcL96i6u4PTUeDmA.json", "genesis_data/genesis_txs/OIOqGvvuafD_5J9QzfxyPiNlnqzIcL96i6u4PTUeDmA.json"}, {copy, "genesis_data/genesis_txs/mcFln0_6FIuLwE9GtMRzmdQts4QALV3dxQkXdgSdO2s.json", "genesis_data/genesis_txs/mcFln0_6FIuLwE9GtMRzmdQts4QALV3dxQkXdgSdO2s.json"}, {copy, "genesis_data/genesis_txs/R2h2i6y-KFxuHukxmHIjSncPZSiS4tpuzH0tD1NAooI.json", 
"genesis_data/genesis_txs/R2h2i6y-KFxuHukxmHIjSncPZSiS4tpuzH0tD1NAooI.json"}, {copy, "genesis_data/genesis_txs/rC7TOXwflo7w9Ky0ljTYlzdbR0A3g2GVRbRJbIIuBfY.json", "genesis_data/genesis_txs/rC7TOXwflo7w9Ky0ljTYlzdbR0A3g2GVRbRJbIIuBfY.json"}, {copy, "genesis_data/genesis_txs/UdCfZG1jBYUKgeLc13zjRxmQHO4_13B-NigE57jmJ5A.json", "genesis_data/genesis_txs/UdCfZG1jBYUKgeLc13zjRxmQHO4_13B-NigE57jmJ5A.json"}, {copy, "genesis_data/genesis_txs/8gTAwQ3f17PKI9KCX1cjuXCs9F8Hcdz8KyhsecKuCJ0.json", "genesis_data/genesis_txs/8gTAwQ3f17PKI9KCX1cjuXCs9F8Hcdz8KyhsecKuCJ0.json"}, {copy, "genesis_data/genesis_txs/HFUR5ZwLihdaonJWHRHBuLay6cw8ZMV0bM870xhE6Qk.json", "genesis_data/genesis_txs/HFUR5ZwLihdaonJWHRHBuLay6cw8ZMV0bM870xhE6Qk.json"}, {copy, "genesis_data/genesis_txs/QDbVk-efwdVbHDGL1vZO3mQ3g65ol5RR-1wOvPLUkkE.json", "genesis_data/genesis_txs/QDbVk-efwdVbHDGL1vZO3mQ3g65ol5RR-1wOvPLUkkE.json"}, {copy, "genesis_data/genesis_txs/F5R2EA-gM8AtQ9_NymKwtr_Im3_ljMR38ndzCs5c77Y.json", "genesis_data/genesis_txs/F5R2EA-gM8AtQ9_NymKwtr_Im3_ljMR38ndzCs5c77Y.json"}, {copy, "genesis_data/genesis_txs/O6qlkPRgr7H3WLHjVov-CTm-q66Q4TuvhP6GC-c5ZjY.json", "genesis_data/genesis_txs/O6qlkPRgr7H3WLHjVov-CTm-q66Q4TuvhP6GC-c5ZjY.json"}, {copy, "genesis_data/genesis_txs/0qob-AeHGTS5EDamY6Mtsnxf1MCyUk18l09bqHAYQjU.json", "genesis_data/genesis_txs/0qob-AeHGTS5EDamY6Mtsnxf1MCyUk18l09bqHAYQjU.json"}, {copy, "genesis_data/genesis_txs/VuXQZjhUaZ2Hyi6Pl8_VTOu2mUWjoEemYb5TKXPFOS0.json", "genesis_data/genesis_txs/VuXQZjhUaZ2Hyi6Pl8_VTOu2mUWjoEemYb5TKXPFOS0.json"}, {copy, "genesis_data/genesis_txs/Osgzf9EDK9j7TMlqSJ_5Y1rzZgOA6qfR7ktiakLPk4A.json", "genesis_data/genesis_txs/Osgzf9EDK9j7TMlqSJ_5Y1rzZgOA6qfR7ktiakLPk4A.json"}, {copy, "genesis_data/genesis_txs/1xh_NCIFYbprcgNM4AVvZ47jRxsQmJYvCG-L-oEK4iE.json", "genesis_data/genesis_txs/1xh_NCIFYbprcgNM4AVvZ47jRxsQmJYvCG-L-oEK4iE.json"}, {copy, "genesis_data/genesis_txs/DpEoi9F4g952ajGuT4g1HWY-xndyE77dn0VfdNXkrC8.json", 
"genesis_data/genesis_txs/DpEoi9F4g952ajGuT4g1HWY-xndyE77dn0VfdNXkrC8.json"}, {copy, "genesis_data/genesis_txs/Z6IgRWClifhTSnomxJet2WLw8UUaslmqAi2nynj3Ke4.json", "genesis_data/genesis_txs/Z6IgRWClifhTSnomxJet2WLw8UUaslmqAi2nynj3Ke4.json"}, {copy, "genesis_data/genesis_txs/Xjz72yVLd_Qzl8_GfSPqZA1MAkxxhjr2Lsf2tGCj_ZQ.json", "genesis_data/genesis_txs/Xjz72yVLd_Qzl8_GfSPqZA1MAkxxhjr2Lsf2tGCj_ZQ.json"}, {copy, "genesis_data/genesis_txs/r8Yq7Lvx0FjFYyXBLn29UM5Evv4AtGLZ00LCtE_hC60.json", "genesis_data/genesis_txs/r8Yq7Lvx0FjFYyXBLn29UM5Evv4AtGLZ00LCtE_hC60.json"}, {copy, "genesis_data/genesis_txs/7kT0is0QnxdjqkPi0BKamhLW6z6_SK55LMAVKQC6F0M.json", "genesis_data/genesis_txs/7kT0is0QnxdjqkPi0BKamhLW6z6_SK55LMAVKQC6F0M.json"}, {copy, "genesis_data/genesis_txs/-wzIQJ19Hq8Zyf1L85Ga3uGTrdWA2W-UNyr8aH4a4iE.json", "genesis_data/genesis_txs/-wzIQJ19Hq8Zyf1L85Ga3uGTrdWA2W-UNyr8aH4a4iE.json"}, {copy, "genesis_data/genesis_txs/QDBM2PowqCX0eUCKzgV-DgdzeDz5TXLKYS3HVXLyqoo.json", "genesis_data/genesis_txs/QDBM2PowqCX0eUCKzgV-DgdzeDz5TXLKYS3HVXLyqoo.json"}, {copy, "genesis_data/genesis_txs/ZAk05et7CFN69E9NwET2mSRI0ISRigjMEjcy8kbO-Y8.json", "genesis_data/genesis_txs/ZAk05et7CFN69E9NwET2mSRI0ISRigjMEjcy8kbO-Y8.json"}, {copy, "genesis_data/genesis_txs/LJ2QSdjHftgyCOSgy9Ub0OkTTN25rxCY7D7mt6u8Uy8.json", "genesis_data/genesis_txs/LJ2QSdjHftgyCOSgy9Ub0OkTTN25rxCY7D7mt6u8Uy8.json"}, {copy, "genesis_data/genesis_txs/luyHFFFOvjKPqi6nVrxngcHaQ3RwbMDMqVTLqPagHy0.json", "genesis_data/genesis_txs/luyHFFFOvjKPqi6nVrxngcHaQ3RwbMDMqVTLqPagHy0.json"}, {copy, "genesis_data/genesis_txs/eJ2aSQ4nm-i8XAZW2pcRq6GoEjW9K8EBM6w7rLiuSHw.json", "genesis_data/genesis_txs/eJ2aSQ4nm-i8XAZW2pcRq6GoEjW9K8EBM6w7rLiuSHw.json"}, {copy, "genesis_data/genesis_txs/SHxtj5_gLdJMI-6CcspsDbFBuU_74df3I4-sAJkAr6w.json", "genesis_data/genesis_txs/SHxtj5_gLdJMI-6CcspsDbFBuU_74df3I4-sAJkAr6w.json"}, {copy, "genesis_data/genesis_txs/CZ181FVir4NaSJ7JsVb50-xCaZtd3dmKbDer7jpTSyI.json", 
"genesis_data/genesis_txs/CZ181FVir4NaSJ7JsVb50-xCaZtd3dmKbDer7jpTSyI.json"}, {copy, "genesis_data/genesis_txs/Mk8XJgQPSOIsx_QX_XDPxdEG5NcKgO92q9i37uLZsrs.json", "genesis_data/genesis_txs/Mk8XJgQPSOIsx_QX_XDPxdEG5NcKgO92q9i37uLZsrs.json"}, {copy, "genesis_data/genesis_txs/Y0PLaTBQ73JXn_jHvldOKC3jdbqDbqTMkcW0x65_Jek.json", "genesis_data/genesis_txs/Y0PLaTBQ73JXn_jHvldOKC3jdbqDbqTMkcW0x65_Jek.json"}, {copy, "genesis_data/genesis_txs/G5FyMvm8E0_07vFgz-XISJN3VEviSrbtih9_Wptef9w.json", "genesis_data/genesis_txs/G5FyMvm8E0_07vFgz-XISJN3VEviSrbtih9_Wptef9w.json"}, {copy, "genesis_data/genesis_txs/fx1EmDF4yioha3ms_VbddDQjl4bt6pBLpFCESuEIT6E.json", "genesis_data/genesis_txs/fx1EmDF4yioha3ms_VbddDQjl4bt6pBLpFCESuEIT6E.json"}, {copy, "genesis_data/genesis_txs/EUMtkWCJU0L23RnhXKfQ1wtD3Jh2O-vpFnLcQXynoAQ.json", "genesis_data/genesis_txs/EUMtkWCJU0L23RnhXKfQ1wtD3Jh2O-vpFnLcQXynoAQ.json"}, {copy, "genesis_data/genesis_txs/BYJCPwCLpd9a5K1HFy5F6ZvnemPiPFtV4hz5wMHr1NI.json", "genesis_data/genesis_txs/BYJCPwCLpd9a5K1HFy5F6ZvnemPiPFtV4hz5wMHr1NI.json"}, {copy, "genesis_data/genesis_txs/5dsjbEwH2r-EWCkfOznV4JkCOLSK9vNY-0iqPr4RZUM.json", "genesis_data/genesis_txs/5dsjbEwH2r-EWCkfOznV4JkCOLSK9vNY-0iqPr4RZUM.json"}, {copy, "genesis_data/genesis_txs/71M1E7A4e0PFW_6C0gly77iCg7ykX17647i00eEiA-s.json", "genesis_data/genesis_txs/71M1E7A4e0PFW_6C0gly77iCg7ykX17647i00eEiA-s.json"}, {copy, "genesis_data/genesis_txs/98kadyXY0OPfEZKeeZcCyQ7z5mRToZklK-D6f1a-Lxw.json", "genesis_data/genesis_txs/98kadyXY0OPfEZKeeZcCyQ7z5mRToZklK-D6f1a-Lxw.json"}, {copy, "genesis_data/genesis_txs/EvKHSfokNyuiTarFKOuQ_-SaBwtllGpQGc7IFkRfBfc.json", "genesis_data/genesis_txs/EvKHSfokNyuiTarFKOuQ_-SaBwtllGpQGc7IFkRfBfc.json"}, {copy, "genesis_data/genesis_txs/hRTkBAH0k74HlmlWXTWmetXcIFXvM_Zrz3i1JXULZSM.json", "genesis_data/genesis_txs/hRTkBAH0k74HlmlWXTWmetXcIFXvM_Zrz3i1JXULZSM.json"}, {copy, "genesis_data/genesis_txs/p9PJG5GkKZAxLyPJyDYw4_1CmhodHGGGqB785duwVwM.json", 
"genesis_data/genesis_txs/p9PJG5GkKZAxLyPJyDYw4_1CmhodHGGGqB785duwVwM.json"}, {copy, "genesis_data/genesis_txs/rTY6dpq4KEhZtB-5moP1mWN1CtrTKurv7QSY8wAN758.json", "genesis_data/genesis_txs/rTY6dpq4KEhZtB-5moP1mWN1CtrTKurv7QSY8wAN758.json"}, {copy, "genesis_data/genesis_txs/K_ae8Bfvql0dGhIfRH-R7W-zWoeB95kYGJNi3HjFyrs.json", "genesis_data/genesis_txs/K_ae8Bfvql0dGhIfRH-R7W-zWoeB95kYGJNi3HjFyrs.json"}, {copy, "genesis_data/genesis_txs/z7Xvravldr4BhTI4KPOEWtG325_1ORaLQ4aUPOAe_us.json", "genesis_data/genesis_txs/z7Xvravldr4BhTI4KPOEWtG325_1ORaLQ4aUPOAe_us.json"}, {copy, "genesis_data/genesis_txs/SCN8yn0cQASui1DeV4mMYeQrRn8eXKr7Cp9ll7L3UfI.json", "genesis_data/genesis_txs/SCN8yn0cQASui1DeV4mMYeQrRn8eXKr7Cp9ll7L3UfI.json"}, {copy, "genesis_data/genesis_txs/piTZgtn2oBsWKt09CV8LqH3I3JaVdRjFwjOAJmC-Xp4.json", "genesis_data/genesis_txs/piTZgtn2oBsWKt09CV8LqH3I3JaVdRjFwjOAJmC-Xp4.json"}, {copy, "genesis_data/genesis_txs/gbYMogbLVx3rOmm7K-o3nfGPKauLMLkGMSXcKkXW13Q.json", "genesis_data/genesis_txs/gbYMogbLVx3rOmm7K-o3nfGPKauLMLkGMSXcKkXW13Q.json"}, {copy, "genesis_data/genesis_txs/3BSgxVi4vtVtgMBtDE8xPMqU0PmkiKtKX6P_Iw0kMsM.json", "genesis_data/genesis_txs/3BSgxVi4vtVtgMBtDE8xPMqU0PmkiKtKX6P_Iw0kMsM.json"}, {copy, "genesis_data/genesis_txs/M_wQsQbFGtGiEaH0uW2swBubAnFab3ZcCN8IYWZvVzo.json", "genesis_data/genesis_txs/M_wQsQbFGtGiEaH0uW2swBubAnFab3ZcCN8IYWZvVzo.json"}, {copy, "genesis_data/genesis_txs/un3O49lggBX9raJKb6yuql_QTgZYWakWw5ydwUgUuXY.json", "genesis_data/genesis_txs/un3O49lggBX9raJKb6yuql_QTgZYWakWw5ydwUgUuXY.json"}, {copy, "genesis_data/genesis_txs/wFjsB5Y9GV61NqjCeyPCdkfXKUJOYccq8Bl9aljvwGc.json", "genesis_data/genesis_txs/wFjsB5Y9GV61NqjCeyPCdkfXKUJOYccq8Bl9aljvwGc.json"}, {copy, "genesis_data/genesis_txs/kcb41aN752OE__qEKDQAsbpzCUXMdlzI3clCBuxdVts.json", "genesis_data/genesis_txs/kcb41aN752OE__qEKDQAsbpzCUXMdlzI3clCBuxdVts.json"}, {copy, "genesis_data/genesis_txs/gE-2fjp2ncJ0ZRg12UBfqnCBb75OtAOksEX3wGZguqw.json", 
"genesis_data/genesis_txs/gE-2fjp2ncJ0ZRg12UBfqnCBb75OtAOksEX3wGZguqw.json"}, {copy, "genesis_data/genesis_txs/SJXMM0tlXown7l3ffjhsiKf311FDTRa7QkKX8tgyEZ8.json", "genesis_data/genesis_txs/SJXMM0tlXown7l3ffjhsiKf311FDTRa7QkKX8tgyEZ8.json"}, {copy, "genesis_data/genesis_txs/EPZ0hBh1wp-7T4JED4v6DOItd-9MNWkRfbLyizDLBsE.json", "genesis_data/genesis_txs/EPZ0hBh1wp-7T4JED4v6DOItd-9MNWkRfbLyizDLBsE.json"}, {copy, "genesis_data/genesis_txs/IACLRsWq-T6aesGEAjfFTZJd2sy7sFvWL7O6FI9A39U.json", "genesis_data/genesis_txs/IACLRsWq-T6aesGEAjfFTZJd2sy7sFvWL7O6FI9A39U.json"}, {copy, "genesis_data/genesis_txs/Dxrsx0xuPVY7oz9yHbL6wOFxo6ws7ycVe778C2bc9J8.json", "genesis_data/genesis_txs/Dxrsx0xuPVY7oz9yHbL6wOFxo6ws7ycVe778C2bc9J8.json"}, {copy, "genesis_data/genesis_txs/_01J_SIBJ164H0EedSfQ8h0dMfqet66WKHwcOFQEsMc.json", "genesis_data/genesis_txs/_01J_SIBJ164H0EedSfQ8h0dMfqet66WKHwcOFQEsMc.json"}, {copy, "genesis_data/genesis_txs/OILhne7UcvACtB4peA4osAjRMthaZZSW9OWhe3NpLBw.json", "genesis_data/genesis_txs/OILhne7UcvACtB4peA4osAjRMthaZZSW9OWhe3NpLBw.json"}, {copy, "genesis_data/genesis_txs/06dr4mrXcKlfPbK8t9vWOBCDJznyG-AsKxED-Jr0U88.json", "genesis_data/genesis_txs/06dr4mrXcKlfPbK8t9vWOBCDJznyG-AsKxED-Jr0U88.json"}, {copy, "genesis_data/genesis_txs/SBhaeMSTQm3rS6puYacdT-4wzlnkBlZ1agn6IW6Oyg8.json", "genesis_data/genesis_txs/SBhaeMSTQm3rS6puYacdT-4wzlnkBlZ1agn6IW6Oyg8.json"}, {copy, "genesis_data/genesis_txs/efqI0eDfp0OcYB-Ms5ELukIUr8-qtlX7Ica-ikhVZLU.json", "genesis_data/genesis_txs/efqI0eDfp0OcYB-Ms5ELukIUr8-qtlX7Ica-ikhVZLU.json"}, {copy, "genesis_data/genesis_txs/j2IiBCd5Vf2Q8ciTVxeHbN6JgrXUFiv0xtoMTA_VtqQ.json", "genesis_data/genesis_txs/j2IiBCd5Vf2Q8ciTVxeHbN6JgrXUFiv0xtoMTA_VtqQ.json"}, {copy, "genesis_data/genesis_txs/lFqBd1sEhgw1e_adedkee2hXP9beiNYbF625KV0vObU.json", "genesis_data/genesis_txs/lFqBd1sEhgw1e_adedkee2hXP9beiNYbF625KV0vObU.json"}, {copy, "genesis_data/genesis_txs/rRoy9jsUZ-Y10NIBksSD3P4HcVDfZheloItTTnc8_ZQ.json", 
"genesis_data/genesis_txs/rRoy9jsUZ-Y10NIBksSD3P4HcVDfZheloItTTnc8_ZQ.json"}, {copy, "genesis_data/genesis_txs/k6UueT0FWSSUbAAH4Uc1Oz6BivunVR0nSMTEILnB_dQ.json", "genesis_data/genesis_txs/k6UueT0FWSSUbAAH4Uc1Oz6BivunVR0nSMTEILnB_dQ.json"}, {copy, "genesis_data/genesis_txs/dYBZuFcCEgGVcfXgS9tmeJsue_qwaCRO3Mg2OHCZh_A.json", "genesis_data/genesis_txs/dYBZuFcCEgGVcfXgS9tmeJsue_qwaCRO3Mg2OHCZh_A.json"}, {copy, "genesis_data/genesis_txs/sfAY_3fQ41LahxW45rXfndEzeHD1eeWJgI9ZaM3slFU.json", "genesis_data/genesis_txs/sfAY_3fQ41LahxW45rXfndEzeHD1eeWJgI9ZaM3slFU.json"}, {copy, "genesis_data/genesis_txs/h0MlFXsvtNQlFwgTh6y7-gjXEj0CbGECgz77EwQsca0.json", "genesis_data/genesis_txs/h0MlFXsvtNQlFwgTh6y7-gjXEj0CbGECgz77EwQsca0.json"}, {copy, "genesis_data/genesis_txs/IwSIt1P5I_mM-gAeAvXiyxRVb73hqkQAMfxLIHbbZYk.json", "genesis_data/genesis_txs/IwSIt1P5I_mM-gAeAvXiyxRVb73hqkQAMfxLIHbbZYk.json"}, {copy, "genesis_data/genesis_txs/bqhG__MMablNhNpiSp8nopeKDCzXy97jLuSBlsKk_u8.json", "genesis_data/genesis_txs/bqhG__MMablNhNpiSp8nopeKDCzXy97jLuSBlsKk_u8.json"}, {copy, "genesis_data/genesis_txs/N6-1fOVDkoeDwKyoNdLxCVoyy-c0EF178A_oQeEchs8.json", "genesis_data/genesis_txs/N6-1fOVDkoeDwKyoNdLxCVoyy-c0EF178A_oQeEchs8.json"}, {copy, "genesis_data/genesis_txs/ez-ItWkyBvBZ6J7_Mobrpqc9RTp6I2JBmkPDV_xCQVY.json", "genesis_data/genesis_txs/ez-ItWkyBvBZ6J7_Mobrpqc9RTp6I2JBmkPDV_xCQVY.json"}, {copy, "genesis_data/genesis_txs/Z5e9G5QMZ_scJQ62qoqUs2XSuhknTuuAIhhGmfg3Ye8.json", "genesis_data/genesis_txs/Z5e9G5QMZ_scJQ62qoqUs2XSuhknTuuAIhhGmfg3Ye8.json"}, {copy, "genesis_data/genesis_txs/Ah6I8y8q0jb15KXjn0PyNfe7FR3v2xobg09Lfj7n1Mo.json", "genesis_data/genesis_txs/Ah6I8y8q0jb15KXjn0PyNfe7FR3v2xobg09Lfj7n1Mo.json"}, {copy, "genesis_data/genesis_txs/VUfaTp1eAzjnbxLR6xx_qQGVn_WOTna3rTolM8wY5BA.json", "genesis_data/genesis_txs/VUfaTp1eAzjnbxLR6xx_qQGVn_WOTna3rTolM8wY5BA.json"}, {copy, "genesis_data/genesis_txs/0FJrLrxrFkVTBwRrzCCh88Gm2tG1xPxg8s_IuRZDVzw.json", 
"genesis_data/genesis_txs/0FJrLrxrFkVTBwRrzCCh88Gm2tG1xPxg8s_IuRZDVzw.json"}, {copy, "genesis_data/genesis_txs/qU2Gu35-s9wMH1N4g_zMYKCqIStYzBZmRx0XlcIpjyk.json", "genesis_data/genesis_txs/qU2Gu35-s9wMH1N4g_zMYKCqIStYzBZmRx0XlcIpjyk.json"}, {copy, "genesis_data/genesis_txs/PySb_0NIjROmsIgwz4kMwC9MVmeY1MwuKdil0WeUzxw.json", "genesis_data/genesis_txs/PySb_0NIjROmsIgwz4kMwC9MVmeY1MwuKdil0WeUzxw.json"}, {copy, "genesis_data/genesis_txs/DJf1SRoKaPo1h3F-7oKIMu4A-r9dXXMjE57WQilPdTk.json", "genesis_data/genesis_txs/DJf1SRoKaPo1h3F-7oKIMu4A-r9dXXMjE57WQilPdTk.json"}, {copy, "genesis_data/genesis_txs/zCOtSnXKGGhXgrWld31Ak9qQA_SjpOqB6n-9sF74rhk.json", "genesis_data/genesis_txs/zCOtSnXKGGhXgrWld31Ak9qQA_SjpOqB6n-9sF74rhk.json"}, {copy, "genesis_data/genesis_txs/DDrS8BD0XTUVJt5E8kwisVTBX4PBWp0lCnSkSD3PJto.json", "genesis_data/genesis_txs/DDrS8BD0XTUVJt5E8kwisVTBX4PBWp0lCnSkSD3PJto.json"}, {copy, "genesis_data/genesis_txs/Yzj2WZ-3q5vKkBJtrmGlVjZND7iqtzvMRafS0TnQiLE.json", "genesis_data/genesis_txs/Yzj2WZ-3q5vKkBJtrmGlVjZND7iqtzvMRafS0TnQiLE.json"}, {copy, "genesis_data/genesis_txs/H_0S6x36tsFH-x1h77jV_zzGGp97V8UjmgC0RZYwbtM.json", "genesis_data/genesis_txs/H_0S6x36tsFH-x1h77jV_zzGGp97V8UjmgC0RZYwbtM.json"}, {copy, "genesis_data/genesis_txs/mvGgGlFTDJ0ukM6Bssd8G8B5PrEppr4Sg1_NTvzzV1U.json", "genesis_data/genesis_txs/mvGgGlFTDJ0ukM6Bssd8G8B5PrEppr4Sg1_NTvzzV1U.json"}, {copy, "genesis_data/genesis_txs/AN48OPO2-1mh4PKtpyoNm7SWJK2j8dF0-TFLU7Z1C9g.json", "genesis_data/genesis_txs/AN48OPO2-1mh4PKtpyoNm7SWJK2j8dF0-TFLU7Z1C9g.json"}, {copy, "genesis_data/genesis_txs/3Q5gJrbqc-PeOvD4QQ4WCNp-f5cYzTyHyg6P9b-WvwM.json", "genesis_data/genesis_txs/3Q5gJrbqc-PeOvD4QQ4WCNp-f5cYzTyHyg6P9b-WvwM.json"}, {copy, "genesis_data/genesis_txs/MOoLwb8S881q3-gM4GK7DuCEoh5CZnF1tMIZG300X58.json", "genesis_data/genesis_txs/MOoLwb8S881q3-gM4GK7DuCEoh5CZnF1tMIZG300X58.json"}, {copy, "genesis_data/genesis_txs/KPNGfBMOznCXZwOVvCXHRR6sVJx1akVkmXTV98lCMKY.json", 
"genesis_data/genesis_txs/KPNGfBMOznCXZwOVvCXHRR6sVJx1akVkmXTV98lCMKY.json"}, {copy, "genesis_data/genesis_txs/1nu07yo-0eB5GLxIJzzlxZW6nFTFiZ3XCDobJUcNyP4.json", "genesis_data/genesis_txs/1nu07yo-0eB5GLxIJzzlxZW6nFTFiZ3XCDobJUcNyP4.json"}, {copy, "genesis_data/genesis_txs/utAoO_xht393CbJ_7P_ektVYeEpkySWLM-066yJ5HyI.json", "genesis_data/genesis_txs/utAoO_xht393CbJ_7P_ektVYeEpkySWLM-066yJ5HyI.json"}, {copy, "genesis_data/genesis_txs/4UEhkNbsGdJUjx1lJQgX9KorwSf_RRZG8VMW6jMmf8Y.json", "genesis_data/genesis_txs/4UEhkNbsGdJUjx1lJQgX9KorwSf_RRZG8VMW6jMmf8Y.json"}, {copy, "genesis_data/genesis_txs/21Kfm2Apa8QWeqdMqyQAcxg9HbiluZXfQFu4-6xe-AY.json", "genesis_data/genesis_txs/21Kfm2Apa8QWeqdMqyQAcxg9HbiluZXfQFu4-6xe-AY.json"}, {copy, "genesis_data/genesis_txs/chdl-kIl4zG7VcJbKk0Q_5TeGwuH8Xp2YFPLRJJKTWw.json", "genesis_data/genesis_txs/chdl-kIl4zG7VcJbKk0Q_5TeGwuH8Xp2YFPLRJJKTWw.json"}, {copy, "genesis_data/genesis_txs/aGqWG70qjD5P8spXLMtyXnYxS9k7Net-u932EyIFl28.json", "genesis_data/genesis_txs/aGqWG70qjD5P8spXLMtyXnYxS9k7Net-u932EyIFl28.json"}, {copy, "genesis_data/genesis_txs/dn3p_BqD1gIcZQqdA8r6TucwycKGave22IqNjzKSHqI.json", "genesis_data/genesis_txs/dn3p_BqD1gIcZQqdA8r6TucwycKGave22IqNjzKSHqI.json"}, {copy, "genesis_data/genesis_txs/WE5eBi6hEq90HQvDjtJr-EmZATWJthgxh3HPPuQ7410.json", "genesis_data/genesis_txs/WE5eBi6hEq90HQvDjtJr-EmZATWJthgxh3HPPuQ7410.json"}, {copy, "genesis_data/genesis_txs/BRD5ARo8tiY64RqIoxYZ6jwbE-LQT_7jA513nHwWyRE.json", "genesis_data/genesis_txs/BRD5ARo8tiY64RqIoxYZ6jwbE-LQT_7jA513nHwWyRE.json"}, {copy, "genesis_data/genesis_txs/nXGMduBKL3mpsnFNPctfjEa9Z9zlMpdxcRrdkK95D80.json", "genesis_data/genesis_txs/nXGMduBKL3mpsnFNPctfjEa9Z9zlMpdxcRrdkK95D80.json"}, {copy, "genesis_data/genesis_txs/fkbFeVpiaAOtvt_-M9_U4HzbA8Elh5sa8xJXObrItYM.json", "genesis_data/genesis_txs/fkbFeVpiaAOtvt_-M9_U4HzbA8Elh5sa8xJXObrItYM.json"}, {copy, "genesis_data/genesis_txs/duSw-WaGKAabAztyg2zkj6hjgaVaRGBrJuvZ5Gd2Pzk.json", 
"genesis_data/genesis_txs/duSw-WaGKAabAztyg2zkj6hjgaVaRGBrJuvZ5Gd2Pzk.json"}, {copy, "genesis_data/genesis_txs/HSlgnBu2Yxros7zyehPgiu2u7h80dJfCCqrA88UnkB4.json", "genesis_data/genesis_txs/HSlgnBu2Yxros7zyehPgiu2u7h80dJfCCqrA88UnkB4.json"}, {copy, "genesis_data/genesis_txs/DTGNdsYZDXoU1nE82yEjG5ZEssxwUmkFTkM3_i6oSx8.json", "genesis_data/genesis_txs/DTGNdsYZDXoU1nE82yEjG5ZEssxwUmkFTkM3_i6oSx8.json"}, {copy, "genesis_data/genesis_txs/b96k6w6qUyLSSWZlmupyBmav6XYMsdt0xTc2yIUZtOA.json", "genesis_data/genesis_txs/b96k6w6qUyLSSWZlmupyBmav6XYMsdt0xTc2yIUZtOA.json"}, {copy, "genesis_data/genesis_txs/S5Uv2W6erubrzYjzm9QHKij51XE-j-GFdYwcV2uPIAA.json", "genesis_data/genesis_txs/S5Uv2W6erubrzYjzm9QHKij51XE-j-GFdYwcV2uPIAA.json"}, {copy, "genesis_data/genesis_txs/5ynd-L6Z1vrR7Vlyr-rkrga_Jw2ibALkIgldNmsVRcQ.json", "genesis_data/genesis_txs/5ynd-L6Z1vrR7Vlyr-rkrga_Jw2ibALkIgldNmsVRcQ.json"}, {copy, "genesis_data/genesis_txs/ocUISm-0ItAS-N3Ydwe1swo4JmoVpRzWzngFt-pDwfo.json", "genesis_data/genesis_txs/ocUISm-0ItAS-N3Ydwe1swo4JmoVpRzWzngFt-pDwfo.json"}, {copy, "genesis_data/genesis_txs/ByvrfeR4UNmWJwF2fU41mBo6ThFl49u24rEGpbeSI0Q.json", "genesis_data/genesis_txs/ByvrfeR4UNmWJwF2fU41mBo6ThFl49u24rEGpbeSI0Q.json"}, {copy, "genesis_data/genesis_txs/FTYnf3Z3QqEpNzTigfAlGTkgpgCWtFA7R8i-I1ik_Vo.json", "genesis_data/genesis_txs/FTYnf3Z3QqEpNzTigfAlGTkgpgCWtFA7R8i-I1ik_Vo.json"}, {copy, "genesis_data/genesis_txs/1Lwuom2q3FFI2pZz5EYgOzJRymgVWE3F9ZIl4vi3-kU.json", "genesis_data/genesis_txs/1Lwuom2q3FFI2pZz5EYgOzJRymgVWE3F9ZIl4vi3-kU.json"}, {copy, "genesis_data/genesis_txs/QR75we1zHW-qO7dsI932kXX0YrAIyuC2XIDRhfmK-fE.json", "genesis_data/genesis_txs/QR75we1zHW-qO7dsI932kXX0YrAIyuC2XIDRhfmK-fE.json"}, {copy, "genesis_data/genesis_txs/PjeEg7GpKT8twlBkp8GHAsEqfMvmNd3RaAx-l0R_i2w.json", "genesis_data/genesis_txs/PjeEg7GpKT8twlBkp8GHAsEqfMvmNd3RaAx-l0R_i2w.json"}, {copy, "genesis_data/genesis_txs/Kl1zrMIDIC9yW8yLMnSKQYDoV0PY41ymzJQw91qaZvY.json", 
"genesis_data/genesis_txs/Kl1zrMIDIC9yW8yLMnSKQYDoV0PY41ymzJQw91qaZvY.json"}, {copy, "genesis_data/genesis_txs/ijroBK9n_uKCS97V7iege_5Av2E-tm6ujquAazT_sBI.json", "genesis_data/genesis_txs/ijroBK9n_uKCS97V7iege_5Av2E-tm6ujquAazT_sBI.json"}, {copy, "genesis_data/genesis_txs/Kgr-XWwHYos5Y95ZJ9mAUwjYjj_rP0I-GnWctQDNlp8.json", "genesis_data/genesis_txs/Kgr-XWwHYos5Y95ZJ9mAUwjYjj_rP0I-GnWctQDNlp8.json"}, {copy, "genesis_data/genesis_txs/snWRgSI3vlTOy3RRkuNckM-ws-5lpFiPMpYlLx_zPyk.json", "genesis_data/genesis_txs/snWRgSI3vlTOy3RRkuNckM-ws-5lpFiPMpYlLx_zPyk.json"}, {copy, "genesis_data/genesis_txs/5qRekKepIlFbUhGMq_nNy89bzx_K44e4GmUKYAe9MRU.json", "genesis_data/genesis_txs/5qRekKepIlFbUhGMq_nNy89bzx_K44e4GmUKYAe9MRU.json"}, {copy, "genesis_data/genesis_txs/gyG1bGFt7qkMyUCrKiEfMzMzc3_3PooewqNeJpy-3Xk.json", "genesis_data/genesis_txs/gyG1bGFt7qkMyUCrKiEfMzMzc3_3PooewqNeJpy-3Xk.json"}, {copy, "genesis_data/genesis_txs/Z7gfizrPOypT4Pagg3oli5g8wA8pbKB0ZJnrw-FVyys.json", "genesis_data/genesis_txs/Z7gfizrPOypT4Pagg3oli5g8wA8pbKB0ZJnrw-FVyys.json"}, {copy, "genesis_data/genesis_txs/lsuH-ITPI--6KSzhIFclsEAWOSoRQu-8tlnOSxj_Er0.json", "genesis_data/genesis_txs/lsuH-ITPI--6KSzhIFclsEAWOSoRQu-8tlnOSxj_Er0.json"}, {copy, "genesis_data/genesis_txs/xavUY4L0L0nLNVvHiYfBqGL5iqUvdwQ-iY_nLLMB6J4.json", "genesis_data/genesis_txs/xavUY4L0L0nLNVvHiYfBqGL5iqUvdwQ-iY_nLLMB6J4.json"}, {copy, "genesis_data/genesis_txs/3khTH_o8WZHSCzP-AThkmt7zZL-d_lcqUKC8nz7c8lk.json", "genesis_data/genesis_txs/3khTH_o8WZHSCzP-AThkmt7zZL-d_lcqUKC8nz7c8lk.json"}, {copy, "genesis_data/genesis_txs/5FL2C4l-5cTl9wg4CblgIxzko8hGsB5URVA_yTAd4Nk.json", "genesis_data/genesis_txs/5FL2C4l-5cTl9wg4CblgIxzko8hGsB5URVA_yTAd4Nk.json"}, {copy, "genesis_data/genesis_txs/NBxewjnZAfekK0hKmwL_OpF1521JTeIpLk2a2TLDnTk.json", "genesis_data/genesis_txs/NBxewjnZAfekK0hKmwL_OpF1521JTeIpLk2a2TLDnTk.json"}, {copy, "genesis_data/genesis_txs/xC7ski_qpcrRwRkxxHwPZd2lOX6Q---2qdQ4Rr-wxAM.json", 
"genesis_data/genesis_txs/xC7ski_qpcrRwRkxxHwPZd2lOX6Q---2qdQ4Rr-wxAM.json"}, {copy, "genesis_data/genesis_txs/h7qIFbn0LoexuVwBcjKW7v5A65iQDQFYZUQjuowfIbk.json", "genesis_data/genesis_txs/h7qIFbn0LoexuVwBcjKW7v5A65iQDQFYZUQjuowfIbk.json"}, {copy, "genesis_data/genesis_txs/DQ6WaBfLEMEFhKoMoutuPyO_zFg1hWTDXT13CD8n1nw.json", "genesis_data/genesis_txs/DQ6WaBfLEMEFhKoMoutuPyO_zFg1hWTDXT13CD8n1nw.json"}, {copy, "genesis_data/genesis_txs/8y-ghHqMT2lEHQn86jRXkQ8I5cLWWtKW1CQROp8mzIs.json", "genesis_data/genesis_txs/8y-ghHqMT2lEHQn86jRXkQ8I5cLWWtKW1CQROp8mzIs.json"}, {copy, "genesis_data/genesis_txs/zUFRBcWpPAUyMlojffeTnPgsLo6YgU6JaJgOR0mpBuM.json", "genesis_data/genesis_txs/zUFRBcWpPAUyMlojffeTnPgsLo6YgU6JaJgOR0mpBuM.json"}, {copy, "genesis_data/genesis_txs/0mFNtCi-u34uwOj3BimQTPOT9PgLGE8uqCbtXhnwoKI.json", "genesis_data/genesis_txs/0mFNtCi-u34uwOj3BimQTPOT9PgLGE8uqCbtXhnwoKI.json"}, {copy, "genesis_data/genesis_txs/luQlV_58e9qjm7EZpoO6f5Y1j349Q34UwTW1Lx9J_vE.json", "genesis_data/genesis_txs/luQlV_58e9qjm7EZpoO6f5Y1j349Q34UwTW1Lx9J_vE.json"}, {copy, "genesis_data/genesis_txs/TFX7m_Kf56rV6LNuyQ31NeVoDHJ3x0YqhIv4-IBQ-3s.json", "genesis_data/genesis_txs/TFX7m_Kf56rV6LNuyQ31NeVoDHJ3x0YqhIv4-IBQ-3s.json"}, {copy, "genesis_data/genesis_txs/LFQ5iV6E5wyBbJmJoFJdH39ZxfW-y7mZFKou2H-ONvg.json", "genesis_data/genesis_txs/LFQ5iV6E5wyBbJmJoFJdH39ZxfW-y7mZFKou2H-ONvg.json"}, {copy, "genesis_data/genesis_txs/ud3zGJZA5tPRoitGG1c6HWm9W7iRS4ZF3u6PbZ-blns.json", "genesis_data/genesis_txs/ud3zGJZA5tPRoitGG1c6HWm9W7iRS4ZF3u6PbZ-blns.json"}, {copy, "genesis_data/genesis_txs/dv28G4IsYul7liWrycsx4UKSYHA4sWUY6xFQzRPi4p4.json", "genesis_data/genesis_txs/dv28G4IsYul7liWrycsx4UKSYHA4sWUY6xFQzRPi4p4.json"}, {copy, "genesis_data/genesis_txs/Ykh5TAI6koBN4UTQZ3GNIDr_uHNjlpHH9HsvtEkoWLA.json", "genesis_data/genesis_txs/Ykh5TAI6koBN4UTQZ3GNIDr_uHNjlpHH9HsvtEkoWLA.json"}, {copy, "genesis_data/genesis_txs/BQ2RVL6XY99AIkPKDBCfUfRmJGejkZ8YKgKZc2LewhU.json", 
"genesis_data/genesis_txs/BQ2RVL6XY99AIkPKDBCfUfRmJGejkZ8YKgKZc2LewhU.json"}, {copy, "genesis_data/genesis_txs/_fLFu_BOzTEPdX35rqUruuyNxi7f_La8T1_JG7pIPd0.json", "genesis_data/genesis_txs/_fLFu_BOzTEPdX35rqUruuyNxi7f_La8T1_JG7pIPd0.json"}, {copy, "genesis_data/genesis_txs/N3lqe8CUwPfChinYVV4OZZQNjtXc26JkOJyqgoKhq7E.json", "genesis_data/genesis_txs/N3lqe8CUwPfChinYVV4OZZQNjtXc26JkOJyqgoKhq7E.json"}, {copy, "genesis_data/genesis_txs/i9xaFWy0avtyCCxQdmWfGNDgh-PaJgIHkNK1pcJzmV8.json", "genesis_data/genesis_txs/i9xaFWy0avtyCCxQdmWfGNDgh-PaJgIHkNK1pcJzmV8.json"}, {copy, "genesis_data/genesis_txs/6J1sN2nhGpqe9iJwgdfnxxCK4af88__HoEG8MLeqtyM.json", "genesis_data/genesis_txs/6J1sN2nhGpqe9iJwgdfnxxCK4af88__HoEG8MLeqtyM.json"}, {copy, "genesis_data/genesis_txs/Juzb8MlmGd2qomIUwgfGzIFO7c7ZcY87kJPmqpSkt18.json", "genesis_data/genesis_txs/Juzb8MlmGd2qomIUwgfGzIFO7c7ZcY87kJPmqpSkt18.json"}, {copy, "genesis_data/genesis_txs/y0PrXtX7PonEbIG3uEdu-k-McGeLLAjzUriUTCMTGcw.json", "genesis_data/genesis_txs/y0PrXtX7PonEbIG3uEdu-k-McGeLLAjzUriUTCMTGcw.json"}, {copy, "genesis_data/genesis_txs/1yvqJKdnb9SRRKoBg1m0kWAsSh9S0R5r9T9TE0YHfRQ.json", "genesis_data/genesis_txs/1yvqJKdnb9SRRKoBg1m0kWAsSh9S0R5r9T9TE0YHfRQ.json"}, {copy, "genesis_data/genesis_txs/oRvFwVpHVeo0iysSg2jFOAZKE-hKwbm6mGeZ6VUZmxk.json", "genesis_data/genesis_txs/oRvFwVpHVeo0iysSg2jFOAZKE-hKwbm6mGeZ6VUZmxk.json"}, {copy, "genesis_data/genesis_txs/wnOghJX4aZlbm7SDDb4UUX8_6GZYpYYx3GireamHwAc.json", "genesis_data/genesis_txs/wnOghJX4aZlbm7SDDb4UUX8_6GZYpYYx3GireamHwAc.json"}, {copy, "genesis_data/genesis_txs/4gLPD5njSRtiaJwjcjmNOyI5Vw8sFBQQWOefmy4SPmQ.json", "genesis_data/genesis_txs/4gLPD5njSRtiaJwjcjmNOyI5Vw8sFBQQWOefmy4SPmQ.json"}, {copy, "genesis_data/genesis_txs/bnT7410oaZtnCdurp5jNgLKju9d_RRxhgggnxa5frMQ.json", "genesis_data/genesis_txs/bnT7410oaZtnCdurp5jNgLKju9d_RRxhgggnxa5frMQ.json"}, {copy, "genesis_data/genesis_txs/cgU_TlXi5gJ7hShSBYsS4UVi-sLTtfFv1y1sy2nNhos.json", 
"genesis_data/genesis_txs/cgU_TlXi5gJ7hShSBYsS4UVi-sLTtfFv1y1sy2nNhos.json"}, {copy, "genesis_data/genesis_txs/cIXdvNTNHJSmA6Rt5UgSNfMcGfvxDnYTa3a1ulS1SiY.json", "genesis_data/genesis_txs/cIXdvNTNHJSmA6Rt5UgSNfMcGfvxDnYTa3a1ulS1SiY.json"}, {copy, "genesis_data/genesis_txs/LixFbPqM1ZZ-5JWo339FMfPCpD_6M85rVK8IVmmt8m8.json", "genesis_data/genesis_txs/LixFbPqM1ZZ-5JWo339FMfPCpD_6M85rVK8IVmmt8m8.json"}, {copy, "genesis_data/genesis_txs/DkBAprUInkCbFa6A_WJJNL1z_PnhEavvyZtF09lmyvw.json", "genesis_data/genesis_txs/DkBAprUInkCbFa6A_WJJNL1z_PnhEavvyZtF09lmyvw.json"}, {copy, "genesis_data/genesis_txs/puLpw8OIIYCOatImKjpV5s0JWyKFq6bXFMz_qSf6mUA.json", "genesis_data/genesis_txs/puLpw8OIIYCOatImKjpV5s0JWyKFq6bXFMz_qSf6mUA.json"}, {copy, "genesis_data/genesis_txs/0biLy8DoOhucpeYzOj5jnopxxwe0XDRfCOMjyz_a74U.json", "genesis_data/genesis_txs/0biLy8DoOhucpeYzOj5jnopxxwe0XDRfCOMjyz_a74U.json"}, {copy, "genesis_data/genesis_txs/hX6nohfkKZ_9ajziHJ6g5V5cIe1EX9H9rg7eScK988s.json", "genesis_data/genesis_txs/hX6nohfkKZ_9ajziHJ6g5V5cIe1EX9H9rg7eScK988s.json"}, {copy, "genesis_data/genesis_txs/GlWMQUuiL80knS07G7NpoYat3w18VMuyLEuC_Pmijng.json", "genesis_data/genesis_txs/GlWMQUuiL80knS07G7NpoYat3w18VMuyLEuC_Pmijng.json"}, {copy, "genesis_data/genesis_txs/f3jE7NK419FZzwkx9VjTkrcX5FEgl2Ky3KSK0vH-wj0.json", "genesis_data/genesis_txs/f3jE7NK419FZzwkx9VjTkrcX5FEgl2Ky3KSK0vH-wj0.json"}, {copy, "genesis_data/genesis_txs/UMk64563QZfxgZr_vKOTDrcp5XJNENF82Pji4a078YY.json", "genesis_data/genesis_txs/UMk64563QZfxgZr_vKOTDrcp5XJNENF82Pji4a078YY.json"}, {copy, "genesis_data/genesis_txs/0EzNUQy_5b7CwNNLVAi7CnameMgnxVh-XyahT2kn74Y.json", "genesis_data/genesis_txs/0EzNUQy_5b7CwNNLVAi7CnameMgnxVh-XyahT2kn74Y.json"}, {copy, "genesis_data/genesis_txs/3ku6XelnvBsaRjoNxDWb_kT_PRlQ88U0pbWURziCj7s.json", "genesis_data/genesis_txs/3ku6XelnvBsaRjoNxDWb_kT_PRlQ88U0pbWURziCj7s.json"}, {copy, "genesis_data/genesis_txs/RU5mkM_3UrjRMffwgj7ovDMYxxjhfXvliozhpIqw0sA.json", 
"genesis_data/genesis_txs/RU5mkM_3UrjRMffwgj7ovDMYxxjhfXvliozhpIqw0sA.json"}, {copy, "genesis_data/genesis_txs/oMP40Kgd9MxLfksmW_HAlGe8Rn1Px8tpF-NOHBfe9oo.json", "genesis_data/genesis_txs/oMP40Kgd9MxLfksmW_HAlGe8Rn1Px8tpF-NOHBfe9oo.json"}, {copy, "genesis_data/genesis_txs/UYoJMT0QxMtB6ctUB-9iQlcx6fF8R3s8ahM4_iF4wiQ.json", "genesis_data/genesis_txs/UYoJMT0QxMtB6ctUB-9iQlcx6fF8R3s8ahM4_iF4wiQ.json"}, {copy, "genesis_data/genesis_txs/96Ijx5TWSxZmZaDH1pteGHFjIYY0aHmGWNHiMYeSYIM.json", "genesis_data/genesis_txs/96Ijx5TWSxZmZaDH1pteGHFjIYY0aHmGWNHiMYeSYIM.json"}, {copy, "genesis_data/genesis_txs/n6TKbsqmGl2m3yH15RAe405vYZQ7DStlvYsHCHp1D0U.json", "genesis_data/genesis_txs/n6TKbsqmGl2m3yH15RAe405vYZQ7DStlvYsHCHp1D0U.json"}, {copy, "genesis_data/genesis_txs/XtDRu-1SyoRL21gpKcxWtxyksVwTF9kvW26hvQ_bPzE.json", "genesis_data/genesis_txs/XtDRu-1SyoRL21gpKcxWtxyksVwTF9kvW26hvQ_bPzE.json"}, {copy, "genesis_data/genesis_txs/YIEEyYfNIRSjzm_gzv6l5CelyL4AOzKX9M4XPXRk2Yo.json", "genesis_data/genesis_txs/YIEEyYfNIRSjzm_gzv6l5CelyL4AOzKX9M4XPXRk2Yo.json"}, {copy, "genesis_data/genesis_txs/NptjIrqZrQMSdLbXAGyQCr8audCzArV3EofsjRCqrQw.json", "genesis_data/genesis_txs/NptjIrqZrQMSdLbXAGyQCr8audCzArV3EofsjRCqrQw.json"}, {copy, "genesis_data/genesis_txs/vWeY4yJSJF9LXogRZb3Qr6QyLtEIL_8IY4bzJ2e7O5I.json", "genesis_data/genesis_txs/vWeY4yJSJF9LXogRZb3Qr6QyLtEIL_8IY4bzJ2e7O5I.json"}, {copy, "genesis_data/genesis_txs/fBVa04p7MEL8BsPpyD_Pwv3uqBnBMVzG9YpXsCwZLtc.json", "genesis_data/genesis_txs/fBVa04p7MEL8BsPpyD_Pwv3uqBnBMVzG9YpXsCwZLtc.json"}, {copy, "genesis_data/genesis_txs/opfZTSNdqaxXZUmaKROD2sd4QkyNDnZE3u1A95eSw4E.json", "genesis_data/genesis_txs/opfZTSNdqaxXZUmaKROD2sd4QkyNDnZE3u1A95eSw4E.json"}, {copy, "genesis_data/genesis_txs/-M5_EBM4MayX8ZpuLFoANHO00c4pdrSmAQbPYv7fq4U.json", "genesis_data/genesis_txs/-M5_EBM4MayX8ZpuLFoANHO00c4pdrSmAQbPYv7fq4U.json"}, {copy, "genesis_data/genesis_txs/8rKBfpmkPlxnnYr6t0xIpUDubdidK0Fpnois7-xQJtc.json", 
"genesis_data/genesis_txs/8rKBfpmkPlxnnYr6t0xIpUDubdidK0Fpnois7-xQJtc.json"}, {copy, "genesis_data/genesis_txs/rvbM0iB1HJ1YadedIDWjJ95J2XBHWwPAJD4VfpdQpxQ.json", "genesis_data/genesis_txs/rvbM0iB1HJ1YadedIDWjJ95J2XBHWwPAJD4VfpdQpxQ.json"}, {copy, "genesis_data/genesis_txs/_Hf1lw_E6Lyd-0PGkCRQaN10cdEx4M-hl9y-zWiDo8k.json", "genesis_data/genesis_txs/_Hf1lw_E6Lyd-0PGkCRQaN10cdEx4M-hl9y-zWiDo8k.json"}, {copy, "genesis_data/genesis_txs/tOIFTqEef5fQYPzhlkC2Um7rddT6MyrHPzUWXDv_mJc.json", "genesis_data/genesis_txs/tOIFTqEef5fQYPzhlkC2Um7rddT6MyrHPzUWXDv_mJc.json"}, {copy, "genesis_data/genesis_txs/4LwZwAVcaBXhXsP5b4mnE11tUXefuRUTtTibtvoozDQ.json", "genesis_data/genesis_txs/4LwZwAVcaBXhXsP5b4mnE11tUXefuRUTtTibtvoozDQ.json"}, {copy, "genesis_data/genesis_txs/XxgirNr3QGaJTKxPWqK9byYLj7SdbfZudKd9rbynWyM.json", "genesis_data/genesis_txs/XxgirNr3QGaJTKxPWqK9byYLj7SdbfZudKd9rbynWyM.json"}, {copy, "genesis_data/genesis_txs/IvyUOghXQ31LnYE3bYEkS82gTAvpIa1rGGQKmiJuuMk.json", "genesis_data/genesis_txs/IvyUOghXQ31LnYE3bYEkS82gTAvpIa1rGGQKmiJuuMk.json"}, {copy, "genesis_data/genesis_txs/X9biR_ZA-rnpzk4gfLi0-pBSsjjT2l9Rk0VfYwf1WMo.json", "genesis_data/genesis_txs/X9biR_ZA-rnpzk4gfLi0-pBSsjjT2l9Rk0VfYwf1WMo.json"}, {copy, "genesis_data/genesis_txs/8b-7D96aRFJgDm8z5Tg47vBbdjseW0rRi17TYDcaQ5Q.json", "genesis_data/genesis_txs/8b-7D96aRFJgDm8z5Tg47vBbdjseW0rRi17TYDcaQ5Q.json"}, {copy, "genesis_data/genesis_txs/0O-UnzBvSFYoMQrbcsKHRH_YqNNylC1n9KWXmm-rr90.json", "genesis_data/genesis_txs/0O-UnzBvSFYoMQrbcsKHRH_YqNNylC1n9KWXmm-rr90.json"}, {copy, "genesis_data/genesis_txs/TUIdVI5yQH50laHvkxgAnTV6uuE2LXXH3pxIe6Q2S7I.json", "genesis_data/genesis_txs/TUIdVI5yQH50laHvkxgAnTV6uuE2LXXH3pxIe6Q2S7I.json"}, {copy, "genesis_data/genesis_txs/0Mxvgz6_wL0FBOxJmHcRcNwiaV8B90whDxG4Vh_GFic.json", "genesis_data/genesis_txs/0Mxvgz6_wL0FBOxJmHcRcNwiaV8B90whDxG4Vh_GFic.json"}, {copy, "genesis_data/genesis_txs/qyMWe-VUOzHXkQviMhNS0wJI_27nvCgDY9iiKANk-lI.json", 
"genesis_data/genesis_txs/qyMWe-VUOzHXkQviMhNS0wJI_27nvCgDY9iiKANk-lI.json"}, {copy, "genesis_data/genesis_txs/LC-_5GDhs09OvN7r8GPmjMa6A9xSeVtsAmDgYCgspvc.json", "genesis_data/genesis_txs/LC-_5GDhs09OvN7r8GPmjMa6A9xSeVtsAmDgYCgspvc.json"}, {copy, "genesis_data/genesis_txs/sB51Zz1HRjpwrWFhW6ZE2E-n5hl3joqxPQgnMCLX4ZM.json", "genesis_data/genesis_txs/sB51Zz1HRjpwrWFhW6ZE2E-n5hl3joqxPQgnMCLX4ZM.json"}, {copy, "genesis_data/genesis_txs/NEXnMz8Yuw-xfIPprKT2iwx5A1UjWwRHCH7XCpeXIPg.json", "genesis_data/genesis_txs/NEXnMz8Yuw-xfIPprKT2iwx5A1UjWwRHCH7XCpeXIPg.json"}, {copy, "genesis_data/genesis_txs/5OdjYWAipCjWzpqfNoNhyJ673d4pRMNva8la_SFfu_c.json", "genesis_data/genesis_txs/5OdjYWAipCjWzpqfNoNhyJ673d4pRMNva8la_SFfu_c.json"}, {copy, "genesis_data/genesis_txs/drYsyF85HcvC7LM1hkzPPgTj3_zp3amcNVNobBmOxvc.json", "genesis_data/genesis_txs/drYsyF85HcvC7LM1hkzPPgTj3_zp3amcNVNobBmOxvc.json"}, {copy, "genesis_data/genesis_txs/vQ4zTq--De8FHdVnE7sYCemwiaqoZDS4emR_y6o6ZFA.json", "genesis_data/genesis_txs/vQ4zTq--De8FHdVnE7sYCemwiaqoZDS4emR_y6o6ZFA.json"}, {copy, "genesis_data/genesis_txs/zwl046ia6I5VWLRYPJzBI70ypBQN2VlvLH9a_ndNKxA.json", "genesis_data/genesis_txs/zwl046ia6I5VWLRYPJzBI70ypBQN2VlvLH9a_ndNKxA.json"}, {copy, "genesis_data/genesis_txs/xYpSRRpO8ejUGeohlRutNt9qUMgvuZJGkPGCyu1kSas.json", "genesis_data/genesis_txs/xYpSRRpO8ejUGeohlRutNt9qUMgvuZJGkPGCyu1kSas.json"}, {copy, "genesis_data/genesis_txs/wmZTwziFc_VlvYJz_4nyxYd3WxznBmsn5QQyRKDcWXU.json", "genesis_data/genesis_txs/wmZTwziFc_VlvYJz_4nyxYd3WxznBmsn5QQyRKDcWXU.json"}, {copy, "genesis_data/genesis_txs/SWNkfm9ZZPCiYKFg6oIW_IgqJp5Ypbp-Fs9S7YgPm0c.json", "genesis_data/genesis_txs/SWNkfm9ZZPCiYKFg6oIW_IgqJp5Ypbp-Fs9S7YgPm0c.json"}, {copy, "genesis_data/genesis_txs/y6WPKL6MHzZp2ktvb1cETmNMBJyCEPlxdisKlroEBtc.json", "genesis_data/genesis_txs/y6WPKL6MHzZp2ktvb1cETmNMBJyCEPlxdisKlroEBtc.json"}, {copy, "genesis_data/genesis_txs/0ogs8DTdSrNxfE2LzrScPvnyf7CQ7jMdFaS_l0-K-GU.json", 
"genesis_data/genesis_txs/0ogs8DTdSrNxfE2LzrScPvnyf7CQ7jMdFaS_l0-K-GU.json"}, {copy, "genesis_data/genesis_txs/328-6fOVCfCid4QTxHjkAMkQLMHZgDg-hZo5PnVfp2Q.json", "genesis_data/genesis_txs/328-6fOVCfCid4QTxHjkAMkQLMHZgDg-hZo5PnVfp2Q.json"}, {copy, "genesis_data/genesis_txs/iRF6OnneKHJLhLMdCXpo6LsxVyWIGyklFEpu1bN3cyE.json", "genesis_data/genesis_txs/iRF6OnneKHJLhLMdCXpo6LsxVyWIGyklFEpu1bN3cyE.json"}, {copy, "genesis_data/genesis_txs/g19-Tkf4xuM9golcjx0mA1RkJUYocQJ3uYnH8MU1ePs.json", "genesis_data/genesis_txs/g19-Tkf4xuM9golcjx0mA1RkJUYocQJ3uYnH8MU1ePs.json"}, {copy, "genesis_data/genesis_txs/aPxbCROotxwkdovWbQEhw18UNAzVy-AmjYwjo9lb5u4.json", "genesis_data/genesis_txs/aPxbCROotxwkdovWbQEhw18UNAzVy-AmjYwjo9lb5u4.json"}, {copy, "genesis_data/genesis_txs/COXhhpbcLSEe2iP2kp4SDj5NjjBAC8CucsAgOHRF_lc.json", "genesis_data/genesis_txs/COXhhpbcLSEe2iP2kp4SDj5NjjBAC8CucsAgOHRF_lc.json"}, {copy, "genesis_data/genesis_txs/TGdhJ01pPw49A0ZIaCCcYBnL-RPK_3KZH3cA6E9dVqc.json", "genesis_data/genesis_txs/TGdhJ01pPw49A0ZIaCCcYBnL-RPK_3KZH3cA6E9dVqc.json"}, {copy, "genesis_data/genesis_txs/v2UplxDprWwaIwbB6z3KNEj3GjloqM8SinvVahZ1Wpk.json", "genesis_data/genesis_txs/v2UplxDprWwaIwbB6z3KNEj3GjloqM8SinvVahZ1Wpk.json"}, {copy, "genesis_data/genesis_txs/P_pvvzlCIX7Yaiuv6zt1voLcn69gb9jAHPRhHaHjLng.json", "genesis_data/genesis_txs/P_pvvzlCIX7Yaiuv6zt1voLcn69gb9jAHPRhHaHjLng.json"}, {copy, "genesis_data/genesis_txs/WsYJKhqhppBF6_eGbd0OACdu3LU6-CUuMcLeG3ST2qc.json", "genesis_data/genesis_txs/WsYJKhqhppBF6_eGbd0OACdu3LU6-CUuMcLeG3ST2qc.json"}, {copy, "genesis_data/genesis_txs/weff0Y0_3-H7Vy1HrbpIzUmbTM1rZ8Lw0wgDGYmlsrM.json", "genesis_data/genesis_txs/weff0Y0_3-H7Vy1HrbpIzUmbTM1rZ8Lw0wgDGYmlsrM.json"}, {copy, "genesis_data/genesis_txs/oWWJcAiBCxhtWkIqwir4-vTvD3JFpHgZRNIpS-Xjzp4.json", "genesis_data/genesis_txs/oWWJcAiBCxhtWkIqwir4-vTvD3JFpHgZRNIpS-Xjzp4.json"}, {copy, "genesis_data/genesis_txs/Mv-TFhA3639O4JbKzoO3wo8LNPcFwA_vaaOLHfWRfSo.json", 
"genesis_data/genesis_txs/Mv-TFhA3639O4JbKzoO3wo8LNPcFwA_vaaOLHfWRfSo.json"}, {copy, "genesis_data/genesis_txs/iuTLZ3xxGpaBCggV5xfUkJ6hMdUQKHw6f_vEn6sbmPo.json", "genesis_data/genesis_txs/iuTLZ3xxGpaBCggV5xfUkJ6hMdUQKHw6f_vEn6sbmPo.json"}, {copy, "genesis_data/genesis_txs/tVLYd_62zbU-VPzQPOMHUo9TJR1dvSZ_pAHrC5Ubs8Q.json", "genesis_data/genesis_txs/tVLYd_62zbU-VPzQPOMHUo9TJR1dvSZ_pAHrC5Ubs8Q.json"}, {copy, "genesis_data/genesis_txs/KhQeu3CG_X1zoHbyy99GUlC9gVFFexf6vVPOlLgCj9I.json", "genesis_data/genesis_txs/KhQeu3CG_X1zoHbyy99GUlC9gVFFexf6vVPOlLgCj9I.json"}, {copy, "genesis_data/genesis_txs/o0nw6fU4gPL7Ae45x1BEQr5GkXSzZUrWnZrdIWqgx6w.json", "genesis_data/genesis_txs/o0nw6fU4gPL7Ae45x1BEQr5GkXSzZUrWnZrdIWqgx6w.json"}, {copy, "genesis_data/genesis_txs/K47jh6Jr6TmZeZ_TadmyLLy1V6ZvLNpvV5FWcICohnk.json", "genesis_data/genesis_txs/K47jh6Jr6TmZeZ_TadmyLLy1V6ZvLNpvV5FWcICohnk.json"}, {copy, "genesis_data/genesis_txs/7SfLhJLtevo0zu-1bo8q6zX98WbGgpDNuY6PXbzS_j0.json", "genesis_data/genesis_txs/7SfLhJLtevo0zu-1bo8q6zX98WbGgpDNuY6PXbzS_j0.json"}, {copy, "genesis_data/genesis_txs/C3auX8HXhc2dChmvSBUfgGyYynuAr6P3g0p7420GG78.json", "genesis_data/genesis_txs/C3auX8HXhc2dChmvSBUfgGyYynuAr6P3g0p7420GG78.json"}, {copy, "genesis_data/genesis_txs/m5zFPHB-2VjCgTLStD9TLZwD1CHfLELPKkVXFJGIptM.json", "genesis_data/genesis_txs/m5zFPHB-2VjCgTLStD9TLZwD1CHfLELPKkVXFJGIptM.json"}, {copy, "genesis_data/genesis_txs/MPP4fxmSkvM2BVq8rumeT5yvDNu3QAT_kqpOlAq5s2E.json", "genesis_data/genesis_txs/MPP4fxmSkvM2BVq8rumeT5yvDNu3QAT_kqpOlAq5s2E.json"}, {copy, "genesis_data/genesis_txs/6YbxtptbO-sidrnYdgn0G_CiNBh-az5ZzWrSCP9DYKA.json", "genesis_data/genesis_txs/6YbxtptbO-sidrnYdgn0G_CiNBh-az5ZzWrSCP9DYKA.json"}, {copy, "genesis_data/genesis_txs/mGAMsTqBzau-MjTkMS5Z3g2_nUD-qQWeLtq6qlzkVl0.json", "genesis_data/genesis_txs/mGAMsTqBzau-MjTkMS5Z3g2_nUD-qQWeLtq6qlzkVl0.json"}, {copy, "genesis_data/genesis_txs/CSkFcCmNgvnp7jp7aK0tEGsLWiZVMF-QBkEFaJrAG48.json", 
"genesis_data/genesis_txs/CSkFcCmNgvnp7jp7aK0tEGsLWiZVMF-QBkEFaJrAG48.json"}, {copy, "genesis_data/genesis_txs/FkZzg_-5eSdFlbq9XnHe3wRhYidHJPXwUQ6YLuJijS0.json", "genesis_data/genesis_txs/FkZzg_-5eSdFlbq9XnHe3wRhYidHJPXwUQ6YLuJijS0.json"}, {copy, "genesis_data/genesis_txs/9JWfraRekKtgXiIjssn0tVSzhaCaN682jECsrKtR0_E.json", "genesis_data/genesis_txs/9JWfraRekKtgXiIjssn0tVSzhaCaN682jECsrKtR0_E.json"}, {copy, "genesis_data/genesis_txs/Ms9gCRdVwT9u8-ewYd6c-T0bet-n24n_q_Hn0-BlMow.json", "genesis_data/genesis_txs/Ms9gCRdVwT9u8-ewYd6c-T0bet-n24n_q_Hn0-BlMow.json"}, {copy, "genesis_data/genesis_txs/CbV_CDXgVNjV6fyoBDkYmbAcaC5VsLDYXgEIwj2Ewyo.json", "genesis_data/genesis_txs/CbV_CDXgVNjV6fyoBDkYmbAcaC5VsLDYXgEIwj2Ewyo.json"}, {copy, "genesis_data/genesis_txs/4pNPqxodBesN6jQl51nH17GA1fWYfHVm8cIEfusnPLY.json", "genesis_data/genesis_txs/4pNPqxodBesN6jQl51nH17GA1fWYfHVm8cIEfusnPLY.json"}, {copy, "genesis_data/genesis_txs/0_GKZOdtRH-nc094U5kFBlvQSjPz_oX0tcIroqLFD3U.json", "genesis_data/genesis_txs/0_GKZOdtRH-nc094U5kFBlvQSjPz_oX0tcIroqLFD3U.json"}, {copy, "genesis_data/genesis_txs/G1GqspPmLkJTiT35QUTWBT4def7j5ORSfHCtrYzrrng.json", "genesis_data/genesis_txs/G1GqspPmLkJTiT35QUTWBT4def7j5ORSfHCtrYzrrng.json"}, {copy, "genesis_data/genesis_txs/4ewYAvsgaT-6Oy23qPqK29O_AgfvNbhLvol13yN1PdQ.json", "genesis_data/genesis_txs/4ewYAvsgaT-6Oy23qPqK29O_AgfvNbhLvol13yN1PdQ.json"}, {copy, "genesis_data/genesis_txs/LBTipZADoYfO-9UecE07Z83ijiLl0f2wAGXyRFQqKCY.json", "genesis_data/genesis_txs/LBTipZADoYfO-9UecE07Z83ijiLl0f2wAGXyRFQqKCY.json"}, {copy, "genesis_data/genesis_txs/OaumRLT8oE6J8gqrQ9DrY_grMuSfWtai95VnqrX24hs.json", "genesis_data/genesis_txs/OaumRLT8oE6J8gqrQ9DrY_grMuSfWtai95VnqrX24hs.json"}, {copy, "genesis_data/genesis_txs/DMtXbcR_qHwdYXvkuCGOQARs_QtN9iWPw4x6TTaWOcw.json", "genesis_data/genesis_txs/DMtXbcR_qHwdYXvkuCGOQARs_QtN9iWPw4x6TTaWOcw.json"}, {copy, "genesis_data/genesis_txs/Eeo6rANLMAXonDFLDG2nu7n99O3Ymfk01wYXJBbEixY.json", 
"genesis_data/genesis_txs/Eeo6rANLMAXonDFLDG2nu7n99O3Ymfk01wYXJBbEixY.json"}, {copy, "genesis_data/genesis_txs/ZEB62vqKvkPK2s_RmxgQ2IhafMxJ_TXCGswrrKLhYiQ.json", "genesis_data/genesis_txs/ZEB62vqKvkPK2s_RmxgQ2IhafMxJ_TXCGswrrKLhYiQ.json"}, {copy, "genesis_data/genesis_txs/TkN4QLdC4tu-_Po50RYwF33shyHcanHSe_BKpryK0JA.json", "genesis_data/genesis_txs/TkN4QLdC4tu-_Po50RYwF33shyHcanHSe_BKpryK0JA.json"}, {copy, "genesis_data/genesis_txs/YfHEyNUGsOUiuqCgHV127cg2Z5Yap9tcQB1LH7tq9ZA.json", "genesis_data/genesis_txs/YfHEyNUGsOUiuqCgHV127cg2Z5Yap9tcQB1LH7tq9ZA.json"}, {copy, "genesis_data/genesis_txs/NvGRQrdis2HV22enpSpPqsb0M8s-pN_nl7eJtalZyC4.json", "genesis_data/genesis_txs/NvGRQrdis2HV22enpSpPqsb0M8s-pN_nl7eJtalZyC4.json"}, {copy, "genesis_data/genesis_txs/L8tkBBP7fyYfK4txqP-fGk_ODOU4UfIgFV79O-qd5vY.json", "genesis_data/genesis_txs/L8tkBBP7fyYfK4txqP-fGk_ODOU4UfIgFV79O-qd5vY.json"}, {copy, "genesis_data/genesis_txs/ISiC3yaTW9KnZmgs39osghIg0HP8ISh77bzH7u2m55Q.json", "genesis_data/genesis_txs/ISiC3yaTW9KnZmgs39osghIg0HP8ISh77bzH7u2m55Q.json"}, {copy, "genesis_data/genesis_txs/IQgiEwMLp1bb6muuB_G7Q3sRaaZ3OZHUSjgshUq5YMU.json", "genesis_data/genesis_txs/IQgiEwMLp1bb6muuB_G7Q3sRaaZ3OZHUSjgshUq5YMU.json"}, {copy, "genesis_data/genesis_txs/07u3F6WH-ohqBclh6UanAQ9Tau089eLJrIYM-8qkAbw.json", "genesis_data/genesis_txs/07u3F6WH-ohqBclh6UanAQ9Tau089eLJrIYM-8qkAbw.json"}, {copy, "genesis_data/genesis_txs/nh2sbgjxu6MmU8yGV00w7X4q4XCJETeYE3zVtcj2ldk.json", "genesis_data/genesis_txs/nh2sbgjxu6MmU8yGV00w7X4q4XCJETeYE3zVtcj2ldk.json"}, {copy, "genesis_data/genesis_txs/ydvI6weQPIRj2hcNg4RPqzDpFOhqiTc9iDqQ-fUUl4I.json", "genesis_data/genesis_txs/ydvI6weQPIRj2hcNg4RPqzDpFOhqiTc9iDqQ-fUUl4I.json"}, {copy, "genesis_data/genesis_txs/5Hatfzkj7ivvIsUIDjdOSp-4CdkClH6B7S_SNX0B2-o.json", "genesis_data/genesis_txs/5Hatfzkj7ivvIsUIDjdOSp-4CdkClH6B7S_SNX0B2-o.json"}, {copy, "genesis_data/genesis_txs/1Q2plP5JFTLwdTC27VfIgDJ-ri5h3mVsKxZploTrRmQ.json", 
"genesis_data/genesis_txs/1Q2plP5JFTLwdTC27VfIgDJ-ri5h3mVsKxZploTrRmQ.json"}, {copy, "genesis_data/genesis_txs/YlalzFjBD8CgZxDlI6eNWE3PIIflHGzXyY9VzPPeCFo.json", "genesis_data/genesis_txs/YlalzFjBD8CgZxDlI6eNWE3PIIflHGzXyY9VzPPeCFo.json"}, {copy, "genesis_data/genesis_txs/vaJOh_TzVSoEgbgDyKz6ABzd_wt2-ouBTe0gA1F3oMY.json", "genesis_data/genesis_txs/vaJOh_TzVSoEgbgDyKz6ABzd_wt2-ouBTe0gA1F3oMY.json"}, {copy, "genesis_data/genesis_txs/f6MY8LMCwGbKZqXd4dkCROQK0qFMjS5OJAbZq-UhMGA.json", "genesis_data/genesis_txs/f6MY8LMCwGbKZqXd4dkCROQK0qFMjS5OJAbZq-UhMGA.json"}, {copy, "genesis_data/genesis_txs/_u44CiJCcYiOrGffgZoQSmUrJe8CfYD7Nw0MdPX0tUw.json", "genesis_data/genesis_txs/_u44CiJCcYiOrGffgZoQSmUrJe8CfYD7Nw0MdPX0tUw.json"}, {copy, "genesis_data/genesis_txs/5mt79Uz6p83vdLtYRiByyWLqLI2GZBeSTutDRmzw7tM.json", "genesis_data/genesis_txs/5mt79Uz6p83vdLtYRiByyWLqLI2GZBeSTutDRmzw7tM.json"}, {copy, "genesis_data/genesis_txs/CEXuGv3KvVtkf5gkV0ip3g1FF-i12WIDo6IOigORIZA.json", "genesis_data/genesis_txs/CEXuGv3KvVtkf5gkV0ip3g1FF-i12WIDo6IOigORIZA.json"}, {copy, "genesis_data/genesis_txs/NPLj86idALmTczSq2vrZdTs0bjI-e-KI0j3EOWWpu54.json", "genesis_data/genesis_txs/NPLj86idALmTczSq2vrZdTs0bjI-e-KI0j3EOWWpu54.json"}, {copy, "genesis_data/genesis_txs/IpwG_74praZjsu9L91_KWYHrVTpEDwyHZrsHgum4Z8o.json", "genesis_data/genesis_txs/IpwG_74praZjsu9L91_KWYHrVTpEDwyHZrsHgum4Z8o.json"}, {copy, "genesis_data/genesis_txs/qX9u_AprdhyXAPGfh3C94x9AbxwWx9nJSs7g8FSwITM.json", "genesis_data/genesis_txs/qX9u_AprdhyXAPGfh3C94x9AbxwWx9nJSs7g8FSwITM.json"}, {copy, "genesis_data/genesis_txs/87ieWrloTFUdW7YjJqJcINd1M_PBWCzA1dIRFzF4RKM.json", "genesis_data/genesis_txs/87ieWrloTFUdW7YjJqJcINd1M_PBWCzA1dIRFzF4RKM.json"}, {copy, "genesis_data/genesis_txs/xSkMzFablxREj8H_RwoMseAFk-TCwaLVIZMHqXh5DHY.json", "genesis_data/genesis_txs/xSkMzFablxREj8H_RwoMseAFk-TCwaLVIZMHqXh5DHY.json"}, {copy, "genesis_data/genesis_txs/NE7AIvW60iQL_6aagNTSiaMpmLfAfRwbxau5FZLA10g.json", 
"genesis_data/genesis_txs/NE7AIvW60iQL_6aagNTSiaMpmLfAfRwbxau5FZLA10g.json"}, {copy, "genesis_data/genesis_txs/wUhEm861foyWdxy0SI7CvXRcWuohItlX6Ydqo2NvtY8.json", "genesis_data/genesis_txs/wUhEm861foyWdxy0SI7CvXRcWuohItlX6Ydqo2NvtY8.json"}, {copy, "genesis_data/genesis_txs/1QoMjs6Q3XKklJ9LfovRmGbe4bAy9xY247JfDZqN3Eo.json", "genesis_data/genesis_txs/1QoMjs6Q3XKklJ9LfovRmGbe4bAy9xY247JfDZqN3Eo.json"}, {copy, "genesis_data/genesis_txs/24VRr4yT-_fOndcFYtK2oSO-p9Pm6lNtzQv8E-U43Bc.json", "genesis_data/genesis_txs/24VRr4yT-_fOndcFYtK2oSO-p9Pm6lNtzQv8E-U43Bc.json"}, {copy, "genesis_data/genesis_txs/pVZkxPK8F9VFM5lDp0oTBThaw1RvmwG64wIHFChYJKA.json", "genesis_data/genesis_txs/pVZkxPK8F9VFM5lDp0oTBThaw1RvmwG64wIHFChYJKA.json"}, {copy, "genesis_data/genesis_txs/EnPMt9yzTsxLPR5mD9zUvndxicdYBUNzOlcCPvQlOK8.json", "genesis_data/genesis_txs/EnPMt9yzTsxLPR5mD9zUvndxicdYBUNzOlcCPvQlOK8.json"}, {copy, "genesis_data/genesis_txs/0ooE635sVsd6vdhX3Pb8Ufvuqd7XRjfUbG2eXde_CmI.json", "genesis_data/genesis_txs/0ooE635sVsd6vdhX3Pb8Ufvuqd7XRjfUbG2eXde_CmI.json"}, {copy, "genesis_data/genesis_txs/mJUxc7XyUp1HV_VRoi_54geidr26I9PUaiNL4msSNxk.json", "genesis_data/genesis_txs/mJUxc7XyUp1HV_VRoi_54geidr26I9PUaiNL4msSNxk.json"}, {copy, "genesis_data/genesis_txs/4bPVo0hCI3E-ry2mBjvOZsBpNwPM108NT0vnJCxCeJw.json", "genesis_data/genesis_txs/4bPVo0hCI3E-ry2mBjvOZsBpNwPM108NT0vnJCxCeJw.json"}, {copy, "genesis_data/genesis_txs/lq4SrnweWCHnEhw_AV69gMLyBrPxYOmOdVdRIXkHwOg.json", "genesis_data/genesis_txs/lq4SrnweWCHnEhw_AV69gMLyBrPxYOmOdVdRIXkHwOg.json"}, {copy, "genesis_data/genesis_txs/B4e9FBfqZGBszHAhZqTq-TNjb-oG7rYdlMWrQa4CPZU.json", "genesis_data/genesis_txs/B4e9FBfqZGBszHAhZqTq-TNjb-oG7rYdlMWrQa4CPZU.json"}, {copy, "genesis_data/genesis_txs/576xa7WLVidNoEcYPhAm7OlyYgbrp7Z1RBIfqLbVFzw.json", "genesis_data/genesis_txs/576xa7WLVidNoEcYPhAm7OlyYgbrp7Z1RBIfqLbVFzw.json"}, {copy, "genesis_data/genesis_txs/IJsiiIbd-Qs39TAJ67hiRJFsBye_rgQdU9GBid_PnZw.json", 
"genesis_data/genesis_txs/IJsiiIbd-Qs39TAJ67hiRJFsBye_rgQdU9GBid_PnZw.json"}, {copy, "genesis_data/genesis_txs/CMr-rV5FdlQcRBo4loZzj66EFqwHBmA36tWiRMKGigQ.json", "genesis_data/genesis_txs/CMr-rV5FdlQcRBo4loZzj66EFqwHBmA36tWiRMKGigQ.json"}, {copy, "genesis_data/genesis_txs/gXd75eQL5Yzcn1ba51nORAvb6f_surSnz3xcNlLAxEQ.json", "genesis_data/genesis_txs/gXd75eQL5Yzcn1ba51nORAvb6f_surSnz3xcNlLAxEQ.json"}, {copy, "genesis_data/genesis_txs/eGhF0za2qN5WuadlVZ1iak1S5LxXswHRzIa3j_P-sUM.json", "genesis_data/genesis_txs/eGhF0za2qN5WuadlVZ1iak1S5LxXswHRzIa3j_P-sUM.json"}, {copy, "genesis_data/genesis_txs/00nFXThK86Aog_HfLJc9j0nnXzXSlU6VdGC8qZc5ekI.json", "genesis_data/genesis_txs/00nFXThK86Aog_HfLJc9j0nnXzXSlU6VdGC8qZc5ekI.json"}, {copy, "genesis_data/genesis_txs/Znw-6H_ayGJBReeQm9z9WKulBH1ZzrOovdMsNPcIe_Y.json", "genesis_data/genesis_txs/Znw-6H_ayGJBReeQm9z9WKulBH1ZzrOovdMsNPcIe_Y.json"}, {copy, "genesis_data/genesis_txs/AoSTMf_ZxlcY12bK6_sWj02kssD00K4E-vkHx2vRxG4.json", "genesis_data/genesis_txs/AoSTMf_ZxlcY12bK6_sWj02kssD00K4E-vkHx2vRxG4.json"}, {copy, "genesis_data/genesis_txs/Achd6pqJVZ-1vNMLC977Lu8f20eBmgAv4dIddXql51s.json", "genesis_data/genesis_txs/Achd6pqJVZ-1vNMLC977Lu8f20eBmgAv4dIddXql51s.json"}, {copy, "genesis_data/genesis_txs/HTt6lPYQfcIgUxKPjUt3aQrpwE5e3UA4UT2EI9RxSbw.json", "genesis_data/genesis_txs/HTt6lPYQfcIgUxKPjUt3aQrpwE5e3UA4UT2EI9RxSbw.json"}, {copy, "genesis_data/genesis_txs/QJlE99-614f6XzZ-7VctQjX9DYe5wnO21aHSgg1RhnA.json", "genesis_data/genesis_txs/QJlE99-614f6XzZ-7VctQjX9DYe5wnO21aHSgg1RhnA.json"}, {copy, "genesis_data/genesis_txs/h37LQjpChpTPMquvaxpfFeKt_7oAB5ElDzsdbCQ61n0.json", "genesis_data/genesis_txs/h37LQjpChpTPMquvaxpfFeKt_7oAB5ElDzsdbCQ61n0.json"}, {copy, "genesis_data/genesis_txs/LUdFh6g9auj1LRtk8IUwLoY3e91jIkcSyPKuQQekPY4.json", "genesis_data/genesis_txs/LUdFh6g9auj1LRtk8IUwLoY3e91jIkcSyPKuQQekPY4.json"}, {copy, "genesis_data/genesis_txs/YukfPvGxtYmXFF6wJjDiZcvqmH5YItxwsoLbMxWCVFg.json", 
"genesis_data/genesis_txs/YukfPvGxtYmXFF6wJjDiZcvqmH5YItxwsoLbMxWCVFg.json"}, {copy, "genesis_data/genesis_txs/FmfkuPmh0vkdv_qbjXBUX1sQ-DmwBFbjuC4punobGy0.json", "genesis_data/genesis_txs/FmfkuPmh0vkdv_qbjXBUX1sQ-DmwBFbjuC4punobGy0.json"}, {copy, "genesis_data/genesis_txs/j3l4tvphmVOyVyFkNdS7ulmexBqPqEvsSJrBsjAFJXc.json", "genesis_data/genesis_txs/j3l4tvphmVOyVyFkNdS7ulmexBqPqEvsSJrBsjAFJXc.json"}, {copy, "genesis_data/genesis_txs/GypgExivgblZSA-1n7KjdI0SJOyXwFJkuzzPWS4NID8.json", "genesis_data/genesis_txs/GypgExivgblZSA-1n7KjdI0SJOyXwFJkuzzPWS4NID8.json"}, {copy, "genesis_data/genesis_txs/h0sgGEeQQcmSxg8uyiCOigWtI_r2ex-58nk1xso004c.json", "genesis_data/genesis_txs/h0sgGEeQQcmSxg8uyiCOigWtI_r2ex-58nk1xso004c.json"}, {copy, "genesis_data/genesis_txs/M7oOLbk7TPBanLCS0pzkJSbV1CYoJabbsSDe_pCjhEo.json", "genesis_data/genesis_txs/M7oOLbk7TPBanLCS0pzkJSbV1CYoJabbsSDe_pCjhEo.json"}, {copy, "genesis_data/genesis_txs/LiitFWnODMUA7esa_f49IiMEdN7cTKoKw1cgG2J_eNE.json", "genesis_data/genesis_txs/LiitFWnODMUA7esa_f49IiMEdN7cTKoKw1cgG2J_eNE.json"}, {copy, "genesis_data/genesis_txs/x8KM69OVm6lzslK6ccAE-3EX5sW6CUHBZB-1hbc-J0A.json", "genesis_data/genesis_txs/x8KM69OVm6lzslK6ccAE-3EX5sW6CUHBZB-1hbc-J0A.json"}, {copy, "genesis_data/genesis_txs/TGp-18LYjSWQQ36gs5prU-vDgteOL79aywxXoDS-w0c.json", "genesis_data/genesis_txs/TGp-18LYjSWQQ36gs5prU-vDgteOL79aywxXoDS-w0c.json"} ]}, {dev_mode, true}, {include_erts, false}, {extended_start_script, false}, {extended_start_script_hook, [ {post_start, [ wait_for_vm_start, {pid, "/tmp/arweave.pid"}, {wait_for_process, ar_sup} ]} ]} ]}. {pre_hooks, [ {"(darwin|linux)", compile, "make --jobs --max-load 4 -C apps/arweave/lib"}, {"(freebsd|netbsd|openbsd)", compile, "gmake --jobs --max-load 4 -C apps/arweave/lib"}, % Compile NIFs {"(linux)", compile, "env AR=gcc-ar make all -C apps/arweave/c_src"}, {"(darwin)", compile, "make all -C apps/arweave/c_src"}, {"(freebsd|netbsd|openbsd)", compile, "gmake all -C apps/arweave/c_src"} ]}. 
{post_hooks, [ {"(darwin|linux)", clean, "make -C apps/arweave/lib clean"}, {"(freebsd|netbsd|openbsd)", clean, "gmake -C apps/arweave/lib clean"}, % Clean NIFs {"(linux|darwin)", clean, "make -C apps/arweave/c_src clean"}, {"(freebsd|netbsd|openbsd)", clean, "gmake -C apps/arweave/c_src clean"}, {"(linux|darwin|freebsd|netbsd|openbsd)", clean, "make -C apps/arweave/lib/openssl-sha-lite clean"} ]}. {erl_opts, [ {i, "apps"} ]}. {profiles, [ {prod, [ {relx, [ {dev_mode, false}, {include_erts, true} ]} ]}, {test, [ {deps, [{meck, "1.1.0"}]}, {erl_opts, [ {d, 'DEBUG', debug}, {d, 'FORKS_RESET', true}, {d, 'NETWORK_NAME', "arweave.localtest"}, {d, 'AR_TEST', true}, %% 8 * 256 * 1024 {d, 'PARTITION_SIZE', 2_000_000}, %% If PARTITION_SIZE changes, REPLICA_2_9_ENTROPY_COUNT might need to change %% as well. %% 2_000_000 / 32_768 = 61.03515625 %% (61 + 3) = 64 - the nearest multiple of 32 {d, 'REPLICA_2_9_ENTROPY_COUNT', 64}, {d, 'STRICT_DATA_SPLIT_THRESHOLD', 786432}, %% lower multiplier to allow single-block solutions in tests {d, 'POA1_DIFF_MULTIPLIER', 1}, %% use sha256 instead of randomx to speed up tests {d, 'STUB_RANDOMX', true}, %% Configure VDF to be fast for tests, but not too fast to avoid %% excessive pressure on the mining server triggering a log flood. {d, 'VDF_DIFFICULTY', 6000}, % ~10ms/step {d, 'INITIAL_VDF_DIFFICULTY', 6000}, {d, 'REPLICA_2_9_PACKING_DIFFICULTY', 2} ]}, {relx, [ {overlay, [{template, "priv/templates/extended_bin", "bin/arweave"}]} ]} ]}, {e2e, [ {deps, [{meck, "1.1.0"}]}, {erl_opts, [ {src_dirs, ["src", "test", "e2e"]}, {d, 'DEBUG', debug}, {d, 'FORKS_RESET', true}, {d, 'NETWORK_NAME', "arweave.e2e"}, {d, 'AR_TEST', true}, {d, 'PARTITION_SIZE', 2_000_000}, %% If PARTITION_SIZE changes, REPLICA_2_9_ENTROPY_COUNT might need to change %% as well. 
%% 2_000_000 / 32_768 = 61.03515625 %% (61 + 3) = 64 - the nearest multiple of 32 {d, 'REPLICA_2_9_ENTROPY_COUNT', 64}, {d, 'STRICT_DATA_SPLIT_THRESHOLD', 786432}, %% The partition upper bound only gets increased when the vdf session changes %% (i.e. every ?NONCE_LIMITER_RESET_FREQUENCY VDF steps), so we need to set %% the reset frequency low enough that the VDF session can change during a %% single e2e test run. {d, 'NONCE_LIMITER_RESET_FREQUENCY', 10}, {d, 'COMPOSITE_PACKING_EXPIRATION_PERIOD_BLOCKS', 0} ]}, {relx, [ {overlay, [{template, "priv/templates/extended_bin", "bin/arweave"}]} ]} ]}, {localnet, [ {erl_opts, [ {src_dirs, ["src", "test"]}, %% All peers in your localnet must specify the same NETWORK_NAME, and all requests %% to nodes in your network must specify NETWORK_NAME in their X-Network header. %% If you clear this value, the mainnet will be assumed. {d, 'NETWORK_NAME', "arweave.localnet"}, %% When a request is received without specifing the X-Network header, this network %% name is assumed. Rather than change this, it's better to make sure your clients %% specify the X-Network name as this will avoid potential issues (e.g. %% accidentally transferring mainnet AR tokens when you only intended to transfer %% localnet tokens). This variable is provided for situations where you can't %% control the client headers, need for them to be able to make requests to your %% localnet, and can manage the risk of an accidental mainnet request getting %% processed. 
%% {d, 'DEFAULT_NETWORK_NAME', "arweave.localnet"}, {d, 'LOCALNET', true}, export_all, no_inline ]}, {relx, [ {release, {arweave, "2.9.6-alpha1"}, [ arweave_config, arweave_limiter, {arweave_diagnostic, load}, {arweave, load}, {recon, load}, b64fast, jiffy, rocksdb, prometheus_process_collector ]}, {dev_mode, false}, {include_erts, true} ]} ]}, {testnet, [ {deps, [{meck, "1.1.0"}]}, {erl_opts, [ %% ------------------------------------------------------------------------------------- %% Required configuration for testnet %% All values below must be set for the testnet to function properly %% ------------------------------------------------------------------------------------- {d, 'TESTNET', true}, {d, 'NETWORK_NAME', "arweave.fast.testnet"}, {d, 'TEST_WALLET_ADDRESS', "MXeFJwxb4y3vL4In3oJu60tQGXGCzFzWLwBUxnbutdQ"}, {d, 'TOP_UP_TEST_WALLET_AR', 1000000}, %% The following values all assume the testnet is restarted from height 1588329 using %% the flag: %% start_from_block 3lIjFuR6nMYwELWwQqZxYn_sj1tESmZgk6bVZewwxtr0X6a8mXG0JH7KAV_5AE2s %% TESTNET_FORK_HEIGHT should meet the following requirements: %% 1. Set to a difficulty retargeting height - i.e. a multiple of %% ?RETARGET_BLOCKS (currently 10) %% 2. Set to 1 more than the testnet initialization height. %% 3. Set to the height of a block which has not yet been mined on the %% testnet, or one which was already mined on the testnet (i.e. after the testnet %% was forked from mainnet) %% %% For example, if the testnet was forked off mainnet at %% height 1265219 (either through the use of start_from_latest_state or %% start_from_block), then TESTNET_FORK_HEIGHT should be set to 1265220. 
{d, 'TESTNET_FORK_HEIGHT', 1588330}, %% ------------------------------------------------------------------------------------- %% Optional configuration for testnet %% Any values below here are not required and can be cleared/deleted as needed %% ------------------------------------------------------------------------------------- {d, 'TESTNET_REWARD_HISTORY_BLOCKS', 120}, {d, 'TESTNET_LEGACY_REWARD_HISTORY_BLOCKS', 40}, {d, 'TESTNET_LOCKED_REWARDS_BLOCKS', 40}, {d, 'TESTNET_TARGET_BLOCK_TIME', 45}, {d, 'FORK_2_9_HEIGHT', 1588500} ]}, {relx, [ {dev_mode, false}, {include_erts, true}, {overlay, [ {copy, "scripts/testnet/benchmark", "bin/benchmark"} ]} ]} ]} ]}. % for some reason, probably due to the number of modules % arweave got, when testing with cover, the compilation % takes a while. to avoid this, a few modules have been % excluded. If it's not enough, the arweave application % can be disabled as well. {cover_excl_mods, [ ar_block_pre_validator_sup, ar_bridge_sup, ar_chunk_storage_sup, ar_data_root_sync_sup, ar_data_sync_sup, ar_events_sup, ar_header_sync_sup, ar_http_sup, ar_kv_sup, ar_mining_sup, ar_node_sup, ar_nonce_limiter_sup, ar_packing_sup, ar_peer_worker_sup, ar_poller_sup, ar_rx4096_nif, ar_rx512_nif, ar_rxsquared_nif, ar_storage_sup, ar_sup, ar_sync_record_sup, ar_tx_emitter_sup, ar_vdf_nif, ar_verify_chunks_sup, ar_webhook_sup, secp256k1_nif ]}. {cover_excl_apps, [ arweave ]}. % generate surefire report by default when using eunit with % rebar3 {eunit_opts, [ {report,{eunit_surefire,[{dir,"./_build/test/surefire"}]}} ]}. ================================================ FILE: release_notes/N.2.9.5/README.md ================================================ **This is a substantial update. This software was prepared by the Digital History Association, in cooperation with the wider Arweave ecosystem.** This release is primarily a bug fix, stability, and performance release. It includes all changes from all of the 2.9.5 alpha releases. 
Full details can be found in the release notes for each alpha: - [alpha1](https://github.com/ArweaveTeam/arweave/releases/tag/N.2.9.5-alpha1) - [alpha2](https://github.com/ArweaveTeam/arweave/releases/tag/N.2.9.5-alpha2) - [alpha3](https://github.com/ArweaveTeam/arweave/releases/tag/N.2.9.5-alpha3) - [alpha4](https://github.com/ArweaveTeam/arweave/releases/tag/N.2.9.5-alpha4) - [alpha5](https://github.com/ArweaveTeam/arweave/releases/tag/N.2.9.5-alpha5) - [alpha6](https://github.com/ArweaveTeam/arweave/releases/tag/N.2.9.5-alpha6) Some of the changes described above were to address regressions introduced in a prior alpha. The full set of changes you can expect when upgrading from 2.9.4.1 is described below. A special call out to the mining community members who installed and tested each of the alpha releases. Their help was critical in addressing regressions, fixing bugs, and implementing improvements. Thank you! Full list of contributors [here](#community-involvement). ## New Binaries This release includes an updated set of pre-built binaries: - Ubuntu 22.04, erlang R26 - Ubuntu 24.04, erlang R26 - rocky9, erlang R26 - MacOS, erlang R26 The default `linux` release refers to Ubuntu 22.04, erlang R26. Going forward we recommend Arweave be built with erlang R26 rather than erlang R24. The MacOS binaries are intended to be used for VDF Servers. Packing and mining on MacOS is still unsupported. ## Changes to miner config - Several changes to options related to repack-in-place. See [below](#support-for-repack-in-place-from-the-replica29-format). - `vdf`: see [below](#optimized-vdf). - Several changes to options related to the `verify` tool. See [below](#verify-tool-improvements). - `disable_replica_2_9_device_limit`: Disable the device limit for the replica.2.9 format. By default, at most one worker will be active per physical disk at a time, setting this flag removes this limit allowing multiple workers to be active on a given physical disk. 
- Several options to manually configure low level network performance. See help for options starting with `network.`, `http_client.` and `http_api.`. - `mining_cache_size_mb`: the default is set to 100MiB per partition being mined (e.g. if you leave `mining_cache_size_mb` unset while mining 64 partitions, your mining cache will be set to 6,400 MiB). - The process for running multiple nodes on a single server has changed. Each instance will need to set distinct/unique values for the `ARNODE` and `ARCOOKIE` environment variables. Here is an example script to launch 2 nodes one named `exit` and one named `miner`: ``` #!/usr/bin/env bash ARNODE=exit@127.0.0.1 \ ARCOOKIE=exit \ screen -dmSL arweave.exit -Logfile ./screenlog.exit \ ./bin/start config_file config.exit.json; ARNODE=miner@127.0.0.1 \ ARCOOKIE=miner \ screen -dmSL arweave.miner -Logfile ./screenlog.miner \ ./bin/start config_file config.miner.json ``` ## Optimized VDF This release includes the optimized VDF algorithm developed by Discord user `hihui`. To use this optimized VDF algorithm set the `vdf hiopt_m4` config option. By default the node will run with the legacy `openssl` implementation. ## Support for repack-in-place from the `replica.2.9` format This release introduces support for repack-in-place from `replica.2.9` to `unpacked` or to a different `replica.2.9` address. In addition we've made several performance improvements and fixed a number of edge case bugs which may previously have caused some chunks to be skipped by the repack process. ### Performance Due to how replica.2.9 chunks are processed, the parameters for tuning the repack-in-place performance have changed. There are 4 main considerations: - **Repack footprint size**: `replica.2.9` chunks are grouped in footprints of chunks. A full footprint is 1024 chunks distributed evenly across a partition. - **Repack batch size**: The repack-in-place process reads some number of chunks, repacks them, and then writes them back to disk. 
The batch size controls how many contiguous chunks are read at once. Previously a batch size of 10 would mean that 10 chunks would be read, repacked, and written. However in order to handle `replica.2.9` data efficiently, a batch size indicates the number of *footprints* to process at once. So a batch size of 10 means that 10 footprints will be read, repacked, and written. Since a full footprint is 1024 chunks, the amount of memory required to process a batch size of 10 is now 10,240 chunks or roughly 2.5 GiB. - **Available RAM**: The footprint size and batch size drive how much RAM is required by the repack in place process. And if you're repacking multiple partitions at once, the RAM requirements can grow quickly. - **Disk IO**: If you determine that disk IO is your bottleneck, you'd want to increase the batch size as much as you can as reading contiguous chunks are generally much faster than reading non-contiguous chunks. - **CPU**: However in some cases you may find that CPU is your bottleneck - this can happen when repacking from a legacy format like `spora_2_6`, or can happen when repacking many partitions between 2 `replica.2.9` addresses. The saving grace here is that if CPU is your bottleneck, you can reduce your batch size or footprint size to ease off on your memory utilization. To control all these factors, repack-in-place has 2 config options: - `repack_batch_size`: controls the batch size - i.e. the number of footprints processed at once - `repack_cache_size_mb`: sets the total amount of memory to allocate to the repack-in-place process *per* partition. So if you set `repack_cache_size_mb` to `2000` and are repacking 4 partitions, you can expect the repack-in-place process to consume roughly 8 GiB of memory. Note: the node will automatically set the footprint size based on your configured batch and cache sizes - this typically means that it will *reduce* the footprint size as much as needed. 
A smaller footprint size will *increase* your CPU load as it will result in your node generating the same entropy multiple times. For example, if your footprint size is 256 the node will need to generate the same entropy 4 times in order to process all 1024 chunks in the full footprint. ### Debugging This release also includes a new option on the `data-doctor inspect` tool that may help with debugging packing issues. ``` /bin/data-doctor inspect bitmap ``` Example: `/bin/data-doctor inspect bitmap /opt/data 36,En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI.replica.2.9` Will generate a bitmap where every pixel represents the packing state of a specific chunk. The bitmap is laid out so that each *vertical* column of pixels is a complete entropy footprint. Here is an example of a bitmap: ![bitmap_storage_module_5_En2eqsVJARnTVOSh723PBXAKGmKgrGSjQ2YIGwE_ZRI replica 2 9](https://github.com/user-attachments/assets/d84728c6-6cc0-447d-8f98-2db793fd66a4) This bitmap shows the state of one node's partition 5 that has been repacked to replica.2.9. The green pixels are chunks that are in the expected replica.2.9 format, the black pixels are chunks that are missing from the miner's dataset, and the pink pixels are chunks that are too small to be packed (prior to partition ~9, users were allowed to pay for chunks that were smaller than 256KiB - these chunks are stored `unpacked` and can't be packed). ## Performance Improvements - Improvements to both syncing speed and memory use while syncing - In our tests using solo as well as coordinated miners configured to mine while syncing many partitions, we observed steady memory use and full expected hashrate. This improves on 2.9.4.1 performance. Notably: the same tests run on 2.9.4.1 showed growing memory use, ultimately causing an OOM. - Reduce the volume of unnecessary network traffic due to a flood of `404` requests when trying to sync chunks from a node which only serves `replica.2.9` data. 
Note: the benefit of this change will only be seen when most of the nodes in the network upgrade. - Performance improvements to HTTP handling that should improve performance more generally. - Optimization to speed up the collection of peer intervals when syncing. This can improve syncing performance in some situations. - Fix a bug which could cause syncing to occasionally stall out. - Optimize the shutdown process. This should help with, but not fully address, the slow node shutdown issues. - Fix a bug where a VDF client might get pinned to a slow or stalled VDF server. - Several updates to the mining cache logic. These changes address a number of edge case performance and memory bloat issues that can occur while mining. - Improve the transaction validation performance, this should reduce the frequency of "desyncs". I.e. nodes should now be able to handle a higher network transaction volume without stalling - Do not delay ready_for_mining on validator nodes - Make sure identical tx-status pairs do not cause extra mempool updates - Cache the owner address once computed for every TX - Reduce the time it takes for a node to join the network: - Do not re-download local blocks on join - Do not re-write written txs on join - Reduce per peer retry budget on join 10 -> 5 - Fix edge case that could occasionally cause a mining pool to reject a replica.2.9 solution. - Fix edge case crash that occurred when a coordinated miner timed out while fetching partitions from peers - Fix bug where storage module crossing weave end may cause syncing stall - Fix bug where crash during peer interval collection may cause syncing stall - Fix race condition where we may not detect double-signing - Optionally fix broken chunk storage records on the fly - Set `enable fix_broken_chunk_storage_record` to turn the feature on. 
- Several fixes to improve the Arweave shutdown process - Close hanging HTTP connections - Prevent new HTTP connections from being created during shutdown - Fix a timeout issue In addition to the above fixes, we have some guidance on how to interpret some new logging and metrics: ### Guidance: 2.9.5 hashrate appears to be slower than 2.9.4.1 (Reported by discord users EvM, Lawso2517, Qwinn) #### Symptoms - 2.9.4.1 hashrate is higher than 2.9.5 - 2.9.4.1 hashrate when solo mining might even be higher than the "ideal" hashrate listed in the mining report or grafana metrics #### Resolution The 2.9.4.1 hashrate included invalid hashes and the 2.9.5 hashrate, although lower, includes only valid hashes. #### Root Cause 2.9.4.1 (and earlier releases) had a bug which caused miners to generate hashes off of entropy in addition to valid packed data. The replica.2.9 data format lays down a full covering of entropy in each storage module before adding packed chunks. The result is that for any storage module with less than 3.6TB of packed data, there is some amount of data on disk that is just entropy. A bug in the 2.9.4.1 mining algorithm generated hashes off of this entropy causing an inflated hashrate. Often the 2.9.4.1 hashrate is above the estimated "ideal" hashrate even when solo mining. Another symptom of this bug is the `chunk_not_found` error occasionally reported by miners. This occurs under 2.9.4.1 (and earlier releases) when the miner hashes a range of entropy and generates a hash that exceeds the network difficulty. The miner believes this to be a valid solution and begins to build a block. At some point in the block building process the miner has to validate and include the packed chunk data. However since no packed chunk data exists (only entropy), the solution fails and the error is printed. 2.9.5 fixes this bug so that miners correctly exclude entropy data when mining. 
This means that under 2.9.5 and later releases miners spend fewer resources hashing entropy data, and generate fewer failed solution errors. The reported hashrate on 2.9.5 is lower than 2.9.4.1 because the invalid hashes are no longer being counted. ### Guidance: `cache_limit_exceeded` warning during solo and coordinated mining (Reported by discord users BerryCZ, mousetu, radion_nizametdinov, qq87237850, Qwinn, Vidiot) #### Symptoms - Logs show `mining_worker_failed_to_reserve_cache_space` warnings, with reason set to `cache_limit_exceeded` #### Resolution The warning, if seen periodically, is expected and safe to ignore. #### Root Cause All VDF servers - even those with the exact same VDF time - will be on slightly different steps. This is because new VDF epochs are opened roughly every 20 minutes and are opened when a block is added to the chain. Depending on when your VDF server receives that block it may start calculating the new VDF chain earlier or later than other VDF servers. This can cause there to be a gap in the VDF steps generated by two different servers even if they are able to compute new VDF steps at the exact same speed. When a VDF server receives a block that is ahead of it in the VDF chain, it is able to quickly validate and use all the new VDF steps. This can cause the associated miners to receive a batch of VDF steps all at once. In these situations, the miner may exceed its mining cache causing the `cache_limit_exceeded` warning. However this ultimately does not materially impact the miner's true hashrate. A miner will process VDF steps in reverse order (latest steps first) as those are the most valuable steps. The steps being dropped from the cache will be the oldest steps. Old steps *may* still be useful, but there is a far greater chance that any solution mined off an old step will be orphaned. The older the VDF step, the less useful it is. **TLDR:** the warning, if seen periodically, is expected and safe to ignore. 
**Exception:** If you are continually seeing the warning (i.e. not in periodic batches, but constantly and all the time) it may indicate that your miner is not able to keep up with its workload. This can indicate a hardware configuration issue (e.g. disk read rates are too slow), or perhaps a hardware capacity issue (E.g. CPU not fast enough to run hashes on all attached storage module), or some other performance-related issue. ## Prometheus metrics - `ar_mempool_add_tx_duration_milliseconds`: The duration in milliseconds it took to add a transaction to the mempool. - `reverify_mempool_chunk_duration_milliseconds`: The duration in milliseconds it took to reverify a chunk of transactions in the mempool. - `drop_txs_duration_milliseconds`: The duration in milliseconds it took to drop a chunk of transactions from the mempool - `del_from_propagation_queue_duration_milliseconds`: The duration in milliseconds it took to remove a transaction from the propagation queue after it was emitted to peers. - `chunk_storage_sync_record_check_duration_milliseconds`: The time in milliseconds it took to check the fetched chunk range is actually registered by the chunk storage. - `fixed_broken_chunk_storage_records`: The number of fixed broken chunk storage records detected when reading a range of chunks. - `mining_solution`: allows tracking mining solutions. Uses labels to differentiate the mining solution state. - `chunks_read`: The counter is incremented every time a chunk is read from `chunk_storage` - `chunk_read_rate_bytes_per_second`: The rate, in bytes per second, at which chunks are read from storage. The type label can be 'raw' or 'repack'. - `chunk_write_rate_bytes_per_second`: The rate, in bytes per second, at which chunks are written to storage. - `repack_chunk_states`: The count of chunks in each state. 'type' can be 'cache' or 'queue'. - `replica_2_9_entropy_generated`: The number of bytes of replica.2.9 entropy generated. 
- `mining_server_chunk_cache_size`: now includes additional label `type` which can take the value `total` or `reserved`. - `mining_server_tasks`: Incremented each time the mining server adds a task to the task queue. - `mining_vdf_step`: Incremented each time the mining server processes a VDF step. - `kryder_plus_rate_multiplier`: Kryder+ rate multiplier. - `endowment_pool_take`: Value we take from endowment pool to miner to compensate difference between expected and real reward. - `endowment_pool_give`: Value we give to endowment pool from transaction fees. ## `verify` Tool Improvements This release contains several improvements to the `verify` tool. Several miners have reported block failures due to invalid or missing chunks. The hope is that the `verify` tool improvements in this release will either allow those errors to be healed, or provide more information about the issue. ### New `verify` modes The `verify` tool can now be launched in `log` or `purge` modes. In `log` mode the tool will log errors but will not flag the chunks for healing. In `purge` mode all bad chunks will be marked as invalid and flagged to be resynced and repacked. To launch in `log` mode specify the `verify log` flag. To launch in `purge` mode specify the `verify purge` flag. Note: `verify true` is no longer valid and will print an error on launch. ### Chunk sampling The `verify` tool will now sample 1,000 chunks and do a full unpack and validation of the chunk. This sampling mode is intended to give a statistical measure of how much data might be corrupt. To change the number of chunks sampled you can use the `verify_samples` option. E.g. `verify_samples 500` will have the node sample 500 chunks. ### More invalid scenarios tested This latest version of the `verify` tool detects several new types of bad data. The first time you run the `verify` tool we recommend launching it in `log` mode and running it on a single partition. 
This should avoid any surprises due to the more aggressive detection logic. If the results are as you expect, then you can relaunch in `purge` mode to clean up any bad data. In particular, if you've misnamed your `storage_module` the `verify` tool will invalidate *all* chunks and force a full repack - running in `log` mode first will allow you to catch this error and rename your `storage_module` before purging all data. ## Miscellaneous - Fix several issues which could cause a node to "desync". Desyncing occurs when a node gets stuck at one block height and stops advancing. - Add TX polling so that a node will pull missing transactions in addition to receiving them via gossip - Add support for DNS pools (multiple IPs behind a single DNS address). - Add webhooks for the entire mining solution lifecycle. New `solution` webhook added with multiple states `solution_rejected`, `solution_stale`, `solution_partial`, `solution_orphaned`, `solution_accepted`, and `solution_confirmed`. - Add a `verify` flag to the `benchmark-vdf` script - When running `benchmark-vdf` you can specify the `verify true` flag to have the script verify the VDF output against a slower "debug" VDF algorithm. - Support CMake 4 on MacOS - Bug fixes to address `chunk_not_found` and `sub_chunk_mismatch` errors. ## Community involvement A huge thank you to all the Mining community members who contributed to this release by testing the alpha releases, providing feedback, and helping us debug issues! 
Discord users (alphabetical order): - AraAraTime - BerryCZ - bigbang - BloodHunter - Butcher_ - core_1_ - dlmx - doesn't stay up late - dzeto - edzo - Evalcast - EvM - Fox Malder - grumpy.003 - hihui - Iba Shinu - JanP - JamsJun - JF - jimmyjoe7768 - lawso2517 - MaSTeRMinD - MCB - Merdi Kim - metagravity - Methistos - Michael | Artifact - mousetu - Niiiko - qq87237850 - Qwinn - radion_nizametdinov - RedMOoN - sam - sk - smash - sumimi - T777 - tashilo - Thaseus - U genius - Vidiot - Wednesday - wybiacx ================================================ FILE: release_notes/N.2.9.5-alpha5/README.md ================================================ **This is an alpha update and may not be ready for production use. This software was prepared by the Digital History Association, in cooperation from the wider Arweave ecosystem.** This release includes several syncing and mining performance improvements. It passes all automated tests and has undergone a base level of internal testing, but is not considered production ready. We only recommend upgrading if you wish to take advantage of the new performance improvements. ## Performance improvements In all cases we ran tests on a full-weave solo miner, as well as a full-weave coordinated mining cluster. We believe the observed performance improvements are generalizable to other miners, but, as always, the performance observed by a given miner is often influenced by many factors that we are not been able to test for. TLDR: your mileage may vary. ### Syncing Improvements to both syncing speed and memory use while syncing. The improvements address some regressions that were reported in the 2.9.5 alphas, but also improve on 2.9.4.1 performance. ### Mining This release addresses the significant hashrate loss that was observed during Coordinated Mining on the 2.9.5 alphas. 
### Syncing + Mining In our tests using solo as well as coordinated miners configured to mine while syncing many partitions, we observed steady memory use and full expected hashrate. This addresses some regressions that were reported in the 2.9.5 alphas, but also improves on 2.9.4.1 performance. Notably: the same tests run on 2.9.4.1 showed growing memory use, ultimately causing an OOM. ## Community involvement A huge thank you to all the Mining community members who contributed to this release by identifying and investigating bugs, sharing debug logs and node metrics, and providing guidance on performance tuning! Discord users (alphabetical order): - BerryCZ - Butcher_ - edzo - Evalcast - EvM - JF - lawso2517 - MaSTeRMinD - qq87237850 - Qwinn - radion_nizametdinov - RedMOoN - smash - T777 - Vidiot ================================================ FILE: release_notes/N.2.9.5-alpha6/README.md ================================================ **This is an alpha update and may not be ready for production use. This software was prepared by the Digital History Association, in cooperation from the wider Arweave ecosystem.** This release addresses several of the mining performance issues that had been reported on previous alphas. It passes all automated tests and has undergone a base level of internal testing, but is not considered production ready. We only recommend upgrading if you wish to take advantage of the new performance improvements. ## Fix: Crash during coordinated mining when a solution is found (Reported by discord user Vidiot) ### Symptoms - After mining well for some time, hashrate dropped to 0 - Logs had messages like: `Generic server ar_mining_server terminating. 
Reason: {badarg,[{ar_block,compute_h1,3` ## Fix: `session_not_found` error during coordinated mining (Reported by discord user Qwinn) ### Symptoms - Hahrate lower than expected - Logs had `mining_worker_failed_to_add_chunk_to_cache` errors, with reason set to `session_not_found` ## Guidance: `cache_limit_exceeded` warning during solo and coordinated mining (Reported by discord users BerryCZ, mousetu, radion_nizametdinov, qq87237850, Qwinn, Vidiot) ### Symptoms - Logs show `mining_worker_failed_to_reserve_cache_space` warnings, with reason set to `cache_limit_exceeded` ### Resolution The warning, if seen periodically, is expected and safe to ignore. ### Root Cause All VDF servers - even those with the exact same VDF time - will be on slightly different steps. This is because new VDF epochs are opened roughly every 20 minutes and are opened when a block is added to the chain. Depending on when your VDF server receives that block it may start calculating the new VDF chain earlier or later than other VDF servers. This can cause there to be a gap in the VDF steps generated by two different servers even if they are able to compute new VDF steps at the exact same speed. When a VDF server receives a block that is ahead of it in the VDF chain, it is able to quickly validate and use all the new VDF steps. This can cause the associated miners to receive a batch of VDF steps all at once. In these situations, the miner may exceed its mining cache causing the `cache_limit_exceeded` warning. However this ultimately does not materially impact the miner's true hashrate. A miner will process VDF steps in reverse order (latest steps first) as those are the most valuable steps. The steps being dropped from the cache will be the oldest steps. Old steps *may* still be useful, but there is a far greater chance that any solution mined off an old step will be orphaned. The older the VDF step, the less useful it is. 
**TLDR:** the warning, if seen periodically, is expected and safe to ignore. **Exception:** If you are continually seeing the warning (i.e. not in periodic batches, but constantly and all the time) it may indicate that your miner is not able to keep up with its workload. This can indicate a hardware configuration issue (e.g. disk read rates are too slow), or perhaps a hardware capacity issue (E.g. CPU not fast enough to run hashes on all attached storage module), or some other performance-related issue. ### Guidance - This alpha increases the default cache size from 4 steps to 20 VDF steps. This should noticeably reduce (but not eliminate) the frequency of the `cache_limit_exceeded` warning - If you want to increase it further you can set the `mining_cache_size_mb` option. ## Guidance: 2.9.5-alphaX hashrate appears to be slower than 2.9.4.1 (Reported by discord users EvM, Lawso2517, Qwinn) ### Symptoms - 2.9.4.1 hashrate is higher than 2.9.5-alphaX - 2.9.4.1 hashrate when solo mining might even be higher than the "ideal" hashrate listed in the mining report or grafana metrics ### Resolution The 2.9.4.1 hashrate included invalid hashes and the 2.9.5-alpha6 hashrate, although lower, includes only valid hashes. ### Root Cause 2.9.4.1 (and earlier releases) had a bug which caused miners to generate hashes off of entropy in addition to valid packed data. The replica.2.9 data format lays down a full covering of entropy in each storage module before adding packed chunks. The result that is that for any storage module with less than 3.6TB of packed data, there is some amount of data on disk that is just entropy. A bug in the 2.9.4.1 mining algorithm generated hashes off of this entropy causing an inflated hashrate. Often the 2.9.4.1 hashrate is above the estimated "ideal" hashrate even when solo mining. Another symptom of this bug is the `chunk_not_found` error occasionally reported by miners. 
This occurs under 2.9.4.1 (and earlier releases) when the miner hashes a range of entropy and generates a hash that exceeds the network difficulty. The miner believes this to be a valid solution and begins to build a block. At some point in the block building process the miner has to validate and include the packed chunk data. However since no packed chunk data exists (only entropy), the solution fails and the error is printed. 2.9.5-alpha2 fixed this bug so that miners correctly exclude entropy data when mining. This means that under 2.9.5-alpha2 and later releases miners spend fewer resources hashing entropy data, and generate fewer failed solution errors. The reported hashrate on 2.9.5-alpha2 is lower than 2.9.4.1 because the invalid hashes are no longer being counted. ## Community involvement A huge thank you to all the Mining community members who contributed to this release by identifying and investigating bugs, sharing debug logs and node metrics, and providing guidance on performance tuning! Discord users (alphabetical order): - BerryCZ - EvM - JanP - lawso2517 - mousetu - qq87237850 - Qwinn - radion_nizametdinov - smash - T777 - Vidiot ================================================ FILE: release_notes/N.2.9.5.1/README.md ================================================ # Arweave 2.9.5.1 Patch Release Notes ### This release introduces various stability and validation enhancements. Several input validation steps could crash on invalid values, in some cases halting the arweave node. The patch includes graceful validation of certain inputs and defensive deserialization of local binaries. ================================================ FILE: release_notes/README.md ================================================ # Arweave Releases Process for doing an Arweave release ## Run tests 1. 
Make sure the automated unit tests are green for both: - [Ubuntu](https://github.com/ArweaveTeam/arweave/actions/workflows/test-amd64-ubuntu-22.04.yml) - [MacOS](https://github.com/ArweaveTeam/arweave/actions/workflows/test-arm64-macos-15.yml) 2. Optionally run the `e2e` test locally (these test can take a couple hours to complete): `./bin/e2e` ## Release procedure This section explains how arweave is released using Github Actions. 1. find a release version using `N.X.Y.Z.*` format, for example `N.9.8.7-alpha2`. During the next step, it will be called `${release_version}`. 2. create a new release notes containing the instruction of the new release. ```sh mkdir release_notes/${release_version} touch release_notes/${release_version}/README.md cat > release_notes/${release_version}/README.md < release_notes/N.X.Y.Z.P/README.md <&1 >/dev/null then echo -e "\033[0;31m===> Test failed, printing the ${peer} node's output...\033[0m" cat ${peer}-*.out else echo -e "\033[0;31m===> Test failed without ${peer} output...\033[0m" fi } # check if test can be restarted _check_retry() { local first_line_peer1 echo -e "\033[0;32m===> Checking for retry\033[0m" # For debugging purposes, print the peer1 output if the tests failed if ls peer1-*.out 2>&1 >/dev/null then first_line_peer1=$(head -n 1 peer1-*.out) fi first_line_main=$(head -n 1 main.out) echo -e "\033[0;31m===> First line of peer1 node's output: $first_line_peer1\033[0m" echo -e "\033[0;31m===> First line of main node's output: $first_line_main\033[0m" # Check if it is a retryable error if [[ "$first_line_peer1" == "Protocol 'inet_tcp': register/listen error: "* ]] then echo "Retrying test because of inet_tcp error..." RETRYABLE=1 sleep 1 elif [[ "$first_line_peer1" == "Protocol 'inet_tcp': the name"* ]] then echo "Retrying test because of inet_tcp clash..." RETRYABLE=1 sleep 1 elif [[ "$first_line_main" == *"econnrefused"* ]] then echo "Retrying test because of econnrefused..." 
RETRYABLE=1 sleep 1 else _print_peer_logs peer1 _print_peer_logs peer2 _print_peer_logs peer3 _print_peer_logs peer4 fi } # set github environment _set_github_env() { if test -z "${GITHUB_ENV}" then echo "GITHUB_ENV variable not set" return 1 fi local exit_code=${1} # Set the exit_code output variable using Environment Files echo "exit_code=${exit_code}" >> ${GITHUB_ENV} return 0 } ###################################################################### # main script ###################################################################### MODE="${1}" NAMESPACE_FLAG="${2}" PWD=$(pwd) EXIT_CODE=0 export PATH="${PWD}/_build/erts/bin:${PATH}" export ERL_EPMD_ADDRESS="127.0.0.1" export NAMESPACE="${NAMESPACE_FLAG}" if test "${MODE}" = "e2e" then export ERL_PATH_ADD="$(echo ${PWD}/_build/e2e/lib/*/ebin)" export ERL_PATH_TEST="${PWD}/_build/e2e/lib/arweave/e2e" else export ERL_PATH_ADD="$(echo ${PWD}/_build/test/lib/*/ebin)" export ERL_PATH_TEST="${PWD}/_build/test/lib/arweave/test" fi export ERL_PATH_CONF="${PWD}/config/sys.config" export ERL_TEST_OPTS="-pa ${ERL_PATH_ADD} ${ERL_PATH_TEST} -config ${ERL_PATH_CONF}" RETRYABLE=1 while [[ $RETRYABLE -eq 1 ]] do RETRYABLE=0 set +e set -x NODE_NAME="main-${NAMESPACE}@127.0.0.1" COOKIE=${NAMESPACE} erl +S 4:4 $ERL_TEST_OPTS \ -noshell \ -name "${NODE_NAME}" \ -setcookie "${COOKIE}" \ -run ar ${MODE} "${NAMESPACE}" \ -s init stop 2>&1 | tee main.out EXIT_CODE=${PIPESTATUS[0]} set +x set -e if [[ ${EXIT_CODE} -ne 0 ]] then _check_retry fi done # exit with the exit code of the tests _set_github_env ${EXIT_CODE} exit ${EXIT_CODE} ================================================ FILE: scripts/ierl_kernel.sh ================================================ #!/usr/bin/env sh set -eu SCRIPT_DIR="$(dirname "$0")" REPO_ROOT="$SCRIPT_DIR/.." 
IERL_BIN="$REPO_ROOT/.venv/bin/ierl" if [ -x "$IERL_BIN" ]; then exec "$IERL_BIN" "$@" fi exec ierl "$@" ================================================ FILE: scripts/list_test_modules.sh ================================================ #!/usr/bin/env bash set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd -P)" MODULES_FILE="${SCRIPT_DIR}/full_test_modules.txt" FORMAT="${1:-plain}" case "${FORMAT}" in json) awk ' BEGIN { first = 1 printf("[") } /^[[:space:]]*#/ || /^[[:space:]]*$/ { next } { gsub(/^[[:space:]]+|[[:space:]]+$/, "", $0) if (first == 0) { printf(",") } printf("\"%s\"", $0) first = 0 } END { print "]" } ' "${MODULES_FILE}" ;; plain) awk ' /^[[:space:]]*#/ || /^[[:space:]]*$/ { next } { gsub(/^[[:space:]]+|[[:space:]]+$/, "", $0) print } ' "${MODULES_FILE}" ;; *) echo "Usage: $0 [plain|json]" >&2 exit 1 ;; esac ================================================ FILE: scripts/run_notebook.sh ================================================ #!/usr/bin/env bash set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" NOTEBOOK_NAME="pricing_transition_localnet" NOTEBOOK_PATH="" NOTEBOOK_URL_PATH="" NOTEBOOK_DIR="" NOTEBOOK_ABS_PATH="" NODE_NAME_FULL="main-localnet@127.0.0.1" NODE_COOKIE="localnet" JOIN_TIMEOUT_SEC="${JOIN_TIMEOUT_SEC:-300}" JOIN_POLL_SEC="${JOIN_POLL_SEC:-1}" JUPYTER_PORT="${JUPYTER_PORT:-8888}" JUPYTER_OPEN_BROWSER="${JUPYTER_OPEN_BROWSER:-true}" JUPYTER_DATA_DIR="${JUPYTER_DATA_DIR:-$REPO_ROOT/.tmp/jupyter}" JUPYTER_CONFIG_DIR="${JUPYTER_CONFIG_DIR:-$REPO_ROOT/.jupyter}" LOCALNET_HTTP_HOST="${LOCALNET_HTTP_HOST:-127.0.0.1}" LOCALNET_HTTP_PORT="${LOCALNET_HTTP_PORT:-1984}" LOCALNET_NETWORK_NAME="${LOCALNET_NETWORK_NAME:-arweave.localnet}" STARTED_LOCALNET=0 LOCALNET_PID="" resolve_notebook() { if [ -z "$NOTEBOOK_PATH" ]; then NOTEBOOK_PATH="notebooks/${NOTEBOOK_NAME}.ipynb" fi if [ "${NOTEBOOK_PATH:0:1}" = "/" ]; then NOTEBOOK_ABS_PATH="$NOTEBOOK_PATH" else NOTEBOOK_ABS_PATH="$REPO_ROOT/$NOTEBOOK_PATH" fi if [ ! -f "$NOTEBOOK_ABS_PATH" ]; then echo "Notebook not found: $NOTEBOOK_ABS_PATH" exit 1 fi NOTEBOOK_DIR="$(dirname "$NOTEBOOK_ABS_PATH")" NOTEBOOK_URL_PATH="$(basename "$NOTEBOOK_ABS_PATH")" } start_localnet() { if [ "$(uname -s)" == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi export ERL_EPMD_ADDRESS=127.0.0.1 ./ar-rebar3 localnet compile ERL_LOCALNET_OPTS="-pa $(./rebar3 as localnet path) $(./rebar3 as localnet path --base)/lib/arweave/test -config config/sys.config" erl $ERL_LOCALNET_OPTS -name "$NODE_NAME_FULL" -setcookie "$NODE_COOKIE" -noshell -s ar shell_localnet -eval "timer:sleep(infinity)." & LOCALNET_PID="$!" 
STARTED_LOCALNET=1 } fetch_info() { curl -fsS --max-time 2 \ -H "x-network: ${LOCALNET_NETWORK_NAME}" \ "http://${LOCALNET_HTTP_HOST}:${LOCALNET_HTTP_PORT}/info" 2>/dev/null | tr -d '\n' || true } parse_info_network() { local info="$1" echo "$info" | sed -E -n 's/.*"network"[[:space:]]*:[[:space:]]*"([^"]*)".*/\1/p' } parse_info_height() { local info="$1" echo "$info" | sed -E -n 's/.*"height"[[:space:]]*:[[:space:]]*(-?[0-9]+).*/\1/p' } wait_for_info_height() { local start local info local network local height start="$(date +%s)" while true; do info="$(fetch_info)" if [ -n "$info" ]; then network="$(parse_info_network "$info")" if [ -z "$network" ]; then echo "Failed to parse network from /info: $info" return 1 fi if [ "$network" != "$LOCALNET_NETWORK_NAME" ]; then echo "Found node at ${LOCALNET_HTTP_HOST}:${LOCALNET_HTTP_PORT} with network ${network}, expected ${LOCALNET_NETWORK_NAME}." return 1 fi height="$(parse_info_height "$info")" if [ -z "$height" ]; then echo "Failed to parse height from /info: $info" return 1 fi if [ "$height" != "-1" ]; then return 0 fi fi if [ "$(( $(date +%s) - start ))" -ge "$JOIN_TIMEOUT_SEC" ]; then echo "Timed out waiting for localnet /info height." 
return 1 fi sleep "$JOIN_POLL_SEC" done } cleanup() { if [ "$STARTED_LOCALNET" = "1" ] && [ -n "$LOCALNET_PID" ]; then kill "$LOCALNET_PID" >/dev/null 2>&1 || true fi } run_notebook() { local jupyter_cmd jupyter_cmd=() export PATH="$REPO_ROOT/.venv/bin:$REPO_ROOT/scripts:$PATH" if command -v jupyter >/dev/null 2>&1; then jupyter_cmd=("jupyter") elif command -v uv >/dev/null 2>&1 && [ -d "$REPO_ROOT/.venv" ]; then jupyter_cmd=("uv" "run" "jupyter") else jupyter_cmd=("jupyter") fi if [ "$JUPYTER_OPEN_BROWSER" = "true" ]; then JUPYTER_DATA_DIR="$JUPYTER_DATA_DIR" JUPYTER_CONFIG_DIR="$JUPYTER_CONFIG_DIR" "${jupyter_cmd[@]}" notebook \ --NotebookApp.use_redirect_file=False \ --NotebookApp.default_url="/notebooks/${NOTEBOOK_URL_PATH}" \ --ServerApp.default_url="/notebooks/${NOTEBOOK_URL_PATH}" \ --NotebookApp.notebook_dir="$NOTEBOOK_DIR" \ --ServerApp.root_dir="$NOTEBOOK_DIR" \ --port "$JUPYTER_PORT" else JUPYTER_DATA_DIR="$JUPYTER_DATA_DIR" JUPYTER_CONFIG_DIR="$JUPYTER_CONFIG_DIR" "${jupyter_cmd[@]}" notebook \ --NotebookApp.default_url="/notebooks/${NOTEBOOK_URL_PATH}" \ --ServerApp.default_url="/notebooks/${NOTEBOOK_URL_PATH}" \ --NotebookApp.notebook_dir="$NOTEBOOK_DIR" \ --ServerApp.root_dir="$NOTEBOOK_DIR" \ --no-browser \ --port "$JUPYTER_PORT" fi } cd "$REPO_ROOT" trap cleanup EXIT resolve_notebook if [ -z "$(fetch_info)" ]; then start_localnet fi wait_for_info_height run_notebook ================================================ FILE: scripts/run_notebook_headless.sh ================================================ #!/usr/bin/env bash set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" NOTEBOOK_NAME="pricing_transition_localnet" NOTEBOOK_PATH="" NOTEBOOK_ARG="" while [ "$#" -gt 0 ]; do NOTEBOOK_ARG="$1" shift 1 done NODE_NAME_FULL="main-localnet@127.0.0.1" NODE_COOKIE="localnet" JOIN_TIMEOUT_SEC="${JOIN_TIMEOUT_SEC:-300}" JOIN_POLL_SEC="${JOIN_POLL_SEC:-1}" EXEC_TIMEOUT_SEC="${EXEC_TIMEOUT_SEC:-1200}" KERNEL_NAME="${ERLANG_JUPYTER_KERNEL:-erlang}" JUPYTER_DATA_DIR="${JUPYTER_DATA_DIR:-$REPO_ROOT/.tmp/jupyter}" JUPYTER_CONFIG_DIR="${JUPYTER_CONFIG_DIR:-$REPO_ROOT/.jupyter}" LOCALNET_HTTP_HOST="${LOCALNET_HTTP_HOST:-127.0.0.1}" LOCALNET_HTTP_PORT="${LOCALNET_HTTP_PORT:-1984}" LOCALNET_NETWORK_NAME="${LOCALNET_NETWORK_NAME:-arweave.localnet}" STARTED_LOCALNET=0 LOCALNET_PID="" resolve_notebook() { if [ -z "$NOTEBOOK_PATH" ]; then case "${NOTEBOOK_ARG:-$NOTEBOOK_NAME}" in *.ipynb|*/*) NOTEBOOK_PATH="${NOTEBOOK_ARG:-$NOTEBOOK_NAME}" ;; *) NOTEBOOK_PATH="notebooks/${NOTEBOOK_ARG:-$NOTEBOOK_NAME}.ipynb" ;; esac fi if [ ! -f "$NOTEBOOK_PATH" ]; then echo "Notebook not found: $NOTEBOOK_PATH" exit 1 fi } start_localnet() { if [ "$(uname -s)" == "Darwin" ]; then RANDOMX_JIT="disable randomx_jit" else RANDOMX_JIT= fi export ERL_EPMD_ADDRESS=127.0.0.1 ./ar-rebar3 localnet compile ERL_LOCALNET_OPTS="-pa $(./rebar3 as localnet path) $(./rebar3 as localnet path --base)/lib/arweave/test -config config/sys.config" erl $ERL_LOCALNET_OPTS -name "$NODE_NAME_FULL" -setcookie "$NODE_COOKIE" -noshell -s ar shell_localnet -eval "timer:sleep(infinity)." & LOCALNET_PID="$!" 
STARTED_LOCALNET=1 } fetch_info() { curl -fsS --max-time 2 \ -H "x-network: ${LOCALNET_NETWORK_NAME}" \ "http://${LOCALNET_HTTP_HOST}:${LOCALNET_HTTP_PORT}/info" 2>/dev/null | tr -d '\n' || true } parse_info_network() { local info="$1" echo "$info" | sed -E -n 's/.*"network"[[:space:]]*:[[:space:]]*"([^"]*)".*/\1/p' } parse_info_height() { local info="$1" echo "$info" | sed -E -n 's/.*"height"[[:space:]]*:[[:space:]]*(-?[0-9]+).*/\1/p' } wait_for_info_height() { local start local info local network local height start="$(date +%s)" while true; do info="$(fetch_info)" if [ -n "$info" ]; then network="$(parse_info_network "$info")" if [ -z "$network" ]; then echo "Failed to parse network from /info: $info" return 1 fi if [ "$network" != "$LOCALNET_NETWORK_NAME" ]; then echo "Found node at ${LOCALNET_HTTP_HOST}:${LOCALNET_HTTP_PORT} with network ${network}, expected ${LOCALNET_NETWORK_NAME}." return 1 fi height="$(parse_info_height "$info")" if [ -z "$height" ]; then echo "Failed to parse height from /info: $info" return 1 fi if [ "$height" != "-1" ]; then return 0 fi fi if [ "$(( $(date +%s) - start ))" -ge "$JOIN_TIMEOUT_SEC" ]; then echo "Timed out waiting for localnet /info height." 
return 1 fi sleep "$JOIN_POLL_SEC" done } cleanup() { if [ "$STARTED_LOCALNET" = "1" ] && [ -n "$LOCALNET_PID" ]; then kill "$LOCALNET_PID" >/dev/null 2>&1 || true fi } run_notebook() { local jupyter_cmd local tmp_dir local tmp_output local tmp_root jupyter_cmd=() export PATH="$REPO_ROOT/.venv/bin:$REPO_ROOT/scripts:$PATH" if command -v jupyter >/dev/null 2>&1; then jupyter_cmd=("jupyter") elif command -v uv >/dev/null 2>&1 && [ -d "$REPO_ROOT/.venv" ]; then jupyter_cmd=("uv" "run" "jupyter") else jupyter_cmd=("jupyter") fi tmp_root="${NOTEBOOK_TMP_DIR:-$REPO_ROOT/.tmp/nbconvert}" mkdir -p "$tmp_root" tmp_dir="$(mktemp -d "$tmp_root/notebook.XXXXXX")" tmp_output="$(basename "$NOTEBOOK_PATH")" JUPYTER_DATA_DIR="$JUPYTER_DATA_DIR" JUPYTER_CONFIG_DIR="$JUPYTER_CONFIG_DIR" "${jupyter_cmd[@]}" nbconvert \ --to notebook \ --execute \ --output "$tmp_output" \ --output-dir "$tmp_dir" \ --ExecutePreprocessor.timeout="$EXEC_TIMEOUT_SEC" \ --ExecutePreprocessor.kernel_name="$KERNEL_NAME" \ "$NOTEBOOK_PATH" if [ "${NOTEBOOK_SAVE_OUTPUTS:-}" = "1" ]; then mv "$tmp_dir/$tmp_output" "$NOTEBOOK_PATH" fi rm -rf "$tmp_dir" } cd "$REPO_ROOT" trap cleanup EXIT resolve_notebook if [ -z "$(fetch_info)" ]; then start_localnet fi wait_for_info_height run_notebook ================================================ FILE: scripts/setup_notebook_env.sh ================================================ #!/usr/bin/env bash set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" ERLANG_KERNEL_NAME="${ERLANG_JUPYTER_KERNEL:-erlang}" IERL_URL="${IERL_URL:-https://github.com/filmor/ierl/releases/latest/download/ierl}" IERL_PATH="${IERL_PATH:-$REPO_ROOT/.tmp/ierl}" JUPYTER_DATA_DIR="${JUPYTER_DATA_DIR:-$REPO_ROOT/.tmp/jupyter}" cd "$REPO_ROOT" mkdir -p "$REPO_ROOT/.tmp" mkdir -p "$JUPYTER_DATA_DIR" if ! command -v python3 >/dev/null 2>&1; then echo "python3 is not installed or not on PATH." 
exit 1 fi if [ -d "$REPO_ROOT/.venv" ]; then echo "Using existing virtual environment at: $REPO_ROOT/.venv" else python3 -m venv "$REPO_ROOT/.venv" fi "$REPO_ROOT/.venv/bin/python" -m pip install --upgrade pip "$REPO_ROOT/.venv/bin/python" -m pip install jupyter pandas if ! command -v curl >/dev/null 2>&1; then echo "curl is not installed or not on PATH." exit 1 fi if [ ! -x "$IERL_PATH" ]; then curl -L "$IERL_URL" -o "$IERL_PATH" chmod +x "$IERL_PATH" fi if [ -d "$REPO_ROOT/.venv/bin" ]; then install -m 0755 "$IERL_PATH" "$REPO_ROOT/.venv/bin/ierl" fi cat > "$REPO_ROOT/scripts/ierl_kernel.sh" <<'EOF' #!/usr/bin/env sh set -eu SCRIPT_DIR="$(dirname "$0")" REPO_ROOT="$SCRIPT_DIR/.." IERL_BIN="$REPO_ROOT/.venv/bin/ierl" if [ -x "$IERL_BIN" ]; then exec "$IERL_BIN" "$@" fi exec ierl "$@" EOF chmod +x "$REPO_ROOT/scripts/ierl_kernel.sh" install_kernel() { local kernel_dir="$JUPYTER_DATA_DIR/kernels/$ERLANG_KERNEL_NAME" local kernel_json="$kernel_dir/kernel.json" local kernel_wrapper="$kernel_dir/ierl_kernel.sh" mkdir -p "$kernel_dir" cat > "$kernel_wrapper" <<'EOF' #!/usr/bin/env sh set -eu SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "$SCRIPT_DIR/../../../.." && pwd)" IERL_BIN="$REPO_ROOT/.venv/bin/ierl" if [ -x "$IERL_BIN" ]; then if [ "${NOTEBOOK_SKIP_COMPILE:-0}" = "0" ]; then if [ ! -f "$REPO_ROOT/_build/localnet/lib/arweave/ebin/ar_node.beam" ]; then (cd "$REPO_ROOT" && ./ar-rebar3 localnet compile) fi fi if [ -d "$REPO_ROOT/_build/localnet/lib" ]; then export ERL_LIBS="$REPO_ROOT/_build/localnet/lib" fi exec "$IERL_BIN" "$@" fi exec ierl "$@" EOF chmod +x "$kernel_wrapper" cat > "$kernel_json" <<'EOF' { "argv": [ "{resource_dir}/ierl_kernel.sh", "kernel", "erlang", "-f", "{connection_file}" ], "display_name": "Erlang", "language": "erlang" } EOF } install_kernel if ! 
PATH="$REPO_ROOT/.venv/bin:$PATH" JUPYTER_DATA_DIR="$JUPYTER_DATA_DIR" "$REPO_ROOT/.venv/bin/jupyter" kernelspec list 2>/dev/null | grep -q "[[:space:]]${ERLANG_KERNEL_NAME}[[:space:]]"; then echo "Kernel not found after install: ${ERLANG_KERNEL_NAME}" echo "Check kernelspec list: $REPO_ROOT/.venv/bin/jupyter kernelspec list" exit 1 fi echo "Notebook environment ready." ================================================ FILE: scripts/surefire_to_html.py ================================================ #!/usr/bin/env python3 ###################################################################### # A simple script to convert a surefire report from XML to HTML. # see: https://www.erlang.org/doc/apps/eunit/eunit_surefire.html # see: https://maven.apache.org/surefire/maven-surefire-report-plugin/ ###################################################################### import xml import sys import xml.etree.ElementTree as ET def usage(): print("Usage: %s [PATH|-]" % sys.argv[0]) def main(): if len(sys.argv) <= 1: usage() return 1 if sys.argv[1] == "-": return from_stdin() if sys.argv[1]: return from_file(sys.argv[1]) return 1 def from_stdin(): data = sys.stdin.readlines() element = ET.fromstringlist(data) return convert(element) def from_file(file): tree = ET.parse(file) element = tree.getroot() return convert(element) def convert(element): attrib = element.attrib print(f"""
name{attrib["name"]}
tests{attrib["tests"]}
failures{attrib["failures"]}
errors{attrib["errors"]}
skipped{attrib["skipped"]}
time{attrib["time"]}
""") return 0 if __name__ == '__main__': sys.exit(main()) ================================================ FILE: scripts/system_info.sh ================================================ #!/usr/bin/env sh ###################################################################### # Script used to export information about local software installed, # useful in case of debugging. ###################################################################### CHECK_SOFTWARE="cc cmake cpp clang curl erl g++ gcc git make rsync wget pkg-config" CHECK_LIBS="libssl gmp sqlite3 ncurses" # function helper to check software version _software_version() { local name="${1}" local flag="--version" test "${name}" = "erl" && flag="-version" if which "${name}" 2>&1 >/dev/null then local path=$(which "${name}") local version=$(${name} ${flag} 2>&1 | head -n1) echo "${name}:" echo " path: ${path}" echo " version: ${version}" else echo "${name}: not found" fi } # wrapper around erl command to easily evaluate erlang # code from the shell _erl() { local eval="${1}" local erl="erl -mode embedded -noshell -noinput -eval '${eval}.' -eval 'init:stop().' 
" eval ${erl} } # print erlang/beam information _erlang_version() { if which erl 2>&1 >/dev/null then echo "erlang/beam:" _erl ' io:format("\ \ root_dir: ~s~n", [code:root_dir()]), io:format("\ \ lib_dir: ~s~n", [code:lib_dir()]), io:format("\ \ modules:~n"), [ io:format("\ \ \ \ ~s: ~s~n",[X,Y]) || {X,Y,_} <- code:all_available() ] ' fi } # function helper to check library version _lib_version() { local name="${1}" if pkg-config --exists "${name}" then echo "${name}:" echo " version: $(pkg-config --modversion ${name})" echo " flags: $(pkg-config --libs --cflags --define-prefix ${name})" else echo "${name}: not found" fi } ###################################################################### # main script ###################################################################### # check software for s in ${CHECK_SOFTWARE} do _software_version ${s} done # check libraries if $(which pkg-config 2>&1 >/dev/null) then for l in ${CHECK_LIBS} do _lib_version ${l} done fi # check specific erlang vm and modules _erlang_version ================================================ FILE: scripts/testnet/benchmark ================================================ #!/usr/bin/env bash set -e SCRIPT_DIR="$(dirname "$0")" $SCRIPT_DIR/check-nofile # Sets $ARWEAVE and $COMMAND source $SCRIPT_DIR/arweave.env echo "Moving the benchmark folder to benchmark.old..." rm -rf benchmark.old if [ -d benchmark ]; then mv -i benchmark benchmark.old; fi $ARWEAVE foreground -run ar main $RANDOMX_JIT init mine data_dir benchmark ================================================ FILE: testnet/assert_testnet.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." 
&& pwd)" ALL_NODES+=( testnet-1 testnet-2 testnet-3 testnet-4 testnet-5 testnet-6 ) # Get the current hostname current_host=$(hostname -f) # Check if current hostname is in the list of testnet servers is_testnet_server=0 for server in "${ALL_NODES[@]}"; do if [[ "$current_host" == "$server" ]]; then is_testnet_server=1 break fi done # If not a testnet server, abort if [[ "$is_testnet_server" -eq 0 ]]; then exit 1 fi mkdir -p /arweave-build/mainnet mkdir -p /arweave-build/testnet ================================================ FILE: testnet/backup_data.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." exit 1 fi if [ $# -ne 1 ]; then echo "backup_data.sh " exit 1 fi NAME=$1 BACKUP_DIR="/arweave-backups/${NAME}/" if [ -d "$BACKUP_DIR" ]; then echo "Error: Backup directory $BACKUP_DIR already exists." exit 1 fi set -x mkdir -p $BACKUP_DIR cp -rf /arweave-data/data_sync_state $BACKUP_DIR cp -rf /arweave-data/header_sync_state $BACKUP_DIR cp -rf /arweave-data/ar_tx_blacklist $BACKUP_DIR cp -rf /arweave-data/disk_cache $BACKUP_DIR cp -rf /arweave-data/rocksdb $BACKUP_DIR cp -rf /arweave-data/txs $BACKUP_DIR cp -rf /arweave-data/wallet_lists $BACKUP_DIR cp -rf /arweave-data/wallets $BACKUP_DIR { set +x; } 2>/dev/null echo ================================================ FILE: testnet/clear_data.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." exit 1 fi read -p "Do you really want to delete all files and directories in /arweave-data \ except for storage_modules and wallets? [y/N] " response if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then for item in /arweave-data/*; do filename=$(basename "$item") if [[ "$filename" != "wallets" && ! 
"$filename" =~ ^storage_module ]]; then
            # Print the command before executing it, then delete.
            echo rm -rf "$item"
            rm -rf "$item"
        fi
    done
    echo "Cleanup complete!"
else
    echo "Operation cancelled."
fi

================================================
FILE: testnet/config/testnet-1.json
================================================
{
  "storage_modules": [
    "7,1099511627776,PyDuArRDMyRzK1IR5aoM6woO6YhVTUavldx-ZlTluDk",
    "7,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs",
    "112,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs"
  ],
  "mining_addr": "HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs",
  "peers": [
    "testnet-2.arweave.xyz",
    "testnet-3.arweave.xyz",
    "testnet-4.arweave.xyz",
    "testnet-5.arweave.xyz",
    "testnet-6.arweave.xyz"
  ],
  "coordinated_mining": true,
  "cm_api_secret": "testnet_cm_secret",
  "cm_exit_peer": "testnet-2.arweave.xyz",
  "cm_peers": [
    "testnet-2.arweave.xyz",
    "testnet-3.arweave.xyz",
    "testnet-6.arweave.xyz"
  ],
  "debug": true,
  "mine": true,
  "enable": [
    "remove_orphaned_storage_module_data",
    "randomx_large_pages",
    "pack_served_chunks"
  ],
  "data_dir": "/arweave-data",
  "requests_per_minute_limit": 9000,
  "mining_cache_size_mb": 3200
}

================================================
FILE: testnet/config/testnet-2.json
================================================
{
  "storage_modules": [
    "40,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs",
    "8,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs"
  ],
  "mining_addr": "HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs",
  "peers": [
    "testnet-1.arweave.xyz",
    "testnet-3.arweave.xyz",
    "testnet-4.arweave.xyz",
    "testnet-5.arweave.xyz",
    "testnet-6.arweave.xyz"
  ],
  "vdf_server_trusted_peers": [
    "testnet-4.arweave.xyz"
  ],
  "coordinated_mining": true,
  "cm_api_secret": "testnet_cm_secret",
  "cm_peers": [
    "testnet-1.arweave.xyz",
    "testnet-3.arweave.xyz",
    "testnet-6.arweave.xyz"
  ],
  "debug": true,
  "mine": true,
  "enable": [
    "remove_orphaned_storage_module_data",
    "randomx_large_pages",
    "pack_served_chunks"
  ],
  "data_dir": "/arweave-data",
  "requests_per_minute_limit":
9000
}

================================================
FILE: testnet/config/testnet-3.json
================================================
{
  "storage_modules": [
    "51,1099511627776,peHwdWtsEr27dC7SeTT1hoympOePTO7vlJt2zhQMAtg",
    "137969,1073741824,peHwdWtsEr27dC7SeTT1hoympOePTO7vlJt2zhQMAtg",
    "137970,1073741824,peHwdWtsEr27dC7SeTT1hoympOePTO7vlJt2zhQMAtg"
  ],
  "mining_addr": "peHwdWtsEr27dC7SeTT1hoympOePTO7vlJt2zhQMAtg",
  "peers": [
    "testnet-1.arweave.xyz",
    "testnet-2.arweave.xyz",
    "testnet-4.arweave.xyz",
    "testnet-5.arweave.xyz",
    "testnet-6.arweave.xyz"
  ],
  "vdf_server_trusted_peers": [
    "testnet-4.arweave.xyz"
  ],
  "debug": true,
  "mine": true,
  "enable": [
    "remove_orphaned_storage_module_data",
    "randomx_large_pages",
    "pack_served_chunks"
  ],
  "data_dir": "/arweave-data",
  "requests_per_minute_limit": 9000,
  "mining_cache_size_mb": 2400
}

================================================
FILE: testnet/config/testnet-4.json
================================================
{
  "storage_modules": [
    "70,1099511627776,hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w",
    "71,1099511627776,hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w",
    "245,500000000000,hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w",
    "1226,100000000000,hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w"
  ],
  "mining_addr": "hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w",
  "peers": ["testnet-1.arweave.xyz", "testnet-2.arweave.xyz"],
  "vdf_client_peers": [
    "testnet-3.arweave.xyz"
  ],
  "header_sync_jobs": 0,
  "debug": true,
  "mine": true,
  "enable": [
    "remove_orphaned_storage_module_data",
    "public_vdf_server",
    "randomx_large_pages",
    "pack_served_chunks"
  ],
  "data_dir": "/arweave-data",
  "requests_per_minute_limit": 9000
}

================================================
FILE: testnet/config/testnet-5.json
================================================
{
  "storage_modules": [
    "53,1099511627776,v0AnxIi2DhwdyKUEHR_GrHjIGJtv1ImSB2z2ZWyQzSc,repack_in_place,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9",
"19,3298534883328,v0AnxIi2DhwdyKUEHR_GrHjIGJtv1ImSB2z2ZWyQzSc,repack_in_place,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9", "20,3298534883328,v0AnxIi2DhwdyKUEHR_GrHjIGJtv1ImSB2z2ZWyQzSc,repack_in_place,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9", "7,1099511627776,v0AnxIi2DhwdyKUEHR_GrHjIGJtv1ImSB2z2ZWyQzSc,repack_in_place,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9", "40,1099511627776,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9", "70,1099511627776,v0AnxIi2DhwdyKUEHR_GrHjIGJtv1ImSB2z2ZWyQzSc,repack_in_place,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9", "71,1099511627776,v0AnxIi2DhwdyKUEHR_GrHjIGJtv1ImSB2z2ZWyQzSc,repack_in_place,c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE.replica.2.9" ], "mining_addr": "c5if16ZXh5ooCsCVLumeqgl7Z73lGI8xf8PQOvnb_CE", "peers": [ "testnet-1.arweave.xyz", "testnet-2.arweave.xyz", "testnet-3.arweave.xyz", "testnet-4.arweave.xyz", "testnet-6.arweave.xyz" ], "debug": true, "mine": true, "enable": [ "remove_orphaned_storage_module_data", "randomx_large_pages", "pack_served_chunks" ], "data_dir": "/arweave-data", "requests_per_minute_limit": 9000 } ================================================ FILE: testnet/config/testnet-6.json ================================================ { "storage_modules": [ "19,3298534883328,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "20,3298534883328,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "40,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "53,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "70,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "71,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "7,1099511627776,HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs" ], "mining_addr": "HnjnoDf25mJroiFgY3FJLYw3EsUtcF9LDcJYMI3gKZs", "peers": [ "testnet-1.arweave.xyz", "testnet-2.arweave.xyz", "testnet-3.arweave.xyz", "testnet-4.arweave.xyz", "testnet-5.arweave.xyz" ], "vdf_server_trusted_peers": [ 
"testnet-4.arweave.xyz" ], "cm_api_secret": "testnet_cm_secret", "cm_exit_peer": "testnet-2.arweave.xyz", "cm_peers": [ "testnet-1.arweave.xyz", "testnet-2.arweave.xyz", "testnet-3.arweave.xyz" ], "debug": true, "mine": true, "enable": [ "remove_orphaned_storage_module_data", "randomx_large_pages", "pack_served_chunks" ], "data_dir": "/arweave-data", "requests_per_minute_limit": 9000 } ================================================ FILE: testnet/rebuild_mainnet.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." exit 1 fi mkdir -p /arweave-build/mainnet rm -rf /arweave-build/mainnet/* echo "$0 $@" > /arweave-build/mainnet/build.command cd $ARWEAVE_DIR rm -rf $ARWEAVE_DIR/_build/prod/rel/arweave/* $ARWEAVE_DIR/rebar3 as prod tar tar xf $ARWEAVE_DIR/_build/prod/rel/arweave/arweave-*.tar.gz -C /arweave-build/mainnet ================================================ FILE: testnet/rebuild_testnet.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." exit 1 fi mkdir -p /arweave-build/testnet rm -rf /arweave-build/testnet/* echo "$0 $@" > /arweave-build/testnet/build.command cd $ARWEAVE_DIR rm -rf $ARWEAVE_DIR/_build/testnet/rel/arweave/* $ARWEAVE_DIR/rebar3 as testnet tar tar xf $ARWEAVE_DIR/_build/testnet/rel/arweave/arweave-*.tar.gz -C /arweave-build/testnet ================================================ FILE: testnet/restore_data.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." 
exit 1 fi if [ $# -ne 1 ]; then echo "restore_data.sh " exit 1 fi NAME=$1 BACKUP_DIR="/arweave-backups/${NAME}/" if [ ! -d "$BACKUP_DIR" ]; then echo "Error: Backup directory $BACKUP_DIR does not exist." exit 1 fi DIRECTORIES=( "data_sync_state" "header_sync_state" "ar_tx_blacklist" "disk_cache" "rocksdb" "txs" "wallet_lists" "wallets" ) # Warn about the deletion echo "The following files/directories will be DELETED:" for DIR in "${DIRECTORIES[@]}"; do echo "/arweave-data/$DIR" done # Prompt for confirmation echo "Are you sure you want to continue? (yes/no)" read -r RESPONSE if [[ "$RESPONSE" == "yes" ]]; then # Proceed with deletion for DIR in "${DIRECTORIES[@]}"; do FULL_PATH="/arweave-data/$DIR" if [ -e "$FULL_PATH" ]; then set -x rm -rf "$FULL_PATH" { set +x; } 2>/dev/null fi done else # Abort the operation echo "Operation aborted." exit 0 fi for DIR in "${DIRECTORIES[@]}"; do set -x cp -rf $BACKUP_DIR/$DIR /arweave-data/$DIR { set +x; } 2>/dev/null done echo ================================================ FILE: testnet/start_mainnet.sh ================================================ #!/bin/bash ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." exit 1 fi if [[ ! -f "/arweave-build/mainnet/bin/start" ]]; then echo "Arweave start script not found. Please run rebuild_mainnet.sh first." 
exit 1 fi config_file="$ARWEAVE_DIR/testnet/config/$(hostname -f).json" SCREEN_CMD="screen -dmsL arweave /arweave-build/mainnet/bin/start config_file $config_file vdf_server_trusted_peer vdf-server-3.arweave.xyz" echo "$SCREEN_CMD" echo "$SCREEN_CMD" > /arweave-build/mainnet/run.sh chmod +x /arweave-build/mainnet/run.sh cd /arweave-build/mainnet ./run.sh ================================================ FILE: testnet/start_testnet.sh ================================================ #!/bin/bash # Function to display help display_help() { echo "Usage: $0 []" echo " : start_from_block or start_from_latest_state is required when " echo " launching the pilot node with the start_from_block flag." } ARWEAVE_DIR="$(cd "$(dirname "$0")/.." && pwd)" if ! $ARWEAVE_DIR/testnet/assert_testnet.sh; then echo "Error: This script must be run on a testnet server." exit 1 fi if [[ ! -f "/arweave-build/testnet/bin/start" ]]; then echo "Arweave start script not found. Please run rebuild_testnet.sh first." exit 1 fi node=$(hostname -f) config_file="$ARWEAVE_DIR/testnet/config/$(hostname -f).json" blacklist="transaction_blacklist_url \"${BLACKLIST_URL}\"" SCREEN_CMD="screen -dmsL arweave /arweave-build/testnet/bin/start $blacklist config_file $config_file $*" echo "$SCREEN_CMD" echo "$SCREEN_CMD" > /arweave-build/testnet/run.sh chmod +x /arweave-build/testnet/run.sh cd /arweave-build/testnet ./run.sh