[
  {
    "path": ".clang-format",
    "content": "# SPDX-License-Identifier: GPL-2.0\n#\n# clang-format configuration file. Intended for clang-format >= 11.\n#\n# For more information, see:\n#\n#   Documentation/process/clang-format.rst\n#   https://clang.llvm.org/docs/ClangFormat.html\n#   https://clang.llvm.org/docs/ClangFormatStyleOptions.html\n#\n\n# clang-format configuration for Linux kernel, except that ColumnLimit is 90\n---\nAccessModifierOffset: -4\nAlignAfterOpenBracket: Align\nAlignConsecutiveAssignments: false\nAlignConsecutiveDeclarations: false\nAlignEscapedNewlines: Left\nAlignOperands: true\nAlignTrailingComments: false\nAllowAllParametersOfDeclarationOnNextLine: false\nAllowShortBlocksOnASingleLine: false\nAllowShortCaseLabelsOnASingleLine: false\nAllowShortFunctionsOnASingleLine: None\nAllowShortIfStatementsOnASingleLine: false\nAllowShortLoopsOnASingleLine: false\nAlwaysBreakAfterDefinitionReturnType: None\nAlwaysBreakAfterReturnType: None\nAlwaysBreakBeforeMultilineStrings: false\nAlwaysBreakTemplateDeclarations: false\nBinPackArguments: true\nBinPackParameters: true\nBraceWrapping:\n  AfterClass: false\n  AfterControlStatement: false\n  AfterEnum: false\n  AfterFunction: true\n  AfterNamespace: true\n  AfterObjCDeclaration: false\n  AfterStruct: false\n  AfterUnion: false\n  AfterExternBlock: false\n  BeforeCatch: false\n  BeforeElse: false\n  IndentBraces: false\n  SplitEmptyFunction: true\n  SplitEmptyRecord: true\n  SplitEmptyNamespace: true\nBreakBeforeBinaryOperators: None\nBreakBeforeBraces: Custom\nBreakBeforeInheritanceComma: false\nBreakBeforeTernaryOperators: false\nBreakConstructorInitializersBeforeComma: false\nBreakConstructorInitializers: BeforeComma\nBreakAfterJavaFieldAnnotations: false\nBreakStringLiterals: false\nColumnLimit: 90\nCommentPragmas: '^ IWYU pragma:'\nCompactNamespaces: false\nConstructorInitializerAllOnOneLineOrOnePerLine: false\nConstructorInitializerIndentWidth: 8\nContinuationIndentWidth: 8\nCpp11BracedListStyle: false\nDerivePointerAlignment: 
false\nDisableFormat: false\nExperimentalAutoDetectBinPacking: false\nFixNamespaceComments: false\n\n# Taken from:\n#   git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ tools/ \\\n#   | sed \"s,^#define \\([^[:space:]]*for_each[^[:space:]]*\\)(.*$,  - '\\1',\" \\\n#   | LC_ALL=C sort -u\nForEachMacros:\n  - '__ata_qc_for_each'\n  - '__bio_for_each_bvec'\n  - '__bio_for_each_segment'\n  - '__evlist__for_each_entry'\n  - '__evlist__for_each_entry_continue'\n  - '__evlist__for_each_entry_from'\n  - '__evlist__for_each_entry_reverse'\n  - '__evlist__for_each_entry_safe'\n  - '__for_each_mem_range'\n  - '__for_each_mem_range_rev'\n  - '__for_each_thread'\n  - '__hlist_for_each_rcu'\n  - '__map__for_each_symbol_by_name'\n  - '__pci_bus_for_each_res0'\n  - '__pci_bus_for_each_res1'\n  - '__pci_dev_for_each_res0'\n  - '__pci_dev_for_each_res1'\n  - '__perf_evlist__for_each_entry'\n  - '__perf_evlist__for_each_entry_reverse'\n  - '__perf_evlist__for_each_entry_safe'\n  - '__rq_for_each_bio'\n  - '__shost_for_each_device'\n  - '__sym_for_each'\n  - 'apei_estatus_for_each_section'\n  - 'ata_for_each_dev'\n  - 'ata_for_each_link'\n  - 'ata_qc_for_each'\n  - 'ata_qc_for_each_raw'\n  - 'ata_qc_for_each_with_internal'\n  - 'ax25_for_each'\n  - 'ax25_uid_for_each'\n  - 'bio_for_each_bvec'\n  - 'bio_for_each_bvec_all'\n  - 'bio_for_each_folio_all'\n  - 'bio_for_each_integrity_vec'\n  - 'bio_for_each_segment'\n  - 'bio_for_each_segment_all'\n  - 'bio_list_for_each'\n  - 'bip_for_each_vec'\n  - 'bond_for_each_slave'\n  - 'bond_for_each_slave_rcu'\n  - 'bpf_for_each'\n  - 'bpf_for_each_reg_in_vstate'\n  - 'bpf_for_each_reg_in_vstate_mask'\n  - 'bpf_for_each_spilled_reg'\n  - 'bpf_object__for_each_map'\n  - 'bpf_object__for_each_program'\n  - 'btree_for_each_safe128'\n  - 'btree_for_each_safe32'\n  - 'btree_for_each_safe64'\n  - 'btree_for_each_safel'\n  - 'card_for_each_dev'\n  - 'cgroup_taskset_for_each'\n  - 'cgroup_taskset_for_each_leader'\n  - 
'cpu_aggr_map__for_each_idx'\n  - 'cpufreq_for_each_efficient_entry_idx'\n  - 'cpufreq_for_each_entry'\n  - 'cpufreq_for_each_entry_idx'\n  - 'cpufreq_for_each_valid_entry'\n  - 'cpufreq_for_each_valid_entry_idx'\n  - 'css_for_each_child'\n  - 'css_for_each_descendant_post'\n  - 'css_for_each_descendant_pre'\n  - 'damon_for_each_region'\n  - 'damon_for_each_region_from'\n  - 'damon_for_each_region_safe'\n  - 'damon_for_each_scheme'\n  - 'damon_for_each_scheme_safe'\n  - 'damon_for_each_target'\n  - 'damon_for_each_target_safe'\n  - 'damos_for_each_filter'\n  - 'damos_for_each_filter_safe'\n  - 'data__for_each_file'\n  - 'data__for_each_file_new'\n  - 'data__for_each_file_start'\n  - 'device_for_each_child_node'\n  - 'displayid_iter_for_each'\n  - 'dma_fence_array_for_each'\n  - 'dma_fence_chain_for_each'\n  - 'dma_fence_unwrap_for_each'\n  - 'dma_resv_for_each_fence'\n  - 'dma_resv_for_each_fence_unlocked'\n  - 'do_for_each_ftrace_op'\n  - 'drm_atomic_crtc_for_each_plane'\n  - 'drm_atomic_crtc_state_for_each_plane'\n  - 'drm_atomic_crtc_state_for_each_plane_state'\n  - 'drm_atomic_for_each_plane_damage'\n  - 'drm_client_for_each_connector_iter'\n  - 'drm_client_for_each_modeset'\n  - 'drm_connector_for_each_possible_encoder'\n  - 'drm_exec_for_each_locked_object'\n  - 'drm_exec_for_each_locked_object_reverse'\n  - 'drm_for_each_bridge_in_chain'\n  - 'drm_for_each_connector_iter'\n  - 'drm_for_each_crtc'\n  - 'drm_for_each_crtc_reverse'\n  - 'drm_for_each_encoder'\n  - 'drm_for_each_encoder_mask'\n  - 'drm_for_each_fb'\n  - 'drm_for_each_legacy_plane'\n  - 'drm_for_each_plane'\n  - 'drm_for_each_plane_mask'\n  - 'drm_for_each_privobj'\n  - 'drm_gem_for_each_gpuva'\n  - 'drm_gem_for_each_gpuva_safe'\n  - 'drm_gpuva_for_each_op'\n  - 'drm_gpuva_for_each_op_from_reverse'\n  - 'drm_gpuva_for_each_op_safe'\n  - 'drm_gpuvm_for_each_va'\n  - 'drm_gpuvm_for_each_va_range'\n  - 'drm_gpuvm_for_each_va_range_safe'\n  - 'drm_gpuvm_for_each_va_safe'\n  - 'drm_mm_for_each_hole'\n 
 - 'drm_mm_for_each_node'\n  - 'drm_mm_for_each_node_in_range'\n  - 'drm_mm_for_each_node_safe'\n  - 'dsa_switch_for_each_available_port'\n  - 'dsa_switch_for_each_cpu_port'\n  - 'dsa_switch_for_each_cpu_port_continue_reverse'\n  - 'dsa_switch_for_each_port'\n  - 'dsa_switch_for_each_port_continue_reverse'\n  - 'dsa_switch_for_each_port_safe'\n  - 'dsa_switch_for_each_user_port'\n  - 'dsa_tree_for_each_cpu_port'\n  - 'dsa_tree_for_each_user_port'\n  - 'dsa_tree_for_each_user_port_continue_reverse'\n  - 'dso__for_each_symbol'\n  - 'dsos__for_each_with_build_id'\n  - 'elf_hash_for_each_possible'\n  - 'elf_symtab__for_each_symbol'\n  - 'evlist__for_each_cpu'\n  - 'evlist__for_each_entry'\n  - 'evlist__for_each_entry_continue'\n  - 'evlist__for_each_entry_from'\n  - 'evlist__for_each_entry_reverse'\n  - 'evlist__for_each_entry_safe'\n  - 'flow_action_for_each'\n  - 'for_each_acpi_consumer_dev'\n  - 'for_each_acpi_dev_match'\n  - 'for_each_active_dev_scope'\n  - 'for_each_active_drhd_unit'\n  - 'for_each_active_iommu'\n  - 'for_each_active_route'\n  - 'for_each_aggr_pgid'\n  - 'for_each_and_bit'\n  - 'for_each_andnot_bit'\n  - 'for_each_available_child_of_node'\n  - 'for_each_bench'\n  - 'for_each_bio'\n  - 'for_each_board_func_rsrc'\n  - 'for_each_btf_ext_rec'\n  - 'for_each_btf_ext_sec'\n  - 'for_each_bvec'\n  - 'for_each_card_auxs'\n  - 'for_each_card_auxs_safe'\n  - 'for_each_card_components'\n  - 'for_each_card_dapms'\n  - 'for_each_card_pre_auxs'\n  - 'for_each_card_prelinks'\n  - 'for_each_card_rtds'\n  - 'for_each_card_rtds_safe'\n  - 'for_each_card_widgets'\n  - 'for_each_card_widgets_safe'\n  - 'for_each_cgroup_storage_type'\n  - 'for_each_child_of_node'\n  - 'for_each_clear_bit'\n  - 'for_each_clear_bit_from'\n  - 'for_each_clear_bitrange'\n  - 'for_each_clear_bitrange_from'\n  - 'for_each_cmd'\n  - 'for_each_cmsghdr'\n  - 'for_each_collection'\n  - 'for_each_comp_order'\n  - 'for_each_compatible_node'\n  - 'for_each_component_dais'\n  - 
'for_each_component_dais_safe'\n  - 'for_each_conduit'\n  - 'for_each_console'\n  - 'for_each_console_srcu'\n  - 'for_each_cpu'\n  - 'for_each_cpu_and'\n  - 'for_each_cpu_andnot'\n  - 'for_each_cpu_or'\n  - 'for_each_cpu_wrap'\n  - 'for_each_dapm_widgets'\n  - 'for_each_dedup_cand'\n  - 'for_each_dev_addr'\n  - 'for_each_dev_scope'\n  - 'for_each_dma_cap_mask'\n  - 'for_each_dpcm_be'\n  - 'for_each_dpcm_be_rollback'\n  - 'for_each_dpcm_be_safe'\n  - 'for_each_dpcm_fe'\n  - 'for_each_drhd_unit'\n  - 'for_each_dss_dev'\n  - 'for_each_efi_memory_desc'\n  - 'for_each_efi_memory_desc_in_map'\n  - 'for_each_element'\n  - 'for_each_element_extid'\n  - 'for_each_element_id'\n  - 'for_each_endpoint_of_node'\n  - 'for_each_event'\n  - 'for_each_event_tps'\n  - 'for_each_evictable_lru'\n  - 'for_each_fib6_node_rt_rcu'\n  - 'for_each_fib6_walker_rt'\n  - 'for_each_free_mem_pfn_range_in_zone'\n  - 'for_each_free_mem_pfn_range_in_zone_from'\n  - 'for_each_free_mem_range'\n  - 'for_each_free_mem_range_reverse'\n  - 'for_each_func_rsrc'\n  - 'for_each_gpiochip_node'\n  - 'for_each_group_evsel'\n  - 'for_each_group_evsel_head'\n  - 'for_each_group_member'\n  - 'for_each_group_member_head'\n  - 'for_each_hstate'\n  - 'for_each_if'\n  - 'for_each_inject_fn'\n  - 'for_each_insn'\n  - 'for_each_insn_prefix'\n  - 'for_each_intid'\n  - 'for_each_iommu'\n  - 'for_each_ip_tunnel_rcu'\n  - 'for_each_irq_nr'\n  - 'for_each_lang'\n  - 'for_each_link_codecs'\n  - 'for_each_link_cpus'\n  - 'for_each_link_platforms'\n  - 'for_each_lru'\n  - 'for_each_matching_node'\n  - 'for_each_matching_node_and_match'\n  - 'for_each_media_entity_data_link'\n  - 'for_each_mem_pfn_range'\n  - 'for_each_mem_range'\n  - 'for_each_mem_range_rev'\n  - 'for_each_mem_region'\n  - 'for_each_member'\n  - 'for_each_memory'\n  - 'for_each_migratetype_order'\n  - 'for_each_missing_reg'\n  - 'for_each_mle_subelement'\n  - 'for_each_mod_mem_type'\n  - 'for_each_net'\n  - 'for_each_net_continue_reverse'\n  - 
'for_each_net_rcu'\n  - 'for_each_netdev'\n  - 'for_each_netdev_continue'\n  - 'for_each_netdev_continue_rcu'\n  - 'for_each_netdev_continue_reverse'\n  - 'for_each_netdev_dump'\n  - 'for_each_netdev_feature'\n  - 'for_each_netdev_in_bond_rcu'\n  - 'for_each_netdev_rcu'\n  - 'for_each_netdev_reverse'\n  - 'for_each_netdev_safe'\n  - 'for_each_new_connector_in_state'\n  - 'for_each_new_crtc_in_state'\n  - 'for_each_new_mst_mgr_in_state'\n  - 'for_each_new_plane_in_state'\n  - 'for_each_new_plane_in_state_reverse'\n  - 'for_each_new_private_obj_in_state'\n  - 'for_each_new_reg'\n  - 'for_each_node'\n  - 'for_each_node_by_name'\n  - 'for_each_node_by_type'\n  - 'for_each_node_mask'\n  - 'for_each_node_state'\n  - 'for_each_node_with_cpus'\n  - 'for_each_node_with_property'\n  - 'for_each_nonreserved_multicast_dest_pgid'\n  - 'for_each_numa_hop_mask'\n  - 'for_each_of_allnodes'\n  - 'for_each_of_allnodes_from'\n  - 'for_each_of_cpu_node'\n  - 'for_each_of_pci_range'\n  - 'for_each_old_connector_in_state'\n  - 'for_each_old_crtc_in_state'\n  - 'for_each_old_mst_mgr_in_state'\n  - 'for_each_old_plane_in_state'\n  - 'for_each_old_private_obj_in_state'\n  - 'for_each_oldnew_connector_in_state'\n  - 'for_each_oldnew_crtc_in_state'\n  - 'for_each_oldnew_mst_mgr_in_state'\n  - 'for_each_oldnew_plane_in_state'\n  - 'for_each_oldnew_plane_in_state_reverse'\n  - 'for_each_oldnew_private_obj_in_state'\n  - 'for_each_online_cpu'\n  - 'for_each_online_node'\n  - 'for_each_online_pgdat'\n  - 'for_each_or_bit'\n  - 'for_each_path'\n  - 'for_each_pci_bridge'\n  - 'for_each_pci_dev'\n  - 'for_each_pcm_streams'\n  - 'for_each_physmem_range'\n  - 'for_each_populated_zone'\n  - 'for_each_possible_cpu'\n  - 'for_each_present_blessed_reg'\n  - 'for_each_present_cpu'\n  - 'for_each_prime_number'\n  - 'for_each_prime_number_from'\n  - 'for_each_probe_cache_entry'\n  - 'for_each_process'\n  - 'for_each_process_thread'\n  - 'for_each_prop_codec_conf'\n  - 'for_each_prop_dai_codec'\n  - 
'for_each_prop_dai_cpu'\n  - 'for_each_prop_dlc_codecs'\n  - 'for_each_prop_dlc_cpus'\n  - 'for_each_prop_dlc_platforms'\n  - 'for_each_property_of_node'\n  - 'for_each_reg'\n  - 'for_each_reg_filtered'\n  - 'for_each_reloc'\n  - 'for_each_reloc_from'\n  - 'for_each_requested_gpio'\n  - 'for_each_requested_gpio_in_range'\n  - 'for_each_reserved_mem_range'\n  - 'for_each_reserved_mem_region'\n  - 'for_each_rtd_codec_dais'\n  - 'for_each_rtd_components'\n  - 'for_each_rtd_cpu_dais'\n  - 'for_each_rtd_dais'\n  - 'for_each_sband_iftype_data'\n  - 'for_each_script'\n  - 'for_each_sec'\n  - 'for_each_set_bit'\n  - 'for_each_set_bit_from'\n  - 'for_each_set_bit_wrap'\n  - 'for_each_set_bitrange'\n  - 'for_each_set_bitrange_from'\n  - 'for_each_set_clump8'\n  - 'for_each_sg'\n  - 'for_each_sg_dma_page'\n  - 'for_each_sg_page'\n  - 'for_each_sgtable_dma_page'\n  - 'for_each_sgtable_dma_sg'\n  - 'for_each_sgtable_page'\n  - 'for_each_sgtable_sg'\n  - 'for_each_sibling_event'\n  - 'for_each_sta_active_link'\n  - 'for_each_subelement'\n  - 'for_each_subelement_extid'\n  - 'for_each_subelement_id'\n  - 'for_each_sublist'\n  - 'for_each_subsystem'\n  - 'for_each_supported_activate_fn'\n  - 'for_each_supported_inject_fn'\n  - 'for_each_sym'\n  - 'for_each_test'\n  - 'for_each_thread'\n  - 'for_each_token'\n  - 'for_each_unicast_dest_pgid'\n  - 'for_each_valid_link'\n  - 'for_each_vif_active_link'\n  - 'for_each_vma'\n  - 'for_each_vma_range'\n  - 'for_each_vsi'\n  - 'for_each_wakeup_source'\n  - 'for_each_zone'\n  - 'for_each_zone_zonelist'\n  - 'for_each_zone_zonelist_nodemask'\n  - 'func_for_each_insn'\n  - 'fwnode_for_each_available_child_node'\n  - 'fwnode_for_each_child_node'\n  - 'fwnode_for_each_parent_node'\n  - 'fwnode_graph_for_each_endpoint'\n  - 'gadget_for_each_ep'\n  - 'genradix_for_each'\n  - 'genradix_for_each_from'\n  - 'genradix_for_each_reverse'\n  - 'hash_for_each'\n  - 'hash_for_each_possible'\n  - 'hash_for_each_possible_rcu'\n  - 
'hash_for_each_possible_rcu_notrace'\n  - 'hash_for_each_possible_safe'\n  - 'hash_for_each_rcu'\n  - 'hash_for_each_safe'\n  - 'hashmap__for_each_entry'\n  - 'hashmap__for_each_entry_safe'\n  - 'hashmap__for_each_key_entry'\n  - 'hashmap__for_each_key_entry_safe'\n  - 'hctx_for_each_ctx'\n  - 'hists__for_each_format'\n  - 'hists__for_each_sort_list'\n  - 'hlist_bl_for_each_entry'\n  - 'hlist_bl_for_each_entry_rcu'\n  - 'hlist_bl_for_each_entry_safe'\n  - 'hlist_for_each'\n  - 'hlist_for_each_entry'\n  - 'hlist_for_each_entry_continue'\n  - 'hlist_for_each_entry_continue_rcu'\n  - 'hlist_for_each_entry_continue_rcu_bh'\n  - 'hlist_for_each_entry_from'\n  - 'hlist_for_each_entry_from_rcu'\n  - 'hlist_for_each_entry_rcu'\n  - 'hlist_for_each_entry_rcu_bh'\n  - 'hlist_for_each_entry_rcu_notrace'\n  - 'hlist_for_each_entry_safe'\n  - 'hlist_for_each_entry_srcu'\n  - 'hlist_for_each_safe'\n  - 'hlist_nulls_for_each_entry'\n  - 'hlist_nulls_for_each_entry_from'\n  - 'hlist_nulls_for_each_entry_rcu'\n  - 'hlist_nulls_for_each_entry_safe'\n  - 'i3c_bus_for_each_i2cdev'\n  - 'i3c_bus_for_each_i3cdev'\n  - 'idr_for_each_entry'\n  - 'idr_for_each_entry_continue'\n  - 'idr_for_each_entry_continue_ul'\n  - 'idr_for_each_entry_ul'\n  - 'in_dev_for_each_ifa_rcu'\n  - 'in_dev_for_each_ifa_rtnl'\n  - 'inet_bind_bucket_for_each'\n  - 'interval_tree_for_each_span'\n  - 'intlist__for_each_entry'\n  - 'intlist__for_each_entry_safe'\n  - 'kcore_copy__for_each_phdr'\n  - 'key_for_each'\n  - 'key_for_each_safe'\n  - 'klp_for_each_func'\n  - 'klp_for_each_func_safe'\n  - 'klp_for_each_func_static'\n  - 'klp_for_each_object'\n  - 'klp_for_each_object_safe'\n  - 'klp_for_each_object_static'\n  - 'kunit_suite_for_each_test_case'\n  - 'kvm_for_each_memslot'\n  - 'kvm_for_each_memslot_in_gfn_range'\n  - 'kvm_for_each_vcpu'\n  - 'libbpf_nla_for_each_attr'\n  - 'list_for_each'\n  - 'list_for_each_codec'\n  - 'list_for_each_codec_safe'\n  - 'list_for_each_continue'\n  - 'list_for_each_entry'\n  - 
'list_for_each_entry_continue'\n  - 'list_for_each_entry_continue_rcu'\n  - 'list_for_each_entry_continue_reverse'\n  - 'list_for_each_entry_from'\n  - 'list_for_each_entry_from_rcu'\n  - 'list_for_each_entry_from_reverse'\n  - 'list_for_each_entry_lockless'\n  - 'list_for_each_entry_rcu'\n  - 'list_for_each_entry_reverse'\n  - 'list_for_each_entry_safe'\n  - 'list_for_each_entry_safe_continue'\n  - 'list_for_each_entry_safe_from'\n  - 'list_for_each_entry_safe_reverse'\n  - 'list_for_each_entry_srcu'\n  - 'list_for_each_from'\n  - 'list_for_each_prev'\n  - 'list_for_each_prev_safe'\n  - 'list_for_each_rcu'\n  - 'list_for_each_reverse'\n  - 'list_for_each_safe'\n  - 'llist_for_each'\n  - 'llist_for_each_entry'\n  - 'llist_for_each_entry_safe'\n  - 'llist_for_each_safe'\n  - 'lwq_for_each_safe'\n  - 'map__for_each_symbol'\n  - 'map__for_each_symbol_by_name'\n  - 'maps__for_each_entry'\n  - 'maps__for_each_entry_safe'\n  - 'mas_for_each'\n  - 'mci_for_each_dimm'\n  - 'media_device_for_each_entity'\n  - 'media_device_for_each_intf'\n  - 'media_device_for_each_link'\n  - 'media_device_for_each_pad'\n  - 'media_entity_for_each_pad'\n  - 'media_pipeline_for_each_entity'\n  - 'media_pipeline_for_each_pad'\n  - 'mlx5_lag_for_each_peer_mdev'\n  - 'msi_domain_for_each_desc'\n  - 'msi_for_each_desc'\n  - 'mt_for_each'\n  - 'nanddev_io_for_each_page'\n  - 'netdev_for_each_lower_dev'\n  - 'netdev_for_each_lower_private'\n  - 'netdev_for_each_lower_private_rcu'\n  - 'netdev_for_each_mc_addr'\n  - 'netdev_for_each_synced_mc_addr'\n  - 'netdev_for_each_synced_uc_addr'\n  - 'netdev_for_each_uc_addr'\n  - 'netdev_for_each_upper_dev_rcu'\n  - 'netdev_hw_addr_list_for_each'\n  - 'nft_rule_for_each_expr'\n  - 'nla_for_each_attr'\n  - 'nla_for_each_nested'\n  - 'nlmsg_for_each_attr'\n  - 'nlmsg_for_each_msg'\n  - 'nr_neigh_for_each'\n  - 'nr_neigh_for_each_safe'\n  - 'nr_node_for_each'\n  - 'nr_node_for_each_safe'\n  - 'of_for_each_phandle'\n  - 'of_property_for_each_string'\n  - 
'of_property_for_each_u32'\n  - 'pci_bus_for_each_resource'\n  - 'pci_dev_for_each_resource'\n  - 'pcl_for_each_chunk'\n  - 'pcl_for_each_segment'\n  - 'pcm_for_each_format'\n  - 'perf_config_items__for_each_entry'\n  - 'perf_config_sections__for_each_entry'\n  - 'perf_config_set__for_each_entry'\n  - 'perf_cpu_map__for_each_cpu'\n  - 'perf_cpu_map__for_each_idx'\n  - 'perf_evlist__for_each_entry'\n  - 'perf_evlist__for_each_entry_reverse'\n  - 'perf_evlist__for_each_entry_safe'\n  - 'perf_evlist__for_each_evsel'\n  - 'perf_evlist__for_each_mmap'\n  - 'perf_hpp_list__for_each_format'\n  - 'perf_hpp_list__for_each_format_safe'\n  - 'perf_hpp_list__for_each_sort_list'\n  - 'perf_hpp_list__for_each_sort_list_safe'\n  - 'perf_tool_event__for_each_event'\n  - 'plist_for_each'\n  - 'plist_for_each_continue'\n  - 'plist_for_each_entry'\n  - 'plist_for_each_entry_continue'\n  - 'plist_for_each_entry_safe'\n  - 'plist_for_each_safe'\n  - 'pnp_for_each_card'\n  - 'pnp_for_each_dev'\n  - 'protocol_for_each_card'\n  - 'protocol_for_each_dev'\n  - 'queue_for_each_hw_ctx'\n  - 'radix_tree_for_each_slot'\n  - 'radix_tree_for_each_tagged'\n  - 'rb_for_each'\n  - 'rbtree_postorder_for_each_entry_safe'\n  - 'rdma_for_each_block'\n  - 'rdma_for_each_port'\n  - 'rdma_umem_for_each_dma_block'\n  - 'resort_rb__for_each_entry'\n  - 'resource_list_for_each_entry'\n  - 'resource_list_for_each_entry_safe'\n  - 'rhl_for_each_entry_rcu'\n  - 'rhl_for_each_rcu'\n  - 'rht_for_each'\n  - 'rht_for_each_entry'\n  - 'rht_for_each_entry_from'\n  - 'rht_for_each_entry_rcu'\n  - 'rht_for_each_entry_rcu_from'\n  - 'rht_for_each_entry_safe'\n  - 'rht_for_each_from'\n  - 'rht_for_each_rcu'\n  - 'rht_for_each_rcu_from'\n  - 'rq_for_each_bvec'\n  - 'rq_for_each_segment'\n  - 'rq_list_for_each'\n  - 'rq_list_for_each_safe'\n  - 'sample_read_group__for_each'\n  - 'scsi_for_each_prot_sg'\n  - 'scsi_for_each_sg'\n  - 'sctp_for_each_hentry'\n  - 'sctp_skb_for_each'\n  - 'sec_for_each_insn'\n  - 
'sec_for_each_insn_continue'\n  - 'sec_for_each_insn_from'\n  - 'sec_for_each_sym'\n  - 'shdma_for_each_chan'\n  - 'shost_for_each_device'\n  - 'sk_for_each'\n  - 'sk_for_each_bound'\n  - 'sk_for_each_bound_bhash2'\n  - 'sk_for_each_entry_offset_rcu'\n  - 'sk_for_each_from'\n  - 'sk_for_each_rcu'\n  - 'sk_for_each_safe'\n  - 'sk_nulls_for_each'\n  - 'sk_nulls_for_each_from'\n  - 'sk_nulls_for_each_rcu'\n  - 'snd_array_for_each'\n  - 'snd_pcm_group_for_each_entry'\n  - 'snd_soc_dapm_widget_for_each_path'\n  - 'snd_soc_dapm_widget_for_each_path_safe'\n  - 'snd_soc_dapm_widget_for_each_sink_path'\n  - 'snd_soc_dapm_widget_for_each_source_path'\n  - 'strlist__for_each_entry'\n  - 'strlist__for_each_entry_safe'\n  - 'sym_for_each_insn'\n  - 'sym_for_each_insn_continue_reverse'\n  - 'symbols__for_each_entry'\n  - 'tb_property_for_each'\n  - 'tcf_act_for_each_action'\n  - 'tcf_exts_for_each_action'\n  - 'ttm_resource_manager_for_each_res'\n  - 'twsk_for_each_bound_bhash2'\n  - 'udp_portaddr_for_each_entry'\n  - 'udp_portaddr_for_each_entry_rcu'\n  - 'usb_hub_for_each_child'\n  - 'v4l2_device_for_each_subdev'\n  - 'v4l2_m2m_for_each_dst_buf'\n  - 'v4l2_m2m_for_each_dst_buf_safe'\n  - 'v4l2_m2m_for_each_src_buf'\n  - 'v4l2_m2m_for_each_src_buf_safe'\n  - 'virtio_device_for_each_vq'\n  - 'while_for_each_ftrace_op'\n  - 'xa_for_each'\n  - 'xa_for_each_marked'\n  - 'xa_for_each_range'\n  - 'xa_for_each_start'\n  - 'xas_for_each'\n  - 'xas_for_each_conflict'\n  - 'xas_for_each_marked'\n  - 'xbc_array_for_each_value'\n  - 'xbc_for_each_key_value'\n  - 'xbc_node_for_each_array_value'\n  - 'xbc_node_for_each_child'\n  - 'xbc_node_for_each_key_value'\n  - 'xbc_node_for_each_subkey'\n  - 'zorro_for_each_dev'\n  - 'pool_iter_for_each'\n  - 'pool_for_each'\n\nIncludeBlocks: Preserve\nIncludeCategories:\n  - Regex: '.*'\n    Priority: 1\nIncludeIsMainRegex: '(Test)?$'\nIndentCaseLabels: false\nIndentGotoLabels: false\nIndentPPDirectives: None\nIndentWidth: 
8\nIndentWrappedFunctionNames: false\nJavaScriptQuotes: Leave\nJavaScriptWrapImports: true\nKeepEmptyLinesAtTheStartOfBlocks: false\nMacroBlockBegin: ''\nMacroBlockEnd: ''\nMaxEmptyLinesToKeep: 1\nNamespaceIndentation: None\nObjCBinPackProtocolList: Auto\nObjCBlockIndentWidth: 8\nObjCSpaceAfterProperty: true\nObjCSpaceBeforeProtocolList: true\n\n# Taken from git's rules\nPenaltyBreakAssignment: 10\nPenaltyBreakBeforeFirstCallParameter: 30\nPenaltyBreakComment: 10\nPenaltyBreakFirstLessLess: 0\nPenaltyBreakString: 10\nPenaltyExcessCharacter: 100\nPenaltyReturnTypeOnItsOwnLine: 60\n\nPointerAlignment: Right\nReflowComments: false\nSortIncludes: false\nSortUsingDeclarations: false\nSpaceAfterCStyleCast: false\nSpaceAfterTemplateKeyword: true\nSpaceBeforeAssignmentOperators: true\nSpaceBeforeCtorInitializerColon: true\nSpaceBeforeInheritanceColon: true\nSpaceBeforeParens: ControlStatementsExceptForEachMacros\nSpaceBeforeRangeBasedForLoopColon: true\nSpaceInEmptyParentheses: false\nSpacesBeforeTrailingComments: 1\nSpacesInAngles: false\nSpacesInContainerLiterals: false\nSpacesInCStyleCastParentheses: false\nSpacesInParentheses: false\nSpacesInSquareBrackets: false\nStandard: Cpp03\nTabWidth: 8\nUseTab: Always\n...\n"
  },
  {
    "path": ".dockerignore",
    "content": "build\n"
  },
  {
    "path": ".github/workflows/build-freebsd.yml",
    "content": "name: build on FreeBSD\n\non:\n  push:\n    branches: [ \"main\", \"dev\" ]\n  pull_request:\n    branches: [ \"main\", \"dev\" ]\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v4\n      with:\n        submodules: true\n\n    - name: apply the patch to libssh\n      run: |\n        git -C libssh fetch --all --tags --prune\n        patch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n    - name: Build in FreeBSD\n      uses: vmactions/freebsd-vm@v1\n      with:\n        prepare: |\n          pkg install -y git cmake\n        run: |\n          cmake -B build -DCMAKE_BUILD_TYPE=Release\n          cmake --build build\n          build/mscp -h\n"
  },
  {
    "path": ".github/workflows/build-macos.yml",
    "content": "name: build on macOS\n\non:\n  push:\n    branches: [ \"main\", \"dev\" ]\n  pull_request:\n    branches: [ \"main\", \"dev\" ]\n\nenv:\n  # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)\n  BUILD_TYPE: Release\n\njobs:\n  build:\n    # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.\n    # You can convert this to a matrix build if you need cross-platform coverage.\n    # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix\n    runs-on: macos-latest\n\n    steps:\n    - uses: actions/checkout@v4\n      with:\n        submodules: true\n\n    - name: apply the patch to libssh\n      run: |\n        git -C libssh fetch --all --tags --prune\n        patch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n    - name: install build dependency\n      run: ./scripts/install-build-deps.sh\n\n    - name: save homebrew prefix\n      id: brew-prefix\n      run: echo \"HOMEBREW_PREFIX=$(brew --prefix)\" >> $GITHUB_OUTPUT\n\n    - name: Configure CMake\n      # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make.\n      # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type\n      run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DOPENSSL_ROOT_DIR=${{steps.brew-prefix.outputs.HOMEBREW_PREFIX}}/opt/openssl@3\n\n    - name: Build\n      # Build your program with the given configuration\n      run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}\n\n    - name: Run\n      run: ${{github.workspace}}/build/mscp -h\n"
  },
  {
    "path": ".github/workflows/build-ubuntu.yml",
    "content": "name: build on ubuntu\n\non:\n  push:\n    branches: [ \"main\", \"dev\" ]\n  pull_request:\n    branches: [ \"main\", \"dev\" ]\n\nenv:\n  # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)\n  BUILD_TYPE: Release\n\njobs:\n  build:\n    # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.\n    # You can convert this to a matrix build if you need cross-platform coverage.\n    # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v4\n      with:\n        submodules: true\n\n    - name: apply the patch to libssh\n      run: |\n        git -C libssh fetch --all --tags --prune\n        patch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n    - name: install build dependency\n      run: |\n        sudo apt-get update\n        sudo ./scripts/install-build-deps.sh\n\n    - name: Configure CMake\n      # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make.\n      # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type\n      run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}\n\n    - name: Build\n      # Build your program with the given configuration\n      run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}\n\n    - name: Run\n      run: ${{github.workspace}}/build/mscp -h\n"
  },
  {
    "path": ".github/workflows/codeql.yml",
    "content": "# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# You may wish to alter this file to override the set of languages analyzed,\n# or to provide custom queries or build logic.\n#\n# ******** NOTE ********\n# We have attempted to detect the languages in your repository. Please check\n# the `language` matrix defined below to confirm you have the correct set of\n# supported CodeQL languages.\n#\nname: \"CodeQL\"\n\non:\n  push:\n    branches: [ \"main\", \"dev\" ]\n  pull_request:\n    # The branches below must be a subset of the branches above\n    branches: [ \"main\", \"dev\" ]\n  schedule:\n    - cron: '35 11 * * 5'\n\njobs:\n  analyze:\n    name: Analyze\n    runs-on: ubuntu-latest\n    permissions:\n      actions: read\n      contents: read\n      security-events: write\n\n    strategy:\n      fail-fast: false\n      matrix:\n        language: [ 'cpp' ]\n        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]\n        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support\n\n    steps:\n    - name: Checkout repository\n      uses: actions/checkout@v4\n      with:\n        submodules: true\n\n    - name: apply the patch to libssh\n      run: |\n        git -C libssh fetch --all --tags --prune\n        patch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n    - name: install build dependency\n      run: |\n        sudo apt-get update\n        sudo ./scripts/install-build-deps.sh\n\n    # Initializes the CodeQL tools for scanning.\n    - name: Initialize CodeQL\n      uses: github/codeql-action/init@v3\n      with:\n        languages: ${{ matrix.language }}\n        # If you wish to specify custom queries, you can do so here or in a config file.\n        # By default, queries listed here will override any specified in a config file.\n        # Prefix the list here with \"+\" to use these queries and 
those in the config file.\n        \n        # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs\n        # queries: security-extended,security-and-quality\n\n    # Autobuild attempts to build any compiled languages  (C/C++, C#, Go, or Java).\n    # If this step fails, then you should remove it and run the build manually (see below)\n    - name: Autobuild\n      uses: github/codeql-action/autobuild@v3\n\n    # ℹ️ Command-line programs to run using the OS shell.\n    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun\n\n    #   If the Autobuild fails above, remove it and uncomment the following three lines. \n    #   modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.\n\n    # - run: |\n    #   echo \"Run, Build Application using script\"\n    #   ./location_of_script_within_repo/buildscript.sh\n\n    - name: Perform CodeQL Analysis\n      uses: github/codeql-action/analyze@v3\n      with:\n        category: \"/language:${{matrix.language}}\"\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: release\n\non:\n  push:\n    tags:\n      - \"v*.*.*\"\n  workflow_dispatch:\n\nenv:\n    BUILD_TYPE: Release\n\njobs:\n  source-release:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          submodules: true\n\n      - name: apply the patch to libssh\n        run: |\n          git -C libssh fetch --all --tags --prune\n          patch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n      - name: Set variables\n        run: |\n          VER=$(cat VERSION)\n          echo \"VERSION=$VER\" >> $GITHUB_ENV\n\n      - name: archive\n        run: |\n          cd ..\n          cp -r mscp mscp-${{env.VERSION}}\n          tar zcvf mscp-${{env.VERSION}}.tar.gz --exclude-vcs mscp-${{env.VERSION}}\n\n      - name: Release\n        uses: softprops/action-gh-release@v1\n        with:\n          files: |\n            ${{github.workspace}}/../mscp-${{env.VERSION}}.tar.gz\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: test\n\non:\n  push:\n    branches: [ \"main\", \"dev\" ]\n  pull_request:\n    branches: [ \"main\", \"dev\" ]\n\nenv:\n  BUILD_TYPE: Release\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n    strategy:\n      fail-fast: false\n      matrix:\n        index: # see DIST_IDS and DIST_VERS lists in CMakeLists.txt\n        - ubuntu-20.04\n        - ubuntu-22.04\n        - ubuntu-24.04\n        - rocky-8.9\n        - rocky-9.3\n        - almalinux-9.3\n        - alpine-3.22\n        - arch-base\n    steps:\n    - uses: actions/checkout@v4\n      with:\n        submodules: true\n\n    - name: apply the patch to libssh\n      run: |\n        git -C libssh fetch --all --tags --prune\n        patch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n    # TODO: just building docker images does not require libssh. fix CMakeLists\n    - name: install build dependency\n      run: |\n        sudo apt-get update\n        sudo ./scripts/install-build-deps.sh\n\n    - name: configure CMake\n      run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}\n\n    - name: Build Containers\n      run: make -C ${{github.workspace}}/build docker-build-${{ matrix.index }}\n\n    - name: Run Test\n      run: make -C ${{github.workspace}}/build docker-test-${{ matrix.index }}\n"
  },
  {
    "path": ".gitignore",
    "content": "build\nhtml\ncompile_commands.json\nCMakeUserPresets.json\n.*.swp\n.cache\n\ninclude/mscp_version.h\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"libssh\"]\n\tpath = libssh\n\turl = https://git.libssh.org/projects/libssh.git\n\tignore = dirty\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.13)\n\nfile (STRINGS \"VERSION\" MSCP_VERSION)\n\nproject(mscp\n\tVERSION ${MSCP_VERSION}\n\tLANGUAGES C)\n\n\nfind_package(Git)\nif (Git_FOUND)\n\t# based on https://github.com/nocnokneo/cmake-git-versioning-example\n\texecute_process(\n\t\tCOMMAND\t${GIT_EXECUTABLE} describe --tags --match \"v*\"\n\t\tOUTPUT_VARIABLE\tGIT_DESCRIBE_VERSION\n\t\tRESULT_VARIABLE\tGIT_DESCRIBE_ERROR_CODE\n\t\tOUTPUT_STRIP_TRAILING_WHITESPACE)\n\tif(NOT GIT_DESCRIBE_ERROR_CODE)\n\t\tset(MSCP_BUILD_VERSION ${GIT_DESCRIBE_VERSION})\n\tendif()\nendif()\n\nif (NOT MSCP_BUILD_VERSION)\n\tmessage(STATUS \"Failed to determine version via Git. Use VERSION file instead.\")\n\tset(MSCP_BUILD_VERSION v${MSCP_VERSION})\nendif()\n\n\ninclude(GNUInstallDirs)\n\nset(CMAKE_C_FLAGS_DEBUG \"${CMAKE_C_FLAGS_DEBUG} -DDEBUG\")\nlist(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)\n\nif(APPLE)\n\tlist(APPEND CMAKE_PREFIX_PATH /usr/local) # intel mac homebrew prefix\n\tlist(APPEND CMAKE_PREFIX_PATH /opt/homebrew) # arm mac homebrew prefix\nendif() # APPLE\n\n\noption(BUILD_CONAN OFF) # Build mscp with conan\nif(BUILD_CONAN)\n\tmessage(STATUS \"Build mscp with conan\")\nendif()\n\noption(BUILD_STATIC OFF) # Build mscp with -static LD flag\nif (BUILD_STATIC)\n\tmessage(STATUS \"Build mscp with -static LD option\")\n\tif (NOT BUILD_CONAN)\n\t\tmessage(WARNING\n\t\t\t\"BUILD_STATIC strongly recommended with BUILD_CONAN option\")\n\tendif()\nendif()\n\noption(USE_PODMAN OFF) # use podman instread of docker\nif(USE_PODMAN)\n\tmessage(STATUS \"Use podman instead of docker\")\n\tset(CE podman) # CE means Container Engine\nelse()\n\tset(CE docker)\nendif()\n\n\n# add libssh static library\nset(CMAKE_POLICY_DEFAULT_CMP0077 NEW)\nset(WITH_SERVER OFF)\nset(BUILD_SHARED_LIBS OFF)\nset(WITH_EXAMPLES OFF)\nset(BUILD_STATIC_LIB ON)\nif(BUILD_CONAN)\n\tmessage(STATUS\n\t\t\"Disable libssh GSSAPI support because libkrb5 doesn't exist in conan\")\n\tset(WITH_GSSAPI 
OFF)\nendif()\nadd_subdirectory(libssh EXCLUDE_FROM_ALL)\n\n\n\n# setup mscp compile options\nlist(APPEND MSCP_COMPILE_OPTS -iquote ${CMAKE_CURRENT_BINARY_DIR}/libssh/include)\nlist(APPEND MSCP_BUILD_INCLUDE_DIRS\n\t${mscp_SOURCE_DIR}/src\n\t${CMAKE_CURRENT_BINARY_DIR}/libssh/include)\n\nlist(APPEND MSCP_LINK_LIBS ssh-static)\nif(BUILD_CONAN)\n\tfind_package(ZLIB REQUIRED)\n\tfind_package(OpenSSL REQUIRED)\n\tlist(APPEND MSCP_LINK_LIBS ZLIB::ZLIB)\n\tlist(APPEND MSCP_LINK_LIBS OpenSSL::Crypto)\nendif()\n\n\n# Symbol check\ncheck_symbol_exists(htonll\tarpa/inet.h\tHAVE_HTONLL)\ncheck_symbol_exists(ntohll\tarpa/inet.h\tHAVE_NTOHLL)\ncheck_symbol_exists(strlcat\tstring.h\tHAVE_STRLCAT)\nif (NOT HAVE_STRLCAT)\n\tlist(APPEND OPENBSD_COMPAT_SRC src/openbsd-compat/strlcat.c)\nendif()\n\n\n# generate config.h in build dir\nconfigure_file(\n\t${mscp_SOURCE_DIR}/include/config.h.in\n\t${CMAKE_CURRENT_BINARY_DIR}/include/config.h)\nlist(APPEND MSCP_BUILD_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}/include)\n\n\n# libmscp.a\nset(LIBMSCP_SRC\n\tsrc/mscp.c src/ssh.c src/fileops.c src/path.c src/checkpoint.c\n\tsrc/bwlimit.c src/platform.c src/print.c src/pool.c src/strerrno.c\n\t${OPENBSD_COMPAT_SRC})\nadd_library(mscp-static STATIC ${LIBMSCP_SRC})\ntarget_include_directories(mscp-static\n\tPRIVATE ${MSCP_BUILD_INCLUDE_DIRS} ${mscp_SOURCE_DIR}/include)\ntarget_compile_options(mscp-static PRIVATE ${MSCP_COMPILE_OPTS})\ntarget_link_libraries(mscp-static PRIVATE ${MSCP_LINK_LIBS})\nset_target_properties(mscp-static\n\tPROPERTIES\n\tOUTPUT_NAME\tmscp)\n\n# mscp executable\nlist(APPEND MSCP_LINK_LIBS m pthread)\n\nadd_executable(mscp src/main.c)\ntarget_include_directories(mscp\n\tPRIVATE ${MSCP_BUILD_INCLUDE_DIRS} ${mscp_SOURCE_DIR}/include)\ntarget_link_libraries(mscp mscp-static ${MSCP_LINK_LIBS})\nif (BUILD_STATIC)\n\ttarget_link_options(mscp PRIVATE -static)\nendif()\ntarget_compile_options(mscp PRIVATE ${MSCP_COMPILE_OPTS})\n\n\ninstall(TARGETS mscp RUNTIME DESTINATION bin)\n\n\n# 
mscp manpage and document\nconfigure_file(\n\t${mscp_SOURCE_DIR}/doc/mscp.1.in\n\t${PROJECT_BINARY_DIR}/mscp.1)\n\nadd_custom_target(update-rst\n\tCOMMENT \"Update doc/mscp.rst from mscp.1.in\"\n\tWORKING_DIRECTORY ${PROJECT_BINARY_DIR}\n\tCOMMAND\n\tpandoc -s -f man mscp.1 -t rst -o ${PROJECT_SOURCE_DIR}/doc/mscp.rst)\n\ninstall(FILES ${PROJECT_BINARY_DIR}/mscp.1\n\tDESTINATION ${CMAKE_INSTALL_MANDIR}/man1)\n\n\n# Test\nadd_test(NAME\tpytest\n\tCOMMAND\tpython3 -m pytest -v\n\t\t--mscp-path=${PROJECT_BINARY_DIR}/mscp ${PROJECT_SOURCE_DIR}/test\n\tWORKING_DIRECTORY\t${PROJECT_BINARY_DIR})\n\nenable_testing()\n\n\n\n\n# Custom targets to build and test mscp in docker containers.\n# foreach(IN ZIP_LISTS) (cmake >= 3.17) can shorten the following lists.\n# However, ubuntu 20.04 has cmake 3.16.3. So this is a roundabout trick.\n#\n# When edit DIST_IDS and DIST_VERS, also edit .github/workflows/test.yaml\nlist(APPEND DIST_IDS  ubuntu ubuntu ubuntu rocky rocky almalinux alpine arch)\nlist(APPEND DIST_VERS  20.04  22.04  24.04   8.9   9.3       9.3   3.22 base)\n\nlist(LENGTH DIST_IDS _DIST_LISTLEN)\nmath(EXPR DIST_LISTLEN \"${_DIST_LISTLEN} - 1\")\n\nforeach(x RANGE ${DIST_LISTLEN})\n\tlist(GET DIST_IDS\t${x} DIST_ID)\n\tlist(GET DIST_VERS\t${x} DIST_VER)\n\n\tset(DOCKER_IMAGE mscp-${DIST_ID}:${DIST_VER})\n\tset(DOCKER_INDEX ${DIST_ID}-${DIST_VER})\n\texecute_process(\n\t\tCOMMAND ${CMAKE_SOURCE_DIR}/scripts/install-build-deps.sh\n\t\t--dont-install --platform Linux-${DIST_ID}\n\t\tOUTPUT_VARIABLE REQUIREDPKGS\n\t\tOUTPUT_STRIP_TRAILING_WHITESPACE)\n\n\tadd_custom_target(docker-build-${DOCKER_INDEX}\n\t\tCOMMENT \"Build mscp in ${DOCKER_IMAGE} container\"\n\t\tWORKING_DIRECTORY ${mscp_SOURCE_DIR}\n\t\tCOMMAND\n\t\t${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS}\n\t\t-t ${DOCKER_IMAGE} -f Dockerfile/${DOCKER_INDEX}.Dockerfile .)\n\n\tadd_custom_target(docker-build-${DOCKER_INDEX}-no-cache\n\t\tCOMMENT \"Build mscp in ${DOCKER_IMAGE} container\"\n\t\tWORKING_DIRECTORY 
${mscp_SOURCE_DIR}\n\t\tCOMMAND\n\t\t${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS} --no-cache\n\t\t-t ${DOCKER_IMAGE} -f Dockerfile/${DOCKER_INDEX}.Dockerfile .)\n\n\tadd_custom_target(docker-test-${DOCKER_INDEX}\n\t\tCOMMENT \"Test mscp in ${DOCKER_IMAGE} container\"\n\t\tWORKING_DIRECTORY ${CMAKE_BINARY_DIR}\n\t\tCOMMAND\n\t\t${CE} run --init --rm --privileged\n\t\t--sysctl net.ipv6.conf.all.disable_ipv6=0\n\t\t--add-host=ip6-localhost:::1\n\t\t${DOCKER_IMAGE} /mscp/scripts/test-in-container.sh)\n\n\tadd_custom_target(docker-run-${DOCKER_INDEX}\n\t\tCOMMENT \"Start ${DOCKER_IMAGE} container\"\n\t\tWORKING_DIRECTORY ${CMAKE_BINARY_DIR}\n\t\tCOMMAND\n\t\t${CE} run --init --rm --privileged\n\t\t--sysctl net.ipv6.conf.all.disable_ipv6=0\n\t\t--add-host=ip6-localhost:::1\n\t\t-it\n\t\t${DOCKER_IMAGE} /mscp/scripts/test-in-container.sh bash)\n\n\tlist(APPEND DOCKER_BUILDS\t\tdocker-build-${DOCKER_INDEX})\n\tlist(APPEND DOCKER_BUILDS_NO_CACHE\tdocker-build-${DOCKER_INDEX}-no-cache)\n\tlist(APPEND DOCKER_TESTS\t\tdocker-test-${DOCKER_INDEX})\nendforeach()\n\nadd_custom_target(docker-build-all\t\tDEPENDS ${DOCKER_BUILDS})\nadd_custom_target(docker-build-all-no-cache\tDEPENDS ${DOCKER_BUILDS_NO_CACHE})\nadd_custom_target(docker-test-all\t\tDEPENDS ${DOCKER_TESTS})\n\n\n### debuild-related definitions\n\nset(DEBBUILDCONTAINER mscp-build-deb)\nexecute_process(\n\tCOMMAND ${CMAKE_SOURCE_DIR}/scripts/install-build-deps.sh\n\t--dont-install --platform Linux-ubuntu\n\tOUTPUT_VARIABLE REQUIREDPKGS_DEB\n\tOUTPUT_STRIP_TRAILING_WHITESPACE)\n\nadd_custom_target(build-deb\n\tCOMMENT \"build mscp deb files inside a container\"\n\tWORKING_DIRECTORY ${mscp_SOURCE_DIR}\n\tBYPRODUCTS ${CMAKE_BINARY_DIR}/debbuild\n\tCOMMAND\n\t${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS_DEB}\n\t-t ${DEBBUILDCONTAINER} -f Dockerfile/build-deb.Dockerfile .\n\tCOMMAND\n\t${CE} run --rm -v ${CMAKE_BINARY_DIR}:/out ${DEBBUILDCONTAINER}\n\tcp -r /debbuild /out/)\n\n\n### rpmbuild-related 
definitions\n\n# generate files for rpmbuild\nconfigure_file(\n\t${mscp_SOURCE_DIR}/rpm/mscp.spec.in\n\t${mscp_SOURCE_DIR}/rpm/mscp.spec\n\t@ONLY)\n#configure_file(\n#\t${mscp_SOURCE_DIR}/Dockerfile/build-srpm.Dockerfile.in\n#\t${mscp_SOURCE_DIR}/Dockerfile/build-srpm.Dockerfile\n#\t@ONLY)\n\n# Custom target to build mscp as a src.rpm in docker.\nset(RPMBUILDCONTAINER mscp-build-srpm)\nexecute_process(\n\tCOMMAND ${CMAKE_SOURCE_DIR}/scripts/install-build-deps.sh\n\t--dont-install --platform Linux-rocky\n\tOUTPUT_VARIABLE REQUIREDPKGS_RPM\n\tOUTPUT_STRIP_TRAILING_WHITESPACE)\n\nadd_custom_target(build-srpm\n\tCOMMENT \"Build mscp src.rpm inside a container\"\n\tWORKING_DIRECTORY ${mscp_SOURCE_DIR}\n\tCOMMAND\n\t${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS_RPM}\n\t--build-arg MSCP_VERSION=${MSCP_VERSION}\n\t-t ${RPMBUILDCONTAINER} -f Dockerfile/build-srpm.Dockerfile .\n\tCOMMAND\n\t${CE} run --rm -v ${CMAKE_BINARY_DIR}:/out ${RPMBUILDCONTAINER}\n\tbash -c \"cp /root/rpmbuild/SRPMS/mscp-*.src.rpm /out/\")\n\n### single-binary-build-related definitions\n\n# Custom target to get single binary mscp\nset(SINGLEBINARYFILE mscp.linux.${CMAKE_SYSTEM_PROCESSOR}.static)\nadd_custom_target(build-single-binary\n\tCOMMENT \"Build mscp as a single binary in alpine conatiner\"\n\tWORKING_DIRECTORY ${mscp_SOURCE_DIR}\n\tBYPRODUCTS ${CMAKE_BINARY_DIR}/${SINGLEBINARYFILE}\n\tDEPENDS docker-build-alpine-3.22\n\tCOMMAND\n\t${CE} run --rm -v ${CMAKE_BINARY_DIR}:/out mscp-alpine:3.22\n\tcp /mscp/build/mscp /out/${SINGLEBINARYFILE})\n\n\nadd_custom_target(build-pkg-all\n\tDEPENDS build-deb build-srpm build-single-binary)\n"
  },
  {
    "path": "Dockerfile/README.md",
    "content": "\nDockerfiles for building and testing mscp.\n\nCMake provides custom targets to build and test mscp in the containers.\nSee `make docker-*` targets. `make docker-build-all` builds all\ncontainer images, and `make docker-test-all` runs the test in all\ncontainer images."
  },
  {
    "path": "Dockerfile/almalinux-9.3.Dockerfile",
    "content": "FROM almalinux:9.3\n\nARG REQUIREDPKGS\n\n# install pytest, sshd for test, and rpm-build\nRUN set -ex && yum -y install \\\n\t${REQUIREDPKGS} python3 python3-pip python3-devel \\\n\topenssh openssh-server openssh-clients rpm-build\n\nRUN python3 -m pip install pytest\n\n\n# preparation for sshd\nRUN mkdir /var/run/sshd        \\\n\t&& ssh-keygen -A\t\\\n        && ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n        && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# create test user\nRUN useradd -m -d /home/test test       \\\n        && echo \"test:userpassword\" | chpasswd \\\n        && mkdir -p /home/test/.ssh     \\\n        && ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\" \\\n        && cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n        && chown -R test:test /home/test \\\n        && chown -R test:test /home/test/.ssh\n\nRUN rm -rf /run/nologin\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}\t\t\t\\\n        && rm -rf build\t\t\t\\\n        && cmake -B build\t\t\\\n        && cd ${mscpdir}/build          \\\n\t&& make\t-j 2\t\t\t\\\n\t&& make install\n"
  },
  {
    "path": "Dockerfile/alpine-3.22.Dockerfile",
    "content": "FROM alpine:3.22\n\n# do not use REQUIREDPKGS build argument because\n# this Dockerfile compiles mscp with conan, so we do not need\n# libssl-dev and zlib-dev\n\n# Build mscp with conan to create single binary mscp\n\nRUN apk add --no-cache \\\n\tgcc make cmake libc-dev \\\n\tlinux-headers openssh bash perl \\\n\tpython3 py3-pip python3-dev py3-pytest g++\n\nRUN pip3 install --break-system-packages conan\n\n# preparation for sshd\nRUN ssh-keygen -A \\\n\t&& mkdir /var/run/sshd        \\\n\t&& ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n\t&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# disable PerSourcePenalties, which would disturb test:\n# https://undeadly.org/cgi?action=article;sid=20240607042157\nRUN echo \"PerSourcePenalties=no\" > /etc/ssh/sshd_config.d/90-mscp-test.conf\n\n# create test user\nRUN addgroup -S test \\\n\t&& adduser -S test -G test \\\n        && echo \"test:userpassword\" | chpasswd \\\n        && mkdir -p /home/test/.ssh     \\\n        && ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\" \\\n        && cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n        && chown -R test:test /home/test \\\n        && chown -R test:test /home/test/.ssh\n\n\n# Build mscp as a single binary\nRUN conan profile detect --force\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\nRUN cd ${mscpdir}\t\t\t\t\t\t\t\\\n\t&& rm -rf build\t\t\t\t\t\t\t\\\n\t&& conan install . --output-folder=build --build=missing\t\\\n\t&& cd ${mscpdir}/build\t\t\t\t\t\t\\\n\t&& cmake ..\t\t\t\t\t\t\t\\\n\t\t-DCMAKE_BUILD_TYPE=Release\t\t\t\t\\\n\t\t-DCMAKE_TOOLCHAIN_FILE=conan_toolchain.cmake\t\t\\\n\t\t-DBUILD_CONAN=ON -DBUILD_STATIC=ON\t\t\t\\\n\t&& make\t-j 2\t\t\t\t\t\t\t\\\n\t&& make install\n\n"
  },
  {
    "path": "Dockerfile/arch-base.Dockerfile",
    "content": "FROM archlinux:base\n\nARG REQUIREDPKGS\n\n# install pytest and openssh for test\nRUN set -ex && pacman -Syy && pacman --noconfirm -S ${REQUIREDPKGS} openssh python-pytest\n\nRUN mkdir /var/run/sshd        \\\n        && ssh-keygen -A        \\\n        && ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n        && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# disable PerSourcePenalties, which would disturb test:\n# https://undeadly.org/cgi?action=article;sid=20240607042157\nRUN echo \"PerSourcePenalties=no\" > /etc/ssh/sshd_config.d/90-mscp-test.conf\n\n# create test user\nRUN useradd -m -d /home/test test       \\\n        && echo \"test:userpassword\" | chpasswd \\\n        && mkdir -p /home/test/.ssh     \\\n        && ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\" \\\n        && cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n        && chown -R test:test /home/test \\\n        && chown -R test:test /home/test/.ssh\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}                       \\\n        && rm -rf build                 \\\n        && cmake -B build               \\\n        && cd ${mscpdir}/build          \\\n        && make -j 2                    \\\n        && make install\n"
  },
  {
    "path": "Dockerfile/build-deb.Dockerfile",
    "content": "FROM ubuntu:22.04\n\nARG REQUIREDPKGS\n\nARG DEBIAN_FRONTEND=noninteractive\nRUN set -ex && apt-get update && apt-get install -y --no-install-recommends \\\n\t${REQUIREDPKGS} ca-certificates \\\n\tbuild-essential devscripts debhelper gcc make cmake\n\nARG mscpdir=\"/debbuild/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir} \t\\\n\t&& debuild -us -uc -S \\\n\t&& mv ${mscpdir} /\n\n# Then all debuild output files exist at /debbuild\n\n"
  },
  {
    "path": "Dockerfile/build-srpm.Dockerfile",
    "content": "FROM rockylinux:9\n\nARG REQUIREDPKGS\nARG MSCP_VERSION\n\n# install pytest, sshd for test, and rpm-build\nRUN set -ex && yum -y install ${REQUIREDPKGS} rpm-build rpmdevtools\n\nARG mscpdir=\"/mscp-${MSCP_VERSION}\"\nARG mscptgz=\"mscp-${MSCP_VERSION}.tar.gz\"\n\nCOPY . ${mscpdir}\n\n# prepare rpmbuild\nRUN rpmdev-setuptree \\\n\t&& rm -rf ${mscpdir}/build\t\\\n\t&& tar zcvf /${mscptgz} --exclude-vcs ${mscpdir}\t\\\n\t&& cp /${mscptgz} ~/rpmbuild/SOURCES/\t\\\n\t&& cp ${mscpdir}/rpm/mscp.spec ~/rpmbuild/SPECS/\n\n# build rpm and src.rpm\nRUN rpmbuild -ba ~/rpmbuild/SPECS/mscp.spec\n"
  },
  {
    "path": "Dockerfile/rocky-8.9.Dockerfile",
    "content": "FROM rockylinux:8.9\n\nARG REQUIREDPKGS\n\n# install pytest, sshd for test, and rpm-build\nRUN set -ex && yum -y install \\\n\t${REQUIREDPKGS}\t\\\n\tpython3 python3-pip python3-devel \\\n\topenssh openssh-server openssh-clients rpm-build\n\nRUN python3 -m pip install pytest\n\n\n# preparation for sshd\nRUN mkdir /var/run/sshd        \\\n\t&& ssh-keygen -A\t\\\n        && ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n        && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# create test user\nRUN useradd -m -d /home/test test       \\\n        && echo \"test:userpassword\" | chpasswd \\\n        && mkdir -p /home/test/.ssh     \\\n        && ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\" \\\n        && cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n        && chown -R test:test /home/test \\\n        && chown -R test:test /home/test/.ssh\n\nRUN rm -rf /run/nologin\n\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}\t\t\t\\\n        && rm -rf build\t\t\t\\\n        && cmake -B build\t\t\\\n        && cd ${mscpdir}/build          \\\n\t&& make\t-j 2\t\t\t\\\n\t&& make install\n"
  },
  {
    "path": "Dockerfile/rocky-9.3.Dockerfile",
    "content": "FROM rockylinux:9.3\n\nARG REQUIREDPKGS\n\n# install pytest, sshd for test, and rpm-build\nRUN set -ex && yum -y install \\\n\t${REQUIREDPKGS}\t\\\n\tpython3 python3-pip python3-devel \\\n\topenssh openssh-server openssh-clients rpm-build\n\nRUN python3 -m pip install pytest\n\n\n# preparation for sshd\nRUN mkdir /var/run/sshd        \\\n\t&& ssh-keygen -A\t\\\n        && ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n        && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# create test user\nRUN useradd -m -d /home/test test       \\\n        && echo \"test:userpassword\" | chpasswd \\\n        && mkdir -p /home/test/.ssh     \\\n        && ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\" \\\n        && cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n        && chown -R test:test /home/test \\\n        && chown -R test:test /home/test/.ssh\n\nRUN rm -rf /run/nologin\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}\t\t\t\\\n        && rm -rf build\t\t\t\\\n        && cmake -B build\t\t\\\n        && cd ${mscpdir}/build          \\\n\t&& make\t-j 2\t\t\t\\\n\t&& make install\n\n"
  },
  {
    "path": "Dockerfile/ubuntu-20.04.Dockerfile",
    "content": "FROM ubuntu:20.04\n\nARG REQUIREDPKGS\n\nARG DEBIAN_FRONTEND=noninteractive\nRUN set -ex && apt-get update && apt-get install -y --no-install-recommends \\\n\t${REQUIREDPKGS} ca-certificates python3 python3-pip python3-dev openssh-server\n\nRUN python3 -m pip install pytest\n\n# preparation for sshd\nRUN mkdir /var/run/sshd        \\\n\t&& ssh-keygen -A\t\\\n        && ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n        && cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# create test user\nRUN useradd -m -d /home/test test       \\\n        && echo \"test:userpassword\" | chpasswd \\\n        && mkdir -p /home/test/.ssh     \\\n        && ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\" \\\n        && cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n        && chown -R test:test /home/test \\\n        && chown -R test:test /home/test/.ssh\n\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}\t\t\t\\\n\t&& rm -rf build\t\t\t\\\n\t&& cmake -B build\t\t\\\n\t&& cd ${mscpdir}/build\t\t\\\n\t&& make\t-j 2\t\t\t\\\n\t&& make install\n\n"
  },
  {
    "path": "Dockerfile/ubuntu-22.04.Dockerfile",
    "content": "FROM ubuntu:22.04\n\nARG REQUIREDPKGS\n\nARG DEBIAN_FRONTEND=noninteractive\nRUN set -ex && apt-get update && apt-get install -y --no-install-recommends \\\n\t${REQUIREDPKGS} ca-certificates python3 python3-pip python3-dev openssh-server\n\nRUN python3 -m pip install pytest\n\n# preparation for sshd\nRUN mkdir /var/run/sshd        \\\n\t&& ssh-keygen -A\t\\\n\t&& ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n\t&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# create test user\nRUN useradd -m -d /home/test test\t\\\n\t&& echo \"test:userpassword\" | chpasswd \\\n\t&& mkdir -p /home/test/.ssh\t\\\n\t&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\"\t\\\n\t&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n\t&& chown -R test:test /home/test \\\n\t&& chown -R test:test /home/test/.ssh\n\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}\t\t\t\\\n\t&& rm -rf build\t\t\t\\\n\t&& cmake -B build\t\t\\\n\t&& cd ${mscpdir}/build\t\t\\\n\t&& make\t-j 2\t\t\t\\\n\t&& make install\n"
  },
  {
    "path": "Dockerfile/ubuntu-24.04.Dockerfile",
    "content": "FROM ubuntu:24.04\n\nARG REQUIREDPKGS\n\nARG DEBIAN_FRONTEND=noninteractive\nRUN set -ex && apt-get update && apt-get install -y --no-install-recommends \\\n\t${REQUIREDPKGS} ca-certificates openssh-server vim-tiny \\\n\tpython3 python3-pip python3-dev python3-pytest\n\n\n# preparation for sshd\nRUN mkdir /var/run/sshd        \\\n\t&& ssh-keygen -A\t\\\n\t&& ssh-keygen -f /root/.ssh/id_rsa -N \"\"                \\\n\t&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys\n\n# create test user\nRUN useradd -m -d /home/test test\t\\\n\t&& echo \"test:userpassword\" | chpasswd \\\n\t&& mkdir -p /home/test/.ssh\t\\\n\t&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N \"keypassphrase\"\t\\\n\t&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \\\n\t&& chown -R test:test /home/test \\\n\t&& chown -R test:test /home/test/.ssh\n\n\nARG mscpdir=\"/mscp\"\n\nCOPY . ${mscpdir}\n\n# build\nRUN cd ${mscpdir}\t\t\t\\\n\t&& rm -rf build\t\t\t\\\n\t&& cmake -B build\t\t\\\n\t&& cd ${mscpdir}/build\t\t\\\n\t&& make\t-j 2\t\t\t\\\n\t&& make install\n"
  },
  {
    "path": "Doxyfile",
    "content": "\nPROJECT_NAME\t= libmscp\n\nGENERATE_HTML\t= YES\nGENERATE_CHI\t= NO\nGENERATE_LATEX\t= NO\nGENERATE_RTF\t= NO\nGENERATE_MAN\t= NO\n\nSOURCE_BROWSER\t= YES\n\nINPUT\t= src include\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<https://www.gnu.org/licenses/why-not-lgpl.html>.\n"
  },
  {
    "path": "README.md",
    "content": "# mscp: multi-threaded scp\n\n[![build on ubuntu](https://github.com/upa/mscp/actions/workflows/build-ubuntu.yml/badge.svg)](https://github.com/upa/mscp/actions/workflows/build-ubuntu.yml)\n[![build on macOS](https://github.com/upa/mscp/actions/workflows/build-macos.yml/badge.svg)](https://github.com/upa/mscp/actions/workflows/build-macos.yml)\n[![build on FreeBSD](https://github.com/upa/mscp/actions/workflows/build-freebsd.yml/badge.svg)](https://github.com/upa/mscp/actions/workflows/build-freebsd.yml)\n[![test](https://github.com/upa/mscp/actions/workflows/test.yml/badge.svg)](https://github.com/upa/mscp/actions/workflows/test.yml)\n\n\n\n`mscp`, a variant of `scp`, copies files over multiple SSH (SFTP)\nconnections by multiple threads. It enables transferring (1) multiple\nfiles simultaneously and (2) a large file in parallel, reducing the\ntransfer time for a lot of/large files over networks.\n\nYou can use `mscp` like `scp`, for example:\n\n```shell-session\n$ mscp srcfile user@example.com:dstfile\n```\n\nRemote hosts only need to run standard `sshd` supporting the SFTP\nsubsystem (e.g. openssh-server), and you need to be able to ssh to the\nhosts as usual. `mscp` does not require anything else.\n\n\nhttps://github.com/upa/mscp/assets/184632/19230f57-be7f-4ef0-98dd-cb4c460f570d\n\n--------------------------------------------------------------------\n\nMajor differences from `scp` on usage:\n\n- Remote-to-remote copy is not supported.\n- `-r` option is not needed to transfer directories.\n- Checkpointing for resuming failed transfer is supported.\n- and any other differences I have not implemented and noticed.\n\nPaper:\n- Ryo Nakamura and Yohei Kuga. 2023. Multi-threaded scp: Easy and Fast File Transfer over SSH. In Practice and Experience in Advanced Research Computing (PEARC '23). Association for Computing Machinery, New York, NY, USA, 320–323. 
https://doi.org/10.1145/3569951.3597582\n\n## Install\n\n- macOS\n\n```console\n# Homebrew\nbrew install upa/tap/mscp\n\n# MacPorts\nsudo port install mscp\n```\n\n- Ubuntu\n```console\nsudo add-apt-repository ppa:upaa/mscp\nsudo apt-get install mscp\n```\n\n- RHEL-based distributions\n```console\nsudo dnf copr enable upaaa/mscp\nsudo dnf install mscp\n```\n\n\n## Build\n\nmscp depends on a patched [libssh](https://www.libssh.org/). The\npatch introduces asynchronous SFTP Write, which is derived from\nhttps://github.com/limes-datentechnik-gmbh/libssh (see [Re: SFTP Write\nasync](https://archive.libssh.org/libssh/2020-06/0000004.html)).\n\nWe test building mscp on Linux (Ubuntu, Rocky, Alma, and Alpine),\nmacOS, and FreeBSD.\n\n\n```console\n# clone this repository\ngit clone https://github.com/upa/mscp.git\ncd mscp\n\n# prepare patched libssh\ngit submodule update --init\npatch -d libssh -p1 < patch/$(git -C libssh describe).patch\n\n# install build dependency\nbash ./scripts/install-build-deps.sh\n\n# configure mscp\nmkdir build && cd build\ncmake ..\n\n# in macOS, you may need OPENSSL_ROOT_DIR for cmake:\n# cmake .. -DOPENSSL_ROOT_DIR=$(brew --prefix)/opt/openssl@3\n\n# build\nmake\n\n# install the mscp binary to CMAKE_INSTALL_PREFIX/bin (usually /usr/local/bin)\nmake install\n```\n\nSource tar balls (`mscp-X.X.X.tar.gz`, not `Source code`) in\n[Releases page](https://github.com/upa/mscp/releases) contain the patched version\nof libssh. So you can start from cmake with it.\n\n\n## Documentation\n\n[manpage](/doc/mscp.rst) is available.\n"
  },
  {
    "path": "VERSION",
    "content": "0.2.4\n"
  },
  {
    "path": "conanfile.txt",
    "content": "[requires]\nzlib/1.2.11\nopenssl/1.1.1t\n\n[generators]\nCMakeDeps\nCMakeToolchain\n"
  },
  {
    "path": "doc/RELEASE.md",
    "content": "\n\n## Build mscp as deb package\n\n`make build-deb` produces a mscp deb package and related files. This\ntarget builds mscp with `debuild` inside a docker container\n(Dockerfile is `docker/build-deb.Docerfile`).\n\n\n```console\nmkdir build && cd build && cmake ..\nmake build-deb\n```\n\nAfter that:\n\n```console\n$ ls debbuild\nmscp_0.1.4.dsc\t\t mscp_0.1.4_source.buildinfo  mscp_0.1.4.tar.xz\nmscp_0.1.4_source.build  mscp_0.1.4_source.changes\n```\n\n### To publish mscp in launchpad PPA:\n\n1. write changes in `debian/changelog` at main branch (the date\n   command needed here is `date -R`)\n2. switch to `ppa-focal` or `ppa-jammy` branch\n3. rebase to the `main` branch and modify `debian/changes`:\n   * change `mscp (X.X.X) UNRELEASED;` to `mscp (X.X.X-1~RELEASENAME) RELEASENAME;`\n\t where `RELEASENAME` is `focal` or `jammy`.\n4. run `make build-deb` at the build directory and `cd debbuild`\n5. sign the files with `debsign -k [GPGKEYID] mscp_X.X.X~X_source.changes`\n6. upload the files with `dput ppa:upaa/mscp mscp_X.X.X~X_source.changes`\n\n\n## Build mscp as (source) rpm package\n\n`make build-srpm` produces a mscp src.rpm package. This target builds\nmscp with `rpmbuild` inside a docker container (Dockerfile is\n`docker/build-srpm.Dockerfile`, generated from\n`build-srpm.Dockerfile.in` by cmake).\n\n```console\nmkdir build && cd build && cmake ..\nmake build-srpm\n```\n\nAfter that:\n\n```console\n$ ls *.rpm\nmscp-0.1.3-1.el9.src.rpm\n```\n\n### To publish mscp in COPR:\n\n1. update `rpm/mscp.spec.in`, the `changelog` section (the date\n   command needed here is `date \"+%a %b %d %Y\"`)\n2. run `make build-srpm`\n3. download `mscp-X.X.X-1.yyy.src.rpm`\n4. upload the src.rpm to Build page at COPR.\n\n\n\n## Update Document\n\nThe document is `doc/mscp.rst` (at present). When `mscp.1.in` is\nmodified, run `make update-rst` to make it up to date.\n\n```console\nmkdir build && cd build && cmake ..\nmake update-rst\n```\n\n"
  },
  {
    "path": "doc/mscp.1.in",
    "content": ".TH MSCP 1 \"@MSCP_BUILD_VERSION@\" \"mscp\" \"User Commands\"\n\n.SH NAME\nmscp \\- copy files over multiple SSH connections\n\n.SH SYNOPSIS\n\n.B mscp\n.RB [ \\-46vqDpdNh ]\n[\\c\n.BI \\-n \\ NR_CONNECTIONS\\c\n]\n[\\c\n.BI \\-m \\ COREMASK\\c\n]\n[\\c\n.BI \\-u \\ MAX_STARTUPS\\c\n]\n[\\c\n.BI \\-I \\ INTERVAL\\c\n]\n[\\c\n.BI \\-W \\ CHECKPOINT\\c\n]\n[\\c\n.BI \\-R \\ CHECKPOINT\\c\n]\n[\\c\n.BI \\-s \\ MIN_CHUNK_SIZE\\c\n]\n[\\c\n.BI \\-S \\ MAX_CHUNK_SIZE\\c\n]\n[\\c\n.BI \\-a \\ NR_AHEAD\\c\n]\n[\\c\n.BI \\-b \\ BUF_SIZE\\c\n]\n[\\c\n.BI \\-L \\ LIMIT_BITRATE\\c\n]\n[\\c\n.BI \\-l \\ LOGIN_NAME\\c\n]\n[\\c\n.BI \\-P \\ PORT\\c\n]\n[\\c\n.BI \\-F \\ SSH_CONFIG\\c\n]\n[\\c\n.BI \\-o \\ SSH_OPTION\\c\n]\n[\\c\n.BI \\-i \\ IDENTITY\\c\n]\n[\\c\n.BI \\-J \\ DESTINATION\\c\n]\n[\\c\n.BI \\-c \\ CIPHER\\c\n]\n[\\c\n.BI \\-M \\ HMAC\\c\n]\n[\\c\n.BI \\-C \\ COMPRESS\\c\n]\n[\\c\n.BI \\-g \\ CONGESTION\\c\n]\n.I source ... target\n\n.SH DESCRIPTION\n\n.PP\n.B mscp\ncopies files over multiple SSH (SFTP) connections by multiple\nthreads. It enables transferring (1) multiple files simultaneously and\n(2) a large file in parallel, reducing the transfer time for a lot\nof/large files over networks.\n\n.PP\nThe usage of\n.B mscp\nfollows the\n.B scp\ncommand of\n.I OpenSSH,\nfor example:\n\n.nf\n    $ mscp srcfile user@example.com:dstfile\n.fi\n\nRemote hosts only need to run standard\n.B sshd\nsupporting the SFTP subsystem, and users need to be able to\n.B ssh\nto the hosts as usual.\n.B mscp\ndoes not require anything else.\n\n.PP\n.B mscp\nuses\n.UR https://\\:www\\:.libssh\\:.org\nlibssh\n.UE\nas its SSH implementation. Thus, supported SSH features, for example,\nauthentication, encryption, and various options in ssh_config, follow\nwhat\n.I libssh\nsupports.\n\n.SH OPTIONS\n.TP\n.B \\-n \\fINR_CONNECTIONS\\fR\nSpecifies the number of SSH connections. 
The default value is\ncalculated from the number of CPU cores on the host with the following\nformula: floor(log(nr_cores)*2)+1.\n\n.TP\n.B \\-m \\fICOREMASK\\fR\nConfigures CPU cores to be used by the hexadecimal bitmask. For\nexample, -m 0x25 pins threads onto CPU cores 0, 2, and 5. The default\nvalue is not specified: all CPU cores are used and no threads are\npinned to any cores.\n\n.TP\n.B \\-u \\fIMAX_STARTUPS\\fR\nSpecifies the number of concurrent unauthenticated SSH connection\nattempts.\n.B sshd\nlimits the number of simultaneous SSH connection attempts by\n.I MaxStartups\nin\n.I sshd_config.\nThe default\n.I MaxStartups\nis 10; thus, we set the default MAX_STARTUPS 8.\n\n.TP\n.B \\-I \\fIINTERVAL\\fR\nSpecifies the interval (in seconds) between SSH connection\nattempts. Some firewall products treat SSH connection attempts from a\nsingle source IP address for a short period as a brute force attack.\nThis option inserts intervals between the attempts to avoid being\ndetermined as an attack. The default value is 0.\n\n.TP\n.B \\-W \\fICHECKPOINT\\fR\nSpecifies a checkpoint file to save the state of a failed\ntransfer. When transferring fails due to, for example, connection\ndisruption or user interrupt,\n.B mscp\nwrites the information about the remaining files and chunks to the\nspecified checkpoint file.\n.B \\-W\noption with\n.B \\-D\n(dry-run mode) only writes a checkpoint file and exits.\n\n\n.TP\n.B \\-R \\fICHECKPOINT\\fR\nSpecifies a checkpoint file to resume a transfer. When a checkpoint\nfile is passed,\n.B mscp\nreads the checkpoint to load a remote host, copy direction, and files\nand their chunks to be transferred. Namely,\n.B mscp\ncan resume a past failed transfer from the checkpoint. Resuming with a\ncheckpoint does not require\n.I source ... target\narguments. Other SSH connection options, such as port number and\nconfig file, should be specified as with the failed run. In addition,\ncheckpoint files have file paths as relative paths. 
Thus, you must run\n.B mscp\nin the same working directory as the failed run. You can see the\ncontents of a checkpoint file with the\n.B mscp \\-vv \\-D \\-R CHECKPOINT\ncommand (Dry-run mode).  Note that the checkpoint file is not\nautomatically removed after the resumed transfer ends\nsuccessfully. Users should check the return value of\n.B mscp\nand remove the checkpoint if it returns 0.\n\n\n.TP\n.B \\-s \\fIMIN_CHUNK_SIZE\\fR\nSpecifies the minimum chunk size.\n.B mscp\ndivides a single file into chunks and copies the chunks in\nparallel. The default value is 16M bytes.\n\n.TP\n.B \\-S \\fIMAX_CHUNK_SIZE\\fR\nSpecifies the maximum chunk size. The default is file size divided by\nthe number of connections and divided by 4. If the calculated value\nis smaller than the\n.B MIN_CHUNK_SIZE\nvalue,\nMIN_CHUNK_SIZE is used.\n\n.TP\n.B \\-a \\fINR_AHEAD\\fR\nSpecifies the number of inflight SFTP commands. The default value is\n32.\n\n.TP\n.B \\-b \\fIBUF_SIZE\\fR\nSpecifies the buffer size for I/O and transfer over SFTP. The default\nvalue is 16384. Note that the SSH specification restricts buffer size\ndelivered over SSH. Changing this value is not recommended at present.\n\n.TP\n.B \\-L \\fILIMIT_BITRATE\\fR\nLimits the bitrate, specified with k (K), m (M), and g (G), e.g., 100m\nindicates 100 Mbps.\n\n.TP\n.B \\-4\nUses IPv4 addresses only.\n\n.TP\n.B \\-6\nUses IPv6 addresses only.\n\n.TP\n.B \\-v\nIncrements the verbose output level.\n\n.TP\n.B \\-q\nQuiet mode: turns off all outputs.\n\n.TP\n.B \\-D\nDry-run mode: it scans source files to be copied, calculates chunks,\nresolves destination file paths, and exits. Dry-run mode with\n.B -vv\noption can confirm files to be copied and their destination paths.\n\n.TP\n.B \\-r\nNo effect.\n.B mscp\ncopies recursively if a source path is a directory. 
This option exists\nfor just compatibility.\n\n.TP\n.B \\-l \\fILOGIN_NAME\\fR\nSpecifies the username to log in on the remote machine as with\n.I ssh(1).\n\n.TP\n.B \\-P \\fIPORT\\fR\nSpecifies the port number to connect to on the remote machine as with\n.I scp(1).\n\n.TP\n.B \\-F \\fISSH_CONFIG\\fR\nSpecifies an alternative per-user ssh configuration file. Note that\nacceptable options in the configuration file are what\n.I libssh\nsupports.\n\n.TP\n.B \\-o \\fISSH_OPTION\\fR\nSpecifies ssh options in the format used in ssh_config. Note that\nacceptable options are what\n.I libssh\nsupports.\n\n.TP\n.B \\-i \\fIIDENTITY\\fR\nSpecifies the identity file for public key authentication.\n\n.TP\n.B \\-J \\fIDESTINATION\\fR\nA shortcut to define a\n.B ProxyJump\nconfiguration directive. Each SFTP session of\n.B mscp\nconnects to the target host by first making an\n.B ssh\nconnection to the jump host described by\n.I destination.\n\n\n\n.TP\n.B \\-c \\fICIPHER\\fR\nSelects the cipher to use for encrypting the data transfer. See\n.B mscp -h\nor\n.B Ciphers\nin\n.UR https://\\:www\\:.libssh\\:.org/\\:features/\nlibssh features\n.UE .\n\n.TP\n.B \\-M \\fIHMAC\\fR\nSpecifies MAC hash algorithms. See\n.B mscp -h\nor\n.B MAC hashes\nin\n.UR https://\\:www\\:.libssh\\:.org/\\:features/\nlibssh features\n.UE .\n\n.TP\n.B \\-C \\fICOMPRESS\\fR\nEnables compression: yes, no, zlib, zlib@openssh.com. The default is\nnone. See\n.UR https://\\:www\\:.libssh\\:.org/\\:features/\nlibssh features\n.UE .\n\n.TP\n.B \\-g \\fICONGESTION\\fR\nSpecifies the TCP congestion control algorithm to use (Linux only).\nSee\n.B sysctl net.ipv4.tcp_allowed_congestion_control\nfor available values.\n\n.TP\n.B \\-p\nPreserves modification times and access times (file mode bits are\npreserved by default).\n\n.TP\n.B \\-d\nIncrements the ssh debug output level.\n\n.TP\n.B \\-N\nEnables Nagle's algorithm. 
It is disabled by default.\n\n.TP\n.B \\-h\nPrints help.\n\n.SH EXIT STATUS\nExit status is 0 on success,  and >0 if an error occurs.\n\n.SH ENVIRONMENT\n\n.PP\n.B mscp\nrecognizes the following environment variables.\n\n.TP\n.B MSCP_SSH_AUTH_PASSWORD\nThis environment variable passes a password for password\nauthentication to establish SSH connections.\n\n.TP\n.B MSCP_SSH_AUTH_PASSPHRASE\nThis environment variable passes a passphrase for public-key\nauthentication for establishing SSH connections.\n\n\n\n.SH NOTES\n\n.PP\n.B mscp\nuses glob(3) for globbing pathnames, including matching patterns for\nlocal and remote paths. However, globbing on the\n.I remote\nside does not work with musl libc (used in Alpine Linux and the\nsingle-binary version of mscp) because musl libc does not support\nGLOB_ALTDIRFUNC.\n\n.PP\n.B mscp\ndoes not support remote-to-remote copy, which\n.B scp\nsupports.\n\n.SH EXAMPLES\n\n.PP\nCopy a local file to a remote host with different name:\n\n.nf\n    $ mscp ~/src-file 10.0.0.1:copied-file\n.fi\n\n.PP\nCopy a local file and a directory to /tmp at a remote host:\n\n.nf\n    $ mscp ~/src-file dir1 10.0.0.1:/tmp\n.fi\n\n.PP\nSave a checkpoint if transfer fails:\n\n.nf\n    $ mscp -W mscp.checkpoint many-large-files 10.0.0.1:dst/\n.fi\n\n.PP\nCheck the remaining files and chunks, and resume the failed transfer:\n\n.nf\n    # Dump the content of a checkpoint and exit (dry-run mode)\n    $ mscp -vv -D -R mscp.checkpoint\n\n    # resume transferring from the checkpoint\n    $ mscp -R mscp.checkpoint\n.fi\n\n.PP\nIn a long fat network, following options might improve performance:\n\n.nf\n    $ mscp -n 64 -m 0xffff -a 64 -c aes128-gcm@openssh.com src 10.0.0.1:\n.fi\n\n.B -n\nincreases the number of SSH connections than default,\n.B -m\npins threads to specific CPU cores,\n.B -a\nincreases asynchronous inflight SFTP WRITE/READ commands, and\n.B -c aes128-gcm@openssh.com\nwill be faster than the default chacha20-poly1305 cipher, particularly\non 
hosts that support AES-NI.\n\n\n.SH \"SEE ALSO\"\n.BR scp (1),\n.BR ssh (1),\n.BR sshd (8).\n\n.SH \"PAPER REFERENCE\"\n\n\nRyo Nakamura and Yohei Kuga. 2023. Multi-threaded scp: Easy and Fast\nFile Transfer over SSH. In Practice and Experience in Advanced\nResearch Computing (PEARC '23). Association for Computing Machinery,\nNew York, NY, USA, 320–323.\n.UR https://\\:doi\\:.org/\\:10.1145/\\:3569951.3597582\nDOI\n.UE .\n\n\n.SH CONTACT INFORMATION\n.PP\nFor patches, bug reports, or feature requests, please open an issue on\n.UR https://\\:github\\:.com/\\:upa/\\:mscp\nGitHub\n.UE .\n\n.SH AUTHORS\nRyo Nakamura <upa@haeena.net>\n"
  },
  {
    "path": "doc/mscp.rst",
    "content": "====\nMSCP\n====\n\n:Date: v0.2.4\n\nNAME\n====\n\nmscp - copy files over multiple SSH connections\n\nSYNOPSIS\n========\n\n**mscp** [**-46vqDpdNh**] [ **-n** *NR_CONNECTIONS* ] [ **-m**\n*COREMASK* ] [ **-u** *MAX_STARTUPS* ] [ **-I** *INTERVAL* ] [ **-W**\n*CHECKPOINT* ] [ **-R** *CHECKPOINT* ] [ **-s** *MIN_CHUNK_SIZE* ] [\n**-S** *MAX_CHUNK_SIZE* ] [ **-a** *NR_AHEAD* ] [ **-b** *BUF_SIZE* ] [\n**-L** *LIMIT_BITRATE* ] [ **-l** *LOGIN_NAME* ] [ **-P** *PORT* ] [\n**-F** *SSH_CONFIG* ] [ **-o** *SSH_OPTION* ] [ **-i** *IDENTITY* ] [\n**-J** *DESTINATION* ] [ **-c** *CIPHER* ] [ **-M** *HMAC* ] [ **-C**\n*COMPRESS* ] [ **-g** *CONGESTION* ] *source ... target*\n\nDESCRIPTION\n===========\n\n**mscp** copies files over multiple SSH (SFTP) connections by multiple\nthreads. It enables transferring (1) multiple files simultaneously and\n(2) a large file in parallel, reducing the transfer time for a lot\nof/large files over networks.\n\nThe usage of **mscp** follows the **scp** command of *OpenSSH,* for\nexample:\n\n::\n\n       $ mscp srcfile user@example.com:dstfile\n\nRemote hosts only need to run standard **sshd** supporting the SFTP\nsubsystem, and users need to be able to **ssh** to the hosts as usual.\n**mscp** does not require anything else.\n\n**mscp** uses `libssh <https://www.libssh.org>`__ as its SSH\nimplementation. Thus, supported SSH features, for example,\nauthentication, encryption, and various options in ssh_config, follow\nwhat *libssh* supports.\n\nOPTIONS\n=======\n\n**-n NR_CONNECTIONS**\n   Specifies the number of SSH connections. The default value is\n   calculated from the number of CPU cores on the host with the\n   following formula: floor(log(nr_cores)*2)+1.\n\n**-m COREMASK**\n   Configures CPU cores to be used by the hexadecimal bitmask. For\n   example, -m 0x25 pins threads onto CPU cores 0, 2, and 5. 
The default\n   value is not specified: all CPU cores are used and no threads are\n   pinned to any cores.\n\n**-u MAX_STARTUPS**\n   Specifies the number of concurrent unauthenticated SSH connection\n   attempts. **sshd** limits the number of simultaneous SSH connection\n   attempts by *MaxStartups* in *sshd_config.* The default *MaxStartups*\n   is 10; thus, we set the default MAX_STARTUPS 8.\n\n**-I INTERVAL**\n   Specifies the interval (in seconds) between SSH connection attempts.\n   Some firewall products treat SSH connection attempts from a single\n   source IP address for a short period as a brute force attack. This\n   option inserts intervals between the attempts to avoid being\n   determined as an attack. The default value is 0.\n\n**-W CHECKPOINT**\n   Specifies a checkpoint file to save the state of a failed transfer.\n   When transferring fails due to, for example, connection disruption or\n   user interrupt, **mscp** writes the information about the remaining\n   files and chunks to the specified checkpoint file. **-W** option with\n   **-D** (dry-run mode) only writes a checkpoint file and exits.\n\n**-R CHECKPOINT**\n   Specifies a checkpoint file to resume a transfer. When a checkpoint\n   file is passed, **mscp** reads the checkpoint to load a remote host,\n   copy direction, and files and their chunks to be transferred. Namely,\n   **mscp** can resume a past failed transfer from the checkpoint.\n   Resuming with a checkpoint does not require *source ... target*\n   arguments. Other SSH connection options, such as port number and\n   config file, should be specified as with the failed run. In addition,\n   checkpoint files have file paths as relative paths. Thus, you must\n   run **mscp** in the same working directory as the failed run. You can\n   see the contents of a checkpoint file with the **mscp -vv -D -R\n   CHECKPOINT** command (Dry-run mode). 
Note that the checkpoint file is\n   not automatically removed after the resumed transfer ends\n   successfully. Users should check the return value of **mscp** and\n   remove the checkpoint if it returns 0.\n\n**-s MIN_CHUNK_SIZE**\n   Specifies the minimum chunk size. **mscp** divides a single file into\n   chunks and copies the chunks in parallel. The default value is 16M\n   bytes.\n\n**-S MAX_CHUNK_SIZE**\n   Specifies the maximum chunk size. The default is file size divided by\n   the number of connections and divided by 4. If the calculated value\n   is smaller than the **MIN_CHUNK_SIZE** value, MIN_CHUNK_SIZE is\n   used.\n\n**-a NR_AHEAD**\n   Specifies the number of inflight SFTP commands. The default value is\n   32.\n\n**-b BUF_SIZE**\n   Specifies the buffer size for I/O and transfer over SFTP. The default\n   value is 16384. Note that the SSH specification restricts buffer size\n   delivered over SSH. Changing this value is not recommended at\n   present.\n\n**-L LIMIT_BITRATE**\n   Limits the bitrate, specified with k (K), m (M), and g (G), e.g.,\n   100m indicates 100 Mbps.\n\n**-4**\n   Uses IPv4 addresses only.\n\n**-6**\n   Uses IPv6 addresses only.\n\n**-v**\n   Increments the verbose output level.\n\n**-q**\n   Quiet mode: turns off all outputs.\n\n**-D**\n   Dry-run mode: it scans source files to be copied, calculates chunks,\n   resolves destination file paths, and exits. Dry-run mode with **-vv**\n   option can confirm files to be copied and their destination paths.\n\n**-r**\n   No effect. **mscp** copies recursively if a source path is a\n   directory. This option exists for just compatibility.\n\n**-l LOGIN_NAME**\n   Specifies the username to log in on the remote machine as with\n   *ssh(1).*\n\n**-P PORT**\n   Specifies the port number to connect to on the remote machine as with\n   *scp(1).*\n\n**-F SSH_CONFIG**\n   Specifies an alternative per-user ssh configuration file. 
Note that\n   acceptable options in the configuration file are what *libssh*\n   supports.\n\n**-o SSH_OPTION**\n   Specifies ssh options in the format used in ssh_config. Note that\n   acceptable options are what *libssh* supports.\n\n**-i IDENTITY**\n   Specifies the identity file for public key authentication.\n\n**-J DESTINATION**\n   A shortcut to define a **ProxyJump** configuration directive. Each\n   SFTP session of **mscp** connects to the target host by first making\n   an **ssh** connection to the jump host described by *destination.*\n\n**-c CIPHER**\n   Selects the cipher to use for encrypting the data transfer. See\n   **mscp -h** or **Ciphers** in `libssh\n   features <https://www.libssh.org/features/>`__.\n\n**-M HMAC**\n   Specifies MAC hash algorithms. See **mscp -h** or **MAC hashes** in\n   `libssh features <https://www.libssh.org/features/>`__.\n\n**-C COMPRESS**\n   Enables compression: yes, no, zlib, zlib@openssh.com. The default is\n   none. See `libssh features <https://www.libssh.org/features/>`__.\n\n**-g CONGESTION**\n   Specifies the TCP congestion control algorithm to use (Linux only).\n   See **sysctl net.ipv4.tcp_allowed_congestion_control** for available\n   values.\n\n**-p**\n   Preserves modification times and access times (file mode bits are\n   preserved by default).\n\n**-d**\n   Increments the ssh debug output level.\n\n**-N**\n   Enables Nagle's algorithm. 
It is disabled by default.\n\n**-h**\n   Prints help.\n\nEXIT STATUS\n===========\n\nExit status is 0 on success, and >0 if an error occurs.\n\nENVIRONMENT\n===========\n\n**mscp** recognizes the following environment variables.\n\n**MSCP_SSH_AUTH_PASSWORD**\n   This environment variable passes a password for password\n   authentication to establish SSH connections.\n\n**MSCP_SSH_AUTH_PASSPHRASE**\n   This environment variable passes a passphrase for public-key\n   authentication for establishing SSH connections.\n\nNOTES\n=====\n\n**mscp** uses glob(3) for globbing pathnames, including matching\npatterns for local and remote paths. However, globbing on the *remote*\nside does not work with musl libc (used in Alpine Linux and the\nsingle-binary version of mscp) because musl libc does not support\nGLOB_ALTDIRFUNC.\n\n**mscp** does not support remote-to-remote copy, which **scp** supports.\n\nEXAMPLES\n========\n\nCopy a local file to a remote host with different name:\n\n::\n\n       $ mscp ~/src-file 10.0.0.1:copied-file\n\nCopy a local file and a directory to /tmp at a remote host:\n\n::\n\n       $ mscp ~/src-file dir1 10.0.0.1:/tmp\n\nSave a checkpoint if transfer fails:\n\n::\n\n       $ mscp -W mscp.checkpoint many-large-files 10.0.0.1:dst/\n\nCheck the remaining files and chunks, and resume the failed transfer:\n\n::\n\n       # Dump the content of a checkpoint and exit (dry-run mode)\n       $ mscp -vv -D -R mscp.checkpoint\n\n       # resume transferring from the checkpoint\n       $ mscp -R mscp.checkpoint\n\nIn a long fat network, following options might improve performance:\n\n::\n\n       $ mscp -n 64 -m 0xffff -a 64 -c aes128-gcm@openssh.com src 10.0.0.1:\n\n**-n** increases the number of SSH connections than default, **-m** pins\nthreads to specific CPU cores, **-a** increases asynchronous inflight\nSFTP WRITE/READ commands, and **-c aes128-gcm@openssh.com** will be\nfaster than the default chacha20-poly1305 cipher, particularly on hosts\nthat support 
AES-NI.\n\nSEE ALSO\n========\n\n**scp**\\ (1), **ssh**\\ (1), **sshd**\\ (8).\n\nPAPER REFERENCE\n===============\n\nRyo Nakamura and Yohei Kuga. 2023. Multi-threaded scp: Easy and Fast\nFile Transfer over SSH. In Practice and Experience in Advanced Research\nComputing (PEARC '23). Association for Computing Machinery, New York,\nNY, USA, 320–323. `DOI <https://doi.org/10.1145/3569951.3597582>`__.\n\nCONTACT INFORMATION\n===================\n\nFor patches, bug reports, or feature requests, please open an issue on\n`GitHub <https://github.com/upa/mscp>`__.\n\nAUTHORS\n=======\n\nRyo Nakamura <upa@haeena.net>\n"
  },
  {
    "path": "include/config.h.in",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _CONFIG_H_\n#define _CONFIG_H_\n\n#define MSCP_VERSION\t\t\"@MSCP_VERSION@\"\n#define MSCP_BUILD_VERSION\t\"@MSCP_BUILD_VERSION@\"\n\n\n/* Define to 1 if you have the strlcat function. */\n#cmakedefine HAVE_STRLCAT 1\n\n/* Define to 1 if you have the htonll function. */\n#cmakedefine HAVE_HTONLL 1\n\n/* Define to 1 if you have the ntohll function. */\n#cmakedefine HAVE_NTOHLL 1\n\n#endif /* _CONFIG_H_ */\n"
  },
  {
    "path": "include/mscp.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _MSCP_H_\n#define _MSCP_H_\n\n/**\n * @file mscp.h\n *\n * @brief mscp library header file.\n *\n * @mainpage\n *\n * libmscp is a library for multi-threaded scp. Project page is\n * https://github.com/upa/mscp.\n *\n * All public APIs of libmscp are defined in mscp.h. Basic usage of\n * libmscp is follows:\n *\n * 1. create mscp instance with mscp_init()\n * 2. set remote host and copy direction with mscp_set_remote()\n * 3. connect to remote host with mscp_connect()\n * 4. add path to source files with mscp_add_src_path()\n * 5. set path to destination with mscp_set_dst_path()\n * 6. start to scan source files with mscp_scan()\n * 7. start copy with mscp_start()\n * 8. wait for copy finished with mscp_join()\n * 9. cleanup mscp instance with mscp_cleanup() and mscp_free()\n */\n\n#include <stdbool.h>\n#include <limits.h>\n#include <stdlib.h>\n\n#define MSCP_DIRECTION_L2R\t1\t/** Indicates local to remote copy */\n#define MSCP_DIRECTION_R2L\t2\t/** Indicates remote to local copy */\n\n/**\n * @struct\tmscp_opts\n * @brief\tStructure configuring mscp.\n */\nstruct mscp_opts {\n\tint\tnr_threads;\t/** number of copy threads */\n\tint\tnr_ahead;\t/** number of SFTP commands on-the-fly */\n\tsize_t\tmin_chunk_sz;\t/** minimum chunk size (default 64MB) */\n\tsize_t\tmax_chunk_sz;\t/** maximum chunk size (default file size/nr_threads) */\n\tsize_t\tbuf_sz;\t\t/** buffer size, default 16k. */\n\tsize_t\tbitrate;\t/** bits-per-seconds to limit bandwidth */\n\tchar\t*coremask;\t/** hex to specifiy usable cpu cores */\n\tint\tmax_startups;\t/** sshd MaxStartups concurrent connections */\n\tint     interval;\t/** interval between SSH connection attempts */\n\tbool\tpreserve_ts;\t/** preserve file timestamps */\n\tint\tseverity; \t/** messaging severity. 
set MSCP_SEVERITY_* */\n};\n\n\n/**\n * @struct\tmscp_ssh_opts\n * @brief\tStructure configuring SSH connections\n */\nstruct mscp_ssh_opts {\n\t/* ssh options */\n\tchar\t*login_name;\t/** ssh username */\n\tchar\t*port;\t\t/** ssh port */\n\tint\tai_family;\t/** address family */\n\tchar\t*config;\t/** path to ssh_config, default ~/.ssh/config */\n\tchar\t**options;\t/** array of ssh_config options, terminated by NULL */\n\tchar\t*identity;\t/** path to private key */\n\tchar\t*proxyjump;\t/** ProxyJump configuration directive (shortcut) */\n\tchar\t*cipher;\t/** cipher spec */\n\tchar\t*hmac;\t\t/** hmac spec */\n\tchar\t*compress;\t/** yes, no, zlib@openssh.com */\n\tchar\t*ccalgo;\t/** TCP cc algorithm */\n\n\tchar\t*password;\t/** password auth password */\n\tchar\t*passphrase;\t/** passphrase for private key */\n\n\tint\tdebug_level;\t\t/** increment libssh debug output level */\n\tbool\tenable_nagle;\t\t/** enable Nagle's algorithm if true */\n};\n\n/** @def\n * Environment variable that passes password for ssh password auth\n */\n#define ENV_SSH_AUTH_PASSWORD\t\"MSCP_SSH_AUTH_PASSWORD\"\n\n/** @def\n * Environment variable that passes passphrase for private key\n */\n#define ENV_SSH_AUTH_PASSPHRASE\t\"MSCP_SSH_AUTH_PASSPHRASE\"\n\n\n/**\n * @struct\tmscp_stats\n * @brief\tStructure to get mscp statistics\n */\nstruct mscp_stats {\n\tsize_t total;\t/** total bytes to be transferred */\n\tsize_t done;\t/** total bytes transferred */\n};\n\n\n/** Structure representing mscp instance */\nstruct mscp;\n\n/**\n * @brief Creates a new mscp instance.\n *\n * @param o\t\toptions for configuring mscp.\n * @param s\t\toptions for configuring ssh connections.\n *\n * @return \t\tA new mscp instance or NULL on error.\n */\nstruct mscp *mscp_init(struct mscp_opts *o, struct mscp_ssh_opts *s);\n\n/**\n * @brief Set remote host and copy direction.\n *\n * @param remote_host\tremote host for file transfer.\n * @param direction\tcopy direction, `MSCP_DIRECTION_L2R` or 
`MSCP_DIRECTION_R2L`\n *\n * @return              0 on success, < 0 if an error occurred.\n */\nint mscp_set_remote(struct mscp *m, const char *remote_host, int direction);\n\n/**\n * @brief Connect the first SSH connection. mscp_connect connects to\n * remote host and initialize a SFTP session over the\n * connection. mscp_scan() and mscp_start() require mscp_connect()\n * beforehand.\n *\n * @param m\tmscp instance.\n *\n * @return \t0 on success, < 0 if an error occurred.\n */\nint mscp_connect(struct mscp *m);\n\n/* add a source file path to be copied */\n\n/**\n * @brief Add a source file path to be copied. The path indicates\n * either a file or directory.\n *\n * @param m\t\tmscp instance.\n * @param src_path\tsource file path to be copied.\n *\n * @return \t\t0 on success, < 0 if an error occurred.\n */\nint mscp_add_src_path(struct mscp *m, const char *src_path);\n\n/**\n * @brief Set the destination file path. The path indicates either a\n * file, directory, or nonexistent path.\n *\n * @param m\t\tmscp instance.\n * @param dst_path\tdestination path to which source files copied.\n *\n * @return \t\t0 on success, < 0 if an error occurred.\n */\nint mscp_set_dst_path(struct mscp *m, const char *dst_path);\n\n/* scan source files, resolve destination file paths for all source\n * files, and calculate chunks for all files. */\n\n/**\n * @brief Scan source paths and prepare. This function checks all\n * source files (recursively), resolve paths on the destination side,\n * and calculate file chunks. This function is non-blocking.\n *\n * @param m\tmscp instance.\n *\n * @return \t0 on success, < 0 if an error occurred.\n */\nint mscp_scan(struct mscp *m);\n\n/**\n * @brief Join scan thread invoked by mscp_scan() if it\n * runs. mscp_join() involves mscp_can_join(). 
Thus, there is no need\n * to call this function alone.\n *\n * @param m\tmscp instance.\n * @return\t0 on success, < 0 if an error occurred.\n */\nint mscp_scan_join(struct mscp *m);\n\n/**\n * @brief get information about remote host and copy direction from a\n * checkpoint file specified by *pathname. This function returns\n * remote host name to *remote, and the copy direction into *dir.\n * Thus, you can call mscp_init with those values.\n *\n * @param pathname\tpath to a checkpoint file.\n * @param remote\tchar buffer to which remote hostname is stored.\n * @param len\t\tlength of *remote.\n * @param dir\t\tint to which the copy direction is stored.\n */\nint mscp_checkpoint_get_remote(const char *pathname, char *remote, size_t len, int *dir);\n\n/**\n * @brief load information about untransferred files and chunks at the\n * last transfer. mscp_checkpoint_load() loads files and associated\n * chunks from the checkpoint file pointed by pathname. If you call\n * mscp_checkpoint_load(), do not call mscp_scan().\n *\n * @param m\t\tmscp instance.\n * @param pathname\tpath to a checkpoint file.\n * @return\t\t0 on success, < 0 if an error occurred.\n */\nint mscp_checkpoint_load(struct mscp *m, const char *pathname);\n\n/**\n * @brief save information about untransferred files and chunks to a\n * checkpoint file.\n *\n * @param m\t\tmscp instance.\n * @param pathname\tpath to a checkpoint file.\n * @return\t\t0 on success, < 0 if an error occurred.\n */\nint mscp_checkpoint_save(struct mscp *m, const char *pathname);\n\n/**\n * @brief Start to copy files. mscp_start() returns immediately. You\n * can get statistics via mscp_get_stats() or messages via pipe set by\n * mscp_opts.msg_fd or mscp_set_msg_fd(). 
mscp_stop() cancels mscp\n * copy threads, and mscp_join() joins the threads.\n *\n * @param m\tmscp instance.\n *\n * @return \tnumber of threads on success, < 0 if an error occurred.\n *\n * @see\t\tmscp_join()\n */\nint mscp_start(struct mscp *m);\n\n\n/**\n * @brief Stop copying files.\n *\n * @param m\tmscp instance.\n */\nvoid mscp_stop(struct mscp *m);\n\n\n/**\n * @brief Join copy threads. This function is blocking until all copies\n * have done.\n *\n * @param m\tmscp instance.\n *\n * @return \t0 on success, < 0 if an error occurred.\n */\nint mscp_join(struct mscp *m);\n\n/**\n * @brief Get statistics of copy.\n *\n * @param m\t\tmscp instance.\n * @param s[out]\tstatistics.\n */\nvoid mscp_get_stats(struct mscp *m, struct mscp_stats *s);\n\n/**\n * @brief Cleanup the mscp instance. Before calling mscp_cleanup(),\n * must call mscp_join(). After mscp_cleanup() called, the mscp\n * instance can restart from mscp_connect(). Note that do not call\n * mscp_cleanup() before calling mscp_join(). It causes crash (ToDo:\n * check status of copy threads and return error when they are\n * running).\n *\n * @param m\t\tmscp instance.\n */\nvoid mscp_cleanup(struct mscp *m);\n\n/**\n * @brief Release the mscp instance.  Note that do not call\n * mscp_free() before calling mscp_join(). It causes crash (ToDo: check\n * status of copy threads and return error when they are running).\n *\n * @param m\t\tmscp instance.\n */\nvoid mscp_free(struct mscp *m);\n\n\n/* messaging with mscp */\n\n/**\n * @enum\tmscp_severity\n * @brief \tFilter messages from libmscp with severity level.\n */\nenum {\n\tMSCP_SEVERITY_NONE\t= -1,\n\tMSCP_SEVERITY_ERR\t= 0,\n\tMSCP_SEVERITY_WARN\t= 1,\n\tMSCP_SEVERITY_NOTICE\t= 2,\n        MSCP_SEVERITY_INFO\t= 3,\n\tMSCP_SEVERITY_DEBUG\t= 4,\n};\n\n\n/**\n * @brief Return available ciphers.\n */\nconst char **mscp_ssh_ciphers(void);\n\n/**\n * @brief Return available hmacs.\n */\n const char **mscp_ssh_hmacs(void);\n\n\n#endif /* _MSCP_H_ */\n"
  },
  {
    "path": "patch/README.md",
    "content": "\nPatches in this directory introduce enhancements for libssh including\n`sftp_async_write()` and `sftp_async_write_end()`, derived from\nhttps://github.com/limes-datentechnik-gmbh/libssh. See [Re: SFTP Write\nasync](https://archive.libssh.org/libssh/2020-06/0000004.html).\n\n"
  },
  {
    "path": "patch/libssh-0.10.4.patch",
    "content": "diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake\nindex 7103f303..c64eb39d 100644\n--- a/ConfigureChecks.cmake\n+++ b/ConfigureChecks.cmake\n@@ -258,6 +258,7 @@ if (UNIX)\n     check_library_exists(util forkpty \"\" HAVE_LIBUTIL)\n     check_function_exists(cfmakeraw HAVE_CFMAKERAW)\n     check_function_exists(__strtoull HAVE___STRTOULL)\n+    check_symbol_exists(TCP_CONGESTION \"netinet/tcp.h\" HAVE_TCP_CONGESTION)\n endif (UNIX)\n \n set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL \"libssh required system libraries\")\ndiff --git a/config.h.cmake b/config.h.cmake\nindex 1357615b..1e915ead 100644\n--- a/config.h.cmake\n+++ b/config.h.cmake\n@@ -237,6 +237,8 @@\n \n #cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1\n \n+#cmakedefine HAVE_TCP_CONGESTION 1\n+\n /* Define to 1 if you want to enable GSSAPI */\n #cmakedefine WITH_GSSAPI 1\n \ndiff --git a/include/libssh/buffer.h b/include/libssh/buffer.h\nindex a55a1b40..e34e075c 100644\n--- a/include/libssh/buffer.h\n+++ b/include/libssh/buffer.h\n@@ -33,6 +33,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);\n int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);\n int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);\n int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);\n+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,\n+\t\t\t    void *userdata);\n \n int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);\n \ndiff --git a/include/libssh/libssh.h b/include/libssh/libssh.h\nindex 7857a77b..6b4d481c 100644\n--- a/include/libssh/libssh.h\n+++ b/include/libssh/libssh.h\n@@ -402,6 +402,7 @@ enum ssh_options_e {\n   SSH_OPTIONS_GSSAPI_AUTH,\n   SSH_OPTIONS_GLOBAL_KNOWNHOSTS,\n   SSH_OPTIONS_NODELAY,\n+  SSH_OPTIONS_CCALGO,\n   SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,\n   SSH_OPTIONS_PROCESS_CONFIG,\n   SSH_OPTIONS_REKEY_DATA,\n@@ -833,6 +834,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);\n 
LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);\n \n LIBSSH_API ssh_buffer ssh_buffer_new(void);\n+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);\n LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);\n #define SSH_BUFFER_FREE(x) \\\n     do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)\n@@ -843,6 +845,8 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);\n LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);\n LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);\n \n+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);\n+\n #ifndef LIBSSH_LEGACY_0_4\n #include \"libssh/legacy.h\"\n #endif\ndiff --git a/include/libssh/session.h b/include/libssh/session.h\nindex d3e5787c..15183d1b 100644\n--- a/include/libssh/session.h\n+++ b/include/libssh/session.h\n@@ -232,6 +232,7 @@ struct ssh_session_struct {\n         int gss_delegate_creds;\n         int flags;\n         int nodelay;\n+        char *ccalgo;\n         bool config_processed;\n         uint8_t options_seen[SOC_MAX];\n         uint64_t rekey_data;\ndiff --git a/include/libssh/sftp.h b/include/libssh/sftp.h\nindex c855df8a..0fcdb9b8 100644\n--- a/include/libssh/sftp.h\n+++ b/include/libssh/sftp.h\n@@ -565,6 +565,10 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_\n  */\n LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);\n \n+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,\n+\t\t\t\t    void *userdata, uint32_t* id);\n+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);\n+\n /**\n  * @brief Seek to a specific location in a file.\n  *\ndiff --git a/src/CMakeLists.txt b/src/CMakeLists.txt\nindex c090fef7..e2f86309 100644\n--- a/src/CMakeLists.txt\n+++ b/src/CMakeLists.txt\n@@ -435,6 +435,11 @@ if (BUILD_STATIC_LIB)\n   if (WIN32)\n     
target_compile_definitions(ssh-static PUBLIC \"LIBSSH_STATIC\")\n   endif (WIN32)\n+\n+  install(TARGETS ssh-static\n+\t  EXPORT libssh-config\n+\t  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}\n+\t  COMPONENT libraries)\n endif (BUILD_STATIC_LIB)\n \n message(STATUS \"Threads_FOUND=${Threads_FOUND}\")\ndiff --git a/src/buffer.c b/src/buffer.c\nindex e0068015..cc0caf35 100644\n--- a/src/buffer.c\n+++ b/src/buffer.c\n@@ -141,6 +141,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)\n     return buf;\n }\n \n+/**\n+ * @brief Create a new SSH buffer with a specified size and headroom.\n+ *\n+ * @param[in] len       length for newly initialized SSH buffer.\n+ * @param[in] headroom  length for headroom\n+ * @return A newly initialized SSH buffer, NULL on error.\n+ */\n+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)\n+{\n+    struct ssh_buffer_struct *buf = NULL;\n+    int rc;\n+\n+    if (len < headroom)\n+\t    return NULL;\n+\n+    buf = calloc(1, sizeof(struct ssh_buffer_struct));\n+    if (buf == NULL) {\n+        return NULL;\n+    }\n+\n+    rc = ssh_buffer_allocate_size(buf, len);\n+    if (rc != 0) {\n+        SAFE_FREE(buf);\n+        return NULL;\n+    }\n+\n+    buf->pos += headroom;\n+    buf->used += headroom;\n+\n+    buffer_verify(buf);\n+\n+    return buf;\n+}\n+\n /**\n  * @brief Deallocate a SSH buffer.\n  *\n@@ -328,6 +362,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint\n     return 0;\n }\n \n+/**\n+ * @brief Add data at the tail of a buffer by an external function\n+ *\n+ * @param[in]  buffer    The buffer to add data.\n+ *\n+ * @param[in]  f         function that adds data to the buffer.\n+ *\n+ * @param[in]  max_bytes The maximum length of the data to add.\n+ *\n+ * @return               actual bytes added on success, < 0 on error.\n+ */\n+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,\n+\t\t\t    size_t max_bytes, void *userdata)\n+{\n+    
ssize_t actual;\n+\n+    if (buffer == NULL) {\n+        return -1;\n+    }\n+\n+    buffer_verify(buffer);\n+\n+    if (buffer->used + max_bytes < max_bytes) {\n+        return -1;\n+    }\n+\n+    if (buffer->allocated < (buffer->used + max_bytes)) {\n+        if (buffer->pos > 0) {\n+            buffer_shift(buffer);\n+        }\n+        if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {\n+            return -1;\n+        }\n+    }\n+\n+    if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)\n+      return -1;\n+\n+    buffer->used += actual;\n+    buffer_verify(buffer);\n+    return actual;\n+}\n+\n /**\n  * @brief Ensure the buffer has at least a certain preallocated size.\n  *\ndiff --git a/src/connect.c b/src/connect.c\nindex 57e37e63..c02397d5 100644\n--- a/src/connect.c\n+++ b/src/connect.c\n@@ -156,6 +156,20 @@ static int set_tcp_nodelay(socket_t socket)\n                       sizeof(opt));\n }\n \n+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)\n+{\n+#ifdef HAVE_TCP_CONGESTION\n+\treturn setsockopt(socket,\n+\t\t\t  IPPROTO_TCP,\n+\t\t\t  TCP_CONGESTION,\n+\t\t\t  (void *)ccalgo,\n+\t\t\t  strlen(ccalgo));\n+#else\n+\terrno = ENOTSUP;\n+\treturn -1;\n+#endif\n+}\n+\n /**\n  * @internal\n  *\n@@ -256,6 +270,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n             }\n         }\n \n+        if (session->opts.ccalgo) {\n+\t     rc = set_tcp_ccalgo(s, session->opts.ccalgo);\n+\t     if (rc < 0) {\n+\t\t ssh_set_error(session, SSH_FATAL,\n+\t\t\t       \"Failed to set TCP_CONGESTION on socket: %s\",\n+\t\t\t       ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));\n+\t\t ssh_connect_socket_close(s);\n+\t\t s = -1;\n+\t\t continue;\n+\t     }\n+\t}\n+\n         errno = 0;\n         rc = connect(s, itr->ai_addr, itr->ai_addrlen);\n         if (rc == -1 && (errno != 0) && (errno != EINPROGRESS)) {\ndiff --git a/src/options.c b/src/options.c\nindex 49aaefa2..9f7360c3 100644\n--- 
a/src/options.c\n+++ b/src/options.c\n@@ -210,6 +210,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)\n     new->opts.gss_delegate_creds    = src->opts.gss_delegate_creds;\n     new->opts.flags                 = src->opts.flags;\n     new->opts.nodelay               = src->opts.nodelay;\n+    new->opts.ccalgo                = src->opts.ccalgo;\n     new->opts.config_processed      = src->opts.config_processed;\n     new->common.log_verbosity       = src->common.log_verbosity;\n     new->common.callbacks           = src->common.callbacks;\n@@ -450,6 +451,10 @@ int ssh_options_set_algo(ssh_session session,\n  *                Set it to disable Nagle's Algorithm (TCP_NODELAY) on the\n  *                session socket. (int, 0=false)\n  *\n+ *              - SSH_OPTIONS_CCALGO\n+ *                Set it to specify TCP congestion control algorithm on the\n+ *                session socket (Linux only). (int, 0=false)\n+ *\n  *              - SSH_OPTIONS_PROCESS_CONFIG\n  *                Set it to false to disable automatic processing of per-user\n  *                and system-wide OpenSSH configuration files. LibSSH\n@@ -1013,6 +1018,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,\n                 session->opts.nodelay = (*x & 0xff) > 0 ? 
1 : 0;\n             }\n             break;\n+\tcase SSH_OPTIONS_CCALGO:\n+            v = value;\n+            if (v == NULL || v[0] == '\\0') {\n+                ssh_set_error_invalid(session);\n+                return -1;\n+            } else {\n+                SAFE_FREE(session->opts.ccalgo);\n+                session->opts.ccalgo = strdup(v);\n+                if (session->opts.ccalgo == NULL) {\n+                    ssh_set_error_oom(session);\n+                    return -1;\n+                }\n+            }\n+            break;\n         case SSH_OPTIONS_PROCESS_CONFIG:\n             if (value == NULL) {\n                 ssh_set_error_invalid(session);\ndiff --git a/src/session.c b/src/session.c\nindex 6025c133..6b197526 100644\n--- a/src/session.c\n+++ b/src/session.c\n@@ -108,6 +108,7 @@ ssh_session ssh_new(void)\n     session->opts.fd = -1;\n     session->opts.compressionlevel = 7;\n     session->opts.nodelay = 0;\n+    session->opts.ccalgo = NULL;\n \n     session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH |\n                           SSH_OPT_FLAG_PUBKEY_AUTH |\ndiff --git a/src/sftp.c b/src/sftp.c\nindex e01012a8..702623a0 100644\n--- a/src/sftp.c\n+++ b/src/sftp.c\n@@ -2228,6 +2228,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {\n   return -1; /* not reached */\n }\n \n+/*\n+ * sftp_async_write is based on and sftp_async_write_end is copied from\n+ * https://github.com/limes-datentechnik-gmbh/libssh\n+ *\n+ * sftp_async_write has some optimizations:\n+ * - use ssh_buffer_new_size() to reduce realoc_buffer.\n+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.\n+ */\n+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,\n+\t\t\t uint32_t* id) {\n+  sftp_session sftp = file->sftp;\n+  ssh_buffer buffer;\n+  uint32_t buf_sz;\n+  ssize_t actual;\n+  int len;\n+  int packetlen;\n+  int rc;\n+\n+#define HEADROOM 16\n+  /* sftp_packet_write() prepends a 5-bytes 
(uint32_t length and\n+   * 1-byte type) header to the head of the payload by\n+   * ssh_buffer_prepend_data(). Inserting headroom by\n+   * ssh_buffer_new_size() eliminates memcpy for prepending the\n+   * header.\n+   */\n+\n+  buf_sz = (HEADROOM + /* for header */\n+\t    sizeof(uint32_t) + /* id */\n+\t    ssh_string_len(file->handle) + 4 + /* file->handle */\n+\t    sizeof(uint64_t) + /* file->offset */\n+\t    sizeof(uint32_t) + /* count */\n+\t    count); /* datastring */\n+\n+  buffer = ssh_buffer_new_size(buf_sz, HEADROOM);\n+  if (buffer == NULL) {\n+    ssh_set_error_oom(sftp->session);\n+    return -1;\n+  }\n+\n+  *id = sftp_get_new_id(file->sftp);\n+\n+  rc = ssh_buffer_pack(buffer,\n+                       \"dSqd\",\n+                       *id,\n+                       file->handle,\n+                       file->offset,\n+                       count); /* len of datastring */\n+\n+  if (rc != SSH_OK){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  actual = ssh_buffer_add_func(buffer, f, count, userdata);\n+  if (actual < 0){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  packetlen=ssh_buffer_get_len(buffer)+5;\n+  len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);\n+  ssh_buffer_free(buffer);\n+  if (len < 0) {\n+    return SSH_ERROR;\n+  } else  if (len != packetlen) {\n+    ssh_set_error(sftp->session, SSH_FATAL,\n+      \"Could only send %d of %d bytes to remote host!\", len, packetlen);\n+    SSH_LOG(SSH_LOG_PACKET,\n+        \"Could not write as much data as expected\");\n+    return SSH_ERROR;\n+  }\n+\n+  file->offset += actual;\n+\n+  return actual;\n+}\n+\n+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {\n+  sftp_session sftp = file->sftp;\n+  sftp_message msg = NULL;\n+  sftp_status_message status;\n+\n+  msg = sftp_dequeue(sftp, id);\n+  while (msg == NULL) {\n+    if (!blocking && 
ssh_channel_poll(sftp->channel, 0) == 0) {\n+      /* we cannot block */\n+      return SSH_AGAIN;\n+    }\n+    if (sftp_read_and_dispatch(sftp) < 0) {\n+      /* something nasty has happened */\n+      return SSH_ERROR;\n+    }\n+    msg = sftp_dequeue(sftp, id);\n+  }\n+\n+  switch (msg->packet_type) {\n+    case SSH_FXP_STATUS:\n+      status = parse_status_msg(msg);\n+      sftp_message_free(msg);\n+      if (status == NULL) {\n+        return SSH_ERROR;\n+      }\n+      sftp_set_error(sftp, status->status);\n+      switch (status->status) {\n+        case SSH_FX_OK:\n+          status_msg_free(status);\n+          return SSH_OK;\n+        default:\n+          break;\n+      }\n+      ssh_set_error(sftp->session, SSH_REQUEST_DENIED,\n+          \"SFTP server: %s\", status->errormsg);\n+      status_msg_free(status);\n+      return SSH_ERROR;\n+    default:\n+      ssh_set_error(sftp->session, SSH_FATAL,\n+          \"Received message %d during write!\", msg->packet_type);\n+      sftp_message_free(msg);\n+      return SSH_ERROR;\n+  }\n+\n+  return SSH_ERROR; /* not reached */\n+}\n+\n /* Seek to a specific location in a file. */\n int sftp_seek(sftp_file file, uint32_t new_offset) {\n   if (file == NULL) {\n"
  },
  {
    "path": "patch/libssh-0.10.6-2-g6f1b1e76.patch",
    "content": "diff --git a/CMakeLists.txt b/CMakeLists.txt\nindex a64b7708..c6344a5a 100644\n--- a/CMakeLists.txt\n+++ b/CMakeLists.txt\n@@ -1,4 +1,4 @@\n-cmake_minimum_required(VERSION 3.3.0)\n+cmake_minimum_required(VERSION 3.13.0)\n cmake_policy(SET CMP0048 NEW)\n \n # Specify search path for CMake modules to be loaded by include()\ndiff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake\nindex 9de10225..0f3d20ed 100644\n--- a/ConfigureChecks.cmake\n+++ b/ConfigureChecks.cmake\n@@ -258,6 +258,7 @@ if (UNIX)\n     check_library_exists(util forkpty \"\" HAVE_LIBUTIL)\n     check_function_exists(cfmakeraw HAVE_CFMAKERAW)\n     check_function_exists(__strtoull HAVE___STRTOULL)\n+    check_symbol_exists(TCP_CONGESTION \"netinet/tcp.h\" HAVE_TCP_CONGESTION)\n endif (UNIX)\n \n set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL \"libssh required system libraries\")\ndiff --git a/config.h.cmake b/config.h.cmake\nindex cc83734d..f74cd03b 100644\n--- a/config.h.cmake\n+++ b/config.h.cmake\n@@ -237,6 +237,8 @@\n \n #cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1\n \n+#cmakedefine HAVE_TCP_CONGESTION 1\n+\n /* Define to 1 if you want to enable GSSAPI */\n #cmakedefine WITH_GSSAPI 1\n \ndiff --git a/include/libssh/buffer.h b/include/libssh/buffer.h\nindex 1fce7b76..b64d1455 100644\n--- a/include/libssh/buffer.h\n+++ b/include/libssh/buffer.h\n@@ -37,6 +37,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);\n int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);\n int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);\n int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);\n+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,\n+\t\t\t    void *userdata);\n \n int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);\n \ndiff --git a/include/libssh/libssh.h b/include/libssh/libssh.h\nindex 669a0a96..26b20f3f 100644\n--- a/include/libssh/libssh.h\n+++ b/include/libssh/libssh.h\n@@ -368,6 +368,7 
@@ enum ssh_options_e {\n   SSH_OPTIONS_HOST,\n   SSH_OPTIONS_PORT,\n   SSH_OPTIONS_PORT_STR,\n+  SSH_OPTIONS_AI_FAMILY,\n   SSH_OPTIONS_FD,\n   SSH_OPTIONS_USER,\n   SSH_OPTIONS_SSH_DIR,\n@@ -402,6 +403,7 @@ enum ssh_options_e {\n   SSH_OPTIONS_GSSAPI_AUTH,\n   SSH_OPTIONS_GLOBAL_KNOWNHOSTS,\n   SSH_OPTIONS_NODELAY,\n+  SSH_OPTIONS_CCALGO,\n   SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,\n   SSH_OPTIONS_PROCESS_CONFIG,\n   SSH_OPTIONS_REKEY_DATA,\n@@ -833,6 +835,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);\n LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);\n \n LIBSSH_API ssh_buffer ssh_buffer_new(void);\n+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);\n LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);\n #define SSH_BUFFER_FREE(x) \\\n     do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)\n@@ -843,6 +846,11 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);\n LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);\n LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);\n \n+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);\n+\n+LIBSSH_API const char **ssh_ciphers(void);\n+LIBSSH_API const char **ssh_hmacs(void);\n+\n #ifndef LIBSSH_LEGACY_0_4\n #include \"libssh/legacy.h\"\n #endif\ndiff --git a/include/libssh/session.h b/include/libssh/session.h\nindex 97936195..e4fc4fce 100644\n--- a/include/libssh/session.h\n+++ b/include/libssh/session.h\n@@ -249,6 +249,7 @@ struct ssh_session_struct {\n         unsigned long timeout; /* seconds */\n         unsigned long timeout_usec;\n         uint16_t port;\n+        int  ai_family;\n         socket_t fd;\n         int StrictHostKeyChecking;\n         char compressionlevel;\n@@ -258,6 +259,7 @@ struct ssh_session_struct {\n         int flags;\n         int exp_flags;\n         int nodelay;\n+        char *ccalgo;\n         bool config_processed;\n         uint8_t 
options_seen[SOC_MAX];\n         uint64_t rekey_data;\ndiff --git a/include/libssh/sftp.h b/include/libssh/sftp.h\nindex c713466e..e27fe326 100644\n--- a/include/libssh/sftp.h\n+++ b/include/libssh/sftp.h\n@@ -565,6 +565,10 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_\n  */\n LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);\n \n+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,\n+\t\t\t\t    void *userdata, uint32_t* id);\n+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);\n+\n /**\n  * @brief Seek to a specific location in a file.\n  *\ndiff --git a/src/CMakeLists.txt b/src/CMakeLists.txt\nindex 807313b5..86487087 100644\n--- a/src/CMakeLists.txt\n+++ b/src/CMakeLists.txt\n@@ -448,6 +448,11 @@ if (BUILD_STATIC_LIB)\n   if (WIN32)\n     target_compile_definitions(ssh-static PUBLIC \"LIBSSH_STATIC\")\n   endif (WIN32)\n+\n+  install(TARGETS ssh-static\n+\t  EXPORT libssh-config\n+\t  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}\n+\t  COMPONENT libraries)\n endif (BUILD_STATIC_LIB)\n \n message(STATUS \"Threads_FOUND=${Threads_FOUND}\")\ndiff --git a/src/buffer.c b/src/buffer.c\nindex 8991e006..e0414801 100644\n--- a/src/buffer.c\n+++ b/src/buffer.c\n@@ -142,6 +142,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)\n     return buf;\n }\n \n+/**\n+ * @brief Create a new SSH buffer with a specified size and headroom.\n+ *\n+ * @param[in] len       length for newly initialized SSH buffer.\n+ * @param[in] headroom  length for headroom\n+ * @return A newly initialized SSH buffer, NULL on error.\n+ */\n+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)\n+{\n+    struct ssh_buffer_struct *buf = NULL;\n+    int rc;\n+\n+    if (len < headroom)\n+\t    return NULL;\n+\n+    buf = calloc(1, sizeof(struct ssh_buffer_struct));\n+    if (buf == NULL) {\n+        return NULL;\n+    }\n+\n+    rc = 
ssh_buffer_allocate_size(buf, len);\n+    if (rc != 0) {\n+        SAFE_FREE(buf);\n+        return NULL;\n+    }\n+\n+    buf->pos += headroom;\n+    buf->used += headroom;\n+\n+    buffer_verify(buf);\n+\n+    return buf;\n+}\n+\n /**\n  * @brief Deallocate a SSH buffer.\n  *\n@@ -329,6 +363,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint\n     return 0;\n }\n \n+/**\n+ * @brief Add data at the tail of a buffer by an external function\n+ *\n+ * @param[in]  buffer    The buffer to add data.\n+ *\n+ * @param[in]  f         function that adds data to the buffer.\n+ *\n+ * @param[in]  max_bytes The maximum length of the data to add.\n+ *\n+ * @return               actual bytes added on success, < 0 on error.\n+ */\n+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,\n+\t\t\t    size_t max_bytes, void *userdata)\n+{\n+    ssize_t actual;\n+\n+    if (buffer == NULL) {\n+        return -1;\n+    }\n+\n+    buffer_verify(buffer);\n+\n+    if (buffer->used + max_bytes < max_bytes) {\n+        return -1;\n+    }\n+\n+    if (buffer->allocated < (buffer->used + max_bytes)) {\n+        if (buffer->pos > 0) {\n+            buffer_shift(buffer);\n+        }\n+        if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {\n+            return -1;\n+        }\n+    }\n+\n+    if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)\n+      return -1;\n+\n+    buffer->used += actual;\n+    buffer_verify(buffer);\n+    return actual;\n+}\n+\n /**\n  * @brief Ensure the buffer has at least a certain preallocated size.\n  *\ndiff --git a/src/connect.c b/src/connect.c\nindex 15cae644..02ef43b4 100644\n--- a/src/connect.c\n+++ b/src/connect.c\n@@ -114,7 +114,7 @@ static int ssh_connect_socket_close(socket_t s)\n #endif\n }\n \n-static int getai(const char *host, int port, struct addrinfo **ai)\n+static int getai(const char *host, int port, int ai_family, struct addrinfo **ai)\n {\n     const char 
*service = NULL;\n     struct addrinfo hints;\n@@ -123,7 +123,7 @@ static int getai(const char *host, int port, struct addrinfo **ai)\n     ZERO_STRUCT(hints);\n \n     hints.ai_protocol = IPPROTO_TCP;\n-    hints.ai_family = PF_UNSPEC;\n+    hints.ai_family = ai_family > 0 ? ai_family : PF_UNSPEC;\n     hints.ai_socktype = SOCK_STREAM;\n \n     if (port == 0) {\n@@ -156,6 +156,20 @@ static int set_tcp_nodelay(socket_t socket)\n                       sizeof(opt));\n }\n \n+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)\n+{\n+#ifdef HAVE_TCP_CONGESTION\n+\treturn setsockopt(socket,\n+\t\t\t  IPPROTO_TCP,\n+\t\t\t  TCP_CONGESTION,\n+\t\t\t  (void *)ccalgo,\n+\t\t\t  strlen(ccalgo));\n+#else\n+\terrno = ENOTSUP;\n+\treturn -1;\n+#endif\n+}\n+\n /**\n  * @internal\n  *\n@@ -173,7 +187,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n     struct addrinfo *ai = NULL;\n     struct addrinfo *itr = NULL;\n \n-    rc = getai(host, port, &ai);\n+    rc = getai(host, port, session->opts.ai_family, &ai);\n     if (rc != 0) {\n         ssh_set_error(session, SSH_FATAL,\n                       \"Failed to resolve hostname %s (%s)\",\n@@ -199,7 +213,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n \n             SSH_LOG(SSH_LOG_PACKET, \"Resolving %s\", bind_addr);\n \n-            rc = getai(bind_addr, 0, &bind_ai);\n+            rc = getai(bind_addr, 0, session->opts.ai_family, &bind_ai);\n             if (rc != 0) {\n                 ssh_set_error(session, SSH_FATAL,\n                               \"Failed to resolve bind address %s (%s)\",\n@@ -256,6 +270,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n             }\n         }\n \n+        if (session->opts.ccalgo) {\n+\t     rc = set_tcp_ccalgo(s, session->opts.ccalgo);\n+\t     if (rc < 0) {\n+\t\t ssh_set_error(session, SSH_FATAL,\n+\t\t\t       \"Failed to set TCP_CONGESTION on socket: %s\",\n+\t\t\t  
     ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));\n+\t\t ssh_connect_socket_close(s);\n+\t\t s = -1;\n+\t\t continue;\n+\t     }\n+\t}\n+\n         errno = 0;\n         rc = connect(s, itr->ai_addr, itr->ai_addrlen);\n         if (rc == -1 && (errno != 0) && (errno != EINPROGRESS)) {\ndiff --git a/src/misc.c b/src/misc.c\nindex 7081f12a..e3879fe4 100644\n--- a/src/misc.c\n+++ b/src/misc.c\n@@ -71,6 +71,8 @@\n #include \"libssh/priv.h\"\n #include \"libssh/misc.h\"\n #include \"libssh/session.h\"\n+#include \"libssh/wrapper.h\"\n+#include \"libssh/crypto.h\"\n \n #ifdef HAVE_LIBGCRYPT\n #define GCRYPT_STRING \"/gnutls\"\n@@ -2074,4 +2076,40 @@ int ssh_check_hostname_syntax(const char *hostname)\n     return SSH_OK;\n }\n \n+/**\n+ * @brief Return supported cipher names\n+ * @return\tThe list of cipher names.\n+ */\n+const char **ssh_ciphers(void)\n+{\n+     struct ssh_cipher_struct *tab=ssh_get_ciphertab();\n+     static const char *ciphers[32];\n+     int n;\n+\n+     memset(ciphers, 0, sizeof(*ciphers));\n+\n+     for (n = 0; tab[n].name != NULL; n++) {\n+\t  ciphers[n] = tab[n].name;\n+     }\n+     return ciphers;\n+}\n+\n+/**\n+ * @brief Return supported hmac names\n+ * @return\tThe list of hmac names.\n+ */\n+const char **ssh_hmacs(void)\n+{\n+     struct ssh_hmac_struct *tab=ssh_get_hmactab();\n+     static const char *hmacs[32];\n+     int n;\n+\n+     memset(hmacs, 0, sizeof(*hmacs));\n+\n+     for (n = 0; tab[n].name != NULL; n++) {\n+\t  hmacs[n] = tab[n].name;\n+     }\n+     return hmacs;\n+}\n+\n /** @} */\ndiff --git a/src/options.c b/src/options.c\nindex b3ecffe1..8de24ed6 100644\n--- a/src/options.c\n+++ b/src/options.c\n@@ -217,6 +217,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)\n     new->opts.gss_delegate_creds    = src->opts.gss_delegate_creds;\n     new->opts.flags                 = src->opts.flags;\n     new->opts.nodelay               = src->opts.nodelay;\n+    new->opts.ccalgo                = src->opts.ccalgo;\n     
new->opts.config_processed      = src->opts.config_processed;\n     new->common.log_verbosity       = src->common.log_verbosity;\n     new->common.callbacks           = src->common.callbacks;\n@@ -268,6 +269,9 @@ int ssh_options_set_algo(ssh_session session,\n  *              - SSH_OPTIONS_PORT_STR:\n  *                The port to connect to (const char *).\n  *\n+ *              - SSH_OPTIONS_AI_FAMILY:\n+ *                The address family for connecting (int *).\n+ *\n  *              - SSH_OPTIONS_FD:\n  *                The file descriptor to use (socket_t).\\n\n  *                \\n\n@@ -458,6 +462,10 @@ int ssh_options_set_algo(ssh_session session,\n  *                Set it to disable Nagle's Algorithm (TCP_NODELAY) on the\n  *                session socket. (int, 0=false)\n  *\n+ *              - SSH_OPTIONS_CCALGO\n+ *                Set it to specify TCP congestion control algorithm on the\n+ *                session socket (Linux only). (int, 0=false)\n+ *\n  *              - SSH_OPTIONS_PROCESS_CONFIG\n  *                Set it to false to disable automatic processing of per-user\n  *                and system-wide OpenSSH configuration files. 
LibSSH\n@@ -571,6 +579,21 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,\n                 session->opts.port = i & 0xffffU;\n             }\n             break;\n+        case SSH_OPTIONS_AI_FAMILY:\n+            if (value == NULL) {\n+                session->opts.ai_family = 0;\n+                ssh_set_error_invalid(session);\n+                return -1;\n+            } else {\n+                int *x = (int *) value;\n+                if (*x < 0) {\n+\t\t    session->opts.ai_family = 0;\n+                    ssh_set_error_invalid(session);\n+                    return -1;\n+                }\n+                session->opts.ai_family = *x;\n+            }\n+            break;\n         case SSH_OPTIONS_FD:\n             if (value == NULL) {\n                 session->opts.fd = SSH_INVALID_SOCKET;\n@@ -1017,6 +1040,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,\n                 session->opts.nodelay = (*x & 0xff) > 0 ? 1 : 0;\n             }\n             break;\n+\tcase SSH_OPTIONS_CCALGO:\n+            v = value;\n+            if (v == NULL || v[0] == '\\0') {\n+                ssh_set_error_invalid(session);\n+                return -1;\n+            } else {\n+                SAFE_FREE(session->opts.ccalgo);\n+                session->opts.ccalgo = strdup(v);\n+                if (session->opts.ccalgo == NULL) {\n+                    ssh_set_error_oom(session);\n+                    return -1;\n+                }\n+            }\n+            break;\n         case SSH_OPTIONS_PROCESS_CONFIG:\n             if (value == NULL) {\n                 ssh_set_error_invalid(session);\ndiff --git a/src/session.c b/src/session.c\nindex 8c509699..307388e5 100644\n--- a/src/session.c\n+++ b/src/session.c\n@@ -105,9 +105,11 @@ ssh_session ssh_new(void)\n     /* OPTIONS */\n     session->opts.StrictHostKeyChecking = 1;\n     session->opts.port = 22;\n+    session->opts.ai_family = 0;\n     session->opts.fd = -1;\n     
session->opts.compressionlevel = 7;\n     session->opts.nodelay = 0;\n+    session->opts.ccalgo = NULL;\n \n     session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH |\n                           SSH_OPT_FLAG_PUBKEY_AUTH |\ndiff --git a/src/sftp.c b/src/sftp.c\nindex e01012a8..702623a0 100644\n--- a/src/sftp.c\n+++ b/src/sftp.c\n@@ -2228,6 +2228,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {\n   return -1; /* not reached */\n }\n \n+/*\n+ * sftp_async_write is based on and sftp_async_write_end is copied from\n+ * https://github.com/limes-datentechnik-gmbh/libssh\n+ *\n+ * sftp_async_write has some optimizations:\n+ * - use ssh_buffer_new_size() to reduce realoc_buffer.\n+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.\n+ */\n+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,\n+\t\t\t uint32_t* id) {\n+  sftp_session sftp = file->sftp;\n+  ssh_buffer buffer;\n+  uint32_t buf_sz;\n+  ssize_t actual;\n+  int len;\n+  int packetlen;\n+  int rc;\n+\n+#define HEADROOM 16\n+  /* sftp_packet_write() prepends a 5-bytes (uint32_t length and\n+   * 1-byte type) header to the head of the payload by\n+   * ssh_buffer_prepend_data(). 
Inserting headroom by\n+   * ssh_buffer_new_size() eliminates memcpy for prepending the\n+   * header.\n+   */\n+\n+  buf_sz = (HEADROOM + /* for header */\n+\t    sizeof(uint32_t) + /* id */\n+\t    ssh_string_len(file->handle) + 4 + /* file->handle */\n+\t    sizeof(uint64_t) + /* file->offset */\n+\t    sizeof(uint32_t) + /* count */\n+\t    count); /* datastring */\n+\n+  buffer = ssh_buffer_new_size(buf_sz, HEADROOM);\n+  if (buffer == NULL) {\n+    ssh_set_error_oom(sftp->session);\n+    return -1;\n+  }\n+\n+  *id = sftp_get_new_id(file->sftp);\n+\n+  rc = ssh_buffer_pack(buffer,\n+                       \"dSqd\",\n+                       *id,\n+                       file->handle,\n+                       file->offset,\n+                       count); /* len of datastring */\n+\n+  if (rc != SSH_OK){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  actual = ssh_buffer_add_func(buffer, f, count, userdata);\n+  if (actual < 0){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  packetlen=ssh_buffer_get_len(buffer)+5;\n+  len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);\n+  ssh_buffer_free(buffer);\n+  if (len < 0) {\n+    return SSH_ERROR;\n+  } else  if (len != packetlen) {\n+    ssh_set_error(sftp->session, SSH_FATAL,\n+      \"Could only send %d of %d bytes to remote host!\", len, packetlen);\n+    SSH_LOG(SSH_LOG_PACKET,\n+        \"Could not write as much data as expected\");\n+    return SSH_ERROR;\n+  }\n+\n+  file->offset += actual;\n+\n+  return actual;\n+}\n+\n+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {\n+  sftp_session sftp = file->sftp;\n+  sftp_message msg = NULL;\n+  sftp_status_message status;\n+\n+  msg = sftp_dequeue(sftp, id);\n+  while (msg == NULL) {\n+    if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {\n+      /* we cannot block */\n+      return SSH_AGAIN;\n+    }\n+    if 
(sftp_read_and_dispatch(sftp) < 0) {\n+      /* something nasty has happened */\n+      return SSH_ERROR;\n+    }\n+    msg = sftp_dequeue(sftp, id);\n+  }\n+\n+  switch (msg->packet_type) {\n+    case SSH_FXP_STATUS:\n+      status = parse_status_msg(msg);\n+      sftp_message_free(msg);\n+      if (status == NULL) {\n+        return SSH_ERROR;\n+      }\n+      sftp_set_error(sftp, status->status);\n+      switch (status->status) {\n+        case SSH_FX_OK:\n+          status_msg_free(status);\n+          return SSH_OK;\n+        default:\n+          break;\n+      }\n+      ssh_set_error(sftp->session, SSH_REQUEST_DENIED,\n+          \"SFTP server: %s\", status->errormsg);\n+      status_msg_free(status);\n+      return SSH_ERROR;\n+    default:\n+      ssh_set_error(sftp->session, SSH_FATAL,\n+          \"Received message %d during write!\", msg->packet_type);\n+      sftp_message_free(msg);\n+      return SSH_ERROR;\n+  }\n+\n+  return SSH_ERROR; /* not reached */\n+}\n+\n /* Seek to a specific location in a file. */\n int sftp_seek(sftp_file file, uint32_t new_offset) {\n   if (file == NULL) {\n"
  },
  {
    "path": "patch/libssh-0.10.6.patch",
    "content": "diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake\nindex 9de10225..0f3d20ed 100644\n--- a/ConfigureChecks.cmake\n+++ b/ConfigureChecks.cmake\n@@ -258,6 +258,7 @@ if (UNIX)\n     check_library_exists(util forkpty \"\" HAVE_LIBUTIL)\n     check_function_exists(cfmakeraw HAVE_CFMAKERAW)\n     check_function_exists(__strtoull HAVE___STRTOULL)\n+    check_symbol_exists(TCP_CONGESTION \"netinet/tcp.h\" HAVE_TCP_CONGESTION)\n endif (UNIX)\n \n set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL \"libssh required system libraries\")\ndiff --git a/config.h.cmake b/config.h.cmake\nindex cc83734d..f74cd03b 100644\n--- a/config.h.cmake\n+++ b/config.h.cmake\n@@ -237,6 +237,8 @@\n \n #cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1\n \n+#cmakedefine HAVE_TCP_CONGESTION 1\n+\n /* Define to 1 if you want to enable GSSAPI */\n #cmakedefine WITH_GSSAPI 1\n \ndiff --git a/include/libssh/buffer.h b/include/libssh/buffer.h\nindex 1fce7b76..b64d1455 100644\n--- a/include/libssh/buffer.h\n+++ b/include/libssh/buffer.h\n@@ -37,6 +37,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);\n int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);\n int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);\n int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);\n+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,\n+\t\t\t    void *userdata);\n \n int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);\n \ndiff --git a/include/libssh/libssh.h b/include/libssh/libssh.h\nindex 669a0a96..b6a93ac7 100644\n--- a/include/libssh/libssh.h\n+++ b/include/libssh/libssh.h\n@@ -402,6 +402,7 @@ enum ssh_options_e {\n   SSH_OPTIONS_GSSAPI_AUTH,\n   SSH_OPTIONS_GLOBAL_KNOWNHOSTS,\n   SSH_OPTIONS_NODELAY,\n+  SSH_OPTIONS_CCALGO,\n   SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,\n   SSH_OPTIONS_PROCESS_CONFIG,\n   SSH_OPTIONS_REKEY_DATA,\n@@ -833,6 +834,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);\n 
LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);\n \n LIBSSH_API ssh_buffer ssh_buffer_new(void);\n+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);\n LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);\n #define SSH_BUFFER_FREE(x) \\\n     do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)\n@@ -843,6 +845,8 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);\n LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);\n LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);\n \n+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);\n+\n #ifndef LIBSSH_LEGACY_0_4\n #include \"libssh/legacy.h\"\n #endif\ndiff --git a/include/libssh/session.h b/include/libssh/session.h\nindex 97936195..e4a7f80c 100644\n--- a/include/libssh/session.h\n+++ b/include/libssh/session.h\n@@ -258,6 +258,7 @@ struct ssh_session_struct {\n         int flags;\n         int exp_flags;\n         int nodelay;\n+        char *ccalgo;\n         bool config_processed;\n         uint8_t options_seen[SOC_MAX];\n         uint64_t rekey_data;\ndiff --git a/include/libssh/sftp.h b/include/libssh/sftp.h\nindex c713466e..e27fe326 100644\n--- a/include/libssh/sftp.h\n+++ b/include/libssh/sftp.h\n@@ -565,6 +565,10 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_\n  */\n LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);\n \n+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,\n+\t\t\t\t    void *userdata, uint32_t* id);\n+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);\n+\n /**\n  * @brief Seek to a specific location in a file.\n  *\ndiff --git a/src/CMakeLists.txt b/src/CMakeLists.txt\nindex 807313b5..86487087 100644\n--- a/src/CMakeLists.txt\n+++ b/src/CMakeLists.txt\n@@ -448,6 +448,11 @@ if (BUILD_STATIC_LIB)\n   if (WIN32)\n     
target_compile_definitions(ssh-static PUBLIC \"LIBSSH_STATIC\")\n   endif (WIN32)\n+\n+  install(TARGETS ssh-static\n+\t  EXPORT libssh-config\n+\t  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}\n+\t  COMPONENT libraries)\n endif (BUILD_STATIC_LIB)\n \n message(STATUS \"Threads_FOUND=${Threads_FOUND}\")\ndiff --git a/src/buffer.c b/src/buffer.c\nindex 8991e006..e0414801 100644\n--- a/src/buffer.c\n+++ b/src/buffer.c\n@@ -142,6 +142,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)\n     return buf;\n }\n \n+/**\n+ * @brief Create a new SSH buffer with a specified size and headroom.\n+ *\n+ * @param[in] len       length for newly initialized SSH buffer.\n+ * @param[in] headroom  length for headroom\n+ * @return A newly initialized SSH buffer, NULL on error.\n+ */\n+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)\n+{\n+    struct ssh_buffer_struct *buf = NULL;\n+    int rc;\n+\n+    if (len < headroom)\n+\t    return NULL;\n+\n+    buf = calloc(1, sizeof(struct ssh_buffer_struct));\n+    if (buf == NULL) {\n+        return NULL;\n+    }\n+\n+    rc = ssh_buffer_allocate_size(buf, len);\n+    if (rc != 0) {\n+        SAFE_FREE(buf);\n+        return NULL;\n+    }\n+\n+    buf->pos += headroom;\n+    buf->used += headroom;\n+\n+    buffer_verify(buf);\n+\n+    return buf;\n+}\n+\n /**\n  * @brief Deallocate a SSH buffer.\n  *\n@@ -329,6 +363,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint\n     return 0;\n }\n \n+/**\n+ * @brief Add data at the tail of a buffer by an external function\n+ *\n+ * @param[in]  buffer    The buffer to add data.\n+ *\n+ * @param[in]  f         function that adds data to the buffer.\n+ *\n+ * @param[in]  max_bytes The maximum length of the data to add.\n+ *\n+ * @return               actual bytes added on success, < 0 on error.\n+ */\n+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,\n+\t\t\t    size_t max_bytes, void *userdata)\n+{\n+    
ssize_t actual;\n+\n+    if (buffer == NULL) {\n+        return -1;\n+    }\n+\n+    buffer_verify(buffer);\n+\n+    if (buffer->used + max_bytes < max_bytes) {\n+        return -1;\n+    }\n+\n+    if (buffer->allocated < (buffer->used + max_bytes)) {\n+        if (buffer->pos > 0) {\n+            buffer_shift(buffer);\n+        }\n+        if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {\n+            return -1;\n+        }\n+    }\n+\n+    if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)\n+      return -1;\n+\n+    buffer->used += actual;\n+    buffer_verify(buffer);\n+    return actual;\n+}\n+\n /**\n  * @brief Ensure the buffer has at least a certain preallocated size.\n  *\ndiff --git a/src/connect.c b/src/connect.c\nindex 15cae644..e7520f40 100644\n--- a/src/connect.c\n+++ b/src/connect.c\n@@ -156,6 +156,20 @@ static int set_tcp_nodelay(socket_t socket)\n                       sizeof(opt));\n }\n \n+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)\n+{\n+#ifdef HAVE_TCP_CONGESTION\n+\treturn setsockopt(socket,\n+\t\t\t  IPPROTO_TCP,\n+\t\t\t  TCP_CONGESTION,\n+\t\t\t  (void *)ccalgo,\n+\t\t\t  strlen(ccalgo));\n+#else\n+\terrno = ENOTSUP;\n+\treturn -1;\n+#endif\n+}\n+\n /**\n  * @internal\n  *\n@@ -256,6 +270,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n             }\n         }\n \n+        if (session->opts.ccalgo) {\n+\t     rc = set_tcp_ccalgo(s, session->opts.ccalgo);\n+\t     if (rc < 0) {\n+\t\t ssh_set_error(session, SSH_FATAL,\n+\t\t\t       \"Failed to set TCP_CONGESTION on socket: %s\",\n+\t\t\t       ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));\n+\t\t ssh_connect_socket_close(s);\n+\t\t s = -1;\n+\t\t continue;\n+\t     }\n+\t}\n+\n         errno = 0;\n         rc = connect(s, itr->ai_addr, itr->ai_addrlen);\n         if (rc == -1 && (errno != 0) && (errno != EINPROGRESS)) {\ndiff --git a/src/options.c b/src/options.c\nindex 38511455..a183605d 100644\n--- 
a/src/options.c\n+++ b/src/options.c\n@@ -217,6 +217,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)\n     new->opts.gss_delegate_creds    = src->opts.gss_delegate_creds;\n     new->opts.flags                 = src->opts.flags;\n     new->opts.nodelay               = src->opts.nodelay;\n+    new->opts.ccalgo                = src->opts.ccalgo;\n     new->opts.config_processed      = src->opts.config_processed;\n     new->common.log_verbosity       = src->common.log_verbosity;\n     new->common.callbacks           = src->common.callbacks;\n@@ -458,6 +459,10 @@ int ssh_options_set_algo(ssh_session session,\n  *                Set it to disable Nagle's Algorithm (TCP_NODELAY) on the\n  *                session socket. (int, 0=false)\n  *\n+ *              - SSH_OPTIONS_CCALGO\n+ *                Set it to specify TCP congestion control algorithm on the\n+ *                session socket (Linux only). (int, 0=false)\n+ *\n  *              - SSH_OPTIONS_PROCESS_CONFIG\n  *                Set it to false to disable automatic processing of per-user\n  *                and system-wide OpenSSH configuration files. LibSSH\n@@ -1023,6 +1028,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,\n                 session->opts.nodelay = (*x & 0xff) > 0 ? 
1 : 0;\n             }\n             break;\n+\tcase SSH_OPTIONS_CCALGO:\n+            v = value;\n+            if (v == NULL || v[0] == '\\0') {\n+                ssh_set_error_invalid(session);\n+                return -1;\n+            } else {\n+                SAFE_FREE(session->opts.ccalgo);\n+                session->opts.ccalgo = strdup(v);\n+                if (session->opts.ccalgo == NULL) {\n+                    ssh_set_error_oom(session);\n+                    return -1;\n+                }\n+            }\n+            break;\n         case SSH_OPTIONS_PROCESS_CONFIG:\n             if (value == NULL) {\n                 ssh_set_error_invalid(session);\ndiff --git a/src/session.c b/src/session.c\nindex 8c509699..88602b6a 100644\n--- a/src/session.c\n+++ b/src/session.c\n@@ -108,6 +108,7 @@ ssh_session ssh_new(void)\n     session->opts.fd = -1;\n     session->opts.compressionlevel = 7;\n     session->opts.nodelay = 0;\n+    session->opts.ccalgo = NULL;\n \n     session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH |\n                           SSH_OPT_FLAG_PUBKEY_AUTH |\ndiff --git a/src/sftp.c b/src/sftp.c\nindex e01012a8..702623a0 100644\n--- a/src/sftp.c\n+++ b/src/sftp.c\n@@ -2228,6 +2228,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {\n   return -1; /* not reached */\n }\n \n+/*\n+ * sftp_async_write is based on and sftp_async_write_end is copied from\n+ * https://github.com/limes-datentechnik-gmbh/libssh\n+ *\n+ * sftp_async_write has some optimizations:\n+ * - use ssh_buffer_new_size() to reduce realoc_buffer.\n+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.\n+ */\n+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,\n+\t\t\t uint32_t* id) {\n+  sftp_session sftp = file->sftp;\n+  ssh_buffer buffer;\n+  uint32_t buf_sz;\n+  ssize_t actual;\n+  int len;\n+  int packetlen;\n+  int rc;\n+\n+#define HEADROOM 16\n+  /* sftp_packet_write() prepends a 5-bytes 
(uint32_t length and\n+   * 1-byte type) header to the head of the payload by\n+   * ssh_buffer_prepend_data(). Inserting headroom by\n+   * ssh_buffer_new_size() eliminates memcpy for prepending the\n+   * header.\n+   */\n+\n+  buf_sz = (HEADROOM + /* for header */\n+\t    sizeof(uint32_t) + /* id */\n+\t    ssh_string_len(file->handle) + 4 + /* file->handle */\n+\t    sizeof(uint64_t) + /* file->offset */\n+\t    sizeof(uint32_t) + /* count */\n+\t    count); /* datastring */\n+\n+  buffer = ssh_buffer_new_size(buf_sz, HEADROOM);\n+  if (buffer == NULL) {\n+    ssh_set_error_oom(sftp->session);\n+    return -1;\n+  }\n+\n+  *id = sftp_get_new_id(file->sftp);\n+\n+  rc = ssh_buffer_pack(buffer,\n+                       \"dSqd\",\n+                       *id,\n+                       file->handle,\n+                       file->offset,\n+                       count); /* len of datastring */\n+\n+  if (rc != SSH_OK){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  actual = ssh_buffer_add_func(buffer, f, count, userdata);\n+  if (actual < 0){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  packetlen=ssh_buffer_get_len(buffer)+5;\n+  len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);\n+  ssh_buffer_free(buffer);\n+  if (len < 0) {\n+    return SSH_ERROR;\n+  } else  if (len != packetlen) {\n+    ssh_set_error(sftp->session, SSH_FATAL,\n+      \"Could only send %d of %d bytes to remote host!\", len, packetlen);\n+    SSH_LOG(SSH_LOG_PACKET,\n+        \"Could not write as much data as expected\");\n+    return SSH_ERROR;\n+  }\n+\n+  file->offset += actual;\n+\n+  return actual;\n+}\n+\n+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {\n+  sftp_session sftp = file->sftp;\n+  sftp_message msg = NULL;\n+  sftp_status_message status;\n+\n+  msg = sftp_dequeue(sftp, id);\n+  while (msg == NULL) {\n+    if (!blocking && 
ssh_channel_poll(sftp->channel, 0) == 0) {\n+      /* we cannot block */\n+      return SSH_AGAIN;\n+    }\n+    if (sftp_read_and_dispatch(sftp) < 0) {\n+      /* something nasty has happened */\n+      return SSH_ERROR;\n+    }\n+    msg = sftp_dequeue(sftp, id);\n+  }\n+\n+  switch (msg->packet_type) {\n+    case SSH_FXP_STATUS:\n+      status = parse_status_msg(msg);\n+      sftp_message_free(msg);\n+      if (status == NULL) {\n+        return SSH_ERROR;\n+      }\n+      sftp_set_error(sftp, status->status);\n+      switch (status->status) {\n+        case SSH_FX_OK:\n+          status_msg_free(status);\n+          return SSH_OK;\n+        default:\n+          break;\n+      }\n+      ssh_set_error(sftp->session, SSH_REQUEST_DENIED,\n+          \"SFTP server: %s\", status->errormsg);\n+      status_msg_free(status);\n+      return SSH_ERROR;\n+    default:\n+      ssh_set_error(sftp->session, SSH_FATAL,\n+          \"Received message %d during write!\", msg->packet_type);\n+      sftp_message_free(msg);\n+      return SSH_ERROR;\n+  }\n+\n+  return SSH_ERROR; /* not reached */\n+}\n+\n /* Seek to a specific location in a file. */\n int sftp_seek(sftp_file file, uint32_t new_offset) {\n   if (file == NULL) {\n"
  },
  {
    "path": "patch/libssh-0.11.2.patch",
    "content": "diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake\nindex 8765dc6e..766e7d16 100644\n--- a/ConfigureChecks.cmake\n+++ b/ConfigureChecks.cmake\n@@ -209,6 +209,7 @@ if (UNIX)\n     check_library_exists(util forkpty \"\" HAVE_LIBUTIL)\n     check_function_exists(cfmakeraw HAVE_CFMAKERAW)\n     check_function_exists(__strtoull HAVE___STRTOULL)\n+    check_symbol_exists(TCP_CONGESTION \"netinet/tcp.h\" HAVE_TCP_CONGESTION)\n endif (UNIX)\n \n set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL \"libssh required system libraries\")\ndiff --git a/config.h.cmake b/config.h.cmake\nindex 8dce5273..ef534762 100644\n--- a/config.h.cmake\n+++ b/config.h.cmake\n@@ -219,6 +219,8 @@\n \n #cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1\n \n+#cmakedefine HAVE_TCP_CONGESTION 1\n+\n /* Define to 1 if you want to enable GSSAPI */\n #cmakedefine WITH_GSSAPI 1\n \ndiff --git a/include/libssh/buffer.h b/include/libssh/buffer.h\nindex d22178e7..2d6aa0a7 100644\n--- a/include/libssh/buffer.h\n+++ b/include/libssh/buffer.h\n@@ -37,6 +37,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);\n int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);\n int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);\n int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);\n+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,\n+\t\t\t    void *userdata);\n \n int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);\n \ndiff --git a/include/libssh/libssh.h b/include/libssh/libssh.h\nindex 3bddb019..1d5d7761 100644\n--- a/include/libssh/libssh.h\n+++ b/include/libssh/libssh.h\n@@ -373,6 +373,7 @@ enum ssh_options_e {\n     SSH_OPTIONS_HOST,\n     SSH_OPTIONS_PORT,\n     SSH_OPTIONS_PORT_STR,\n+    SSH_OPTIONS_AI_FAMILY,\n     SSH_OPTIONS_FD,\n     SSH_OPTIONS_USER,\n     SSH_OPTIONS_SSH_DIR,\n@@ -407,6 +408,7 @@ enum ssh_options_e {\n     SSH_OPTIONS_GSSAPI_AUTH,\n     SSH_OPTIONS_GLOBAL_KNOWNHOSTS,\n     
SSH_OPTIONS_NODELAY,\n+    SSH_OPTIONS_CCALGO,\n     SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,\n     SSH_OPTIONS_PROCESS_CONFIG,\n     SSH_OPTIONS_REKEY_DATA,\n@@ -876,6 +878,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);\n LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);\n \n LIBSSH_API ssh_buffer ssh_buffer_new(void);\n+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);\n LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);\n #define SSH_BUFFER_FREE(x) \\\n     do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)\n@@ -886,6 +889,12 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);\n LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);\n LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);\n \n+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);\n+\n+LIBSSH_API const char **ssh_ciphers(void);\n+LIBSSH_API const char **ssh_hmacs(void);\n+LIBSSH_API void ssh_use_openssh_proxy_jumps(int);\n+\n #ifndef LIBSSH_LEGACY_0_4\n #include \"libssh/legacy.h\"\n #endif\ndiff --git a/include/libssh/session.h b/include/libssh/session.h\nindex aed94072..327cf4fe 100644\n--- a/include/libssh/session.h\n+++ b/include/libssh/session.h\n@@ -255,6 +255,7 @@ struct ssh_session_struct {\n         unsigned long timeout; /* seconds */\n         unsigned long timeout_usec;\n         uint16_t port;\n+        int  ai_family;\n         socket_t fd;\n         int StrictHostKeyChecking;\n         char compressionlevel;\n@@ -264,6 +265,7 @@ struct ssh_session_struct {\n         int flags;\n         int exp_flags;\n         int nodelay;\n+        char *ccalgo;\n         bool config_processed;\n         uint8_t options_seen[SOC_MAX];\n         uint64_t rekey_data;\ndiff --git a/include/libssh/sftp.h b/include/libssh/sftp.h\nindex cf4458c3..1a864795 100644\n--- a/include/libssh/sftp.h\n+++ b/include/libssh/sftp.h\n@@ -569,6 +569,10 @@ SSH_DEPRECATED 
LIBSSH_API int sftp_async_read(sftp_file file,\n                                               uint32_t len,\n                                               uint32_t id);\n \n+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,\n+\t\t\t\t    void *userdata, uint32_t* id);\n+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);\n+\n /**\n  * @brief Write to a file using an opened sftp file handle.\n  *\ndiff --git a/src/buffer.c b/src/buffer.c\nindex 449fa941..f49e8af6 100644\n--- a/src/buffer.c\n+++ b/src/buffer.c\n@@ -142,6 +142,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)\n     return buf;\n }\n \n+/**\n+ * @brief Create a new SSH buffer with a specified size and headroom.\n+ *\n+ * @param[in] len       length for newly initialized SSH buffer.\n+ * @param[in] headroom  length for headroom\n+ * @return A newly initialized SSH buffer, NULL on error.\n+ */\n+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)\n+{\n+    struct ssh_buffer_struct *buf = NULL;\n+    int rc;\n+\n+    if (len < headroom)\n+\t    return NULL;\n+\n+    buf = calloc(1, sizeof(struct ssh_buffer_struct));\n+    if (buf == NULL) {\n+        return NULL;\n+    }\n+\n+    rc = ssh_buffer_allocate_size(buf, len);\n+    if (rc != 0) {\n+        SAFE_FREE(buf);\n+        return NULL;\n+    }\n+\n+    buf->pos += headroom;\n+    buf->used += headroom;\n+\n+    buffer_verify(buf);\n+\n+    return buf;\n+}\n+\n /**\n  * @brief Deallocate a SSH buffer.\n  *\n@@ -329,6 +363,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint\n     return 0;\n }\n \n+/**\n+ * @brief Add data at the tail of a buffer by an external function\n+ *\n+ * @param[in]  buffer    The buffer to add data.\n+ *\n+ * @param[in]  f         function that adds data to the buffer.\n+ *\n+ * @param[in]  max_bytes The maximum length of the data to add.\n+ *\n+ * @return               actual bytes added on success, < 0 
on error.\n+ */\n+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,\n+\t\t\t    size_t max_bytes, void *userdata)\n+{\n+    ssize_t actual;\n+\n+    if (buffer == NULL) {\n+        return -1;\n+    }\n+\n+    buffer_verify(buffer);\n+\n+    if (buffer->used + max_bytes < max_bytes) {\n+        return -1;\n+    }\n+\n+    if (buffer->allocated < (buffer->used + max_bytes)) {\n+        if (buffer->pos > 0) {\n+            buffer_shift(buffer);\n+        }\n+        if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {\n+            return -1;\n+        }\n+    }\n+\n+    if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)\n+      return -1;\n+\n+    buffer->used += actual;\n+    buffer_verify(buffer);\n+    return actual;\n+}\n+\n /**\n  * @brief Ensure the buffer has at least a certain preallocated size.\n  *\ndiff --git a/src/connect.c b/src/connect.c\nindex 2cb64037..51f4c87e 100644\n--- a/src/connect.c\n+++ b/src/connect.c\n@@ -109,7 +109,7 @@ static int ssh_connect_socket_close(socket_t s)\n #endif\n }\n \n-static int getai(const char *host, int port, struct addrinfo **ai)\n+static int getai(const char *host, int port, int ai_family, struct addrinfo **ai)\n {\n     const char *service = NULL;\n     struct addrinfo hints;\n@@ -118,7 +118,7 @@ static int getai(const char *host, int port, struct addrinfo **ai)\n     ZERO_STRUCT(hints);\n \n     hints.ai_protocol = IPPROTO_TCP;\n-    hints.ai_family = PF_UNSPEC;\n+    hints.ai_family = ai_family > 0 ? 
ai_family : PF_UNSPEC;\n     hints.ai_socktype = SOCK_STREAM;\n \n     if (port == 0) {\n@@ -151,6 +151,20 @@ static int set_tcp_nodelay(socket_t socket)\n                       sizeof(opt));\n }\n \n+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)\n+{\n+#ifdef HAVE_TCP_CONGESTION\n+\treturn setsockopt(socket,\n+\t\t\t  IPPROTO_TCP,\n+\t\t\t  TCP_CONGESTION,\n+\t\t\t  (void *)ccalgo,\n+\t\t\t  strlen(ccalgo));\n+#else\n+\terrno = ENOTSUP;\n+\treturn -1;\n+#endif\n+}\n+\n /**\n  * @internal\n  *\n@@ -168,7 +182,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n     struct addrinfo *ai = NULL;\n     struct addrinfo *itr = NULL;\n \n-    rc = getai(host, port, &ai);\n+    rc = getai(host, port, session->opts.ai_family, &ai);\n     if (rc != 0) {\n         ssh_set_error(session, SSH_FATAL,\n                       \"Failed to resolve hostname %s (%s)\",\n@@ -194,7 +208,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n \n             SSH_LOG(SSH_LOG_PACKET, \"Resolving %s\", bind_addr);\n \n-            rc = getai(bind_addr, 0, &bind_ai);\n+            rc = getai(bind_addr, 0, session->opts.ai_family, &bind_ai);\n             if (rc != 0) {\n                 ssh_set_error(session, SSH_FATAL,\n                               \"Failed to resolve bind address %s (%s)\",\n@@ -251,6 +265,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,\n             }\n         }\n \n+        if (session->opts.ccalgo) {\n+\t     rc = set_tcp_ccalgo(s, session->opts.ccalgo);\n+\t     if (rc < 0) {\n+\t\t ssh_set_error(session, SSH_FATAL,\n+\t\t\t       \"Failed to set TCP_CONGESTION on socket: %s\",\n+\t\t\t       ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));\n+\t\t ssh_connect_socket_close(s);\n+\t\t s = -1;\n+\t\t continue;\n+\t     }\n+\t}\n+\n         errno = 0;\n         rc = connect(s, itr->ai_addr, itr->ai_addrlen);\n         if (rc == -1) {\ndiff --git a/src/misc.c 
b/src/misc.c\nindex 774211fb..74e57959 100644\n--- a/src/misc.c\n+++ b/src/misc.c\n@@ -71,6 +71,8 @@\n #include \"libssh/priv.h\"\n #include \"libssh/misc.h\"\n #include \"libssh/session.h\"\n+#include \"libssh/wrapper.h\"\n+#include \"libssh/crypto.h\"\n \n #ifdef HAVE_LIBGCRYPT\n #define GCRYPT_STRING \"/gcrypt\"\n@@ -2054,6 +2056,42 @@ ssize_t ssh_readn(int fd, void *buf, size_t nbytes)\n     return total_bytes_read;\n }\n \n+/**\n+ * @brief Return supported cipher names\n+ * @return\tThe list of cipher names.\n+ */\n+const char **ssh_ciphers(void)\n+{\n+     struct ssh_cipher_struct *tab=ssh_get_ciphertab();\n+     static const char *ciphers[32];\n+     int n;\n+\n+     memset(ciphers, 0, sizeof(*ciphers));\n+\n+     for (n = 0; tab[n].name != NULL; n++) {\n+\t  ciphers[n] = tab[n].name;\n+     }\n+     return ciphers;\n+}\n+\n+/**\n+ * @brief Return supported hmac names\n+ * @return\tThe list of hmac names.\n+ */\n+const char **ssh_hmacs(void)\n+{\n+     struct ssh_hmac_struct *tab=ssh_get_hmactab();\n+     static const char *hmacs[32];\n+     int n;\n+\n+     memset(hmacs, 0, sizeof(*hmacs));\n+\n+     for (n = 0; tab[n].name != NULL; n++) {\n+\t  hmacs[n] = tab[n].name;\n+     }\n+     return hmacs;\n+}\n+\n /**\n  * @brief Write the requested number of bytes to a local file.\n  *\n@@ -2227,6 +2265,17 @@ ssh_proxyjumps_free(struct ssh_list *proxy_jump_list)\n     }\n }\n \n+static bool force_openssh_proxy_jumps;\n+\n+/**\n+ * @breif set use openssh proxy jumps without the OPENSSH_PROXYJUMP env var\n+ */\n+void\n+ssh_use_openssh_proxy_jumps(int v)\n+{\n+    force_openssh_proxy_jumps = (v > 0);\n+}\n+\n /**\n  * @brief Check if libssh proxy jumps is enabled\n  *\n@@ -2241,7 +2290,12 @@ ssh_libssh_proxy_jumps(void)\n {\n     const char *t = getenv(\"OPENSSH_PROXYJUMP\");\n \n+    if (force_openssh_proxy_jumps)\n+\t    return false;\n+\n     return !(t != NULL && t[0] == '1');\n }\n \n+\n+\n /** @} */\ndiff --git a/src/options.c b/src/options.c\nindex 
785296dd..a82d4d81 100644\n--- a/src/options.c\n+++ b/src/options.c\n@@ -251,6 +251,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)\n     new->opts.gss_delegate_creds    = src->opts.gss_delegate_creds;\n     new->opts.flags                 = src->opts.flags;\n     new->opts.nodelay               = src->opts.nodelay;\n+    new->opts.ccalgo                = src->opts.ccalgo;\n     new->opts.config_processed      = src->opts.config_processed;\n     new->opts.control_master        = src->opts.control_master;\n     new->common.log_verbosity       = src->common.log_verbosity;\n@@ -326,6 +327,9 @@ int ssh_options_set_algo(ssh_session session,\n  *              - SSH_OPTIONS_PORT_STR:\n  *                The port to connect to (const char *).\n  *\n+ *              - SSH_OPTIONS_AI_FAMILY:\n+ *                The address family for connecting (int *).\n+ *\n  *              - SSH_OPTIONS_FD:\n  *                The file descriptor to use (socket_t).\\n\n  *                \\n\n@@ -571,6 +575,10 @@ int ssh_options_set_algo(ssh_session session,\n  *                Set it to disable Nagle's Algorithm (TCP_NODELAY) on the\n  *                session socket. (int, 0=false)\n  *\n+ *              - SSH_OPTIONS_CCALGO\n+ *                Set it to specify TCP congestion control algorithm on the\n+ *                session socket (Linux only). (int, 0=false)\n+ *\n  *              - SSH_OPTIONS_PROCESS_CONFIG\n  *                Set it to false to disable automatic processing of per-user\n  *                and system-wide OpenSSH configuration files. 
LibSSH\n@@ -727,6 +735,21 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,\n                 session->opts.port = i & 0xffffU;\n             }\n             break;\n+        case SSH_OPTIONS_AI_FAMILY:\n+            if (value == NULL) {\n+                session->opts.ai_family = 0;\n+                ssh_set_error_invalid(session);\n+                return -1;\n+            } else {\n+                int *x = (int *) value;\n+                if (*x < 0) {\n+\t\t    session->opts.ai_family = 0;\n+                    ssh_set_error_invalid(session);\n+                    return -1;\n+                }\n+                session->opts.ai_family = *x;\n+            }\n+            break;\n         case SSH_OPTIONS_FD:\n             if (value == NULL) {\n                 session->opts.fd = SSH_INVALID_SOCKET;\n@@ -1241,6 +1264,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,\n                 session->opts.nodelay = (*x & 0xff) > 0 ? 1 : 0;\n             }\n             break;\n+\tcase SSH_OPTIONS_CCALGO:\n+            v = value;\n+            if (v == NULL || v[0] == '\\0') {\n+                ssh_set_error_invalid(session);\n+                return -1;\n+            } else {\n+                SAFE_FREE(session->opts.ccalgo);\n+                session->opts.ccalgo = strdup(v);\n+                if (session->opts.ccalgo == NULL) {\n+                    ssh_set_error_oom(session);\n+                    return -1;\n+                }\n+            }\n+            break;\n         case SSH_OPTIONS_PROCESS_CONFIG:\n             if (value == NULL) {\n                 ssh_set_error_invalid(session);\ndiff --git a/src/session.c b/src/session.c\nindex 9fd5d946..ed9f908e 100644\n--- a/src/session.c\n+++ b/src/session.c\n@@ -107,9 +107,11 @@ ssh_session ssh_new(void)\n     /* OPTIONS */\n     session->opts.StrictHostKeyChecking = 1;\n     session->opts.port = 22;\n+    session->opts.ai_family = 0;\n     session->opts.fd = -1;\n     
session->opts.compressionlevel = 7;\n     session->opts.nodelay = 0;\n+    session->opts.ccalgo = NULL;\n     session->opts.identities_only = false;\n     session->opts.control_master = SSH_CONTROL_MASTER_NO;\n \ndiff --git a/src/sftp.c b/src/sftp.c\nindex 37b4133b..12b6d296 100644\n--- a/src/sftp.c\n+++ b/src/sftp.c\n@@ -1488,6 +1488,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {\n   return -1; /* not reached */\n }\n \n+/*\n+ * sftp_async_write is based on and sftp_async_write_end is copied from\n+ * https://github.com/limes-datentechnik-gmbh/libssh\n+ *\n+ * sftp_async_write has some optimizations:\n+ * - use ssh_buffer_new_size() to reduce realoc_buffer.\n+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.\n+ */\n+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,\n+\t\t\t uint32_t* id) {\n+  sftp_session sftp = file->sftp;\n+  ssh_buffer buffer;\n+  uint32_t buf_sz;\n+  ssize_t actual;\n+  int len;\n+  int packetlen;\n+  int rc;\n+\n+#define HEADROOM 16\n+  /* sftp_packet_write() prepends a 5-bytes (uint32_t length and\n+   * 1-byte type) header to the head of the payload by\n+   * ssh_buffer_prepend_data(). 
Inserting headroom by\n+   * ssh_buffer_new_size() eliminates memcpy for prepending the\n+   * header.\n+   */\n+\n+  buf_sz = (HEADROOM + /* for header */\n+\t    sizeof(uint32_t) + /* id */\n+\t    ssh_string_len(file->handle) + 4 + /* file->handle */\n+\t    sizeof(uint64_t) + /* file->offset */\n+\t    sizeof(uint32_t) + /* count */\n+\t    count); /* datastring */\n+\n+  buffer = ssh_buffer_new_size(buf_sz, HEADROOM);\n+  if (buffer == NULL) {\n+    ssh_set_error_oom(sftp->session);\n+    return -1;\n+  }\n+\n+  *id = sftp_get_new_id(file->sftp);\n+\n+  rc = ssh_buffer_pack(buffer,\n+                       \"dSqd\",\n+                       *id,\n+                       file->handle,\n+                       file->offset,\n+                       count); /* len of datastring */\n+\n+  if (rc != SSH_OK){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  actual = ssh_buffer_add_func(buffer, f, count, userdata);\n+  if (actual < 0){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+\n+  packetlen=ssh_buffer_get_len(buffer)+5;\n+  len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);\n+  ssh_buffer_free(buffer);\n+  if (len < 0) {\n+    return SSH_ERROR;\n+  } else  if (len != packetlen) {\n+    ssh_set_error(sftp->session, SSH_FATAL,\n+      \"Could only send %d of %d bytes to remote host!\", len, packetlen);\n+    SSH_LOG(SSH_LOG_PACKET,\n+        \"Could not write as much data as expected\");\n+    return SSH_ERROR;\n+  }\n+\n+  file->offset += actual;\n+\n+  return actual;\n+}\n+\n+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {\n+  sftp_session sftp = file->sftp;\n+  sftp_message msg = NULL;\n+  sftp_status_message status;\n+\n+  msg = sftp_dequeue(sftp, id);\n+  while (msg == NULL) {\n+    if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {\n+      /* we cannot block */\n+      return SSH_AGAIN;\n+    }\n+    if 
(sftp_read_and_dispatch(sftp) < 0) {\n+      /* something nasty has happened */\n+      return SSH_ERROR;\n+    }\n+    msg = sftp_dequeue(sftp, id);\n+  }\n+\n+  switch (msg->packet_type) {\n+    case SSH_FXP_STATUS:\n+      status = parse_status_msg(msg);\n+      sftp_message_free(msg);\n+      if (status == NULL) {\n+        return SSH_ERROR;\n+      }\n+      sftp_set_error(sftp, status->status);\n+      switch (status->status) {\n+        case SSH_FX_OK:\n+          status_msg_free(status);\n+          return SSH_OK;\n+        default:\n+          break;\n+      }\n+      ssh_set_error(sftp->session, SSH_REQUEST_DENIED,\n+          \"SFTP server: %s\", status->errormsg);\n+      status_msg_free(status);\n+      return SSH_ERROR;\n+    default:\n+      ssh_set_error(sftp->session, SSH_FATAL,\n+          \"Received message %d during write!\", msg->packet_type);\n+      sftp_message_free(msg);\n+      return SSH_ERROR;\n+  }\n+\n+  return SSH_ERROR; /* not reached */\n+}\n+\n /* Seek to a specific location in a file. */\n int sftp_seek(sftp_file file, uint32_t new_offset) {\n   if (file == NULL) {\n"
  },
  {
    "path": "patch/libssh-0.9.6.patch",
    "content": "diff --git a/DefineOptions.cmake b/DefineOptions.cmake\nindex b82a5018..f1f2ab9d 100644\n--- a/DefineOptions.cmake\n+++ b/DefineOptions.cmake\n@@ -15,13 +15,14 @@ option(UNIT_TESTING \"Build with unit tests\" OFF)\n option(CLIENT_TESTING \"Build with client tests; requires openssh\" OFF)\n option(SERVER_TESTING \"Build with server tests; requires openssh and dropbear\" OFF)\n option(WITH_BENCHMARKS \"Build benchmarks tools\" OFF)\n-option(WITH_EXAMPLES \"Build examples\" ON)\n+option(WITH_EXAMPLES \"Build examples\" OFF)\n option(WITH_NACL \"Build with libnacl (curve25519)\" ON)\n option(WITH_SYMBOL_VERSIONING \"Build with symbol versioning\" ON)\n option(WITH_ABI_BREAK \"Allow ABI break\" OFF)\n option(WITH_GEX \"Enable DH Group exchange mechanisms\" ON)\n option(FUZZ_TESTING \"Build with fuzzer for the server\" OFF)\n option(PICKY_DEVELOPER \"Build with picky developer flags\" OFF)\n+option(WITH_STATIC_LIB \"Build static library\" ON)\n \n if (WITH_ZLIB)\n     set(WITH_LIBZ ON)\n@@ -53,3 +54,7 @@ endif (NOT GLOBAL_BIND_CONFIG)\n if (NOT GLOBAL_CLIENT_CONFIG)\n   set(GLOBAL_CLIENT_CONFIG \"/etc/ssh/ssh_config\")\n endif (NOT GLOBAL_CLIENT_CONFIG)\n+\n+if (WITH_STATIC_LIB)\n+  set(BUILD_STATIC_LIB ON)\n+endif()\ndiff --git a/include/libssh/sftp.h b/include/libssh/sftp.h\nindex 8c14b21d..95ac1d6b 100644\n--- a/include/libssh/sftp.h\n+++ b/include/libssh/sftp.h\n@@ -565,6 +565,9 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_\n  */\n LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);\n \n+LIBSSH_API int sftp_async_write(sftp_file file, const void *buf, size_t count, uint32_t* id);\n+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);\n+\n /**\n  * @brief Seek to a specific location in a file.\n  *\ndiff --git a/src/CMakeLists.txt b/src/CMakeLists.txt\nindex a576cf71..303a1c7f 100644\n--- a/src/CMakeLists.txt\n+++ b/src/CMakeLists.txt\n@@ -412,6 +412,10 @@ if 
(BUILD_STATIC_LIB)\n   if (WIN32)\n     target_compile_definitions(ssh-static PUBLIC \"LIBSSH_STATIC\")\n   endif (WIN32)\n+  install(TARGETS ssh-static\n+\tEXPORT libssh-config\n+\tLIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}\n+\tCOMPONENT libraries)\n endif (BUILD_STATIC_LIB)\n \n message(STATUS \"Threads_FOUND=${Threads_FOUND}\")\ndiff --git a/src/sftp.c b/src/sftp.c\nindex a8346040..a4261ec9 100644\n--- a/src/sftp.c\n+++ b/src/sftp.c\n@@ -2234,6 +2234,102 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {\n   return -1; /* not reached */\n }\n \n+/*\n+ * sftp_async_write and sftp_async_write_end are copied from\n+ * https://github.com/limes-datentechnik-gmbh/libssh\n+ */\n+int sftp_async_write(sftp_file file, const void *buf, size_t count, uint32_t* id) {\n+  sftp_session sftp = file->sftp;\n+  ssh_buffer buffer;\n+  int len;\n+  int packetlen;\n+  int rc;\n+\n+  buffer = ssh_buffer_new();\n+  if (buffer == NULL) {\n+    ssh_set_error_oom(sftp->session);\n+    return -1;\n+  }\n+\n+  *id = sftp_get_new_id(file->sftp);\n+\n+  rc = ssh_buffer_pack(buffer,\n+                       \"dSqdP\",\n+                       *id,\n+                       file->handle,\n+                       file->offset,\n+                       count, /* len of datastring */\n+                       (size_t)count, buf);\n+  if (rc != SSH_OK){\n+    ssh_set_error_oom(sftp->session);\n+    ssh_buffer_free(buffer);\n+    return SSH_ERROR;\n+  }\n+  packetlen=ssh_buffer_get_len(buffer)+5;\n+  len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);\n+  ssh_buffer_free(buffer);\n+  if (len < 0) {\n+    return SSH_ERROR;\n+  } else  if (len != packetlen) {\n+    ssh_set_error(sftp->session, SSH_FATAL,\n+      \"Could only send %d of %d bytes to remote host!\", len, packetlen);\n+    SSH_LOG(SSH_LOG_PACKET,\n+        \"Could not write as much data as expected\");\n+    return SSH_ERROR;\n+  }\n+\n+  file->offset += count;\n+\n+  return SSH_OK;\n+}\n+\n+int 
sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {\n+  sftp_session sftp = file->sftp;\n+  sftp_message msg = NULL;\n+  sftp_status_message status;\n+\n+  msg = sftp_dequeue(sftp, id);\n+  while (msg == NULL) {\n+    if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {\n+      /* we cannot block */\n+      return SSH_AGAIN;\n+    }\n+    if (sftp_read_and_dispatch(sftp) < 0) {\n+      /* something nasty has happened */\n+      return SSH_ERROR;\n+    }\n+    msg = sftp_dequeue(sftp, id);\n+  }\n+\n+  switch (msg->packet_type) {\n+    case SSH_FXP_STATUS:\n+      status = parse_status_msg(msg);\n+      sftp_message_free(msg);\n+      if (status == NULL) {\n+        return SSH_ERROR;\n+      }\n+      sftp_set_error(sftp, status->status);\n+      switch (status->status) {\n+        case SSH_FX_OK:\n+          status_msg_free(status);\n+          return SSH_OK;\n+        default:\n+          break;\n+      }\n+      ssh_set_error(sftp->session, SSH_REQUEST_DENIED,\n+          \"SFTP server: %s\", status->errormsg);\n+      status_msg_free(status);\n+      return SSH_ERROR;\n+    default:\n+      ssh_set_error(sftp->session, SSH_FATAL,\n+          \"Received message %d during write!\", msg->packet_type);\n+      sftp_message_free(msg);\n+      return SSH_ERROR;\n+  }\n+\n+  return SSH_ERROR; /* not reached */\n+}\n+\n /* Seek to a specific location in a file. */\n int sftp_seek(sftp_file file, uint32_t new_offset) {\n   if (file == NULL) {\n"
  },
  {
    "path": "rpm/.gitignore",
    "content": "\n# generated by cmake\nmscp.spec\n"
  },
  {
    "path": "rpm/mscp.spec.in",
    "content": "Name:\t\tmscp\nVersion:\t@MSCP_VERSION@\nRelease:\t1%{?dist}\nSummary:\tmscp, fast file transfer over multiple SSH connections\n\nGroup:\t\tApplications/Internet\nLicense:\tGPLv3\nURL:\t\thttps://github.com/upa/mscp\nSource0:\t%{name}-%{version}.tar.gz\n\nBuildRequires:\tgcc make cmake zlib-devel openssl-devel\nRequires:\tglibc crypto-policies krb5-libs openssl-libs libcom_err\n\n%description\nmscp transfers files over multiple SSH connections. Multiple threads\nand connections in mscp transfer (1) multiple files simultaneously\nand (2) a large file in parallel. It would shorten the waiting time\nfor transferring a lot of/large files over networks.\n\n\n%global debug_package %{nil}\n\n%prep\n%setup -q\n\n\n%build\ncmake -S . -B build -DINSTALL_EXECUTABLE_ONLY=ON\nmake -C build %{?_smp_mflags}\n\n\n%install\nmake -C build install DESTDIR=%{buildroot}\n\n%files\n/usr/local/bin/mscp\n/usr/local/share/man/man1/mscp.1\n\n\n%changelog\n* Sat Nov 08 2025 Ryo Nakamura <upa@haeena.net> - 0.2.4-1\n- RPM release for v0.2.4\n\n* Tue Aug 12 2025 Ryo Nakamura <upa@haeena.net> - 0.2.3-1\n- RPM release for v0.2.3\n\n* Wed Apr 16 2025 Ryo Nakamura <upa@haeena.net> - 0.2.2-1\n- RPM release for v0.2.2\n\n* Sat May 11 2024 Ryo Nakamura <upa@haeena.net> - 0.2.1-1\n- RPM release for v0.2.1\n\n* Mon Apr 15 2024 Ryo Nakamura <upa@haeena.net> - 0.2.0-1\n- RPM release for v0.2.0\n\n* Thu Mar 14 2024 Ryo Nakamura <upa@haeena.net> - 0.1.5-0\n- RPM release for v0.1.5\n\n* Wed Feb 07 2024 Ryo Nakamura <upa@haeena.net> - 0.1.4-0\n- RPM release for v0.1.4\n\n* Sat Feb 03 2024 Ryo Nakamura <upa@haeena.net> - 0.1.3-0\n- Initial release for rpm packaging\n"
  },
  {
    "path": "scripts/install-build-deps.sh",
    "content": "#!/usr/bin/env bash\n#\n# Install build dependencies.\n\nset -e\n#set -u\n\nfunction print_help() {\n\techo \"$0 [options]\"\n\techo \"    --dont-install         Print required packages.\"\n\techo \"    --platform [PLATFORM]  PLATFORM is Kernel-ID, e.g., Linux-ubuntu.\"\n\techo \"                           Automatically detected if not specified.\"\n}\n\nplatform=$(uname -s)\ndoinstall=1\n\nif [ -e /etc/os-release ]; then\n\tsource /etc/os-release\n\tplatform=${platform}-${ID}\nfi\n\nwhile getopts h-: opt; do\n        optarg=\"${!OPTIND}\"\n        [[ \"$opt\" = - ]] && opt=\"-$OPTARG\"\n\tcase \"-${opt}\" in\n\t\t--dont-install)\n\t\t\tdoinstall=0\n\t\t\t;;\n\t\t--platform)\n\t\t\tplatform=$optarg\n\t\t\tshift\n\t\t\t;;\n\t\t-h)\n\t\t\tprint_help\n\t\t\texit 0\n\t\t\t;;\n\t\t*)\n\t\t\tprint_help\n\t\t\texit 1\n\t\t\t;;\n\tesac\ndone\n\ncase $platform in\n\tDarwin)\n\t\tcmd=\"brew install\"\n\t\tpkgs=\"openssl@3\"\n\t\t;;\n\tLinux-ubuntu* | Linux-debian* | Linux-devuan*)\n\t\tcmd=\"apt-get install --no-install-recommends -y\"\n\t\tpkgs=\"gcc make cmake zlib1g-dev libssl-dev libkrb5-dev\"\n\t\t;;\n\tLinux-centos* | Linux-rhel* | Linux-rocky* | Linux-almalinux)\n\t\tcmd=\"yum install -y\"\n\t\tpkgs=\"gcc make cmake zlib-devel openssl-devel rpm-build\"\n\t\t;;\n\tLinux-arch*)\n\t\tcmd=\"pacman --no-confirm -S\"\n\t\tpkgs=\"gcc make cmake\"\n\t\t;;\n\tFreeBSD-freebsd)\n\t\tcmd=\"pkg install\"\n\t\tpkgs=\"cmake\"\n\t\t;;\n\t*)\n\t\techo \"unsupported platform: $platform\"\n\t\texit 1\n\t\t;;\nesac\n\nif [ $doinstall -gt 0 ]; then\n\techo do \"$cmd $pkgs\"\n\t$cmd $pkgs\nelse\n\techo $pkgs\nfi\n"
  },
  {
    "path": "scripts/test-in-container.sh",
    "content": "#!/bin/bash -e\n#\n# Run this script in mscp docker containers.\n# This script runs end-to-end test with installed mscp.\n\nscript_dir=$(cd $(dirname ${0}) && pwd)\ncd $script_dir\n\nset -x\n\n# sshd listens on 22 and 8022\necho \"Port 22\" >> /etc/ssh/sshd_config\necho \"Port 8022\" >> /etc/ssh/sshd_config\n\n## Alpine default sshd disables TcpForwarding, which is required for proxyjump test\nsed -i -e 's/AllowTcpForwarding no/AllowTcpForwarding yes/' /etc/ssh/sshd_config\n\n# Run sshd\nif [ ! -e /var/run/sshd.pid ]; then\n\t/usr/sbin/sshd -E /tmp/sshd.log\n\tsleep 1\nfi\n\nfor port in 22 8022; do\n\tssh-keyscan -p $port localhost >> ${HOME}/.ssh/known_hosts\n\tssh-keyscan -p $port ip6-localhost >> ${HOME}/.ssh/known_hosts\n\tssh-keyscan -p $port 127.0.0.1 >> ${HOME}/.ssh/known_hosts\n\tssh-keyscan -p $port ::1 >> ${HOME}/.ssh/known_hosts\ndone\n\n\nif [ $# -gt 0 ]; then\n\t# command arguments are passed, exec them\n\t\"$@\"\nelse\n\t# no arguments passed, run the test\n\tpython3 -m pytest -v ../test\nfi\n"
  },
  {
    "path": "src/atomic.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _ATOMIC_H_\n#define _ATOMIC_H_\n\n#include <stdlib.h>\n#include <assert.h>\n#include <pthread.h>\n\ntypedef int refcnt;\n\nstatic inline void refcnt_inc(refcnt *cnt)\n{\n\t__sync_add_and_fetch(cnt, 1);\n}\n\nstatic inline refcnt refcnt_dec(refcnt *cnt)\n{\n\treturn __sync_sub_and_fetch(cnt, 1);\n}\n\n/* mutex */\n\ntypedef pthread_mutex_t lock;\n\nstatic inline void lock_init(lock *l)\n{\n\tpthread_mutex_init(l, NULL);\n}\n\nstatic inline void lock_acquire(lock *l)\n{\n\tint ret = pthread_mutex_lock(l);\n\tassert(ret == 0);\n}\n\nstatic inline void lock_release(lock *l)\n{\n\tint ret = pthread_mutex_unlock(l);\n\tassert(ret == 0);\n}\n\nstatic inline void lock_release_via_cleanup(void *l)\n{\n\tlock_release(l);\n}\n\n#define LOCK_ACQUIRE(l)  \\\n\tlock_acquire(l); \\\n\tpthread_cleanup_push(lock_release_via_cleanup, l)\n\n#define LOCK_RELEASE() pthread_cleanup_pop(1)\n\n/* read/write lock */\ntypedef pthread_rwlock_t rwlock;\n\nstatic inline void rwlock_init(rwlock *rw)\n{\n\tpthread_rwlock_init(rw, NULL);\n}\n\nstatic inline void rwlock_read_acquire(rwlock *rw)\n{\n\tint ret = pthread_rwlock_rdlock(rw);\n\tassert(ret == 0);\n}\n\nstatic inline void rwlock_write_acquire(rwlock *rw)\n{\n\tint ret = pthread_rwlock_wrlock(rw);\n\tassert(ret == 0);\n}\n\nstatic inline void rwlock_release(rwlock *rw)\n{\n\tint ret = pthread_rwlock_unlock(rw);\n\tassert(ret == 0);\n}\n\nstatic inline void rwlock_release_via_cleanup(void *rw)\n{\n\trwlock_release(rw);\n}\n\n#define RWLOCK_READ_ACQUIRE(rw)  \\\n\trwlock_read_acquire(rw); \\\n\tpthread_cleanup_push(rwlock_release_via_cleanup, rw)\n\n#define RWLOCK_WRITE_ACQUIRE(rw)  \\\n\trwlock_write_acquire(rw); \\\n\tpthread_cleanup_push(rwlock_release_via_cleanup, rw)\n\n#define RWLOCK_RELEASE() pthread_cleanup_pop(1)\n\n#endif /* _ATOMIC_H_ */\n"
  },
  {
    "path": "src/bwlimit.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <errno.h>\n\n#include <bwlimit.h>\n#include <platform.h>\n\n#define timespeczerorize(ts)    \\\n\tdo {                    \\\n\t\tts.tv_sec = 0;  \\\n\t\tts.tv_nsec = 0; \\\n\t} while (0)\n\nint bwlimit_init(struct bwlimit *bw, uint64_t bps, uint64_t win)\n{\n\tif (!(bw->sem = sem_create(1)))\n\t\treturn -1;\n\n\tbw->bps = bps;\n\tbw->win = win; /* msec window */\n\tbw->amt = (double)bps / 8 / 1000 * win; /* bytes in a window (msec) */\n\tbw->credit = bw->amt;\n\ttimespeczerorize(bw->wstart);\n\ttimespeczerorize(bw->wend);\n\n\treturn 0;\n}\n\n#define timespecisset(ts) ((ts).tv_sec || (ts).tv_nsec)\n\n#define timespecmsadd(a, msec, r)                                \\\n\tdo {                                                     \\\n\t\t(r).tv_sec = (a).tv_sec;                         \\\n\t\t(r).tv_nsec = (a).tv_nsec + (msec * 1000000);    \\\n\t\tif ((r).tv_nsec > 1000000000) {                  \\\n\t\t\t(r).tv_sec += (r.tv_nsec) / 1000000000L; \\\n\t\t\t(r).tv_nsec = (r.tv_nsec) % 1000000000L; \\\n\t\t}                                                \\\n\t} while (0)\n\n#define timespecsub(a, b, r)                             \\\n\tdo {                                             \\\n\t\t(r).tv_sec = (a).tv_sec - (b).tv_sec;    \\\n\t\t(r).tv_nsec = (a).tv_nsec - (b).tv_nsec; \\\n\t\tif ((r).tv_nsec < 0) {                   \\\n\t\t\t(r).tv_sec -= 1;                 \\\n\t\t\t(r).tv_nsec += 1000000000;       \\\n\t\t}                                        \\\n\t} while (0)\n\n#define timespeccmp(a, b, expr) \\\n\t((a.tv_sec * 1000000000 + a.tv_nsec) expr(b.tv_sec * 1000000000 + b.tv_nsec))\n\nint bwlimit_wait(struct bwlimit *bw, size_t nr_bytes)\n{\n\tstruct timespec now, end, rq, rm;\n\n\tif (bw->bps == 0)\n\t\treturn 0; /* no bandwidth limit */\n\n\tif (sem_wait(bw->sem) < 0)\n\t\treturn -1;\n\n\tclock_gettime(CLOCK_MONOTONIC, &now);\n\n\tif (!timespecisset(bw->wstart)) {\n\t\tbw->wstart = 
now;\n\t\ttimespecmsadd(bw->wstart, bw->win, bw->wend);\n\t}\n\n\tbw->credit -= nr_bytes;\n\n\tif (bw->credit < 0) {\n\t\t/* no more credit on this window. sleep until the end\n\t\t * of this window + additional time for the remaining\n\t\t * bytes. */\n\t\tuint64_t addition = (double)(bw->credit * -1) / (bw->bps / 8);\n\t\ttimespecmsadd(bw->wend, addition * 1000, end);\n\t\tif (timespeccmp(end, now, >)) {\n\t\t\ttimespecsub(end, now, rq);\n\t\t\twhile (nanosleep(&rq, &rm) == -1) {\n\t\t\t\tif (errno != EINTR)\n\t\t\t\t\tbreak;\n\t\t\t\trq = rm;\n\t\t\t}\n\t\t}\n\t\tbw->credit = bw->amt;\n\t\ttimespeczerorize(bw->wstart);\n\t}\n\n\tsem_post(bw->sem);\n\treturn 0;\n}\n"
  },
  {
    "path": "src/bwlimit.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _BWLIMIT_H_\n#define _BWLIMIT_H_\n\n#include <stdbool.h>\n#include <stdint.h>\n#include <sys/types.h>\n#include <time.h>\n#include <semaphore.h>\n\nstruct bwlimit {\n\tsem_t\t*sem;\t/* semaphore */\n\tsize_t\tbps;\t/* limit bit-rate (bps) */\n\tsize_t\twin;\t/* window size (msec) */\n\tsize_t\tamt;\t/* amount of bytes can be sent in a window */\n\n\tssize_t\t\tcredit;\t/* remaining bytes can be sent in a window */\n\tstruct timespec wstart, wend; /* window start time and end time */\n};\n\nint bwlimit_init(struct bwlimit *bw, uint64_t bps, uint64_t win);\n/* if bps is 0, it means that bwlimit is not active. If so,\n * bwlimit_wait() returns immediately. */\n\nint bwlimit_wait(struct bwlimit *bw, size_t nr_bytes);\n\n\n#endif /* _BWLIMIT_H_ */\n"
  },
  {
    "path": "src/checkpoint.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <fcntl.h>\n#include <sys/uio.h>\n#include <arpa/inet.h>\n\n#include <path.h>\n#include <print.h>\n#include <platform.h>\n#include <strerrno.h>\n#include <openbsd-compat/openbsd-compat.h>\n\n#include <checkpoint.h>\n\n#define MSCP_CHECKPOINT_MAGIC 0x7063736dUL /* mscp in ascii */\n#define MSCP_CHECKPOINT_VERSION 0x1\n\n/**\n * mscp checkpoint file format. All values are network byte order.\n *\n * The file starts with the File header:\n * 0                   1                   2                   3\n *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n * +---------------------------------------------------------------+\n * |                          Magic Code                           |\n * +---------------+-----------------------------------------------+\n * |     Version   |\n * +---------------+\n *\n * Magic code: 0x7063736dUL\n *\n * Version: 1.\n *\n *\n * Each object in a checkpoint always starts with an object header:\n * +---------------+---------------+-------------------------------+\n * |     Type      |      rsv      |             Length            |\n * +---------------+---------------+-------------------------------+\n *\n * Type: 0x0A (meta), 0x0B (path), or 0x0C (chunk)\n *\n * Rsv: reserved\n *\n * Length: Length of this object including the object header.\n *\n *\n * Meta object provides generic information for the failed copy:\n * +---------------+---------------+-------------------------------+\n * |     Type      |      rsv      |             Length            |\n * +---------------+---------------+-------------------------------+\n * |  Direction    | Remote string ...\n * +---------------+------------------\n *\n * Direction: 1 (Local-to-Remote copy) or 2 (Remote-to-Local copy)\n *\n * Remote string: Remote host, e.g., user@hostname and IP address,\n * string including '\\0'.\n *\n *\n * Path object represents a file with source and destination paths:\n * 
+---------------+---------------+-------------------------------+\n * |     Type      |      rsv      |             Length            |\n * +---------------+---------------+-------------------------------+\n * |                             Index                             |\n * +-------------------------------+-------------------------------+\n * |         Source offset         |      Destination offset       |\n * +-------------------------------+-------------------------------+\n * //                                                             //\n * //                     Source path string                      //\n * //                                                             //\n * +---------------------------------------------------------------+\n * //                                                             //\n * //                   Destination path string                   //\n * //                                                             //\n * +---------------------------------------------------------------+\n *\n * Index: 32-bit unsigned int indicating this path (used by chunks)\n *\n * Source offset: Offset of the Source path string from the head of\n * this object. It is identical to the end of the Destination offset\n * field.\n *\n * Destination offset: Offset of the Destination path string from the\n * head of this object. 
It also indicates the end of the Source path\n * string.\n *\n * Source path string: String of copy source path (including '\\0').\n *\n * Destination path string: string of copy destination path (including\n * '\\0').\n *\n *\n * Chunk object represents a chunk associated with a path object:\n * +---------------+---------------+-------------------------------+\n * |     Type      |      rsv      |             Length            |\n * +---------------+---------------+-------------------------------+\n * |                             Index                             |\n * +---------------------------------------------------------------+\n * |                             Chunk                             |\n * |                             offset                            |\n * +---------------------------------------------------------------+\n * |                             Chunk                             |\n * |                             length                            |\n * +---------------------------------------------------------------+\n *\n * Index: 32 bit unsigned int indicating the index of a path object\n * this chunk is associated with.\n *\n * Chunk offset: 64 bit unsigned int indicating the offset of this\n * chunk from the head of the associated file.\n *\n * Chunk length: 64 bit unsigned int indicating the length (bytes) of\n * this chunk.\n */\n\nenum {\n\tOBJ_TYPE_META = 0x0A,\n\tOBJ_TYPE_PATH = 0x0B,\n\tOBJ_TYPE_CHUNK = 0x0C,\n};\n\nstruct checkpoint_file_hdr {\n\tuint32_t magic;\n\tuint8_t version;\n} __attribute__((packed));\n\nstruct checkpoint_obj_hdr {\n\tuint8_t type;\n\tuint8_t rsv;\n\tuint16_t len; /* length of an object including this hdr */\n} __attribute__((packed));\n\nstruct checkpoint_obj_meta {\n\tstruct checkpoint_obj_hdr hdr;\n\tuint8_t direction; /* L2R or R2L */\n\n\tchar remote[0];\n} __attribute__((packed));\n\nstruct checkpoint_obj_path {\n\tstruct checkpoint_obj_hdr hdr;\n\n\tuint32_t idx;\n\tuint16_t src_off; /* 
offset to the src path string (including\n\t\t\t   * \\0) from the head of this object. */\n\tuint16_t dst_off; /* offset to the dst path string (including\n\t\t\t   * \\0) from the head of this object */\n} __attribute__((packed));\n\n#define obj_path_src(o) ((char *)(o) + ntohs(o->src_off))\n#define obj_path_dst(o) ((char *)(o) + ntohs(o->dst_off))\n\n#define obj_path_src_len(o) (ntohs(o->dst_off) - ntohs(o->src_off))\n#define obj_path_dst_len(o) (ntohs(o->hdr.len) - ntohs(o->dst_off))\n\n#define obj_path_validate(o)\t\t\t\t\\\n\t((ntohs(o->hdr.len) > ntohs(o->dst_off)) &&\t\\\n\t (ntohs(o->dst_off) > ntohs(o->src_off)) &&\t\\\n\t (obj_path_src_len(o) < PATH_MAX) &&\t\t\\\n\t (obj_path_dst_len(o) < PATH_MAX))\n\nstruct checkpoint_obj_chunk {\n\tstruct checkpoint_obj_hdr hdr;\n\n\tuint32_t idx; /* index indicating associating path */\n\tuint64_t off;\n\tuint64_t len;\n} __attribute__((packed));\n\n#define CHECKPOINT_OBJ_MAXLEN (sizeof(struct checkpoint_obj_path) + PATH_MAX * 2)\n\nstatic int checkpoint_write_path(int fd, struct path *p, unsigned int idx)\n{\n\tchar buf[CHECKPOINT_OBJ_MAXLEN];\n\tstruct checkpoint_obj_path *path = (struct checkpoint_obj_path *)buf;\n\tsize_t src_len, dst_len;\n\tstruct iovec iov[3];\n\n\tp->data = idx; /* save idx to be pointed by chunks */\n\n\tsrc_len = strlen(p->path) + 1;\n\tdst_len = strlen(p->dst_path) + 1;\n\n\tmemset(buf, 0, sizeof(buf));\n\tpath->hdr.type = OBJ_TYPE_PATH;\n\tpath->hdr.len = htons(sizeof(*path) + src_len + dst_len);\n\n\tpath->idx = htonl(idx);\n\tpath->src_off = htons(sizeof(*path));\n\tpath->dst_off = htons(sizeof(*path) + src_len);\n\n\tiov[0].iov_base = path;\n\tiov[0].iov_len = sizeof(*path);\n\tiov[1].iov_base = p->path;\n\tiov[1].iov_len = src_len;\n\tiov[2].iov_base = p->dst_path;\n\tiov[2].iov_len = dst_len;\n\n\tif (writev(fd, iov, 3) < 0) {\n\t\tpriv_set_errv(\"writev: %s\", strerrno());\n\t\treturn -1;\n\t}\n\treturn 0;\n}\n\nstatic int checkpoint_write_chunk(int fd, struct chunk 
*c)\n{\n\tstruct checkpoint_obj_chunk chunk;\n\n\tmemset(&chunk, 0, sizeof(chunk));\n\tchunk.hdr.type = OBJ_TYPE_CHUNK;\n\tchunk.hdr.len = htons(sizeof(chunk));\n\n\tchunk.idx = htonl(c->p->data); /* index stored by checkpoint_write_path */\n\tchunk.off = htonll(c->off);\n\tchunk.len = htonll(c->len);\n\n\tif (write(fd, &chunk, sizeof(chunk)) < 0) {\n\t\tpriv_set_errv(\"writev: %s\", strerrno());\n\t\treturn -1;\n\t}\n\treturn 0;\n}\n\nint checkpoint_save(const char *pathname, int dir, const char *user, const char *remote,\n\t\t    pool *path_pool, pool *chunk_pool)\n{\n\tstruct checkpoint_file_hdr hdr;\n\tstruct checkpoint_obj_meta meta;\n\tstruct iovec iov[3];\n\tstruct chunk *c;\n\tstruct path *p;\n\tchar buf[1024];\n\tunsigned int i, nr_paths, nr_chunks;\n\tint fd, ret;\n\n\tfd = open(pathname, O_WRONLY | O_CREAT | O_TRUNC,\n\t\t  S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);\n\tif (fd < 0) {\n\t\tpriv_set_errv(\"open: %s: %s\", pathname, strerrno());\n\t\treturn -1;\n\t}\n\n\t/* write file hdr */\n\thdr.magic = htonl(MSCP_CHECKPOINT_MAGIC);\n\thdr.version = MSCP_CHECKPOINT_VERSION;\n\n\t/* write meta */\n\tif (user)\n\t\tret = snprintf(buf, sizeof(buf), \"%s@%s\", user, remote);\n\telse\n\t\tret = snprintf(buf, sizeof(buf), \"%s\", remote);\n\tif (ret >= sizeof(buf)) {\n\t\tpriv_set_errv(\"too long username and/or remote\");\n\t\treturn -1;\n\t}\n\n\tmemset(&meta, 0, sizeof(meta));\n\tmeta.hdr.type = OBJ_TYPE_META;\n\tmeta.hdr.len = htons(sizeof(meta) + strlen(buf) + 1);\n\tmeta.direction = dir;\n\n\tiov[0].iov_base = &hdr;\n\tiov[0].iov_len = sizeof(hdr);\n\tiov[1].iov_base = &meta;\n\tiov[1].iov_len = sizeof(meta);\n\tiov[2].iov_base = buf;\n\tiov[2].iov_len = strlen(buf) + 1;\n\n\tif (writev(fd, iov, 3) < 0) {\n\t\tpriv_set_errv(\"writev: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\t/* write paths */\n\tnr_paths = 0;\n\tpool_for_each(path_pool, p, i) {\n\t\tif (p->state == FILE_STATE_DONE)\n\t\t\tcontinue;\n\t\tif (checkpoint_write_path(fd, p, nr_paths) 
< 0)\n\t\t\treturn -1;\n\t\tnr_paths++;\n\t}\n\n\t/* write chunks */\n\tnr_chunks = 0;\n\tpool_for_each(chunk_pool, c, i) {\n\t\tif (c->state == CHUNK_STATE_DONE)\n\t\t\tcontinue;\n\t\tif (checkpoint_write_chunk(fd, c) < 0)\n\t\t\treturn -1;\n\t\tnr_chunks++;\n\t}\n\n\tpr_notice(\"checkpoint: %u paths and %u chunks saved\", nr_paths, nr_chunks);\n\n\treturn 0;\n}\n\nstatic int checkpoint_load_meta(struct checkpoint_obj_hdr *hdr, char *remote, size_t len,\n\t\t\t\tint *dir)\n{\n\tstruct checkpoint_obj_meta *meta = (struct checkpoint_obj_meta *)hdr;\n\n\tif (len < ntohs(hdr->len) - sizeof(*meta)) {\n\t\tpriv_set_errv(\"too short buffer\");\n\t\treturn -1;\n\t}\n\tsnprintf(remote, len, \"%s\", meta->remote);\n\t*dir = meta->direction;\n\n\tpr_notice(\"checkpoint: remote=%s direction=%s\", meta->remote,\n\t\t  meta->direction == MSCP_DIRECTION_L2R ? \"local-to-remote\" :\n\t\t  meta->direction == MSCP_DIRECTION_R2L ? \"remote-to-local\" :\n\t\t\t\t\t\t\t  \"invalid\");\n\n\treturn 0;\n}\n\nstatic int checkpoint_load_path(struct checkpoint_obj_hdr *hdr, pool *path_pool)\n{\n\tstruct checkpoint_obj_path *path = (struct checkpoint_obj_path *)hdr;\n\tstruct path *p;\n\tchar *s, *d;\n\n\tif (!obj_path_validate(path)) {\n\t\tpriv_set_errv(\"invalid path object\");\n\t\treturn -1;\n\t}\n\n\tif (!(s = strndup(obj_path_src(path), obj_path_src_len(path)))) {\n\t\tpriv_set_errv(\"strdup: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tif (!(d = strndup(obj_path_dst(path), obj_path_dst_len(path)))) {\n\t\tpriv_set_errv(\"strdup: %s\", strerrno());\n\t\tfree(s);\n\t\treturn -1;\n\t}\n\n\tif (!(p = alloc_path(s, d))) {\n\t\tfree(s);\n\t\tfree(d);\n\t\treturn -1;\n\t}\n\n\tif (pool_push(path_pool, p) < 0) {\n\t\tpriv_set_errv(\"pool_push: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tpr_info(\"checkpoint:file: idx=%u %s -> %s\", ntohl(path->idx),\n\t\tp->path, p->dst_path);\n\n\treturn 0;\n}\n\nstatic int checkpoint_load_chunk(struct checkpoint_obj_hdr *hdr, pool *path_pool,\n\t\t\t\t 
pool *chunk_pool)\n{\n\tstruct checkpoint_obj_chunk *chunk = (struct checkpoint_obj_chunk *)hdr;\n\tstruct chunk *c;\n\tstruct path *p;\n\n\tif (!(p = pool_get(path_pool, ntohl(chunk->idx)))) {\n\t\t/* we assumes all paths are already loaded in the order */\n\t\tpriv_set_errv(\"path index %u not found\", ntohl(chunk->idx));\n\t\treturn -1;\n\t}\n\n\tif (!(c = alloc_chunk(p, ntohll(chunk->off), ntohll(chunk->len))))\n\t\treturn -1;\n\n\tif (pool_push(chunk_pool, c) < 0) {\n\t\tpriv_set_errv(\"pool_push: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tpr_debug(\"checkpoint:chunk: idx=%u %s 0x%lx-0x%lx\", ntohl(chunk->idx),\n\t\t p->path, c->off, c->off + c->len);\n\n\treturn 0;\n}\n\nstatic int checkpoint_read_obj(int fd, void *buf, size_t count)\n{\n\tstruct checkpoint_obj_hdr *hdr = (struct checkpoint_obj_hdr *)buf;\n\tssize_t ret, objlen, objbuflen;\n\n\tmemset(buf, 0, count);\n\n\tif (count < sizeof(*hdr)) {\n\t\tpriv_set_errv(\"too short buffer\");\n\t\treturn -1;\n\t}\n\n\tret = read(fd, hdr, sizeof(*hdr));\n\tif (ret == 0)\n\t\treturn 0; /* no more objects */\n\tif (ret < 0)\n\t\treturn -1;\n\n\tobjlen = ntohs(hdr->len) - sizeof(*hdr);\n\tobjbuflen = count - sizeof(*hdr);\n\tif (objbuflen < objlen) {\n\t\tpriv_set_errv(\"too short buffer\");\n\t\treturn -1;\n\t}\n\n\tret = read(fd, buf + sizeof(*hdr), objlen);\n\tif (ret < objlen) {\n\t\tpriv_set_errv(\"checkpoint truncated\");\n\t\treturn -1;\n\t}\n\n\treturn 1;\n}\n\nstatic int checkpoint_read_file_hdr(int fd)\n{\n\tstruct checkpoint_file_hdr hdr;\n\tssize_t ret;\n\n\tret = read(fd, &hdr, sizeof(hdr));\n\tif (ret < 0) {\n\t\tpriv_set_errv(\"read: %s\", strerrno());\n\t\treturn -1;\n\t}\n\tif (ret < sizeof(hdr)) {\n\t\tpriv_set_errv(\"checkpoint truncated\");\n\t\treturn -1;\n\t}\n\n\tif (ntohl(hdr.magic) != MSCP_CHECKPOINT_MAGIC) {\n\t\tpriv_set_errv(\"checkpoint: invalid megic code\");\n\t\treturn -1;\n\t}\n\n\tif (hdr.version != MSCP_CHECKPOINT_VERSION) {\n\t\tpriv_set_errv(\"checkpoint: unknown version %u\", 
hdr.version);\n\t\treturn -1;\n\t}\n\n\treturn 0;\n}\n\nstatic int checkpoint_load(const char *pathname, char *remote, size_t len, int *dir,\n\t\t\t   pool *path_pool, pool *chunk_pool)\n{\n\tchar buf[CHECKPOINT_OBJ_MAXLEN];\n\tstruct checkpoint_obj_hdr *hdr;\n\tint fd, ret;\n\n\tif ((fd = open(pathname, O_RDONLY)) < 0) {\n\t\tpriv_set_errv(\"open: %s: %s\", pathname, strerrno());\n\t\treturn -1;\n\t}\n\n\tif (checkpoint_read_file_hdr(fd) < 0)\n\t\treturn -1;\n\n\thdr = (struct checkpoint_obj_hdr *)buf;\n\twhile ((ret = checkpoint_read_obj(fd, buf, sizeof(buf))) > 0) {\n\t\tswitch (hdr->type) {\n\t\tcase OBJ_TYPE_META:\n\t\t\tif (!remote || !dir)\n\t\t\t\tbreak;\n\t\t\tif (checkpoint_load_meta(hdr, remote, len, dir) < 0)\n\t\t\t\treturn -1;\n\t\t\tif (!path_pool || !chunk_pool)\n\t\t\t\tgoto out;\n\t\t\tbreak;\n\t\tcase OBJ_TYPE_PATH:\n\t\t\tif (!path_pool)\n\t\t\t\tbreak;\n\t\t\tif (checkpoint_load_path(hdr, path_pool) < 0)\n\t\t\t\treturn -1;\n\t\t\tbreak;\n\t\tcase OBJ_TYPE_CHUNK:\n\t\t\tif (!path_pool)\n\t\t\t\tbreak;\n\t\t\tif (checkpoint_load_chunk(hdr, path_pool, chunk_pool) < 0)\n\t\t\t\treturn -1;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tpriv_set_errv(\"unknown obj type %u\", hdr->type);\n\t\t\treturn -1;\n\t\t}\n\t}\n\nout:\n\tclose(fd);\n\n\treturn 0;\n}\n\nint checkpoint_load_remote(const char *pathname, char *remote, size_t len, int *dir)\n{\n\treturn checkpoint_load(pathname, remote, len, dir, NULL, NULL);\n}\n\nint checkpoint_load_paths(const char *pathname, pool *path_pool, pool *chunk_pool)\n{\n\treturn checkpoint_load(pathname, NULL, 0, NULL, path_pool, chunk_pool);\n}\n"
  },
  {
    "path": "src/checkpoint.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _CHECKPOINT_H_\n#define _CHECKPOINT_H_\n\n#include <pool.h>\n\n/* checkpoint_save() stores states to a checkponint file (pathname) */\nint checkpoint_save(const char *pathname, int dir, const char *user, const char *remote,\n\t\t    pool *path_pool, pool *chunk_pool);\n\n/* checkpoint_load_meta() reads a checkpoint file (pathname) and returns\n * remote host string to *remote and transfer direction to *dir.\n */\nint checkpoint_load_remote(const char *pathname, char *remote, size_t len, int *dir);\n\n/* checkpoint_load_paths() reads a checkpoint file (pathname) and\n * fills path_pool and chunk_pool.\n */\nint checkpoint_load_paths(const char *pathname, pool *path_pool, pool *chunk_pool);\n\n#endif /* _CHECKPOINT_H_ */\n"
  },
  {
    "path": "src/fileops.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <dirent.h>\n#include <sys/times.h>\n#include <utime.h>\n\n#include <fileops.h>\n#include <ssh.h>\n#include <print.h>\n#include <platform.h>\n\nsftp_session __thread tls_sftp;\n/* tls_sftp is used *_wrapped() functions */\n\nvoid set_tls_sftp_session(sftp_session sftp)\n{\n\ttls_sftp = sftp;\n}\n\nstatic void sftp_err_to_errno(sftp_session sftp)\n{\n\tint sftperr = sftp_get_error(sftp);\n\n\tswitch (sftperr) {\n\tcase SSH_FX_OK:\n\tcase SSH_FX_EOF:\n\t\terrno = 0;\n\t\tbreak;\n\tcase SSH_FX_NO_SUCH_FILE:\n\tcase SSH_FX_NO_SUCH_PATH:\n\t\terrno = ENOENT;\n\t\tbreak;\n\tcase SSH_FX_PERMISSION_DENIED:\n\t\terrno = EACCES;\n\t\tbreak;\n\tcase SSH_FX_FAILURE:\n\t\terrno = EINVAL;\n\tcase SSH_FX_BAD_MESSAGE:\n\t\terrno = EBADMSG;\n\tcase SSH_FX_NO_CONNECTION:\n\t\terrno = ENOTCONN;\n\t\tbreak;\n\tcase SSH_FX_CONNECTION_LOST:\n\t\terrno = ENETRESET;\n\t\tbreak;\n\tcase SSH_FX_OP_UNSUPPORTED:\n\t\terrno = EOPNOTSUPP;\n\t\tbreak;\n\tcase SSH_FX_INVALID_HANDLE:\n\t\terrno = EBADF;\n\t\tbreak;\n\tcase SSH_FX_FILE_ALREADY_EXISTS:\n\t\terrno = EEXIST;\n\t\tbreak;\n\tcase SSH_FX_WRITE_PROTECT:\n\t\terrno = EPERM;\n\t\tbreak;\n\tcase SSH_FX_NO_MEDIA:\n\t\terrno = ENODEV;\n\t\tbreak;\n\tdefault:\n\t\tpr_warn(\"unkown SSH_FX response %d\", sftperr);\n\t}\n}\n\nMDIR *mscp_opendir(const char *path, sftp_session sftp)\n{\n\tMDIR *md;\n\n\tif (!(md = malloc(sizeof(*md))))\n\t\treturn NULL;\n\tmemset(md, 0, sizeof(*md));\n\n\tif (sftp) {\n\t\tmd->remote = sftp_opendir(sftp, path);\n\t\tsftp_err_to_errno(sftp);\n\t\tif (!md->remote) {\n\t\t\tgoto free_out;\n\t\t}\n\t} else {\n\t\tmd->local = opendir(path);\n\t\tif (!md->local) {\n\t\t\tgoto free_out;\n\t\t}\n\t}\n\n\treturn md;\n\nfree_out:\n\tfree(md);\n\treturn NULL;\n}\n\nMDIR *mscp_opendir_wrapped(const char *path)\n{\n\treturn mscp_opendir(path, tls_sftp);\n}\n\nvoid 
mscp_closedir(MDIR *md)\n{\n\tif (md->remote)\n\t\tsftp_closedir(md->remote);\n\telse\n\t\tclosedir(md->local);\n\n\tfree(md);\n}\n\nstruct dirent __thread tls_dirent;\n/* tls_dirent contains dirent converted from sftp_attributes returned\n * from sftp_readdir(). This trick is derived from openssh's\n * fudge_readdir() */\n\nstruct dirent *mscp_readdir(MDIR *md)\n{\n\tsftp_attributes attr;\n\tstruct dirent *ret = NULL;\n\tstatic int inum = 1;\n\n\tif (md->remote) {\n\t\tattr = sftp_readdir(md->remote->sftp, md->remote);\n\t\tif (!attr) {\n\t\t\tsftp_err_to_errno(md->remote->sftp);\n\t\t\treturn NULL;\n\t\t}\n\n\t\tmemset(&tls_dirent, 0, sizeof(tls_dirent));\n\t\tstrncpy(tls_dirent.d_name, attr->name, sizeof(tls_dirent.d_name) - 1);\n\t\ttls_dirent.d_ino = inum++;\n\t\tif (!inum)\n\t\t\tinum = 1;\n\t\tret = &tls_dirent;\n\t\tsftp_attributes_free(attr);\n\t} else\n\t\tret = readdir(md->local);\n\n\treturn ret;\n}\n\nint mscp_mkdir(const char *path, mode_t mode, sftp_session sftp)\n{\n\tint ret;\n\n\tif (sftp) {\n\t\tret = sftp_mkdir(sftp, path, mode);\n\t\tsftp_err_to_errno(sftp);\n\t} else\n\t\tret = mkdir(path, mode);\n\n\tif (ret < 0 && errno == EEXIST) {\n\t\tret = 0;\n\t}\n\n\treturn ret;\n}\n\nstatic void sftp_attr_to_stat(sftp_attributes attr, struct stat *st)\n{\n\tmemset(st, 0, sizeof(*st));\n\tst->st_size = attr->size;\n\tst->st_uid = attr->uid;\n\tst->st_gid = attr->gid;\n\tst->st_mode = attr->permissions;\n\n#if defined(__APPLE__)\n#define st_atim st_atimespec\n#define st_mtim st_mtimespec\n#define st_ctim st_ctimespec\n#endif\n\tst->st_atim.tv_sec = attr->atime;\n\tst->st_atim.tv_nsec = attr->atime_nseconds;\n\tst->st_mtim.tv_sec = attr->mtime;\n\tst->st_mtim.tv_nsec = attr->mtime_nseconds;\n\tst->st_ctim.tv_sec = attr->createtime;\n\tst->st_ctim.tv_nsec = attr->createtime_nseconds;\n\n\tswitch (attr->type) {\n\tcase SSH_FILEXFER_TYPE_REGULAR:\n\t\tst->st_mode |= S_IFREG;\n\t\tbreak;\n\tcase SSH_FILEXFER_TYPE_DIRECTORY:\n\t\tst->st_mode |= 
S_IFDIR;\n\t\tbreak;\n\tcase SSH_FILEXFER_TYPE_SYMLINK:\n\t\tst->st_mode |= S_IFLNK;\n\t\tbreak;\n\tcase SSH_FILEXFER_TYPE_SPECIAL:\n\t\tst->st_mode |= S_IFCHR; /* or block? */\n\t\tbreak;\n\tcase SSH_FILEXFER_TYPE_UNKNOWN:\n\t\tst->st_mode |= S_IFIFO; /* really? */\n\t\tbreak;\n\tdefault:\n\t\tpr_warn(\"unkown SSH_FILEXFER_TYPE %d\", attr->type);\n\t}\n}\n\nint mscp_stat(const char *path, struct stat *st, sftp_session sftp)\n{\n\tsftp_attributes attr;\n\tint ret = 0;\n\n\tmemset(st, 0, sizeof(*st));\n\n\tif (sftp) {\n\t\tattr = sftp_stat(sftp, path);\n\t\tsftp_err_to_errno(sftp);\n\t\tif (!attr)\n\t\t\treturn -1;\n\n\t\tsftp_attr_to_stat(attr, st);\n\t\tsftp_attributes_free(attr);\n\t\tret = 0;\n\t} else\n\t\tret = stat(path, st);\n\n\treturn ret;\n}\n\nint mscp_stat_wrapped(const char *path, struct stat *st)\n{\n\treturn mscp_stat(path, st, tls_sftp);\n}\n\nint mscp_lstat(const char *path, struct stat *st, sftp_session sftp)\n{\n\tsftp_attributes attr;\n\tint ret = 0;\n\n\tif (sftp) {\n\t\tattr = sftp_lstat(sftp, path);\n\t\tsftp_err_to_errno(sftp);\n\t\tif (!attr)\n\t\t\treturn -1;\n\n\t\tsftp_attr_to_stat(attr, st);\n\t\tsftp_attributes_free(attr);\n\t\tret = 0;\n\t} else\n\t\tret = lstat(path, st);\n\n\treturn ret;\n}\n\nint mscp_lstat_wrapped(const char *path, struct stat *st)\n{\n\treturn mscp_lstat(path, st, tls_sftp);\n}\n\nmf *mscp_open(const char *path, int flags, mode_t mode, sftp_session sftp)\n{\n\tmf *f;\n\n\tf = malloc(sizeof(*f));\n\tif (!f)\n\t\treturn NULL;\n\tmemset(f, 0, sizeof(*f));\n\n\tif (sftp) {\n\t\tf->remote = sftp_open(sftp, path, flags, mode);\n\t\tif (!f->remote) {\n\t\t\tsftp_err_to_errno(sftp);\n\t\t\tgoto free_out;\n\t\t}\n\t} else {\n\t\tf->local = open(path, flags, mode);\n\t\tif (f->local < 0)\n\t\t\tgoto free_out;\n\t}\n\n\treturn f;\n\nfree_out:\n\tfree(f);\n\treturn NULL;\n}\n\nvoid mscp_close(mf *f)\n{\n\tif (f->remote)\n\t\tsftp_close(f->remote);\n\tif (f->local > 0)\n\t\tclose(f->local);\n\tfree(f);\n}\n\noff_t 
mscp_lseek(mf *f, off_t off)\n{\n\toff_t ret;\n\n\tif (f->remote) {\n\t\tret = sftp_seek64(f->remote, off);\n\t\tsftp_err_to_errno(f->remote->sftp);\n\t} else\n\t\tret = lseek(f->local, off, SEEK_SET);\n\n\treturn ret;\n}\n\nint mscp_setstat(const char *path, struct stat *st, bool preserve_ts, sftp_session sftp)\n{\n\tint ret;\n\n\tif (sftp) {\n\t\tstruct sftp_attributes_struct attr;\n\t\tmemset(&attr, 0, sizeof(attr));\n\t\tattr.permissions = st->st_mode;\n\t\tattr.size = st->st_size;\n\t\tattr.flags = (SSH_FILEXFER_ATTR_PERMISSIONS | SSH_FILEXFER_ATTR_SIZE);\n\t\tif (preserve_ts) {\n\t\t\tattr.atime = st->st_atim.tv_sec;\n\t\t\tattr.atime_nseconds = st->st_atim.tv_nsec;\n\t\t\tattr.mtime = st->st_mtim.tv_sec;\n\t\t\tattr.mtime_nseconds = st->st_mtim.tv_nsec;\n\t\t\tattr.flags |= (SSH_FILEXFER_ATTR_ACCESSTIME |\n\t\t\t\t       SSH_FILEXFER_ATTR_MODIFYTIME |\n\t\t\t\t       SSH_FILEXFER_ATTR_SUBSECOND_TIMES);\n\t\t}\n\t\tret = sftp_setstat(sftp, path, &attr);\n\t\tsftp_err_to_errno(sftp);\n\t} else {\n\t\tif ((ret = truncate(path, st->st_size)) < 0)\n\t\t\treturn ret;\n\t\tif (preserve_ts) {\n\t\t\tif ((ret = setutimes(path, st->st_atim, st->st_mtim)) < 0)\n\t\t\t\treturn ret;\n\t\t}\n\t\tif ((ret = chmod(path, st->st_mode)) < 0)\n\t\t\treturn ret;\n\t}\n\n\treturn ret;\n}\n\nint mscp_glob(const char *pattern, int flags, glob_t *pglob, sftp_session sftp)\n{\n\tint ret;\n\tif (sftp) {\n#ifndef GLOB_ALTDIRFUNC\n#define GLOB_NOALTDIRMAGIC INT_MAX\n\t\t/* musl does not implement GLOB_ALTDIRFUNC */\n\t\tpglob->gl_pathc = 1;\n\t\tpglob->gl_pathv = malloc(sizeof(char *));\n\t\tpglob->gl_pathv[0] = strdup(pattern);\n\t\tpglob->gl_offs = GLOB_NOALTDIRMAGIC;\n\t\treturn 0;\n#else\n\t\tflags |= GLOB_ALTDIRFUNC;\n\t\tset_tls_sftp_session(sftp);\n#if defined(__APPLE__) || defined(__FreeBSD__)\n\t\tpglob->gl_opendir = (void *(*)(const char *))mscp_opendir_wrapped;\n\t\tpglob->gl_readdir = (struct dirent * (*)(void *)) mscp_readdir;\n\t\tpglob->gl_closedir = (void (*)(void 
*))mscp_closedir;\n\t\tpglob->gl_lstat = mscp_lstat_wrapped;\n\t\tpglob->gl_stat = mscp_stat_wrapped;\n#elif linux\n\t\tpglob->gl_opendir = (void *(*)(const char *))mscp_opendir_wrapped;\n\t\tpglob->gl_readdir = (void *(*)(void *))mscp_readdir;\n\t\tpglob->gl_closedir = (void (*)(void *))mscp_closedir;\n\t\tpglob->gl_lstat = (int (*)(const char *, void *))mscp_lstat_wrapped;\n\t\tpglob->gl_stat = (int (*)(const char *, void *))mscp_stat_wrapped;\n#else\n#error unsupported platform\n#endif\n#endif\n\t}\n\n\tret = glob(pattern, flags, NULL, pglob);\n\n\tif (sftp)\n\t\tset_tls_sftp_session(NULL);\n\treturn ret;\n}\n\nvoid mscp_globfree(glob_t *pglob)\n{\n#ifndef GLOB_ALTDIRFUNC\n\tif (pglob->gl_offs == GLOB_NOALTDIRMAGIC) {\n\t\tfree(pglob->gl_pathv[0]);\n\t\tfree(pglob->gl_pathv);\n\t\treturn;\n\t}\n#endif\n\tglobfree(pglob);\n}\n"
  },
  {
    "path": "src/fileops.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <dirent.h>\n#include <sys/stat.h>\n#include <glob.h>\n\n#include <ssh.h>\n\nvoid set_tls_sftp_session(sftp_session sftp);\n/* sftp_session set by set_tls_sftp_session is used in\n mscp_opendir_wrapped(), mscp_stat_wrapped(), and\n mscp_lstat_wrapped(). These _wrapped() functions exist for\n mscp_glob() */\n\n/* directory operations */\n\nstruct mdir_struct {\n\tDIR *local;\n\tsftp_dir remote;\n};\ntypedef struct mdir_struct MDIR;\n\nMDIR *mscp_opendir(const char *path, sftp_session sftp);\nMDIR *mscp_opendir_wrapped(const char *path);\nvoid mscp_closedir(MDIR *md);\nstruct dirent *mscp_readdir(MDIR *md);\n\nint mscp_mkdir(const char *path, mode_t mode, sftp_session sftp);\n\n/* stat operations */\nint mscp_stat(const char *path, struct stat *st, sftp_session sftp);\nint mscp_stat_wrapped(const char *path, struct stat *st);\n\nint mscp_lstat(const char *path, struct stat *st, sftp_session sftp);\nint mscp_lstat_wrapped(const char *path, struct stat *st);\n\n/* file operations */\n\nstruct mf_struct {\n\tsftp_file remote;\n\tint local;\n};\ntypedef struct mf_struct mf;\n\nmf *mscp_open(const char *path, int flags, mode_t mode, sftp_session sftp);\nvoid mscp_close(mf *f);\noff_t mscp_lseek(mf *f, off_t off);\n\n/* mscp_setstat() involves chmod and truncate. It executes both at\n * once via a single SFTP command (sftp_setstat()).\n */\nint mscp_setstat(const char *path, struct stat *st, bool preserve_ts, sftp_session sftp);\n\n/* remote glob */\nint mscp_glob(const char *pattern, int flags, glob_t *pglob, sftp_session sftp);\nvoid mscp_globfree(glob_t *pglob);\n"
  },
  {
    "path": "src/main.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <limits.h>\n#include <math.h>\n#include <signal.h>\n#include <sys/time.h>\n#include <sys/ioctl.h>\n#include <poll.h>\n#include <netinet/in.h>\n#include <sys/socket.h>\n#include <pthread.h>\n\n#include <mscp.h>\n#include <minmax.h>\n#include <strerrno.h>\n#include <print.h>\n\n#include <config.h>\n\nvoid usage(bool print_help)\n{\n\tprintf(\"mscp \" MSCP_BUILD_VERSION \": copy files over multiple SSH connections\\n\"\n\t       \"\\n\"\n\t       \"Usage: mscp [-46vqDpdNh] [-n nr_conns] [-m coremask] [-u max_startups]\\n\"\n\t       \"            [-I interval] [-W checkpoint] [-R checkpoint]\\n\"\n\t       \"            [-s min_chunk_sz] [-S max_chunk_sz] [-a nr_ahead]\\n\"\n\t       \"            [-b buf_sz] [-L limit_bitrate]\\n\"\n\t       \"            [-l login_name] [-P port] [-F ssh_config] [-o ssh_option]\\n\"\n\t       \"            [-i identity_file] [-J destination] [-c cipher_spec] [-M hmac_spec]\\n\"\n\t       \"            [-C compress] [-g congestion]\\n\"\n\t       \"            source ... 
target\\n\"\n\t       \"\\n\");\n\n\tif (!print_help)\n\t\treturn;\n\n\tprintf(\"    -n NR_CONNECTIONS  number of connections \"\n\t       \"(default: floor(log(cores)*2)+1)\\n\"\n\t       \"    -m COREMASK        hex value to specify cores where threads pinned\\n\"\n\t       \"    -u MAX_STARTUPS    number of concurrent unauthed SSH attempts \"\n\t       \"(default: 8)\\n\"\n\t       \"    -I INTERVAL        interval between SSH connection attempts (default: 0)\\n\"\n\t       \"    -W CHECKPOINT      write states to the checkpoint if transfer fails\\n\"\n\t       \"    -R CHECKPOINT      resume transferring from the checkpoint\\n\"\n\t       \"\\n\"\n\t       \"    -s MIN_CHUNK_SIZE  min chunk size (default: 16M bytes)\\n\"\n\t       \"    -S MAX_CHUNK_SIZE  max chunk size (default: filesize/nr_conn/4)\\n\"\n\t       \"    -a NR_AHEAD        number of inflight SFTP commands (default: 32)\\n\"\n\t       \"    -b BUF_SZ          buffer size for i/o and transfer\\n\"\n\t       \"    -L LIMIT_BITRATE   Limit the bitrate, n[KMG] (default: 0, no limit)\\n\"\n\t       \"\\n\"\n\t       \"    -4                 use IPv4\\n\"\n\t       \"    -6                 use IPv6\\n\"\n\t       \"    -v                 increment verbose output level\\n\"\n\t       \"    -q                 disable output\\n\"\n\t       \"    -D                 dry run. 
check copy destinations with -vvv\\n\"\n\t       \"    -r                 no effect\\n\"\n\t       \"\\n\"\n\t       \"    -l LOGIN_NAME      login name\\n\"\n\t       \"    -P PORT            port number\\n\"\n\t       \"    -F SSH_CONFIG      path to user ssh config (default ~/.ssh/config)\\n\"\n\t       \"    -o SSH_OPTION      ssh_config option\\n\"\n\t       \"    -i IDENTITY        identity file for public key authentication\\n\"\n\t       \"    -J DESTINATION     ProxyJump destination\\n\"\n\t       \"    -c CIPHER          cipher spec\\n\"\n\t       \"    -M HMAC            hmac spec\\n\"\n\t       \"    -C COMPRESS        enable compression: \"\n\t       \"yes, no, zlib, zlib@openssh.com\\n\"\n\t       \"    -g CONGESTION      specify TCP congestion control algorithm\\n\"\n\t       \"    -p                 preserve timestamps of files\\n\"\n\t       \"    -d                 increment ssh debug output level\\n\"\n\t       \"    -N                 enable Nagle's algorithm (default disabled)\\n\"\n\t       \"    -h                 print this help\\n\"\n\t       \"\\n\");\n\n\tconst char **ciphers = mscp_ssh_ciphers();\n\tconst char **hmacs = mscp_ssh_hmacs();\n\tint n;\n\n\tprintf(\"Available ciphers: \");\n\tfor (n = 0; ciphers[n] != NULL; n++) {\n\t\tprintf(\"%s\", ciphers[n]);\n\t\tif (ciphers[n + 1])\n\t\t\tprintf(\", \");\n\t}\n\tprintf(\"\\n\\n\");\n\n\tprintf(\"Available hmacs: \");\n\tfor (n = 0; hmacs[n] != NULL; n++) {\n\t\tprintf(\"%s\", hmacs[n]);\n\t\tif (hmacs[n + 1])\n\t\t\tprintf(\", \");\n\t}\n\tprintf(\"\\n\\n\");\n}\n\nchar *strip_brackets(char *s)\n{\n\tif (s[0] == '[' && s[strlen(s) - 1] == ']') {\n\t\ts[strlen(s) - 1] = '\\0';\n\t\treturn s + 1;\n\t}\n\treturn s;\n}\n\nchar *split_user_host_path(const char *s, char **userp, char **hostp, char **pathp)\n{\n\tchar *tmp, *cp, *user = NULL, *host = NULL, *path = NULL;\n\tbool inbrackets = false;\n\n\tif (!(tmp = strdup(s))) {\n\t\tpr_err(\"stdrup: %s\", strerror(errno));\n\t\treturn 
NULL;\n\t}\n\n\tpath = tmp;\n\tfor (cp = tmp; *cp; cp++) {\n\t\tif (*cp == '@' && (cp > tmp) && *(cp - 1) != '\\\\' && user == NULL) {\n\t\t\t/* cp is non-escaped '@', so this '@' is the\n\t\t\t * delimitater between username and host. */\n\t\t\t*cp = '\\0';\n\t\t\tuser = tmp;\n\t\t\thost = cp + 1;\n\t\t}\n\t\tif (*cp == '[')\n\t\t\tinbrackets = true;\n\t\tif (*cp == ']')\n\t\t\tinbrackets = false;\n\t\tif (*cp == ':' && (cp > tmp) && *(cp - 1) != '\\\\') {\n\t\t\tif (!inbrackets) {\n\t\t\t\t/* cp is non-escaped ':' and not in\n\t\t\t\t * brackets for IPv6 address\n\t\t\t\t * notation. So, this ':' is the\n\t\t\t\t * delimitater between host and\n\t\t\t\t * path. */\n\t\t\t\t*cp = '\\0';\n\t\t\t\thost = host == NULL ? tmp : host;\n\t\t\t\tpath = cp + 1;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\t*userp = user;\n\t*hostp = host ? strip_brackets(host) : NULL;\n\t*pathp = path;\n\treturn tmp;\n}\n\nstruct target {\n\tchar *copy;\n\tchar *user;\n\tchar *host;\n\tchar *path;\n};\n\nint compare_remote(struct target *a, struct target *b)\n{\n\t/* return 0 if a and b have the identical user@host, otherwise 1 */\n\tint alen, blen;\n\n\tif (a->user) {\n\t\tif (!b->user)\n\t\t\treturn 1;\n\t\talen = strlen(a->user);\n\t\tblen = strlen(b->user);\n\t\tif (alen != blen)\n\t\t\treturn 1;\n\t\tif (strncmp(a->user, b->user, alen) != 0)\n\t\t\treturn 1;\n\t} else if (b->user)\n\t\treturn 1;\n\n\tif (a->host) {\n\t\tif (!b->host)\n\t\t\treturn 1;\n\t\talen = strlen(a->host);\n\t\tblen = strlen(b->host);\n\t\tif (alen != blen)\n\t\t\treturn 1;\n\t\tif (strncmp(a->host, b->host, alen) != 0)\n\t\t\treturn 1;\n\t} else if (b->host)\n\t\treturn 1;\n\n\treturn 0;\n}\n\nstruct target *validate_targets(char **arg, int len)\n{\n\t/* arg is array of source ... destination.\n\t * There are two cases:\n\t *\n\t * 1. user@host:path host:path ... path, remote to local copy\n\t * 2. path path ... 
host:path, local to remote copy.\n\t *\n\t * This function split user@remote:path args into struct target,\n\t * and validate all remotes are identical (mscp does not support\n\t * remote to remote copy).\n\t */\n\n\tstruct target *t, *t0;\n\tint n, nslash;\n\n\tif ((t = calloc(len, sizeof(struct target))) == NULL) {\n\t\tpr_err(\"calloc: %s\", strerrno());\n\t\treturn NULL;\n\t}\n\n\t/* split remote:path into remote and path */\n\tfor (n = 0; n < len; n++) {\n\t\tt[n].copy =\n\t\t\tsplit_user_host_path(arg[n], &t[n].user, &t[n].host, &t[n].path);\n\t\tif (!t[n].copy) {\n\t\t\tpr_err(\"failed to parse '%s'\", arg[n]);\n\t\t\tgoto free_target_out;\n\t\t}\n\t}\n\n\t/* expand remote path, e.g., empty dst path and '~' */\n\tfor (n = 0; n < len; n++) {\n\t\tif (!t[n].host)\n\t\t\tcontinue;\n\n\t\t/* this target is a remote path. check the path and\n\t\t * expand it. this part is derived from\n\t\t * openssh-portal prepare_remote_path() function.\n\t\t */\n\t\tchar *path = t[n].path;\n\t\tif (*path == '\\0' || strcmp(path, \"~\") == 0)\n\t\t\tt[n].path = strdup(\".\");\n\t\telse if (strncmp(path, \"~/\", 2) == 0) {\n\t\t\tif ((nslash = strspn(path + 2, \"/\")) == strlen(path + 2))\n\t\t\t\tt[n].path = strdup(\".\");\n\t\t\telse\n\t\t\t\tt[n].path = strdup(path + 2 + nslash);\n\t\t}\n\t\tif (!t[n].path) {\n\t\t\tpr_err(\"strdup failed: %s\", strerrno());\n\t\t\tgoto free_target_out;\n\t\t}\n\t}\n\n\t/* check all user@host are identical. 
t[len - 1] is the\n\t * destination, so we need to check t[0] to t[len - 2] having\n\t * the identical remote notation */\n\tt0 = &t[0];\n\tfor (n = 1; n < len - 1; n++) {\n\t\tif (compare_remote(t0, &t[n]) != 0)\n\t\t\tgoto invalid_remotes;\n\t}\n\n\t/* check inconsistent remote position in args */\n\tif (t[0].host == NULL && t[len - 1].host == NULL) {\n\t\tpr_err(\"no remote host given\");\n\t\tgoto free_split_out;\n\t}\n\n\tif (t[0].host != NULL && t[len - 1].host != NULL) {\n\t\tpr_err(\"no local path given\");\n\t\tgoto free_split_out;\n\t}\n\n\treturn t;\n\ninvalid_remotes:\n\tpr_err(\"invalid remote host notation\");\n\nfree_split_out:\n\tfor (n = 0; n < len; n++)\n\t\tif (t[n].copy)\n\t\t\tfree(t[n].copy);\n\nfree_target_out:\n\tfree(t);\n\treturn NULL;\n}\n\nstruct mscp *m = NULL;\npthread_t tid_stat = 0;\nbool interrupted = false;\n\nvoid sigint_handler(int sig)\n{\n\tinterrupted = true;\n\tmscp_stop(m);\n}\n\nvoid *print_stat_thread(void *arg);\n\nvoid print_cli(const char *fmt, ...)\n{\n\tva_list va;\n\tva_start(va, fmt);\n\tvfprintf(stdout, fmt, va);\n\tfflush(stdout);\n\tva_end(va);\n}\n\nvoid print_stat(bool final);\n\nlong atol_with_unit(char *value, bool i)\n{\n\t/* value must be \"\\d+[kKmMgG]?\" */\n\n\tchar *u = value + (strlen(value) - 1);\n\tlong k = i ? 
1024 : 1000;\n\tlong factor = 1;\n\tlong v;\n\n\tswitch (*u) {\n\tcase 'k':\n\tcase 'K':\n\t\tfactor = k;\n\t\t*u = '\\0';\n\t\tbreak;\n\tcase 'm':\n\tcase 'M':\n\t\tfactor = k * k;\n\t\t*u = '\\0';\n\t\tbreak;\n\tcase 'g':\n\tcase 'G':\n\t\tfactor = k * k * k;\n\t\t*u = '\\0';\n\t\tbreak;\n\t}\n\n\tv = atol(value);\n\treturn v * factor;\n}\n\nint to_dev_null(int fd)\n{\n\tint nfd = open(\"/dev/null\", O_WRONLY);\n\tif (nfd < 0) {\n\t\tpr_err(\"open /dev/null: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tif (dup2(nfd, fd) < 0) {\n\t\tpr_err(\"dup2: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tclose(nfd);\n\n\treturn 0;\n}\n\nint main(int argc, char **argv)\n{\n\tstruct mscp_ssh_opts s;\n\tstruct mscp_opts o;\n\tstruct target *t;\n\tint ch, n, i, ret;\n\tint direction = 0;\n\tchar *remote = NULL, *checkpoint_save = NULL, *checkpoint_load = NULL;\n\tbool quiet = false, dryrun = false, resume = false;\n\tint nr_options = 0;\n\n\tmemset(&s, 0, sizeof(s));\n\tmemset(&o, 0, sizeof(o));\n\to.severity = MSCP_SEVERITY_WARN;\n\n#define mscpopts \"n:m:u:I:W:R:s:S:a:b:L:46vqDrl:P:F:o:i:J:c:M:C:g:pdNh\"\n\twhile ((ch = getopt(argc, argv, mscpopts)) != -1) {\n\t\tswitch (ch) {\n\t\tcase 'n':\n\t\t\to.nr_threads = atoi(optarg);\n\t\t\tif (o.nr_threads < 1) {\n\t\t\t\tpr_err(\"invalid number of connections: %s\", optarg);\n\t\t\t\treturn 1;\n\t\t\t}\n\t\t\tbreak;\n\t\tcase 'm':\n\t\t\to.coremask = optarg;\n\t\t\tbreak;\n\t\tcase 'u':\n\t\t\to.max_startups = atoi(optarg);\n\t\t\tbreak;\n\t\tcase 'I':\n\t\t\to.interval = atoi(optarg);\n\t\t\tbreak;\n\t\tcase 'W':\n\t\t\tcheckpoint_save = optarg;\n\t\t\tbreak;\n\t\tcase 'R':\n\t\t\tcheckpoint_load = optarg;\n\t\t\tresume = true;\n\t\t\tbreak;\n\t\tcase 's':\n\t\t\to.min_chunk_sz = atol_with_unit(optarg, true);\n\t\t\tbreak;\n\t\tcase 'S':\n\t\t\to.max_chunk_sz = atol_with_unit(optarg, true);\n\t\t\tbreak;\n\t\tcase 'a':\n\t\t\to.nr_ahead = atoi(optarg);\n\t\t\tbreak;\n\t\tcase 'b':\n\t\t\to.buf_sz = atol_with_unit(optarg, 
true);\n\t\t\tbreak;\n\t\tcase 'L':\n\t\t\to.bitrate = atol_with_unit(optarg, false);\n\t\t\tbreak;\n\t\tcase '4':\n\t\t\ts.ai_family = AF_INET;\n\t\t\tbreak;\n\t\tcase '6':\n\t\t\ts.ai_family = AF_INET6;\n\t\t\tbreak;\n\t\tcase 'v':\n\t\t\to.severity++;\n\t\t\tbreak;\n\t\tcase 'q':\n\t\t\tquiet = true;\n\t\t\tbreak;\n\t\tcase 'D':\n\t\t\tdryrun = true;\n\t\t\tbreak;\n\t\tcase 'r':\n\t\t\t/* for compatibility with scp */\n\t\t\tbreak;\n\t\tcase 'l':\n\t\t\ts.login_name = optarg;\n\t\t\tbreak;\n\t\tcase 'P':\n\t\t\ts.port = optarg;\n\t\t\tbreak;\n\t\tcase 'F':\n\t\t\ts.config = optarg;\n\t\t\tbreak;\n\t\tcase 'o':\n\t\t\tnr_options++;\n\t\t\ts.options = realloc(s.options, sizeof(char *) * (nr_options + 1));\n\t\t\tif (!s.options) {\n\t\t\t\tpr_err(\"realloc: %s\", strerrno());\n\t\t\t\treturn 1;\n\t\t\t}\n\t\t\ts.options[nr_options - 1] = optarg;\n\t\t\ts.options[nr_options] = NULL;\n\t\t\tbreak;\n\t\tcase 'i':\n\t\t\ts.identity = optarg;\n\t\t\tbreak;\n\t\tcase 'J':\n\t\t\ts.proxyjump = optarg;\n\t\t\tbreak;\n\t\tcase 'c':\n\t\t\ts.cipher = optarg;\n\t\t\tbreak;\n\t\tcase 'M':\n\t\t\ts.hmac = optarg;\n\t\t\tbreak;\n\t\tcase 'C':\n\t\t\ts.compress = optarg;\n\t\t\tbreak;\n\t\tcase 'g':\n\t\t\ts.ccalgo = optarg;\n\t\t\tbreak;\n\t\tcase 'p':\n\t\t\to.preserve_ts = true;\n\t\t\tbreak;\n\t\tcase 'd':\n\t\t\ts.debug_level++;\n\t\t\tbreak;\n\t\tcase 'N':\n\t\t\ts.enable_nagle = true;\n\t\t\tbreak;\n\t\tcase 'h':\n\t\t\tusage(true);\n\t\t\treturn 0;\n\t\tdefault:\n\t\t\tusage(false);\n\t\t\treturn 1;\n\t\t}\n\t}\n\n\tif (quiet)\n\t\tto_dev_null(STDOUT_FILENO);\n\n\ts.password = getenv(ENV_SSH_AUTH_PASSWORD);\n\ts.passphrase = getenv(ENV_SSH_AUTH_PASSPHRASE);\n\n\tif ((m = mscp_init(&o, &s)) == NULL) {\n\t\tpr_err(\"mscp_init: %s\", priv_get_err());\n\t\treturn -1;\n\t}\n\n\tif (!resume) {\n\t\t/* normal transfer (not resume) */\n\t\tif (argc - optind < 2) {\n\t\t\t/* mscp needs at lease 2 (src and target) argument */\n\t\t\tusage(false);\n\t\t\treturn 1;\n\t\t}\n\t\ti = 
argc - optind;\n\n\t\tif ((t = validate_targets(argv + optind, i)) == NULL)\n\t\t\treturn -1;\n\n\t\tif (t[0].host) {\n\t\t\t/* copy remote to local */\n\t\t\tdirection = MSCP_DIRECTION_R2L;\n\t\t\tremote = t[0].host;\n\t\t\ts.login_name = s.login_name ? s.login_name : t[0].user;\n\t\t} else {\n\t\t\t/* copy local to remote */\n\t\t\tdirection = MSCP_DIRECTION_L2R;\n\t\t\tremote = t[i - 1].host;\n\t\t\ts.login_name = s.login_name ? s.login_name : t[i - 1].user;\n\t\t}\n\n\t\tif (mscp_set_remote(m, remote, direction) < 0) {\n\t\t\tpr_err(\"mscp_set_remote: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\n\t\tif (mscp_connect(m) < 0) {\n\t\t\tpr_err(\"mscp_connect: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\n\t\tfor (n = 0; n < i - 1; n++) {\n\t\t\tif (mscp_add_src_path(m, t[n].path) < 0) {\n\t\t\t\tpr_err(\"mscp_add_src_path: %s\", priv_get_err());\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\n\t\tif (mscp_set_dst_path(m, t[i - 1].path) < 0) {\n\t\t\tpr_err(\"mscp_set_dst_path: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\n\t\t/* start to scan source files and resolve their destination paths */\n\t\tif (mscp_scan(m) < 0) {\n\t\t\tpr_err(\"mscp_scan: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\t} else {\n\t\t/* resume a transfer from the specified checkpoint */\n\t\tchar r[512];\n\t\tint d;\n\t\tif (mscp_checkpoint_get_remote(checkpoint_load, r, sizeof(r), &d) < 0) {\n\t\t\tpr_err(\"mscp_checkpoint_get_remote: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\n\t\tif (mscp_set_remote(m, r, d) < 0) {\n\t\t\tpr_err(\"mscp_set_remote: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\n\t\t/* load paths and chunks to be transferred from checkpoint */\n\t\tif (mscp_checkpoint_load(m, checkpoint_load) < 0) {\n\t\t\tpr_err(\"mscp_checkpoint_load: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\n\t\tif (dryrun)\n\t\t\tgoto out;\n\n\t\t/* create the first ssh connection to get password or\n\t\t * passphrase. 
The sftp session over it will be not\n\t\t * used for resume transfer in actuality. ToDo:\n\t\t * connectin managemnet should be improved. */\n\t\tif (mscp_connect(m) < 0) {\n\t\t\tpr_err(\"mscp_connect: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (dryrun) {\n\t\tret = mscp_scan_join(m);\n\t\tgoto out;\n\t}\n\n\tif (pthread_create(&tid_stat, NULL, print_stat_thread, NULL) < 0) {\n\t\tpr_err(\"pthread_create: %s\", strerror(errno));\n\t\treturn -1;\n\t}\n\n\tif (signal(SIGINT, sigint_handler) == SIG_ERR) {\n\t\tpr_err(\"signal: %s\", strerror(errno));\n\t\treturn -1;\n\t}\n\n\tret = mscp_start(m);\n\tif (ret < 0)\n\t\tpr_err(\"mscp_start: %s\", priv_get_err());\n\n\tret = mscp_join(m);\n\n\tpthread_cancel(tid_stat);\n\tpthread_join(tid_stat, NULL);\n\n\tprint_stat(true);\n\tprint_cli(\"\\n\"); /* final output */\nout:\n\tif (interrupted)\n\t\tret = 1;\n\n\tif ((dryrun || ret != 0) && checkpoint_save) {\n\t\tprint_cli(\"save checkpoint to %s\\n\", checkpoint_save);\n\t\tif (mscp_checkpoint_save(m, checkpoint_save) < 0) {\n\t\t\tpr_err(\"mscp_checkpoint_save: %s\", priv_get_err());\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tmscp_cleanup(m);\n\tmscp_free(m);\n\n\treturn ret;\n}\n\n/* progress bar-related functions */\n\ndouble calculate_timedelta(struct timeval *b, struct timeval *a)\n{\n\tdouble sec, usec;\n\n\tif (a->tv_usec < b->tv_usec) {\n\t\ta->tv_usec += 1000000;\n\t\ta->tv_sec--;\n\t}\n\n\tsec = a->tv_sec - b->tv_sec;\n\tusec = a->tv_usec - b->tv_usec;\n\tsec += usec / 1000000;\n\n\treturn sec;\n}\n\ndouble calculate_bps(size_t diff, struct timeval *b, struct timeval *a)\n{\n\treturn (double)diff / calculate_timedelta(b, a);\n}\n\nchar *calculate_eta(size_t remain, size_t diff, struct timeval *b, struct timeval *a,\n\t\t    bool final)\n{\n\tstatic char buf[16];\n\n#define bps_window_size 16\n\tstatic double bps_window[bps_window_size];\n\tstatic size_t sum, idx, count;\n\tdouble elapsed = calculate_timedelta(b, a);\n\tdouble bps = diff / 
elapsed;\n\tdouble avg, eta;\n\n\t/* early return when diff == 0 (stalled) or final output */\n\tif (diff == 0) {\n\t\tsnprintf(buf, sizeof(buf), \"--:-- ETA\");\n\t\treturn buf;\n\t}\n\tif (final) {\n\t\tsnprintf(buf, sizeof(buf), \"%02d:%02d    \", (int)(floor(elapsed / 60)),\n\t\t\t (int)round(elapsed) % 60);\n\t\treturn buf;\n\t}\n\n\t/* drop the old bps value and add the recent one */\n\tsum -= bps_window[idx];\n\tbps_window[idx] = bps;\n\tsum += bps_window[idx];\n\tidx = (idx + 1) % bps_window_size;\n\tcount++;\n\n\t/* calcuate ETA from avg of recent bps values */\n\tavg = sum / min(count, bps_window_size);\n\teta = remain / avg;\n\tsnprintf(buf, sizeof(buf), \"%02d:%02d ETA\", (int)floor(eta / 60),\n\t\t (int)round(eta) % 60);\n\n\treturn buf;\n}\n\nvoid print_progress_bar(double percent, char *suffix)\n{\n\tint n, thresh, bar_width;\n\tstruct winsize ws;\n\tchar buf[128];\n\n\t/*\n         * [=======>   ] XX% SUFFIX\n         */\n\n\tbuf[0] = '\\0';\n\n\tif (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0 || ws.ws_col == 0) {\n\n\t\t// fallback to default\n\t\tws.ws_col = 80;\n\t\tws.ws_row = 24;\n\t}\n\n\tbar_width = min(sizeof(buf), ws.ws_col) - strlen(suffix) - 7;\n\n\tmemset(buf, 0, sizeof(buf));\n\tif (bar_width > 8) {\n\t\tthresh = floor(bar_width * (percent / 100)) - 1;\n\n\t\tfor (n = 1; n < bar_width - 1; n++) {\n\t\t\tif (n <= thresh)\n\t\t\t\tbuf[n] = '=';\n\t\t\telse\n\t\t\t\tbuf[n] = ' ';\n\t\t}\n\t\tbuf[thresh] = '>';\n\t\tbuf[0] = '[';\n\t\tbuf[bar_width - 1] = ']';\n\t\tsnprintf(buf + bar_width, sizeof(buf) - bar_width, \" %3d%% \",\n\t\t\t (int)floor(percent));\n\t}\n\n\tprint_cli(\"\\r\\033[K\"\n\t\t  \"%s%s\",\n\t\t  buf, suffix);\n}\n\nvoid print_progress(struct timeval *b, struct timeval *a, size_t total, size_t last,\n\t\t    size_t done, bool final)\n{\n\tchar *bps_units[] = { \"B/s \", \"KB/s\", \"MB/s\", \"GB/s\" };\n\tchar *byte_units[] = { \"B \", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\" };\n\tchar suffix[128];\n\tint bps_u, byte_tu, 
byte_du;\n\tdouble total_round, done_round;\n\tint percent;\n\tdouble bps;\n\n#define array_size(a) (sizeof(a) / sizeof(a[0]))\n\n\tif (total <= 0) {\n\t\tprint_cli(\"\\r\\033[K\"\n\t\t\t  \"total 0 byte transferred\");\n\t\treturn; /* copy 0-byte file(s) */\n\t}\n\n\ttotal_round = total;\n\tfor (byte_tu = 0; total_round > 1000 && byte_tu < array_size(byte_units) - 1;\n\t     byte_tu++)\n\t\ttotal_round /= 1024;\n\n\tbps = calculate_bps(done - last, b, a);\n\tfor (bps_u = 0; bps > 1000 && bps_u < array_size(bps_units); bps_u++)\n\t\tbps /= 1000;\n\n\tpercent = floor(((double)(done) / (double)total) * 100);\n\n\tdone_round = done;\n\tfor (byte_du = 0; done_round > 1024 && byte_du < array_size(byte_units) - 1;\n\t     byte_du++)\n\t\tdone_round /= 1024;\n\n\tsnprintf(suffix, sizeof(suffix), \"%4.1lf%s/%.1lf%s %6.1f%s  %s\", done_round,\n\t\t byte_units[byte_du], total_round, byte_units[byte_tu], bps,\n\t\t bps_units[bps_u], calculate_eta(total - done, done - last, b, a, final));\n\n\tprint_progress_bar(percent, suffix);\n}\n\nstruct xfer_stat {\n\tstruct timeval start, before, after;\n\tsize_t total;\n\tsize_t last;\n\tsize_t done;\n};\nstruct xfer_stat x;\n\nvoid print_stat(bool final)\n{\n\tstruct mscp_stats s;\n\n\tgettimeofday(&x.after, NULL);\n\tif (calculate_timedelta(&x.before, &x.after) > 1 || final) {\n\t\tmscp_get_stats(m, &s);\n\t\tx.total = s.total;\n\t\tx.done = s.done;\n\t\tprint_progress(!final ? &x.before : &x.start, &x.after, x.total,\n\t\t\t       !final ? x.last : 0, x.done, final);\n\t\tx.before = x.after;\n\t\tx.last = x.done;\n\t}\n}\n\nvoid *print_stat_thread(void *arg)\n{\n\tmemset(&x, 0, sizeof(x));\n\tgettimeofday(&x.start, NULL);\n\tx.before = x.start;\n\n\twhile (true) {\n\t\tprint_stat(false);\n\t\tsleep(1);\n\t}\n\n\treturn NULL;\n}\n"
  },
  {
    "path": "src/minmax.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _MINMAX_H_\n#define _MINMAX_H_\n\n#define min(a, b) (((a) > (b)) ? (b) : (a))\n#define max(a, b) (((a) > (b)) ? (a) : (b))\n\n#endif /* _MINMAX_H_ */\n"
  },
  {
    "path": "src/mscp.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <stdbool.h>\n#include <unistd.h>\n#include <math.h>\n#include <pthread.h>\n#include <semaphore.h>\n#include <sys/time.h>\n\n#include <pool.h>\n#include <minmax.h>\n#include <ssh.h>\n#include <path.h>\n#include <checkpoint.h>\n#include <fileops.h>\n#include <atomic.h>\n#include <platform.h>\n#include <print.h>\n#include <strerrno.h>\n#include <mscp.h>\n#include <bwlimit.h>\n\n#include <openbsd-compat/openbsd-compat.h>\n\nstruct mscp_thread {\n\tstruct mscp *m;\n\tsftp_session sftp;\n\n\t/* attributes used by copy threads */\n\tsize_t copied_bytes;\n\tint id;\n\tint cpu;\n\n\t/* thread-specific values */\n\tpthread_t tid;\n\tint ret;\n};\n\nstruct mscp {\n\tchar *remote; /* remote host (and uername) */\n\tint direction; /* copy direction */\n\tchar dst_path[PATH_MAX];\n\n\tstruct mscp_opts *opts;\n\tstruct mscp_ssh_opts *ssh_opts;\n\n\tint *cores; /* usable cpu cores by COREMASK */\n\tint nr_cores; /* length of array of cores */\n\n\tsem_t *sem; /* semaphore for concurrent  connecting ssh sessions */\n\n\tsftp_session first; /* first sftp session */\n\n\tpool *src_pool, *path_pool, *chunk_pool, *thread_pool;\n\n\tsize_t total_bytes; /* total_bytes to be copied */\n\tbool chunk_pool_ready;\n#define chunk_pool_is_ready(m) ((m)->chunk_pool_ready)\n#define chunk_pool_set_ready(m, b) ((m)->chunk_pool_ready = b)\n\n\tstruct bwlimit bw; /* bandwidth limit mechanism */\n\n\tstruct mscp_thread scan; /* mscp_thread for mscp_scan_thread() */\n};\n\n#define DEFAULT_MIN_CHUNK_SZ (16 << 20) /* 16MB */\n#define DEFAULT_NR_AHEAD 32\n#define DEFAULT_BUF_SZ 16384\n/* XXX: we use 16384 byte buffer pointed by\n * https://api.libssh.org/stable/libssh_tutor_sftp.html. The larget\n * read length from sftp_async_read is 65536 byte. 
Read sizes larger\n * than 65536 cause a situation where data remainds but\n * sftp_async_read returns 0.\n */\n\n#define DEFAULT_MAX_STARTUPS 8\n\n#define non_null_string(s) (s[0] != '\\0')\n\nstatic int expand_coremask(const char *coremask, int **cores, int *nr_cores)\n{\n\tint n, *core_list, nr_usable, nr_all;\n\tchar c[2] = { 'x', '\\0' };\n\tconst char *_coremask;\n\tlong v, needle;\n\tint ncores = nr_cpus();\n\n\t/*\n         * This function returns array of usable cores in `cores` and\n         * returns the number of usable cores (array length) through\n         * nr_cores.\n         */\n\n\tif (strncmp(coremask, \"0x\", 2) == 0)\n\t\t_coremask = coremask + 2;\n\telse\n\t\t_coremask = coremask;\n\n\tcore_list = realloc(NULL, sizeof(int) * 64);\n\tif (!core_list) {\n\t\tpriv_set_errv(\"realloc: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tnr_usable = 0;\n\tnr_all = 0;\n\tfor (n = strlen(_coremask) - 1; n >= 0; n--) {\n\t\tc[0] = _coremask[n];\n\t\tv = strtol(c, NULL, 16);\n\t\tif (v == LONG_MIN || v == LONG_MAX) {\n\t\t\tpriv_set_errv(\"invalid coremask: %s\", coremask);\n\t\t\treturn -1;\n\t\t}\n\n\t\tfor (needle = 0x01; needle < 0x10; needle <<= 1) {\n\t\t\tnr_all++;\n\t\t\tif (nr_all > ncores)\n\t\t\t\tbreak; /* too long coremask */\n\t\t\tif (v & needle) {\n\t\t\t\tnr_usable++;\n\t\t\t\tcore_list = realloc(core_list, sizeof(int) * nr_usable);\n\t\t\t\tif (!core_list) {\n\t\t\t\t\tpriv_set_errv(\"realloc: %s\", strerrno());\n\t\t\t\t\treturn -1;\n\t\t\t\t}\n\t\t\t\tcore_list[nr_usable - 1] = nr_all - 1;\n\t\t\t}\n\t\t}\n\t}\n\n\tif (nr_usable < 1) {\n\t\tpriv_set_errv(\"invalid core mask: %s\", coremask);\n\t\treturn -1;\n\t}\n\n\t*cores = core_list;\n\t*nr_cores = nr_usable;\n\treturn 0;\n}\n\nstatic int default_nr_threads()\n{\n\treturn (int)(floor(log(nr_cpus()) * 2) + 1);\n}\n\nstatic int validate_and_set_defaut_params(struct mscp_opts *o)\n{\n\tif (o->nr_threads < 0) {\n\t\tpriv_set_errv(\"invalid nr_threads: %d\", o->nr_threads);\n\t\treturn 
-1;\n\t} else if (o->nr_threads == 0)\n\t\to->nr_threads = default_nr_threads();\n\n\tif (o->nr_ahead < 0) {\n\t\tpriv_set_errv(\"invalid nr_ahead: %d\", o->nr_ahead);\n\t\treturn -1;\n\t} else if (o->nr_ahead == 0)\n\t\to->nr_ahead = DEFAULT_NR_AHEAD;\n\n\tif (o->min_chunk_sz == 0)\n\t\to->min_chunk_sz = DEFAULT_MIN_CHUNK_SZ;\n\n\tif (o->max_chunk_sz) {\n\t\tif (o->min_chunk_sz > o->max_chunk_sz) {\n\t\t\tpriv_set_errv(\"smaller max chunk size than \"\n\t\t\t\t      \"min chunk size: %lu < %lu\",\n\t\t\t\t      o->max_chunk_sz, o->min_chunk_sz);\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (o->buf_sz == 0)\n\t\to->buf_sz = DEFAULT_BUF_SZ;\n\telse if (o->buf_sz == 0) {\n\t\tpriv_set_errv(\"invalid buf size: %lu\", o->buf_sz);\n\t\treturn -1;\n\t}\n\n\tif (o->max_startups == 0)\n\t\to->max_startups = DEFAULT_MAX_STARTUPS;\n\telse if (o->max_startups < 0) {\n\t\tpriv_set_errv(\"invalid max_startups: %d\", o->max_startups);\n\t\treturn -1;\n\t}\n\n\tif (o->interval > 0) {\n\t\t/* when the interval is set, establish SSH connections sequentially. 
*/\n\t\to->max_startups = 1;\n\t}\n\n\treturn 0;\n}\n\nint mscp_set_remote(struct mscp *m, const char *remote_host, int direction)\n{\n\tif (!remote_host) {\n\t\tpriv_set_errv(\"empty remote host\");\n\t\treturn -1;\n\t}\n\n\tif (!(direction == MSCP_DIRECTION_L2R || direction == MSCP_DIRECTION_R2L)) {\n\t\tpriv_set_errv(\"invalid copy direction: %d\", direction);\n\t\treturn -1;\n\t}\n\n\tif (!(m->remote = strdup(remote_host))) {\n\t\tpriv_set_errv(\"strdup: %s\", strerrno());\n\t\treturn -1;\n\t}\n\tm->direction = direction;\n\n\treturn 0;\n}\n\nstruct mscp *mscp_init(struct mscp_opts *o, struct mscp_ssh_opts *s)\n{\n\tstruct mscp *m;\n\tint n;\n\n\tset_print_severity(o->severity);\n\n\tif (validate_and_set_defaut_params(o) < 0) {\n\t\treturn NULL;\n\t}\n\n\tif (!(m = malloc(sizeof(*m)))) {\n\t\tpriv_set_errv(\"malloc: %s\", strerrno());\n\t\treturn NULL;\n\t}\n\tmemset(m, 0, sizeof(*m));\n\tm->opts = o;\n\tm->ssh_opts = s;\n\tchunk_pool_set_ready(m, false);\n\n\tif (!(m->src_pool = pool_new())) {\n\t\tpriv_set_errv(\"pool_new: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\tif (!(m->path_pool = pool_new())) {\n\t\tpriv_set_errv(\"pool_new: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\tif (!(m->chunk_pool = pool_new())) {\n\t\tpriv_set_errv(\"pool_new: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\tif (!(m->thread_pool = pool_new())) {\n\t\tpriv_set_errv(\"pool_new: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\tif ((m->sem = sem_create(o->max_startups)) == NULL) {\n\t\tpriv_set_errv(\"sem_create: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\tif (o->coremask) {\n\t\tif (expand_coremask(o->coremask, &m->cores, &m->nr_cores) < 0)\n\t\t\tgoto free_out;\n\t\tchar b[512], c[8];\n\t\tmemset(b, 0, sizeof(b));\n\t\tfor (n = 0; n < m->nr_cores; n++) {\n\t\t\tmemset(c, 0, sizeof(c));\n\t\t\tsnprintf(c, sizeof(c) - 1, \" %d\", m->cores[n]);\n\t\t\tstrlcat(b, c, sizeof(b));\n\t\t}\n\t\tpr_notice(\"usable cpu cores:%s\", b);\n\t}\n\n\tif (bwlimit_init(&m->bw, 
o->bitrate, 100) < 0) { /* 100ms window (hardcoded) */\n\t\tpriv_set_errv(\"bwlimit_init: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\t/* workaround: set libssh using openssh proxyjump\n\t * https://gitlab.com/libssh/libssh-mirror/-/issues/319 */\n\tssh_use_openssh_proxy_jumps(1);\n\n\t/* call ssh_init() because libssh is statically linked */\n\tssh_init();\n\n\treturn m;\n\nfree_out:\n\tif (m->src_pool)\n\t\tpool_free(m->src_pool);\n\tif (m->path_pool)\n\t\tpool_free(m->path_pool);\n\tif (m->chunk_pool)\n\t\tpool_free(m->chunk_pool);\n\tif (m->thread_pool)\n\t\tpool_free(m->thread_pool);\n\tif (m->remote)\n\t\tfree(m->remote);\n\tfree(m);\n\treturn NULL;\n}\n\nint mscp_connect(struct mscp *m)\n{\n\tm->first = ssh_init_sftp_session(m->remote, m->ssh_opts);\n\tif (!m->first)\n\t\treturn -1;\n\n\treturn 0;\n}\n\nint mscp_add_src_path(struct mscp *m, const char *src_path)\n{\n\tchar *s = strdup(src_path);\n\tif (!s) {\n\t\tpriv_set_errv(\"strdup: %s\", strerrno());\n\t\treturn -1;\n\t}\n\tif (pool_push(m->src_pool, s) < 0) {\n\t\tpriv_set_errv(\"pool_push: %s\", strerrno());\n\t\treturn -1;\n\t}\n\treturn 0;\n}\n\nint mscp_set_dst_path(struct mscp *m, const char *dst_path)\n{\n\tif (strlen(dst_path) + 1 >= PATH_MAX) {\n\t\tpriv_set_errv(\"too long dst path: %s\", dst_path);\n\t\treturn -1;\n\t}\n\n\tif (!non_null_string(dst_path)) {\n\t\tpriv_set_errv(\"empty dst path\");\n\t\treturn -1;\n\t}\n\n\tstrncpy(m->dst_path, dst_path, PATH_MAX);\n\n\treturn 0;\n}\n\nstatic size_t get_page_mask(void)\n{\n\tsize_t page_sz = sysconf(_SC_PAGESIZE);\n\treturn ~(page_sz - 1);\n}\n\nstatic void mscp_stop_copy_thread(struct mscp *m)\n{\n\tstruct mscp_thread *t;\n\tunsigned int idx;\n\tpool_lock(m->thread_pool);\n\tpool_for_each(m->thread_pool, t, idx) {\n\t\tif (t->tid)\n\t\t\tpthread_cancel(t->tid);\n\t}\n\tpool_unlock(m->thread_pool);\n}\n\nstatic void mscp_stop_scan_thread(struct mscp *m)\n{\n\tif (m->scan.tid)\n\t\tpthread_cancel(m->scan.tid);\n}\n\nvoid mscp_stop(struct 
mscp *m)\n{\n\tmscp_stop_scan_thread(m);\n\tmscp_stop_copy_thread(m);\n}\n\nvoid *mscp_scan_thread(void *arg)\n{\n\tstruct mscp_thread *t = arg;\n\tstruct mscp *m = t->m;\n\tsftp_session src_sftp = NULL, dst_sftp = NULL;\n\tstruct path_resolve_args a;\n\tstruct path *p;\n\tstruct stat ss, ds;\n\tchar *src_path;\n\tglob_t pglob;\n\tint n;\n\n\tswitch (m->direction) {\n\tcase MSCP_DIRECTION_L2R:\n\t\tsrc_sftp = NULL;\n\t\tdst_sftp = t->sftp;\n\t\tbreak;\n\tcase MSCP_DIRECTION_R2L:\n\t\tsrc_sftp = t->sftp;\n\t\tdst_sftp = NULL;\n\t\tbreak;\n\tdefault:\n\t\tpr_err(\"invalid copy direction: %d\", m->direction);\n\t\tgoto err_out;\n\t}\n\n\t/* initialize path_resolve_args */\n\tmemset(&a, 0, sizeof(a));\n\ta.total_bytes = &m->total_bytes;\n\n\tif (pool_size(m->src_pool) > 1)\n\t\ta.dst_path_should_dir = true;\n\n\tif (m->dst_path[strlen(m->dst_path) - 1] == '/')\n\t\ta.dst_path_should_dir = true;\n\n\tif (mscp_stat(m->dst_path, &ds, dst_sftp) == 0) {\n\t\tif (S_ISDIR(ds.st_mode))\n\t\t\ta.dst_path_is_dir = true;\n\t}\n\n\ta.path_pool = m->path_pool;\n\ta.chunk_pool = m->chunk_pool;\n\ta.nr_conn = m->opts->nr_threads;\n\ta.min_chunk_sz = m->opts->min_chunk_sz;\n\ta.max_chunk_sz = m->opts->max_chunk_sz;\n\ta.chunk_align = get_page_mask();\n\n\tpr_info(\"start to walk source path(s)\");\n\n\t/* walk each src_path recusively, and resolve path->dst_path for each src */\n\tpool_iter_for_each(m->src_pool, src_path) {\n\t\tmemset(&pglob, 0, sizeof(pglob));\n\t\tif (mscp_glob(src_path, GLOB_NOCHECK, &pglob, src_sftp) < 0) {\n\t\t\tpr_err(\"mscp_glob: %s\", strerrno());\n\t\t\tgoto err_out;\n\t\t}\n\n\t\tfor (n = 0; n < pglob.gl_pathc; n++) {\n\t\t\tif (mscp_stat(pglob.gl_pathv[n], &ss, src_sftp) < 0) {\n\t\t\t\tpr_err(\"stat: %s %s\", src_path, strerrno());\n\t\t\t\tgoto err_out;\n\t\t\t}\n\n\t\t\tif (!a.dst_path_should_dir && pglob.gl_pathc > 1)\n\t\t\t\ta.dst_path_should_dir = true; /* we have over 1 srces */\n\n\t\t\t/* set path specific args */\n\t\t\ta.src_path = 
pglob.gl_pathv[n];\n\t\t\ta.dst_path = m->dst_path;\n\t\t\ta.src_path_is_dir = S_ISDIR(ss.st_mode);\n\n\t\t\tif (walk_src_path(src_sftp, pglob.gl_pathv[n], &a) < 0)\n\t\t\t\tgoto err_out;\n\t\t}\n\t\tmscp_globfree(&pglob);\n\t}\n\n\tpr_info(\"walk source path(s) done\");\n\tt->ret = 0;\n\tchunk_pool_set_ready(m, true);\n\treturn NULL;\n\nerr_out:\n\tt->ret = -1;\n\tchunk_pool_set_ready(m, true);\n\treturn NULL;\n}\n\nint mscp_scan(struct mscp *m)\n{\n\tstruct mscp_thread *t = &m->scan;\n\tint ret;\n\n\tmemset(t, 0, sizeof(*t));\n\tt->m = m;\n\tt->sftp = m->first;\n\n\tif ((ret = pthread_create(&t->tid, NULL, mscp_scan_thread, t)) < 0) {\n\t\tpriv_set_err(\"pthread_create: %d\", ret);\n\t\treturn -1;\n\t}\n\n\t/* We wait for there are over nr_threads chunks to determine\n\t * actual number of threads (and connections), or scan\n\t * finished. If the number of chunks are smaller than\n\t * nr_threads, we adjust nr_threads to the number of chunks.\n\t */\n\twhile (!chunk_pool_is_ready(m) && pool_size(m->chunk_pool) < m->opts->nr_threads)\n\t\tusleep(100);\n\n\treturn 0;\n}\n\nint mscp_scan_join(struct mscp *m)\n{\n\tstruct mscp_thread *t = &m->scan;\n\tif (t->tid) {\n\t\tpthread_join(t->tid, NULL);\n\t\tt->tid = 0;\n\t\treturn t->ret;\n\t}\n\treturn 0;\n}\n\nint mscp_checkpoint_get_remote(const char *pathname, char *remote, size_t len, int *dir)\n{\n\treturn checkpoint_load_remote(pathname, remote, len, dir);\n}\n\nint mscp_checkpoint_load(struct mscp *m, const char *pathname)\n{\n\tstruct chunk *c;\n\tunsigned int i;\n\n\tif (checkpoint_load_paths(pathname, m->path_pool, m->chunk_pool) < 0)\n\t\treturn -1;\n\n\t/* totaling up bytes to be transferred and set chunk_pool is\n\t * ready instead of the mscp_scan thread */\n\tm->total_bytes = 0;\n\tpool_for_each(m->chunk_pool, c, i) {\n\t\tm->total_bytes += c->len;\n\t}\n\tchunk_pool_set_ready(m, true);\n\n\treturn 0;\n}\n\nint mscp_checkpoint_save(struct mscp *m, const char *pathname)\n{\n\treturn 
checkpoint_save(pathname, m->direction, m->ssh_opts->login_name, m->remote,\n\t\t\t       m->path_pool, m->chunk_pool);\n}\n\nstatic void *mscp_copy_thread(void *arg);\n\nstatic struct mscp_thread *mscp_copy_thread_spawn(struct mscp *m, int id)\n{\n\tstruct mscp_thread *t;\n\tint ret;\n\n\tif (!(t = malloc(sizeof(*t)))) {\n\t\tpriv_set_errv(\"malloc: %s\", strerrno());\n\t\treturn NULL;\n\t}\n\n\tmemset(t, 0, sizeof(*t));\n\tt->m = m;\n\tt->id = id;\n\tif (m->cores == NULL)\n\t\tt->cpu = -1; /* not pinned to cpu */\n\telse\n\t\tt->cpu = m->cores[id % m->nr_cores];\n\n\tif ((ret = pthread_create(&t->tid, NULL, mscp_copy_thread, t)) < 0) {\n\t\tpriv_set_errv(\"pthread_create: %d\", ret);\n\t\tfree(t);\n\t\treturn NULL;\n\t}\n\n\treturn t;\n}\n\nint mscp_start(struct mscp *m)\n{\n\tstruct mscp_thread *t;\n\tint n, ret = 0;\n\n\tif ((n = pool_size(m->chunk_pool)) < m->opts->nr_threads) {\n\t\tpr_notice(\"we have %d chunk(s), set number of connections to %d\", n, n);\n\t\tm->opts->nr_threads = n;\n\t}\n\n\tpr_notice(\"threads: %d\",m->opts->nr_threads);\n\tpr_notice(\"bwlimit: %ld bps\", m->bw.bps);\n\n\tfor (n = 0; n < m->opts->nr_threads; n++) {\n\t\tt = mscp_copy_thread_spawn(m, n);\n\t\tif (!t)\n\t\t\tbreak;\n\t\tif (pool_push_lock(m->thread_pool, t) < 0) {\n\t\t\tpriv_set_errv(\"pool_push_lock: %s\", strerrno());\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn n;\n}\n\nint mscp_join(struct mscp *m)\n{\n\tstruct mscp_thread *t;\n\tstruct path *p;\n\tunsigned int idx;\n\tsize_t total_copied_bytes = 0, nr_copied = 0, nr_tobe_copied = 0;\n\tint n, ret = 0;\n\n\t/* waiting for scan thread joins... */\n\tret = mscp_scan_join(m);\n\n\t/* waiting for copy threads join... 
*/\n\tpool_for_each(m->thread_pool, t, idx) {\n\t\tpthread_join(t->tid, NULL);\n\t}\n\n\tpool_for_each(m->thread_pool, t, idx) {\n\t\ttotal_copied_bytes += t->copied_bytes;\n\t\tif (t->ret != 0)\n\t\t\tret = t->ret;\n\t\tif (t->sftp) {\n\t\t\tssh_sftp_close(t->sftp);\n\t\t\tt->sftp = NULL;\n\t\t}\n\t}\n\n\t/* count up number of transferred files */\n\tpool_iter_for_each(m->path_pool, p) {\n\t\tnr_tobe_copied++;\n\t\tif (p->state == FILE_STATE_DONE) {\n\t\t\tnr_copied++;\n\t\t}\n\t}\n\n\tif (m->first) {\n\t\tssh_sftp_close(m->first);\n\t\tm->first = NULL;\n\t}\n\n\tpr_notice(\"%lu/%lu bytes copied for %lu/%lu files\", total_copied_bytes,\n\t\t  m->total_bytes, nr_copied, nr_tobe_copied);\n\n\treturn ret;\n}\n\n/* copy thread-related functions */\n\nstatic void wait_for_interval(int interval)\n{\n\t_Atomic static long next;\n\tstruct timeval t;\n\tlong now;\n\n\tgettimeofday(&t, NULL);\n\tnow = t.tv_sec * 1000000 + t.tv_usec;\n\n\tif (next - now > 0)\n\t\tusleep(next - now);\n\n\tnext = now + interval * 1000000;\n}\n\nvoid *mscp_copy_thread(void *arg)\n{\n\tsftp_session src_sftp, dst_sftp;\n\tstruct mscp_thread *t = arg;\n\tstruct mscp *m = t->m;\n\tstruct chunk *c;\n\tbool next_chunk_exist;\n\n\t/* when error occurs, each thread prints error messages\n\t * immediately with pr_* functions. 
*/\n\n\tif (t->cpu > -1) {\n\t\tif (set_thread_affinity(pthread_self(), t->cpu) < 0) {\n\t\t\tpr_err(\"set_thread_affinity: %s\", priv_get_err());\n\t\t\tgoto err_out;\n\t\t}\n\t\tpr_notice(\"thread[%d]: pin to cpu core %d\", t->id, t->cpu);\n\t}\n\n\tif (sem_wait(m->sem) < 0) {\n\t\tpr_err(\"sem_wait: %s\", strerrno());\n\t\tgoto err_out;\n\t}\n\n\tif ((next_chunk_exist = pool_iter_has_next_lock(m->chunk_pool))) {\n\t\tif (m->opts->interval > 0)\n\t\t\twait_for_interval(m->opts->interval);\n\t\tpr_notice(\"thread[%d]: connecting to %s\", t->id, m->remote);\n\t\tt->sftp = ssh_init_sftp_session(m->remote, m->ssh_opts);\n\t}\n\n\tif (sem_post(m->sem) < 0) {\n\t\tpr_err(\"sem_post: %s\", strerrno());\n\t\tgoto err_out;\n\t}\n\n\tif (!next_chunk_exist) {\n\t\tpr_notice(\"thread[%d]: no more connections needed\", t->id);\n\t\tgoto out;\n\t}\n\n\tif (!t->sftp) {\n\t\tpr_err(\"thread[%d]: %s\", t->id, priv_get_err());\n\t\tgoto err_out;\n\t}\n\n\tswitch (m->direction) {\n\tcase MSCP_DIRECTION_L2R:\n\t\tsrc_sftp = NULL;\n\t\tdst_sftp = t->sftp;\n\t\tbreak;\n\tcase MSCP_DIRECTION_R2L:\n\t\tsrc_sftp = t->sftp;\n\t\tdst_sftp = NULL;\n\t\tbreak;\n\tdefault:\n\t\tassert(false);\n\t\tgoto err_out; /* not reached */\n\t}\n\n\twhile (1) {\n\t\tc = pool_iter_next_lock(m->chunk_pool);\n\t\tif (c == NULL) {\n\t\t\tif (!chunk_pool_is_ready(m)) {\n\t\t\t\t/* a new chunk will be added. wait for it. 
*/\n\t\t\t\tusleep(100);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tbreak; /* no more chunks */\n\t\t}\n\n\t\tif ((t->ret = copy_chunk(c, src_sftp, dst_sftp, m->opts->nr_ahead,\n\t\t\t\t\t m->opts->buf_sz, m->opts->preserve_ts, &m->bw,\n\t\t\t\t\t &t->copied_bytes)) < 0)\n\t\t\tbreak;\n\t}\n\n\tif (t->ret < 0) {\n\t\tpr_err(\"thread[%d]: copy failed: %s -> %s, 0x%010lx-0x%010lx, %s\", t->id,\n\t\t       c->p->path, c->p->dst_path, c->off, c->off + c->len,\n\t\t       priv_get_err());\n\t}\n\n\treturn NULL;\n\nerr_out:\n\tt->ret = -1;\n\treturn NULL;\nout:\n\tt->ret = 0;\n\treturn NULL;\n}\n\n/* cleanup-related functions */\n\nvoid mscp_cleanup(struct mscp *m)\n{\n\tif (m->first) {\n\t\tssh_sftp_close(m->first);\n\t\tm->first = NULL;\n\t}\n\n\tpool_zeroize(m->src_pool, free);\n\tpool_zeroize(m->path_pool, (pool_map_f)free_path);\n\tpool_zeroize(m->chunk_pool, free);\n\tpool_zeroize(m->thread_pool, free);\n}\n\nvoid mscp_free(struct mscp *m)\n{\n\tpool_destroy(m->src_pool, free);\n\tpool_destroy(m->path_pool, (pool_map_f)free_path);\n\n\tif (m->remote)\n\t\tfree(m->remote);\n\tif (m->cores)\n\t\tfree(m->cores);\n\n\tsem_release(m->sem);\n\tfree(m);\n}\n\nvoid mscp_get_stats(struct mscp *m, struct mscp_stats *s)\n{\n\tstruct mscp_thread *t;\n\tunsigned int idx;\n\n\ts->total = m->total_bytes;\n\ts->done = 0;\n\n\tpool_for_each(m->thread_pool, t, idx) {\n\t\ts->done += t->copied_bytes;\n\t}\n}\n"
  },
  {
    "path": "src/openbsd-compat/openbsd-compat.h",
    "content": "#ifndef _OPENBSD_COMPAT_H\n#define _OPENBSD_COMPAT_H\n\n#include \"config.h\"\n\n#ifndef HAVE_STRLCAT\nsize_t strlcat(char *dst, const char *src, size_t siz);\n#endif\n\n#endif /* _OPENBSD_COMPAT_H_ */\n"
  },
  {
    "path": "src/openbsd-compat/strlcat.c",
    "content": "/*\t$OpenBSD: strlcat.c,v 1.13 2005/08/08 08:05:37 espie Exp $\t*/\n\n/*\n * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/* OPENBSD ORIGINAL: lib/libc/string/strlcat.c */\n\n#include \"config.h\"\n#ifndef HAVE_STRLCAT\n\n#include <sys/types.h>\n#include <string.h>\n\n/*\n * Appends src to string dst of size siz (unlike strncat, siz is the\n * full size of dst, not space left).  At most siz-1 characters\n * will be copied.  Always NUL terminates (unless siz <= strlen(dst)).\n * Returns strlen(src) + MIN(siz, strlen(initial dst)).\n * If retval >= siz, truncation occurred.\n */\nsize_t\nstrlcat(char *dst, const char *src, size_t siz)\n{\n\tchar *d = dst;\n\tconst char *s = src;\n\tsize_t n = siz;\n\tsize_t dlen;\n\n\t/* Find the end of dst and adjust bytes left but don't go past end */\n\twhile (n-- != 0 && *d != '\\0')\n\t\td++;\n\tdlen = d - dst;\n\tn = siz - dlen;\n\n\tif (n == 0)\n\t\treturn(dlen + strlen(s));\n\twhile (*s != '\\0') {\n\t\tif (n != 1) {\n\t\t\t*d++ = *s;\n\t\t\tn--;\n\t\t}\n\t\ts++;\n\t}\n\t*d = '\\0';\n\n\treturn(dlen + (s - src));\t/* count does not include NUL */\n}\n\n#endif /* !HAVE_STRLCAT */\n"
  },
  {
    "path": "src/path.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <string.h>\n#include <unistd.h>\n#include <dirent.h>\n#include <sys/stat.h>\n#include <libgen.h>\n#include <assert.h>\n\n#include <ssh.h>\n#include <minmax.h>\n#include <fileops.h>\n#include <atomic.h>\n#include <path.h>\n#include <strerrno.h>\n#include <print.h>\n\n/* paths of copy source resoltion */\nstatic char *resolve_dst_path(const char *src_file_path, struct path_resolve_args *a)\n{\n\tchar copy[PATH_MAX + 1], dst_file_path[PATH_MAX + 1];\n\tchar *prefix;\n\tint offset;\n\tint ret;\n\n\tstrncpy(copy, a->src_path, PATH_MAX);\n\tprefix = dirname(copy);\n\tif (!prefix) {\n\t\tpr_err(\"dirname: %s\", strerrno());\n\t\treturn NULL;\n\t}\n\n\toffset = strlen(prefix) + 1;\n\tif (strlen(prefix) == 1) { /* corner cases */\n\t\tswitch (prefix[0]) {\n\t\tcase '.':\n\t\t\toffset = 0;\n\t\t\tbreak;\n\t\tcase '/':\n\t\t\toffset = 1;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tif (!a->src_path_is_dir && !a->dst_path_is_dir) {\n\t\t/* src path is file. dst path is (1) file, or (2) does not exist.\n                 * In the second case, we need to put src under the dst.\n                 */\n\t\tif (a->dst_path_should_dir)\n\t\t\tret = snprintf(dst_file_path, PATH_MAX, \"%s/%s\", a->dst_path,\n\t\t\t\t       a->src_path + offset);\n\t\telse\n\t\t\tret = snprintf(dst_file_path, PATH_MAX, \"%s\", a->dst_path);\n\t}\n\n\t/* src is file, and dst is dir */\n\tif (!a->src_path_is_dir && a->dst_path_is_dir)\n\t\tret = snprintf(dst_file_path, PATH_MAX, \"%s/%s\", a->dst_path,\n\t\t\t       a->src_path + offset);\n\n\t/* both are directory */\n\tif (a->src_path_is_dir && a->dst_path_is_dir)\n\t\tret = snprintf(dst_file_path, PATH_MAX, \"%s/%s\", a->dst_path,\n\t\t\t       src_file_path + offset);\n\n\t/* dst path does not exist. 
change dir name to dst_path */\n\tif (a->src_path_is_dir && !a->dst_path_is_dir)\n\t\tret = snprintf(dst_file_path, PATH_MAX, \"%s/%s\", a->dst_path,\n\t\t\t       src_file_path + strlen(a->src_path) + 1);\n\n\tif (ret >= PATH_MAX) {\n\t\tpr_warn(\"Too long path: %s\", dst_file_path);\n\t\treturn NULL;\n\t}\n\n\tpr_debug(\"file: %s -> %s\", src_file_path, dst_file_path);\n\n\treturn strndup(dst_file_path, PATH_MAX);\n}\n\n/* chunk preparation */\nstruct chunk *alloc_chunk(struct path *p, size_t off, size_t len)\n{\n\tstruct chunk *c;\n\n\tif (!(c = malloc(sizeof(*c)))) {\n\t\tpr_err(\"malloc %s\", strerrno());\n\t\treturn NULL;\n\t}\n\tmemset(c, 0, sizeof(*c));\n\n\tc->p = p;\n\tc->off = off;\n\tc->len = len;\n\tc->state = CHUNK_STATE_INIT;\n\trefcnt_inc(&p->refcnt);\n\treturn c;\n}\n\nstatic int resolve_chunk(struct path *p, size_t size, struct path_resolve_args *a)\n{\n\tstruct chunk *c;\n\tsize_t chunk_sz, off, len;\n\tsize_t remaind;\n\n\tif (a->max_chunk_sz)\n\t\tchunk_sz = a->max_chunk_sz;\n\telse {\n\t\tchunk_sz = (size / (a->nr_conn * 4)) & a->chunk_align;\n\t\tif (chunk_sz <= a->min_chunk_sz)\n\t\t\tchunk_sz = a->min_chunk_sz;\n\t}\n\n\t/* for (size = size; size > 0;) does not create a file (chunk)\n         * when file size is 0. This do {} while (remaind > 0) creates\n         * just open/close a 0-byte file.\n         */\n\tremaind = size;\n\tdo {\n\t\toff = size - remaind;\n\t\tlen = remaind < chunk_sz ? 
remaind : chunk_sz;\n\t\tc = alloc_chunk(p, off, len);\n\t\tif (!c)\n\t\t\treturn -1;\n\n\t\tremaind -= len;\n\t\tif (pool_push_lock(a->chunk_pool, c) < 0) {\n\t\t\tpr_err(\"pool_push_lock: %s\", strerrno());\n\t\t\treturn -1;\n\t\t}\n\t} while (remaind > 0);\n\n\treturn 0;\n}\n\nvoid free_path(struct path *p)\n{\n\tif (p->path)\n\t\tfree(p->path);\n\tif (p->dst_path)\n\t\tfree(p->dst_path);\n\tfree(p);\n}\n\nstruct path *alloc_path(char *path, char *dst_path)\n{\n\tstruct path *p;\n\n\tif (!(p = malloc(sizeof(*p)))) {\n\t\tpr_err(\"malloc: %s\", strerrno());\n\t\treturn NULL;\n\t}\n\tmemset(p, 0, sizeof(*p));\n\n\tp->path = path;\n\tp->dst_path = dst_path;\n\tp->state = FILE_STATE_INIT;\n\tlock_init(&p->lock);\n\tp->data = 0;\n\n\treturn p;\n}\n\nstatic int append_path(sftp_session sftp, const char *path, struct stat st,\n\t\t       struct path_resolve_args *a)\n{\n\tstruct path *p;\n\tchar *src, *dst;\n\n\tif (!(src = strdup(path))) {\n\t\tpr_err(\"strdup: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\tif (!(dst = resolve_dst_path(src, a))) {\n\t\tfree(src);\n\t\treturn -1;\n\t}\n\n\tif (!(p = alloc_path(src, dst)))\n\t\treturn -1;\n\n\tif (resolve_chunk(p, st.st_size, a) < 0)\n\t\treturn -1; /* XXX: do not free path becuase chunk(s)\n\t\t\t    * was added to chunk pool already */\n\n\tif (pool_push_lock(a->path_pool, p) < 0) {\n\t\tpr_err(\"pool_push: %s\", strerrno());\n\t\tgoto free_out;\n\t}\n\n\t*a->total_bytes += st.st_size;\n\n\treturn 0;\n\nfree_out:\n\tfree_path(p);\n\treturn -1;\n}\n\nstatic bool check_path_should_skip(const char *path)\n{\n\tint len = strlen(path);\n\tif ((len == 1 && strncmp(path, \".\", 1) == 0) ||\n\t    (len == 2 && strncmp(path, \"..\", 2) == 0)) {\n\t\treturn true;\n\t}\n\treturn false;\n}\n\nstatic int walk_path_recursive(sftp_session sftp, const char *path,\n\t\t\t       struct path_resolve_args *a)\n{\n\tchar next_path[PATH_MAX + 1];\n\tstruct dirent *e;\n\tstruct stat st;\n\tMDIR *d;\n\tint ret;\n\n\tif (mscp_stat(path, &st, 
sftp) < 0) {\n\t\tpr_err(\"stat: %s: %s\", path, strerrno());\n\t\treturn -1;\n\t}\n\n\tif (S_ISREG(st.st_mode)) {\n\t\t/* this path is regular file. it is to be copied */\n\t\treturn append_path(sftp, path, st, a);\n\t}\n\n\tif (!S_ISDIR(st.st_mode))\n\t\treturn 0; /* not a regular file and not a directory, skip it. */\n\n\t/* ok, this path is a directory. walk through it. */\n\tif (!(d = mscp_opendir(path, sftp))) {\n\t\tpr_err(\"opendir: %s: %s\", path, strerrno());\n\t\treturn -1;\n\t}\n\n\tfor (e = mscp_readdir(d); e; e = mscp_readdir(d)) {\n\t\tif (check_path_should_skip(e->d_name))\n\t\t\tcontinue;\n\n\t\tret = snprintf(next_path, PATH_MAX, \"%s/%s\", path, e->d_name);\n\t\tif (ret >= PATH_MAX) {\n\t\t\tpr_warn(\"Too long path: %s/%s\", path, e->d_name);\n\t\t\tcontinue;\n\t\t}\n\n\t\twalk_path_recursive(sftp, next_path, a);\n\t\t/* do not stop even when walk_path_recursive returns\n\t\t * -1 due to an unreadable file. go to a next\n\t\t * file. Thus, do not pass error messages via\n\t\t * priv_set_err() under walk_path_recursive.  Print\n\t\t * the error with pr_err immediately.\n\t\t */\n\t}\n\n\tmscp_closedir(d);\n\n\treturn 0;\n}\n\nint walk_src_path(sftp_session src_sftp, const char *src_path,\n\t\t  struct path_resolve_args *a)\n{\n\treturn walk_path_recursive(src_sftp, src_path, a);\n}\n\n/* based on\n * https://stackoverflow.com/questions/2336242/recursive-mkdir-system-call-on-unix */\nstatic int touch_dst_path(struct path *p, sftp_session sftp)\n{\n\t/* XXX: should reflect the permission of the original directory? */\n\tmode_t mode = S_IRWXU | S_IRWXG | S_IRWXO;\n\tstruct stat st;\n\tchar path[PATH_MAX];\n\tchar *needle;\n\tint ret;\n\tmf *f;\n\n\tstrncpy(path, p->dst_path, sizeof(path));\n\n\t/* mkdir -p.\n\t * XXX: this may be slow when dst is the remote side. need speed-up. 
*/\n\tfor (needle = strchr(path + 1, '/'); needle; needle = strchr(needle + 1, '/')) {\n\t\t*needle = '\\0';\n\n\t\tif (mscp_stat(path, &st, sftp) == 0) {\n\t\t\tif (S_ISDIR(st.st_mode))\n\t\t\t\tgoto next; /* directory exists. go deeper */\n\t\t\telse {\n\t\t\t\tpriv_set_errv(\"mscp_stat %s: not a directory\", path);\n\t\t\t\treturn -1; /* path exists, but not directory. */\n\t\t\t}\n\t\t}\n\n\t\tif (errno == ENOENT) {\n\t\t\t/* no file on the path. create directory. */\n\t\t\tif (mscp_mkdir(path, mode, sftp) < 0) {\n\t\t\t\tpriv_set_errv(\"mscp_mkdir %s: %s\", path, strerrno());\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\nnext:\n\t\t*needle = '/';\n\t}\n\n\t/* Do not set O_TRUNC here. Instead, do mscp_setstat() at the\n\t * end. see https://bugzilla.mindrot.org/show_bug.cgi?id=3431 */\n\tf = mscp_open(p->dst_path, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR, sftp);\n\tif (!f) {\n\t\tpriv_set_errv(\"mscp_open %s: %s\", p->dst_path, strerrno());\n\t\treturn -1;\n\t}\n\n\tmscp_close(f);\n\n\treturn 0;\n}\n\nstatic int prepare_dst_path(struct path *p, sftp_session dst_sftp)\n{\n\tint ret = 0;\n\n\tLOCK_ACQUIRE(&p->lock);\n\tif (p->state == FILE_STATE_INIT) {\n\t\tif (touch_dst_path(p, dst_sftp) < 0) {\n\t\t\tret = -1;\n\t\t\tgoto out;\n\t\t}\n\t\tp->state = FILE_STATE_OPENED;\n\t\tpr_info(\"copy start: %s\", p->path);\n\t}\n\nout:\n\tLOCK_RELEASE();\n\treturn ret;\n}\n\n/* functions for copy */\n\nstatic ssize_t read_to_buf(void *ptr, size_t len, void *userdata)\n{\n\tint fd = *((int *)userdata);\n\treturn read(fd, ptr, len);\n}\n\nstatic int copy_chunk_l2r(struct chunk *c, int fd, sftp_file sf, int nr_ahead, int buf_sz,\n\t\t\t  struct bwlimit *bw, size_t *counter)\n{\n\tssize_t read_bytes, remaind, thrown;\n\tint idx, ret;\n\tstruct {\n\t\tuint32_t id;\n\t\tssize_t len;\n\t} reqs[nr_ahead];\n\n\tif (c->len == 0)\n\t\treturn 0;\n\n\tremaind = thrown = c->len;\n\tfor (idx = 0; idx < nr_ahead && thrown > 0; idx++) {\n\t\treqs[idx].len = min(thrown, buf_sz);\n\t\treqs[idx].len = 
sftp_async_write(sf, read_to_buf, reqs[idx].len, &fd,\n\t\t\t\t\t\t &reqs[idx].id);\n\t\tif (reqs[idx].len < 0) {\n\t\t\tpriv_set_errv(\"sftp_async_write: %s\",\n\t\t\t\t      sftp_get_ssh_error(sf->sftp));\n\t\t\treturn -1;\n\t\t}\n\t\tthrown -= reqs[idx].len;\n\t\tbwlimit_wait(bw, reqs[idx].len);\n\t}\n\n\tfor (idx = 0; remaind > 0; idx = (idx + 1) % nr_ahead) {\n\t\tret = sftp_async_write_end(sf, reqs[idx].id, 1);\n\t\tif (ret != SSH_OK) {\n\t\t\tpriv_set_errv(\"sftp_async_write_end: %s\",\n\t\t\t\t      sftp_get_ssh_error(sf->sftp));\n\t\t\treturn -1;\n\t\t}\n\n\t\t*counter += reqs[idx].len;\n\t\tremaind -= reqs[idx].len;\n\n\t\tif (remaind <= 0)\n\t\t\tbreak;\n\n\t\tif (thrown <= 0)\n\t\t\tcontinue;\n\n\t\treqs[idx].len = min(thrown, buf_sz);\n\t\treqs[idx].len = sftp_async_write(sf, read_to_buf, reqs[idx].len, &fd,\n\t\t\t\t\t\t &reqs[idx].id);\n\t\tif (reqs[idx].len < 0) {\n\t\t\tpriv_set_errv(\"sftp_async_write: %s\",\n\t\t\t\t      sftp_get_ssh_error(sf->sftp));\n\t\t\treturn -1;\n\t\t}\n\t\tthrown -= reqs[idx].len;\n\t\tbwlimit_wait(bw, reqs[idx].len);\n\t}\n\n\tif (remaind < 0) {\n\t\tpriv_set_errv(\"invalid remaind bytes %ld. 
\"\n\t\t\t      \"last async_write_end bytes %lu.\",\n\t\t\t      remaind, reqs[idx].len);\n\t\treturn -1;\n\t}\n\n\treturn 0;\n}\n\nstatic int copy_chunk_r2l(struct chunk *c, sftp_file sf, int fd,\n\t\t\t   int nr_ahead, int buf_sz,\n\t\t\t   struct bwlimit *bw, size_t *counter)\n{\n\tssize_t read_bytes, write_bytes, remain, thrown, len, requested;\n\tsftp_aio reqs[nr_ahead];\n\tchar buf[buf_sz];\n\tint i;\n\n\tif (c->len == 0)\n\t\treturn 0;\n\n\tremain = thrown = c->len;\n\n\tfor (i = 0; i < nr_ahead && thrown > 0; i++) {\n\t\tlen = min(thrown, sizeof(buf));\n\t\trequested = sftp_aio_begin_read(sf, len, &reqs[i]);\n\t\tif (requested == SSH_ERROR) {\n\t\t\tpriv_set_errv(\"sftp_aio_begin_read: %d\",\n\t\t\t\t      sftp_get_error(sf->sftp));\n\t\t\treturn -1;\n\t\t}\n\t\tthrown -= requested;\n\t\tbwlimit_wait(bw, requested);\n\t}\n\n\tfor (i = 0; remain > 0; i = (i + 1) % nr_ahead) {\n\t\tread_bytes = sftp_aio_wait_read(&reqs[i], buf, sizeof(buf));\n\t\tif (read_bytes == SSH_ERROR) {\n\t\t\tpriv_set_errv(\"sftp_aio_wait_read: %d\",\n\t\t\t\t      sftp_get_error(sf->sftp));\n\t\t\treturn -1;\n\t\t}\n\n\t\tif (thrown > 0) {\n\t\t\tlen = min(thrown, sizeof(buf));\n\t\t\trequested = sftp_aio_begin_read(sf, len, &reqs[i]);\n\t\t\tthrown -= requested;\n\t\t\tbwlimit_wait(bw, requested);\n\t\t}\n\n\t\twrite_bytes = write(fd, buf, read_bytes);\n\t\tif (write_bytes < 0) {\n\t\t\tpriv_set_errv(\"write: %s\", strerrno());\n\t\t\treturn -1;\n\t\t}\n\n\t\tif (write_bytes < read_bytes) {\n\t\t\tpriv_set_errv(\"failed to write full bytes\");\n\t\t\treturn -1;\n\t\t}\n\n\t\t*counter += write_bytes;\n\t\tremain -= write_bytes;\n\t}\n\n\tif (remain < 0) {\n\t\tpriv_set_errv(\"invalid remain bytes %ld. last async_read bytes %ld. 
\"\n\t\t\t      \"last write bytes %ld\",\n\t\t\t      remain, read_bytes, write_bytes);\n\t\treturn -1;\n\t}\n\n\treturn 0;\n}\n\nstatic int _copy_chunk(struct chunk *c, mf *s, mf *d, int nr_ahead, int buf_sz,\n\t\t       struct bwlimit *bw, size_t *counter)\n{\n\tif (s->local && d->remote) /* local to remote copy */\n\t\treturn copy_chunk_l2r(c, s->local, d->remote, nr_ahead, buf_sz, bw,\n\t\t\t\t      counter);\n\telse if (s->remote && d->local) /* remote to local copy */\n\t\treturn copy_chunk_r2l(c, s->remote, d->local, nr_ahead, buf_sz, bw,\n\t\t\t\t      counter);\n\n\tassert(false);\n\treturn -1; /* not reached */\n}\n\nint copy_chunk(struct chunk *c, sftp_session src_sftp, sftp_session dst_sftp,\n\t       int nr_ahead, int buf_sz, bool preserve_ts, struct bwlimit *bw,\n\t       size_t *counter)\n{\n\tmode_t mode;\n\tint flags;\n\tmf *s, *d;\n\tint ret;\n\n\tassert((src_sftp && !dst_sftp) || (!src_sftp && dst_sftp));\n\n\tif (prepare_dst_path(c->p, dst_sftp) < 0)\n\t\treturn -1;\n\n\t/* open src */\n\tflags = O_RDONLY;\n\tmode = S_IRUSR;\n\tif (!(s = mscp_open(c->p->path, flags, mode, src_sftp))) {\n\t\tpriv_set_errv(\"mscp_open: %s: %s\", c->p->path, strerrno());\n\t\treturn -1;\n\t}\n\tif (mscp_lseek(s, c->off) < 0) {\n\t\tpriv_set_errv(\"mscp_lseek: %s: %s\", c->p->path, strerrno());\n\t\treturn -1;\n\t}\n\n\t/* open dst */\n\tflags = O_WRONLY;\n\tmode = S_IRUSR | S_IWUSR;\n\tif (!(d = mscp_open(c->p->dst_path, flags, mode, dst_sftp))) {\n\t\tmscp_close(s);\n\t\tpriv_set_errv(\"mscp_open: %s: %s\", c->p->dst_path, strerrno());\n\t\treturn -1;\n\t}\n\tif (mscp_lseek(d, c->off) < 0) {\n\t\tpriv_set_errv(\"mscp_lseek: %s: %s\", c->p->dst_path, strerrno());\n\t\treturn -1;\n\t}\n\n\tc->state = CHUNK_STATE_COPING;\n\tpr_debug(\"copy chunk start: %s 0x%lx-0x%lx\", c->p->path, c->off, c->off + c->len);\n\n\tret = _copy_chunk(c, s, d, nr_ahead, buf_sz, bw, counter);\n\n\tpr_debug(\"copy chunk done: %s 0x%lx-0x%lx\", c->p->path, c->off, c->off + 
c->len);\n\n\tmscp_close(d);\n\tmscp_close(s);\n\tif (ret < 0)\n\t\treturn ret;\n\n\tif (refcnt_dec(&c->p->refcnt) == 0) {\n\t\tstruct stat st;\n\t\tc->p->state = FILE_STATE_DONE;\n\n\t\t/* sync stat */\n\t\tif (mscp_stat(c->p->path, &st, src_sftp) < 0) {\n\t\t\tpriv_set_errv(\"mscp_stat: %s: %s\", c->p->path, strerrno());\n\t\t\treturn -1;\n\t\t}\n\t\tif (mscp_setstat(c->p->dst_path, &st, preserve_ts, dst_sftp) < 0) {\n\t\t\tpriv_set_errv(\"mscp_setstat: %s: %s\", c->p->path, strerrno());\n\t\t\treturn -1;\n\t\t}\n\t\tpr_info(\"copy done: %s\", c->p->path);\n\t}\n\n\tif (ret == 0)\n\t\tc->state = CHUNK_STATE_DONE;\n\n\treturn ret;\n}\n"
  },
  {
    "path": "src/path.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _PATH_H_\n#define _PATH_H_\n\n#include <limits.h>\n#include <fcntl.h>\n#include <dirent.h>\n#include <sys/stat.h>\n#include <pool.h>\n#include <atomic.h>\n#include <ssh.h>\n#include <bwlimit.h>\n\nstruct path {\n\tchar *path; /* file path */\n\tchar *dst_path; /* copy dst path */\n\n\trefcnt refcnt; /* number of associated chunks */\n\tlock lock;\n\tint state;\n#define FILE_STATE_INIT 0\n#define FILE_STATE_OPENED 1\n#define FILE_STATE_DONE 2\n\n\tuint64_t data; /* used by other components, i.e., checkpoint */\n};\n\nstruct path *alloc_path(char *path, char *dst_path);\n\nstruct chunk {\n\tstruct path *p;\n\tsize_t off; /* offset of this chunk on the file on path p */\n\tsize_t len; /* length of this chunk */\n\tint state;\n#define CHUNK_STATE_INIT 0\n#define CHUNK_STATE_COPING 1\n#define CHUNK_STATE_DONE 2\n};\n\nstruct chunk *alloc_chunk(struct path *p, size_t off, size_t len);\n\nstruct path_resolve_args {\n\tsize_t *total_bytes;\n\n\t/* args to resolve src path to dst path */\n\tconst char *src_path;\n\tconst char *dst_path;\n\tbool src_path_is_dir;\n\tbool dst_path_is_dir;\n\tbool dst_path_should_dir;\n\n\t/* args to resolve chunks for a path */\n\tpool *path_pool;\n\tpool *chunk_pool;\n\tint nr_conn;\n\tsize_t min_chunk_sz;\n\tsize_t max_chunk_sz;\n\tsize_t chunk_align;\n};\n\n/* walk src_path recursivly and fill a->path_pool with found files */\nint walk_src_path(sftp_session src_sftp, const char *src_path,\n\t\t  struct path_resolve_args *a);\n\n/* free struct path */\nvoid free_path(struct path *p);\n\n/* copy a chunk. either src_sftp or dst_sftp is not null, and another is null */\nint copy_chunk(struct chunk *c, sftp_session src_sftp, sftp_session dst_sftp,\n\t       int nr_ahead, int buf_sz, bool preserve_ts, struct bwlimit *bw,\n\t       size_t *counter);\n\n#endif /* _PATH_H_ */\n"
  },
  {
    "path": "src/platform.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifdef __APPLE__\n#include <stdlib.h>\n#include <sys/types.h>\n#include <sys/time.h>\n#include <sys/sysctl.h>\n#elif linux\n#define _GNU_SOURCE\n#include <stdlib.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <sched.h>\n#elif __FreeBSD__\n#include <stdlib.h>\n#include <unistd.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <pthread_np.h>\n#else\n#error unsupported platform\n#endif\n\n#include <config.h>\n#include <platform.h>\n#include <strerrno.h>\n#include <print.h>\n\n#ifdef __APPLE__\nint nr_cpus()\n{\n\tint n;\n\tsize_t size = sizeof(n);\n\n\tif (sysctlbyname(\"machdep.cpu.core_count\", &n, &size, NULL, 0) != 0) {\n\t\tpriv_set_errv(\"failed to get number of cpu cores: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\treturn n;\n}\n\nint set_thread_affinity(pthread_t tid, int core)\n{\n\tpr_warn(\"setting thread afinity is not implemented on apple\");\n\treturn 0;\n}\n\nint setutimes(const char *path, struct timespec atime, struct timespec mtime)\n{\n\tstruct timeval tv[2] = {\n\t\t{\n\t\t\t.tv_sec = atime.tv_sec,\n\t\t\t.tv_usec = atime.tv_nsec * 1000,\n\t\t},\n\t\t{\n\t\t\t.tv_sec = mtime.tv_sec,\n\t\t\t.tv_usec = mtime.tv_nsec * 1000,\n\t\t},\n\t};\n\treturn utimes(path, tv);\n}\n\nstatic void random_string(char *buf, size_t size)\n{\n\tchar chars[] = \"abcdefhijklmnopqrstuvwxyz1234567890\";\n\tint n, x;\n\n\tfor (n = 0; n < size - 1; n++) {\n\t\tx = arc4random() % (sizeof(chars) - 1);\n\t\tbuf[n] = chars[x];\n\t}\n\tbuf[size - 1] = '\\0';\n}\n\nsem_t *sem_create(int value)\n{\n\tchar sem_name[30] = \"mscp-\";\n\tsem_t *sem;\n\tint n;\n\n\tn = strlen(sem_name);\n\trandom_string(sem_name + n, sizeof(sem_name) - n - 1);\n\tif ((sem = sem_open(sem_name, O_CREAT, 600, value)) == SEM_FAILED)\n\t\treturn NULL;\n\n\treturn sem;\n}\n\nint sem_release(sem_t *sem)\n{\n\treturn sem_close(sem);\n}\n\n#endif\n\n#ifdef linux\nint nr_cpus()\n{\n\tcpu_set_t 
cpu_set;\n\tif (sched_getaffinity(0, sizeof(cpu_set_t), &cpu_set) == 0)\n\t\treturn CPU_COUNT(&cpu_set);\n\treturn -1;\n}\n#endif\n\n#ifdef __FreeBSD__\nint nr_cpus()\n{\n\tlong nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);\n\treturn nr_cpus;\n}\n#endif\n\n#if defined(linux) || defined(__FreeBSD__)\n\nint set_thread_affinity(pthread_t tid, int core)\n{\n\tcpu_set_t target_cpu_set;\n\tint ret = 0;\n\n\tCPU_ZERO(&target_cpu_set);\n\tCPU_SET(core, &target_cpu_set);\n\tret = pthread_setaffinity_np(tid, sizeof(target_cpu_set), &target_cpu_set);\n\tif (ret < 0)\n\t\tpriv_set_errv(\"failed to set thread/cpu affinity for core %d: %s\", core,\n\t\t\t      strerrno());\n\treturn ret;\n}\n\nint setutimes(const char *path, struct timespec atime, struct timespec mtime)\n{\n\tstruct timespec ts[2] = { atime, mtime };\n\tint fd = open(path, O_WRONLY);\n\tint ret;\n\n\tif (fd < 0)\n\t\treturn -1;\n\tret = futimens(fd, ts);\n\tclose(fd);\n\treturn ret;\n}\n\nsem_t *sem_create(int value)\n{\n\tsem_t *sem;\n\n\tif ((sem = malloc(sizeof(*sem))) == NULL)\n\t\treturn NULL;\n\n\tif (sem_init(sem, 0, value) < 0) {\n\t\tfree(sem);\n\t\treturn NULL;\n\t}\n\n\treturn sem;\n}\n\nint sem_release(sem_t *sem)\n{\n\tfree(sem);\n\treturn 0;\n}\n\n#endif\n"
  },
  {
    "path": "src/platform.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _PLATFORM_H_\n#define _PLATFORM_H_\n\n#include <config.h>\n\n#include <pthread.h>\n#include <semaphore.h>\n#include <stdint.h>\n\nint nr_cpus(void);\nint set_thread_affinity(pthread_t tid, int core);\nint setutimes(const char *path, struct timespec atime, struct timespec mtime);\n\n/*\n * macOS does not support sem_init(). macOS (seems to) releases the\n * named semaphore when associated mscp process finished. In linux,\n * program (seems to) need to release named semaphore in /dev/shm by\n * sem_unlink() explicitly. So, using sem_init() (unnamed semaphore)\n * in linux and using sem_open() (named semaphore) in macOS without\n * sem_unlink() are reasonable (?).\n */\nsem_t *sem_create(int value);\nint sem_release(sem_t *sem);\n\n#ifdef HAVE_HTONLL\n#include <arpa/inet.h> /* Apple has htonll and ntohll in arpa/inet.h */\n#endif\n\n/* copied from libssh: libssh/include/libssh/priv.h */\n#ifndef HAVE_HTONLL\n#ifdef WORDS_BIGENDIAN\n#define htonll(x) (x)\n#else\n#define htonll(x) (((uint64_t)htonl((x)&0xFFFFFFFF) << 32) | htonl((x) >> 32))\n#endif\n#endif\n\n#ifndef HAVE_NTOHLL\n#ifdef WORDS_BIGENDIAN\n#define ntohll(x) (x)\n#else\n#define ntohll(x) (((uint64_t)ntohl((x)&0xFFFFFFFF) << 32) | ntohl((x) >> 32))\n#endif\n#endif\n\n#endif /* _PLATFORM_H_ */\n"
  },
  {
    "path": "src/pool.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <string.h>\n#include <stdlib.h>\n#include <pool.h>\n\n#define DEFAULT_START_SIZE 16\n\npool *pool_new(void)\n{\n\tpool *p;\n\tp = malloc(sizeof(*p));\n\tif (!p)\n\t\treturn NULL;\n\tmemset(p, 0, sizeof(*p));\n\n\tp->array = calloc(DEFAULT_START_SIZE, sizeof(void *));\n\tif (!p->array) {\n\t\tfree(p);\n\t\treturn NULL;\n\t}\n\n\tp->len = DEFAULT_START_SIZE;\n\tp->num = 0;\n\tlock_init(&p->lock);\n\treturn p;\n}\n\nvoid pool_free(pool *p)\n{\n\tif (p->array) {\n\t\tfree(p->array);\n\t\tp->array = NULL;\n\t}\n\tfree(p);\n}\n\nvoid pool_zeroize(pool *p, pool_map_f f)\n{\n\tvoid *v;\n\tpool_iter_for_each(p, v) {\n\t\tf(v);\n\t}\n\tp->num = 0;\n}\n\nvoid pool_destroy(pool *p, pool_map_f f)\n{\n\tpool_zeroize(p, f);\n\tpool_free(p);\n}\n\nint pool_push(pool *p, void *v)\n{\n\tif (p->num == p->len) {\n\t\t/* expand array */\n\t\tsize_t newlen = p->len * 2;\n\t\tvoid *new = realloc(p->array, newlen * sizeof(void *));\n\t\tif (new == NULL)\n\t\t\treturn -1;\n\t\tp->len = newlen;\n\t\tp->array = new;\n\t}\n\tp->array[p->num] = v;\n\t__sync_synchronize();\n\tp->num++;\n\treturn 0;\n}\n\nint pool_push_lock(pool *p, void *v)\n{\n\tint ret = -1;\n\tpool_lock(p);\n\tret = pool_push(p, v);\n\tpool_unlock(p);\n\treturn ret;\n}\n\nvoid *pool_pop(pool *p)\n{\n\treturn p->num == 0 ? NULL : p->array[--p->num];\n}\n\nvoid *pool_pop_lock(pool *p)\n{\n\tvoid *v;\n\tpool_lock(p);\n\tv = pool_pop(p);\n\tpool_unlock(p);\n\treturn v;\n}\n\nvoid *pool_get(pool *p, unsigned int idx)\n{\n\treturn p->num <= idx ? 
NULL : p->array[idx];\n}\n\nvoid *pool_iter_next(pool *p)\n{\n\tif (p->num <= p->idx)\n\t\treturn NULL;\n\n\tvoid *v = p->array[p->idx];\n\tp->idx++;\n\treturn v;\n}\n\nvoid *pool_iter_next_lock(pool *p)\n{\n\tvoid *v = NULL;\n\tpool_lock(p);\n\tv = pool_iter_next(p);\n\tpool_unlock(p);\n\treturn v;\n}\n\nbool pool_iter_has_next_lock(pool *p)\n{\n\tbool next_exist;\n\tpool_lock(p);\n\tnext_exist = (p->idx < p->num);\n\tpool_unlock(p);\n\treturn next_exist;\n}\n"
  },
  {
    "path": "src/pool.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _POOL_H_\n#define _POOL_H_\n\n#include <stdbool.h>\n#include <stddef.h>\n\n#include <atomic.h>\n\n/* A pool like a stack with an iterator walking from the bottom to the\n * top. The memory foot print for a pool never shrinks. Thus this is\n * not suitable for long-term uses. */\n\nstruct pool_struct {\n\tvoid **array;\n\tsize_t len; /* length of array */\n\tsize_t num; /* number of items in the array */\n\tsize_t idx; /* index used dy iter */\n\tlock lock;\n};\n\ntypedef struct pool_struct pool;\n\n/* allocate a new pool */\npool *pool_new(void);\n\n/* func type applied to each item in a pool */\ntypedef void (*pool_map_f)(void *v);\n\n/* apply f, which free an item, to all items and set num to 0 */\nvoid pool_zeroize(pool *p, pool_map_f f);\n\n/* free pool->array and pool */\nvoid pool_free(pool *p);\n\n/* free pool->array and pool after applying f to all items in p->array */\nvoid pool_destroy(pool *p, pool_map_f f);\n\n#define pool_lock(p) LOCK_ACQUIRE(&(p->lock))\n#define pool_unlock(p) LOCK_RELEASE()\n\n/*\n * pool_push() pushes *v to pool *p. pool_push_lock() does this while\n * locking *p.\n */\nint pool_push(pool *p, void *v);\nint pool_push_lock(pool *p, void *v);\n\n/*\n * pool_pop() pops the last *v pushed to *p. pool_pop_lock() does this\n * while locking *p.\n */\nvoid *pool_pop(pool *p);\nvoid *pool_pop_lock(pool *p);\n\n/* pool_get() returns value indexed by idx */\nvoid *pool_get(pool *p, unsigned int idx);\n\n#define pool_size(p) ((p)->num)\n#define pool_is_empty(p) (pool_size(p) == 0)\n\n/*\n * pool->idx indicates next *v in an iteration. This has two\n * use-cases.\n *\n * (1) A simple list: just a single thread has a pool, and the thread\n * can call pool_iter_for_each() for the pool (not thread safe).\n *\n * (2) A thread-safe queue: one thread initializes the iterator for a\n * pool by pool_iter_init(). 
Then, multiple threads get a next *v\n * concurrently by pool_iter_next_lock(), which means dequeuing. At\n * this time, other thread can add new *v by pool_push_lock(), which\n * means enqueuing. During this, other threads must not intercept the\n * pool by pool_iter_* functions.\n */\n\n#define pool_iter_init(p) (p->idx = 0)\nvoid *pool_iter_next(pool *p);\nvoid *pool_iter_next_lock(pool *p);\n\n/* pool_iter_has_next_lock() returns true if pool_iter_next(_lock)\n * function will retrun a next value, otherwise false, which means\n * there is no more values in this iteration. */\nbool pool_iter_has_next_lock(pool *p);\n\n#define pool_iter_for_each(p, v) \\\n\tpool_iter_init(p);       \\\n\tfor (v = pool_iter_next(p); v != NULL; v = pool_iter_next(p))\n\n#define pool_for_each(p, v, idx) \\\n\tidx = 0;                 \\\n\tfor (v = pool_get(p, idx); v != NULL; v = pool_get(p, ++idx))\n\n#endif /* _POOL_H_ */\n"
  },
  {
    "path": "src/print.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n\n#include <print.h>\n\n/* message print functions */\nstatic int __print_severity = MSCP_SEVERITY_WARN;\n\nvoid set_print_severity(int serverity)\n{\n\tif (serverity < 0)\n\t\t__print_severity = -1; /* no print */\n        else\n                __print_severity = serverity;\n}\n\nint get_print_severity()\n{\n\treturn __print_severity;\n}\n"
  },
  {
    "path": "src/print.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _PRINT_H_\n#define _PRINT_H_\n\n#include <libgen.h>\n#include <stdio.h>\n#include <errno.h>\n#include <string.h>\n\n#include <mscp.h>\n\n/* message print. printed messages are passed to application via msg_fd */\nvoid set_print_severity(int severity);\nint get_print_severity();\n\n#define __print(fp, severity, fmt, ...)                                  \\\n\tdo {                                                             \\\n\t\tif (severity <= get_print_severity()) {                  \\\n\t\t\tfprintf(fp, \"\\r\\033[K\" fmt \"\\n\", ##__VA_ARGS__); \\\n\t\t\tfflush(fp);                                      \\\n\t\t}                                                        \\\n\t} while (0)\n\n#define pr_err(fmt, ...) __print(stderr, MSCP_SEVERITY_ERR, fmt, ##__VA_ARGS__)\n#define pr_warn(fmt, ...) __print(stderr, MSCP_SEVERITY_WARN, fmt, ##__VA_ARGS__)\n#define pr_notice(fmt, ...) __print(stdout, MSCP_SEVERITY_NOTICE, fmt, ##__VA_ARGS__)\n#define pr_info(fmt, ...) __print(stdout, MSCP_SEVERITY_INFO, fmt, ##__VA_ARGS__)\n#define pr_debug(fmt, ...) __print(stdout, MSCP_SEVERITY_DEBUG, fmt, ##__VA_ARGS__)\n\n#endif /* _PRINT_H_ */\n"
  },
  {
    "path": "src/ssh.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#include <stdio.h>\n#include <string.h>\n#include <unistd.h>\n#include <stdlib.h>\n\n#include <ssh.h>\n#include <mscp.h>\n#include <strerrno.h>\n\n#include \"libssh/callbacks.h\"\n#include \"libssh/options.h\"\n\nstatic int ssh_verify_known_hosts(ssh_session session);\nstatic int ssh_authenticate_kbdint(ssh_session session);\n\nstatic int ssh_set_opts(ssh_session ssh, struct mscp_ssh_opts *opts)\n{\n\tssh_set_log_level(opts->debug_level);\n\n\tif (opts->login_name &&\n\t    ssh_options_set(ssh, SSH_OPTIONS_USER, opts->login_name) < 0) {\n\t\tpriv_set_errv(\"failed to set login name\");\n\t\treturn -1;\n\t}\n\n\tif (opts->port && ssh_options_set(ssh, SSH_OPTIONS_PORT_STR, opts->port) < 0) {\n\t\tpriv_set_errv(\"failed to set port number\");\n\t\treturn -1;\n\t}\n\n\tif (opts->ai_family &&\n\t    ssh_options_set(ssh, SSH_OPTIONS_AI_FAMILY, &opts->ai_family) < 0) {\n\t\tpriv_set_errv(\"failed to set address family\");\n\t\treturn -1;\n\t}\n\n\tif (opts->identity &&\n\t    ssh_options_set(ssh, SSH_OPTIONS_IDENTITY, opts->identity) < 0) {\n\t\tpriv_set_errv(\"failed to set identity\");\n\t\treturn -1;\n\t}\n\n\tif (opts->cipher) {\n\t\tif (ssh_options_set(ssh, SSH_OPTIONS_CIPHERS_C_S, opts->cipher) < 0) {\n\t\t\tpriv_set_errv(\"failed to set cipher for client to server\");\n\t\t\treturn -1;\n\t\t}\n\t\tif (ssh_options_set(ssh, SSH_OPTIONS_CIPHERS_S_C, opts->cipher) < 0) {\n\t\t\tpriv_set_errv(\"failed to set cipher for server to client\");\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (opts->hmac) {\n\t\tif (ssh_options_set(ssh, SSH_OPTIONS_HMAC_C_S, opts->hmac) < 0) {\n\t\t\tpriv_set_errv(\"failed to set hmac for client to server\");\n\t\t\treturn -1;\n\t\t}\n\t\tif (ssh_options_set(ssh, SSH_OPTIONS_HMAC_S_C, opts->hmac) < 0) {\n\t\t\tpriv_set_errv(\"failed to set hmac for server to client\");\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (opts->compress &&\n\t    ssh_options_set(ssh, SSH_OPTIONS_COMPRESSION, opts->compress) 
< 0) {\n\t\tpriv_set_errv(\"failed to enable ssh compression\");\n\t\treturn -1;\n\t}\n\n\tif (opts->ccalgo && ssh_options_set(ssh, SSH_OPTIONS_CCALGO, opts->ccalgo) < 0) {\n\t\tpriv_set_errv(\"failed to set cclago\");\n\t\treturn -1;\n\t}\n\n\t/* if NOT specified to enable Nagle's algorithm, disable it (set TCP_NODELAY) */\n\tif (!opts->enable_nagle) {\n\t\tint v = 1;\n\t\tif (ssh_options_set(ssh, SSH_OPTIONS_NODELAY, &v) < 0) {\n\t\t\tpriv_set_errv(\"failed to set TCP_NODELAY\");\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (opts->config && ssh_options_parse_config(ssh, opts->config) < 0) {\n\t\tpriv_set_errv(\"failed to parse ssh_config: %s\", opts->config);\n\t\treturn -1;\n\t}\n\n\tif (opts->proxyjump) {\n\t\tchar buf[256];\n\t\tmemset(buf, 0, sizeof(buf));\n\t\tsnprintf(buf, sizeof(buf), \"proxyjump=%s\", opts->proxyjump);\n\t\tif (ssh_config_parse_string(ssh, buf) != SSH_OK) {\n\t\t\tpriv_set_errv(\"failed to set ssh option: %s\", buf);\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tif (opts->options) {\n\t\tint n;\n\t\tfor (n = 0; opts->options[n]; n++) {\n\t\t\tif (ssh_config_parse_string(ssh, opts->options[n]) != SSH_OK) {\n\t\t\t\tpriv_set_errv(\"failed to set ssh option: %s\",\n\t\t\t\t\t      opts->options[n]);\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0;\n}\n\nstatic int ssh_authenticate(ssh_session ssh, struct mscp_ssh_opts *opts)\n{\n\tstatic int auth_bit_mask;\n\tint ret;\n\n        if (auth_bit_mask == 0) {\n                /* the first authentication attempt. try none auth to\n                 * get available auth methods. */\n                if (ssh_userauth_none(ssh, NULL) == SSH_AUTH_SUCCESS)\n                        return 0;\n\n                /* save auth_bit_mask for further authentications */\n                auth_bit_mask = ssh_userauth_list(ssh, NULL);\n        }\n\n\tif (auth_bit_mask & SSH_AUTH_METHOD_PUBLICKEY) {\n                char *p = opts->passphrase ? 
opts->passphrase : NULL;\n                if (ssh_userauth_publickey_auto(ssh, NULL, p) == SSH_AUTH_SUCCESS)\n                        return 0;\n        }\n\n\tif (auth_bit_mask & SSH_AUTH_METHOD_PASSWORD) {\n\t\tif (!opts->password) {\n\t\t\tchar buf[128] = {};\n\t\t\tif (ssh_getpass(\"Password: \", buf, sizeof(buf), 0, 0) < 0) {\n\t\t\t\tpriv_set_errv(\"ssh_getpass failed\");\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t\tif (!(opts->password = strndup(buf, sizeof(buf)))) {\n\t\t\t\tpriv_set_errv(\"strndup: %s\", strerrno());\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\n\t\tif (ssh_userauth_password(ssh, NULL, opts->password) == SSH_AUTH_SUCCESS)\n\t\t\treturn 0;\n\t}\n\n\tauth_bit_mask = ssh_userauth_list(ssh, NULL);\n\tif (auth_bit_mask & SSH_AUTH_METHOD_INTERACTIVE) {\n\t\tif (ssh_authenticate_kbdint(ssh) == SSH_AUTH_SUCCESS)\n\t\t\treturn 0;\n\t}\n\n\treturn -1;\n}\n\nstatic int ssh_cache_passphrase(const char *prompt, char *buf, size_t len, int echo,\n\t\t\t\tint verify, void *userdata)\n{\n\tstruct mscp_ssh_opts *opts = userdata;\n\n\t/* This function is called on the first time for importing\n\t * priv key file with passphrase. It is not called on the\n\t * second time or after because cached passphrase is passed\n\t * to ssh_userauth_publickey_auto(). 
*/\n\n\t/* ToDo: use\n\t * ssh_userauth_publickey_auto_get_current_identity() to print\n\t * id for which we ask passphrase */\n\n\tif (ssh_getpass(\"Passphrase: \", buf, len, echo, verify) < 0)\n\t\treturn -1;\n\n\t/* cache the passphrase */\n\tif (opts->passphrase)\n\t\tfree(opts->passphrase);\n\n\tif (!(opts->passphrase = strndup(buf, len))) {\n\t\tpriv_set_errv(\"strndup: %s\", strerrno());\n\t\treturn -1;\n\t}\n\n\treturn 0;\n}\n\nstatic struct ssh_callbacks_struct cb = {\n\t.auth_function = ssh_cache_passphrase,\n\t.userdata = NULL,\n};\n\nstatic ssh_session ssh_init_session(const char *sshdst, struct mscp_ssh_opts *opts)\n{\n\tssh_session ssh = ssh_new();\n\n\tssh_callbacks_init(&cb);\n\tcb.userdata = opts;\n\tssh_set_callbacks(ssh, &cb);\n\n\tif (ssh_options_set(ssh, SSH_OPTIONS_HOST, sshdst) != SSH_OK) {\n\t\tpriv_set_errv(\"failed to set destination host\");\n\t\tgoto free_out;\n\t}\n\n\tif (ssh_set_opts(ssh, opts) != 0)\n\t\tgoto free_out;\n\n\tif (ssh_connect(ssh) != SSH_OK) {\n\t\tpriv_set_errv(\"failed to connect ssh server: %s\", ssh_get_error(ssh));\n\t\tgoto free_out;\n\t}\n\n\tif (ssh_authenticate(ssh, opts) != 0) {\n\t\tpriv_set_errv(\"authentication failed: %s\", ssh_get_error(ssh));\n\t\tgoto disconnect_out;\n\t}\n\n\tif (ssh_verify_known_hosts(ssh) != 0) {\n\t\tpriv_set_errv(\"ssh_veriy_known_hosts failed\");\n\t\tgoto disconnect_out;\n\t}\n\n\treturn ssh;\n\ndisconnect_out:\n\tssh_disconnect(ssh);\nfree_out:\n\tssh_free(ssh);\n\treturn NULL;\n}\n\nsftp_session ssh_init_sftp_session(const char *sshdst, struct mscp_ssh_opts *opts)\n{\n\tsftp_session sftp;\n\tssh_session ssh = ssh_init_session(sshdst, opts);\n\n\tif (!ssh)\n\t\treturn NULL;\n\n\tsftp = sftp_new(ssh);\n\tif (!sftp) {\n\t\tpriv_set_errv(\"failed to allocate sftp session: %s\", ssh_get_error(ssh));\n\t\tgoto err_out;\n\t}\n\n\tif (sftp_init(sftp) != SSH_OK) {\n\t\tpriv_set_errv(\"failed to initialize sftp session: err code %d\",\n\t\t\t      sftp_get_error(sftp));\n\t\tgoto 
err_out;\n\t}\n\n\treturn sftp;\nerr_out:\n\tssh_disconnect(ssh);\n\tssh_free(ssh);\n\treturn NULL;\n}\n\n/* copied from https://api.libssh.org/stable/libssh_tutor_guided_tour.html*/\nstatic int ssh_verify_known_hosts(ssh_session session)\n{\n\tenum ssh_known_hosts_e state;\n\tunsigned char *hash = NULL;\n\tssh_key srv_pubkey = NULL;\n\tsize_t hlen;\n\tchar buf[10];\n\tchar *hexa;\n\tchar *p;\n\tint cmp;\n\tint rc;\n\n\trc = ssh_get_server_publickey(session, &srv_pubkey);\n\tif (rc < 0) {\n\t\treturn -1;\n\t}\n\n\trc = ssh_get_publickey_hash(srv_pubkey, SSH_PUBLICKEY_HASH_SHA1, &hash, &hlen);\n\tssh_key_free(srv_pubkey);\n\tif (rc < 0) {\n\t\treturn -1;\n\t}\n\n\tstate = ssh_session_is_known_server(session);\n\tswitch (state) {\n\tcase SSH_KNOWN_HOSTS_OK:\n\t\t/* OK */\n\n\t\tbreak;\n\tcase SSH_KNOWN_HOSTS_CHANGED:\n\t\tfprintf(stderr, \"Host key for server changed: it is now:\\n\");\n\t\t//ssh_print_hexa(\"Public key hash\", hash, hlen);\n\t\tfprintf(stderr, \"For security reasons, connection will be stopped\\n\");\n\t\tssh_clean_pubkey_hash(&hash);\n\n\t\treturn -1;\n\tcase SSH_KNOWN_HOSTS_OTHER:\n\t\tfprintf(stderr, \"The host key for this server was not found but an other\"\n\t\t\t\t\"type of key exists.\\n\");\n\t\tfprintf(stderr,\n\t\t\t\"An attacker might change the default server key to\"\n\t\t\t\"confuse your client into thinking the key does not exist\\n\");\n\t\tssh_clean_pubkey_hash(&hash);\n\n\t\treturn -1;\n\tcase SSH_KNOWN_HOSTS_NOT_FOUND:\n\t\tfprintf(stderr, \"Could not find known host file.\\n\");\n\t\tfprintf(stderr, \"If you accept the host key here, the file will be\"\n\t\t\t\t\"automatically created.\\n\");\n\n\t\t/* FALL THROUGH to SSH_SERVER_NOT_KNOWN behavior */\n\n\tcase SSH_KNOWN_HOSTS_UNKNOWN:\n\t\thexa = ssh_get_hexa(hash, hlen);\n\t\tfprintf(stderr, \"The server is unknown. 
Do you trust the host key?\\n\");\n\t\tfprintf(stderr, \"Public key hash: %s\\n\", hexa);\n\t\tfprintf(stderr, \"(yes/no): \");\n\t\tssh_string_free_char(hexa);\n\t\tssh_clean_pubkey_hash(&hash);\n\t\tp = fgets(buf, sizeof(buf), stdin);\n\t\tif (p == NULL) {\n\t\t\treturn -1;\n\t\t}\n\n\t\tcmp = strncasecmp(buf, \"yes\", 3);\n\t\tif (cmp != 0) {\n\t\t\treturn -1;\n\t\t}\n\n\t\trc = ssh_session_update_known_hosts(session);\n\t\tif (rc < 0) {\n\t\t\tpriv_set_errv(\"%s\", ssh_get_error(session));\n\t\t\treturn -1;\n\t\t}\n\n\t\tbreak;\n\tcase SSH_KNOWN_HOSTS_ERROR:\n\t\tfprintf(stderr, \"known hosts error: %s\", ssh_get_error(session));\n\t\tssh_clean_pubkey_hash(&hash);\n\t\treturn -1;\n\t}\n\n\tssh_clean_pubkey_hash(&hash);\n\treturn 0;\n}\n\nstatic int ssh_authenticate_kbdint(ssh_session ssh)\n{\n\t/* Copied and bit modified from\n\t * https://api.libssh.org/stable/libssh_tutor_authentication.html */\n\tint rc;\n\n\trc = ssh_userauth_kbdint(ssh, NULL, NULL);\n\twhile (rc == SSH_AUTH_INFO) {\n\t\tconst char *name, *instruction;\n\t\tint nprompts, iprompt;\n\n\t\tname = ssh_userauth_kbdint_getname(ssh);\n\t\tinstruction = ssh_userauth_kbdint_getinstruction(ssh);\n\t\tnprompts = ssh_userauth_kbdint_getnprompts(ssh);\n\n\t\tif (strlen(name) > 0)\n\t\t\tprintf(\"%s\\n\", name);\n\t\tif (strlen(instruction) > 0)\n\t\t\tprintf(\"%s\\n\", instruction);\n\t\tfor (iprompt = 0; iprompt < nprompts; iprompt++) {\n\t\t\tconst char *prompt;\n\t\t\tchar echo;\n\n\t\t\tprompt = ssh_userauth_kbdint_getprompt(ssh, iprompt, &echo);\n\t\t\tif (echo) {\n\t\t\t\tchar buf[128], *ptr;\n\n\t\t\t\tprintf(\"%s\", prompt);\n\t\t\t\tif (fgets(buf, sizeof(buf), stdin) == NULL)\n\t\t\t\t\treturn SSH_AUTH_ERROR;\n\t\t\t\tbuf[sizeof(buf) - 1] = '\\0';\n\t\t\t\tif ((ptr = strchr(buf, '\\n')) != NULL)\n\t\t\t\t\t*ptr = '\\0';\n\t\t\t\tif (ssh_userauth_kbdint_setanswer(ssh, iprompt, buf) < 0)\n\t\t\t\t\treturn SSH_AUTH_ERROR;\n\t\t\t\tmemset(buf, 0, strlen(buf));\n\t\t\t} else {\n\t\t\t\tchar 
*ptr;\n\t\t\t\tptr = getpass(prompt);\n\t\t\t\tif (ssh_userauth_kbdint_setanswer(ssh, iprompt, ptr) < 0)\n\t\t\t\t\treturn SSH_AUTH_ERROR;\n\t\t\t}\n\t\t}\n\t\trc = ssh_userauth_kbdint(ssh, NULL, NULL);\n\t}\n\treturn rc;\n}\n\nvoid ssh_sftp_close(sftp_session sftp)\n{\n\tssh_session ssh = sftp_ssh(sftp);\n\t/* XXX: sftp_free is stuck in ssh_poll_ctx_dopoll() when build type is Release.\n\t * skip sftp_free inappropriately...\n\t */\n\t//sftp_free(sftp);\n\tssh_disconnect(ssh);\n\tssh_free(ssh);\n}\n\nconst char **mscp_ssh_ciphers(void)\n{\n\treturn ssh_ciphers();\n}\n\nconst char **mscp_ssh_hmacs(void)\n{\n\treturn ssh_hmacs();\n}\n"
  },
  {
    "path": "src/ssh.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _SSH_H_\n#define _SSH_H_\n\n#include <stdbool.h>\n#include \"libssh/libssh.h\"\n#include \"libssh/sftp.h\"\n\n#include <mscp.h>\n\n/* ssh_init_sftp_session() creates sftp_session. sshdst accpets\n * user@hostname and hostname notations (by libssh).\n */\nsftp_session ssh_init_sftp_session(const char *sshdst, struct mscp_ssh_opts *opts);\nvoid ssh_sftp_close(sftp_session sftp);\n\n#define sftp_ssh(sftp) (sftp)->session\n#define sftp_get_ssh_error(sftp) ssh_get_error(sftp_ssh(sftp))\n\n#endif /* _SSH_H_ */\n"
  },
  {
    "path": "src/strerrno.c",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n\n#include <errno.h>\n#include <stdio.h>\n#include <string.h>\n#include <stdarg.h>\n\n#include <strerrno.h>\n\n#define STRERRNO_TLS_BUFSIZ 128\n__thread char tls_strerrno_buf[STRERRNO_TLS_BUFSIZ];\n\nconst char *strerrno(void)\n{\n\tsnprintf(tls_strerrno_buf, sizeof(tls_strerrno_buf), \"%s\", \"strerror_r error\");\n\tstrerror_r(errno, tls_strerrno_buf, sizeof(tls_strerrno_buf));\n\treturn tls_strerrno_buf;\n}\n\n#define PRIV_ERR_BUFSIZ (1 << 12)\n__thread char priv_err_buf[PRIV_ERR_BUFSIZ], internal[PRIV_ERR_BUFSIZ];\n\nvoid priv_set_err(const char *fmt, ...)\n{\n\tva_list va;\n\tmemset(internal, 0, sizeof(internal));\n\tva_start(va, fmt);\n\tvsnprintf(internal, sizeof(internal), fmt, va);\n\tva_end(va);\n\tsnprintf(priv_err_buf, sizeof(priv_err_buf), \"%s\", internal);\n}\n\nconst char *priv_get_err()\n{\n\treturn priv_err_buf;\n}\n"
  },
  {
    "path": "src/strerrno.h",
    "content": "/* SPDX-License-Identifier: GPL-3.0-only */\n#ifndef _STRERRNO_\n#define _STRERRNO_\n\n#include <libgen.h> /* basename() */\n\n/**\n * strerrno() returns error message string corresponding to errno.\n * strerrno() is thread safe.\n */\nconst char *strerrno(void);\n\n/**\n * priv_set_err() sets an error message into a thread-local private\n * buffer. This error message can be accessed via priv_get_err().\n *\n * The top-level function in a thread should print errors using\n * priv_get_err(), while lower-level functions should set error\n * messages using priv_set_err().\n */\nvoid priv_set_err(const char *fmt, ...);\n\n/**\n * priv_set_errv(), a wrapper for priv_set_err(), just adds filename,\n * line, and function name to the error message.\n */\n#define priv_set_errv(fmt, ...)                                                      \\\n\tpriv_set_err(\"[%s:%d:%s] \" fmt \"\\0\", basename(__FILE__), __LINE__, __func__, \\\n\t\t     ##__VA_ARGS__)\n\n/**\n * priv_get_err() gets the error message sotred in the thread-local private buffer.\n */\nconst char *priv_get_err();\n\n#endif /* _STRERRNO_ */\n"
  },
  {
    "path": "test/.gitignore",
    "content": "__pycache__\n"
  },
  {
    "path": "test/README.md",
    "content": "\nThis test assumes that the user executing the test can ssh to the\nlocalhost without password.\n\n- Run pytest through ctest.\n\n```console\npython3 -m pip install pytest numpy\n\ncd build\ncmake ..\nctest --verbose # or `make test ARGS='-V'`\n```\n"
  },
  {
    "path": "test/conftest.py",
    "content": "import pytest\n\ndef pytest_addoption(parser):\n    parser.addoption(\"--mscp-path\", default = \"mscp\",\n                     help = \"path to mscp binary\")\n\n@pytest.fixture\ndef mscp(request):\n    return request.config.getoption(\"--mscp-path\")\n"
  },
  {
    "path": "test/test_e2e.py",
    "content": "\n\"\"\"\ntest_e2e.py: End-to-End test for mscp executable.\n\"\"\"\n\nimport platform\nimport pytest\nimport getpass\nimport datetime\nimport time\nimport os\nimport re\nimport shutil\n\nfrom subprocess import check_call, CalledProcessError\nfrom util import File, check_same_md5sum\n\n\ndef run2ok(args, env = None, quiet = False):\n    cmd = list(map(str, args))\n    if not quiet:\n        print(\"cmd: {}\".format(\" \".join(cmd)))\n    check_call(cmd, env = env)\n\ndef run2ng(args, env = None, timeout = None, quiet = False):\n    if timeout:\n        args = [\"timeout\", \"-s\", \"INT\", timeout] + args\n    cmd = list(map(str, args))\n    if not quiet:\n        print(\"cmd: {}\".format(\" \".join(cmd)))\n    with pytest.raises(CalledProcessError):\n        check_call(cmd, env = env)\n\n\n@pytest.fixture(autouse=True)\ndef cleanup_files():\n    \"\"\"\n    Cleanup files having the following `prefixes` or matching `paths`.\n    \"\"\"\n\n    yield\n\n    prefixes = [\n        \"src\", \"dst\",\n        \"non_existent_dstdir\",\n    ]\n    paths = [\n        \"/mscp-test-src\", \"/tmp/mscp-test-src\",\n        \"{}/src\".format(os.environ[\"HOME\"]),\n        \"{}/dst\".format(os.environ[\"HOME\"]),\n        \"/tmp/mscp_test_ssh_config\",\n        \"/home/test/dst\",\n        \"/home/test/src\",\n        \"checkpoint\",\n    ]\n\n    def remove(path):\n        print(f\"cleanup remove: {fname}\")\n        if os.path.isdir(path):\n            shutil.rmtree(path)\n        else:\n            os.remove(path)\n\n    for fname in os.listdir(os.getcwd()):\n        for prefix in prefixes:\n            if fname.startswith(prefix):\n                remove(fname)\n                break\n\n    for path in paths:\n        if os.path.exists(path):\n            remove(path)\n\n\n\"\"\" usage test \"\"\"\n\ndef test_usage(mscp):\n    run2ng([mscp])\n    run2ok([mscp, \"-h\"])\n\ndef test_invalid_chunk_size_config(mscp):\n    run2ng([mscp, \"-s\", 8 << 20, \"-S\", 4 
<< 20])\n\nparam_invalid_hostnames = [\n    ([\"a:a\", \"b:b\", \"c:c\"]), ([\"a:a\", \"b:b\", \"c\"]), ([\"a:a\", \"b\", \"c:c\"]),\n    ([\"a\", \"b:b\", \"c:c\"])\n]\n\n@pytest.mark.parametrize(\"args\", param_invalid_hostnames)\ndef test_nonidentical_hostnames(mscp, args):\n    run2ng([mscp] + args)\n\n\n\n\n\"\"\" copy test \"\"\"\n\nremote_prefix = \"localhost:{}/\".format(os.getcwd()) # use current dir\nparam_remote_prefix = [\n    (\"\", remote_prefix), (remote_prefix, \"\")\n]\n\nparam_single_copy = [\n    (File(\"src\", size = 64), File(\"dst\")),\n    (File(\"src\", size = 4096 * 1), File(\"dst\")),\n    (File(\"src\", size = 128 * 1024 * 1024), File(\"dst\")),\n]\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"src, dst\", param_single_copy)\ndef test_single_copy(mscp, src_prefix, dst_prefix, src, dst):\n    src.make()\n    run2ok([mscp, \"-vvv\", src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_failed_to_copy_nonexistent_file(mscp, src_prefix, dst_prefix):\n    src = \"nonexistent_src\"\n    dst = \"nonexistent_dst\"\n    run2ng([mscp, \"-vvv\", src_prefix + src, dst_prefix + dst])\n\nparam_double_copy = [\n    (File(\"src1\", size = 1024 * 1024), File(\"src2\", size = 1024 * 1024),\n     File(\"dst/src1\"), File(\"dst/src2\")\n     )\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"s1, s2, d1, d2\", param_double_copy)\ndef test_double_copy(mscp, src_prefix, dst_prefix, s1, s2, d1, d2):\n    s1.make()\n    s2.make()\n    run2ok([mscp, \"-vvv\", src_prefix + s1.path, src_prefix + s2.path, dst_prefix + \"dst\"])\n    assert check_same_md5sum(s1, d1)\n    assert check_same_md5sum(s2, d2)\n\n\nremote_v6_prefix = \"[::1]:{}/\".format(os.getcwd())\nparam_remote_v6_prefix = [\n    (\"\", remote_v6_prefix), 
(remote_v6_prefix, \"\")\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_v6_prefix)\n@pytest.mark.parametrize(\"s1, s2, d1, d2\", param_double_copy)\ndef test_double_copy_with_ipv6_notation(mscp, src_prefix, dst_prefix, s1, s2, d1, d2):\n    s1.make()\n    s2.make()\n    run2ok([mscp, \"-vvv\",\n            src_prefix + s1.path, src_prefix + s2.path, dst_prefix + \"dst\"])\n    assert check_same_md5sum(s1, d1)\n    assert check_same_md5sum(s2, d2)\n\n\nremote_user_v6_prefix = \"{}@[::1]:{}/\".format(getpass.getuser(), os.getcwd())\nparam_remote_user_v6_prefix = [\n    (\"\", remote_user_v6_prefix), (remote_user_v6_prefix, \"\")\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_user_v6_prefix)\n@pytest.mark.parametrize(\"s1, s2, d1, d2\", param_double_copy)\ndef test_double_copy_with_user_and_ipv6_notation(mscp, src_prefix, dst_prefix,\n                                                 s1, s2, d1, d2):\n    s1.make()\n    s2.make()\n    run2ok([mscp, \"-vvv\",\n            src_prefix + s1.path, src_prefix + s2.path, dst_prefix + \"dst\"])\n    assert check_same_md5sum(s1, d1)\n    assert check_same_md5sum(s2, d2)\n\n\nparam_dir_copy = [\n    ( \"src_dir\", \"dst_dir\",\n        [ File(\"src_dir/t1\", size = 64),\n          File(\"src_dir/t2\", size = 4096),\n          File(\"src_dir/d1/t3\", size = 64),\n          File(\"src_dir/d1/d2/t4\", size = 128), ],\n        [ File(\"dst_dir/t1\"),\n          File(\"dst_dir/t2\"),\n          File(\"dst_dir/d1/t3\"),\n          File(\"dst_dir/d1/d2/t4\"), ],\n        [ File(\"dst_dir/src_dir/t1\"),\n          File(\"dst_dir/src_dir/t2\"),\n          File(\"dst_dir/src_dir/d1/t3\"),\n          File(\"dst_dir/src_dir/d1/d2/t4\"), ],\n    )\n]\n\n\"\"\"\n`scp remote:src_dir dst_dir` renames src_dir to dst_dir if dst_dir\ndoes not exist. If dst_dir exists, scp copies src_dir to\ndst_dir/src_dir. 
So, this test checks both cases.\n\"\"\"\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"src_dir, dst_dir, src, dst, twice\", param_dir_copy)\ndef test_dir_copy(mscp, src_prefix, dst_prefix, src_dir, dst_dir, src, dst, twice):\n    for f in src:\n        f.make()\n\n    run2ok([mscp, \"-vvv\", src_prefix + src_dir, dst_prefix + dst_dir])\n    for sf, df in zip(src, dst):\n        assert check_same_md5sum(sf, df)\n\n    run2ok([mscp, \"-vvv\", src_prefix + src_dir, dst_prefix + dst_dir])\n    for sf, df in zip(src, twice):\n        assert check_same_md5sum(sf, df)\n\n\n\nparam_dir_copy_single = [\n    (\"src_dir\", \"dst_dir\",\n     File(\"src_dir/t1\", size = 1024 * 1024),\n     File(\"dst_dir/src_dir/t1\"),\n     )\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"src_dir, dst_dir, src, dst\", param_dir_copy_single)\ndef test_dir_copy_single(mscp, src_prefix, dst_prefix, src_dir, dst_dir, src, dst):\n    src.make()\n    os.mkdir(dst_dir)\n    run2ok([mscp, \"-vvv\", src_prefix + src_dir, dst_prefix + dst_dir])\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_override_single_file(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 128).make()\n    dst = File(\"dst\", size = 128).make()\n    assert not check_same_md5sum(src, dst)\n\n    run2ok([mscp, \"-vvv\", src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n\nabsolute_remote_prefix = \"localhost:\"\nparam_absolute_remote_prefix = [\n    (\"\", absolute_remote_prefix), (absolute_remote_prefix, \"\")\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_absolute_remote_prefix)\ndef test_copy_file_under_root_to_dir(mscp, src_prefix, dst_prefix):\n    src = File(\"/mscp-test-src\", size = 1024).make()\n    dst = File(\"/tmp/mscp-test-src\")\n\n    run2ok([mscp, 
\"-vvv\", src_prefix + src.path,\n            dst_prefix + os.path.dirname(dst.path)])\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_dst_has_suffix_slash(mscp, src_prefix, dst_prefix):\n    \"\"\"\n    if dst path has suffix '/' like \"dir/\" and does not exist,\n    mscp should create dir/ and put dir/src-file-name.\n    \"\"\"\n    dstdir = \"non_existent_dstdir/\"\n\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(f\"{dstdir}/src\")\n\n    run2ok([mscp, \"-vvv\", src_prefix + src.path,\n            dst_prefix + dstdir])\n\n    assert check_same_md5sum(src, dst)\n\n\nparam_tilde_paths = [\n    (\"src\", \"localhost:~/dst\"),\n    (\"localhost:~/src\", \"dst\"),\n]\n@pytest.mark.parametrize(\"src_path, dst_path\", param_tilde_paths)\ndef test_remote_path_contains_tilde(mscp, src_path, dst_path):\n    \"\"\"\n    if remote path contains '~' as prefix, it should be expanded as '.'.\n    Note that `~user` notation is not supported yet.\n    \"\"\"\n    def extract_and_expand(path):\n        path = path if not ':' in path else path[path.index(':')+1:]\n        return path.replace('~', os.environ[\"HOME\"])\n\n    src_f_path = extract_and_expand(src_path)\n    dst_f_path = extract_and_expand(dst_path)\n\n    src = File(src_f_path, size = 1024 * 1024).make()\n    dst = File(dst_f_path)\n\n    run2ok([mscp, \"-vvv\", src_path, dst_path])\n    assert check_same_md5sum(src, dst)\n\n\ndef test_remote_path_contains_tilde2(mscp):\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(f\"{os.environ['HOME']}/src\")\n\n    run2ok([mscp, \"-vvv\", src.path, f\"localhost:~\"])\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_min_chunk(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 16 * 1024).make()\n    dst = File(\"dst\")\n\n    run2ok([mscp, \"-vvv\", \"-s\", 32768, 
src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n\ndef is_alpine():\n    if os.path.exists(\"/etc/os-release\"):\n        with open(\"/etc/os-release\", \"r\") as f:\n            for line in f:\n                if line.strip() == \"ID=alpine\":\n                    return True\n    return False\n\nparam_glob_copy = [\n    (\n        \"src*\", \"dstx\",\n        [ File(\"src1\"), File(\"src2\"), File(\"src3\") ],\n        [ File(\"dstx/src1\"), File(\"dstx/src2\"), File(\"dstx/src3\") ],\n    ),\n    (\n        \"src*\", \"dstx\",\n        [ File(\"src1/s1\"), File(\"src2/s2\"), File(\"src3/s3\") ],\n        [ File(\"dstx/s1\"), File(\"dstx/s2\"), File(\"dstx/s3\") ],\n    )\n]\n\n@pytest.mark.skipif(is_alpine(),\n                    reason = \"musl does not implement glob ALTDIRFUNC\")\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"src_glob_path, dst_path, srcs, dsts\", param_glob_copy)\ndef test_glob_src_path(mscp, src_prefix, dst_prefix,\n                       src_glob_path, dst_path, srcs, dsts):\n    for src in srcs:\n        src.make(size = 1024 * 1024)\n\n    run2ok([mscp, \"-vvv\", src_prefix + src_glob_path, dst_prefix + dst_path])\n    for src, dst in zip(srcs, dsts):\n        assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_thread_affinity(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 64 * 1024).make()\n    dst = File(\"dst\")\n\n    run2ok([mscp, \"-vvv\", \"-n\", 4, \"-m\", \"0x01\",\n            src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_cannot_override_file_with_dir(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 128).make()\n    dst = File(\"dst\").make()\n\n    run2ng([mscp, \"-vvv\", src_prefix + src.path, dst_prefix 
+ \"dst/src\"])\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_transfer_zero_bytes(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 0).make()\n    dst = File(\"dst\")\n    run2ok([mscp, \"-vvv\", src_prefix + src.path, dst_prefix + \"dst\"])\n    assert os.path.exists(\"dst\")\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_override_dst_having_larger_size(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\", size = 1024 * 1024 * 2).make()\n    run2ok([mscp, \"-vvv\", src_prefix + src.path, dst_prefix + \"dst\"])\n    assert check_same_md5sum(src, dst)\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_dont_truncate_dst(mscp, src_prefix, dst_prefix):\n    f = File(\"srcanddst\", size = 1024 * 1024 * 128).make()\n    md5_before = f.md5sum()\n    run2ok([mscp, \"-vvv\", src_prefix + f.path, dst_prefix + f.path])\n    md5_after = f.md5sum()\n    assert md5_before == md5_after\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_copy_readonly_file(mscp, src_prefix, dst_prefix):\n    \"\"\"When a source file permission is r--r--r--, if chmod(r--r--r--)\n    runs first on the remote side, following truncate() and setutime()\n    fail due to permission deneid. So, run chmod() after truncate()\n    and setutime()\n\n    \"\"\"\n    src = File(\"src\", size = 1024 * 1024 * 128, perm = 0o444).make()\n    dst = File(\"dst\")\n    run2ok([mscp, \"-vvv\", src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_dont_make_conns_more_than_chunks(mscp, src_prefix, dst_prefix):\n    # copy 100 files with -n 20 -I 1 options. 
if mscp creates 20 SSH\n    # connections although all files have been copied, it is error.\n    srcs = []\n    dsts = []\n    for n in range(100):\n        srcs.append(File(\"src/src-{:06d}\".format(n), size=1024).make())\n        dsts.append(File(\"dst/src-{:06d}\".format(n)))\n    start = time.time()\n    run2ok([mscp, \"-v\", \"-n\", \"20\", \"-I\", \"1\",\n            src_prefix + \"src\", dst_prefix + \"dst\"])\n    end = time.time()\n    for s, d in zip(srcs, dsts):\n        assert check_same_md5sum(s, d)\n\n    assert((end - start) < 10)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_bwlimit(mscp, src_prefix, dst_prefix):\n    \"\"\"Copy 100MB file with 100Mbps bitrate, this requires 8 seconds.\"\"\"\n    src = File(\"src\", size = 100 * 1024 * 1024).make()\n    dst = File(\"dst\")\n\n    start = datetime.datetime.now().timestamp()\n    run2ok([mscp, \"-vvv\", \"-L\", \"100m\", src_prefix + \"src\", dst_prefix + \"dst\"])\n    end = datetime.datetime.now().timestamp()\n    assert check_same_md5sum(src, dst)\n    assert end - start > 7\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"src, dst\", param_single_copy)\ndef test_set_port_ng(mscp, src_prefix, dst_prefix, src, dst):\n    src.make()\n    run2ng([mscp, \"-vvv\", \"-P\", 21, src_prefix + src.path, dst_prefix + dst.path])\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"src, dst\", param_single_copy)\ndef test_set_port_ok(mscp, src_prefix, dst_prefix, src, dst):\n    src.make()\n    run2ok([mscp, \"-vvv\", \"-P\", 8022, src_prefix + src.path, dst_prefix + dst.path])\n\ndef test_v4only(mscp):\n    src = File(\"src\", size = 1024).make()\n    dst = File(\"dst\")\n    dst_prefix = \"localhost:{}/\".format(os.getcwd())\n    run2ok([mscp, \"-vvv\", \"-4\", src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\ndef 
test_v6only(mscp):\n    src = File(\"src\", size = 1024).make()\n    dst = File(\"dst\")\n    dst_prefix = \"ip6-localhost:{}/\".format(os.getcwd())\n    run2ok([mscp, \"-vvv\", \"-6\", src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\ndef test_v4_to_v6_should_fail(mscp):\n    src = File(\"src\", size = 1024).make()\n    dst = File(\"dst\")\n    dst_prefix = \"[::1]:{}/\".format(os.getcwd())\n    run2ng([mscp, \"-vvv\", \"-4\", src.path, dst_prefix + dst.path])\n\ndef test_v6_to_v4_should_fail(mscp):\n    src = File(\"src\", size = 1024).make()\n    dst = File(\"dst\")\n    dst_prefix = \"127.0.0.1:{}/\".format(os.getcwd())\n    run2ng([mscp, \"-vvv\", \"-6\", src.path, dst_prefix + dst.path])\n\ndef test_quiet_mode(capsys, mscp):\n    src = File(\"src\", size = 1024).make()\n    dst = File(\"dst\")\n    dst_prefix = \"127.0.0.1:{}/\".format(os.getcwd())\n    run2ok([mscp, \"-vvv\", \"-q\", src.path, dst_prefix + dst.path], quiet=True)\n    assert check_same_md5sum(src, dst)\n\n    captured = capsys.readouterr()\n    assert not captured.out\n    assert not captured.err\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_set_conn_interval(mscp, src_prefix, dst_prefix):\n    srcs = []\n    dsts = []\n    for x in range(500):\n        srcs.append(File(\"src/file{}\".format(x), size = 128).make())\n        dsts.append(File(\"dst/file{}\".format(x)))\n    run2ok([mscp, \"-vvv\", \"-I\", 1, src_prefix + \"src\", dst_prefix + \"dst\"])\n\n    for src, dst in zip(srcs, dsts):\n        assert check_same_md5sum(src, dst)\n\ncompressions = [\"yes\", \"no\", \"none\"]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"compress\", compressions)\ndef test_compression(mscp, src_prefix, dst_prefix, compress):\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\", size = 1024 * 1024 * 2).make()\n    run2ok([mscp, \"-vvv\", \"-C\", compress, 
src_prefix + src.path, dst_prefix + \"dst\"])\n    assert check_same_md5sum(src, dst)\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_ccalgo(mscp, src_prefix, dst_prefix):\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\").make()\n    if platform.system() == \"Darwin\":\n        # Darwin does not support TCP_CONGESTION\n        algo = \"cubic\"\n        run = run2ng\n    elif platform.system() == \"Linux\":\n        # Linux supports TCP_CONGESTION\n        with open(\"/proc/sys/net/ipv4/tcp_allowed_congestion_control\", \"r\") as f:\n            algo = f.read().strip().split().pop()\n        run = run2ok\n    run([mscp, \"-vvv\", \"-g\", algo, src_prefix + src.path, dst_prefix + \"dst\"])\n\n\ntesthost = \"mscptestlocalhost\"\ntesthost_prefix = \"{}:{}/\".format(testhost, os.getcwd()) # use current dir\nparam_testhost_prefix = [\n    (\"\", testhost_prefix), (testhost_prefix, \"\")\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_testhost_prefix)\ndef test_config_ok(mscp, src_prefix, dst_prefix):\n    config = \"/tmp/mscp_test_ssh_config\"\n    with open(config, \"w\") as f:\n        f.write(\"host {}\\n\".format(testhost))\n        f.write(\"    hostname localhost\\n\")\n\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\", size = 1024 * 1024 * 2).make()\n    run2ok([mscp, \"-vvv\", \"-F\", config,\n            src_prefix + src.path, dst_prefix + \"dst\"])\n\n    os.remove(config)\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_testhost_prefix)\ndef test_config_ng(mscp, src_prefix, dst_prefix):\n    config = \"/tmp/mscp_test_ssh_config\"\n    with open(config, \"w\") as f:\n        f.write(\"\\n\") # use empty ssh_config\n\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\", size = 1024 * 1024 * 2).make()\n    run2ng([mscp, \"-vvv\", \"-F\", config,\n            src_prefix + src.path, 
dst_prefix + \"dst\"])\n\n    os.remove(config)\n\n\nparam_valid_option_ok = [\n    [ \"-o\", \"Port=8022\" ],\n    [ \"-o\", \"Port=8022\", \"-o\", \"User=root\" ],\n    [ \"-o\", \"unknown-option-is-silently-ignored\" ],\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"option\", param_valid_option_ok)\ndef test_inline_option_ok(mscp, src_prefix, dst_prefix, option):\n    \"\"\" change port number with -o option. it should be ok. \"\"\"\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\")\n    run2ok([mscp, \"-vvv\"] + option +\n           [src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n\nparam_valid_option_ng = [\n    [ \"-o\", \"Port=8023\" ],\n    [ \"-o\", \"User=invaliduser\" ],\n]\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\n@pytest.mark.parametrize(\"option\", param_valid_option_ng)\ndef test_inline_option_ng(mscp, src_prefix, dst_prefix, option):\n    \"\"\" change port number with -o option. it should be ng. 
\"\"\"\n    src = File(\"src\", size = 1024 * 1024).make()\n    dst = File(\"dst\")\n    run2ng([mscp, \"-vvv\"] + option +\n           [src_prefix + src.path, dst_prefix + dst.path])\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_porxyjump_ok(mscp, src_prefix, dst_prefix):\n    \"\"\" test -J proxyjump option\"\"\"\n    src = File(\"src\", size = 10 * 1024 * 1024).make()\n    dst = File(\"dst\")\n    # use small min-chunk-size to use multiple connections\n    run2ok([mscp, \"-n\", 4, \"-s\", 1024 * 1024, \"-vvv\",\n            \"-J\", \"localhost:8022\",\n            src_prefix + src.path, dst_prefix + dst.path])\n    assert check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_porxyjump_ng(mscp, src_prefix, dst_prefix):\n    \"\"\" test -J proxyjump option, invalid jump node causes fail\"\"\"\n    src = File(\"src\", size = 10 * 1024 * 1024).make()\n    dst = File(\"dst\")\n    # use small min-chunk-size to use multiple connections\n    run2ng([mscp, \"-n\", 4, \"-s\", 1024 * 1024, \"-vvv\",\n            \"-J\", \"invaliduser@localhost:8022\",\n            src_prefix + src.path, dst_prefix + dst.path])\n\n# username test assumes that this test runs inside a container, see Dockerfiles\ndef test_specify_passphrase_via_env(mscp):\n    src = File(os.getcwd() + \"/src\", size = 1024).make()\n    dst = File(\"/home/test/dst\")\n    env = os.environ\n    env[\"MSCP_SSH_AUTH_PASSPHRASE\"]  = \"keypassphrase\"\n    run2ok([mscp, \"-vvv\", \"-l\", \"test\", \"-i\", \"/home/test/.ssh/id_rsa_test\",\n            src.path, \"localhost:\" + dst.path], env = env)\n    assert check_same_md5sum(src, dst)\n\ndef test_specify_invalid_passphrase_via_env(mscp):\n    src = File(os.getcwd() + \"/src\", size = 1024).make()\n    dst = File(\"/home/test/dst\")\n    env = os.environ\n    env[\"MSCP_SSH_AUTH_PASSPHRASE\"]  = \"invalid-keypassphrase\"\n    run2ng([mscp, \"-vvv\", \"-l\", 
\"test\", \"-i\", \"/home/test/.ssh/id_rsa_test\",\n            src.path, \"localhost:\" + dst.path], env = env)\n\ndef test_specify_password_via_env(mscp):\n    src = File(os.getcwd() + \"/src\", size = 1024).make()\n    dst = File(\"/home/test/dst\")\n    env = os.environ\n    env[\"MSCP_SSH_AUTH_PASSWORD\"]  = \"userpassword\"\n    run2ok([mscp, \"-vvv\", \"-l\", \"test\",\n            src.path, \"localhost:\" + dst.path], env = env)\n    assert check_same_md5sum(src, dst)\n\ndef test_specify_invalid_password_via_env(mscp):\n    src = File(os.getcwd() + \"/src\", size = 1024).make()\n    dst = File(\"/home/test/dst\")\n    env = os.environ\n    env[\"MSCP_SSH_AUTH_PASSWORD\"]  = \"invalid-userpassword\"\n    run2ng([mscp, \"-vvv\", \"-l\", \"test\",\n            src.path, \"localhost:\" + dst.path], env = env)\n\n\n@pytest.fixture\ndef move_pubkey_temporally():\n    \"\"\"\n    mv ~/.ssh/id_* to id_rsa.bak before test, and move it back after test.\n    \"\"\"\n\n    sshdir = os.path.join(os.environ[\"HOME\"], \".ssh\")\n\n    # move pubkeys to /tmp\n    moved = []\n    for fname in os.listdir(sshdir):\n        if re.match(r\"^id_[a-z0-9]+$\", fname):\n            moved.append(fname)\n            shutil.move(f\"{sshdir}/{fname}\", f\"/tmp/{fname}\")\n\n    yield\n\n    # move back the keys\n    for fname in moved:\n        shutil.move(f\"/tmp/{fname}\", f\"{sshdir}/{fname}\")\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_passwordauth_without_pubkey(move_pubkey_temporally,\n                                     mscp, src_prefix, dst_prefix):\n    \"\"\"\n    make sure password auth works (by removing public keys)\n    \"\"\"\n    src = File(os.getcwd() + \"/src\", size = 1024).make()\n    dst = File(\"/home/test/dst\")\n    env = os.environ\n    env[\"MSCP_SSH_AUTH_PASSWORD\"]  = \"userpassword\"\n    run2ok([mscp, \"-vvv\", \"-l\", \"test\",\n            src.path, \"localhost:\" + dst.path], env = env)\n    assert 
check_same_md5sum(src, dst)\n\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_10k_files(mscp, src_prefix, dst_prefix):\n    srcs = []\n    dsts = []\n    for n in range(10000):\n        srcs.append(File(\"src/src-{:06d}\".format(n), size=1024).make())\n        dsts.append(File(\"dst/src-{:06d}\".format(n)))\n    run2ok([mscp, \"-v\", src_prefix + \"src\", dst_prefix + \"dst\"])\n    for s, d in zip(srcs, dsts):\n        assert check_same_md5sum(s, d)\n\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_checkpoint_dump_and_resume(mscp, src_prefix, dst_prefix):\n    src1 = File(\"src1\", size = 64 * 1024 * 1024).make()\n    src2 = File(\"src2\", size = 64 * 1024 * 1024).make()\n    dst1 = File(\"dst/src1\")\n    dst2 = File(\"dst/src2\")\n    run2ok([mscp, \"-vvv\", \"-W\", \"checkpoint\", \"-D\",\n            src_prefix + \"src1\", src_prefix + \"src2\", dst_prefix + \"dst\"])\n    assert os.path.exists(\"checkpoint\")\n\n    run2ok([mscp, \"-vvv\", \"-R\", \"checkpoint\"])\n    assert check_same_md5sum(src1, dst1)\n    assert check_same_md5sum(src2, dst2)\n    os.remove(\"checkpoint\")\n\n@pytest.mark.parametrize(\"timeout\", [ 1, 2, 3, 4, 5 ])\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_checkpoint_interrupt_large_file(mscp, timeout, src_prefix, dst_prefix):\n    \"\"\"Copy two 100MB files with 200Mbps -> 4 sec + 4 sec \"\"\"\n    src1 = File(\"src1\", size = 100 * 1024 * 1024).make()\n    src2 = File(\"src2\", size = 100 * 1024 * 1024).make()\n    dst1 = File(\"dst/src1\")\n    dst2 = File(\"dst/src2\")\n    run2ng([mscp, \"-vv\", \"-W\", \"checkpoint\", \"-L\", \"200m\",\n            src_prefix + \"src1\", src_prefix + \"src2\", dst_prefix + \"dst\"],\n           timeout = timeout)\n    assert os.path.exists(\"checkpoint\")\n\n    run2ok([mscp, \"-vv\", \"-R\", \"checkpoint\"])\n    assert check_same_md5sum(src1, dst1)\n    assert 
check_same_md5sum(src2, dst2)\n    os.remove(\"checkpoint\")\n\n@pytest.mark.parametrize(\"timeout\", [ 1, 2, 3, 4, 5 ])\n@pytest.mark.parametrize(\"src_prefix, dst_prefix\", param_remote_prefix)\ndef test_checkpoint_interrupt_many_files(mscp, timeout, src_prefix, dst_prefix):\n    \"\"\"Copy 100 1-MB files with 4 connections, and interrupt and\n    resume the transfer\n    \"\"\"\n\n    files = []\n    for x in range(100):\n        files.append((\n            File(\"src/{:03d}\".format(x), size = 1024 * 1024).make(),\n            File(\"dst/{:03d}\".format(x))\n        ))\n\n    run2ng([mscp, \"-vv\", \"-W\", \"checkpoint\", \"-L\", \"80m\", \"-n\", 4,\n            src_prefix + \"src\",  dst_prefix + \"dst\"],\n           timeout = timeout)\n    assert os.path.exists(\"checkpoint\")\n\n    run2ok([mscp, \"-vv\", \"-R\", \"checkpoint\"])\n\n    for src, dst in files:\n        assert check_same_md5sum(src, dst)\n\n    os.remove(\"checkpoint\")\n\n"
  },
  {
    "path": "test/util.py",
    "content": "\nimport hashlib\nimport os\n\n\ndef check_same_md5sum(fa, fb):\n    return (fa.md5sum() == fb.md5sum())\n\n\nclass File():\n    def __init__(self, path, size = 0, content = \"random\", perm = 0o664):\n        if not content in [\"zero\", \"random\"]:\n            raise ValueError(\"invalid type: {}\".format(content))\n        self.path = path\n        self.size = size\n        self.content = content\n        self.perm = perm\n\n    def __repr__(self):\n        return \"<file:{} {}-bytes>\".format(self.path, self.size)\n\n    def __str__(self):\n        return self.path\n\n    def make(self, size = None):\n        if size:\n            self.size = size\n\n        d = os.path.dirname(self.path)\n        if d:\n            os.makedirs(d, exist_ok = True)\n        if self.content == \"zero\":\n            self.make_content_zero()\n        elif self.content == \"random\":\n            self.make_content_random()\n        else:\n            raise ValueError(\"invalud content type: {}\".format(self.content))\n        os.chmod(self.path, self.perm)\n        return self\n\n    def make_content_zero(self):\n        with open(self.path, \"wb\") as f:\n            f.seek(self.size, 0)\n\n    def make_content_random(self):\n        with open(self.path, \"wb\") as f:\n            f.write(os.urandom(self.size))\n\n    def cleanup(self, preserve_dir = False):\n        os.remove(self.path)\n        if preserve_dir:\n            return\n        tmp = os.path.dirname(self.path)\n        while tmp and not tmp in [\".\", \"/\"]:\n            if len(os.listdir(tmp)) == 0:\n                os.rmdir(tmp)\n            tmp = os.path.dirname(tmp)\n\n    def md5sum(self):\n        m = hashlib.md5()\n        with open(self.path, 'rb') as f:\n            for chunk in iter(lambda: f.read(4096 * m.block_size), b''):\n                m.update(chunk)\n        return m.hexdigest()\n\n"
  }
]