Repository: upa/mscp
Branch: main
Commit: 1313853d7d30
Files: 71
Total size: 320.7 KB
Directory structure:
gitextract_bck5e2hc/
├── .clang-format
├── .dockerignore
├── .github/
│ └── workflows/
│ ├── build-freebsd.yml
│ ├── build-macos.yml
│ ├── build-ubuntu.yml
│ ├── codeql.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── Dockerfile/
│ ├── README.md
│ ├── almalinux-9.3.Dockerfile
│ ├── alpine-3.22.Dockerfile
│ ├── arch-base.Dockerfile
│ ├── build-deb.Dockerfile
│ ├── build-srpm.Dockerfile
│ ├── rocky-8.9.Dockerfile
│ ├── rocky-9.3.Dockerfile
│ ├── ubuntu-20.04.Dockerfile
│ ├── ubuntu-22.04.Dockerfile
│ └── ubuntu-24.04.Dockerfile
├── Doxyfile
├── LICENSE
├── README.md
├── VERSION
├── conanfile.txt
├── doc/
│ ├── RELEASE.md
│ ├── mscp.1.in
│ └── mscp.rst
├── include/
│ ├── config.h.in
│ └── mscp.h
├── patch/
│ ├── README.md
│ ├── libssh-0.10.4.patch
│ ├── libssh-0.10.6-2-g6f1b1e76.patch
│ ├── libssh-0.10.6.patch
│ ├── libssh-0.11.2.patch
│ └── libssh-0.9.6.patch
├── rpm/
│ ├── .gitignore
│ └── mscp.spec.in
├── scripts/
│ ├── install-build-deps.sh
│ └── test-in-container.sh
├── src/
│ ├── atomic.h
│ ├── bwlimit.c
│ ├── bwlimit.h
│ ├── checkpoint.c
│ ├── checkpoint.h
│ ├── fileops.c
│ ├── fileops.h
│ ├── main.c
│ ├── minmax.h
│ ├── mscp.c
│ ├── openbsd-compat/
│ │ ├── openbsd-compat.h
│ │ └── strlcat.c
│ ├── path.c
│ ├── path.h
│ ├── platform.c
│ ├── platform.h
│ ├── pool.c
│ ├── pool.h
│ ├── print.c
│ ├── print.h
│ ├── ssh.c
│ ├── ssh.h
│ ├── strerrno.c
│ └── strerrno.h
└── test/
├── .gitignore
├── README.md
├── conftest.py
├── test_e2e.py
└── util.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .clang-format
================================================
# SPDX-License-Identifier: GPL-2.0
#
# clang-format configuration file. Intended for clang-format >= 11.
#
# For more information, see:
#
# Documentation/process/clang-format.rst
# https://clang.llvm.org/docs/ClangFormat.html
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
# clang-format configuration for Linux kernel, except that ColumnLimit is 90
---
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterNamespace: true
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeComma
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 90
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: false
# Taken from:
# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ tools/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
# | LC_ALL=C sort -u
ForEachMacros:
- '__ata_qc_for_each'
- '__bio_for_each_bvec'
- '__bio_for_each_segment'
- '__evlist__for_each_entry'
- '__evlist__for_each_entry_continue'
- '__evlist__for_each_entry_from'
- '__evlist__for_each_entry_reverse'
- '__evlist__for_each_entry_safe'
- '__for_each_mem_range'
- '__for_each_mem_range_rev'
- '__for_each_thread'
- '__hlist_for_each_rcu'
- '__map__for_each_symbol_by_name'
- '__pci_bus_for_each_res0'
- '__pci_bus_for_each_res1'
- '__pci_dev_for_each_res0'
- '__pci_dev_for_each_res1'
- '__perf_evlist__for_each_entry'
- '__perf_evlist__for_each_entry_reverse'
- '__perf_evlist__for_each_entry_safe'
- '__rq_for_each_bio'
- '__shost_for_each_device'
- '__sym_for_each'
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_bvec'
- 'bio_for_each_bvec_all'
- 'bio_for_each_folio_all'
- 'bio_for_each_integrity_vec'
- 'bio_for_each_segment'
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each'
- 'bpf_for_each_reg_in_vstate'
- 'bpf_for_each_reg_in_vstate_mask'
- 'bpf_for_each_spilled_reg'
- 'bpf_object__for_each_map'
- 'bpf_object__for_each_program'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
- 'btree_for_each_safel'
- 'card_for_each_dev'
- 'cgroup_taskset_for_each'
- 'cgroup_taskset_for_each_leader'
- 'cpu_aggr_map__for_each_idx'
- 'cpufreq_for_each_efficient_entry_idx'
- 'cpufreq_for_each_entry'
- 'cpufreq_for_each_entry_idx'
- 'cpufreq_for_each_valid_entry'
- 'cpufreq_for_each_valid_entry_idx'
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'damon_for_each_region'
- 'damon_for_each_region_from'
- 'damon_for_each_region_safe'
- 'damon_for_each_scheme'
- 'damon_for_each_scheme_safe'
- 'damon_for_each_target'
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
- 'device_for_each_child_node'
- 'displayid_iter_for_each'
- 'dma_fence_array_for_each'
- 'dma_fence_chain_for_each'
- 'dma_fence_unwrap_for_each'
- 'dma_resv_for_each_fence'
- 'dma_resv_for_each_fence_unlocked'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_exec_for_each_locked_object'
- 'drm_exec_for_each_locked_object_reverse'
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- 'drm_gem_for_each_gpuva'
- 'drm_gem_for_each_gpuva_safe'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
- 'drm_gpuva_for_each_op_safe'
- 'drm_gpuvm_for_each_va'
- 'drm_gpuvm_for_each_va_range'
- 'drm_gpuvm_for_each_va_range_safe'
- 'drm_gpuvm_for_each_va_safe'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'dsa_switch_for_each_available_port'
- 'dsa_switch_for_each_cpu_port'
- 'dsa_switch_for_each_cpu_port_continue_reverse'
- 'dsa_switch_for_each_port'
- 'dsa_switch_for_each_port_continue_reverse'
- 'dsa_switch_for_each_port_safe'
- 'dsa_switch_for_each_user_port'
- 'dsa_tree_for_each_cpu_port'
- 'dsa_tree_for_each_user_port'
- 'dsa_tree_for_each_user_port_continue_reverse'
- 'dso__for_each_symbol'
- 'dsos__for_each_with_build_id'
- 'elf_hash_for_each_possible'
- 'elf_symtab__for_each_symbol'
- 'evlist__for_each_cpu'
- 'evlist__for_each_entry'
- 'evlist__for_each_entry_continue'
- 'evlist__for_each_entry_from'
- 'evlist__for_each_entry_reverse'
- 'evlist__for_each_entry_safe'
- 'flow_action_for_each'
- 'for_each_acpi_consumer_dev'
- 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_active_route'
- 'for_each_aggr_pgid'
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
- 'for_each_bench'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_btf_ext_rec'
- 'for_each_btf_ext_sec'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
- 'for_each_card_dapms'
- 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_card_widgets'
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
- 'for_each_clear_bitrange_from'
- 'for_each_cmd'
- 'for_each_cmsghdr'
- 'for_each_collection'
- 'for_each_comp_order'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_conduit'
- 'for_each_console'
- 'for_each_console_srcu'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_andnot'
- 'for_each_cpu_or'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
- 'for_each_dedup_cand'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_event'
- 'for_each_event_tps'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
- 'for_each_gpiochip_node'
- 'for_each_group_evsel'
- 'for_each_group_evsel_head'
- 'for_each_group_member'
- 'for_each_group_member_head'
- 'for_each_hstate'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
- 'for_each_insn_prefix'
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_lang'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_media_entity_data_link'
- 'for_each_mem_pfn_range'
- 'for_each_mem_range'
- 'for_each_mem_range_rev'
- 'for_each_mem_region'
- 'for_each_member'
- 'for_each_memory'
- 'for_each_migratetype_order'
- 'for_each_missing_reg'
- 'for_each_mle_subelement'
- 'for_each_mod_mem_type'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_net_rcu'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
- 'for_each_netdev_continue_reverse'
- 'for_each_netdev_dump'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
- 'for_each_netdev_reverse'
- 'for_each_netdev_safe'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_plane_in_state_reverse'
- 'for_each_new_private_obj_in_state'
- 'for_each_new_reg'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
- 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_numa_hop_mask'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_online_cpu'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_or_bit'
- 'for_each_path'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
- 'for_each_pcm_streams'
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_blessed_reg'
- 'for_each_present_cpu'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_probe_cache_entry'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_prop_codec_conf'
- 'for_each_prop_dai_codec'
- 'for_each_prop_dai_cpu'
- 'for_each_prop_dlc_codecs'
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_reg'
- 'for_each_reg_filtered'
- 'for_each_reloc'
- 'for_each_reloc_from'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
- 'for_each_sband_iftype_data'
- 'for_each_script'
- 'for_each_sec'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_set_bit_wrap'
- 'for_each_set_bitrange'
- 'for_each_set_bitrange_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sgtable_dma_page'
- 'for_each_sgtable_dma_sg'
- 'for_each_sgtable_page'
- 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_sta_active_link'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- 'for_each_sublist'
- 'for_each_subsystem'
- 'for_each_supported_activate_fn'
- 'for_each_supported_inject_fn'
- 'for_each_sym'
- 'for_each_test'
- 'for_each_thread'
- 'for_each_token'
- 'for_each_unicast_dest_pgid'
- 'for_each_valid_link'
- 'for_each_vif_active_link'
- 'for_each_vma'
- 'for_each_vma_range'
- 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
- 'func_for_each_insn'
- 'fwnode_for_each_available_child_node'
- 'fwnode_for_each_child_node'
- 'fwnode_for_each_parent_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
- 'genradix_for_each'
- 'genradix_for_each_from'
- 'genradix_for_each_reverse'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
- 'hash_for_each_possible_rcu_notrace'
- 'hash_for_each_possible_safe'
- 'hash_for_each_rcu'
- 'hash_for_each_safe'
- 'hashmap__for_each_entry'
- 'hashmap__for_each_entry_safe'
- 'hashmap__for_each_key_entry'
- 'hashmap__for_each_key_entry_safe'
- 'hctx_for_each_ctx'
- 'hists__for_each_format'
- 'hists__for_each_sort_list'
- 'hlist_bl_for_each_entry'
- 'hlist_bl_for_each_entry_rcu'
- 'hlist_bl_for_each_entry_safe'
- 'hlist_for_each'
- 'hlist_for_each_entry'
- 'hlist_for_each_entry_continue'
- 'hlist_for_each_entry_continue_rcu'
- 'hlist_for_each_entry_continue_rcu_bh'
- 'hlist_for_each_entry_from'
- 'hlist_for_each_entry_from_rcu'
- 'hlist_for_each_entry_rcu'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
- 'hlist_for_each_entry_srcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'interval_tree_for_each_span'
- 'intlist__for_each_entry'
- 'intlist__for_each_entry_safe'
- 'kcore_copy__for_each_phdr'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
- 'klp_for_each_func_safe'
- 'klp_for_each_func_static'
- 'klp_for_each_object'
- 'klp_for_each_object_safe'
- 'klp_for_each_object_static'
- 'kunit_suite_for_each_test_case'
- 'kvm_for_each_memslot'
- 'kvm_for_each_memslot_in_gfn_range'
- 'kvm_for_each_vcpu'
- 'libbpf_nla_for_each_attr'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
- 'list_for_each_entry_reverse'
- 'list_for_each_entry_safe'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
- 'list_for_each_entry_srcu'
- 'list_for_each_from'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_rcu'
- 'list_for_each_reverse'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
- 'lwq_for_each_safe'
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- 'maps__for_each_entry'
- 'maps__for_each_entry_safe'
- 'mas_for_each'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'media_entity_for_each_pad'
- 'media_pipeline_for_each_entity'
- 'media_pipeline_for_each_pad'
- 'mlx5_lag_for_each_peer_mdev'
- 'msi_domain_for_each_desc'
- 'msi_for_each_desc'
- 'mt_for_each'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
- 'netdev_for_each_mc_addr'
- 'netdev_for_each_synced_mc_addr'
- 'netdev_for_each_synced_uc_addr'
- 'netdev_for_each_uc_addr'
- 'netdev_for_each_upper_dev_rcu'
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
- 'nla_for_each_nested'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
- 'nr_neigh_for_each_safe'
- 'nr_node_for_each'
- 'nr_node_for_each_safe'
- 'of_for_each_phandle'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
- 'pci_dev_for_each_resource'
- 'pcl_for_each_chunk'
- 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'perf_config_items__for_each_entry'
- 'perf_config_sections__for_each_entry'
- 'perf_config_set__for_each_entry'
- 'perf_cpu_map__for_each_cpu'
- 'perf_cpu_map__for_each_idx'
- 'perf_evlist__for_each_entry'
- 'perf_evlist__for_each_entry_reverse'
- 'perf_evlist__for_each_entry_safe'
- 'perf_evlist__for_each_evsel'
- 'perf_evlist__for_each_mmap'
- 'perf_hpp_list__for_each_format'
- 'perf_hpp_list__for_each_format_safe'
- 'perf_hpp_list__for_each_sort_list'
- 'perf_hpp_list__for_each_sort_list_safe'
- 'perf_tool_event__for_each_event'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
- 'plist_for_each_entry_continue'
- 'plist_for_each_entry_safe'
- 'plist_for_each_safe'
- 'pnp_for_each_card'
- 'pnp_for_each_dev'
- 'protocol_for_each_card'
- 'protocol_for_each_dev'
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- 'resort_rb__for_each_entry'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'rq_list_for_each'
- 'rq_list_for_each_safe'
- 'sample_read_group__for_each'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
- 'sctp_for_each_hentry'
- 'sctp_skb_for_each'
- 'sec_for_each_insn'
- 'sec_for_each_insn_continue'
- 'sec_for_each_insn_from'
- 'sec_for_each_sym'
- 'shdma_for_each_chan'
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- 'sk_for_each_bound_bhash2'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
- 'sk_for_each_safe'
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'strlist__for_each_entry'
- 'strlist__for_each_entry_safe'
- 'sym_for_each_insn'
- 'sym_for_each_insn_continue_reverse'
- 'symbols__for_each_entry'
- 'tb_property_for_each'
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
- 'ttm_resource_manager_for_each_res'
- 'twsk_for_each_bound_bhash2'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
- 'v4l2_device_for_each_subdev'
- 'v4l2_m2m_for_each_dst_buf'
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'xbc_array_for_each_value'
- 'xbc_for_each_key_value'
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'xbc_node_for_each_subkey'
- 'zorro_for_each_dev'
- 'pool_iter_for_each'
- 'pool_for_each'
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
IndentGotoLabels: false
IndentPPDirectives: None
IndentWidth: 8
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 8
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
PenaltyBreakAssignment: 10
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
SortUsingDeclarations: false
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatementsExceptForEachMacros
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp03
TabWidth: 8
UseTab: Always
...
================================================
FILE: .dockerignore
================================================
build
================================================
FILE: .github/workflows/build-freebsd.yml
================================================
name: build on FreeBSD
on:
push:
branches: [ "main", "dev" ]
pull_request:
branches: [ "main", "dev" ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: apply the patch to libssh
run: |
git -C libssh fetch --all --tags --prune
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
- name: Build in FreeBSD
uses: vmactions/freebsd-vm@v1
with:
prepare: |
pkg install -y git cmake
run: |
cmake -B build -DCMAKE_BUILD_TYPE=Release
cmake --build build
build/mscp -h
================================================
FILE: .github/workflows/build-macos.yml
================================================
name: build on macOS
on:
push:
branches: [ "main", "dev" ]
pull_request:
branches: [ "main", "dev" ]
env:
# Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
BUILD_TYPE: Release
jobs:
build:
# The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.
# You can convert this to a matrix build if you need cross-platform coverage.
# See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: apply the patch to libssh
run: |
git -C libssh fetch --all --tags --prune
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
- name: install build dependency
run: ./scripts/install-build-deps.sh
- name: save homebrew prefix
id: brew-prefix
run: echo "HOMEBREW_PREFIX=$(brew --prefix)" >> $GITHUB_OUTPUT
- name: Configure CMake
# Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make.
# See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DOPENSSL_ROOT_DIR=${{steps.brew-prefix.outputs.HOMEBREW_PREFIX}}/opt/openssl@3
- name: Build
# Build your program with the given configuration
run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}
- name: Run
run: ${{github.workspace}}/build/mscp -h
================================================
FILE: .github/workflows/build-ubuntu.yml
================================================
name: build on ubuntu
on:
push:
branches: [ "main", "dev" ]
pull_request:
branches: [ "main", "dev" ]
env:
# Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
BUILD_TYPE: Release
jobs:
build:
# The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.
# You can convert this to a matrix build if you need cross-platform coverage.
# See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: apply the patch to libssh
run: |
git -C libssh fetch --all --tags --prune
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
- name: install build dependency
run: |
sudo apt-get update
sudo ./scripts/install-build-deps.sh
- name: Configure CMake
# Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make.
# See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}
- name: Build
# Build your program with the given configuration
run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}
- name: Run
run: ${{github.workspace}}/build/mscp -h
================================================
FILE: .github/workflows/codeql.yml
================================================
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "main", "dev" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "main", "dev" ]
schedule:
- cron: '35 11 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'cpp' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: true
- name: apply the patch to libssh
run: |
git -C libssh fetch --all --tags --prune
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
- name: install build dependency
run: |
sudo apt-get update
sudo ./scripts/install-build-deps.sh
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# Modify them (or add more) to build your code; if your project needs custom build steps, refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
================================================
FILE: .github/workflows/release.yml
================================================
name: release
on:
push:
tags:
- "v*.*.*"
workflow_dispatch:
env:
BUILD_TYPE: Release
jobs:
source-release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: apply the patch to libssh
run: |
git -C libssh fetch --all --tags --prune
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
- name: Set variables
run: |
VER=$(cat VERSION)
echo "VERSION=$VER" >> $GITHUB_ENV
- name: archive
run: |
cd ..
cp -r mscp mscp-${{env.VERSION}}
tar zcvf mscp-${{env.VERSION}}.tar.gz --exclude-vcs mscp-${{env.VERSION}}
- name: Release
uses: softprops/action-gh-release@v1
with:
files: |
${{github.workspace}}/../mscp-${{env.VERSION}}.tar.gz
================================================
FILE: .github/workflows/test.yml
================================================
name: test
on:
push:
branches: [ "main", "dev" ]
pull_request:
branches: [ "main", "dev" ]
env:
BUILD_TYPE: Release
jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
index: # see DIST_IDS and DIST_VERS lists in CMakeLists.txt
- ubuntu-20.04
- ubuntu-22.04
- ubuntu-24.04
- rocky-8.9
- rocky-9.3
- almalinux-9.3
- alpine-3.22
- arch-base
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: apply the patch to libssh
run: |
git -C libssh fetch --all --tags --prune
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
# TODO: just building docker images does not require libssh. fix CMakeLists
- name: install build dependency
run: |
sudo apt-get update
sudo ./scripts/install-build-deps.sh
- name: configure CMake
run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}
- name: Build Containers
run: make -C ${{github.workspace}}/build docker-build-${{ matrix.index }}
- name: Run Test
run: make -C ${{github.workspace}}/build docker-test-${{ matrix.index }}
================================================
FILE: .gitignore
================================================
build
html
compile_commands.json
CMakeUserPresets.json
.*.swp
.cache
include/mscp_version.h
================================================
FILE: .gitmodules
================================================
[submodule "libssh"]
path = libssh
url = https://git.libssh.org/projects/libssh.git
ignore = dirty
================================================
FILE: CMakeLists.txt
================================================
cmake_minimum_required(VERSION 3.13)
file (STRINGS "VERSION" MSCP_VERSION)
project(mscp
VERSION ${MSCP_VERSION}
LANGUAGES C)
find_package(Git)
if (Git_FOUND)
# based on https://github.com/nocnokneo/cmake-git-versioning-example
execute_process(
COMMAND ${GIT_EXECUTABLE} describe --tags --match "v*"
OUTPUT_VARIABLE GIT_DESCRIBE_VERSION
RESULT_VARIABLE GIT_DESCRIBE_ERROR_CODE
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT GIT_DESCRIBE_ERROR_CODE)
set(MSCP_BUILD_VERSION ${GIT_DESCRIBE_VERSION})
endif()
endif()
if (NOT MSCP_BUILD_VERSION)
message(STATUS "Failed to determine version via Git. Use VERSION file instead.")
set(MSCP_BUILD_VERSION v${MSCP_VERSION})
endif()
include(GNUInstallDirs)
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DDEBUG")
list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
if(APPLE)
list(APPEND CMAKE_PREFIX_PATH /usr/local) # intel mac homebrew prefix
list(APPEND CMAKE_PREFIX_PATH /opt/homebrew) # arm mac homebrew prefix
endif() # APPLE
option(BUILD_CONAN OFF) # Build mscp with conan
if(BUILD_CONAN)
message(STATUS "Build mscp with conan")
endif()
option(BUILD_STATIC OFF) # Build mscp with -static LD flag
if (BUILD_STATIC)
message(STATUS "Build mscp with -static LD option")
if (NOT BUILD_CONAN)
message(WARNING
"BUILD_STATIC strongly recommended with BUILD_CONAN option")
endif()
endif()
option(USE_PODMAN OFF) # use podman instead of docker
if(USE_PODMAN)
message(STATUS "Use podman instead of docker")
set(CE podman) # CE means Container Engine
else()
set(CE docker)
endif()
# add libssh static library
set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
set(WITH_SERVER OFF)
set(BUILD_SHARED_LIBS OFF)
set(WITH_EXAMPLES OFF)
set(BUILD_STATIC_LIB ON)
if(BUILD_CONAN)
message(STATUS
"Disable libssh GSSAPI support because libkrb5 doesn't exist in conan")
set(WITH_GSSAPI OFF)
endif()
add_subdirectory(libssh EXCLUDE_FROM_ALL)
# setup mscp compile options
list(APPEND MSCP_COMPILE_OPTS -iquote ${CMAKE_CURRENT_BINARY_DIR}/libssh/include)
list(APPEND MSCP_BUILD_INCLUDE_DIRS
${mscp_SOURCE_DIR}/src
${CMAKE_CURRENT_BINARY_DIR}/libssh/include)
list(APPEND MSCP_LINK_LIBS ssh-static)
if(BUILD_CONAN)
find_package(ZLIB REQUIRED)
find_package(OpenSSL REQUIRED)
list(APPEND MSCP_LINK_LIBS ZLIB::ZLIB)
list(APPEND MSCP_LINK_LIBS OpenSSL::Crypto)
endif()
# Symbol check
check_symbol_exists(htonll arpa/inet.h HAVE_HTONLL)
check_symbol_exists(ntohll arpa/inet.h HAVE_NTOHLL)
check_symbol_exists(strlcat string.h HAVE_STRLCAT)
if (NOT HAVE_STRLCAT)
list(APPEND OPENBSD_COMPAT_SRC src/openbsd-compat/strlcat.c)
endif()
# generate config.h in build dir
configure_file(
${mscp_SOURCE_DIR}/include/config.h.in
${CMAKE_CURRENT_BINARY_DIR}/include/config.h)
list(APPEND MSCP_BUILD_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}/include)
# libmscp.a
set(LIBMSCP_SRC
src/mscp.c src/ssh.c src/fileops.c src/path.c src/checkpoint.c
src/bwlimit.c src/platform.c src/print.c src/pool.c src/strerrno.c
${OPENBSD_COMPAT_SRC})
add_library(mscp-static STATIC ${LIBMSCP_SRC})
target_include_directories(mscp-static
PRIVATE ${MSCP_BUILD_INCLUDE_DIRS} ${mscp_SOURCE_DIR}/include)
target_compile_options(mscp-static PRIVATE ${MSCP_COMPILE_OPTS})
target_link_libraries(mscp-static PRIVATE ${MSCP_LINK_LIBS})
set_target_properties(mscp-static
PROPERTIES
OUTPUT_NAME mscp)
# mscp executable
list(APPEND MSCP_LINK_LIBS m pthread)
add_executable(mscp src/main.c)
target_include_directories(mscp
PRIVATE ${MSCP_BUILD_INCLUDE_DIRS} ${mscp_SOURCE_DIR}/include)
target_link_libraries(mscp mscp-static ${MSCP_LINK_LIBS})
if (BUILD_STATIC)
target_link_options(mscp PRIVATE -static)
endif()
target_compile_options(mscp PRIVATE ${MSCP_COMPILE_OPTS})
install(TARGETS mscp RUNTIME DESTINATION bin)
# mscp manpage and document
configure_file(
${mscp_SOURCE_DIR}/doc/mscp.1.in
${PROJECT_BINARY_DIR}/mscp.1)
add_custom_target(update-rst
COMMENT "Update doc/mscp.rst from mscp.1.in"
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMAND
pandoc -s -f man mscp.1 -t rst -o ${PROJECT_SOURCE_DIR}/doc/mscp.rst)
install(FILES ${PROJECT_BINARY_DIR}/mscp.1
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
# Test
add_test(NAME pytest
COMMAND python3 -m pytest -v
--mscp-path=${PROJECT_BINARY_DIR}/mscp ${PROJECT_SOURCE_DIR}/test
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
enable_testing()
# Custom targets to build and test mscp in docker containers.
# foreach(IN ZIP_LISTS) (cmake >= 3.17) can shorten the following lists.
# However, ubuntu 20.04 has cmake 3.16.3. So this is a roundabout trick.
#
# When editing DIST_IDS and DIST_VERS, also edit .github/workflows/test.yml
list(APPEND DIST_IDS ubuntu ubuntu ubuntu rocky rocky almalinux alpine arch)
list(APPEND DIST_VERS 20.04 22.04 24.04 8.9 9.3 9.3 3.22 base)
list(LENGTH DIST_IDS _DIST_LISTLEN)
math(EXPR DIST_LISTLEN "${_DIST_LISTLEN} - 1")
foreach(x RANGE ${DIST_LISTLEN})
list(GET DIST_IDS ${x} DIST_ID)
list(GET DIST_VERS ${x} DIST_VER)
set(DOCKER_IMAGE mscp-${DIST_ID}:${DIST_VER})
set(DOCKER_INDEX ${DIST_ID}-${DIST_VER})
execute_process(
COMMAND ${CMAKE_SOURCE_DIR}/scripts/install-build-deps.sh
--dont-install --platform Linux-${DIST_ID}
OUTPUT_VARIABLE REQUIREDPKGS
OUTPUT_STRIP_TRAILING_WHITESPACE)
add_custom_target(docker-build-${DOCKER_INDEX}
COMMENT "Build mscp in ${DOCKER_IMAGE} container"
WORKING_DIRECTORY ${mscp_SOURCE_DIR}
COMMAND
${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS}
-t ${DOCKER_IMAGE} -f Dockerfile/${DOCKER_INDEX}.Dockerfile .)
add_custom_target(docker-build-${DOCKER_INDEX}-no-cache
COMMENT "Build mscp in ${DOCKER_IMAGE} container"
WORKING_DIRECTORY ${mscp_SOURCE_DIR}
COMMAND
${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS} --no-cache
-t ${DOCKER_IMAGE} -f Dockerfile/${DOCKER_INDEX}.Dockerfile .)
add_custom_target(docker-test-${DOCKER_INDEX}
COMMENT "Test mscp in ${DOCKER_IMAGE} container"
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMAND
${CE} run --init --rm --privileged
--sysctl net.ipv6.conf.all.disable_ipv6=0
--add-host=ip6-localhost:::1
${DOCKER_IMAGE} /mscp/scripts/test-in-container.sh)
add_custom_target(docker-run-${DOCKER_INDEX}
COMMENT "Start ${DOCKER_IMAGE} container"
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMAND
${CE} run --init --rm --privileged
--sysctl net.ipv6.conf.all.disable_ipv6=0
--add-host=ip6-localhost:::1
-it
${DOCKER_IMAGE} /mscp/scripts/test-in-container.sh bash)
list(APPEND DOCKER_BUILDS docker-build-${DOCKER_INDEX})
list(APPEND DOCKER_BUILDS_NO_CACHE docker-build-${DOCKER_INDEX}-no-cache)
list(APPEND DOCKER_TESTS docker-test-${DOCKER_INDEX})
endforeach()
add_custom_target(docker-build-all DEPENDS ${DOCKER_BUILDS})
add_custom_target(docker-build-all-no-cache DEPENDS ${DOCKER_BUILDS_NO_CACHE})
add_custom_target(docker-test-all DEPENDS ${DOCKER_TESTS})
### debuild-related definitions
set(DEBBUILDCONTAINER mscp-build-deb)
execute_process(
COMMAND ${CMAKE_SOURCE_DIR}/scripts/install-build-deps.sh
--dont-install --platform Linux-ubuntu
OUTPUT_VARIABLE REQUIREDPKGS_DEB
OUTPUT_STRIP_TRAILING_WHITESPACE)
add_custom_target(build-deb
COMMENT "build mscp deb files inside a container"
WORKING_DIRECTORY ${mscp_SOURCE_DIR}
BYPRODUCTS ${CMAKE_BINARY_DIR}/debbuild
COMMAND
${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS_DEB}
-t ${DEBBUILDCONTAINER} -f Dockerfile/build-deb.Dockerfile .
COMMAND
${CE} run --rm -v ${CMAKE_BINARY_DIR}:/out ${DEBBUILDCONTAINER}
cp -r /debbuild /out/)
### rpmbuild-related definitions
# generate files for rpmbuild
configure_file(
${mscp_SOURCE_DIR}/rpm/mscp.spec.in
${mscp_SOURCE_DIR}/rpm/mscp.spec
@ONLY)
#configure_file(
# ${mscp_SOURCE_DIR}/Dockerfile/build-srpm.Dockerfile.in
# ${mscp_SOURCE_DIR}/Dockerfile/build-srpm.Dockerfile
# @ONLY)
# Custom target to build mscp as a src.rpm in docker.
set(RPMBUILDCONTAINER mscp-build-srpm)
execute_process(
COMMAND ${CMAKE_SOURCE_DIR}/scripts/install-build-deps.sh
--dont-install --platform Linux-rocky
OUTPUT_VARIABLE REQUIREDPKGS_RPM
OUTPUT_STRIP_TRAILING_WHITESPACE)
add_custom_target(build-srpm
COMMENT "Build mscp src.rpm inside a container"
WORKING_DIRECTORY ${mscp_SOURCE_DIR}
COMMAND
${CE} build --build-arg REQUIREDPKGS=${REQUIREDPKGS_RPM}
--build-arg MSCP_VERSION=${MSCP_VERSION}
-t ${RPMBUILDCONTAINER} -f Dockerfile/build-srpm.Dockerfile .
COMMAND
${CE} run --rm -v ${CMAKE_BINARY_DIR}:/out ${RPMBUILDCONTAINER}
bash -c "cp /root/rpmbuild/SRPMS/mscp-*.src.rpm /out/")
### single-binary-build-related definitions
# Custom target to get single binary mscp
set(SINGLEBINARYFILE mscp.linux.${CMAKE_SYSTEM_PROCESSOR}.static)
add_custom_target(build-single-binary
COMMENT "Build mscp as a single binary in alpine conatiner"
WORKING_DIRECTORY ${mscp_SOURCE_DIR}
BYPRODUCTS ${CMAKE_BINARY_DIR}/${SINGLEBINARYFILE}
DEPENDS docker-build-alpine-3.22
COMMAND
${CE} run --rm -v ${CMAKE_BINARY_DIR}:/out mscp-alpine:3.22
cp /mscp/build/mscp /out/${SINGLEBINARYFILE})
add_custom_target(build-pkg-all
DEPENDS build-deb build-srpm build-single-binary)
================================================
FILE: Dockerfile/README.md
================================================
Dockerfiles for building and testing mscp.
cmake provides custom targets to build and test mscp in the containers.
See `make docker-*` targets. `make docker-build-all` builds all
container images, and `make docker-test-all` runs the test in all
container images.
================================================
FILE: Dockerfile/almalinux-9.3.Dockerfile
================================================
FROM almalinux:9.3
ARG REQUIREDPKGS
# install pytest, sshd for test, and rpm-build
RUN set -ex && yum -y install \
${REQUIREDPKGS} python3 python3-pip python3-devel \
openssh openssh-server openssh-clients rpm-build
RUN python3 -m pip install pytest
# preparation for sshd
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
RUN rm -rf /run/nologin
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/alpine-3.22.Dockerfile
================================================
FROM alpine:3.22
# do not use the REQUIREDPKGS build argument because
# this Dockerfile compiles mscp with conan, so we do not need
# libssl-dev and zlib-dev
# Build mscp with conan to create single binary mscp
RUN apk add --no-cache \
gcc make cmake libc-dev \
linux-headers openssh bash perl \
python3 py3-pip python3-dev py3-pytest g++
RUN pip3 install --break-system-packages conan
# preparation for sshd
RUN ssh-keygen -A \
&& mkdir /var/run/sshd \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# disable PerSourcePenalties, which would disturb the test:
# https://undeadly.org/cgi?action=article;sid=20240607042157
RUN echo "PerSourcePenalties=no" > /etc/ssh/sshd_config.d/90-mscp-test.conf
# create test user
RUN addgroup -S test \
&& adduser -S test -G test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
# Build mscp as a single binary
RUN conan profile detect --force
ARG mscpdir="/mscp"
COPY . ${mscpdir}
RUN cd ${mscpdir} \
&& rm -rf build \
&& conan install . --output-folder=build --build=missing \
&& cd ${mscpdir}/build \
&& cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE=conan_toolchain.cmake \
-DBUILD_CONAN=ON -DBUILD_STATIC=ON \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/arch-base.Dockerfile
================================================
FROM archlinux:base
ARG REQUIREDPKGS
# install pytest and openssh for test
RUN set -ex && pacman -Syy && pacman --noconfirm -S ${REQUIREDPKGS} openssh python-pytest
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# disable PerSourcePenalties, which would disturb the test:
# https://undeadly.org/cgi?action=article;sid=20240607042157
RUN echo "PerSourcePenalties=no" > /etc/ssh/sshd_config.d/90-mscp-test.conf
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/build-deb.Dockerfile
================================================
FROM ubuntu:22.04
ARG REQUIREDPKGS
ARG DEBIAN_FRONTEND=noninteractive
RUN set -ex && apt-get update && apt-get install -y --no-install-recommends \
${REQUIREDPKGS} ca-certificates \
build-essential devscripts debhelper gcc make cmake
ARG mscpdir="/debbuild/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& debuild -us -uc -S \
&& mv ${mscpdir} /
# Then all debuild output files exist at /debbuild
================================================
FILE: Dockerfile/build-srpm.Dockerfile
================================================
FROM rockylinux:9
ARG REQUIREDPKGS
ARG MSCP_VERSION
# install pytest, sshd for test, and rpm-build
RUN set -ex && yum -y install ${REQUIREDPKGS} rpm-build rpmdevtools
ARG mscpdir="/mscp-${MSCP_VERSION}"
ARG mscptgz="mscp-${MSCP_VERSION}.tar.gz"
COPY . ${mscpdir}
# prepare rpmbuild
RUN rpmdev-setuptree \
&& rm -rf ${mscpdir}/build \
&& tar zcvf /${mscptgz} --exclude-vcs ${mscpdir} \
&& cp /${mscptgz} ~/rpmbuild/SOURCES/ \
&& cp ${mscpdir}/rpm/mscp.spec ~/rpmbuild/SPECS/
# build rpm and src.rpm
RUN rpmbuild -ba ~/rpmbuild/SPECS/mscp.spec
================================================
FILE: Dockerfile/rocky-8.9.Dockerfile
================================================
FROM rockylinux:8.9
ARG REQUIREDPKGS
# install pytest, sshd for test, and rpm-build
RUN set -ex && yum -y install \
${REQUIREDPKGS} \
python3 python3-pip python3-devel \
openssh openssh-server openssh-clients rpm-build
RUN python3 -m pip install pytest
# preparation for sshd
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
RUN rm -rf /run/nologin
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/rocky-9.3.Dockerfile
================================================
FROM rockylinux:9.3
ARG REQUIREDPKGS
# install pytest, sshd for test, and rpm-build
RUN set -ex && yum -y install \
${REQUIREDPKGS} \
python3 python3-pip python3-devel \
openssh openssh-server openssh-clients rpm-build
RUN python3 -m pip install pytest
# preparation for sshd
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
RUN rm -rf /run/nologin
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/ubuntu-20.04.Dockerfile
================================================
FROM ubuntu:20.04
ARG REQUIREDPKGS
ARG DEBIAN_FRONTEND=noninteractive
RUN set -ex && apt-get update && apt-get install -y --no-install-recommends \
${REQUIREDPKGS} ca-certificates python3 python3-pip python3-dev openssh-server
RUN python3 -m pip install pytest
# preparation for sshd
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/ubuntu-22.04.Dockerfile
================================================
FROM ubuntu:22.04
ARG REQUIREDPKGS
ARG DEBIAN_FRONTEND=noninteractive
RUN set -ex && apt-get update && apt-get install -y --no-install-recommends \
${REQUIREDPKGS} ca-certificates python3 python3-pip python3-dev openssh-server
RUN python3 -m pip install pytest
# preparation for sshd
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Dockerfile/ubuntu-24.04.Dockerfile
================================================
FROM ubuntu:24.04
ARG REQUIREDPKGS
ARG DEBIAN_FRONTEND=noninteractive
RUN set -ex && apt-get update && apt-get install -y --no-install-recommends \
${REQUIREDPKGS} ca-certificates openssh-server vim-tiny \
python3 python3-pip python3-dev python3-pytest
# preparation for sshd
RUN mkdir /var/run/sshd \
&& ssh-keygen -A \
&& ssh-keygen -f /root/.ssh/id_rsa -N "" \
&& cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
# create test user
RUN useradd -m -d /home/test test \
&& echo "test:userpassword" | chpasswd \
&& mkdir -p /home/test/.ssh \
&& ssh-keygen -f /home/test/.ssh/id_rsa_test -N "keypassphrase" \
&& cat /home/test/.ssh/id_rsa_test.pub >> /home/test/.ssh/authorized_keys \
&& chown -R test:test /home/test \
&& chown -R test:test /home/test/.ssh
ARG mscpdir="/mscp"
COPY . ${mscpdir}
# build
RUN cd ${mscpdir} \
&& rm -rf build \
&& cmake -B build \
&& cd ${mscpdir}/build \
&& make -j 2 \
&& make install
================================================
FILE: Doxyfile
================================================
PROJECT_NAME = libmscp
GENERATE_HTML = YES
GENERATE_CHI = NO
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
SOURCE_BROWSER = YES
INPUT = src include
================================================
FILE: LICENSE
================================================
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C) <year>  <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.  If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
Copyright (C) <year>  <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
================================================
FILE: README.md
================================================
# mscp: multi-threaded scp
[](https://github.com/upa/mscp/actions/workflows/build-ubuntu.yml)
[](https://github.com/upa/mscp/actions/workflows/build-macos.yml)
[](https://github.com/upa/mscp/actions/workflows/build-freebsd.yml)
[](https://github.com/upa/mscp/actions/workflows/test.yml)
`mscp`, a variant of `scp`, copies files over multiple SSH (SFTP)
connections by multiple threads. It enables transferring (1) multiple
files simultaneously and (2) a large file in parallel, reducing the
transfer time for a lot of/large files over networks.
You can use `mscp` like `scp`, for example:
```shell-session
$ mscp srcfile user@example.com:dstfile
```
Remote hosts only need to run standard `sshd` supporting the SFTP
subsystem (e.g. openssh-server), and you need to be able to ssh to the
hosts as usual. `mscp` does not require anything else.
https://github.com/upa/mscp/assets/184632/19230f57-be7f-4ef0-98dd-cb4c460f570d
--------------------------------------------------------------------
Major differences from `scp` on usage:
- Remote-to-remote copy is not supported.
- `-r` option is not needed to transfer directories.
- Checkpointing for resuming failed transfer is supported.
- and any other differences I have not implemented and noticed.
Paper:
- Ryo Nakamura and Yohei Kuga. 2023. Multi-threaded scp: Easy and Fast File Transfer over SSH. In Practice and Experience in Advanced Research Computing (PEARC '23). Association for Computing Machinery, New York, NY, USA, 320–323. https://doi.org/10.1145/3569951.3597582
## Install
- macOS
```console
# Homebrew
brew install upa/tap/mscp
# MacPorts
sudo port install mscp
```
- Ubuntu
```console
sudo add-apt-repository ppa:upaa/mscp
sudo apt-get install mscp
```
- RHEL-based distributions
```console
sudo dnf copr enable upaaa/mscp
sudo dnf install mscp
```
## Build
mscp depends on a patched [libssh](https://www.libssh.org/). The
patch introduces asynchronous SFTP Write, which is derived from
https://github.com/limes-datentechnik-gmbh/libssh (see [Re: SFTP Write
async](https://archive.libssh.org/libssh/2020-06/0000004.html)).
We test building mscp on Linux (Ubuntu, Rocky, Alma, and Alpine),
macOS, and FreeBSD.
```console
# clone this repository
git clone https://github.com/upa/mscp.git
cd mscp
# prepare patched libssh
git submodule update --init
patch -d libssh -p1 < patch/$(git -C libssh describe).patch
# install build dependency
bash ./scripts/install-build-deps.sh
# configure mscp
mkdir build && cd build
cmake ..
# in macOS, you may need OPENSSL_ROOT_DIR for cmake:
# cmake .. -DOPENSSL_ROOT_DIR=$(brew --prefix)/opt/openssl@3
# build
make
# install the mscp binary to CMAKE_INSTALL_PREFIX/bin (usually /usr/local/bin)
make install
```
Source tar balls (`mscp-X.X.X.tar.gz`, not `Source code`) in
[Releases page](https://github.com/upa/mscp/releases) contain the patched version
of libssh. So you can start from cmake with it.
## Documentation
[manpage](/doc/mscp.rst) is available.
================================================
FILE: VERSION
================================================
0.2.4
================================================
FILE: conanfile.txt
================================================
[requires]
zlib/1.2.11
openssl/1.1.1t
[generators]
CMakeDeps
CMakeToolchain
================================================
FILE: doc/RELEASE.md
================================================
## Build mscp as deb package
`make build-deb` produces a mscp deb package and related files. This
target builds mscp with `debuild` inside a docker container
(Dockerfile is `docker/build-deb.Dockerfile`).
```console
mkdir build && cd build && cmake ..
make build-deb
```
After that:
```console
$ ls debbuild
mscp_0.1.4.dsc mscp_0.1.4_source.buildinfo mscp_0.1.4.tar.xz
mscp_0.1.4_source.build mscp_0.1.4_source.changes
```
### To publish mscp in launchpad PPA:
1. write changes in `debian/changelog` at main branch (the date
command needed here is `date -R`)
2. switch to `ppa-focal` or `ppa-jammy` branch
3. rebase to the `main` branch and modify `debian/changes`:
* change `mscp (X.X.X) UNRELEASED;` to `mscp (X.X.X-1~RELEASENAME) RELEASENAME;`
where `RELEASENAME` is `focal` or `jammy`.
4. run `make build-deb` at the build directory and `cd debbuild`
5. sign the files with `debsign -k [GPGKEYID] mscp_X.X.X~X_source.changes`
6. upload the files with `dput ppa:upaa/mscp mscp_X.X.X~X_source.changes`
## Build mscp as (source) rpm package
`make build-srpm` produces a mscp src.rpm package. This target builds
mscp with `rpmbuild` inside a docker container (Dockerfile is
`docker/build-srpm.Dockerfile`, generated from
`build-srpm.Dockerfile.in` by cmake).
```console
mkdir build && cd build && cmake ..
make build-srpm
```
After that:
```console
$ ls *.rpm
mscp-0.1.3-1.el9.src.rpm
```
### To publish mscp in COPR:
1. update `rpm/mscp.spec.in`, the `changelog` section (the date
command needed here is `date "+%a %b %d %Y"`)
2. run `make build-srpm`
3. download `mscp-X.X.X-1.yyy.src.rpm`
4. upload the src.rpm to Build page at COPR.
## Update Document
The document is `doc/mscp.rst` (at present). When `mscp.1.in` is
modified, run `make update-rst` to make it up to date.
```console
mkdir build && cd build && cmake ..
make update-rst
```
================================================
FILE: doc/mscp.1.in
================================================
.TH MSCP 1 "@MSCP_BUILD_VERSION@" "mscp" "User Commands"
.SH NAME
mscp \- copy files over multiple SSH connections
.SH SYNOPSIS
.B mscp
.RB [ \-46vqDpdNh ]
[\c
.BI \-n \ NR_CONNECTIONS\c
]
[\c
.BI \-m \ COREMASK\c
]
[\c
.BI \-u \ MAX_STARTUPS\c
]
[\c
.BI \-I \ INTERVAL\c
]
[\c
.BI \-W \ CHECKPOINT\c
]
[\c
.BI \-R \ CHECKPOINT\c
]
[\c
.BI \-s \ MIN_CHUNK_SIZE\c
]
[\c
.BI \-S \ MAX_CHUNK_SIZE\c
]
[\c
.BI \-a \ NR_AHEAD\c
]
[\c
.BI \-b \ BUF_SIZE\c
]
[\c
.BI \-L \ LIMIT_BITRATE\c
]
[\c
.BI \-l \ LOGIN_NAME\c
]
[\c
.BI \-P \ PORT\c
]
[\c
.BI \-F \ SSH_CONFIG\c
]
[\c
.BI \-o \ SSH_OPTION\c
]
[\c
.BI \-i \ IDENTITY\c
]
[\c
.BI \-J \ DESTINATION\c
]
[\c
.BI \-c \ CIPHER\c
]
[\c
.BI \-M \ HMAC\c
]
[\c
.BI \-C \ COMPRESS\c
]
[\c
.BI \-g \ CONGESTION\c
]
.I source ... target
.SH DESCRIPTION
.PP
.B mscp
copies files over multiple SSH (SFTP) connections by multiple
threads. It enables transferring (1) multiple files simultaneously and
(2) a large file in parallel, reducing the transfer time for a lot
of/large files over networks.
.PP
The usage of
.B mscp
follows the
.B scp
command of
.I OpenSSH,
for example:
.nf
$ mscp srcfile user@example.com:dstfile
.fi
Remote hosts only need to run standard
.B sshd
supporting the SFTP subsystem, and users need to be able to
.B ssh
to the hosts as usual.
.B mscp
does not require anything else.
.PP
.B mscp
uses
.UR https://\:www\:.libssh\:.org
libssh
.UE
as its SSH implementation. Thus, supported SSH features, for example,
authentication, encryption, and various options in ssh_config, follow
what
.I libssh
supports.
.SH OPTIONS
.TP
.B \-n \fINR_CONNECTIONS\fR
Specifies the number of SSH connections. The default value is
calculated from the number of CPU cores on the host with the following
formula: floor(log(nr_cores)*2)+1.
.TP
.B \-m \fICOREMASK\fR
Configures CPU cores to be used by the hexadecimal bitmask. For
example, -m 0x25 pins threads onto CPU cores 0, 2, and 5. The default
value is not specified: all CPU cores are used and no threads are
pinned to any cores.
.TP
.B \-u \fIMAX_STARTUPS\fR
Specifies the number of concurrent unauthenticated SSH connection
attempts.
.B sshd
limits the number of simultaneous SSH connection attempts by
.I MaxStartups
in
.I sshd_config.
The default
.I MaxStartups
is 10; thus, we set the default MAX_STARTUPS to 8.
.TP
.B \-I \fIINTERVAL\fR
Specifies the interval (in seconds) between SSH connection
attempts. Some firewall products treat SSH connection attempts from a
single source IP address for a short period as a brute force attack.
This option inserts intervals between the attempts to avoid being
determined as an attack. The default value is 0.
.TP
.B \-W \fICHECKPOINT\fR
Specifies a checkpoint file to save the state of a failed
transfer. When transferring fails due to, for example, connection
disruption or user interrupt,
.B mscp
writes the information about the remaining files and chunks to the
specified checkpoint file.
.B \-W
option with
.B \-D
(dry-run mode) only writes a checkpoint file and exits.
.TP
.B \-R \fICHECKPOINT\fR
Specifies a checkpoint file to resume a transfer. When a checkpoint
file is passed,
.B mscp
reads the checkpoint to load a remote host, copy direction, and files
and their chunks to be transferred. Namely,
.B mscp
can resume a past failed transfer from the checkpoint. Resuming with a
checkpoint does not require
.I source ... target
arguments. Other SSH connection options, such as port number and
config file, should be specified as with the failed run. In addition,
checkpoint files have file paths as relative paths. Thus, you must run
.B mscp
in the same working directory as the failed run. You can see the
contents of a checkpoint file with the
.B mscp \-vv \-D \-R CHECKPOINT
command (Dry-run mode). Note that the checkpoint file is not
automatically removed after the resumed transfer ends
successfully. Users should check the return value of
.B mscp
and remove the checkpoint if it returns 0.
.TP
.B \-s \fIMIN_CHUNK_SIZE\fR
Specifies the minimum chunk size.
.B mscp
divides a single file into chunks and copies the chunks in
parallel. The default value is 16M bytes.
.TP
.B \-S \fIMAX_CHUNK_SIZE\fR
Specifies the maximum chunk size. The default is file size divided by
the number of connections and divided by 4. If the calculated value
is smaller than the
.B MIN_CHUNK_SIZE
value,
MIN_CHUNK_SIZE is used.
.TP
.B \-a \fINR_AHEAD\fR
Specifies the number of inflight SFTP commands. The default value is
32.
.TP
.B \-b \fIBUF_SIZE\fR
Specifies the buffer size for I/O and transfer over SFTP. The default
value is 16384. Note that the SSH specification restricts buffer size
delivered over SSH. Changing this value is not recommended at present.
.TP
.B \-L \fILIMIT_BITRATE\fR
Limits the bitrate, specified with k (K), m (M), and g (G), e.g., 100m
indicates 100 Mbps.
.TP
.B \-4
Uses IPv4 addresses only.
.TP
.B \-6
Uses IPv6 addresses only.
.TP
.B \-v
Increments the verbose output level.
.TP
.B \-q
Quiet mode: turns off all outputs.
.TP
.B \-D
Dry-run mode: it scans source files to be copied, calculates chunks,
resolves destination file paths, and exits. Dry-run mode with
.B -vv
option can confirm files to be copied and their destination paths.
.TP
.B \-r
No effect.
.B mscp
copies recursively if a source path is a directory. This option exists
for just compatibility.
.TP
.B \-l \fILOGIN_NAME\fR
Specifies the username to log in on the remote machine as with
.I ssh(1).
.TP
.B \-P \fIPORT\fR
Specifies the port number to connect to on the remote machine as with
.I scp(1).
.TP
.B \-F \fISSH_CONFIG\fR
Specifies an alternative per-user ssh configuration file. Note that
acceptable options in the configuration file are what
.I libssh
supports.
.TP
.B \-o \fISSH_OPTION\fR
Specifies ssh options in the format used in ssh_config. Note that
acceptable options are what
.I libssh
supports.
.TP
.B \-i \fIIDENTITY\fR
Specifies the identity file for public key authentication.
.TP
.B \-J \fIDESTINATION\fR
A shortcut to define a
.B ProxyJump
configuration directive. Each SFTP session of
.B mscp
connects to the target host by first making an
.B ssh
connection to the jump host described by
.I destination.
.TP
.B \-c \fICIPHER\fR
Selects the cipher to use for encrypting the data transfer. See
.B mscp -h
or
.B Ciphers
in
.UR https://\:www\:.libssh\:.org/\:features/
libssh features
.UE .
.TP
.B \-M \fIHMAC\fR
Specifies MAC hash algorithms. See
.B mscp -h
or
.B MAC hashes
in
.UR https://\:www\:.libssh\:.org/\:features/
libssh features
.UE .
.TP
.B \-C \fICOMPRESS\fR
Enables compression: yes, no, zlib, zlib@openssh.com. The default is
none. See
.UR https://\:www\:.libssh\:.org/\:features/
libssh features
.UE .
.TP
.B \-g \fICONGESTION\fR
Specifies the TCP congestion control algorithm to use (Linux only).
See
.B sysctl net.ipv4.tcp_allowed_congestion_control
for available values.
.TP
.B \-p
Preserves modification times and access times (file mode bits are
preserved by default).
.TP
.B \-d
Increments the ssh debug output level.
.TP
.B \-N
Enables Nagle's algorithm. It is disabled by default.
.TP
.B \-h
Prints help.
.SH EXIT STATUS
Exit status is 0 on success, and >0 if an error occurs.
.SH ENVIRONMENT
.PP
.B mscp
recognizes the following environment variables.
.TP
.B MSCP_SSH_AUTH_PASSWORD
This environment variable passes a password for password
authentication to establish SSH connections.
.TP
.B MSCP_SSH_AUTH_PASSPHRASE
This environment variable passes a passphrase for public-key
authentication for establishing SSH connections.
.SH NOTES
.PP
.B mscp
uses glob(3) for globbing pathnames, including matching patterns for
local and remote paths. However, globbing on the
.I remote
side does not work with musl libc (used in Alpine Linux and the
single-binary version of mscp) because musl libc does not support
GLOB_ALTDIRFUNC.
.PP
.B mscp
does not support remote-to-remote copy, which
.B scp
supports.
.SH EXAMPLES
.PP
Copy a local file to a remote host with different name:
.nf
$ mscp ~/src-file 10.0.0.1:copied-file
.fi
.PP
Copy a local file and a directory to /tmp at a remote host:
.nf
$ mscp ~/src-file dir1 10.0.0.1:/tmp
.fi
.PP
Save a checkpoint if transfer fails:
.nf
$ mscp -W mscp.checkpoint many-large-files 10.0.0.1:dst/
.fi
.PP
Check the remaining files and chunks, and resume the failed transfer:
.nf
# Dump the content of a checkpoint and exit (dry-run mode)
$ mscp -vv -D -R mscp.checkpoint
# resume transferring from the checkpoint
$ mscp -R mscp.checkpoint
.fi
.PP
In a long fat network, the following options might improve performance:
.nf
$ mscp -n 64 -m 0xffff -a 64 -c aes128-gcm@openssh.com src 10.0.0.1:
.fi
.B -n
increases the number of SSH connections than default,
.B -m
pins threads to specific CPU cores,
.B -a
increases asynchronous inflight SFTP WRITE/READ commands, and
.B -c aes128-gcm@openssh.com
will be faster than the default chacha20-poly1305 cipher, particularly
on hosts that support AES-NI.
.SH "SEE ALSO"
.BR scp (1),
.BR ssh (1),
.BR sshd (8).
.SH "PAPER REFERENCE"
Ryo Nakamura and Yohei Kuga. 2023. Multi-threaded scp: Easy and Fast
File Transfer over SSH. In Practice and Experience in Advanced
Research Computing (PEARC '23). Association for Computing Machinery,
New York, NY, USA, 320–323.
.UR https://\:doi\:.org/\:10.1145/\:3569951.3597582
DOI
.UE .
.SH CONTACT INFORMATION
.PP
For patches, bug reports, or feature requests, please open an issue on
.UR https://\:github\:.com/\:upa/\:mscp
GitHub
.UE .
.SH AUTHORS
Ryo Nakamura
================================================
FILE: doc/mscp.rst
================================================
====
MSCP
====
:Date: v0.2.4
NAME
====
mscp - copy files over multiple SSH connections
SYNOPSIS
========
**mscp** [**-46vqDpdNh**] [ **-n** *NR_CONNECTIONS* ] [ **-m**
*COREMASK* ] [ **-u** *MAX_STARTUPS* ] [ **-I** *INTERVAL* ] [ **-W**
*CHECKPOINT* ] [ **-R** *CHECKPOINT* ] [ **-s** *MIN_CHUNK_SIZE* ] [
**-S** *MAX_CHUNK_SIZE* ] [ **-a** *NR_AHEAD* ] [ **-b** *BUF_SIZE* ] [
**-L** *LIMIT_BITRATE* ] [ **-l** *LOGIN_NAME* ] [ **-P** *PORT* ] [
**-F** *SSH_CONFIG* ] [ **-o** *SSH_OPTION* ] [ **-i** *IDENTITY* ] [
**-J** *DESTINATION* ] [ **-c** *CIPHER* ] [ **-M** *HMAC* ] [ **-C**
*COMPRESS* ] [ **-g** *CONGESTION* ] *source ... target*
DESCRIPTION
===========
**mscp** copies files over multiple SSH (SFTP) connections by multiple
threads. It enables transferring (1) multiple files simultaneously and
(2) a large file in parallel, reducing the transfer time for a lot
of/large files over networks.
The usage of **mscp** follows the **scp** command of *OpenSSH,* for
example:
::
$ mscp srcfile user@example.com:dstfile
Remote hosts only need to run standard **sshd** supporting the SFTP
subsystem, and users need to be able to **ssh** to the hosts as usual.
**mscp** does not require anything else.
**mscp** uses `libssh <https://www.libssh.org>`__ as its SSH
implementation. Thus, supported SSH features, for example,
authentication, encryption, and various options in ssh_config, follow
what *libssh* supports.
OPTIONS
=======
**-n NR_CONNECTIONS**
Specifies the number of SSH connections. The default value is
calculated from the number of CPU cores on the host with the
following formula: floor(log(nr_cores)*2)+1.
**-m COREMASK**
Configures CPU cores to be used by the hexadecimal bitmask. For
example, -m 0x25 pins threads onto CPU cores 0, 2, and 5. The default
value is not specified: all CPU cores are used and no threads are
pinned to any cores.
**-u MAX_STARTUPS**
Specifies the number of concurrent unauthenticated SSH connection
attempts. **sshd** limits the number of simultaneous SSH connection
attempts by *MaxStartups* in *sshd_config.* The default *MaxStartups*
is 10; thus, we set the default MAX_STARTUPS 8.
**-I INTERVAL**
Specifies the interval (in seconds) between SSH connection attempts.
Some firewall products treat SSH connection attempts from a single
source IP address for a short period as a brute force attack. This
option inserts intervals between the attempts to avoid being
determined as an attack. The default value is 0.
**-W CHECKPOINT**
Specifies a checkpoint file to save the state of a failed transfer.
When transferring fails due to, for example, connection disruption or
user interrupt, **mscp** writes the information about the remaining
files and chunks to the specified checkpoint file. **-W** option with
**-D** (dry-run mode) only writes a checkpoint file and exits.
**-R CHECKPOINT**
Specifies a checkpoint file to resume a transfer. When a checkpoint
file is passed, **mscp** reads the checkpoint to load a remote host,
copy direction, and files and their chunks to be transferred. Namely,
**mscp** can resume a past failed transfer from the checkpoint.
Resuming with a checkpoint does not require *source ... target*
arguments. Other SSH connection options, such as port number and
config file, should be specified as with the failed run. In addition,
checkpoint files have file paths as relative paths. Thus, you must
run **mscp** in the same working directory as the failed run. You can
see the contents of a checkpoint file with the **mscp -vv -D -R
CHECKPOINT** command (Dry-run mode). Note that the checkpoint file is
not automatically removed after the resumed transfer ends
successfully. Users should check the return value of **mscp** and
remove the checkpoint if it returns 0.
**-s MIN_CHUNK_SIZE**
Specifies the minimum chunk size. **mscp** divides a single file into
chunks and copies the chunks in parallel. The default value is 16M
bytes.
**-S MAX_CHUNK_SIZE**
Specifies the maximum chunk size. The default is file size divided by
the number of connections and divided by 4. If the calculated value
is smaller than the **MIN_CHUNK_SIZE** value, MIN_CHUNK_SIZE is
used.
**-a NR_AHEAD**
Specifies the number of inflight SFTP commands. The default value is
32.
**-b BUF_SIZE**
Specifies the buffer size for I/O and transfer over SFTP. The default
value is 16384. Note that the SSH specification restricts buffer size
delivered over SSH. Changing this value is not recommended at
present.
**-L LIMIT_BITRATE**
Limits the bitrate, specified with k (K), m (M), and g (G), e.g.,
100m indicates 100 Mbps.
**-4**
Uses IPv4 addresses only.
**-6**
Uses IPv6 addresses only.
**-v**
Increments the verbose output level.
**-q**
Quiet mode: turns off all outputs.
**-D**
Dry-run mode: it scans source files to be copied, calculates chunks,
resolves destination file paths, and exits. Dry-run mode with **-vv**
option can confirm files to be copied and their destination paths.
**-r**
No effect. **mscp** copies recursively if a source path is a
directory. This option exists just for compatibility.
**-l LOGIN_NAME**
Specifies the username to log in on the remote machine as with
*ssh(1).*
**-P PORT**
Specifies the port number to connect to on the remote machine as with
*scp(1).*
**-F SSH_CONFIG**
Specifies an alternative per-user ssh configuration file. Note that
acceptable options in the configuration file are what *libssh*
supports.
**-o SSH_OPTION**
Specifies ssh options in the format used in ssh_config. Note that
acceptable options are what *libssh* supports.
**-i IDENTITY**
Specifies the identity file for public key authentication.
**-J DESTINATION**
A shortcut to define a **ProxyJump** configuration directive. Each
SFTP session of **mscp** connects to the target host by first making
an **ssh** connection to the jump host described by *destination.*
**-c CIPHER**
Selects the cipher to use for encrypting the data transfer. See
**mscp -h** or **Ciphers** in `libssh
features <https://www.libssh.org/features/>`__.
**-M HMAC**
Specifies MAC hash algorithms. See **mscp -h** or **MAC hashes** in
`libssh features <https://www.libssh.org/features/>`__.
**-C COMPRESS**
Enables compression: yes, no, zlib, zlib@openssh.com. The default is
none. See `libssh features <https://www.libssh.org/features/>`__.
**-g CONGESTION**
Specifies the TCP congestion control algorithm to use (Linux only).
See **sysctl net.ipv4.tcp_allowed_congestion_control** for available
values.
**-p**
Preserves modification times and access times (file mode bits are
preserved by default).
**-d**
Increments the ssh debug output level.
**-N**
Enables Nagle's algorithm. It is disabled by default.
**-h**
Prints help.
EXIT STATUS
===========
Exit status is 0 on success, and >0 if an error occurs.
ENVIRONMENT
===========
**mscp** recognizes the following environment variables.
**MSCP_SSH_AUTH_PASSWORD**
This environment variable passes a password for password
authentication to establish SSH connections.
**MSCP_SSH_AUTH_PASSPHRASE**
This environment variable passes a passphrase for public-key
authentication for establishing SSH connections.
NOTES
=====
**mscp** uses glob(3) for globbing pathnames, including matching
patterns for local and remote paths. However, globbing on the *remote*
side does not work with musl libc (used in Alpine Linux and the
single-binary version of mscp) because musl libc does not support
GLOB_ALTDIRFUNC.
**mscp** does not support remote-to-remote copy, which **scp** supports.
EXAMPLES
========
Copy a local file to a remote host with different name:
::
$ mscp ~/src-file 10.0.0.1:copied-file
Copy a local file and a directory to /tmp at a remote host:
::
$ mscp ~/src-file dir1 10.0.0.1:/tmp
Save a checkpoint if transfer fails:
::
$ mscp -W mscp.checkpoint many-large-files 10.0.0.1:dst/
Check the remaining files and chunks, and resume the failed transfer:
::
# Dump the content of a checkpoint and exit (dry-run mode)
$ mscp -vv -D -R mscp.checkpoint
# resume transferring from the checkpoint
$ mscp -R mscp.checkpoint
In a long fat network, the following options might improve performance:
::
$ mscp -n 64 -m 0xffff -a 64 -c aes128-gcm@openssh.com src 10.0.0.1:
**-n** increases the number of SSH connections than default, **-m** pins
threads to specific CPU cores, **-a** increases asynchronous inflight
SFTP WRITE/READ commands, and **-c aes128-gcm@openssh.com** will be
faster than the default chacha20-poly1305 cipher, particularly on hosts
that support AES-NI.
SEE ALSO
========
**scp**\ (1), **ssh**\ (1), **sshd**\ (8).
PAPER REFERENCE
===============
Ryo Nakamura and Yohei Kuga. 2023. Multi-threaded scp: Easy and Fast
File Transfer over SSH. In Practice and Experience in Advanced Research
Computing (PEARC '23). Association for Computing Machinery, New York,
NY, USA, 320–323. `DOI <https://doi.org/10.1145/3569951.3597582>`__.
CONTACT INFORMATION
===================
For patches, bug reports, or feature requests, please open an issue on
`GitHub <https://github.com/upa/mscp>`__.
AUTHORS
=======
Ryo Nakamura
================================================
FILE: include/config.h.in
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _CONFIG_H_
#define _CONFIG_H_
#define MSCP_VERSION "@MSCP_VERSION@"
#define MSCP_BUILD_VERSION "@MSCP_BUILD_VERSION@"
/* Define to 1 if you have the strlcat function. */
#cmakedefine HAVE_STRLCAT 1
/* Define to 1 if you have the htonll function. */
#cmakedefine HAVE_HTONLL 1
/* Define to 1 if you have the ntohll function. */
#cmakedefine HAVE_NTOHLL 1
#endif /* _CONFIG_H_ */
================================================
FILE: include/mscp.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _MSCP_H_
#define _MSCP_H_
/**
* @file mscp.h
*
* @brief mscp library header file.
*
* @mainpage
*
* libmscp is a library for multi-threaded scp. Project page is
* https://github.com/upa/mscp.
*
* All public APIs of libmscp are defined in mscp.h. Basic usage of
* libmscp is as follows:
*
* 1. create mscp instance with mscp_init()
* 2. set remote host and copy direction with mscp_set_remote()
* 3. connect to remote host with mscp_connect()
* 4. add path to source files with mscp_add_src_path()
* 5. set path to destination with mscp_set_dst_path()
* 6. start to scan source files with mscp_scan()
* 7. start copy with mscp_start()
* 8. wait for copy finished with mscp_join()
* 9. cleanup mscp instance with mscp_cleanup() and mscp_free()
*/
#include
#include
#include
#define MSCP_DIRECTION_L2R 1 /** Indicates local to remote copy */
#define MSCP_DIRECTION_R2L 2 /** Indicates remote to local copy */
/**
* @struct mscp_opts
* @brief Structure configuring mscp.
*/
struct mscp_opts {
int nr_threads; /** number of copy threads */
int nr_ahead; /** number of SFTP commands on-the-fly */
size_t min_chunk_sz; /** minimum chunk size (default 64MB) */
size_t max_chunk_sz; /** maximum chunk size (default file size/nr_threads) */
size_t buf_sz; /** buffer size, default 16k. */
size_t bitrate; /** bits-per-second to limit bandwidth */
char *coremask; /** hex to specify usable cpu cores */
int max_startups; /** sshd MaxStartups concurrent connections */
int interval; /** interval between SSH connection attempts */
bool preserve_ts; /** preserve file timestamps */
int severity; /** messaging severity. set MSCP_SEVERITY_* */
};
/**
* @struct mscp_ssh_opts
* @brief Structure configuring SSH connections
*/
struct mscp_ssh_opts {
/* ssh options */
char *login_name; /** ssh username */
char *port; /** ssh port */
int ai_family; /** address family */
char *config; /** path to ssh_config, default ~/.ssh/config */
char **options; /** array of ssh_config options, terminated by NULL */
char *identity; /** path to private key */
char *proxyjump; /** ProxyJump configuration directive (shortcut) */
char *cipher; /** cipher spec */
char *hmac; /** hmac spec */
char *compress; /** yes, no, zlib@openssh.com */
char *ccalgo; /** TCP congestion control algorithm */
char *password; /** password for password authentication */
char *passphrase; /** passphrase for private key */
int debug_level; /** increment libssh debug output level */
bool enable_nagle; /** enable Nagle's algorithm if true */
};
/** @def
* Environment variable that passes password for ssh password auth
*/
#define ENV_SSH_AUTH_PASSWORD "MSCP_SSH_AUTH_PASSWORD"
/** @def
* Environment variable that passes passphrase for private key
*/
#define ENV_SSH_AUTH_PASSPHRASE "MSCP_SSH_AUTH_PASSPHRASE"
/**
* @struct mscp_stats
* @brief Structure to get mscp statistics
*/
struct mscp_stats {
size_t total; /** total bytes to be transferred */
size_t done; /** total bytes transferred */
};
/** Structure representing mscp instance */
struct mscp;
/**
* @brief Creates a new mscp instance.
*
* @param o options for configuring mscp.
* @param s options for configuring ssh connections.
*
* @return A new mscp instance or NULL on error.
*/
struct mscp *mscp_init(struct mscp_opts *o, struct mscp_ssh_opts *s);
/**
* @brief Set remote host and copy direction.
*
* @param remote_host remote host for file transfer.
* @param direction copy direction, `MSCP_DIRECTION_L2R` or `MSCP_DIRECTION_R2L`
*
* @return 0 on success, < 0 if an error occurred.
*/
int mscp_set_remote(struct mscp *m, const char *remote_host, int direction);
/**
* @brief Connect the first SSH connection. mscp_connect connects to
* remote host and initialize a SFTP session over the
* connection. mscp_scan() and mscp_start() require mscp_connect()
* beforehand.
*
* @param m mscp instance.
*
* @return 0 on success, < 0 if an error occured.
*/
int mscp_connect(struct mscp *m);
/* add a source file path to be copied */
/**
* @brief Add a source file path to be copied. The path indicates
* either a file or directory.
*
* @param m mscp instance.
* @param src_path source file path to be copied.
*
* @return 0 on success, < 0 if an error occured.
*/
int mscp_add_src_path(struct mscp *m, const char *src_path);
/**
* @brief Set the destination file path. The path indicates either a
* file, directory, or nonexistent path.
*
* @param m mscp instance.
* @param dst_path destination path to which source files copied.
*
* @return 0 on success, < 0 if an error occured.
*/
int mscp_set_dst_path(struct mscp *m, const char *dst_path);
/* scan source files, resolve destination file paths for all source
* files, and calculate chunks for all files. */
/**
* @brief Scan source paths and prepare. This function checks all
* source files (recursively), resolve paths on the destination side,
* and calculate file chunks. This function is non-blocking.
*
* @param m mscp instance.
*
* @return 0 on success, < 0 if an error occured.
*/
int mscp_scan(struct mscp *m);
/**
* @brief Join scan thread invoked by mscp_scan() if it
* runs. mscp_join() involves mscp_scan_join(). Thus, there is no need
* to call this function alone.
*
* @param m mscp instance.
* @return 0 on success, < 0 if an error occured.
*/
int mscp_scan_join(struct mscp *m);
/**
* @brief get information about remote host and copy direction from a
* checkpoint file specified by *pathname. This function returns the
* remote host name into *remote, and the copy direction into *dir.
* Thus, you can call mscp_init with those values.
*
* @param pathname path to a checkpoint file.
* @param remote char buffer to which remote hostname is stored.
* @param len length of *remote.
* @param dir int to which the copy direction is stored.
*/
int mscp_checkpoint_get_remote(const char *pathname, char *remote, size_t len, int *dir);
/**
* @brief load information about untransferred files and chunks at the
* last transfer . mscp_checkpoint_load() loads files and associated
* chunks from the checkpoint file pointed by pathname. If you call
* mscp_checkpoint_load(), do not call mscp_scan().
*
* @param m mscp instance.
* @param pathname path to a checkpoint file.
* @return 0 on success, < 0 if an error occured.
*/
int mscp_checkpoint_load(struct mscp *m, const char *pathname);
/**
* @brief save information about untransferred files and chunks to a
* checkpoint file.
*
* @param m mscp instance.
* @param pathname path to a checkpoint file.
* @return 0 on success, < 0 if an error occured.
*/
int mscp_checkpoint_save(struct mscp *m, const char *pathname);
/**
* @brief Start to copy files. mscp_start() returns immediately. You
* can get statistics via mscp_get_stats() or messages via pipe set by
* mscp_opts.msg_fd or mscp_set_msg_fd(). mscp_stop() cancels mscp
* copy threads, and mscp_join() joins the threads.
*
* @param m mscp instance.
*
* @return number of threads on success, < 0 if an error occured.
*
* @see mscp_join()
*/
int mscp_start(struct mscp *m);
/**
* @brief Stop copying files.
*
* @param m mscp instance.
*/
void mscp_stop(struct mscp *m);
/**
* @brief Join copy threads. This function is blocking until all copy
* have done.
*
* @param m mscp instance.
*
* @return 0 on success, < 0 if an error occured.
*/
int mscp_join(struct mscp *m);
/**
* @brief Get statistics of copy.
*
* @param m mscp instance.
* @param s[out] statistics.
*/
void mscp_get_stats(struct mscp *m, struct mscp_stats *s);
/**
* @brief Cleanup the mscp instance. Before calling mscp_cleanup(),
* must call mscp_join(). After mscp_cleanup() called, the mscp
* instance can restart from mscp_connect(). Note that do not call
* mscp_cleanup() before calling mscp_join(). It causes crash (ToDo:
* check status of copy threads and return error when they are
* running).
*
* @param m mscp instance.
*/
void mscp_cleanup(struct mscp *m);
/**
* @brief Release the mscp instance. Note that do not call
* mscp_free() before calling mscp_join(). It causes crash (ToDo: check
* status of copy threads and return error when they are running).
*
* @param m mscp instance.
*/
void mscp_free(struct mscp *m);
/* messaging with mscp */
/**
* @enum mscp_severity
* @brief Filter messages from libmscp with severity level.
*/
enum {
MSCP_SEVERITY_NONE = -1,
MSCP_SEVERITY_ERR = 0,
MSCP_SEVERITY_WARN = 1,
MSCP_SEVERITY_NOTICE = 2,
MSCP_SEVERITY_INFO = 3,
MSCP_SEVERITY_DEBUG = 4,
};
/**
* @brief Return available ciphers.
*/
const char **mscp_ssh_ciphers(void);
/**
* @brief Return available hmacs.
*/
const char **mscp_ssh_hmacs(void);
#endif /* _MSCP_H_ */
================================================
FILE: patch/README.md
================================================
Patches in this directory introduce enhancements for libssh including
`sftp_async_write()` and `sftp_async_write_end()`, derived from
https://github.com/limes-datentechnik-gmbh/libssh. See [Re: SFTP Write
async](https://archive.libssh.org/libssh/2020-06/0000004.html).
================================================
FILE: patch/libssh-0.10.4.patch
================================================
diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake
index 7103f303..c64eb39d 100644
--- a/ConfigureChecks.cmake
+++ b/ConfigureChecks.cmake
@@ -258,6 +258,7 @@ if (UNIX)
check_library_exists(util forkpty "" HAVE_LIBUTIL)
check_function_exists(cfmakeraw HAVE_CFMAKERAW)
check_function_exists(__strtoull HAVE___STRTOULL)
+ check_symbol_exists(TCP_CONGESTION "netinet/tcp.h" HAVE_TCP_CONGESTION)
endif (UNIX)
set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL "libssh required system libraries")
diff --git a/config.h.cmake b/config.h.cmake
index 1357615b..1e915ead 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -237,6 +237,8 @@
#cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1
+#cmakedefine HAVE_TCP_CONGESTION 1
+
/* Define to 1 if you want to enable GSSAPI */
#cmakedefine WITH_GSSAPI 1
diff --git a/include/libssh/buffer.h b/include/libssh/buffer.h
index a55a1b40..e34e075c 100644
--- a/include/libssh/buffer.h
+++ b/include/libssh/buffer.h
@@ -33,6 +33,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);
int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);
int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);
int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);
+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,
+ void *userdata);
int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);
diff --git a/include/libssh/libssh.h b/include/libssh/libssh.h
index 7857a77b..6b4d481c 100644
--- a/include/libssh/libssh.h
+++ b/include/libssh/libssh.h
@@ -402,6 +402,7 @@ enum ssh_options_e {
SSH_OPTIONS_GSSAPI_AUTH,
SSH_OPTIONS_GLOBAL_KNOWNHOSTS,
SSH_OPTIONS_NODELAY,
+ SSH_OPTIONS_CCALGO,
SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,
SSH_OPTIONS_PROCESS_CONFIG,
SSH_OPTIONS_REKEY_DATA,
@@ -833,6 +834,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);
LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);
LIBSSH_API ssh_buffer ssh_buffer_new(void);
+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);
LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);
#define SSH_BUFFER_FREE(x) \
do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)
@@ -843,6 +845,8 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);
LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);
LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);
+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);
+
#ifndef LIBSSH_LEGACY_0_4
#include "libssh/legacy.h"
#endif
diff --git a/include/libssh/session.h b/include/libssh/session.h
index d3e5787c..15183d1b 100644
--- a/include/libssh/session.h
+++ b/include/libssh/session.h
@@ -232,6 +232,7 @@ struct ssh_session_struct {
int gss_delegate_creds;
int flags;
int nodelay;
+ char *ccalgo;
bool config_processed;
uint8_t options_seen[SOC_MAX];
uint64_t rekey_data;
diff --git a/include/libssh/sftp.h b/include/libssh/sftp.h
index c855df8a..0fcdb9b8 100644
--- a/include/libssh/sftp.h
+++ b/include/libssh/sftp.h
@@ -565,6 +565,10 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_
*/
LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);
+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,
+ void *userdata, uint32_t* id);
+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);
+
/**
* @brief Seek to a specific location in a file.
*
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c090fef7..e2f86309 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -435,6 +435,11 @@ if (BUILD_STATIC_LIB)
if (WIN32)
target_compile_definitions(ssh-static PUBLIC "LIBSSH_STATIC")
endif (WIN32)
+
+ install(TARGETS ssh-static
+ EXPORT libssh-config
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT libraries)
endif (BUILD_STATIC_LIB)
message(STATUS "Threads_FOUND=${Threads_FOUND}")
diff --git a/src/buffer.c b/src/buffer.c
index e0068015..cc0caf35 100644
--- a/src/buffer.c
+++ b/src/buffer.c
@@ -141,6 +141,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)
return buf;
}
+/**
+ * @brief Create a new SSH buffer with a specified size and headroom.
+ *
+ * @param[in] len length for newly initialized SSH buffer.
+ * @param[in] headroom length for headroom
+ * @return A newly initialized SSH buffer, NULL on error.
+ */
+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)
+{
+ struct ssh_buffer_struct *buf = NULL;
+ int rc;
+
+ if (len < headroom)
+ return NULL;
+
+ buf = calloc(1, sizeof(struct ssh_buffer_struct));
+ if (buf == NULL) {
+ return NULL;
+ }
+
+ rc = ssh_buffer_allocate_size(buf, len);
+ if (rc != 0) {
+ SAFE_FREE(buf);
+ return NULL;
+ }
+
+ buf->pos += headroom;
+ buf->used += headroom;
+
+ buffer_verify(buf);
+
+ return buf;
+}
+
/**
* @brief Deallocate a SSH buffer.
*
@@ -328,6 +362,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint
return 0;
}
+/**
+ * @brief Add data at the tail of a buffer by an external function
+ *
+ * @param[in] buffer The buffer to add data.
+ *
+ * @param[in] f function that adds data to the buffer.
+ *
+ * @param[in] max_bytes The maximum length of the data to add.
+ *
+ * @return actual bytes added on success, < 0 on error.
+ */
+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,
+ size_t max_bytes, void *userdata)
+{
+ ssize_t actual;
+
+ if (buffer == NULL) {
+ return -1;
+ }
+
+ buffer_verify(buffer);
+
+ if (buffer->used + max_bytes < max_bytes) {
+ return -1;
+ }
+
+ if (buffer->allocated < (buffer->used + max_bytes)) {
+ if (buffer->pos > 0) {
+ buffer_shift(buffer);
+ }
+ if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {
+ return -1;
+ }
+ }
+
+ if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)
+ return -1;
+
+ buffer->used += actual;
+ buffer_verify(buffer);
+ return actual;
+}
+
/**
* @brief Ensure the buffer has at least a certain preallocated size.
*
diff --git a/src/connect.c b/src/connect.c
index 57e37e63..c02397d5 100644
--- a/src/connect.c
+++ b/src/connect.c
@@ -156,6 +156,20 @@ static int set_tcp_nodelay(socket_t socket)
sizeof(opt));
}
+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)
+{
+#ifdef HAVE_TCP_CONGESTION
+ return setsockopt(socket,
+ IPPROTO_TCP,
+ TCP_CONGESTION,
+ (void *)ccalgo,
+ strlen(ccalgo));
+#else
+ errno = ENOTSUP;
+ return -1;
+#endif
+}
+
/**
* @internal
*
@@ -256,6 +270,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
}
}
+ if (session->opts.ccalgo) {
+ rc = set_tcp_ccalgo(s, session->opts.ccalgo);
+ if (rc < 0) {
+ ssh_set_error(session, SSH_FATAL,
+ "Failed to set TCP_CONGESTION on socket: %s",
+ ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));
+ ssh_connect_socket_close(s);
+ s = -1;
+ continue;
+ }
+ }
+
errno = 0;
rc = connect(s, itr->ai_addr, itr->ai_addrlen);
if (rc == -1 && (errno != 0) && (errno != EINPROGRESS)) {
diff --git a/src/options.c b/src/options.c
index 49aaefa2..9f7360c3 100644
--- a/src/options.c
+++ b/src/options.c
@@ -210,6 +210,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)
new->opts.gss_delegate_creds = src->opts.gss_delegate_creds;
new->opts.flags = src->opts.flags;
new->opts.nodelay = src->opts.nodelay;
+ new->opts.ccalgo = src->opts.ccalgo;
new->opts.config_processed = src->opts.config_processed;
new->common.log_verbosity = src->common.log_verbosity;
new->common.callbacks = src->common.callbacks;
@@ -450,6 +451,10 @@ int ssh_options_set_algo(ssh_session session,
* Set it to disable Nagle's Algorithm (TCP_NODELAY) on the
* session socket. (int, 0=false)
*
+ * - SSH_OPTIONS_CCALGO
+ * Set it to specify TCP congestion control algorithm on the
+ * session socket (Linux only). (int, 0=false)
+ *
* - SSH_OPTIONS_PROCESS_CONFIG
* Set it to false to disable automatic processing of per-user
* and system-wide OpenSSH configuration files. LibSSH
@@ -1013,6 +1018,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,
session->opts.nodelay = (*x & 0xff) > 0 ? 1 : 0;
}
break;
+ case SSH_OPTIONS_CCALGO:
+ v = value;
+ if (v == NULL || v[0] == '\0') {
+ ssh_set_error_invalid(session);
+ return -1;
+ } else {
+ SAFE_FREE(session->opts.ccalgo);
+ session->opts.ccalgo = strdup(v);
+ if (session->opts.ccalgo == NULL) {
+ ssh_set_error_oom(session);
+ return -1;
+ }
+ }
+ break;
case SSH_OPTIONS_PROCESS_CONFIG:
if (value == NULL) {
ssh_set_error_invalid(session);
diff --git a/src/session.c b/src/session.c
index 6025c133..6b197526 100644
--- a/src/session.c
+++ b/src/session.c
@@ -108,6 +108,7 @@ ssh_session ssh_new(void)
session->opts.fd = -1;
session->opts.compressionlevel = 7;
session->opts.nodelay = 0;
+ session->opts.ccalgo = NULL;
session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH |
SSH_OPT_FLAG_PUBKEY_AUTH |
diff --git a/src/sftp.c b/src/sftp.c
index e01012a8..702623a0 100644
--- a/src/sftp.c
+++ b/src/sftp.c
@@ -2228,6 +2228,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {
return -1; /* not reached */
}
+/*
+ * sftp_async_write is based on and sftp_async_write_end is copied from
+ * https://github.com/limes-datentechnik-gmbh/libssh
+ *
+ * sftp_async_write has some optimizations:
+ * - use ssh_buffer_new_size() to reduce realoc_buffer.
+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.
+ */
+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,
+ uint32_t* id) {
+ sftp_session sftp = file->sftp;
+ ssh_buffer buffer;
+ uint32_t buf_sz;
+ ssize_t actual;
+ int len;
+ int packetlen;
+ int rc;
+
+#define HEADROOM 16
+ /* sftp_packet_write() prepends a 5-bytes (uint32_t length and
+ * 1-byte type) header to the head of the payload by
+ * ssh_buffer_prepend_data(). Inserting headroom by
+ * ssh_buffer_new_size() eliminates memcpy for prepending the
+ * header.
+ */
+
+ buf_sz = (HEADROOM + /* for header */
+ sizeof(uint32_t) + /* id */
+ ssh_string_len(file->handle) + 4 + /* file->handle */
+ sizeof(uint64_t) + /* file->offset */
+ sizeof(uint32_t) + /* count */
+ count); /* datastring */
+
+ buffer = ssh_buffer_new_size(buf_sz, HEADROOM);
+ if (buffer == NULL) {
+ ssh_set_error_oom(sftp->session);
+ return -1;
+ }
+
+ *id = sftp_get_new_id(file->sftp);
+
+ rc = ssh_buffer_pack(buffer,
+ "dSqd",
+ *id,
+ file->handle,
+ file->offset,
+ count); /* len of datastring */
+
+ if (rc != SSH_OK){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ actual = ssh_buffer_add_func(buffer, f, count, userdata);
+ if (actual < 0){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ packetlen=ssh_buffer_get_len(buffer)+5;
+ len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);
+ ssh_buffer_free(buffer);
+ if (len < 0) {
+ return SSH_ERROR;
+ } else if (len != packetlen) {
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Could only send %d of %d bytes to remote host!", len, packetlen);
+ SSH_LOG(SSH_LOG_PACKET,
+ "Could not write as much data as expected");
+ return SSH_ERROR;
+ }
+
+ file->offset += actual;
+
+ return actual;
+}
+
+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {
+ sftp_session sftp = file->sftp;
+ sftp_message msg = NULL;
+ sftp_status_message status;
+
+ msg = sftp_dequeue(sftp, id);
+ while (msg == NULL) {
+ if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {
+ /* we cannot block */
+ return SSH_AGAIN;
+ }
+ if (sftp_read_and_dispatch(sftp) < 0) {
+ /* something nasty has happened */
+ return SSH_ERROR;
+ }
+ msg = sftp_dequeue(sftp, id);
+ }
+
+ switch (msg->packet_type) {
+ case SSH_FXP_STATUS:
+ status = parse_status_msg(msg);
+ sftp_message_free(msg);
+ if (status == NULL) {
+ return SSH_ERROR;
+ }
+ sftp_set_error(sftp, status->status);
+ switch (status->status) {
+ case SSH_FX_OK:
+ status_msg_free(status);
+ return SSH_OK;
+ default:
+ break;
+ }
+ ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
+ "SFTP server: %s", status->errormsg);
+ status_msg_free(status);
+ return SSH_ERROR;
+ default:
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Received message %d during write!", msg->packet_type);
+ sftp_message_free(msg);
+ return SSH_ERROR;
+ }
+
+ return SSH_ERROR; /* not reached */
+}
+
/* Seek to a specific location in a file. */
int sftp_seek(sftp_file file, uint32_t new_offset) {
if (file == NULL) {
================================================
FILE: patch/libssh-0.10.6-2-g6f1b1e76.patch
================================================
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a64b7708..c6344a5a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.3.0)
+cmake_minimum_required(VERSION 3.13.0)
cmake_policy(SET CMP0048 NEW)
# Specify search path for CMake modules to be loaded by include()
diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake
index 9de10225..0f3d20ed 100644
--- a/ConfigureChecks.cmake
+++ b/ConfigureChecks.cmake
@@ -258,6 +258,7 @@ if (UNIX)
check_library_exists(util forkpty "" HAVE_LIBUTIL)
check_function_exists(cfmakeraw HAVE_CFMAKERAW)
check_function_exists(__strtoull HAVE___STRTOULL)
+ check_symbol_exists(TCP_CONGESTION "netinet/tcp.h" HAVE_TCP_CONGESTION)
endif (UNIX)
set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL "libssh required system libraries")
diff --git a/config.h.cmake b/config.h.cmake
index cc83734d..f74cd03b 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -237,6 +237,8 @@
#cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1
+#cmakedefine HAVE_TCP_CONGESTION 1
+
/* Define to 1 if you want to enable GSSAPI */
#cmakedefine WITH_GSSAPI 1
diff --git a/include/libssh/buffer.h b/include/libssh/buffer.h
index 1fce7b76..b64d1455 100644
--- a/include/libssh/buffer.h
+++ b/include/libssh/buffer.h
@@ -37,6 +37,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);
int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);
int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);
int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);
+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,
+ void *userdata);
int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);
diff --git a/include/libssh/libssh.h b/include/libssh/libssh.h
index 669a0a96..26b20f3f 100644
--- a/include/libssh/libssh.h
+++ b/include/libssh/libssh.h
@@ -368,6 +368,7 @@ enum ssh_options_e {
SSH_OPTIONS_HOST,
SSH_OPTIONS_PORT,
SSH_OPTIONS_PORT_STR,
+ SSH_OPTIONS_AI_FAMILY,
SSH_OPTIONS_FD,
SSH_OPTIONS_USER,
SSH_OPTIONS_SSH_DIR,
@@ -402,6 +403,7 @@ enum ssh_options_e {
SSH_OPTIONS_GSSAPI_AUTH,
SSH_OPTIONS_GLOBAL_KNOWNHOSTS,
SSH_OPTIONS_NODELAY,
+ SSH_OPTIONS_CCALGO,
SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,
SSH_OPTIONS_PROCESS_CONFIG,
SSH_OPTIONS_REKEY_DATA,
@@ -833,6 +835,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);
LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);
LIBSSH_API ssh_buffer ssh_buffer_new(void);
+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);
LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);
#define SSH_BUFFER_FREE(x) \
do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)
@@ -843,6 +846,11 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);
LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);
LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);
+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);
+
+LIBSSH_API const char **ssh_ciphers(void);
+LIBSSH_API const char **ssh_hmacs(void);
+
#ifndef LIBSSH_LEGACY_0_4
#include "libssh/legacy.h"
#endif
diff --git a/include/libssh/session.h b/include/libssh/session.h
index 97936195..e4fc4fce 100644
--- a/include/libssh/session.h
+++ b/include/libssh/session.h
@@ -249,6 +249,7 @@ struct ssh_session_struct {
unsigned long timeout; /* seconds */
unsigned long timeout_usec;
uint16_t port;
+ int ai_family;
socket_t fd;
int StrictHostKeyChecking;
char compressionlevel;
@@ -258,6 +259,7 @@ struct ssh_session_struct {
int flags;
int exp_flags;
int nodelay;
+ char *ccalgo;
bool config_processed;
uint8_t options_seen[SOC_MAX];
uint64_t rekey_data;
diff --git a/include/libssh/sftp.h b/include/libssh/sftp.h
index c713466e..e27fe326 100644
--- a/include/libssh/sftp.h
+++ b/include/libssh/sftp.h
@@ -565,6 +565,10 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_
*/
LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);
+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,
+ void *userdata, uint32_t* id);
+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);
+
/**
* @brief Seek to a specific location in a file.
*
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 807313b5..86487087 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -448,6 +448,11 @@ if (BUILD_STATIC_LIB)
if (WIN32)
target_compile_definitions(ssh-static PUBLIC "LIBSSH_STATIC")
endif (WIN32)
+
+ install(TARGETS ssh-static
+ EXPORT libssh-config
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT libraries)
endif (BUILD_STATIC_LIB)
message(STATUS "Threads_FOUND=${Threads_FOUND}")
diff --git a/src/buffer.c b/src/buffer.c
index 8991e006..e0414801 100644
--- a/src/buffer.c
+++ b/src/buffer.c
@@ -142,6 +142,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)
return buf;
}
+/**
+ * @brief Create a new SSH buffer with a specified size and headroom.
+ *
+ * @param[in] len length for newly initialized SSH buffer.
+ * @param[in] headroom length for headroom
+ * @return A newly initialized SSH buffer, NULL on error.
+ */
+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)
+{
+ struct ssh_buffer_struct *buf = NULL;
+ int rc;
+
+ if (len < headroom)
+ return NULL;
+
+ buf = calloc(1, sizeof(struct ssh_buffer_struct));
+ if (buf == NULL) {
+ return NULL;
+ }
+
+ rc = ssh_buffer_allocate_size(buf, len);
+ if (rc != 0) {
+ SAFE_FREE(buf);
+ return NULL;
+ }
+
+ buf->pos += headroom;
+ buf->used += headroom;
+
+ buffer_verify(buf);
+
+ return buf;
+}
+
/**
* @brief Deallocate a SSH buffer.
*
@@ -329,6 +363,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint
return 0;
}
+/**
+ * @brief Add data at the tail of a buffer by an external function
+ *
+ * @param[in] buffer The buffer to add data.
+ *
+ * @param[in] f function that adds data to the buffer.
+ *
+ * @param[in] max_bytes The maximum length of the data to add.
+ *
+ * @return actual bytes added on success, < 0 on error.
+ */
+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,
+ size_t max_bytes, void *userdata)
+{
+ ssize_t actual;
+
+ if (buffer == NULL) {
+ return -1;
+ }
+
+ buffer_verify(buffer);
+
+ if (buffer->used + max_bytes < max_bytes) {
+ return -1;
+ }
+
+ if (buffer->allocated < (buffer->used + max_bytes)) {
+ if (buffer->pos > 0) {
+ buffer_shift(buffer);
+ }
+ if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {
+ return -1;
+ }
+ }
+
+ if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)
+ return -1;
+
+ buffer->used += actual;
+ buffer_verify(buffer);
+ return actual;
+}
+
/**
* @brief Ensure the buffer has at least a certain preallocated size.
*
diff --git a/src/connect.c b/src/connect.c
index 15cae644..02ef43b4 100644
--- a/src/connect.c
+++ b/src/connect.c
@@ -114,7 +114,7 @@ static int ssh_connect_socket_close(socket_t s)
#endif
}
-static int getai(const char *host, int port, struct addrinfo **ai)
+static int getai(const char *host, int port, int ai_family, struct addrinfo **ai)
{
const char *service = NULL;
struct addrinfo hints;
@@ -123,7 +123,7 @@ static int getai(const char *host, int port, struct addrinfo **ai)
ZERO_STRUCT(hints);
hints.ai_protocol = IPPROTO_TCP;
- hints.ai_family = PF_UNSPEC;
+ hints.ai_family = ai_family > 0 ? ai_family : PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
if (port == 0) {
@@ -156,6 +156,20 @@ static int set_tcp_nodelay(socket_t socket)
sizeof(opt));
}
+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)
+{
+#ifdef HAVE_TCP_CONGESTION
+ return setsockopt(socket,
+ IPPROTO_TCP,
+ TCP_CONGESTION,
+ (void *)ccalgo,
+ strlen(ccalgo));
+#else
+ errno = ENOTSUP;
+ return -1;
+#endif
+}
+
/**
* @internal
*
@@ -173,7 +187,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
struct addrinfo *ai = NULL;
struct addrinfo *itr = NULL;
- rc = getai(host, port, &ai);
+ rc = getai(host, port, session->opts.ai_family, &ai);
if (rc != 0) {
ssh_set_error(session, SSH_FATAL,
"Failed to resolve hostname %s (%s)",
@@ -199,7 +213,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
SSH_LOG(SSH_LOG_PACKET, "Resolving %s", bind_addr);
- rc = getai(bind_addr, 0, &bind_ai);
+ rc = getai(bind_addr, 0, session->opts.ai_family, &bind_ai);
if (rc != 0) {
ssh_set_error(session, SSH_FATAL,
"Failed to resolve bind address %s (%s)",
@@ -256,6 +270,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
}
}
+ if (session->opts.ccalgo) {
+ rc = set_tcp_ccalgo(s, session->opts.ccalgo);
+ if (rc < 0) {
+ ssh_set_error(session, SSH_FATAL,
+ "Failed to set TCP_CONGESTION on socket: %s",
+ ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));
+ ssh_connect_socket_close(s);
+ s = -1;
+ continue;
+ }
+ }
+
errno = 0;
rc = connect(s, itr->ai_addr, itr->ai_addrlen);
if (rc == -1 && (errno != 0) && (errno != EINPROGRESS)) {
diff --git a/src/misc.c b/src/misc.c
index 7081f12a..e3879fe4 100644
--- a/src/misc.c
+++ b/src/misc.c
@@ -71,6 +71,8 @@
#include "libssh/priv.h"
#include "libssh/misc.h"
#include "libssh/session.h"
+#include "libssh/wrapper.h"
+#include "libssh/crypto.h"
#ifdef HAVE_LIBGCRYPT
#define GCRYPT_STRING "/gnutls"
@@ -2074,4 +2076,40 @@ int ssh_check_hostname_syntax(const char *hostname)
return SSH_OK;
}
+/**
+ * @brief Return supported cipher names
+ * @return The list of cipher names.
+ */
+const char **ssh_ciphers(void)
+{
+ struct ssh_cipher_struct *tab=ssh_get_ciphertab();
+ static const char *ciphers[32];
+ int n;
+
+ memset(ciphers, 0, sizeof(*ciphers));
+
+ for (n = 0; tab[n].name != NULL; n++) {
+ ciphers[n] = tab[n].name;
+ }
+ return ciphers;
+}
+
+/**
+ * @brief Return supported hmac names
+ * @return The list of hmac names.
+ */
+const char **ssh_hmacs(void)
+{
+ struct ssh_hmac_struct *tab=ssh_get_hmactab();
+ static const char *hmacs[32];
+ int n;
+
+ memset(hmacs, 0, sizeof(*hmacs));
+
+ for (n = 0; tab[n].name != NULL; n++) {
+ hmacs[n] = tab[n].name;
+ }
+ return hmacs;
+}
+
/** @} */
diff --git a/src/options.c b/src/options.c
index b3ecffe1..8de24ed6 100644
--- a/src/options.c
+++ b/src/options.c
@@ -217,6 +217,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)
new->opts.gss_delegate_creds = src->opts.gss_delegate_creds;
new->opts.flags = src->opts.flags;
new->opts.nodelay = src->opts.nodelay;
+ new->opts.ccalgo = src->opts.ccalgo;
new->opts.config_processed = src->opts.config_processed;
new->common.log_verbosity = src->common.log_verbosity;
new->common.callbacks = src->common.callbacks;
@@ -268,6 +269,9 @@ int ssh_options_set_algo(ssh_session session,
* - SSH_OPTIONS_PORT_STR:
* The port to connect to (const char *).
*
+ * - SSH_OPTIONS_AI_FAMILY:
+ * The address family for connecting (int *).
+ *
* - SSH_OPTIONS_FD:
* The file descriptor to use (socket_t).\n
* \n
@@ -458,6 +462,10 @@ int ssh_options_set_algo(ssh_session session,
* Set it to disable Nagle's Algorithm (TCP_NODELAY) on the
* session socket. (int, 0=false)
*
+ * - SSH_OPTIONS_CCALGO
+ * Set it to specify TCP congestion control algorithm on the
+ * session socket (Linux only). (const char *, congestion
+ * control algorithm name)
+ *
* - SSH_OPTIONS_PROCESS_CONFIG
* Set it to false to disable automatic processing of per-user
* and system-wide OpenSSH configuration files. LibSSH
@@ -571,6 +579,21 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,
session->opts.port = i & 0xffffU;
}
break;
+ case SSH_OPTIONS_AI_FAMILY:
+ if (value == NULL) {
+ session->opts.ai_family = 0;
+ ssh_set_error_invalid(session);
+ return -1;
+ } else {
+ int *x = (int *) value;
+ if (*x < 0) {
+ session->opts.ai_family = 0;
+ ssh_set_error_invalid(session);
+ return -1;
+ }
+ session->opts.ai_family = *x;
+ }
+ break;
case SSH_OPTIONS_FD:
if (value == NULL) {
session->opts.fd = SSH_INVALID_SOCKET;
@@ -1017,6 +1040,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,
session->opts.nodelay = (*x & 0xff) > 0 ? 1 : 0;
}
break;
+ case SSH_OPTIONS_CCALGO:
+ v = value;
+ if (v == NULL || v[0] == '\0') {
+ ssh_set_error_invalid(session);
+ return -1;
+ } else {
+ SAFE_FREE(session->opts.ccalgo);
+ session->opts.ccalgo = strdup(v);
+ if (session->opts.ccalgo == NULL) {
+ ssh_set_error_oom(session);
+ return -1;
+ }
+ }
+ break;
case SSH_OPTIONS_PROCESS_CONFIG:
if (value == NULL) {
ssh_set_error_invalid(session);
diff --git a/src/session.c b/src/session.c
index 8c509699..307388e5 100644
--- a/src/session.c
+++ b/src/session.c
@@ -105,9 +105,11 @@ ssh_session ssh_new(void)
/* OPTIONS */
session->opts.StrictHostKeyChecking = 1;
session->opts.port = 22;
+ session->opts.ai_family = 0;
session->opts.fd = -1;
session->opts.compressionlevel = 7;
session->opts.nodelay = 0;
+ session->opts.ccalgo = NULL;
session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH |
SSH_OPT_FLAG_PUBKEY_AUTH |
diff --git a/src/sftp.c b/src/sftp.c
index e01012a8..702623a0 100644
--- a/src/sftp.c
+++ b/src/sftp.c
@@ -2228,6 +2228,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {
return -1; /* not reached */
}
+/*
+ * sftp_async_write is based on and sftp_async_write_end is copied from
+ * https://github.com/limes-datentechnik-gmbh/libssh
+ *
+ * sftp_async_write has some optimizations:
+ * - use ssh_buffer_new_size() to reduce realloc_buffer.
+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.
+ */
+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,
+ uint32_t* id) {
+ sftp_session sftp = file->sftp;
+ ssh_buffer buffer;
+ uint32_t buf_sz;
+ ssize_t actual;
+ int len;
+ int packetlen;
+ int rc;
+
+#define HEADROOM 16
+ /* sftp_packet_write() prepends a 5-byte (uint32_t length and
+ * 1-byte type) header to the head of the payload by
+ * ssh_buffer_prepend_data(). Inserting headroom by
+ * ssh_buffer_new_size() eliminates memcpy for prepending the
+ * header.
+ */
+
+ buf_sz = (HEADROOM + /* for header */
+ sizeof(uint32_t) + /* id */
+ ssh_string_len(file->handle) + 4 + /* file->handle */
+ sizeof(uint64_t) + /* file->offset */
+ sizeof(uint32_t) + /* count */
+ count); /* datastring */
+
+ buffer = ssh_buffer_new_size(buf_sz, HEADROOM);
+ if (buffer == NULL) {
+ ssh_set_error_oom(sftp->session);
+ return -1;
+ }
+
+ *id = sftp_get_new_id(file->sftp);
+
+ rc = ssh_buffer_pack(buffer,
+ "dSqd",
+ *id,
+ file->handle,
+ file->offset,
+ count); /* len of datastring */
+
+ if (rc != SSH_OK){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ actual = ssh_buffer_add_func(buffer, f, count, userdata);
+ if (actual < 0){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ packetlen=ssh_buffer_get_len(buffer)+5;
+ len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);
+ ssh_buffer_free(buffer);
+ if (len < 0) {
+ return SSH_ERROR;
+ } else if (len != packetlen) {
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Could only send %d of %d bytes to remote host!", len, packetlen);
+ SSH_LOG(SSH_LOG_PACKET,
+ "Could not write as much data as expected");
+ return SSH_ERROR;
+ }
+
+ file->offset += actual;
+
+ return actual;
+}
+
+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {
+ sftp_session sftp = file->sftp;
+ sftp_message msg = NULL;
+ sftp_status_message status;
+
+ msg = sftp_dequeue(sftp, id);
+ while (msg == NULL) {
+ if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {
+ /* we cannot block */
+ return SSH_AGAIN;
+ }
+ if (sftp_read_and_dispatch(sftp) < 0) {
+ /* something nasty has happened */
+ return SSH_ERROR;
+ }
+ msg = sftp_dequeue(sftp, id);
+ }
+
+ switch (msg->packet_type) {
+ case SSH_FXP_STATUS:
+ status = parse_status_msg(msg);
+ sftp_message_free(msg);
+ if (status == NULL) {
+ return SSH_ERROR;
+ }
+ sftp_set_error(sftp, status->status);
+ switch (status->status) {
+ case SSH_FX_OK:
+ status_msg_free(status);
+ return SSH_OK;
+ default:
+ break;
+ }
+ ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
+ "SFTP server: %s", status->errormsg);
+ status_msg_free(status);
+ return SSH_ERROR;
+ default:
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Received message %d during write!", msg->packet_type);
+ sftp_message_free(msg);
+ return SSH_ERROR;
+ }
+
+ return SSH_ERROR; /* not reached */
+}
+
/* Seek to a specific location in a file. */
int sftp_seek(sftp_file file, uint32_t new_offset) {
if (file == NULL) {
================================================
FILE: patch/libssh-0.10.6.patch
================================================
diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake
index 9de10225..0f3d20ed 100644
--- a/ConfigureChecks.cmake
+++ b/ConfigureChecks.cmake
@@ -258,6 +258,7 @@ if (UNIX)
check_library_exists(util forkpty "" HAVE_LIBUTIL)
check_function_exists(cfmakeraw HAVE_CFMAKERAW)
check_function_exists(__strtoull HAVE___STRTOULL)
+ check_symbol_exists(TCP_CONGESTION "netinet/tcp.h" HAVE_TCP_CONGESTION)
endif (UNIX)
set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL "libssh required system libraries")
diff --git a/config.h.cmake b/config.h.cmake
index cc83734d..f74cd03b 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -237,6 +237,8 @@
#cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1
+#cmakedefine HAVE_TCP_CONGESTION 1
+
/* Define to 1 if you want to enable GSSAPI */
#cmakedefine WITH_GSSAPI 1
diff --git a/include/libssh/buffer.h b/include/libssh/buffer.h
index 1fce7b76..b64d1455 100644
--- a/include/libssh/buffer.h
+++ b/include/libssh/buffer.h
@@ -37,6 +37,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);
int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);
int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);
int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);
+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,
+ void *userdata);
int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);
diff --git a/include/libssh/libssh.h b/include/libssh/libssh.h
index 669a0a96..b6a93ac7 100644
--- a/include/libssh/libssh.h
+++ b/include/libssh/libssh.h
@@ -402,6 +402,7 @@ enum ssh_options_e {
SSH_OPTIONS_GSSAPI_AUTH,
SSH_OPTIONS_GLOBAL_KNOWNHOSTS,
SSH_OPTIONS_NODELAY,
+ SSH_OPTIONS_CCALGO,
SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,
SSH_OPTIONS_PROCESS_CONFIG,
SSH_OPTIONS_REKEY_DATA,
@@ -833,6 +834,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);
LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);
LIBSSH_API ssh_buffer ssh_buffer_new(void);
+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);
LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);
#define SSH_BUFFER_FREE(x) \
do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)
@@ -843,6 +845,8 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);
LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);
LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);
+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);
+
#ifndef LIBSSH_LEGACY_0_4
#include "libssh/legacy.h"
#endif
diff --git a/include/libssh/session.h b/include/libssh/session.h
index 97936195..e4a7f80c 100644
--- a/include/libssh/session.h
+++ b/include/libssh/session.h
@@ -258,6 +258,7 @@ struct ssh_session_struct {
int flags;
int exp_flags;
int nodelay;
+ char *ccalgo;
bool config_processed;
uint8_t options_seen[SOC_MAX];
uint64_t rekey_data;
diff --git a/include/libssh/sftp.h b/include/libssh/sftp.h
index c713466e..e27fe326 100644
--- a/include/libssh/sftp.h
+++ b/include/libssh/sftp.h
@@ -565,6 +565,10 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_
*/
LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);
+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,
+ void *userdata, uint32_t* id);
+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);
+
/**
* @brief Seek to a specific location in a file.
*
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 807313b5..86487087 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -448,6 +448,11 @@ if (BUILD_STATIC_LIB)
if (WIN32)
target_compile_definitions(ssh-static PUBLIC "LIBSSH_STATIC")
endif (WIN32)
+
+ install(TARGETS ssh-static
+ EXPORT libssh-config
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT libraries)
endif (BUILD_STATIC_LIB)
message(STATUS "Threads_FOUND=${Threads_FOUND}")
diff --git a/src/buffer.c b/src/buffer.c
index 8991e006..e0414801 100644
--- a/src/buffer.c
+++ b/src/buffer.c
@@ -142,6 +142,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)
return buf;
}
+/**
+ * @brief Create a new SSH buffer with a specified size and headroom.
+ *
+ * @param[in] len length for newly initialized SSH buffer.
+ * @param[in] headroom length for headroom
+ * @return A newly initialized SSH buffer, NULL on error.
+ */
+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)
+{
+ struct ssh_buffer_struct *buf = NULL;
+ int rc;
+
+ if (len < headroom)
+ return NULL;
+
+ buf = calloc(1, sizeof(struct ssh_buffer_struct));
+ if (buf == NULL) {
+ return NULL;
+ }
+
+ rc = ssh_buffer_allocate_size(buf, len);
+ if (rc != 0) {
+ SAFE_FREE(buf);
+ return NULL;
+ }
+
+ buf->pos += headroom;
+ buf->used += headroom;
+
+ buffer_verify(buf);
+
+ return buf;
+}
+
/**
* @brief Deallocate a SSH buffer.
*
@@ -329,6 +363,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint
return 0;
}
+/**
+ * @brief Add data at the tail of a buffer by an external function
+ *
+ * @param[in] buffer The buffer to add data.
+ *
+ * @param[in] f function that adds data to the buffer.
+ *
+ * @param[in] max_bytes The maximum length of the data to add.
+ *
+ * @return actual bytes added on success, < 0 on error.
+ */
+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,
+ size_t max_bytes, void *userdata)
+{
+ ssize_t actual;
+
+ if (buffer == NULL) {
+ return -1;
+ }
+
+ buffer_verify(buffer);
+
+ if (buffer->used + max_bytes < max_bytes) {
+ return -1;
+ }
+
+ if (buffer->allocated < (buffer->used + max_bytes)) {
+ if (buffer->pos > 0) {
+ buffer_shift(buffer);
+ }
+ if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {
+ return -1;
+ }
+ }
+
+ if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)
+ return -1;
+
+ buffer->used += actual;
+ buffer_verify(buffer);
+ return actual;
+}
+
/**
* @brief Ensure the buffer has at least a certain preallocated size.
*
diff --git a/src/connect.c b/src/connect.c
index 15cae644..e7520f40 100644
--- a/src/connect.c
+++ b/src/connect.c
@@ -156,6 +156,20 @@ static int set_tcp_nodelay(socket_t socket)
sizeof(opt));
}
+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)
+{
+#ifdef HAVE_TCP_CONGESTION
+ return setsockopt(socket,
+ IPPROTO_TCP,
+ TCP_CONGESTION,
+ (void *)ccalgo,
+ strlen(ccalgo));
+#else
+ errno = ENOTSUP;
+ return -1;
+#endif
+}
+
/**
* @internal
*
@@ -256,6 +270,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
}
}
+ if (session->opts.ccalgo) {
+ rc = set_tcp_ccalgo(s, session->opts.ccalgo);
+ if (rc < 0) {
+ ssh_set_error(session, SSH_FATAL,
+ "Failed to set TCP_CONGESTION on socket: %s",
+ ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));
+ ssh_connect_socket_close(s);
+ s = -1;
+ continue;
+ }
+ }
+
errno = 0;
rc = connect(s, itr->ai_addr, itr->ai_addrlen);
if (rc == -1 && (errno != 0) && (errno != EINPROGRESS)) {
diff --git a/src/options.c b/src/options.c
index 38511455..a183605d 100644
--- a/src/options.c
+++ b/src/options.c
@@ -217,6 +217,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)
new->opts.gss_delegate_creds = src->opts.gss_delegate_creds;
new->opts.flags = src->opts.flags;
new->opts.nodelay = src->opts.nodelay;
+ new->opts.ccalgo = src->opts.ccalgo;
new->opts.config_processed = src->opts.config_processed;
new->common.log_verbosity = src->common.log_verbosity;
new->common.callbacks = src->common.callbacks;
@@ -458,6 +459,10 @@ int ssh_options_set_algo(ssh_session session,
* Set it to disable Nagle's Algorithm (TCP_NODELAY) on the
* session socket. (int, 0=false)
*
+ * - SSH_OPTIONS_CCALGO
+ * Set it to specify TCP congestion control algorithm on the
+ * session socket (Linux only). (const char *, congestion
+ * control algorithm name)
+ *
* - SSH_OPTIONS_PROCESS_CONFIG
* Set it to false to disable automatic processing of per-user
* and system-wide OpenSSH configuration files. LibSSH
@@ -1023,6 +1028,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,
session->opts.nodelay = (*x & 0xff) > 0 ? 1 : 0;
}
break;
+ case SSH_OPTIONS_CCALGO:
+ v = value;
+ if (v == NULL || v[0] == '\0') {
+ ssh_set_error_invalid(session);
+ return -1;
+ } else {
+ SAFE_FREE(session->opts.ccalgo);
+ session->opts.ccalgo = strdup(v);
+ if (session->opts.ccalgo == NULL) {
+ ssh_set_error_oom(session);
+ return -1;
+ }
+ }
+ break;
case SSH_OPTIONS_PROCESS_CONFIG:
if (value == NULL) {
ssh_set_error_invalid(session);
diff --git a/src/session.c b/src/session.c
index 8c509699..88602b6a 100644
--- a/src/session.c
+++ b/src/session.c
@@ -108,6 +108,7 @@ ssh_session ssh_new(void)
session->opts.fd = -1;
session->opts.compressionlevel = 7;
session->opts.nodelay = 0;
+ session->opts.ccalgo = NULL;
session->opts.flags = SSH_OPT_FLAG_PASSWORD_AUTH |
SSH_OPT_FLAG_PUBKEY_AUTH |
diff --git a/src/sftp.c b/src/sftp.c
index e01012a8..702623a0 100644
--- a/src/sftp.c
+++ b/src/sftp.c
@@ -2228,6 +2228,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {
return -1; /* not reached */
}
+/*
+ * sftp_async_write is based on and sftp_async_write_end is copied from
+ * https://github.com/limes-datentechnik-gmbh/libssh
+ *
+ * sftp_async_write has some optimizations:
+ * - use ssh_buffer_new_size() to reduce realloc_buffer.
+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.
+ */
+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,
+ uint32_t* id) {
+ sftp_session sftp = file->sftp;
+ ssh_buffer buffer;
+ uint32_t buf_sz;
+ ssize_t actual;
+ int len;
+ int packetlen;
+ int rc;
+
+#define HEADROOM 16
+ /* sftp_packet_write() prepends a 5-byte (uint32_t length and
+ * 1-byte type) header to the head of the payload by
+ * ssh_buffer_prepend_data(). Inserting headroom by
+ * ssh_buffer_new_size() eliminates memcpy for prepending the
+ * header.
+ */
+
+ buf_sz = (HEADROOM + /* for header */
+ sizeof(uint32_t) + /* id */
+ ssh_string_len(file->handle) + 4 + /* file->handle */
+ sizeof(uint64_t) + /* file->offset */
+ sizeof(uint32_t) + /* count */
+ count); /* datastring */
+
+ buffer = ssh_buffer_new_size(buf_sz, HEADROOM);
+ if (buffer == NULL) {
+ ssh_set_error_oom(sftp->session);
+ return -1;
+ }
+
+ *id = sftp_get_new_id(file->sftp);
+
+ rc = ssh_buffer_pack(buffer,
+ "dSqd",
+ *id,
+ file->handle,
+ file->offset,
+ count); /* len of datastring */
+
+ if (rc != SSH_OK){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ actual = ssh_buffer_add_func(buffer, f, count, userdata);
+ if (actual < 0){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ packetlen=ssh_buffer_get_len(buffer)+5;
+ len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);
+ ssh_buffer_free(buffer);
+ if (len < 0) {
+ return SSH_ERROR;
+ } else if (len != packetlen) {
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Could only send %d of %d bytes to remote host!", len, packetlen);
+ SSH_LOG(SSH_LOG_PACKET,
+ "Could not write as much data as expected");
+ return SSH_ERROR;
+ }
+
+ file->offset += actual;
+
+ return actual;
+}
+
+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {
+ sftp_session sftp = file->sftp;
+ sftp_message msg = NULL;
+ sftp_status_message status;
+
+ msg = sftp_dequeue(sftp, id);
+ while (msg == NULL) {
+ if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {
+ /* we cannot block */
+ return SSH_AGAIN;
+ }
+ if (sftp_read_and_dispatch(sftp) < 0) {
+ /* something nasty has happened */
+ return SSH_ERROR;
+ }
+ msg = sftp_dequeue(sftp, id);
+ }
+
+ switch (msg->packet_type) {
+ case SSH_FXP_STATUS:
+ status = parse_status_msg(msg);
+ sftp_message_free(msg);
+ if (status == NULL) {
+ return SSH_ERROR;
+ }
+ sftp_set_error(sftp, status->status);
+ switch (status->status) {
+ case SSH_FX_OK:
+ status_msg_free(status);
+ return SSH_OK;
+ default:
+ break;
+ }
+ ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
+ "SFTP server: %s", status->errormsg);
+ status_msg_free(status);
+ return SSH_ERROR;
+ default:
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Received message %d during write!", msg->packet_type);
+ sftp_message_free(msg);
+ return SSH_ERROR;
+ }
+
+ return SSH_ERROR; /* not reached */
+}
+
/* Seek to a specific location in a file. */
int sftp_seek(sftp_file file, uint32_t new_offset) {
if (file == NULL) {
================================================
FILE: patch/libssh-0.11.2.patch
================================================
diff --git a/ConfigureChecks.cmake b/ConfigureChecks.cmake
index 8765dc6e..766e7d16 100644
--- a/ConfigureChecks.cmake
+++ b/ConfigureChecks.cmake
@@ -209,6 +209,7 @@ if (UNIX)
check_library_exists(util forkpty "" HAVE_LIBUTIL)
check_function_exists(cfmakeraw HAVE_CFMAKERAW)
check_function_exists(__strtoull HAVE___STRTOULL)
+ check_symbol_exists(TCP_CONGESTION "netinet/tcp.h" HAVE_TCP_CONGESTION)
endif (UNIX)
set(LIBSSH_REQUIRED_LIBRARIES ${_REQUIRED_LIBRARIES} CACHE INTERNAL "libssh required system libraries")
diff --git a/config.h.cmake b/config.h.cmake
index 8dce5273..ef534762 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -219,6 +219,8 @@
#cmakedefine HAVE_GCC_BOUNDED_ATTRIBUTE 1
+#cmakedefine HAVE_TCP_CONGESTION 1
+
/* Define to 1 if you want to enable GSSAPI */
#cmakedefine WITH_GSSAPI 1
diff --git a/include/libssh/buffer.h b/include/libssh/buffer.h
index d22178e7..2d6aa0a7 100644
--- a/include/libssh/buffer.h
+++ b/include/libssh/buffer.h
@@ -37,6 +37,8 @@ int ssh_buffer_add_u8(ssh_buffer buffer, uint8_t data);
int ssh_buffer_add_u16(ssh_buffer buffer, uint16_t data);
int ssh_buffer_add_u32(ssh_buffer buffer, uint32_t data);
int ssh_buffer_add_u64(ssh_buffer buffer, uint64_t data);
+ssize_t ssh_buffer_add_func(ssh_buffer buffer, ssh_add_func f, size_t max_bytes,
+ void *userdata);
int ssh_buffer_validate_length(struct ssh_buffer_struct *buffer, size_t len);
diff --git a/include/libssh/libssh.h b/include/libssh/libssh.h
index 3bddb019..1d5d7761 100644
--- a/include/libssh/libssh.h
+++ b/include/libssh/libssh.h
@@ -373,6 +373,7 @@ enum ssh_options_e {
SSH_OPTIONS_HOST,
SSH_OPTIONS_PORT,
SSH_OPTIONS_PORT_STR,
+ SSH_OPTIONS_AI_FAMILY,
SSH_OPTIONS_FD,
SSH_OPTIONS_USER,
SSH_OPTIONS_SSH_DIR,
@@ -407,6 +408,7 @@ enum ssh_options_e {
SSH_OPTIONS_GSSAPI_AUTH,
SSH_OPTIONS_GLOBAL_KNOWNHOSTS,
SSH_OPTIONS_NODELAY,
+ SSH_OPTIONS_CCALGO,
SSH_OPTIONS_PUBLICKEY_ACCEPTED_TYPES,
SSH_OPTIONS_PROCESS_CONFIG,
SSH_OPTIONS_REKEY_DATA,
@@ -876,6 +878,7 @@ LIBSSH_API const char* ssh_get_hmac_in(ssh_session session);
LIBSSH_API const char* ssh_get_hmac_out(ssh_session session);
LIBSSH_API ssh_buffer ssh_buffer_new(void);
+LIBSSH_API ssh_buffer ssh_buffer_new_size(uint32_t size, uint32_t headroom);
LIBSSH_API void ssh_buffer_free(ssh_buffer buffer);
#define SSH_BUFFER_FREE(x) \
do { if ((x) != NULL) { ssh_buffer_free(x); x = NULL; } } while(0)
@@ -886,6 +889,12 @@ LIBSSH_API void *ssh_buffer_get(ssh_buffer buffer);
LIBSSH_API uint32_t ssh_buffer_get_len(ssh_buffer buffer);
LIBSSH_API int ssh_session_set_disconnect_message(ssh_session session, const char *message);
+typedef ssize_t (*ssh_add_func) (void *ptr, size_t max_bytes, void *userdata);
+
+LIBSSH_API const char **ssh_ciphers(void);
+LIBSSH_API const char **ssh_hmacs(void);
+LIBSSH_API void ssh_use_openssh_proxy_jumps(int);
+
#ifndef LIBSSH_LEGACY_0_4
#include "libssh/legacy.h"
#endif
diff --git a/include/libssh/session.h b/include/libssh/session.h
index aed94072..327cf4fe 100644
--- a/include/libssh/session.h
+++ b/include/libssh/session.h
@@ -255,6 +255,7 @@ struct ssh_session_struct {
unsigned long timeout; /* seconds */
unsigned long timeout_usec;
uint16_t port;
+ int ai_family;
socket_t fd;
int StrictHostKeyChecking;
char compressionlevel;
@@ -264,6 +265,7 @@ struct ssh_session_struct {
int flags;
int exp_flags;
int nodelay;
+ char *ccalgo;
bool config_processed;
uint8_t options_seen[SOC_MAX];
uint64_t rekey_data;
diff --git a/include/libssh/sftp.h b/include/libssh/sftp.h
index cf4458c3..1a864795 100644
--- a/include/libssh/sftp.h
+++ b/include/libssh/sftp.h
@@ -569,6 +569,10 @@ SSH_DEPRECATED LIBSSH_API int sftp_async_read(sftp_file file,
uint32_t len,
uint32_t id);
+LIBSSH_API ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count,
+ void *userdata, uint32_t* id);
+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);
+
/**
* @brief Write to a file using an opened sftp file handle.
*
diff --git a/src/buffer.c b/src/buffer.c
index 449fa941..f49e8af6 100644
--- a/src/buffer.c
+++ b/src/buffer.c
@@ -142,6 +142,40 @@ struct ssh_buffer_struct *ssh_buffer_new(void)
return buf;
}
+/**
+ * @brief Create a new SSH buffer with a specified size and headroom.
+ *
+ * @param[in] len length for newly initialized SSH buffer.
+ * @param[in] headroom length for headroom
+ * @return A newly initialized SSH buffer, NULL on error.
+ */
+struct ssh_buffer_struct *ssh_buffer_new_size(uint32_t len, uint32_t headroom)
+{
+ struct ssh_buffer_struct *buf = NULL;
+ int rc;
+
+ if (len < headroom)
+ return NULL;
+
+ buf = calloc(1, sizeof(struct ssh_buffer_struct));
+ if (buf == NULL) {
+ return NULL;
+ }
+
+ rc = ssh_buffer_allocate_size(buf, len);
+ if (rc != 0) {
+ SAFE_FREE(buf);
+ return NULL;
+ }
+
+ buf->pos += headroom;
+ buf->used += headroom;
+
+ buffer_verify(buf);
+
+ return buf;
+}
+
/**
* @brief Deallocate a SSH buffer.
*
@@ -329,6 +363,49 @@ int ssh_buffer_add_data(struct ssh_buffer_struct *buffer, const void *data, uint
return 0;
}
+/**
+ * @brief Add data at the tail of a buffer by an external function
+ *
+ * @param[in] buffer The buffer to add data.
+ *
+ * @param[in] f function that adds data to the buffer.
+ *
+ * @param[in] max_bytes The maximum length of the data to add.
+ *
+ * @return actual bytes added on success, < 0 on error.
+ */
+ssize_t ssh_buffer_add_func(struct ssh_buffer_struct *buffer, ssh_add_func f,
+ size_t max_bytes, void *userdata)
+{
+ ssize_t actual;
+
+ if (buffer == NULL) {
+ return -1;
+ }
+
+ buffer_verify(buffer);
+
+ if (buffer->used + max_bytes < max_bytes) {
+ return -1;
+ }
+
+ if (buffer->allocated < (buffer->used + max_bytes)) {
+ if (buffer->pos > 0) {
+ buffer_shift(buffer);
+ }
+ if (realloc_buffer(buffer, buffer->used + max_bytes) < 0) {
+ return -1;
+ }
+ }
+
+ if ((actual = f(buffer->data + buffer->used, max_bytes, userdata)) < 0)
+ return -1;
+
+ buffer->used += actual;
+ buffer_verify(buffer);
+ return actual;
+}
+
/**
* @brief Ensure the buffer has at least a certain preallocated size.
*
diff --git a/src/connect.c b/src/connect.c
index 2cb64037..51f4c87e 100644
--- a/src/connect.c
+++ b/src/connect.c
@@ -109,7 +109,7 @@ static int ssh_connect_socket_close(socket_t s)
#endif
}
-static int getai(const char *host, int port, struct addrinfo **ai)
+static int getai(const char *host, int port, int ai_family, struct addrinfo **ai)
{
const char *service = NULL;
struct addrinfo hints;
@@ -118,7 +118,7 @@ static int getai(const char *host, int port, struct addrinfo **ai)
ZERO_STRUCT(hints);
hints.ai_protocol = IPPROTO_TCP;
- hints.ai_family = PF_UNSPEC;
+ hints.ai_family = ai_family > 0 ? ai_family : PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
if (port == 0) {
@@ -151,6 +151,20 @@ static int set_tcp_nodelay(socket_t socket)
sizeof(opt));
}
+static int set_tcp_ccalgo(socket_t socket, const char *ccalgo)
+{
+#ifdef HAVE_TCP_CONGESTION
+ return setsockopt(socket,
+ IPPROTO_TCP,
+ TCP_CONGESTION,
+ (void *)ccalgo,
+ strlen(ccalgo));
+#else
+ errno = ENOTSUP;
+ return -1;
+#endif
+}
+
/**
* @internal
*
@@ -168,7 +182,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
struct addrinfo *ai = NULL;
struct addrinfo *itr = NULL;
- rc = getai(host, port, &ai);
+ rc = getai(host, port, session->opts.ai_family, &ai);
if (rc != 0) {
ssh_set_error(session, SSH_FATAL,
"Failed to resolve hostname %s (%s)",
@@ -194,7 +208,7 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
SSH_LOG(SSH_LOG_PACKET, "Resolving %s", bind_addr);
- rc = getai(bind_addr, 0, &bind_ai);
+ rc = getai(bind_addr, 0, session->opts.ai_family, &bind_ai);
if (rc != 0) {
ssh_set_error(session, SSH_FATAL,
"Failed to resolve bind address %s (%s)",
@@ -251,6 +265,18 @@ socket_t ssh_connect_host_nonblocking(ssh_session session, const char *host,
}
}
+ if (session->opts.ccalgo) {
+ rc = set_tcp_ccalgo(s, session->opts.ccalgo);
+ if (rc < 0) {
+ ssh_set_error(session, SSH_FATAL,
+ "Failed to set TCP_CONGESTION on socket: %s",
+ ssh_strerror(errno, err_msg, SSH_ERRNO_MSG_MAX));
+ ssh_connect_socket_close(s);
+ s = -1;
+ continue;
+ }
+ }
+
errno = 0;
rc = connect(s, itr->ai_addr, itr->ai_addrlen);
if (rc == -1) {
diff --git a/src/misc.c b/src/misc.c
index 774211fb..74e57959 100644
--- a/src/misc.c
+++ b/src/misc.c
@@ -71,6 +71,8 @@
#include "libssh/priv.h"
#include "libssh/misc.h"
#include "libssh/session.h"
+#include "libssh/wrapper.h"
+#include "libssh/crypto.h"
#ifdef HAVE_LIBGCRYPT
#define GCRYPT_STRING "/gcrypt"
@@ -2054,6 +2056,42 @@ ssize_t ssh_readn(int fd, void *buf, size_t nbytes)
return total_bytes_read;
}
+/**
+ * @brief Return supported cipher names
+ * @return The list of cipher names.
+ */
+const char **ssh_ciphers(void)
+{
+ struct ssh_cipher_struct *tab=ssh_get_ciphertab();
+ static const char *ciphers[32];
+ int n;
+
+ memset(ciphers, 0, sizeof(*ciphers));
+
+ for (n = 0; tab[n].name != NULL; n++) {
+ ciphers[n] = tab[n].name;
+ }
+ return ciphers;
+}
+
+/**
+ * @brief Return supported hmac names
+ * @return The list of hmac names.
+ */
+const char **ssh_hmacs(void)
+{
+ struct ssh_hmac_struct *tab=ssh_get_hmactab();
+ static const char *hmacs[32];
+ int n;
+
+ memset(hmacs, 0, sizeof(*hmacs));
+
+ for (n = 0; tab[n].name != NULL; n++) {
+ hmacs[n] = tab[n].name;
+ }
+ return hmacs;
+}
+
/**
* @brief Write the requested number of bytes to a local file.
*
@@ -2227,6 +2265,17 @@ ssh_proxyjumps_free(struct ssh_list *proxy_jump_list)
}
}
+static bool force_openssh_proxy_jumps;
+
+/**
+ * @brief Enable use of OpenSSH proxy jumps without setting the OPENSSH_PROXYJUMP env var
+ */
+void
+ssh_use_openssh_proxy_jumps(int v)
+{
+ force_openssh_proxy_jumps = (v > 0);
+}
+
/**
* @brief Check if libssh proxy jumps is enabled
*
@@ -2241,7 +2290,12 @@ ssh_libssh_proxy_jumps(void)
{
const char *t = getenv("OPENSSH_PROXYJUMP");
+ if (force_openssh_proxy_jumps)
+ return false;
+
return !(t != NULL && t[0] == '1');
}
+
+
/** @} */
diff --git a/src/options.c b/src/options.c
index 785296dd..a82d4d81 100644
--- a/src/options.c
+++ b/src/options.c
@@ -251,6 +251,7 @@ int ssh_options_copy(ssh_session src, ssh_session *dest)
new->opts.gss_delegate_creds = src->opts.gss_delegate_creds;
new->opts.flags = src->opts.flags;
new->opts.nodelay = src->opts.nodelay;
+ new->opts.ccalgo = src->opts.ccalgo;
new->opts.config_processed = src->opts.config_processed;
new->opts.control_master = src->opts.control_master;
new->common.log_verbosity = src->common.log_verbosity;
@@ -326,6 +327,9 @@ int ssh_options_set_algo(ssh_session session,
* - SSH_OPTIONS_PORT_STR:
* The port to connect to (const char *).
*
+ * - SSH_OPTIONS_AI_FAMILY:
+ * The address family for connecting (int *).
+ *
* - SSH_OPTIONS_FD:
* The file descriptor to use (socket_t).\n
* \n
@@ -571,6 +575,10 @@ int ssh_options_set_algo(ssh_session session,
* Set it to disable Nagle's Algorithm (TCP_NODELAY) on the
* session socket. (int, 0=false)
*
+ * - SSH_OPTIONS_CCALGO
+ *   Set it to specify the TCP congestion control algorithm for the
+ *   session socket (Linux only). (const char *)
+ *
* - SSH_OPTIONS_PROCESS_CONFIG
* Set it to false to disable automatic processing of per-user
* and system-wide OpenSSH configuration files. LibSSH
@@ -727,6 +735,21 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,
session->opts.port = i & 0xffffU;
}
break;
+ case SSH_OPTIONS_AI_FAMILY:
+ if (value == NULL) {
+ session->opts.ai_family = 0;
+ ssh_set_error_invalid(session);
+ return -1;
+ } else {
+ int *x = (int *) value;
+ if (*x < 0) {
+ session->opts.ai_family = 0;
+ ssh_set_error_invalid(session);
+ return -1;
+ }
+ session->opts.ai_family = *x;
+ }
+ break;
case SSH_OPTIONS_FD:
if (value == NULL) {
session->opts.fd = SSH_INVALID_SOCKET;
@@ -1241,6 +1264,20 @@ int ssh_options_set(ssh_session session, enum ssh_options_e type,
session->opts.nodelay = (*x & 0xff) > 0 ? 1 : 0;
}
break;
+ case SSH_OPTIONS_CCALGO:
+ v = value;
+ if (v == NULL || v[0] == '\0') {
+ ssh_set_error_invalid(session);
+ return -1;
+ } else {
+ SAFE_FREE(session->opts.ccalgo);
+ session->opts.ccalgo = strdup(v);
+ if (session->opts.ccalgo == NULL) {
+ ssh_set_error_oom(session);
+ return -1;
+ }
+ }
+ break;
case SSH_OPTIONS_PROCESS_CONFIG:
if (value == NULL) {
ssh_set_error_invalid(session);
diff --git a/src/session.c b/src/session.c
index 9fd5d946..ed9f908e 100644
--- a/src/session.c
+++ b/src/session.c
@@ -107,9 +107,11 @@ ssh_session ssh_new(void)
/* OPTIONS */
session->opts.StrictHostKeyChecking = 1;
session->opts.port = 22;
+ session->opts.ai_family = 0;
session->opts.fd = -1;
session->opts.compressionlevel = 7;
session->opts.nodelay = 0;
+ session->opts.ccalgo = NULL;
session->opts.identities_only = false;
session->opts.control_master = SSH_CONTROL_MASTER_NO;
diff --git a/src/sftp.c b/src/sftp.c
index 37b4133b..12b6d296 100644
--- a/src/sftp.c
+++ b/src/sftp.c
@@ -1488,6 +1488,132 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {
return -1; /* not reached */
}
+/*
+ * sftp_async_write is based on and sftp_async_write_end is copied from
+ * https://github.com/limes-datentechnik-gmbh/libssh
+ *
+ * sftp_async_write has some optimizations:
+ * - use ssh_buffer_new_size() to reduce realloc_buffer() calls.
+ * - use ssh_buffer_add_func() to avoid memcpy from read buffer to ssh buffer.
+ */
+ssize_t sftp_async_write(sftp_file file, ssh_add_func f, size_t count, void *userdata,
+ uint32_t* id) {
+ sftp_session sftp = file->sftp;
+ ssh_buffer buffer;
+ uint32_t buf_sz;
+ ssize_t actual;
+ int len;
+ int packetlen;
+ int rc;
+
+#define HEADROOM 16
+ /* sftp_packet_write() prepends a 5-bytes (uint32_t length and
+ * 1-byte type) header to the head of the payload by
+ * ssh_buffer_prepend_data(). Inserting headroom by
+ * ssh_buffer_new_size() eliminates memcpy for prepending the
+ * header.
+ */
+
+ buf_sz = (HEADROOM + /* for header */
+ sizeof(uint32_t) + /* id */
+ ssh_string_len(file->handle) + 4 + /* file->handle */
+ sizeof(uint64_t) + /* file->offset */
+ sizeof(uint32_t) + /* count */
+ count); /* datastring */
+
+ buffer = ssh_buffer_new_size(buf_sz, HEADROOM);
+ if (buffer == NULL) {
+ ssh_set_error_oom(sftp->session);
+ return -1;
+ }
+
+ *id = sftp_get_new_id(file->sftp);
+
+ rc = ssh_buffer_pack(buffer,
+ "dSqd",
+ *id,
+ file->handle,
+ file->offset,
+ count); /* len of datastring */
+
+ if (rc != SSH_OK){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ actual = ssh_buffer_add_func(buffer, f, count, userdata);
+ if (actual < 0){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+
+ packetlen=ssh_buffer_get_len(buffer)+5;
+ len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);
+ ssh_buffer_free(buffer);
+ if (len < 0) {
+ return SSH_ERROR;
+ } else if (len != packetlen) {
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Could only send %d of %d bytes to remote host!", len, packetlen);
+ SSH_LOG(SSH_LOG_PACKET,
+ "Could not write as much data as expected");
+ return SSH_ERROR;
+ }
+
+ file->offset += actual;
+
+ return actual;
+}
+
+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {
+ sftp_session sftp = file->sftp;
+ sftp_message msg = NULL;
+ sftp_status_message status;
+
+ msg = sftp_dequeue(sftp, id);
+ while (msg == NULL) {
+ if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {
+ /* we cannot block */
+ return SSH_AGAIN;
+ }
+ if (sftp_read_and_dispatch(sftp) < 0) {
+ /* something nasty has happened */
+ return SSH_ERROR;
+ }
+ msg = sftp_dequeue(sftp, id);
+ }
+
+ switch (msg->packet_type) {
+ case SSH_FXP_STATUS:
+ status = parse_status_msg(msg);
+ sftp_message_free(msg);
+ if (status == NULL) {
+ return SSH_ERROR;
+ }
+ sftp_set_error(sftp, status->status);
+ switch (status->status) {
+ case SSH_FX_OK:
+ status_msg_free(status);
+ return SSH_OK;
+ default:
+ break;
+ }
+ ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
+ "SFTP server: %s", status->errormsg);
+ status_msg_free(status);
+ return SSH_ERROR;
+ default:
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Received message %d during write!", msg->packet_type);
+ sftp_message_free(msg);
+ return SSH_ERROR;
+ }
+
+ return SSH_ERROR; /* not reached */
+}
+
/* Seek to a specific location in a file. */
int sftp_seek(sftp_file file, uint32_t new_offset) {
if (file == NULL) {
================================================
FILE: patch/libssh-0.9.6.patch
================================================
diff --git a/DefineOptions.cmake b/DefineOptions.cmake
index b82a5018..f1f2ab9d 100644
--- a/DefineOptions.cmake
+++ b/DefineOptions.cmake
@@ -15,13 +15,14 @@ option(UNIT_TESTING "Build with unit tests" OFF)
option(CLIENT_TESTING "Build with client tests; requires openssh" OFF)
option(SERVER_TESTING "Build with server tests; requires openssh and dropbear" OFF)
option(WITH_BENCHMARKS "Build benchmarks tools" OFF)
-option(WITH_EXAMPLES "Build examples" ON)
+option(WITH_EXAMPLES "Build examples" OFF)
option(WITH_NACL "Build with libnacl (curve25519)" ON)
option(WITH_SYMBOL_VERSIONING "Build with symbol versioning" ON)
option(WITH_ABI_BREAK "Allow ABI break" OFF)
option(WITH_GEX "Enable DH Group exchange mechanisms" ON)
option(FUZZ_TESTING "Build with fuzzer for the server" OFF)
option(PICKY_DEVELOPER "Build with picky developer flags" OFF)
+option(WITH_STATIC_LIB "Build static library" ON)
if (WITH_ZLIB)
set(WITH_LIBZ ON)
@@ -53,3 +54,7 @@ endif (NOT GLOBAL_BIND_CONFIG)
if (NOT GLOBAL_CLIENT_CONFIG)
set(GLOBAL_CLIENT_CONFIG "/etc/ssh/ssh_config")
endif (NOT GLOBAL_CLIENT_CONFIG)
+
+if (WITH_STATIC_LIB)
+ set(BUILD_STATIC_LIB ON)
+endif()
diff --git a/include/libssh/sftp.h b/include/libssh/sftp.h
index 8c14b21d..95ac1d6b 100644
--- a/include/libssh/sftp.h
+++ b/include/libssh/sftp.h
@@ -565,6 +565,9 @@ LIBSSH_API int sftp_async_read(sftp_file file, void *data, uint32_t len, uint32_
*/
LIBSSH_API ssize_t sftp_write(sftp_file file, const void *buf, size_t count);
+LIBSSH_API int sftp_async_write(sftp_file file, const void *buf, size_t count, uint32_t* id);
+LIBSSH_API int sftp_async_write_end(sftp_file file, uint32_t id, int blocking);
+
/**
* @brief Seek to a specific location in a file.
*
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index a576cf71..303a1c7f 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -412,6 +412,10 @@ if (BUILD_STATIC_LIB)
if (WIN32)
target_compile_definitions(ssh-static PUBLIC "LIBSSH_STATIC")
endif (WIN32)
+ install(TARGETS ssh-static
+ EXPORT libssh-config
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ COMPONENT libraries)
endif (BUILD_STATIC_LIB)
message(STATUS "Threads_FOUND=${Threads_FOUND}")
diff --git a/src/sftp.c b/src/sftp.c
index a8346040..a4261ec9 100644
--- a/src/sftp.c
+++ b/src/sftp.c
@@ -2234,6 +2234,102 @@ ssize_t sftp_write(sftp_file file, const void *buf, size_t count) {
return -1; /* not reached */
}
+/*
+ * sftp_async_write and sftp_async_write_end are copied from
+ * https://github.com/limes-datentechnik-gmbh/libssh
+ */
+int sftp_async_write(sftp_file file, const void *buf, size_t count, uint32_t* id) {
+ sftp_session sftp = file->sftp;
+ ssh_buffer buffer;
+ int len;
+ int packetlen;
+ int rc;
+
+ buffer = ssh_buffer_new();
+ if (buffer == NULL) {
+ ssh_set_error_oom(sftp->session);
+ return -1;
+ }
+
+ *id = sftp_get_new_id(file->sftp);
+
+ rc = ssh_buffer_pack(buffer,
+ "dSqdP",
+ *id,
+ file->handle,
+ file->offset,
+ count, /* len of datastring */
+ (size_t)count, buf);
+ if (rc != SSH_OK){
+ ssh_set_error_oom(sftp->session);
+ ssh_buffer_free(buffer);
+ return SSH_ERROR;
+ }
+ packetlen=ssh_buffer_get_len(buffer)+5;
+ len = sftp_packet_write(file->sftp, SSH_FXP_WRITE, buffer);
+ ssh_buffer_free(buffer);
+ if (len < 0) {
+ return SSH_ERROR;
+ } else if (len != packetlen) {
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Could only send %d of %d bytes to remote host!", len, packetlen);
+ SSH_LOG(SSH_LOG_PACKET,
+ "Could not write as much data as expected");
+ return SSH_ERROR;
+ }
+
+ file->offset += count;
+
+ return SSH_OK;
+}
+
+int sftp_async_write_end(sftp_file file, uint32_t id, int blocking) {
+ sftp_session sftp = file->sftp;
+ sftp_message msg = NULL;
+ sftp_status_message status;
+
+ msg = sftp_dequeue(sftp, id);
+ while (msg == NULL) {
+ if (!blocking && ssh_channel_poll(sftp->channel, 0) == 0) {
+ /* we cannot block */
+ return SSH_AGAIN;
+ }
+ if (sftp_read_and_dispatch(sftp) < 0) {
+ /* something nasty has happened */
+ return SSH_ERROR;
+ }
+ msg = sftp_dequeue(sftp, id);
+ }
+
+ switch (msg->packet_type) {
+ case SSH_FXP_STATUS:
+ status = parse_status_msg(msg);
+ sftp_message_free(msg);
+ if (status == NULL) {
+ return SSH_ERROR;
+ }
+ sftp_set_error(sftp, status->status);
+ switch (status->status) {
+ case SSH_FX_OK:
+ status_msg_free(status);
+ return SSH_OK;
+ default:
+ break;
+ }
+ ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
+ "SFTP server: %s", status->errormsg);
+ status_msg_free(status);
+ return SSH_ERROR;
+ default:
+ ssh_set_error(sftp->session, SSH_FATAL,
+ "Received message %d during write!", msg->packet_type);
+ sftp_message_free(msg);
+ return SSH_ERROR;
+ }
+
+ return SSH_ERROR; /* not reached */
+}
+
/* Seek to a specific location in a file. */
int sftp_seek(sftp_file file, uint32_t new_offset) {
if (file == NULL) {
================================================
FILE: rpm/.gitignore
================================================
# generated by cmake
mscp.spec
================================================
FILE: rpm/mscp.spec.in
================================================
Name: mscp
Version: @MSCP_VERSION@
Release: 1%{?dist}
Summary: mscp, fast file transfer over multiple SSH connections
Group: Applications/Internet
License: GPLv3
URL: https://github.com/upa/mscp
Source0: %{name}-%{version}.tar.gz
BuildRequires: gcc make cmake zlib-devel openssl-devel
Requires: glibc crypto-policies krb5-libs openssl-libs libcom_err
%description
mscp transfers files over multiple SSH connections. Multiple threads
and connections in mscp transfer (1) multiple files simultaneously
and (2) a large file in parallel. It would shorten the waiting time
for transferring a lot of/large files over networks.
%global debug_package %{nil}
%prep
%setup -q
%build
cmake -S . -B build -DINSTALL_EXECUTABLE_ONLY=ON
make -C build %{?_smp_mflags}
%install
make -C build install DESTDIR=%{buildroot}
%files
/usr/local/bin/mscp
/usr/local/share/man/man1/mscp.1
%changelog
* Sat Nov 08 2025 Ryo Nakamura - 0.2.4-1
- RPM release for v0.2.4
* Tue Aug 12 2025 Ryo Nakamura - 0.2.3-1
- RPM release for v0.2.3
* Wed Apr 16 2025 Ryo Nakamura - 0.2.2-1
- RPM release for v0.2.2
* Sat May 11 2024 Ryo Nakamura - 0.2.1-1
- RPM release for v0.2.1
* Mon Apr 15 2024 Ryo Nakamura - 0.2.0-1
- RPM release for v0.2.0
* Thu Mar 14 2024 Ryo Nakamura - 0.1.5-0
- RPM release for v0.1.5
* Wed Feb 07 2024 Ryo Nakamura - 0.1.4-0
- RPM release for v0.1.4
* Sat Feb 03 2024 Ryo Nakamura - 0.1.3-0
- Initial release for rpm packaging
================================================
FILE: scripts/install-build-deps.sh
================================================
#!/usr/bin/env bash
#
# Install build dependencies.
#
# Detects the platform as "Kernel-ID" (uname -s plus ID from
# /etc/os-release, when present) and either installs the packages
# required to build mscp or, with --dont-install, just prints them.

set -e
#set -u

function print_help() {
	echo "$0 [options]"
	echo "    --dont-install         Print required packages."
	echo "    --platform [PLATFORM]  PLATFORM is Kernel-ID, e.g., Linux-ubuntu."
	echo "                           Automatically detected if not specified."
}

platform=$(uname -s)
doinstall=1

if [ -e /etc/os-release ]; then
	source /etc/os-release
	platform=${platform}-${ID}
fi

# getopts with the "-:" trick to accept GNU-style long options.
while getopts h-: opt; do
	optarg="${!OPTIND}"
	[[ "$opt" = - ]] && opt="-$OPTARG"
	case "-${opt}" in
	--dont-install)
		doinstall=0
		;;
	--platform)
		platform=$optarg
		shift
		;;
	-h)
		print_help
		exit 0
		;;
	*)
		print_help
		exit 1
		;;
	esac
done

case $platform in
Darwin)
	cmd="brew install"
	pkgs="openssl@3"
	;;
Linux-ubuntu* | Linux-debian* | Linux-devuan*)
	cmd="apt-get install --no-install-recommends -y"
	pkgs="gcc make cmake zlib1g-dev libssl-dev libkrb5-dev"
	;;
Linux-centos* | Linux-rhel* | Linux-rocky* | Linux-almalinux*)
	cmd="yum install -y"
	pkgs="gcc make cmake zlib-devel openssl-devel rpm-build"
	;;
Linux-arch*)
	# pacman's non-interactive flag is --noconfirm ("--no-confirm" is
	# rejected by pacman and would abort the install)
	cmd="pacman --noconfirm -S"
	pkgs="gcc make cmake"
	;;
FreeBSD-freebsd)
	# -y: assume yes, so pkg works non-interactively like the
	# apt/yum/pacman branches above
	cmd="pkg install -y"
	pkgs="cmake"
	;;
*)
	echo "unsupported platform: $platform"
	exit 1
	;;
esac

if [ $doinstall -gt 0 ]; then
	echo do "$cmd $pkgs"
	$cmd $pkgs
else
	echo $pkgs
fi
================================================
FILE: scripts/test-in-container.sh
================================================
#!/bin/bash -e
#
# Run this script in mscp docker containers.
# This script runs end-to-end test with installed mscp, or executes the
# given command-line arguments instead (after preparing the sshd test
# environment).

# Work from the directory containing this script so the relative path
# ../test resolves regardless of the caller's cwd.
script_dir=$(cd $(dirname ${0}) && pwd)
cd $script_dir
set -x
# sshd listens on 22 and 8022 (tests also exercise a non-default port)
echo "Port 22" >> /etc/ssh/sshd_config
echo "Port 8022" >> /etc/ssh/sshd_config
## Alpine default sshd disables TcpForwarding, which is required for proxyjump test
sed -i -e 's/AllowTcpForwarding no/AllowTcpForwarding yes/' /etc/ssh/sshd_config
# Run sshd, unless one is already running (pid file used as the marker)
if [ ! -e /var/run/sshd.pid ]; then
/usr/sbin/sshd -E /tmp/sshd.log
sleep 1
fi
# Pre-populate known_hosts for every host/port combination the tests
# connect to, so ssh does not stop at the host-key confirmation prompt.
for port in 22 8022; do
ssh-keyscan -p $port localhost >> ${HOME}/.ssh/known_hosts
ssh-keyscan -p $port ip6-localhost >> ${HOME}/.ssh/known_hosts
ssh-keyscan -p $port 127.0.0.1 >> ${HOME}/.ssh/known_hosts
ssh-keyscan -p $port ::1 >> ${HOME}/.ssh/known_hosts
done
if [ $# -gt 0 ]; then
# command arguments are passed, exec them
"$@"
else
# no arguments passed, run the test
python3 -m pytest -v ../test
fi
================================================
FILE: src/atomic.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _ATOMIC_H_
#define _ATOMIC_H_
#include
#include
#include
typedef int refcnt;
/* Atomically increment the reference count pointed to by c. */
static inline void refcnt_inc(refcnt *c)
{
	(void)__sync_fetch_and_add(c, 1);
}
/* Atomically decrement the reference count and return the new value. */
static inline refcnt refcnt_dec(refcnt *c)
{
	/* fetch-and-sub returns the old value; old - 1 == new value */
	return __sync_fetch_and_sub(c, 1) - 1;
}
/* mutex */
typedef pthread_mutex_t lock;
/* Initialize a mutex with the default attributes. */
static inline void lock_init(lock *m)
{
	pthread_mutex_init(m, NULL);
}
/* Acquire the mutex; failure is treated as a programming error
 * (asserted in debug builds). */
static inline void lock_acquire(lock *l)
{
	int ret = pthread_mutex_lock(l);
	assert(ret == 0);
	(void)ret; /* silence unused-variable warning when NDEBUG elides assert() */
}
/* Release the mutex; failure is treated as a programming error
 * (asserted in debug builds). */
static inline void lock_release(lock *l)
{
	int ret = pthread_mutex_unlock(l);
	assert(ret == 0);
	(void)ret; /* silence unused-variable warning when NDEBUG elides assert() */
}
static inline void lock_release_via_cleanup(void *l)
{
lock_release(l);
}
/* Acquire l and push a cancellation cleanup handler that releases it.
 * Must be paired with LOCK_RELEASE() in the same lexical scope, because
 * pthread_cleanup_push()/pop() expand to a matching brace pair. */
#define LOCK_ACQUIRE(l) \
	lock_acquire(l); \
	pthread_cleanup_push(lock_release_via_cleanup, l)
/* Pop the cleanup handler and execute it (i.e., release the lock). */
#define LOCK_RELEASE() pthread_cleanup_pop(1)
/* read/write lock */
typedef pthread_rwlock_t rwlock;
/* Initialize a read/write lock with the default attributes. */
static inline void rwlock_init(rwlock *rwl)
{
	pthread_rwlock_init(rwl, NULL);
}
/* Acquire rw for reading; failure is treated as a programming error
 * (asserted in debug builds). */
static inline void rwlock_read_acquire(rwlock *rw)
{
	int ret = pthread_rwlock_rdlock(rw);
	assert(ret == 0);
	(void)ret; /* silence unused-variable warning when NDEBUG elides assert() */
}
/* Acquire rw for writing; failure is treated as a programming error
 * (asserted in debug builds). */
static inline void rwlock_write_acquire(rwlock *rw)
{
	int ret = pthread_rwlock_wrlock(rw);
	assert(ret == 0);
	(void)ret; /* silence unused-variable warning when NDEBUG elides assert() */
}
/* Release rw (held for either reading or writing); failure is treated
 * as a programming error (asserted in debug builds). */
static inline void rwlock_release(rwlock *rw)
{
	int ret = pthread_rwlock_unlock(rw);
	assert(ret == 0);
	(void)ret; /* silence unused-variable warning when NDEBUG elides assert() */
}
static inline void rwlock_release_via_cleanup(void *rw)
{
rwlock_release(rw);
}
/* Acquire rw (read or write) and push a cancellation cleanup handler
 * that releases it. Must be paired with RWLOCK_RELEASE() in the same
 * lexical scope, because pthread_cleanup_push()/pop() expand to a
 * matching brace pair. */
#define RWLOCK_READ_ACQUIRE(rw) \
	rwlock_read_acquire(rw); \
	pthread_cleanup_push(rwlock_release_via_cleanup, rw)
#define RWLOCK_WRITE_ACQUIRE(rw) \
	rwlock_write_acquire(rw); \
	pthread_cleanup_push(rwlock_release_via_cleanup, rw)
/* Pop the cleanup handler and execute it (i.e., release the lock). */
#define RWLOCK_RELEASE() pthread_cleanup_pop(1)
#endif /* _ATOMIC_H_ */
================================================
FILE: src/bwlimit.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#define timespeczerorize(ts) \
do { \
ts.tv_sec = 0; \
ts.tv_nsec = 0; \
} while (0)
int bwlimit_init(struct bwlimit *bw, uint64_t bps, uint64_t win)
{
if (!(bw->sem = sem_create(1)))
return -1;
bw->bps = bps;
bw->win = win; /* msec window */
bw->amt = (double)bps / 8 / 1000 * win; /* bytes in a window (msec) */
bw->credit = bw->amt;
timespeczerorize(bw->wstart);
timespeczerorize(bw->wend);
return 0;
}
/* true if ts holds a nonzero time */
#define timespecisset(ts) ((ts).tv_sec || (ts).tv_nsec)

/* r = a + msec milliseconds, normalizing tv_nsec into [0, 1e9).
 * Uses '>=' (not '>') so that tv_nsec == 1000000000 is also carried
 * into tv_sec; nanosleep() rejects tv_nsec outside [0, 999999999]. */
#define timespecmsadd(a, msec, r)                                   \
	do {                                                        \
		(r).tv_sec = (a).tv_sec;                            \
		(r).tv_nsec = (a).tv_nsec + (msec * 1000000);       \
		if ((r).tv_nsec >= 1000000000) {                    \
			(r).tv_sec += (r.tv_nsec) / 1000000000L;    \
			(r).tv_nsec = (r.tv_nsec) % 1000000000L;    \
		}                                                   \
	} while (0)

/* r = a - b, borrowing from tv_sec when tv_nsec underflows */
#define timespecsub(a, b, r)                                 \
	do {                                                 \
		(r).tv_sec = (a).tv_sec - (b).tv_sec;        \
		(r).tv_nsec = (a).tv_nsec - (b).tv_nsec;     \
		if ((r).tv_nsec < 0) {                       \
			(r).tv_sec -= 1;                     \
			(r).tv_nsec += 1000000000;           \
		}                                            \
	} while (0)

/* compare two timespecs as total nanoseconds, e.g. timespeccmp(a, b, >) */
#define timespeccmp(a, b, expr) \
	((a.tv_sec * 1000000000 + a.tv_nsec) expr(b.tv_sec * 1000000000 + b.tv_nsec))
/* Charge nr_bytes against the bandwidth limit, sleeping when the
 * current window's byte budget is exhausted.
 *
 * Returns 0 on success (possibly after sleeping), or -1 if waiting on
 * the internal semaphore fails. When bw->bps is 0 the limiter is
 * disabled and the call returns immediately. */
int bwlimit_wait(struct bwlimit *bw, size_t nr_bytes)
{
	struct timespec now, end, rq, rm;

	if (bw->bps == 0)
		return 0; /* no bandwidth limit */

	/* serialize window accounting across sender threads */
	if (sem_wait(bw->sem) < 0)
		return -1;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (!timespecisset(bw->wstart)) {
		/* first charge in a fresh window: record its start/end */
		bw->wstart = now;
		timespecmsadd(bw->wstart, bw->win, bw->wend);
	}

	bw->credit -= nr_bytes;
	if (bw->credit < 0) {
		/* no more credit on this window. sleep until the end
		 * of this window + additional time for the remaining
		 * bytes. Divide by (double)bps / 8 (bytes/sec) in
		 * floating point: the former integer bps / 8 truncated,
		 * and divided by zero for 0 < bps < 8. */
		uint64_t addition = (double)(bw->credit * -1) / ((double)bw->bps / 8);
		timespecmsadd(bw->wend, addition * 1000, end);
		if (timespeccmp(end, now, >)) {
			timespecsub(end, now, rq);
			while (nanosleep(&rq, &rm) == -1) {
				if (errno != EINTR)
					break; /* real error: stop sleeping */
				rq = rm; /* interrupted: sleep the remainder */
			}
		}
		/* open a new window with a full budget */
		bw->credit = bw->amt;
		timespeczerorize(bw->wstart);
	}

	sem_post(bw->sem);
	return 0;
}
================================================
FILE: src/bwlimit.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _BWLIMIT_H_
#define _BWLIMIT_H_
#include
#include
#include
#include
#include
struct bwlimit {
sem_t *sem; /* semaphore */
size_t bps; /* limit bit-rate (bps) */
size_t win; /* window size (msec) */
size_t amt; /* amount of bytes can be sent in a window */
ssize_t credit; /* remaining bytes can be sent in a window */
struct timespec wstart, wend; /* window start time and end time */
};
int bwlimit_init(struct bwlimit *bw, uint64_t bps, uint64_t win);
/* if bps is 0, it means that bwlimit is not active. If so,
* bwlimit_wait() returns immediately. */
int bwlimit_wait(struct bwlimit *bw, size_t nr_bytes);
#endif /* _BWLIMIT_H_ */
================================================
FILE: src/checkpoint.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define MSCP_CHECKPOINT_MAGIC 0x7063736dUL /* mscp in ascii */
#define MSCP_CHECKPOINT_VERSION 0x1
/**
* mscp checkpoint file format. All values are network byte order.
*
* The file starts with the File header:
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +---------------------------------------------------------------+
* | Magic Code |
* +---------------+-----------------------------------------------+
* | Version |
* +---------------+
*
* Magic code: 0x7063736dUL
*
* Version: 1.
*
*
* Each object in a checkpoint always starts with an object header:
* +---------------+---------------+-------------------------------+
* | Type | rsv | Length |
* +---------------+---------------+-------------------------------+
*
* Type: 0x0A (meta), 0x0B (path), or 0x0C (chunk)
*
* Rsv: reserved
*
* Length: Length of this object including the object header.
*
*
* Meta object provides generic information for the failed copy:
* +---------------+---------------+-------------------------------+
* | Type | rsv | Length |
* +---------------+---------------+-------------------------------+
* | Direction | Remote string ...
* +---------------+------------------
*
* Direction: 1 (Local-to-Remote copy) or 2 (Remote-to-Local copy)
*
* Remote string: Remote host, e.g., user@hostname and IP address,
* string including '\0'.
*
*
* Path object represents a file with source and destination paths:
* +---------------+---------------+-------------------------------+
* | Type | rsv | Length |
* +---------------+---------------+-------------------------------+
* | Index |
* +-------------------------------+-------------------------------+
* | Source offset | Destination offset |
* +-------------------------------+-------------------------------+
* // //
* // Source path string //
* // //
* +---------------------------------------------------------------+
* // //
* // Destination path string //
* // //
* +---------------------------------------------------------------+
*
* Index: 32-bit unsigned int indicating this path (used by chunks)
*
* Source offset: Offset of the Source path string from the head of
* this object. It is identical to the end of the Destination offset
* field.
*
* Destination offset: Offset of the Destination path string from the
* head of this object. It also indicates the end of the Source path
* string.
*
* Source path string: String of copy source path (including '\0').
*
* Destination path string: string of copy destination path (including
* '\0').
*
*
* Chunk object represents a chunk associated with a path object:
* +---------------+---------------+-------------------------------+
* | Type | rsv | Length |
* +---------------+---------------+-------------------------------+
* | Index |
* +---------------------------------------------------------------+
* | Chunk |
* | offset |
* +---------------------------------------------------------------+
* | Chunk |
* | length |
* +---------------------------------------------------------------+
*
* Index: 32 bit unsigned int indicating the index of a path object
* this chunk associated with.
*
* Chunk offset: 64 bit unsigned int indicating the offset of this
* chunk from the head of the associating a file.
*
* Chunk length: 64 bit unsigned int indicating the length (bytes) of
* this chunk.
*/
/* Object type codes stored in checkpoint_obj_hdr.type (see the file
 * format description above). */
enum {
	OBJ_TYPE_META = 0x0A,
	OBJ_TYPE_PATH = 0x0B,
	OBJ_TYPE_CHUNK = 0x0C,
};

/* File header written once at the head of a checkpoint file.
 * All multi-byte fields in this file are network byte order. */
struct checkpoint_file_hdr {
	uint32_t magic; /* MSCP_CHECKPOINT_MAGIC */
	uint8_t version; /* MSCP_CHECKPOINT_VERSION */
} __attribute__((packed));

/* Common header prepended to every object in the checkpoint file. */
struct checkpoint_obj_hdr {
	uint8_t type; /* one of OBJ_TYPE_* */
	uint8_t rsv;
	uint16_t len; /* length of an object including this hdr */
} __attribute__((packed));

/* Meta object: copy direction plus the remote host string. */
struct checkpoint_obj_meta {
	struct checkpoint_obj_hdr hdr;
	uint8_t direction; /* L2R or R2L */
	char remote[0]; /* NUL-terminated remote string follows */
} __attribute__((packed));

/* Path object: fixed header followed by the src and dst path strings. */
struct checkpoint_obj_path {
	struct checkpoint_obj_hdr hdr;
	uint32_t idx; /* path index referenced by chunk objects */
	uint16_t src_off; /* offset to the src path string (including
			   * \0) from the head of this object. */
	uint16_t dst_off; /* offset to the dst path string (including
			   * \0) from the head of this object */
} __attribute__((packed));

/* Accessors for the variable-length strings of a path object.
 * NOTE(review): the argument o is used unparenthesized in o->...;
 * pass a plain pointer variable, not a compound expression. */
#define obj_path_src(o) ((char *)(o) + ntohs(o->src_off))
#define obj_path_dst(o) ((char *)(o) + ntohs(o->dst_off))
#define obj_path_src_len(o) (ntohs(o->dst_off) - ntohs(o->src_off))
#define obj_path_dst_len(o) (ntohs(o->hdr.len) - ntohs(o->dst_off))
/* sanity-check offsets/lengths of a path object read from a file */
#define obj_path_validate(o)                              \
	((ntohs(o->hdr.len) > ntohs(o->dst_off)) &&       \
	 (ntohs(o->dst_off) > ntohs(o->src_off)) &&       \
	 (obj_path_src_len(o) < PATH_MAX) &&              \
	 (obj_path_dst_len(o) < PATH_MAX))

/* Chunk object: a byte range of the file identified by idx. */
struct checkpoint_obj_chunk {
	struct checkpoint_obj_hdr hdr;
	uint32_t idx; /* index indicating associating path */
	uint64_t off; /* chunk offset from the head of the file */
	uint64_t len; /* chunk length in bytes */
} __attribute__((packed));

/* largest possible serialized object (path object with two PATH_MAX strings) */
#define CHECKPOINT_OBJ_MAXLEN (sizeof(struct checkpoint_obj_path) + PATH_MAX * 2)
/* Serialize one path object (header + src + dst strings) to fd with a
 * single writev(). idx is stored into p->data so that subsequently
 * written chunk objects can reference this path by index.
 * Returns 0 on success, -1 on error (error string via priv_set_errv).
 * NOTE(review): a short writev() is not detected here — only a
 * negative return is treated as failure; confirm this is acceptable
 * for regular-file output. */
static int checkpoint_write_path(int fd, struct path *p, unsigned int idx)
{
	char buf[CHECKPOINT_OBJ_MAXLEN];
	struct checkpoint_obj_path *path = (struct checkpoint_obj_path *)buf;
	size_t src_len, dst_len;
	struct iovec iov[3];

	p->data = idx; /* save idx to be pointed by chunks */

	src_len = strlen(p->path) + 1;
	dst_len = strlen(p->dst_path) + 1;

	memset(buf, 0, sizeof(buf));
	path->hdr.type = OBJ_TYPE_PATH;
	/* hdr.len covers the struct plus both NUL-terminated strings */
	path->hdr.len = htons(sizeof(*path) + src_len + dst_len);
	path->idx = htonl(idx);
	path->src_off = htons(sizeof(*path));
	path->dst_off = htons(sizeof(*path) + src_len);

	iov[0].iov_base = path;
	iov[0].iov_len = sizeof(*path);
	iov[1].iov_base = p->path;
	iov[1].iov_len = src_len;
	iov[2].iov_base = p->dst_path;
	iov[2].iov_len = dst_len;

	if (writev(fd, iov, 3) < 0) {
		priv_set_errv("writev: %s", strerrno());
		return -1;
	}
	return 0;
}
/* Serialize one chunk object to fd. The chunk references its path via
 * the index previously stored in p->data by checkpoint_write_path().
 * Returns 0 on success, -1 on error. */
static int checkpoint_write_chunk(int fd, struct chunk *c)
{
	struct checkpoint_obj_chunk chunk;

	memset(&chunk, 0, sizeof(chunk));
	chunk.hdr.type = OBJ_TYPE_CHUNK;
	chunk.hdr.len = htons(sizeof(chunk));
	chunk.idx = htonl(c->p->data); /* index stored by checkpoint_write_path */
	chunk.off = htonll(c->off);
	chunk.len = htonll(c->len);

	if (write(fd, &chunk, sizeof(chunk)) < 0) {
		/* fix: this path uses write(), not writev(); report the
		 * failing call accurately */
		priv_set_errv("write: %s", strerrno());
		return -1;
	}
	return 0;
}
/* Write a checkpoint file: file header, a meta object describing the
 * remote and direction, then all not-yet-done paths and chunks.
 * Returns 0 on success, -1 on error.
 * Fixes: the original leaked fd on every error path and never closed
 * it on success; the snprintf truncation check is now explicit about
 * the signed/unsigned comparison and also catches snprintf failure. */
int checkpoint_save(const char *pathname, int dir, const char *user, const char *remote,
		    pool *path_pool, pool *chunk_pool)
{
	struct checkpoint_file_hdr hdr;
	struct checkpoint_obj_meta meta;
	struct iovec iov[3];
	struct chunk *c;
	struct path *p;
	char buf[1024];
	unsigned int i, nr_paths, nr_chunks;
	int fd, ret;

	fd = open(pathname, O_WRONLY | O_CREAT | O_TRUNC,
		  S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
	if (fd < 0) {
		priv_set_errv("open: %s: %s", pathname, strerrno());
		return -1;
	}

	/* file hdr */
	hdr.magic = htonl(MSCP_CHECKPOINT_MAGIC);
	hdr.version = MSCP_CHECKPOINT_VERSION;

	/* meta object carries "user@remote" (or just "remote") */
	if (user)
		ret = snprintf(buf, sizeof(buf), "%s@%s", user, remote);
	else
		ret = snprintf(buf, sizeof(buf), "%s", remote);
	if (ret < 0 || (size_t)ret >= sizeof(buf)) {
		priv_set_errv("too long username and/or remote");
		goto err_out;
	}

	memset(&meta, 0, sizeof(meta));
	meta.hdr.type = OBJ_TYPE_META;
	meta.hdr.len = htons(sizeof(meta) + strlen(buf) + 1);
	meta.direction = dir;

	/* file hdr and meta object go out in a single writev() */
	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &meta;
	iov[1].iov_len = sizeof(meta);
	iov[2].iov_base = buf;
	iov[2].iov_len = strlen(buf) + 1;
	if (writev(fd, iov, 3) < 0) {
		priv_set_errv("writev: %s", strerrno());
		goto err_out;
	}

	/* write paths that still have work to do */
	nr_paths = 0;
	pool_for_each(path_pool, p, i) {
		if (p->state == FILE_STATE_DONE)
			continue;
		if (checkpoint_write_path(fd, p, nr_paths) < 0)
			goto err_out;
		nr_paths++;
	}

	/* write chunks that are not yet transferred */
	nr_chunks = 0;
	pool_for_each(chunk_pool, c, i) {
		if (c->state == CHUNK_STATE_DONE)
			continue;
		if (checkpoint_write_chunk(fd, c) < 0)
			goto err_out;
		nr_chunks++;
	}

	pr_notice("checkpoint: %u paths and %u chunks saved", nr_paths, nr_chunks);

	close(fd); /* fix: fd was never closed on success */
	return 0;

err_out:
	close(fd); /* fix: fd leaked on error paths */
	return -1;
}
/* Parse a meta object: copy the remote string into remote[len] and
 * the transfer direction into *dir. Returns 0 on success, -1 on error.
 * Fix: guard against hdr->len < sizeof(*meta); the original computed
 * ntohs(hdr->len) - sizeof(*meta) in unsigned arithmetic, so a
 * malformed (too-small) length wrapped around and defeated the buffer
 * check. */
static int checkpoint_load_meta(struct checkpoint_obj_hdr *hdr, char *remote, size_t len,
				int *dir)
{
	struct checkpoint_obj_meta *meta = (struct checkpoint_obj_meta *)hdr;

	if (ntohs(hdr->len) < sizeof(*meta)) {
		priv_set_errv("invalid meta object length");
		return -1;
	}
	if (len < ntohs(hdr->len) - sizeof(*meta)) {
		priv_set_errv("too short buffer");
		return -1;
	}

	snprintf(remote, len, "%s", meta->remote);
	*dir = meta->direction;

	pr_notice("checkpoint: remote=%s direction=%s", meta->remote,
		  meta->direction == MSCP_DIRECTION_L2R ? "local-to-remote" :
		  meta->direction == MSCP_DIRECTION_R2L ? "remote-to-local" :
							  "invalid");

	return 0;
}
/* Parse a path object, duplicate its src/dst strings, and push the
 * resulting struct path into path_pool. Returns 0 on success, -1 on
 * error. alloc_path() takes ownership of s and d on success (they are
 * freed here only when alloc_path fails).
 * NOTE(review): if pool_push() fails, p (and its strings) appear to be
 * leaked — confirm whether alloc_path has a matching free routine that
 * should be called here. */
static int checkpoint_load_path(struct checkpoint_obj_hdr *hdr, pool *path_pool)
{
	struct checkpoint_obj_path *path = (struct checkpoint_obj_path *)hdr;
	struct path *p;
	char *s, *d;

	/* reject objects with inconsistent offsets or oversized strings */
	if (!obj_path_validate(path)) {
		priv_set_errv("invalid path object");
		return -1;
	}

	if (!(s = strndup(obj_path_src(path), obj_path_src_len(path)))) {
		priv_set_errv("strdup: %s", strerrno());
		return -1;
	}

	if (!(d = strndup(obj_path_dst(path), obj_path_dst_len(path)))) {
		priv_set_errv("strdup: %s", strerrno());
		free(s);
		return -1;
	}

	if (!(p = alloc_path(s, d))) {
		free(s);
		free(d);
		return -1;
	}

	if (pool_push(path_pool, p) < 0) {
		priv_set_errv("pool_push: %s", strerrno());
		return -1;
	}

	pr_info("checkpoint:file: idx=%u %s -> %s", ntohl(path->idx),
		p->path, p->dst_path);

	return 0;
}
/* Parse a chunk object and push a struct chunk referencing the path at
 * index chunk->idx in path_pool. Requires that all path objects have
 * already been loaded (in order), which checkpoint file layout
 * guarantees. Returns 0 on success, -1 on error. */
static int checkpoint_load_chunk(struct checkpoint_obj_hdr *hdr, pool *path_pool,
				 pool *chunk_pool)
{
	struct checkpoint_obj_chunk *chunk = (struct checkpoint_obj_chunk *)hdr;
	struct chunk *c;
	struct path *p;

	if (!(p = pool_get(path_pool, ntohl(chunk->idx)))) {
		/* we assume all paths are already loaded in the order */
		priv_set_errv("path index %u not found", ntohl(chunk->idx));
		return -1;
	}

	if (!(c = alloc_chunk(p, ntohll(chunk->off), ntohll(chunk->len))))
		return -1;

	if (pool_push(chunk_pool, c) < 0) {
		priv_set_errv("pool_push: %s", strerrno());
		return -1;
	}

	pr_debug("checkpoint:chunk: idx=%u %s 0x%lx-0x%lx", ntohl(chunk->idx),
		 p->path, c->off, c->off + c->len);

	return 0;
}
/* Read the next object from fd into buf (count bytes available).
 * Returns 1 when an object was read, 0 on clean EOF, -1 on error.
 * Fixes: (1) a partial header read (0 < ret < sizeof hdr) was
 * previously treated as a complete header, parsing garbage; (2) a
 * read() error returned -1 without setting an error string; (3) an
 * object length smaller than the header size underflowed the objlen
 * computation. */
static int checkpoint_read_obj(int fd, void *buf, size_t count)
{
	struct checkpoint_obj_hdr *hdr = (struct checkpoint_obj_hdr *)buf;
	ssize_t ret, objlen, objbuflen;

	memset(buf, 0, count);

	if (count < sizeof(*hdr)) {
		priv_set_errv("too short buffer");
		return -1;
	}

	ret = read(fd, hdr, sizeof(*hdr));
	if (ret == 0)
		return 0; /* no more objects */
	if (ret < 0) {
		priv_set_errv("read: %s", strerrno());
		return -1;
	}
	if ((size_t)ret < sizeof(*hdr)) {
		priv_set_errv("checkpoint truncated");
		return -1;
	}

	if (ntohs(hdr->len) < sizeof(*hdr)) {
		priv_set_errv("invalid object length");
		return -1;
	}
	objlen = ntohs(hdr->len) - sizeof(*hdr);
	objbuflen = count - sizeof(*hdr);
	if (objbuflen < objlen) {
		priv_set_errv("too short buffer");
		return -1;
	}

	/* read the object body immediately following the header */
	ret = read(fd, buf + sizeof(*hdr), objlen);
	if (ret < objlen) {
		priv_set_errv("checkpoint truncated");
		return -1;
	}

	return 1;
}
/* Read and validate the checkpoint file header (magic and version).
 * Returns 0 on success, -1 on error.
 * Fix: typo "megic" in the error message. */
static int checkpoint_read_file_hdr(int fd)
{
	struct checkpoint_file_hdr hdr;
	ssize_t ret;

	ret = read(fd, &hdr, sizeof(hdr));
	if (ret < 0) {
		priv_set_errv("read: %s", strerrno());
		return -1;
	}
	if (ret < (ssize_t)sizeof(hdr)) {
		priv_set_errv("checkpoint truncated");
		return -1;
	}

	if (ntohl(hdr.magic) != MSCP_CHECKPOINT_MAGIC) {
		priv_set_errv("checkpoint: invalid magic code");
		return -1;
	}

	if (hdr.version != MSCP_CHECKPOINT_VERSION) {
		priv_set_errv("checkpoint: unknown version %u", hdr.version);
		return -1;
	}

	return 0;
}
/* Walk the objects in a checkpoint file. When remote/dir are non-NULL
 * the meta object fills them; when path_pool/chunk_pool are non-NULL
 * path and chunk objects populate them. When only the meta is wanted
 * (no pools), parsing stops after the meta object. Returns 0 on
 * success, -1 on error.
 * Fixes: (1) fd was leaked on every error return after open();
 * (2) a -1 from checkpoint_read_obj() terminated the loop but the
 * function still returned success. */
static int checkpoint_load(const char *pathname, char *remote, size_t len, int *dir,
			   pool *path_pool, pool *chunk_pool)
{
	char buf[CHECKPOINT_OBJ_MAXLEN];
	struct checkpoint_obj_hdr *hdr;
	int fd, ret;

	if ((fd = open(pathname, O_RDONLY)) < 0) {
		priv_set_errv("open: %s: %s", pathname, strerrno());
		return -1;
	}

	if (checkpoint_read_file_hdr(fd) < 0)
		goto err_out;

	hdr = (struct checkpoint_obj_hdr *)buf;
	while ((ret = checkpoint_read_obj(fd, buf, sizeof(buf))) > 0) {
		switch (hdr->type) {
		case OBJ_TYPE_META:
			if (!remote || !dir)
				break;
			if (checkpoint_load_meta(hdr, remote, len, dir) < 0)
				goto err_out;
			if (!path_pool || !chunk_pool)
				goto out; /* caller only wants the meta */
			break;
		case OBJ_TYPE_PATH:
			if (!path_pool)
				break;
			if (checkpoint_load_path(hdr, path_pool) < 0)
				goto err_out;
			break;
		case OBJ_TYPE_CHUNK:
			if (!path_pool)
				break;
			if (checkpoint_load_chunk(hdr, path_pool, chunk_pool) < 0)
				goto err_out;
			break;
		default:
			priv_set_errv("unknown obj type %u", hdr->type);
			goto err_out;
		}
	}
	if (ret < 0)
		goto err_out; /* read error must not look like success */

out:
	close(fd);
	return 0;

err_out:
	close(fd);
	return -1;
}
/* Read only the meta object: remote host string and direction. */
int checkpoint_load_remote(const char *pathname, char *remote, size_t len, int *dir)
{
	return checkpoint_load(pathname, remote, len, dir, NULL, NULL);
}

/* Read only the path and chunk objects into the given pools. */
int checkpoint_load_paths(const char *pathname, pool *path_pool, pool *chunk_pool)
{
	return checkpoint_load(pathname, NULL, 0, NULL, path_pool, chunk_pool);
}
================================================
FILE: src/checkpoint.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _CHECKPOINT_H_
#define _CHECKPOINT_H_

#include

/* checkpoint_save() stores states to a checkpoint file (pathname).
 * dir is the transfer direction (L2R or R2L); user may be NULL. */
int checkpoint_save(const char *pathname, int dir, const char *user, const char *remote,
		    pool *path_pool, pool *chunk_pool);

/* checkpoint_load_remote() reads a checkpoint file (pathname) and returns
 * the remote host string to *remote (at most len bytes) and the transfer
 * direction to *dir.
 */
int checkpoint_load_remote(const char *pathname, char *remote, size_t len, int *dir);

/* checkpoint_load_paths() reads a checkpoint file (pathname) and
 * fills path_pool and chunk_pool.
 */
int checkpoint_load_paths(const char *pathname, pool *path_pool, pool *chunk_pool);

#endif /* _CHECKPOINT_H_ */
================================================
FILE: src/fileops.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* Thread-local sftp session consumed by the *_wrapped() functions,
 * which cannot take an sftp argument because glob() calls them through
 * fixed-signature function pointers. */
sftp_session __thread tls_sftp;

/* Set (or clear, with NULL) the thread-local sftp session. */
void set_tls_sftp_session(sftp_session sftp)
{
	tls_sftp = sftp;
}
/* Translate the last SFTP status of the session into errno so that
 * callers can report remote failures through the usual errno path.
 * Fixes: missing break statements after SSH_FX_FAILURE and
 * SSH_FX_BAD_MESSAGE let control fall through, so both cases ended up
 * setting errno = ENOTCONN instead of EINVAL/EBADMSG; also fixed the
 * "unkown" typo in the warning. */
static void sftp_err_to_errno(sftp_session sftp)
{
	int sftperr = sftp_get_error(sftp);

	switch (sftperr) {
	case SSH_FX_OK:
	case SSH_FX_EOF:
		errno = 0;
		break;
	case SSH_FX_NO_SUCH_FILE:
	case SSH_FX_NO_SUCH_PATH:
		errno = ENOENT;
		break;
	case SSH_FX_PERMISSION_DENIED:
		errno = EACCES;
		break;
	case SSH_FX_FAILURE:
		errno = EINVAL;
		break;
	case SSH_FX_BAD_MESSAGE:
		errno = EBADMSG;
		break;
	case SSH_FX_NO_CONNECTION:
		errno = ENOTCONN;
		break;
	case SSH_FX_CONNECTION_LOST:
		errno = ENETRESET;
		break;
	case SSH_FX_OP_UNSUPPORTED:
		errno = EOPNOTSUPP;
		break;
	case SSH_FX_INVALID_HANDLE:
		errno = EBADF;
		break;
	case SSH_FX_FILE_ALREADY_EXISTS:
		errno = EEXIST;
		break;
	case SSH_FX_WRITE_PROTECT:
		errno = EPERM;
		break;
	case SSH_FX_NO_MEDIA:
		errno = ENODEV;
		break;
	default:
		pr_warn("unknown SSH_FX response %d", sftperr);
	}
}
/* Open a directory either over SFTP (sftp non-NULL) or locally.
 * Returns a heap-allocated MDIR, or NULL on error with errno set
 * (via sftp_err_to_errno for the remote case). */
MDIR *mscp_opendir(const char *path, sftp_session sftp)
{
	MDIR *md;

	if (!(md = malloc(sizeof(*md))))
		return NULL;
	memset(md, 0, sizeof(*md));

	if (sftp) {
		md->remote = sftp_opendir(sftp, path);
		sftp_err_to_errno(sftp);
		if (!md->remote) {
			goto free_out;
		}
	} else {
		md->local = opendir(path);
		if (!md->local) {
			goto free_out;
		}
	}
	return md;

free_out:
	free(md);
	return NULL;
}
/* glob() callback wrapper: opendir using the thread-local sftp session. */
MDIR *mscp_opendir_wrapped(const char *path)
{
	return mscp_opendir(path, tls_sftp);
}

/* Close an MDIR opened by mscp_opendir() and free it. */
void mscp_closedir(MDIR *md)
{
	if (md->remote)
		sftp_closedir(md->remote);
	else
		closedir(md->local);
	free(md);
}
struct dirent __thread tls_dirent;
/* tls_dirent contains dirent converted from sftp_attributes returned
 * from sftp_readdir(). This trick is derived from openssh's
 * fudge_readdir() */

/* Read the next entry of an MDIR; remote entries are converted into a
 * thread-local struct dirent with a synthesized inode number.
 * Returns NULL at end of directory or on error (errno set for remote).
 * NOTE(review): inum is a plain static, not __thread like tls_dirent —
 * concurrent remote readdirs would race on it; confirm intended. */
struct dirent *mscp_readdir(MDIR *md)
{
	sftp_attributes attr;
	struct dirent *ret = NULL;
	static int inum = 1;

	if (md->remote) {
		attr = sftp_readdir(md->remote->sftp, md->remote);
		if (!attr) {
			sftp_err_to_errno(md->remote->sftp);
			return NULL;
		}
		memset(&tls_dirent, 0, sizeof(tls_dirent));
		strncpy(tls_dirent.d_name, attr->name, sizeof(tls_dirent.d_name) - 1);
		tls_dirent.d_ino = inum++;
		if (!inum) /* keep the fake inode number non-zero on wrap */
			inum = 1;
		ret = &tls_dirent;
		sftp_attributes_free(attr);
	} else
		ret = readdir(md->local);

	return ret;
}
/* mkdir over SFTP or locally. An already-existing directory is treated
 * as success (EEXIST is swallowed). Returns 0 on success, <0 on error
 * with errno set. */
int mscp_mkdir(const char *path, mode_t mode, sftp_session sftp)
{
	int ret;

	if (sftp) {
		ret = sftp_mkdir(sftp, path, mode);
		sftp_err_to_errno(sftp);
	} else
		ret = mkdir(path, mode);

	if (ret < 0 && errno == EEXIST) {
		ret = 0;
	}
	return ret;
}
/* Convert libssh sftp_attributes into a struct stat (size, ids, mode,
 * timestamps, file type). st is fully cleared first.
 * Fix: "unkown" typo in the warning message. */
static void sftp_attr_to_stat(sftp_attributes attr, struct stat *st)
{
	memset(st, 0, sizeof(*st));
	st->st_size = attr->size;
	st->st_uid = attr->uid;
	st->st_gid = attr->gid;
	st->st_mode = attr->permissions;

#if defined(__APPLE__)
#define st_atim st_atimespec
#define st_mtim st_mtimespec
#define st_ctim st_ctimespec
#endif
	st->st_atim.tv_sec = attr->atime;
	st->st_atim.tv_nsec = attr->atime_nseconds;
	st->st_mtim.tv_sec = attr->mtime;
	st->st_mtim.tv_nsec = attr->mtime_nseconds;
	st->st_ctim.tv_sec = attr->createtime;
	st->st_ctim.tv_nsec = attr->createtime_nseconds;

	switch (attr->type) {
	case SSH_FILEXFER_TYPE_REGULAR:
		st->st_mode |= S_IFREG;
		break;
	case SSH_FILEXFER_TYPE_DIRECTORY:
		st->st_mode |= S_IFDIR;
		break;
	case SSH_FILEXFER_TYPE_SYMLINK:
		st->st_mode |= S_IFLNK;
		break;
	case SSH_FILEXFER_TYPE_SPECIAL:
		st->st_mode |= S_IFCHR; /* or block? */
		break;
	case SSH_FILEXFER_TYPE_UNKNOWN:
		st->st_mode |= S_IFIFO; /* really? */
		break;
	default:
		pr_warn("unknown SSH_FILEXFER_TYPE %d", attr->type);
	}
}
/* stat over SFTP or locally. Returns 0 on success, -1 on error with
 * errno set. */
int mscp_stat(const char *path, struct stat *st, sftp_session sftp)
{
	sftp_attributes attr;
	int ret = 0;

	memset(st, 0, sizeof(*st));

	if (sftp) {
		attr = sftp_stat(sftp, path);
		sftp_err_to_errno(sftp);
		if (!attr)
			return -1;
		sftp_attr_to_stat(attr, st);
		sftp_attributes_free(attr);
		ret = 0;
	} else
		ret = stat(path, st);

	return ret;
}

/* glob() callback wrapper: stat using the thread-local sftp session. */
int mscp_stat_wrapped(const char *path, struct stat *st)
{
	return mscp_stat(path, st, tls_sftp);
}
/* lstat over SFTP or locally (does not follow symlinks). Returns 0 on
 * success, -1 on error with errno set.
 * NOTE(review): unlike mscp_stat(), st is not zeroed here up front;
 * sftp_attr_to_stat()/lstat() fill it on every success path, so this
 * only matters on failure — confirm callers never read st then. */
int mscp_lstat(const char *path, struct stat *st, sftp_session sftp)
{
	sftp_attributes attr;
	int ret = 0;

	if (sftp) {
		attr = sftp_lstat(sftp, path);
		sftp_err_to_errno(sftp);
		if (!attr)
			return -1;
		sftp_attr_to_stat(attr, st);
		sftp_attributes_free(attr);
		ret = 0;
	} else
		ret = lstat(path, st);

	return ret;
}

/* glob() callback wrapper: lstat using the thread-local sftp session. */
int mscp_lstat_wrapped(const char *path, struct stat *st)
{
	return mscp_lstat(path, st, tls_sftp);
}
/* Open a file over SFTP or locally. Returns a heap-allocated mf, or
 * NULL on error with errno set.
 * NOTE(review): a local fd of 0 (possible if stdin was closed) is
 * indistinguishable from "no local file" in mscp_close() below —
 * confirm fd 0 cannot occur here. */
mf *mscp_open(const char *path, int flags, mode_t mode, sftp_session sftp)
{
	mf *f;

	f = malloc(sizeof(*f));
	if (!f)
		return NULL;
	memset(f, 0, sizeof(*f));

	if (sftp) {
		f->remote = sftp_open(sftp, path, flags, mode);
		if (!f->remote) {
			sftp_err_to_errno(sftp);
			goto free_out;
		}
	} else {
		f->local = open(path, flags, mode);
		if (f->local < 0)
			goto free_out;
	}
	return f;

free_out:
	free(f);
	return NULL;
}

/* Close an mf opened by mscp_open() and free it. f->local == 0 means
 * "no local fd" (struct was zeroed), hence the > 0 test. */
void mscp_close(mf *f)
{
	if (f->remote)
		sftp_close(f->remote);
	if (f->local > 0)
		close(f->local);
	free(f);
}
/* Seek to absolute offset off (SEEK_SET semantics for both the SFTP
 * and local cases). Returns the sftp_seek64/lseek result. */
off_t mscp_lseek(mf *f, off_t off)
{
	off_t ret;

	if (f->remote) {
		ret = sftp_seek64(f->remote, off);
		sftp_err_to_errno(f->remote->sftp);
	} else
		ret = lseek(f->local, off, SEEK_SET);

	return ret;
}
/* Apply mode and size (and, when preserve_ts, atime/mtime) from st to
 * path. Remote: one sftp_setstat() call carries all attributes.
 * Local: truncate + (optional) setutimes + chmod, in that order.
 * Returns 0 on success, <0 on the first failing operation. */
int mscp_setstat(const char *path, struct stat *st, bool preserve_ts, sftp_session sftp)
{
	int ret;

	if (sftp) {
		struct sftp_attributes_struct attr;
		memset(&attr, 0, sizeof(attr));
		attr.permissions = st->st_mode;
		attr.size = st->st_size;
		attr.flags = (SSH_FILEXFER_ATTR_PERMISSIONS | SSH_FILEXFER_ATTR_SIZE);
		if (preserve_ts) {
			attr.atime = st->st_atim.tv_sec;
			attr.atime_nseconds = st->st_atim.tv_nsec;
			attr.mtime = st->st_mtim.tv_sec;
			attr.mtime_nseconds = st->st_mtim.tv_nsec;
			attr.flags |= (SSH_FILEXFER_ATTR_ACCESSTIME |
				       SSH_FILEXFER_ATTR_MODIFYTIME |
				       SSH_FILEXFER_ATTR_SUBSECOND_TIMES);
		}
		ret = sftp_setstat(sftp, path, &attr);
		sftp_err_to_errno(sftp);
	} else {
		if ((ret = truncate(path, st->st_size)) < 0)
			return ret;
		if (preserve_ts) {
			if ((ret = setutimes(path, st->st_atim, st->st_mtim)) < 0)
				return ret;
		}
		if ((ret = chmod(path, st->st_mode)) < 0)
			return ret;
	}

	return ret;
}
/* glob over SFTP (via GLOB_ALTDIRFUNC callbacks bound to the
 * thread-local sftp session) or locally. On musl, which lacks
 * GLOB_ALTDIRFUNC, the remote case degenerates to returning the
 * pattern itself as the single match, tagged via gl_offs so that
 * mscp_globfree() knows how to free it. */
int mscp_glob(const char *pattern, int flags, glob_t *pglob, sftp_session sftp)
{
	int ret;

	if (sftp) {
#ifndef GLOB_ALTDIRFUNC
#define GLOB_NOALTDIRMAGIC INT_MAX
		/* musl does not implement GLOB_ALTDIRFUNC */
		/* NOTE(review): malloc/strdup results are not checked here */
		pglob->gl_pathc = 1;
		pglob->gl_pathv = malloc(sizeof(char *));
		pglob->gl_pathv[0] = strdup(pattern);
		pglob->gl_offs = GLOB_NOALTDIRMAGIC;
		return 0;
#else
		flags |= GLOB_ALTDIRFUNC;
		set_tls_sftp_session(sftp);
		/* member names and callback signatures differ per platform */
#if defined(__APPLE__) || defined(__FreeBSD__)
		pglob->gl_opendir = (void *(*)(const char *))mscp_opendir_wrapped;
		pglob->gl_readdir = (struct dirent * (*)(void *)) mscp_readdir;
		pglob->gl_closedir = (void (*)(void *))mscp_closedir;
		pglob->gl_lstat = mscp_lstat_wrapped;
		pglob->gl_stat = mscp_stat_wrapped;
#elif linux
		pglob->gl_opendir = (void *(*)(const char *))mscp_opendir_wrapped;
		pglob->gl_readdir = (void *(*)(void *))mscp_readdir;
		pglob->gl_closedir = (void (*)(void *))mscp_closedir;
		pglob->gl_lstat = (int (*)(const char *, void *))mscp_lstat_wrapped;
		pglob->gl_stat = (int (*)(const char *, void *))mscp_stat_wrapped;
#else
#error unsupported platform
#endif
#endif
	}

	ret = glob(pattern, flags, NULL, pglob);

	if (sftp)
		set_tls_sftp_session(NULL);

	return ret;
}

/* Free a glob_t filled by mscp_glob(), handling the musl fake-result
 * case (marked by GLOB_NOALTDIRMAGIC in gl_offs). */
void mscp_globfree(glob_t *pglob)
{
#ifndef GLOB_ALTDIRFUNC
	if (pglob->gl_offs == GLOB_NOALTDIRMAGIC) {
		free(pglob->gl_pathv[0]);
		free(pglob->gl_pathv);
		return;
	}
#endif
	globfree(pglob);
}
================================================
FILE: src/fileops.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
void set_tls_sftp_session(sftp_session sftp);
/* sftp_session set by set_tls_sftp_session is used in
   mscp_open_wrapped(), mscp_stat_wrapped(), and
   mscp_lstat_wrapped(). These _wrapped() functions exist for
   sftp_glob() */

/* directory operations: MDIR wraps either a local DIR* or a remote
 * sftp_dir; exactly one member is set. */
struct mdir_struct {
	DIR *local;
	sftp_dir remote;
};
typedef struct mdir_struct MDIR;

MDIR *mscp_opendir(const char *path, sftp_session sftp);
MDIR *mscp_opendir_wrapped(const char *path);
void mscp_closedir(MDIR *md);
struct dirent *mscp_readdir(MDIR *md);
int mscp_mkdir(const char *path, mode_t mode, sftp_session sftp);

/* stat operations */
int mscp_stat(const char *path, struct stat *st, sftp_session sftp);
int mscp_stat_wrapped(const char *path, struct stat *st);
int mscp_lstat(const char *path, struct stat *st, sftp_session sftp);
int mscp_lstat_wrapped(const char *path, struct stat *st);

/* file operations: mf wraps either a local fd or a remote sftp_file. */
struct mf_struct {
	sftp_file remote;
	int local;
};
typedef struct mf_struct mf;

mf *mscp_open(const char *path, int flags, mode_t mode, sftp_session sftp);
void mscp_close(mf *f);
off_t mscp_lseek(mf *f, off_t off);

/* mscp_setstat() involves chmod and truncate. It executes both at
 * once via a single SFTP command (sftp_setstat()).
 */
int mscp_setstat(const char *path, struct stat *st, bool preserve_ts, sftp_session sftp);

/* remote glob */
int mscp_glob(const char *pattern, int flags, glob_t *pglob, sftp_session sftp);
void mscp_globfree(glob_t *pglob);
================================================
FILE: src/main.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* Print the short usage banner; when print_help is true, also print
 * the per-option help text and the cipher/hmac lists supported by the
 * linked libssh. */
void usage(bool print_help)
{
	printf("mscp " MSCP_BUILD_VERSION ": copy files over multiple SSH connections\n"
	       "\n"
	       "Usage: mscp [-46vqDpdNh] [-n nr_conns] [-m coremask] [-u max_startups]\n"
	       "            [-I interval] [-W checkpoint] [-R checkpoint]\n"
	       "            [-s min_chunk_sz] [-S max_chunk_sz] [-a nr_ahead]\n"
	       "            [-b buf_sz] [-L limit_bitrate]\n"
	       "            [-l login_name] [-P port] [-F ssh_config] [-o ssh_option]\n"
	       "            [-i identity_file] [-J destination] [-c cipher_spec] [-M hmac_spec]\n"
	       "            [-C compress] [-g congestion]\n"
	       "            source ... target\n"
	       "\n");

	if (!print_help)
		return;

	printf("    -n NR_CONNECTIONS  number of connections "
	       "(default: floor(log(cores)*2)+1)\n"
	       "    -m COREMASK        hex value to specify cores where threads pinned\n"
	       "    -u MAX_STARTUPS    number of concurrent unauthed SSH attempts "
	       "(default: 8)\n"
	       "    -I INTERVAL        interval between SSH connection attempts (default: 0)\n"
	       "    -W CHECKPOINT      write states to the checkpoint if transfer fails\n"
	       "    -R CHECKPOINT      resume transferring from the checkpoint\n"
	       "\n"
	       "    -s MIN_CHUNK_SIZE  min chunk size (default: 16M bytes)\n"
	       "    -S MAX_CHUNK_SIZE  max chunk size (default: filesize/nr_conn/4)\n"
	       "    -a NR_AHEAD        number of inflight SFTP commands (default: 32)\n"
	       "    -b BUF_SZ          buffer size for i/o and transfer\n"
	       "    -L LIMIT_BITRATE   Limit the bitrate, n[KMG] (default: 0, no limit)\n"
	       "\n"
	       "    -4                 use IPv4\n"
	       "    -6                 use IPv6\n"
	       "    -v                 increment verbose output level\n"
	       "    -q                 disable output\n"
	       "    -D                 dry run. check copy destinations with -vvv\n"
	       "    -r                 no effect\n"
	       "\n"
	       "    -l LOGIN_NAME      login name\n"
	       "    -P PORT            port number\n"
	       "    -F SSH_CONFIG      path to user ssh config (default ~/.ssh/config)\n"
	       "    -o SSH_OPTION      ssh_config option\n"
	       "    -i IDENTITY        identity file for public key authentication\n"
	       "    -J DESTINATION     ProxyJump destination\n"
	       "    -c CIPHER          cipher spec\n"
	       "    -M HMAC            hmac spec\n"
	       "    -C COMPRESS        enable compression: "
	       "yes, no, zlib, zlib@openssh.com\n"
	       "    -g CONGESTION      specify TCP congestion control algorithm\n"
	       "    -p                 preserve timestamps of files\n"
	       "    -d                 increment ssh debug output level\n"
	       "    -N                 enable Nagle's algorithm (default disabled)\n"
	       "    -h                 print this help\n"
	       "\n");

	/* enumerate what the linked libssh actually supports */
	const char **ciphers = mscp_ssh_ciphers();
	const char **hmacs = mscp_ssh_hmacs();
	int n;

	printf("Available ciphers: ");
	for (n = 0; ciphers[n] != NULL; n++) {
		printf("%s", ciphers[n]);
		if (ciphers[n + 1])
			printf(", ");
	}
	printf("\n\n");

	printf("Available hmacs: ");
	for (n = 0; hmacs[n] != NULL; n++) {
		printf("%s", hmacs[n]);
		if (hmacs[n + 1])
			printf(", ");
	}
	printf("\n\n");
}
/* Strip enclosing '[' ']' (IPv6 literal notation) from s in place.
 * Returns a pointer into s past the '[' when brackets were present,
 * otherwise s unchanged. */
char *strip_brackets(char *s)
{
	size_t n = strlen(s);

	if (n >= 2 && s[0] == '[' && s[n - 1] == ']') {
		s[n - 1] = '\0';
		return s + 1;
	}
	return s;
}
/* Split "user@host:path" (any of user@ and host: optional) into its
 * components. Returns a strdup'd working buffer that the returned
 * *userp/*hostp/*pathp point into (caller frees it), or NULL on error.
 * '@' and ':' may be escaped with '\', and ':' inside '[...]' (IPv6
 * literals) is not a delimiter.
 * Fix: error message said "stdrup" instead of "strdup". */
char *split_user_host_path(const char *s, char **userp, char **hostp, char **pathp)
{
	char *tmp, *cp, *user = NULL, *host = NULL, *path = NULL;
	bool inbrackets = false;

	if (!(tmp = strdup(s))) {
		pr_err("strdup: %s", strerror(errno));
		return NULL;
	}

	path = tmp; /* until a host delimiter is found, everything is path */
	for (cp = tmp; *cp; cp++) {
		if (*cp == '@' && (cp > tmp) && *(cp - 1) != '\\' && user == NULL) {
			/* cp is a non-escaped '@', so this '@' is the
			 * delimiter between username and host. */
			*cp = '\0';
			user = tmp;
			host = cp + 1;
		}
		if (*cp == '[')
			inbrackets = true;
		if (*cp == ']')
			inbrackets = false;
		if (*cp == ':' && (cp > tmp) && *(cp - 1) != '\\') {
			if (!inbrackets) {
				/* cp is a non-escaped ':' and not in
				 * brackets for IPv6 address
				 * notation. So, this ':' is the
				 * delimiter between host and
				 * path. */
				*cp = '\0';
				host = host == NULL ? tmp : host;
				path = cp + 1;
				break;
			}
		}
	}

	*userp = user;
	*hostp = host ? strip_brackets(host) : NULL;
	*pathp = path;
	return tmp;
}
/* One parsed command-line copy argument. copy owns the backing buffer
 * returned by split_user_host_path(); user/host/path point into it
 * (path may instead be a separately strdup'd expansion). */
struct target {
	char *copy;
	char *user;
	char *host;
	char *path;
};
/* Return 0 when targets a and b refer to the identical user@host pair,
 * otherwise 1. A NULL field only matches another NULL field. */
int compare_remote(struct target *a, struct target *b)
{
	if ((a->user == NULL) != (b->user == NULL))
		return 1;
	if (a->user && strcmp(a->user, b->user) != 0)
		return 1;

	if ((a->host == NULL) != (b->host == NULL))
		return 1;
	if (a->host && strcmp(a->host, b->host) != 0)
		return 1;

	return 0;
}
struct target *validate_targets(char **arg, int len)
{
	/* arg is array of source ... destination.
	 * There are two cases:
	 *
	 * 1. user@host:path host:path ... path, remote to local copy
	 * 2. path path ... host:path, local to remote copy.
	 *
	 * This function split user@remote:path args into struct target,
	 * and validate all remotes are identical (mscp does not support
	 * remote to remote copy).
	 */
	struct target *t, *t0;
	int n, nslash;

	if ((t = calloc(len, sizeof(struct target))) == NULL) {
		pr_err("calloc: %s", strerrno());
		return NULL;
	}

	/* split remote:path into remote and path */
	for (n = 0; n < len; n++) {
		t[n].copy =
			split_user_host_path(arg[n], &t[n].user, &t[n].host, &t[n].path);
		if (!t[n].copy) {
			pr_err("failed to parse '%s'", arg[n]);
			goto free_target_out;
		}
	}

	/* expand remote path, e.g., empty dst path and '~' */
	for (n = 0; n < len; n++) {
		if (!t[n].host)
			continue;
		/* this target is a remote path. check the path and
		 * expand it. this part is derived from
		 * openssh-portal prepare_remote_path() function.
		 */
		char *path = t[n].path;
		/* "" or "~" means the remote home directory */
		if (*path == '\0' || strcmp(path, "~") == 0)
			t[n].path = strdup(".");
		else if (strncmp(path, "~/", 2) == 0) {
			/* "~/", "~//", ... -> "."; "~/x" -> "x" */
			if ((nslash = strspn(path + 2, "/")) == strlen(path + 2))
				t[n].path = strdup(".");
			else
				t[n].path = strdup(path + 2 + nslash);
		}
		/* NOTE(review): the strdup'd replacement paths are not
		 * freed on the error paths below — confirm acceptable
		 * since failure here ends the process. */
		if (!t[n].path) {
			pr_err("strdup failed: %s", strerrno());
			goto free_target_out;
		}
	}

	/* check all user@host are identical. t[len - 1] is the
	 * destination, so we need to check t[0] to t[len - 2] having
	 * the identical remote notation */
	t0 = &t[0];
	for (n = 1; n < len - 1; n++) {
		if (compare_remote(t0, &t[n]) != 0)
			goto invalid_remotes;
	}

	/* check inconsistent remote position in args */
	if (t[0].host == NULL && t[len - 1].host == NULL) {
		pr_err("no remote host given");
		goto free_split_out;
	}
	if (t[0].host != NULL && t[len - 1].host != NULL) {
		pr_err("no local path given");
		goto free_split_out;
	}

	return t;

invalid_remotes:
	pr_err("invalid remote host notation");

free_split_out:
	for (n = 0; n < len; n++)
		if (t[n].copy)
			free(t[n].copy);

free_target_out:
	free(t);
	return NULL;
}
/* Global transfer context shared with the signal handler and the
 * progress thread. */
struct mscp *m = NULL;
pthread_t tid_stat = 0;
bool interrupted = false;

/* SIGINT: remember the interrupt and ask the transfer to stop. */
void sigint_handler(int sig)
{
	interrupted = true;
	mscp_stop(m);
}

void *print_stat_thread(void *arg);

/* printf-style output to stdout, flushed immediately (progress lines
 * use \r and must appear without buffering). */
void print_cli(const char *fmt, ...)
{
	va_list va;
	va_start(va, fmt);
	vfprintf(stdout, fmt, va);
	fflush(stdout);
	va_end(va);
}

void print_stat(bool final);
/* Parse a "\d+[kKmMgG]?" string into a long, multiplying by the unit
 * suffix. When i is true the units are binary (1024-based), otherwise
 * decimal (1000-based). The suffix character is stripped from value in
 * place before parsing. */
long atol_with_unit(char *value, bool i)
{
	char *suffix = value + strlen(value) - 1;
	const long base = i ? 1024 : 1000;
	long multiplier;

	switch (*suffix) {
	case 'k':
	case 'K':
		multiplier = base;
		break;
	case 'm':
	case 'M':
		multiplier = base * base;
		break;
	case 'g':
	case 'G':
		multiplier = base * base * base;
		break;
	default:
		multiplier = 1;
		suffix = NULL; /* no unit character to strip */
		break;
	}

	if (suffix)
		*suffix = '\0';

	return atol(value) * multiplier;
}
/* Redirect fd to /dev/null (used by -q to silence stdout).
 * Returns 0 on success, -1 on error. */
int to_dev_null(int fd)
{
	int nfd = open("/dev/null", O_WRONLY);
	if (nfd < 0) {
		pr_err("open /dev/null: %s", strerrno());
		return -1;
	}
	if (dup2(nfd, fd) < 0) {
		pr_err("dup2: %s", strerrno());
		return -1;
	}
	close(nfd); /* fd now points at /dev/null; the temp fd is no longer needed */
	return 0;
}
/* Entry point: parse options, then either scan sources (normal mode)
 * or load a checkpoint (-R, resume mode), run the transfer with a
 * progress thread, and optionally save a checkpoint (-W) on failure
 * or dry run.
 * Fix: ret was read uninitialized when resuming with -D — that path
 * jumps to out: before any assignment; it is now initialized to 0. */
int main(int argc, char **argv)
{
	struct mscp_ssh_opts s;
	struct mscp_opts o;
	struct target *t;
	int ch, n, i;
	int ret = 0; /* fix: resume + dryrun reaches out: without setting ret */
	int direction = 0;
	char *remote = NULL, *checkpoint_save = NULL, *checkpoint_load = NULL;
	bool quiet = false, dryrun = false, resume = false;
	int nr_options = 0;

	memset(&s, 0, sizeof(s));
	memset(&o, 0, sizeof(o));
	o.severity = MSCP_SEVERITY_WARN;

#define mscpopts "n:m:u:I:W:R:s:S:a:b:L:46vqDrl:P:F:o:i:J:c:M:C:g:pdNh"
	while ((ch = getopt(argc, argv, mscpopts)) != -1) {
		switch (ch) {
		case 'n':
			o.nr_threads = atoi(optarg);
			if (o.nr_threads < 1) {
				pr_err("invalid number of connections: %s", optarg);
				return 1;
			}
			break;
		case 'm':
			o.coremask = optarg;
			break;
		case 'u':
			o.max_startups = atoi(optarg);
			break;
		case 'I':
			o.interval = atoi(optarg);
			break;
		case 'W':
			checkpoint_save = optarg;
			break;
		case 'R':
			checkpoint_load = optarg;
			resume = true;
			break;
		case 's':
			o.min_chunk_sz = atol_with_unit(optarg, true);
			break;
		case 'S':
			o.max_chunk_sz = atol_with_unit(optarg, true);
			break;
		case 'a':
			o.nr_ahead = atoi(optarg);
			break;
		case 'b':
			o.buf_sz = atol_with_unit(optarg, true);
			break;
		case 'L':
			o.bitrate = atol_with_unit(optarg, false);
			break;
		case '4':
			s.ai_family = AF_INET;
			break;
		case '6':
			s.ai_family = AF_INET6;
			break;
		case 'v':
			o.severity++;
			break;
		case 'q':
			quiet = true;
			break;
		case 'D':
			dryrun = true;
			break;
		case 'r':
			/* for compatibility with scp */
			break;
		case 'l':
			s.login_name = optarg;
			break;
		case 'P':
			s.port = optarg;
			break;
		case 'F':
			s.config = optarg;
			break;
		case 'o':
			/* accumulate a NULL-terminated array of ssh options */
			nr_options++;
			s.options = realloc(s.options, sizeof(char *) * (nr_options + 1));
			if (!s.options) {
				pr_err("realloc: %s", strerrno());
				return 1;
			}
			s.options[nr_options - 1] = optarg;
			s.options[nr_options] = NULL;
			break;
		case 'i':
			s.identity = optarg;
			break;
		case 'J':
			s.proxyjump = optarg;
			break;
		case 'c':
			s.cipher = optarg;
			break;
		case 'M':
			s.hmac = optarg;
			break;
		case 'C':
			s.compress = optarg;
			break;
		case 'g':
			s.ccalgo = optarg;
			break;
		case 'p':
			o.preserve_ts = true;
			break;
		case 'd':
			s.debug_level++;
			break;
		case 'N':
			s.enable_nagle = true;
			break;
		case 'h':
			usage(true);
			return 0;
		default:
			usage(false);
			return 1;
		}
	}

	if (quiet)
		to_dev_null(STDOUT_FILENO);

	s.password = getenv(ENV_SSH_AUTH_PASSWORD);
	s.passphrase = getenv(ENV_SSH_AUTH_PASSPHRASE);

	if ((m = mscp_init(&o, &s)) == NULL) {
		pr_err("mscp_init: %s", priv_get_err());
		return -1;
	}

	if (!resume) {
		/* normal transfer (not resume) */
		if (argc - optind < 2) {
			/* mscp needs at least 2 (src and target) arguments */
			usage(false);
			return 1;
		}
		i = argc - optind;

		if ((t = validate_targets(argv + optind, i)) == NULL)
			return -1;

		if (t[0].host) {
			/* copy remote to local */
			direction = MSCP_DIRECTION_R2L;
			remote = t[0].host;
			s.login_name = s.login_name ? s.login_name : t[0].user;
		} else {
			/* copy local to remote */
			direction = MSCP_DIRECTION_L2R;
			remote = t[i - 1].host;
			s.login_name = s.login_name ? s.login_name : t[i - 1].user;
		}

		if (mscp_set_remote(m, remote, direction) < 0) {
			pr_err("mscp_set_remote: %s", priv_get_err());
			return -1;
		}

		if (mscp_connect(m) < 0) {
			pr_err("mscp_connect: %s", priv_get_err());
			return -1;
		}

		for (n = 0; n < i - 1; n++) {
			if (mscp_add_src_path(m, t[n].path) < 0) {
				pr_err("mscp_add_src_path: %s", priv_get_err());
				return -1;
			}
		}
		if (mscp_set_dst_path(m, t[i - 1].path) < 0) {
			pr_err("mscp_set_dst_path: %s", priv_get_err());
			return -1;
		}

		/* start to scan source files and resolve their destination paths */
		if (mscp_scan(m) < 0) {
			pr_err("mscp_scan: %s", priv_get_err());
			return -1;
		}
	} else {
		/* resume a transfer from the specified checkpoint */
		char r[512];
		int d;
		if (mscp_checkpoint_get_remote(checkpoint_load, r, sizeof(r), &d) < 0) {
			pr_err("mscp_checkpoint_get_remote: %s", priv_get_err());
			return -1;
		}

		if (mscp_set_remote(m, r, d) < 0) {
			pr_err("mscp_set_remote: %s", priv_get_err());
			return -1;
		}

		/* load paths and chunks to be transferred from checkpoint */
		if (mscp_checkpoint_load(m, checkpoint_load) < 0) {
			pr_err("mscp_checkpoint_load: %s", priv_get_err());
			return -1;
		}

		if (dryrun)
			goto out;

		/* create the first ssh connection to get password or
		 * passphrase. The sftp session over it will be not
		 * used for resume transfer in actuality. ToDo:
		 * connection management should be improved. */
		if (mscp_connect(m) < 0) {
			pr_err("mscp_connect: %s", priv_get_err());
			return -1;
		}
	}

	if (dryrun) {
		ret = mscp_scan_join(m);
		goto out;
	}

	if (pthread_create(&tid_stat, NULL, print_stat_thread, NULL) < 0) {
		pr_err("pthread_create: %s", strerror(errno));
		return -1;
	}

	if (signal(SIGINT, sigint_handler) == SIG_ERR) {
		pr_err("signal: %s", strerror(errno));
		return -1;
	}

	ret = mscp_start(m);
	if (ret < 0)
		pr_err("mscp_start: %s", priv_get_err());

	ret = mscp_join(m);

	pthread_cancel(tid_stat);
	pthread_join(tid_stat, NULL);

	print_stat(true);
	print_cli("\n"); /* final output */

out:
	if (interrupted)
		ret = 1;

	/* on dry run or failure, persist remaining work if -W was given */
	if ((dryrun || ret != 0) && checkpoint_save) {
		print_cli("save checkpoint to %s\n", checkpoint_save);
		if (mscp_checkpoint_save(m, checkpoint_save) < 0) {
			pr_err("mscp_checkpoint_save: %s", priv_get_err());
			return -1;
		}
	}

	mscp_cleanup(m);
	mscp_free(m);

	return ret;
}
/* progress bar-related functions */

/* Return the seconds elapsed between *b (before) and *a (after).
 * Normalizes *a in place when a microsecond borrow is required. */
double calculate_timedelta(struct timeval *b, struct timeval *a)
{
	double delta;

	if (a->tv_usec < b->tv_usec) {
		a->tv_usec += 1000000;
		a->tv_sec -= 1;
	}

	delta = (double)(a->tv_sec - b->tv_sec);
	delta += (double)(a->tv_usec - b->tv_usec) / 1000000;
	return delta;
}

/* Bytes per second for diff bytes transferred between *b and *a. */
double calculate_bps(size_t diff, struct timeval *b, struct timeval *a)
{
	return (double)diff / calculate_timedelta(b, a);
}
/* Format an ETA string ("MM:SS ETA") from the remaining byte count and
 * a moving average of recent throughput samples; on the final call it
 * formats the total elapsed time instead. Returns a static buffer.
 * Fix: the moving-average accumulator was declared size_t but holds
 * sums of doubles — each update truncated the fractional part, and
 * "sum -= bps_window[idx]" invoked undefined behavior whenever the
 * subtraction went negative. It is now a double. */
char *calculate_eta(size_t remain, size_t diff, struct timeval *b, struct timeval *a,
		    bool final)
{
	static char buf[16];
#define bps_window_size 16
	static double bps_window[bps_window_size];
	static double sum; /* fix: was size_t, truncating double samples */
	static size_t idx, count;
	double elapsed = calculate_timedelta(b, a);
	double bps = diff / elapsed;
	double avg, eta;

	/* early return when diff == 0 (stalled) or final output */
	if (diff == 0) {
		snprintf(buf, sizeof(buf), "--:-- ETA");
		return buf;
	}
	if (final) {
		snprintf(buf, sizeof(buf), "%02d:%02d    ", (int)(floor(elapsed / 60)),
			 (int)round(elapsed) % 60);
		return buf;
	}

	/* drop the old bps value and add the recent one */
	sum -= bps_window[idx];
	bps_window[idx] = bps;
	sum += bps_window[idx];
	idx = (idx + 1) % bps_window_size;
	count++;

	/* calculate ETA from avg of recent bps values */
	avg = sum / min(count, bps_window_size);
	eta = remain / avg;
	snprintf(buf, sizeof(buf), "%02d:%02d ETA", (int)floor(eta / 60),
		 (int)round(eta) % 60);

	return buf;
}
/* Render "[=====>   ] XX% SUFFIX" sized to the terminal width (80
 * columns if the width cannot be queried). The bar is skipped when the
 * window is too narrow.
 * Fix: at percent values that floor to 0, thresh became -1 and
 * buf[thresh] wrote one byte before the buffer on the stack; the '>'
 * is now only placed for thresh >= 0. */
void print_progress_bar(double percent, char *suffix)
{
	int n, thresh, bar_width;
	struct winsize ws;
	char buf[128];

	/*
	 * [=======>   ] XX% SUFFIX
	 */

	buf[0] = '\0';

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0 || ws.ws_col == 0) {
		/* fallback to default */
		ws.ws_col = 80;
		ws.ws_row = 24;
	}

	bar_width = min(sizeof(buf), ws.ws_col) - strlen(suffix) - 7;

	memset(buf, 0, sizeof(buf));
	if (bar_width > 8) {
		thresh = floor(bar_width * (percent / 100)) - 1;
		for (n = 1; n < bar_width - 1; n++) {
			if (n <= thresh)
				buf[n] = '=';
			else
				buf[n] = ' ';
		}
		if (thresh >= 0) /* fix: thresh is -1 at 0%, buf[-1] was written */
			buf[thresh] = '>';
		buf[0] = '[';
		buf[bar_width - 1] = ']';
		snprintf(buf + bar_width, sizeof(buf) - bar_width, " %3d%% ",
			 (int)floor(percent));
	}

	print_cli("\r\033[K"
		  "%s%s",
		  buf, suffix);
}
/* Render one progress line: bar, done/total bytes, throughput, ETA.
 *
 * b/a:   interval start/end timestamps of the throughput sample.
 * total: total bytes to transfer; last: bytes done at interval start;
 * done:  bytes done now. final: render the closing summary line.
 */
void print_progress(struct timeval *b, struct timeval *a, size_t total, size_t last,
		    size_t done, bool final)
{
	char *bps_units[] = { "B/s ", "KB/s", "MB/s", "GB/s" };
	char *byte_units[] = { "B ", "KB", "MB", "GB", "TB", "PB" };
	char suffix[128];
	int bps_u, byte_tu, byte_du;
	double total_round, done_round;
	int percent;
	double bps;

#define array_size(a) (sizeof(a) / sizeof(a[0]))

	if (total <= 0) {
		print_cli("\r\033[K"
			  "total 0 byte transferred");
		return; /* copy 0-byte file(s) */
	}

	total_round = total;
	for (byte_tu = 0; total_round > 1000 && byte_tu < array_size(byte_units) - 1;
	     byte_tu++)
		total_round /= 1024;

	bps = calculate_bps(done - last, b, a);
	/* fix: stop at the last unit; the loop bound previously lacked
	 * "- 1", so bps_u could end up equal to array_size(bps_units)
	 * and index one past the end of bps_units below */
	for (bps_u = 0; bps > 1000 && bps_u < array_size(bps_units) - 1; bps_u++)
		bps /= 1000;

	percent = floor(((double)(done) / (double)total) * 100);

	done_round = done;
	for (byte_du = 0; done_round > 1024 && byte_du < array_size(byte_units) - 1;
	     byte_du++)
		done_round /= 1024;

	snprintf(suffix, sizeof(suffix), "%4.1lf%s/%.1lf%s %6.1f%s %s", done_round,
		 byte_units[byte_du], total_round, byte_units[byte_tu], bps,
		 bps_units[bps_u], calculate_eta(total - done, done - last, b, a, final));

	print_progress_bar(percent, suffix);
}
/* running transfer statistics shared between print_stat() and the
 * stat-printing thread */
struct xfer_stat {
	struct timeval start, before, after; /* start / previous / current sample times */
	size_t total; /* total bytes to be transferred */
	size_t last; /* bytes done at the previous sample */
	size_t done; /* bytes done at the current sample */
};
struct xfer_stat x;
/* Sample transfer statistics and redraw the progress line. Redraws at
 * most once per second, except for the final summary output. */
void print_stat(bool final)
{
	struct mscp_stats s;
	double since_last;

	gettimeofday(&x.after, NULL);
	since_last = calculate_timedelta(&x.before, &x.after);
	if (!final && since_last <= 1)
		return;

	mscp_get_stats(m, &s);
	x.total = s.total;
	x.done = s.done;

	/* the final line reports the whole transfer, not the last window */
	print_progress(final ? &x.start : &x.before, &x.after, x.total,
		       final ? 0 : x.last, x.done, final);

	x.before = x.after;
	x.last = x.done;
}
/* Thread entry: initialize the stat window, then redraw the progress
 * line roughly once per second until the thread is cancelled. */
void *print_stat_thread(void *arg)
{
	memset(&x, 0, sizeof(x));
	gettimeofday(&x.start, NULL);
	x.before = x.start;

	for (;;) {
		print_stat(false);
		sleep(1);
	}

	return NULL;
}
================================================
FILE: src/minmax.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _MINMAX_H_
#define _MINMAX_H_
/* NOTE: classic min/max macros. Each argument is evaluated twice, so
 * do not pass expressions with side effects. */
#define min(a, b) (((a) > (b)) ? (b) : (a))
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif /* _MINMAX_H_ */
================================================
FILE: src/mscp.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* per-thread context, used for both copy threads and the scan thread */
struct mscp_thread {
	struct mscp *m; /* back pointer to the owning mscp instance */
	sftp_session sftp; /* SFTP session owned by this thread */

	/* attributes used by copy threads */
	size_t copied_bytes; /* bytes this thread transferred so far */
	int id; /* thread index */
	int cpu; /* CPU core to pin to, or -1 for no pinning */

	/* thread-specific values */
	pthread_t tid; /* pthread handle; 0 when not running */
	int ret; /* thread result: 0 on success, negative on error */
};
/* state for one mscp copy session */
struct mscp {
	char *remote; /* remote host (and username) */
	int direction; /* copy direction */
	char dst_path[PATH_MAX]; /* copy destination path */

	struct mscp_opts *opts; /* behavior options (referenced, not owned) */
	struct mscp_ssh_opts *ssh_opts; /* SSH options (referenced, not owned) */

	int *cores; /* usable cpu cores by COREMASK */
	int nr_cores; /* length of array of cores */

	sem_t *sem; /* semaphore for concurrent connecting ssh sessions */

	sftp_session first; /* first sftp session */

	pool *src_pool, *path_pool, *chunk_pool, *thread_pool;

	size_t total_bytes; /* total_bytes to be copied */
	/* set true once scan (or checkpoint load) finished filling chunk_pool */
	bool chunk_pool_ready;
#define chunk_pool_is_ready(m) ((m)->chunk_pool_ready)
#define chunk_pool_set_ready(m, b) ((m)->chunk_pool_ready = b)

	struct bwlimit bw; /* bandwidth limit mechanism */

	struct mscp_thread scan; /* mscp_thread for mscp_scan_thread() */
};
#define DEFAULT_MIN_CHUNK_SZ (16 << 20) /* 16MB */
#define DEFAULT_NR_AHEAD 32
#define DEFAULT_BUF_SZ 16384
/* XXX: we use a 16384-byte buffer as suggested by
 * https://api.libssh.org/stable/libssh_tutor_sftp.html. The largest
 * read length from sftp_async_read is 65536 bytes. Read sizes larger
 * than 65536 cause a situation where data remains but
 * sftp_async_read returns 0.
 */
#define DEFAULT_MAX_STARTUPS 8
#define non_null_string(s) (s[0] != '\0')
/* Parse a hex coremask string (e.g., "0x5" or "5") into an array of
 * usable CPU core numbers.
 *
 * On success, *cores points to a malloc'ed array owned by the caller
 * and *nr_cores holds its length. Returns 0 on success, -1 on failure
 * (error recorded via priv_set_errv).
 */
static int expand_coremask(const char *coremask, int **cores, int *nr_cores)
{
	int n, *core_list, *tmp, nr_usable, nr_all;
	char c[2] = { 'x', '\0' };
	const char *_coremask;
	long v, needle;
	int ncores = nr_cpus();

	/*
	 * This function returns array of usable cores in `cores` and
	 * returns the number of usable cores (array length) through
	 * nr_cores.
	 */

	if (strncmp(coremask, "0x", 2) == 0)
		_coremask = coremask + 2;
	else
		_coremask = coremask;

	core_list = realloc(NULL, sizeof(int) * 64);
	if (!core_list) {
		priv_set_errv("realloc: %s", strerrno());
		return -1;
	}

	nr_usable = 0;
	nr_all = 0;
	for (n = strlen(_coremask) - 1; n >= 0; n--) {
		c[0] = _coremask[n];
		v = strtol(c, NULL, 16);
		if (v == LONG_MIN || v == LONG_MAX) {
			priv_set_errv("invalid coremask: %s", coremask);
			free(core_list); /* fix: do not leak on error */
			return -1;
		}

		for (needle = 0x01; needle < 0x10; needle <<= 1) {
			nr_all++;
			if (nr_all > ncores)
				break; /* too long coremask */
			if (v & needle) {
				nr_usable++;
				/* fix: keep the old pointer so it can
				 * be freed when realloc fails */
				tmp = realloc(core_list, sizeof(int) * nr_usable);
				if (!tmp) {
					priv_set_errv("realloc: %s", strerrno());
					free(core_list);
					return -1;
				}
				core_list = tmp;
				core_list[nr_usable - 1] = nr_all - 1;
			}
		}
	}

	if (nr_usable < 1) {
		priv_set_errv("invalid core mask: %s", coremask);
		free(core_list); /* fix: do not leak on error */
		return -1;
	}

	*cores = core_list;
	*nr_cores = nr_usable;
	return 0;
}
/* Default number of copy threads: scales logarithmically with the
 * number of CPU cores as floor(log(ncpu) * 2) + 1. */
static int default_nr_threads(void)
{
	double scaled = log(nr_cpus()) * 2;
	return (int)floor(scaled) + 1;
}
/* Validate user-supplied options and fill in defaults for unset
 * (zero) values. Returns 0 on success, -1 on invalid options (error
 * recorded via priv_set_errv). */
static int validate_and_set_defaut_params(struct mscp_opts *o)
{
	if (o->nr_threads < 0) {
		priv_set_errv("invalid nr_threads: %d", o->nr_threads);
		return -1;
	} else if (o->nr_threads == 0)
		o->nr_threads = default_nr_threads();

	if (o->nr_ahead < 0) {
		priv_set_errv("invalid nr_ahead: %d", o->nr_ahead);
		return -1;
	} else if (o->nr_ahead == 0)
		o->nr_ahead = DEFAULT_NR_AHEAD;

	if (o->min_chunk_sz == 0)
		o->min_chunk_sz = DEFAULT_MIN_CHUNK_SZ;

	if (o->max_chunk_sz) {
		if (o->min_chunk_sz > o->max_chunk_sz) {
			priv_set_errv("smaller max chunk size than "
				      "min chunk size: %lu < %lu",
				      o->max_chunk_sz, o->min_chunk_sz);
			return -1;
		}
	}

	/* fix: the former `else if (o->buf_sz == 0)` error branch
	 * repeated the same condition as this one and was unreachable
	 * dead code; buf_sz is unsigned, so zero is the only invalid
	 * value and it simply selects the default */
	if (o->buf_sz == 0)
		o->buf_sz = DEFAULT_BUF_SZ;

	if (o->max_startups == 0)
		o->max_startups = DEFAULT_MAX_STARTUPS;
	else if (o->max_startups < 0) {
		priv_set_errv("invalid max_startups: %d", o->max_startups);
		return -1;
	}

	if (o->interval > 0) {
		/* when the interval is set, establish SSH connections sequentially. */
		o->max_startups = 1;
	}

	return 0;
}
/* Set the remote host string and the copy direction for m.
 * Returns 0 on success, -1 on failure. */
int mscp_set_remote(struct mscp *m, const char *remote_host, int direction)
{
	if (!remote_host) {
		priv_set_errv("empty remote host");
		return -1;
	}

	if (direction != MSCP_DIRECTION_L2R && direction != MSCP_DIRECTION_R2L) {
		priv_set_errv("invalid copy direction: %d", direction);
		return -1;
	}

	m->remote = strdup(remote_host);
	if (!m->remote) {
		priv_set_errv("strdup: %s", strerrno());
		return -1;
	}
	m->direction = direction;

	return 0;
}
/* Allocate and initialize an mscp instance.
 *
 * o and s must stay valid for the lifetime of the returned instance;
 * they are referenced, not copied. Returns NULL on failure (error
 * recorded via priv_set_errv).
 */
struct mscp *mscp_init(struct mscp_opts *o, struct mscp_ssh_opts *s)
{
	struct mscp *m;
	int n;

	set_print_severity(o->severity);

	if (validate_and_set_defaut_params(o) < 0) {
		return NULL;
	}

	if (!(m = malloc(sizeof(*m)))) {
		priv_set_errv("malloc: %s", strerrno());
		return NULL;
	}

	memset(m, 0, sizeof(*m));
	m->opts = o;
	m->ssh_opts = s;
	chunk_pool_set_ready(m, false);

	if (!(m->src_pool = pool_new())) {
		priv_set_errv("pool_new: %s", strerrno());
		goto free_out;
	}
	if (!(m->path_pool = pool_new())) {
		priv_set_errv("pool_new: %s", strerrno());
		goto free_out;
	}
	if (!(m->chunk_pool = pool_new())) {
		priv_set_errv("pool_new: %s", strerrno());
		goto free_out;
	}
	if (!(m->thread_pool = pool_new())) {
		priv_set_errv("pool_new: %s", strerrno());
		goto free_out;
	}

	if ((m->sem = sem_create(o->max_startups)) == NULL) {
		priv_set_errv("sem_create: %s", strerrno());
		goto free_out;
	}

	if (o->coremask) {
		if (expand_coremask(o->coremask, &m->cores, &m->nr_cores) < 0)
			goto free_out;
		char b[512], c[8];
		memset(b, 0, sizeof(b));
		for (n = 0; n < m->nr_cores; n++) {
			memset(c, 0, sizeof(c));
			snprintf(c, sizeof(c) - 1, " %d", m->cores[n]);
			strlcat(b, c, sizeof(b));
		}
		pr_notice("usable cpu cores:%s", b);
	}

	if (bwlimit_init(&m->bw, o->bitrate, 100) < 0) { /* 100ms window (hardcoded) */
		priv_set_errv("bwlimit_init: %s", strerrno());
		goto free_out;
	}

	/* workaround: set libssh using openssh proxyjump
	 * https://gitlab.com/libssh/libssh-mirror/-/issues/319 */
	ssh_use_openssh_proxy_jumps(1);

	/* call ssh_init() because libssh is statically linked */
	ssh_init();

	return m;

free_out:
	if (m->src_pool)
		pool_free(m->src_pool);
	if (m->path_pool)
		pool_free(m->path_pool);
	if (m->chunk_pool)
		pool_free(m->chunk_pool);
	if (m->thread_pool)
		pool_free(m->thread_pool);
	/* fix: release the semaphore and coremask array allocated
	 * before a later initialization step failed */
	if (m->sem)
		sem_release(m->sem);
	if (m->cores)
		free(m->cores);
	if (m->remote)
		free(m->remote);
	free(m);
	return NULL;
}
/* Establish the first SSH/SFTP session to the remote host.
 * Returns 0 on success, -1 on failure. */
int mscp_connect(struct mscp *m)
{
	if (!(m->first = ssh_init_sftp_session(m->remote, m->ssh_opts)))
		return -1;
	return 0;
}
/* Register a copy-source path. The path is duplicated and owned by the
 * source pool. Returns 0 on success, -1 on failure. */
int mscp_add_src_path(struct mscp *m, const char *src_path)
{
	char *s = strdup(src_path);

	if (!s) {
		priv_set_errv("strdup: %s", strerrno());
		return -1;
	}

	if (pool_push(m->src_pool, s) < 0) {
		priv_set_errv("pool_push: %s", strerrno());
		free(s); /* fix: do not leak the duplicated path on failure */
		return -1;
	}
	return 0;
}
/* Set the copy destination path. Returns 0 on success, -1 when the
 * path is empty or longer than PATH_MAX allows. */
int mscp_set_dst_path(struct mscp *m, const char *dst_path)
{
	size_t len = strlen(dst_path);

	if (len + 1 >= PATH_MAX) {
		priv_set_errv("too long dst path: %s", dst_path);
		return -1;
	}
	if (len == 0) {
		priv_set_errv("empty dst path");
		return -1;
	}

	strncpy(m->dst_path, dst_path, PATH_MAX);
	return 0;
}
static size_t get_page_mask(void)
{
size_t page_sz = sysconf(_SC_PAGESIZE);
return ~(page_sz - 1);
}
/* Cancel all running copy threads. The thread pool lock is held so the
 * pool cannot change while iterating. */
static void mscp_stop_copy_thread(struct mscp *m)
{
	struct mscp_thread *t;
	unsigned int idx;

	pool_lock(m->thread_pool);
	pool_for_each(m->thread_pool, t, idx) {
		if (t->tid) /* skip slots whose thread never started */
			pthread_cancel(t->tid);
	}
	pool_unlock(m->thread_pool);
}
/* Cancel the scan thread if it is running. */
static void mscp_stop_scan_thread(struct mscp *m)
{
	if (!m->scan.tid)
		return;
	pthread_cancel(m->scan.tid);
}
/* Cancel the scan thread and then all copy threads. */
void mscp_stop(struct mscp *m)
{
	mscp_stop_scan_thread(m);
	mscp_stop_copy_thread(m);
}
/* Thread entry for scanning source paths.
 *
 * Walks every registered source path (with glob expansion), resolves
 * each file's destination path, and splits files into chunks pushed
 * to the chunk pool. The chunk pool is marked "ready" on both success
 * and failure so copy threads and mscp_scan() never wait forever;
 * t->ret carries the result (0 or -1).
 */
void *mscp_scan_thread(void *arg)
{
	struct mscp_thread *t = arg;
	struct mscp *m = t->m;
	sftp_session src_sftp = NULL, dst_sftp = NULL;
	struct path_resolve_args a;
	struct stat ss, ds;
	char *src_path;
	glob_t pglob;
	int n;

	switch (m->direction) {
	case MSCP_DIRECTION_L2R:
		src_sftp = NULL;
		dst_sftp = t->sftp;
		break;
	case MSCP_DIRECTION_R2L:
		src_sftp = t->sftp;
		dst_sftp = NULL;
		break;
	default:
		pr_err("invalid copy direction: %d", m->direction);
		goto err_out;
	}

	/* initialize path_resolve_args */
	memset(&a, 0, sizeof(a));
	a.total_bytes = &m->total_bytes;

	/* multiple sources or a trailing '/' force dst to be a directory */
	if (pool_size(m->src_pool) > 1)
		a.dst_path_should_dir = true;
	if (m->dst_path[strlen(m->dst_path) - 1] == '/')
		a.dst_path_should_dir = true;

	if (mscp_stat(m->dst_path, &ds, dst_sftp) == 0) {
		if (S_ISDIR(ds.st_mode))
			a.dst_path_is_dir = true;
	}

	a.path_pool = m->path_pool;
	a.chunk_pool = m->chunk_pool;
	a.nr_conn = m->opts->nr_threads;
	a.min_chunk_sz = m->opts->min_chunk_sz;
	a.max_chunk_sz = m->opts->max_chunk_sz;
	a.chunk_align = get_page_mask();

	pr_info("start to walk source path(s)");

	/* walk each src_path recursively, and resolve path->dst_path for each src */
	pool_iter_for_each(m->src_pool, src_path) {
		memset(&pglob, 0, sizeof(pglob));
		if (mscp_glob(src_path, GLOB_NOCHECK, &pglob, src_sftp) < 0) {
			pr_err("mscp_glob: %s", strerrno());
			goto err_out;
		}

		for (n = 0; n < pglob.gl_pathc; n++) {
			if (mscp_stat(pglob.gl_pathv[n], &ss, src_sftp) < 0) {
				pr_err("stat: %s %s", src_path, strerrno());
				/* fix: release glob results on error */
				mscp_globfree(&pglob);
				goto err_out;
			}

			if (!a.dst_path_should_dir && pglob.gl_pathc > 1)
				a.dst_path_should_dir = true; /* we have over 1 srces */

			/* set path specific args */
			a.src_path = pglob.gl_pathv[n];
			a.dst_path = m->dst_path;
			a.src_path_is_dir = S_ISDIR(ss.st_mode);

			if (walk_src_path(src_sftp, pglob.gl_pathv[n], &a) < 0) {
				/* fix: release glob results on error */
				mscp_globfree(&pglob);
				goto err_out;
			}
		}

		mscp_globfree(&pglob);
	}

	pr_info("walk source path(s) done");
	t->ret = 0;
	chunk_pool_set_ready(m, true);
	return NULL;

err_out:
	t->ret = -1;
	chunk_pool_set_ready(m, true);
	return NULL;
}
/* Start the scan thread and wait until it has produced at least
 * nr_threads chunks, or finished, so the number of connections can be
 * sized to the number of chunks. Returns 0 on success, -1 on failure.
 */
int mscp_scan(struct mscp *m)
{
	struct mscp_thread *t = &m->scan;
	int ret;

	memset(t, 0, sizeof(*t));
	t->m = m;
	t->sftp = m->first;

	/* fix: pthread_create() returns a positive errno value on
	 * failure and 0 on success, never a negative value, so the
	 * former `< 0` check could never detect an error */
	if ((ret = pthread_create(&t->tid, NULL, mscp_scan_thread, t)) != 0) {
		priv_set_err("pthread_create: %d", ret);
		return -1;
	}

	/* We wait for there are over nr_threads chunks to determine
	 * actual number of threads (and connections), or scan
	 * finished. If the number of chunks are smaller than
	 * nr_threads, we adjust nr_threads to the number of chunks.
	 */
	while (!chunk_pool_is_ready(m) && pool_size(m->chunk_pool) < m->opts->nr_threads)
		usleep(100);

	return 0;
}
/* Wait for the scan thread to finish and return its result.
 * Returns 0 immediately when the thread is not running. */
int mscp_scan_join(struct mscp *m)
{
	struct mscp_thread *t = &m->scan;

	if (!t->tid)
		return 0;

	pthread_join(t->tid, NULL);
	t->tid = 0;
	return t->ret;
}
/* Read the remote host string and copy direction stored in checkpoint
 * file `pathname` without loading the transfer state itself. */
int mscp_checkpoint_get_remote(const char *pathname, char *remote, size_t len, int *dir)
{
	return checkpoint_load_remote(pathname, remote, len, dir);
}
/* Load paths and remaining chunks from a checkpoint file instead of
 * scanning source paths. Returns 0 on success, -1 on failure. */
int mscp_checkpoint_load(struct mscp *m, const char *pathname)
{
	struct chunk *c;
	unsigned int i;

	if (checkpoint_load_paths(pathname, m->path_pool, m->chunk_pool) < 0)
		return -1;

	/* Total up bytes to be transferred and mark the chunk pool
	 * ready on behalf of the scan thread, which does not run when
	 * resuming from a checkpoint. */
	m->total_bytes = 0;
	pool_for_each(m->chunk_pool, c, i) {
		m->total_bytes += c->len;
	}

	chunk_pool_set_ready(m, true);
	return 0;
}
/* Write the current transfer state (direction, login, remote, paths,
 * and remaining chunks) to checkpoint file `pathname`. */
int mscp_checkpoint_save(struct mscp *m, const char *pathname)
{
	return checkpoint_save(pathname, m->direction, m->ssh_opts->login_name, m->remote,
			       m->path_pool, m->chunk_pool);
}
static void *mscp_copy_thread(void *arg);
/* Allocate a copy-thread context and start the thread.
 * Returns the context on success, NULL on failure. */
static struct mscp_thread *mscp_copy_thread_spawn(struct mscp *m, int id)
{
	struct mscp_thread *t;
	int ret;

	if (!(t = malloc(sizeof(*t)))) {
		priv_set_errv("malloc: %s", strerrno());
		return NULL;
	}

	memset(t, 0, sizeof(*t));
	t->m = m;
	t->id = id;
	if (m->cores == NULL)
		t->cpu = -1; /* not pinned to cpu */
	else
		t->cpu = m->cores[id % m->nr_cores];

	/* fix: pthread_create() returns a positive errno value on
	 * failure and 0 on success, never a negative value, so the
	 * former `< 0` check could never detect an error */
	if ((ret = pthread_create(&t->tid, NULL, mscp_copy_thread, t)) != 0) {
		priv_set_errv("pthread_create: %d", ret);
		free(t);
		return NULL;
	}

	return t;
}
/* Spawn copy threads (at most one per chunk) and register them in the
 * thread pool. Returns the number of threads actually started. */
int mscp_start(struct mscp *m)
{
	struct mscp_thread *t;
	int n; /* fix: dropped unused local `ret` */

	/* never spawn more connections than there is work available */
	if ((n = pool_size(m->chunk_pool)) < m->opts->nr_threads) {
		pr_notice("we have %d chunk(s), set number of connections to %d", n, n);
		m->opts->nr_threads = n;
	}

	pr_notice("threads: %d", m->opts->nr_threads);
	pr_notice("bwlimit: %ld bps", m->bw.bps);

	for (n = 0; n < m->opts->nr_threads; n++) {
		t = mscp_copy_thread_spawn(m, n);
		if (!t)
			break;
		if (pool_push_lock(m->thread_pool, t) < 0) {
			priv_set_errv("pool_push_lock: %s", strerrno());
			break;
		}
	}

	return n;
}
/* Wait for the scan thread and all copy threads to finish, close their
 * SFTP sessions, and report totals. Returns 0 on success, or the last
 * non-zero thread result. */
int mscp_join(struct mscp *m)
{
	struct mscp_thread *t;
	struct path *p;
	unsigned int idx;
	size_t total_copied_bytes = 0, nr_copied = 0, nr_tobe_copied = 0;
	int ret = 0; /* fix: dropped unused local `n` */

	/* waiting for scan thread joins... */
	ret = mscp_scan_join(m);

	/* waiting for copy threads join... */
	pool_for_each(m->thread_pool, t, idx) {
		pthread_join(t->tid, NULL);
	}

	pool_for_each(m->thread_pool, t, idx) {
		total_copied_bytes += t->copied_bytes;
		if (t->ret != 0)
			ret = t->ret;
		if (t->sftp) {
			ssh_sftp_close(t->sftp);
			t->sftp = NULL;
		}
	}

	/* count up number of transferred files */
	pool_iter_for_each(m->path_pool, p) {
		nr_tobe_copied++;
		if (p->state == FILE_STATE_DONE) {
			nr_copied++;
		}
	}

	if (m->first) {
		ssh_sftp_close(m->first);
		m->first = NULL;
	}

	pr_notice("%lu/%lu bytes copied for %lu/%lu files", total_copied_bytes,
		  m->total_bytes, nr_copied, nr_tobe_copied);

	return ret;
}
/* copy thread-related functions */
/* Sleep so that SSH connection attempts are spaced at least `interval`
 * seconds apart. Shared `next` holds the earliest time (usec since the
 * epoch) the next connection may start.
 * NOTE(review): loads and stores of `next` are individually atomic,
 * but the read-modify-write sequence is not, so the spacing is
 * best-effort, not a strict guarantee. */
static void wait_for_interval(int interval)
{
	_Atomic static long next;
	struct timeval t;
	long now;

	gettimeofday(&t, NULL);
	now = t.tv_sec * 1000000 + t.tv_usec;
	if (next - now > 0)
		usleep(next - now);

	/* fix: widen before multiplying; `interval * 1000000` is an
	 * int multiplication that overflows for intervals over ~2147
	 * seconds */
	next = now + (long)interval * 1000000;
}
/* Copy thread entry.
 *
 * Optionally pins itself to a CPU, opens its own SFTP session (gated
 * by m->sem so at most max_startups connections are in progress at
 * once), then drains chunks from the chunk pool until it is empty.
 * The result is reported through t->ret (0 success, -1 failure). */
void *mscp_copy_thread(void *arg)
{
	sftp_session src_sftp, dst_sftp;
	struct mscp_thread *t = arg;
	struct mscp *m = t->m;
	struct chunk *c;
	bool next_chunk_exist;

	/* when error occurs, each thread prints error messages
	 * immediately with pr_* functions. */

	if (t->cpu > -1) {
		if (set_thread_affinity(pthread_self(), t->cpu) < 0) {
			pr_err("set_thread_affinity: %s", priv_get_err());
			goto err_out;
		}
		pr_notice("thread[%d]: pin to cpu core %d", t->id, t->cpu);
	}

	if (sem_wait(m->sem) < 0) {
		pr_err("sem_wait: %s", strerrno());
		goto err_out;
	}

	/* only open a connection when there is still a chunk to take */
	if ((next_chunk_exist = pool_iter_has_next_lock(m->chunk_pool))) {
		if (m->opts->interval > 0)
			wait_for_interval(m->opts->interval);
		pr_notice("thread[%d]: connecting to %s", t->id, m->remote);
		t->sftp = ssh_init_sftp_session(m->remote, m->ssh_opts);
	}

	if (sem_post(m->sem) < 0) {
		pr_err("sem_post: %s", strerrno());
		goto err_out;
	}

	if (!next_chunk_exist) {
		pr_notice("thread[%d]: no more connections needed", t->id);
		goto out;
	}

	if (!t->sftp) {
		/* connection failed; the error was recorded by
		 * ssh_init_sftp_session */
		pr_err("thread[%d]: %s", t->id, priv_get_err());
		goto err_out;
	}

	switch (m->direction) {
	case MSCP_DIRECTION_L2R:
		src_sftp = NULL;
		dst_sftp = t->sftp;
		break;
	case MSCP_DIRECTION_R2L:
		src_sftp = t->sftp;
		dst_sftp = NULL;
		break;
	default:
		assert(false);
		goto err_out; /* not reached */
	}

	while (1) {
		c = pool_iter_next_lock(m->chunk_pool);
		if (c == NULL) {
			if (!chunk_pool_is_ready(m)) {
				/* a new chunk will be added. wait for it. */
				usleep(100);
				continue;
			}
			break; /* no more chunks */
		}
		if ((t->ret = copy_chunk(c, src_sftp, dst_sftp, m->opts->nr_ahead,
					 m->opts->buf_sz, m->opts->preserve_ts, &m->bw,
					 &t->copied_bytes)) < 0)
			break;
	}

	if (t->ret < 0) {
		/* t->ret < 0 implies the loop broke on copy_chunk
		 * failure, so c points at the failed chunk here */
		pr_err("thread[%d]: copy failed: %s -> %s, 0x%010lx-0x%010lx, %s", t->id,
		       c->p->path, c->p->dst_path, c->off, c->off + c->len,
		       priv_get_err());
	}

	return NULL;

err_out:
	t->ret = -1;
	return NULL;
out:
	t->ret = 0;
	return NULL;
}
/* cleanup-related functions */
/* Release per-transfer state: close the first SFTP session and empty
 * (but do not destroy) all pools so m can run another transfer. */
void mscp_cleanup(struct mscp *m)
{
	if (m->first) {
		ssh_sftp_close(m->first);
		m->first = NULL;
	}

	pool_zeroize(m->src_pool, free);
	pool_zeroize(m->path_pool, (pool_map_f)free_path);
	pool_zeroize(m->chunk_pool, free);
	pool_zeroize(m->thread_pool, free);
}
/* Destroy an mscp instance and everything it owns. */
void mscp_free(struct mscp *m)
{
	pool_destroy(m->src_pool, free);
	pool_destroy(m->path_pool, (pool_map_f)free_path);
	/* fix: chunk_pool and thread_pool were never destroyed, leaking
	 * the pool structures (and their items when mscp_cleanup() was
	 * not called beforehand) */
	pool_destroy(m->chunk_pool, free);
	pool_destroy(m->thread_pool, free);

	if (m->remote)
		free(m->remote);
	if (m->cores)
		free(m->cores);

	sem_release(m->sem);
	free(m);
}
/* Fill s with the total number of bytes to be copied and the number of
 * bytes copied so far, summed over all copy threads. */
void mscp_get_stats(struct mscp *m, struct mscp_stats *s)
{
	struct mscp_thread *t;
	unsigned int idx;
	size_t done = 0;

	pool_for_each(m->thread_pool, t, idx) {
		done += t->copied_bytes;
	}

	s->total = m->total_bytes;
	s->done = done;
}
================================================
FILE: src/openbsd-compat/openbsd-compat.h
================================================
#ifndef _OPENBSD_COMPAT_H
#define _OPENBSD_COMPAT_H
#include "config.h"
#ifndef HAVE_STRLCAT
/* OpenBSD strlcat: size-bounded string concatenation, provided only
 * when the platform libc lacks it. */
size_t strlcat(char *dst, const char *src, size_t siz);
#endif
#endif /* _OPENBSD_COMPAT_H */
================================================
FILE: src/openbsd-compat/strlcat.c
================================================
/* $OpenBSD: strlcat.c,v 1.13 2005/08/08 08:05:37 espie Exp $ */
/*
* Copyright (c) 1998 Todd C. Miller
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* OPENBSD ORIGINAL: lib/libc/string/strlcat.c */
#include "config.h"
#ifndef HAVE_STRLCAT
#include
#include
/*
* Appends src to string dst of size siz (unlike strncat, siz is the
* full size of dst, not space left). At most siz-1 characters
* will be copied. Always NUL terminates (unless siz <= strlen(dst)).
* Returns strlen(src) + MIN(siz, strlen(initial dst)).
* If retval >= siz, truncation occurred.
*/
size_t
strlcat(char *dst, const char *src, size_t siz)
{
	char *d = dst;
	const char *s = src;
	size_t n = siz;
	size_t dlen;
	/* Find the end of dst and adjust bytes left but don't go past end */
	while (n-- != 0 && *d != '\0')
		d++;
	dlen = d - dst;
	n = siz - dlen;
	/* dst already fills the buffer: report the would-be length */
	if (n == 0)
		return(dlen + strlen(s));
	/* append at most n-1 bytes, leaving room for the NUL */
	while (*s != '\0') {
		if (n != 1) {
			*d++ = *s;
			n--;
		}
		s++;
	}
	*d = '\0';
	return(dlen + (s - src)); /* count does not include NUL */
}
#endif /* !HAVE_STRLCAT */
================================================
FILE: src/path.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* resolution of copy source paths */
/* Resolve the destination file path for one source file.
 *
 * Combines a->dst_path with the part of src_file_path below the
 * source root, handling the four combinations of src/dst being a file
 * or a directory. Returns a malloc'ed string, or NULL on failure.
 */
static char *resolve_dst_path(const char *src_file_path, struct path_resolve_args *a)
{
	char copy[PATH_MAX + 1], dst_file_path[PATH_MAX + 1];
	char *prefix;
	int offset;
	int ret;

	strncpy(copy, a->src_path, PATH_MAX);
	copy[PATH_MAX] = '\0'; /* fix: strncpy does not NUL-terminate
				* when src_path is PATH_MAX or longer */
	prefix = dirname(copy);
	if (!prefix) {
		pr_err("dirname: %s", strerrno());
		return NULL;
	}

	/* offset skips the leading "<dirname>/" part of the source path */
	offset = strlen(prefix) + 1;
	if (strlen(prefix) == 1) { /* corner cases */
		switch (prefix[0]) {
		case '.':
			offset = 0;
			break;
		case '/':
			offset = 1;
			break;
		}
	}

	if (!a->src_path_is_dir && !a->dst_path_is_dir) {
		/* src path is file. dst path is (1) file, or (2) does not exist.
		 * In the second case, we need to put src under the dst.
		 */
		if (a->dst_path_should_dir)
			ret = snprintf(dst_file_path, PATH_MAX, "%s/%s", a->dst_path,
				       a->src_path + offset);
		else
			ret = snprintf(dst_file_path, PATH_MAX, "%s", a->dst_path);
	}

	/* src is file, and dst is dir */
	if (!a->src_path_is_dir && a->dst_path_is_dir)
		ret = snprintf(dst_file_path, PATH_MAX, "%s/%s", a->dst_path,
			       a->src_path + offset);

	/* both are directory */
	if (a->src_path_is_dir && a->dst_path_is_dir)
		ret = snprintf(dst_file_path, PATH_MAX, "%s/%s", a->dst_path,
			       src_file_path + offset);

	/* dst path does not exist. change dir name to dst_path */
	if (a->src_path_is_dir && !a->dst_path_is_dir)
		ret = snprintf(dst_file_path, PATH_MAX, "%s/%s", a->dst_path,
			       src_file_path + strlen(a->src_path) + 1);

	if (ret >= PATH_MAX) {
		pr_warn("Too long path: %s", dst_file_path);
		return NULL;
	}

	pr_debug("file: %s -> %s", src_file_path, dst_file_path);

	return strndup(dst_file_path, PATH_MAX);
}
/* chunk preparation */
struct chunk *alloc_chunk(struct path *p, size_t off, size_t len)
{
struct chunk *c;
if (!(c = malloc(sizeof(*c)))) {
pr_err("malloc %s", strerrno());
return NULL;
}
memset(c, 0, sizeof(*c));
c->p = p;
c->off = off;
c->len = len;
c->state = CHUNK_STATE_INIT;
refcnt_inc(&p->refcnt);
return c;
}
/* Split a file of `size` bytes into chunks and push them to the chunk
 * pool. The chunk size derives from nr_conn and is aligned/clamped,
 * unless max_chunk_sz forces a fixed size. A 0-byte file still gets
 * exactly one (empty) chunk. Returns 0 on success, -1 on failure. */
static int resolve_chunk(struct path *p, size_t size, struct path_resolve_args *a)
{
	struct chunk *c;
	size_t chunk_sz, off, len;
	size_t remaind;

	if (a->max_chunk_sz)
		chunk_sz = a->max_chunk_sz;
	else {
		chunk_sz = (size / (a->nr_conn * 4)) & a->chunk_align;
		if (chunk_sz <= a->min_chunk_sz)
			chunk_sz = a->min_chunk_sz;
	}

	/* for (size = size; size > 0;) does not create a file (chunk)
	 * when file size is 0. This do {} while (remaind > 0) creates
	 * just open/close a 0-byte file.
	 */
	remaind = size;
	do {
		off = size - remaind;
		len = remaind < chunk_sz ? remaind : chunk_sz;
		c = alloc_chunk(p, off, len);
		if (!c)
			return -1;
		remaind -= len;
		if (pool_push_lock(a->chunk_pool, c) < 0) {
			pr_err("pool_push_lock: %s", strerrno());
			free(c); /* fix: do not leak the chunk on push failure */
			return -1;
		}
	} while (remaind > 0);

	return 0;
}
/* Release a struct path and the strings it owns. */
void free_path(struct path *p)
{
	/* free(NULL) is a defined no-op, so no NULL checks needed */
	free(p->path);
	free(p->dst_path);
	free(p);
}
/* Allocate a struct path taking ownership of `path` and `dst_path`.
 * Returns NULL (without freeing the arguments) on failure. */
struct path *alloc_path(char *path, char *dst_path)
{
	struct path *p = malloc(sizeof(*p));

	if (!p) {
		pr_err("malloc: %s", strerrno());
		return NULL;
	}

	memset(p, 0, sizeof(*p));
	p->path = path;
	p->dst_path = dst_path;
	p->state = FILE_STATE_INIT;
	lock_init(&p->lock);
	p->data = 0;

	return p;
}
/* Register one regular file: duplicate the source path, resolve its
 * destination, split it into chunks, and push it to the path pool.
 * Returns 0 on success, -1 on failure. */
static int append_path(sftp_session sftp, const char *path, struct stat st,
		       struct path_resolve_args *a)
{
	struct path *p;
	char *src, *dst;

	if (!(src = strdup(path))) {
		pr_err("strdup: %s", strerrno());
		return -1;
	}
	if (!(dst = resolve_dst_path(src, a))) {
		free(src);
		return -1;
	}
	if (!(p = alloc_path(src, dst))) {
		/* fix: do not leak src and dst when alloc_path fails;
		 * it does not take ownership on failure */
		free(src);
		free(dst);
		return -1;
	}
	if (resolve_chunk(p, st.st_size, a) < 0)
		return -1; /* XXX: do not free path because chunk(s)
			    * were added to chunk pool already */
	if (pool_push_lock(a->path_pool, p) < 0) {
		pr_err("pool_push: %s", strerrno());
		goto free_out;
	}
	*a->total_bytes += st.st_size;

	return 0;

free_out:
	free_path(p);
	return -1;
}
/* Return true for the "." and ".." directory entries, which must not
 * be descended into while walking a directory tree. */
static bool check_path_should_skip(const char *path)
{
	size_t len = strlen(path);

	if (len == 1 && path[0] == '.')
		return true;
	if (len == 2 && path[0] == '.' && path[1] == '.')
		return true;
	return false;
}
/* Recursively walk `path` (local, or remote via sftp) and register
 * every regular file found through append_path(). Non-regular,
 * non-directory entries are silently skipped; errors inside a
 * directory are printed and the walk continues with the next entry. */
static int walk_path_recursive(sftp_session sftp, const char *path,
			       struct path_resolve_args *a)
{
	char next_path[PATH_MAX + 1];
	struct dirent *e;
	struct stat st;
	MDIR *d;
	int ret;

	if (mscp_stat(path, &st, sftp) < 0) {
		pr_err("stat: %s: %s", path, strerrno());
		return -1;
	}

	if (S_ISREG(st.st_mode)) {
		/* this path is regular file. it is to be copied */
		return append_path(sftp, path, st, a);
	}

	if (!S_ISDIR(st.st_mode))
		return 0; /* not a regular file and not a directory, skip it. */

	/* ok, this path is a directory. walk through it. */
	if (!(d = mscp_opendir(path, sftp))) {
		pr_err("opendir: %s: %s", path, strerrno());
		return -1;
	}

	for (e = mscp_readdir(d); e; e = mscp_readdir(d)) {
		if (check_path_should_skip(e->d_name))
			continue;

		ret = snprintf(next_path, PATH_MAX, "%s/%s", path, e->d_name);
		if (ret >= PATH_MAX) {
			pr_warn("Too long path: %s/%s", path, e->d_name);
			continue;
		}
		walk_path_recursive(sftp, next_path, a);
		/* do not stop even when walk_path_recursive returns
		 * -1 due to an unreadable file. go to a next
		 * file. Thus, do not pass error messages via
		 * priv_set_err() under walk_path_recursive. Print
		 * the error with pr_err immediately.
		 */
	}

	mscp_closedir(d);

	return 0;
}
/* Entry point for scanning: walk src_path recursively and register
 * every regular file found (see walk_path_recursive). */
int walk_src_path(sftp_session src_sftp, const char *src_path,
		  struct path_resolve_args *a)
{
	return walk_path_recursive(src_sftp, src_path, a);
}
/* based on
* https://stackoverflow.com/questions/2336242/recursive-mkdir-system-call-on-unix */
/* Create the destination file and any missing parent directories
 * ("mkdir -p" behavior). Returns 0 on success, -1 on failure (error
 * recorded via priv_set_errv). */
static int touch_dst_path(struct path *p, sftp_session sftp)
{
	/* XXX: should reflect the permission of the original directory? */
	mode_t mode = S_IRWXU | S_IRWXG | S_IRWXO;
	struct stat st;
	char path[PATH_MAX];
	char *needle;
	mf *f; /* fix: dropped unused local `ret` */

	strncpy(path, p->dst_path, sizeof(path));
	path[sizeof(path) - 1] = '\0'; /* fix: strncpy does not
					* NUL-terminate on truncation */

	/* mkdir -p.
	 * XXX: this may be slow when dst is the remote side. need speed-up. */
	for (needle = strchr(path + 1, '/'); needle; needle = strchr(needle + 1, '/')) {
		*needle = '\0';

		if (mscp_stat(path, &st, sftp) == 0) {
			if (S_ISDIR(st.st_mode))
				goto next; /* directory exists. go deeper */
			else {
				priv_set_errv("mscp_stat %s: not a directory", path);
				return -1; /* path exists, but not directory. */
			}
		}

		if (errno == ENOENT) {
			/* no file on the path. create directory. */
			if (mscp_mkdir(path, mode, sftp) < 0) {
				priv_set_errv("mscp_mkdir %s: %s", path, strerrno());
				return -1;
			}
		}
	next:
		*needle = '/';
	}

	/* Do not set O_TRUNC here. Instead, do mscp_setstat() at the
	 * end. see https://bugzilla.mindrot.org/show_bug.cgi?id=3431 */
	f = mscp_open(p->dst_path, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR, sftp);
	if (!f) {
		priv_set_errv("mscp_open %s: %s", p->dst_path, strerrno());
		return -1;
	}
	mscp_close(f);

	return 0;
}
/* Ensure the destination file for p exists, creating it (and parent
 * directories) on the first call. Serialized via p->lock so only one
 * copy thread prepares each file. Returns 0 on success, -1 on failure. */
static int prepare_dst_path(struct path *p, sftp_session dst_sftp)
{
	int ret = 0;

	LOCK_ACQUIRE(&p->lock);
	if (p->state == FILE_STATE_INIT) {
		if (touch_dst_path(p, dst_sftp) < 0) {
			ret = -1;
			goto out;
		}
		p->state = FILE_STATE_OPENED;
		pr_info("copy start: %s", p->path);
	}

out:
	LOCK_RELEASE();
	return ret;
}
/* functions for copy */
/* Callback for sftp_async_write: fill ptr with up to len bytes read
 * from the file descriptor passed through userdata. */
static ssize_t read_to_buf(void *ptr, size_t len, void *userdata)
{
	int fd = *(int *)userdata;
	return read(fd, ptr, len);
}
/* Copy one chunk from local fd to remote sftp file sf.
 *
 * Keeps up to nr_ahead async write requests in flight, each of at
 * most buf_sz bytes, accounts acknowledged bytes into *counter, and
 * throttles through bw. Returns 0 on success, -1 on failure (error
 * recorded via priv_set_errv). */
static int copy_chunk_l2r(struct chunk *c, int fd, sftp_file sf, int nr_ahead, int buf_sz,
			  struct bwlimit *bw, size_t *counter)
{
	ssize_t read_bytes, remaind, thrown;
	int idx, ret;
	struct {
		uint32_t id; /* sftp request id */
		ssize_t len; /* bytes covered by that request */
	} reqs[nr_ahead];

	if (c->len == 0)
		return 0;

	/* remaind: bytes not yet acknowledged; thrown: bytes not yet requested */
	remaind = thrown = c->len;

	/* fill the pipeline with up to nr_ahead write requests */
	for (idx = 0; idx < nr_ahead && thrown > 0; idx++) {
		reqs[idx].len = min(thrown, buf_sz);
		reqs[idx].len = sftp_async_write(sf, read_to_buf, reqs[idx].len, &fd,
						 &reqs[idx].id);
		if (reqs[idx].len < 0) {
			priv_set_errv("sftp_async_write: %s",
				      sftp_get_ssh_error(sf->sftp));
			return -1;
		}
		thrown -= reqs[idx].len;
		bwlimit_wait(bw, reqs[idx].len);
	}

	/* complete requests in order, refilling each slot as it drains */
	for (idx = 0; remaind > 0; idx = (idx + 1) % nr_ahead) {
		ret = sftp_async_write_end(sf, reqs[idx].id, 1);
		if (ret != SSH_OK) {
			priv_set_errv("sftp_async_write_end: %s",
				      sftp_get_ssh_error(sf->sftp));
			return -1;
		}

		*counter += reqs[idx].len;
		remaind -= reqs[idx].len;

		if (remaind <= 0)
			break;

		if (thrown <= 0)
			continue; /* all requested; just drain completions */

		reqs[idx].len = min(thrown, buf_sz);
		reqs[idx].len = sftp_async_write(sf, read_to_buf, reqs[idx].len, &fd,
						 &reqs[idx].id);
		if (reqs[idx].len < 0) {
			priv_set_errv("sftp_async_write: %s",
				      sftp_get_ssh_error(sf->sftp));
			return -1;
		}
		thrown -= reqs[idx].len;
		bwlimit_wait(bw, reqs[idx].len);
	}

	if (remaind < 0) {
		priv_set_errv("invalid remaind bytes %ld. "
			      "last async_write_end bytes %lu.",
			      remaind, reqs[idx].len);
		return -1;
	}

	return 0;
}
/* Copy one chunk from remote sftp file sf to local fd.
 *
 * Keeps up to nr_ahead async read requests in flight (each of at most
 * buf_sz bytes), writes the results to fd in order, accounts written
 * bytes into *counter, and throttles through bw. Returns 0 on
 * success, -1 on failure (error recorded via priv_set_errv). */
static int copy_chunk_r2l(struct chunk *c, sftp_file sf, int fd,
			  int nr_ahead, int buf_sz,
			  struct bwlimit *bw, size_t *counter)
{
	ssize_t read_bytes, write_bytes, remain, thrown, len, requested;
	sftp_aio reqs[nr_ahead];
	char buf[buf_sz];
	int i;

	if (c->len == 0)
		return 0;

	/* remain: bytes not yet written; thrown: bytes not yet requested */
	remain = thrown = c->len;

	/* fill the pipeline with up to nr_ahead read requests */
	for (i = 0; i < nr_ahead && thrown > 0; i++) {
		len = min(thrown, sizeof(buf));
		requested = sftp_aio_begin_read(sf, len, &reqs[i]);
		if (requested == SSH_ERROR) {
			priv_set_errv("sftp_aio_begin_read: %d",
				      sftp_get_error(sf->sftp));
			return -1;
		}
		thrown -= requested;
		bwlimit_wait(bw, requested);
	}

	for (i = 0; remain > 0; i = (i + 1) % nr_ahead) {
		read_bytes = sftp_aio_wait_read(&reqs[i], buf, sizeof(buf));
		if (read_bytes == SSH_ERROR) {
			priv_set_errv("sftp_aio_wait_read: %d",
				      sftp_get_error(sf->sftp));
			return -1;
		}

		/* refill the drained slot before the local write */
		if (thrown > 0) {
			len = min(thrown, sizeof(buf));
			requested = sftp_aio_begin_read(sf, len, &reqs[i]);
			thrown -= requested;
			bwlimit_wait(bw, requested);
		}

		write_bytes = write(fd, buf, read_bytes);
		if (write_bytes < 0) {
			priv_set_errv("write: %s", strerrno());
			return -1;
		}

		if (write_bytes < read_bytes) {
			priv_set_errv("failed to write full bytes");
			return -1;
		}

		*counter += write_bytes;
		remain -= write_bytes;
	}

	if (remain < 0) {
		priv_set_errv("invalid remain bytes %ld. last async_read bytes %ld. "
			      "last write bytes %ld",
			      remain, read_bytes, write_bytes);
		return -1;
	}

	return 0;
}
/* Dispatch a chunk copy to the l2r or r2l implementation depending on
 * which side of the transfer is local. */
static int _copy_chunk(struct chunk *c, mf *s, mf *d, int nr_ahead, int buf_sz,
		       struct bwlimit *bw, size_t *counter)
{
	if (s->local && d->remote) {
		/* local to remote copy */
		return copy_chunk_l2r(c, s->local, d->remote, nr_ahead, buf_sz, bw,
				      counter);
	}
	if (s->remote && d->local) {
		/* remote to local copy */
		return copy_chunk_r2l(c, s->remote, d->local, nr_ahead, buf_sz, bw,
				      counter);
	}

	assert(false);
	return -1; /* not reached */
}
/* Copy a single chunk. Exactly one of src_sftp/dst_sftp is non-NULL
 * and determines the direction. When the last chunk of a file
 * completes (its refcnt drops to 0) the destination's attributes are
 * synced from the source. Returns 0 on success, negative on failure. */
int copy_chunk(struct chunk *c, sftp_session src_sftp, sftp_session dst_sftp,
	       int nr_ahead, int buf_sz, bool preserve_ts, struct bwlimit *bw,
	       size_t *counter)
{
	mode_t mode;
	int flags;
	mf *s, *d;
	int ret;

	assert((src_sftp && !dst_sftp) || (!src_sftp && dst_sftp));

	/* create the destination file (once per file, lock-protected) */
	if (prepare_dst_path(c->p, dst_sftp) < 0)
		return -1;

	/* open src */
	flags = O_RDONLY;
	mode = S_IRUSR;
	if (!(s = mscp_open(c->p->path, flags, mode, src_sftp))) {
		priv_set_errv("mscp_open: %s: %s", c->p->path, strerrno());
		return -1;
	}
	if (mscp_lseek(s, c->off) < 0) {
		priv_set_errv("mscp_lseek: %s: %s", c->p->path, strerrno());
		return -1;
	}

	/* open dst */
	flags = O_WRONLY;
	mode = S_IRUSR | S_IWUSR;
	if (!(d = mscp_open(c->p->dst_path, flags, mode, dst_sftp))) {
		mscp_close(s);
		priv_set_errv("mscp_open: %s: %s", c->p->dst_path, strerrno());
		return -1;
	}
	if (mscp_lseek(d, c->off) < 0) {
		priv_set_errv("mscp_lseek: %s: %s", c->p->dst_path, strerrno());
		return -1;
	}

	c->state = CHUNK_STATE_COPING;
	pr_debug("copy chunk start: %s 0x%lx-0x%lx", c->p->path, c->off, c->off + c->len);

	ret = _copy_chunk(c, s, d, nr_ahead, buf_sz, bw, counter);

	pr_debug("copy chunk done: %s 0x%lx-0x%lx", c->p->path, c->off, c->off + c->len);

	mscp_close(d);
	mscp_close(s);
	if (ret < 0)
		return ret;

	/* last chunk of the file: mark done and sync attributes */
	if (refcnt_dec(&c->p->refcnt) == 0) {
		struct stat st;
		c->p->state = FILE_STATE_DONE;

		/* sync stat */
		if (mscp_stat(c->p->path, &st, src_sftp) < 0) {
			priv_set_errv("mscp_stat: %s: %s", c->p->path, strerrno());
			return -1;
		}
		if (mscp_setstat(c->p->dst_path, &st, preserve_ts, dst_sftp) < 0) {
			priv_set_errv("mscp_setstat: %s: %s", c->p->path, strerrno());
			return -1;
		}
		pr_info("copy done: %s", c->p->path);
	}

	if (ret == 0)
		c->state = CHUNK_STATE_DONE;

	return ret;
}
================================================
FILE: src/path.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _PATH_H_
#define _PATH_H_
#include
#include
#include
#include
#include
#include
#include
#include
/* struct path describes one file to be copied and its copy state. */
struct path {
	char *path; /* file path */
	char *dst_path; /* copy dst path */
	refcnt refcnt; /* number of associated chunks */
	lock lock;
	int state; /* one of the FILE_STATE_* values below */
#define FILE_STATE_INIT 0
#define FILE_STATE_OPENED 1
#define FILE_STATE_DONE 2
	uint64_t data; /* used by other components, i.e., checkpoint */
};
struct path *alloc_path(char *path, char *dst_path);
/* struct chunk is a contiguous byte range of a file; chunks of one
 * file share the same struct path *p. */
struct chunk {
	struct path *p;
	size_t off; /* offset of this chunk on the file on path p */
	size_t len; /* length of this chunk */
	int state; /* one of the CHUNK_STATE_* values below */
#define CHUNK_STATE_INIT 0
#define CHUNK_STATE_COPING 1
#define CHUNK_STATE_DONE 2
};
struct chunk *alloc_chunk(struct path *p, size_t off, size_t len);
/* struct path_resolve_args bundles the parameters used while walking
 * source paths, mapping them to destination paths, and splitting files
 * into chunks. */
struct path_resolve_args {
	size_t *total_bytes; /* accumulated size of all resolved files */

	/* args to resolve src path to dst path */
	const char *src_path;
	const char *dst_path;
	bool src_path_is_dir;
	bool dst_path_is_dir;
	bool dst_path_should_dir;

	/* args to resolve chunks for a path */
	pool *path_pool; /* filled with struct path */
	pool *chunk_pool; /* filled with struct chunk */
	int nr_conn; /* number of connections (influences chunking) */
	size_t min_chunk_sz;
	size_t max_chunk_sz;
	size_t chunk_align;
};
/* walk src_path recursively and fill a->path_pool with found files */
int walk_src_path(sftp_session src_sftp, const char *src_path,
struct path_resolve_args *a);
/* free struct path */
void free_path(struct path *p);
/* copy a chunk. either src_sftp or dst_sftp is not null, and another is null */
int copy_chunk(struct chunk *c, sftp_session src_sftp, sftp_session dst_sftp,
int nr_ahead, int buf_sz, bool preserve_ts, struct bwlimit *bw,
size_t *counter);
#endif /* _PATH_H_ */
================================================
FILE: src/platform.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifdef __APPLE__
#include
#include
#include
#include
#elif linux
#define _GNU_SOURCE
#include
#include
#include
#include
#include
#include
#elif __FreeBSD__
#include
#include
#include
#include
#include
#else
#error unsupported platform
#endif
#include
#include
#include
#include
#ifdef __APPLE__
/* Return the number of physical CPU cores on macOS, or -1 on failure
 * (with an error message set). */
int nr_cpus()
{
	int cores;
	size_t len = sizeof(cores);

	if (sysctlbyname("machdep.cpu.core_count", &cores, &len, NULL, 0) != 0) {
		priv_set_errv("failed to get number of cpu cores: %s", strerrno());
		return -1;
	}

	return cores;
}
/* set_thread_affinity() pins a thread to a CPU core. Thread affinity is
 * not implemented on macOS, so this is a no-op that only warns.
 * Always returns 0. */
int set_thread_affinity(pthread_t tid, int core)
{
	/* fixed typo in the warning message ("afinity") */
	pr_warn("setting thread affinity is not implemented on apple");
	return 0;
}
/* Set access/modification times of path on macOS via utimes(2).
 * Returns 0 on success, -1 on failure (errno set by utimes). */
int setutimes(const char *path, struct timespec atime, struct timespec mtime)
{
	struct timeval tv[2] = {
		{
			.tv_sec = atime.tv_sec,
			/* bug fix: convert nanoseconds to microseconds.
			 * the previous code multiplied by 1000. */
			.tv_usec = atime.tv_nsec / 1000,
		},
		{
			.tv_sec = mtime.tv_sec,
			.tv_usec = mtime.tv_nsec / 1000,
		},
	};
	return utimes(path, tv);
}
/* Fill buf with (size - 1) random alphanumeric characters and a
 * terminating '\0'. Used to build unique named-semaphore names. */
static void random_string(char *buf, size_t size)
{
	/* bug fix: the charset was missing the letter 'g' */
	char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890";
	size_t n;
	int x;

	for (n = 0; n < size - 1; n++) {
		/* sizeof(chars) - 1 excludes the trailing '\0' */
		x = arc4random() % (sizeof(chars) - 1);
		buf[n] = chars[x];
	}
	buf[size - 1] = '\0';
}
/* Create a named semaphore on macOS (sem_init is unsupported there; see
 * platform.h). The name is randomized to avoid collisions between mscp
 * processes. Returns NULL on failure. */
sem_t *sem_create(int value)
{
	char sem_name[30] = "mscp-";
	sem_t *sem;
	int n;

	n = strlen(sem_name);
	random_string(sem_name + n, sizeof(sem_name) - n - 1);
	/* bug fix: mode must be octal 0600 (owner rw). the previous
	 * decimal 600 set an unintended permission bit pattern. */
	if ((sem = sem_open(sem_name, O_CREAT, 0600, value)) == SEM_FAILED)
		return NULL;

	return sem;
}
/* Release a named semaphore created by sem_create(). sem_unlink() is
 * intentionally not called here; see the rationale in platform.h. */
int sem_release(sem_t *sem)
{
	return sem_close(sem);
}
#endif
#ifdef linux
/* Return the number of CPUs this process is allowed to run on
 * (its affinity mask), or -1 on failure. */
int nr_cpus()
{
	cpu_set_t set;

	if (sched_getaffinity(0, sizeof(cpu_set_t), &set) != 0)
		return -1;

	return CPU_COUNT(&set);
}
#endif
#ifdef __FreeBSD__
/* Return the number of online CPUs on FreeBSD (-1 if sysconf fails). */
int nr_cpus()
{
	return (int)sysconf(_SC_NPROCESSORS_ONLN);
}
#endif
#if defined(linux) || defined(__FreeBSD__)
/* Pin thread tid to CPU core. Returns 0 on success or -1 on failure
 * (with an error message set). */
int set_thread_affinity(pthread_t tid, int core)
{
	cpu_set_t target_cpu_set;
	int ret;

	CPU_ZERO(&target_cpu_set);
	CPU_SET(core, &target_cpu_set);

	ret = pthread_setaffinity_np(tid, sizeof(target_cpu_set), &target_cpu_set);
	/* bug fix: pthread_setaffinity_np() returns 0 on success or a
	 * positive errno value on failure (it does not set errno), so
	 * the previous `ret < 0` test could never detect an error. */
	if (ret != 0) {
		priv_set_errv("failed to set thread/cpu affinity for core %d: %s",
			      core, strerror(ret));
		return -1;
	}
	return 0;
}
/* Set access/modification times of path via futimens(2).
 * Returns 0 on success, -1 on failure (errno set). */
int setutimes(const char *path, struct timespec atime, struct timespec mtime)
{
	struct timespec times[2];
	int fd, ret;

	times[0] = atime;
	times[1] = mtime;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	ret = futimens(fd, times);
	close(fd);

	return ret;
}
/* Allocate and initialize an unnamed (process-local) semaphore.
 * Returns NULL on allocation or initialization failure. */
sem_t *sem_create(int value)
{
	sem_t *sem = malloc(sizeof(*sem));

	if (sem == NULL)
		return NULL;

	if (sem_init(sem, 0, value) < 0) {
		free(sem);
		return NULL;
	}

	return sem;
}
/* Destroy and free an unnamed semaphore created by sem_create().
 * Always returns 0. */
int sem_release(sem_t *sem)
{
	/* bug fix: POSIX requires sem_destroy() on a semaphore created
	 * with sem_init() before its memory is released. */
	sem_destroy(sem);
	free(sem);
	return 0;
}
#endif
================================================
FILE: src/platform.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _PLATFORM_H_
#define _PLATFORM_H_
#include
#include
#include
#include
int nr_cpus(void);
int set_thread_affinity(pthread_t tid, int core);
int setutimes(const char *path, struct timespec atime, struct timespec mtime);
/*
* macOS does not support sem_init(). macOS (seems to) releases the
* named semaphore when associated mscp process finished. In linux,
* program (seems to) need to release named semaphore in /dev/shm by
* sem_unlink() explicitly. So, using sem_init() (unnamed semaphore)
* in linux and using sem_open() (named semaphore) in macOS without
* sem_unlink() are reasonable (?).
*/
sem_t *sem_create(int value);
int sem_release(sem_t *sem);
#ifdef HAVE_HTONLL
#include /* Apple has htonll and ntohll in arpa/inet.h */
#endif
/* copied from libssh: libssh/include/libssh/priv.h */
#ifndef HAVE_HTONLL
#ifdef WORDS_BIGENDIAN
#define htonll(x) (x)
#else
#define htonll(x) (((uint64_t)htonl((x)&0xFFFFFFFF) << 32) | htonl((x) >> 32))
#endif
#endif
#ifndef HAVE_NTOHLL
#ifdef WORDS_BIGENDIAN
#define ntohll(x) (x)
#else
#define ntohll(x) (((uint64_t)ntohl((x)&0xFFFFFFFF) << 32) | ntohl((x) >> 32))
#endif
#endif
#endif /* _PLATFORM_H_ */
================================================
FILE: src/pool.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#define DEFAULT_START_SIZE 16
/* Allocate an empty pool with the default initial capacity.
 * Returns NULL on allocation failure. */
pool *pool_new(void)
{
	pool *p = calloc(1, sizeof(*p));

	if (p == NULL)
		return NULL;

	p->array = calloc(DEFAULT_START_SIZE, sizeof(void *));
	if (p->array == NULL) {
		free(p);
		return NULL;
	}

	p->len = DEFAULT_START_SIZE;
	p->num = 0;
	lock_init(&p->lock);

	return p;
}
/* Free the backing array and the pool itself. Items stored in the pool
 * are NOT freed; use pool_destroy() for that. */
void pool_free(pool *p)
{
	free(p->array); /* free(NULL) is a no-op */
	p->array = NULL;
	free(p);
}
/* Apply f (typically a destructor) to every item and mark the pool
 * empty. The array capacity is kept. */
void pool_zeroize(pool *p, pool_map_f f)
{
	void *item;

	pool_iter_for_each(p, item) {
		f(item);
	}

	p->num = 0;
}
/* Apply f to every item, then release the pool and its array. */
void pool_destroy(pool *p, pool_map_f f)
{
	pool_zeroize(p, f);
	pool_free(p);
}
/* Append v to the pool, doubling the array capacity when it is full.
 * Returns 0 on success, -1 on allocation failure (pool unchanged). */
int pool_push(pool *p, void *v)
{
	if (p->num == p->len) {
		/* expand array */
		size_t grown = p->len * 2;
		void **tmp = realloc(p->array, grown * sizeof(void *));

		if (tmp == NULL)
			return -1;

		p->len = grown;
		p->array = tmp;
	}

	p->array[p->num] = v;
	/* make the stored pointer visible before publishing the new
	 * count to concurrent readers */
	__sync_synchronize();
	p->num++;

	return 0;
}
/* Thread-safe variant of pool_push(). */
int pool_push_lock(pool *p, void *v)
{
	int ret;

	pool_lock(p);
	ret = pool_push(p, v);
	pool_unlock(p);

	return ret;
}
/* Remove and return the most recently pushed item, or NULL if empty. */
void *pool_pop(pool *p)
{
	if (p->num == 0)
		return NULL;

	p->num--;
	return p->array[p->num];
}
/* Thread-safe variant of pool_pop(). */
void *pool_pop_lock(pool *p)
{
	void *item;

	pool_lock(p);
	item = pool_pop(p);
	pool_unlock(p);

	return item;
}
/* Return the item at index idx, or NULL when idx is out of range. */
void *pool_get(pool *p, unsigned int idx)
{
	if (idx >= p->num)
		return NULL;

	return p->array[idx];
}
/* Return the next item in the iteration, advancing the iterator, or
 * NULL when the iteration is exhausted. */
void *pool_iter_next(pool *p)
{
	if (p->idx >= p->num)
		return NULL;

	return p->array[p->idx++];
}
/* Thread-safe variant of pool_iter_next(); used for the dequeue
 * use-case described in pool.h. */
void *pool_iter_next_lock(pool *p)
{
	void *item;

	pool_lock(p);
	item = pool_iter_next(p);
	pool_unlock(p);

	return item;
}
/* Return true if a subsequent pool_iter_next(_lock)() would yield an
 * item, false when the iteration is exhausted. */
bool pool_iter_has_next_lock(pool *p)
{
	bool has_next;

	pool_lock(p);
	has_next = p->idx < p->num;
	pool_unlock(p);

	return has_next;
}
================================================
FILE: src/pool.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _POOL_H_
#define _POOL_H_
#include
#include
#include
/* A pool like a stack with an iterator walking from the bottom to the
* top. The memory foot print for a pool never shrinks. Thus this is
* not suitable for long-term uses. */
struct pool_struct {
	void **array; /* growable array of stored pointers */
	size_t len; /* length of array */
	size_t num; /* number of items in the array */
	size_t idx; /* index used by iter */
	lock lock; /* used by the *_lock() function variants */
};
typedef struct pool_struct pool;
/* allocate a new pool */
pool *pool_new(void);
/* func type applied to each item in a pool */
typedef void (*pool_map_f)(void *v);
/* apply f, which free an item, to all items and set num to 0 */
void pool_zeroize(pool *p, pool_map_f f);
/* free pool->array and pool */
void pool_free(pool *p);
/* free pool->array and pool after applying f to all items in p->array */
void pool_destroy(pool *p, pool_map_f f);
#define pool_lock(p) LOCK_ACQUIRE(&(p->lock))
#define pool_unlock(p) LOCK_RELEASE()
/*
* pool_push() pushes *v to pool *p. pool_push_lock() does this while
* locking *p.
*/
int pool_push(pool *p, void *v);
int pool_push_lock(pool *p, void *v);
/*
* pool_pop() pops the last *v pushed to *p. pool_pop_lock() does this
* while locking *p.
*/
void *pool_pop(pool *p);
void *pool_pop_lock(pool *p);
/* pool_get() returns value indexed by idx */
void *pool_get(pool *p, unsigned int idx);
#define pool_size(p) ((p)->num)
#define pool_is_empty(p) (pool_size(p) == 0)
/*
* pool->idx indicates next *v in an iteration. This has two
* use-cases.
*
* (1) A simple list: just a single thread has a pool, and the thread
* can call pool_iter_for_each() for the pool (not thread safe).
*
* (2) A thread-safe queue: one thread initializes the iterator for a
* pool by pool_iter_init(). Then, multiple threads get a next *v
* concurrently by pool_iter_next_lock(), which means dequeuing. At
* this time, other thread can add new *v by pool_push_lock(), which
* means enqueuing. During this, other threads must not intercept the
* pool by pool_iter_* functions.
*/
#define pool_iter_init(p) (p->idx = 0)
void *pool_iter_next(pool *p);
void *pool_iter_next_lock(pool *p);
/* pool_iter_has_next_lock() returns true if the pool_iter_next(_lock)
 * function will return a next value, otherwise false, which means
 * there are no more values in this iteration. */
bool pool_iter_has_next_lock(pool *p);
#define pool_iter_for_each(p, v) \
pool_iter_init(p); \
for (v = pool_iter_next(p); v != NULL; v = pool_iter_next(p))
#define pool_for_each(p, v, idx) \
idx = 0; \
for (v = pool_get(p, idx); v != NULL; v = pool_get(p, ++idx))
#endif /* _POOL_H_ */
================================================
FILE: src/print.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
/* message print functions */
static int __print_severity = MSCP_SEVERITY_WARN;
/* Set the threshold severity for message printing; messages with a
 * severity above the threshold are suppressed. A negative value
 * disables printing entirely. (Parameter renamed from the misspelled
 * "serverity" to match the declaration in print.h.) */
void set_print_severity(int severity)
{
	if (severity < 0)
		__print_severity = -1; /* no print */
	else
		__print_severity = severity;
}
/* Return the current print severity threshold (-1 means printing is
 * disabled). */
int get_print_severity()
{
	return __print_severity;
}
================================================
FILE: src/print.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _PRINT_H_
#define _PRINT_H_
#include
#include
#include
#include
#include
/* message print. printed messages are passed to application via msg_fd */
void set_print_severity(int severity);
int get_print_severity();
#define __print(fp, severity, fmt, ...) \
do { \
if (severity <= get_print_severity()) { \
fprintf(fp, "\r\033[K" fmt "\n", ##__VA_ARGS__); \
fflush(fp); \
} \
} while (0)
#define pr_err(fmt, ...) __print(stderr, MSCP_SEVERITY_ERR, fmt, ##__VA_ARGS__)
#define pr_warn(fmt, ...) __print(stderr, MSCP_SEVERITY_WARN, fmt, ##__VA_ARGS__)
#define pr_notice(fmt, ...) __print(stdout, MSCP_SEVERITY_NOTICE, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) __print(stdout, MSCP_SEVERITY_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __print(stdout, MSCP_SEVERITY_DEBUG, fmt, ##__VA_ARGS__)
#endif /* _PRINT_H_ */
================================================
FILE: src/ssh.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#include
#include
#include "libssh/callbacks.h"
#include "libssh/options.h"
static int ssh_verify_known_hosts(ssh_session session);
static int ssh_authenticate_kbdint(ssh_session session);
/* ssh_set_opts() applies the fields of mscp_ssh_opts to a libssh
 * session before connecting. Returns 0 on success or -1 with an error
 * message set. Options left at their zero value are skipped. */
static int ssh_set_opts(ssh_session ssh, struct mscp_ssh_opts *opts)
{
	ssh_set_log_level(opts->debug_level);

	if (opts->login_name &&
	    ssh_options_set(ssh, SSH_OPTIONS_USER, opts->login_name) < 0) {
		priv_set_errv("failed to set login name");
		return -1;
	}

	if (opts->port && ssh_options_set(ssh, SSH_OPTIONS_PORT_STR, opts->port) < 0) {
		priv_set_errv("failed to set port number");
		return -1;
	}

	if (opts->ai_family &&
	    ssh_options_set(ssh, SSH_OPTIONS_AI_FAMILY, &opts->ai_family) < 0) {
		priv_set_errv("failed to set address family");
		return -1;
	}

	if (opts->identity &&
	    ssh_options_set(ssh, SSH_OPTIONS_IDENTITY, opts->identity) < 0) {
		priv_set_errv("failed to set identity");
		return -1;
	}

	/* the same cipher/hmac choice is applied in both directions */
	if (opts->cipher) {
		if (ssh_options_set(ssh, SSH_OPTIONS_CIPHERS_C_S, opts->cipher) < 0) {
			priv_set_errv("failed to set cipher for client to server");
			return -1;
		}
		if (ssh_options_set(ssh, SSH_OPTIONS_CIPHERS_S_C, opts->cipher) < 0) {
			priv_set_errv("failed to set cipher for server to client");
			return -1;
		}
	}

	if (opts->hmac) {
		if (ssh_options_set(ssh, SSH_OPTIONS_HMAC_C_S, opts->hmac) < 0) {
			priv_set_errv("failed to set hmac for client to server");
			return -1;
		}
		if (ssh_options_set(ssh, SSH_OPTIONS_HMAC_S_C, opts->hmac) < 0) {
			priv_set_errv("failed to set hmac for server to client");
			return -1;
		}
	}

	if (opts->compress &&
	    ssh_options_set(ssh, SSH_OPTIONS_COMPRESSION, opts->compress) < 0) {
		priv_set_errv("failed to enable ssh compression");
		return -1;
	}

	if (opts->ccalgo && ssh_options_set(ssh, SSH_OPTIONS_CCALGO, opts->ccalgo) < 0) {
		priv_set_errv("failed to set cclago");
		return -1;
	}

	/* if NOT specified to enable Nagle's algorithm, disable it (set TCP_NODELAY) */
	if (!opts->enable_nagle) {
		int v = 1;
		if (ssh_options_set(ssh, SSH_OPTIONS_NODELAY, &v) < 0) {
			priv_set_errv("failed to set TCP_NODELAY");
			return -1;
		}
	}

	if (opts->config && ssh_options_parse_config(ssh, opts->config) < 0) {
		priv_set_errv("failed to parse ssh_config: %s", opts->config);
		return -1;
	}

	/* proxyjump is injected as an ssh_config-style "proxyjump=..." line */
	if (opts->proxyjump) {
		char buf[256];
		memset(buf, 0, sizeof(buf));
		snprintf(buf, sizeof(buf), "proxyjump=%s", opts->proxyjump);
		if (ssh_config_parse_string(ssh, buf) != SSH_OK) {
			priv_set_errv("failed to set ssh option: %s", buf);
			return -1;
		}
	}

	/* opts->options is a NULL-terminated array of ssh_config-style
	 * strings, each parsed individually */
	if (opts->options) {
		int n;
		for (n = 0; opts->options[n]; n++) {
			if (ssh_config_parse_string(ssh, opts->options[n]) != SSH_OK) {
				priv_set_errv("failed to set ssh option: %s",
					      opts->options[n]);
				return -1;
			}
		}
	}

	return 0;
}
/* ssh_authenticate() attempts authentication in order: none, publickey,
 * password, then keyboard-interactive. Returns 0 on success, -1 when
 * every method fails.
 *
 * NOTE(review): auth_bit_mask is function-static, so the method list
 * learned from the first connection is shared by all later connections
 * in this process — confirm this sharing is intended when sessions are
 * created from multiple threads. */
static int ssh_authenticate(ssh_session ssh, struct mscp_ssh_opts *opts)
{
	static int auth_bit_mask;
	int ret; /* NOTE(review): unused local, kept as-is */

	if (auth_bit_mask == 0) {
		/* the first authentication attempt. try none auth to
		 * get available auth methods. */
		if (ssh_userauth_none(ssh, NULL) == SSH_AUTH_SUCCESS)
			return 0;

		/* save auth_bit_mask for further authentications */
		auth_bit_mask = ssh_userauth_list(ssh, NULL);
	}

	if (auth_bit_mask & SSH_AUTH_METHOD_PUBLICKEY) {
		/* passphrase may have been cached by ssh_cache_passphrase() */
		char *p = opts->passphrase ? opts->passphrase : NULL;
		if (ssh_userauth_publickey_auto(ssh, NULL, p) == SSH_AUTH_SUCCESS)
			return 0;
	}

	if (auth_bit_mask & SSH_AUTH_METHOD_PASSWORD) {
		if (!opts->password) {
			/* prompt once; the answer is cached in opts so
			 * later connections can reuse it */
			char buf[128] = {};
			if (ssh_getpass("Password: ", buf, sizeof(buf), 0, 0) < 0) {
				priv_set_errv("ssh_getpass failed");
				return -1;
			}
			if (!(opts->password = strndup(buf, sizeof(buf)))) {
				priv_set_errv("strndup: %s", strerrno());
				return -1;
			}
		}

		if (ssh_userauth_password(ssh, NULL, opts->password) == SSH_AUTH_SUCCESS)
			return 0;
	}

	/* re-query the server's method list before trying
	 * keyboard-interactive (presumably it can change after the
	 * earlier attempts — TODO confirm) */
	auth_bit_mask = ssh_userauth_list(ssh, NULL);
	if (auth_bit_mask & SSH_AUTH_METHOD_INTERACTIVE) {
		if (ssh_authenticate_kbdint(ssh) == SSH_AUTH_SUCCESS)
			return 0;
	}

	return -1;
}
/* libssh auth callback: prompt the user for a private-key passphrase
 * and cache it in opts->passphrase so later connections do not prompt
 * again. Returns 0 on success, -1 on failure. */
static int ssh_cache_passphrase(const char *prompt, char *buf, size_t len, int echo,
				int verify, void *userdata)
{
	struct mscp_ssh_opts *opts = userdata;

	/* This function is called on the first time for importing
	 * priv key file with passphrase. It is not called on the
	 * second time or after because cached passphrase is passed
	 * to ssh_userauth_publickey_auto(). */

	/* ToDo: use
	 * ssh_userauth_publickey_auto_get_current_identity() to print
	 * id for which we ask passphrase */

	if (ssh_getpass("Passphrase: ", buf, len, echo, verify) < 0)
		return -1;

	/* cache the passphrase, dropping any previously cached one
	 * (free(NULL) is a no-op) */
	free(opts->passphrase);
	opts->passphrase = strndup(buf, len);
	if (opts->passphrase == NULL) {
		priv_set_errv("strndup: %s", strerrno());
		return -1;
	}

	return 0;
}
/* libssh callback table: only auth_function is used (to prompt for and
 * cache key passphrases). userdata is set to the session's
 * mscp_ssh_opts in ssh_init_session(). */
static struct ssh_callbacks_struct cb = {
	.auth_function = ssh_cache_passphrase,
	.userdata = NULL,
};
/* Create, configure, connect, and authenticate an ssh session to
 * sshdst. Returns the session, or NULL on failure with an error
 * message set. */
static ssh_session ssh_init_session(const char *sshdst, struct mscp_ssh_opts *opts)
{
	ssh_session ssh = ssh_new();

	/* bug fix: ssh_new() can return NULL on allocation failure; the
	 * previous code used the result unconditionally. */
	if (!ssh) {
		priv_set_errv("ssh_new failed");
		return NULL;
	}

	ssh_callbacks_init(&cb);
	cb.userdata = opts;
	ssh_set_callbacks(ssh, &cb);

	if (ssh_options_set(ssh, SSH_OPTIONS_HOST, sshdst) != SSH_OK) {
		priv_set_errv("failed to set destination host");
		goto free_out;
	}

	if (ssh_set_opts(ssh, opts) != 0)
		goto free_out;

	if (ssh_connect(ssh) != SSH_OK) {
		priv_set_errv("failed to connect ssh server: %s", ssh_get_error(ssh));
		goto free_out;
	}

	if (ssh_authenticate(ssh, opts) != 0) {
		priv_set_errv("authentication failed: %s", ssh_get_error(ssh));
		goto disconnect_out;
	}

	/* error message typo fixed ("veriy" -> "verify") */
	if (ssh_verify_known_hosts(ssh) != 0) {
		priv_set_errv("ssh_verify_known_hosts failed");
		goto disconnect_out;
	}

	return ssh;

disconnect_out:
	ssh_disconnect(ssh);
free_out:
	ssh_free(ssh);
	return NULL;
}
/* Establish an sftp session on top of a freshly created ssh connection
 * to sshdst. Returns NULL on failure with an error message set; the
 * underlying ssh session is torn down on sftp errors. */
sftp_session ssh_init_sftp_session(const char *sshdst, struct mscp_ssh_opts *opts)
{
	ssh_session ssh;
	sftp_session sftp;

	ssh = ssh_init_session(sshdst, opts);
	if (ssh == NULL)
		return NULL;

	if ((sftp = sftp_new(ssh)) == NULL) {
		priv_set_errv("failed to allocate sftp session: %s", ssh_get_error(ssh));
		goto err_out;
	}

	if (sftp_init(sftp) != SSH_OK) {
		priv_set_errv("failed to initialize sftp session: err code %d",
			      sftp_get_error(sftp));
		goto err_out;
	}

	return sftp;

err_out:
	ssh_disconnect(ssh);
	ssh_free(ssh);
	return NULL;
}
/* copied from https://api.libssh.org/stable/libssh_tutor_guided_tour.html*/
/* copied from https://api.libssh.org/stable/libssh_tutor_guided_tour.html*/
/* Verify the server's host key against the known_hosts file. For an
 * unknown host, prompt the user on stdin and, on "yes", record the key.
 * Returns 0 when the host is trusted, -1 otherwise. */
static int ssh_verify_known_hosts(ssh_session session)
{
	enum ssh_known_hosts_e state;
	unsigned char *hash = NULL;
	ssh_key srv_pubkey = NULL;
	size_t hlen;
	char buf[10]; /* holds the interactive yes/no answer */
	char *hexa;
	char *p;
	int cmp;
	int rc;

	rc = ssh_get_server_publickey(session, &srv_pubkey);
	if (rc < 0) {
		return -1;
	}

	/* hash is used only for displaying the key to the user */
	rc = ssh_get_publickey_hash(srv_pubkey, SSH_PUBLICKEY_HASH_SHA1, &hash, &hlen);
	ssh_key_free(srv_pubkey);
	if (rc < 0) {
		return -1;
	}

	state = ssh_session_is_known_server(session);
	switch (state) {
	case SSH_KNOWN_HOSTS_OK:
		/* OK */
		break;
	case SSH_KNOWN_HOSTS_CHANGED:
		/* key mismatch: refuse the connection */
		fprintf(stderr, "Host key for server changed: it is now:\n");
		//ssh_print_hexa("Public key hash", hash, hlen);
		fprintf(stderr, "For security reasons, connection will be stopped\n");
		ssh_clean_pubkey_hash(&hash);
		return -1;
	case SSH_KNOWN_HOSTS_OTHER:
		fprintf(stderr, "The host key for this server was not found but an other"
				"type of key exists.\n");
		fprintf(stderr,
			"An attacker might change the default server key to"
			"confuse your client into thinking the key does not exist\n");
		ssh_clean_pubkey_hash(&hash);
		return -1;
	case SSH_KNOWN_HOSTS_NOT_FOUND:
		fprintf(stderr, "Could not find known host file.\n");
		fprintf(stderr, "If you accept the host key here, the file will be"
				"automatically created.\n");
		/* FALL THROUGH to SSH_SERVER_NOT_KNOWN behavior */
	case SSH_KNOWN_HOSTS_UNKNOWN:
		/* ask the user whether to trust the unknown key */
		hexa = ssh_get_hexa(hash, hlen);
		fprintf(stderr, "The server is unknown. Do you trust the host key?\n");
		fprintf(stderr, "Public key hash: %s\n", hexa);
		fprintf(stderr, "(yes/no): ");
		ssh_string_free_char(hexa);
		ssh_clean_pubkey_hash(&hash);
		p = fgets(buf, sizeof(buf), stdin);
		if (p == NULL) {
			return -1;
		}
		cmp = strncasecmp(buf, "yes", 3);
		if (cmp != 0) {
			return -1;
		}
		/* persist the accepted key into known_hosts */
		rc = ssh_session_update_known_hosts(session);
		if (rc < 0) {
			priv_set_errv("%s", ssh_get_error(session));
			return -1;
		}
		break;
	case SSH_KNOWN_HOSTS_ERROR:
		fprintf(stderr, "known hosts error: %s", ssh_get_error(session));
		ssh_clean_pubkey_hash(&hash);
		return -1;
	}

	ssh_clean_pubkey_hash(&hash);
	return 0;
}
/* Keyboard-interactive authentication loop: answer every prompt the
 * server sends, echoing input only when the server allows it. Returns
 * the final ssh_userauth_kbdint() result (SSH_AUTH_SUCCESS on success)
 * or SSH_AUTH_ERROR on local input failure. */
static int ssh_authenticate_kbdint(ssh_session ssh)
{
	/* Copied and bit modified from
	 * https://api.libssh.org/stable/libssh_tutor_authentication.html */
	int rc;

	rc = ssh_userauth_kbdint(ssh, NULL, NULL);
	/* SSH_AUTH_INFO means the server has (more) prompts for us */
	while (rc == SSH_AUTH_INFO) {
		const char *name, *instruction;
		int nprompts, iprompt;

		name = ssh_userauth_kbdint_getname(ssh);
		instruction = ssh_userauth_kbdint_getinstruction(ssh);
		nprompts = ssh_userauth_kbdint_getnprompts(ssh);

		if (strlen(name) > 0)
			printf("%s\n", name);
		if (strlen(instruction) > 0)
			printf("%s\n", instruction);
		for (iprompt = 0; iprompt < nprompts; iprompt++) {
			const char *prompt;
			char echo; /* nonzero when input may be echoed */

			prompt = ssh_userauth_kbdint_getprompt(ssh, iprompt, &echo);
			if (echo) {
				char buf[128], *ptr;
				printf("%s", prompt);
				if (fgets(buf, sizeof(buf), stdin) == NULL)
					return SSH_AUTH_ERROR;
				buf[sizeof(buf) - 1] = '\0';
				/* strip the trailing newline */
				if ((ptr = strchr(buf, '\n')) != NULL)
					*ptr = '\0';
				if (ssh_userauth_kbdint_setanswer(ssh, iprompt, buf) < 0)
					return SSH_AUTH_ERROR;
				/* wipe the answer from memory */
				memset(buf, 0, strlen(buf));
			} else {
				/* no-echo prompt (e.g. a password) */
				char *ptr;
				ptr = getpass(prompt);
				if (ssh_userauth_kbdint_setanswer(ssh, iprompt, ptr) < 0)
					return SSH_AUTH_ERROR;
			}
		}
		rc = ssh_userauth_kbdint(ssh, NULL, NULL);
	}
	return rc;
}
/* Tear down an sftp session and its underlying ssh session. The sftp
 * structure itself is deliberately not freed (see the XXX below). */
void ssh_sftp_close(sftp_session sftp)
{
	ssh_session ssh = sftp_ssh(sftp);

	/* XXX: sftp_free is stuck in ssh_poll_ctx_dopoll() when build type is Release.
	 * skip sftp_free inappropriately...
	 */
	//sftp_free(sftp);
	ssh_disconnect(ssh);
	ssh_free(ssh);
}
/* Public wrapper exposing libssh's supported cipher list. */
const char **mscp_ssh_ciphers(void)
{
	return ssh_ciphers();
}
/* Public wrapper exposing libssh's supported HMAC list. */
const char **mscp_ssh_hmacs(void)
{
	return ssh_hmacs();
}
================================================
FILE: src/ssh.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _SSH_H_
#define _SSH_H_
#include
#include "libssh/libssh.h"
#include "libssh/sftp.h"
#include
/* ssh_init_sftp_session() creates sftp_session. sshdst accepts
 * user@hostname and hostname notations (by libssh).
 */
sftp_session ssh_init_sftp_session(const char *sshdst, struct mscp_ssh_opts *opts);
void ssh_sftp_close(sftp_session sftp);
#define sftp_ssh(sftp) (sftp)->session
#define sftp_get_ssh_error(sftp) ssh_get_error(sftp_ssh(sftp))
#endif /* _SSH_H_ */
================================================
FILE: src/strerrno.c
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#include
#include
#include
#include
#include
#define STRERRNO_TLS_BUFSIZ 128
__thread char tls_strerrno_buf[STRERRNO_TLS_BUFSIZ];
/* Return a thread-local error string for the current errno.
 * The buffer is pre-filled with a fallback message because the GNU
 * variant of strerror_r() may return its message without writing into
 * the supplied buffer — NOTE(review): on glibc with _GNU_SOURCE the
 * returned pointer (ignored here) is the authoritative string; confirm
 * the build selects the XSI variant where the buffer is filled. */
const char *strerrno(void)
{
	snprintf(tls_strerrno_buf, sizeof(tls_strerrno_buf), "%s", "strerror_r error");
	strerror_r(errno, tls_strerrno_buf, sizeof(tls_strerrno_buf));
	return tls_strerrno_buf;
}
#define PRIV_ERR_BUFSIZ (1 << 12)
__thread char priv_err_buf[PRIV_ERR_BUFSIZ], internal[PRIV_ERR_BUFSIZ];
/* Format an error message into the thread-local private buffer.
 * The message is first rendered into a separate scratch buffer and
 * then copied, so callers may safely pass priv_get_err() (i.e. the
 * destination buffer itself) as a format argument. */
void priv_set_err(const char *fmt, ...)
{
	va_list va;

	memset(internal, 0, sizeof(internal));
	va_start(va, fmt);
	vsnprintf(internal, sizeof(internal), fmt, va);
	va_end(va);

	snprintf(priv_err_buf, sizeof(priv_err_buf), "%s", internal);
}
/* Return the thread-local error message set by priv_set_err(). */
const char *priv_get_err()
{
	return priv_err_buf;
}
================================================
FILE: src/strerrno.h
================================================
/* SPDX-License-Identifier: GPL-3.0-only */
#ifndef _STRERRNO_
#define _STRERRNO_
#include /* basename() */
/**
* strerrno() returns error message string corresponding to errno.
* strerrno() is thread safe.
*/
const char *strerrno(void);
/**
* priv_set_err() sets an error message into a thread-local private
* buffer. This error message can be accessed via priv_get_err().
*
* The top-level function in a thread should print errors using
* priv_get_err(), while lower-level functions should set error
* messages using priv_set_err().
*/
void priv_set_err(const char *fmt, ...);
/**
* priv_set_errv(), a wrapper for priv_set_err(), just adds filename,
* line, and function name to the error message.
*/
#define priv_set_errv(fmt, ...) \
priv_set_err("[%s:%d:%s] " fmt "\0", basename(__FILE__), __LINE__, __func__, \
##__VA_ARGS__)
/**
 * priv_get_err() gets the error message stored in the thread-local private buffer.
 */
const char *priv_get_err();
#endif /* _STRERRNO_ */
================================================
FILE: test/.gitignore
================================================
__pycache__
================================================
FILE: test/README.md
================================================
This test assumes that the user executing the test can ssh to the
localhost without password.
- Run pytest through ctest.
```console
python3 -m pip install pytest numpy
cd build
cmake ..
ctest --verbose # or `make test ARGS='-V'`
```
================================================
FILE: test/conftest.py
================================================
import pytest
def pytest_addoption(parser):
    """Register the --mscp-path option used to locate the binary under test."""
    parser.addoption(
        "--mscp-path",
        default="mscp",
        help="path to mscp binary",
    )
@pytest.fixture
def mscp(request):
    """Fixture returning the path to the mscp binary (from --mscp-path)."""
    return request.config.getoption("--mscp-path")
================================================
FILE: test/test_e2e.py
================================================
"""
test_e2e.py: End-to-End test for mscp executable.
"""
import platform
import pytest
import getpass
import datetime
import time
import os
import re
import shutil
from subprocess import check_call, CalledProcessError
from util import File, check_same_md5sum
def run2ok(args, env=None, quiet=False):
    """Run a command and expect it to exit with status 0."""
    cmd = [str(a) for a in args]
    if not quiet:
        print("cmd: " + " ".join(cmd))
    check_call(cmd, env=env)
def run2ng(args, env=None, timeout=None, quiet=False):
    """Run a command and expect a non-zero exit status.

    When `timeout` is given, the command is wrapped with timeout(1),
    which sends SIGINT after the given duration.
    """
    if timeout:
        args = ["timeout", "-s", "INT", timeout] + args
    cmd = [str(a) for a in args]
    if not quiet:
        print("cmd: " + " ".join(cmd))
    with pytest.raises(CalledProcessError):
        check_call(cmd, env=env)
@pytest.fixture(autouse=True)
def cleanup_files():
    """
    Cleanup files having the following `prefixes` or matching `paths`
    after each test.
    """
    yield
    prefixes = [
        "src", "dst",
        "non_existent_dstdir",
    ]
    paths = [
        "/mscp-test-src", "/tmp/mscp-test-src",
        "{}/src".format(os.environ["HOME"]),
        "{}/dst".format(os.environ["HOME"]),
        "/tmp/mscp_test_ssh_config",
        "/home/test/dst",
        "/home/test/src",
        "checkpoint",
    ]

    def remove(path):
        # bug fix: print the argument `path`, not the enclosing loop
        # variable `fname`, which is unbound here when the cwd listing
        # is empty (NameError) and wrong for the `paths` loop anyway.
        print(f"cleanup remove: {path}")
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)

    for fname in os.listdir(os.getcwd()):
        for prefix in prefixes:
            if fname.startswith(prefix):
                remove(fname)
                break

    for path in paths:
        if os.path.exists(path):
            remove(path)
""" usage test """
def test_usage(mscp):
    """mscp with no arguments fails; `mscp -h` succeeds."""
    run2ng([mscp])
    run2ok([mscp, "-h"])
def test_invalid_chunk_size_config(mscp):
    """min chunk size (-s) larger than max chunk size (-S) must fail."""
    run2ng([mscp, "-s", 8 << 20, "-S", 4 << 20])
# Argument lists mixing different remote hosts (or remote and local in
# unsupported combinations); mscp must reject all of them.
param_invalid_hostnames = [
    (["a:a", "b:b", "c:c"]), (["a:a", "b:b", "c"]), (["a:a", "b", "c:c"]),
    (["a", "b:b", "c:c"])
]
@pytest.mark.parametrize("args", param_invalid_hostnames)
def test_nonidentical_hostnames(mscp, args):
    """mscp must fail when paths refer to non-identical hosts."""
    run2ng([mscp] + args)
""" copy test """
# Remote notation pointing at the current directory over localhost;
# each copy test runs local->remote and remote->local.
remote_prefix = "localhost:{}/".format(os.getcwd())  # use current dir
param_remote_prefix = [
    ("", remote_prefix), (remote_prefix, "")
]

# single-file copies of increasing sizes (tiny, page-sized, large)
param_single_copy = [
    (File("src", size = 64), File("dst")),
    (File("src", size = 4096 * 1), File("dst")),
    (File("src", size = 128 * 1024 * 1024), File("dst")),
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("src, dst", param_single_copy)
def test_single_copy(mscp, src_prefix, dst_prefix, src, dst):
    """Copy a single file and verify content equality via md5sum."""
    src.make()
    run2ok([mscp, "-vvv", src_prefix + src.path, dst_prefix + dst.path])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_failed_to_copy_nonexistent_file(mscp, src_prefix, dst_prefix):
    """Copying a nonexistent source file must fail."""
    src = "nonexistent_src"
    dst = "nonexistent_dst"
    run2ng([mscp, "-vvv", src_prefix + src, dst_prefix + dst])
# Two source files copied into one destination directory.
param_double_copy = [
    (File("src1", size = 1024 * 1024), File("src2", size = 1024 * 1024),
     File("dst/src1"), File("dst/src2")
     )
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("s1, s2, d1, d2", param_double_copy)
def test_double_copy(mscp, src_prefix, dst_prefix, s1, s2, d1, d2):
    """Copy two files into a directory and verify both contents."""
    s1.make()
    s2.make()
    run2ok([mscp, "-vvv", src_prefix + s1.path, src_prefix + s2.path, dst_prefix + "dst"])
    assert check_same_md5sum(s1, d1)
    assert check_same_md5sum(s2, d2)
# Remote notation using an IPv6 literal address.
remote_v6_prefix = "[::1]:{}/".format(os.getcwd())
param_remote_v6_prefix = [
    ("", remote_v6_prefix), (remote_v6_prefix, "")
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_v6_prefix)
@pytest.mark.parametrize("s1, s2, d1, d2", param_double_copy)
def test_double_copy_with_ipv6_notation(mscp, src_prefix, dst_prefix, s1, s2, d1, d2):
    """Copy works with a bracketed IPv6 address as the remote host."""
    s1.make()
    s2.make()
    run2ok([mscp, "-vvv",
            src_prefix + s1.path, src_prefix + s2.path, dst_prefix + "dst"])
    assert check_same_md5sum(s1, d1)
    assert check_same_md5sum(s2, d2)
# Remote notation combining user@ with a bracketed IPv6 address.
remote_user_v6_prefix = "{}@[::1]:{}/".format(getpass.getuser(), os.getcwd())
param_remote_user_v6_prefix = [
    ("", remote_user_v6_prefix), (remote_user_v6_prefix, "")
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_user_v6_prefix)
@pytest.mark.parametrize("s1, s2, d1, d2", param_double_copy)
def test_double_copy_with_user_and_ipv6_notation(mscp, src_prefix, dst_prefix,
                                                 s1, s2, d1, d2):
    """Copy works with user@[ipv6]:path remote notation."""
    s1.make()
    s2.make()
    run2ok([mscp, "-vvv",
            src_prefix + s1.path, src_prefix + s2.path, dst_prefix + "dst"])
    assert check_same_md5sum(s1, d1)
    assert check_same_md5sum(s2, d2)
# Recursive directory copy fixtures: source tree, expected layout when
# the destination does not exist (rename semantics), and expected
# layout when it already exists (copy-into semantics).
param_dir_copy = [
    ( "src_dir", "dst_dir",
      [ File("src_dir/t1", size = 64),
        File("src_dir/t2", size = 4096),
        File("src_dir/d1/t3", size = 64),
        File("src_dir/d1/d2/t4", size = 128), ],
      [ File("dst_dir/t1"),
        File("dst_dir/t2"),
        File("dst_dir/d1/t3"),
        File("dst_dir/d1/d2/t4"), ],
      [ File("dst_dir/src_dir/t1"),
        File("dst_dir/src_dir/t2"),
        File("dst_dir/src_dir/d1/t3"),
        File("dst_dir/src_dir/d1/d2/t4"), ],
      )
]
"""
`scp remote:src_dir dst_dir` renames src_dir to dst_dir if dst_dir
does not exist. If dst_dir exists, scp copies src_dir to
dst_dir/src_dir. So, this test checks both cases.
"""
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("src_dir, dst_dir, src, dst, twice", param_dir_copy)
def test_dir_copy(mscp, src_prefix, dst_prefix, src_dir, dst_dir, src, dst, twice):
    """First copy renames src_dir to dst_dir; the second copy (dst_dir
    now exists) places src_dir inside dst_dir, matching scp semantics."""
    for f in src:
        f.make()
    run2ok([mscp, "-vvv", src_prefix + src_dir, dst_prefix + dst_dir])
    for sf, df in zip(src, dst):
        assert check_same_md5sum(sf, df)
    run2ok([mscp, "-vvv", src_prefix + src_dir, dst_prefix + dst_dir])
    for sf, df in zip(src, twice):
        assert check_same_md5sum(sf, df)
# A directory containing a single file, copied into a pre-existing
# destination directory.
param_dir_copy_single = [
    ("src_dir", "dst_dir",
     File("src_dir/t1", size = 1024 * 1024),
     File("dst_dir/src_dir/t1"),
     )
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("src_dir, dst_dir, src, dst", param_dir_copy_single)
def test_dir_copy_single(mscp, src_prefix, dst_prefix, src_dir, dst_dir, src, dst):
    """Copying a dir into an existing dir places it inside that dir."""
    src.make()
    os.mkdir(dst_dir)
    run2ok([mscp, "-vvv", src_prefix + src_dir, dst_prefix + dst_dir])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_override_single_file(mscp, src_prefix, dst_prefix):
    """Copying over an existing file overwrites its content."""
    src = File("src", size = 128).make()
    dst = File("dst", size = 128).make()
    # both files exist with different random content before the copy
    assert not check_same_md5sum(src, dst)
    run2ok([mscp, "-vvv", src_prefix + src.path, dst_prefix + dst.path])
    assert check_same_md5sum(src, dst)
# Remote notation with no path component, so remote paths are absolute.
absolute_remote_prefix = "localhost:"
param_absolute_remote_prefix = [
    ("", absolute_remote_prefix), (absolute_remote_prefix, "")
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_absolute_remote_prefix)
def test_copy_file_under_root_to_dir(mscp, src_prefix, dst_prefix):
    """Copy a file located directly under / into a directory."""
    src = File("/mscp-test-src", size = 1024).make()
    dst = File("/tmp/mscp-test-src")
    run2ok([mscp, "-vvv", src_prefix + src.path,
            dst_prefix + os.path.dirname(dst.path)])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_dst_has_suffix_slash(mscp, src_prefix, dst_prefix):
    """
    if dst path has suffix '/' and does not exist,
    mscp should create the directory and put src-file-name inside it.
    """
    dstdir = "non_existent_dstdir/"
    src = File("src", size=1024 * 1024).make()
    # dstdir already ends with '/', so this is "non_existent_dstdir//src";
    # the doubled slash is harmless to the OS.
    dst = File(dstdir + "/src")
    run2ok([mscp, "-vvv", src_prefix + src.path, dst_prefix + dstdir])
    assert check_same_md5sum(src, dst)
param_tilde_paths = [
    ("src", "localhost:~/dst"),
    ("localhost:~/src", "dst"),
]
@pytest.mark.parametrize("src_path, dst_path", param_tilde_paths)
def test_remote_path_contains_tilde(mscp, src_path, dst_path):
    """
    if remote path contains '~' as prefix, it should be expanded as '.'.
    Note that `~user` notation is not supported yet.
    """
    def extract_and_expand(path):
        # Drop the "host:" part (if any), then expand '~' to $HOME.
        head, sep, tail = path.partition(':')
        local = tail if sep else head
        return local.replace('~', os.environ["HOME"])
    src = File(extract_and_expand(src_path), size=1024 * 1024).make()
    dst = File(extract_and_expand(dst_path))
    run2ok([mscp, "-vvv", src_path, dst_path])
    assert check_same_md5sum(src, dst)
def test_remote_path_contains_tilde2(mscp):
    """A bare "host:~" destination resolves to the remote $HOME."""
    src = File("src", size=1024 * 1024).make()
    dst = File(os.environ['HOME'] + "/src")
    run2ok([mscp, "-vvv", src.path, "localhost:~"])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_min_chunk(mscp, src_prefix, dst_prefix):
    """Copy a file smaller than the minimum chunk size given via -s."""
    src = File("src", size=16 * 1024).make()
    dst = File("dst")
    args = [mscp, "-vvv", "-s", 32768,
            src_prefix + src.path, dst_prefix + dst.path]
    run2ok(args)
    assert check_same_md5sum(src, dst)
def is_alpine():
    """Return True when running on Alpine Linux (per /etc/os-release)."""
    try:
        with open("/etc/os-release", "r") as f:
            return any(line.strip() == "ID=alpine" for line in f)
    except FileNotFoundError:
        # No os-release file: certainly not Alpine.
        return False
# (glob pattern, dst dir, files to create, files expected after the copy)
param_glob_copy = [
    (
        "src*", "dstx",
        [File("src1"), File("src2"), File("src3")],
        [File("dstx/src1"), File("dstx/src2"), File("dstx/src3")],
    ),
    (
        "src*", "dstx",
        [File("src1/s1"), File("src2/s2"), File("src3/s3")],
        [File("dstx/s1"), File("dstx/s2"), File("dstx/s3")],
    ),
]
@pytest.mark.skipif(is_alpine(),
                    reason = "musl does not implement glob ALTDIRFUNC")
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("src_glob_path, dst_path, srcs, dsts", param_glob_copy)
def test_glob_src_path(mscp, src_prefix, dst_prefix,
                       src_glob_path, dst_path, srcs, dsts):
    """A glob pattern on the source side must pick up every match."""
    for s in srcs:
        s.make(size=1024 * 1024)
    run2ok([mscp, "-vvv", src_prefix + src_glob_path, dst_prefix + dst_path])
    for s, d in zip(srcs, dsts):
        assert check_same_md5sum(s, d)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_thread_affinity(mscp, src_prefix, dst_prefix):
    """-m pins copy threads to the CPUs in the given mask."""
    src = File("src", size=64 * 1024).make()
    dst = File("dst")
    opts = [mscp, "-vvv", "-n", 4, "-m", "0x01"]
    run2ok(opts + [src_prefix + src.path, dst_prefix + dst.path])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_cannot_override_file_with_dir(mscp, src_prefix, dst_prefix):
    """Copying to a path whose parent is a regular file must fail."""
    # "dst" is a regular file, so "dst/src" cannot be created.
    regular = File("dst").make()
    src = File("src", size=128).make()
    run2ng([mscp, "-vvv", src_prefix + src.path, dst_prefix + "dst/src"])
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_transfer_zero_bytes(mscp, src_prefix, dst_prefix):
    """Copying an empty (0-byte) file must still create the destination."""
    src = File("src", size = 0).make()
    dst = File("dst")
    # Previously `dst` was created but never used ("dst" literals were
    # repeated instead); refer to dst.path so the local is meaningful.
    run2ok([mscp, "-vvv", src_prefix + src.path, dst_prefix + dst.path])
    assert os.path.exists(dst.path)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_override_dst_having_larger_size(mscp, src_prefix, dst_prefix):
    """A dst larger than src must end up exactly matching src."""
    dst = File("dst", size=1024 * 1024 * 2).make()
    src = File("src", size=1024 * 1024).make()
    run2ok([mscp, "-vvv", src_prefix + src.path, dst_prefix + "dst"])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_dont_truncate_dst(mscp, src_prefix, dst_prefix):
    """Copying a file onto itself must not corrupt its content."""
    target = File("srcanddst", size=1024 * 1024 * 128).make()
    digest_before = target.md5sum()
    run2ok([mscp, "-vvv", src_prefix + target.path, dst_prefix + target.path])
    assert digest_before == target.md5sum()
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_copy_readonly_file(mscp, src_prefix, dst_prefix):
    """When a source file permission is r--r--r--, if chmod(r--r--r--)
    runs first on the remote side, the following truncate() and setutime()
    fail due to permission denied. So, run chmod() after truncate()
    and setutime()
    """
    readonly_src = File("src", size=1024 * 1024 * 128, perm=0o444).make()
    dst = File("dst")
    run2ok([mscp, "-vvv",
            src_prefix + readonly_src.path, dst_prefix + dst.path])
    assert check_same_md5sum(readonly_src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_dont_make_conns_more_than_chunks(mscp, src_prefix, dst_prefix):
    # copy 100 files with -n 20 -I 1 options. if mscp creates 20 SSH
    # connections although all files have been copied, it is error.
    pairs = [(File("src/src-{:06d}".format(i), size=1024).make(),
              File("dst/src-{:06d}".format(i)))
             for i in range(100)]
    start = time.time()
    run2ok([mscp, "-v", "-n", "20", "-I", "1",
            src_prefix + "src", dst_prefix + "dst"])
    elapsed = time.time() - start
    for s, d in pairs:
        assert check_same_md5sum(s, d)
    # 20 connections at a 1 s interval would take >= 20 s, so a fast run
    # shows unneeded connections were not opened.
    assert elapsed < 10
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_bwlimit(mscp, src_prefix, dst_prefix):
    """Copy 100MB file with 100Mbps bitrate, this requires 8 seconds."""
    src = File("src", size=100 * 1024 * 1024).make()
    dst = File("dst")
    begin = datetime.datetime.now().timestamp()
    run2ok([mscp, "-vvv", "-L", "100m", src_prefix + "src", dst_prefix + "dst"])
    elapsed = datetime.datetime.now().timestamp() - begin
    assert check_same_md5sum(src, dst)
    # the transfer must not finish much faster than the limit allows
    assert elapsed > 7
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("src, dst", param_single_copy)
def test_set_port_ng(mscp, src_prefix, dst_prefix, src, dst):
    """Connecting to a wrong port (-P 21) must fail."""
    src.make()
    args = [mscp, "-vvv", "-P", 21, src_prefix + src.path, dst_prefix + dst.path]
    run2ng(args)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("src, dst", param_single_copy)
def test_set_port_ok(mscp, src_prefix, dst_prefix, src, dst):
    """Connecting to the test sshd's port (-P 8022) must succeed."""
    src.make()
    args = [mscp, "-vvv", "-P", 8022, src_prefix + src.path, dst_prefix + dst.path]
    run2ok(args)
def test_v4only(mscp):
    """-4 forces IPv4; copying to localhost over v4 must work."""
    src = File("src", size=1024).make()
    dst = File("dst")
    remote = "localhost:{}/".format(os.getcwd())
    run2ok([mscp, "-vvv", "-4", src.path, remote + dst.path])
    assert check_same_md5sum(src, dst)
def test_v6only(mscp):
    """-6 forces IPv6; copying via ip6-localhost must work."""
    src = File("src", size=1024).make()
    dst = File("dst")
    remote = "ip6-localhost:{}/".format(os.getcwd())
    run2ok([mscp, "-vvv", "-6", src.path, remote + dst.path])
    assert check_same_md5sum(src, dst)
def test_v4_to_v6_should_fail(mscp):
    """-4 with an IPv6 literal destination must fail."""
    src = File("src", size=1024).make()
    dst = File("dst")
    remote = "[::1]:{}/".format(os.getcwd())
    run2ng([mscp, "-vvv", "-4", src.path, remote + dst.path])
def test_v6_to_v4_should_fail(mscp):
    """-6 with an IPv4 literal destination must fail."""
    src = File("src", size=1024).make()
    dst = File("dst")
    remote = "127.0.0.1:{}/".format(os.getcwd())
    run2ng([mscp, "-vvv", "-6", src.path, remote + dst.path])
def test_quiet_mode(capsys, mscp):
    """-q must suppress all of mscp's output."""
    src = File("src", size=1024).make()
    dst = File("dst")
    remote = "127.0.0.1:{}/".format(os.getcwd())
    run2ok([mscp, "-vvv", "-q", src.path, remote + dst.path], quiet=True)
    assert check_same_md5sum(src, dst)
    # NOTE(review): capsys captures Python-level sys.stdout/stderr; if
    # run2ok spawns mscp with inherited file descriptors, capfd would be
    # the fixture that actually sees its output -- confirm against
    # util's run2ok implementation.
    out = capsys.readouterr()
    assert not out.out
    assert not out.err
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_set_conn_interval(mscp, src_prefix, dst_prefix):
    """-I sets an interval between SSH connection attempts."""
    pairs = [(File("src/file{}".format(i), size=128).make(),
              File("dst/file{}".format(i)))
             for i in range(500)]
    run2ok([mscp, "-vvv", "-I", 1, src_prefix + "src", dst_prefix + "dst"])
    for s, d in pairs:
        assert check_same_md5sum(s, d)
compressions = ["yes", "no", "none"]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("compress", compressions)
def test_compression(mscp, src_prefix, dst_prefix, compress):
src = File("src", size = 1024 * 1024).make()
dst = File("dst", size = 1024 * 1024 * 2).make()
run2ok([mscp, "-vvv", "-C", compress, src_prefix + src.path, dst_prefix + "dst"])
assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_ccalgo(mscp, src_prefix, dst_prefix):
    """-g selects a TCP congestion-control algorithm.

    Only Linux supports TCP_CONGESTION; on Darwin the option must fail,
    and on any other platform the test is skipped.
    """
    src = File("src", size = 1024 * 1024).make()
    dst = File("dst").make()
    if platform.system() == "Darwin":
        # Darwin does not support TCP_CONGESTION
        algo = "cubic"
        run = run2ng
    elif platform.system() == "Linux":
        # Linux supports TCP_CONGESTION; use the last allowed algorithm
        with open("/proc/sys/net/ipv4/tcp_allowed_congestion_control", "r") as f:
            algo = f.read().strip().split().pop()
        run = run2ok
    else:
        # Previously `run` and `algo` were left unbound here, raising
        # UnboundLocalError on e.g. FreeBSD; skip explicitly instead.
        pytest.skip("no TCP_CONGESTION expectation for {}".format(
            platform.system()))
    run([mscp, "-vvv", "-g", algo, src_prefix + src.path, dst_prefix + "dst"])
testhost = "mscptestlocalhost"
testhost_prefix = "{}:{}/".format(testhost, os.getcwd()) # use current dir
param_testhost_prefix = [
("", testhost_prefix), (testhost_prefix, "")
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_testhost_prefix)
def test_config_ok(mscp, src_prefix, dst_prefix):
config = "/tmp/mscp_test_ssh_config"
with open(config, "w") as f:
f.write("host {}\n".format(testhost))
f.write(" hostname localhost\n")
src = File("src", size = 1024 * 1024).make()
dst = File("dst", size = 1024 * 1024 * 2).make()
run2ok([mscp, "-vvv", "-F", config,
src_prefix + src.path, dst_prefix + "dst"])
os.remove(config)
assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_testhost_prefix)
def test_config_ng(mscp, src_prefix, dst_prefix):
    """-F with an empty config cannot resolve testhost, so it must fail."""
    config = "/tmp/mscp_test_ssh_config"
    with open(config, "w") as f:
        f.write("\n") # use empty ssh_config
    src = File("src", size=1024 * 1024).make()
    dst = File("dst", size=1024 * 1024 * 2).make()
    run2ng([mscp, "-vvv", "-F", config,
            src_prefix + src.path, dst_prefix + "dst"])
    os.remove(config)
param_valid_option_ok = [
    ["-o", "Port=8022"],
    ["-o", "Port=8022", "-o", "User=root"],
    ["-o", "unknown-option-is-silently-ignored"],
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("option", param_valid_option_ok)
def test_inline_option_ok(mscp, src_prefix, dst_prefix, option):
    """ change port number with -o option. it should be ok. """
    src = File("src", size=1024 * 1024).make()
    dst = File("dst")
    args = [mscp, "-vvv", *option,
            src_prefix + src.path, dst_prefix + dst.path]
    run2ok(args)
    assert check_same_md5sum(src, dst)
param_valid_option_ng = [
    ["-o", "Port=8023"],
    ["-o", "User=invaliduser"],
]
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
@pytest.mark.parametrize("option", param_valid_option_ng)
def test_inline_option_ng(mscp, src_prefix, dst_prefix, option):
    """ change port number with -o option. it should be ng. """
    src = File("src", size=1024 * 1024).make()
    dst = File("dst")
    args = [mscp, "-vvv", *option,
            src_prefix + src.path, dst_prefix + dst.path]
    run2ng(args)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_porxyjump_ok(mscp, src_prefix, dst_prefix):
    """ test -J proxyjump option"""
    # NOTE: "porxyjump" is a typo for "proxyjump"; renaming would change
    # the collected test id, so the name is kept as-is.
    src = File("src", size=10 * 1024 * 1024).make()
    dst = File("dst")
    # use small min-chunk-size to use multiple connections
    jump = ["-J", "localhost:8022"]
    run2ok([mscp, "-n", 4, "-s", 1024 * 1024, "-vvv"] + jump +
           [src_prefix + src.path, dst_prefix + dst.path])
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_porxyjump_ng(mscp, src_prefix, dst_prefix):
    """ test -J proxyjump option, invalid jump node causes fail"""
    src = File("src", size=10 * 1024 * 1024).make()
    dst = File("dst")
    # use small min-chunk-size to use multiple connections
    jump = ["-J", "invaliduser@localhost:8022"]
    run2ng([mscp, "-n", 4, "-s", 1024 * 1024, "-vvv"] + jump +
           [src_prefix + src.path, dst_prefix + dst.path])
# username test assumes that this test runs inside a container, see Dockerfiles
def test_specify_passphrase_via_env(mscp):
    """MSCP_SSH_AUTH_PASSPHRASE must unlock an encrypted private key."""
    src = File(os.getcwd() + "/src", size = 1024).make()
    dst = File("/home/test/dst")
    # copy(): assigning os.environ directly aliases the live process
    # environment, so the passphrase would leak into every later test.
    env = os.environ.copy()
    env["MSCP_SSH_AUTH_PASSPHRASE"] = "keypassphrase"
    run2ok([mscp, "-vvv", "-l", "test", "-i", "/home/test/.ssh/id_rsa_test",
            src.path, "localhost:" + dst.path], env = env)
    assert check_same_md5sum(src, dst)
def test_specify_invalid_passphrase_via_env(mscp):
    """A wrong MSCP_SSH_AUTH_PASSPHRASE must make authentication fail."""
    src = File(os.getcwd() + "/src", size = 1024).make()
    dst = File("/home/test/dst")
    # copy() so the bogus passphrase does not pollute os.environ for
    # subsequent tests (os.environ itself must stay untouched).
    env = os.environ.copy()
    env["MSCP_SSH_AUTH_PASSPHRASE"] = "invalid-keypassphrase"
    run2ng([mscp, "-vvv", "-l", "test", "-i", "/home/test/.ssh/id_rsa_test",
            src.path, "localhost:" + dst.path], env = env)
def test_specify_password_via_env(mscp):
    """MSCP_SSH_AUTH_PASSWORD must enable password authentication."""
    src = File(os.getcwd() + "/src", size = 1024).make()
    dst = File("/home/test/dst")
    # copy() so the password does not leak into the live environment
    # and, through it, into later tests.
    env = os.environ.copy()
    env["MSCP_SSH_AUTH_PASSWORD"] = "userpassword"
    run2ok([mscp, "-vvv", "-l", "test",
            src.path, "localhost:" + dst.path], env = env)
    assert check_same_md5sum(src, dst)
def test_specify_invalid_password_via_env(mscp):
    """A wrong MSCP_SSH_AUTH_PASSWORD must make authentication fail."""
    src = File(os.getcwd() + "/src", size = 1024).make()
    dst = File("/home/test/dst")
    # copy() so the bogus password does not persist in os.environ.
    env = os.environ.copy()
    env["MSCP_SSH_AUTH_PASSWORD"] = "invalid-userpassword"
    run2ng([mscp, "-vvv", "-l", "test",
            src.path, "localhost:" + dst.path], env = env)
@pytest.fixture
def move_pubkey_temporally():
    """
    mv ~/.ssh/id_* to /tmp before the test, and move them back afterwards.
    """
    sshdir = os.path.join(os.environ["HOME"], ".ssh")
    # stash every key file whose base name matches id_<alnum>
    stashed = [name for name in os.listdir(sshdir)
               if re.match(r"^id_[a-z0-9]+$", name)]
    for name in stashed:
        shutil.move(os.path.join(sshdir, name), os.path.join("/tmp", name))
    yield
    # restore the keys
    for name in stashed:
        shutil.move(os.path.join("/tmp", name), os.path.join(sshdir, name))
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_passwordauth_without_pubkey(move_pubkey_temporally,
                                     mscp, src_prefix, dst_prefix):
    """
    make sure password auth works (by removing public keys)
    """
    src = File(os.getcwd() + "/src", size = 1024).make()
    dst = File("/home/test/dst")
    # copy(): aliasing os.environ would leak the password into the live
    # process environment and thus into subsequent tests.
    env = os.environ.copy()
    env["MSCP_SSH_AUTH_PASSWORD"] = "userpassword"
    run2ok([mscp, "-vvv", "-l", "test",
            src.path, "localhost:" + dst.path], env = env)
    assert check_same_md5sum(src, dst)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_10k_files(mscp, src_prefix, dst_prefix):
    """Copy 10,000 small files in a single invocation."""
    pairs = [(File("src/src-{:06d}".format(i), size=1024).make(),
              File("dst/src-{:06d}".format(i)))
             for i in range(10000)]
    run2ok([mscp, "-v", src_prefix + "src", dst_prefix + "dst"])
    for s, d in pairs:
        assert check_same_md5sum(s, d)
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_checkpoint_dump_and_resume(mscp, src_prefix, dst_prefix):
    """-W with -D dumps a checkpoint without copying; -R resumes from it."""
    src1 = File("src1", size=64 * 1024 * 1024).make()
    src2 = File("src2", size=64 * 1024 * 1024).make()
    dst1, dst2 = File("dst/src1"), File("dst/src2")
    run2ok([mscp, "-vvv", "-W", "checkpoint", "-D",
            src_prefix + "src1", src_prefix + "src2", dst_prefix + "dst"])
    assert os.path.exists("checkpoint")
    # resuming from the dump performs the actual transfer
    run2ok([mscp, "-vvv", "-R", "checkpoint"])
    assert check_same_md5sum(src1, dst1)
    assert check_same_md5sum(src2, dst2)
    os.remove("checkpoint")
@pytest.mark.parametrize("timeout", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_checkpoint_interrupt_large_file(mscp, timeout, src_prefix, dst_prefix):
    """Copy two 100MB files with 200Mbps -> 4 sec + 4 sec """
    src1 = File("src1", size=100 * 1024 * 1024).make()
    src2 = File("src2", size=100 * 1024 * 1024).make()
    dst1, dst2 = File("dst/src1"), File("dst/src2")
    # interrupt the rate-limited transfer after `timeout` seconds;
    # a checkpoint file must be left behind
    run2ng([mscp, "-vv", "-W", "checkpoint", "-L", "200m",
            src_prefix + "src1", src_prefix + "src2", dst_prefix + "dst"],
           timeout=timeout)
    assert os.path.exists("checkpoint")
    # resuming must complete the remainder
    run2ok([mscp, "-vv", "-R", "checkpoint"])
    assert check_same_md5sum(src1, dst1)
    assert check_same_md5sum(src2, dst2)
    os.remove("checkpoint")
@pytest.mark.parametrize("timeout", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("src_prefix, dst_prefix", param_remote_prefix)
def test_checkpoint_interrupt_many_files(mscp, timeout, src_prefix, dst_prefix):
    """Copy 100 1-MB files with 4 connections, and interrupt and
    resume the transfer
    """
    pairs = [(File("src/{:03d}".format(i), size=1024 * 1024).make(),
              File("dst/{:03d}".format(i)))
             for i in range(100)]
    # interrupt the rate-limited transfer; a checkpoint must remain
    run2ng([mscp, "-vv", "-W", "checkpoint", "-L", "80m", "-n", 4,
            src_prefix + "src", dst_prefix + "dst"],
           timeout=timeout)
    assert os.path.exists("checkpoint")
    run2ok([mscp, "-vv", "-R", "checkpoint"])
    for s, d in pairs:
        assert check_same_md5sum(s, d)
    os.remove("checkpoint")
================================================
FILE: test/util.py
================================================
import hashlib
import os
def check_same_md5sum(fa, fb):
    """Return True when *fa* and *fb* hash to the same MD5 digest."""
    return fa.md5sum() == fb.md5sum()
class File():
    """A test file descriptor: a path plus how to materialize it on disk.

    `content` selects the payload written by make(): "zero" creates a
    file of NUL bytes, "random" fills it from os.urandom().
    """
    def __init__(self, path, size = 0, content = "random", perm = 0o664):
        if not content in ["zero", "random"]:
            raise ValueError("invalid type: {}".format(content))
        self.path = path        # file path (relative or absolute)
        self.size = size        # payload size in bytes
        self.content = content  # "zero" or "random"
        self.perm = perm        # mode applied after creation

    def __repr__(self):
        # Was `"".format(...)`, which ignored its arguments and always
        # returned the empty string.
        return "<File {} ({} bytes)>".format(self.path, self.size)

    def __str__(self):
        return self.path

    def make(self, size = None):
        """Create the file (and parent dirs); returns self for chaining."""
        # `is not None` so that make(size=0) can shrink the file; the old
        # truthiness test silently ignored an explicit 0.
        if size is not None:
            self.size = size
        d = os.path.dirname(self.path)
        if d:
            os.makedirs(d, exist_ok = True)
        if self.content == "zero":
            self.make_content_zero()
        elif self.content == "random":
            self.make_content_random()
        else:
            raise ValueError("invalid content type: {}".format(self.content))
        # chmod last: a read-only perm must not block the writes above
        os.chmod(self.path, self.perm)
        return self

    def make_content_zero(self):
        with open(self.path, "wb") as f:
            # seek() alone does not extend a file; truncate() allocates
            # a (sparse) file of exactly self.size zero bytes.
            f.truncate(self.size)

    def make_content_random(self):
        with open(self.path, "wb") as f:
            f.write(os.urandom(self.size))

    def cleanup(self, preserve_dir = False):
        """Remove the file and, unless preserve_dir, any emptied parents."""
        os.remove(self.path)
        if preserve_dir:
            return
        tmp = os.path.dirname(self.path)
        while tmp and not tmp in [".", "/"]:
            if len(os.listdir(tmp)) == 0:
                os.rmdir(tmp)
            tmp = os.path.dirname(tmp)

    def md5sum(self):
        """Return the hex MD5 digest of the file's current content."""
        m = hashlib.md5()
        with open(self.path, 'rb') as f:
            # read in multiples of the hash block size to bound memory
            for chunk in iter(lambda: f.read(4096 * m.block_size), b''):
                m.update(chunk)
        return m.hexdigest()